From 7319d02b1e93841ee0b4a74c2bfbe9fc288cadbd Mon Sep 17 00:00:00 2001
From: Nick Schrock
Date: Thu, 12 Oct 2023 01:58:32 -0400
Subject: [PATCH] External Assets Concept Page (#16935)

## Summary & Motivation

Adds an External Assets concept page (motivation described in
https://github.com/dagster-io/dagster/pull/16754). This also contains a code change
necessary because of the bug demonstrated in
https://github.com/dagster-io/dagster/pull/17077.

## How I Tested These Changes

BK. Also loaded examples in `dagster dev`

---------

Co-authored-by: Erin Cochran
Co-authored-by: Yuhan Luo <4531914+yuhan@users.noreply.github.com>
---
 docs/content/_navigation.json                 |   4 +
 docs/content/api/modules.json                 |   2 +-
 docs/content/concepts.mdx                     |   4 +
 .../concepts/assets/external-assets.mdx       | 334 ++++++++++++++++++
 .../images/concepts/assets/external-asset.png | Bin 0 -> 327433 bytes
 ...external-assets-normal-dep-on-external.png | Bin 0 -> 356464 bytes
 .../assets/external-assets-show-detail.png    | Bin 0 -> 352307 bytes
 .../assets/external_assets/__init__.py        |   0
 .../external_assets/external_asset_deps.py    |   6 +
 .../external_asset_events_using_python_api.py |  21 ++
 .../external_asset_using_sensor.py            |  36 ++
 .../normal_asset_depending_on_external.py     |  15 +
 .../external_assets/single_declaration.py     |   3 +
 .../update_external_asset_via_op.py           |  24 ++
 .../external_asset_tests/__init__.py          |   0
 .../test_external_asset_python_api.py         |  11 +
 .../test_external_asset_sensor.py             |  23 ++
 .../test_external_assets_decls.py             |  45 +++
 .../test_external_assets_with_ops.py          |  14 +
 python_modules/dagster/dagster/__init__.py    |   4 +
 .../_core/definitions/external_asset.py       |   4 +
 .../_core/definitions/sensor_definition.py    |   4 +
 .../definitions_tests/test_external_assets.py |  49 +++
 23 files changed, 602 insertions(+), 1 deletion(-)
 create mode 100644 docs/content/concepts/assets/external-assets.mdx
 create mode 100644 docs/next/public/images/concepts/assets/external-asset.png
 create mode 100644 docs/next/public/images/concepts/assets/external-assets-normal-dep-on-external.png
 create mode 100644 docs/next/public/images/concepts/assets/external-assets-show-detail.png
 create mode 100644 examples/docs_snippets/docs_snippets/concepts/assets/external_assets/__init__.py
 create mode 100644 examples/docs_snippets/docs_snippets/concepts/assets/external_assets/external_asset_deps.py
 create mode 100644 examples/docs_snippets/docs_snippets/concepts/assets/external_assets/external_asset_events_using_python_api.py
 create mode 100644 examples/docs_snippets/docs_snippets/concepts/assets/external_assets/external_asset_using_sensor.py
 create mode 100644 examples/docs_snippets/docs_snippets/concepts/assets/external_assets/normal_asset_depending_on_external.py
 create mode 100644 examples/docs_snippets/docs_snippets/concepts/assets/external_assets/single_declaration.py
 create mode 100644 examples/docs_snippets/docs_snippets/concepts/assets/external_assets/update_external_asset_via_op.py
 create mode 100644 examples/docs_snippets/docs_snippets_tests/concepts_tests/external_asset_tests/__init__.py
 create mode 100644 examples/docs_snippets/docs_snippets_tests/concepts_tests/external_asset_tests/test_external_asset_python_api.py
 create mode 100644 examples/docs_snippets/docs_snippets_tests/concepts_tests/external_asset_tests/test_external_asset_sensor.py
 create mode 100644 examples/docs_snippets/docs_snippets_tests/concepts_tests/external_asset_tests/test_external_assets_decls.py
 create mode 100644 examples/docs_snippets/docs_snippets_tests/concepts_tests/external_asset_tests/test_external_assets_with_ops.py

diff --git a/docs/content/_navigation.json b/docs/content/_navigation.json
index 499a50ec9f833..6c07d6aa1ea0c 100644
--- a/docs/content/_navigation.json
+++ b/docs/content/_navigation.json
@@ -105,6 +105,10 @@
         {
           "title": "Asset checks (Experimental)",
           "path": "/concepts/assets/asset-checks"
+        },
+        {
+          "title": "External assets (Experimental)",
+          "path": "/concepts/assets/external-assets"
         }
       ]
     },
diff --git a/docs/content/api/modules.json b/docs/content/api/modules.json
index a2b134013878e..74fe0d8fae09a 100644
--- a/docs/content/api/modules.json
+++ b/docs/content/api/modules.json
@@ -1 +1 @@
-{"": {"dagster_pandera": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_pandera

\nimport itertools\nimport re\nfrom typing import TYPE_CHECKING, Callable, Sequence, Type, Union\n\nimport dagster._check as check\nimport pandas as pd\nimport pandera as pa\nfrom dagster import (\n    DagsterType,\n    TableColumn,\n    TableColumnConstraints,\n    TableConstraints,\n    TableSchema,\n    TypeCheck,\n    TypeCheckContext,\n)\nfrom dagster._core.definitions.metadata import MetadataValue\nfrom dagster._core.libraries import DagsterLibraryRegistry\n\nfrom .version import __version__\n\n# NOTE: Pandera supports multiple dataframe libraries. Most of the alternatives\n# to pandas implement a pandas-like API wrapper around an underlying library\n# that can handle big data (a weakness of pandas). Typically this means the\n# data is only partly loaded into memory, or is distributed across multiple\n# nodes. Because Dagster types perform runtime validation within a single\n# Python process, it's not clear at present how to interface the more complex\n# validation computations on distributed dataframes with Dagster Types.\n\n# Therefore, for the time being dagster-pandera only supports pandas dataframes.\n# However, some commented-out scaffolding has been left in place for support of\n# alternatives in the future. These sections are marked with "TODO: pending\n# alternative dataframe support".\n\nif TYPE_CHECKING:\n    ValidatableDataFrame = pd.DataFrame\n\nDagsterLibraryRegistry.register("dagster-pandera", __version__)\n\n# ########################\n# ##### VALID DATAFRAME CLASSES\n# ########################\n\n# This layer of indirection is used because we may support alternative dataframe classes in the\n# future.\nVALID_DATAFRAME_CLASSES = (pd.DataFrame,)\n\n\n# ########################\n# ##### PANDERA SCHEMA TO DAGSTER TYPE\n# ########################\n\n\n
[docs]def pandera_schema_to_dagster_type(\n schema: Union[pa.DataFrameSchema, Type[pa.SchemaModel]],\n) -> DagsterType:\n """Convert a Pandera dataframe schema to a `DagsterType`.\n\n The generated Dagster type will be given an automatically generated `name`. The schema's `title`\n property, `name` property, or class name (in that order) will be used. If neither `title` or\n `name` is defined, a name of the form `DagsterPanderaDataframe<n>` is generated.\n\n Additional metadata is also extracted from the Pandera schema and attached to the returned\n `DagsterType` as a metadata dictionary. The extracted metadata includes:\n\n - Descriptions on the schema and constituent columns and checks.\n - Data types for each column.\n - String representations of all column-wise checks.\n - String representations of all row-wise (i.e. "wide") checks.\n\n The returned `DagsterType` type will call the Pandera schema's `validate()` method in its type\n check function. Validation is done in `lazy` mode, i.e. pandera will attempt to validate all\n values in the dataframe, rather than stopping on the first error.\n\n If validation fails, the returned `TypeCheck` object will contain two pieces of metadata:\n\n - `num_failures` total number of validation errors.\n - `failure_sample` a table containing up to the first 10 validation errors.\n\n Args:\n schema (Union[pa.DataFrameSchema, Type[pa.SchemaModel]]):\n\n Returns:\n DagsterType: Dagster Type constructed from the Pandera schema.\n\n """\n if not (\n isinstance(schema, pa.DataFrameSchema)\n or (isinstance(schema, type) and issubclass(schema, pa.SchemaModel))\n ):\n raise TypeError(\n "schema must be a pandera `DataFrameSchema` or a subclass of a pandera `SchemaModel`"\n )\n\n name = _extract_name_from_pandera_schema(schema)\n norm_schema = (\n schema.to_schema()\n if isinstance(schema, type) and issubclass(schema, pa.SchemaModel)\n else schema\n )\n tschema = _pandera_schema_to_table_schema(norm_schema)\n type_check_fn = _pandera_schema_to_type_check_fn(norm_schema, tschema)\n\n return DagsterType(\n type_check_fn=type_check_fn,\n name=name,\n description=norm_schema.description,\n metadata={\n "schema": MetadataValue.table_schema(tschema),\n },\n typing_type=pd.DataFrame,\n )
\n\n\n# call next() on this to generate next unique Dagster Type name for anonymous schemas\n_anonymous_schema_name_generator = (f"DagsterPanderaDataframe{i}" for i in itertools.count(start=1))\n\n\ndef _extract_name_from_pandera_schema(\n schema: Union[pa.DataFrameSchema, Type[pa.SchemaModel]],\n) -> str:\n if isinstance(schema, type) and issubclass(schema, pa.SchemaModel):\n return (\n getattr(schema.Config, "title", None)\n or getattr(schema.Config, "name", None)\n or schema.__name__\n )\n elif isinstance(schema, pa.DataFrameSchema):\n return schema.title or schema.name or next(_anonymous_schema_name_generator)\n\n\ndef _pandera_schema_to_type_check_fn(\n schema: pa.DataFrameSchema,\n table_schema: TableSchema,\n) -> Callable[[TypeCheckContext, object], TypeCheck]:\n def type_check_fn(_context, value: object) -> TypeCheck:\n if isinstance(value, VALID_DATAFRAME_CLASSES):\n try:\n # `lazy` instructs pandera to capture every (not just the first) validation error\n schema.validate(value, lazy=True)\n except pa.errors.SchemaErrors as e:\n return _pandera_errors_to_type_check(e, table_schema)\n except Exception as e:\n return TypeCheck(\n success=False,\n description=f"Unexpected error during validation: {e}",\n )\n else:\n return TypeCheck(\n success=False,\n description=(\n f"Must be one of {VALID_DATAFRAME_CLASSES}, not {type(value).__name__}."\n ),\n )\n\n return TypeCheck(success=True)\n\n return type_check_fn\n\n\nPANDERA_FAILURE_CASES_SCHEMA = TableSchema(\n columns=[\n TableColumn(\n name="schema_context",\n type="string",\n description="`Column` for column-wise checks, or `DataFrameSchema`",\n ),\n TableColumn(\n name="column",\n type="string",\n description="Column of value that failed the check, or `None` for wide checks.",\n ),\n TableColumn(\n name="check", type="string", description="Description of the failed Pandera check."\n ),\n TableColumn(name="check_number", description="Index of the failed check."),\n TableColumn(\n name="failure_case", type="number | string", description="Value that failed a check."\n ),\n TableColumn(\n name="index",\n type="number | string",\n description="Index (row) of value that failed a check.",\n ),\n ]\n)\n\n\ndef _pandera_errors_to_type_check(\n error: pa.errors.SchemaErrors, _table_schema: TableSchema\n) -> TypeCheck:\n return TypeCheck(\n success=False,\n description=str(error),\n )\n\n\ndef _pandera_schema_to_table_schema(schema: pa.DataFrameSchema) -> TableSchema:\n df_constraints = _pandera_schema_wide_checks_to_table_constraints(schema.checks)\n columns = [_pandera_column_to_table_column(col) for k, col in schema.columns.items()]\n return TableSchema(columns=columns, constraints=df_constraints)\n\n\ndef _pandera_schema_wide_checks_to_table_constraints(\n checks: Sequence[Union[pa.Check, pa.Hypothesis]]\n) -> TableConstraints:\n return TableConstraints(other=[_pandera_check_to_table_constraint(check) for check in checks])\n\n\ndef _pandera_check_to_table_constraint(pa_check: Union[pa.Check, pa.Hypothesis]) -> str:\n return _get_pandera_check_identifier(pa_check)\n\n\ndef _pandera_column_to_table_column(pa_column: pa.Column) -> TableColumn:\n constraints = TableColumnConstraints(\n nullable=pa_column.nullable,\n unique=pa_column.unique,\n other=[_pandera_check_to_column_constraint(pa_check) for pa_check in pa_column.checks],\n )\n name = check.not_none(pa_column.name, "name")\n name = name if isinstance(name, str) else "/".join(name)\n return TableColumn(\n name=name,\n type=str(pa_column.dtype),\n description=pa_column.description,\n 
constraints=constraints,\n )\n\n\nCHECK_OPERATORS = {\n "equal_to": "==",\n "not_equal_to": "!=",\n "less_than": "<",\n "less_than_or_equal_to": "<=",\n "greater_than": ">",\n "greater_than_or_equal_to": ">=",\n}\n\n\ndef _extract_operand(error_str: str) -> str:\n match = re.search(r"(?<=\\().+(?=\\))", error_str)\n return match.group(0) if match else ""\n\n\ndef _pandera_check_to_column_constraint(pa_check: pa.Check) -> str:\n if pa_check.description:\n return pa_check.description\n elif pa_check.name in CHECK_OPERATORS:\n assert isinstance(\n pa_check.error, str\n ), "Expected pandera check to have string `error` attr."\n return f"{CHECK_OPERATORS[pa_check.name]} {_extract_operand(pa_check.error)}"\n else:\n return _get_pandera_check_identifier(pa_check)\n\n\ndef _get_pandera_check_identifier(pa_check: Union[pa.Check, pa.Hypothesis]) -> str:\n return pa_check.description or pa_check.error or pa_check.name or str(pa_check)\n\n\n__all__ = [\n "pandera_schema_to_dagster_type",\n]\n
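A brief usage sketch (not part of the original module) of pandera_schema_to_dagster_type; the schema, column names, and checks below are illustrative and assume pandas, pandera, and dagster-pandera are installed.

import pandera as pa

from dagster_pandera import pandera_schema_to_dagster_type

# Illustrative schema: the names and checks are assumptions, not taken from the patch.
stock_prices_schema = pa.DataFrameSchema(
    {
        "ticker": pa.Column(str, description="Stock ticker symbol"),
        "price": pa.Column(float, pa.Check.ge(0), description="Closing price in USD"),
    },
    name="StockPrices",
)

# The returned DagsterType validates dataframes with schema.validate(..., lazy=True)
# and attaches the extracted TableSchema as metadata.
StockPricesDagsterType = pandera_schema_to_dagster_type(stock_prices_schema)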
", "current_page_name": "_modules/dagster_pandera", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_pandera"}, "dagster_pipes": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_pipes

\nimport base64\nimport datetime\nimport json\nimport logging\nimport os\nimport sys\nimport time\nimport warnings\nimport zlib\nfrom abc import ABC, abstractmethod\nfrom contextlib import ExitStack, contextmanager\nfrom io import StringIO\nfrom queue import Queue\nfrom threading import Event, Thread\nfrom typing import (\n    IO,\n    TYPE_CHECKING,\n    Any,\n    ClassVar,\n    Dict,\n    Generic,\n    Iterable,\n    Iterator,\n    Literal,\n    Mapping,\n    Optional,\n    Sequence,\n    Set,\n    TextIO,\n    Type,\n    TypedDict,\n    TypeVar,\n    Union,\n    cast,\n    get_args,\n)\n\nif TYPE_CHECKING:\n    from unittest.mock import MagicMock\n\n# ########################\n# ##### PROTOCOL\n# ########################\n\n# This represents the version of the protocol, rather than the version of the package. It must be\n# manually updated whenever there are changes to the protocol.\nPIPES_PROTOCOL_VERSION = "0.1"\n\nPipesExtras = Mapping[str, Any]\nPipesParams = Mapping[str, Any]\n\n\n# ##### MESSAGE\n\n\ndef _make_message(method: str, params: Optional[Mapping[str, Any]]) -> "PipesMessage":\n    return {\n        PIPES_PROTOCOL_VERSION_FIELD: PIPES_PROTOCOL_VERSION,\n        "method": method,\n        "params": params,\n    }\n\n\n# Can't use a constant for TypedDict key so this value is repeated in `ExtMessage` defn.\nPIPES_PROTOCOL_VERSION_FIELD = "__dagster_pipes_version"\n\n\nclass PipesMessage(TypedDict):\n    """A message sent from the external process to the orchestration process."""\n\n    __dagster_pipes_version: str\n    method: str\n    params: Optional[Mapping[str, Any]]\n\n\n###### PIPES CONTEXT\n\n\nclass PipesContextData(TypedDict):\n    """The serializable data passed from the orchestration process to the external process. This gets\n    wrapped in a :py:class:`PipesContext`.\n    """\n\n    asset_keys: Optional[Sequence[str]]\n    code_version_by_asset_key: Optional[Mapping[str, Optional[str]]]\n    provenance_by_asset_key: Optional[Mapping[str, Optional["PipesDataProvenance"]]]\n    partition_key: Optional[str]\n    partition_key_range: Optional["PipesPartitionKeyRange"]\n    partition_time_window: Optional["PipesTimeWindow"]\n    run_id: str\n    job_name: Optional[str]\n    retry_number: int\n    extras: Mapping[str, Any]\n\n\nclass PipesPartitionKeyRange(TypedDict):\n    """A range of partition keys."""\n\n    start: str\n    end: str\n\n\nclass PipesTimeWindow(TypedDict):\n    """A span of time delimited by a start and end timestamp. This is defined for time-based partitioning schemes."""\n\n    start: str  # timestamp\n    end: str  # timestamp\n\n\nclass PipesDataProvenance(TypedDict):\n    """Provenance information for an asset."""\n\n    code_version: str\n    input_data_versions: Mapping[str, str]\n    is_user_provided: bool\n\n\nPipesAssetCheckSeverity = Literal["WARN", "ERROR"]\n\nPipesMetadataRawValue = Union[int, float, str, Mapping[str, Any], Sequence[Any], bool, None]\n\n\nclass PipesMetadataValue(TypedDict):\n    type: "PipesMetadataType"\n    raw_value: PipesMetadataRawValue\n\n\n# Infer the type from the raw value on the orchestration end\nPIPES_METADATA_TYPE_INFER = "__infer__"\n\nPipesMetadataType = Literal[\n    "__infer__",\n    "text",\n    "url",\n    "path",\n    "notebook",\n    "json",\n    "md",\n    "float",\n    "int",\n    "bool",\n    "dagster_run",\n    "asset",\n    "null",\n]\n\n# ########################\n# ##### UTIL\n# ########################\n\n_T = TypeVar("_T")\n\n\n
[docs]class DagsterPipesError(Exception):\n pass
\n\n\n
[docs]class DagsterPipesWarning(Warning):\n pass
\n\n\ndef _assert_not_none(value: Optional[_T], desc: Optional[str] = None) -> _T:\n if value is None:\n raise DagsterPipesError(f"Missing required property: {desc}")\n return value\n\n\ndef _assert_defined_asset_property(value: Optional[_T], key: str) -> _T:\n return _assert_not_none(value, f"`{key}` is undefined. Current step does not target an asset.")\n\n\n# This should only be called under the precondition that the current step targets assets.\ndef _assert_single_asset(data: PipesContextData, key: str) -> None:\n asset_keys = data["asset_keys"]\n assert asset_keys is not None\n if len(asset_keys) != 1:\n raise DagsterPipesError(f"`{key}` is undefined. Current step targets multiple assets.")\n\n\ndef _resolve_optionally_passed_asset_key(\n data: PipesContextData,\n asset_key: Optional[str],\n method: str,\n) -> str:\n asset_key = _assert_opt_param_type(asset_key, str, method, "asset_key")\n\n defined_asset_keys = data["asset_keys"]\n if defined_asset_keys:\n if asset_key and asset_key not in defined_asset_keys:\n raise DagsterPipesError(\n f"Invalid asset key. Expected one of `{defined_asset_keys}`, got `{asset_key}`."\n )\n if not asset_key:\n if len(defined_asset_keys) != 1:\n raise DagsterPipesError(\n f"Calling `{method}` without passing an asset key is undefined. Current step"\n " targets multiple assets."\n )\n asset_key = defined_asset_keys[0]\n\n if not asset_key:\n raise DagsterPipesError(\n f"Calling `{method}` without passing an asset key is undefined. Current step"\n " does not target a specific asset."\n )\n\n return asset_key\n\n\ndef _assert_defined_partition_property(value: Optional[_T], key: str) -> _T:\n return _assert_not_none(\n value, f"`{key}` is undefined. Current step does not target any partitions."\n )\n\n\n# This should only be called under the precondition that the current steps targets assets.\ndef _assert_single_partition(data: PipesContextData, key: str) -> None:\n partition_key_range = data["partition_key_range"]\n assert partition_key_range is not None\n if partition_key_range["start"] != partition_key_range["end"]:\n raise DagsterPipesError(f"`{key}` is undefined. Current step targets multiple partitions.")\n\n\ndef _assert_defined_extra(extras: PipesExtras, key: str) -> Any:\n if key not in extras:\n raise DagsterPipesError(f"Extra `{key}` is undefined. Extras must be provided by user.")\n return extras[key]\n\n\ndef _assert_param_type(value: _T, expected_type: Any, method: str, param: str) -> _T:\n if not isinstance(value, expected_type):\n raise DagsterPipesError(\n f"Invalid type for parameter `{param}` of `{method}`. Expected `{expected_type}`, got"\n f" `{type(value)}`."\n )\n return value\n\n\ndef _assert_opt_param_type(value: _T, expected_type: Any, method: str, param: str) -> _T:\n if not (isinstance(value, expected_type) or value is None):\n raise DagsterPipesError(\n f"Invalid type for parameter `{param}` of `{method}`. Expected"\n f" `Optional[{expected_type}]`, got `{type(value)}`."\n )\n return value\n\n\ndef _assert_env_param_type(\n env_params: PipesParams, key: str, expected_type: Type[_T], cls: Type\n) -> _T:\n value = env_params.get(key)\n if not isinstance(value, expected_type):\n raise DagsterPipesError(\n f"Invalid type for parameter `{key}` passed from orchestration side to"\n f" `{cls.__name__}`. 
Expected `{expected_type}`, got `{type(value)}`."\n )\n return value\n\n\ndef _assert_opt_env_param_type(\n env_params: PipesParams, key: str, expected_type: Type[_T], cls: Type\n) -> Optional[_T]:\n value = env_params.get(key)\n if value is not None and not isinstance(value, expected_type):\n raise DagsterPipesError(\n f"Invalid type for parameter `{key}` passed from orchestration side to"\n f" `{cls.__name__}`. Expected `Optional[{expected_type}]`, got `{type(value)}`."\n )\n return value\n\n\ndef _assert_param_value(value: _T, expected_values: Iterable[_T], method: str, param: str) -> _T:\n if value not in expected_values:\n raise DagsterPipesError(\n f"Invalid value for parameter `{param}` of `{method}`. Expected one of"\n f" `{expected_values}`, got `{value}`."\n )\n return value\n\n\ndef _assert_opt_param_value(\n value: _T, expected_values: Sequence[_T], method: str, param: str\n) -> _T:\n if value is not None and value not in expected_values:\n raise DagsterPipesError(\n f"Invalid value for parameter `{param}` of `{method}`. Expected one of"\n f" `{expected_values}`, got `{value}`."\n )\n return value\n\n\ndef _json_serialize_param(value: Any, method: str, param: str) -> str:\n try:\n serialized = json.dumps(value)\n except (TypeError, OverflowError):\n raise DagsterPipesError(\n f"Invalid type for parameter `{param}` of `{method}`. Expected a JSON-serializable"\n f" type, got `{type(value)}`."\n )\n return serialized\n\n\n_METADATA_VALUE_KEYS = frozenset(PipesMetadataValue.__annotations__.keys())\n_METADATA_TYPES = frozenset(get_args(PipesMetadataType))\n\n\ndef _normalize_param_metadata(\n metadata: Mapping[str, Union[PipesMetadataRawValue, PipesMetadataValue]],\n method: str,\n param: str,\n) -> Mapping[str, Union[PipesMetadataRawValue, PipesMetadataValue]]:\n _assert_param_type(metadata, dict, method, param)\n new_metadata: Dict[str, PipesMetadataValue] = {}\n for key, value in metadata.items():\n if not isinstance(key, str):\n raise DagsterPipesError(\n f"Invalid type for parameter `{param}` of `{method}`. Expected a dict with string"\n f" keys, got a key `{key}` of type `{type(key)}`."\n )\n elif isinstance(value, dict):\n if not {*value.keys()} == _METADATA_VALUE_KEYS:\n raise DagsterPipesError(\n f"Invalid type for parameter `{param}` of `{method}`. Expected a dict with"\n " string keys and values that are either raw metadata values or dictionaries"\n f" with schema `{{raw_value: ..., type: ...}}`. Got a value `{value}`."\n )\n _assert_param_value(value["type"], _METADATA_TYPES, method, f"{param}.{key}.type")\n new_metadata[key] = cast(PipesMetadataValue, value)\n else:\n new_metadata[key] = {"raw_value": value, "type": PIPES_METADATA_TYPE_INFER}\n return new_metadata\n\n\ndef _param_from_env_var(env_var: str) -> Any:\n raw_value = os.environ.get(env_var)\n return decode_env_var(raw_value) if raw_value is not None else None\n\n\n
[docs]def encode_env_var(value: Any) -> str:\n """Encode value by serializing to JSON, compressing with zlib, and finally encoding with base64.\n `base64_encode(compress(to_json(value)))` in function notation.\n\n Args:\n value (Any): The value to encode. Must be JSON-serializable.\n\n Returns:\n str: The encoded value.\n """\n serialized = _json_serialize_param(value, "encode_env_var", "value")\n compressed = zlib.compress(serialized.encode("utf-8"))\n encoded = base64.b64encode(compressed)\n return encoded.decode("utf-8") # as string
\n\n\n
[docs]def decode_env_var(value: str) -> Any:\n """Decode a value by decoding from base64, decompressing with zlib, and finally deserializing from\n JSON. `from_json(decompress(base64_decode(value)))` in function notation.\n\n Args:\n value (Any): The value to decode.\n\n Returns:\n Any: The decoded value.\n """\n decoded = base64.b64decode(value)\n decompressed = zlib.decompress(decoded)\n return json.loads(decompressed.decode("utf-8"))
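A quick round-trip sketch of the scheme described in the two docstrings above (JSON-serialize, zlib-compress, base64-encode, and the inverse); the payload value is illustrative.

payload = {"bucket": "my-bucket", "retries": 3}  # illustrative value
encoded = encode_env_var(payload)  # a plain string, safe to place in an environment variable
assert decode_env_var(encoded) == payload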
\n\n\ndef _emit_orchestration_inactive_warning() -> None:\n warnings.warn(\n "This process was not launched by a Dagster orchestration process. All calls to the"\n " `dagster-pipes` context or attempts to initialize `dagster-pipes` abstractions"\n " are no-ops.",\n category=DagsterPipesWarning,\n )\n\n\ndef _get_mock() -> "MagicMock":\n from unittest.mock import MagicMock\n\n return MagicMock()\n\n\nclass _PipesLogger(logging.Logger):\n def __init__(self, context: "PipesContext") -> None:\n super().__init__(name="dagster-pipes")\n self.addHandler(_PipesLoggerHandler(context))\n\n\nclass _PipesLoggerHandler(logging.Handler):\n def __init__(self, context: "PipesContext") -> None:\n super().__init__()\n self._context = context\n\n def emit(self, record: logging.LogRecord) -> None:\n self._context._write_message( # noqa: SLF001\n "log", {"message": record.getMessage(), "level": record.levelname}\n )\n\n\n# ########################\n# ##### IO - BASE\n# ########################\n\n\n
[docs]class PipesContextLoader(ABC):\n
[docs] @abstractmethod\n @contextmanager\n def load_context(self, params: PipesParams) -> Iterator[PipesContextData]:\n """A `@contextmanager` that loads context data injected by the orchestration process.\n\n This method should read and yield the context data from the location specified by the passed in\n `PipesParams`.\n\n Args:\n params (PipesParams): The params provided by the context injector in the orchestration\n process.\n\n Yields:\n PipesContextData: The context data.\n """
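As a hedged sketch of what a concrete loader might look like, the hypothetical class below reads the injected context from a local JSON file, mirroring the "path" param used by PipesDefaultContextLoader further down.

class LocalJsonFileContextLoader(PipesContextLoader):  # hypothetical example class
    @contextmanager
    def load_context(self, params: PipesParams) -> Iterator[PipesContextData]:
        # Assumes the orchestration side provided a "path" param pointing at a JSON file.
        with open(params["path"], "r") as f:
            yield json.load(f)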
\n\n\nT_MessageChannel = TypeVar("T_MessageChannel", bound="PipesMessageWriterChannel")\n\n\n
[docs]class PipesMessageWriter(ABC, Generic[T_MessageChannel]):\n
[docs] @abstractmethod\n @contextmanager\n def open(self, params: PipesParams) -> Iterator[T_MessageChannel]:\n """A `@contextmanager` that initializes a channel for writing messages back to Dagster.\n\n This method should take the params passed by the orchestration-side\n :py:class:`PipesMessageReader` and use them to construct and yield a\n :py:class:`PipesMessageWriterChannel`.\n\n Args:\n params (PipesParams): The params provided by the message reader in the orchestration\n process.\n\n Yields:\n PipesMessageWriterChannel: Channel for writing messages back to Dagster.\n """
\n\n\n
[docs]class PipesMessageWriterChannel(ABC, Generic[T_MessageChannel]):\n """Object that writes messages back to the Dagster orchestration process."""\n\n
[docs] @abstractmethod\n def write_message(self, message: PipesMessage) -> None:\n """Write a message to the orchestration process.\n\n Args:\n message (PipesMessage): The message to write.\n """
\n\n\n
[docs]class PipesParamsLoader(ABC):\n """Object that loads params passed from the orchestration process by the context injector and\n message reader. These params are used to respectively bootstrap the\n :py:class:`PipesContextLoader` and :py:class:`PipesMessageWriter`.\n """\n\n
[docs] @abstractmethod\n def is_dagster_pipes_process(self) -> bool:\n """Whether or not this process has been provided with information to create\n a PipesContext or should instead return a mock.\n """
\n\n
[docs] @abstractmethod\n def load_context_params(self) -> PipesParams:\n """PipesParams: Load params passed by the orchestration-side context injector."""
\n\n
[docs] @abstractmethod\n def load_messages_params(self) -> PipesParams:\n """PipesParams: Load params passed by the orchestration-side message reader."""
\n\n\nT_BlobStoreMessageWriterChannel = TypeVar(\n "T_BlobStoreMessageWriterChannel", bound="PipesBlobStoreMessageWriterChannel"\n)\n\n\n
[docs]class PipesBlobStoreMessageWriter(PipesMessageWriter[T_BlobStoreMessageWriterChannel]):\n """Message writer that periodically uploads message chunks to some blob store endpoint."""\n\n def __init__(self, *, interval: float = 10):\n self.interval = interval\n\n
[docs] @contextmanager\n def open(self, params: PipesParams) -> Iterator[T_BlobStoreMessageWriterChannel]:\n """Construct and yield a :py:class:`PipesBlobStoreMessageWriterChannel`.\n\n Args:\n params (PipesParams): The params provided by the message reader in the orchestration\n process.\n\n Yields:\n PipesBlobStoreMessageWriterChannel: Channel that periodically uploads message chunks to\n a blob store.\n """\n channel = self.make_channel(params)\n with channel.buffered_upload_loop():\n yield channel
\n\n
[docs] @abstractmethod\n def make_channel(self, params: PipesParams) -> T_BlobStoreMessageWriterChannel: ...
\n\n\n
[docs]class PipesBlobStoreMessageWriterChannel(PipesMessageWriterChannel):\n """Message writer channel that periodically uploads message chunks to some blob store endpoint."""\n\n def __init__(self, *, interval: float = 10):\n self._interval = interval\n self._buffer: Queue[PipesMessage] = Queue()\n self._counter = 1\n\n
[docs] def write_message(self, message: PipesMessage) -> None:\n self._buffer.put(message)
\n\n
[docs] def flush_messages(self) -> Sequence[PipesMessage]:\n items = []\n while not self._buffer.empty():\n items.append(self._buffer.get())\n return items
\n\n
[docs] @abstractmethod\n def upload_messages_chunk(self, payload: StringIO, index: int) -> None: ...
\n\n
[docs] @contextmanager\n def buffered_upload_loop(self) -> Iterator[None]:\n thread = None\n is_task_complete = Event()\n try:\n thread = Thread(target=self._upload_loop, args=(is_task_complete,), daemon=True)\n thread.start()\n yield\n finally:\n is_task_complete.set()\n if thread:\n thread.join(timeout=60)
\n\n def _upload_loop(self, is_task_complete: Event) -> None:\n start_or_last_upload = datetime.datetime.now()\n while True:\n now = datetime.datetime.now()\n if self._buffer.empty() and is_task_complete.is_set():\n break\n elif is_task_complete.is_set() or (now - start_or_last_upload).seconds > self._interval:\n payload = "\\n".join([json.dumps(message) for message in self.flush_messages()])\n if len(payload) > 0:\n self.upload_messages_chunk(StringIO(payload), self._counter)\n start_or_last_upload = now\n self._counter += 1\n time.sleep(1)
\n\n\n
[docs]class PipesBufferedFilesystemMessageWriterChannel(PipesBlobStoreMessageWriterChannel):\n """Message writer channel that periodically writes message chunks to an endpoint mounted on the filesystem.\n\n Args:\n interval (float): interval in seconds between chunk uploads\n """\n\n def __init__(self, path: str, *, interval: float = 10):\n super().__init__(interval=interval)\n self._path = path\n\n
[docs] def upload_messages_chunk(self, payload: IO, index: int) -> None:\n message_path = os.path.join(self._path, f"{index}.json")\n with open(message_path, "w") as f:\n f.write(payload.read())
\n\n\n# ########################\n# ##### IO - DEFAULT\n# ########################\n\n\n
[docs]class PipesDefaultContextLoader(PipesContextLoader):\n """Context loader that loads context data from either a file or directly from the provided params.\n\n The location of the context data is configured by the params received by the loader. If the params\n include a key `path`, then the context data will be loaded from a file at the specified path. If\n the params instead include a key `data`, then the corresponding value should be a dict\n representing the context data.\n """\n\n FILE_PATH_KEY = "path"\n DIRECT_KEY = "data"\n\n
[docs] @contextmanager\n def load_context(self, params: PipesParams) -> Iterator[PipesContextData]:\n if self.FILE_PATH_KEY in params:\n path = _assert_env_param_type(params, self.FILE_PATH_KEY, str, self.__class__)\n with open(path, "r") as f:\n data = json.load(f)\n yield data\n elif self.DIRECT_KEY in params:\n data = _assert_env_param_type(params, self.DIRECT_KEY, dict, self.__class__)\n yield cast(PipesContextData, data)\n else:\n raise DagsterPipesError(\n f'Invalid params for {self.__class__.__name__}, expected key "{self.FILE_PATH_KEY}"'\n f' or "{self.DIRECT_KEY}", received {params}',\n )
\n\n\n
[docs]class PipesDefaultMessageWriter(PipesMessageWriter):\n """Message writer that writes messages to either a file or the stdout or stderr stream.\n\n The write location is configured by the params received by the writer. If the params include a\n key `path`, then messages will be written to a file at the specified path. If the params instead\n include a key `stdio`, then the corresponding value must specify either `stderr`\n or `stdout`, and messages will be written to the selected stream.\n """\n\n FILE_PATH_KEY = "path"\n STDIO_KEY = "stdio"\n STDERR = "stderr"\n STDOUT = "stdout"\n\n
[docs] @contextmanager\n def open(self, params: PipesParams) -> Iterator[PipesMessageWriterChannel]:\n if self.FILE_PATH_KEY in params:\n path = _assert_env_param_type(params, self.FILE_PATH_KEY, str, self.__class__)\n yield PipesFileMessageWriterChannel(path)\n elif self.STDIO_KEY in params:\n stream = _assert_env_param_type(params, self.STDIO_KEY, str, self.__class__)\n if stream == self.STDERR:\n yield PipesStreamMessageWriterChannel(sys.stderr)\n elif stream == self.STDOUT:\n yield PipesStreamMessageWriterChannel(sys.stdout)\n else:\n raise DagsterPipesError(\n f'Invalid value for key "std", expected "{self.STDERR}" or "{self.STDOUT}" but'\n f" received {stream}"\n )\n else:\n raise DagsterPipesError(\n f'Invalid params for {self.__class__.__name__}, expected key "path" or "std",'\n f" received {params}"\n )
\n\n\n
[docs]class PipesFileMessageWriterChannel(PipesMessageWriterChannel):\n """Message writer channel that writes one message per line to a file."""\n\n def __init__(self, path: str):\n self._path = path\n\n
[docs] def write_message(self, message: PipesMessage) -> None:\n with open(self._path, "a") as f:\n f.write(json.dumps(message) + "\\n")
\n\n\n
[docs]class PipesStreamMessageWriterChannel(PipesMessageWriterChannel):\n """Message writer channel that writes one message per line to a `TextIO` stream."""\n\n def __init__(self, stream: TextIO):\n self._stream = stream\n\n
[docs] def write_message(self, message: PipesMessage) -> None:\n self._stream.writelines((json.dumps(message), "\\n"))
\n\n\nDAGSTER_PIPES_CONTEXT_ENV_VAR = "DAGSTER_PIPES_CONTEXT"\nDAGSTER_PIPES_MESSAGES_ENV_VAR = "DAGSTER_PIPES_MESSAGES"\n\n\n
[docs]class PipesEnvVarParamsLoader(PipesParamsLoader):\n """Params loader that extracts params from environment variables."""\n\n
[docs] def is_dagster_pipes_process(self) -> bool:\n # use the presence of DAGSTER_PIPES_CONTEXT to discern if we are in a pipes process\n return DAGSTER_PIPES_CONTEXT_ENV_VAR in os.environ
\n\n
[docs] def load_context_params(self) -> PipesParams:\n return _param_from_env_var(DAGSTER_PIPES_CONTEXT_ENV_VAR)
\n\n
[docs] def load_messages_params(self) -> PipesParams:\n return _param_from_env_var(DAGSTER_PIPES_MESSAGES_ENV_VAR)
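A sketch of the round trip these environment variables rely on, using the encode_env_var helper defined above; the param value is illustrative.

# The orchestration side would normally set this; shown here only to illustrate the decoding path.
os.environ[DAGSTER_PIPES_CONTEXT_ENV_VAR] = encode_env_var({"path": "/tmp/pipes_context.json"})
params = PipesEnvVarParamsLoader().load_context_params()
assert params == {"path": "/tmp/pipes_context.json"}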
\n\n\n# ########################\n# ##### IO - S3\n# ########################\n\n\n
[docs]class PipesS3MessageWriter(PipesBlobStoreMessageWriter):\n """Message writer that writes messages by periodically writing message chunks to an S3 bucket.\n\n Args:\n client (Any): A boto3.client("s3") object.\n interval (float): interval in seconds between message chunk uploads\n """\n\n # client is a boto3.client("s3") object\n def __init__(self, client: Any, *, interval: float = 10):\n super().__init__(interval=interval)\n # Not checking client type for now because it's a boto3.client object and we don't want to\n # depend on boto3.\n self._client = client\n\n
[docs] def make_channel(\n self,\n params: PipesParams,\n ) -> "PipesS3MessageWriterChannel":\n bucket = _assert_env_param_type(params, "bucket", str, self.__class__)\n key_prefix = _assert_opt_env_param_type(params, "key_prefix", str, self.__class__)\n return PipesS3MessageWriterChannel(\n client=self._client,\n bucket=bucket,\n key_prefix=key_prefix,\n interval=self.interval,\n )
\n\n\n
[docs]class PipesS3MessageWriterChannel(PipesBlobStoreMessageWriterChannel):\n """Message writer channel for writing messages by periodically writing message chunks to an S3 bucket.\n\n Args:\n client (Any): A boto3.client("s3") object.\n bucket (str): The name of the S3 bucket to write to.\n key_prefix (Optional[str]): An optional prefix to use for the keys of written blobs.\n interval (float): interval in seconds between message chunk uploads\n """\n\n # client is a boto3.client("s3") object\n def __init__(\n self, client: Any, bucket: str, key_prefix: Optional[str], *, interval: float = 10\n ):\n super().__init__(interval=interval)\n self._client = client\n self._bucket = bucket\n self._key_prefix = key_prefix\n\n
[docs] def upload_messages_chunk(self, payload: IO, index: int) -> None:\n key = f"{self._key_prefix}/{index}.json" if self._key_prefix else f"{index}.json"\n self._client.put_object(\n Body=payload.read(),\n Bucket=self._bucket,\n Key=key,\n )
\n\n\n# ########################\n# ##### IO - DBFS\n# ########################\n\n\n
[docs]class PipesDbfsContextLoader(PipesContextLoader):\n """Context loader that reads context from a JSON file on DBFS."""\n\n
[docs] @contextmanager\n def load_context(self, params: PipesParams) -> Iterator[PipesContextData]:\n unmounted_path = _assert_env_param_type(params, "path", str, self.__class__)\n path = os.path.join("/dbfs", unmounted_path.lstrip("/"))\n with open(path, "r") as f:\n yield json.load(f)
\n\n\n
[docs]class PipesDbfsMessageWriter(PipesBlobStoreMessageWriter):\n """Message writer that writes messages by periodically writing message chunks to a directory on DBFS."""\n\n
[docs] def make_channel(\n self,\n params: PipesParams,\n ) -> "PipesBufferedFilesystemMessageWriterChannel":\n unmounted_path = _assert_env_param_type(params, "path", str, self.__class__)\n return PipesBufferedFilesystemMessageWriterChannel(\n path=os.path.join("/dbfs", unmounted_path.lstrip("/")),\n interval=self.interval,\n )
\n\n\n# ########################\n# ##### CONTEXT\n# ########################\n\n\n
[docs]def open_dagster_pipes(\n *,\n context_loader: Optional[PipesContextLoader] = None,\n message_writer: Optional[PipesMessageWriter] = None,\n params_loader: Optional[PipesParamsLoader] = None,\n) -> "PipesContext":\n """Initialize the Dagster Pipes context.\n\n This function should be called near the entry point of a pipes process. It will load injected\n context information from Dagster and spin up the machinery for streaming messages back to\n Dagster.\n\n If the process was not launched by Dagster, this function will emit a warning and return a\n `MagicMock` object. This should make all operations on the context no-ops and prevent your code\n from crashing.\n\n Args:\n context_loader (Optional[PipesContextLoader]): The context loader to use. Defaults to\n :py:class:`PipesDefaultContextLoader`.\n message_writer (Optional[PipesMessageWriter]): The message writer to use. Defaults to\n :py:class:`PipesDefaultMessageWriter`.\n params_loader (Optional[PipesParamsLoader]): The params loader to use. Defaults to\n :py:class:`PipesEnvVarParamsLoader`.\n\n Returns:\n PipesContext: The initialized context.\n """\n if PipesContext.is_initialized():\n return PipesContext.get()\n\n params_loader = params_loader or PipesEnvVarParamsLoader()\n if params_loader.is_dagster_pipes_process():\n context_loader = context_loader or PipesDefaultContextLoader()\n message_writer = message_writer or PipesDefaultMessageWriter()\n context = PipesContext(params_loader, context_loader, message_writer)\n else:\n _emit_orchestration_inactive_warning()\n context = _get_mock()\n PipesContext.set(context)\n return context
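A minimal entry-point sketch for an external script, based on the docstring above; the log message is illustrative.

with open_dagster_pipes() as pipes:
    # Log lines are streamed back to the Dagster event log when launched by Dagster;
    # outside of Dagster this is a no-op on a MagicMock, as described above.
    pipes.log.info("starting external computation")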
\n\n\n
[docs]class PipesContext:\n """The context for a Dagster Pipes process.\n\n This class is analogous to :py:class:`~dagster.OpExecutionContext` on the Dagster side of the Pipes\n connection. It provides access to information such as the asset key(s) and partition key(s) in\n scope for the current step. It also provides methods for logging and emitting results that will\n be streamed back to Dagster.\n\n This class should not be directly instantiated by the user. Instead it should be initialized by\n calling :py:func:`open_dagster_pipes()`, which will return the singleton instance of this class.\n After `open_dagster_pipes()` has been called, the singleton instance can also be retrieved by\n calling :py:func:`PipesContext.get`.\n """\n\n _instance: ClassVar[Optional["PipesContext"]] = None\n\n
[docs] @classmethod\n def is_initialized(cls) -> bool:\n """bool: Whether the context has been initialized."""\n return cls._instance is not None
\n\n
[docs] @classmethod\n def set(cls, context: "PipesContext") -> None:\n """Set the singleton instance of the context."""\n cls._instance = context
\n\n
[docs] @classmethod\n def get(cls) -> "PipesContext":\n """Get the singleton instance of the context. Raises an error if the context has not been initialized."""\n if cls._instance is None:\n raise Exception(\n "PipesContext has not been initialized. You must call `open_dagster_pipes()`."\n )\n return cls._instance
\n\n def __init__(\n self,\n params_loader: PipesParamsLoader,\n context_loader: PipesContextLoader,\n message_writer: PipesMessageWriter,\n ) -> None:\n context_params = params_loader.load_context_params()\n messages_params = params_loader.load_messages_params()\n self._io_stack = ExitStack()\n self._data = self._io_stack.enter_context(context_loader.load_context(context_params))\n self._message_channel = self._io_stack.enter_context(message_writer.open(messages_params))\n self._message_channel.write_message(_make_message("opened", {}))\n self._logger = _PipesLogger(self)\n self._materialized_assets: Set[str] = set()\n self._closed: bool = False\n\n def __enter__(self) -> "PipesContext":\n return self\n\n def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:\n self.close()\n\n
[docs] def close(self) -> None:\n """Close the pipes connection. This will flush all buffered messages to the orchestration\n process and cause any further attempt to write a message to raise an error. This method is\n idempotent-- subsequent calls after the first have no effect.\n """\n if not self._closed:\n self._message_channel.write_message(_make_message("closed", {}))\n self._io_stack.close()\n self._closed = True
\n\n @property\n def is_closed(self) -> bool:\n """bool: Whether the context has been closed."""\n return self._closed\n\n def _write_message(self, method: str, params: Optional[Mapping[str, Any]] = None) -> None:\n if self._closed:\n raise DagsterPipesError("Cannot send message after pipes context is closed.")\n message = _make_message(method, params)\n self._message_channel.write_message(message)\n\n # ########################\n # ##### PUBLIC API\n # ########################\n\n @property\n def is_asset_step(self) -> bool:\n """bool: Whether the current step targets assets."""\n return self._data["asset_keys"] is not None\n\n @property\n def asset_key(self) -> str:\n """str: The AssetKey for the currently scoped asset. Raises an error if 0 or multiple assets\n are in scope.\n """\n asset_keys = _assert_defined_asset_property(self._data["asset_keys"], "asset_key")\n _assert_single_asset(self._data, "asset_key")\n return asset_keys[0]\n\n @property\n def asset_keys(self) -> Sequence[str]:\n """Sequence[str]: The AssetKeys for the currently scoped assets. Raises an error if no\n assets are in scope.\n """\n asset_keys = _assert_defined_asset_property(self._data["asset_keys"], "asset_keys")\n return asset_keys\n\n @property\n def provenance(self) -> Optional[PipesDataProvenance]:\n """Optional[PipesDataProvenance]: The provenance for the currently scoped asset. Raises an\n error if 0 or multiple assets are in scope.\n """\n provenance_by_asset_key = _assert_defined_asset_property(\n self._data["provenance_by_asset_key"], "provenance"\n )\n _assert_single_asset(self._data, "provenance")\n return next(iter(provenance_by_asset_key.values()))\n\n @property\n def provenance_by_asset_key(self) -> Mapping[str, Optional[PipesDataProvenance]]:\n """Mapping[str, Optional[PipesDataProvenance]]: Mapping of asset key to provenance for the\n currently scoped assets. Raises an error if no assets are in scope.\n """\n provenance_by_asset_key = _assert_defined_asset_property(\n self._data["provenance_by_asset_key"], "provenance_by_asset_key"\n )\n return provenance_by_asset_key\n\n @property\n def code_version(self) -> Optional[str]:\n """Optional[str]: The code version for the currently scoped asset. Raises an error if 0 or\n multiple assets are in scope.\n """\n code_version_by_asset_key = _assert_defined_asset_property(\n self._data["code_version_by_asset_key"], "code_version"\n )\n _assert_single_asset(self._data, "code_version")\n return next(iter(code_version_by_asset_key.values()))\n\n @property\n def code_version_by_asset_key(self) -> Mapping[str, Optional[str]]:\n """Mapping[str, Optional[str]]: Mapping of asset key to code version for the currently\n scoped assets. Raises an error if no assets are in scope.\n """\n code_version_by_asset_key = _assert_defined_asset_property(\n self._data["code_version_by_asset_key"], "code_version_by_asset_key"\n )\n return code_version_by_asset_key\n\n @property\n def is_partition_step(self) -> bool:\n """bool: Whether the current step is scoped to one or more partitions."""\n return self._data["partition_key_range"] is not None\n\n @property\n def partition_key(self) -> str:\n """str: The partition key for the currently scoped partition. 
Raises an error if 0 or\n multiple partitions are in scope.\n """\n partition_key = _assert_defined_partition_property(\n self._data["partition_key"], "partition_key"\n )\n return partition_key\n\n @property\n def partition_key_range(self) -> "PipesPartitionKeyRange":\n """PipesPartitionKeyRange: The partition key range for the currently scoped partition or\n partitions. Raises an error if no partitions are in scope.\n """\n partition_key_range = _assert_defined_partition_property(\n self._data["partition_key_range"], "partition_key_range"\n )\n return partition_key_range\n\n @property\n def partition_time_window(self) -> Optional["PipesTimeWindow"]:\n """Optional[PipesTimeWindow]: The partition time window for the currently scoped partition\n or partitions. Returns None if partitions in scope are not temporal. Raises an error if no\n partitions are in scope.\n """\n # None is a valid value for partition_time_window, but we check that a partition key range\n # is defined.\n _assert_defined_partition_property(\n self._data["partition_key_range"], "partition_time_window"\n )\n return self._data["partition_time_window"]\n\n @property\n def run_id(self) -> str:\n """str: The run ID for the currently executing pipeline run."""\n return self._data["run_id"]\n\n @property\n def job_name(self) -> Optional[str]:\n """Optional[str]: The job name for the currently executing run. Returns None if the run is\n not derived from a job.\n """\n return self._data["job_name"]\n\n @property\n def retry_number(self) -> int:\n """int: The retry number for the currently executing run."""\n return self._data["retry_number"]\n\n
[docs] def get_extra(self, key: str) -> Any:\n """Get the value of an extra provided by the user. Raises an error if the extra is not defined.\n\n Args:\n key (str): The key of the extra.\n\n Returns:\n Any: The value of the extra.\n """\n return _assert_defined_extra(self._data["extras"], key)
\n\n @property\n def extras(self) -> Mapping[str, Any]:\n """Mapping[str, Any]: Key-value map for all extras provided by the user."""\n return self._data["extras"]\n\n # ##### WRITE\n\n
[docs] def report_asset_materialization(\n self,\n metadata: Optional[Mapping[str, Union[PipesMetadataRawValue, PipesMetadataValue]]] = None,\n data_version: Optional[str] = None,\n asset_key: Optional[str] = None,\n ) -> None:\n """Report to Dagster that an asset has been materialized. Streams a payload containing\n materialization information back to Dagster. If no assets are in scope, raises an error.\n\n Args:\n metadata (Optional[Mapping[str, Union[PipesMetadataRawValue, PipesMetadataValue]]]):\n Metadata for the materialized asset. Defaults to None.\n data_version (Optional[str]): The data version for the materialized asset.\n Defaults to None.\n asset_key (Optional[str]): The asset key for the materialized asset. If only a\n single asset is in scope, default to that asset's key. If multiple assets are in scope,\n this must be set explicitly or an error will be raised.\n """\n asset_key = _resolve_optionally_passed_asset_key(\n self._data, asset_key, "report_asset_materialization"\n )\n if asset_key in self._materialized_assets:\n raise DagsterPipesError(\n f"Calling `report_asset_materialization` with asset key `{asset_key}` is undefined."\n " Asset has already been materialized, so no additional data can be reported"\n " for it."\n )\n metadata = (\n _normalize_param_metadata(metadata, "report_asset_materialization", "metadata")\n if metadata\n else None\n )\n data_version = _assert_opt_param_type(\n data_version, str, "report_asset_materialization", "data_version"\n )\n self._write_message(\n "report_asset_materialization",\n {"asset_key": asset_key, "data_version": data_version, "metadata": metadata},\n )\n self._materialized_assets.add(asset_key)
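A usage sketch from the external-process side, assuming `pipes` is the context returned by open_dagster_pipes(); the metadata keys and values are illustrative.

pipes.report_asset_materialization(
    metadata={
        "row_count": 42,  # raw value; its type is inferred on the orchestration side
        "dashboard_url": {"raw_value": "https://example.com/dash", "type": "url"},
    },
    data_version="v1",
)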
\n\n
[docs] def report_asset_check(\n self,\n check_name: str,\n passed: bool,\n severity: PipesAssetCheckSeverity = "ERROR",\n metadata: Optional[Mapping[str, Union[PipesMetadataRawValue, PipesMetadataValue]]] = None,\n asset_key: Optional[str] = None,\n ) -> None:\n """Report to Dagster that an asset check has been performed. Streams a payload containing\n check result information back to Dagster. If no assets or associated checks are in scope, raises an error.\n\n Args:\n check_name (str): The name of the check.\n passed (bool): Whether the check passed.\n severity (PipesAssetCheckSeverity): The severity of the check. Defaults to "ERROR".\n metadata (Optional[Mapping[str, Union[PipesMetadataRawValue, PipesMetadataValue]]]):\n Metadata for the check. Defaults to None.\n asset_key (Optional[str]): The asset key for the check. If only a single asset is in\n scope, default to that asset's key. If multiple assets are in scope, this must be\n set explicitly or an error will be raised.\n """\n asset_key = _resolve_optionally_passed_asset_key(\n self._data, asset_key, "report_asset_check"\n )\n check_name = _assert_param_type(check_name, str, "report_asset_check", "check_name")\n passed = _assert_param_type(passed, bool, "report_asset_check", "passed")\n metadata = (\n _normalize_param_metadata(metadata, "report_asset_check", "metadata")\n if metadata\n else None\n )\n self._write_message(\n "report_asset_check",\n {\n "asset_key": asset_key,\n "check_name": check_name,\n "passed": passed,\n "metadata": metadata,\n "severity": severity,\n },\n )
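And a corresponding sketch for reporting an asset check result; the check name and metadata are illustrative.

pipes.report_asset_check(
    check_name="no_null_ids",
    passed=True,
    severity="WARN",  # one of the PipesAssetCheckSeverity literals
    metadata={"null_count": 0},
)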
\n\n @property\n def log(self) -> logging.Logger:\n """logging.Logger: A logger that streams log messages back to Dagster."""\n return self._logger
\n
", "current_page_name": "_modules/dagster_pipes", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_pipes"}, "index": {"alabaster_version": "0.7.13", "body": "

All modules for which code is available

\n", "current_page_name": "_modules/index", "customsidebar": null, "favicon_url": null, "logo_url": null, "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "Overview: module code"}}, "dagster": {"_config": {"config_schema": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._config.config_schema

\nfrom typing import TYPE_CHECKING, Any, Dict, List, Mapping, Sequence, Type, Union\n\nfrom typing_extensions import TypeAlias\n\nif TYPE_CHECKING:\n    from dagster._config import ConfigType, Field\n\n# Eventually, the below `UserConfigSchema` should be renamed to `ConfigSchema` and the class\n# definition should be dropped. The reason we don't do this now is that sphinx autodoc doesn't\n# support type aliases, so there is no good way to gracefully attach a docstring to this and have it\n# show up in the docs. See: https://github.com/sphinx-doc/sphinx/issues/8934\n#\n# Unfortunately mypy doesn't support recursive types, which would be used to properly define the\n# List/Dict elements of this union: `Dict[str, ConfigSchema]`, `List[ConfigSchema]`.\nUserConfigSchema: TypeAlias = Union[\n    Type[Union[bool, float, int, str]],\n    Type[Union[Dict[Any, Any], List[Any]]],\n    "ConfigType",\n    "Field",\n    Mapping[str, Any],\n    Sequence[Any],\n]\n\n\n
[docs]class ConfigSchema:\n """Placeholder type for config schemas.\n\n Any time that it appears in documentation, it means that any of the following types are\n acceptable:\n\n #. A Python scalar type that resolves to a Dagster config type\n (:py:class:`~python:int`, :py:class:`~python:float`, :py:class:`~python:bool`,\n or :py:class:`~python:str`). For example:\n\n * ``@op(config_schema=int)``\n * ``@op(config_schema=str)``\n\n #. A built-in python collection (:py:class:`~python:list`, or :py:class:`~python:dict`).\n :py:class:`~python:list` is exactly equivalent to :py:class:`~dagster.Array` [\n :py:class:`~dagster.Any` ] and :py:class:`~python:dict` is equivalent to\n :py:class:`~dagster.Permissive`. For example:\n\n * ``@op(config_schema=list)``\n * ``@op(config_schema=dict)``\n\n #. A Dagster config type:\n\n * :py:data:`~dagster.Any`\n * :py:class:`~dagster.Array`\n * :py:data:`~dagster.Bool`\n * :py:data:`~dagster.Enum`\n * :py:data:`~dagster.Float`\n * :py:data:`~dagster.Int`\n * :py:data:`~dagster.IntSource`\n * :py:data:`~dagster.Noneable`\n * :py:class:`~dagster.Permissive`\n * :py:class:`~dagster.Map`\n * :py:class:`~dagster.ScalarUnion`\n * :py:class:`~dagster.Selector`\n * :py:class:`~dagster.Shape`\n * :py:data:`~dagster.String`\n * :py:data:`~dagster.StringSource`\n\n\n #. A bare python dictionary, which will be automatically wrapped in\n :py:class:`~dagster.Shape`. Values of the dictionary are resolved recursively\n according to the same rules. For example:\n\n * ``{'some_config': str}`` is equivalent to ``Shape({'some_config: str})``.\n\n * ``{'some_config1': {'some_config2': str}}`` is equivalent to\n ``Shape({'some_config1: Shape({'some_config2: str})})``.\n\n #. A bare python list of length one, whose single element will be wrapped in a\n :py:class:`~dagster.Array` is resolved recursively according to the same\n rules. For example:\n\n * ``[str]`` is equivalent to ``Array[str]``.\n\n * ``[[str]]`` is equivalent to ``Array[Array[str]]``.\n\n * ``[{'some_config': str}]`` is equivalent to ``Array(Shape({'some_config: str}))``.\n\n #. An instance of :py:class:`~dagster.Field`.\n """\n\n def __init__(self):\n raise NotImplementedError(\n "ConfigSchema is a placeholder type and should not be instantiated."\n )
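A standalone sketch of the equivalences listed above; the op names are illustrative, and this is a usage example rather than part of the module.

from dagster import Field, Shape, op

@op(config_schema={"some_config": str})  # bare dict, automatically wrapped in Shape
def op_with_dict_schema(context):
    return context.op_config["some_config"]

@op(config_schema=Shape({"some_config": Field(str)}))  # equivalent explicit form
def op_with_shape_schema(context):
    return context.op_config["some_config"]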
\n
", "current_page_name": "_modules/dagster/_config/config_schema", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._config.config_schema"}, "config_type": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._config.config_type

\nimport typing\nfrom enum import Enum as PythonEnum\nfrom typing import TYPE_CHECKING, Dict, Iterator, Optional, Sequence, cast\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._builtins import BuiltinEnum\nfrom dagster._config import UserConfigSchema\nfrom dagster._serdes import whitelist_for_serdes\n\nif TYPE_CHECKING:\n    from .snap import ConfigSchemaSnapshot, ConfigTypeSnap\n\n\n@whitelist_for_serdes\nclass ConfigTypeKind(PythonEnum):\n    ANY = "ANY"\n    SCALAR = "SCALAR"\n    ENUM = "ENUM"\n\n    SELECTOR = "SELECTOR"\n    STRICT_SHAPE = "STRICT_SHAPE"\n    PERMISSIVE_SHAPE = "PERMISSIVE_SHAPE"\n    SCALAR_UNION = "SCALAR_UNION"\n\n    MAP = "MAP"\n\n    # Closed generic types\n    ARRAY = "ARRAY"\n    NONEABLE = "NONEABLE"\n\n    @staticmethod\n    def has_fields(kind: "ConfigTypeKind") -> bool:\n        check.inst_param(kind, "kind", ConfigTypeKind)\n        return kind == ConfigTypeKind.SELECTOR or ConfigTypeKind.is_shape(kind)\n\n    @staticmethod\n    def is_closed_generic(kind: "ConfigTypeKind") -> bool:\n        check.inst_param(kind, "kind", ConfigTypeKind)\n        return (\n            kind == ConfigTypeKind.ARRAY\n            or kind == ConfigTypeKind.NONEABLE\n            or kind == ConfigTypeKind.SCALAR_UNION\n            or kind == ConfigTypeKind.MAP\n        )\n\n    @staticmethod\n    def is_shape(kind: "ConfigTypeKind") -> bool:\n        check.inst_param(kind, "kind", ConfigTypeKind)\n        return kind == ConfigTypeKind.STRICT_SHAPE or kind == ConfigTypeKind.PERMISSIVE_SHAPE\n\n    @staticmethod\n    def is_selector(kind: "ConfigTypeKind") -> bool:\n        check.inst_param(kind, "kind", ConfigTypeKind)\n        return kind == ConfigTypeKind.SELECTOR\n\n\nclass ConfigType:\n    """The class backing DagsterTypes as they are used processing configuration data."""\n\n    def __init__(\n        self,\n        key: str,\n        kind: ConfigTypeKind,\n        given_name: Optional[str] = None,\n        description: Optional[str] = None,\n        type_params: Optional[Sequence["ConfigType"]] = None,\n    ):\n        self.key: str = check.str_param(key, "key")\n        self.kind: ConfigTypeKind = check.inst_param(kind, "kind", ConfigTypeKind)\n        self.given_name: Optional[str] = check.opt_str_param(given_name, "given_name")\n        self._description: Optional[str] = check.opt_str_param(description, "description")\n        self.type_params: Optional[Sequence[ConfigType]] = (\n            check.sequence_param(type_params, "type_params", of_type=ConfigType)\n            if type_params\n            else None\n        )\n\n        # memoized snap representation\n        self._snap: Optional["ConfigTypeSnap"] = None\n\n    @property\n    def description(self) -> Optional[str]:\n        return self._description\n\n    @staticmethod\n    def from_builtin_enum(builtin_enum: typing.Any) -> "ConfigType":\n        check.invariant(BuiltinEnum.contains(builtin_enum), "param must be member of BuiltinEnum")\n        return _CONFIG_MAP[builtin_enum]\n\n    def post_process(self, value):\n        """Implement this in order to take a value provided by the user\n        and perform computation on it. This can be done to coerce data types,\n        fetch things from the environment (e.g. environment variables), or\n        to do custom validation. If the value is not valid, throw a\n        PostProcessingError. 
Otherwise return the coerced value.\n        """\n        return value\n\n    def get_snapshot(self) -> "ConfigTypeSnap":\n        from .snap import snap_from_config_type\n\n        if self._snap is None:\n            self._snap = snap_from_config_type(self)\n\n        return self._snap\n\n    def type_iterator(self) -> Iterator["ConfigType"]:\n        yield self\n\n    def get_schema_snapshot(self) -> "ConfigSchemaSnapshot":\n        from .snap import ConfigSchemaSnapshot\n\n        return ConfigSchemaSnapshot({ct.key: ct.get_snapshot() for ct in self.type_iterator()})\n\n\n@whitelist_for_serdes\nclass ConfigScalarKind(PythonEnum):\n    INT = "INT"\n    STRING = "STRING"\n    FLOAT = "FLOAT"\n    BOOL = "BOOL"\n\n\n# Scalars, Composites, Selectors, Lists, Optional, Any\n\n\nclass ConfigScalar(ConfigType):\n    def __init__(\n        self,\n        key: str,\n        given_name: Optional[str],\n        scalar_kind: ConfigScalarKind,\n        **kwargs: typing.Any,\n    ):\n        self.scalar_kind = check.inst_param(scalar_kind, "scalar_kind", ConfigScalarKind)\n        super(ConfigScalar, self).__init__(\n            key, kind=ConfigTypeKind.SCALAR, given_name=given_name, **kwargs\n        )\n\n\nclass BuiltinConfigScalar(ConfigScalar):\n    def __init__(self, scalar_kind, description=None):\n        super(BuiltinConfigScalar, self).__init__(\n            key=type(self).__name__,\n            given_name=type(self).__name__,\n            scalar_kind=scalar_kind,\n            description=description,\n        )\n\n\nclass Int(BuiltinConfigScalar):\n    def __init__(self):\n        super(Int, self).__init__(scalar_kind=ConfigScalarKind.INT, description="")\n\n\nclass String(BuiltinConfigScalar):\n    def __init__(self):\n        super(String, self).__init__(scalar_kind=ConfigScalarKind.STRING, description="")\n\n\nclass Bool(BuiltinConfigScalar):\n    def __init__(self):\n        super(Bool, self).__init__(scalar_kind=ConfigScalarKind.BOOL, description="")\n\n\nclass Float(BuiltinConfigScalar):\n    def __init__(self):\n        super(Float, self).__init__(scalar_kind=ConfigScalarKind.FLOAT, description="")\n\n    def post_process(self, value):\n        return float(value)\n\n\nclass Any(ConfigType):\n    def __init__(self):\n        super(Any, self).__init__(\n            key="Any",\n            given_name="Any",\n            kind=ConfigTypeKind.ANY,\n        )\n\n\n
[docs]class Noneable(ConfigType):\n """Defines a configuration type that is the union of ``NoneType`` and the type ``inner_type``.\n\n Args:\n inner_type (type):\n The type of the values that this configuration type can contain.\n\n **Examples:**\n\n .. code-block:: python\n\n config_schema={"name": Noneable(str)}\n\n config={"name": "Hello"} # Ok\n config={"name": None} # Ok\n config={} # Error\n """\n\n def __init__(self, inner_type: object):\n from .field import resolve_to_config_type\n\n self.inner_type = cast(ConfigType, resolve_to_config_type(inner_type))\n super(Noneable, self).__init__(\n key=f"Noneable.{self.inner_type.key}",\n kind=ConfigTypeKind.NONEABLE,\n type_params=[self.inner_type],\n )\n\n def type_iterator(self) -> Iterator["ConfigType"]:\n yield from self.inner_type.type_iterator()\n yield from super().type_iterator()
\n\n\n
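A minimal runnable sketch of the ``Noneable`` type documented above, showing that validation accepts either a string or ``None`` for the field (the op and job names here are illustrative, not part of this changeset):

.. code-block:: python

    from dagster import Noneable, job, op


    @op(config_schema={"name": Noneable(str)})
    def greet(context):
        # op_config["name"] is either a str or None
        name = context.op_config["name"] or "world"
        context.log.info(f"Hello, {name}!")


    @job
    def greet_job():
        greet()


    if __name__ == "__main__":
        # Both configs validate; omitting "name" entirely would be an error.
        greet_job.execute_in_process(
            run_config={"ops": {"greet": {"config": {"name": "Dagster"}}}}
        )
        greet_job.execute_in_process(
            run_config={"ops": {"greet": {"config": {"name": None}}}}
        )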
[docs]class Array(ConfigType):\n """Defines an array (list) configuration type that contains values of type ``inner_type``.\n\n Args:\n inner_type (type):\n The type of the values that this configuration type can contain.\n """\n\n def __init__(self, inner_type: object):\n from .field import resolve_to_config_type\n\n self.inner_type = cast(ConfigType, resolve_to_config_type(inner_type))\n super(Array, self).__init__(\n key=f"Array.{self.inner_type.key}",\n type_params=[self.inner_type],\n kind=ConfigTypeKind.ARRAY,\n )\n\n @public\n @property\n def description(self) -> str:\n """A human-readable description of this Array type."""\n return f"List of {self.key}"\n\n def type_iterator(self) -> Iterator["ConfigType"]:\n yield from self.inner_type.type_iterator()\n yield from super().type_iterator()
\n\n\n
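The ``Array`` docstring above carries no usage example; here is a minimal sketch (op and job names are illustrative) of an ``Array(int)`` field with a default value:

.. code-block:: python

    from dagster import Array, Field, job, op


    @op(
        config_schema={
            "numbers": Field(Array(int), default_value=[1, 2, 3], is_required=False)
        }
    )
    def sum_numbers(context) -> int:
        return sum(context.op_config["numbers"])


    @job
    def sum_job():
        sum_numbers()


    if __name__ == "__main__":
        result = sum_job.execute_in_process(
            run_config={"ops": {"sum_numbers": {"config": {"numbers": [4, 5, 6]}}}}
        )
        assert result.output_for_node("sum_numbers") == 15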
[docs]class EnumValue:\n """Define an entry in a :py:class:`Enum`.\n\n Args:\n config_value (str):\n The string representation of the config to accept when passed.\n python_value (Optional[Any]):\n The python value to convert the enum entry in to. Defaults to the ``config_value``.\n description (Optional[str]):\n A human-readable description of the enum entry.\n\n """\n\n def __init__(\n self,\n config_value: str,\n python_value: Optional[object] = None,\n description: Optional[str] = None,\n ):\n self.config_value = check.str_param(config_value, "config_value")\n self.python_value = config_value if python_value is None else python_value\n self.description = check.opt_str_param(description, "description")
\n\n\n
[docs]class Enum(ConfigType):\n """Defines a enum configuration type that allows one of a defined set of possible values.\n\n Args:\n name (str):\n The name of the enum configuration type.\n enum_values (List[EnumValue]):\n The set of possible values for the enum configuration type.\n\n **Examples:**\n\n .. code-block:: python\n\n @op(\n config_schema=Field(\n Enum(\n 'CowboyType',\n [\n EnumValue('good'),\n EnumValue('bad'),\n EnumValue('ugly'),\n ]\n )\n )\n )\n def resolve_standoff(context):\n # ...\n """\n\n def __init__(self, name: str, enum_values: Sequence[EnumValue]):\n check.str_param(name, "name")\n super(Enum, self).__init__(key=name, given_name=name, kind=ConfigTypeKind.ENUM)\n self.enum_values = check.sequence_param(enum_values, "enum_values", of_type=EnumValue)\n self._valid_python_values = {ev.python_value for ev in enum_values}\n check.invariant(len(self._valid_python_values) == len(enum_values))\n self._valid_config_values = {ev.config_value for ev in enum_values}\n check.invariant(len(self._valid_config_values) == len(enum_values))\n\n @property\n def config_values(self):\n return [ev.config_value for ev in self.enum_values]\n\n def is_valid_config_enum_value(self, config_value):\n return config_value in self._valid_config_values\n\n def post_process(self, value: typing.Any) -> typing.Any:\n if isinstance(value, PythonEnum):\n value = value.name\n\n for ev in self.enum_values:\n if ev.config_value == value:\n return ev.python_value\n\n check.failed(f"Should never reach this. config_value should be pre-validated. Got {value}")\n\n @classmethod\n def from_python_enum(cls, enum, name=None):\n """Create a Dagster enum corresponding to an existing Python enum.\n\n Args:\n enum (enum.EnumMeta):\n The class representing the enum.\n name (Optional[str]):\n The name for the enum. If not present, `enum.__name__` will be used.\n\n Example:\n .. code-block:: python\n\n class Color(enum.Enum):\n RED = enum.auto()\n GREEN = enum.auto()\n BLUE = enum.auto()\n\n @op(\n config_schema={"color": Field(Enum.from_python_enum(Color))}\n )\n def select_color(context):\n assert context.op_config["color"] == Color.RED\n """\n if name is None:\n name = enum.__name__\n return cls(name, [EnumValue(v.name, python_value=v) for v in enum])\n\n @classmethod\n def from_python_enum_direct_values(cls, enum, name=None):\n """Create a Dagster enum corresponding to an existing Python enum, where the direct values are passed instead of symbolic values (IE, enum.symbol.value as opposed to enum.symbol).\n\n This is necessary for internal usage, as the symbolic values are not serializable.\n\n Args:\n enum (enum.EnumMeta):\n The class representing the enum.\n name (Optional[str]):\n The name for the enum. If not present, `enum.__name__` will be used.\n\n Example:\n .. code-block:: python\n\n class Color(enum.Enum):\n RED = enum.auto()\n GREEN = enum.auto()\n BLUE = enum.auto()\n\n @op(\n config_schema={"color": Field(Enum.from_python_enum(Color))}\n )\n def select_color(context):\n assert context.op_config["color"] == Color.RED.value\n """\n if name is None:\n name = enum.__name__\n return cls(name, [EnumValue(v.name, python_value=v.value) for v in enum])
\n\n\n
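To round out the docstring examples above, a small end-to-end sketch of ``Enum.from_python_enum``, including the run config string that selects a value (class, op, and job names are illustrative):

.. code-block:: python

    import enum

    from dagster import Enum as DagsterEnum, Field, job, op


    class Color(enum.Enum):
        RED = enum.auto()
        GREEN = enum.auto()
        BLUE = enum.auto()


    @op(config_schema={"color": Field(DagsterEnum.from_python_enum(Color))})
    def select_color(context) -> str:
        # post_process maps the validated config string (e.g. "RED") back to Color.RED
        return context.op_config["color"].name


    @job
    def color_job():
        select_color()


    if __name__ == "__main__":
        result = color_job.execute_in_process(
            run_config={"ops": {"select_color": {"config": {"color": "RED"}}}}
        )
        assert result.output_for_node("select_color") == "RED"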
[docs]class ScalarUnion(ConfigType):\n """Defines a configuration type that accepts a scalar value OR a non-scalar value like a\n :py:class:`~dagster.List`, :py:class:`~dagster.Dict`, or :py:class:`~dagster.Selector`.\n\n This allows runtime scalars to be configured without a dictionary with the key ``value`` and\n instead just use the scalar value directly. However this still leaves the option to\n load scalars from a json or pickle file.\n\n Args:\n scalar_type (type):\n The scalar type of values that this configuration type can hold. For example,\n :py:class:`~python:int`, :py:class:`~python:float`, :py:class:`~python:bool`,\n or :py:class:`~python:str`.\n non_scalar_schema (ConfigSchema):\n The schema of a non-scalar Dagster configuration type. For example, :py:class:`List`,\n :py:class:`Dict`, or :py:class:`~dagster.Selector`.\n key (Optional[str]):\n The configuation type's unique key. If not set, then the key will be set to\n ``ScalarUnion.{scalar_type}-{non_scalar_schema}``.\n\n **Examples:**\n\n .. code-block:: yaml\n\n graph:\n transform_word:\n inputs:\n word:\n value: foobar\n\n\n becomes, optionally,\n\n\n .. code-block:: yaml\n\n graph:\n transform_word:\n inputs:\n word: foobar\n """\n\n def __init__(\n self,\n scalar_type: typing.Any,\n non_scalar_schema: UserConfigSchema,\n _key: Optional[str] = None,\n ):\n from .field import resolve_to_config_type\n\n self.scalar_type = check.inst(\n cast(ConfigType, resolve_to_config_type(scalar_type)), ConfigType\n )\n self.non_scalar_type = resolve_to_config_type(non_scalar_schema)\n\n check.param_invariant(self.scalar_type.kind == ConfigTypeKind.SCALAR, "scalar_type")\n check.param_invariant(\n self.non_scalar_type.kind\n in {ConfigTypeKind.STRICT_SHAPE, ConfigTypeKind.SELECTOR, ConfigTypeKind.ARRAY},\n "non_scalar_type",\n )\n\n # https://github.com/dagster-io/dagster/issues/2133\n key = check.opt_str_param(\n _key, "_key", f"ScalarUnion.{self.scalar_type.key}-{self.non_scalar_type.key}"\n )\n\n super(ScalarUnion, self).__init__(\n key=key,\n kind=ConfigTypeKind.SCALAR_UNION,\n type_params=[self.scalar_type, self.non_scalar_type],\n )\n\n def type_iterator(self) -> Iterator["ConfigType"]:\n yield from self.scalar_type.type_iterator()\n yield from self.non_scalar_type.type_iterator()\n yield from super().type_iterator()
\n\n\nConfigAnyInstance: Any = Any()\nConfigBoolInstance: Bool = Bool()\nConfigFloatInstance: Float = Float()\nConfigIntInstance: Int = Int()\nConfigStringInstance: String = String()\n\n_CONFIG_MAP: Dict[check.TypeOrTupleOfTypes, ConfigType] = {\n BuiltinEnum.ANY: ConfigAnyInstance,\n BuiltinEnum.BOOL: ConfigBoolInstance,\n BuiltinEnum.FLOAT: ConfigFloatInstance,\n BuiltinEnum.INT: ConfigIntInstance,\n BuiltinEnum.STRING: ConfigStringInstance,\n}\n\n\n_CONFIG_MAP_BY_NAME: Dict[str, ConfigType] = {\n "Any": ConfigAnyInstance,\n "Bool": ConfigBoolInstance,\n "Float": ConfigFloatInstance,\n "Int": ConfigIntInstance,\n "String": ConfigStringInstance,\n}\n\nALL_CONFIG_BUILTINS = set(_CONFIG_MAP.values())\n\n\ndef get_builtin_scalar_by_name(type_name: str):\n if type_name not in _CONFIG_MAP_BY_NAME:\n check.failed(f"Scalar {type_name} is not supported")\n return _CONFIG_MAP_BY_NAME[type_name]\n
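The ``ScalarUnion`` docstring above shows only the two YAML forms; a minimal Python sketch (op, job, and field names are illustrative) of a field that accepts either a bare string or a shape:

.. code-block:: python

    from dagster import ScalarUnion, job, op

    # Accepts either a bare string or a strict shape with extra options,
    # mirroring the two YAML forms shown in the docstring.
    word_schema = ScalarUnion(
        scalar_type=str,
        non_scalar_schema={"word": str, "repeats": int},
    )


    @op(config_schema=word_schema)
    def emit_word(context) -> str:
        cfg = context.op_config
        if isinstance(cfg, str):
            return cfg
        return cfg["word"] * cfg["repeats"]


    @job
    def word_job():
        emit_word()


    if __name__ == "__main__":
        word_job.execute_in_process(
            run_config={"ops": {"emit_word": {"config": "foobar"}}}
        )
        word_job.execute_in_process(
            run_config={"ops": {"emit_word": {"config": {"word": "foo", "repeats": 2}}}}
        )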
", "current_page_name": "_modules/dagster/_config/config_type", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._config.config_type"}, "field": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._config.field

\nfrom typing import Any, Optional, Union, cast, overload\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._builtins import BuiltinEnum\nfrom dagster._config import UserConfigSchema\nfrom dagster._core.errors import DagsterInvalidConfigError, DagsterInvalidDefinitionError\nfrom dagster._serdes import serialize_value\nfrom dagster._seven import is_subclass\nfrom dagster._utils import is_enum_value\nfrom dagster._utils.typing_api import is_closed_python_optional_type, is_typing_type\n\nfrom .config_type import Array, ConfigAnyInstance, ConfigType, ConfigTypeKind\nfrom .field_utils import FIELD_NO_DEFAULT_PROVIDED, Map, all_optional_type\n\n\ndef _is_config_type_class(obj) -> bool:\n    return isinstance(obj, type) and is_subclass(obj, ConfigType)\n\n\ndef helpful_list_error_string() -> str:\n    return "Please use a python list (e.g. [int]) or dagster.Array (e.g. Array(int)) instead."\n\n\nVALID_CONFIG_DESC = """\n1. A Python primitive type that resolve to dagster config\n   types: int, float, bool, str.\n\n2. A dagster config type: Int, Float, Bool, String, StringSource, Path, Any,\n   Array, Noneable, Selector, Shape, Permissive, etc.\n\n3. A bare python dictionary, which is wrapped in Shape. Any\n   values in the dictionary get resolved by the same rules, recursively.\n\n4. A bare python list of length one which itself is config type.\n   Becomes Array with list element as an argument.\n"""\n\n\n@overload\ndef resolve_to_config_type(obj: Union[ConfigType, UserConfigSchema]) -> ConfigType:\n    pass\n\n\n@overload\ndef resolve_to_config_type(obj: object) -> Union[ConfigType, bool]:\n    pass\n\n\ndef resolve_to_config_type(obj: object) -> Union[ConfigType, bool]:\n    from .field_utils import convert_fields_to_dict_type\n\n    # Short circuit if it's already a Config Type\n    if isinstance(obj, ConfigType):\n        return obj\n\n    if isinstance(obj, dict):\n        # Dicts of the special form {type: value} are treated as Maps\n        # mapping from the type to value type, otherwise treat as dict type\n        if len(obj) == 1:\n            key = next(iter(obj.keys()))\n            key_type = resolve_to_config_type(key)\n            if not isinstance(key, str):\n                if not key_type:\n                    raise DagsterInvalidDefinitionError(\n                        f"Invalid key in map specification: {key!r} in map {obj}"\n                    )\n\n                if not key_type.kind == ConfigTypeKind.SCALAR:  # type: ignore\n                    raise DagsterInvalidDefinitionError(\n                        f"Non-scalar key in map specification: {key!r} in map {obj}"\n                    )\n\n                inner_type = resolve_to_config_type(obj[key])\n\n                if not inner_type:\n                    raise DagsterInvalidDefinitionError(\n                        f"Invalid value in map specification: {obj[str]!r} in map {obj}"\n                    )\n                return Map(key_type, inner_type)\n        return convert_fields_to_dict_type(obj)\n\n    if isinstance(obj, list):\n        if len(obj) != 1:\n            raise DagsterInvalidDefinitionError("Array specifications must only be of length 1")\n\n        inner_type = resolve_to_config_type(obj[0])\n\n        if not inner_type:\n            raise DagsterInvalidDefinitionError(\n                f"Invalid member of array specification: {obj[0]!r} in list {obj}"\n            )\n        return Array(inner_type)\n\n    if BuiltinEnum.contains(obj):\n        return 
ConfigType.from_builtin_enum(obj)\n\n    from .primitive_mapping import (\n        is_supported_config_python_builtin,\n        remap_python_builtin_for_config,\n    )\n\n    if is_supported_config_python_builtin(obj):\n        return remap_python_builtin_for_config(obj)\n\n    if obj is None:\n        return ConfigAnyInstance\n\n    # Special error messages for passing a DagsterType\n    from dagster._core.types.dagster_type import DagsterType, List, ListType\n    from dagster._core.types.python_set import Set, _TypedPythonSet\n    from dagster._core.types.python_tuple import Tuple, _TypedPythonTuple\n\n    if _is_config_type_class(obj):\n        check.param_invariant(\n            False,\n            "dagster_type",\n            f"Cannot pass config type class {obj} to resolve_to_config_type. This error usually"\n            " occurs when you pass a dagster config type class instead of a class instance into"\n            ' another dagster config type. E.g. "Noneable(Permissive)" should instead be'\n            ' "Noneable(Permissive())".',\n        )\n\n    if isinstance(obj, type) and is_subclass(obj, DagsterType):\n        raise DagsterInvalidDefinitionError(\n            f"You have passed a DagsterType class {obj!r} to the config system. "\n            "The DagsterType and config schema systems are separate. "\n            f"Valid config values are:\\n{VALID_CONFIG_DESC}"\n        )\n\n    if is_closed_python_optional_type(obj):\n        raise DagsterInvalidDefinitionError(\n            "Cannot use typing.Optional as a config type. If you want this field to be "\n            "optional, please use Field(<type>, is_required=False), and if you want this field to "\n            "be required, but accept a value of None, use dagster.Noneable(<type>)."\n        )\n\n    if is_typing_type(obj):\n        raise DagsterInvalidDefinitionError(\n            f"You have passed in {obj} to the config system. Types from "\n            "the typing module in python are not allowed in the config system. "\n            "You must use types that are imported from dagster or primitive types "\n            "such as bool, int, etc."\n        )\n\n    if obj is List or isinstance(obj, ListType):\n        raise DagsterInvalidDefinitionError(\n            "Cannot use List in the context of config. " + helpful_list_error_string()\n        )\n\n    if obj is Set or isinstance(obj, _TypedPythonSet):\n        raise DagsterInvalidDefinitionError(\n            "Cannot use Set in the context of a config field. " + helpful_list_error_string()\n        )\n\n    if obj is Tuple or isinstance(obj, _TypedPythonTuple):\n        raise DagsterInvalidDefinitionError(\n            "Cannot use Tuple in the context of a config field. " + helpful_list_error_string()\n        )\n\n    if isinstance(obj, DagsterType):\n        raise DagsterInvalidDefinitionError(\n            f"You have passed an instance of DagsterType {obj.display_name} to the config "\n            f"system (Repr of type: {obj!r}). "\n            "The DagsterType and config schema systems are separate. "\n            f"Valid config values are:\\n{VALID_CONFIG_DESC}",\n        )\n\n    # This means that this is an error and we are return False to a callsite\n    # We do the error reporting there because those callsites have more context\n    return False\n\n\ndef has_implicit_default(config_type):\n    if config_type.kind == ConfigTypeKind.NONEABLE:\n        return True\n\n    return all_optional_type(config_type)\n\n\n
[docs]class Field:\n """Defines the schema for a configuration field.\n\n Fields are used in config schema instead of bare types when one wants to add a description,\n a default value, or to mark it as not required.\n\n Config fields are parsed according to their schemas in order to yield values available at\n job execution time through the config system. Config fields can be set on ops, on\n loaders for custom, and on other pluggable components of the system, such as resources, loggers,\n and executors.\n\n\n Args:\n config (Any): The schema for the config. This value can be any of:\n\n 1. A Python primitive type that resolves to a Dagster config type\n (:py:class:`~python:int`, :py:class:`~python:float`, :py:class:`~python:bool`,\n :py:class:`~python:str`, or :py:class:`~python:list`).\n\n 2. A Dagster config type:\n\n * :py:data:`~dagster.Any`\n * :py:class:`~dagster.Array`\n * :py:data:`~dagster.Bool`\n * :py:data:`~dagster.Enum`\n * :py:data:`~dagster.Float`\n * :py:data:`~dagster.Int`\n * :py:data:`~dagster.IntSource`\n * :py:data:`~dagster.Noneable`\n * :py:class:`~dagster.Permissive`\n * :py:class:`~dagster.ScalarUnion`\n * :py:class:`~dagster.Selector`\n * :py:class:`~dagster.Shape`\n * :py:data:`~dagster.String`\n * :py:data:`~dagster.StringSource`\n\n 3. A bare python dictionary, which will be automatically wrapped in\n :py:class:`~dagster.Shape`. Values of the dictionary are resolved recursively\n according to the same rules.\n\n 4. A bare python list of length one which itself is config type.\n Becomes :py:class:`Array` with list element as an argument.\n\n default_value (Any):\n A default value for this field, conformant to the schema set by the ``dagster_type``\n argument. If a default value is provided, ``is_required`` should be ``False``.\n\n Note: for config types that do post processing such as Enum, this value must be\n the pre processed version, ie use ``ExampleEnum.VALUE.name`` instead of\n ``ExampleEnum.VALUE``\n\n is_required (bool):\n Whether the presence of this field is required. Defaults to true. If ``is_required``\n is ``True``, no default value should be provided.\n\n description (str):\n A human-readable description of this config field.\n\n Examples:\n .. code-block:: python\n\n @op(\n config_schema={\n 'word': Field(str, description='I am a word.'),\n 'repeats': Field(Int, default_value=1, is_required=False),\n }\n )\n def repeat_word(context):\n return context.op_config['word'] * context.op_config['repeats']\n """\n\n def _resolve_config_arg(self, config):\n if isinstance(config, ConfigType):\n return config\n\n config_type = resolve_to_config_type(config)\n if not config_type:\n raise DagsterInvalidDefinitionError(\n f"Attempted to pass {config!r} to a Field that expects a valid "\n "dagster type usable in config (e.g. 
Dict, Int, String et al)."\n )\n return config_type\n\n def __init__(\n self,\n config: Any,\n default_value: Any = FIELD_NO_DEFAULT_PROVIDED,\n is_required: Optional[bool] = None,\n description: Optional[str] = None,\n ):\n from .post_process import resolve_defaults\n from .validate import validate_config\n\n self.config_type = check.inst(self._resolve_config_arg(config), ConfigType)\n\n self._description = check.opt_str_param(description, "description")\n\n check.opt_bool_param(is_required, "is_required")\n\n if default_value != FIELD_NO_DEFAULT_PROVIDED:\n check.param_invariant(\n not (callable(default_value)), "default_value", "default_value cannot be a callable"\n )\n\n if is_required is True:\n check.param_invariant(\n default_value == FIELD_NO_DEFAULT_PROVIDED,\n "default_value",\n "required arguments should not specify default values",\n )\n\n self._default_value = default_value\n\n # check explicit default value\n if self.default_provided:\n if self.config_type.kind == ConfigTypeKind.ENUM and is_enum_value(default_value):\n raise DagsterInvalidDefinitionError(\n (\n "You have passed into a python enum value as the default value "\n "into of a config enum type {name}. You must pass in the underlying "\n "string represention as the default value. One of {value_set}."\n ).format(\n value_set=[ev.config_value for ev in self.config_type.enum_values],\n name=self.config_type.given_name,\n )\n )\n\n evr = validate_config(self.config_type, default_value)\n if not evr.success:\n raise DagsterInvalidConfigError(\n "Invalid default_value for Field.",\n evr.errors,\n default_value,\n )\n\n if is_required is None:\n is_optional = has_implicit_default(self.config_type) or self.default_provided\n is_required = not is_optional\n\n # on implicitly optional - set the default value\n # by resolving the defaults of the type\n if is_optional and not self.default_provided:\n evr = resolve_defaults(self.config_type, None)\n if not evr.success:\n raise DagsterInvalidConfigError(\n "Unable to resolve implicit default_value for Field.",\n evr.errors,\n None,\n )\n self._default_value = evr.value\n self._is_required = is_required\n\n @public\n @property\n def is_required(self) -> bool:\n """Whether a value for this field must be provided at runtime.\n\n Cannot be True if a default value is provided.\n """\n return self._is_required\n\n @public\n @property\n def default_provided(self) -> bool:\n """Was a default value provided.\n\n Returns:\n bool: Yes or no\n """\n return self._default_value != FIELD_NO_DEFAULT_PROVIDED\n\n @public\n @property\n def default_value(self) -> Any:\n """The default value for the field.\n\n Raises an exception if no default value was provided.\n """\n check.invariant(self.default_provided, "Asking for default value when none was provided")\n return self._default_value\n\n @public\n @property\n def description(self) -> Optional[str]:\n """A human-readable description of this config field, if provided."""\n return self._description\n\n @property\n def default_value_as_json_str(self) -> str:\n check.invariant(self.default_provided, "Asking for default value when none was provided")\n return serialize_value(self.default_value)\n\n def __repr__(self) -> str:\n return ("Field({config_type}, default={default}, is_required={is_required})").format(\n config_type=self.config_type,\n default=(\n "@" if self._default_value == FIELD_NO_DEFAULT_PROVIDED else self._default_value\n ),\n is_required=self.is_required,\n )
\n\n\ndef check_opt_field_param(obj: object, param_name: str) -> Optional[Field]:\n return check.opt_inst_param(cast(Optional[Field], obj), param_name, Field)\n
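A sketch that extends the ``Field`` docstring example above with the run config needed to execute it in process (names and values are illustrative):

.. code-block:: python

    from dagster import Field, Int, job, op


    @op(
        config_schema={
            "word": Field(str, description="I am a word."),
            "repeats": Field(Int, default_value=1, is_required=False),
        }
    )
    def repeat_word(context) -> str:
        return context.op_config["word"] * context.op_config["repeats"]


    @job
    def repeat_word_job():
        repeat_word()


    if __name__ == "__main__":
        result = repeat_word_job.execute_in_process(
            run_config={"ops": {"repeat_word": {"config": {"word": "ha", "repeats": 3}}}}
        )
        assert result.output_for_node("repeat_word") == "hahaha"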
", "current_page_name": "_modules/dagster/_config/field", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._config.field"}, "field_utils": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._config.field_utils

\n# encoding: utf-8\nimport hashlib\nimport os\nfrom typing import TYPE_CHECKING, Any, Dict, Iterator, List, Mapping, Optional, Sequence, Type\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.errors import DagsterInvalidConfigDefinitionError\n\nfrom .config_type import Array, ConfigType, ConfigTypeKind\n\nif TYPE_CHECKING:\n    from dagster._config import Field\n\n\ndef all_optional_type(config_type: ConfigType) -> bool:\n    check.inst_param(config_type, "config_type", ConfigType)\n\n    if ConfigTypeKind.is_shape(config_type.kind):\n        for field in config_type.fields.values():  # type: ignore\n            if field.is_required:\n                return False\n        return True\n\n    if ConfigTypeKind.is_selector(config_type.kind):\n        if len(config_type.fields) == 1:  # type: ignore\n            for field in config_type.fields.values():  # type: ignore\n                if field.is_required:\n                    return False\n            return True\n\n    return False\n\n\nclass __FieldValueSentinel:\n    pass\n\n\nclass __InferOptionalCompositeFieldSentinel:\n    pass\n\n\nFIELD_NO_DEFAULT_PROVIDED = __FieldValueSentinel\n\nINFER_OPTIONAL_COMPOSITE_FIELD = __InferOptionalCompositeFieldSentinel\n\n\nclass _ConfigHasFields(ConfigType):\n    def __init__(self, fields, **kwargs):\n        self.fields = expand_fields_dict(fields)\n        super(_ConfigHasFields, self).__init__(**kwargs)\n\n    def type_iterator(self) -> Iterator["ConfigType"]:\n        for field in self.fields.values():\n            yield from field.config_type.type_iterator()\n        yield from super().type_iterator()\n\n\nFIELD_HASH_CACHE: Dict[str, Any] = {}\n\n\ndef _memoize_inst_in_field_cache(passed_cls, defined_cls, key):\n    if key in FIELD_HASH_CACHE:\n        return FIELD_HASH_CACHE[key]\n\n    defined_cls_inst = super(defined_cls, passed_cls).__new__(defined_cls)\n    defined_cls_inst._initialized = False  # noqa: SLF001\n    FIELD_HASH_CACHE[key] = defined_cls_inst\n    return defined_cls_inst\n\n\ndef _add_hash(m, string):\n    m.update(string.encode("utf-8"))\n\n\ndef compute_fields_hash(fields, description, field_aliases=None):\n    m = hashlib.sha1()  # so that hexdigest is 40, not 64 bytes\n    if description:\n        _add_hash(m, ":description: " + description)\n\n    for field_name in sorted(list(fields.keys())):\n        field = fields[field_name]\n        _add_hash(m, ":fieldname:" + field_name)\n        if field.default_provided:\n            _add_hash(m, ":default_value: " + field.default_value_as_json_str)\n        _add_hash(m, ":is_required: " + str(field.is_required))\n        _add_hash(m, ":type_key: " + field.config_type.key)\n        if field.description:\n            _add_hash(m, ":description: " + field.description)\n\n    field_aliases = check.opt_dict_param(\n        field_aliases, "field_aliases", key_type=str, value_type=str\n    )\n    for field_name in sorted(list(field_aliases.keys())):\n        field_alias = field_aliases[field_name]\n        _add_hash(m, ":fieldname: " + field_name)\n        _add_hash(m, ":fieldalias: " + field_alias)\n\n    return m.hexdigest()\n\n\ndef _define_shape_key_hash(fields, description, field_aliases):\n    return "Shape." + compute_fields_hash(fields, description, field_aliases=field_aliases)\n\n\n
[docs]class Shape(_ConfigHasFields):\n """Schema for configuration data with string keys and typed values via :py:class:`Field`.\n\n Unlike :py:class:`Permissive`, unspecified fields are not allowed and will throw a\n :py:class:`~dagster.DagsterInvalidConfigError`.\n\n Args:\n fields (Dict[str, Field]):\n The specification of the config dict.\n field_aliases (Dict[str, str]):\n Maps a string key to an alias that can be used instead of the original key. For example,\n an entry {"foo": "bar"} means that someone could use "bar" instead of "foo" as a\n top level string key.\n """\n\n def __new__(\n cls,\n fields,\n description=None,\n field_aliases=None,\n ):\n return _memoize_inst_in_field_cache(\n cls,\n Shape,\n _define_shape_key_hash(expand_fields_dict(fields), description, field_aliases),\n )\n\n def __init__(\n self,\n fields,\n description=None,\n field_aliases=None,\n ):\n # if we hit in the field cache - skip double init\n if self._initialized:\n return\n\n fields = expand_fields_dict(fields)\n super(Shape, self).__init__(\n kind=ConfigTypeKind.STRICT_SHAPE,\n key=_define_shape_key_hash(fields, description, field_aliases),\n description=description,\n fields=fields,\n )\n self.field_aliases = check.opt_dict_param(\n field_aliases, "field_aliases", key_type=str, value_type=str\n )\n self._initialized = True
\n\n\n
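The ``Shape`` docstring above has no usage example; a minimal sketch (names are illustrative) of an explicit ``Shape`` with a description and a defaulted field; passing a bare dict to ``config_schema`` would produce the same schema without the description:

.. code-block:: python

    from dagster import Field, Shape, String, job, op

    connection_schema = Shape(
        fields={
            "host": Field(String),
            "port": Field(int, default_value=5432, is_required=False),
        },
        description="Connection settings",
    )


    @op(config_schema=connection_schema)
    def connect(context) -> str:
        cfg = context.op_config
        return f"{cfg['host']}:{cfg['port']}"


    @job
    def connect_job():
        connect()


    if __name__ == "__main__":
        connect_job.execute_in_process(
            run_config={"ops": {"connect": {"config": {"host": "localhost"}}}}
        )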
[docs]class Map(ConfigType):\n """Defines a config dict with arbitrary scalar keys and typed values.\n\n A map can contrain arbitrary keys of the specified scalar type, each of which has\n type checked values. Unlike :py:class:`Shape` and :py:class:`Permissive`, scalar\n keys other than strings can be used, and unlike :py:class:`Permissive`, all\n values are type checked.\n\n Args:\n key_type (type):\n The type of keys this map can contain. Must be a scalar type.\n inner_type (type):\n The type of the values that this map type can contain.\n key_label_name (string):\n Optional name which describes the role of keys in the map.\n\n **Examples:**\n\n .. code-block:: python\n\n @op(config_schema=Field(Map({str: int})))\n def partially_specified_config(context) -> List:\n return sorted(list(context.op_config.items()))\n """\n\n def __init__(self, key_type, inner_type, key_label_name=None):\n from .field import resolve_to_config_type\n\n self.key_type = resolve_to_config_type(key_type)\n self.inner_type = resolve_to_config_type(inner_type)\n self.given_name = key_label_name\n\n check.inst_param(self.key_type, "key_type", ConfigType)\n check.inst_param(self.inner_type, "inner_type", ConfigType)\n check.param_invariant(\n self.key_type.kind == ConfigTypeKind.SCALAR, "key_type", "Key type must be a scalar"\n )\n check.opt_str_param(self.given_name, "name")\n\n super(Map, self).__init__(\n key="Map.{key_type}.{inner_type}{name_key}".format(\n key_type=self.key_type.key,\n inner_type=self.inner_type.key,\n name_key=f":name: {key_label_name}" if key_label_name else "",\n ),\n # We use the given name field to store the key label name\n # this is used elsewhere to give custom types names\n given_name=key_label_name,\n type_params=[self.key_type, self.inner_type],\n kind=ConfigTypeKind.MAP,\n )\n\n @public\n @property\n def key_label_name(self) -> Optional[str]:\n """Name which describes the role of keys in the map, if provided."""\n return self.given_name\n\n def type_iterator(self) -> Iterator["ConfigType"]:\n yield from self.key_type.type_iterator()\n yield from self.inner_type.type_iterator()\n yield from super().type_iterator()
\n\n\ndef _define_permissive_dict_key(fields, description):\n return (\n "Permissive." + compute_fields_hash(fields, description=description)\n if fields\n else "Permissive"\n )\n\n\n
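A minimal sketch of the ``Map`` type documented above, using the explicit ``Map(str, int)`` form with a key label (op and job names are illustrative):

.. code-block:: python

    from dagster import Field, Map, job, op


    @op(config_schema=Field(Map(str, int, key_label_name="word")))
    def count_words(context) -> int:
        # op_config is a dict with arbitrary string keys and int values
        return sum(context.op_config.values())


    @job
    def count_job():
        count_words()


    if __name__ == "__main__":
        result = count_job.execute_in_process(
            run_config={"ops": {"count_words": {"config": {"foo": 1, "bar": 2}}}}
        )
        assert result.output_for_node("count_words") == 3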
[docs]class Permissive(_ConfigHasFields):\n """Defines a config dict with a partially specified schema.\n\n A permissive dict allows partial specification of the config schema. Any fields with a\n specified schema will be type checked. Other fields will be allowed, but will be ignored by\n the type checker.\n\n Args:\n fields (Dict[str, Field]): The partial specification of the config dict.\n\n **Examples:**\n\n .. code-block:: python\n\n @op(config_schema=Field(Permissive({'required': Field(String)})))\n def map_config_op(context) -> List:\n return sorted(list(context.op_config.items()))\n """\n\n def __new__(cls, fields=None, description=None):\n return _memoize_inst_in_field_cache(\n cls,\n Permissive,\n _define_permissive_dict_key(\n expand_fields_dict(fields) if fields else None, description\n ),\n )\n\n def __init__(self, fields=None, description=None):\n # if we hit in field cache avoid double init\n if self._initialized:\n return\n\n fields = expand_fields_dict(fields) if fields else None\n super(Permissive, self).__init__(\n key=_define_permissive_dict_key(fields, description),\n kind=ConfigTypeKind.PERMISSIVE_SHAPE,\n fields=fields or dict(),\n description=description,\n )\n self._initialized = True
\n\n\ndef _define_selector_key(fields, description):\n return "Selector." + compute_fields_hash(fields, description=description)\n\n\n
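Extending the ``Permissive`` docstring example above with the run config side: unspecified keys such as ``anything_else`` below still pass validation (names and values are illustrative):

.. code-block:: python

    from dagster import Field, Permissive, String, job, op


    @op(config_schema=Field(Permissive({"required": Field(String)})))
    def map_config_op(context):
        # "required" is type checked; any extra keys are passed through unchecked.
        return sorted(context.op_config.items())


    @job
    def permissive_job():
        map_config_op()


    if __name__ == "__main__":
        permissive_job.execute_in_process(
            run_config={
                "ops": {
                    "map_config_op": {"config": {"required": "yes", "anything_else": 42}}
                }
            }
        )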
[docs]class Selector(_ConfigHasFields):\n """Define a config field requiring the user to select one option.\n\n Selectors are used when you want to be able to present several different options in config but\n allow only one to be selected. For example, a single input might be read in from either a csv\n file or a parquet file, but not both at once.\n\n Note that in some other type systems this might be called an 'input union'.\n\n Functionally, a selector is like a :py:class:`Dict`, except that only one key from the dict can\n be specified in valid config.\n\n Args:\n fields (Dict[str, Field]): The fields from which the user must select.\n\n **Examples:**\n\n .. code-block:: python\n\n @op(\n config_schema=Field(\n Selector(\n {\n 'haw': {'whom': Field(String, default_value='honua', is_required=False)},\n 'cn': {'whom': Field(String, default_value='\u4e16\u754c', is_required=False)},\n 'en': {'whom': Field(String, default_value='world', is_required=False)},\n }\n ),\n is_required=False,\n default_value={'en': {'whom': 'world'}},\n )\n )\n def hello_world_with_default(context):\n if 'haw' in context.op_config:\n return 'Aloha {whom}!'.format(whom=context.op_config['haw']['whom'])\n if 'cn' in context.op_config:\n return '\u4f60\u597d, {whom}!'.format(whom=context.op_config['cn']['whom'])\n if 'en' in context.op_config:\n return 'Hello, {whom}!'.format(whom=context.op_config['en']['whom'])\n """\n\n def __new__(cls, fields, description=None):\n return _memoize_inst_in_field_cache(\n cls,\n Selector,\n _define_selector_key(expand_fields_dict(fields), description),\n )\n\n def __init__(self, fields, description=None):\n # if we hit in field cache avoid double init\n if self._initialized:\n return\n\n fields = expand_fields_dict(fields)\n super(Selector, self).__init__(\n key=_define_selector_key(fields, description),\n kind=ConfigTypeKind.SELECTOR,\n fields=fields,\n description=description,\n )\n self._initialized = True
\n\n\n# Config syntax expansion code below\n\n\ndef is_potential_field(potential_field: object) -> bool:\n from .field import Field, resolve_to_config_type\n\n return isinstance(potential_field, (Field, dict, list)) or bool(\n resolve_to_config_type(potential_field)\n )\n\n\ndef convert_fields_to_dict_type(fields: Mapping[str, object]):\n return _convert_fields_to_dict_type(fields, fields, [])\n\n\ndef _convert_fields_to_dict_type(\n original_root: object, fields: Mapping[str, object], stack: List[str]\n) -> Shape:\n return Shape(_expand_fields_dict(original_root, fields, stack))\n\n\ndef expand_fields_dict(fields: Mapping[str, object]) -> Mapping[str, "Field"]:\n return _expand_fields_dict(fields, fields, [])\n\n\ndef _expand_fields_dict(\n original_root: object, fields: Mapping[str, object], stack: List[str]\n) -> Mapping[str, "Field"]:\n check.mapping_param(fields, "fields")\n return {\n name: _convert_potential_field(original_root, value, stack + [name])\n for name, value in fields.items()\n }\n\n\ndef expand_list(original_root: object, the_list: Sequence[object], stack: List[str]) -> Array:\n if len(the_list) != 1:\n raise DagsterInvalidConfigDefinitionError(\n original_root, the_list, stack, "List must be of length 1"\n )\n\n inner_type = _convert_potential_type(original_root, the_list[0], stack)\n if not inner_type:\n raise DagsterInvalidConfigDefinitionError(\n original_root,\n the_list,\n stack,\n "List have a single item and contain a valid type i.e. [int]. Got item {}".format(\n repr(the_list[0])\n ),\n )\n\n return Array(inner_type)\n\n\ndef expand_map(original_root: object, the_dict: Mapping[object, object], stack: List[str]) -> Map:\n if len(the_dict) != 1:\n raise DagsterInvalidConfigDefinitionError(\n original_root, the_dict, stack, "Map dict must be of length 1"\n )\n\n key = next(iter(the_dict.keys()))\n key_type = _convert_potential_type(original_root, key, stack)\n if not key_type or not key_type.kind == ConfigTypeKind.SCALAR:\n raise DagsterInvalidConfigDefinitionError(\n original_root,\n the_dict,\n stack,\n f"Map dict must have a scalar type as its only key. Got key {key!r}",\n )\n\n inner_type = _convert_potential_type(original_root, the_dict[key], stack)\n if not inner_type:\n raise DagsterInvalidConfigDefinitionError(\n original_root,\n the_dict,\n stack,\n "Map must have a single value and contain a valid type i.e. {{str: int}}. 
Got item {}"\n .format(repr(the_dict[key])),\n )\n\n return Map(key_type, inner_type)\n\n\ndef convert_potential_field(potential_field: object) -> "Field":\n return _convert_potential_field(potential_field, potential_field, [])\n\n\ndef _convert_potential_type(original_root: object, potential_type, stack: List[str]):\n from .field import resolve_to_config_type\n\n if isinstance(potential_type, Mapping):\n # A dictionary, containing a single key which is a type (int, str, etc) and not a string is interpreted as a Map\n if len(potential_type) == 1:\n key = next(iter(potential_type.keys()))\n if not isinstance(key, str) and _convert_potential_type(original_root, key, stack):\n return expand_map(original_root, potential_type, stack)\n\n # Otherwise, the dictionary is interpreted as a Shape\n return Shape(_expand_fields_dict(original_root, potential_type, stack))\n\n if isinstance(potential_type, list):\n return expand_list(original_root, potential_type, stack)\n\n return resolve_to_config_type(potential_type)\n\n\ndef _convert_potential_field(\n original_root: object, potential_field: object, stack: List[str]\n) -> "Field":\n from .field import Field\n\n if potential_field is None:\n raise DagsterInvalidConfigDefinitionError(\n original_root, potential_field, stack, reason="Fields cannot be None"\n )\n\n if not is_potential_field(potential_field):\n raise DagsterInvalidConfigDefinitionError(original_root, potential_field, stack)\n\n if isinstance(potential_field, Field):\n return potential_field\n\n return Field(_convert_potential_type(original_root, potential_field, stack))\n\n\ndef config_dictionary_from_values(\n values: Mapping[str, Any], config_field: "Field"\n) -> Dict[str, Any]:\n """Converts a set of config values into a dictionary representation,\n in particular converting EnvVar objects into Dagster config inputs\n and processing data structures such as dicts, lists, and structured Config classes.\n """\n assert ConfigTypeKind.is_shape(config_field.config_type.kind)\n\n from dagster._config.pythonic_config import _config_value_to_dict_representation\n\n return check.is_dict(_config_value_to_dict_representation(None, values))\n\n\ndef _create_direct_access_exception(cls: Type, env_var_name: str) -> Exception:\n return RuntimeError(\n f'Attempted to directly retrieve environment variable {cls.__name__}("{env_var_name}").'\n f" {cls.__name__} defers resolution of the environment variable value until run time, and"\n " should only be used as input to Dagster config or resources.\\n\\nTo access the"\n f" environment variable value, call `get_value` on the {cls.__name__}, or use os.getenv"\n " directly."\n )\n\n\nclass IntEnvVar(int):\n """Class used to represent an environment variable in the Dagster config system.\n\n The environment variable will be resolved to an int value when the config is\n loaded.\n """\n\n name: str\n\n @classmethod\n def create(cls, name: str) -> "IntEnvVar":\n var = IntEnvVar(0)\n var.name = name\n return var\n\n def __int__(self) -> int:\n """Raises an exception of the EnvVar value is directly accessed. Users should instead use\n the `get_value` method, or use the EnvVar as an input to Dagster config or resources.\n """\n raise _create_direct_access_exception(self.__class__, self.env_var_name)\n\n def __str__(self) -> str:\n return str(int(self))\n\n def get_value(self, default: Optional[int] = None) -> Optional[int]:\n """Returns the value of the environment variable, or the default value if the\n environment variable is not set. 
If no default is provided, None will be returned.\n """\n value = os.getenv(self.name, default=default)\n return int(value) if value else None\n\n @property\n def env_var_name(self) -> str:\n """Returns the name of the environment variable."""\n return self.name\n\n\nclass EnvVar(str):\n """Class used to represent an environment variable in the Dagster config system.\n\n This class is intended to be used to populate config fields or resources.\n The environment variable will be resolved to a string value when the config is\n loaded.\n\n To access the value of the environment variable, use the `get_value` method.\n """\n\n @classmethod\n def int(cls, name: str) -> "IntEnvVar":\n return IntEnvVar.create(name=name)\n\n def __str__(self) -> str:\n """Raises an exception of the EnvVar value is directly accessed. Users should instead use\n the `get_value` method, or use the EnvVar as an input to Dagster config or resources.\n """\n raise _create_direct_access_exception(self.__class__, self.env_var_name)\n\n @property\n def env_var_name(self) -> str:\n """Returns the name of the environment variable."""\n return super().__str__()\n\n def get_value(self, default: Optional[str] = None) -> Optional[str]:\n """Returns the value of the environment variable, or the default value if the\n environment variable is not set. If no default is provided, None will be returned.\n """\n return os.getenv(self.env_var_name, default=default)\n
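Neither ``EnvVar`` nor ``IntEnvVar`` carries a usage example above; a minimal sketch of the intended pattern, deferring environment variable resolution to run time via a Pythonic resource (the resource class, asset, and environment variable names are hypothetical):

.. code-block:: python

    from dagster import ConfigurableResource, Definitions, EnvVar, asset


    class ApiClient(ConfigurableResource):
        api_key: str
        timeout: int = 30


    @asset
    def remote_data(api_client: ApiClient) -> str:
        return f"fetched with a key of length {len(api_client.api_key)}"


    # EnvVar/IntEnvVar defer reading the variable until the resource is initialized at
    # run time; materializing remote_data would require MY_API_KEY and MY_TIMEOUT to be set.
    defs = Definitions(
        assets=[remote_data],
        resources={
            "api_client": ApiClient(
                api_key=EnvVar("MY_API_KEY"),
                timeout=EnvVar.int("MY_TIMEOUT"),
            )
        },
    )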
", "current_page_name": "_modules/dagster/_config/field_utils", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._config.field_utils"}, "pythonic_config": {"config": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._config.pythonic_config.config

\nimport re\nfrom enum import Enum\nfrom typing import (\n    Any,\n    Dict,\n    List,\n    Mapping,\n    Optional,\n    Set,\n    Type,\n    cast,\n)\n\nfrom pydantic import BaseModel\nfrom typing_extensions import TypeVar\n\nimport dagster._check as check\nfrom dagster import (\n    Field as DagsterField,\n    Shape,\n)\nfrom dagster._config.field_utils import (\n    EnvVar,\n    IntEnvVar,\n    Permissive,\n)\nfrom dagster._core.definitions.definition_config_schema import (\n    DefinitionConfigSchema,\n)\nfrom dagster._core.errors import (\n    DagsterInvalidConfigDefinitionError,\n    DagsterInvalidDefinitionError,\n    DagsterInvalidInvocationError,\n    DagsterInvalidPythonicConfigDefinitionError,\n)\nfrom dagster._utils.cached_method import CACHED_METHOD_FIELD_SUFFIX\n\nfrom .attach_other_object_to_context import (\n    IAttachDifferentObjectToOpContext as IAttachDifferentObjectToOpContext,\n)\nfrom .conversion_utils import _convert_pydantic_field, safe_is_subclass\nfrom .pydantic_compat_layer import (\n    USING_PYDANTIC_2,\n    ModelFieldCompat,\n    model_config,\n    model_fields,\n)\nfrom .typing_utils import BaseConfigMeta\n\ntry:\n    from functools import cached_property  # type: ignore  # (py37 compat)\nexcept ImportError:\n\n    class cached_property:\n        pass\n\n\nINTERNAL_MARKER = "__internal__"\n\n# ensure that this ends with the internal marker so we can do a single check\nassert CACHED_METHOD_FIELD_SUFFIX.endswith(INTERNAL_MARKER)\n\n\ndef _is_frozen_pydantic_error(e: Exception) -> bool:\n    """Parses an error to determine if it is a Pydantic error indicating that the instance\n    is immutable. We use this to attach a more helpful error message.\n    """\n    return "Instance is frozen" in str(  # Pydantic 2.x error\n        e\n    ) or "is immutable and does not support item assignment" in str(  # Pydantic 1.x error\n        e\n    )\n\n\nclass MakeConfigCacheable(BaseModel):\n    """This class centralizes and implements all the chicanery we need in order\n    to support caching decorators. If we decide this is a bad idea we can remove it\n    all in one go.\n    """\n\n    # Pydantic config for this class\n    # Cannot use kwargs for base class as this is not support for pydnatic<1.8\n    class Config:\n        # Various pydantic model config (https://docs.pydantic.dev/usage/model_config/)\n        # Necessary to allow for caching decorators\n        arbitrary_types_allowed = True\n        # Avoid pydantic reading a cached property class as part of the schema\n        if USING_PYDANTIC_2:\n            ignored_types = (cached_property,)\n        else:\n            keep_untouched = (cached_property,)\n        # Ensure the class is serializable, for caching purposes\n        frozen = True\n\n    def __setattr__(self, name: str, value: Any):\n        from .resource import ConfigurableResourceFactory\n\n        # This is a hack to allow us to set attributes on the class that are not part of the\n        # config schema. 
Pydantic will normally raise an error if you try to set an attribute\n        # that is not part of the schema.\n\n        if self._is_field_internal(name):\n            object.__setattr__(self, name, value)\n            return\n\n        try:\n            return super().__setattr__(name, value)\n        except (TypeError, ValueError) as e:\n            clsname = self.__class__.__name__\n            if _is_frozen_pydantic_error(e):\n                if isinstance(self, ConfigurableResourceFactory):\n                    raise DagsterInvalidInvocationError(\n                        f"'{clsname}' is a Pythonic resource and does not support item assignment,"\n                        " as it inherits from 'pydantic.BaseModel' with frozen=True. If trying to"\n                        " maintain state on this resource, consider building a separate, stateful"\n                        " client class, and provide a method on the resource to construct and"\n                        " return the stateful client."\n                    ) from e\n                else:\n                    raise DagsterInvalidInvocationError(\n                        f"'{clsname}' is a Pythonic config class and does not support item"\n                        " assignment, as it inherits from 'pydantic.BaseModel' with frozen=True."\n                    ) from e\n            elif "object has no field" in str(e):\n                field_name = check.not_none(\n                    re.search(r"object has no field \\"(.*)\\"", str(e))\n                ).group(1)\n                if isinstance(self, ConfigurableResourceFactory):\n                    raise DagsterInvalidInvocationError(\n                        f"'{clsname}' is a Pythonic resource and does not support manipulating"\n                        f" undeclared attribute '{field_name}' as it inherits from"\n                        " 'pydantic.BaseModel' without extra=\\"allow\\". If trying to maintain"\n                        " state on this resource, consider building a separate, stateful client"\n                        " class, and provide a method on the resource to construct and return the"\n                        " stateful client."\n                    ) from e\n                else:\n                    raise DagsterInvalidInvocationError(\n                        f"'{clsname}' is a Pythonic config class and does not support manipulating"\n                        f" undeclared attribute '{field_name}' as it inherits from"\n                        " 'pydantic.BaseModel' without extra=\\"allow\\"."\n                    ) from e\n            else:\n                raise\n\n    def _is_field_internal(self, name: str) -> bool:\n        return name.endswith(INTERNAL_MARKER)\n\n\nT = TypeVar("T")\n\n\ndef ensure_env_vars_set_post_init(set_value: T, input_value: Any) -> T:\n    """Pydantic 2.x utility. 
Ensures that Pydantic field values are set to the appropriate\n    EnvVar or IntEnvVar objects post-model-instantiation, since Pydantic 2.x will cast\n    EnvVar or IntEnvVar values to raw strings or ints as part of the model instantiation process.\n    """\n    if isinstance(set_value, dict) and isinstance(input_value, dict):\n        for key, value in input_value.items():\n            if isinstance(value, (EnvVar, IntEnvVar)):\n                set_value[key] = value\n            elif isinstance(value, (dict, list)):\n                set_value[key] = ensure_env_vars_set_post_init(set_value[key], value)\n    if isinstance(set_value, List) and isinstance(input_value, List):\n        for i in range(len(set_value)):\n            value = input_value[i]\n            if isinstance(value, (EnvVar, IntEnvVar)):\n                set_value[i] = value\n            elif isinstance(value, (dict, list)):\n                set_value[i] = ensure_env_vars_set_post_init(set_value[i], value)\n\n    return set_value\n\n\n
[docs]class Config(MakeConfigCacheable, metaclass=BaseConfigMeta):\n """Base class for Dagster configuration models, used to specify config schema for\n ops and assets. Subclasses :py:class:`pydantic.BaseModel`.\n\n Example definition:\n\n .. code-block:: python\n\n from pydantic import Field\n\n class MyAssetConfig(Config):\n my_str: str = "my_default_string"\n my_int_list: List[int]\n my_bool_with_metadata: bool = Field(default=False, description="A bool field")\n\n\n Example usage:\n\n .. code-block:: python\n\n @asset\n def asset_with_config(config: MyAssetConfig):\n assert config.my_str == "my_default_string"\n assert config.my_int_list == [1, 2, 3]\n assert config.my_bool_with_metadata == False\n\n asset_with_config(MyAssetConfig(my_int_list=[1, 2, 3], my_bool_with_metadata=True))\n\n """\n\n def __init__(self, **config_dict) -> None:\n """This constructor is overridden to handle any remapping of raw config dicts to\n the appropriate config classes. For example, discriminated unions are represented\n in Dagster config as dicts with a single key, which is the discriminator value.\n """\n modified_data = {}\n for key, value in config_dict.items():\n field = model_fields(self).get(key)\n\n # This is useful in Pydantic 2.x when reconstructing a config object from a dict\n # e.g. when instantiating a resource at runtime from its config dict\n # In Pydantic 1.x, this is a no-op, since a non-required field without a\n # value provided will default to None (required & optional are the same in 1.x)\n if field and not field.is_required() and value is None:\n continue\n\n if field and field.discriminator:\n nested_dict = value\n\n discriminator_key = check.not_none(field.discriminator)\n if isinstance(value, Config):\n nested_dict = _discriminated_union_config_dict_to_selector_config_dict(\n discriminator_key,\n value._get_non_none_public_field_values(), # noqa: SLF001\n )\n\n nested_items = list(check.is_dict(nested_dict).items())\n check.invariant(\n len(nested_items) == 1,\n "Discriminated union must have exactly one key",\n )\n discriminated_value, nested_values = nested_items[0]\n\n modified_data[key] = {\n **nested_values,\n discriminator_key: discriminated_value,\n }\n else:\n modified_data[key] = value\n\n for key, field in model_fields(self).items():\n if field.is_required() and key not in modified_data:\n modified_data[key] = None\n\n super().__init__(**modified_data)\n if USING_PYDANTIC_2:\n self.__dict__ = ensure_env_vars_set_post_init(self.__dict__, modified_data)\n\n def _convert_to_config_dictionary(self) -> Mapping[str, Any]:\n """Converts this Config object to a Dagster config dictionary, in the same format as the dictionary\n accepted as run config or as YAML in the launchpad.\n\n Inner fields are recursively converted to dictionaries, meaning nested config objects\n or EnvVars will be converted to the appropriate dictionary representation.\n """\n public_fields = self._get_non_none_public_field_values()\n return {\n k: _config_value_to_dict_representation(model_fields(self).get(k), v)\n for k, v in public_fields.items()\n }\n\n def _get_non_none_public_field_values(self) -> Mapping[str, Any]:\n """Returns a dictionary representation of this config object,\n ignoring any private fields, and any optional fields that are None.\n\n Inner fields are returned as-is in the dictionary,\n meaning any nested config objects will be returned as config objects, not dictionaries.\n """\n output = {}\n for key, value in self.__dict__.items():\n if self._is_field_internal(key):\n continue\n 
field = model_fields(self).get(key)\n\n if field:\n resolved_field_name = field.alias or key\n output[resolved_field_name] = value\n else:\n output[key] = value\n return output\n\n @classmethod\n def to_config_schema(cls) -> DefinitionConfigSchema:\n """Converts the config structure represented by this class into a DefinitionConfigSchema."""\n return DefinitionConfigSchema(infer_schema_from_config_class(cls))\n\n @classmethod\n def to_fields_dict(cls) -> Dict[str, DagsterField]:\n """Converts the config structure represented by this class into a dictionary of dagster.Fields.\n This is useful when interacting with legacy code that expects a dictionary of fields but you\n want the source of truth to be a config class.\n """\n return cast(Shape, cls.to_config_schema().as_field().config_type).fields
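A sketch that extends the ``Config`` docstring example above by launching a materialization with ``RunConfig`` rather than invoking the asset directly (asset and field names are illustrative):

.. code-block:: python

    from typing import List

    from dagster import Config, RunConfig, asset, materialize


    class MyAssetConfig(Config):
        my_str: str = "my_default_string"
        my_int_list: List[int]


    @asset
    def asset_with_config(config: MyAssetConfig) -> str:
        return f"{config.my_str}: {sum(config.my_int_list)}"


    if __name__ == "__main__":
        materialize(
            [asset_with_config],
            run_config=RunConfig(
                ops={"asset_with_config": MyAssetConfig(my_int_list=[1, 2, 3])}
            ),
        )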
\n\n\ndef _discriminated_union_config_dict_to_selector_config_dict(\n discriminator_key: str, config_dict: Mapping[str, Any]\n):\n """Remaps a config dictionary which is a member of a discriminated union to\n the appropriate structure for a Dagster config selector.\n\n A discriminated union with key "my_key" and value "my_value" will be represented\n as {"my_key": "my_value", "my_field": "my_field_value"}. When converted to a selector,\n this should be represented as {"my_value": {"my_field": "my_field_value"}}.\n """\n updated_dict = dict(config_dict)\n discriminator_value = updated_dict.pop(discriminator_key)\n wrapped_dict = {discriminator_value: updated_dict}\n return wrapped_dict\n\n\ndef _config_value_to_dict_representation(field: Optional[ModelFieldCompat], value: Any):\n """Converts a config value to a dictionary representation. If a field is provided, it will be used\n to determine the appropriate dictionary representation in the case of discriminated unions.\n """\n from dagster._config.field_utils import EnvVar, IntEnvVar\n\n if isinstance(value, dict):\n return {k: _config_value_to_dict_representation(None, v) for k, v in value.items()}\n elif isinstance(value, list):\n return [_config_value_to_dict_representation(None, v) for v in value]\n elif isinstance(value, EnvVar):\n return {"env": value.env_var_name}\n elif isinstance(value, IntEnvVar):\n return {"env": value.name}\n if isinstance(value, Config):\n if field and field.discriminator:\n return {\n k: v\n for k, v in _discriminated_union_config_dict_to_selector_config_dict(\n field.discriminator,\n value._convert_to_config_dictionary(), # noqa: SLF001\n ).items()\n }\n else:\n return {k: v for k, v in value._convert_to_config_dictionary().items()} # noqa: SLF001\n elif isinstance(value, Enum):\n return value.name\n\n return value\n\n\n
[docs]class PermissiveConfig(Config):\n """Subclass of :py:class:`Config` that allows arbitrary extra fields. This is useful for\n config classes which may have open-ended inputs.\n\n Example definition:\n\n .. code-block:: python\n\n class MyPermissiveOpConfig(PermissiveConfig):\n my_explicit_parameter: bool\n my_other_explicit_parameter: str\n\n\n Example usage:\n\n .. code-block:: python\n\n @op\n def op_with_config(config: MyPermissiveOpConfig):\n assert config.my_explicit_parameter == True\n assert config.my_other_explicit_parameter == "foo"\n assert config.dict().get("my_implicit_parameter") == "bar"\n\n op_with_config(\n MyPermissiveOpConfig(\n my_explicit_parameter=True,\n my_other_explicit_parameter="foo",\n my_implicit_parameter="bar"\n )\n )\n\n """\n\n # Pydantic config for this class\n # Cannot use kwargs for base class as this is not support for pydantic<1.8\n class Config:\n extra = "allow"
\n\n\ndef infer_schema_from_config_class(\n model_cls: Type["Config"],\n description: Optional[str] = None,\n fields_to_omit: Optional[Set[str]] = None,\n) -> DagsterField:\n from .config import Config\n from .resource import ConfigurableResourceFactory, _is_annotated_as_resource_type\n\n """Parses a structured config class and returns a corresponding Dagster config Field."""\n fields_to_omit = fields_to_omit or set()\n\n check.param_invariant(\n safe_is_subclass(model_cls, Config),\n "Config type annotation must inherit from dagster.Config",\n )\n\n fields: Dict[str, DagsterField] = {}\n for key, pydantic_field_info in model_fields(model_cls).items():\n if _is_annotated_as_resource_type(\n pydantic_field_info.annotation, pydantic_field_info.metadata\n ):\n continue\n\n resolved_field_name = pydantic_field_info.alias if pydantic_field_info.alias else key\n if key not in fields_to_omit:\n if isinstance(pydantic_field_info.default, DagsterField):\n raise DagsterInvalidDefinitionError(\n "Using 'dagster.Field' is not supported within a Pythonic config or resource"\n " definition. 'dagster.Field' should only be used in legacy Dagster config"\n " schemas. Did you mean to use 'pydantic.Field' instead?"\n )\n\n try:\n fields[resolved_field_name] = _convert_pydantic_field(pydantic_field_info)\n except DagsterInvalidConfigDefinitionError as e:\n raise DagsterInvalidPythonicConfigDefinitionError(\n config_class=model_cls,\n field_name=key,\n invalid_type=e.current_value,\n is_resource=model_cls is not None\n and safe_is_subclass(model_cls, ConfigurableResourceFactory),\n )\n\n shape_cls = Permissive if model_config(model_cls).get("extra") == "allow" else Shape\n\n docstring = model_cls.__doc__.strip() if model_cls.__doc__ else None\n\n return DagsterField(config=shape_cls(fields), description=description or docstring)\n
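``infer_schema_from_config_class`` chooses ``Permissive`` over ``Shape`` when the model's ``extra`` setting is ``"allow"``. A small sketch of the observable difference through the public ``to_config_schema`` helper (the class names here are illustrative):

.. code-block:: python

    from dagster import Config, PermissiveConfig

    class StrictConfig(Config):
        value: int

    class LooseConfig(PermissiveConfig):
        value: int

    # Per the shape_cls selection above, a PermissiveConfig subclass should
    # produce a Permissive config type; a plain Config subclass a closed Shape.
    strict_type = StrictConfig.to_config_schema().as_field().config_type
    loose_type = LooseConfig.to_config_schema().as_field().config_type
    print(type(strict_type).__name__, type(loose_type).__name__)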
", "current_page_name": "_modules/dagster/_config/pythonic_config/config", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._config.pythonic_config.config"}, "io_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._config.pythonic_config.io_manager

\nfrom abc import abstractmethod\nfrom typing import (\n    AbstractSet,\n    Any,\n    Callable,\n    Dict,\n    Generic,\n    Mapping,\n    Optional,\n    Type,\n    Union,\n    cast,\n)\n\nfrom typing_extensions import TypeVar\n\nfrom dagster._core.definitions.definition_config_schema import (\n    CoercableToConfigSchema,\n)\nfrom dagster._core.definitions.resource_definition import (\n    ResourceDefinition,\n    ResourceFunction,\n)\nfrom dagster._core.execution.context.init import InitResourceContext\nfrom dagster._core.storage.io_manager import IOManager, IOManagerDefinition\nfrom dagster._utils.cached_method import cached_method\n\nfrom .attach_other_object_to_context import (\n    IAttachDifferentObjectToOpContext as IAttachDifferentObjectToOpContext,\n)\nfrom .config import Config\nfrom .conversion_utils import TResValue\nfrom .resource import (\n    AllowDelayedDependencies,\n    ConfigurableResourceFactory,\n    PartialResource,\n    ResourceId,\n    ResourceWithKeyMapping,\n    Self,\n)\nfrom .type_check_utils import safe_is_subclass\n\ntry:\n    from functools import cached_property  # type: ignore  # (py37 compat)\nexcept ImportError:\n\n    class cached_property:\n        pass\n\n\nTIOManagerValue = TypeVar("TIOManagerValue", bound=IOManager)\n\n\nclass ConfigurableIOManagerFactoryResourceDefinition(IOManagerDefinition, AllowDelayedDependencies):\n    def __init__(\n        self,\n        configurable_resource_cls: Type,\n        resource_fn: ResourceFunction,\n        config_schema: Any,\n        description: Optional[str],\n        resolve_resource_keys: Callable[[Mapping[int, str]], AbstractSet[str]],\n        nested_resources: Mapping[str, Any],\n        input_config_schema: Optional[Union[CoercableToConfigSchema, Type[Config]]] = None,\n        output_config_schema: Optional[Union[CoercableToConfigSchema, Type[Config]]] = None,\n        dagster_maintained: bool = False,\n    ):\n        input_config_schema_resolved: CoercableToConfigSchema = (\n            cast(Type[Config], input_config_schema).to_config_schema()\n            if safe_is_subclass(input_config_schema, Config)\n            else cast(CoercableToConfigSchema, input_config_schema)\n        )\n        output_config_schema_resolved: CoercableToConfigSchema = (\n            cast(Type[Config], output_config_schema).to_config_schema()\n            if safe_is_subclass(output_config_schema, Config)\n            else cast(CoercableToConfigSchema, output_config_schema)\n        )\n        super().__init__(\n            resource_fn=resource_fn,\n            config_schema=config_schema,\n            description=description,\n            input_config_schema=input_config_schema_resolved,\n            output_config_schema=output_config_schema_resolved,\n        )\n        self._resolve_resource_keys = resolve_resource_keys\n        self._nested_resources = nested_resources\n        self._configurable_resource_cls = configurable_resource_cls\n        self._dagster_maintained = dagster_maintained\n\n    @property\n    def configurable_resource_cls(self) -> Type:\n        return self._configurable_resource_cls\n\n    @property\n    def nested_resources(\n        self,\n    ) -> Mapping[str, Any]:\n        return self._nested_resources\n\n    def _resolve_required_resource_keys(\n        self, resource_mapping: Mapping[int, str]\n    ) -> AbstractSet[str]:\n        return self._resolve_resource_keys(resource_mapping)\n\n\nclass IOManagerWithKeyMapping(ResourceWithKeyMapping, IOManagerDefinition):\n    """Version of 
ResourceWithKeyMapping wrapper that also implements IOManagerDefinition."""\n\n    def __init__(\n        self,\n        resource: ResourceDefinition,\n        resource_id_to_key_mapping: Dict[ResourceId, str],\n    ):\n        ResourceWithKeyMapping.__init__(self, resource, resource_id_to_key_mapping)\n        IOManagerDefinition.__init__(\n            self, resource_fn=self.resource_fn, config_schema=resource.config_schema\n        )\n\n\n
[docs]class ConfigurableIOManagerFactory(ConfigurableResourceFactory[TIOManagerValue]):\n """Base class for Dagster IO managers that utilize structured config. This base class\n is useful for cases in which the returned IO manager is not the same as the class itself\n (e.g. when it is a wrapper around the actual IO manager implementation).\n\n This class is a subclass of both :py:class:`IOManagerDefinition` and :py:class:`Config`.\n Implementers should provide an implementation of the :py:meth:`create_io_manager` method,\n which should return an instance of :py:class:`IOManager`.\n\n\n Example definition:\n\n .. code-block:: python\n\n class ExternalIOManager(IOManager):\n\n def __init__(self, connection):\n self._connection = connection\n\n def handle_output(self, context, obj):\n ...\n\n def load_input(self, context):\n ...\n\n class ConfigurableExternalIOManager(ConfigurableIOManagerFactory):\n username: str\n password: str\n\n def create_io_manager(self, context) -> IOManager:\n with database.connect(self.username, self.password) as connection:\n return ExternalIOManager(connection)\n\n defs = Definitions(\n ...,\n resources={\n "io_manager": ConfigurableExternalIOManager(\n username="dagster",\n password=EnvVar("DB_PASSWORD")\n )\n }\n )\n\n """\n\n def __init__(self, **data: Any):\n ConfigurableResourceFactory.__init__(self, **data)\n\n @abstractmethod\n def create_io_manager(self, context) -> TIOManagerValue:\n """Implement as one would implement a @io_manager decorator function."""\n raise NotImplementedError()\n\n def create_resource(self, context: InitResourceContext) -> TIOManagerValue:\n return self.create_io_manager(context)\n\n @classmethod\n def configure_at_launch(cls: "Type[Self]", **kwargs) -> "PartialIOManager[Self]":\n """Returns a partially initialized copy of the IO manager, with remaining config fields\n set at runtime.\n """\n return PartialIOManager(cls, data=kwargs)\n\n @cached_method\n def get_resource_definition(self) -> ConfigurableIOManagerFactoryResourceDefinition:\n return ConfigurableIOManagerFactoryResourceDefinition(\n self.__class__,\n resource_fn=self._get_initialize_and_run_fn(),\n config_schema=self._config_schema,\n description=self.__doc__,\n resolve_resource_keys=self._resolve_required_resource_keys,\n nested_resources=self.nested_resources,\n input_config_schema=self.__class__.input_config_schema(),\n output_config_schema=self.__class__.output_config_schema(),\n dagster_maintained=self._is_dagster_maintained(),\n )\n\n @classmethod\n def input_config_schema(\n cls,\n ) -> Optional[Union[CoercableToConfigSchema, Type[Config]]]:\n return None\n\n @classmethod\n def output_config_schema(\n cls,\n ) -> Optional[Union[CoercableToConfigSchema, Type[Config]]]:\n return None
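``configure_at_launch`` defers an IO manager factory's config to launch time, returning a ``PartialIOManager``. A hedged sketch; ``InMemoryIOManager`` and its ``prefix`` field are invented for illustration, and the omitted config would be supplied under ``resources.io_manager.config`` in run config when a run is launched:

.. code-block:: python

    from dagster import ConfigurableIOManagerFactory, Definitions, IOManager, asset

    class InMemoryIOManager(IOManager):
        def __init__(self, prefix: str):
            self._prefix = prefix
            self._values = {}

        def handle_output(self, context, obj):
            self._values[(self._prefix, *context.asset_key.path)] = obj

        def load_input(self, context):
            return self._values[(self._prefix, *context.asset_key.path)]

    class InMemoryIOManagerFactory(ConfigurableIOManagerFactory):
        prefix: str

        def create_io_manager(self, context) -> IOManager:
            return InMemoryIOManager(self.prefix)

    @asset
    def my_asset():
        return 1

    # prefix is intentionally omitted; it must be provided when a run is launched.
    defs = Definitions(
        assets=[my_asset],
        resources={"io_manager": InMemoryIOManagerFactory.configure_at_launch()},
    )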
\n\n\nclass PartialIOManager(Generic[TResValue], PartialResource[TResValue]):\n def __init__(\n self,\n resource_cls: Type[ConfigurableResourceFactory[TResValue]],\n data: Dict[str, Any],\n ):\n PartialResource.__init__(self, resource_cls, data)\n\n @cached_method\n def get_resource_definition(self) -> ConfigurableIOManagerFactoryResourceDefinition:\n input_config_schema = None\n output_config_schema = None\n if safe_is_subclass(self.resource_cls, ConfigurableIOManagerFactory):\n factory_cls: Type[ConfigurableIOManagerFactory] = cast(\n Type[ConfigurableIOManagerFactory], self.resource_cls\n )\n input_config_schema = factory_cls.input_config_schema()\n output_config_schema = factory_cls.output_config_schema()\n\n return ConfigurableIOManagerFactoryResourceDefinition(\n self.resource_cls,\n resource_fn=self._state__internal__.resource_fn,\n config_schema=self._state__internal__.config_schema,\n description=self._state__internal__.description,\n resolve_resource_keys=self._resolve_required_resource_keys,\n nested_resources=self._state__internal__.nested_resources,\n input_config_schema=input_config_schema,\n output_config_schema=output_config_schema,\n dagster_maintained=self.resource_cls._is_dagster_maintained(), # noqa: SLF001\n )\n\n\n
[docs]class ConfigurableIOManager(ConfigurableIOManagerFactory, IOManager):\n """Base class for Dagster IO managers that utilize structured config.\n\n This class is a subclass of :py:class:`IOManagerDefinition`, :py:class:`Config`,\n and :py:class:`IOManager`. Implementers must provide an implementation of the\n :py:meth:`handle_output` and :py:meth:`load_input` methods.\n\n Example definition:\n\n .. code-block:: python\n\n class MyIOManager(ConfigurableIOManager):\n path_prefix: List[str]\n\n def _get_path(self, context) -> str:\n return "/".join([*self.path_prefix, *context.asset_key.path])\n\n def handle_output(self, context, obj):\n write_csv(self._get_path(context), obj)\n\n def load_input(self, context):\n return read_csv(self._get_path(context))\n\n defs = Definitions(\n ...,\n resources={\n "io_manager": MyIOManager(path_prefix=["my", "prefix"])\n }\n )\n\n """\n\n def create_io_manager(self, context) -> IOManager:\n return self
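Because ``ConfigurableIOManager.create_io_manager`` returns ``self``, an instance can be exercised directly in tests by calling ``handle_output`` and ``load_input`` with built contexts. A minimal sketch; ``DictIOManager`` and the module-level store are invented for illustration:

.. code-block:: python

    from dagster import (
        AssetKey,
        ConfigurableIOManager,
        build_input_context,
        build_output_context,
    )

    _STORE: dict = {}

    class DictIOManager(ConfigurableIOManager):
        namespace: str

        def _key(self, context) -> str:
            return "/".join([self.namespace, *context.asset_key.path])

        def handle_output(self, context, obj):
            _STORE[self._key(context)] = obj

        def load_input(self, context):
            return _STORE[self._key(context)]

    manager = DictIOManager(namespace="test")
    manager.handle_output(build_output_context(asset_key=AssetKey("my_asset")), 42)
    assert manager.load_input(build_input_context(asset_key=AssetKey("my_asset"))) == 42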
\n\n\nclass ConfigurableLegacyIOManagerAdapter(ConfigurableIOManagerFactory):\n """Adapter base class for wrapping a decorated, function-style I/O manager\n with structured config.\n\n To use this class, subclass it, define config schema fields using Pydantic,\n and implement the ``wrapped_io_manager`` method.\n\n Example:\n .. code-block:: python\n\n class OldIOManager(IOManager):\n def __init__(self, base_path: str):\n ...\n\n @io_manager(config_schema={"base_path": str})\n def old_io_manager(context):\n base_path = context.resource_config["base_path"]\n\n return OldIOManager(base_path)\n\n class MyIOManager(ConfigurableLegacyIOManagerAdapter):\n base_path: str\n\n @property\n def wrapped_io_manager(self) -> IOManagerDefinition:\n return old_io_manager\n """\n\n @property\n @abstractmethod\n def wrapped_io_manager(self) -> IOManagerDefinition:\n raise NotImplementedError()\n\n def create_io_manager(self, context) -> IOManager:\n raise NotImplementedError(\n "Because we override resource_fn in the adapter, this is never called."\n )\n\n @cached_method\n def get_resource_definition(self) -> ConfigurableIOManagerFactoryResourceDefinition:\n return ConfigurableIOManagerFactoryResourceDefinition(\n self.__class__,\n resource_fn=self.wrapped_io_manager.resource_fn,\n config_schema=self._config_schema,\n description=self.__doc__,\n resolve_resource_keys=self._resolve_required_resource_keys,\n nested_resources=self.nested_resources,\n dagster_maintained=self._is_dagster_maintained(),\n )\n
", "current_page_name": "_modules/dagster/_config/pythonic_config/io_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._config.pythonic_config.io_manager"}, "resource": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._config.pythonic_config.resource

\nimport contextlib\nimport inspect\nfrom typing import (\n    AbstractSet,\n    Any,\n    Callable,\n    Dict,\n    Generator,\n    Generic,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Set,\n    Type,\n    TypeVar,\n    Union,\n    cast,\n)\n\nfrom typing_extensions import TypeAlias, TypeGuard, get_args, get_origin\n\nfrom dagster import (\n    Field as DagsterField,\n)\nfrom dagster._annotations import deprecated\nfrom dagster._config.field_utils import config_dictionary_from_values\nfrom dagster._config.pythonic_config.typing_utils import (\n    TypecheckAllowPartialResourceInitParams,\n)\nfrom dagster._config.validate import validate_config\nfrom dagster._core.definitions.definition_config_schema import (\n    ConfiguredDefinitionConfigSchema,\n    DefinitionConfigSchema,\n)\nfrom dagster._core.errors import DagsterInvalidConfigError\nfrom dagster._core.execution.context.init import InitResourceContext, build_init_resource_context\nfrom dagster._utils.cached_method import cached_method\n\nfrom .attach_other_object_to_context import (\n    IAttachDifferentObjectToOpContext as IAttachDifferentObjectToOpContext,\n)\nfrom .pydantic_compat_layer import (\n    model_fields,\n)\n\ntry:\n    from functools import cached_property  # type: ignore  # (py37 compat)\nexcept ImportError:\n\n    class cached_property:\n        pass\n\n\nfrom abc import ABC, abstractmethod\n\nfrom pydantic import BaseModel\n\nimport dagster._check as check\nfrom dagster._core.decorator_utils import get_function_params\nfrom dagster._core.definitions.resource_definition import (\n    ResourceDefinition,\n    ResourceFunction,\n    ResourceFunctionWithContext,\n    ResourceFunctionWithoutContext,\n    has_at_least_one_parameter,\n)\nfrom dagster._core.storage.io_manager import IOManagerDefinition\n\nfrom .config import Config, MakeConfigCacheable, infer_schema_from_config_class\nfrom .conversion_utils import (\n    TResValue,\n    _curry_config_schema,\n)\nfrom .typing_utils import BaseResourceMeta, LateBoundTypesForResourceTypeChecking\n\nSelf = TypeVar("Self", bound="ConfigurableResourceFactory")\nResourceId: TypeAlias = int\n\n\nclass AllowDelayedDependencies:\n    _nested_partial_resources: Mapping[str, ResourceDefinition] = {}\n\n    def _resolve_required_resource_keys(\n        self, resource_mapping: Mapping[int, str]\n    ) -> AbstractSet[str]:\n        from dagster._core.execution.build_resources import wrap_resource_for_execution\n\n        # All dependent resources which are not fully configured\n        # must be specified to the Definitions object so that the\n        # resource can be configured at runtime by the user\n        nested_partial_resource_keys = {\n            attr_name: resource_mapping.get(id(resource_def))\n            for attr_name, resource_def in self._nested_partial_resources.items()\n        }\n        check.invariant(\n            all(pointer_key is not None for pointer_key in nested_partial_resource_keys.values()),\n            "Any partially configured, nested resources must be provided to Definitions"\n            f" object: {nested_partial_resource_keys}",\n        )\n\n        # Recursively get all nested resource keys\n        nested_resource_required_keys: Set[str] = set()\n        for v in self._nested_partial_resources.values():\n            nested_resource_required_keys.update(\n                _resolve_required_resource_keys_for_resource(v, resource_mapping)\n            )\n\n        resources, _ = separate_resource_params(\n            
cast(Type[BaseModel], self.__class__), self.__dict__\n        )\n        for v in resources.values():\n            nested_resource_required_keys.update(\n                _resolve_required_resource_keys_for_resource(\n                    wrap_resource_for_execution(v), resource_mapping\n                )\n            )\n\n        out = set(cast(Set[str], nested_partial_resource_keys.values())).union(\n            nested_resource_required_keys\n        )\n        return out\n\n\nclass InitResourceContextWithKeyMapping(InitResourceContext):\n    """Passes along a mapping from ResourceDefinition id to resource key alongside the\n    InitResourceContext. This is used to resolve the required resource keys for\n    resources which may hold nested partial resources.\n    """\n\n    def __init__(\n        self,\n        context: InitResourceContext,\n        resource_id_to_key_mapping: Mapping[ResourceId, str],\n    ):\n        super().__init__(\n            resource_config=context.resource_config,\n            resources=context.resources,\n            instance=context.instance,\n            resource_def=context.resource_def,\n            dagster_run=context.dagster_run,\n            log_manager=context.log,\n        )\n        self._resource_id_to_key_mapping = resource_id_to_key_mapping\n        self._resources_by_id = {\n            resource_id: getattr(context.resources, resource_key, None)\n            for resource_id, resource_key in resource_id_to_key_mapping.items()\n        }\n\n    @property\n    def resources_by_id(self) -> Mapping[ResourceId, Any]:\n        return self._resources_by_id\n\n    def replace_config(self, config: Any) -> "InitResourceContext":\n        return InitResourceContextWithKeyMapping(\n            super().replace_config(config), self._resource_id_to_key_mapping\n        )\n\n\nclass ResourceWithKeyMapping(ResourceDefinition):\n    """Wrapper around a ResourceDefinition which helps the inner resource resolve its required\n    resource keys. This is useful for resources which may hold nested resources. 
At construction\n    time, they are unaware of the resource keys of their nested resources - the resource id to\n    key mapping is used to resolve this.\n    """\n\n    def __init__(\n        self,\n        resource: ResourceDefinition,\n        resource_id_to_key_mapping: Dict[ResourceId, str],\n    ):\n        self._resource = resource\n        self._resource_id_to_key_mapping = resource_id_to_key_mapping\n\n        ResourceDefinition.__init__(\n            self,\n            resource_fn=self.setup_context_resources_and_call,\n            config_schema=resource.config_schema,\n            description=resource.description,\n            version=resource.version,\n        )\n\n    def setup_context_resources_and_call(self, context: InitResourceContext):\n        """Wrapper around the wrapped resource's resource_fn which attaches its\n        resource id to key mapping to the context, and then calls the nested resource's resource_fn.\n        """\n        context_with_key_mapping = InitResourceContextWithKeyMapping(\n            context, self._resource_id_to_key_mapping\n        )\n\n        if has_at_least_one_parameter(self._resource.resource_fn):\n            return self._resource.resource_fn(context_with_key_mapping)\n        else:\n            return cast(ResourceFunctionWithoutContext, self._resource.resource_fn)()\n\n    @property\n    def required_resource_keys(self) -> AbstractSet[str]:\n        return _resolve_required_resource_keys_for_resource(\n            self._resource, self._resource_id_to_key_mapping\n        )\n\n    @property\n    def wrapped_resource(self) -> ResourceDefinition:\n        return self._resource\n\n    @property\n    def inner_resource(self):\n        return self._resource\n\n\ndef attach_resource_id_to_key_mapping(\n    resource_def: Any, resource_id_to_key_mapping: Dict[ResourceId, str]\n) -> Any:\n    from .io_manager import IOManagerWithKeyMapping\n\n    if isinstance(resource_def, (ConfigurableResourceFactory, PartialResource)):\n        defn = resource_def.get_resource_definition()\n        return (\n            IOManagerWithKeyMapping(defn, resource_id_to_key_mapping)\n            if isinstance(defn, IOManagerDefinition)\n            else ResourceWithKeyMapping(defn, resource_id_to_key_mapping)\n        )\n    return resource_def\n\n\nCoercibleToResource: TypeAlias = Union[\n    ResourceDefinition, "ConfigurableResourceFactory", "PartialResource"\n]\n\n\ndef is_coercible_to_resource(val: Any) -> TypeGuard[CoercibleToResource]:\n    return isinstance(val, (ResourceDefinition, ConfigurableResourceFactory, PartialResource))\n\n\nclass ConfigurableResourceFactoryResourceDefinition(ResourceDefinition, AllowDelayedDependencies):\n    def __init__(\n        self,\n        configurable_resource_cls: Type,\n        resource_fn: ResourceFunction,\n        config_schema: Any,\n        description: Optional[str],\n        resolve_resource_keys: Callable[[Mapping[int, str]], AbstractSet[str]],\n        nested_resources: Mapping[str, Any],\n        dagster_maintained: bool = False,\n    ):\n        super().__init__(\n            resource_fn=resource_fn,\n            config_schema=config_schema,\n            description=description,\n        )\n        self._configurable_resource_cls = configurable_resource_cls\n        self._resolve_resource_keys = resolve_resource_keys\n        self._nested_resources = nested_resources\n        self._dagster_maintained = dagster_maintained\n\n    @property\n    def configurable_resource_cls(self) -> Type:\n        return 
self._configurable_resource_cls\n\n    @property\n    def nested_resources(\n        self,\n    ) -> Mapping[str, Any]:\n        return self._nested_resources\n\n    def _resolve_required_resource_keys(\n        self, resource_mapping: Mapping[int, str]\n    ) -> AbstractSet[str]:\n        return self._resolve_resource_keys(resource_mapping)\n\n    def _is_dagster_maintained(self) -> bool:\n        return self._dagster_maintained\n\n\nclass ConfigurableResourceFactoryState(NamedTuple):\n    nested_partial_resources: Mapping[str, Any]\n    resolved_config_dict: Dict[str, Any]\n    config_schema: DefinitionConfigSchema\n    schema: DagsterField\n    nested_resources: Dict[str, Any]\n    resource_context: Optional[InitResourceContext]\n\n\nclass ConfigurableResourceFactory(\n    Generic[TResValue],\n    Config,\n    TypecheckAllowPartialResourceInitParams,\n    AllowDelayedDependencies,\n    ABC,\n    metaclass=BaseResourceMeta,\n):\n    """Base class for creating and managing the lifecycle of Dagster resources that utilize structured config.\n\n    Users should directly inherit from this class when they want the object passed to user-defined\n    code (such as an asset or op) to be different than the object that defines the configuration\n    schema and is passed to the :py:class:`Definitions` object. Cases where this is useful include is\n    when the object passed to user code is:\n\n    * An existing class from a third-party library that the user does not control.\n    * A complex class that requires substantial internal state management or itself requires arguments beyond its config values.\n    * A class with expensive initialization that should not be invoked on code location load, but rather lazily on first use in an op or asset during a run.\n    * A class that you desire to be a plain Python class, rather than a Pydantic class, for whatever reason.\n\n    This class is a subclass of both :py:class:`ResourceDefinition` and :py:class:`Config`, and\n    must implement ``create_resource``, which creates the resource to pass to user code.\n\n    Example definition:\n\n    .. code-block:: python\n\n        class DatabaseResource(ConfigurableResourceFactory[Database]):\n            connection_uri: str\n\n            def create_resource(self, _init_context) -> Database:\n                # For example Database could be from a third-party library or require expensive setup.\n                # Or you could just prefer to separate the concerns of configuration and runtime representation\n                return Database(self.connection_uri)\n\n    To use a resource created by a factory in a job, you must use the Resource type annotation.\n\n    Example usage:\n\n\n    .. 
code-block:: python\n\n        @asset\n        def asset_that_uses_database(database: ResourceParam[Database]):\n            # Database used directly in user code\n            database.query("SELECT * FROM table")\n\n        defs = Definitions(\n            assets=[asset_that_uses_database],\n            resources={"database": DatabaseResource(connection_uri="some_uri")},\n        )\n\n    """\n\n    def __init__(self, **data: Any):\n        resource_pointers, data_without_resources = separate_resource_params(self.__class__, data)\n\n        schema = infer_schema_from_config_class(\n            self.__class__, fields_to_omit=set(resource_pointers.keys())\n        )\n\n        # Populate config values\n        Config.__init__(self, **{**data_without_resources, **resource_pointers})\n\n        # We pull the values from the Pydantic config object, which may cast values\n        # to the correct type under the hood - useful in particular for enums\n        casted_data_without_resources = {\n            k: v\n            for k, v in self._convert_to_config_dictionary().items()\n            if k in data_without_resources\n        }\n        resolved_config_dict = config_dictionary_from_values(casted_data_without_resources, schema)\n\n        self._state__internal__ = ConfigurableResourceFactoryState(\n            # We keep track of any resources we depend on which are not fully configured\n            # so that we can retrieve them at runtime\n            nested_partial_resources={\n                k: v for k, v in resource_pointers.items() if (not _is_fully_configured(v))\n            },\n            resolved_config_dict=resolved_config_dict,\n            # These are unfortunately named very similarily\n            config_schema=_curry_config_schema(schema, resolved_config_dict),\n            schema=schema,\n            nested_resources={k: v for k, v in resource_pointers.items()},\n            resource_context=None,\n        )\n\n    @property\n    def _schema(self):\n        return self._state__internal__.schema\n\n    @property\n    def _config_schema(self):\n        return self._state__internal__.config_schema\n\n    @property\n    def _nested_partial_resources(self):\n        return self._state__internal__.nested_partial_resources\n\n    @property\n    def _nested_resources(self):\n        return self._state__internal__.nested_resources\n\n    @property\n    def _resolved_config_dict(self):\n        return self._state__internal__.resolved_config_dict\n\n    @classmethod\n    def _is_dagster_maintained(cls) -> bool:\n        """This should be overridden to return True by all dagster maintained resources and IO managers."""\n        return False\n\n    @classmethod\n    def _is_cm_resource_cls(cls: Type["ConfigurableResourceFactory"]) -> bool:\n        return (\n            cls.yield_for_execution != ConfigurableResourceFactory.yield_for_execution\n            or cls.teardown_after_execution != ConfigurableResourceFactory.teardown_after_execution\n        )\n\n    @property\n    def _is_cm_resource(self) -> bool:\n        return self.__class__._is_cm_resource_cls()  # noqa: SLF001\n\n    def _get_initialize_and_run_fn(self) -> Callable:\n        return self._initialize_and_run_cm if self._is_cm_resource else self._initialize_and_run\n\n    @cached_method\n    def get_resource_definition(self) -> ConfigurableResourceFactoryResourceDefinition:\n        return ConfigurableResourceFactoryResourceDefinition(\n            self.__class__,\n            
resource_fn=self._get_initialize_and_run_fn(),\n            config_schema=self._config_schema,\n            description=self.__doc__,\n            resolve_resource_keys=self._resolve_required_resource_keys,\n            nested_resources=self.nested_resources,\n            dagster_maintained=self._is_dagster_maintained(),\n        )\n\n    @abstractmethod\n    def create_resource(self, context: InitResourceContext) -> TResValue:\n        """Returns the object that this resource hands to user code, accessible by ops or assets\n        through the context or resource parameters. This works like the function decorated\n        with @resource when using function-based resources.\n        """\n        raise NotImplementedError()\n\n    @property\n    def nested_resources(\n        self,\n    ) -> Mapping[str, Any]:\n        return self._nested_resources\n\n    @classmethod\n    def configure_at_launch(cls: "Type[Self]", **kwargs) -> "PartialResource[Self]":\n        """Returns a partially initialized copy of the resource, with remaining config fields\n        set at runtime.\n        """\n        return PartialResource(cls, data=kwargs)\n\n    def _with_updated_values(\n        self, values: Optional[Mapping[str, Any]]\n    ) -> "ConfigurableResourceFactory[TResValue]":\n        """Returns a new instance of the resource with the given values.\n        Used when initializing a resource at runtime.\n        """\n        values = check.opt_mapping_param(values, "values", key_type=str)\n        # Since Resource extends BaseModel and is a dataclass, we know that the\n        # signature of any __init__ method will always consist of the fields\n        # of this class. We can therefore safely pass in the values as kwargs.\n        out = self.__class__(**{**self._get_non_none_public_field_values(), **values})\n        out._state__internal__ = out._state__internal__._replace(  # noqa: SLF001\n            resource_context=self._state__internal__.resource_context\n        )\n        return out\n\n    @contextlib.contextmanager\n    def _resolve_and_update_nested_resources(\n        self, context: InitResourceContext\n    ) -> Generator["ConfigurableResourceFactory[TResValue]", None, None]:\n        """Updates any nested resources with the resource values from the context.\n        In this case, populating partially configured resources or\n        resources that return plain Python types.\n\n        Returns a new instance of the resource.\n        """\n        from dagster._core.execution.build_resources import wrap_resource_for_execution\n\n        partial_resources_to_update: Dict[str, Any] = {}\n        if self._nested_partial_resources:\n            context_with_mapping = cast(\n                InitResourceContextWithKeyMapping,\n                check.inst(\n                    context,\n                    InitResourceContextWithKeyMapping,\n                    "This ConfiguredResource contains unresolved partially-specified nested"\n                    " resources, and so can only be initialized using a"\n                    " InitResourceContextWithKeyMapping",\n                ),\n            )\n            partial_resources_to_update = {\n                attr_name: context_with_mapping.resources_by_id[id(resource)]\n                for attr_name, resource in self._nested_partial_resources.items()\n            }\n\n        # Also evaluate any resources that are not partial\n        with contextlib.ExitStack() as stack:\n            resources_to_update, _ = separate_resource_params(self.__class__, 
self.__dict__)\n            resources_to_update = {\n                attr_name: _call_resource_fn_with_default(\n                    stack, wrap_resource_for_execution(resource), context\n                )\n                for attr_name, resource in resources_to_update.items()\n                if attr_name not in partial_resources_to_update\n            }\n\n            to_update = {**resources_to_update, **partial_resources_to_update}\n            yield self._with_updated_values(to_update)\n\n    @deprecated(\n        breaking_version="2.0", additional_warn_text="Use `with_replaced_resource_context` instead"\n    )\n    def with_resource_context(\n        self, resource_context: InitResourceContext\n    ) -> "ConfigurableResourceFactory[TResValue]":\n        return self.with_replaced_resource_context(resource_context)\n\n    def with_replaced_resource_context(\n        self, resource_context: InitResourceContext\n    ) -> "ConfigurableResourceFactory[TResValue]":\n        """Returns a new instance of the resource with the given resource init context bound."""\n        # This utility is used to create a copy of this resource, without adjusting\n        # any values in this case\n        copy = self._with_updated_values({})\n        copy._state__internal__ = copy._state__internal__._replace(  # noqa: SLF001\n            resource_context=resource_context\n        )\n        return copy\n\n    def _initialize_and_run(self, context: InitResourceContext) -> TResValue:\n        with self._resolve_and_update_nested_resources(context) as has_nested_resource:\n            updated_resource = has_nested_resource.with_replaced_resource_context(  # noqa: SLF001\n                context\n            )._with_updated_values(context.resource_config)\n\n            updated_resource.setup_for_execution(context)\n            return updated_resource.create_resource(context)\n\n    @contextlib.contextmanager\n    def _initialize_and_run_cm(\n        self, context: InitResourceContext\n    ) -> Generator[TResValue, None, None]:\n        with self._resolve_and_update_nested_resources(context) as has_nested_resource:\n            updated_resource = has_nested_resource.with_replaced_resource_context(  # noqa: SLF001\n                context\n            )._with_updated_values(context.resource_config)\n\n            with updated_resource.yield_for_execution(context) as value:\n                yield value\n\n    def setup_for_execution(self, context: InitResourceContext) -> None:\n        """Optionally override this method to perform any pre-execution steps\n        needed before the resource is used in execution.\n        """\n        pass\n\n    def teardown_after_execution(self, context: InitResourceContext) -> None:\n        """Optionally override this method to perform any post-execution steps\n        needed after the resource is used in execution.\n\n        teardown_after_execution will be called even if any part of the run fails.\n        It will not be called if setup_for_execution fails.\n        """\n        pass\n\n    @contextlib.contextmanager\n    def yield_for_execution(self, context: InitResourceContext) -> Generator[TResValue, None, None]:\n        """Optionally override this method to perform any lifecycle steps\n        before or after the resource is used in execution. 
By default, calls\n        setup_for_execution before yielding, and teardown_after_execution after yielding.\n\n        Note that if you override this method and want setup_for_execution or\n        teardown_after_execution to be called, you must invoke them yourself.\n        """\n        self.setup_for_execution(context)\n        try:\n            yield self.create_resource(context)\n        finally:\n            self.teardown_after_execution(context)\n\n    def get_resource_context(self) -> InitResourceContext:\n        """Returns the context that this resource was initialized with."""\n        return check.not_none(\n            self._state__internal__.resource_context,\n            additional_message="Attempted to get context before resource was initialized.",\n        )\n\n    def process_config_and_initialize(self) -> TResValue:\n        """Initializes this resource, fully processing its config and returning the prepared\n        resource value.\n        """\n        from dagster._config.post_process import post_process_config\n\n        return self.from_resource_context(\n            build_init_resource_context(\n                config=post_process_config(\n                    self._config_schema.config_type, self._convert_to_config_dictionary()\n                ).value\n            )\n        )\n\n    @classmethod\n    def from_resource_context(cls, context: InitResourceContext) -> TResValue:\n        """Creates a new instance of this resource from a populated InitResourceContext.\n        Useful when creating a resource from a function-based resource, for backwards\n        compatibility purposes.\n\n        For resources that have custom teardown behavior, use from_resource_context_cm instead.\n\n        Example usage:\n\n        .. code-block:: python\n\n            class MyResource(ConfigurableResource):\n                my_str: str\n\n            @resource(config_schema=MyResource.to_config_schema())\n            def my_resource(context: InitResourceContext) -> MyResource:\n                return MyResource.from_resource_context(context)\n\n        """\n        check.invariant(\n            not cls._is_cm_resource_cls(),\n            "Use from_resource_context_cm for resources which have custom teardown behavior,"\n            " e.g. overriding yield_for_execution or teardown_after_execution",\n        )\n        return cls(**context.resource_config or {})._initialize_and_run(context)  # noqa: SLF001\n\n    @classmethod\n    @contextlib.contextmanager\n    def from_resource_context_cm(\n        cls, context: InitResourceContext\n    ) -> Generator[TResValue, None, None]:\n        """Context which generates a new instance of this resource from a populated InitResourceContext.\n        Useful when creating a resource from a function-based resource, for backwards\n        compatibility purposes. Handles custom teardown behavior.\n\n        Example usage:\n\n        .. code-block:: python\n\n            class MyResource(ConfigurableResource):\n                my_str: str\n\n            @resource(config_schema=MyResource.to_config_schema())\n            def my_resource(context: InitResourceContext) -> Generator[MyResource, None, None]:\n                with MyResource.from_resource_context_cm(context) as my_resource:\n                    yield my_resource\n\n        """\n        with cls(**context.resource_config or {})._initialize_and_run_cm(  # noqa: SLF001\n            context\n        ) as value:\n            yield value\n\n\n
[docs]class ConfigurableResource(ConfigurableResourceFactory[TResValue]):\n """Base class for Dagster resources that utilize structured config.\n\n This class is a subclass of both :py:class:`ResourceDefinition` and :py:class:`Config`.\n\n Example definition:\n\n .. code-block:: python\n\n class WriterResource(ConfigurableResource):\n prefix: str\n\n def output(self, text: str) -> None:\n print(f"{self.prefix}{text}")\n\n Example usage:\n\n .. code-block:: python\n\n @asset\n def asset_that_uses_writer(writer: WriterResource):\n writer.output("text")\n\n defs = Definitions(\n assets=[asset_that_uses_writer],\n resources={"writer": WriterResource(prefix="a_prefix")},\n )\n\n """\n\n def create_resource(self, context: InitResourceContext) -> TResValue:\n """Returns the object that this resource hands to user code, accessible by ops or assets\n through the context or resource parameters. This works like the function decorated\n with @resource when using function-based resources.\n\n For ConfigurableResource, this function will return itself, passing\n the actual ConfigurableResource object to user code.\n """\n return cast(TResValue, self)
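Nested resources and ``configure_at_launch`` are what the partial-resource and key-mapping machinery above exists to support. A sketch of the intended wiring; ``CredentialsResource`` and ``ClientResource`` are invented for illustration, and the partially configured resource is passed both to the dependent resource and to ``Definitions`` so its key can be resolved at runtime:

.. code-block:: python

    from dagster import ConfigurableResource, Definitions, asset

    class CredentialsResource(ConfigurableResource):
        username: str
        password: str

    class ClientResource(ConfigurableResource):
        credentials: CredentialsResource
        host: str

        def url(self) -> str:
            return f"https://{self.credentials.username}@{self.host}"

    @asset
    def asset_that_uses_client(client: ClientResource) -> str:
        return client.url()

    # Credentials config is supplied via run config when a run is launched.
    credentials = CredentialsResource.configure_at_launch()

    defs = Definitions(
        assets=[asset_that_uses_client],
        resources={
            "credentials": credentials,
            "client": ClientResource(credentials=credentials, host="example.com"),
        },
    )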
\n\n\ndef _is_fully_configured(resource: CoercibleToResource) -> bool:\n from dagster._core.execution.build_resources import wrap_resource_for_execution\n\n actual_resource = wrap_resource_for_execution(resource)\n res = (\n validate_config(\n actual_resource.config_schema.config_type,\n (\n actual_resource.config_schema.default_value\n if actual_resource.config_schema.default_provided\n else {}\n ),\n ).success\n is True\n )\n\n return res\n\n\nclass PartialResourceState(NamedTuple):\n nested_partial_resources: Dict[str, Any]\n config_schema: DagsterField\n resource_fn: Callable[[InitResourceContext], Any]\n description: Optional[str]\n nested_resources: Dict[str, Any]\n\n\nclass PartialResource(Generic[TResValue], AllowDelayedDependencies, MakeConfigCacheable):\n data: Dict[str, Any]\n resource_cls: Type[ConfigurableResourceFactory[TResValue]]\n\n def __init__(\n self,\n resource_cls: Type[ConfigurableResourceFactory[TResValue]],\n data: Dict[str, Any],\n ):\n resource_pointers, _data_without_resources = separate_resource_params(resource_cls, data)\n\n MakeConfigCacheable.__init__(self, data=data, resource_cls=resource_cls) # type: ignore # extends BaseModel, takes kwargs\n\n def resource_fn(context: InitResourceContext):\n instantiated = resource_cls(\n **{**data, **context.resource_config}\n ) # So that collisions are resolved in favor of the latest provided run config\n return instantiated._get_initialize_and_run_fn()(context) # noqa: SLF001\n\n self._state__internal__ = PartialResourceState(\n # We keep track of any resources we depend on which are not fully configured\n # so that we can retrieve them at runtime\n nested_partial_resources={\n k: v for k, v in resource_pointers.items() if (not _is_fully_configured(v))\n },\n config_schema=infer_schema_from_config_class(\n resource_cls, fields_to_omit=set(resource_pointers.keys())\n ),\n resource_fn=resource_fn,\n description=resource_cls.__doc__,\n nested_resources={k: v for k, v in resource_pointers.items()},\n )\n\n # to make AllowDelayedDependencies work\n @property\n def _nested_partial_resources(\n self,\n ) -> Mapping[str, Any]:\n return self._state__internal__.nested_partial_resources\n\n @property\n def nested_resources(\n self,\n ) -> Mapping[str, Any]:\n return self._state__internal__.nested_resources\n\n @cached_method\n def get_resource_definition(self) -> ConfigurableResourceFactoryResourceDefinition:\n return ConfigurableResourceFactoryResourceDefinition(\n self.resource_cls,\n resource_fn=self._state__internal__.resource_fn,\n config_schema=self._state__internal__.config_schema,\n description=self._state__internal__.description,\n resolve_resource_keys=self._resolve_required_resource_keys,\n nested_resources=self.nested_resources,\n dagster_maintained=self.resource_cls._is_dagster_maintained(), # noqa: SLF001\n )\n\n\nResourceOrPartial: TypeAlias = Union[\n ConfigurableResourceFactory[TResValue], PartialResource[TResValue]\n]\nResourceOrPartialOrValue: TypeAlias = Union[\n ConfigurableResourceFactory[TResValue],\n PartialResource[TResValue],\n ResourceDefinition,\n TResValue,\n]\n\n\nV = TypeVar("V")\n\n\nclass ResourceDependency(Generic[V]):\n def __set_name__(self, _owner, name):\n self._name = name\n\n def __get__(self, obj: "ConfigurableResourceFactory", __owner: Any) -> V:\n return getattr(obj, self._name)\n\n def __set__(self, obj: Optional[object], value: ResourceOrPartialOrValue[V]) -> None:\n setattr(obj, self._name, value)\n\n\nclass ConfigurableLegacyResourceAdapter(ConfigurableResource, ABC):\n """Adapter 
base class for wrapping a decorated, function-style resource\n with structured config.\n\n To use this class, subclass it, define config schema fields using Pydantic,\n and implement the ``wrapped_resource`` method.\n\n Example:\n .. code-block:: python\n\n @resource(config_schema={"prefix": str})\n def writer_resource(context):\n prefix = context.resource_config["prefix"]\n\n def output(text: str) -> None:\n out_txt.append(f"{prefix}{text}")\n\n return output\n\n class WriterResource(ConfigurableLegacyResourceAdapter):\n prefix: str\n\n @property\n def wrapped_resource(self) -> ResourceDefinition:\n return writer_resource\n """\n\n @property\n @abstractmethod\n def wrapped_resource(self) -> ResourceDefinition:\n raise NotImplementedError()\n\n @cached_method\n def get_resource_definition(self) -> ConfigurableResourceFactoryResourceDefinition:\n return ConfigurableResourceFactoryResourceDefinition(\n self.__class__,\n resource_fn=self.wrapped_resource.resource_fn,\n config_schema=self._config_schema,\n description=self.__doc__,\n resolve_resource_keys=self._resolve_required_resource_keys,\n nested_resources=self.nested_resources,\n dagster_maintained=self._is_dagster_maintained(),\n )\n\n def __call__(self, *args, **kwargs):\n return self.wrapped_resource(*args, **kwargs)\n\n\nclass SeparatedResourceParams(NamedTuple):\n resources: Dict[str, Any]\n non_resources: Dict[str, Any]\n\n\ndef _is_annotated_as_resource_type(annotation: Type, metadata: List[str]) -> bool:\n """Determines if a field in a structured config class is annotated as a resource type or not."""\n from .type_check_utils import safe_is_subclass\n\n if metadata and metadata[0] == "resource_dependency":\n return True\n\n is_annotated_as_resource_dependency = get_origin(annotation) == ResourceDependency or getattr(\n annotation, "__metadata__", None\n ) == ("resource_dependency",)\n\n return is_annotated_as_resource_dependency or safe_is_subclass(\n annotation, (ResourceDefinition, ConfigurableResourceFactory)\n )\n\n\nclass ResourceDataWithAnnotation(NamedTuple):\n key: str\n value: Any\n annotation: Type\n annotation_metadata: List[str]\n\n\ndef separate_resource_params(cls: Type[BaseModel], data: Dict[str, Any]) -> SeparatedResourceParams:\n """Separates out the key/value inputs of fields in a structured config Resource class which\n are marked as resources (ie, using ResourceDependency) from those which are not.\n """\n fields_by_resolved_field_name = {\n field.alias if field.alias else key: field for key, field in model_fields(cls).items()\n }\n data_with_annotation: List[ResourceDataWithAnnotation] = [\n # No longer exists in Pydantic 2.x, will need to be updated when we upgrade\n ResourceDataWithAnnotation(\n key=field_name,\n value=field_value,\n annotation=fields_by_resolved_field_name[field_name].annotation,\n annotation_metadata=fields_by_resolved_field_name[field_name].metadata,\n )\n for field_name, field_value in data.items()\n if field_name in fields_by_resolved_field_name\n ]\n # We need to grab metadata from the annotation in order to tell if\n # this key was annotated with a typing.Annotated annotation (which we use for resource/resource deps),\n # since Pydantic 2.0 strips that info out and sticks any Annotated metadata in the\n # metadata field\n out = SeparatedResourceParams(\n resources={\n d.key: d.value\n for d in data_with_annotation\n if _is_annotated_as_resource_type(\n d.annotation,\n d.annotation_metadata,\n )\n },\n non_resources={\n d.key: d.value\n for d in data_with_annotation\n if not 
_is_annotated_as_resource_type(\n d.annotation,\n d.annotation_metadata,\n )\n },\n )\n return out\n\n\ndef _call_resource_fn_with_default(\n stack: contextlib.ExitStack, obj: ResourceDefinition, context: InitResourceContext\n) -> Any:\n from dagster._config.validate import process_config\n\n if isinstance(obj.config_schema, ConfiguredDefinitionConfigSchema):\n value = cast(Dict[str, Any], obj.config_schema.resolve_config({}).value)\n context = context.replace_config(value["config"])\n elif obj.config_schema.default_provided:\n # To explain why we need to process config here;\n # - The resource available on the init context (context.resource_config) has already been processed\n # - The nested resource's config has also already been processed, but is only available in the broader run config dictionary.\n # - The only information we have access to here is the unprocessed default value, so we need to process it a second time.\n unprocessed_config = obj.config_schema.default_value\n evr = process_config(\n {"config": obj.config_schema.config_type}, {"config": unprocessed_config}\n )\n if not evr.success:\n raise DagsterInvalidConfigError(\n "Error in config for nested resource ",\n evr.errors,\n unprocessed_config,\n )\n context = context.replace_config(cast(dict, evr.value)["config"])\n\n if has_at_least_one_parameter(obj.resource_fn):\n result = cast(ResourceFunctionWithContext, obj.resource_fn)(context)\n else:\n result = cast(ResourceFunctionWithoutContext, obj.resource_fn)()\n\n is_fn_generator = inspect.isgenerator(obj.resource_fn) or isinstance(\n obj.resource_fn, contextlib.ContextDecorator\n )\n if is_fn_generator:\n return stack.enter_context(cast(contextlib.AbstractContextManager, result))\n else:\n return result\n\n\nLateBoundTypesForResourceTypeChecking.set_actual_types_for_type_checking(\n resource_dep_type=ResourceDependency,\n resource_type=ConfigurableResourceFactory,\n partial_resource_type=PartialResource,\n)\n\n\ndef validate_resource_annotated_function(fn) -> None:\n """Validates any parameters on the decorated function that are annotated with\n :py:class:`dagster.ResourceDefinition`, raising a :py:class:`dagster.DagsterInvalidDefinitionError`\n if any are not also instances of :py:class:`dagster.ConfigurableResource` (these resources should\n instead be wrapped in the :py:func:`dagster.Resource` Annotation).\n """\n from dagster import DagsterInvalidDefinitionError\n from dagster._config.pythonic_config.resource import (\n ConfigurableResource,\n ConfigurableResourceFactory,\n TResValue,\n )\n\n from .type_check_utils import safe_is_subclass\n\n malformed_params = [\n param\n for param in get_function_params(fn)\n if safe_is_subclass(param.annotation, (ResourceDefinition, ConfigurableResourceFactory))\n and not safe_is_subclass(param.annotation, ConfigurableResource)\n ]\n if len(malformed_params) > 0:\n malformed_param = malformed_params[0]\n output_type = None\n if safe_is_subclass(malformed_param.annotation, ConfigurableResourceFactory):\n orig_bases = getattr(malformed_param.annotation, "__orig_bases__", None)\n output_type = get_args(orig_bases[0])[0] if orig_bases and len(orig_bases) > 0 else None\n if output_type == TResValue:\n output_type = None\n\n output_type_name = getattr(output_type, "__name__", str(output_type))\n raise DagsterInvalidDefinitionError(\n """Resource param '{param_name}' is annotated as '{annotation_type}', but '{annotation_type}' outputs {value_message} value to user code such as @ops and @assets. 
This annotation should instead be {annotation_suggestion}""".format(\n param_name=malformed_param.name,\n annotation_type=malformed_param.annotation,\n value_message=f"a '{output_type}'" if output_type else "an unknown",\n annotation_suggestion=(\n f"'ResourceParam[{output_type_name}]'"\n if output_type\n else "'ResourceParam[Any]' or 'ResourceParam[<output type>]'"\n ),\n )\n )\n\n\ndef _resolve_required_resource_keys_for_resource(\n resource: ResourceDefinition, resource_id_to_key_mapping: Mapping[ResourceId, str]\n) -> AbstractSet[str]:\n """Gets the required resource keys for the provided resource, with the assistance of the passed\n resource-id-to-key mapping. For resources which may hold nested partial resources,\n this mapping is used to obtain the top-level resource keys to depend on.\n """\n if isinstance(resource, AllowDelayedDependencies):\n return resource._resolve_required_resource_keys(resource_id_to_key_mapping) # noqa: SLF001\n return resource.required_resource_keys\n
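``validate_resource_annotated_function`` rejects parameters annotated with a bare ``ConfigurableResourceFactory`` subclass, because user code receives the factory's output value rather than the factory itself. A sketch of the accepted annotation style; ``Database`` and ``DatabaseResource`` are invented for illustration:

.. code-block:: python

    from dagster import ConfigurableResourceFactory, Definitions, ResourceParam, asset

    class Database:
        def __init__(self, uri: str):
            self.uri = uri

    class DatabaseResource(ConfigurableResourceFactory[Database]):
        uri: str

        def create_resource(self, _context) -> Database:
            return Database(self.uri)

    # Annotating the parameter as DatabaseResource would raise a
    # DagsterInvalidDefinitionError; the plain value type wrapped in
    # ResourceParam is what user code actually receives.
    @asset
    def my_asset(database: ResourceParam[Database]) -> str:
        return database.uri

    defs = Definitions(
        assets=[my_asset],
        resources={"database": DatabaseResource(uri="postgres://localhost")},
    )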
", "current_page_name": "_modules/dagster/_config/pythonic_config/resource", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._config.pythonic_config.resource"}}, "source": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._config.source

\nimport os\n\nimport dagster._check as check\n\nfrom .config_type import ScalarUnion\nfrom .errors import PostProcessingError\nfrom .field_utils import Selector\n\nVALID_STRING_SOURCE_TYPES = (str, dict)\n\n\ndef _ensure_env_variable(var):\n    check.str_param(var, "var")\n    value = os.getenv(var)\n    if value is None:\n        raise PostProcessingError(\n            f'You have attempted to fetch the environment variable "{var}" '\n            "which is not set. In order for this execution to succeed it "\n            "must be set in this environment."\n        )\n    return value\n\n\nclass StringSourceType(ScalarUnion):\n    def __init__(self):\n        super(StringSourceType, self).__init__(\n            scalar_type=str,\n            non_scalar_schema=Selector({"env": str}),\n            _key="StringSourceType",\n        )\n\n    def post_process(self, value):\n        check.param_invariant(isinstance(value, VALID_STRING_SOURCE_TYPES), "value")\n\n        if not isinstance(value, dict):\n            return value\n\n        key, cfg = next(iter(value.items()))\n        check.invariant(key == "env", "Only valid key is env")\n        return str(_ensure_env_variable(cfg))\n\n\nclass IntSourceType(ScalarUnion):\n    def __init__(self):\n        super(IntSourceType, self).__init__(\n            scalar_type=int,\n            non_scalar_schema=Selector({"env": str}),\n            _key="IntSourceType",\n        )\n\n    def post_process(self, value):\n        check.param_invariant(isinstance(value, (dict, int)), "value", "Should be pre-validated")\n\n        if not isinstance(value, dict):\n            return value\n\n        check.invariant(len(value) == 1, "Selector should have one entry")\n\n        key, cfg = next(iter(value.items()))\n        check.invariant(key == "env", "Only valid key is env")\n        value = _ensure_env_variable(cfg)\n        try:\n            return int(value)\n        except ValueError as e:\n            raise PostProcessingError(\n                f'Value "{value}" stored in env variable "{cfg}" cannot be coerced into an int.'\n            ) from e\n\n\nclass BoolSourceType(ScalarUnion):\n    def __init__(self):\n        super(BoolSourceType, self).__init__(\n            scalar_type=bool,\n            non_scalar_schema=Selector({"env": str}),\n            _key="BoolSourceType",\n        )\n\n    def post_process(self, value):\n        check.param_invariant(isinstance(value, (dict, bool)), "value", "Should be pre-validated")\n\n        if not isinstance(value, dict):\n            return value\n\n        check.invariant(len(value) == 1, "Selector should have one entry")\n\n        key, cfg = next(iter(value.items()))\n        check.invariant(key == "env", "Only valid key is env")\n        value = _ensure_env_variable(cfg)\n        try:\n            return bool(value)\n        except ValueError as e:\n            raise PostProcessingError(\n                (\n                    'Value "{value}" stored in env variable "{var}" cannot be coerced into an bool.'\n                ).format(value=value, var=cfg)\n            ) from e\n\n\nStringSource: StringSourceType = StringSourceType()\nIntSource: IntSourceType = IntSourceType()\nBoolSource: BoolSourceType = BoolSourceType()\n
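``StringSource``, ``IntSource``, and ``BoolSource`` accept either a plain scalar or an ``{"env": ...}`` selector resolved from the environment during config post-processing. A small sketch of the selector form in a legacy ``config_schema`` (the op, job, and environment variable names are illustrative):

.. code-block:: python

    import os

    from dagster import StringSource, job, op

    @op(config_schema={"api_key": StringSource})
    def print_key_length(context):
        context.log.info(str(len(context.op_config["api_key"])))

    @job
    def source_job():
        print_key_length()

    os.environ["MY_API_KEY"] = "secret"
    result = source_job.execute_in_process(
        run_config={
            "ops": {"print_key_length": {"config": {"api_key": {"env": "MY_API_KEY"}}}}
        }
    )
    assert result.success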
", "current_page_name": "_modules/dagster/_config/source", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._config.source"}}, "_core": {"definitions": {"asset_check_result": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.asset_check_result

\nfrom typing import TYPE_CHECKING, Mapping, NamedTuple, Optional\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, experimental\nfrom dagster._core.definitions.asset_check_evaluation import (\n    AssetCheckEvaluation,\n    AssetCheckEvaluationTargetMaterializationData,\n)\nfrom dagster._core.definitions.asset_check_spec import AssetCheckSeverity\nfrom dagster._core.definitions.events import (\n    AssetKey,\n    CoercibleToAssetKey,\n    MetadataValue,\n    RawMetadataValue,\n    normalize_metadata,\n)\nfrom dagster._core.errors import DagsterInvariantViolationError\n\nif TYPE_CHECKING:\n    from dagster._core.execution.context.compute import StepExecutionContext\n\n\n
[docs]@experimental\nclass AssetCheckResult(\n NamedTuple(\n "_AssetCheckResult",\n [\n ("passed", PublicAttr[bool]),\n ("asset_key", PublicAttr[Optional[AssetKey]]),\n ("check_name", PublicAttr[Optional[str]]),\n ("metadata", PublicAttr[Mapping[str, MetadataValue]]),\n ("severity", PublicAttr[AssetCheckSeverity]),\n ],\n )\n):\n """The result of an asset check.\n\n Attributes:\n asset_key (Optional[AssetKey]):\n The asset key that was checked.\n check_name (Optional[str]):\n The name of the check.\n passed (bool):\n The pass/fail result of the check.\n metadata (Optional[Dict[str, RawMetadataValue]]):\n Arbitrary metadata about the asset. Keys are displayed string labels, and values are\n one of the following: string, float, int, JSON-serializable dict, JSON-serializable\n list, and one of the data classes returned by a MetadataValue static method.\n severity (AssetCheckSeverity):\n Severity of the check. Defaults to ERROR.\n\n """\n\n def __new__(\n cls,\n *,\n passed: bool,\n asset_key: Optional[CoercibleToAssetKey] = None,\n check_name: Optional[str] = None,\n metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n severity: AssetCheckSeverity = AssetCheckSeverity.ERROR,\n ):\n normalized_metadata = normalize_metadata(\n check.opt_mapping_param(metadata, "metadata", key_type=str),\n )\n return super().__new__(\n cls,\n asset_key=AssetKey.from_coercible(asset_key) if asset_key is not None else None,\n check_name=check.opt_str_param(check_name, "check_name"),\n passed=check.bool_param(passed, "passed"),\n metadata=normalized_metadata,\n severity=check.inst_param(severity, "severity", AssetCheckSeverity),\n )\n\n def to_asset_check_evaluation(\n self, step_context: "StepExecutionContext"\n ) -> AssetCheckEvaluation:\n spec_check_names_by_asset_key = (\n step_context.job_def.asset_layer.get_check_names_by_asset_key_for_node_handle(\n step_context.node_handle.root\n )\n )\n\n asset_keys_with_specs = spec_check_names_by_asset_key.keys()\n\n if self.asset_key is not None:\n if self.asset_key not in asset_keys_with_specs:\n raise DagsterInvariantViolationError(\n "Received unexpected AssetCheckResult. It targets asset"\n f" '{self.asset_key.to_user_string()}' which is not targeted by any of the"\n " checks currently being evaluated. Targeted assets:"\n f" {[asset_key.to_user_string() for asset_key in asset_keys_with_specs]}."\n )\n\n resolved_asset_key = self.asset_key\n\n else:\n if len(spec_check_names_by_asset_key) > 1:\n raise DagsterInvariantViolationError(\n "AssetCheckResult didn't specify an asset key, but there are multiple assets"\n " to choose from:"\n f" {[asset_key.to_user_string() for asset_key in spec_check_names_by_asset_key.keys()]}"\n )\n\n resolved_asset_key = next(iter(asset_keys_with_specs))\n\n check_names_with_specs = spec_check_names_by_asset_key[resolved_asset_key]\n if self.check_name is not None:\n if self.check_name not in check_names_with_specs:\n raise DagsterInvariantViolationError(\n "Received unexpected AssetCheckResult. No checks currently being evaluated"\n f" target asset '{resolved_asset_key.to_user_string()}' and have name"\n f" '{self.check_name}'. 
Checks being evaluated for this asset:"\n f" {check_names_with_specs}"\n )\n\n resolved_check_name = self.check_name\n else:\n if len(check_names_with_specs) > 1:\n raise DagsterInvariantViolationError(\n "AssetCheckResult didn't specify a check name, but there are multiple"\n " checks to choose from for this asset key:"\n f" {check_names_with_specs}"\n )\n\n resolved_check_name = next(iter(check_names_with_specs))\n\n input_asset_info = step_context.get_input_asset_version_info(resolved_asset_key)\n if input_asset_info is not None:\n target_materialization_data = AssetCheckEvaluationTargetMaterializationData(\n run_id=input_asset_info.run_id,\n storage_id=input_asset_info.storage_id,\n timestamp=input_asset_info.timestamp,\n )\n else:\n target_materialization_data = None\n\n return AssetCheckEvaluation(\n check_name=resolved_check_name,\n asset_key=resolved_asset_key,\n passed=self.passed,\n metadata=self.metadata,\n target_materialization_data=target_materialization_data,\n severity=self.severity,\n )\n\n def get_spec_python_identifier(\n self, *, asset_key: Optional[AssetKey] = None, check_name: Optional[str] = None\n ) -> str:\n """Returns a string uniquely identifying the asset check spec associated with this result.\n This is used for the output name associated with an `AssetCheckResult`.\n """\n asset_key = asset_key or self.asset_key\n check_name = check_name or self.check_name\n assert asset_key is not None, "Asset key must be provided if not set on spec"\n assert check_name is not None, "Check name must be provided if not set on spec"\n return f"{asset_key.to_python_identifier()}_{check_name}"
\n
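A minimal usage sketch of `AssetCheckResult`, assuming the experimental `@asset_check` decorator from the same release; the `orders` asset and `orders_has_rows` check names are hypothetical:

.. code-block:: python

    from dagster import AssetCheckResult, asset, asset_check

    @asset
    def orders():
        # hypothetical asset standing in for a real table or dataframe
        return [{"id": 1}, {"id": 2}]

    @asset_check(asset=orders)
    def orders_has_rows(orders):
        # passed drives pass/fail; metadata is surfaced alongside the result;
        # severity defaults to AssetCheckSeverity.ERROR per the class above
        return AssetCheckResult(passed=len(orders) > 0, metadata={"num_rows": len(orders)})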
", "current_page_name": "_modules/dagster/_core/definitions/asset_check_result", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.asset_check_result"}, "asset_check_spec": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.asset_check_spec

\nfrom enum import Enum\nfrom typing import TYPE_CHECKING, Any, Mapping, NamedTuple, Optional, Union\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, experimental\nfrom dagster._core.definitions.events import AssetKey, CoercibleToAssetKey\nfrom dagster._serdes.serdes import whitelist_for_serdes\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.assets import AssetsDefinition\n    from dagster._core.definitions.source_asset import SourceAsset\n\n\n
[docs]@experimental\n@whitelist_for_serdes\nclass AssetCheckSeverity(Enum):\n """Severity level for an asset check.\n\n Severities:\n\n - WARN: If the check fails, don't fail the step.\n - ERROR: If the check fails, fail the step and, within the run, skip materialization of any\n assets that are downstream of the asset being checked.\n """\n\n WARN = "WARN"\n ERROR = "ERROR"
\n\n\n
[docs]@experimental\n@whitelist_for_serdes(old_storage_names={"AssetCheckHandle"})\nclass AssetCheckKey(NamedTuple):\n """Check names are expected to be unique per-asset. Thus, this combination of asset key and\n check name uniquely identifies an asset check within a deployment.\n """\n\n asset_key: PublicAttr[AssetKey]\n name: PublicAttr[str]\n\n @staticmethod\n def from_graphql_input(graphql_input: Mapping[str, Any]) -> "AssetCheckKey":\n return AssetCheckKey(\n asset_key=AssetKey.from_graphql_input(graphql_input["assetKey"]),\n name=graphql_input["name"],\n )
\n\n\n
[docs]@experimental\nclass AssetCheckSpec(\n NamedTuple(\n "_AssetCheckSpec",\n [\n ("name", PublicAttr[str]),\n ("asset_key", PublicAttr[AssetKey]),\n ("description", PublicAttr[Optional[str]]),\n ],\n )\n):\n """Defines information about an asset check, except how to execute it.\n\n AssetCheckSpec is often used as an argument to decorators that decorate a function that can\n execute multiple checks - e.g. `@asset` and `@multi_asset`. It defines one of the checks that\n will be executed inside that function.\n\n Args:\n name (str): Name of the check.\n asset (Union[AssetKey, Sequence[str], str, AssetsDefinition, SourceAsset]): The asset that\n the check applies to.\n description (Optional[str]): Description for the check.\n """\n\n def __new__(\n cls,\n name: str,\n *,\n asset: Union[CoercibleToAssetKey, "AssetsDefinition", "SourceAsset"],\n description: Optional[str] = None,\n ):\n return super().__new__(\n cls,\n name=check.str_param(name, "name"),\n asset_key=AssetKey.from_coercible_or_definition(asset),\n description=check.opt_str_param(description, "description"),\n )\n\n def get_python_identifier(self) -> str:\n """Returns a string uniquely identifying the asset check that uses only the characters\n allowed in a Python identifier.\n """\n return f"{self.asset_key.to_python_identifier()}_{self.name}"\n\n @property\n def key(self) -> AssetCheckKey:\n return AssetCheckKey(self.asset_key, self.name)
\n
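A sketch of the pattern the docstring above describes, assuming `@asset` accepts a `check_specs=` argument in this release; the `events` asset and `non_empty` check names are hypothetical:

.. code-block:: python

    from dagster import AssetCheckResult, AssetCheckSpec, Output, asset

    @asset(check_specs=[AssetCheckSpec("non_empty", asset="events")])
    def events():
        rows = [1, 2, 3]
        # when check specs are declared, the function yields its output
        # plus one AssetCheckResult per declared check
        yield Output(rows)
        yield AssetCheckResult(check_name="non_empty", passed=len(rows) > 0)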
", "current_page_name": "_modules/dagster/_core/definitions/asset_check_spec", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.asset_check_spec"}, "asset_dep": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.asset_dep

\nfrom typing import NamedTuple, Optional, Union\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, experimental\nfrom dagster._core.definitions.asset_spec import AssetSpec\nfrom dagster._core.definitions.assets import AssetsDefinition\nfrom dagster._core.definitions.partition_mapping import PartitionMapping\nfrom dagster._core.definitions.source_asset import SourceAsset\nfrom dagster._core.errors import DagsterInvalidDefinitionError\n\nfrom .events import (\n    AssetKey,\n    CoercibleToAssetKey,\n)\n\nCoercibleToAssetDep = Union[\n    CoercibleToAssetKey, AssetSpec, AssetsDefinition, SourceAsset, "AssetDep"\n]\n\n\n
[docs]@experimental\nclass AssetDep(\n NamedTuple(\n "_AssetDep",\n [\n ("asset_key", PublicAttr[AssetKey]),\n ("partition_mapping", PublicAttr[Optional[PartitionMapping]]),\n ],\n )\n):\n """Specifies a dependency on an upstream asset.\n\n Attributes:\n asset (Union[AssetKey, str, AssetSpec, AssetsDefinition, SourceAsset]): The upstream asset to depend on.\n partition_mapping (Optional[PartitionMapping]): Defines what partitions to depend on in\n the upstream asset. If not provided and the upstream asset is partitioned, defaults to\n the default partition mapping for the partitions definition, which is typically maps\n partition keys to the same partition keys in upstream assets.\n\n Examples:\n .. code-block:: python\n\n upstream_asset = AssetSpec("upstream_asset")\n downstream_asset = AssetSpec(\n "downstream_asset",\n deps=[\n AssetDep(\n upstream_asset,\n partition_mapping=TimeWindowPartitionMapping(start_offset=-1, end_offset=-1)\n )\n ]\n )\n """\n\n def __new__(\n cls,\n asset: Union[CoercibleToAssetKey, AssetSpec, AssetsDefinition, SourceAsset],\n *,\n partition_mapping: Optional[PartitionMapping] = None,\n ):\n if isinstance(asset, list):\n check.list_param(asset, "asset", of_type=str)\n else:\n check.inst_param(\n asset, "asset", (AssetKey, str, AssetSpec, AssetsDefinition, SourceAsset)\n )\n if isinstance(asset, AssetsDefinition) and len(asset.keys) > 1:\n # Only AssetsDefinition with a single asset can be passed\n raise DagsterInvalidDefinitionError(\n "Cannot create an AssetDep from a multi_asset AssetsDefinition."\n " Instead, specify dependencies on the assets created by the multi_asset"\n f" via AssetKeys or strings. For the multi_asset {asset.node_def.name}, the"\n f" available keys are: {asset.keys}."\n )\n\n asset_key = _get_asset_key(asset)\n\n return super().__new__(\n cls,\n asset_key=asset_key,\n partition_mapping=check.opt_inst_param(\n partition_mapping,\n "partition_mapping",\n PartitionMapping,\n ),\n )\n\n @staticmethod\n def from_coercible(arg: "CoercibleToAssetDep") -> "AssetDep":\n # if arg is AssetDep, return the original object to retain partition_mapping\n return arg if isinstance(arg, AssetDep) else AssetDep(asset=arg)
\n\n\ndef _get_asset_key(arg: "CoercibleToAssetDep") -> AssetKey:\n if isinstance(arg, (AssetsDefinition, SourceAsset, AssetSpec)):\n return arg.key\n elif isinstance(arg, AssetDep):\n return arg.asset_key\n else:\n return AssetKey.from_coercible(arg)\n
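Restating the docstring example above as a self-contained snippet (imports added, names hypothetical); the partition mapping makes the downstream spec depend on the previous partition of the upstream asset:

.. code-block:: python

    from dagster import AssetDep, AssetSpec, TimeWindowPartitionMapping

    upstream_asset = AssetSpec("upstream_asset")
    downstream_asset = AssetSpec(
        "downstream_asset",
        deps=[
            AssetDep(
                upstream_asset,
                # depend on the prior time window of the upstream asset
                partition_mapping=TimeWindowPartitionMapping(start_offset=-1, end_offset=-1),
            )
        ],
    )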
", "current_page_name": "_modules/dagster/_core/definitions/asset_dep", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.asset_dep"}, "asset_in": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.asset_in

\nfrom typing import Mapping, NamedTuple, Optional, Sequence, Type, Union\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr\nfrom dagster._core.definitions.events import (\n    AssetKey,\n    CoercibleToAssetKey,\n    CoercibleToAssetKeyPrefix,\n)\nfrom dagster._core.definitions.input import NoValueSentinel\nfrom dagster._core.definitions.metadata import ArbitraryMetadataMapping\nfrom dagster._core.types.dagster_type import DagsterType, resolve_dagster_type\n\nfrom .partition_mapping import PartitionMapping\n\n\n
[docs]class AssetIn(\n NamedTuple(\n "_AssetIn",\n [\n ("key", PublicAttr[Optional[AssetKey]]),\n ("metadata", PublicAttr[Optional[ArbitraryMetadataMapping]]),\n ("key_prefix", PublicAttr[Optional[Sequence[str]]]),\n ("input_manager_key", PublicAttr[Optional[str]]),\n ("partition_mapping", PublicAttr[Optional[PartitionMapping]]),\n ("dagster_type", PublicAttr[Union[DagsterType, Type[NoValueSentinel]]]),\n ],\n )\n):\n """Defines an asset dependency.\n\n Attributes:\n key_prefix (Optional[Union[str, Sequence[str]]]): If provided, the asset's key is the\n concatenation of the key_prefix and the input name. Only one of the "key_prefix" and\n "key" arguments should be provided.\n key (Optional[Union[str, Sequence[str], AssetKey]]): The asset's key. Only one of the\n "key_prefix" and "key" arguments should be provided.\n metadata (Optional[Dict[str, Any]]): A dict of the metadata for the input.\n For example, if you only need a subset of columns from an upstream table, you could\n include that in metadata and the IO manager that loads the upstream table could use the\n metadata to determine which columns to load.\n partition_mapping (Optional[PartitionMapping]): Defines what partitions to depend on in\n the upstream asset. If not provided, defaults to the default partition mapping for the\n partitions definition, which is typically maps partition keys to the same partition keys\n in upstream assets.\n dagster_type (DagsterType): Allows specifying type validation functions that\n will be executed on the input of the decorated function before it runs.\n """\n\n def __new__(\n cls,\n key: Optional[CoercibleToAssetKey] = None,\n metadata: Optional[ArbitraryMetadataMapping] = None,\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n input_manager_key: Optional[str] = None,\n partition_mapping: Optional[PartitionMapping] = None,\n dagster_type: Union[DagsterType, Type[NoValueSentinel]] = NoValueSentinel,\n ):\n if isinstance(key_prefix, str):\n key_prefix = [key_prefix]\n\n check.invariant(\n not (key and key_prefix), "key and key_prefix cannot both be set on AssetIn"\n )\n\n return super(AssetIn, cls).__new__(\n cls,\n key=AssetKey.from_coercible(key) if key is not None else None,\n metadata=check.opt_inst_param(metadata, "metadata", Mapping),\n key_prefix=check.opt_list_param(key_prefix, "key_prefix", of_type=str),\n input_manager_key=check.opt_str_param(input_manager_key, "input_manager_key"),\n partition_mapping=check.opt_inst_param(\n partition_mapping, "partition_mapping", PartitionMapping\n ),\n dagster_type=(\n NoValueSentinel\n if dagster_type is NoValueSentinel\n else resolve_dagster_type(dagster_type)\n ),\n )
\n
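A small sketch of `AssetIn` remapping an upstream asset to a different input parameter name, with hypothetical asset names:

.. code-block:: python

    from dagster import AssetIn, asset

    @asset
    def upstream():
        return [1, 2, 3]

    # load the "upstream" asset under the local parameter name "numbers"
    @asset(ins={"numbers": AssetIn(key="upstream")})
    def downstream(numbers):
        return sum(numbers)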
", "current_page_name": "_modules/dagster/_core/definitions/asset_in", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.asset_in"}, "asset_out": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.asset_out

\nfrom typing import Any, Mapping, NamedTuple, Optional, Sequence, Type, Union\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr\nfrom dagster._core.definitions.auto_materialize_policy import AutoMaterializePolicy\nfrom dagster._core.definitions.backfill_policy import BackfillPolicy\nfrom dagster._core.definitions.events import (\n    AssetKey,\n    CoercibleToAssetKey,\n    CoercibleToAssetKeyPrefix,\n)\nfrom dagster._core.definitions.freshness_policy import FreshnessPolicy\nfrom dagster._core.definitions.input import NoValueSentinel\nfrom dagster._core.definitions.metadata import MetadataUserInput\nfrom dagster._core.definitions.output import Out\nfrom dagster._core.definitions.utils import DEFAULT_IO_MANAGER_KEY\nfrom dagster._core.types.dagster_type import DagsterType, resolve_dagster_type\n\n\n
[docs]class AssetOut(\n NamedTuple(\n "_AssetOut",\n [\n ("key", PublicAttr[Optional[AssetKey]]),\n ("key_prefix", PublicAttr[Optional[Sequence[str]]]),\n ("metadata", PublicAttr[Optional[Mapping[str, Any]]]),\n ("io_manager_key", PublicAttr[str]),\n ("description", PublicAttr[Optional[str]]),\n ("is_required", PublicAttr[bool]),\n ("dagster_type", PublicAttr[Union[DagsterType, Type[NoValueSentinel]]]),\n ("group_name", PublicAttr[Optional[str]]),\n ("code_version", PublicAttr[Optional[str]]),\n ("freshness_policy", PublicAttr[Optional[FreshnessPolicy]]),\n ("auto_materialize_policy", PublicAttr[Optional[AutoMaterializePolicy]]),\n ("backfill_policy", PublicAttr[Optional[BackfillPolicy]]),\n ],\n )\n):\n """Defines one of the assets produced by a :py:func:`@multi_asset <multi_asset>`.\n\n Attributes:\n key_prefix (Optional[Union[str, Sequence[str]]]): If provided, the asset's key is the\n concatenation of the key_prefix and the asset's name. When using ``@multi_asset``, the\n asset name defaults to the key of the "outs" dictionary Only one of the "key_prefix" and\n "key" arguments should be provided.\n key (Optional[Union[str, Sequence[str], AssetKey]]): The asset's key. Only one of the\n "key_prefix" and "key" arguments should be provided.\n dagster_type (Optional[Union[Type, DagsterType]]]):\n The type of this output. Should only be set if the correct type can not\n be inferred directly from the type signature of the decorated function.\n description (Optional[str]): Human-readable description of the output.\n is_required (bool): Whether the presence of this field is required. (default: True)\n io_manager_key (Optional[str]): The resource key of the IO manager used for this output.\n (default: "io_manager").\n metadata (Optional[Dict[str, Any]]): A dict of the metadata for the output.\n For example, users can provide a file path if the data object will be stored in a\n filesystem, or provide information of a database table when it is going to load the data\n into the table.\n group_name (Optional[str]): A string name used to organize multiple assets into groups. 
If\n not provided, the name "default" is used.\n code_version (Optional[str]): The version of the code that generates this asset.\n freshness_policy (Optional[FreshnessPolicy]): A policy which indicates how up to date this\n asset is intended to be.\n auto_materialize_policy (Optional[AutoMaterializePolicy]): AutoMaterializePolicy to apply to\n the specified asset.\n backfill_policy (Optional[BackfillPolicy]): BackfillPolicy to apply to the specified asset.\n """\n\n def __new__(\n cls,\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n key: Optional[CoercibleToAssetKey] = None,\n dagster_type: Union[Type, DagsterType] = NoValueSentinel,\n description: Optional[str] = None,\n is_required: bool = True,\n io_manager_key: Optional[str] = None,\n metadata: Optional[MetadataUserInput] = None,\n group_name: Optional[str] = None,\n code_version: Optional[str] = None,\n freshness_policy: Optional[FreshnessPolicy] = None,\n auto_materialize_policy: Optional[AutoMaterializePolicy] = None,\n backfill_policy: Optional[BackfillPolicy] = None,\n ):\n if isinstance(key_prefix, str):\n key_prefix = [key_prefix]\n\n return super(AssetOut, cls).__new__(\n cls,\n key=AssetKey.from_coercible(key) if key is not None else None,\n key_prefix=check.opt_list_param(key_prefix, "key_prefix", of_type=str),\n dagster_type=(\n NoValueSentinel\n if dagster_type is NoValueSentinel\n else resolve_dagster_type(dagster_type)\n ),\n description=check.opt_str_param(description, "description"),\n is_required=check.bool_param(is_required, "is_required"),\n io_manager_key=check.opt_str_param(\n io_manager_key, "io_manager_key", default=DEFAULT_IO_MANAGER_KEY\n ),\n metadata=check.opt_mapping_param(metadata, "metadata", key_type=str),\n group_name=check.opt_str_param(group_name, "group_name"),\n code_version=check.opt_str_param(code_version, "code_version"),\n freshness_policy=check.opt_inst_param(\n freshness_policy, "freshness_policy", FreshnessPolicy\n ),\n auto_materialize_policy=check.opt_inst_param(\n auto_materialize_policy, "auto_materialize_policy", AutoMaterializePolicy\n ),\n backfill_policy=check.opt_inst_param(\n backfill_policy, "backfill_policy", BackfillPolicy\n ),\n )\n\n def to_out(self) -> Out:\n return Out(\n dagster_type=self.dagster_type,\n description=self.description,\n metadata=self.metadata,\n is_required=self.is_required,\n io_manager_key=self.io_manager_key,\n code_version=self.code_version,\n )
\n
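A short `@multi_asset` sketch using `AssetOut`, with hypothetical names; the returned tuple maps to the declared outputs in order:

.. code-block:: python

    from dagster import AssetOut, multi_asset

    @multi_asset(
        outs={
            "orders_clean": AssetOut(description="Orders with invalid rows dropped"),
            "orders_summary": AssetOut(group_name="reporting"),
        }
    )
    def orders_assets():
        clean = [{"id": 1}]
        summary = {"num_orders": len(clean)}
        return clean, summary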
", "current_page_name": "_modules/dagster/_core/definitions/asset_out", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.asset_out"}, "asset_selection": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.asset_selection

\nimport collections.abc\nimport operator\nfrom abc import ABC, abstractmethod\nfrom functools import reduce\nfrom typing import AbstractSet, Iterable, Optional, Sequence, Union, cast\n\nfrom typing_extensions import TypeAlias\n\nimport dagster._check as check\nfrom dagster._annotations import deprecated, public\nfrom dagster._core.definitions.asset_checks import AssetChecksDefinition\nfrom dagster._core.errors import DagsterInvalidSubsetError\nfrom dagster._core.selector.subset_selector import (\n    fetch_connected,\n    fetch_sinks,\n    fetch_sources,\n    parse_clause,\n)\n\nfrom .asset_check_spec import AssetCheckKey\nfrom .asset_graph import AssetGraph, InternalAssetGraph\nfrom .assets import AssetsDefinition\nfrom .events import (\n    AssetKey,\n    CoercibleToAssetKey,\n    CoercibleToAssetKeyPrefix,\n    key_prefix_from_coercible,\n)\nfrom .source_asset import SourceAsset\n\nCoercibleToAssetSelection: TypeAlias = Union[\n    str,\n    Sequence[str],\n    Sequence[AssetKey],\n    Sequence[Union["AssetsDefinition", "SourceAsset"]],\n    "AssetSelection",\n]\n\n\n
[docs]class AssetSelection(ABC):\n """An AssetSelection defines a query over a set of assets and asset checks, normally all that are defined in a code location.\n\n You can use the "|", "&", and "-" operators to create unions, intersections, and differences of selections, respectively.\n\n AssetSelections are typically used with :py:func:`define_asset_job`.\n\n By default, selecting assets will also select all of the asset checks that target those assets.\n\n Examples:\n .. code-block:: python\n\n # Select all assets in group "marketing":\n AssetSelection.groups("marketing")\n\n # Select all assets in group "marketing", as well as the asset with key "promotion":\n AssetSelection.groups("marketing") | AssetSelection.keys("promotion")\n\n # Select all assets in group "marketing" that are downstream of asset "leads":\n AssetSelection.groups("marketing") & AssetSelection.keys("leads").downstream()\n\n # Select a list of assets:\n AssetSelection.assets(*my_assets_list)\n\n # Select all assets except for those in group "marketing"\n AssetSelection.all() - AssetSelection.groups("marketing")\n\n # Select all assets which are materialized by the same op as "projections":\n AssetSelection.keys("projections").required_multi_asset_neighbors()\n\n # Select all assets in group "marketing" and exclude their asset checks:\n AssetSelection.groups("marketing") - AssetSelection.all_asset_checks()\n\n # Select all asset checks that target a list of assets:\n AssetSelection.checks_for_assets(*my_assets_list)\n\n # Select a specific asset check:\n AssetSelection.checks(my_asset_check)\n\n """\n\n
[docs] @public\n @staticmethod\n def all() -> "AllSelection":\n """Returns a selection that includes all assets and asset checks."""\n return AllSelection()
\n\n
[docs] @public\n @staticmethod\n def all_asset_checks() -> "AllAssetCheckSelection":\n """Returns a selection that includes all asset checks."""\n return AllAssetCheckSelection()
\n\n
[docs] @public\n @staticmethod\n def assets(*assets_defs: AssetsDefinition) -> "KeysAssetSelection":\n """Returns a selection that includes all of the provided assets and asset checks that target them."""\n return KeysAssetSelection(*(key for assets_def in assets_defs for key in assets_def.keys))
\n\n
[docs] @public\n @staticmethod\n def keys(*asset_keys: CoercibleToAssetKey) -> "KeysAssetSelection":\n """Returns a selection that includes assets with any of the provided keys and all asset checks that target them.\n\n Examples:\n .. code-block:: python\n\n AssetSelection.keys(AssetKey(["a"]))\n\n AssetSelection.keys("a")\n\n AssetSelection.keys(AssetKey(["a"]), AssetKey(["b"]))\n\n AssetSelection.keys("a", "b")\n\n asset_key_list = [AssetKey(["a"]), AssetKey(["b"])]\n AssetSelection.keys(*asset_key_list)\n """\n _asset_keys = [\n AssetKey.from_user_string(key) if isinstance(key, str) else AssetKey.from_coercible(key)\n for key in asset_keys\n ]\n return KeysAssetSelection(*_asset_keys)
\n\n
[docs] @public\n @staticmethod\n def key_prefixes(\n *key_prefixes: CoercibleToAssetKeyPrefix, include_sources: bool = False\n ) -> "KeyPrefixesAssetSelection":\n """Returns a selection that includes assets that match any of the provided key prefixes and all the asset checks that target them.\n\n Args:\n include_sources (bool): If True, then include source assets matching the key prefix(es)\n in the selection.\n\n Examples:\n .. code-block:: python\n\n # match any asset key where the first segment is equal to "a" or "b"\n # e.g. AssetKey(["a", "b", "c"]) would match, but AssetKey(["abc"]) would not.\n AssetSelection.key_prefixes("a", "b")\n\n # match any asset key where the first two segments are ["a", "b"] or ["a", "c"]\n AssetSelection.key_prefixes(["a", "b"], ["a", "c"])\n """\n _asset_key_prefixes = [key_prefix_from_coercible(key_prefix) for key_prefix in key_prefixes]\n return KeyPrefixesAssetSelection(*_asset_key_prefixes, include_sources=include_sources)
\n\n
[docs] @public\n @staticmethod\n def groups(*group_strs, include_sources: bool = False) -> "GroupsAssetSelection":\n """Returns a selection that includes materializable assets that belong to any of the\n provided groups and all the asset checks that target them.\n\n Args:\n include_sources (bool): If True, then include source assets matching the group in the\n selection.\n """\n check.tuple_param(group_strs, "group_strs", of_type=str)\n return GroupsAssetSelection(*group_strs, include_sources=include_sources)
\n\n
[docs] @public\n @staticmethod\n def checks_for_assets(*assets_defs: AssetsDefinition) -> "AssetChecksForAssetKeys":\n """Returns a selection with the asset checks that target the provided assets."""\n return AssetChecksForAssetKeys(\n [key for assets_def in assets_defs for key in assets_def.keys]\n )
\n\n
[docs] @public\n @staticmethod\n def checks(*asset_checks: AssetChecksDefinition) -> "AssetChecksForHandles":\n """Returns a selection that includes all of the provided asset checks."""\n return AssetChecksForHandles(\n [\n AssetCheckKey(asset_key=AssetKey.from_coercible(spec.asset_key), name=spec.name)\n for checks_def in asset_checks\n for spec in checks_def.specs\n ]\n )
\n\n
[docs] @public\n def downstream(\n self, depth: Optional[int] = None, include_self: bool = True\n ) -> "DownstreamAssetSelection":\n """Returns a selection that includes all assets that are downstream of any of the assets in\n this selection, selecting the assets in this selection by default. Includes the asset checks targeting the returned assets. Iterates through each\n asset in this selection and returns the union of all downstream assets.\n\n Args:\n depth (Optional[int]): If provided, then only include assets to the given depth. A depth\n of 2 means all assets that are children or grandchildren of the assets in this\n selection.\n include_self (bool): If True, then include the assets in this selection in the result.\n If the include_self flag is False, return each downstream asset that is not part of the\n original selection. By default, set to True.\n """\n check.opt_int_param(depth, "depth")\n check.opt_bool_param(include_self, "include_self")\n return DownstreamAssetSelection(self, depth=depth, include_self=include_self)
\n\n
[docs] @public\n def upstream(\n self, depth: Optional[int] = None, include_self: bool = True\n ) -> "UpstreamAssetSelection":\n """Returns a selection that includes all materializable assets that are upstream of any of\n the assets in this selection, selecting the assets in this selection by default. Includes the asset checks targeting the returned assets. Iterates\n through each asset in this selection and returns the union of all upstream assets.\n\n Because mixed selections of source and materializable assets are currently not supported,\n keys corresponding to `SourceAssets` will not be included as upstream of regular assets.\n\n Args:\n depth (Optional[int]): If provided, then only include assets to the given depth. A depth\n of 2 means all assets that are parents or grandparents of the assets in this\n selection.\n include_self (bool): If True, then include the assets in this selection in the result.\n If the include_self flag is False, return each upstream asset that is not part of the\n original selection. By default, set to True.\n """\n check.opt_int_param(depth, "depth")\n check.opt_bool_param(include_self, "include_self")\n return UpstreamAssetSelection(self, depth=depth, include_self=include_self)
\n\n
[docs] @public\n def sinks(self) -> "SinkAssetSelection":\n """Given an asset selection, returns a new asset selection that contains all of the sink\n assets within the original asset selection. Includes the asset checks targeting the returned assets.\n\n A sink asset is an asset that has no downstream dependencies within the asset selection.\n The sink asset can have downstream dependencies outside of the asset selection.\n """\n return SinkAssetSelection(self)
\n\n
[docs] @public\n def required_multi_asset_neighbors(self) -> "RequiredNeighborsAssetSelection":\n """Given an asset selection in which some assets are output from a multi-asset compute op\n which cannot be subset, returns a new asset selection that contains all of the assets\n required to execute the original asset selection. Includes the asset checks targeting the returned assets.\n """\n return RequiredNeighborsAssetSelection(self)
\n\n
[docs] @public\n def roots(self) -> "RootAssetSelection":\n """Given an asset selection, returns a new asset selection that contains all of the root\n assets within the original asset selection. Includes the asset checks targeting the returned assets.\n\n A root asset is an asset that has no upstream dependencies within the asset selection.\n The root asset can have upstream dependencies outside of the asset selection.\n\n Because mixed selections of source and materializable assets are currently not supported,\n keys corresponding to `SourceAssets` will not be included as roots. To select source assets,\n use the `upstream_source_assets` method.\n """\n return RootAssetSelection(self)
\n\n
[docs] @public\n @deprecated(breaking_version="2.0", additional_warn_text="Use AssetSelection.roots instead.")\n def sources(self) -> "RootAssetSelection":\n """Given an asset selection, returns a new asset selection that contains all of the root\n assets within the original asset selection. Includes the asset checks targeting the returned assets.\n\n A root asset is a materializable asset that has no upstream dependencies within the asset\n selection. The root asset can have upstream dependencies outside of the asset selection.\n\n Because mixed selections of source and materializable assets are currently not supported,\n keys corresponding to `SourceAssets` will not be included as roots. To select source assets,\n use the `upstream_source_assets` method.\n """\n return self.roots()
\n\n
[docs] @public\n def upstream_source_assets(self) -> "SourceAssetSelection":\n """Given an asset selection, returns a new asset selection that contains all of the source\n assets upstream of assets in the original selection. Includes the asset checks targeting the returned assets.\n """\n return SourceAssetSelection(self)
\n\n
[docs] @public\n def without_checks(self) -> "AssetSelection":\n """Removes all asset checks in the selection."""\n return self - AssetSelection.all_asset_checks()
\n\n def __or__(self, other: "AssetSelection") -> "OrAssetSelection":\n check.inst_param(other, "other", AssetSelection)\n return OrAssetSelection(self, other)\n\n def __and__(self, other: "AssetSelection") -> "AndAssetSelection":\n check.inst_param(other, "other", AssetSelection)\n return AndAssetSelection(self, other)\n\n def __sub__(self, other: "AssetSelection") -> "SubAssetSelection":\n check.inst_param(other, "other", AssetSelection)\n return SubAssetSelection(self, other)\n\n def resolve(\n self, all_assets: Union[Iterable[Union[AssetsDefinition, SourceAsset]], AssetGraph]\n ) -> AbstractSet[AssetKey]:\n if isinstance(all_assets, AssetGraph):\n asset_graph = all_assets\n else:\n check.iterable_param(all_assets, "all_assets", (AssetsDefinition, SourceAsset))\n asset_graph = AssetGraph.from_assets(all_assets)\n\n resolved = self.resolve_inner(asset_graph)\n resolved_source_assets = asset_graph.source_asset_keys & resolved\n resolved_regular_assets = resolved - asset_graph.source_asset_keys\n check.invariant(\n not (len(resolved_source_assets) > 0 and len(resolved_regular_assets) > 0),\n "Asset selection specified both regular assets and source assets. This is not"\n " currently supported. Selections must be all regular assets or all source assets.",\n )\n return resolved\n\n @abstractmethod\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n raise NotImplementedError()\n\n def resolve_checks(self, asset_graph: InternalAssetGraph) -> AbstractSet[AssetCheckKey]:\n """We don't need this method currently, but it makes things consistent with resolve_inner. Currently\n we don't store checks in the ExternalAssetGraph, so we only support InternalAssetGraph.\n """\n return self.resolve_checks_inner(asset_graph)\n\n def resolve_checks_inner(self, asset_graph: InternalAssetGraph) -> AbstractSet[AssetCheckKey]:\n """By default, resolve to checks that target the selected assets. 
This is overriden for particular selections."""\n asset_keys = self.resolve(asset_graph)\n return {handle for handle in asset_graph.asset_check_keys if handle.asset_key in asset_keys}\n\n @staticmethod\n def _selection_from_string(string: str) -> "AssetSelection":\n from dagster._core.definitions import AssetSelection\n\n if string == "*":\n return AssetSelection.all()\n\n parts = parse_clause(string)\n if not parts:\n check.failed(f"Invalid selection string: {string}")\n u, item, d = parts\n\n selection: AssetSelection = AssetSelection.keys(item)\n if u:\n selection = selection.upstream(u)\n if d:\n selection = selection.downstream(d)\n return selection\n\n @classmethod\n def from_coercible(cls, selection: CoercibleToAssetSelection) -> "AssetSelection":\n if isinstance(selection, str):\n return cls._selection_from_string(selection)\n elif isinstance(selection, AssetSelection):\n return selection\n elif isinstance(selection, collections.abc.Sequence) and all(\n isinstance(el, str) for el in selection\n ):\n return reduce(\n operator.or_, [cls._selection_from_string(cast(str, s)) for s in selection]\n )\n elif isinstance(selection, collections.abc.Sequence) and all(\n isinstance(el, (AssetsDefinition, SourceAsset)) for el in selection\n ):\n return AssetSelection.keys(\n *(\n key\n for el in selection\n for key in (\n el.keys if isinstance(el, AssetsDefinition) else [cast(SourceAsset, el).key]\n )\n )\n )\n elif isinstance(selection, collections.abc.Sequence) and all(\n isinstance(el, AssetKey) for el in selection\n ):\n return cls.keys(*cast(Sequence[AssetKey], selection))\n else:\n check.failed(\n "selection argument must be one of str, Sequence[str], Sequence[AssetKey],"\n " Sequence[AssetsDefinition], Sequence[SourceAsset], AssetSelection. Was"\n f" {type(selection)}."\n )
\n\n\nclass AllSelection(AssetSelection):\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n return asset_graph.materializable_asset_keys\n\n\nclass AllAssetCheckSelection(AssetSelection):\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n return set()\n\n def resolve_checks_inner(self, asset_graph: InternalAssetGraph) -> AbstractSet[AssetCheckKey]:\n return asset_graph.asset_check_keys\n\n\nclass AssetChecksForAssetKeys(AssetSelection):\n def __init__(self, keys: Sequence[AssetKey]):\n self._keys = keys\n\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n return set()\n\n def resolve_checks_inner(self, asset_graph: InternalAssetGraph) -> AbstractSet[AssetCheckKey]:\n return {handle for handle in asset_graph.asset_check_keys if handle.asset_key in self._keys}\n\n\nclass AssetChecksForHandles(AssetSelection):\n def __init__(self, asset_check_keys: Sequence[AssetCheckKey]):\n self._asset_check_keys = asset_check_keys\n\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n return set()\n\n def resolve_checks_inner(self, asset_graph: InternalAssetGraph) -> AbstractSet[AssetCheckKey]:\n return {\n handle for handle in asset_graph.asset_check_keys if handle in self._asset_check_keys\n }\n\n\nclass AndAssetSelection(AssetSelection):\n def __init__(self, left: AssetSelection, right: AssetSelection):\n self._left = left\n self._right = right\n\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n return self._left.resolve_inner(asset_graph) & self._right.resolve_inner(asset_graph)\n\n def resolve_checks_inner(self, asset_graph: InternalAssetGraph) -> AbstractSet[AssetCheckKey]:\n return self._left.resolve_checks_inner(asset_graph) & self._right.resolve_checks_inner(\n asset_graph\n )\n\n\nclass SubAssetSelection(AssetSelection):\n def __init__(self, left: AssetSelection, right: AssetSelection):\n self._left = left\n self._right = right\n\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n return self._left.resolve_inner(asset_graph) - self._right.resolve_inner(asset_graph)\n\n def resolve_checks_inner(self, asset_graph: InternalAssetGraph) -> AbstractSet[AssetCheckKey]:\n return self._left.resolve_checks_inner(asset_graph) - self._right.resolve_checks_inner(\n asset_graph\n )\n\n\nclass SinkAssetSelection(AssetSelection):\n def __init__(self, child: AssetSelection):\n self._child = child\n\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n selection = self._child.resolve_inner(asset_graph)\n return fetch_sinks(asset_graph.asset_dep_graph, selection)\n\n\nclass RequiredNeighborsAssetSelection(AssetSelection):\n def __init__(self, child: AssetSelection):\n self._child = child\n\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n selection = self._child.resolve_inner(asset_graph)\n output = set(selection)\n for asset_key in selection:\n output.update(asset_graph.get_required_multi_asset_keys(asset_key))\n return output\n\n\nclass RootAssetSelection(AssetSelection):\n def __init__(self, child: AssetSelection):\n self._child = child\n\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n selection = self._child.resolve_inner(asset_graph)\n return fetch_sources(asset_graph.asset_dep_graph, selection)\n\n\nclass DownstreamAssetSelection(AssetSelection):\n def __init__(\n self,\n child: AssetSelection,\n *,\n depth: Optional[int] = None,\n include_self: Optional[bool] = 
True,\n ):\n self._child = child\n self.depth = depth\n self.include_self = include_self\n\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n selection = self._child.resolve_inner(asset_graph)\n return operator.sub(\n reduce(\n operator.or_,\n [\n {asset_key}\n | fetch_connected(\n item=asset_key,\n graph=asset_graph.asset_dep_graph,\n direction="downstream",\n depth=self.depth,\n )\n for asset_key in selection\n ],\n ),\n selection if not self.include_self else set(),\n )\n\n\nclass GroupsAssetSelection(AssetSelection):\n def __init__(self, *groups: str, include_sources: bool):\n self._groups = groups\n self._include_sources = include_sources\n\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n base_set = (\n asset_graph.all_asset_keys\n if self._include_sources\n else asset_graph.materializable_asset_keys\n )\n return {\n asset_key\n for asset_key, group in asset_graph.group_names_by_key.items()\n if group in self._groups and asset_key in base_set\n }\n\n\nclass KeysAssetSelection(AssetSelection):\n def __init__(self, *keys: AssetKey):\n self._keys = keys\n\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n specified_keys = set(self._keys)\n invalid_keys = {key for key in specified_keys if key not in asset_graph.all_asset_keys}\n if invalid_keys:\n raise DagsterInvalidSubsetError(\n f"AssetKey(s) {invalid_keys} were selected, but no AssetsDefinition objects supply "\n "these keys. Make sure all keys are spelled correctly, and all AssetsDefinitions "\n "are correctly added to the `Definitions`."\n )\n return specified_keys\n\n\nclass KeyPrefixesAssetSelection(AssetSelection):\n def __init__(self, *key_prefixes: Sequence[str], include_sources: bool):\n self._key_prefixes = key_prefixes\n self._include_sources = include_sources\n\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n base_set = (\n asset_graph.all_asset_keys\n if self._include_sources\n else asset_graph.materializable_asset_keys\n )\n return {\n key for key in base_set if any(key.has_prefix(prefix) for prefix in self._key_prefixes)\n }\n\n\nclass OrAssetSelection(AssetSelection):\n def __init__(self, left: AssetSelection, right: AssetSelection):\n self._left = left\n self._right = right\n\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n return self._left.resolve_inner(asset_graph) | self._right.resolve_inner(asset_graph)\n\n def resolve_checks_inner(self, asset_graph: InternalAssetGraph) -> AbstractSet[AssetCheckKey]:\n return self._left.resolve_checks_inner(asset_graph) | self._right.resolve_checks_inner(\n asset_graph\n )\n\n\ndef _fetch_all_upstream(\n selection: AbstractSet[AssetKey],\n asset_graph: AssetGraph,\n depth: Optional[int] = None,\n include_self: bool = True,\n) -> AbstractSet[AssetKey]:\n return operator.sub(\n reduce(\n operator.or_,\n [\n {asset_key}\n | fetch_connected(\n item=asset_key,\n graph=asset_graph.asset_dep_graph,\n direction="upstream",\n depth=depth,\n )\n for asset_key in selection\n ],\n set(),\n ),\n selection if not include_self else set(),\n )\n\n\nclass UpstreamAssetSelection(AssetSelection):\n def __init__(\n self,\n child: AssetSelection,\n *,\n depth: Optional[int] = None,\n include_self: bool = True,\n ):\n self._child = child\n self.depth = depth\n self.include_self = include_self\n\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n selection = self._child.resolve_inner(asset_graph)\n if len(selection) == 0:\n return 
selection\n all_upstream = _fetch_all_upstream(selection, asset_graph, self.depth, self.include_self)\n return {key for key in all_upstream if key not in asset_graph.source_asset_keys}\n\n\nclass SourceAssetSelection(AssetSelection):\n def __init__(self, child: AssetSelection):\n self._child = child\n\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n selection = self._child.resolve_inner(asset_graph)\n if len(selection) == 0:\n return selection\n all_upstream = _fetch_all_upstream(selection, asset_graph)\n return {key for key in all_upstream if key in asset_graph.source_asset_keys}\n
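Tying the examples in the `AssetSelection` docstring above to `define_asset_job`, a sketch with hypothetical group and asset names:

.. code-block:: python

    from dagster import AssetSelection, define_asset_job

    # all "marketing" assets plus everything downstream of "leads", minus asset checks
    marketing_job = define_asset_job(
        "marketing_refresh_job",
        selection=(
            AssetSelection.groups("marketing") | AssetSelection.keys("leads").downstream()
        ).without_checks(),
    )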
", "current_page_name": "_modules/dagster/_core/definitions/asset_selection", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.asset_selection"}, "asset_sensor_definition": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.asset_sensor_definition

\nimport inspect\nfrom typing import Any, Callable, NamedTuple, Optional, Sequence, Set\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.decorator_utils import get_function_params\nfrom dagster._core.definitions.resource_annotation import get_resource_args\n\nfrom .events import AssetKey\nfrom .run_request import RunRequest, SkipReason\nfrom .sensor_definition import (\n    DefaultSensorStatus,\n    RawSensorEvaluationFunctionReturn,\n    SensorDefinition,\n    SensorType,\n    validate_and_get_resource_dict,\n)\nfrom .target import ExecutableDefinition\nfrom .utils import check_valid_name\n\n\nclass AssetSensorParamNames(NamedTuple):\n    context_param_name: Optional[str]\n    event_log_entry_param_name: Optional[str]\n\n\ndef get_asset_sensor_param_names(fn: Callable) -> AssetSensorParamNames:\n    """Determines the names of the context and event log entry parameters for an asset sensor function.\n    These are assumed to be the first two non-resource params, in order (context param before event log entry).\n    """\n    resource_params = {param.name for param in get_resource_args(fn)}\n\n    non_resource_params = [\n        param.name for param in get_function_params(fn) if param.name not in resource_params\n    ]\n\n    context_param_name = non_resource_params[0] if len(non_resource_params) > 0 else None\n    event_log_entry_param_name = non_resource_params[1] if len(non_resource_params) > 1 else None\n\n    return AssetSensorParamNames(\n        context_param_name=context_param_name, event_log_entry_param_name=event_log_entry_param_name\n    )\n\n\n
[docs]class AssetSensorDefinition(SensorDefinition):\n """Define an asset sensor that initiates a set of runs based on the materialization of a given\n asset.\n\n If the asset has been materialized multiple times between since the last sensor tick, the\n evaluation function will only be invoked once, with the latest materialization.\n\n Args:\n name (str): The name of the sensor to create.\n asset_key (AssetKey): The asset_key this sensor monitors.\n asset_materialization_fn (Callable[[SensorEvaluationContext, EventLogEntry], Union[Iterator[Union[RunRequest, SkipReason]], RunRequest, SkipReason]]): The core\n evaluation function for the sensor, which is run at an interval to determine whether a\n run should be launched or not. Takes a :py:class:`~dagster.SensorEvaluationContext` and\n an EventLogEntry corresponding to an AssetMaterialization event.\n\n This function must return a generator, which must yield either a single SkipReason\n or one or more RunRequest objects.\n minimum_interval_seconds (Optional[int]): The minimum number of seconds that will elapse\n between sensor evaluations.\n description (Optional[str]): A human-readable description of the sensor.\n job (Optional[Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]]): The job\n object to target with this sensor.\n jobs (Optional[Sequence[Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]]]):\n (experimental) A list of jobs to be executed when the sensor fires.\n default_status (DefaultSensorStatus): Whether the sensor starts as running or not. The default\n status can be overridden from the Dagster UI or via the GraphQL API.\n """\n\n def __init__(\n self,\n name: str,\n asset_key: AssetKey,\n job_name: Optional[str],\n asset_materialization_fn: Callable[\n ...,\n RawSensorEvaluationFunctionReturn,\n ],\n minimum_interval_seconds: Optional[int] = None,\n description: Optional[str] = None,\n job: Optional[ExecutableDefinition] = None,\n jobs: Optional[Sequence[ExecutableDefinition]] = None,\n default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,\n required_resource_keys: Optional[Set[str]] = None,\n ):\n self._asset_key = check.inst_param(asset_key, "asset_key", AssetKey)\n\n from dagster._core.events import DagsterEventType\n from dagster._core.storage.event_log.base import EventRecordsFilter\n\n resource_arg_names: Set[str] = {\n arg.name for arg in get_resource_args(asset_materialization_fn)\n }\n\n combined_required_resource_keys = (\n check.opt_set_param(required_resource_keys, "required_resource_keys", of_type=str)\n | resource_arg_names\n )\n\n def _wrap_asset_fn(materialization_fn) -> Any:\n def _fn(context) -> Any:\n after_cursor = None\n if context.cursor:\n try:\n after_cursor = int(context.cursor)\n except ValueError:\n after_cursor = None\n\n event_records = context.instance.get_event_records(\n EventRecordsFilter(\n event_type=DagsterEventType.ASSET_MATERIALIZATION,\n asset_key=self._asset_key,\n after_cursor=after_cursor,\n ),\n ascending=False,\n limit=1,\n )\n\n if not event_records:\n yield SkipReason(\n f"No new materialization events found for asset key {self._asset_key}"\n )\n return\n\n event_record = event_records[0]\n\n (\n context_param_name,\n event_log_entry_param_name,\n ) = get_asset_sensor_param_names(materialization_fn)\n\n resource_args_populated = validate_and_get_resource_dict(\n context.resources, name, resource_arg_names\n )\n\n # Build asset sensor function args, which can include any subset of\n # context arg, event log entry arg, and any 
resource args\n args = resource_args_populated\n if context_param_name:\n args[context_param_name] = context\n if event_log_entry_param_name:\n args[event_log_entry_param_name] = event_record.event_log_entry\n\n result = materialization_fn(**args)\n if inspect.isgenerator(result) or isinstance(result, list):\n for item in result:\n yield item\n elif isinstance(result, (SkipReason, RunRequest)):\n yield result\n context.update_cursor(str(event_record.storage_id))\n\n return _fn\n\n super(AssetSensorDefinition, self).__init__(\n name=check_valid_name(name),\n job_name=job_name,\n evaluation_fn=_wrap_asset_fn(\n check.callable_param(asset_materialization_fn, "asset_materialization_fn"),\n ),\n minimum_interval_seconds=minimum_interval_seconds,\n description=description,\n job=job,\n jobs=jobs,\n default_status=default_status,\n required_resource_keys=combined_required_resource_keys,\n )\n\n @public\n @property\n def asset_key(self) -> AssetKey:\n """AssetKey: The key of the asset targeted by this sensor."""\n return self._asset_key\n\n @property\n def sensor_type(self) -> SensorType:\n return SensorType.ASSET
\n
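`AssetSensorDefinition` is usually produced via the `@asset_sensor` decorator rather than constructed directly; a sketch with a hypothetical `raw_events` asset and downstream job:

.. code-block:: python

    from dagster import AssetKey, RunRequest, asset_sensor, define_asset_job

    downstream_job = define_asset_job("downstream_job", selection="*")

    @asset_sensor(asset_key=AssetKey("raw_events"), job=downstream_job)
    def raw_events_sensor(context, asset_event):
        # asset_event is the EventLogEntry for the latest materialization of "raw_events"
        yield RunRequest(run_key=context.cursor)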
", "current_page_name": "_modules/dagster/_core/definitions/asset_sensor_definition", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.asset_sensor_definition"}, "asset_spec": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.asset_spec

\nfrom enum import Enum\nfrom typing import TYPE_CHECKING, Any, Iterable, Mapping, NamedTuple, Optional\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, experimental\nfrom dagster._core.errors import DagsterInvariantViolationError\n\nfrom .auto_materialize_policy import AutoMaterializePolicy\nfrom .events import (\n    AssetKey,\n    CoercibleToAssetKey,\n)\nfrom .freshness_policy import FreshnessPolicy\nfrom .metadata import MetadataUserInput\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.asset_dep import AssetDep, CoercibleToAssetDep\n\n# SYSTEM_METADATA_KEY_ASSET_EXECUTION_TYPE lives on the metadata of an asset\n# (which currently ends up on the Output associated with the asset key)\n# which encodes the execution type of the asset. "Unexecutable" assets are assets\n# that cannot be materialized in Dagster, but can have events in the event\n# log keyed off of them, making Dagster usable as an observability and lineage tool\n# for externally materialized assets.\nSYSTEM_METADATA_KEY_ASSET_EXECUTION_TYPE = "dagster/asset_execution_type"\n\n\nclass AssetExecutionType(Enum):\n    OBSERVATION = "OBSERVATION"\n    UNEXECUTABLE = "UNEXECUTABLE"\n    MATERIALIZATION = "MATERIALIZATION"\n\n    @staticmethod\n    def is_executable(varietal_str: Optional[str]) -> bool:\n        return AssetExecutionType.str_to_enum(varietal_str) in {\n            AssetExecutionType.MATERIALIZATION,\n            AssetExecutionType.OBSERVATION,\n        }\n\n    @staticmethod\n    def str_to_enum(varietal_str: Optional[str]) -> "AssetExecutionType":\n        return (\n            AssetExecutionType.MATERIALIZATION\n            if varietal_str is None\n            else AssetExecutionType(varietal_str)\n        )
[docs]@experimental\nclass AssetSpec(\n NamedTuple(\n "_AssetSpec",\n [\n ("key", PublicAttr[AssetKey]),\n ("deps", PublicAttr[Iterable["AssetDep"]]),\n ("description", PublicAttr[Optional[str]]),\n ("metadata", PublicAttr[Optional[Mapping[str, Any]]]),\n ("group_name", PublicAttr[Optional[str]]),\n ("skippable", PublicAttr[bool]),\n ("code_version", PublicAttr[Optional[str]]),\n ("freshness_policy", PublicAttr[Optional[FreshnessPolicy]]),\n ("auto_materialize_policy", PublicAttr[Optional[AutoMaterializePolicy]]),\n ],\n )\n):\n """Specifies the core attributes of an asset. This object is attached to the decorated\n function that defines how it materialized.\n\n Attributes:\n key (AssetKey): The unique identifier for this asset.\n deps (Optional[AbstractSet[AssetKey]]): The asset keys for the upstream assets that\n materializing this asset depends on.\n description (Optional[str]): Human-readable description of this asset.\n metadata (Optional[Dict[str, Any]]): A dict of static metadata for this asset.\n For example, users can provide information about the database table this\n asset corresponds to.\n skippable (bool): Whether this asset can be omitted during materialization, causing downstream\n dependencies to skip.\n group_name (Optional[str]): A string name used to organize multiple assets into groups. If\n not provided, the name "default" is used.\n code_version (Optional[str]): The version of the code for this specific asset,\n overriding the code version of the materialization function\n freshness_policy (Optional[FreshnessPolicy]): A policy which indicates how up to date this\n asset is intended to be.\n auto_materialize_policy (Optional[AutoMaterializePolicy]): AutoMaterializePolicy to apply to\n the specified asset.\n backfill_policy (Optional[BackfillPolicy]): BackfillPolicy to apply to the specified asset.\n """\n\n def __new__(\n cls,\n key: CoercibleToAssetKey,\n *,\n deps: Optional[Iterable["CoercibleToAssetDep"]] = None,\n description: Optional[str] = None,\n metadata: Optional[MetadataUserInput] = None,\n skippable: bool = False,\n group_name: Optional[str] = None,\n code_version: Optional[str] = None,\n freshness_policy: Optional[FreshnessPolicy] = None,\n auto_materialize_policy: Optional[AutoMaterializePolicy] = None,\n ):\n from dagster._core.definitions.asset_dep import AssetDep\n\n dep_set = {}\n if deps:\n for dep in deps:\n asset_dep = AssetDep.from_coercible(dep)\n\n # we cannot do deduplication via a set because MultiPartitionMappings have an internal\n # dictionary that cannot be hashed. Instead deduplicate by making a dictionary and checking\n # for existing keys.\n if asset_dep.asset_key in dep_set.keys():\n raise DagsterInvariantViolationError(\n f"Cannot set a dependency on asset {asset_dep.asset_key} more than once for"\n f" AssetSpec {key}"\n )\n dep_set[asset_dep.asset_key] = asset_dep\n\n return super().__new__(\n cls,\n key=AssetKey.from_coercible(key),\n deps=list(dep_set.values()),\n description=check.opt_str_param(description, "description"),\n metadata=check.opt_mapping_param(metadata, "metadata", key_type=str),\n skippable=check.bool_param(skippable, "skippable"),\n group_name=check.opt_str_param(group_name, "group_name"),\n code_version=check.opt_str_param(code_version, "code_version"),\n freshness_policy=check.opt_inst_param(\n freshness_policy,\n "freshness_policy",\n FreshnessPolicy,\n ),\n auto_materialize_policy=check.opt_inst_param(\n auto_materialize_policy,\n "auto_materialize_policy",\n AutoMaterializePolicy,\n ),\n )
\n
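A sketch of attaching `AssetSpec` objects to a materializing function, assuming `@multi_asset` accepts a `specs=` argument in this release and permits a body that records no outputs directly; `raw_events` is a hypothetical upstream key defined elsewhere:

.. code-block:: python

    from dagster import AssetSpec, multi_asset

    @multi_asset(
        specs=[
            AssetSpec("events_clean", deps=["raw_events"], group_name="events"),
            AssetSpec("events_summary", deps=["raw_events"], group_name="events"),
        ]
    )
    def events_assets():
        # run the external compute / write the tables here; the specs above
        # describe what this function is expected to produce
        ...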
", "current_page_name": "_modules/dagster/_core/definitions/asset_spec", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.asset_spec"}, "assets": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.assets

\nimport hashlib\nimport json\nimport warnings\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Dict,\n    Iterable,\n    Iterator,\n    List,\n    Mapping,\n    Optional,\n    Sequence,\n    Set,\n    Union,\n    cast,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import experimental_param, public\nfrom dagster._core.definitions.asset_check_spec import AssetCheckKey, AssetCheckSpec\nfrom dagster._core.definitions.asset_layer import get_dep_node_handles_of_graph_backed_asset\nfrom dagster._core.definitions.asset_spec import AssetExecutionType\nfrom dagster._core.definitions.auto_materialize_policy import AutoMaterializePolicy\nfrom dagster._core.definitions.backfill_policy import BackfillPolicy, BackfillPolicyType\nfrom dagster._core.definitions.freshness_policy import FreshnessPolicy\nfrom dagster._core.definitions.metadata import ArbitraryMetadataMapping\nfrom dagster._core.definitions.multi_dimensional_partitions import MultiPartitionsDefinition\nfrom dagster._core.definitions.op_invocation import direct_invocation_result\nfrom dagster._core.definitions.op_selection import get_graph_subset\nfrom dagster._core.definitions.partition_mapping import MultiPartitionMapping\nfrom dagster._core.definitions.resource_requirement import (\n    RequiresResources,\n    ResourceAddable,\n    ResourceRequirement,\n    merge_resource_defs,\n)\nfrom dagster._core.definitions.time_window_partition_mapping import TimeWindowPartitionMapping\nfrom dagster._core.definitions.time_window_partitions import TimeWindowPartitionsDefinition\nfrom dagster._core.errors import (\n    DagsterInvalidDefinitionError,\n    DagsterInvalidInvocationError,\n    DagsterInvariantViolationError,\n)\nfrom dagster._utils import IHasInternalInit\nfrom dagster._utils.merger import merge_dicts\nfrom dagster._utils.warnings import (\n    disable_dagster_warnings,\n)\n\nfrom .dependency import NodeHandle\nfrom .events import AssetKey, CoercibleToAssetKey, CoercibleToAssetKeyPrefix\nfrom .node_definition import NodeDefinition\nfrom .op_definition import OpDefinition\nfrom .partition import PartitionsDefinition\nfrom .partition_mapping import (\n    PartitionMapping,\n    get_builtin_partition_mapping_types,\n    infer_partition_mapping,\n)\nfrom .resource_definition import ResourceDefinition\nfrom .source_asset import SourceAsset\nfrom .utils import DEFAULT_GROUP_NAME, validate_group_name\n\nif TYPE_CHECKING:\n    from .graph_definition import GraphDefinition\n\n\n
[docs]class AssetsDefinition(ResourceAddable, RequiresResources, IHasInternalInit):\n """Defines a set of assets that are produced by the same op or graph.\n\n AssetsDefinitions are typically not instantiated directly, but rather produced using the\n :py:func:`@asset <asset>` or :py:func:`@multi_asset <multi_asset>` decorators.\n """\n\n _node_def: NodeDefinition\n _keys_by_input_name: Mapping[str, AssetKey]\n _keys_by_output_name: Mapping[str, AssetKey]\n _partitions_def: Optional[PartitionsDefinition]\n _partition_mappings: Mapping[AssetKey, PartitionMapping]\n _asset_deps: Mapping[AssetKey, AbstractSet[AssetKey]]\n _resource_defs: Mapping[str, ResourceDefinition]\n _group_names_by_key: Mapping[AssetKey, str]\n _selected_asset_keys: AbstractSet[AssetKey]\n _can_subset: bool\n _metadata_by_key: Mapping[AssetKey, ArbitraryMetadataMapping]\n _freshness_policies_by_key: Mapping[AssetKey, FreshnessPolicy]\n _auto_materialize_policies_by_key: Mapping[AssetKey, AutoMaterializePolicy]\n _backfill_policy: Optional[BackfillPolicy]\n _code_versions_by_key: Mapping[AssetKey, Optional[str]]\n _descriptions_by_key: Mapping[AssetKey, str]\n _selected_asset_check_keys: AbstractSet[AssetCheckKey]\n\n def __init__(\n self,\n *,\n keys_by_input_name: Mapping[str, AssetKey],\n keys_by_output_name: Mapping[str, AssetKey],\n node_def: NodeDefinition,\n partitions_def: Optional[PartitionsDefinition] = None,\n partition_mappings: Optional[Mapping[AssetKey, PartitionMapping]] = None,\n asset_deps: Optional[Mapping[AssetKey, AbstractSet[AssetKey]]] = None,\n selected_asset_keys: Optional[AbstractSet[AssetKey]] = None,\n can_subset: bool = False,\n resource_defs: Optional[Mapping[str, object]] = None,\n group_names_by_key: Optional[Mapping[AssetKey, str]] = None,\n metadata_by_key: Optional[Mapping[AssetKey, ArbitraryMetadataMapping]] = None,\n freshness_policies_by_key: Optional[Mapping[AssetKey, FreshnessPolicy]] = None,\n auto_materialize_policies_by_key: Optional[Mapping[AssetKey, AutoMaterializePolicy]] = None,\n backfill_policy: Optional[BackfillPolicy] = None,\n descriptions_by_key: Optional[Mapping[AssetKey, str]] = None,\n check_specs_by_output_name: Optional[Mapping[str, AssetCheckSpec]] = None,\n selected_asset_check_keys: Optional[AbstractSet[AssetCheckKey]] = None,\n # if adding new fields, make sure to handle them in the with_attributes, from_graph, and\n # get_attributes_dict methods\n ):\n from dagster._core.execution.build_resources import wrap_resources_for_execution\n\n from .graph_definition import GraphDefinition\n\n if isinstance(node_def, GraphDefinition):\n _validate_graph_def(node_def)\n\n self._node_def = node_def\n self._keys_by_input_name = check.mapping_param(\n keys_by_input_name,\n "keys_by_input_name",\n key_type=str,\n value_type=AssetKey,\n )\n self._keys_by_output_name = check.mapping_param(\n keys_by_output_name,\n "keys_by_output_name",\n key_type=str,\n value_type=AssetKey,\n )\n\n check.opt_mapping_param(\n check_specs_by_output_name,\n "check_specs_by_output_name",\n key_type=str,\n value_type=AssetCheckSpec,\n )\n\n # if not specified assume all output assets depend on all input assets\n all_asset_keys = set(keys_by_output_name.values())\n input_asset_keys = set(keys_by_input_name.values())\n\n self._partitions_def = partitions_def\n self._partition_mappings = partition_mappings or {}\n builtin_partition_mappings = get_builtin_partition_mapping_types()\n for asset_key, partition_mapping in self._partition_mappings.items():\n if not isinstance(partition_mapping, 
builtin_partition_mappings):\n warnings.warn(\n f"Non-built-in PartitionMappings, such as {type(partition_mapping).__name__} "\n "are deprecated and will not work with asset reconciliation. The built-in "\n "partition mappings are "\n + ", ".join(\n builtin_partition_mapping.__name__\n for builtin_partition_mapping in builtin_partition_mappings\n )\n + ".",\n category=DeprecationWarning,\n )\n\n if asset_key not in input_asset_keys:\n check.failed(\n f"While constructing AssetsDefinition outputting {all_asset_keys}, received a"\n f" partition mapping for {asset_key} that is not defined in the set of upstream"\n f" assets: {input_asset_keys}"\n )\n\n self._asset_deps = asset_deps or {\n out_asset_key: set(keys_by_input_name.values()) for out_asset_key in all_asset_keys\n }\n check.invariant(\n set(self._asset_deps.keys()) == all_asset_keys,\n "The set of asset keys with dependencies specified in the asset_deps argument must "\n "equal the set of asset keys produced by this AssetsDefinition. \\n"\n f"asset_deps keys: {set(self._asset_deps.keys())} \\n"\n f"expected keys: {all_asset_keys}",\n )\n self._resource_defs = wrap_resources_for_execution(\n check.opt_mapping_param(resource_defs, "resource_defs")\n )\n\n group_names_by_key = (\n check.mapping_param(group_names_by_key, "group_names_by_key")\n if group_names_by_key\n else {}\n )\n self._group_names_by_key = {}\n # assets that don't have a group name get a DEFAULT_GROUP_NAME\n for key in all_asset_keys:\n group_name = group_names_by_key.get(key)\n self._group_names_by_key[key] = validate_group_name(group_name)\n\n all_check_keys = {spec.key for spec in (check_specs_by_output_name or {}).values()}\n\n # NOTE: this logic mirrors subsetting at the asset layer. This is ripe for consolidation.\n if selected_asset_keys is None and selected_asset_check_keys is None:\n # if no selections, include everything\n self._selected_asset_keys = all_asset_keys\n self._selected_asset_check_keys = all_check_keys\n else:\n self._selected_asset_keys = selected_asset_keys or set()\n\n if selected_asset_check_keys is None:\n # if assets were selected but checks are None, then include all checks for selected\n # assets\n self._selected_asset_check_keys = {\n key for key in all_check_keys if key.asset_key in self._selected_asset_keys\n }\n else:\n # otherwise, use the selected checks\n self._selected_asset_check_keys = selected_asset_check_keys\n\n self._check_specs_by_output_name = {\n name: spec\n for name, spec in (check_specs_by_output_name or {}).items()\n if spec.key in self._selected_asset_check_keys\n }\n self._check_specs_by_key = {\n spec.key: spec for spec in self._check_specs_by_output_name.values()\n }\n\n self._can_subset = can_subset\n\n self._code_versions_by_key = {}\n self._metadata_by_key = dict(\n check.opt_mapping_param(\n metadata_by_key, "metadata_by_key", key_type=AssetKey, value_type=dict\n )\n )\n self._descriptions_by_key = dict(\n check.opt_mapping_param(\n descriptions_by_key, "descriptions_by_key", key_type=AssetKey, value_type=str\n )\n )\n for output_name, asset_key in keys_by_output_name.items():\n output_def, _ = node_def.resolve_output_to_origin(output_name, None)\n self._metadata_by_key[asset_key] = merge_dicts(\n output_def.metadata,\n self._metadata_by_key.get(asset_key, {}),\n )\n # We construct description from three sources of truth here. This\n # highly unfortunate. 
See commentary in @multi_asset's call to dagster_internal_init.\n description = (\n self._descriptions_by_key.get(asset_key, output_def.description)\n or node_def.description\n )\n if description:\n self._descriptions_by_key[asset_key] = description\n self._code_versions_by_key[asset_key] = output_def.code_version\n\n for key, freshness_policy in (freshness_policies_by_key or {}).items():\n check.param_invariant(\n not (\n freshness_policy\n and self._partitions_def is not None\n and not isinstance(self._partitions_def, TimeWindowPartitionsDefinition)\n ),\n "freshness_policies_by_key",\n "FreshnessPolicies are currently unsupported for assets with partitions of type"\n f" {type(self._partitions_def)}.",\n )\n\n self._freshness_policies_by_key = check.opt_mapping_param(\n freshness_policies_by_key,\n "freshness_policies_by_key",\n key_type=AssetKey,\n value_type=FreshnessPolicy,\n )\n\n self._auto_materialize_policies_by_key = check.opt_mapping_param(\n auto_materialize_policies_by_key,\n "auto_materialize_policies_by_key",\n key_type=AssetKey,\n value_type=AutoMaterializePolicy,\n )\n\n self._backfill_policy = check.opt_inst_param(\n backfill_policy, "backfill_policy", BackfillPolicy\n )\n\n if self._partitions_def is None:\n # check if backfill policy is BackfillPolicyType.SINGLE_RUN if asset is not partitioned\n check.param_invariant(\n (\n backfill_policy.policy_type is BackfillPolicyType.SINGLE_RUN\n if backfill_policy\n else True\n ),\n "backfill_policy",\n "Non partitioned asset can only have single run backfill policy",\n )\n\n _validate_self_deps(\n input_keys=self._keys_by_input_name.values(),\n output_keys=self._selected_asset_keys,\n partition_mappings=self._partition_mappings,\n partitions_def=self._partitions_def,\n )\n\n @staticmethod\n def dagster_internal_init(\n *,\n keys_by_input_name: Mapping[str, AssetKey],\n keys_by_output_name: Mapping[str, AssetKey],\n node_def: NodeDefinition,\n partitions_def: Optional[PartitionsDefinition],\n partition_mappings: Optional[Mapping[AssetKey, PartitionMapping]],\n asset_deps: Optional[Mapping[AssetKey, AbstractSet[AssetKey]]],\n selected_asset_keys: Optional[AbstractSet[AssetKey]],\n can_subset: bool,\n resource_defs: Optional[Mapping[str, object]],\n group_names_by_key: Optional[Mapping[AssetKey, str]],\n metadata_by_key: Optional[Mapping[AssetKey, ArbitraryMetadataMapping]],\n freshness_policies_by_key: Optional[Mapping[AssetKey, FreshnessPolicy]],\n auto_materialize_policies_by_key: Optional[Mapping[AssetKey, AutoMaterializePolicy]],\n backfill_policy: Optional[BackfillPolicy],\n descriptions_by_key: Optional[Mapping[AssetKey, str]],\n check_specs_by_output_name: Optional[Mapping[str, AssetCheckSpec]],\n selected_asset_check_keys: Optional[AbstractSet[AssetCheckKey]],\n ) -> "AssetsDefinition":\n return AssetsDefinition(\n keys_by_input_name=keys_by_input_name,\n keys_by_output_name=keys_by_output_name,\n node_def=node_def,\n partitions_def=partitions_def,\n partition_mappings=partition_mappings,\n asset_deps=asset_deps,\n selected_asset_keys=selected_asset_keys,\n can_subset=can_subset,\n resource_defs=resource_defs,\n group_names_by_key=group_names_by_key,\n metadata_by_key=metadata_by_key,\n freshness_policies_by_key=freshness_policies_by_key,\n auto_materialize_policies_by_key=auto_materialize_policies_by_key,\n backfill_policy=backfill_policy,\n descriptions_by_key=descriptions_by_key,\n check_specs_by_output_name=check_specs_by_output_name,\n selected_asset_check_keys=selected_asset_check_keys,\n )\n\n def __call__(self, 
*args: object, **kwargs: object) -> object:\n from .composition import is_in_composition\n from .graph_definition import GraphDefinition\n\n # defer to GraphDefinition.__call__ for graph backed assets, or if invoked in composition\n if isinstance(self.node_def, GraphDefinition) or is_in_composition():\n return self._node_def(*args, **kwargs)\n\n # invoke against self to allow assets def information to be used\n return direct_invocation_result(self, *args, **kwargs)\n\n
[docs] @public\n @experimental_param(param="resource_defs")\n @staticmethod\n def from_graph(\n graph_def: "GraphDefinition",\n *,\n keys_by_input_name: Optional[Mapping[str, AssetKey]] = None,\n keys_by_output_name: Optional[Mapping[str, AssetKey]] = None,\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n internal_asset_deps: Optional[Mapping[str, Set[AssetKey]]] = None,\n partitions_def: Optional[PartitionsDefinition] = None,\n partition_mappings: Optional[Mapping[str, PartitionMapping]] = None,\n resource_defs: Optional[Mapping[str, ResourceDefinition]] = None,\n group_name: Optional[str] = None,\n group_names_by_output_name: Optional[Mapping[str, Optional[str]]] = None,\n descriptions_by_output_name: Optional[Mapping[str, str]] = None,\n metadata_by_output_name: Optional[Mapping[str, Optional[ArbitraryMetadataMapping]]] = None,\n freshness_policies_by_output_name: Optional[Mapping[str, Optional[FreshnessPolicy]]] = None,\n auto_materialize_policies_by_output_name: Optional[\n Mapping[str, Optional[AutoMaterializePolicy]]\n ] = None,\n backfill_policy: Optional[BackfillPolicy] = None,\n can_subset: bool = False,\n check_specs: Optional[Sequence[AssetCheckSpec]] = None,\n ) -> "AssetsDefinition":\n """Constructs an AssetsDefinition from a GraphDefinition.\n\n Args:\n graph_def (GraphDefinition): The GraphDefinition that is an asset.\n keys_by_input_name (Optional[Mapping[str, AssetKey]]): A mapping of the input\n names of the decorated graph to their corresponding asset keys. If not provided,\n the input asset keys will be created from the graph input names.\n keys_by_output_name (Optional[Mapping[str, AssetKey]]): A mapping of the output\n names of the decorated graph to their corresponding asset keys. If not provided,\n the output asset keys will be created from the graph output names.\n key_prefix (Optional[Union[str, Sequence[str]]]): If provided, key_prefix will be prepended\n to each key in keys_by_output_name. Each item in key_prefix must be a valid name in\n dagster (ie only contains letters, numbers, and _) and may not contain python\n reserved keywords.\n internal_asset_deps (Optional[Mapping[str, Set[AssetKey]]]): By default, it is assumed\n that all assets produced by the graph depend on all assets that are consumed by that\n graph. If this default is not correct, you pass in a map of output names to a\n corrected set of AssetKeys that they depend on. Any AssetKeys in this list must be\n either used as input to the asset or produced within the graph.\n partitions_def (Optional[PartitionsDefinition]): Defines the set of partition keys that\n compose the assets.\n partition_mappings (Optional[Mapping[str, PartitionMapping]]): Defines how to map partition\n keys for this asset to partition keys of upstream assets. Each key in the dictionary\n correponds to one of the input assets, and each value is a PartitionMapping.\n If no entry is provided for a particular asset dependency, the partition mapping defaults\n to the default partition mapping for the partitions definition, which is typically maps\n partition keys to the same partition keys in upstream assets.\n resource_defs (Optional[Mapping[str, ResourceDefinition]]):\n (Experimental) A mapping of resource keys to resource definitions. These resources\n will be initialized during execution, and can be accessed from the\n body of ops in the graph during execution.\n group_name (Optional[str]): A group name for the constructed asset. 
Assets without a\n group name are assigned to a group called "default".\n group_names_by_output_name (Optional[Mapping[str, Optional[str]]]): Defines a group name to be\n associated with some or all of the output assets for this node. Keys are names of the\n outputs, and values are the group name. Cannot be used with the group_name argument.\n descriptions_by_output_name (Optional[Mapping[str, Optional[str]]]): Defines a description to be\n associated with each of the output asstes for this graph.\n metadata_by_output_name (Optional[Mapping[str, Optional[MetadataUserInput]]]): Defines metadata to\n be associated with each of the output assets for this node. Keys are names of the\n outputs, and values are dictionaries of metadata to be associated with the related\n asset.\n freshness_policies_by_output_name (Optional[Mapping[str, Optional[FreshnessPolicy]]]): Defines a\n FreshnessPolicy to be associated with some or all of the output assets for this node.\n Keys are the names of the outputs, and values are the FreshnessPolicies to be attached\n to the associated asset.\n auto_materialize_policies_by_output_name (Optional[Mapping[str, Optional[AutoMaterializePolicy]]]): Defines an\n AutoMaterializePolicy to be associated with some or all of the output assets for this node.\n Keys are the names of the outputs, and values are the AutoMaterializePolicies to be attached\n to the associated asset.\n backfill_policy (Optional[BackfillPolicy]): Defines this asset's BackfillPolicy\n """\n return AssetsDefinition._from_node(\n node_def=graph_def,\n keys_by_input_name=keys_by_input_name,\n keys_by_output_name=keys_by_output_name,\n key_prefix=key_prefix,\n internal_asset_deps=internal_asset_deps,\n partitions_def=partitions_def,\n partition_mappings=partition_mappings,\n resource_defs=resource_defs,\n group_name=group_name,\n group_names_by_output_name=group_names_by_output_name,\n descriptions_by_output_name=descriptions_by_output_name,\n metadata_by_output_name=metadata_by_output_name,\n freshness_policies_by_output_name=freshness_policies_by_output_name,\n auto_materialize_policies_by_output_name=auto_materialize_policies_by_output_name,\n backfill_policy=backfill_policy,\n can_subset=can_subset,\n check_specs=check_specs,\n )
\n\n
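# --- Editorial sketch (not part of the module source above) ---
# Building a graph-backed asset with `AssetsDefinition.from_graph`, assuming
# a simple two-op graph. The op, graph, and group names are illustrative.
from dagster import AssetsDefinition, graph, op

@op
def fetch_orders():
    return [1, 2, 3]

@op
def summarize(orders):
    return len(orders)

@graph
def order_summary():
    return summarize(fetch_orders())

# With a single default output, the asset key is inferred from the graph
# name ("order_summary").
order_summary_asset = AssetsDefinition.from_graph(
    order_summary,
    group_name="analytics",
)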
[docs] @public\n @staticmethod\n def from_op(\n op_def: OpDefinition,\n *,\n keys_by_input_name: Optional[Mapping[str, AssetKey]] = None,\n keys_by_output_name: Optional[Mapping[str, AssetKey]] = None,\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n internal_asset_deps: Optional[Mapping[str, Set[AssetKey]]] = None,\n partitions_def: Optional[PartitionsDefinition] = None,\n partition_mappings: Optional[Mapping[str, PartitionMapping]] = None,\n group_name: Optional[str] = None,\n group_names_by_output_name: Optional[Mapping[str, Optional[str]]] = None,\n descriptions_by_output_name: Optional[Mapping[str, str]] = None,\n metadata_by_output_name: Optional[Mapping[str, Optional[ArbitraryMetadataMapping]]] = None,\n freshness_policies_by_output_name: Optional[Mapping[str, Optional[FreshnessPolicy]]] = None,\n auto_materialize_policies_by_output_name: Optional[\n Mapping[str, Optional[AutoMaterializePolicy]]\n ] = None,\n backfill_policy: Optional[BackfillPolicy] = None,\n can_subset: bool = False,\n ) -> "AssetsDefinition":\n """Constructs an AssetsDefinition from an OpDefinition.\n\n Args:\n op_def (OpDefinition): The OpDefinition that is an asset.\n keys_by_input_name (Optional[Mapping[str, AssetKey]]): A mapping of the input\n names of the decorated op to their corresponding asset keys. If not provided,\n the input asset keys will be created from the op input names.\n keys_by_output_name (Optional[Mapping[str, AssetKey]]): A mapping of the output\n names of the decorated op to their corresponding asset keys. If not provided,\n the output asset keys will be created from the op output names.\n key_prefix (Optional[Union[str, Sequence[str]]]): If provided, key_prefix will be prepended\n to each key in keys_by_output_name. Each item in key_prefix must be a valid name in\n dagster (ie only contains letters, numbers, and _) and may not contain python\n reserved keywords.\n internal_asset_deps (Optional[Mapping[str, Set[AssetKey]]]): By default, it is assumed\n that all assets produced by the op depend on all assets that are consumed by that\n op. If this default is not correct, you pass in a map of output names to a\n corrected set of AssetKeys that they depend on. Any AssetKeys in this list must be\n either used as input to the asset or produced within the op.\n partitions_def (Optional[PartitionsDefinition]): Defines the set of partition keys that\n compose the assets.\n partition_mappings (Optional[Mapping[str, PartitionMapping]]): Defines how to map partition\n keys for this asset to partition keys of upstream assets. Each key in the dictionary\n correponds to one of the input assets, and each value is a PartitionMapping.\n If no entry is provided for a particular asset dependency, the partition mapping defaults\n to the default partition mapping for the partitions definition, which is typically maps\n partition keys to the same partition keys in upstream assets.\n group_name (Optional[str]): A group name for the constructed asset. Assets without a\n group name are assigned to a group called "default".\n group_names_by_output_name (Optional[Mapping[str, Optional[str]]]): Defines a group name to be\n associated with some or all of the output assets for this node. Keys are names of the\n outputs, and values are the group name. 
Cannot be used with the group_name argument.\n descriptions_by_output_name (Optional[Mapping[str, Optional[str]]]): Defines a description to be\n associated with each of the output asstes for this graph.\n metadata_by_output_name (Optional[Mapping[str, Optional[MetadataUserInput]]]): Defines metadata to\n be associated with each of the output assets for this node. Keys are names of the\n outputs, and values are dictionaries of metadata to be associated with the related\n asset.\n freshness_policies_by_output_name (Optional[Mapping[str, Optional[FreshnessPolicy]]]): Defines a\n FreshnessPolicy to be associated with some or all of the output assets for this node.\n Keys are the names of the outputs, and values are the FreshnessPolicies to be attached\n to the associated asset.\n auto_materialize_policies_by_output_name (Optional[Mapping[str, Optional[AutoMaterializePolicy]]]): Defines an\n AutoMaterializePolicy to be associated with some or all of the output assets for this node.\n Keys are the names of the outputs, and values are the AutoMaterializePolicies to be attached\n to the associated asset.\n backfill_policy (Optional[BackfillPolicy]): Defines this asset's BackfillPolicy\n """\n return AssetsDefinition._from_node(\n node_def=op_def,\n keys_by_input_name=keys_by_input_name,\n keys_by_output_name=keys_by_output_name,\n key_prefix=key_prefix,\n internal_asset_deps=internal_asset_deps,\n partitions_def=partitions_def,\n partition_mappings=partition_mappings,\n group_name=group_name,\n group_names_by_output_name=group_names_by_output_name,\n descriptions_by_output_name=descriptions_by_output_name,\n metadata_by_output_name=metadata_by_output_name,\n freshness_policies_by_output_name=freshness_policies_by_output_name,\n auto_materialize_policies_by_output_name=auto_materialize_policies_by_output_name,\n backfill_policy=backfill_policy,\n can_subset=can_subset,\n )
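# --- Editorial sketch (not part of the module source above) ---
# Wrapping an existing OpDefinition as an asset with
# `AssetsDefinition.from_op`, assuming the default output name "result".
# The op and asset key names are illustrative.
from dagster import AssetKey, AssetsDefinition, op

@op
def build_report():
    return "report contents"

# Without keys_by_output_name, the asset key would be inferred from the op
# name ("build_report"); here it is remapped explicitly.
report_asset = AssetsDefinition.from_op(
    build_report,
    keys_by_output_name={"result": AssetKey("report")},
)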
\n\n @staticmethod\n def _from_node(\n node_def: Union[OpDefinition, "GraphDefinition"],\n *,\n keys_by_input_name: Optional[Mapping[str, AssetKey]] = None,\n keys_by_output_name: Optional[Mapping[str, AssetKey]] = None,\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n internal_asset_deps: Optional[Mapping[str, Set[AssetKey]]] = None,\n partitions_def: Optional[PartitionsDefinition] = None,\n partition_mappings: Optional[Mapping[str, PartitionMapping]] = None,\n resource_defs: Optional[Mapping[str, ResourceDefinition]] = None,\n group_name: Optional[str] = None,\n group_names_by_output_name: Optional[Mapping[str, Optional[str]]] = None,\n descriptions_by_output_name: Optional[Mapping[str, str]] = None,\n metadata_by_output_name: Optional[Mapping[str, Optional[ArbitraryMetadataMapping]]] = None,\n freshness_policies_by_output_name: Optional[Mapping[str, Optional[FreshnessPolicy]]] = None,\n auto_materialize_policies_by_output_name: Optional[\n Mapping[str, Optional[AutoMaterializePolicy]]\n ] = None,\n backfill_policy: Optional[BackfillPolicy] = None,\n can_subset: bool = False,\n check_specs: Optional[Sequence[AssetCheckSpec]] = None,\n ) -> "AssetsDefinition":\n from dagster._core.definitions.decorators.asset_decorator import (\n _validate_and_assign_output_names_to_check_specs,\n )\n\n node_def = check.inst_param(node_def, "node_def", NodeDefinition)\n keys_by_input_name = _infer_keys_by_input_names(\n node_def,\n check.opt_mapping_param(\n keys_by_input_name, "keys_by_input_name", key_type=str, value_type=AssetKey\n ),\n )\n keys_by_output_name = check.opt_mapping_param(\n keys_by_output_name,\n "keys_by_output_name",\n key_type=str,\n value_type=AssetKey,\n )\n internal_asset_deps = check.opt_mapping_param(\n internal_asset_deps, "internal_asset_deps", key_type=str, value_type=set\n )\n resource_defs = check.opt_mapping_param(\n resource_defs, "resource_defs", key_type=str, value_type=ResourceDefinition\n )\n transformed_internal_asset_deps: Dict[AssetKey, AbstractSet[AssetKey]] = {}\n if internal_asset_deps:\n for output_name, asset_keys in internal_asset_deps.items():\n check.invariant(\n output_name in keys_by_output_name,\n f"output_name {output_name} specified in internal_asset_deps does not exist"\n " in the decorated function",\n )\n transformed_internal_asset_deps[keys_by_output_name[output_name]] = asset_keys\n\n check_specs_by_output_name = _validate_and_assign_output_names_to_check_specs(\n check_specs, list(keys_by_output_name.values())\n )\n\n keys_by_output_name = _infer_keys_by_output_names(\n node_def, keys_by_output_name or {}, check_specs_by_output_name\n )\n\n keys_by_output_name_with_prefix: Dict[str, AssetKey] = {}\n key_prefix_list = [key_prefix] if isinstance(key_prefix, str) else key_prefix\n for output_name, key in keys_by_output_name.items():\n # add key_prefix to the beginning of each asset key\n key_with_key_prefix = AssetKey(\n list(filter(None, [*(key_prefix_list or []), *key.path]))\n )\n keys_by_output_name_with_prefix[output_name] = key_with_key_prefix\n\n check.param_invariant(\n group_name is None or group_names_by_output_name is None,\n "group_name",\n "Cannot use both group_name and group_names_by_output_name",\n )\n\n if group_name:\n group_names_by_key = {\n asset_key: group_name for asset_key in keys_by_output_name_with_prefix.values()\n }\n elif group_names_by_output_name:\n group_names_by_key = {\n keys_by_output_name_with_prefix[output_name]: group_name\n for output_name, group_name in group_names_by_output_name.items()\n if 
group_name is not None\n }\n else:\n group_names_by_key = None\n\n return AssetsDefinition.dagster_internal_init(\n keys_by_input_name=keys_by_input_name,\n keys_by_output_name=keys_by_output_name_with_prefix,\n node_def=node_def,\n asset_deps=transformed_internal_asset_deps or None,\n partitions_def=check.opt_inst_param(\n partitions_def,\n "partitions_def",\n PartitionsDefinition,\n ),\n group_names_by_key=group_names_by_key,\n resource_defs=resource_defs,\n partition_mappings=(\n {\n keys_by_input_name[input_name]: partition_mapping\n for input_name, partition_mapping in partition_mappings.items()\n }\n if partition_mappings\n else None\n ),\n metadata_by_key=(\n {\n keys_by_output_name_with_prefix[output_name]: metadata\n for output_name, metadata in metadata_by_output_name.items()\n if metadata is not None\n }\n if metadata_by_output_name\n else None\n ),\n freshness_policies_by_key=(\n {\n keys_by_output_name_with_prefix[output_name]: freshness_policy\n for output_name, freshness_policy in freshness_policies_by_output_name.items()\n if freshness_policy is not None\n }\n if freshness_policies_by_output_name\n else None\n ),\n auto_materialize_policies_by_key=(\n {\n keys_by_output_name_with_prefix[output_name]: auto_materialize_policy\n for output_name, auto_materialize_policy in auto_materialize_policies_by_output_name.items()\n if auto_materialize_policy is not None\n }\n if auto_materialize_policies_by_output_name\n else None\n ),\n backfill_policy=check.opt_inst_param(\n backfill_policy, "backfill_policy", BackfillPolicy\n ),\n descriptions_by_key=(\n {\n keys_by_output_name_with_prefix[output_name]: description\n for output_name, description in descriptions_by_output_name.items()\n if description is not None\n }\n if descriptions_by_output_name\n else None\n ),\n can_subset=can_subset,\n selected_asset_keys=None, # node has no subselection info\n check_specs_by_output_name=check_specs_by_output_name,\n selected_asset_check_keys=None,\n )\n\n @public\n @property\n def can_subset(self) -> bool:\n """bool: If True, indicates that this AssetsDefinition may materialize any subset of its\n asset keys in a given computation (as opposed to being required to materialize all asset\n keys).\n """\n return self._can_subset\n\n @public\n @property\n def group_names_by_key(self) -> Mapping[AssetKey, str]:\n """Mapping[AssetKey, str]: Returns a mapping from the asset keys in this AssetsDefinition\n to the group names assigned to them. If there is no assigned group name for a given AssetKey,\n it will not be present in this dictionary.\n """\n return self._group_names_by_key\n\n @public\n @property\n def descriptions_by_key(self) -> Mapping[AssetKey, str]:\n """Mapping[AssetKey, str]: Returns a mapping from the asset keys in this AssetsDefinition\n to the descriptions assigned to them. 
If there is no assigned description for a given AssetKey,\n it will not be present in this dictionary.\n """\n return self._descriptions_by_key\n\n @public\n @property\n def op(self) -> OpDefinition:\n """OpDefinition: Returns the OpDefinition that is used to materialize the assets in this\n AssetsDefinition.\n """\n check.invariant(\n isinstance(self._node_def, OpDefinition),\n "The NodeDefinition for this AssetsDefinition is not of type OpDefinition.",\n )\n return cast(OpDefinition, self._node_def)\n\n @public\n @property\n def node_def(self) -> NodeDefinition:\n """NodeDefinition: Returns the OpDefinition or GraphDefinition that is used to materialize\n the assets in this AssetsDefinition.\n """\n return self._node_def\n\n @public\n @property\n def asset_deps(self) -> Mapping[AssetKey, AbstractSet[AssetKey]]:\n """Maps assets that are produced by this definition to assets that they depend on. The\n dependencies can be either "internal", meaning that they refer to other assets that are\n produced by this definition, or "external", meaning that they refer to assets that aren't\n produced by this definition.\n """\n return self._asset_deps\n\n @property\n def input_names(self) -> Iterable[str]:\n """Iterable[str]: The set of input names of the underlying NodeDefinition for this\n AssetsDefinition.\n """\n return self.keys_by_input_name.keys()\n\n @public\n @property\n def key(self) -> AssetKey:\n """AssetKey: The asset key associated with this AssetsDefinition. If this AssetsDefinition\n has more than one asset key, this will produce an error.\n """\n check.invariant(\n len(self.keys) == 1,\n "Tried to retrieve asset key from an assets definition with multiple asset keys: "\n + ", ".join([str(ak.to_string()) for ak in self._keys_by_output_name.values()]),\n )\n\n return next(iter(self.keys))\n\n @public\n @property\n def resource_defs(self) -> Mapping[str, ResourceDefinition]:\n """Mapping[str, ResourceDefinition]: A mapping from resource name to ResourceDefinition for\n the resources bound to this AssetsDefinition.\n """\n return dict(self._resource_defs)\n\n @public\n @property\n def keys(self) -> AbstractSet[AssetKey]:\n """AbstractSet[AssetKey]: The asset keys associated with this AssetsDefinition."""\n return self._selected_asset_keys\n\n @public\n @property\n def dependency_keys(self) -> Iterable[AssetKey]:\n """Iterable[AssetKey]: The asset keys which are upstream of any asset included in this\n AssetsDefinition.\n """\n # the input asset keys that are directly upstream of a selected asset key\n upstream_keys = {dep_key for key in self.keys for dep_key in self.asset_deps[key]}\n input_keys = set(self._keys_by_input_name.values())\n return upstream_keys.intersection(input_keys)\n\n @property\n def node_keys_by_output_name(self) -> Mapping[str, AssetKey]:\n """AssetKey for each output on the underlying NodeDefinition."""\n return self._keys_by_output_name\n\n @property\n def node_keys_by_input_name(self) -> Mapping[str, AssetKey]:\n """AssetKey for each input on the underlying NodeDefinition."""\n return self._keys_by_input_name\n\n @property\n def check_specs_by_output_name(self) -> Mapping[str, AssetCheckSpec]:\n return self._check_specs_by_output_name\n\n def get_spec_for_check_key(self, asset_check_key: AssetCheckKey) -> AssetCheckSpec:\n return self._check_specs_by_key[asset_check_key]\n\n @property\n def keys_by_output_name(self) -> Mapping[str, AssetKey]:\n return {\n name: key for name, key in self.node_keys_by_output_name.items() if key in self.keys\n }\n\n @property\n def 
keys_by_input_name(self) -> Mapping[str, AssetKey]:\n upstream_keys = {dep_key for key in self.keys for dep_key in self.asset_deps[key]}\n return {\n name: key for name, key in self.node_keys_by_input_name.items() if key in upstream_keys\n }\n\n @property\n def freshness_policies_by_key(self) -> Mapping[AssetKey, FreshnessPolicy]:\n return self._freshness_policies_by_key\n\n @property\n def auto_materialize_policies_by_key(self) -> Mapping[AssetKey, AutoMaterializePolicy]:\n return self._auto_materialize_policies_by_key\n\n @property\n def backfill_policy(self) -> Optional[BackfillPolicy]:\n return self._backfill_policy\n\n @public\n @property\n def partitions_def(self) -> Optional[PartitionsDefinition]:\n """Optional[PartitionsDefinition]: The PartitionsDefinition for this AssetsDefinition (if any)."""\n return self._partitions_def\n\n @property\n def metadata_by_key(self) -> Mapping[AssetKey, ArbitraryMetadataMapping]:\n return self._metadata_by_key\n\n @property\n def code_versions_by_key(self) -> Mapping[AssetKey, Optional[str]]:\n return self._code_versions_by_key\n\n @property\n def partition_mappings(self) -> Mapping[AssetKey, PartitionMapping]:\n return self._partition_mappings\n\n
[docs] @public\n def get_partition_mapping(self, in_asset_key: AssetKey) -> Optional[PartitionMapping]:\n """Returns the partition mapping between keys in this AssetsDefinition and a given input\n asset key (if any).\n """\n return self._partition_mappings.get(in_asset_key)
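# --- Editorial sketch (not part of the module source above) ---
# Reading back the PartitionMapping an asset declares for an upstream key
# via `get_partition_mapping`. Assumes daily partitions and an explicit
# TimeWindowPartitionMapping on the input; all names are illustrative.
from dagster import (
    AssetIn,
    AssetKey,
    DailyPartitionsDefinition,
    TimeWindowPartitionMapping,
    asset,
)

daily = DailyPartitionsDefinition(start_date="2023-01-01")

@asset(partitions_def=daily)
def events():
    ...

@asset(
    partitions_def=daily,
    ins={"events": AssetIn(partition_mapping=TimeWindowPartitionMapping(start_offset=-1))},
)
def rollup(events):
    ...

# Returns the TimeWindowPartitionMapping configured above; returns None for
# upstream keys with no explicit mapping.
mapping = rollup.get_partition_mapping(AssetKey("events"))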
\n\n @public\n @property\n def check_specs(self) -> Iterable[AssetCheckSpec]:\n """Returns the asset check specs defined on this AssetsDefinition, i.e. the checks that can\n be executed while materializing the assets.\n\n Returns:\n Iterable[AssetsCheckSpec]:\n """\n return self._check_specs_by_output_name.values()\n\n @property\n def check_keys(self) -> AbstractSet[AssetCheckKey]:\n """Returns the selected asset checks associated by this AssetsDefinition.\n\n Returns:\n AbstractSet[Tuple[AssetKey, str]]: The selected asset checks. An asset check is\n identified by the asset key and the name of the check.\n """\n return self._selected_asset_check_keys\n\n def is_asset_executable(self, asset_key: AssetKey) -> bool:\n """Returns True if the asset key is materializable by this AssetsDefinition.\n\n Args:\n asset_key (AssetKey): The asset key to check.\n\n Returns:\n bool: True if the asset key is materializable by this AssetsDefinition.\n """\n from dagster._core.definitions.asset_spec import (\n SYSTEM_METADATA_KEY_ASSET_EXECUTION_TYPE,\n AssetExecutionType,\n )\n\n return AssetExecutionType.is_executable(\n self._metadata_by_key.get(asset_key, {}).get(SYSTEM_METADATA_KEY_ASSET_EXECUTION_TYPE)\n )\n\n def asset_execution_type_for_asset(self, asset_key: AssetKey) -> AssetExecutionType:\n from dagster._core.definitions.asset_spec import (\n SYSTEM_METADATA_KEY_ASSET_EXECUTION_TYPE,\n AssetExecutionType,\n )\n\n return AssetExecutionType.str_to_enum(\n self._metadata_by_key.get(asset_key, {}).get(SYSTEM_METADATA_KEY_ASSET_EXECUTION_TYPE)\n )\n\n def get_partition_mapping_for_input(self, input_name: str) -> Optional[PartitionMapping]:\n return self._partition_mappings.get(self._keys_by_input_name[input_name])\n\n def infer_partition_mapping(\n self, upstream_asset_key: AssetKey, upstream_partitions_def: Optional[PartitionsDefinition]\n ) -> PartitionMapping:\n with disable_dagster_warnings():\n partition_mapping = self._partition_mappings.get(upstream_asset_key)\n return infer_partition_mapping(\n partition_mapping, self._partitions_def, upstream_partitions_def\n )\n\n def get_output_name_for_asset_key(self, key: AssetKey) -> str:\n for output_name, asset_key in self.keys_by_output_name.items():\n if key == asset_key:\n return output_name\n\n raise DagsterInvariantViolationError(\n f"Asset key {key.to_user_string()} not found in AssetsDefinition"\n )\n\n def get_op_def_for_asset_key(self, key: AssetKey) -> OpDefinition:\n """If this is an op-backed asset, returns the op def. 
If it's a graph-backed asset,\n returns the op def within the graph that produces the given asset key.\n """\n output_name = self.get_output_name_for_asset_key(key)\n return self.node_def.resolve_output_to_origin_op_def(output_name)\n\n def with_attributes(\n self,\n *,\n output_asset_key_replacements: Optional[Mapping[AssetKey, AssetKey]] = None,\n input_asset_key_replacements: Optional[Mapping[AssetKey, AssetKey]] = None,\n group_names_by_key: Optional[Mapping[AssetKey, str]] = None,\n descriptions_by_key: Optional[Mapping[AssetKey, str]] = None,\n metadata_by_key: Optional[Mapping[AssetKey, ArbitraryMetadataMapping]] = None,\n freshness_policy: Optional[\n Union[FreshnessPolicy, Mapping[AssetKey, FreshnessPolicy]]\n ] = None,\n auto_materialize_policy: Optional[\n Union[AutoMaterializePolicy, Mapping[AssetKey, AutoMaterializePolicy]]\n ] = None,\n backfill_policy: Optional[BackfillPolicy] = None,\n ) -> "AssetsDefinition":\n output_asset_key_replacements = check.opt_mapping_param(\n output_asset_key_replacements,\n "output_asset_key_replacements",\n key_type=AssetKey,\n value_type=AssetKey,\n )\n input_asset_key_replacements = check.opt_mapping_param(\n input_asset_key_replacements,\n "input_asset_key_replacements",\n key_type=AssetKey,\n value_type=AssetKey,\n )\n group_names_by_key = check.opt_mapping_param(\n group_names_by_key, "group_names_by_key", key_type=AssetKey, value_type=str\n )\n descriptions_by_key = check.opt_mapping_param(\n descriptions_by_key, "descriptions_by_key", key_type=AssetKey, value_type=str\n )\n metadata_by_key = check.opt_mapping_param(\n metadata_by_key, "metadata_by_key", key_type=AssetKey, value_type=dict\n )\n\n backfill_policy = check.opt_inst_param(backfill_policy, "backfill_policy", BackfillPolicy)\n\n if group_names_by_key:\n group_name_conflicts = [\n asset_key\n for asset_key in group_names_by_key\n if asset_key in self.group_names_by_key\n and self.group_names_by_key[asset_key] != DEFAULT_GROUP_NAME\n ]\n if group_name_conflicts:\n raise DagsterInvalidDefinitionError(\n "Group name already exists on assets"\n f" {', '.join(asset_key.to_user_string() for asset_key in group_name_conflicts)}"\n )\n\n replaced_group_names_by_key = {\n output_asset_key_replacements.get(key, key): group_name\n for key, group_name in self.group_names_by_key.items()\n }\n\n if freshness_policy:\n freshness_policy_conflicts = (\n self.freshness_policies_by_key.keys()\n if isinstance(freshness_policy, FreshnessPolicy)\n else (freshness_policy.keys() & self.freshness_policies_by_key.keys())\n )\n if freshness_policy_conflicts:\n raise DagsterInvalidDefinitionError(\n "FreshnessPolicy already exists on assets"\n f" {', '.join(key.to_string() for key in freshness_policy_conflicts)}"\n )\n\n replaced_freshness_policies_by_key = {}\n for key in self.keys:\n if isinstance(freshness_policy, FreshnessPolicy):\n replaced_freshness_policy = freshness_policy\n elif freshness_policy:\n replaced_freshness_policy = freshness_policy.get(key)\n else:\n replaced_freshness_policy = self.freshness_policies_by_key.get(key)\n\n if replaced_freshness_policy:\n replaced_freshness_policies_by_key[output_asset_key_replacements.get(key, key)] = (\n replaced_freshness_policy\n )\n\n if auto_materialize_policy:\n auto_materialize_policy_conflicts = (\n self.auto_materialize_policies_by_key.keys()\n if isinstance(auto_materialize_policy, AutoMaterializePolicy)\n else (auto_materialize_policy.keys() & self.auto_materialize_policies_by_key.keys())\n )\n if auto_materialize_policy_conflicts:\n raise 
DagsterInvalidDefinitionError(\n "AutoMaterializePolicy already exists on assets"\n f" {', '.join(key.to_string() for key in auto_materialize_policy_conflicts)}"\n )\n\n replaced_auto_materialize_policies_by_key = {}\n for key in self.keys:\n if isinstance(auto_materialize_policy, AutoMaterializePolicy):\n replaced_auto_materialize_policy = auto_materialize_policy\n elif auto_materialize_policy:\n replaced_auto_materialize_policy = auto_materialize_policy.get(key)\n else:\n replaced_auto_materialize_policy = self.auto_materialize_policies_by_key.get(key)\n\n if replaced_auto_materialize_policy:\n replaced_auto_materialize_policies_by_key[\n output_asset_key_replacements.get(key, key)\n ] = replaced_auto_materialize_policy\n\n replaced_descriptions_by_key = {\n output_asset_key_replacements.get(key, key): description\n for key, description in descriptions_by_key.items()\n }\n\n if not metadata_by_key:\n metadata_by_key = self.metadata_by_key\n\n replaced_metadata_by_key = {\n output_asset_key_replacements.get(key, key): metadata\n for key, metadata in metadata_by_key.items()\n }\n\n replaced_attributes = dict(\n keys_by_input_name={\n input_name: input_asset_key_replacements.get(key, key)\n for input_name, key in self._keys_by_input_name.items()\n },\n keys_by_output_name={\n output_name: output_asset_key_replacements.get(key, key)\n for output_name, key in self._keys_by_output_name.items()\n },\n partition_mappings={\n input_asset_key_replacements.get(key, key): partition_mapping\n for key, partition_mapping in self._partition_mappings.items()\n },\n asset_deps={\n # replace both the keys and the values in this mapping\n output_asset_key_replacements.get(key, key): {\n input_asset_key_replacements.get(\n upstream_key,\n output_asset_key_replacements.get(upstream_key, upstream_key),\n )\n for upstream_key in value\n }\n for key, value in self.asset_deps.items()\n },\n selected_asset_keys={\n output_asset_key_replacements.get(key, key) for key in self._selected_asset_keys\n },\n group_names_by_key={\n **replaced_group_names_by_key,\n **group_names_by_key,\n },\n metadata_by_key=replaced_metadata_by_key,\n freshness_policies_by_key=replaced_freshness_policies_by_key,\n auto_materialize_policies_by_key=replaced_auto_materialize_policies_by_key,\n backfill_policy=backfill_policy if backfill_policy else self.backfill_policy,\n descriptions_by_key=replaced_descriptions_by_key,\n )\n\n return self.__class__(**merge_dicts(self.get_attributes_dict(), replaced_attributes))\n\n def _subset_graph_backed_asset(\n self,\n selected_asset_keys: AbstractSet[AssetKey],\n ):\n from dagster._core.definitions.graph_definition import GraphDefinition\n\n if not isinstance(self.node_def, GraphDefinition):\n raise DagsterInvalidInvocationError(\n "Method _subset_graph_backed_asset cannot subset an asset that is not a graph"\n )\n\n # All asset keys in selected_asset_keys are outputted from the same top-level graph backed asset\n dep_node_handles_by_asset_key = get_dep_node_handles_of_graph_backed_asset(\n self.node_def, self\n )\n op_selection: List[str] = []\n for asset_key in selected_asset_keys:\n dep_node_handles = dep_node_handles_by_asset_key[asset_key]\n for dep_node_handle in dep_node_handles:\n op_selection.append(".".join(dep_node_handle.path[1:]))\n\n return get_graph_subset(self.node_def, op_selection)\n\n def subset_for(\n self,\n selected_asset_keys: AbstractSet[AssetKey],\n selected_asset_check_keys: Optional[AbstractSet[AssetCheckKey]],\n ) -> "AssetsDefinition":\n """Create a subset of this 
AssetsDefinition that will only materialize the assets and checks\n in the selected set.\n\n Args:\n selected_asset_keys (AbstractSet[AssetKey]): The total set of asset keys\n selected_asset_check_keys (AbstractSet[AssetCheckKey]): The selected asset checks\n """\n from dagster._core.definitions.graph_definition import GraphDefinition\n\n check.invariant(\n self.can_subset,\n f"Attempted to subset AssetsDefinition for {self.node_def.name}, but can_subset=False.",\n )\n\n # Set of assets within selected_asset_keys which are outputted by this AssetDefinition\n asset_subselection = selected_asset_keys & self.keys\n if selected_asset_check_keys is None:\n # filter to checks that target selected asset keys\n asset_check_subselection = {\n key for key in self.check_keys if key.asset_key in asset_subselection\n }\n else:\n asset_check_subselection = selected_asset_check_keys & self.check_keys\n\n # Early escape if all assets in AssetsDefinition are selected\n if asset_subselection == self.keys and asset_check_subselection == self.check_keys:\n return self\n elif isinstance(self.node_def, GraphDefinition): # Node is graph-backed asset\n check.invariant(\n selected_asset_check_keys == self.check_keys,\n "Subsetting graph-backed assets with checks is not yet supported",\n )\n\n subsetted_node = self._subset_graph_backed_asset(\n asset_subselection,\n )\n\n # The subsetted node should only include asset inputs that are dependencies of the\n # selected set of assets.\n subsetted_input_names = [input_def.name for input_def in subsetted_node.input_defs]\n subsetted_keys_by_input_name = {\n key: value\n for key, value in self.node_keys_by_input_name.items()\n if key in subsetted_input_names\n }\n\n subsetted_output_names = [output_def.name for output_def in subsetted_node.output_defs]\n subsetted_keys_by_output_name = {\n key: value\n for key, value in self.node_keys_by_output_name.items()\n if key in subsetted_output_names\n }\n\n # An op within the graph-backed asset that yields multiple assets will be run\n # any time any of its output assets are selected. Thus, if an op yields multiple assets\n # and only one of them is selected, the op will still run and potentially unexpectedly\n # materialize the unselected asset.\n #\n # Thus, we include unselected assets that may be accidentally materialized in\n # keys_by_output_name and asset_deps so that the webserver can populate an warning when\n # this occurs. This is the same behavior as multi-asset subsetting.\n\n subsetted_asset_deps = {\n out_asset_key: set(self._keys_by_input_name.values())\n for out_asset_key in subsetted_keys_by_output_name.values()\n }\n\n replaced_attributes = dict(\n keys_by_input_name=subsetted_keys_by_input_name,\n keys_by_output_name=subsetted_keys_by_output_name,\n node_def=subsetted_node,\n asset_deps=subsetted_asset_deps,\n selected_asset_keys=selected_asset_keys & self.keys,\n )\n\n return self.__class__(**merge_dicts(self.get_attributes_dict(), replaced_attributes))\n else:\n # multi_asset subsetting\n replaced_attributes = {\n "selected_asset_keys": asset_subselection,\n "selected_asset_check_keys": asset_check_subselection,\n }\n return self.__class__(**merge_dicts(self.get_attributes_dict(), replaced_attributes))\n\n
[docs] @public\n def to_source_assets(self) -> Sequence[SourceAsset]:\n """Returns a SourceAsset for each asset in this definition.\n\n Each produced SourceAsset will have the same key, metadata, io_manager_key, etc. as the\n corresponding asset\n """\n return [\n self._output_to_source_asset(output_name)\n for output_name in self.keys_by_output_name.keys()\n ]
\n\n
[docs] @public\n def to_source_asset(self, key: Optional[CoercibleToAssetKey] = None) -> SourceAsset:\n """Returns a representation of this asset as a :py:class:`SourceAsset`.\n\n If this is a multi-asset, the "key" argument allows selecting which asset to return a\n SourceAsset representation of.\n\n Args:\n key (Optional[Union[str, Sequence[str], AssetKey]]]): If this is a multi-asset, select\n which asset to return a SourceAsset representation of. If not a multi-asset, this\n can be left as None.\n\n Returns:\n SourceAsset\n """\n if len(self.keys) > 1:\n check.invariant(\n key is not None,\n "The 'key' argument is required when there are multiple assets to choose from",\n )\n\n if key is not None:\n resolved_key = AssetKey.from_coercible(key)\n check.invariant(\n resolved_key in self.keys, f"Key {resolved_key} not found in AssetsDefinition"\n )\n else:\n resolved_key = self.key\n\n output_names = [\n output_name\n for output_name, ak in self.keys_by_output_name.items()\n if ak == resolved_key\n ]\n check.invariant(len(output_names) == 1)\n return self._output_to_source_asset(output_names[0])
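# --- Editorial sketch (not part of the module source above) ---
# Converting an asset definition into a SourceAsset with `to_source_asset`,
# e.g. to reference it from another code location. The asset name and group
# are illustrative.
from dagster import asset

@asset(group_name="warehouse")
def customers():
    return ["alice", "bob"]

# For a single-asset definition no key argument is needed; a multi-asset
# must pass the key of the asset to convert.
customers_source = customers.to_source_asset()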
\n\n def _output_to_source_asset(self, output_name: str) -> SourceAsset:\n with disable_dagster_warnings():\n output_def = self.node_def.resolve_output_to_origin(\n output_name, NodeHandle(self.node_def.name, parent=None)\n )[0]\n key = self._keys_by_output_name[output_name]\n\n return SourceAsset(\n key=key,\n metadata=output_def.metadata,\n io_manager_key=output_def.io_manager_key,\n description=output_def.description,\n resource_defs=self.resource_defs,\n partitions_def=self.partitions_def,\n group_name=self.group_names_by_key[key],\n )\n\n def get_io_manager_key_for_asset_key(self, key: AssetKey) -> str:\n output_name = self.get_output_name_for_asset_key(key)\n return self.node_def.resolve_output_to_origin(\n output_name, NodeHandle(self.node_def.name, parent=None)\n )[0].io_manager_key\n\n def get_resource_requirements(self) -> Iterator[ResourceRequirement]:\n yield from self.node_def.get_resource_requirements() # type: ignore[attr-defined]\n for source_key, resource_def in self.resource_defs.items():\n yield from resource_def.get_resource_requirements(outer_context=source_key)\n\n @public\n @property\n def required_resource_keys(self) -> Set[str]:\n """Set[str]: The set of keys for resources that must be provided to this AssetsDefinition."""\n return {requirement.key for requirement in self.get_resource_requirements()}\n\n def __str__(self):\n if len(self.keys) == 1:\n return f"AssetsDefinition with key {self.key.to_string()}"\n else:\n asset_keys = ", ".join(sorted(([asset_key.to_string() for asset_key in self.keys])))\n return f"AssetsDefinition with keys {asset_keys}"\n\n @property\n def unique_id(self) -> str:\n """A unique identifier for the AssetsDefinition that's stable across processes."""\n return hashlib.md5((json.dumps(sorted(self.keys))).encode("utf-8")).hexdigest()\n\n def with_resources(self, resource_defs: Mapping[str, ResourceDefinition]) -> "AssetsDefinition":\n attributes_dict = self.get_attributes_dict()\n attributes_dict["resource_defs"] = merge_resource_defs(\n old_resource_defs=self.resource_defs,\n resource_defs_to_merge_in=resource_defs,\n requires_resources=self,\n )\n return self.__class__(**attributes_dict)\n\n def get_attributes_dict(self) -> Dict[str, Any]:\n return dict(\n keys_by_input_name=self._keys_by_input_name,\n keys_by_output_name=self._keys_by_output_name,\n node_def=self._node_def,\n partitions_def=self._partitions_def,\n partition_mappings=self._partition_mappings,\n asset_deps=self.asset_deps,\n selected_asset_keys=self._selected_asset_keys,\n can_subset=self._can_subset,\n resource_defs=self._resource_defs,\n group_names_by_key=self._group_names_by_key,\n metadata_by_key=self._metadata_by_key,\n freshness_policies_by_key=self._freshness_policies_by_key,\n auto_materialize_policies_by_key=self._auto_materialize_policies_by_key,\n backfill_policy=self._backfill_policy,\n descriptions_by_key=self._descriptions_by_key,\n check_specs_by_output_name=self._check_specs_by_output_name,\n selected_asset_check_keys=self._selected_asset_check_keys,\n )
\n\n\ndef _infer_keys_by_input_names(\n node_def: Union["GraphDefinition", OpDefinition], keys_by_input_name: Mapping[str, AssetKey]\n) -> Mapping[str, AssetKey]:\n all_input_names = [input_def.name for input_def in node_def.input_defs]\n if keys_by_input_name:\n check.invariant(\n set(keys_by_input_name.keys()) == set(all_input_names),\n "The set of input names keys specified in the keys_by_input_name argument must "\n f"equal the set of asset keys inputted by '{node_def.name}'. \\n"\n f"keys_by_input_name keys: {set(keys_by_input_name.keys())} \\n"\n f"expected keys: {all_input_names}",\n )\n\n # If asset key is not supplied in keys_by_input_name, create asset key\n # from input name\n inferred_input_names_by_asset_key: Dict[str, AssetKey] = {\n input_name: keys_by_input_name.get(input_name, AssetKey([input_name]))\n for input_name in all_input_names\n }\n\n return inferred_input_names_by_asset_key\n\n\ndef _infer_keys_by_output_names(\n node_def: Union["GraphDefinition", OpDefinition],\n keys_by_output_name: Mapping[str, AssetKey],\n check_specs_by_output_name: Mapping[str, AssetCheckSpec],\n) -> Mapping[str, AssetKey]:\n output_names = [output_def.name for output_def in node_def.output_defs]\n if keys_by_output_name:\n overlapping_asset_and_check_outputs = set(keys_by_output_name.keys()) & set(\n check_specs_by_output_name.keys()\n )\n check.invariant(\n not overlapping_asset_and_check_outputs,\n "The set of output names associated with asset keys and checks overlap:"\n f" {overlapping_asset_and_check_outputs}",\n )\n\n union_asset_and_check_outputs = set(keys_by_output_name.keys()) | set(\n check_specs_by_output_name.keys()\n )\n check.invariant(\n union_asset_and_check_outputs == set(output_names),\n "The union of the set of output names keys specified in the keys_by_output_name and"\n " check_specs_by_output_name arguments must equal the set of asset keys outputted by"\n f" {node_def.name}. 
union keys:"\n f" {union_asset_and_check_outputs} \\nexpected keys: {set(output_names)}",\n )\n\n inferred_keys_by_output_names: Dict[str, AssetKey] = {\n output_name: asset_key for output_name, asset_key in keys_by_output_name.items()\n }\n\n if (\n len(output_names) == 1\n and output_names[0] not in keys_by_output_name\n and output_names[0] not in check_specs_by_output_name\n and output_names[0] == "result"\n ):\n # If there is only one output and the name is the default "result", generate asset key\n # from the name of the node\n inferred_keys_by_output_names[output_names[0]] = AssetKey([node_def.name])\n\n for output_name in output_names:\n if (\n output_name not in inferred_keys_by_output_names\n and output_name not in check_specs_by_output_name\n ):\n inferred_keys_by_output_names[output_name] = AssetKey([output_name])\n return inferred_keys_by_output_names\n\n\ndef _validate_graph_def(graph_def: "GraphDefinition", prefix: Optional[Sequence[str]] = None):\n """Ensure that all leaf nodes are mapped to graph outputs."""\n from dagster._core.definitions.graph_definition import GraphDefinition, create_adjacency_lists\n\n prefix = check.opt_sequence_param(prefix, "prefix")\n\n # recursively validate any sub-graphs\n for inner_node_def in graph_def.node_defs:\n if isinstance(inner_node_def, GraphDefinition):\n _validate_graph_def(inner_node_def, prefix=[*prefix, graph_def.name])\n\n # leaf nodes have no downstream nodes\n forward_edges, _ = create_adjacency_lists(graph_def.nodes, graph_def.dependency_structure)\n leaf_nodes = {\n node_name for node_name, downstream_nodes in forward_edges.items() if not downstream_nodes\n }\n\n # set of nodes that have outputs mapped to a graph output\n mapped_output_nodes = {\n output_mapping.maps_from.node_name for output_mapping in graph_def.output_mappings\n }\n\n # leaf nodes which do not have an associated mapped output\n unmapped_leaf_nodes = {".".join([*prefix, node]) for node in leaf_nodes - mapped_output_nodes}\n\n check.invariant(\n not unmapped_leaf_nodes,\n f"All leaf nodes within graph '{graph_def.name}' must generate outputs which are mapped"\n " to outputs of the graph, and produce assets. The following leaf node(s) are"\n f" non-asset producing ops: {unmapped_leaf_nodes}. This behavior is not currently"\n " supported because these ops are not required for the creation of the associated"\n " asset(s).",\n )\n\n\ndef _validate_self_deps(\n input_keys: Iterable[AssetKey],\n output_keys: Iterable[AssetKey],\n partition_mappings: Mapping[AssetKey, PartitionMapping],\n partitions_def: Optional[PartitionsDefinition],\n) -> None:\n output_keys_set = set(output_keys)\n for input_key in input_keys:\n if input_key in output_keys_set:\n if input_key in partition_mappings:\n partition_mapping = partition_mappings[input_key]\n time_window_partition_mapping = get_self_dep_time_window_partition_mapping(\n partition_mapping, partitions_def\n )\n if (\n time_window_partition_mapping is not None\n and (time_window_partition_mapping.start_offset or 0) < 0\n and (time_window_partition_mapping.end_offset or 0) < 0\n ):\n continue\n\n raise DagsterInvalidDefinitionError(\n f'Asset "{input_key.to_user_string()}" depends on itself. 
Assets can only depend'\n " on themselves if they are:\\n(a) time-partitioned and each partition depends on"\n " earlier partitions\\n(b) multipartitioned, with one time dimension that depends"\n " on earlier time partitions"\n )\n\n\ndef get_self_dep_time_window_partition_mapping(\n partition_mapping: Optional[PartitionMapping], partitions_def: Optional[PartitionsDefinition]\n) -> Optional[TimeWindowPartitionMapping]:\n """Returns a time window partition mapping dimension of the provided partition mapping,\n if exists.\n """\n if isinstance(partition_mapping, TimeWindowPartitionMapping):\n return partition_mapping\n elif isinstance(partition_mapping, MultiPartitionMapping):\n if not isinstance(partitions_def, MultiPartitionsDefinition):\n return None\n\n time_partition_mapping = partition_mapping.downstream_mappings_by_upstream_dimension.get(\n partitions_def.time_window_dimension.name\n )\n\n if time_partition_mapping is None or not isinstance(\n time_partition_mapping.partition_mapping, TimeWindowPartitionMapping\n ):\n return None\n\n return time_partition_mapping.partition_mapping\n return None\n
", "current_page_name": "_modules/dagster/_core/definitions/assets", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.assets"}, "auto_materialize_policy": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.auto_materialize_policy

\nfrom enum import Enum\nfrom typing import TYPE_CHECKING, AbstractSet, Dict, FrozenSet, NamedTuple, Optional, Sequence\n\nimport dagster._check as check\nfrom dagster._annotations import experimental, public\nfrom dagster._serdes.serdes import (\n    NamedTupleSerializer,\n    UnpackContext,\n    UnpackedValue,\n    whitelist_for_serdes,\n)\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.auto_materialize_rule import (\n        AutoMaterializeRule,\n        AutoMaterializeRuleSnapshot,\n    )\n\n\nclass AutoMaterializePolicySerializer(NamedTupleSerializer):\n    def before_unpack(\n        self, context: UnpackContext, unpacked_dict: Dict[str, UnpackedValue]\n    ) -> Dict[str, UnpackedValue]:\n        from dagster._core.definitions.auto_materialize_rule import AutoMaterializeRule\n\n        backcompat_map = {\n            "on_missing": AutoMaterializeRule.materialize_on_missing(),\n            "on_new_parent_data": AutoMaterializeRule.materialize_on_parent_updated(),\n            "for_freshness": AutoMaterializeRule.materialize_on_required_for_freshness(),\n        }\n\n        # determine if this namedtuple was serialized with the old format (booleans for rules)\n        if any(backcompat_key in unpacked_dict for backcompat_key in backcompat_map):\n            # all old policies had these rules by default\n            rules = {\n                AutoMaterializeRule.skip_on_parent_outdated(),\n                AutoMaterializeRule.skip_on_parent_missing(),\n            }\n            for backcompat_key, rule in backcompat_map.items():\n                if unpacked_dict.get(backcompat_key):\n                    rules.add(rule)\n            unpacked_dict["rules"] = frozenset(rules)\n\n        return unpacked_dict\n\n\nclass AutoMaterializePolicyType(Enum):\n    EAGER = "EAGER"\n    LAZY = "LAZY"\n\n\n
[docs]@experimental\n@whitelist_for_serdes(\n old_fields={"time_window_partition_scope_minutes": 1e-6},\n serializer=AutoMaterializePolicySerializer,\n)\nclass AutoMaterializePolicy(\n NamedTuple(\n "_AutoMaterializePolicy",\n [\n ("rules", FrozenSet["AutoMaterializeRule"]),\n ("max_materializations_per_minute", Optional[int]),\n ],\n )\n):\n """An AutoMaterializePolicy specifies how Dagster should attempt to keep an asset up-to-date.\n\n Each policy consists of a set of AutoMaterializeRules, which are used to determine whether an\n asset or a partition of an asset should or should not be auto-materialized.\n\n The most common policy is `AutoMaterializePolicy.eager()`, which consists of the following rules:\n\n - `AutoMaterializeRule.materialize_on_missing()`\n Materialize an asset or a partition if it has never been materialized.\n - `AutoMaterializeRule.materialize_on_parent_updated()`\n Materialize an asset or a partition if one of its parents have been updated more recently\n than it has.\n - `AutoMaterializeRule.materialize_on_required_for_freshness()`\n Materialize an asset or a partition if it is required to satisfy a freshness policy.\n - `AutoMaterializeRule.skip_on_parent_outdated()`\n Skip materializing an asset or partition if any of its parents have ancestors that have\n been materialized more recently.\n - `AutoMaterializeRule.skip_on_parent_missing()`\n Skip materializing an asset or a partition if any parent has never been materialized or\n observed.\n\n Policies can be customized by adding or removing rules. For example, if you'd like to allow\n an asset to be materialized even if some of its parent partitions are missing:\n\n .. code-block:: python\n\n from dagster import AutoMaterializePolicy, AutoMaterializeRule\n\n my_policy = AutoMaterializePolicy.eager().without_rules(\n AutoMaterializeRule.skip_on_parent_missing(),\n )\n\n If you'd like an asset to wait for all of its parents to be updated before materializing:\n\n .. code-block:: python\n\n from dagster import AutoMaterializePolicy, AutoMaterializeRule\n\n my_policy = AutoMaterializePolicy.eager().with_rules(\n AutoMaterializeRule.skip_on_all_parents_not_updated(),\n )\n\n Lastly, the `max_materializations_per_minute` parameter, which is set to 1 by default,\n rate-limits the number of auto-materializations that can occur for a particular asset within\n a short time interval. This mainly matters for partitioned assets. Its purpose is to provide a\n safeguard against "surprise backfills", where user-error causes auto-materialize to be\n accidentally triggered for large numbers of partitions at once.\n\n **Warning:**\n\n Constructing an AutoMaterializePolicy directly is not recommended as the API is subject to change.\n AutoMaterializePolicy.eager() and AutoMaterializePolicy.lazy() are the recommended API.\n\n """\n\n def __new__(\n cls,\n rules: AbstractSet["AutoMaterializeRule"],\n max_materializations_per_minute: Optional[int] = 1,\n ):\n from dagster._core.definitions.auto_materialize_rule import AutoMaterializeRule\n\n check.invariant(\n max_materializations_per_minute is None or max_materializations_per_minute > 0,\n "max_materializations_per_minute must be positive. To disable rate-limiting, set it"\n " to None. 
To disable auto materializing, remove the policy.",\n )\n\n return super(AutoMaterializePolicy, cls).__new__(\n cls,\n rules=frozenset(check.set_param(rules, "rules", of_type=AutoMaterializeRule)),\n max_materializations_per_minute=max_materializations_per_minute,\n )\n\n @property\n def materialize_rules(self) -> AbstractSet["AutoMaterializeRule"]:\n from dagster._core.definitions.auto_materialize_rule import AutoMaterializeDecisionType\n\n return {\n rule\n for rule in self.rules\n if rule.decision_type == AutoMaterializeDecisionType.MATERIALIZE\n }\n\n @property\n def skip_rules(self) -> AbstractSet["AutoMaterializeRule"]:\n from dagster._core.definitions.auto_materialize_rule import AutoMaterializeDecisionType\n\n return {\n rule for rule in self.rules if rule.decision_type == AutoMaterializeDecisionType.SKIP\n }\n\n
[docs] @public\n @staticmethod\n def eager(max_materializations_per_minute: Optional[int] = 1) -> "AutoMaterializePolicy":\n """Constructs an eager AutoMaterializePolicy.\n\n Args:\n max_materializations_per_minute (Optional[int]): The maximum number of\n auto-materializations for this asset that may be initiated per minute. If this limit\n is exceeded, the partitions which would have been materialized will be discarded,\n and will require manual materialization in order to be updated. Defaults to 1.\n """\n from dagster._core.definitions.auto_materialize_rule import AutoMaterializeRule\n\n return AutoMaterializePolicy(\n rules={\n AutoMaterializeRule.materialize_on_missing(),\n AutoMaterializeRule.materialize_on_parent_updated(),\n AutoMaterializeRule.materialize_on_required_for_freshness(),\n AutoMaterializeRule.skip_on_parent_outdated(),\n AutoMaterializeRule.skip_on_parent_missing(),\n },\n max_materializations_per_minute=check.opt_int_param(\n max_materializations_per_minute, "max_materializations_per_minute"\n ),\n )
\n\n
[docs] @public\n @staticmethod\n def lazy(max_materializations_per_minute: Optional[int] = 1) -> "AutoMaterializePolicy":\n """Constructs a lazy AutoMaterializePolicy.\n\n Args:\n max_materializations_per_minute (Optional[int]): The maximum number of\n auto-materializations for this asset that may be initiated per minute. If this limit\n is exceeded, the partitions which would have been materialized will be discarded,\n and will require manual materialization in order to be updated. Defaults to 1.\n """\n from dagster._core.definitions.auto_materialize_rule import AutoMaterializeRule\n\n return AutoMaterializePolicy(\n rules={\n AutoMaterializeRule.materialize_on_required_for_freshness(),\n AutoMaterializeRule.skip_on_parent_outdated(),\n AutoMaterializeRule.skip_on_parent_missing(),\n },\n max_materializations_per_minute=check.opt_int_param(\n max_materializations_per_minute, "max_materializations_per_minute"\n ),\n )
\n\n
[docs] @public\n def without_rules(self, *rules_to_remove: "AutoMaterializeRule") -> "AutoMaterializePolicy":\n """Constructs a copy of this policy with the specified rules removed. Raises an error\n if any of the arguments are not rules in this policy.\n """\n non_matching_rules = set(rules_to_remove).difference(self.rules)\n check.param_invariant(\n not non_matching_rules,\n "rules_to_remove",\n f"Rules {[rule for rule in rules_to_remove if rule in non_matching_rules]} do not"\n " exist in this policy.",\n )\n return self._replace(\n rules=self.rules.difference(set(rules_to_remove)),\n )
\n\n
[docs] @public\n def with_rules(self, *rules_to_add: "AutoMaterializeRule") -> "AutoMaterializePolicy":\n """Constructs a copy of this policy with the specified rules added."""\n return self._replace(rules=self.rules.union(set(rules_to_add)))
\n\n @property\n def policy_type(self) -> AutoMaterializePolicyType:\n from dagster._core.definitions.auto_materialize_rule import AutoMaterializeRule\n\n if AutoMaterializeRule.materialize_on_parent_updated() in self.rules:\n return AutoMaterializePolicyType.EAGER\n return AutoMaterializePolicyType.LAZY\n\n @property\n def rule_snapshots(self) -> Sequence["AutoMaterializeRuleSnapshot"]:\n return [rule.to_snapshot() for rule in self.rules]
\n
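As a usage sketch (the asset here is hypothetical), a policy like the ones constructed above is typically attached to an asset definition, and can be customized exactly as the docstring describes:

.. code-block:: python

    from dagster import AutoMaterializePolicy, AutoMaterializeRule, asset

    # Eager policy that no longer waits for missing parents.
    my_policy = AutoMaterializePolicy.eager().without_rules(
        AutoMaterializeRule.skip_on_parent_missing(),
    )

    @asset(auto_materialize_policy=my_policy)
    def my_asset():
        ...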
", "current_page_name": "_modules/dagster/_core/definitions/auto_materialize_policy", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.auto_materialize_policy"}, "auto_materialize_rule": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.auto_materialize_rule

\nimport datetime\nfrom abc import ABC, abstractmethod, abstractproperty\nfrom collections import defaultdict\nfrom enum import Enum\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Dict,\n    FrozenSet,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n    cast,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.definitions.data_time import CachingDataTimeResolver\nfrom dagster._core.definitions.events import AssetKey, AssetKeyPartitionKey\nfrom dagster._core.definitions.freshness_based_auto_materialize import (\n    freshness_evaluation_results_for_asset_key,\n)\nfrom dagster._core.definitions.partition_mapping import IdentityPartitionMapping\nfrom dagster._core.definitions.time_window_partition_mapping import TimeWindowPartitionMapping\nfrom dagster._serdes.serdes import (\n    NamedTupleSerializer,\n    UnpackContext,\n    UnpackedValue,\n    WhitelistMap,\n    whitelist_for_serdes,\n)\nfrom dagster._utils.caching_instance_queryer import CachingInstanceQueryer\n\nfrom .asset_graph import AssetGraph, sort_key_for_asset_partition\nfrom .partition import SerializedPartitionsSubset\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.asset_daemon_context import AssetDaemonContext\n    from dagster._core.definitions.asset_daemon_cursor import AssetDaemonCursor\n    from dagster._core.instance import DynamicPartitionsStore\n\n\n@whitelist_for_serdes\nclass AutoMaterializeDecisionType(Enum):\n    """Represents the set of results of the auto-materialize logic.\n\n    MATERIALIZE: The asset should be materialized by a run kicked off on this tick\n    SKIP: The asset should not be materialized by a run kicked off on this tick, because future\n        ticks are expected to materialize it.\n    DISCARD: The asset should not be materialized by a run kicked off on this tick, but future\n        ticks are not expected to materialize it.\n    """\n\n    MATERIALIZE = "MATERIALIZE"\n    SKIP = "SKIP"\n    DISCARD = "DISCARD"\n\n\nclass AutoMaterializeRuleEvaluationData(ABC):\n    pass\n\n\n@whitelist_for_serdes\nclass TextRuleEvaluationData(\n    AutoMaterializeRuleEvaluationData,\n    NamedTuple("_TextRuleEvaluationData", [("text", str)]),\n):\n    pass\n\n\n@whitelist_for_serdes\nclass ParentUpdatedRuleEvaluationData(\n    AutoMaterializeRuleEvaluationData,\n    NamedTuple(\n        "_ParentUpdatedRuleEvaluationData",\n        [\n            ("updated_asset_keys", FrozenSet[AssetKey]),\n            ("will_update_asset_keys", FrozenSet[AssetKey]),\n        ],\n    ),\n):\n    pass\n\n\n@whitelist_for_serdes\nclass WaitingOnAssetsRuleEvaluationData(\n    AutoMaterializeRuleEvaluationData,\n    NamedTuple(\n        "_WaitingOnParentRuleEvaluationData",\n        [("waiting_on_asset_keys", FrozenSet[AssetKey])],\n    ),\n):\n    pass\n\n\n@whitelist_for_serdes\nclass AutoMaterializeRuleSnapshot(NamedTuple):\n    """A serializable snapshot of an AutoMaterializeRule for historical evaluations."""\n\n    class_name: str\n    description: str\n    decision_type: AutoMaterializeDecisionType\n\n    @staticmethod\n    def from_rule(rule: "AutoMaterializeRule") -> "AutoMaterializeRuleSnapshot":\n        return AutoMaterializeRuleSnapshot(\n            class_name=rule.__class__.__name__,\n            description=rule.description,\n            decision_type=rule.decision_type,\n        )\n\n\n@whitelist_for_serdes\nclass AutoMaterializeRuleEvaluation(NamedTuple):\n    rule_snapshot: AutoMaterializeRuleSnapshot\n    
evaluation_data: Optional[AutoMaterializeRuleEvaluationData]\n\n\nclass RuleEvaluationContext(NamedTuple):\n    asset_key: AssetKey\n    cursor: "AssetDaemonCursor"\n    instance_queryer: CachingInstanceQueryer\n    data_time_resolver: CachingDataTimeResolver\n    will_materialize_mapping: Mapping[AssetKey, AbstractSet[AssetKeyPartitionKey]]\n    expected_data_time_mapping: Mapping[AssetKey, Optional[datetime.datetime]]\n    candidates: AbstractSet[AssetKeyPartitionKey]\n    daemon_context: "AssetDaemonContext"\n\n    @property\n    def asset_graph(self) -> AssetGraph:\n        return self.instance_queryer.asset_graph\n\n    def materializable_in_same_run(self, child_key: AssetKey, parent_key: AssetKey) -> bool:\n        """Returns whether a child asset can be materialized in the same run as a parent asset."""\n        from dagster._core.definitions.external_asset_graph import ExternalAssetGraph\n\n        return (\n            # both assets must be materializable\n            child_key in self.asset_graph.materializable_asset_keys\n            and parent_key in self.asset_graph.materializable_asset_keys\n            # the parent must have the same partitioning\n            and self.asset_graph.have_same_partitioning(child_key, parent_key)\n            # the parent must have a simple partition mapping to the child\n            and (\n                not self.asset_graph.is_partitioned(parent_key)\n                or isinstance(\n                    self.asset_graph.get_partition_mapping(child_key, parent_key),\n                    (TimeWindowPartitionMapping, IdentityPartitionMapping),\n                )\n            )\n            # the parent must be in the same repository to be materialized alongside the candidate\n            and (\n                not isinstance(self.asset_graph, ExternalAssetGraph)\n                or self.asset_graph.get_repository_handle(child_key)\n                == self.asset_graph.get_repository_handle(parent_key)\n            )\n        )\n\n    def get_parents_that_will_not_be_materialized_on_current_tick(\n        self, *, asset_partition: AssetKeyPartitionKey\n    ) -> AbstractSet[AssetKeyPartitionKey]:\n        """Returns the set of parent asset partitions that will not be updated in the same run of\n        this asset partition if we launch a run of this asset partition on this tick.\n        """\n        return {\n            parent\n            for parent in self.asset_graph.get_parents_partitions(\n                dynamic_partitions_store=self.instance_queryer,\n                current_time=self.instance_queryer.evaluation_time,\n                asset_key=asset_partition.asset_key,\n                partition_key=asset_partition.partition_key,\n            ).parent_partitions\n            if parent not in self.will_materialize_mapping.get(parent.asset_key, set())\n            or not self.materializable_in_same_run(asset_partition.asset_key, parent.asset_key)\n        }\n\n    def get_asset_partitions_by_asset_key(\n        self,\n        asset_partitions: AbstractSet[AssetKeyPartitionKey],\n    ) -> Mapping[AssetKey, Set[AssetKeyPartitionKey]]:\n        asset_partitions_by_asset_key: Dict[AssetKey, Set[AssetKeyPartitionKey]] = defaultdict(set)\n        for parent in asset_partitions:\n            asset_partitions_by_asset_key[parent.asset_key].add(parent)\n\n        return asset_partitions_by_asset_key\n\n\nRuleEvaluationResults = Sequence[Tuple[Optional[AutoMaterializeRuleEvaluationData], AbstractSet]]\n\n\n
[docs]class AutoMaterializeRule(ABC):\n """An AutoMaterializeRule defines a bit of logic which helps determine if a materialization\n should be kicked off for a given asset partition.\n\n Each rule can have one of two decision types, `MATERIALIZE` (indicating that an asset partition\n should be materialized) or `SKIP` (indicating that the asset partition should not be\n materialized).\n\n Materialize rules are evaluated first, and skip rules operate over the set of candidates that\n are produced by the materialize rules. Other than that, there is no ordering between rules.\n """\n\n @abstractproperty\n def decision_type(self) -> AutoMaterializeDecisionType:\n """The decision type of the rule (either `MATERIALIZE` or `SKIP`)."""\n ...\n\n @abstractproperty\n def description(self) -> str:\n """A human-readable description of this rule. As a basic guideline, this string should\n complete the sentence: 'Indicates an asset should be (materialize/skipped) when ____'.\n """\n ...\n\n @abstractmethod\n def evaluate_for_asset(self, context: RuleEvaluationContext) -> RuleEvaluationResults:\n """The core evaluation function for the rule. This function takes in a context object and\n returns a mapping from evaluated rules to the set of asset partitions that the rule applies\n to.\n """\n ...\n\n
[docs] @public\n @staticmethod\n def materialize_on_required_for_freshness() -> "MaterializeOnRequiredForFreshnessRule":\n """Materialize an asset partition if it is required to satisfy a freshness policy of this\n asset or one of its downstream assets.\n\n Note: This rule has no effect on partitioned assets.\n """\n return MaterializeOnRequiredForFreshnessRule()
\n\n
[docs] @public\n @staticmethod\n def materialize_on_parent_updated() -> "MaterializeOnParentUpdatedRule":\n """Materialize an asset partition if one of its parents has been updated more recently\n than it has.\n\n Note: For time-partitioned or dynamic-partitioned assets downstream of an unpartitioned\n asset, this rule will only fire for the most recent partition of the downstream.\n """\n return MaterializeOnParentUpdatedRule()
\n\n
[docs] @public\n @staticmethod\n def materialize_on_missing() -> "MaterializeOnMissingRule":\n """Materialize an asset partition if it has never been materialized before. This rule will\n not fire for non-root assets unless that asset's parents have been updated.\n """\n return MaterializeOnMissingRule()
\n\n
[docs] @public\n @staticmethod\n def skip_on_parent_missing() -> "SkipOnParentMissingRule":\n """Skip materializing an asset partition if one of its parent asset partitions has never\n been materialized (for regular assets) or observed (for observable source assets).\n """\n return SkipOnParentMissingRule()
\n\n
[docs] @public\n @staticmethod\n def skip_on_parent_outdated() -> "SkipOnParentOutdatedRule":\n """Skip materializing an asset partition if any of its parents has not incorporated the\n latest data from its ancestors.\n """\n return SkipOnParentOutdatedRule()
\n\n
[docs] @public\n @staticmethod\n def skip_on_not_all_parents_updated(\n require_update_for_all_parent_partitions: bool = False,\n ) -> "SkipOnNotAllParentsUpdatedRule":\n """Skip materializing an asset partition if any of its parents have not been updated since\n the asset's last materialization.\n\n Attributes:\n require_update_for_all_parent_partitions (Optional[bool]): Applies only to an unpartitioned\n asset or an asset partition that depends on more than one partition in any upstream asset.\n If true, requires all upstream partitions in each upstream asset to be materialized since\n the downstream asset's last materialization in order to update it. If false, requires at\n least one upstream partition in each upstream asset to be materialized since the downstream\n asset's last materialization in order to update it. Defaults to false.\n """\n return SkipOnNotAllParentsUpdatedRule(require_update_for_all_parent_partitions)
\n\n def to_snapshot(self) -> AutoMaterializeRuleSnapshot:\n """Returns a serializable snapshot of this rule for historical evaluations."""\n return AutoMaterializeRuleSnapshot.from_rule(self)\n\n def __eq__(self, other) -> bool:\n # override the default NamedTuple __eq__ method to factor in types\n return type(self) == type(other) and super().__eq__(other)\n\n def __hash__(self) -> int:\n # override the default NamedTuple __hash__ method to factor in types\n return hash(hash(type(self)) + super().__hash__())
\n\n\n@whitelist_for_serdes\nclass MaterializeOnRequiredForFreshnessRule(\n AutoMaterializeRule, NamedTuple("_MaterializeOnRequiredForFreshnessRule", [])\n):\n @property\n def decision_type(self) -> AutoMaterializeDecisionType:\n return AutoMaterializeDecisionType.MATERIALIZE\n\n @property\n def description(self) -> str:\n return "required to meet this or downstream asset's freshness policy"\n\n def evaluate_for_asset(self, context: RuleEvaluationContext) -> RuleEvaluationResults:\n freshness_conditions = freshness_evaluation_results_for_asset_key(\n asset_key=context.asset_key,\n data_time_resolver=context.data_time_resolver,\n asset_graph=context.asset_graph,\n current_time=context.instance_queryer.evaluation_time,\n will_materialize_mapping=context.will_materialize_mapping,\n expected_data_time_mapping=context.expected_data_time_mapping,\n )\n return freshness_conditions\n\n\n@whitelist_for_serdes\nclass MaterializeOnParentUpdatedRule(\n AutoMaterializeRule, NamedTuple("_MaterializeOnParentUpdatedRule", [])\n):\n @property\n def decision_type(self) -> AutoMaterializeDecisionType:\n return AutoMaterializeDecisionType.MATERIALIZE\n\n @property\n def description(self) -> str:\n return "upstream data has changed since latest materialization"\n\n def evaluate_for_asset(self, context: RuleEvaluationContext) -> RuleEvaluationResults:\n """Evaluates the set of asset partitions of this asset whose parents have been updated,\n or will update on this tick.\n """\n conditions = defaultdict(set)\n has_parents_that_will_update = set()\n\n # first, get the set of parents that will be materialized this tick, and see if we\n # can materialize this asset with those parents\n will_update_parents_by_asset_partition = defaultdict(set)\n for parent_key in context.asset_graph.get_parents(context.asset_key):\n if not context.materializable_in_same_run(context.asset_key, parent_key):\n continue\n for parent_partition in context.will_materialize_mapping.get(parent_key, set()):\n asset_partition = AssetKeyPartitionKey(\n context.asset_key, parent_partition.partition_key\n )\n will_update_parents_by_asset_partition[asset_partition].add(parent_key)\n has_parents_that_will_update.add(asset_partition)\n\n # next, for each asset partition of this asset which has newly-updated parents, or\n # has a parent that will update, create a ParentUpdatedRuleEvaluationData\n has_or_will_update = (\n context.daemon_context.get_asset_partitions_with_newly_updated_parents_for_key(\n context.asset_key\n )\n | has_parents_that_will_update\n )\n for asset_partition in has_or_will_update:\n parent_asset_partitions = context.asset_graph.get_parents_partitions(\n dynamic_partitions_store=context.instance_queryer,\n current_time=context.instance_queryer.evaluation_time,\n asset_key=asset_partition.asset_key,\n partition_key=asset_partition.partition_key,\n ).parent_partitions\n\n updated_parent_asset_partitions = context.instance_queryer.get_parent_asset_partitions_updated_after_child(\n asset_partition,\n parent_asset_partitions,\n # do a precise check for updated parents, factoring in data versions, as long as\n # we're within reasonable limits on the number of partitions to check\n respect_materialization_data_versions=context.daemon_context.respect_materialization_data_versions\n and len(parent_asset_partitions | has_or_will_update) < 100,\n # ignore self-dependencies when checking for updated parents, to avoid historical\n # rematerializations from causing a chain of materializations to be kicked off\n 
ignored_parent_keys={context.asset_key},\n )\n updated_parents = {parent.asset_key for parent in updated_parent_asset_partitions}\n will_update_parents = will_update_parents_by_asset_partition[asset_partition]\n\n if updated_parents or will_update_parents:\n conditions[\n ParentUpdatedRuleEvaluationData(\n updated_asset_keys=frozenset(updated_parents),\n will_update_asset_keys=frozenset(will_update_parents),\n )\n ].add(asset_partition)\n if conditions:\n return [(k, v) for k, v in conditions.items()]\n return []\n\n\n@whitelist_for_serdes\nclass MaterializeOnMissingRule(AutoMaterializeRule, NamedTuple("_MaterializeOnMissingRule", [])):\n @property\n def decision_type(self) -> AutoMaterializeDecisionType:\n return AutoMaterializeDecisionType.MATERIALIZE\n\n @property\n def description(self) -> str:\n return "materialization is missing"\n\n def evaluate_for_asset(self, context: RuleEvaluationContext) -> RuleEvaluationResults:\n """Evaluates the set of asset partitions for this asset which are missing and were not\n previously discarded. Currently only applies to root asset partitions and asset partitions\n with updated parents.\n """\n missing_asset_partitions = (\n context.daemon_context.get_never_handled_root_asset_partitions_for_key(\n context.asset_key\n )\n )\n # in addition to missing root asset partitions, check any asset partitions with updated\n # parents to see if they're missing\n for (\n candidate\n ) in context.daemon_context.get_asset_partitions_with_newly_updated_parents_for_key(\n context.asset_key\n ):\n if not context.instance_queryer.asset_partition_has_materialization_or_observation(\n candidate\n ):\n missing_asset_partitions |= {candidate}\n if missing_asset_partitions:\n return [(None, missing_asset_partitions)]\n return []\n\n\n@whitelist_for_serdes\nclass SkipOnParentOutdatedRule(AutoMaterializeRule, NamedTuple("_SkipOnParentOutdatedRule", [])):\n @property\n def decision_type(self) -> AutoMaterializeDecisionType:\n return AutoMaterializeDecisionType.SKIP\n\n @property\n def description(self) -> str:\n return "waiting on upstream data to be up to date"\n\n def evaluate_for_asset(self, context: RuleEvaluationContext) -> RuleEvaluationResults:\n asset_partitions_by_waiting_on_asset_keys = defaultdict(set)\n for candidate in context.candidates:\n outdated_ancestors = set()\n # find the root cause of why this asset partition's parents are outdated (if any)\n for parent in context.get_parents_that_will_not_be_materialized_on_current_tick(\n asset_partition=candidate\n ):\n outdated_ancestors.update(\n context.instance_queryer.get_outdated_ancestors(asset_partition=parent)\n )\n if outdated_ancestors:\n asset_partitions_by_waiting_on_asset_keys[frozenset(outdated_ancestors)].add(\n candidate\n )\n if asset_partitions_by_waiting_on_asset_keys:\n return [\n (WaitingOnAssetsRuleEvaluationData(waiting_on_asset_keys=k), v)\n for k, v in asset_partitions_by_waiting_on_asset_keys.items()\n ]\n return []\n\n\n@whitelist_for_serdes\nclass SkipOnParentMissingRule(AutoMaterializeRule, NamedTuple("_SkipOnParentMissingRule", [])):\n @property\n def decision_type(self) -> AutoMaterializeDecisionType:\n return AutoMaterializeDecisionType.SKIP\n\n @property\n def description(self) -> str:\n return "waiting on upstream data to be present"\n\n def evaluate_for_asset(\n self,\n context: RuleEvaluationContext,\n ) -> RuleEvaluationResults:\n asset_partitions_by_waiting_on_asset_keys = defaultdict(set)\n for candidate in context.candidates:\n missing_parent_asset_keys = set()\n for parent in 
context.get_parents_that_will_not_be_materialized_on_current_tick(\n asset_partition=candidate\n ):\n # ignore non-observable sources, which will never have a materialization or observation\n if context.asset_graph.is_source(\n parent.asset_key\n ) and not context.asset_graph.is_observable(parent.asset_key):\n continue\n if not context.instance_queryer.asset_partition_has_materialization_or_observation(\n parent\n ):\n missing_parent_asset_keys.add(parent.asset_key)\n if missing_parent_asset_keys:\n asset_partitions_by_waiting_on_asset_keys[frozenset(missing_parent_asset_keys)].add(\n candidate\n )\n if asset_partitions_by_waiting_on_asset_keys:\n return [\n (WaitingOnAssetsRuleEvaluationData(waiting_on_asset_keys=k), v)\n for k, v in asset_partitions_by_waiting_on_asset_keys.items()\n ]\n return []\n\n\n@whitelist_for_serdes\nclass SkipOnNotAllParentsUpdatedRule(\n AutoMaterializeRule,\n NamedTuple(\n "_SkipOnNotAllParentsUpdatedRule", [("require_update_for_all_parent_partitions", bool)]\n ),\n):\n """An auto-materialize rule that enforces that an asset can only be materialized if all parents\n have been materialized since the asset's last materialization.\n\n Attributes:\n require_update_for_all_parent_partitions (Optional[bool]): Applies only to an unpartitioned\n asset or an asset partition that depends on more than one partition in any upstream asset.\n If true, requires all upstream partitions in each upstream asset to be materialized since\n the downstream asset's last materialization in order to update it. If false, requires at\n least one upstream partition in each upstream asset to be materialized since the downstream\n asset's last materialization in order to update it. Defaults to false.\n """\n\n @property\n def decision_type(self) -> AutoMaterializeDecisionType:\n return AutoMaterializeDecisionType.SKIP\n\n @property\n def description(self) -> str:\n if self.require_update_for_all_parent_partitions is False:\n return "waiting on upstream data to be updated"\n else:\n return "waiting until all upstream partitions are updated"\n\n def evaluate_for_asset(\n self,\n context: RuleEvaluationContext,\n ) -> RuleEvaluationResults:\n asset_partitions_by_waiting_on_asset_keys = defaultdict(set)\n for candidate in context.candidates:\n parent_partitions = context.asset_graph.get_parents_partitions(\n context.instance_queryer,\n context.instance_queryer.evaluation_time,\n context.asset_key,\n candidate.partition_key,\n ).parent_partitions\n\n updated_parent_partitions = (\n context.instance_queryer.get_parent_asset_partitions_updated_after_child(\n candidate,\n parent_partitions,\n context.daemon_context.respect_materialization_data_versions,\n ignored_parent_keys=set(),\n )\n | set().union(\n *[\n context.will_materialize_mapping.get(parent, set())\n for parent in context.asset_graph.get_parents(context.asset_key)\n ]\n )\n )\n\n if self.require_update_for_all_parent_partitions:\n # All upstream partitions must be updated in order for the candidate to be updated\n non_updated_parent_keys = {\n parent.asset_key for parent in parent_partitions - updated_parent_partitions\n }\n else:\n # At least one upstream partition in each upstream asset must be updated in order\n # for the candidate to be updated\n parent_asset_keys = context.asset_graph.get_parents(context.asset_key)\n updated_parent_partitions_by_asset_key = context.get_asset_partitions_by_asset_key(\n updated_parent_partitions\n )\n non_updated_parent_keys = {\n parent\n for parent in parent_asset_keys\n if not 
updated_parent_partitions_by_asset_key.get(parent)\n }\n\n # do not require past partitions of this asset to be updated\n non_updated_parent_keys -= {context.asset_key}\n\n if non_updated_parent_keys:\n asset_partitions_by_waiting_on_asset_keys[frozenset(non_updated_parent_keys)].add(\n candidate\n )\n\n if asset_partitions_by_waiting_on_asset_keys:\n return [\n (WaitingOnAssetsRuleEvaluationData(waiting_on_asset_keys=k), v)\n for k, v in asset_partitions_by_waiting_on_asset_keys.items()\n ]\n return []\n\n\n@whitelist_for_serdes\nclass DiscardOnMaxMaterializationsExceededRule(\n AutoMaterializeRule, NamedTuple("_DiscardOnMaxMaterializationsExceededRule", [("limit", int)])\n):\n @property\n def decision_type(self) -> AutoMaterializeDecisionType:\n return AutoMaterializeDecisionType.DISCARD\n\n @property\n def description(self) -> str:\n return f"exceeds {self.limit} materialization(s) per minute"\n\n def evaluate_for_asset(self, context: RuleEvaluationContext) -> RuleEvaluationResults:\n # the set of asset partitions which exceed the limit\n rate_limited_asset_partitions = set(\n sorted(\n context.candidates,\n key=lambda x: sort_key_for_asset_partition(context.asset_graph, x),\n )[self.limit :]\n )\n if rate_limited_asset_partitions:\n return [(None, rate_limited_asset_partitions)]\n return []\n\n\n@whitelist_for_serdes\nclass AutoMaterializeAssetEvaluation(NamedTuple):\n """Represents the results of the auto-materialize logic for a single asset.\n\n Properties:\n asset_key (AssetKey): The asset key that was evaluated.\n partition_subsets_by_condition: The rule evaluations that impact if the asset should be\n materialized, skipped, or discarded. If the asset is partitioned, this will be a list of\n tuples, where the first element is the condition and the second element is the\n serialized subset of partitions that the condition applies to. 
If it's not partitioned,\n the second element will be None.\n """\n\n asset_key: AssetKey\n partition_subsets_by_condition: Sequence[\n Tuple["AutoMaterializeRuleEvaluation", Optional[SerializedPartitionsSubset]]\n ]\n num_requested: int\n num_skipped: int\n num_discarded: int\n run_ids: Set[str] = set()\n rule_snapshots: Optional[Sequence[AutoMaterializeRuleSnapshot]] = None\n\n @staticmethod\n def from_rule_evaluation_results(\n asset_graph: AssetGraph,\n asset_key: AssetKey,\n asset_partitions_by_rule_evaluation: Sequence[\n Tuple[AutoMaterializeRuleEvaluation, AbstractSet[AssetKeyPartitionKey]]\n ],\n num_requested: int,\n num_skipped: int,\n num_discarded: int,\n dynamic_partitions_store: "DynamicPartitionsStore",\n ) -> "AutoMaterializeAssetEvaluation":\n auto_materialize_policy = asset_graph.auto_materialize_policies_by_key.get(asset_key)\n\n if not auto_materialize_policy:\n check.failed(f"Expected auto materialize policy on asset {asset_key}")\n\n partitions_def = asset_graph.get_partitions_def(asset_key)\n if partitions_def is None:\n return AutoMaterializeAssetEvaluation(\n asset_key=asset_key,\n partition_subsets_by_condition=[\n (rule_evaluation, None)\n for rule_evaluation, _ in asset_partitions_by_rule_evaluation\n ],\n num_requested=num_requested,\n num_skipped=num_skipped,\n num_discarded=num_discarded,\n rule_snapshots=auto_materialize_policy.rule_snapshots,\n )\n else:\n return AutoMaterializeAssetEvaluation(\n asset_key=asset_key,\n partition_subsets_by_condition=[\n (\n rule_evaluation,\n SerializedPartitionsSubset.from_subset(\n subset=partitions_def.empty_subset().with_partition_keys(\n check.not_none(ap.partition_key) for ap in asset_partitions\n ),\n partitions_def=partitions_def,\n dynamic_partitions_store=dynamic_partitions_store,\n ),\n )\n for rule_evaluation, asset_partitions in asset_partitions_by_rule_evaluation\n ],\n num_requested=num_requested,\n num_skipped=num_skipped,\n num_discarded=num_discarded,\n rule_snapshots=auto_materialize_policy.rule_snapshots,\n )\n\n\n# BACKCOMPAT GRAVEYARD\n\n\nclass BackcompatAutoMaterializeConditionSerializer(NamedTupleSerializer):\n """This handles backcompat for the old AutoMaterializeCondition objects, turning them into the\n proper AutoMaterializeRuleEvaluation objects. 
This is necessary because old\n AutoMaterializeAssetEvaluation objects will have serialized AutoMaterializeCondition objects,\n and we need to be able to deserialize them.\n\n In theory, as these serialized objects happen to be purged periodically, we can remove this\n backcompat logic at some point in the future.\n """\n\n def unpack(\n self,\n unpacked_dict: Dict[str, UnpackedValue],\n whitelist_map: WhitelistMap,\n context: UnpackContext,\n ) -> AutoMaterializeRuleEvaluation:\n if self.klass in (\n FreshnessAutoMaterializeCondition,\n DownstreamFreshnessAutoMaterializeCondition,\n ):\n return AutoMaterializeRuleEvaluation(\n rule_snapshot=AutoMaterializeRule.materialize_on_required_for_freshness().to_snapshot(),\n evaluation_data=None,\n )\n elif self.klass == MissingAutoMaterializeCondition:\n return AutoMaterializeRuleEvaluation(\n rule_snapshot=AutoMaterializeRule.materialize_on_missing().to_snapshot(),\n evaluation_data=None,\n )\n elif self.klass == ParentMaterializedAutoMaterializeCondition:\n updated_asset_keys = unpacked_dict.get("updated_asset_keys")\n if isinstance(updated_asset_keys, set):\n updated_asset_keys = cast(FrozenSet[AssetKey], frozenset(updated_asset_keys))\n else:\n updated_asset_keys = frozenset()\n will_update_asset_keys = unpacked_dict.get("will_update_asset_keys")\n if isinstance(will_update_asset_keys, set):\n will_update_asset_keys = cast(\n FrozenSet[AssetKey], frozenset(will_update_asset_keys)\n )\n else:\n will_update_asset_keys = frozenset()\n return AutoMaterializeRuleEvaluation(\n rule_snapshot=AutoMaterializeRule.materialize_on_parent_updated().to_snapshot(),\n evaluation_data=ParentUpdatedRuleEvaluationData(\n updated_asset_keys=updated_asset_keys,\n will_update_asset_keys=will_update_asset_keys,\n ),\n )\n elif self.klass == ParentOutdatedAutoMaterializeCondition:\n waiting_on_asset_keys = unpacked_dict.get("waiting_on_asset_keys")\n if isinstance(waiting_on_asset_keys, set):\n waiting_on_asset_keys = cast(FrozenSet[AssetKey], frozenset(waiting_on_asset_keys))\n else:\n waiting_on_asset_keys = frozenset()\n return AutoMaterializeRuleEvaluation(\n rule_snapshot=AutoMaterializeRule.skip_on_parent_outdated().to_snapshot(),\n evaluation_data=WaitingOnAssetsRuleEvaluationData(\n waiting_on_asset_keys=waiting_on_asset_keys\n ),\n )\n elif self.klass == MaxMaterializationsExceededAutoMaterializeCondition:\n return AutoMaterializeRuleEvaluation(\n rule_snapshot=DiscardOnMaxMaterializationsExceededRule(limit=1).to_snapshot(),\n evaluation_data=None,\n )\n check.failed(f"Unexpected class {self.klass}")\n\n\n@whitelist_for_serdes(serializer=BackcompatAutoMaterializeConditionSerializer)\nclass FreshnessAutoMaterializeCondition(NamedTuple): ...\n\n\n@whitelist_for_serdes(serializer=BackcompatAutoMaterializeConditionSerializer)\nclass DownstreamFreshnessAutoMaterializeCondition(NamedTuple): ...\n\n\n@whitelist_for_serdes(serializer=BackcompatAutoMaterializeConditionSerializer)\nclass ParentMaterializedAutoMaterializeCondition(NamedTuple): ...\n\n\n@whitelist_for_serdes(serializer=BackcompatAutoMaterializeConditionSerializer)\nclass MissingAutoMaterializeCondition(NamedTuple): ...\n\n\n@whitelist_for_serdes(serializer=BackcompatAutoMaterializeConditionSerializer)\nclass ParentOutdatedAutoMaterializeCondition(NamedTuple): ...\n\n\n@whitelist_for_serdes(serializer=BackcompatAutoMaterializeConditionSerializer)\nclass MaxMaterializationsExceededAutoMaterializeCondition(NamedTuple): ...\n
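As a usage sketch (asset name hypothetical), the public constructors above compose into a policy via `with_rules`; the rule added below is `skip_on_not_all_parents_updated`, matching the constructor defined in this module:

.. code-block:: python

    from dagster import AutoMaterializePolicy, AutoMaterializeRule, asset

    # Eager policy that additionally waits until every parent has been updated
    # since this asset's last materialization.
    wait_for_all_parents = AutoMaterializePolicy.eager().with_rules(
        AutoMaterializeRule.skip_on_not_all_parents_updated(),
    )

    @asset(auto_materialize_policy=wait_for_all_parents)
    def downstream_report():
        ...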
", "current_page_name": "_modules/dagster/_core/definitions/auto_materialize_rule", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.auto_materialize_rule"}, "backfill_policy": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.backfill_policy

\nfrom enum import Enum\nfrom typing import NamedTuple, Optional\n\nimport dagster._check as check\nfrom dagster._annotations import experimental, public\nfrom dagster._serdes import whitelist_for_serdes\n\n\nclass BackfillPolicyType(Enum):\n    SINGLE_RUN = "SINGLE_RUN"\n    MULTI_RUN = "MULTI_RUN"\n\n\n
[docs]@experimental\n@whitelist_for_serdes\nclass BackfillPolicy(\n NamedTuple(\n "_BackfillPolicy",\n [\n ("max_partitions_per_run", Optional[int]),\n ],\n )\n):\n """A BackfillPolicy specifies how Dagster should attempt to backfill a partitioned asset.\n\n There are two main kinds of backfill policies: single-run and multi-run.\n\n An asset with a single-run backfill policy will take a single run to backfill all of its\n partitions at once.\n\n An asset with a multi-run backfill policy will take multiple runs to backfill all of its\n partitions. Each run will backfill a subset of the partitions. The number of partitions to\n backfill in each run is controlled by the `max_partitions_per_run` parameter.\n\n For example:\n\n - If an asset has 100 partitions, and the `max_partitions_per_run` is set to 10, then it will\n be backfilled in 10 runs; each run will backfill 10 partitions.\n\n - If an asset has 100 partitions, and the `max_partitions_per_run` is set to 11, then it will\n be backfilled in 10 runs; the first 9 runs will backfill 11 partitions, and the last one run\n will backfill the remaining 9 partitions.\n\n **Warning:**\n\n Constructing an BackfillPolicy directly is not recommended as the API is subject to change.\n BackfillPolicy.single_run() and BackfillPolicy.multi_run(max_partitions_per_run=x) are the\n recommended APIs.\n """\n\n def __new__(cls, max_partitions_per_run: Optional[int] = 1):\n return super(BackfillPolicy, cls).__new__(\n cls,\n max_partitions_per_run=max_partitions_per_run,\n )\n\n
[docs] @public\n @staticmethod\n def single_run() -> "BackfillPolicy":\n """Creates a BackfillPolicy that executes the entire backfill in a single run."""\n return BackfillPolicy(max_partitions_per_run=None)
\n\n
[docs] @public\n @staticmethod\n def multi_run(max_partitions_per_run: int = 1) -> "BackfillPolicy":\n """Creates a BackfillPolicy that executes the entire backfill in multiple runs.\n Each run will backfill [max_partitions_per_run] number of partitions.\n\n Args:\n max_partitions_per_run (Optional[int]): The maximum number of partitions in each run of\n the multiple runs. Defaults to 1.\n """\n return BackfillPolicy(\n max_partitions_per_run=check.int_param(max_partitions_per_run, "max_partitions_per_run")\n )
\n\n @property\n def policy_type(self) -> BackfillPolicyType:\n if self.max_partitions_per_run:\n return BackfillPolicyType.MULTI_RUN\n else:\n return BackfillPolicyType.SINGLE_RUN
\n
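A minimal sketch of attaching these policies to partitioned assets (asset names and partitions are hypothetical):

.. code-block:: python

    from dagster import BackfillPolicy, DailyPartitionsDefinition, asset

    daily = DailyPartitionsDefinition(start_date="2023-01-01")

    # Backfill all requested partitions in a single run.
    @asset(partitions_def=daily, backfill_policy=BackfillPolicy.single_run())
    def raw_events():
        ...

    # Backfill in chunks of at most 10 partitions per run.
    @asset(
        partitions_def=daily,
        backfill_policy=BackfillPolicy.multi_run(max_partitions_per_run=10),
    )
    def daily_summary():
        ...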
", "current_page_name": "_modules/dagster/_core/definitions/backfill_policy", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.backfill_policy"}, "config": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.config

\nfrom typing import Any, Callable, Mapping, NamedTuple, Optional, Union, cast\n\nfrom typing_extensions import TypeAlias\n\nimport dagster._check as check\nfrom dagster._builtins import BuiltinEnum\nfrom dagster._config import (\n    ConfigType,\n    is_supported_config_python_builtin,\n    process_config,\n    resolve_defaults,\n    validate_config,\n)\nfrom dagster._core.definitions.definition_config_schema import IDefinitionConfigSchema\nfrom dagster._core.errors import DagsterInvalidConfigError\n\nfrom .definition_config_schema import convert_user_facing_definition_config_schema\n\nConfigMappingFn: TypeAlias = Callable[[Any], Any]\n\n\ndef is_callable_valid_config_arg(config: Union[Callable[..., Any], Mapping[str, object]]) -> bool:\n    return BuiltinEnum.contains(config) or is_supported_config_python_builtin(config)\n\n\n
[docs]class ConfigMapping(\n NamedTuple(\n "_ConfigMapping",\n [\n ("config_fn", Callable[[Any], Any]),\n ("config_schema", IDefinitionConfigSchema),\n ("receive_processed_config_values", Optional[bool]),\n ],\n )\n):\n """Defines a config mapping for a graph (or job).\n\n By specifying a config mapping function, you can override the configuration for the child\n ops and graphs contained within a graph.\n\n Config mappings require the configuration schema to be specified as ``config_schema``, which will\n be exposed as the configuration schema for the graph, as well as a configuration mapping\n function, ``config_fn``, which maps the config provided to the graph to the config\n that will be provided to the child nodes.\n\n Args:\n config_fn (Callable[[dict], dict]): The function that will be called\n to map the graph config to a config appropriate for the child nodes.\n config_schema (ConfigSchema): The schema of the graph config.\n receive_processed_config_values (Optional[bool]): If true, config values provided to the config_fn\n will be converted to their dagster types before being passed in. For example, if this\n value is true, enum config passed to config_fn will be actual enums, while if false,\n then enum config passed to config_fn will be strings.\n """\n\n def __new__(\n cls,\n config_fn: ConfigMappingFn,\n config_schema: Optional[Any] = None,\n receive_processed_config_values: Optional[bool] = None,\n ):\n return super(ConfigMapping, cls).__new__(\n cls,\n config_fn=check.callable_param(config_fn, "config_fn"),\n config_schema=convert_user_facing_definition_config_schema(config_schema),\n receive_processed_config_values=check.opt_bool_param(\n receive_processed_config_values, "receive_processed_config_values"\n ),\n )\n\n def resolve_from_unvalidated_config(self, config: Any) -> Any:\n """Validates config against outer config schema, and calls mapping against validated config."""\n receive_processed_config_values = check.opt_bool_param(\n self.receive_processed_config_values, "receive_processed_config_values", default=True\n )\n if receive_processed_config_values:\n outer_evr = process_config(\n self.config_schema.config_type,\n config,\n )\n else:\n outer_evr = validate_config(\n self.config_schema.config_type,\n config,\n )\n if not outer_evr.success:\n raise DagsterInvalidConfigError(\n "Error in config mapping ",\n outer_evr.errors,\n config,\n )\n\n outer_config = outer_evr.value\n if not receive_processed_config_values:\n outer_config = resolve_defaults(\n cast(ConfigType, self.config_schema.config_type),\n outer_config,\n ).value\n\n return self.config_fn(outer_config)\n\n def resolve_from_validated_config(self, config: Any) -> Any:\n if self.receive_processed_config_values is not None:\n check.failed(\n "`receive_processed_config_values` parameter has been set, but only applies to "\n "unvalidated config."\n )\n\n return self.config_fn(config)
\n
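A sketch of the mapping flow described above, using a hypothetical one-op graph: `config_fn` receives config validated against the graph-level `config_schema` and returns config for the child op, keyed by op name:

.. code-block:: python

    from dagster import ConfigMapping, graph, op

    @op(config_schema={"message": str})
    def echo(context):
        context.log.info(context.op_config["message"])

    def _map_config(outer):
        # Fan the simplified graph config out to the child op's config.
        return {"echo": {"config": {"message": f"{outer['prefix']} world"}}}

    @graph(config=ConfigMapping(config_fn=_map_config, config_schema={"prefix": str}))
    def greeting_graph():
        echo()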
", "current_page_name": "_modules/dagster/_core/definitions/config", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.config"}, "configurable": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.configurable

\nfrom abc import ABC, abstractmethod\nfrom typing import Any, Callable, NamedTuple, Optional, Type, TypeVar, Union, cast\n\nfrom typing_extensions import Self\n\nfrom dagster import (\n    Field,\n    _check as check,\n)\nfrom dagster._config import EvaluateValueResult\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._core.decorator_utils import get_function_params\n\nfrom .definition_config_schema import (\n    CoercableToConfigSchema,\n    ConfiguredDefinitionConfigSchema,\n    IDefinitionConfigSchema,\n    convert_user_facing_definition_config_schema,\n)\n\n\nclass ConfigurableDefinition(ABC):\n    @property\n    @abstractmethod\n    def config_schema(self) -> Optional[IDefinitionConfigSchema]:\n        raise NotImplementedError()\n\n    @property\n    def has_config_field(self) -> bool:\n        return self.config_schema is not None and bool(self.config_schema.as_field())\n\n    @property\n    def config_field(self) -> Optional[Field]:\n        return None if not self.config_schema else self.config_schema.as_field()\n\n    # getter for typed access\n    def get_config_field(self) -> Field:\n        field = self.config_field\n        if field is None:\n            check.failed("Must check has_config_Field before calling get_config_field")\n        return field\n\n    def apply_config_mapping(self, config: Any) -> EvaluateValueResult:\n        """Applies user-provided config mapping functions to the given configuration and validates the\n        results against the respective config schema.\n\n        Expects incoming config to be validated and have fully-resolved values (StringSource values\n        resolved, Enum types hydrated, etc.) via process_config() during ResolvedRunConfig\n        construction and Graph config mapping.\n\n        Args:\n            config (Any): A validated and resolved configuration dictionary matching this object's\n            config_schema\n\n        Returns (EvaluateValueResult):\n            If successful, the value is a validated and resolved configuration dictionary for the\n            innermost wrapped object after applying the config mapping transformation function.\n        """\n        # If schema is on a mapped schema this is the innermost resource (base case),\n        # so we aren't responsible for validating against anything farther down.\n        # Returns an EVR for type consistency with config_mapping_fn.\n        return (\n            self.config_schema.resolve_config(config)\n            if isinstance(self.config_schema, ConfiguredDefinitionConfigSchema)\n            else EvaluateValueResult.for_value(config)\n        )\n\n\nclass AnonymousConfigurableDefinition(ConfigurableDefinition):\n    """An interface that makes the `configured` method not accept a name argument."""\n\n    def configured(\n        self,\n        config_or_config_fn: Any,\n        config_schema: CoercableToConfigSchema = None,\n        description: Optional[str] = None,\n    ) -> Self:\n        """Wraps this object in an object of the same type that provides configuration to the inner\n        object.\n\n        Using ``configured`` may result in config values being displayed in\n        the Dagster UI, so it is not recommended to use this API with sensitive values,\n        such as secrets.\n\n        Args:\n            config_or_config_fn (Union[Any, Callable[[Any], Any]]): Either (1) Run configuration\n                that fully satisfies this object's config schema or (2) A function that accepts run\n                configuration and 
returns run configuration that fully satisfies this object's\n                config schema.  In the latter case, config_schema must be specified.  When\n                passing a function, it's easiest to use :py:func:`configured`.\n            config_schema (ConfigSchema): If config_or_config_fn is a function, the config schema\n                that its input must satisfy.\n            description (Optional[str]): Description of the new definition. If not specified,\n                inherits the description of the definition being configured.\n\n        Returns (ConfigurableDefinition): A configured version of this object.\n        """\n        new_config_schema = ConfiguredDefinitionConfigSchema(\n            self, convert_user_facing_definition_config_schema(config_schema), config_or_config_fn\n        )\n\n        return self.copy_for_configured(description, new_config_schema)\n\n    @abstractmethod\n    def copy_for_configured(\n        self,\n        description: Optional[str],\n        config_schema: IDefinitionConfigSchema,\n    ) -> Self:\n        raise NotImplementedError()\n\n\nclass NamedConfigurableDefinition(ConfigurableDefinition):\n    """An interface that makes the `configured` method require a positional `name` argument."""\n\n    def configured(\n        self,\n        config_or_config_fn: Any,\n        name: str,\n        config_schema: Optional[UserConfigSchema] = None,\n        description: Optional[str] = None,\n    ) -> Self:\n        """Wraps this object in an object of the same type that provides configuration to the inner\n        object.\n\n        Using ``configured`` may result in config values being displayed in\n        the Dagster UI, so it is not recommended to use this API with sensitive values,\n        such as secrets.\n\n        Args:\n            config_or_config_fn (Union[Any, Callable[[Any], Any]]): Either (1) Run configuration\n                that fully satisfies this object's config schema or (2) A function that accepts run\n                configuration and returns run configuration that fully satisfies this object's\n                config schema.  In the latter case, config_schema must be specified.  When\n                passing a function, it's easiest to use :py:func:`configured`.\n            name (str): Name of the new definition. This is a required argument, as this definition\n                type has a name uniqueness constraint.\n            config_schema (ConfigSchema): If config_or_config_fn is a function, the config schema\n                that its input must satisfy.\n            description (Optional[str]): Description of the new definition. 
If not specified,\n                inherits the description of the definition being configured.\n\n        Returns (ConfigurableDefinition): A configured version of this object.\n        """\n        name = check.str_param(name, "name")\n\n        new_config_schema = ConfiguredDefinitionConfigSchema(\n            self, convert_user_facing_definition_config_schema(config_schema), config_or_config_fn\n        )\n\n        return self.copy_for_configured(name, description, new_config_schema)\n\n    @abstractmethod\n    def copy_for_configured(\n        self,\n        name: str,\n        description: Optional[str],\n        config_schema: IDefinitionConfigSchema,\n    ) -> Self: ...\n\n\ndef _check_configurable_param(configurable: ConfigurableDefinition) -> None:\n    from dagster._core.definitions.composition import PendingNodeInvocation\n\n    check.param_invariant(\n        not isinstance(configurable, PendingNodeInvocation),\n        "configurable",\n        "You have invoked `configured` on a PendingNodeInvocation (an intermediate type), which"\n        " is produced by aliasing or tagging a node definition. To configure a node, you must"\n        " call `configured` on either an OpDefinition and GraphDefinition. To fix"\n        " this error, make sure to call `configured` on the definition object *before* using"\n        " the `tag` or `alias` methods. For usage examples, see"\n        " https://docs.dagster.io/concepts/configuration/configured",\n    )\n    check.inst_param(\n        configurable,\n        "configurable",\n        ConfigurableDefinition,\n        "Only the following types can be used with the `configured` method: ResourceDefinition,"\n        " ExecutorDefinition, GraphDefinition, NodeDefinition, and LoggerDefinition."\n        " For usage examples of `configured`, see"\n        " https://docs.dagster.io/concepts/configuration/configured",\n    )\n\n\nT_Configurable = TypeVar(\n    "T_Configurable", bound=Union["AnonymousConfigurableDefinition", "NamedConfigurableDefinition"]\n)\n\n\nclass FunctionAndConfigSchema(NamedTuple):\n    function: Callable[[Any], Any]\n    config_schema: Optional[UserConfigSchema]\n\n\ndef _wrap_user_fn_if_pythonic_config(\n    user_fn: Any, config_schema: Optional[UserConfigSchema]\n) -> FunctionAndConfigSchema:\n    """Helper function which allows users to provide a Pythonic config object to a @configurable\n    function. Detects if the function has a single parameter annotated with a Config class.\n    If so, wraps the function to convert the config dictionary into the appropriate Config object.\n    """\n    from dagster._config.pythonic_config import (\n        Config,\n        infer_schema_from_config_annotation,\n        safe_is_subclass,\n    )\n\n    if not isinstance(user_fn, Callable):\n        return FunctionAndConfigSchema(function=user_fn, config_schema=config_schema)\n\n    config_fn_params = get_function_params(user_fn)\n    check.invariant(\n        len(config_fn_params) == 1, "@configured function should have exactly one parameter"\n    )\n\n    param = config_fn_params[0]\n\n    # If the parameter is a subclass of Config, we can infer the config schema from the\n    # type annotation. 
We'll also wrap the config mapping function to convert the config\n    # dictionary into the appropriate Config object.\n    if not safe_is_subclass(param.annotation, Config):\n        return FunctionAndConfigSchema(function=user_fn, config_schema=config_schema)\n\n    check.invariant(\n        config_schema is None,\n        "Cannot provide config_schema to @configured function with Config-annotated param",\n    )\n\n    config_schema_from_class = infer_schema_from_config_annotation(param.annotation, param.default)\n    config_cls = cast(Type[Config], param.annotation)\n\n    param_name = param.name\n\n    def wrapped_fn(config_as_dict) -> Any:\n        config_input = config_cls(**config_as_dict)\n        output = user_fn(**{param_name: config_input})\n\n        if isinstance(output, Config):\n            return output._convert_to_config_dictionary()  # noqa: SLF001\n        else:\n            return output\n\n    return FunctionAndConfigSchema(function=wrapped_fn, config_schema=config_schema_from_class)\n\n\n
[docs]def configured(\n configurable: T_Configurable,\n config_schema: Optional[UserConfigSchema] = None,\n **kwargs: Any,\n) -> Callable[[object], T_Configurable]:\n """A decorator that makes it easy to create a function-configured version of an object.\n\n The following definition types can be configured using this function:\n\n * :py:class:`GraphDefinition`\n * :py:class:`ExecutorDefinition`\n * :py:class:`LoggerDefinition`\n * :py:class:`ResourceDefinition`\n * :py:class:`OpDefinition`\n\n Using ``configured`` may result in config values being displayed in the Dagster UI,\n so it is not recommended to use this API with sensitive values, such as\n secrets.\n\n If the config that will be supplied to the object is constant, you may alternatively invoke this\n and call the result with a dict of config values to be curried. Examples of both strategies\n below.\n\n Args:\n configurable (ConfigurableDefinition): An object that can be configured.\n config_schema (ConfigSchema): The config schema that the inputs to the decorated function\n must satisfy. Alternatively, annotate the config parameter to the decorated function\n with a subclass of :py:class:`Config` and omit this argument.\n **kwargs: Arbitrary keyword arguments that will be passed to the initializer of the returned\n object.\n\n Returns:\n (Callable[[Union[Any, Callable[[Any], Any]]], ConfigurableDefinition])\n\n **Examples:**\n\n .. code-block:: python\n\n class GreetingConfig(Config):\n message: str\n\n @op\n def greeting_op(config: GreetingConfig):\n print(config.message)\n\n class HelloConfig(Config):\n name: str\n\n @configured(greeting_op)\n def hello_op(config: HelloConfig):\n return GreetingConfig(message=f"Hello, {config.name}!")\n\n .. code-block:: python\n\n dev_s3 = configured(S3Resource, name="dev_s3")({'bucket': 'dev'})\n\n @configured(S3Resource)\n def dev_s3(_):\n return {'bucket': 'dev'}\n\n @configured(S3Resource, {'bucket_prefix', str})\n def dev_s3(config):\n return {'bucket': config['bucket_prefix'] + 'dev'}\n\n """\n _check_configurable_param(configurable)\n\n if isinstance(configurable, NamedConfigurableDefinition):\n\n def _configured(config_or_config_fn: object) -> T_Configurable:\n fn_name = (\n getattr(config_or_config_fn, "__name__", None)\n if callable(config_or_config_fn)\n else None\n )\n name: str = check.not_none(kwargs.get("name") or fn_name)\n\n updated_fn, new_config_schema = _wrap_user_fn_if_pythonic_config(\n config_or_config_fn, config_schema\n )\n return configurable.configured(\n config_or_config_fn=updated_fn,\n name=name,\n config_schema=new_config_schema,\n **{k: v for k, v in kwargs.items() if k != "name"},\n )\n\n return _configured\n elif isinstance(configurable, AnonymousConfigurableDefinition):\n\n def _configured(config_or_config_fn: object) -> T_Configurable:\n updated_fn, new_config_schema = _wrap_user_fn_if_pythonic_config(\n config_or_config_fn, config_schema\n )\n return configurable.configured(\n config_schema=new_config_schema, config_or_config_fn=updated_fn, **kwargs\n )\n\n return _configured\n else:\n check.failed(f"Invalid configurable definition type: {type(configurable)}")
\n
", "current_page_name": "_modules/dagster/_core/definitions/configurable", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.configurable"}, "decorators": {"asset_check_decorator": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.decorators.asset_check_decorator

\nfrom typing import Any, Callable, Mapping, Optional, Set, Tuple, Union, cast\n\nfrom dagster import _check as check\nfrom dagster._annotations import experimental\nfrom dagster._builtins import Nothing\nfrom dagster._config import UserConfigSchema\nfrom dagster._core.definitions.asset_check_result import AssetCheckResult\nfrom dagster._core.definitions.asset_check_spec import AssetCheckSpec\nfrom dagster._core.definitions.asset_checks import (\n    AssetChecksDefinition,\n    AssetChecksDefinitionInputOutputProps,\n)\nfrom dagster._core.definitions.assets import AssetsDefinition\nfrom dagster._core.definitions.events import AssetKey, CoercibleToAssetKey\nfrom dagster._core.definitions.output import Out\nfrom dagster._core.definitions.policy import RetryPolicy\nfrom dagster._core.definitions.source_asset import SourceAsset\nfrom dagster._core.definitions.utils import NoValueSentinel\nfrom dagster._core.errors import DagsterInvalidDefinitionError\n\nfrom ..input import In\nfrom .asset_decorator import (\n    get_function_params_without_context_or_config_or_resources,\n    stringify_asset_key_to_input_name,\n)\nfrom .op_decorator import _Op\n\nAssetCheckFunctionReturn = AssetCheckResult\nAssetCheckFunction = Callable[..., AssetCheckFunctionReturn]\n\n\ndef _build_asset_check_input(\n    name: str, asset_key: AssetKey, fn: Callable\n) -> Mapping[AssetKey, Tuple[str, In]]:\n    asset_params = get_function_params_without_context_or_config_or_resources(fn)\n\n    if len(asset_params) == 0:\n        input_name = stringify_asset_key_to_input_name(asset_key)\n        in_def = In(cast(type, Nothing))\n    elif len(asset_params) == 1:\n        input_name = asset_params[0].name\n        in_def = In(metadata={}, input_manager_key=None, dagster_type=NoValueSentinel)\n    else:\n        raise DagsterInvalidDefinitionError(\n            f"When defining check '{name}', multiple target assets provided as parameters:"\n            f" {[param.name for param in asset_params]}. Only one"\n            " is allowed."\n        )\n\n    return {\n        asset_key: (\n            input_name,\n            in_def,\n        )\n    }\n\n\n
[docs]@experimental\ndef asset_check(\n *,\n asset: Union[CoercibleToAssetKey, AssetsDefinition, SourceAsset],\n name: Optional[str] = None,\n description: Optional[str] = None,\n required_resource_keys: Optional[Set[str]] = None,\n resource_defs: Optional[Mapping[str, object]] = None,\n config_schema: Optional[UserConfigSchema] = None,\n compute_kind: Optional[str] = None,\n op_tags: Optional[Mapping[str, Any]] = None,\n retry_policy: Optional[RetryPolicy] = None,\n) -> Callable[[AssetCheckFunction], AssetChecksDefinition]:\n """Create a definition for how to execute an asset check.\n\n Args:\n asset (Union[AssetKey, Sequence[str], str, AssetsDefinition, SourceAsset]): The\n asset that the check applies to.\n name (Optional[str]): The name of the check. If not specified, the name of the decorated\n function will be used. Checks for the same asset must have unique names.\n description (Optional[str]): The description of the check.\n required_resource_keys (Optional[Set[str]]): A set of keys for resources that are required\n by the function that execute the check. These can alternatively be specified by\n including resource-typed parameters in the function signature.\n config_schema (Optional[ConfigSchema): The configuration schema for the check's underlying\n op. If set, Dagster will check that config provided for the op matches this schema and fail\n if it does not. If not set, Dagster will accept any config provided for the op.\n op_tags (Optional[Dict[str, Any]]): A dictionary of tags for the op that executes the check.\n Frameworks may expect and require certain metadata to be attached to a op. Values that\n are not strings will be json encoded and must meet the criteria that\n `json.loads(json.dumps(value)) == value`.\n compute_kind (Optional[str]): A string to represent the kind of computation that executes\n the check, e.g. "dbt" or "spark".\n retry_policy (Optional[RetryPolicy]): The retry policy for the op that executes the check.\n\n\n Produces an :py:class:`AssetChecksDefinition` object.\n\n\n Example:\n .. code-block:: python\n\n from dagster import asset, asset_check, AssetCheckResult\n\n @asset\n def my_asset() -> None:\n ...\n\n @asset_check(asset=my_asset, description="Check that my asset has enough rows")\n def my_asset_has_enough_rows() -> AssetCheckResult:\n num_rows = ...\n return AssetCheckResult(passed=num_rows > 5, metadata={"num_rows": num_rows})\n\n\n Example with a DataFrame Output:\n .. 
code-block:: python\n\n from dagster import asset, asset_check, AssetCheckResult\n from pandas import DataFrame\n\n @asset\n def my_asset() -> DataFrame:\n ...\n\n @asset_check(asset=my_asset, description="Check that my asset has enough rows")\n def my_asset_has_enough_rows(my_asset: DataFrame) -> AssetCheckResult:\n num_rows = my_asset.shape[0]\n return AssetCheckResult(passed=num_rows > 5, metadata={"num_rows": num_rows})\n """\n\n def inner(fn: AssetCheckFunction) -> AssetChecksDefinition:\n check.callable_param(fn, "fn")\n resolved_name = name or fn.__name__\n asset_key = AssetKey.from_coercible_or_definition(asset)\n\n out = Out(dagster_type=None)\n input_tuples_by_asset_key = _build_asset_check_input(resolved_name, asset_key, fn)\n if len(input_tuples_by_asset_key) == 0:\n raise DagsterInvalidDefinitionError(\n f"No target asset provided when defining check '{resolved_name}'"\n )\n\n if len(input_tuples_by_asset_key) > 1:\n raise DagsterInvalidDefinitionError(\n f"When defining check '{resolved_name}', Multiple target assets provided:"\n f" {[key.to_user_string() for key in input_tuples_by_asset_key.keys()]}. Only one"\n " is allowed."\n )\n\n resolved_asset_key = next(iter(input_tuples_by_asset_key.keys()))\n spec = AssetCheckSpec(\n name=resolved_name,\n description=description,\n asset=resolved_asset_key,\n )\n\n op_def = _Op(\n name=spec.get_python_identifier(),\n ins=dict(input_tuples_by_asset_key.values()),\n out=out,\n # Any resource requirements specified as arguments will be identified as\n # part of the Op definition instantiation\n required_resource_keys=required_resource_keys,\n tags={\n **({"kind": compute_kind} if compute_kind else {}),\n **(op_tags or {}),\n },\n config_schema=config_schema,\n retry_policy=retry_policy,\n )(fn)\n\n checks_def = AssetChecksDefinition(\n node_def=op_def,\n resource_defs={},\n specs=[spec],\n input_output_props=AssetChecksDefinitionInputOutputProps(\n asset_keys_by_input_name={\n input_tuples_by_asset_key[resolved_asset_key][0]: resolved_asset_key\n },\n asset_check_keys_by_output_name={op_def.output_defs[0].name: spec.key},\n ),\n )\n\n return checks_def\n\n return inner
\n
", "current_page_name": "_modules/dagster/_core/definitions/decorators/asset_check_decorator", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.decorators.asset_check_decorator"}, "asset_decorator": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.decorators.asset_decorator

\nfrom collections import Counter\nfrom inspect import Parameter\nfrom typing import (\n    AbstractSet,\n    Any,\n    Callable,\n    Dict,\n    Iterable,\n    List,\n    Mapping,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n    Union,\n    cast,\n    overload,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import deprecated_param, experimental_param\nfrom dagster._builtins import Nothing\nfrom dagster._config import UserConfigSchema\nfrom dagster._core.decorator_utils import get_function_params, get_valid_name_permutations\nfrom dagster._core.definitions.asset_dep import AssetDep, CoercibleToAssetDep\nfrom dagster._core.definitions.auto_materialize_policy import AutoMaterializePolicy\nfrom dagster._core.definitions.config import ConfigMapping\nfrom dagster._core.definitions.freshness_policy import FreshnessPolicy\nfrom dagster._core.definitions.metadata import ArbitraryMetadataMapping, MetadataUserInput\nfrom dagster._core.definitions.partition_mapping import PartitionMapping\nfrom dagster._core.definitions.resource_annotation import (\n    get_resource_args,\n)\nfrom dagster._core.errors import DagsterInvalidDefinitionError, DagsterInvariantViolationError\nfrom dagster._core.types.dagster_type import DagsterType\nfrom dagster._utils.warnings import (\n    disable_dagster_warnings,\n)\n\nfrom ..asset_check_spec import AssetCheckSpec\nfrom ..asset_in import AssetIn\nfrom ..asset_out import AssetOut\nfrom ..asset_spec import AssetSpec\nfrom ..assets import AssetsDefinition\nfrom ..backfill_policy import BackfillPolicy, BackfillPolicyType\nfrom ..decorators.graph_decorator import graph\nfrom ..decorators.op_decorator import _Op\nfrom ..events import AssetKey, CoercibleToAssetKey, CoercibleToAssetKeyPrefix\nfrom ..input import GraphIn, In\nfrom ..output import GraphOut, Out\nfrom ..partition import PartitionsDefinition\nfrom ..policy import RetryPolicy\nfrom ..resource_definition import ResourceDefinition\nfrom ..utils import DEFAULT_IO_MANAGER_KEY, DEFAULT_OUTPUT, NoValueSentinel\n\n\n@overload\ndef asset(\n    compute_fn: Callable,\n) -> AssetsDefinition: ...\n\n\n@overload\ndef asset(\n    *,\n    name: Optional[str] = ...,\n    key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n    ins: Optional[Mapping[str, AssetIn]] = ...,\n    deps: Optional[Iterable[CoercibleToAssetDep]] = ...,\n    metadata: Optional[Mapping[str, Any]] = ...,\n    description: Optional[str] = ...,\n    config_schema: Optional[UserConfigSchema] = None,\n    required_resource_keys: Optional[Set[str]] = ...,\n    resource_defs: Optional[Mapping[str, object]] = ...,\n    io_manager_def: Optional[object] = ...,\n    io_manager_key: Optional[str] = ...,\n    compute_kind: Optional[str] = ...,\n    dagster_type: Optional[DagsterType] = ...,\n    partitions_def: Optional[PartitionsDefinition] = ...,\n    op_tags: Optional[Mapping[str, Any]] = ...,\n    group_name: Optional[str] = ...,\n    output_required: bool = ...,\n    freshness_policy: Optional[FreshnessPolicy] = ...,\n    auto_materialize_policy: Optional[AutoMaterializePolicy] = ...,\n    backfill_policy: Optional[BackfillPolicy] = ...,\n    retry_policy: Optional[RetryPolicy] = ...,\n    code_version: Optional[str] = ...,\n    key: Optional[CoercibleToAssetKey] = None,\n    non_argument_deps: Optional[Union[Set[AssetKey], Set[str]]] = ...,\n    check_specs: Optional[Sequence[AssetCheckSpec]] = ...,\n) -> Callable[[Callable[..., Any]], AssetsDefinition]: ...\n\n\n
[docs]@experimental_param(param="resource_defs")\n@experimental_param(param="io_manager_def")\n@experimental_param(param="auto_materialize_policy")\n@experimental_param(param="backfill_policy")\n@deprecated_param(\n param="non_argument_deps", breaking_version="2.0.0", additional_warn_text="use `deps` instead."\n)\ndef asset(\n compute_fn: Optional[Callable] = None,\n *,\n name: Optional[str] = None,\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n ins: Optional[Mapping[str, AssetIn]] = None,\n deps: Optional[Iterable[CoercibleToAssetDep]] = None,\n metadata: Optional[ArbitraryMetadataMapping] = None,\n description: Optional[str] = None,\n config_schema: Optional[UserConfigSchema] = None,\n required_resource_keys: Optional[Set[str]] = None,\n resource_defs: Optional[Mapping[str, object]] = None,\n io_manager_def: Optional[object] = None,\n io_manager_key: Optional[str] = None,\n compute_kind: Optional[str] = None,\n dagster_type: Optional[DagsterType] = None,\n partitions_def: Optional[PartitionsDefinition] = None,\n op_tags: Optional[Mapping[str, Any]] = None,\n group_name: Optional[str] = None,\n output_required: bool = True,\n freshness_policy: Optional[FreshnessPolicy] = None,\n auto_materialize_policy: Optional[AutoMaterializePolicy] = None,\n backfill_policy: Optional[BackfillPolicy] = None,\n retry_policy: Optional[RetryPolicy] = None,\n code_version: Optional[str] = None,\n key: Optional[CoercibleToAssetKey] = None,\n non_argument_deps: Optional[Union[Set[AssetKey], Set[str]]] = None,\n check_specs: Optional[Sequence[AssetCheckSpec]] = None,\n) -> Union[AssetsDefinition, Callable[[Callable[..., Any]], AssetsDefinition]]:\n """Create a definition for how to compute an asset.\n\n A software-defined asset is the combination of:\n 1. An asset key, e.g. the name of a table.\n 2. A function, which can be run to compute the contents of the asset.\n 3. A set of upstream assets that are provided as inputs to the function when computing the asset.\n\n Unlike an op, whose dependencies are determined by the graph it lives inside, an asset knows\n about the upstream assets it depends on. The upstream assets are inferred from the arguments\n to the decorated function. The name of the argument designates the name of the upstream asset.\n\n An asset has an op inside it to represent the function that computes it. The name of the op\n will be the segments of the asset key, separated by double-underscores.\n\n Args:\n name (Optional[str]): The name of the asset. If not provided, defaults to the name of the\n decorated function. The asset's name must be a valid name in dagster (ie only contains\n letters, numbers, and _) and may not contain python reserved keywords.\n key_prefix (Optional[Union[str, Sequence[str]]]): If provided, the asset's key is the\n concatenation of the key_prefix and the asset's name, which defaults to the name of\n the decorated function. Each item in key_prefix must be a valid name in dagster (ie only\n contains letters, numbers, and _) and may not contain python reserved keywords.\n ins (Optional[Mapping[str, AssetIn]]): A dictionary that maps input names to information\n about the input.\n deps (Optional[Sequence[Union[AssetDep, AssetsDefinition, SourceAsset, AssetKey, str]]]):\n The assets that are upstream dependencies, but do not correspond to a parameter of the\n decorated function. 
If the AssetsDefinition for a multi_asset is provided, dependencies on\n all assets created by the multi_asset will be created.\n config_schema (Optional[ConfigSchema): The configuration schema for the asset's underlying\n op. If set, Dagster will check that config provided for the op matches this schema and fail\n if it does not. If not set, Dagster will accept any config provided for the op.\n metadata (Optional[Dict[str, Any]]): A dict of metadata entries for the asset.\n required_resource_keys (Optional[Set[str]]): Set of resource handles required by the op.\n io_manager_key (Optional[str]): The resource key of the IOManager used\n for storing the output of the op as an asset, and for loading it in downstream ops\n (default: "io_manager"). Only one of io_manager_key and io_manager_def can be provided.\n io_manager_def (Optional[object]): (Experimental) The IOManager used for\n storing the output of the op as an asset, and for loading it in\n downstream ops. Only one of io_manager_def and io_manager_key can be provided.\n compute_kind (Optional[str]): A string to represent the kind of computation that produces\n the asset, e.g. "dbt" or "spark". It will be displayed in the Dagster UI as a badge on the asset.\n dagster_type (Optional[DagsterType]): Allows specifying type validation functions that\n will be executed on the output of the decorated function after it runs.\n partitions_def (Optional[PartitionsDefinition]): Defines the set of partition keys that\n compose the asset.\n op_tags (Optional[Dict[str, Any]]): A dictionary of tags for the op that computes the asset.\n Frameworks may expect and require certain metadata to be attached to a op. Values that\n are not strings will be json encoded and must meet the criteria that\n `json.loads(json.dumps(value)) == value`.\n group_name (Optional[str]): A string name used to organize multiple assets into groups. If not provided,\n the name "default" is used.\n resource_defs (Optional[Mapping[str, object]]):\n (Experimental) A mapping of resource keys to resources. These resources\n will be initialized during execution, and can be accessed from the\n context within the body of the function.\n output_required (bool): Whether the decorated function will always materialize an asset.\n Defaults to True. If False, the function can return None, which will not be materialized to\n storage and will halt execution of downstream assets.\n freshness_policy (FreshnessPolicy): A constraint telling Dagster how often this asset is intended to be updated\n with respect to its root data.\n auto_materialize_policy (AutoMaterializePolicy): (Experimental) Configure Dagster to automatically materialize\n this asset according to its FreshnessPolicy and when upstream dependencies change.\n backfill_policy (BackfillPolicy): (Experimental) Configure Dagster to backfill this asset according to its\n BackfillPolicy.\n retry_policy (Optional[RetryPolicy]): The retry policy for the op that computes the asset.\n code_version (Optional[str]): (Experimental) Version of the code that generates this asset. 
In\n general, versions should be set only for code that deterministically produces the same\n output when given the same inputs.\n check_specs (Optional[Sequence[AssetCheckSpec]]): (Experimental) Specs for asset checks that\n execute in the decorated function after materializing the asset.\n non_argument_deps (Optional[Union[Set[AssetKey], Set[str]]]): Deprecated, use deps instead.\n Set of asset keys that are upstream dependencies, but do not pass an input to the asset.\n key (Optional[CoeercibleToAssetKey]): The key for this asset. If provided, cannot specify key_prefix or name.\n\n Examples:\n .. code-block:: python\n\n @asset\n def my_asset(my_upstream_asset: int) -> int:\n return my_upstream_asset + 1\n """\n\n def create_asset():\n upstream_asset_deps = _deps_and_non_argument_deps_to_asset_deps(\n deps=deps, non_argument_deps=non_argument_deps\n )\n\n return _Asset(\n name=cast(Optional[str], name), # (mypy bug that it can't infer name is Optional[str])\n key_prefix=key_prefix,\n ins=ins,\n deps=upstream_asset_deps,\n metadata=metadata,\n description=description,\n config_schema=config_schema,\n required_resource_keys=required_resource_keys,\n resource_defs=resource_defs,\n io_manager_key=io_manager_key,\n io_manager_def=io_manager_def,\n compute_kind=check.opt_str_param(compute_kind, "compute_kind"),\n dagster_type=dagster_type,\n partitions_def=partitions_def,\n op_tags=op_tags,\n group_name=group_name,\n output_required=output_required,\n freshness_policy=freshness_policy,\n auto_materialize_policy=auto_materialize_policy,\n backfill_policy=backfill_policy,\n retry_policy=retry_policy,\n code_version=code_version,\n check_specs=check_specs,\n key=key,\n )\n\n if compute_fn is not None:\n return create_asset()(compute_fn)\n\n def inner(fn: Callable[..., Any]) -> AssetsDefinition:\n check.invariant(\n not (io_manager_key and io_manager_def),\n "Both io_manager_key and io_manager_def were provided to `@asset` decorator. Please"\n " provide one or the other. ",\n )\n return create_asset()(fn)\n\n return inner
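As the `@asset` docstring explains, upstream dependencies are inferred from the decorated function's parameter names, while `deps` declares dependencies whose values are not loaded as inputs. A small sketch under those rules; the asset keys are illustrative.

```python
from dagster import AssetKey, asset


@asset
def raw_events():
    return [3, 1, 2]


# The parameter name `raw_events` resolves to the upstream asset of the same
# name, and its stored value is loaded and passed in.
@asset
def sorted_events(raw_events):
    return sorted(raw_events)


# `deps` records an upstream dependency without loading a value, and
# `key_prefix` is prepended to the function name to form the asset key.
@asset(deps=[AssetKey("external_table")], key_prefix="analytics")
def summary():
    return "derived from external_table out of band"


assert summary.key == AssetKey(["analytics", "summary"])
```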
\n\n\ndef _resolve_key_and_name(\n *,\n key: Optional[CoercibleToAssetKey],\n key_prefix: Optional[CoercibleToAssetKeyPrefix],\n name: Optional[str],\n decorator: str,\n fn: Callable[..., Any],\n) -> Tuple[AssetKey, str]:\n if (name or key_prefix) and key:\n raise DagsterInvalidDefinitionError(\n f"Cannot specify a name or key prefix for {decorator} when the key"\n " argument is provided."\n )\n key_prefix_list = [key_prefix] if isinstance(key_prefix, str) else key_prefix\n key = AssetKey.from_coercible(key) if key else None\n assigned_name = name or fn.__name__\n return (\n (\n # the filter here appears unnecessary per typing, but this exists\n # historically so keeping it here to be conservative in case users\n # can get Nones into the key_prefix_list somehow\n AssetKey(list(filter(None, [*(key_prefix_list or []), assigned_name])))\n if not key\n else key\n ),\n assigned_name,\n )\n\n\nclass _Asset:\n def __init__(\n self,\n name: Optional[str] = None,\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n ins: Optional[Mapping[str, AssetIn]] = None,\n deps: Optional[Iterable[AssetDep]] = None,\n metadata: Optional[ArbitraryMetadataMapping] = None,\n description: Optional[str] = None,\n config_schema: Optional[UserConfigSchema] = None,\n required_resource_keys: Optional[Set[str]] = None,\n resource_defs: Optional[Mapping[str, object]] = None,\n io_manager_key: Optional[str] = None,\n io_manager_def: Optional[object] = None,\n compute_kind: Optional[str] = None,\n dagster_type: Optional[DagsterType] = None,\n partitions_def: Optional[PartitionsDefinition] = None,\n op_tags: Optional[Mapping[str, Any]] = None,\n group_name: Optional[str] = None,\n output_required: bool = True,\n freshness_policy: Optional[FreshnessPolicy] = None,\n auto_materialize_policy: Optional[AutoMaterializePolicy] = None,\n backfill_policy: Optional[BackfillPolicy] = None,\n retry_policy: Optional[RetryPolicy] = None,\n code_version: Optional[str] = None,\n key: Optional[CoercibleToAssetKey] = None,\n check_specs: Optional[Sequence[AssetCheckSpec]] = None,\n ):\n self.name = name\n self.key_prefix = key_prefix\n self.ins = ins or {}\n self.deps = deps or []\n self.metadata = metadata\n self.description = description\n self.required_resource_keys = check.opt_set_param(\n required_resource_keys, "required_resource_keys"\n )\n self.io_manager_key = io_manager_key\n self.io_manager_def = io_manager_def\n self.config_schema = config_schema\n self.compute_kind = compute_kind\n self.dagster_type = dagster_type\n self.partitions_def = partitions_def\n self.op_tags = op_tags\n self.resource_defs = dict(check.opt_mapping_param(resource_defs, "resource_defs"))\n self.group_name = group_name\n self.output_required = output_required\n self.freshness_policy = freshness_policy\n self.retry_policy = retry_policy\n self.auto_materialize_policy = auto_materialize_policy\n self.backfill_policy = backfill_policy\n self.code_version = code_version\n self.check_specs = check_specs\n self.key = key\n\n def __call__(self, fn: Callable) -> AssetsDefinition:\n from dagster._config.pythonic_config import (\n validate_resource_annotated_function,\n )\n from dagster._core.execution.build_resources import wrap_resources_for_execution\n\n validate_resource_annotated_function(fn)\n\n asset_ins = build_asset_ins(fn, self.ins or {}, {dep.asset_key for dep in self.deps})\n\n out_asset_key, asset_name = _resolve_key_and_name(\n key=self.key,\n key_prefix=self.key_prefix,\n name=self.name,\n fn=fn,\n decorator="@asset",\n )\n\n with 
disable_dagster_warnings():\n arg_resource_keys = {arg.name for arg in get_resource_args(fn)}\n\n bare_required_resource_keys = set(self.required_resource_keys)\n\n resource_defs_dict = self.resource_defs\n resource_defs_keys = set(resource_defs_dict.keys())\n decorator_resource_keys = bare_required_resource_keys | resource_defs_keys\n\n io_manager_key = self.io_manager_key\n if self.io_manager_def:\n if not io_manager_key:\n io_manager_key = out_asset_key.to_python_identifier("io_manager")\n\n if (\n io_manager_key in self.resource_defs\n and self.resource_defs[io_manager_key] != self.io_manager_def\n ):\n raise DagsterInvalidDefinitionError(\n f"Provided conflicting definitions for io manager key '{io_manager_key}'."\n " Please provide only one definition per key."\n )\n\n resource_defs_dict[io_manager_key] = self.io_manager_def\n\n wrapped_resource_defs = wrap_resources_for_execution(resource_defs_dict)\n\n check.param_invariant(\n len(bare_required_resource_keys) == 0 or len(arg_resource_keys) == 0,\n "Cannot specify resource requirements in both @asset decorator and as arguments"\n " to the decorated function",\n )\n\n io_manager_key = cast(str, io_manager_key) if io_manager_key else DEFAULT_IO_MANAGER_KEY\n\n out = Out(\n metadata=self.metadata or {},\n io_manager_key=io_manager_key,\n dagster_type=self.dagster_type if self.dagster_type else NoValueSentinel,\n description=self.description,\n is_required=self.output_required,\n code_version=self.code_version,\n )\n\n check_specs_by_output_name = _validate_and_assign_output_names_to_check_specs(\n self.check_specs, [out_asset_key]\n )\n check_outs: Mapping[str, Out] = {\n output_name: Out(dagster_type=None)\n for output_name in check_specs_by_output_name.keys()\n }\n\n op_required_resource_keys = decorator_resource_keys - arg_resource_keys\n\n op = _Op(\n name=out_asset_key.to_python_identifier(),\n description=self.description,\n ins=dict(asset_ins.values()),\n out={DEFAULT_OUTPUT: out, **check_outs},\n # Any resource requirements specified as arguments will be identified as\n # part of the Op definition instantiation\n required_resource_keys=op_required_resource_keys,\n tags={\n **({"kind": self.compute_kind} if self.compute_kind else {}),\n **(self.op_tags or {}),\n },\n config_schema=self.config_schema,\n retry_policy=self.retry_policy,\n code_version=self.code_version,\n )(fn)\n\n # check backfill policy is BackfillPolicyType.SINGLE_RUN for non-partitioned asset\n if self.partitions_def is None:\n check.param_invariant(\n (\n self.backfill_policy.policy_type is BackfillPolicyType.SINGLE_RUN\n if self.backfill_policy\n else True\n ),\n "backfill_policy",\n "Non partitioned asset can only have single run backfill policy",\n )\n\n keys_by_input_name = {\n input_name: asset_key for asset_key, (input_name, _) in asset_ins.items()\n }\n partition_mappings = {\n keys_by_input_name[input_name]: asset_in.partition_mapping\n for input_name, asset_in in self.ins.items()\n if asset_in.partition_mapping is not None\n }\n\n partition_mappings = _get_partition_mappings_from_deps(\n partition_mappings=partition_mappings, deps=self.deps, asset_name=asset_name\n )\n\n return AssetsDefinition.dagster_internal_init(\n keys_by_input_name=keys_by_input_name,\n keys_by_output_name={"result": out_asset_key},\n node_def=op,\n partitions_def=self.partitions_def,\n partition_mappings=partition_mappings if partition_mappings else None,\n resource_defs=wrapped_resource_defs,\n group_names_by_key={out_asset_key: self.group_name} if self.group_name else None,\n 
freshness_policies_by_key=(\n {out_asset_key: self.freshness_policy} if self.freshness_policy else None\n ),\n auto_materialize_policies_by_key=(\n {out_asset_key: self.auto_materialize_policy}\n if self.auto_materialize_policy\n else None\n ),\n backfill_policy=self.backfill_policy,\n asset_deps=None, # no asset deps in single-asset decorator\n selected_asset_keys=None, # no subselection in decorator\n can_subset=False,\n metadata_by_key={out_asset_key: self.metadata} if self.metadata else None,\n # see comment in @multi_asset's call to dagster_internal_init for the gory details\n # this is best understood as an _override_ which @asset does not support\n descriptions_by_key=None,\n check_specs_by_output_name=check_specs_by_output_name,\n selected_asset_check_keys=None, # no subselection in decorator\n )\n\n\n
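One detail worth noting from `_Asset.__call__` above: the op backing the asset is named with `AssetKey.to_python_identifier`, i.e. the key segments joined by double underscores. A quick sketch with an illustrative key, assuming that naming holds:

```python
from dagster import AssetKey, asset


@asset(key_prefix=["warehouse", "gold"])
def daily_revenue():
    return 42


assert daily_revenue.key == AssetKey(["warehouse", "gold", "daily_revenue"])
# The underlying op takes the key's python identifier as its name.
assert daily_revenue.op.name == "warehouse__gold__daily_revenue"
```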
[docs]@experimental_param(param="resource_defs")\n@deprecated_param(\n param="non_argument_deps", breaking_version="2.0.0", additional_warn_text="use `deps` instead."\n)\ndef multi_asset(\n *,\n outs: Optional[Mapping[str, AssetOut]] = None,\n name: Optional[str] = None,\n ins: Optional[Mapping[str, AssetIn]] = None,\n deps: Optional[Iterable[CoercibleToAssetDep]] = None,\n description: Optional[str] = None,\n config_schema: Optional[UserConfigSchema] = None,\n required_resource_keys: Optional[Set[str]] = None,\n compute_kind: Optional[str] = None,\n internal_asset_deps: Optional[Mapping[str, Set[AssetKey]]] = None,\n partitions_def: Optional[PartitionsDefinition] = None,\n backfill_policy: Optional[BackfillPolicy] = None,\n op_tags: Optional[Mapping[str, Any]] = None,\n can_subset: bool = False,\n resource_defs: Optional[Mapping[str, object]] = None,\n group_name: Optional[str] = None,\n retry_policy: Optional[RetryPolicy] = None,\n code_version: Optional[str] = None,\n specs: Optional[Sequence[AssetSpec]] = None,\n check_specs: Optional[Sequence[AssetCheckSpec]] = None,\n # deprecated\n non_argument_deps: Optional[Union[Set[AssetKey], Set[str]]] = None,\n) -> Callable[[Callable[..., Any]], AssetsDefinition]:\n """Create a combined definition of multiple assets that are computed using the same op and same\n upstream assets.\n\n Each argument to the decorated function references an upstream asset that this asset depends on.\n The name of the argument designates the name of the upstream asset.\n\n You can set I/O managers keys, auto-materialize policies, freshness policies, group names, etc.\n on an individual asset within the multi-asset by attaching them to the :py:class:`AssetOut`\n corresponding to that asset in the `outs` parameter.\n\n Args:\n name (Optional[str]): The name of the op.\n outs: (Optional[Dict[str, AssetOut]]): The AssetOuts representing the assets materialized by\n this function. AssetOuts detail the output, IO management, and core asset properties.\n This argument is required except when AssetSpecs are used.\n ins (Optional[Mapping[str, AssetIn]]): A dictionary that maps input names to information\n about the input.\n deps (Optional[Sequence[Union[AssetsDefinition, SourceAsset, AssetKey, str]]]):\n The assets that are upstream dependencies, but do not correspond to a parameter of the\n decorated function. If the AssetsDefinition for a multi_asset is provided, dependencies on\n all assets created by the multi_asset will be created.\n config_schema (Optional[ConfigSchema): The configuration schema for the asset's underlying\n op. If set, Dagster will check that config provided for the op matches this schema and fail\n if it does not. If not set, Dagster will accept any config provided for the op.\n required_resource_keys (Optional[Set[str]]): Set of resource handles required by the underlying op.\n compute_kind (Optional[str]): A string to represent the kind of computation that produces\n the asset, e.g. "dbt" or "spark". It will be displayed in the Dagster UI as a badge on the asset.\n internal_asset_deps (Optional[Mapping[str, Set[AssetKey]]]): By default, it is assumed\n that all assets produced by a multi_asset depend on all assets that are consumed by that\n multi asset. If this default is not correct, you pass in a map of output names to a\n corrected set of AssetKeys that they depend on. 
Any AssetKeys in this list must be either\n used as input to the asset or produced within the op.\n partitions_def (Optional[PartitionsDefinition]): Defines the set of partition keys that\n compose the assets.\n backfill_policy (Optional[BackfillPolicy]): The backfill policy for the op that computes the asset.\n op_tags (Optional[Dict[str, Any]]): A dictionary of tags for the op that computes the asset.\n Frameworks may expect and require certain metadata to be attached to a op. Values that\n are not strings will be json encoded and must meet the criteria that\n `json.loads(json.dumps(value)) == value`.\n can_subset (bool): If this asset's computation can emit a subset of the asset\n keys based on the context.selected_assets argument. Defaults to False.\n resource_defs (Optional[Mapping[str, object]]):\n (Experimental) A mapping of resource keys to resources. These resources\n will be initialized during execution, and can be accessed from the\n context within the body of the function.\n group_name (Optional[str]): A string name used to organize multiple assets into groups. This\n group name will be applied to all assets produced by this multi_asset.\n retry_policy (Optional[RetryPolicy]): The retry policy for the op that computes the asset.\n code_version (Optional[str]): (Experimental) Version of the code encapsulated by the multi-asset. If set,\n this is used as a default code version for all defined assets.\n specs (Optional[Sequence[AssetSpec]]): (Experimental) The specifications for the assets materialized\n by this function.\n check_specs (Optional[Sequence[AssetCheckSpec]]): (Experimental) Specs for asset checks that\n execute in the decorated function after materializing the assets.\n non_argument_deps (Optional[Union[Set[AssetKey], Set[str]]]): Deprecated, use deps instead. Set of asset keys that are upstream\n dependencies, but do not pass an input to the multi_asset.\n\n Examples:\n .. 
code-block:: python\n\n # Use IO managers to handle I/O:\n @multi_asset(\n outs={\n "my_string_asset": AssetOut(),\n "my_int_asset": AssetOut(),\n }\n )\n def my_function(upstream_asset: int):\n result = upstream_asset + 1\n return str(result), result\n\n # Handle I/O on your own:\n @multi_asset(\n outs={\n "asset1": AssetOut(),\n "asset2": AssetOut(),\n },\n deps=["asset0"],\n )\n def my_function():\n asset0_value = load(path="asset0")\n asset1_result, asset2_result = do_some_transformation(asset0_value)\n write(asset1_result, path="asset1")\n write(asset2_result, path="asset2")\n return None, None\n """\n from dagster._core.execution.build_resources import wrap_resources_for_execution\n\n specs = check.opt_list_param(specs, "specs", of_type=AssetSpec)\n\n upstream_asset_deps = _deps_and_non_argument_deps_to_asset_deps(\n deps=deps, non_argument_deps=non_argument_deps\n )\n\n asset_deps = check.opt_mapping_param(\n internal_asset_deps, "internal_asset_deps", key_type=str, value_type=set\n )\n required_resource_keys = check.opt_set_param(\n required_resource_keys, "required_resource_keys", of_type=str\n )\n resource_defs = wrap_resources_for_execution(\n check.opt_mapping_param(resource_defs, "resource_defs", key_type=str)\n )\n\n _config_schema = check.opt_mapping_param(\n config_schema, # type: ignore\n "config_schema",\n additional_message="Only dicts are supported for asset config_schema.",\n )\n\n bare_required_resource_keys = set(required_resource_keys)\n resource_defs_keys = set(resource_defs.keys())\n required_resource_keys = bare_required_resource_keys | resource_defs_keys\n\n asset_out_map: Mapping[str, AssetOut] = {} if outs is None else outs\n\n def inner(fn: Callable[..., Any]) -> AssetsDefinition:\n op_name = name or fn.__name__\n\n if asset_out_map and specs:\n raise DagsterInvalidDefinitionError("Must specify only outs or specs but not both.")\n elif specs:\n output_tuples_by_asset_key = {}\n for asset_spec in specs:\n # output names are asset keys joined with _\n output_name = "_".join(asset_spec.key.path)\n output_tuples_by_asset_key[asset_spec.key] = (\n output_name,\n Out(\n Nothing,\n is_required=not (can_subset or asset_spec.skippable),\n description=asset_spec.description,\n ),\n )\n if upstream_asset_deps:\n raise DagsterInvalidDefinitionError(\n "Can not pass deps and specs to @multi_asset, specify deps on the AssetSpecs"\n " directly."\n )\n if internal_asset_deps:\n raise DagsterInvalidDefinitionError(\n "Can not pass internal_asset_deps and specs to @multi_asset, specify deps on"\n " the AssetSpecs directly."\n )\n\n upstream_keys = set()\n for spec in specs:\n for dep in spec.deps:\n if dep.asset_key not in output_tuples_by_asset_key:\n upstream_keys.add(dep.asset_key)\n if (\n dep.asset_key in output_tuples_by_asset_key\n and dep.partition_mapping is not None\n ):\n # self-dependent asset also needs to be considered an upstream_key\n upstream_keys.add(dep.asset_key)\n\n explicit_ins = ins or {}\n # get which asset keys have inputs set\n loaded_upstreams = build_asset_ins(fn, explicit_ins, deps=set())\n unexpected_upstreams = {\n key for key in loaded_upstreams.keys() if key not in upstream_keys\n }\n if unexpected_upstreams:\n raise DagsterInvalidDefinitionError(\n f"Asset inputs {unexpected_upstreams} do not have dependencies on the passed"\n " AssetSpec(s). 
Set the deps on the appropriate AssetSpec(s)."\n )\n remaining_upstream_keys = {key for key in upstream_keys if key not in loaded_upstreams}\n asset_ins = build_asset_ins(fn, explicit_ins, deps=remaining_upstream_keys)\n else:\n asset_ins = build_asset_ins(\n fn,\n ins or {},\n deps=(\n {dep.asset_key for dep in upstream_asset_deps} if upstream_asset_deps else set()\n ),\n )\n output_tuples_by_asset_key = build_asset_outs(asset_out_map)\n # validate that the asset_deps make sense\n valid_asset_deps = set(asset_ins.keys()) | set(output_tuples_by_asset_key.keys())\n for out_name, asset_keys in asset_deps.items():\n if asset_out_map and out_name not in asset_out_map:\n check.failed(\n f"Invalid out key '{out_name}' supplied to `internal_asset_deps` argument"\n f" for multi-asset {op_name}. Must be one of the outs for this multi-asset"\n f" {list(asset_out_map.keys())[:20]}.",\n )\n invalid_asset_deps = asset_keys.difference(valid_asset_deps)\n check.invariant(\n not invalid_asset_deps,\n f"Invalid asset dependencies: {invalid_asset_deps} specified in"\n f" `internal_asset_deps` argument for multi-asset '{op_name}' on key"\n f" '{out_name}'. Each specified asset key must be associated with an input to"\n " the asset or produced by this asset. Valid keys:"\n f" {list(valid_asset_deps)[:20]}",\n )\n\n arg_resource_keys = {arg.name for arg in get_resource_args(fn)}\n check.param_invariant(\n len(bare_required_resource_keys or []) == 0 or len(arg_resource_keys) == 0,\n "Cannot specify resource requirements in both @multi_asset decorator and as"\n " arguments to the decorated function",\n )\n\n asset_outs_by_output_name: Mapping[str, Out] = dict(output_tuples_by_asset_key.values())\n\n check_specs_by_output_name = _validate_and_assign_output_names_to_check_specs(\n check_specs, list(output_tuples_by_asset_key.keys())\n )\n check_outs_by_output_name: Mapping[str, Out] = {\n output_name: Out(dagster_type=None, is_required=not can_subset)\n for output_name in check_specs_by_output_name.keys()\n }\n overlapping_output_names = (\n asset_outs_by_output_name.keys() & check_outs_by_output_name.keys()\n )\n check.invariant(\n len(overlapping_output_names) == 0,\n f"Check output names overlap with asset output names: {overlapping_output_names}",\n )\n combined_outs_by_output_name: Mapping[str, Out] = {\n **asset_outs_by_output_name,\n **check_outs_by_output_name,\n }\n\n with disable_dagster_warnings():\n op_required_resource_keys = required_resource_keys - arg_resource_keys\n\n op = _Op(\n name=op_name,\n description=description,\n ins=dict(asset_ins.values()),\n out=combined_outs_by_output_name,\n required_resource_keys=op_required_resource_keys,\n tags={\n **({"kind": compute_kind} if compute_kind else {}),\n **(op_tags or {}),\n },\n config_schema=_config_schema,\n retry_policy=retry_policy,\n code_version=code_version,\n )(fn)\n\n keys_by_input_name = {\n input_name: asset_key for asset_key, (input_name, _) in asset_ins.items()\n }\n keys_by_output_name = {\n output_name: asset_key\n for asset_key, (output_name, _) in output_tuples_by_asset_key.items()\n }\n partition_mappings = {\n keys_by_input_name[input_name]: asset_in.partition_mapping\n for input_name, asset_in in (ins or {}).items()\n if asset_in.partition_mapping is not None\n }\n\n if upstream_asset_deps:\n partition_mappings = _get_partition_mappings_from_deps(\n partition_mappings=partition_mappings, deps=upstream_asset_deps, asset_name=op_name\n )\n\n if specs:\n internal_deps = {\n spec.key: {dep.asset_key for dep in spec.deps}\n for spec 
in specs\n if spec.deps is not None\n }\n props_by_asset_key: Mapping[AssetKey, Union[AssetSpec, AssetOut]] = {\n spec.key: spec for spec in specs\n }\n # Add PartitionMappings specified via AssetSpec.deps to partition_mappings dictionary. Error on duplicates\n for spec in specs:\n for dep in spec.deps:\n if dep.partition_mapping is None:\n continue\n if partition_mappings.get(dep.asset_key, None) is None:\n partition_mappings[dep.asset_key] = dep.partition_mapping\n continue\n if partition_mappings[dep.asset_key] == dep.partition_mapping:\n continue\n else:\n raise DagsterInvalidDefinitionError(\n f"Two different PartitionMappings for {dep.asset_key} provided for"\n f" multi_asset {op_name}. Please use the same PartitionMapping for"\n f" {dep.asset_key}."\n )\n\n else:\n internal_deps = {keys_by_output_name[name]: asset_deps[name] for name in asset_deps}\n props_by_asset_key = {\n keys_by_output_name[output_name]: asset_out\n for output_name, asset_out in asset_out_map.items()\n }\n\n # handle properties defined ons AssetSpecs or AssetOuts\n group_names_by_key = {\n asset_key: props.group_name\n for asset_key, props in props_by_asset_key.items()\n if props.group_name is not None\n }\n if group_name:\n check.invariant(\n not group_names_by_key,\n "Cannot set group_name parameter on multi_asset if one or more of the"\n " AssetSpecs/AssetOuts supplied to this multi_asset have a group_name defined.",\n )\n group_names_by_key = {asset_key: group_name for asset_key in props_by_asset_key}\n\n freshness_policies_by_key = {\n asset_key: props.freshness_policy\n for asset_key, props in props_by_asset_key.items()\n if props.freshness_policy is not None\n }\n auto_materialize_policies_by_key = {\n asset_key: props.auto_materialize_policy\n for asset_key, props in props_by_asset_key.items()\n if props.auto_materialize_policy is not None\n }\n metadata_by_key = {\n asset_key: props.metadata\n for asset_key, props in props_by_asset_key.items()\n if props.metadata is not None\n }\n\n return AssetsDefinition.dagster_internal_init(\n keys_by_input_name=keys_by_input_name,\n keys_by_output_name=keys_by_output_name,\n node_def=op,\n asset_deps=internal_deps,\n partitions_def=partitions_def,\n partition_mappings=partition_mappings if partition_mappings else None,\n can_subset=can_subset,\n resource_defs=resource_defs,\n group_names_by_key=group_names_by_key,\n freshness_policies_by_key=freshness_policies_by_key,\n auto_materialize_policies_by_key=auto_materialize_policies_by_key,\n backfill_policy=backfill_policy,\n selected_asset_keys=None, # no subselection in decorator\n # descriptions by key is more accurately understood as _overriding_ the descriptions\n # by key that are in the OutputDefinitions associated with the asset key.\n # This is a dangerous construction liable for bugs. Instead there should be a\n # canonical source of asset descriptions in AssetsDefinintion and if we need\n # to create a memoized cached dictionary of asset keys for perf or something we do\n # that in the `__init__` or on demand.\n #\n # This is actually an override. We do not override descriptions\n # in OutputDefinitions in @multi_asset\n descriptions_by_key=None,\n metadata_by_key=metadata_by_key,\n check_specs_by_output_name=check_specs_by_output_name,\n selected_asset_check_keys=None, # no subselection in decorator\n )\n\n return inner
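A hedged sketch of the two ways `@multi_asset` above can be used: `outs`, where returned values are routed to IO managers, or the experimental `specs` path, where dependencies live on the `AssetSpec`s and the function handles I/O itself. All names are illustrative.

```python
from dagster import AssetOut, AssetSpec, multi_asset


# With `outs`: the returned tuple is routed to the two assets' IO managers,
# and the parameters are inferred as upstream assets.
@multi_asset(
    outs={
        "clean_users": AssetOut(),
        "clean_orders": AssetOut(),
    }
)
def clean_tables(raw_users, raw_orders):
    return [u for u in raw_users if u], [o for o in raw_orders if o]


# With `specs` (experimental): declare the assets and their deps on the
# AssetSpecs, and perform any writes inside the function body.
@multi_asset(
    specs=[
        AssetSpec("users_report", deps=["clean_users"]),
        AssetSpec("orders_report", deps=["clean_orders"]),
    ]
)
def reports():
    ...  # write both reports out of band; nothing is returned
```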
\n\n\ndef get_function_params_without_context_or_config_or_resources(fn: Callable) -> List[Parameter]:\n params = get_function_params(fn)\n is_context_provided = len(params) > 0 and params[0].name in get_valid_name_permutations(\n "context"\n )\n input_params = params[1:] if is_context_provided else params\n\n resource_arg_names = {arg.name for arg in get_resource_args(fn)}\n\n new_input_args = []\n for input_arg in input_params:\n if input_arg.name != "config" and input_arg.name not in resource_arg_names:\n new_input_args.append(input_arg)\n\n return new_input_args\n\n\ndef stringify_asset_key_to_input_name(asset_key: AssetKey) -> str:\n return "_".join(asset_key.path).replace("-", "_")\n\n\ndef build_asset_ins(\n fn: Callable,\n asset_ins: Mapping[str, AssetIn],\n deps: Optional[AbstractSet[AssetKey]],\n) -> Mapping[AssetKey, Tuple[str, In]]:\n """Creates a mapping from AssetKey to (name of input, In object)."""\n deps = check.opt_set_param(deps, "deps", AssetKey)\n\n new_input_args = get_function_params_without_context_or_config_or_resources(fn)\n\n non_var_input_param_names = [\n param.name for param in new_input_args if param.kind == Parameter.POSITIONAL_OR_KEYWORD\n ]\n has_kwargs = any(param.kind == Parameter.VAR_KEYWORD for param in new_input_args)\n\n all_input_names = set(non_var_input_param_names) | asset_ins.keys()\n\n if not has_kwargs:\n for in_key, asset_in in asset_ins.items():\n if in_key not in non_var_input_param_names and (\n not isinstance(asset_in.dagster_type, DagsterType)\n or not asset_in.dagster_type.is_nothing\n ):\n raise DagsterInvalidDefinitionError(\n f"Key '{in_key}' in provided ins dict does not correspond to any of the names "\n "of the arguments to the decorated function"\n )\n\n ins_by_asset_key: Dict[AssetKey, Tuple[str, In]] = {}\n for input_name in all_input_names:\n asset_key = None\n\n if input_name in asset_ins:\n asset_key = asset_ins[input_name].key\n metadata = asset_ins[input_name].metadata or {}\n key_prefix = asset_ins[input_name].key_prefix\n input_manager_key = asset_ins[input_name].input_manager_key\n dagster_type = asset_ins[input_name].dagster_type\n else:\n metadata = {}\n key_prefix = None\n input_manager_key = None\n dagster_type = NoValueSentinel\n\n asset_key = asset_key or AssetKey(list(filter(None, [*(key_prefix or []), input_name])))\n\n ins_by_asset_key[asset_key] = (\n input_name.replace("-", "_"),\n In(metadata=metadata, input_manager_key=input_manager_key, dagster_type=dagster_type),\n )\n\n for asset_key in deps:\n if asset_key in ins_by_asset_key:\n raise DagsterInvalidDefinitionError(\n f"deps value {asset_key} also declared as input/AssetIn"\n )\n # mypy doesn't realize that Nothing is a valid type here\n ins_by_asset_key[asset_key] = (\n stringify_asset_key_to_input_name(asset_key),\n In(cast(type, Nothing)),\n )\n\n return ins_by_asset_key\n\n\n@overload\ndef graph_asset(\n compose_fn: Callable,\n) -> AssetsDefinition: ...\n\n\n@overload\ndef graph_asset(\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n ins: Optional[Mapping[str, AssetIn]] = None,\n config: Optional[Union[ConfigMapping, Mapping[str, Any]]] = None,\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n group_name: Optional[str] = None,\n partitions_def: Optional[PartitionsDefinition] = None,\n metadata: Optional[MetadataUserInput] = ...,\n freshness_policy: Optional[FreshnessPolicy] = ...,\n auto_materialize_policy: Optional[AutoMaterializePolicy] = ...,\n backfill_policy: Optional[BackfillPolicy] = ...,\n resource_defs: 
Optional[Mapping[str, ResourceDefinition]] = ...,\n check_specs: Optional[Sequence[AssetCheckSpec]] = None,\n key: Optional[CoercibleToAssetKey] = None,\n) -> Callable[[Callable[..., Any]], AssetsDefinition]: ...\n\n\n
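`build_asset_ins` above is what ties parameter names and `AssetIn` entries to upstream asset keys. A small sketch of the renaming case, assuming an illustrative upstream key:

```python
from dagster import AssetIn, AssetKey, asset


# The parameter is named `raw`, but the upstream asset key is `raw_events`.
@asset(ins={"raw": AssetIn(key="raw_events")})
def deduped_events(raw):
    return sorted(set(raw))


assert deduped_events.keys_by_input_name["raw"] == AssetKey("raw_events")
```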
[docs]def graph_asset(\n compose_fn: Optional[Callable] = None,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n ins: Optional[Mapping[str, AssetIn]] = None,\n config: Optional[Union[ConfigMapping, Mapping[str, Any]]] = None,\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n group_name: Optional[str] = None,\n partitions_def: Optional[PartitionsDefinition] = None,\n metadata: Optional[MetadataUserInput] = None,\n freshness_policy: Optional[FreshnessPolicy] = None,\n auto_materialize_policy: Optional[AutoMaterializePolicy] = None,\n backfill_policy: Optional[BackfillPolicy] = None,\n resource_defs: Optional[Mapping[str, ResourceDefinition]] = None,\n check_specs: Optional[Sequence[AssetCheckSpec]] = None,\n key: Optional[CoercibleToAssetKey] = None,\n) -> Union[AssetsDefinition, Callable[[Callable[..., Any]], AssetsDefinition]]:\n """Creates a software-defined asset that's computed using a graph of ops.\n\n This decorator is meant to decorate a function that composes a set of ops or graphs to define\n the dependencies between them.\n\n Args:\n name (Optional[str]): The name of the asset. If not provided, defaults to the name of the\n decorated function. The asset's name must be a valid name in Dagster (ie only contains\n letters, numbers, and underscores) and may not contain Python reserved keywords.\n description (Optional[str]):\n A human-readable description of the asset.\n ins (Optional[Mapping[str, AssetIn]]): A dictionary that maps input names to information\n about the input.\n config (Optional[Union[ConfigMapping], Mapping[str, Any]):\n Describes how the graph underlying the asset is configured at runtime.\n\n If a :py:class:`ConfigMapping` object is provided, then the graph takes on the config\n schema of this object. The mapping will be applied at runtime to generate the config for\n the graph's constituent nodes.\n\n If a dictionary is provided, then it will be used as the default run config for the\n graph. This means it must conform to the config schema of the underlying nodes. Note\n that the values provided will be viewable and editable in the Dagster UI, so be careful\n with secrets. its constituent nodes.\n\n If no value is provided, then the config schema for the graph is the default (derived\n from the underlying nodes).\n key_prefix (Optional[Union[str, Sequence[str]]]): If provided, the asset's key is the\n concatenation of the key_prefix and the asset's name, which defaults to the name of\n the decorated function. Each item in key_prefix must be a valid name in Dagster (ie only\n contains letters, numbers, and underscores) and may not contain Python reserved keywords.\n group_name (Optional[str]): A string name used to organize multiple assets into groups. If\n not provided, the name "default" is used.\n partitions_def (Optional[PartitionsDefinition]): Defines the set of partition keys that\n compose the asset.\n metadata (Optional[MetadataUserInput]): Dictionary of metadata to be associated with\n the asset.\n freshness_policy (Optional[FreshnessPolicy]): A constraint telling Dagster how often this asset is\n intended to be updated with respect to its root data.\n auto_materialize_policy (Optional[AutoMaterializePolicy]): The AutoMaterializePolicy to use\n for this asset.\n backfill_policy (Optional[BackfillPolicy]): The BackfillPolicy to use for this asset.\n key (Optional[CoeercibleToAssetKey]): The key for this asset. If provided, cannot specify key_prefix or name.\n\n Examples:\n .. 
code-block:: python\n\n @op\n def fetch_files_from_slack(context) -> pd.DataFrame:\n ...\n\n @op\n def store_files_in_table(files) -> None:\n files.to_sql(name="slack_files", con=create_db_connection())\n\n @graph_asset\n def slack_files_table():\n return store_files(fetch_files_from_slack())\n """\n if compose_fn is None:\n return lambda fn: graph_asset( # type: ignore # (decorator pattern)\n fn,\n name=name,\n description=description,\n ins=ins,\n config=config,\n key_prefix=key_prefix,\n group_name=group_name,\n partitions_def=partitions_def,\n metadata=metadata,\n freshness_policy=freshness_policy,\n auto_materialize_policy=auto_materialize_policy,\n backfill_policy=backfill_policy,\n resource_defs=resource_defs,\n check_specs=check_specs,\n key=key,\n )\n else:\n return graph_asset_no_defaults(\n compose_fn=compose_fn,\n name=name,\n description=description,\n ins=ins,\n config=config,\n key_prefix=key_prefix,\n group_name=group_name,\n partitions_def=partitions_def,\n metadata=metadata,\n freshness_policy=freshness_policy,\n auto_materialize_policy=auto_materialize_policy,\n backfill_policy=backfill_policy,\n resource_defs=resource_defs,\n check_specs=check_specs,\n key=key,\n )
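A self-contained sketch of `@graph_asset`, along the lines of the docstring example above but with throwaway ops so it runs without external services:

```python
from dagster import graph_asset, op


@op
def fetch_user_ids():
    return [1, 2, 3]


@op
def enrich(user_ids):
    return [{"id": i, "active": True} for i in user_ids]


# The graph-backed asset composes the two ops; its key defaults to the
# decorated function's name, `enriched_users`.
@graph_asset
def enriched_users():
    return enrich(fetch_user_ids())
```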
\n\n\ndef graph_asset_no_defaults(\n *,\n compose_fn: Callable,\n name: Optional[str],\n description: Optional[str],\n ins: Optional[Mapping[str, AssetIn]],\n config: Optional[Union[ConfigMapping, Mapping[str, Any]]],\n key_prefix: Optional[CoercibleToAssetKeyPrefix],\n group_name: Optional[str],\n partitions_def: Optional[PartitionsDefinition],\n metadata: Optional[MetadataUserInput],\n freshness_policy: Optional[FreshnessPolicy],\n auto_materialize_policy: Optional[AutoMaterializePolicy],\n backfill_policy: Optional[BackfillPolicy],\n resource_defs: Optional[Mapping[str, ResourceDefinition]],\n check_specs: Optional[Sequence[AssetCheckSpec]],\n key: Optional[CoercibleToAssetKey],\n) -> AssetsDefinition:\n ins = ins or {}\n asset_ins = build_asset_ins(compose_fn, ins or {}, set())\n out_asset_key, _asset_name = _resolve_key_and_name(\n key=key,\n key_prefix=key_prefix,\n name=name,\n decorator="@graph_asset",\n fn=compose_fn,\n )\n\n keys_by_input_name = {input_name: asset_key for asset_key, (input_name, _) in asset_ins.items()}\n partition_mappings = {\n input_name: asset_in.partition_mapping\n for input_name, asset_in in ins.items()\n if asset_in.partition_mapping\n }\n\n check_specs_by_output_name = _validate_and_assign_output_names_to_check_specs(\n check_specs, [out_asset_key]\n )\n check_outs_by_output_name: Mapping[str, GraphOut] = {\n output_name: GraphOut() for output_name in check_specs_by_output_name.keys()\n }\n\n combined_outs_by_output_name: Mapping = {\n "result": GraphOut(),\n **check_outs_by_output_name,\n }\n\n op_graph = graph(\n name=out_asset_key.to_python_identifier(),\n description=description,\n config=config,\n ins={input_name: GraphIn() for _, (input_name, _) in asset_ins.items()},\n out=combined_outs_by_output_name,\n )(compose_fn)\n return AssetsDefinition.from_graph(\n op_graph,\n keys_by_input_name=keys_by_input_name,\n keys_by_output_name={"result": out_asset_key},\n partitions_def=partitions_def,\n partition_mappings=partition_mappings if partition_mappings else None,\n group_name=group_name,\n metadata_by_output_name={"result": metadata} if metadata else None,\n freshness_policies_by_output_name=(\n {"result": freshness_policy} if freshness_policy else None\n ),\n auto_materialize_policies_by_output_name=(\n {"result": auto_materialize_policy} if auto_materialize_policy else None\n ),\n backfill_policy=backfill_policy,\n descriptions_by_output_name={"result": description} if description else None,\n resource_defs=resource_defs,\n check_specs=check_specs,\n )\n\n\n
[docs]def graph_multi_asset(\n *,\n outs: Mapping[str, AssetOut],\n name: Optional[str] = None,\n ins: Optional[Mapping[str, AssetIn]] = None,\n partitions_def: Optional[PartitionsDefinition] = None,\n backfill_policy: Optional[BackfillPolicy] = None,\n group_name: Optional[str] = None,\n can_subset: bool = False,\n resource_defs: Optional[Mapping[str, ResourceDefinition]] = None,\n check_specs: Optional[Sequence[AssetCheckSpec]] = None,\n) -> Callable[[Callable[..., Any]], AssetsDefinition]:\n """Create a combined definition of multiple assets that are computed using the same graph of\n ops, and the same upstream assets.\n\n Each argument to the decorated function references an upstream asset that this asset depends on.\n The name of the argument designates the name of the upstream asset.\n\n Args:\n name (Optional[str]): The name of the graph.\n outs: (Optional[Dict[str, AssetOut]]): The AssetOuts representing the produced assets.\n ins (Optional[Mapping[str, AssetIn]]): A dictionary that maps input names to information\n about the input.\n partitions_def (Optional[PartitionsDefinition]): Defines the set of partition keys that\n compose the assets.\n backfill_policy (Optional[BackfillPolicy]): The backfill policy for the asset.\n group_name (Optional[str]): A string name used to organize multiple assets into groups. This\n group name will be applied to all assets produced by this multi_asset.\n can_subset (bool): Whether this asset's computation can emit a subset of the asset\n keys based on the context.selected_assets argument. Defaults to False.\n """\n\n def inner(fn: Callable) -> AssetsDefinition:\n partition_mappings = {\n input_name: asset_in.partition_mapping\n for input_name, asset_in in (ins or {}).items()\n if asset_in.partition_mapping\n }\n\n asset_ins = build_asset_ins(fn, ins or {}, set())\n keys_by_input_name = {\n input_name: asset_key for asset_key, (input_name, _) in asset_ins.items()\n }\n asset_outs = build_asset_outs(outs)\n\n check_specs_by_output_name = _validate_and_assign_output_names_to_check_specs(\n check_specs, list(asset_outs.keys())\n )\n check_outs_by_output_name: Mapping[str, GraphOut] = {\n output_name: GraphOut() for output_name in check_specs_by_output_name.keys()\n }\n\n combined_outs_by_output_name = {\n **{output_name: GraphOut() for output_name, _ in asset_outs.values()},\n **check_outs_by_output_name,\n }\n\n op_graph = graph(\n name=name or fn.__name__,\n out=combined_outs_by_output_name,\n )(fn)\n\n # source metadata from the AssetOuts (if any)\n metadata_by_output_name = {\n output_name: out.metadata\n for output_name, out in outs.items()\n if isinstance(out, AssetOut) and out.metadata is not None\n }\n\n # source freshness policies from the AssetOuts (if any)\n freshness_policies_by_output_name = {\n output_name: out.freshness_policy\n for output_name, out in outs.items()\n if isinstance(out, AssetOut) and out.freshness_policy is not None\n }\n\n # source auto materialize policies from the AssetOuts (if any)\n auto_materialize_policies_by_output_name = {\n output_name: out.auto_materialize_policy\n for output_name, out in outs.items()\n if isinstance(out, AssetOut) and out.auto_materialize_policy is not None\n }\n\n # source descriptions from the AssetOuts (if any)\n descriptions_by_output_name = {\n output_name: out.description\n for output_name, out in outs.items()\n if isinstance(out, AssetOut) and out.description is not None\n }\n\n return AssetsDefinition.from_graph(\n op_graph,\n keys_by_input_name=keys_by_input_name,\n 
keys_by_output_name={\n output_name: asset_key for asset_key, (output_name, _) in asset_outs.items()\n },\n partitions_def=partitions_def,\n partition_mappings=partition_mappings if partition_mappings else None,\n group_name=group_name,\n can_subset=can_subset,\n metadata_by_output_name=metadata_by_output_name,\n freshness_policies_by_output_name=freshness_policies_by_output_name,\n auto_materialize_policies_by_output_name=auto_materialize_policies_by_output_name,\n backfill_policy=backfill_policy,\n descriptions_by_output_name=descriptions_by_output_name,\n resource_defs=resource_defs,\n check_specs=check_specs,\n )\n\n return inner
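Because the ``graph_multi_asset`` docstring above carries no usage example, here is a minimal hedged sketch; all op and asset names are hypothetical. Returning a dict from the composition function maps each entry onto the output (and asset) of the same name.

.. code-block:: python

    from dagster import AssetOut, graph_multi_asset, op

    @op
    def load_users():
        return [{"id": 1}, {"id": 2}]

    @op
    def count_rows(rows):
        return len(rows)

    @op
    def first_row(rows):
        return rows[0]

    # One graph of ops producing two assets, keyed by the entries of `outs`.
    @graph_multi_asset(outs={"user_count": AssetOut(), "first_user": AssetOut()})
    def user_assets():
        rows = load_users()
        return {"user_count": count_rows(rows), "first_user": first_row(rows)}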
\n\n\ndef build_asset_outs(asset_outs: Mapping[str, AssetOut]) -> Mapping[AssetKey, Tuple[str, Out]]:\n """Creates a mapping from AssetKey to (name of output, Out object)."""\n outs_by_asset_key: Dict[AssetKey, Tuple[str, Out]] = {}\n for output_name, asset_out in asset_outs.items():\n out = asset_out.to_out()\n asset_key = asset_out.key or AssetKey(\n list(filter(None, [*(asset_out.key_prefix or []), output_name]))\n )\n\n outs_by_asset_key[asset_key] = (output_name.replace("-", "_"), out)\n\n return outs_by_asset_key\n\n\ndef _deps_and_non_argument_deps_to_asset_deps(\n deps: Optional[Iterable[CoercibleToAssetDep]],\n non_argument_deps: Optional[Union[Set[AssetKey], Set[str]]],\n) -> Optional[Iterable[AssetDep]]:\n """Helper function for managing deps and non_argument_deps while non_argument_deps is still an accepted parameter.\n Ensures only one of deps and non_argument_deps is provided, then converts the deps to AssetDeps.\n """\n if non_argument_deps is not None and deps is not None:\n raise DagsterInvalidDefinitionError(\n "Cannot specify both deps and non_argument_deps to @asset. Use only deps instead."\n )\n\n if deps is not None:\n return _make_asset_deps(deps)\n\n if non_argument_deps is not None:\n check.set_param(non_argument_deps, "non_argument_deps", of_type=(AssetKey, str))\n return _make_asset_deps(non_argument_deps)\n\n\ndef _make_asset_deps(deps: Optional[Iterable[CoercibleToAssetDep]]) -> Optional[Iterable[AssetDep]]:\n if deps is None:\n return None\n\n # expand any multi_assets into a list of keys\n all_deps = []\n for dep in deps:\n if isinstance(dep, AssetsDefinition) and len(dep.keys) > 1:\n all_deps.extend(dep.keys)\n else:\n all_deps.append(dep)\n\n with disable_dagster_warnings():\n dep_dict = {}\n for dep in all_deps:\n asset_dep = AssetDep.from_coercible(dep)\n\n # we cannot do deduplication via a set because MultiPartitionMappings have an internal\n # dictionary that cannot be hashed. Instead deduplicate by making a dictionary and checking\n # for existing keys. If an asset is specified as a dependency more than once, only error if the\n # dependency is different (ie has a different PartitionMapping)\n if (\n asset_dep.asset_key in dep_dict.keys()\n and asset_dep != dep_dict[asset_dep.asset_key]\n ):\n raise DagsterInvariantViolationError(\n f"Cannot set a dependency on asset {asset_dep.asset_key} more than once per"\n " asset."\n )\n dep_dict[asset_dep.asset_key] = asset_dep\n\n return list(dep_dict.values())\n\n\ndef _validate_and_assign_output_names_to_check_specs(\n check_specs: Optional[Sequence[AssetCheckSpec]], valid_asset_keys: Sequence[AssetKey]\n) -> Mapping[str, AssetCheckSpec]:\n check_specs_by_output_name = {spec.get_python_identifier(): spec for spec in check_specs or []}\n if check_specs and len(check_specs_by_output_name) != len(check_specs):\n duplicates = {\n item: count\n for item, count in Counter(\n [(spec.asset_key, spec.name) for spec in check_specs]\n ).items()\n if count > 1\n }\n\n raise DagsterInvalidDefinitionError(f"Duplicate check specs: {duplicates}")\n\n for spec in check_specs_by_output_name.values():\n if spec.asset_key not in valid_asset_keys:\n raise DagsterInvalidDefinitionError(\n f"Invalid asset key {spec.asset_key} in check spec {spec.name}. 
Must be one of"\n f" {valid_asset_keys}"\n )\n\n return check_specs_by_output_name\n\n\ndef _get_partition_mappings_from_deps(\n partition_mappings: Dict[AssetKey, PartitionMapping], deps: Iterable[AssetDep], asset_name: str\n):\n # Add PartitionMappings specified via AssetDeps to partition_mappings dictionary. Error on duplicates\n for dep in deps:\n if dep.partition_mapping is None:\n continue\n if partition_mappings.get(dep.asset_key, None) is None:\n partition_mappings[dep.asset_key] = dep.partition_mapping\n continue\n if partition_mappings[dep.asset_key] == dep.partition_mapping:\n continue\n else:\n raise DagsterInvalidDefinitionError(\n f"Two different PartitionMappings for {dep.asset_key} provided for"\n f" asset {asset_name}. Please use the same PartitionMapping for"\n f" {dep.asset_key}."\n )\n\n return partition_mappings\n
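A small sketch of the coercion and deduplication behavior implemented by ``_make_asset_deps`` above, assuming ``AssetDep`` is exported from the top-level ``dagster`` package; the asset names are hypothetical.

.. code-block:: python

    from dagster import AssetDep, AssetKey, asset

    @asset
    def users():
        ...

    # "users", AssetKey("users"), and AssetDep("users") coerce to the same key.
    # Identical duplicates are collapsed into one dependency; two different
    # PartitionMappings for the same key would raise an error instead.
    @asset(deps=["users", AssetKey("users"), AssetDep("users")])
    def user_report():
        ...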
", "current_page_name": "_modules/dagster/_core/definitions/decorators/asset_decorator", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.decorators.asset_decorator"}, "graph_decorator": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.decorators.graph_decorator

\nfrom functools import update_wrapper\nfrom typing import Any, Callable, Mapping, Optional, Sequence, Union, overload\n\nimport dagster._check as check\nfrom dagster._core.decorator_utils import format_docstring_for_description\n\nfrom ..config import ConfigMapping\nfrom ..graph_definition import GraphDefinition\nfrom ..input import GraphIn, InputDefinition\nfrom ..output import GraphOut, OutputDefinition\n\n\nclass _Graph:\n    name: Optional[str]\n    description: Optional[str]\n    input_defs: Sequence[InputDefinition]\n    output_defs: Optional[Sequence[OutputDefinition]]\n    ins: Optional[Mapping[str, GraphIn]]\n    out: Optional[Union[GraphOut, Mapping[str, GraphOut]]]\n    tags: Optional[Mapping[str, str]]\n    config_mapping: Optional[ConfigMapping]\n\n    def __init__(\n        self,\n        name: Optional[str] = None,\n        description: Optional[str] = None,\n        input_defs: Optional[Sequence[InputDefinition]] = None,\n        output_defs: Optional[Sequence[OutputDefinition]] = None,\n        ins: Optional[Mapping[str, GraphIn]] = None,\n        out: Optional[Union[GraphOut, Mapping[str, GraphOut]]] = None,\n        tags: Optional[Mapping[str, Any]] = None,\n        config_mapping: Optional[ConfigMapping] = None,\n    ):\n        self.name = check.opt_str_param(name, "name")\n        self.description = check.opt_str_param(description, "description")\n        self.input_defs = check.opt_sequence_param(\n            input_defs, "input_defs", of_type=InputDefinition\n        )\n        self.did_pass_outputs = output_defs is not None or out is not None\n        self.output_defs = check.opt_nullable_sequence_param(\n            output_defs, "output_defs", of_type=OutputDefinition\n        )\n        self.ins = ins\n        self.out = out\n        self.tags = tags\n        self.config_mapping = check.opt_inst_param(config_mapping, "config_mapping", ConfigMapping)\n\n    def __call__(self, fn: Callable[..., Any]) -> GraphDefinition:\n        check.callable_param(fn, "fn")\n\n        if not self.name:\n            self.name = fn.__name__\n\n        if self.ins is not None:\n            input_defs = [inp.to_definition(name) for name, inp in self.ins.items()]\n        else:\n            input_defs = check.opt_list_param(\n                self.input_defs, "input_defs", of_type=InputDefinition\n            )\n\n        if self.out is None:\n            output_defs = self.output_defs\n        elif isinstance(self.out, GraphOut):\n            output_defs = [self.out.to_definition(name=None)]\n        else:\n            check.dict_param(self.out, "out", key_type=str, value_type=GraphOut)\n            output_defs = [out.to_definition(name=name) for name, out in self.out.items()]\n\n        from dagster._core.definitions.composition import do_composition\n\n        (\n            input_mappings,\n            output_mappings,\n            dependencies,\n            node_defs,\n            config_mapping,\n            positional_inputs,\n            node_input_source_assets,\n        ) = do_composition(\n            decorator_name="@graph",\n            graph_name=self.name,\n            fn=fn,\n            provided_input_defs=input_defs,\n            provided_output_defs=output_defs,\n            ignore_output_from_composition_fn=False,\n            config_mapping=self.config_mapping,\n        )\n\n        graph_def = GraphDefinition(\n            name=self.name,\n            dependencies=dependencies,\n            node_defs=node_defs,\n            description=self.description or 
format_docstring_for_description(fn),\n            input_mappings=input_mappings,\n            output_mappings=output_mappings,\n            config=config_mapping,\n            positional_inputs=positional_inputs,\n            tags=self.tags,\n            node_input_source_assets=node_input_source_assets,\n        )\n        update_wrapper(graph_def, fn)\n        return graph_def\n\n\n@overload\ndef graph(compose_fn: Callable) -> GraphDefinition: ...\n\n\n@overload\ndef graph(\n    *,\n    name: Optional[str] = ...,\n    description: Optional[str] = ...,\n    input_defs: Optional[Sequence[InputDefinition]] = ...,\n    output_defs: Optional[Sequence[OutputDefinition]] = ...,\n    ins: Optional[Mapping[str, GraphIn]] = ...,\n    out: Optional[Union[GraphOut, Mapping[str, GraphOut]]] = ...,\n    tags: Optional[Mapping[str, Any]] = ...,\n    config: Optional[Union[ConfigMapping, Mapping[str, Any]]] = ...,\n) -> _Graph: ...\n\n\n
[docs]def graph(\n compose_fn: Optional[Callable] = None,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n input_defs: Optional[Sequence[InputDefinition]] = None,\n output_defs: Optional[Sequence[OutputDefinition]] = None,\n ins: Optional[Mapping[str, GraphIn]] = None,\n out: Optional[Union[GraphOut, Mapping[str, GraphOut]]] = None,\n tags: Optional[Mapping[str, Any]] = None,\n config: Optional[Union[ConfigMapping, Mapping[str, Any]]] = None,\n) -> Union[GraphDefinition, _Graph]:\n """Create an op graph with the specified parameters from the decorated composition function.\n\n Using this decorator allows you to build up a dependency graph by writing a\n function that invokes ops (or other graphs) and passes the output to subsequent invocations.\n\n Args:\n name (Optional[str]):\n The name of the op graph. Must be unique within any :py:class:`RepositoryDefinition` containing the graph.\n description (Optional[str]):\n A human-readable description of the graph.\n input_defs (Optional[List[InputDefinition]]):\n Information about the inputs that this graph maps. Information provided here\n will be combined with what can be inferred from the function signature, with these\n explicit InputDefinitions taking precedence.\n\n Uses of inputs in the body of the decorated composition function will determine\n the :py:class:`InputMappings <InputMapping>` passed to the underlying\n :py:class:`GraphDefinition`.\n output_defs (Optional[List[OutputDefinition]]):\n Output definitions for the graph. If not provided explicitly, these will be inferred from typehints.\n\n Uses of these outputs in the body of the decorated composition function, as well as the\n return value of the decorated function, will be used to infer the appropriate set of\n :py:class:`OutputMappings <OutputMapping>` for the underlying\n :py:class:`GraphDefinition`.\n\n To map multiple outputs, return a dictionary from the composition function.\n ins (Optional[Dict[str, GraphIn]]):\n Information about the inputs that this graph maps. Information provided here\n will be combined with what can be inferred from the function signature, with these\n explicit GraphIn taking precedence.\n out (Optional[Union[GraphOut, Dict[str, GraphOut]]]):\n Information about the outputs that this graph maps. Information provided here will be\n combined with what can be inferred from the return type signature if the function does\n not use yield.\n\n To map multiple outputs, return a dictionary from the composition function.\n tags (Optional[Dict[str, Any]]): Arbitrary metadata for any execution run of the graph.\n Values that are not strings will be json encoded and must meet the criteria that\n `json.loads(json.dumps(value)) == value`. These tag values may be overwritten by tag\n values provided at invocation time.\n\n config (Optional[Union[ConfigMapping], Mapping[str, Any]):\n Describes how the graph is configured at runtime.\n\n If a :py:class:`ConfigMapping` object is provided, then the graph takes on the config\n schema of this object. The mapping will be applied at runtime to generate the config for\n the graph's constituent nodes.\n\n If a dictionary is provided, then it will be used as the default run config for the\n graph. This means it must conform to the config schema of the underlying nodes. Note\n that the values provided will be viewable and editable in the Dagster UI, so be careful\n with secrets. 
\n\n If no value is provided, then the config schema for the graph is the default (derived\n from the underlying nodes).\n """\n if compose_fn is not None:\n check.invariant(description is None)\n return _Graph()(compose_fn)\n\n config_mapping = None\n # Case 1: a dictionary of config is provided, convert to config mapping.\n if config is not None and not isinstance(config, ConfigMapping):\n config = check.dict_param(config, "config", key_type=str)\n config_mapping = ConfigMapping(config_fn=lambda _: config, config_schema=None)\n # Case 2: actual config mapping is provided.\n else:\n config_mapping = config\n\n return _Graph(\n name=name,\n description=description,\n input_defs=input_defs,\n output_defs=output_defs,\n ins=ins,\n out=out,\n tags=tags,\n config_mapping=config_mapping,\n )
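A minimal hedged sketch of the ``@graph`` decorator defined above; the op names are hypothetical, and ``to_job()`` is included only to illustrate that a graph must be turned into a job before it can be executed.

.. code-block:: python

    from dagster import graph, op

    @op
    def extract():
        return [1, 2, 3]

    @op
    def load(rows):
        print(f"loaded {len(rows)} rows")

    # The graph only wires ops together; execution happens through a job.
    @graph
    def etl():
        load(extract())

    etl_job = etl.to_job()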
\n
", "current_page_name": "_modules/dagster/_core/definitions/decorators/graph_decorator", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.decorators.graph_decorator"}, "hook_decorator": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.decorators.hook_decorator

\nfrom functools import update_wrapper\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Callable,\n    Optional,\n    Sequence,\n    Union,\n    cast,\n    overload,\n)\n\nimport dagster._check as check\nfrom dagster._core.errors import DagsterInvalidDefinitionError\n\nfrom ...decorator_utils import get_function_params, validate_expected_params\nfrom ..events import HookExecutionResult\nfrom ..hook_definition import HookDefinition\n\nif TYPE_CHECKING:\n    from dagster._core.events import DagsterEvent\n    from dagster._core.execution.context.hook import HookContext\n\n\ndef _validate_hook_fn_params(fn, expected_positionals):\n    params = get_function_params(fn)\n    missing_positional = validate_expected_params(params, expected_positionals)\n    if missing_positional:\n        raise DagsterInvalidDefinitionError(\n            f"'{fn.__name__}' decorated function does not have required positional "\n            f"parameter '{missing_positional}'. Hook functions should only have keyword arguments "\n            "that match input names and a first positional parameter named 'context' and "\n            "a second positional parameter named 'event_list'."\n        )\n\n\nclass _Hook:\n    def __init__(\n        self,\n        name: Optional[str] = None,\n        required_resource_keys: Optional[AbstractSet[str]] = None,\n        decorated_fn: Optional[Callable[..., Any]] = None,\n    ):\n        self.name = check.opt_str_param(name, "name")\n        self.required_resource_keys = check.opt_set_param(\n            required_resource_keys, "required_resource_keys"\n        )\n        self.decorated_fn = check.opt_callable_param(decorated_fn, "decorated_fn")\n\n    def __call__(self, fn) -> HookDefinition:\n        check.callable_param(fn, "fn")\n\n        if not self.name:\n            self.name = fn.__name__\n\n        expected_positionals = ["context", "event_list"]\n\n        _validate_hook_fn_params(fn, expected_positionals)\n\n        hook_def = HookDefinition(\n            name=self.name or "",\n            hook_fn=fn,\n            required_resource_keys=self.required_resource_keys,\n            decorated_fn=self.decorated_fn or fn,\n        )\n        update_wrapper(cast(Callable[..., Any], hook_def), fn)\n        return hook_def\n\n\n@overload\ndef event_list_hook(\n    hook_fn: Callable,\n) -> HookDefinition:\n    pass\n\n\n@overload\ndef event_list_hook(\n    *,\n    name: Optional[str] = ...,\n    required_resource_keys: Optional[AbstractSet[str]] = ...,\n    decorated_fn: Optional[Callable[..., Any]] = ...,\n) -> _Hook:\n    pass\n\n\ndef event_list_hook(\n    hook_fn: Optional[Callable] = None,\n    *,\n    name: Optional[str] = None,\n    required_resource_keys: Optional[AbstractSet[str]] = None,\n    decorated_fn: Optional[Callable[..., Any]] = None,\n) -> Union[HookDefinition, _Hook]:\n    """Create a generic hook with the specified parameters from the decorated function.\n\n    This decorator is currently used internally by Dagster machinery to support success_hook and\n    failure_hook.\n\n    The user-defined hook function requires two parameters:\n    - A `context` object is passed as the first parameter. The context is an instance of\n        :py:class:`context <HookContext>`, and provides access to system\n        information, such as loggers (context.log), resources (context.resources), the op\n        (context.op) and its execution step (context.step) which triggers this hook.\n    - An `event_list` object is passed as the second paramter. 
It provides the full event list of the\n        associated execution step.\n\n    Args:\n        name (Optional[str]): The name of this hook.\n        required_resource_keys (Optional[AbstractSet[str]]): Keys for the resources required by the\n            hook.\n\n    Examples:\n        .. code-block:: python\n\n            @event_list_hook(required_resource_keys={'slack'})\n            def slack_on_materializations(context, event_list):\n                for event in event_list:\n                    if event.event_type == DagsterEventType.ASSET_MATERIALIZATION:\n                        message = f'{context.op_name} has materialized an asset {event.asset_key}.'\n                        # send a slack message every time a materialization event occurs\n                        context.resources.slack.send_message(message)\n\n\n    """\n    # This case is for when decorator is used bare, without arguments.\n    # e.g. @event_list_hook versus @event_list_hook()\n    if hook_fn is not None:\n        check.invariant(required_resource_keys is None)\n        return _Hook()(hook_fn)\n\n    return _Hook(\n        name=name, required_resource_keys=required_resource_keys, decorated_fn=decorated_fn\n    )\n\n\nSuccessOrFailureHookFn = Callable[["HookContext"], Any]\n\n\n@overload\ndef success_hook(hook_fn: SuccessOrFailureHookFn) -> HookDefinition: ...\n\n\n@overload\ndef success_hook(\n    *,\n    name: Optional[str] = ...,\n    required_resource_keys: Optional[AbstractSet[str]] = ...,\n) -> Callable[[SuccessOrFailureHookFn], HookDefinition]: ...\n\n\n
[docs]def success_hook(\n hook_fn: Optional[SuccessOrFailureHookFn] = None,\n *,\n name: Optional[str] = None,\n required_resource_keys: Optional[AbstractSet[str]] = None,\n) -> Union[HookDefinition, Callable[[SuccessOrFailureHookFn], HookDefinition]]:\n """Create a hook on step success events with the specified parameters from the decorated function.\n\n Args:\n name (Optional[str]): The name of this hook.\n required_resource_keys (Optional[AbstractSet[str]]): Keys for the resources required by the\n hook.\n\n Examples:\n .. code-block:: python\n\n @success_hook(required_resource_keys={'slack'})\n def slack_message_on_success(context):\n message = 'op {} succeeded'.format(context.op.name)\n context.resources.slack.send_message(message)\n\n @success_hook\n def do_something_on_success(context):\n do_something()\n\n\n """\n\n def wrapper(fn: SuccessOrFailureHookFn) -> HookDefinition:\n check.callable_param(fn, "fn")\n\n expected_positionals = ["context"]\n _validate_hook_fn_params(fn, expected_positionals)\n\n if name is None or callable(name):\n _name = fn.__name__\n else:\n _name = name\n\n @event_list_hook(name=_name, required_resource_keys=required_resource_keys, decorated_fn=fn)\n def _success_hook(\n context: "HookContext", event_list: Sequence["DagsterEvent"]\n ) -> HookExecutionResult:\n for event in event_list:\n if event.is_step_success:\n fn(context)\n return HookExecutionResult(hook_name=_name, is_skipped=False)\n\n # hook is skipped when fn didn't run\n return HookExecutionResult(hook_name=_name, is_skipped=True)\n\n return _success_hook\n\n # This case is for when decorator is used bare, without arguments, i.e. @success_hook\n if hook_fn is not None:\n check.invariant(required_resource_keys is None)\n return wrapper(hook_fn)\n\n return wrapper
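A short hedged sketch of attaching a hook created with ``@success_hook`` to every op in a job through the ``hooks`` argument of ``@job``; the hook, op, and job names are hypothetical.

.. code-block:: python

    from dagster import job, op, success_hook

    @success_hook
    def log_success(context):
        context.log.info(f"{context.op.name} finished successfully")

    @op
    def do_work():
        return 1

    # Passing the hook at job construction applies it to every op invocation.
    @job(hooks={log_success})
    def hooked_job():
        do_work()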
\n\n\n@overload\ndef failure_hook(name: SuccessOrFailureHookFn) -> HookDefinition: ...\n\n\n@overload\ndef failure_hook(\n name: Optional[str] = ...,\n required_resource_keys: Optional[AbstractSet[str]] = ...,\n) -> Callable[[SuccessOrFailureHookFn], HookDefinition]: ...\n\n\n
[docs]def failure_hook(\n name: Optional[Union[SuccessOrFailureHookFn, str]] = None,\n required_resource_keys: Optional[AbstractSet[str]] = None,\n) -> Union[HookDefinition, Callable[[SuccessOrFailureHookFn], HookDefinition]]:\n """Create a hook on step failure events with the specified parameters from the decorated function.\n\n Args:\n name (Optional[str]): The name of this hook.\n required_resource_keys (Optional[AbstractSet[str]]): Keys for the resources required by the\n hook.\n\n Examples:\n .. code-block:: python\n\n @failure_hook(required_resource_keys={'slack'})\n def slack_message_on_failure(context):\n message = 'op {} failed'.format(context.op.name)\n context.resources.slack.send_message(message)\n\n @failure_hook\n def do_something_on_failure(context):\n do_something()\n\n\n """\n\n def wrapper(fn: Callable[["HookContext"], Any]) -> HookDefinition:\n check.callable_param(fn, "fn")\n\n expected_positionals = ["context"]\n _validate_hook_fn_params(fn, expected_positionals)\n\n if name is None or callable(name):\n _name = fn.__name__\n else:\n _name = name\n\n @event_list_hook(name=_name, required_resource_keys=required_resource_keys, decorated_fn=fn)\n def _failure_hook(\n context: "HookContext", event_list: Sequence["DagsterEvent"]\n ) -> HookExecutionResult:\n for event in event_list:\n if event.is_step_failure:\n fn(context)\n return HookExecutionResult(hook_name=_name, is_skipped=False)\n\n # hook is skipped when fn didn't run\n return HookExecutionResult(hook_name=_name, is_skipped=True)\n\n return _failure_hook\n\n # This case is for when decorator is used bare, without arguments, i.e. @failure_hook\n if callable(name):\n check.invariant(required_resource_keys is None)\n return wrapper(name)\n\n return wrapper
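A hedged sketch of scoping a ``@failure_hook`` to a single op invocation rather than to a whole job, assuming the ``with_hooks`` pattern on op invocations applies here; all names are hypothetical.

.. code-block:: python

    from dagster import failure_hook, job, op

    @failure_hook
    def alert_on_failure(context):
        context.log.error(f"{context.op.name} failed")

    @op
    def risky():
        raise Exception("boom")

    @op
    def safe():
        return 1

    # Only the `risky` invocation carries the hook; `safe` is unaffected.
    @job
    def partially_hooked_job():
        risky.with_hooks({alert_on_failure})()
        safe()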
\n
", "current_page_name": "_modules/dagster/_core/definitions/decorators/hook_decorator", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.decorators.hook_decorator"}, "job_decorator": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.decorators.job_decorator

\nfrom functools import update_wrapper\nfrom typing import TYPE_CHECKING, AbstractSet, Any, Callable, Mapping, Optional, Union, overload\n\nimport dagster._check as check\nfrom dagster._annotations import deprecated_param\nfrom dagster._core.decorator_utils import format_docstring_for_description\n\nfrom ..config import ConfigMapping\nfrom ..graph_definition import GraphDefinition\nfrom ..hook_definition import HookDefinition\nfrom ..job_definition import JobDefinition\nfrom ..logger_definition import LoggerDefinition\nfrom ..metadata import RawMetadataValue\nfrom ..policy import RetryPolicy\nfrom ..resource_definition import ResourceDefinition\nfrom ..version_strategy import VersionStrategy\n\nif TYPE_CHECKING:\n    from ..executor_definition import ExecutorDefinition\n    from ..partition import PartitionedConfig, PartitionsDefinition\n    from ..run_config import RunConfig\n\n\nclass _Job:\n    def __init__(\n        self,\n        name: Optional[str] = None,\n        description: Optional[str] = None,\n        tags: Optional[Mapping[str, Any]] = None,\n        metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n        resource_defs: Optional[Mapping[str, ResourceDefinition]] = None,\n        config: Optional[\n            Union[ConfigMapping, Mapping[str, Any], "RunConfig", "PartitionedConfig"]\n        ] = None,\n        logger_defs: Optional[Mapping[str, LoggerDefinition]] = None,\n        executor_def: Optional["ExecutorDefinition"] = None,\n        hooks: Optional[AbstractSet[HookDefinition]] = None,\n        op_retry_policy: Optional[RetryPolicy] = None,\n        version_strategy: Optional[VersionStrategy] = None,\n        partitions_def: Optional["PartitionsDefinition"] = None,\n        input_values: Optional[Mapping[str, object]] = None,\n    ):\n        from dagster._core.definitions.run_config import convert_config_input\n\n        self.name = name\n        self.description = description\n        self.tags = tags\n        self.metadata = metadata\n        self.resource_defs = resource_defs\n        self.config = convert_config_input(config)\n        self.logger_defs = logger_defs\n        self.executor_def = executor_def\n        self.hooks = hooks\n        self.op_retry_policy = op_retry_policy\n        self.version_strategy = version_strategy\n        self.partitions_def = partitions_def\n        self.input_values = input_values\n\n    def __call__(self, fn: Callable[..., Any]) -> JobDefinition:\n        check.callable_param(fn, "fn")\n\n        if not self.name:\n            self.name = fn.__name__\n\n        from dagster._core.definitions.composition import do_composition\n\n        (\n            input_mappings,\n            output_mappings,\n            dependencies,\n            node_defs,\n            config_mapping,\n            positional_inputs,\n            node_input_source_assets,\n        ) = do_composition(\n            decorator_name="@job",\n            graph_name=self.name,\n            fn=fn,\n            provided_input_defs=[],\n            provided_output_defs=[],\n            ignore_output_from_composition_fn=False,\n            config_mapping=None,\n        )\n\n        graph_def = GraphDefinition(\n            name=self.name,\n            dependencies=dependencies,\n            node_defs=node_defs,\n            description=self.description or format_docstring_for_description(fn),\n            input_mappings=input_mappings,\n            output_mappings=output_mappings,\n            config=config_mapping,\n            
positional_inputs=positional_inputs,\n            tags=self.tags,\n            node_input_source_assets=node_input_source_assets,\n        )\n\n        job_def = graph_def.to_job(\n            description=self.description or format_docstring_for_description(fn),\n            resource_defs=self.resource_defs,\n            config=self.config,\n            tags=self.tags,\n            metadata=self.metadata,\n            logger_defs=self.logger_defs,\n            executor_def=self.executor_def,\n            hooks=self.hooks,\n            op_retry_policy=self.op_retry_policy,\n            version_strategy=self.version_strategy,\n            partitions_def=self.partitions_def,\n            input_values=self.input_values,\n        )\n        update_wrapper(job_def, fn)\n        return job_def\n\n\n@overload\ndef job(compose_fn: Callable[..., Any]) -> JobDefinition: ...\n\n\n@overload\ndef job(\n    *,\n    name: Optional[str] = ...,\n    description: Optional[str] = ...,\n    resource_defs: Optional[Mapping[str, object]] = ...,\n    config: Union[ConfigMapping, Mapping[str, Any], "RunConfig", "PartitionedConfig"] = ...,\n    tags: Optional[Mapping[str, Any]] = ...,\n    metadata: Optional[Mapping[str, RawMetadataValue]] = ...,\n    logger_defs: Optional[Mapping[str, LoggerDefinition]] = ...,\n    executor_def: Optional["ExecutorDefinition"] = ...,\n    hooks: Optional[AbstractSet[HookDefinition]] = ...,\n    op_retry_policy: Optional[RetryPolicy] = ...,\n    version_strategy: Optional[VersionStrategy] = ...,\n    partitions_def: Optional["PartitionsDefinition"] = ...,\n    input_values: Optional[Mapping[str, object]] = ...,\n) -> _Job: ...\n\n\n
[docs]@deprecated_param(\n param="version_strategy",\n breaking_version="2.0",\n additional_warn_text="Use asset versioning instead.",\n)\ndef job(\n compose_fn: Optional[Callable[..., Any]] = None,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n resource_defs: Optional[Mapping[str, object]] = None,\n config: Optional[\n Union[ConfigMapping, Mapping[str, Any], "RunConfig", "PartitionedConfig"]\n ] = None,\n tags: Optional[Mapping[str, Any]] = None,\n metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n logger_defs: Optional[Mapping[str, LoggerDefinition]] = None,\n executor_def: Optional["ExecutorDefinition"] = None,\n hooks: Optional[AbstractSet[HookDefinition]] = None,\n op_retry_policy: Optional[RetryPolicy] = None,\n version_strategy: Optional[VersionStrategy] = None,\n partitions_def: Optional["PartitionsDefinition"] = None,\n input_values: Optional[Mapping[str, object]] = None,\n) -> Union[JobDefinition, _Job]:\n """Creates a job with the specified parameters from the decorated graph/op invocation function.\n\n Using this decorator allows you to build an executable job by writing a function that invokes\n ops (or graphs).\n\n Args:\n compose_fn (Callable[..., Any]:\n The decorated function. The body should contain op or graph invocations. Unlike op\n functions, does not accept a context argument.\n name (Optional[str]):\n The name for the Job. Defaults to the name of the this graph.\n resource_defs (Optional[Mapping[str, object]]):\n Resources that are required by this graph for execution.\n If not defined, `io_manager` will default to filesystem.\n config:\n Describes how the job is parameterized at runtime.\n\n If no value is provided, then the schema for the job's run config is a standard\n format based on its ops and resources.\n\n If a dictionary is provided, then it must conform to the standard config schema, and\n it will be used as the job's run config for the job whenever the job is executed.\n The values provided will be viewable and editable in the Dagster UI, so be\n careful with secrets.\n\n If a :py:class:`RunConfig` object is provided, then it will be used directly as the run config\n for the job whenever the job is executed, similar to providing a dictionary.\n\n If a :py:class:`ConfigMapping` object is provided, then the schema for the job's run config is\n determined by the config mapping, and the ConfigMapping, which should return\n configuration in the standard format to configure the job.\n\n If a :py:class:`PartitionedConfig` object is provided, then it defines a discrete set of config\n values that can parameterize the job, as well as a function for mapping those\n values to the base config. The values provided will be viewable and editable in the\n Dagster UI, so be careful with secrets.\n tags (Optional[Dict[str, Any]]):\n Arbitrary information that will be attached to the execution of the Job.\n Values that are not strings will be json encoded and must meet the criteria that\n `json.loads(json.dumps(value)) == value`. 
These tag values may be overwritten by tag\n values provided at invocation time.\n metadata (Optional[Dict[str, RawMetadataValue]]):\n Arbitrary information that will be attached to the JobDefinition and be viewable in the Dagster UI.\n Keys must be strings, and values must be python primitive types or one of the provided\n MetadataValue types\n logger_defs (Optional[Dict[str, LoggerDefinition]]):\n A dictionary of string logger identifiers to their implementations.\n executor_def (Optional[ExecutorDefinition]):\n How this Job will be executed. Defaults to :py:class:`multiprocess_executor` .\n op_retry_policy (Optional[RetryPolicy]): The default retry policy for all ops in this job.\n Only used if retry policy is not defined on the op definition or op invocation.\n version_strategy (Optional[VersionStrategy]):\n Defines how each op (and optionally, resource) in the job can be versioned. If\n provided, memoization will be enabled for this job.\n partitions_def (Optional[PartitionsDefinition]): Defines a discrete set of partition keys\n that can parameterize the job. If this argument is supplied, the config argument\n can't also be supplied.\n input_values (Optional[Mapping[str, Any]]):\n A dictionary that maps python objects to the top-level inputs of a job.\n\n Examples:\n .. code-block:: python\n\n @op\n def return_one():\n return 1\n\n @op\n def add_one(in1):\n return in1 + 1\n\n @job\n def job1():\n add_one(return_one())\n """\n if compose_fn is not None:\n check.invariant(description is None)\n return _Job()(compose_fn)\n\n from dagster._core.execution.build_resources import wrap_resources_for_execution\n\n return _Job(\n name=name,\n description=description,\n resource_defs=wrap_resources_for_execution(resource_defs),\n config=config,\n tags=tags,\n metadata=metadata,\n logger_defs=logger_defs,\n executor_def=executor_def,\n hooks=hooks,\n op_retry_policy=op_retry_policy,\n version_strategy=version_strategy,\n partitions_def=partitions_def,\n input_values=input_values,\n )
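A minimal sketch of the dictionary form of ``config`` described above, paired with the Pythonic ``Config`` class; the op, job, and config names are illustrative assumptions.

.. code-block:: python

    from dagster import Config, job, op

    class GreetConfig(Config):
        name: str

    @op
    def greet(config: GreetConfig):
        print(f"hello, {config.name}")

    # A plain dict supplied as `config` becomes the job's default run config,
    # so it must conform to the schema of the job's ops.
    @job(config={"ops": {"greet": {"config": {"name": "dagster"}}}})
    def greet_job():
        greet()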
\n
", "current_page_name": "_modules/dagster/_core/definitions/decorators/job_decorator", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.decorators.job_decorator"}, "op_decorator": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.decorators.op_decorator

\nfrom functools import lru_cache, update_wrapper\nfrom inspect import Parameter\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Callable,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Union,\n    cast,\n    overload,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import deprecated_param\nfrom dagster._config import UserConfigSchema\nfrom dagster._core.decorator_utils import (\n    format_docstring_for_description,\n    get_function_params,\n    get_valid_name_permutations,\n    param_is_var_keyword,\n    positional_arg_name_list,\n)\nfrom dagster._core.definitions.inference import infer_input_props\nfrom dagster._core.definitions.resource_annotation import (\n    get_resource_args,\n)\nfrom dagster._core.errors import DagsterInvalidDefinitionError\nfrom dagster._core.types.dagster_type import DagsterTypeKind\nfrom dagster._utils.warnings import normalize_renamed_param\n\nfrom ..input import In, InputDefinition\nfrom ..output import Out\nfrom ..policy import RetryPolicy\nfrom ..utils import DEFAULT_OUTPUT\n\nif TYPE_CHECKING:\n    from ..op_definition import OpDefinition\n\n\nclass _Op:\n    def __init__(\n        self,\n        name: Optional[str] = None,\n        description: Optional[str] = None,\n        required_resource_keys: Optional[AbstractSet[str]] = None,\n        config_schema: Optional[Union[Any, Mapping[str, Any]]] = None,\n        tags: Optional[Mapping[str, Any]] = None,\n        code_version: Optional[str] = None,\n        decorator_takes_context: Optional[bool] = True,\n        retry_policy: Optional[RetryPolicy] = None,\n        ins: Optional[Mapping[str, In]] = None,\n        out: Optional[Union[Out, Mapping[str, Out]]] = None,\n    ):\n        self.name = check.opt_str_param(name, "name")\n        self.decorator_takes_context = check.bool_param(\n            decorator_takes_context, "decorator_takes_context"\n        )\n\n        self.description = check.opt_str_param(description, "description")\n\n        # these will be checked within OpDefinition\n        self.required_resource_keys = required_resource_keys\n        self.tags = tags\n        self.code_version = code_version\n        self.retry_policy = retry_policy\n\n        # config will be checked within OpDefinition\n        self.config_schema = config_schema\n\n        self.ins = check.opt_nullable_mapping_param(ins, "ins", key_type=str, value_type=In)\n        self.out = out\n\n    def __call__(self, fn: Callable[..., Any]) -> "OpDefinition":\n        from dagster._config.pythonic_config import validate_resource_annotated_function\n\n        from ..op_definition import OpDefinition\n\n        validate_resource_annotated_function(fn)\n\n        if not self.name:\n            self.name = fn.__name__\n\n        compute_fn = (\n            DecoratedOpFunction(decorated_fn=fn)\n            if self.decorator_takes_context\n            else NoContextDecoratedOpFunction(decorated_fn=fn)\n        )\n\n        if compute_fn.has_config_arg():\n            check.param_invariant(\n                self.config_schema is None or self.config_schema == {},\n                "If the @op has a config arg, you cannot specify a config schema",\n            )\n\n            from dagster._config.pythonic_config import infer_schema_from_config_annotation\n\n            # Parse schema from the type annotation of the config arg\n            config_arg = compute_fn.get_config_arg()\n            config_arg_type = config_arg.annotation\n            config_arg_default 
= config_arg.default\n            self.config_schema = infer_schema_from_config_annotation(\n                config_arg_type, config_arg_default\n            )\n\n        outs: Optional[Mapping[str, Out]] = None\n        if self.out is not None and isinstance(self.out, Out):\n            outs = {DEFAULT_OUTPUT: self.out}\n        elif self.out is not None:\n            outs = check.mapping_param(self.out, "out", key_type=str, value_type=Out)\n\n        arg_resource_keys = {arg.name for arg in compute_fn.get_resource_args()}\n        decorator_resource_keys = set(self.required_resource_keys or [])\n        check.param_invariant(\n            len(decorator_resource_keys) == 0 or len(arg_resource_keys) == 0,\n            "Cannot specify resource requirements in both @op decorator and as arguments to the"\n            " decorated function",\n        )\n        resolved_resource_keys = decorator_resource_keys.union(arg_resource_keys)\n\n        op_def = OpDefinition.dagster_internal_init(\n            name=self.name,\n            ins=self.ins,\n            outs=outs,\n            compute_fn=compute_fn,\n            config_schema=self.config_schema,\n            description=self.description or format_docstring_for_description(fn),\n            required_resource_keys=resolved_resource_keys,\n            tags=self.tags,\n            code_version=self.code_version,\n            retry_policy=self.retry_policy,\n            version=None,  # code_version has replaced version\n        )\n        update_wrapper(op_def, compute_fn.decorated_fn)\n        return op_def\n\n\n@overload\ndef op(compute_fn: Callable[..., Any]) -> "OpDefinition": ...\n\n\n@overload\ndef op(\n    *,\n    name: Optional[str] = ...,\n    description: Optional[str] = ...,\n    ins: Optional[Mapping[str, In]] = ...,\n    out: Optional[Union[Out, Mapping[str, Out]]] = ...,\n    config_schema: Optional[UserConfigSchema] = ...,\n    required_resource_keys: Optional[AbstractSet[str]] = ...,\n    tags: Optional[Mapping[str, Any]] = ...,\n    version: Optional[str] = ...,\n    retry_policy: Optional[RetryPolicy] = ...,\n    code_version: Optional[str] = ...,\n) -> _Op: ...\n\n\n
[docs]@deprecated_param(\n param="version", breaking_version="2.0", additional_warn_text="Use `code_version` instead"\n)\ndef op(\n compute_fn: Optional[Callable] = None,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n ins: Optional[Mapping[str, In]] = None,\n out: Optional[Union[Out, Mapping[str, Out]]] = None,\n config_schema: Optional[UserConfigSchema] = None,\n required_resource_keys: Optional[AbstractSet[str]] = None,\n tags: Optional[Mapping[str, Any]] = None,\n version: Optional[str] = None,\n retry_policy: Optional[RetryPolicy] = None,\n code_version: Optional[str] = None,\n) -> Union["OpDefinition", _Op]:\n """Create an op with the specified parameters from the decorated function.\n\n Ins and outs will be inferred from the type signature of the decorated function\n if not explicitly provided.\n\n The decorated function will be used as the op's compute function. The signature of the\n decorated function is more flexible than that of the ``compute_fn`` in the core API; it may:\n\n 1. Return a value. This value will be wrapped in an :py:class:`Output` and yielded by the compute function.\n 2. Return an :py:class:`Output`. This output will be yielded by the compute function.\n 3. Yield :py:class:`Output` or other :ref:`event objects <events>`. Same as default compute behavior.\n\n Note that options 1) and 2) are incompatible with yielding other events -- if you would like\n to decorate a function that yields events, it must also wrap its eventual output in an\n :py:class:`Output` and yield it.\n\n @op supports ``async def`` functions as well, including async generators when yielding multiple\n events or outputs. Note that async ops will generally be run on their own unless using a custom\n :py:class:`Executor` implementation that supports running them together.\n\n Args:\n name (Optional[str]): Name of op. Must be unique within any :py:class:`GraphDefinition`\n using the op.\n description (Optional[str]): Human-readable description of this op. If not provided, and\n the decorated function has docstring, that docstring will be used as the description.\n ins (Optional[Dict[str, In]]):\n Information about the inputs to the op. Information provided here will be combined\n with what can be inferred from the function signature.\n out (Optional[Union[Out, Dict[str, Out]]]):\n Information about the op outputs. Information provided here will be combined with\n what can be inferred from the return type signature if the function does not use yield.\n config_schema (Optional[ConfigSchema): The schema for the config. If set, Dagster will check\n that config provided for the op matches this schema and fail if it does not. If not\n set, Dagster will accept any config provided for the op.\n required_resource_keys (Optional[Set[str]]): Set of resource handles required by this op.\n tags (Optional[Dict[str, Any]]): Arbitrary metadata for the op. Frameworks may\n expect and require certain metadata to be attached to a op. Values that are not strings\n will be json encoded and must meet the criteria that `json.loads(json.dumps(value)) == value`.\n code_version (Optional[str]): (Experimental) Version of the logic encapsulated by the op. If set,\n this is used as a default version for all outputs.\n retry_policy (Optional[RetryPolicy]): The retry policy for this op.\n\n Examples:\n .. 
code-block:: python\n\n @op\n def hello_world():\n print('hello')\n\n @op\n def echo(msg: str) -> str:\n return msg\n\n @op(\n ins={'msg': In(str)},\n out=Out(str)\n )\n def echo_2(msg): # same as above\n return msg\n\n @op(\n out={'word': Out(), 'num': Out()}\n )\n def multi_out() -> Tuple[str, int]:\n return 'cool', 4\n """\n code_version = normalize_renamed_param(\n code_version,\n "code_version",\n version,\n "version",\n )\n\n if compute_fn is not None:\n check.invariant(description is None)\n check.invariant(config_schema is None)\n check.invariant(required_resource_keys is None)\n check.invariant(tags is None)\n check.invariant(version is None)\n\n return _Op()(compute_fn)\n\n return _Op(\n name=name,\n description=description,\n config_schema=config_schema,\n required_resource_keys=required_resource_keys,\n tags=tags,\n code_version=code_version,\n retry_policy=retry_policy,\n ins=ins,\n out=out,\n )
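A brief hedged sketch combining the ``retry_policy`` and ``tags`` arguments documented above; the op name and the specific policy values are hypothetical.

.. code-block:: python

    from dagster import Backoff, RetryPolicy, op

    # Retry up to three times with exponential backoff starting at 0.5 seconds.
    @op(
        retry_policy=RetryPolicy(max_retries=3, delay=0.5, backoff=Backoff.EXPONENTIAL),
        tags={"kind": "network"},
    )
    def flaky_request():
        ...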
\n\n\nclass DecoratedOpFunction(NamedTuple):\n """Wrapper around the decorated op function to provide commonly used util methods."""\n\n decorated_fn: Callable[..., Any]\n\n @property\n def name(self):\n return self.decorated_fn.__name__\n\n @lru_cache(maxsize=1)\n def has_context_arg(self) -> bool:\n return is_context_provided(get_function_params(self.decorated_fn))\n\n def get_context_arg(self) -> Parameter:\n if self.has_context_arg():\n return get_function_params(self.decorated_fn)[0]\n check.failed("Requested context arg on function that does not have one")\n\n @lru_cache(maxsize=1)\n def _get_function_params(self) -> Sequence[Parameter]:\n return get_function_params(self.decorated_fn)\n\n def has_config_arg(self) -> bool:\n for param in get_function_params(self.decorated_fn):\n if param.name == "config":\n return True\n\n return False\n\n def get_config_arg(self) -> Parameter:\n for param in get_function_params(self.decorated_fn):\n if param.name == "config":\n return param\n\n check.failed("Requested config arg on function that does not have one")\n\n def get_resource_args(self) -> Sequence[Parameter]:\n return get_resource_args(self.decorated_fn)\n\n def positional_inputs(self) -> Sequence[str]:\n params = self._get_function_params()\n input_args = params[1:] if self.has_context_arg() else params\n resource_arg_names = [arg.name for arg in self.get_resource_args()]\n input_args_filtered = [\n input_arg\n for input_arg in input_args\n if input_arg.name != "config" and input_arg.name not in resource_arg_names\n ]\n return positional_arg_name_list(input_args_filtered)\n\n def has_var_kwargs(self) -> bool:\n params = self._get_function_params()\n # var keyword arg has to be the last argument\n return len(params) > 0 and param_is_var_keyword(params[-1])\n\n def get_output_annotation(self) -> Any:\n from ..inference import infer_output_props\n\n return infer_output_props(self.decorated_fn).annotation\n\n\nclass NoContextDecoratedOpFunction(DecoratedOpFunction):\n """Wrapper around a decorated op function, when the decorator does not permit a context\n parameter.\n """\n\n @lru_cache(maxsize=1)\n def has_context_arg(self) -> bool:\n return False\n\n\ndef is_context_provided(params: Sequence[Parameter]) -> bool:\n if len(params) == 0:\n return False\n return params[0].name in get_valid_name_permutations("context")\n\n\ndef resolve_checked_op_fn_inputs(\n decorator_name: str,\n fn_name: str,\n compute_fn: DecoratedOpFunction,\n explicit_input_defs: Sequence[InputDefinition],\n exclude_nothing: bool,\n) -> Sequence[InputDefinition]:\n """Validate provided input definitions and infer the remaining from the type signature of the compute_fn.\n Returns the resolved set of InputDefinitions.\n\n Args:\n decorator_name (str): Name of the decorator that is wrapping the op function.\n fn_name (str): Name of the decorated function.\n compute_fn (DecoratedOpFunction): The decorated function, wrapped in the\n DecoratedOpFunction wrapper.\n explicit_input_defs (List[InputDefinition]): The input definitions that were explicitly\n provided in the decorator.\n exclude_nothing (bool): True if Nothing type inputs should be excluded from compute_fn\n arguments.\n """\n explicit_names = set()\n if exclude_nothing:\n explicit_names = set(\n inp.name\n for inp in explicit_input_defs\n if not inp.dagster_type.kind == DagsterTypeKind.NOTHING\n )\n nothing_names = set(\n inp.name\n for inp in explicit_input_defs\n if inp.dagster_type.kind == DagsterTypeKind.NOTHING\n )\n else:\n explicit_names = set(inp.name for inp 
in explicit_input_defs)\n nothing_names = set()\n\n params = get_function_params(compute_fn.decorated_fn)\n\n input_args = params[1:] if compute_fn.has_context_arg() else params\n\n # filter out config arg\n resource_arg_names = {arg.name for arg in compute_fn.get_resource_args()}\n explicit_names = explicit_names - resource_arg_names\n\n if compute_fn.has_config_arg() or resource_arg_names:\n new_input_args = []\n for input_arg in input_args:\n if input_arg.name != "config" and input_arg.name not in resource_arg_names:\n new_input_args.append(input_arg)\n input_args = new_input_args\n\n # Validate input arguments\n used_inputs = set()\n inputs_to_infer = set()\n has_kwargs = False\n\n for param in cast(List[Parameter], input_args):\n if param.kind == Parameter.VAR_KEYWORD:\n has_kwargs = True\n elif param.kind == Parameter.VAR_POSITIONAL:\n raise DagsterInvalidDefinitionError(\n f"{decorator_name} '{fn_name}' decorated function has positional vararg parameter "\n f"'{param}'. {decorator_name} decorated functions should only have keyword "\n "arguments that match input names and, if system information is required, a first "\n "positional parameter named 'context'."\n )\n\n else:\n if param.name not in explicit_names:\n if param.name in nothing_names:\n raise DagsterInvalidDefinitionError(\n f"{decorator_name} '{fn_name}' decorated function has parameter"\n f" '{param.name}' that is one of the input_defs of type 'Nothing' which"\n " should not be included since no data will be passed for it. "\n )\n else:\n inputs_to_infer.add(param.name)\n\n else:\n used_inputs.add(param.name)\n\n undeclared_inputs = explicit_names - used_inputs\n if not has_kwargs and undeclared_inputs:\n undeclared_inputs_printed = ", '".join(undeclared_inputs)\n raise DagsterInvalidDefinitionError(\n f"{decorator_name} '{fn_name}' decorated function does not have argument(s)"\n f" '{undeclared_inputs_printed}'. {decorator_name}-decorated functions should have a"\n " keyword argument for each of their Ins, except for Ins that have the Nothing"\n " dagster_type. Alternatively, they can accept **kwargs."\n )\n\n inferred_props = {\n inferred.name: inferred\n for inferred in infer_input_props(compute_fn.decorated_fn, compute_fn.has_context_arg())\n }\n input_defs = []\n for input_def in explicit_input_defs:\n if input_def.name in inferred_props:\n # combine any information missing on the explicit def that can be inferred\n input_defs.append(input_def.combine_with_inferred(inferred_props[input_def.name]))\n else:\n # pass through those that don't have any inference info, such as Nothing type inputs\n input_defs.append(input_def)\n\n # build defs from the inferred props for those without explicit entries\n inferred_input_defs = [\n InputDefinition.create_from_inferred(inferred)\n for inferred in inferred_props.values()\n if inferred.name in inputs_to_infer\n ]\n\n if exclude_nothing:\n for in_def in inferred_input_defs:\n if in_def.dagster_type.is_nothing:\n raise DagsterInvalidDefinitionError(\n f"Input parameter {in_def.name} is annotated with"\n f" {in_def.dagster_type.display_name} which is a type that represents passing"\n " no data. This type must be used via In() and no parameter should be included"\n f" in the {decorator_name} decorated function."\n )\n\n input_defs.extend(inferred_input_defs)\n\n return input_defs\n
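A small sketch of the ``Nothing``-typed input case handled by ``resolve_checked_op_fn_inputs`` above: the dependency is declared with ``In(Nothing)`` and deliberately omitted from the function signature; the op and job names are hypothetical.

.. code-block:: python

    from dagster import In, Nothing, job, op

    @op
    def create_table():
        ...

    # A Nothing-typed In expresses ordering only, so no parameter is declared for it.
    @op(ins={"start_after": In(Nothing)})
    def populate_table():
        ...

    @job
    def setup_tables():
        populate_table(start_after=create_table())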
", "current_page_name": "_modules/dagster/_core/definitions/decorators/op_decorator", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.decorators.op_decorator"}, "repository_decorator": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.decorators.repository_decorator

\nfrom functools import update_wrapper\nfrom typing import (\n    Callable,\n    Dict,\n    Iterable,\n    Iterator,\n    List,\n    Mapping,\n    Optional,\n    Sequence,\n    TypeVar,\n    Union,\n    overload,\n)\n\nfrom typing_extensions import TypeAlias\n\nimport dagster._check as check\nfrom dagster._core.decorator_utils import get_function_params\nfrom dagster._core.definitions.metadata import (\n    RawMetadataValue,\n    normalize_metadata,\n)\nfrom dagster._core.definitions.resource_definition import ResourceDefinition\nfrom dagster._core.errors import DagsterInvalidDefinitionError\n\nfrom ..asset_checks import AssetChecksDefinition\nfrom ..executor_definition import ExecutorDefinition\nfrom ..graph_definition import GraphDefinition\nfrom ..job_definition import JobDefinition\nfrom ..logger_definition import LoggerDefinition\nfrom ..partitioned_schedule import UnresolvedPartitionedAssetScheduleDefinition\nfrom ..repository_definition import (\n    VALID_REPOSITORY_DATA_DICT_KEYS,\n    CachingRepositoryData,\n    PendingRepositoryDefinition,\n    PendingRepositoryListDefinition,\n    RepositoryData,\n    RepositoryDefinition,\n    RepositoryListDefinition,\n)\nfrom ..schedule_definition import ScheduleDefinition\nfrom ..sensor_definition import SensorDefinition\nfrom ..unresolved_asset_job_definition import UnresolvedAssetJobDefinition\n\nT = TypeVar("T")\n\nRepositoryDictSpec: TypeAlias = Dict[str, Dict[str, RepositoryListDefinition]]\n\n\ndef _flatten(items: Iterable[Union[T, List[T]]]) -> Iterator[T]:\n    for x in items:\n        if isinstance(x, List):\n            # switch to `yield from _flatten(x)` to support multiple layers of nesting\n            yield from x\n        else:\n            yield x\n\n\nclass _Repository:\n    def __init__(\n        self,\n        name: Optional[str] = None,\n        description: Optional[str] = None,\n        metadata: Optional[Dict[str, RawMetadataValue]] = None,\n        default_executor_def: Optional[ExecutorDefinition] = None,\n        default_logger_defs: Optional[Mapping[str, LoggerDefinition]] = None,\n        top_level_resources: Optional[Mapping[str, ResourceDefinition]] = None,\n        resource_key_mapping: Optional[Mapping[int, str]] = None,\n    ):\n        self.name = check.opt_str_param(name, "name")\n        self.description = check.opt_str_param(description, "description")\n        self.metadata = normalize_metadata(\n            check.opt_mapping_param(metadata, "metadata", key_type=str)\n        )\n        self.default_executor_def = check.opt_inst_param(\n            default_executor_def, "default_executor_def", ExecutorDefinition\n        )\n        self.default_logger_defs = check.opt_mapping_param(\n            default_logger_defs, "default_logger_defs", key_type=str, value_type=LoggerDefinition\n        )\n        self.top_level_resources = check.opt_mapping_param(\n            top_level_resources, "top_level_resources", key_type=str, value_type=ResourceDefinition\n        )\n        self.resource_key_mapping = check.opt_mapping_param(\n            resource_key_mapping, "resource_key_mapping", key_type=int, value_type=str\n        )\n\n    @overload\n    def __call__(\n        self,\n        fn: Union[\n            Callable[[], Sequence[RepositoryListDefinition]],\n            Callable[[], RepositoryDictSpec],\n        ],\n    ) -> RepositoryDefinition: ...\n\n    @overload\n    def __call__(\n        self, fn: Callable[[], Sequence[PendingRepositoryListDefinition]]\n    ) -> PendingRepositoryDefinition: ...\n\n    
def __call__(\n        self,\n        fn: Union[\n            Callable[[], Sequence[PendingRepositoryListDefinition]],\n            Callable[[], RepositoryDictSpec],\n        ],\n    ) -> Union[RepositoryDefinition, PendingRepositoryDefinition]:\n        from dagster._core.definitions import AssetsDefinition, SourceAsset\n        from dagster._core.definitions.cacheable_assets import CacheableAssetsDefinition\n\n        check.callable_param(fn, "fn")\n\n        if not self.name:\n            self.name = fn.__name__\n\n        repository_definitions = fn()\n\n        repository_data: Optional[Union[CachingRepositoryData, RepositoryData]]\n        if isinstance(repository_definitions, list):\n            bad_defns = []\n            repository_defns = []\n            defer_repository_data = False\n            for i, definition in enumerate(_flatten(repository_definitions)):\n                if isinstance(definition, CacheableAssetsDefinition):\n                    defer_repository_data = True\n                elif not isinstance(\n                    definition,\n                    (\n                        JobDefinition,\n                        ScheduleDefinition,\n                        UnresolvedPartitionedAssetScheduleDefinition,\n                        SensorDefinition,\n                        GraphDefinition,\n                        AssetsDefinition,\n                        SourceAsset,\n                        UnresolvedAssetJobDefinition,\n                        AssetChecksDefinition,\n                    ),\n                ):\n                    bad_defns.append((i, type(definition)))\n                else:\n                    repository_defns.append(definition)\n\n            if bad_defns:\n                bad_definitions_str = ", ".join(\n                    [f"value of type {type_} at index {i}" for i, type_ in bad_defns]\n                )\n                raise DagsterInvalidDefinitionError(\n                    "Bad return value from repository construction function: all elements of list "\n                    "must be of type JobDefinition, GraphDefinition, "\n                    "ScheduleDefinition, SensorDefinition, "\n                    "AssetsDefinition, SourceAsset, or AssetChecksDefinition."\n                    f"Got {bad_definitions_str}."\n                )\n\n            repository_data = (\n                None\n                if defer_repository_data\n                else CachingRepositoryData.from_list(\n                    repository_defns,\n                    default_executor_def=self.default_executor_def,\n                    default_logger_defs=self.default_logger_defs,\n                    top_level_resources=self.top_level_resources,\n                    resource_key_mapping=self.resource_key_mapping,\n                )\n            )\n\n        elif isinstance(repository_definitions, dict):\n            if not set(repository_definitions.keys()).issubset(VALID_REPOSITORY_DATA_DICT_KEYS):\n                raise DagsterInvalidDefinitionError(\n                    "Bad return value from repository construction function: dict must not contain "\n                    "keys other than {{'schedules', 'sensors', 'jobs'}}: found "\n                    "{bad_keys}".format(\n                        bad_keys=", ".join(\n                            [\n                                f"'{key}'"\n                                for key in repository_definitions.keys()\n                                if key not in VALID_REPOSITORY_DATA_DICT_KEYS\n             
               ]\n                        )\n                    )\n                )\n            repository_data = CachingRepositoryData.from_dict(repository_definitions)\n        elif isinstance(repository_definitions, RepositoryData):\n            repository_data = repository_definitions\n        else:\n            raise DagsterInvalidDefinitionError(\n                "Bad return value of type {type_} from repository construction function: must "\n                "return list, dict, or RepositoryData. See the @repository decorator docstring for "\n                "details and examples".format(type_=type(repository_definitions)),\n            )\n\n        if isinstance(repository_definitions, list) and repository_data is None:\n            return PendingRepositoryDefinition(\n                self.name,\n                repository_definitions=list(_flatten(repository_definitions)),\n                description=self.description,\n                metadata=self.metadata,\n                default_executor_def=self.default_executor_def,\n                default_logger_defs=self.default_logger_defs,\n                _top_level_resources=self.top_level_resources,\n            )\n        else:\n            repository_def = RepositoryDefinition(\n                name=self.name,\n                description=self.description,\n                metadata=self.metadata,\n                repository_data=repository_data,\n            )\n\n            update_wrapper(repository_def, fn)\n            return repository_def\n\n\n@overload\ndef repository(\n    definitions_fn: Union[\n        Callable[[], Sequence[RepositoryListDefinition]], Callable[[], RepositoryDictSpec]\n    ],\n) -> RepositoryDefinition: ...\n\n\n@overload\ndef repository(\n    definitions_fn: Callable[..., Sequence[PendingRepositoryListDefinition]]\n) -> PendingRepositoryDefinition: ...\n\n\n@overload\ndef repository(\n    *,\n    name: Optional[str] = ...,\n    description: Optional[str] = ...,\n    metadata: Optional[Dict[str, RawMetadataValue]] = ...,\n    default_executor_def: Optional[ExecutorDefinition] = ...,\n    default_logger_defs: Optional[Mapping[str, LoggerDefinition]] = ...,\n    _top_level_resources: Optional[Mapping[str, ResourceDefinition]] = ...,\n    _resource_key_mapping: Optional[Mapping[int, str]] = ...,\n) -> _Repository: ...\n\n\n
[docs]def repository(\n definitions_fn: Optional[\n Union[\n Callable[[], Sequence[PendingRepositoryListDefinition]],\n Callable[[], RepositoryDictSpec],\n ]\n ] = None,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n metadata: Optional[Dict[str, RawMetadataValue]] = None,\n default_executor_def: Optional[ExecutorDefinition] = None,\n default_logger_defs: Optional[Mapping[str, LoggerDefinition]] = None,\n _top_level_resources: Optional[Mapping[str, ResourceDefinition]] = None,\n _resource_key_mapping: Optional[Mapping[int, str]] = None,\n) -> Union[RepositoryDefinition, PendingRepositoryDefinition, _Repository]:\n """Create a repository from the decorated function.\n\n The decorated function should take no arguments and its return value should one of:\n\n 1. ``List[Union[JobDefinition, ScheduleDefinition, SensorDefinition]]``.\n Use this form when you have no need to lazy load jobs or other definitions. This is the\n typical use case.\n\n 2. A dict of the form:\n\n .. code-block:: python\n\n {\n 'jobs': Dict[str, Callable[[], JobDefinition]],\n 'schedules': Dict[str, Callable[[], ScheduleDefinition]]\n 'sensors': Dict[str, Callable[[], SensorDefinition]]\n }\n\n This form is intended to allow definitions to be created lazily when accessed by name,\n which can be helpful for performance when there are many definitions in a repository, or\n when constructing the definitions is costly.\n\n 3. A :py:class:`RepositoryData`. Return this object if you need fine-grained\n control over the construction and indexing of definitions within the repository, e.g., to\n create definitions dynamically from .yaml files in a directory.\n\n Args:\n name (Optional[str]): The name of the repository. Defaults to the name of the decorated\n function.\n description (Optional[str]): A string description of the repository.\n metadata (Optional[Dict[str, RawMetadataValue]]): Arbitrary metadata for the repository.\n top_level_resources (Optional[Mapping[str, ResourceDefinition]]): A dict of top-level\n resource keys to defintions, for resources which should be displayed in the UI.\n\n Example:\n .. 
code-block:: python\n\n ######################################################################\n # A simple repository using the first form of the decorated function\n ######################################################################\n\n @op(config_schema={n: Field(Int)})\n def return_n(context):\n return context.op_config['n']\n\n @job\n def simple_job():\n return_n()\n\n @job\n def some_job():\n ...\n\n @sensor(job=some_job)\n def some_sensor():\n if foo():\n yield RunRequest(\n run_key= ...,\n run_config={\n 'ops': {'return_n': {'config': {'n': bar()}}}\n }\n )\n\n @job\n def my_job():\n ...\n\n my_schedule = ScheduleDefinition(cron_schedule="0 0 * * *", job=my_job)\n\n @repository\n def simple_repository():\n return [simple_job, some_sensor, my_schedule]\n\n ######################################################################\n # A simple repository using the first form of the decorated function\n # and custom metadata that will be displayed in the UI\n ######################################################################\n\n ...\n\n @repository(\n name='my_repo',\n metadata={\n 'team': 'Team A',\n 'repository_version': '1.2.3',\n 'environment': 'production',\n })\n def simple_repository():\n return [simple_job, some_sensor, my_schedule]\n\n ######################################################################\n # A lazy-loaded repository\n ######################################################################\n\n def make_expensive_job():\n @job\n def expensive_job():\n for i in range(10000):\n return_n.alias(f'return_n_{i}')()\n\n return expensive_job\n\n def make_expensive_schedule():\n @job\n def other_expensive_job():\n for i in range(11000):\n return_n.alias(f'my_return_n_{i}')()\n\n return ScheduleDefinition(cron_schedule="0 0 * * *", job=other_expensive_job)\n\n @repository\n def lazy_loaded_repository():\n return {\n 'jobs': {'expensive_job': make_expensive_job},\n 'schedules': {'expensive_schedule': make_expensive_schedule}\n }\n\n\n ######################################################################\n # A complex repository that lazily constructs jobs from a directory\n # of files in a bespoke YAML format\n ######################################################################\n\n class ComplexRepositoryData(RepositoryData):\n def __init__(self, yaml_directory):\n self._yaml_directory = yaml_directory\n\n def get_all_jobs(self):\n return [\n self._construct_job_def_from_yaml_file(\n self._yaml_file_for_job_name(file_name)\n )\n for file_name in os.listdir(self._yaml_directory)\n ]\n\n ...\n\n @repository\n def complex_repository():\n return ComplexRepositoryData('some_directory')\n """\n if definitions_fn is not None:\n check.invariant(description is None)\n check.invariant(len(get_function_params(definitions_fn)) == 0)\n\n return _Repository()(definitions_fn)\n\n return _Repository(\n name=name,\n description=description,\n metadata=metadata,\n default_executor_def=default_executor_def,\n default_logger_defs=default_logger_defs,\n top_level_resources=_top_level_resources,\n resource_key_mapping=_resource_key_mapping,\n )
\n
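A minimal usage sketch of the decorator documented above (the op, job, and repository names are illustrative): decorating a zero-argument function that returns a list yields a ``RepositoryDefinition`` whose contents can be looked up by name, as the ``get_job_def`` accessor later in this file also relies on.

.. code-block:: python

    from dagster import RepositoryDefinition, job, op, repository


    @op
    def do_nothing():
        pass


    @job
    def trivial_job():
        do_nothing()


    @repository
    def my_repository():
        return [trivial_job]


    # The decorator returns a RepositoryDefinition rather than the original
    # function, so definitions can be looked up by name.
    assert isinstance(my_repository, RepositoryDefinition)
    assert my_repository.get_job("trivial_job").name == "trivial_job"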
", "current_page_name": "_modules/dagster/_core/definitions/decorators/repository_decorator", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.decorators.repository_decorator"}, "schedule_decorator": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.decorators.schedule_decorator

\nimport copy\nfrom functools import update_wrapper\nfrom typing import (\n    Callable,\n    List,\n    Mapping,\n    Optional,\n    Sequence,\n    Set,\n    Union,\n    cast,\n)\n\nimport dagster._check as check\nfrom dagster._core.definitions.resource_annotation import (\n    get_resource_args,\n)\nfrom dagster._core.definitions.sensor_definition import get_context_param_name\nfrom dagster._core.errors import (\n    DagsterInvalidDefinitionError,\n    ScheduleExecutionError,\n    user_code_error_boundary,\n)\nfrom dagster._utils import ensure_gen\n\nfrom ..run_request import RunRequest, SkipReason\nfrom ..schedule_definition import (\n    DecoratedScheduleFunction,\n    DefaultScheduleStatus,\n    RawScheduleEvaluationFunction,\n    RunRequestIterator,\n    ScheduleDefinition,\n    ScheduleEvaluationContext,\n    has_at_least_one_parameter,\n    validate_and_get_schedule_resource_dict,\n)\nfrom ..target import ExecutableDefinition\nfrom ..utils import validate_tags\n\n\n
[docs]def schedule(\n cron_schedule: Union[str, Sequence[str]],\n *,\n job_name: Optional[str] = None,\n name: Optional[str] = None,\n tags: Optional[Mapping[str, str]] = None,\n tags_fn: Optional[Callable[[ScheduleEvaluationContext], Optional[Mapping[str, str]]]] = None,\n should_execute: Optional[Callable[[ScheduleEvaluationContext], bool]] = None,\n environment_vars: Optional[Mapping[str, str]] = None,\n execution_timezone: Optional[str] = None,\n description: Optional[str] = None,\n job: Optional[ExecutableDefinition] = None,\n default_status: DefaultScheduleStatus = DefaultScheduleStatus.STOPPED,\n required_resource_keys: Optional[Set[str]] = None,\n) -> Callable[[RawScheduleEvaluationFunction], ScheduleDefinition]:\n """Creates a schedule following the provided cron schedule and requests runs for the provided job.\n\n The decorated function takes in a :py:class:`~dagster.ScheduleEvaluationContext` as its only\n argument, and does one of the following:\n\n 1. Return a `RunRequest` object.\n 2. Return a list of `RunRequest` objects.\n 3. Return a `SkipReason` object, providing a descriptive message of why no runs were requested.\n 4. Return nothing (skipping without providing a reason)\n 5. Return a run config dictionary.\n 6. Yield a `SkipReason` or yield one ore more `RunRequest` objects.\n\n Returns a :py:class:`~dagster.ScheduleDefinition`.\n\n Args:\n cron_schedule (Union[str, Sequence[str]]): A valid cron string or sequence of cron strings\n specifying when the schedule will run, e.g., ``'45 23 * * 6'`` for a schedule that runs\n at 11:45 PM every Saturday. If a sequence is provided, then the schedule will run for\n the union of all execution times for the provided cron strings, e.g.,\n ``['45 23 * * 6', '30 9 * * 0]`` for a schedule that runs at 11:45 PM every Saturday and\n 9:30 AM every Sunday.\n name (Optional[str]): The name of the schedule to create.\n tags (Optional[Dict[str, str]]): A dictionary of tags (string key-value pairs) to attach\n to the scheduled runs.\n tags_fn (Optional[Callable[[ScheduleEvaluationContext], Optional[Dict[str, str]]]]): A function\n that generates tags to attach to the schedules runs. Takes a\n :py:class:`~dagster.ScheduleEvaluationContext` and returns a dictionary of tags (string\n key-value pairs). You may set only one of ``tags`` and ``tags_fn``.\n should_execute (Optional[Callable[[ScheduleEvaluationContext], bool]]): A function that runs at\n schedule execution time to determine whether a schedule should execute or skip. Takes a\n :py:class:`~dagster.ScheduleEvaluationContext` and returns a boolean (``True`` if the\n schedule should execute). Defaults to a function that always returns ``True``.\n execution_timezone (Optional[str]): Timezone in which the schedule should run.\n Supported strings for timezones are the ones provided by the\n `IANA time zone database <https://www.iana.org/time-zones>` - e.g. "America/Los_Angeles".\n description (Optional[str]): A human-readable description of the schedule.\n job (Optional[Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]]): The job\n that should execute when this schedule runs.\n default_status (DefaultScheduleStatus): Whether the schedule starts as running or not. 
The default\n status can be overridden from the Dagster UI or via the GraphQL API.\n required_resource_keys (Optional[Set[str]]): The set of resource keys required by the schedule.\n """\n\n def inner(fn: RawScheduleEvaluationFunction) -> ScheduleDefinition:\n from dagster._config.pythonic_config import validate_resource_annotated_function\n\n check.callable_param(fn, "fn")\n validate_resource_annotated_function(fn)\n\n schedule_name = name or fn.__name__\n\n validated_tags = None\n\n # perform upfront validation of schedule tags\n if tags_fn and tags:\n raise DagsterInvalidDefinitionError(\n "Attempted to provide both tags_fn and tags as arguments"\n " to ScheduleDefinition. Must provide only one of the two."\n )\n elif tags:\n validated_tags = validate_tags(tags, allow_reserved_tags=False)\n\n context_param_name = get_context_param_name(fn)\n resource_arg_names: Set[str] = {arg.name for arg in get_resource_args(fn)}\n\n def _wrapped_fn(context: ScheduleEvaluationContext) -> RunRequestIterator:\n if should_execute:\n with user_code_error_boundary(\n ScheduleExecutionError,\n lambda: (\n "Error occurred during the execution of should_execute for schedule"\n f" {schedule_name}"\n ),\n ):\n if not should_execute(context):\n yield SkipReason(\n f"should_execute function for {schedule_name} returned false."\n )\n return\n resources = validate_and_get_schedule_resource_dict(\n context.resources, schedule_name, resource_arg_names\n )\n\n with user_code_error_boundary(\n ScheduleExecutionError,\n lambda: f"Error occurred during the evaluation of schedule {schedule_name}",\n ):\n context_param = {context_param_name: context} if context_param_name else {}\n result = fn(**context_param, **resources)\n\n if isinstance(result, dict):\n # this is the run-config based decorated function, wrap the evaluated run config\n # and tags in a RunRequest\n evaluated_run_config = copy.deepcopy(result)\n evaluated_tags = (\n validated_tags\n or (tags_fn and validate_tags(tags_fn(context), allow_reserved_tags=False))\n or None\n )\n yield RunRequest(\n run_key=None,\n run_config=evaluated_run_config,\n tags=evaluated_tags,\n )\n elif isinstance(result, list):\n yield from cast(List[RunRequest], result)\n else:\n # this is a run-request based decorated function\n yield from cast(RunRequestIterator, ensure_gen(result))\n\n has_context_arg = has_at_least_one_parameter(fn)\n evaluation_fn = DecoratedScheduleFunction(\n decorated_fn=fn,\n wrapped_fn=_wrapped_fn,\n has_context_arg=has_context_arg,\n )\n\n schedule_def = ScheduleDefinition.dagster_internal_init(\n name=schedule_name,\n cron_schedule=cron_schedule,\n job_name=job_name,\n environment_vars=environment_vars,\n execution_timezone=execution_timezone,\n description=description,\n execution_fn=evaluation_fn,\n job=job,\n default_status=default_status,\n required_resource_keys=required_resource_keys,\n run_config=None, # cannot supply run_config or run_config_fn to decorator\n run_config_fn=None,\n tags=None, # cannot supply tags or tags_fn to decorator\n tags_fn=None,\n should_execute=None, # already encompassed in evaluation_fn\n )\n\n update_wrapper(schedule_def, wrapped=fn)\n\n return schedule_def\n\n return inner
\n
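A short sketch of the return forms listed in the docstring above (a ``RunRequest`` or a ``SkipReason``), assuming a job named ``my_job`` already exists; ``has_new_data`` is a hypothetical helper standing in for real business logic.

.. code-block:: python

    from dagster import RunRequest, SkipReason, schedule


    @schedule(cron_schedule="45 23 * * 6", job=my_job)  # `my_job` is assumed to exist
    def my_saturday_schedule(context):
        date = context.scheduled_execution_time.strftime("%Y-%m-%d")
        if has_new_data(date):  # hypothetical helper
            # Form 1 above: request a single run, deduplicated on run_key.
            return RunRequest(
                run_key=date,
                run_config={"ops": {"process": {"config": {"date": date}}}},
            )
        # Form 3 above: skip, with a reason surfaced in the UI.
        return SkipReason(f"No new data for {date}.")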
", "current_page_name": "_modules/dagster/_core/definitions/decorators/schedule_decorator", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.decorators.schedule_decorator"}, "sensor_decorator": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.decorators.sensor_decorator

\nimport collections.abc\nimport inspect\nfrom functools import update_wrapper\nfrom typing import Any, Callable, Optional, Sequence, Set, Union\n\nimport dagster._check as check\nfrom dagster._annotations import experimental\nfrom dagster._core.definitions.asset_selection import AssetSelection\n\nfrom ...errors import DagsterInvariantViolationError\nfrom ..asset_sensor_definition import AssetSensorDefinition\nfrom ..events import AssetKey\nfrom ..multi_asset_sensor_definition import (\n    AssetMaterializationFunction,\n    MultiAssetMaterializationFunction,\n    MultiAssetSensorDefinition,\n)\nfrom ..run_request import SensorResult\nfrom ..sensor_definition import (\n    DefaultSensorStatus,\n    RawSensorEvaluationFunction,\n    RunRequest,\n    SensorDefinition,\n    SkipReason,\n)\nfrom ..target import ExecutableDefinition\n\n\n
[docs]def sensor(\n job_name: Optional[str] = None,\n *,\n name: Optional[str] = None,\n minimum_interval_seconds: Optional[int] = None,\n description: Optional[str] = None,\n job: Optional[ExecutableDefinition] = None,\n jobs: Optional[Sequence[ExecutableDefinition]] = None,\n default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,\n asset_selection: Optional[AssetSelection] = None,\n required_resource_keys: Optional[Set[str]] = None,\n) -> Callable[[RawSensorEvaluationFunction], SensorDefinition]:\n """Creates a sensor where the decorated function is used as the sensor's evaluation function.\n\n The decorated function may:\n\n 1. Return a `RunRequest` object.\n 2. Return a list of `RunRequest` objects.\n 3. Return a `SkipReason` object, providing a descriptive message of why no runs were requested.\n 4. Return nothing (skipping without providing a reason)\n 5. Yield a `SkipReason` or yield one or more `RunRequest` objects.\n\n Takes a :py:class:`~dagster.SensorEvaluationContext`.\n\n Args:\n name (Optional[str]): The name of the sensor. Defaults to the name of the decorated\n function.\n minimum_interval_seconds (Optional[int]): The minimum number of seconds that will elapse\n between sensor evaluations.\n description (Optional[str]): A human-readable description of the sensor.\n job (Optional[Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]]):\n The job to be executed when the sensor fires.\n jobs (Optional[Sequence[Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]]]):\n (experimental) A list of jobs to be executed when the sensor fires.\n default_status (DefaultSensorStatus): Whether the sensor starts as running or not. The default\n status can be overridden from the Dagster UI or via the GraphQL API.\n asset_selection (AssetSelection): (Experimental) an asset selection to launch a run for if\n the sensor condition is met. This can be provided instead of specifying a job.\n """\n check.opt_str_param(name, "name")\n\n def inner(fn: RawSensorEvaluationFunction) -> SensorDefinition:\n check.callable_param(fn, "fn")\n\n sensor_def = SensorDefinition.dagster_internal_init(\n name=name,\n job_name=job_name,\n evaluation_fn=fn,\n minimum_interval_seconds=minimum_interval_seconds,\n description=description,\n job=job,\n jobs=jobs,\n default_status=default_status,\n asset_selection=asset_selection,\n required_resource_keys=required_resource_keys,\n )\n\n update_wrapper(sensor_def, wrapped=fn)\n\n return sensor_def\n\n return inner
\n\n\n
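A minimal sketch of the yield form described in the ``@sensor`` docstring above, assuming ``my_job`` exists; ``new_files_since`` is a hypothetical helper.

.. code-block:: python

    from dagster import RunRequest, SkipReason, sensor


    @sensor(job=my_job, minimum_interval_seconds=60)  # `my_job` is assumed to exist
    def new_file_sensor(context):
        # `context.cursor` persists across evaluations; here it stores the last file seen.
        filenames = new_files_since(context.cursor)  # hypothetical helper
        if not filenames:
            yield SkipReason("No new files.")
            return
        for filename in filenames:
            # One run per file; run_key deduplicates repeated requests.
            yield RunRequest(run_key=filename)
        context.update_cursor(filenames[-1])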
[docs]def asset_sensor(\n asset_key: AssetKey,\n *,\n job_name: Optional[str] = None,\n name: Optional[str] = None,\n minimum_interval_seconds: Optional[int] = None,\n description: Optional[str] = None,\n job: Optional[ExecutableDefinition] = None,\n jobs: Optional[Sequence[ExecutableDefinition]] = None,\n default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,\n required_resource_keys: Optional[Set[str]] = None,\n) -> Callable[[AssetMaterializationFunction,], AssetSensorDefinition,]:\n """Creates an asset sensor where the decorated function is used as the asset sensor's evaluation\n function.\n\n If the asset has been materialized multiple times between since the last sensor tick, the\n evaluation function will only be invoked once, with the latest materialization.\n\n The decorated function may:\n\n 1. Return a `RunRequest` object.\n 2. Return a list of `RunRequest` objects.\n 3. Return a `SkipReason` object, providing a descriptive message of why no runs were requested.\n 4. Return nothing (skipping without providing a reason)\n 5. Yield a `SkipReason` or yield one or more `RunRequest` objects.\n\n Takes a :py:class:`~dagster.SensorEvaluationContext` and an EventLogEntry corresponding to an\n AssetMaterialization event.\n\n Args:\n asset_key (AssetKey): The asset_key this sensor monitors.\n name (Optional[str]): The name of the sensor. Defaults to the name of the decorated\n function.\n minimum_interval_seconds (Optional[int]): The minimum number of seconds that will elapse\n between sensor evaluations.\n description (Optional[str]): A human-readable description of the sensor.\n job (Optional[Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]]): The\n job to be executed when the sensor fires.\n jobs (Optional[Sequence[Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]]]):\n (experimental) A list of jobs to be executed when the sensor fires.\n default_status (DefaultSensorStatus): Whether the sensor starts as running or not. The default\n status can be overridden from the Dagster UI or via the GraphQL API.\n\n\n Example:\n .. code-block:: python\n\n from dagster import AssetKey, EventLogEntry, SensorEvaluationContext, asset_sensor\n\n\n @asset_sensor(asset_key=AssetKey("my_table"), job=my_job)\n def my_asset_sensor(context: SensorEvaluationContext, asset_event: EventLogEntry):\n return RunRequest(\n run_key=context.cursor,\n run_config={\n "ops": {\n "read_materialization": {\n "config": {\n "asset_key": asset_event.dagster_event.asset_key.path,\n }\n }\n }\n },\n )\n """\n check.opt_str_param(name, "name")\n\n def inner(fn: AssetMaterializationFunction) -> AssetSensorDefinition:\n check.callable_param(fn, "fn")\n sensor_name = name or fn.__name__\n\n def _wrapped_fn(*args, **kwargs) -> Any:\n result = fn(*args, **kwargs)\n\n if inspect.isgenerator(result) or isinstance(result, list):\n for item in result:\n yield item\n elif isinstance(result, (RunRequest, SkipReason)):\n yield result\n\n elif isinstance(result, SensorResult):\n if result.cursor:\n raise DagsterInvariantViolationError(\n f"Error in asset sensor {sensor_name}: Sensor returned a SensorResult"\n " with a cursor value. The cursor is managed by the asset sensor and"\n " should not be modified by a user."\n )\n yield result\n\n elif result is not None:\n raise DagsterInvariantViolationError(\n f"Error in sensor {sensor_name}: Sensor unexpectedly returned output "\n f"{result} of type {type(result)}. 
Should only return SkipReason or "\n "RunRequest objects."\n )\n\n # Preserve any resource arguments from the underlying function, for when we inspect the\n # wrapped function later on\n _wrapped_fn = update_wrapper(_wrapped_fn, wrapped=fn)\n\n return AssetSensorDefinition(\n name=sensor_name,\n asset_key=asset_key,\n job_name=job_name,\n asset_materialization_fn=_wrapped_fn,\n minimum_interval_seconds=minimum_interval_seconds,\n description=description,\n job=job,\n jobs=jobs,\n default_status=default_status,\n required_resource_keys=required_resource_keys,\n )\n\n return inner
\n\n\n
[docs]@experimental\ndef multi_asset_sensor(\n monitored_assets: Union[Sequence[AssetKey], AssetSelection],\n *,\n job_name: Optional[str] = None,\n name: Optional[str] = None,\n minimum_interval_seconds: Optional[int] = None,\n description: Optional[str] = None,\n job: Optional[ExecutableDefinition] = None,\n jobs: Optional[Sequence[ExecutableDefinition]] = None,\n default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,\n request_assets: Optional[AssetSelection] = None,\n required_resource_keys: Optional[Set[str]] = None,\n) -> Callable[[MultiAssetMaterializationFunction,], MultiAssetSensorDefinition,]:\n """Creates an asset sensor that can monitor multiple assets.\n\n The decorated function is used as the asset sensor's evaluation\n function. The decorated function may:\n\n 1. Return a `RunRequest` object.\n 2. Return a list of `RunRequest` objects.\n 3. Return a `SkipReason` object, providing a descriptive message of why no runs were requested.\n 4. Return nothing (skipping without providing a reason)\n 5. Yield a `SkipReason` or yield one or more `RunRequest` objects.\n\n Takes a :py:class:`~dagster.MultiAssetSensorEvaluationContext`.\n\n Args:\n monitored_assets (Union[Sequence[AssetKey], AssetSelection]): The assets this\n sensor monitors. If an AssetSelection object is provided, it will only apply to assets\n within the Definitions that this sensor is part of.\n name (Optional[str]): The name of the sensor. Defaults to the name of the decorated\n function.\n minimum_interval_seconds (Optional[int]): The minimum number of seconds that will elapse\n between sensor evaluations.\n description (Optional[str]): A human-readable description of the sensor.\n job (Optional[Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]]): The\n job to be executed when the sensor fires.\n jobs (Optional[Sequence[Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]]]):\n (experimental) A list of jobs to be executed when the sensor fires.\n default_status (DefaultSensorStatus): Whether the sensor starts as running or not. The default\n status can be overridden from the Dagster UI or via the GraphQL API.\n request_assets (Optional[AssetSelection]): (Experimental) an asset selection to launch a run\n for if the sensor condition is met. This can be provided instead of specifying a job.\n """\n check.opt_str_param(name, "name")\n\n if not isinstance(monitored_assets, AssetSelection) and not (\n isinstance(monitored_assets, collections.abc.Sequence)\n and all(isinstance(el, AssetKey) for el in monitored_assets)\n ):\n check.failed(\n "The value passed to monitored_assets param must be either an AssetSelection"\n f" or a Sequence of AssetKeys, but was a {type(monitored_assets)}"\n )\n\n def inner(fn: MultiAssetMaterializationFunction) -> MultiAssetSensorDefinition:\n check.callable_param(fn, "fn")\n sensor_name = name or fn.__name__\n\n sensor_def = MultiAssetSensorDefinition(\n name=sensor_name,\n monitored_assets=monitored_assets,\n job_name=job_name,\n asset_materialization_fn=fn,\n minimum_interval_seconds=minimum_interval_seconds,\n description=description,\n job=job,\n jobs=jobs,\n default_status=default_status,\n request_assets=request_assets,\n required_resource_keys=required_resource_keys,\n )\n update_wrapper(sensor_def, wrapped=fn)\n return sensor_def\n\n return inner
\n
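The ``@multi_asset_sensor`` docstring above has no code example, so here is a sketch of monitoring two assets and firing only when both have new materializations. ``my_job`` is assumed to exist, and the cursor methods used on the context (``latest_materialization_records_by_key``, ``advance_all_cursors``) reflect the multi-asset sensor cursor API as I understand it.

.. code-block:: python

    from dagster import AssetKey, RunRequest, multi_asset_sensor


    @multi_asset_sensor(
        monitored_assets=[AssetKey("table_a"), AssetKey("table_b")],
        job=my_job,  # assumed to exist
    )
    def both_tables_updated_sensor(context):
        # Maps each monitored AssetKey to its latest unconsumed materialization record (or None).
        records = context.latest_materialization_records_by_key()
        if all(records.values()):
            # Mark these materializations as consumed so they are not re-processed next tick.
            context.advance_all_cursors()
            return RunRequest()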
", "current_page_name": "_modules/dagster/_core/definitions/decorators/sensor_decorator", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.decorators.sensor_decorator"}}, "definitions_class": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.definitions_class

\nfrom typing import (\n    TYPE_CHECKING,\n    Any,\n    Dict,\n    Iterable,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Type,\n    Union,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import deprecated, experimental, public\nfrom dagster._config.pythonic_config import (\n    attach_resource_id_to_key_mapping,\n)\nfrom dagster._core.definitions.asset_checks import AssetChecksDefinition\nfrom dagster._core.definitions.asset_graph import InternalAssetGraph\nfrom dagster._core.definitions.events import AssetKey, CoercibleToAssetKey\nfrom dagster._core.definitions.executor_definition import ExecutorDefinition\nfrom dagster._core.definitions.logger_definition import LoggerDefinition\nfrom dagster._core.errors import DagsterInvariantViolationError\nfrom dagster._core.execution.build_resources import wrap_resources_for_execution\nfrom dagster._core.execution.with_resources import with_resources\nfrom dagster._core.executor.base import Executor\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._utils.cached_method import cached_method\n\nfrom .assets import AssetsDefinition, SourceAsset\nfrom .cacheable_assets import CacheableAssetsDefinition\nfrom .decorators import repository\nfrom .job_definition import JobDefinition, default_job_io_manager\nfrom .partitioned_schedule import UnresolvedPartitionedAssetScheduleDefinition\nfrom .repository_definition import (\n    SINGLETON_REPOSITORY_NAME,\n    PendingRepositoryDefinition,\n    RepositoryDefinition,\n)\nfrom .schedule_definition import ScheduleDefinition\nfrom .sensor_definition import SensorDefinition\nfrom .unresolved_asset_job_definition import UnresolvedAssetJobDefinition\n\nif TYPE_CHECKING:\n    from dagster._core.storage.asset_value_loader import AssetValueLoader\n\n\n
[docs]@public\n@experimental\ndef create_repository_using_definitions_args(\n name: str,\n assets: Optional[\n Iterable[Union[AssetsDefinition, SourceAsset, CacheableAssetsDefinition]]\n ] = None,\n schedules: Optional[\n Iterable[Union[ScheduleDefinition, UnresolvedPartitionedAssetScheduleDefinition]]\n ] = None,\n sensors: Optional[Iterable[SensorDefinition]] = None,\n jobs: Optional[Iterable[Union[JobDefinition, UnresolvedAssetJobDefinition]]] = None,\n resources: Optional[Mapping[str, Any]] = None,\n executor: Optional[Union[ExecutorDefinition, Executor]] = None,\n loggers: Optional[Mapping[str, LoggerDefinition]] = None,\n asset_checks: Optional[Iterable[AssetChecksDefinition]] = None,\n) -> Union[RepositoryDefinition, PendingRepositoryDefinition]:\n """Create a named repository using the same arguments as :py:class:`Definitions`. In older\n versions of Dagster, repositories were the mechanism for organizing assets, schedules, sensors,\n and jobs. There could be many repositories per code location. This was a complicated ontology but\n gave users a way to organize code locations that contained large numbers of heterogenous definitions.\n\n As a stopgap for those who both want to 1) use the new :py:class:`Definitions` API and 2) but still\n want multiple logical groups of assets in the same code location, we have introduced this function.\n\n Example usage:\n\n .. code-block:: python\n\n named_repo = create_repository_using_definitions_args(\n name="a_repo",\n assets=[asset_one, asset_two],\n schedules=[a_schedule],\n sensors=[a_sensor],\n jobs=[a_job],\n resources={\n "a_resource": some_resource,\n }\n )\n\n """\n return _create_repository_using_definitions_args(\n name=name,\n assets=assets,\n schedules=schedules,\n sensors=sensors,\n jobs=jobs,\n resources=resources,\n executor=executor,\n loggers=loggers,\n asset_checks=asset_checks,\n )
\n\n\nclass _AttachedObjects(NamedTuple):\n jobs: Iterable[Union[JobDefinition, UnresolvedAssetJobDefinition]]\n schedules: Iterable[Union[ScheduleDefinition, UnresolvedPartitionedAssetScheduleDefinition]]\n sensors: Iterable[SensorDefinition]\n\n\ndef _io_manager_needs_replacement(job: JobDefinition, resource_defs: Mapping[str, Any]) -> bool:\n """Explicitly replace the default IO manager in jobs that don't specify one, if a top-level\n I/O manager is provided to Definitions.\n """\n return (\n job.resource_defs.get("io_manager") == default_job_io_manager\n and "io_manager" in resource_defs\n )\n\n\ndef _jobs_which_will_have_io_manager_replaced(\n jobs: Optional[Iterable[Union[JobDefinition, UnresolvedAssetJobDefinition]]],\n resource_defs: Mapping[str, Any],\n) -> List[Union[JobDefinition, UnresolvedAssetJobDefinition]]:\n """Returns whether any jobs will have their I/O manager replaced by an `io_manager` override from\n the top-level `resource_defs` provided to `Definitions` in 1.3. We will warn users if this is\n the case.\n """\n jobs = jobs or []\n return [\n job\n for job in jobs\n if isinstance(job, JobDefinition) and _io_manager_needs_replacement(job, resource_defs)\n ]\n\n\ndef _attach_resources_to_jobs_and_instigator_jobs(\n jobs: Optional[Iterable[Union[JobDefinition, UnresolvedAssetJobDefinition]]],\n schedules: Optional[\n Iterable[Union[ScheduleDefinition, UnresolvedPartitionedAssetScheduleDefinition]]\n ],\n sensors: Optional[Iterable[SensorDefinition]],\n resource_defs: Mapping[str, Any],\n) -> _AttachedObjects:\n """Given a list of jobs, schedules, and sensors along with top-level resource definitions,\n attach the resource definitions to the jobs, schedules, and sensors which require them.\n """\n jobs = jobs or []\n schedules = schedules or []\n sensors = sensors or []\n\n # Add jobs in schedules and sensors as well\n jobs = [\n *jobs,\n *[\n schedule.job\n for schedule in schedules\n if isinstance(schedule, ScheduleDefinition)\n and schedule.has_loadable_target()\n and isinstance(schedule.job, (JobDefinition, UnresolvedAssetJobDefinition))\n ],\n *[\n job\n for sensor in sensors\n if sensor.has_loadable_targets()\n for job in sensor.jobs\n if isinstance(job, (JobDefinition, UnresolvedAssetJobDefinition))\n ],\n ]\n # Dedupe\n jobs = list({id(job): job for job in jobs}.values())\n\n # Find unsatisfied jobs\n unsatisfied_jobs = [\n job\n for job in jobs\n if isinstance(job, JobDefinition)\n and (\n job.is_missing_required_resources() or _io_manager_needs_replacement(job, resource_defs)\n )\n ]\n\n # Create a mapping of job id to a version of the job with the resource defs bound\n unsatisfied_job_to_resource_bound_job = {\n id(job): job.with_top_level_resources(\n {\n **resource_defs,\n **job.resource_defs,\n # special case for IO manager - the job-level IO manager does not take precedence\n # if it is the default and a top-level IO manager is provided\n **(\n {"io_manager": resource_defs["io_manager"]}\n if _io_manager_needs_replacement(job, resource_defs)\n else {}\n ),\n }\n )\n for job in jobs\n if job in unsatisfied_jobs\n }\n\n # Update all jobs to use the resource bound version\n jobs_with_resources = [\n unsatisfied_job_to_resource_bound_job[id(job)] if job in unsatisfied_jobs else job\n for job in jobs\n ]\n\n # Update all schedules and sensors to use the resource bound version\n updated_schedules = [\n (\n schedule.with_updated_job(unsatisfied_job_to_resource_bound_job[id(schedule.job)])\n if (\n isinstance(schedule, ScheduleDefinition)\n and 
schedule.has_loadable_target()\n and schedule.job in unsatisfied_jobs\n )\n else schedule\n )\n for schedule in schedules\n ]\n updated_sensors = [\n (\n sensor.with_updated_jobs(\n [\n (\n unsatisfied_job_to_resource_bound_job[id(job)]\n if job in unsatisfied_jobs\n else job\n )\n for job in sensor.jobs\n ]\n )\n if sensor.has_loadable_targets() and any(job in unsatisfied_jobs for job in sensor.jobs)\n else sensor\n )\n for sensor in sensors\n ]\n\n return _AttachedObjects(jobs_with_resources, updated_schedules, updated_sensors)\n\n\ndef _create_repository_using_definitions_args(\n name: str,\n assets: Optional[\n Iterable[Union[AssetsDefinition, SourceAsset, CacheableAssetsDefinition]]\n ] = None,\n schedules: Optional[\n Iterable[Union[ScheduleDefinition, UnresolvedPartitionedAssetScheduleDefinition]]\n ] = None,\n sensors: Optional[Iterable[SensorDefinition]] = None,\n jobs: Optional[Iterable[Union[JobDefinition, UnresolvedAssetJobDefinition]]] = None,\n resources: Optional[Mapping[str, Any]] = None,\n executor: Optional[Union[ExecutorDefinition, Executor]] = None,\n loggers: Optional[Mapping[str, LoggerDefinition]] = None,\n asset_checks: Optional[Iterable[AssetChecksDefinition]] = None,\n):\n check.opt_iterable_param(\n assets, "assets", (AssetsDefinition, SourceAsset, CacheableAssetsDefinition)\n )\n check.opt_iterable_param(\n schedules, "schedules", (ScheduleDefinition, UnresolvedPartitionedAssetScheduleDefinition)\n )\n check.opt_iterable_param(sensors, "sensors", SensorDefinition)\n check.opt_iterable_param(jobs, "jobs", (JobDefinition, UnresolvedAssetJobDefinition))\n\n check.opt_inst_param(executor, "executor", (ExecutorDefinition, Executor))\n executor_def = (\n executor\n if isinstance(executor, ExecutorDefinition) or executor is None\n else ExecutorDefinition.hardcoded_executor(executor)\n )\n\n # Generate a mapping from each top-level resource instance ID to its resource key\n resource_key_mapping = {id(v): k for k, v in resources.items()} if resources else {}\n\n # Provide this mapping to each resource instance so that it can be used to resolve\n # nested resources\n resources_with_key_mapping = (\n {\n k: attach_resource_id_to_key_mapping(v, resource_key_mapping)\n for k, v in resources.items()\n }\n if resources\n else {}\n )\n\n resource_defs = wrap_resources_for_execution(resources_with_key_mapping)\n\n check.opt_mapping_param(loggers, "loggers", key_type=str, value_type=LoggerDefinition)\n\n # Binds top-level resources to jobs and any jobs attached to schedules or sensors\n (\n jobs_with_resources,\n schedules_with_resources,\n sensors_with_resources,\n ) = _attach_resources_to_jobs_and_instigator_jobs(jobs, schedules, sensors, resource_defs)\n\n @repository(\n name=name,\n default_executor_def=executor_def,\n default_logger_defs=loggers,\n _top_level_resources=resource_defs,\n _resource_key_mapping=resource_key_mapping,\n )\n def created_repo():\n return [\n *with_resources(assets or [], resource_defs),\n *with_resources(asset_checks or [], resource_defs),\n *(schedules_with_resources),\n *(sensors_with_resources),\n *(jobs_with_resources),\n ]\n\n return created_repo\n\n\n@deprecated(\n breaking_version="2.0",\n additional_warn_text=(\n "Instantiations can be removed. Since it's behavior is now the default, this class is now a"\n " no-op."\n ),\n)\nclass BindResourcesToJobs(list):\n """Used to instruct Dagster to bind top-level resources to jobs and any jobs attached to schedules\n and sensors. Now deprecated since this behavior is the default.\n """\n\n\n
[docs]class Definitions:\n """A set of definitions explicitly available and loadable by Dagster tools.\n\n Parameters:\n assets (Optional[Iterable[Union[AssetsDefinition, SourceAsset, CacheableAssetsDefinition]]]):\n A list of assets. Assets can be created by annotating\n a function with :py:func:`@asset <asset>` or\n :py:func:`@observable_source_asset <observable_source_asset>`.\n Or they can by directly instantiating :py:class:`AssetsDefinition`,\n :py:class:`SourceAsset`, or :py:class:`CacheableAssetsDefinition`.\n\n asset_checks (Optional[Iterable[AssetChecksDefinition]]):\n A list of asset checks.\n\n schedules (Optional[Iterable[Union[ScheduleDefinition, UnresolvedPartitionedAssetScheduleDefinition]]]):\n List of schedules.\n\n sensors (Optional[Iterable[SensorDefinition]]):\n List of sensors, typically created with :py:func:`@sensor <sensor>`.\n\n jobs (Optional[Iterable[Union[JobDefinition, UnresolvedAssetJobDefinition]]]):\n List of jobs. Typically created with :py:func:`define_asset_job <define_asset_job>`\n or with :py:func:`@job <job>` for jobs defined in terms of ops directly.\n Jobs created with :py:func:`@job <job>` must already have resources bound\n at job creation time. They do not respect the `resources` argument here.\n\n resources (Optional[Mapping[str, Any]]): Dictionary of resources to bind to assets.\n The resources dictionary takes raw Python objects,\n not just instances of :py:class:`ResourceDefinition`. If that raw object inherits from\n :py:class:`IOManager`, it gets coerced to an :py:class:`IOManagerDefinition`.\n Any other object is coerced to a :py:class:`ResourceDefinition`.\n These resources will be automatically bound\n to any assets passed to this Definitions instance using\n :py:func:`with_resources <with_resources>`. Assets passed to Definitions with\n resources already bound using :py:func:`with_resources <with_resources>` will\n override this dictionary.\n\n executor (Optional[Union[ExecutorDefinition, Executor]]):\n Default executor for jobs. Individual jobs can override this and define their own executors\n by setting the executor on :py:func:`@job <job>` or :py:func:`define_asset_job <define_asset_job>`\n explicitly. This executor will also be used for materializing assets directly\n outside of the context of jobs. If an :py:class:`Executor` is passed, it is coerced into\n an :py:class:`ExecutorDefinition`.\n\n loggers (Optional[Mapping[str, LoggerDefinition]):\n Default loggers for jobs. Individual jobs\n can define their own loggers by setting them explictly.\n\n Example usage:\n\n .. code-block:: python\n\n defs = Definitions(\n assets=[asset_one, asset_two],\n schedules=[a_schedule],\n sensors=[a_sensor],\n jobs=[a_job],\n resources={\n "a_resource": some_resource,\n },\n asset_checks=[asset_one_check_one]\n )\n\n Dagster separates user-defined code from system tools such the web server and\n the daemon. Rather than loading code directly into process, a tool such as the\n webserver interacts with user-defined code over a serialization boundary.\n\n These tools must be able to locate and load this code when they start. 
Via CLI\n arguments or config, they specify a Python module to inspect.\n\n A Python module is loadable by Dagster tools if there is a top-level variable\n that is an instance of :py:class:`Definitions`.\n\n Before the introduction of :py:class:`Definitions`,\n :py:func:`@repository <repository>` was the API for organizing defintions.\n :py:class:`Definitions` provides a few conveniences for dealing with resources\n that do not apply to old-style :py:func:`@repository <repository>` declarations:\n\n * It takes a dictionary of top-level resources which are automatically bound\n (via :py:func:`with_resources <with_resources>`) to any asset passed to it.\n If you need to apply different resources to different assets, use legacy\n :py:func:`@repository <repository>` and use\n :py:func:`with_resources <with_resources>` as before.\n * The resources dictionary takes raw Python objects, not just instances\n of :py:class:`ResourceDefinition`. If that raw object inherits from\n :py:class:`IOManager`, it gets coerced to an :py:class:`IOManagerDefinition`.\n Any other object is coerced to a :py:class:`ResourceDefinition`.\n """\n\n def __init__(\n self,\n assets: Optional[\n Iterable[Union[AssetsDefinition, SourceAsset, CacheableAssetsDefinition]]\n ] = None,\n schedules: Optional[\n Iterable[Union[ScheduleDefinition, UnresolvedPartitionedAssetScheduleDefinition]]\n ] = None,\n sensors: Optional[Iterable[SensorDefinition]] = None,\n jobs: Optional[Iterable[Union[JobDefinition, UnresolvedAssetJobDefinition]]] = None,\n resources: Optional[Mapping[str, Any]] = None,\n executor: Optional[Union[ExecutorDefinition, Executor]] = None,\n loggers: Optional[Mapping[str, LoggerDefinition]] = None,\n asset_checks: Optional[Iterable[AssetChecksDefinition]] = None,\n ):\n self._created_pending_or_normal_repo = _create_repository_using_definitions_args(\n name=SINGLETON_REPOSITORY_NAME,\n assets=assets,\n schedules=schedules,\n sensors=sensors,\n jobs=jobs,\n resources=resources,\n executor=executor,\n loggers=loggers,\n asset_checks=asset_checks,\n )\n\n
[docs] @public\n def get_job_def(self, name: str) -> JobDefinition:\n """Get a job definition by name. If you passed in an :py:class:`UnresolvedAssetJobDefinition`\n (return value of :py:func:`define_asset_job`) it will be resolved to a :py:class:`JobDefinition` when returned\n from this function.\n """\n check.str_param(name, "name")\n return self.get_repository_def().get_job(name)
\n\n
[docs] @public\n def get_sensor_def(self, name: str) -> SensorDefinition:\n """Get a sensor definition by name."""\n check.str_param(name, "name")\n return self.get_repository_def().get_sensor_def(name)
\n\n
[docs] @public\n def get_schedule_def(self, name: str) -> ScheduleDefinition:\n """Get a schedule definition by name."""\n check.str_param(name, "name")\n return self.get_repository_def().get_schedule_def(name)
\n\n
[docs] @public\n def load_asset_value(\n self,\n asset_key: CoercibleToAssetKey,\n *,\n python_type: Optional[Type] = None,\n instance: Optional[DagsterInstance] = None,\n partition_key: Optional[str] = None,\n metadata: Optional[Dict[str, Any]] = None,\n ) -> object:\n """Load the contents of an asset as a Python object.\n\n Invokes `load_input` on the :py:class:`IOManager` associated with the asset.\n\n If you want to load the values of multiple assets, it's more efficient to use\n :py:meth:`~dagster.Definitions.get_asset_value_loader`, which avoids spinning up\n resources separately for each asset.\n\n Args:\n asset_key (Union[AssetKey, Sequence[str], str]): The key of the asset to load.\n python_type (Optional[Type]): The python type to load the asset as. This is what will\n be returned inside `load_input` by `context.dagster_type.typing_type`.\n partition_key (Optional[str]): The partition of the asset to load.\n metadata (Optional[Dict[str, Any]]): Input metadata to pass to the :py:class:`IOManager`\n (is equivalent to setting the metadata argument in `In` or `AssetIn`).\n\n Returns:\n The contents of an asset as a Python object.\n """\n return self.get_repository_def().load_asset_value(\n asset_key=asset_key,\n python_type=python_type,\n instance=instance,\n partition_key=partition_key,\n metadata=metadata,\n )
\n\n
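A brief usage sketch for ``load_asset_value``: the asset key ``"my_table"`` and the pandas type are illustrative, ``defs`` is a ``Definitions`` instance like the one constructed in the class docstring above, and a plain string is accepted because the parameter is ``CoercibleToAssetKey``.

.. code-block:: python

    import pandas as pd  # illustrative; any type the asset's IOManager can produce

    # Loads the stored value of the asset via its IOManager's `load_input`.
    df = defs.load_asset_value("my_table", python_type=pd.DataFrame)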
[docs] @public\n def get_asset_value_loader(\n self, instance: Optional[DagsterInstance] = None\n ) -> "AssetValueLoader":\n """Returns an object that can load the contents of assets as Python objects.\n\n Invokes `load_input` on the :py:class:`IOManager` associated with the assets. Avoids\n spinning up resources separately for each asset.\n\n Usage:\n\n .. code-block:: python\n\n with defs.get_asset_value_loader() as loader:\n asset1 = loader.load_asset_value("asset1")\n asset2 = loader.load_asset_value("asset2")\n """\n return self.get_repository_def().get_asset_value_loader(\n instance=instance,\n )
\n\n def get_all_job_defs(self) -> Sequence[JobDefinition]:\n """Get all the Job definitions in the code location."""\n return self.get_repository_def().get_all_jobs()\n\n def has_implicit_global_asset_job_def(self) -> bool:\n return self.get_repository_def().has_implicit_global_asset_job_def()\n\n def get_implicit_global_asset_job_def(self) -> JobDefinition:\n """A useful conveninence method when there is a single defined global asset job.\n This occurs when all assets in the code location use a single partitioning scheme.\n If there are multiple partitioning schemes you must use get_implicit_job_def_for_assets\n instead to access to the correct implicit asset one.\n """\n return self.get_repository_def().get_implicit_global_asset_job_def()\n\n def get_implicit_job_def_for_assets(\n self, asset_keys: Iterable[AssetKey]\n ) -> Optional[JobDefinition]:\n return self.get_repository_def().get_implicit_job_def_for_assets(asset_keys)\n\n def get_assets_def(self, key: CoercibleToAssetKey) -> AssetsDefinition:\n asset_key = AssetKey.from_coercible(key)\n for assets_def in self.get_asset_graph().assets:\n if asset_key in assets_def.keys:\n return assets_def\n\n raise DagsterInvariantViolationError(f"Could not find asset {asset_key}")\n\n @cached_method\n def get_repository_def(self) -> RepositoryDefinition:\n """Definitions is implemented by wrapping RepositoryDefinition. Get that underlying object\n in order to access an functionality which is not exposed on Definitions. This method\n also resolves a PendingRepositoryDefinition to a RepositoryDefinition.\n """\n return (\n self._created_pending_or_normal_repo.compute_repository_definition()\n if isinstance(self._created_pending_or_normal_repo, PendingRepositoryDefinition)\n else self._created_pending_or_normal_repo\n )\n\n def get_inner_repository_for_loading_process(\n self,\n ) -> Union[RepositoryDefinition, PendingRepositoryDefinition]:\n """This method is used internally to access the inner repository during the loading process\n at CLI entry points. We explicitly do not want to resolve the pending repo because the entire\n point is to defer that resolution until later.\n """\n return self._created_pending_or_normal_repo\n\n def get_asset_graph(self) -> InternalAssetGraph:\n """Get the AssetGraph for this set of definitions."""\n return self.get_repository_def().asset_graph
\n
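A small end-to-end sketch tying the accessors above together (op, job, and schedule names are illustrative): construct a ``Definitions`` object, then resolve definitions by name through the wrapped repository.

.. code-block:: python

    from dagster import Definitions, ScheduleDefinition, job, op


    @op
    def ping():
        pass


    @job
    def ping_job():
        ping()


    defs = Definitions(
        jobs=[ping_job],
        schedules=[ScheduleDefinition(name="nightly_ping", cron_schedule="0 0 * * *", job=ping_job)],
    )

    # The accessors defined above resolve definitions by name.
    assert defs.get_job_def("ping_job").name == "ping_job"
    assert defs.get_schedule_def("nightly_ping").cron_schedule == "0 0 * * *"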
", "current_page_name": "_modules/dagster/_core/definitions/definitions_class", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.definitions_class"}, "dependency": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.dependency

\nfrom abc import ABC, abstractmethod\nfrom collections import defaultdict\nfrom enum import Enum\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    DefaultDict,\n    Dict,\n    Iterable,\n    Iterator,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n    Type,\n    Union,\n    cast,\n)\n\nfrom typing_extensions import TypeAlias, TypeVar\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, public\nfrom dagster._core.definitions.policy import RetryPolicy\nfrom dagster._core.errors import DagsterInvalidDefinitionError\nfrom dagster._serdes.serdes import (\n    whitelist_for_serdes,\n)\nfrom dagster._utils import hash_collection\n\nfrom .hook_definition import HookDefinition\nfrom .input import FanInInputPointer, InputDefinition, InputMapping, InputPointer\nfrom .output import OutputDefinition\nfrom .utils import DEFAULT_OUTPUT, struct_to_string, validate_tags\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.op_definition import OpDefinition\n\n    from .asset_layer import AssetLayer\n    from .composition import MappedInputPlaceholder\n    from .graph_definition import GraphDefinition\n    from .node_definition import NodeDefinition\n    from .resource_requirement import ResourceRequirement\n\nT_DependencyKey = TypeVar("T_DependencyKey", str, "NodeInvocation")\nDependencyMapping: TypeAlias = Mapping[T_DependencyKey, Mapping[str, "IDependencyDefinition"]]\n\n\n
[docs]class NodeInvocation(\n NamedTuple(\n "Node",\n [\n ("name", PublicAttr[str]),\n ("alias", PublicAttr[Optional[str]]),\n ("tags", PublicAttr[Mapping[str, Any]]),\n ("hook_defs", PublicAttr[AbstractSet[HookDefinition]]),\n ("retry_policy", PublicAttr[Optional[RetryPolicy]]),\n ],\n )\n):\n """Identifies an instance of a node in a graph dependency structure.\n\n Args:\n name (str): Name of the node of which this is an instance.\n alias (Optional[str]): Name specific to this instance of the node. Necessary when there are\n multiple instances of the same node.\n tags (Optional[Dict[str, Any]]): Optional tags values to extend or override those\n set on the node definition.\n hook_defs (Optional[AbstractSet[HookDefinition]]): A set of hook definitions applied to the\n node instance.\n\n Examples:\n In general, users should prefer not to construct this class directly or use the\n :py:class:`JobDefinition` API that requires instances of this class. Instead, use the\n :py:func:`@job <job>` API:\n\n .. code-block:: python\n\n from dagster import job\n\n @job\n def my_job():\n other_name = some_op.alias('other_name')\n some_graph(other_name(some_op))\n\n """\n\n def __new__(\n cls,\n name: str,\n alias: Optional[str] = None,\n tags: Optional[Mapping[str, str]] = None,\n hook_defs: Optional[AbstractSet[HookDefinition]] = None,\n retry_policy: Optional[RetryPolicy] = None,\n ):\n return super().__new__(\n cls,\n name=check.str_param(name, "name"),\n alias=check.opt_str_param(alias, "alias"),\n tags=check.opt_mapping_param(tags, "tags", value_type=str, key_type=str),\n hook_defs=check.opt_set_param(hook_defs, "hook_defs", of_type=HookDefinition),\n retry_policy=check.opt_inst_param(retry_policy, "retry_policy", RetryPolicy),\n )\n\n # Needs to be hashable because this class is used as a key in dependencies dicts\n def __hash__(self) -> int:\n if not hasattr(self, "_hash"):\n self._hash = hash_collection(self)\n return self._hash
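For contrast with the ``@job`` aliasing example in the ``NodeInvocation`` docstring, a sketch of the explicit dependency-dictionary form in which ``NodeInvocation`` keys name aliased node instances (op and graph names here are illustrative). This is also why the class must be hashable, as noted above.

.. code-block:: python

    from dagster import DependencyDefinition, GraphDefinition, NodeInvocation, op


    @op
    def return_one():
        return 1


    @op
    def add_one(num):
        return num + 1


    my_graph = GraphDefinition(
        name="two_adders",
        node_defs=[return_one, add_one],
        dependencies={
            # Two aliased instances of the same op, each fed by `return_one`.
            NodeInvocation("add_one", alias="adder_one"): {"num": DependencyDefinition("return_one")},
            NodeInvocation("add_one", alias="adder_two"): {"num": DependencyDefinition("return_one")},
        },
    )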
\n\n\nclass Node(ABC):\n """Node invocation within a graph. Identified by its name inside the graph."""\n\n name: str\n definition: "NodeDefinition"\n graph_definition: "GraphDefinition"\n _additional_tags: Mapping[str, str]\n _hook_defs: AbstractSet[HookDefinition]\n _retry_policy: Optional[RetryPolicy]\n _inputs: Mapping[str, "NodeInput"]\n _outputs: Mapping[str, "NodeOutput"]\n\n def __init__(\n self,\n name: str,\n definition: "NodeDefinition",\n graph_definition: "GraphDefinition",\n tags: Optional[Mapping[str, str]] = None,\n hook_defs: Optional[AbstractSet[HookDefinition]] = None,\n retry_policy: Optional[RetryPolicy] = None,\n ):\n from .graph_definition import GraphDefinition\n from .node_definition import NodeDefinition\n\n self.name = check.str_param(name, "name")\n self.definition = check.inst_param(definition, "definition", NodeDefinition)\n self.graph_definition = check.inst_param(\n graph_definition,\n "graph_definition",\n GraphDefinition,\n )\n self._additional_tags = validate_tags(tags)\n self._hook_defs = check.opt_set_param(hook_defs, "hook_defs", of_type=HookDefinition)\n self._retry_policy = check.opt_inst_param(retry_policy, "retry_policy", RetryPolicy)\n\n self._inputs = {\n name: NodeInput(self, input_def)\n for name, input_def in self.definition.input_dict.items()\n }\n self._outputs = {\n name: NodeOutput(self, output_def)\n for name, output_def in self.definition.output_dict.items()\n }\n\n def inputs(self) -> Iterable["NodeInput"]:\n return self._inputs.values()\n\n def outputs(self) -> Iterable["NodeOutput"]:\n return self._outputs.values()\n\n def get_input(self, name: str) -> "NodeInput":\n check.str_param(name, "name")\n return self._inputs[name]\n\n def get_output(self, name: str) -> "NodeOutput":\n check.str_param(name, "name")\n return self._outputs[name]\n\n def has_input(self, name: str) -> bool:\n return self.definition.has_input(name)\n\n def input_def_named(self, name: str) -> InputDefinition:\n return self.definition.input_def_named(name)\n\n def has_output(self, name: str) -> bool:\n return self.definition.has_output(name)\n\n def output_def_named(self, name: str) -> OutputDefinition:\n return self.definition.output_def_named(name)\n\n @property\n def input_dict(self) -> Mapping[str, InputDefinition]:\n return self.definition.input_dict\n\n @property\n def output_dict(self) -> Mapping[str, OutputDefinition]:\n return self.definition.output_dict\n\n @property\n def tags(self) -> Mapping[str, str]:\n return {**self.definition.tags, **self._additional_tags}\n\n def container_maps_input(self, input_name: str) -> bool:\n return (\n self.graph_definition.input_mapping_for_pointer(InputPointer(self.name, input_name))\n is not None\n )\n\n def container_mapped_input(self, input_name: str) -> InputMapping:\n mapping = self.graph_definition.input_mapping_for_pointer(\n InputPointer(self.name, input_name)\n )\n if mapping is None:\n check.failed(\n f"container does not map input {input_name}, check container_maps_input first"\n )\n return mapping\n\n def container_maps_fan_in_input(self, input_name: str, fan_in_index: int) -> bool:\n return (\n self.graph_definition.input_mapping_for_pointer(\n FanInInputPointer(self.name, input_name, fan_in_index)\n )\n is not None\n )\n\n def container_mapped_fan_in_input(self, input_name: str, fan_in_index: int) -> InputMapping:\n mapping = self.graph_definition.input_mapping_for_pointer(\n FanInInputPointer(self.name, input_name, fan_in_index)\n )\n if mapping is None:\n check.failed(\n f"container does not map fan-in 
{input_name} idx {fan_in_index}, check "\n "container_maps_fan_in_input first"\n )\n\n return mapping\n\n @property\n def hook_defs(self) -> AbstractSet[HookDefinition]:\n return self._hook_defs\n\n @property\n def retry_policy(self) -> Optional[RetryPolicy]:\n return self._retry_policy\n\n @abstractmethod\n def describe_node(self) -> str: ...\n\n @abstractmethod\n def get_resource_requirements(\n self,\n outer_container: "GraphDefinition",\n parent_handle: Optional["NodeHandle"] = None,\n asset_layer: Optional["AssetLayer"] = None,\n ) -> Iterator["ResourceRequirement"]: ...\n\n\nclass GraphNode(Node):\n definition: "GraphDefinition"\n\n def __init__(\n self,\n name: str,\n definition: "GraphDefinition",\n graph_definition: "GraphDefinition",\n tags: Optional[Mapping[str, str]] = None,\n hook_defs: Optional[AbstractSet[HookDefinition]] = None,\n retry_policy: Optional[RetryPolicy] = None,\n ):\n from .graph_definition import GraphDefinition\n\n check.inst_param(definition, "definition", GraphDefinition)\n super().__init__(name, definition, graph_definition, tags, hook_defs, retry_policy)\n\n def get_resource_requirements(\n self,\n outer_container: "GraphDefinition",\n parent_handle: Optional["NodeHandle"] = None,\n asset_layer: Optional["AssetLayer"] = None,\n ) -> Iterator["ResourceRequirement"]:\n cur_node_handle = NodeHandle(self.name, parent_handle)\n\n for node in self.definition.node_dict.values():\n yield from node.get_resource_requirements(\n asset_layer=asset_layer,\n outer_container=self.definition,\n parent_handle=cur_node_handle,\n )\n\n def describe_node(self) -> str:\n return f"graph '{self.name}'"\n\n\nclass OpNode(Node):\n definition: "OpDefinition"\n\n def __init__(\n self,\n name: str,\n definition: "OpDefinition",\n graph_definition: "GraphDefinition",\n tags: Optional[Mapping[str, str]] = None,\n hook_defs: Optional[AbstractSet[HookDefinition]] = None,\n retry_policy: Optional[RetryPolicy] = None,\n ):\n from .op_definition import OpDefinition\n\n check.inst_param(definition, "definition", OpDefinition)\n super().__init__(name, definition, graph_definition, tags, hook_defs, retry_policy)\n\n def get_resource_requirements(\n self,\n outer_container: "GraphDefinition",\n parent_handle: Optional["NodeHandle"] = None,\n asset_layer: Optional["AssetLayer"] = None,\n ) -> Iterator["ResourceRequirement"]:\n from .resource_requirement import InputManagerRequirement\n\n cur_node_handle = NodeHandle(self.name, parent_handle)\n\n for requirement in self.definition.get_resource_requirements(\n (cur_node_handle, asset_layer)\n ):\n # If requirement is a root input manager requirement, but the corresponding node has an upstream output, then ignore the requirement.\n if (\n isinstance(requirement, InputManagerRequirement)\n and outer_container.dependency_structure.has_deps(\n NodeInput(self, self.definition.input_def_named(requirement.input_name))\n )\n and requirement.root_input\n ):\n continue\n yield requirement\n for hook_def in self.hook_defs:\n yield from hook_def.get_resource_requirements(self.describe_node())\n\n def describe_node(self) -> str:\n return f"op '{self.name}'"\n\n\n@whitelist_for_serdes(storage_name="SolidHandle")\nclass NodeHandle(NamedTuple("_NodeHandle", [("name", str), ("parent", Optional["NodeHandle"])])):\n """A structured object to identify nodes in the potentially recursive graph structure."""\n\n def __new__(cls, name: str, parent: Optional["NodeHandle"]):\n return super(NodeHandle, cls).__new__(\n cls,\n check.str_param(name, "name"),\n 
check.opt_inst_param(parent, "parent", NodeHandle),\n )\n\n def __str__(self):\n return self.to_string()\n\n @property\n def root(self):\n if self.parent:\n return self.parent.root\n else:\n return self\n\n @property\n def path(self) -> Sequence[str]:\n """Return a list representation of the handle.\n\n Inverse of NodeHandle.from_path.\n\n Returns:\n List[str]:\n """\n path: List[str] = []\n cur = self\n while cur:\n path.append(cur.name)\n cur = cur.parent\n path.reverse()\n return path\n\n def to_string(self) -> str:\n """Return a unique string representation of the handle.\n\n Inverse of NodeHandle.from_string.\n """\n return self.parent.to_string() + "." + self.name if self.parent else self.name\n\n def is_or_descends_from(self, handle: "NodeHandle") -> bool:\n """Check if the handle is or descends from another handle.\n\n Args:\n handle (NodeHandle): The handle to check against.\n\n Returns:\n bool:\n """\n check.inst_param(handle, "handle", NodeHandle)\n\n for idx in range(len(handle.path)):\n if idx >= len(self.path):\n return False\n if self.path[idx] != handle.path[idx]:\n return False\n return True\n\n def pop(self, ancestor: "NodeHandle") -> Optional["NodeHandle"]:\n """Return a copy of the handle with some of its ancestors pruned.\n\n Args:\n ancestor (NodeHandle): Handle to an ancestor of the current handle.\n\n Returns:\n NodeHandle:\n\n Example:\n .. code-block:: python\n\n handle = NodeHandle('baz', NodeHandle('bar', NodeHandle('foo', None)))\n ancestor = NodeHandle('bar', NodeHandle('foo', None))\n assert handle.pop(ancestor) == NodeHandle('baz', None)\n """\n check.inst_param(ancestor, "ancestor", NodeHandle)\n check.invariant(\n self.is_or_descends_from(ancestor),\n f"Handle {self.to_string()} does not descend from {ancestor.to_string()}",\n )\n\n return NodeHandle.from_path(self.path[len(ancestor.path) :])\n\n def with_ancestor(self, ancestor: Optional["NodeHandle"]) -> "NodeHandle":\n """Returns a copy of the handle with an ancestor grafted on.\n\n Args:\n ancestor (NodeHandle): Handle to the new ancestor.\n\n Returns:\n NodeHandle:\n\n Example:\n .. 
code-block:: python\n\n handle = NodeHandle('baz', NodeHandle('bar', NodeHandle('foo', None)))\n ancestor = NodeHandle('quux' None)\n assert handle.with_ancestor(ancestor) == NodeHandle(\n 'baz', NodeHandle('bar', NodeHandle('foo', NodeHandle('quux', None)))\n )\n """\n check.opt_inst_param(ancestor, "ancestor", NodeHandle)\n\n return NodeHandle.from_path([*(ancestor.path if ancestor else []), *self.path])\n\n @staticmethod\n def from_path(path: Sequence[str]) -> "NodeHandle":\n check.sequence_param(path, "path", of_type=str)\n\n cur: Optional["NodeHandle"] = None\n _path = list(path)\n while len(_path) > 0:\n cur = NodeHandle(name=_path.pop(0), parent=cur)\n\n if cur is None:\n check.failed(f"Invalid handle path {path}")\n\n return cur\n\n @staticmethod\n def from_string(handle_str: str) -> "NodeHandle":\n check.str_param(handle_str, "handle_str")\n\n path = handle_str.split(".")\n return NodeHandle.from_path(path)\n\n @classmethod\n def from_dict(cls, dict_repr: Mapping[str, Any]) -> "NodeHandle":\n """This method makes it possible to load a potentially nested NodeHandle after a\n roundtrip through json.loads(json.dumps(NodeHandle._asdict())).\n """\n check.dict_param(dict_repr, "dict_repr", key_type=str)\n check.invariant(\n "name" in dict_repr, "Dict representation of NodeHandle must have a 'name' key"\n )\n check.invariant(\n "parent" in dict_repr, "Dict representation of NodeHandle must have a 'parent' key"\n )\n\n if isinstance(dict_repr["parent"], (list, tuple)):\n parent = NodeHandle.from_dict(\n {\n "name": dict_repr["parent"][0],\n "parent": dict_repr["parent"][1],\n }\n )\n else:\n parent = dict_repr["parent"]\n\n return NodeHandle(name=dict_repr["name"], parent=parent)\n\n\nclass NodeInputHandle(\n NamedTuple("_NodeInputHandle", [("node_handle", NodeHandle), ("input_name", str)])\n):\n """A structured object to uniquely identify inputs in the potentially recursive graph structure."""\n\n\nclass NodeOutputHandle(\n NamedTuple("_NodeOutputHandle", [("node_handle", NodeHandle), ("output_name", str)])\n):\n """A structured object to uniquely identify outputs in the potentially recursive graph structure."""\n\n\nclass NodeInput(NamedTuple("_NodeInput", [("node", Node), ("input_def", InputDefinition)])):\n def __new__(cls, node: Node, input_def: InputDefinition):\n return super(NodeInput, cls).__new__(\n cls,\n check.inst_param(node, "node", Node),\n check.inst_param(input_def, "input_def", InputDefinition),\n )\n\n def _inner_str(self) -> str:\n return struct_to_string(\n "NodeInput",\n node_name=self.node.name,\n input_name=self.input_def.name,\n )\n\n def __str__(self):\n return self._inner_str()\n\n def __repr__(self):\n return self._inner_str()\n\n def __hash__(self):\n return hash((self.node.name, self.input_def.name))\n\n def __eq__(self, other: object) -> bool:\n return (\n isinstance(other, NodeInput)\n and self.node.name == other.node.name\n and self.input_def.name == other.input_def.name\n )\n\n @property\n def node_name(self) -> str:\n return self.node.name\n\n @property\n def input_name(self) -> str:\n return self.input_def.name\n\n\nclass NodeOutput(NamedTuple("_NodeOutput", [("node", Node), ("output_def", OutputDefinition)])):\n def __new__(cls, node: Node, output_def: OutputDefinition):\n return super(NodeOutput, cls).__new__(\n cls,\n check.inst_param(node, "node", Node),\n check.inst_param(output_def, "output_def", OutputDefinition),\n )\n\n def _inner_str(self) -> str:\n return struct_to_string(\n "NodeOutput",\n node_name=self.node.name,\n 
output_name=self.output_def.name,\n )\n\n def __str__(self):\n return self._inner_str()\n\n def __repr__(self):\n return self._inner_str()\n\n def __hash__(self) -> int:\n return hash((self.node.name, self.output_def.name))\n\n def __eq__(self, other: Any) -> bool:\n return self.node.name == other.node.name and self.output_def.name == other.output_def.name\n\n def describe(self) -> str:\n return f"{self.node_name}:{self.output_def.name}"\n\n @property\n def node_name(self) -> str:\n return self.node.name\n\n @property\n def is_dynamic(self) -> bool:\n return self.output_def.is_dynamic\n\n @property\n def output_name(self) -> str:\n return self.output_def.name\n\n\nclass DependencyType(Enum):\n DIRECT = "DIRECT"\n FAN_IN = "FAN_IN"\n DYNAMIC_COLLECT = "DYNAMIC_COLLECT"\n\n\nclass IDependencyDefinition(ABC):\n @abstractmethod\n def get_node_dependencies(self) -> Sequence["DependencyDefinition"]:\n pass\n\n @abstractmethod\n def is_fan_in(self) -> bool:\n """The result passed to the corresponding input will be a List made from different node outputs."""\n\n\n
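A small sketch exercising the ``NodeHandle`` helpers defined above (``path``, ``to_string``, ``from_string``, ``pop``); the handle names are arbitrary, and the import targets the internal module rendered on this page:

.. code-block:: python

    from dagster._core.definitions.dependency import NodeHandle

    # Handles form a linked list from leaf to root.
    handle = NodeHandle("baz", NodeHandle("bar", NodeHandle("foo", None)))

    assert handle.path == ["foo", "bar", "baz"]
    assert handle.to_string() == "foo.bar.baz"
    assert NodeHandle.from_string("foo.bar.baz") == handle
    assert handle.pop(NodeHandle.from_string("foo.bar")) == NodeHandle("baz", None)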
[docs]class DependencyDefinition(\n NamedTuple(\n "_DependencyDefinition", [("node", str), ("output", str), ("description", Optional[str])]\n ),\n IDependencyDefinition,\n):\n """Represents an edge in the DAG of nodes (ops or graphs) forming a job.\n\n This object is used at the leaves of a dictionary structure that represents the complete\n dependency structure of a job whose keys represent the dependent node and dependent\n input, so this object only contains information about the dependee.\n\n Concretely, if the input named 'input' of op_b depends on the output named 'result' of\n op_a, and the output named 'other_result' of graph_a, the structure will look as follows:\n\n .. code-block:: python\n\n dependency_structure = {\n 'my_downstream_op': {\n 'input': DependencyDefinition('my_upstream_op', 'result')\n }\n 'my_downstream_op': {\n 'input': DependencyDefinition('my_upstream_graph', 'result')\n }\n }\n\n In general, users should prefer not to construct this class directly or use the\n :py:class:`JobDefinition` API that requires instances of this class. Instead, use the\n :py:func:`@job <job>` API:\n\n .. code-block:: python\n\n @job\n def the_job():\n node_b(node_a())\n\n\n Args:\n node (str): The name of the node (op or graph) that is depended on, that is, from which the value\n passed between the two nodes originates.\n output (Optional[str]): The name of the output that is depended on. (default: "result")\n description (Optional[str]): Human-readable description of this dependency.\n """\n\n def __new__(\n cls,\n node: str,\n output: str = DEFAULT_OUTPUT,\n description: Optional[str] = None,\n ):\n return super(DependencyDefinition, cls).__new__(\n cls,\n check.str_param(node, "node"),\n check.str_param(output, "output"),\n check.opt_str_param(description, "description"),\n )\n\n def get_node_dependencies(self) -> Sequence["DependencyDefinition"]:\n return [self]\n\n
[docs] @public\n def is_fan_in(self) -> bool:\n """Return True if the dependency is fan-in (always False for DependencyDefinition)."""\n return False
\n\n def get_op_dependencies(self) -> Sequence["DependencyDefinition"]:\n return [self]
\n\n\n
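A minimal sketch of the dictionary-based wiring described in the ``DependencyDefinition`` docstring above, assuming two hypothetical ops ``op_a`` and ``op_b``:

.. code-block:: python

    from dagster import DependencyDefinition, GraphDefinition, op

    @op
    def op_a() -> int:
        return 1

    @op
    def op_b(input_num: int) -> int:
        return input_num + 1

    # op_b's input "input_num" is fed by op_a's default "result" output.
    linear_graph = GraphDefinition(
        name="linear_graph",
        node_defs=[op_a, op_b],
        dependencies={"op_b": {"input_num": DependencyDefinition("op_a", "result")}},
    )
    linear_job = linear_graph.to_job()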
[docs]class MultiDependencyDefinition(\n NamedTuple(\n "_MultiDependencyDefinition",\n [\n (\n "dependencies",\n PublicAttr[Sequence[Union[DependencyDefinition, Type["MappedInputPlaceholder"]]]],\n )\n ],\n ),\n IDependencyDefinition,\n):\n """Represents a fan-in edge in the DAG of op instances forming a job.\n\n This object is used only when an input of type ``List[T]`` is assembled by fanning-in multiple\n upstream outputs of type ``T``.\n\n This object is used at the leaves of a dictionary structure that represents the complete\n dependency structure of a job whose keys represent the dependent ops or graphs and dependent\n input, so this object only contains information about the dependee.\n\n Concretely, if the input named 'input' of op_c depends on the outputs named 'result' of\n op_a and op_b, this structure will look as follows:\n\n .. code-block:: python\n\n dependency_structure = {\n 'op_c': {\n 'input': MultiDependencyDefinition(\n [\n DependencyDefinition('op_a', 'result'),\n DependencyDefinition('op_b', 'result')\n ]\n )\n }\n }\n\n In general, users should prefer not to construct this class directly or use the\n :py:class:`JobDefinition` API that requires instances of this class. Instead, use the\n :py:func:`@job <job>` API:\n\n .. code-block:: python\n\n @job\n def the_job():\n op_c(op_a(), op_b())\n\n Args:\n dependencies (List[Union[DependencyDefinition, Type[MappedInputPlaceHolder]]]): List of\n upstream dependencies fanned in to this input.\n """\n\n def __new__(\n cls,\n dependencies: Sequence[Union[DependencyDefinition, Type["MappedInputPlaceholder"]]],\n ):\n from .composition import MappedInputPlaceholder\n\n deps = check.sequence_param(dependencies, "dependencies")\n seen = {}\n for dep in deps:\n if isinstance(dep, DependencyDefinition):\n key = dep.node + ":" + dep.output\n if key in seen:\n raise DagsterInvalidDefinitionError(\n f'Duplicate dependencies on node "{dep.node}" output "{dep.output}" '\n "used in the same MultiDependencyDefinition."\n )\n seen[key] = True\n elif dep is MappedInputPlaceholder:\n pass\n else:\n check.failed(f"Unexpected dependencies entry {dep}")\n\n return super(MultiDependencyDefinition, cls).__new__(cls, deps)\n\n
[docs] @public\n def get_node_dependencies(self) -> Sequence[DependencyDefinition]:\n """Return the list of :py:class:`DependencyDefinition` contained by this object."""\n return [dep for dep in self.dependencies if isinstance(dep, DependencyDefinition)]
\n\n
[docs] @public\n def is_fan_in(self) -> bool:\n """Return `True` if the dependency is fan-in (always True for MultiDependencyDefinition)."""\n return True
\n\n
[docs] @public\n def get_dependencies_and_mappings(\n self,\n ) -> Sequence[Union[DependencyDefinition, Type["MappedInputPlaceholder"]]]:\n """Return the combined list of dependencies contained by this object, including :py:class:`DependencyDefinition` and :py:class:`MappedInputPlaceholder` objects."""\n return self.dependencies
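A minimal sketch of a fan-in input assembled with ``MultiDependencyDefinition``; the op names and the ``In(List[int])`` input are illustrative assumptions:

.. code-block:: python

    from typing import List

    from dagster import (
        DependencyDefinition,
        GraphDefinition,
        In,
        MultiDependencyDefinition,
        op,
    )

    @op
    def return_one() -> int:
        return 1

    @op
    def return_two() -> int:
        return 2

    @op(ins={"nums": In(List[int])})
    def sum_fan_in(nums):
        return sum(nums)

    # Both upstream "result" outputs are fanned in to the single list input.
    fan_in_graph = GraphDefinition(
        name="fan_in_graph",
        node_defs=[return_one, return_two, sum_fan_in],
        dependencies={
            "sum_fan_in": {
                "nums": MultiDependencyDefinition(
                    [
                        DependencyDefinition("return_one"),
                        DependencyDefinition("return_two"),
                    ]
                )
            }
        },
    )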
\n\n\nclass BlockingAssetChecksDependencyDefinition(\n IDependencyDefinition,\n NamedTuple(\n "_BlockingAssetChecksDependencyDefinition",\n [\n (\n "asset_check_dependencies",\n Sequence[DependencyDefinition],\n ),\n ("other_dependency", Optional[DependencyDefinition]),\n ],\n ),\n):\n """An input that depends on a set of outputs that correspond to upstream asset checks, and also\n optionally depends on a single upstream output that does not correspond to an asset check.\n\n We model this with a different kind of DependencyDefinition than MultiDependencyDefinition,\n because we treat the value that's passed to the input parameter differently: we ignore the asset\n check dependencies and only pass a single value, instead of a fanned-in list.\n """\n\n @public\n def get_node_dependencies(self) -> Sequence[DependencyDefinition]:\n """Return the list of :py:class:`DependencyDefinition` contained by this object."""\n if self.other_dependency:\n return [*self.asset_check_dependencies, self.other_dependency]\n else:\n return self.asset_check_dependencies\n\n @public\n def is_fan_in(self) -> bool:\n return False\n\n @public\n def get_dependencies_and_mappings(\n self,\n ) -> Sequence[Union[DependencyDefinition, Type["MappedInputPlaceholder"]]]:\n return self.get_node_dependencies()\n\n\nclass DynamicCollectDependencyDefinition(\n NamedTuple("_DynamicCollectDependencyDefinition", [("node_name", str), ("output_name", str)]),\n IDependencyDefinition,\n):\n def get_node_dependencies(self) -> Sequence[DependencyDefinition]:\n return [DependencyDefinition(self.node_name, self.output_name)]\n\n def is_fan_in(self) -> bool:\n return True\n\n\nDepTypeAndOutputs: TypeAlias = Tuple[\n DependencyType,\n Union[NodeOutput, List[Union[NodeOutput, Type["MappedInputPlaceholder"]]]],\n]\n\nInputToOutputMap: TypeAlias = Dict[NodeInput, DepTypeAndOutputs]\n\n\ndef _create_handle_dict(\n node_dict: Mapping[str, Node],\n dep_dict: DependencyMapping[str],\n) -> InputToOutputMap:\n from .composition import MappedInputPlaceholder\n\n check.mapping_param(node_dict, "node_dict", key_type=str, value_type=Node)\n check.two_dim_mapping_param(dep_dict, "dep_dict", value_type=IDependencyDefinition)\n\n handle_dict: InputToOutputMap = {}\n\n for node_name, input_dict in dep_dict.items():\n from_node = node_dict[node_name]\n for input_name, dep_def in input_dict.items():\n if isinstance(\n dep_def, (MultiDependencyDefinition, BlockingAssetChecksDependencyDefinition)\n ):\n handles: List[Union[NodeOutput, Type[MappedInputPlaceholder]]] = []\n for inner_dep in dep_def.get_dependencies_and_mappings():\n if isinstance(inner_dep, DependencyDefinition):\n handles.append(node_dict[inner_dep.node].get_output(inner_dep.output))\n elif inner_dep is MappedInputPlaceholder:\n handles.append(inner_dep)\n else:\n check.failed(\n f"Unexpected MultiDependencyDefinition dependencies type {inner_dep}"\n )\n\n handle_dict[from_node.get_input(input_name)] = (DependencyType.FAN_IN, handles)\n\n elif isinstance(dep_def, DependencyDefinition):\n handle_dict[from_node.get_input(input_name)] = (\n DependencyType.DIRECT,\n node_dict[dep_def.node].get_output(dep_def.output),\n )\n elif isinstance(dep_def, DynamicCollectDependencyDefinition):\n handle_dict[from_node.get_input(input_name)] = (\n DependencyType.DYNAMIC_COLLECT,\n node_dict[dep_def.node_name].get_output(dep_def.output_name),\n )\n\n else:\n check.failed(f"Unknown dependency type {dep_def}")\n\n return handle_dict\n\n\nclass DependencyStructure:\n @staticmethod\n def from_definitions(\n nodes: 
Mapping[str, Node], dep_dict: DependencyMapping[str]\n ) -> "DependencyStructure":\n return DependencyStructure(\n list(dep_dict.keys()),\n _create_handle_dict(nodes, dep_dict),\n dep_dict,\n )\n\n _node_input_index: DefaultDict[str, Dict[NodeInput, List[NodeOutput]]]\n _node_output_index: Dict[str, DefaultDict[NodeOutput, List[NodeInput]]]\n _dynamic_fan_out_index: Dict[str, NodeOutput]\n _collect_index: Dict[str, Set[NodeOutput]]\n _deps_by_node_name: DependencyMapping[str]\n\n def __init__(\n self,\n node_names: Sequence[str],\n input_to_output_map: InputToOutputMap,\n deps_by_node_name: DependencyMapping[str],\n ):\n self._node_names = node_names\n self._input_to_output_map = input_to_output_map\n self._deps_by_node_name = deps_by_node_name\n\n # Building up a couple indexes here so that one can look up all the upstream output handles\n # or downstream input handles in O(1). Without this, this can become O(N^2) where N is node\n # count during the GraphQL query in particular\n\n # node_name => input_handle => list[output_handle]\n self._node_input_index = defaultdict(dict)\n\n # node_name => output_handle => list[input_handle]\n self._node_output_index = defaultdict(lambda: defaultdict(list))\n\n # node_name => dynamic output_handle that this node will dupe for\n self._dynamic_fan_out_index = {}\n\n # node_name => set of dynamic output_handle this collects over\n self._collect_index = defaultdict(set)\n\n for node_input, (dep_type, node_output_or_list) in self._input_to_output_map.items():\n if dep_type == DependencyType.FAN_IN:\n node_output_list: List[NodeOutput] = []\n for node_output in node_output_or_list:\n if not isinstance(node_output, NodeOutput):\n continue\n\n if node_output.is_dynamic:\n raise DagsterInvalidDefinitionError(\n "Currently, items in a fan-in dependency cannot be downstream of"\n " dynamic outputs. Problematic dependency on dynamic output"\n f' "{node_output.describe()}".'\n )\n if self._dynamic_fan_out_index.get(node_output.node_name):\n raise DagsterInvalidDefinitionError(\n "Currently, items in a fan-in dependency cannot be downstream of"\n " dynamic outputs. 
Problematic dependency on output"\n f' "{node_output.describe()}", downstream of'\n f' "{self._dynamic_fan_out_index[node_output.node_name].describe()}".'\n )\n\n node_output_list.append(node_output)\n elif dep_type == DependencyType.DIRECT:\n node_output = cast(NodeOutput, node_output_or_list)\n\n if node_output.is_dynamic:\n self._validate_and_set_fan_out(node_input, node_output)\n\n if self._dynamic_fan_out_index.get(node_output.node_name):\n self._validate_and_set_fan_out(\n node_input, self._dynamic_fan_out_index[node_output.node_name]\n )\n\n node_output_list = [node_output]\n elif dep_type == DependencyType.DYNAMIC_COLLECT:\n node_output = cast(NodeOutput, node_output_or_list)\n\n if node_output.is_dynamic:\n self._validate_and_set_collect(node_input, node_output)\n\n elif self._dynamic_fan_out_index.get(node_output.node_name):\n self._validate_and_set_collect(\n node_input,\n self._dynamic_fan_out_index[node_output.node_name],\n )\n else:\n check.failed(\n f"Unexpected dynamic fan in dep created {node_output} -> {node_input}"\n )\n\n node_output_list = [node_output]\n else:\n check.failed(f"Unexpected dep type {dep_type}")\n\n self._node_input_index[node_input.node.name][node_input] = node_output_list\n for node_output in node_output_list:\n self._node_output_index[node_output.node.name][node_output].append(node_input)\n\n def _validate_and_set_fan_out(self, node_input: NodeInput, node_output: NodeOutput) -> None:\n """Helper function for populating _dynamic_fan_out_index."""\n if not node_input.node.definition.input_supports_dynamic_output_dep(node_input.input_name):\n raise DagsterInvalidDefinitionError(\n f"{node_input.node.describe_node()} cannot be downstream of dynamic output"\n f' "{node_output.describe()}" since input "{node_input.input_name}" maps to a'\n " node that is already downstream of another dynamic output. Nodes cannot be"\n " downstream of more than one dynamic output"\n )\n\n if self._collect_index.get(node_input.node_name):\n raise DagsterInvalidDefinitionError(\n f"{node_input.node.describe_node()} cannot be both downstream of dynamic output "\n f"{node_output.describe()} and collect over dynamic output "\n f"{next(iter(self._collect_index[node_input.node_name])).describe()}."\n )\n\n if self._dynamic_fan_out_index.get(node_input.node_name) is None:\n self._dynamic_fan_out_index[node_input.node_name] = node_output\n return\n\n if self._dynamic_fan_out_index[node_input.node_name] != node_output:\n raise DagsterInvalidDefinitionError(\n f"{node_input.node.describe_node()} cannot be downstream of more than one dynamic"\n f' output. It is downstream of both "{node_output.describe()}" and'\n f' "{self._dynamic_fan_out_index[node_input.node_name].describe()}"'\n )\n\n def _validate_and_set_collect(\n self,\n node_input: NodeInput,\n node_output: NodeOutput,\n ) -> None:\n if self._dynamic_fan_out_index.get(node_input.node_name):\n raise DagsterInvalidDefinitionError(\n f"{node_input.node.describe_node()} cannot both collect over dynamic output "\n f"{node_output.describe()} and be downstream of the dynamic output "\n f"{self._dynamic_fan_out_index[node_input.node_name].describe()}."\n )\n\n self._collect_index[node_input.node_name].add(node_output)\n\n # if the output is already fanned out\n if self._dynamic_fan_out_index.get(node_output.node_name):\n raise DagsterInvalidDefinitionError(\n f"{node_input.node.describe_node()} cannot be downstream of more than one dynamic"\n f' output. 
It is downstream of both "{node_output.describe()}" and'\n f' "{self._dynamic_fan_out_index[node_output.node_name].describe()}"'\n )\n\n def all_upstream_outputs_from_node(self, node_name: str) -> Sequence[NodeOutput]:\n check.str_param(node_name, "node_name")\n\n # flatten out all outputs that feed into the inputs of this node\n return [\n output_handle\n for output_handle_list in self._node_input_index[node_name].values()\n for output_handle in output_handle_list\n ]\n\n def input_to_upstream_outputs_for_node(\n self, node_name: str\n ) -> Mapping[NodeInput, Sequence[NodeOutput]]:\n """Returns a Dict[NodeInput, List[NodeOutput]] that encodes\n where all the the inputs are sourced from upstream. Usually the\n List[NodeOutput] will be a list of one, except for the\n multi-dependency case.\n """\n check.str_param(node_name, "node_name")\n return self._node_input_index[node_name]\n\n def output_to_downstream_inputs_for_node(\n self, node_name: str\n ) -> Mapping[NodeOutput, Sequence[NodeInput]]:\n """Returns a Dict[NodeOutput, List[NodeInput]] that\n represents all the downstream inputs for each output in the\n dictionary.\n """\n check.str_param(node_name, "node_name")\n return self._node_output_index[node_name]\n\n def has_direct_dep(self, node_input: NodeInput) -> bool:\n check.inst_param(node_input, "node_input", NodeInput)\n if node_input not in self._input_to_output_map:\n return False\n dep_type, _ = self._input_to_output_map[node_input]\n return dep_type == DependencyType.DIRECT\n\n def get_direct_dep(self, node_input: NodeInput) -> NodeOutput:\n check.inst_param(node_input, "node_input", NodeInput)\n dep_type, dep = self._input_to_output_map[node_input]\n check.invariant(\n dep_type == DependencyType.DIRECT,\n f"Cannot call get_direct_dep when dep is not singular, got {dep_type}",\n )\n return cast(NodeOutput, dep)\n\n def get_dependency_definition(self, node_input: NodeInput) -> Optional[IDependencyDefinition]:\n return self._deps_by_node_name[node_input.node_name].get(node_input.input_name)\n\n def has_fan_in_deps(self, node_input: NodeInput) -> bool:\n check.inst_param(node_input, "node_input", NodeInput)\n if node_input not in self._input_to_output_map:\n return False\n dep_type, _ = self._input_to_output_map[node_input]\n return dep_type == DependencyType.FAN_IN\n\n def get_fan_in_deps(\n self, node_input: NodeInput\n ) -> Sequence[Union[NodeOutput, Type["MappedInputPlaceholder"]]]:\n check.inst_param(node_input, "node_input", NodeInput)\n dep_type, deps = self._input_to_output_map[node_input]\n check.invariant(\n dep_type == DependencyType.FAN_IN,\n f"Cannot call get_multi_dep when dep is not fan in, got {dep_type}",\n )\n return cast(List[Union[NodeOutput, Type["MappedInputPlaceholder"]]], deps)\n\n def has_dynamic_fan_in_dep(self, node_input: NodeInput) -> bool:\n check.inst_param(node_input, "node_input", NodeInput)\n if node_input not in self._input_to_output_map:\n return False\n dep_type, _ = self._input_to_output_map[node_input]\n return dep_type == DependencyType.DYNAMIC_COLLECT\n\n def get_dynamic_fan_in_dep(self, node_input: NodeInput) -> NodeOutput:\n check.inst_param(node_input, "node_input", NodeInput)\n dep_type, dep = self._input_to_output_map[node_input]\n check.invariant(\n dep_type == DependencyType.DYNAMIC_COLLECT,\n f"Cannot call get_dynamic_fan_in_dep when dep is not, got {dep_type}",\n )\n return cast(NodeOutput, dep)\n\n def has_deps(self, node_input: NodeInput) -> bool:\n check.inst_param(node_input, "node_input", NodeInput)\n return node_input in 
self._input_to_output_map\n\n def get_deps_list(self, node_input: NodeInput) -> Sequence[NodeOutput]:\n check.inst_param(node_input, "node_input", NodeInput)\n check.invariant(self.has_deps(node_input))\n dep_type, handle_or_list = self._input_to_output_map[node_input]\n if dep_type == DependencyType.DIRECT:\n return [cast(NodeOutput, handle_or_list)]\n elif dep_type == DependencyType.DYNAMIC_COLLECT:\n return [cast(NodeOutput, handle_or_list)]\n elif dep_type == DependencyType.FAN_IN:\n return [handle for handle in handle_or_list if isinstance(handle, NodeOutput)]\n else:\n check.failed(f"Unexpected dep type {dep_type}")\n\n def inputs(self) -> Sequence[NodeInput]:\n return list(self._input_to_output_map.keys())\n\n def get_upstream_dynamic_output_for_node(self, node_name: str) -> Optional[NodeOutput]:\n return self._dynamic_fan_out_index.get(node_name)\n\n def get_dependency_type(self, node_input: NodeInput) -> Optional[DependencyType]:\n result = self._input_to_output_map.get(node_input)\n if result is None:\n return None\n dep_type, _ = result\n return dep_type\n\n def is_dynamic_mapped(self, node_name: str) -> bool:\n return node_name in self._dynamic_fan_out_index\n\n def has_dynamic_downstreams(self, node_name: str) -> bool:\n for node_output in self._dynamic_fan_out_index.values():\n if node_output.node_name == node_name:\n return True\n\n return False\n
", "current_page_name": "_modules/dagster/_core/definitions/dependency", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.dependency"}, "events": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.events

\nimport re\nfrom enum import Enum\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Callable,\n    Generic,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    TypeVar,\n    Union,\n    cast,\n)\n\nimport dagster._check as check\nimport dagster._seven as seven\nfrom dagster._annotations import PublicAttr, deprecated, experimental_param, public\nfrom dagster._core.definitions.data_version import DATA_VERSION_TAG, DataVersion\nfrom dagster._core.storage.tags import MULTIDIMENSIONAL_PARTITION_PREFIX, SYSTEM_TAG_PREFIX\nfrom dagster._serdes import whitelist_for_serdes\nfrom dagster._serdes.serdes import NamedTupleSerializer\n\nfrom .metadata import (\n    MetadataFieldSerializer,\n    MetadataMapping,\n    MetadataValue,\n    RawMetadataValue,\n    normalize_metadata,\n)\nfrom .utils import DEFAULT_OUTPUT, check_valid_name\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.assets import AssetsDefinition\n    from dagster._core.definitions.source_asset import SourceAsset\n    from dagster._core.execution.context.output import OutputContext\n\n\nASSET_KEY_SPLIT_REGEX = re.compile("[^a-zA-Z0-9_]")\nASSET_KEY_DELIMITER = "/"\n\n\ndef parse_asset_key_string(s: str) -> Sequence[str]:\n    return list(filter(lambda x: x, re.split(ASSET_KEY_SPLIT_REGEX, s)))\n\n\n
[docs]@whitelist_for_serdes\nclass AssetKey(NamedTuple("_AssetKey", [("path", PublicAttr[Sequence[str]])])):\n """Object representing the structure of an asset key. Takes in a sanitized string, list of\n strings, or tuple of strings.\n\n Example usage:\n\n .. code-block:: python\n\n from dagster import op\n\n @op\n def emit_metadata(context, df):\n yield AssetMaterialization(\n asset_key=AssetKey('flat_asset_key'),\n metadata={"text_metadata": "Text-based metadata for this event"},\n )\n\n @op\n def structured_asset_key(context, df):\n yield AssetMaterialization(\n asset_key=AssetKey(['parent', 'child', 'grandchild']),\n metadata={"text_metadata": "Text-based metadata for this event"},\n )\n\n @op\n def structured_asset_key_2(context, df):\n yield AssetMaterialization(\n asset_key=AssetKey(('parent', 'child', 'grandchild')),\n metadata={"text_metadata": "Text-based metadata for this event"},\n )\n\n Args:\n path (Sequence[str]): String, list of strings, or tuple of strings. A list of strings\n represent the hierarchical structure of the asset_key.\n """\n\n def __new__(cls, path: Sequence[str]):\n if isinstance(path, str):\n path = [path]\n else:\n path = list(check.sequence_param(path, "path", of_type=str))\n\n return super(AssetKey, cls).__new__(cls, path=path)\n\n def __str__(self):\n return f"AssetKey({self.path})"\n\n def __repr__(self):\n return f"AssetKey({self.path})"\n\n def __hash__(self):\n return hash(tuple(self.path))\n\n def __eq__(self, other):\n if not isinstance(other, AssetKey):\n return False\n if len(self.path) != len(other.path):\n return False\n for i in range(0, len(self.path)):\n if self.path[i] != other.path[i]:\n return False\n return True\n\n def to_string(self) -> str:\n """E.g. '["first_component", "second_component"]'."""\n return seven.json.dumps(self.path)\n\n def to_user_string(self) -> str:\n """E.g. 
"first_component/second_component"."""\n return ASSET_KEY_DELIMITER.join(self.path)\n\n def to_python_identifier(self, suffix: Optional[str] = None) -> str:\n """Build a valid Python identifier based on the asset key that can be used for\n operation names or I/O manager keys.\n """\n path = list(self.path)\n\n if suffix is not None:\n path.append(suffix)\n\n return "__".join(path).replace("-", "_")\n\n @staticmethod\n def from_user_string(asset_key_string: str) -> "AssetKey":\n return AssetKey(asset_key_string.split(ASSET_KEY_DELIMITER))\n\n @staticmethod\n def from_db_string(asset_key_string: Optional[str]) -> Optional["AssetKey"]:\n if not asset_key_string:\n return None\n if asset_key_string[0] == "[":\n # is a json string\n try:\n path = seven.json.loads(asset_key_string)\n except seven.JSONDecodeError:\n path = parse_asset_key_string(asset_key_string)\n else:\n path = parse_asset_key_string(asset_key_string)\n return AssetKey(path)\n\n @staticmethod\n def get_db_prefix(path: Sequence[str]):\n check.sequence_param(path, "path", of_type=str)\n return seven.json.dumps(path)[:-2] # strip trailing '"]' from json string\n\n @staticmethod\n def from_graphql_input(graphql_input_asset_key: Mapping[str, Sequence[str]]) -> "AssetKey":\n return AssetKey(graphql_input_asset_key["path"])\n\n def to_graphql_input(self) -> Mapping[str, Sequence[str]]:\n return {"path": self.path}\n\n @staticmethod\n def from_coercible(arg: "CoercibleToAssetKey") -> "AssetKey":\n if isinstance(arg, AssetKey):\n return check.inst_param(arg, "arg", AssetKey)\n elif isinstance(arg, str):\n return AssetKey([arg])\n elif isinstance(arg, list):\n check.list_param(arg, "arg", of_type=str)\n return AssetKey(arg)\n elif isinstance(arg, tuple):\n check.tuple_param(arg, "arg", of_type=str)\n return AssetKey(arg)\n else:\n check.failed(f"Unexpected type for AssetKey: {type(arg)}")\n\n @staticmethod\n def from_coercible_or_definition(\n arg: Union["CoercibleToAssetKey", "AssetsDefinition", "SourceAsset"]\n ) -> "AssetKey":\n from dagster._core.definitions.assets import AssetsDefinition\n from dagster._core.definitions.source_asset import SourceAsset\n\n if isinstance(arg, AssetsDefinition):\n return arg.key\n elif isinstance(arg, SourceAsset):\n return arg.key\n else:\n return AssetKey.from_coercible(arg)\n\n # @staticmethod\n # def from_coercible_to_asset_dep(arg: "CoercibleToAssetDep") -> "AssetKey":\n # from dagster._core.definitions.asset_dep import AssetDep\n # from dagster._core.definitions.asset_spec import AssetSpec\n # from dagster._core.definitions.assets import AssetsDefinition\n # from dagster._core.definitions.source_asset import SourceAsset\n\n # if isinstance(arg, AssetsDefinition):\n # if len(arg.keys) > 1:\n # # Only AssetsDefinition with a single asset can be passed\n # raise DagsterInvalidDefinitionError(\n # "Cannot pass a multi_asset AssetsDefinition as an argument to deps."\n # " Instead, specify dependencies on the assets created by the multi_asset"\n # f" via AssetKeys or strings. 
For the multi_asset {arg.node_def.name}, the"\n # f" available keys are: {arg.keys}."\n # )\n # return arg.key\n # elif isinstance(arg, SourceAsset):\n # return arg.key\n # elif isinstance(arg, AssetDep):\n # return arg.asset_key\n # elif isinstance(arg, AssetSpec):\n # return arg.asset_key\n # else:\n # return AssetKey.from_coercible(arg)\n\n def has_prefix(self, prefix: Sequence[str]) -> bool:\n return len(self.path) >= len(prefix) and self.path[: len(prefix)] == prefix\n\n def with_prefix(self, prefix: "CoercibleToAssetKeyPrefix") -> "AssetKey":\n prefix = key_prefix_from_coercible(prefix)\n return AssetKey(list(prefix) + list(self.path))
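A few illustrative assertions exercising the ``AssetKey`` helpers shown above; the key components are arbitrary:

.. code-block:: python

    from dagster import AssetKey

    key = AssetKey(["warehouse", "orders"])

    assert key.to_user_string() == "warehouse/orders"
    assert AssetKey.from_user_string("warehouse/orders") == key
    assert key.has_prefix(["warehouse"])
    assert key.with_prefix("analytics") == AssetKey(["analytics", "warehouse", "orders"])
    assert key.to_python_identifier() == "warehouse__orders"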
\n\n\nclass AssetKeyPartitionKey(NamedTuple):\n """An AssetKey with an (optional) partition key. Refers either to a non-partitioned asset or a\n partition of a partitioned asset.\n """\n\n asset_key: AssetKey\n partition_key: Optional[str] = None\n\n\nCoercibleToAssetKey = Union[AssetKey, str, Sequence[str]]\nCoercibleToAssetKeyPrefix = Union[str, Sequence[str]]\n\n\ndef check_opt_coercible_to_asset_key_prefix_param(\n prefix: Optional[CoercibleToAssetKeyPrefix], param_name: str\n) -> Optional[Sequence[str]]:\n try:\n return key_prefix_from_coercible(prefix) if prefix is not None else None\n except check.CheckError:\n raise check.ParameterCheckError(\n f'Param "{param_name}" is not a string or a sequence of strings'\n )\n\n\ndef key_prefix_from_coercible(key_prefix: CoercibleToAssetKeyPrefix) -> Sequence[str]:\n if isinstance(key_prefix, str):\n return [key_prefix]\n elif isinstance(key_prefix, list):\n return key_prefix\n else:\n check.failed(f"Unexpected type for key_prefix: {type(key_prefix)}")\n\n\nDynamicAssetKey = Callable[["OutputContext"], Optional[AssetKey]]\n\n\n@whitelist_for_serdes\nclass AssetLineageInfo(\n NamedTuple("_AssetLineageInfo", [("asset_key", AssetKey), ("partitions", AbstractSet[str])])\n):\n def __new__(cls, asset_key: AssetKey, partitions: Optional[AbstractSet[str]] = None):\n asset_key = check.inst_param(asset_key, "asset_key", AssetKey)\n partitions = check.opt_set_param(partitions, "partitions", str)\n return super(AssetLineageInfo, cls).__new__(cls, asset_key=asset_key, partitions=partitions)\n\n\nT = TypeVar("T")\n\n\n
[docs]@experimental_param(param="data_version")\nclass Output(Generic[T]):\n """Event corresponding to one of a op's outputs.\n\n Op compute functions must explicitly yield events of this type when they have more than\n one output, or when they also yield events of other types, or when defining a op using the\n :py:class:`OpDefinition` API directly.\n\n Outputs are values produced by ops that will be consumed by downstream ops in a job.\n They are type-checked at op boundaries when their corresponding :py:class:`Out`\n or the downstream :py:class:`In` is typed.\n\n Args:\n value (Any): The value returned by the compute function.\n output_name (Optional[str]): Name of the corresponding out. (default:\n "result")\n metadata (Optional[Dict[str, Union[str, float, int, MetadataValue]]]):\n Arbitrary metadata about the failure. Keys are displayed string labels, and values are\n one of the following: string, float, int, JSON-serializable dict, JSON-serializable\n list, and one of the data classes returned by a MetadataValue static method.\n data_version (Optional[DataVersion]): (Experimental) A data version to manually set\n for the asset.\n """\n\n def __init__(\n self,\n value: T,\n output_name: Optional[str] = DEFAULT_OUTPUT,\n metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n data_version: Optional[DataVersion] = None,\n ):\n self._value = value\n self._output_name = check.str_param(output_name, "output_name")\n self._data_version = check.opt_inst_param(data_version, "data_version", DataVersion)\n self._metadata = normalize_metadata(\n check.opt_mapping_param(metadata, "metadata", key_type=str),\n )\n\n @property\n def metadata(self) -> MetadataMapping:\n return self._metadata\n\n @public\n @property\n def value(self) -> Any:\n """Any: The value returned by the compute function."""\n return self._value\n\n @public\n @property\n def output_name(self) -> str:\n """str: Name of the corresponding :py:class:`Out`."""\n return self._output_name\n\n @public\n @property\n def data_version(self) -> Optional[DataVersion]:\n """Optional[DataVersion]: A data version that was manually set on the `Output`."""\n return self._data_version\n\n def __eq__(self, other: object) -> bool:\n return (\n isinstance(other, Output)\n and self.value == other.value\n and self.output_name == other.output_name\n and self.metadata == other.metadata\n )
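A minimal sketch of an op with two outputs that yields ``Output`` events explicitly and attaches metadata; the op and output names are hypothetical:

.. code-block:: python

    from dagster import Out, Output, op

    @op(out={"evens": Out(), "odds": Out()})
    def split_numbers(numbers: list):
        evens = [n for n in numbers if n % 2 == 0]
        odds = [n for n in numbers if n % 2 == 1]
        # With multiple outputs, each value must be yielded as an explicit Output.
        yield Output(evens, output_name="evens", metadata={"count": len(evens)})
        yield Output(odds, output_name="odds", metadata={"count": len(odds)})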
\n\n\n
[docs]class DynamicOutput(Generic[T]):\n """Variant of :py:class:`Output <dagster.Output>` used to support\n dynamic mapping & collect. Each ``DynamicOutput`` produced by an op represents\n one item in a set that can be processed individually with ``map`` or gathered\n with ``collect``.\n\n Each ``DynamicOutput`` must have a unique ``mapping_key`` to distinguish it with it's set.\n\n Args:\n value (Any):\n The value returned by the compute function.\n mapping_key (str):\n The key that uniquely identifies this dynamic value relative to its peers.\n This key will be used to identify the downstream ops when mapped, ie\n ``mapped_op[example_mapping_key]``\n output_name (Optional[str]):\n Name of the corresponding :py:class:`DynamicOut` defined on the op.\n (default: "result")\n metadata (Optional[Dict[str, Union[str, float, int, MetadataValue]]]):\n Arbitrary metadata about the failure. Keys are displayed string labels, and values are\n one of the following: string, float, int, JSON-serializable dict, JSON-serializable\n list, and one of the data classes returned by a MetadataValue static method.\n """\n\n def __init__(\n self,\n value: T,\n mapping_key: str,\n output_name: Optional[str] = DEFAULT_OUTPUT,\n metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n ):\n self._mapping_key = check_valid_name(check.str_param(mapping_key, "mapping_key"))\n self._output_name = check.str_param(output_name, "output_name")\n self._value = value\n self._metadata = normalize_metadata(\n check.opt_mapping_param(metadata, "metadata", key_type=str),\n )\n\n @property\n def metadata(self) -> Mapping[str, MetadataValue]:\n return self._metadata\n\n @public\n @property\n def mapping_key(self) -> str:\n """The mapping_key that was set for this DynamicOutput at instantiation."""\n return self._mapping_key\n\n @public\n @property\n def value(self) -> T:\n """The value that is returned by the compute function for this DynamicOut."""\n return self._value\n\n @public\n @property\n def output_name(self) -> str:\n """Name of the :py:class:`DynamicOut` defined on the op that this DynamicOut is associated with."""\n return self._output_name\n\n def __eq__(self, other: object) -> bool:\n return (\n isinstance(other, DynamicOutput)\n and self.value == other.value\n and self.output_name == other.output_name\n and self.mapping_key == other.mapping_key\n and self.metadata == other.metadata\n )
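A minimal sketch of dynamic mapping and collection with ``DynamicOutput``; the op names are hypothetical:

.. code-block:: python

    from dagster import DynamicOut, DynamicOutput, job, op

    @op(out=DynamicOut())
    def fan_out():
        for idx in range(3):
            # Each mapping_key must be unique within the set of DynamicOutputs.
            yield DynamicOutput(idx, mapping_key=f"chunk_{idx}")

    @op
    def double(value: int) -> int:
        return value * 2

    @op
    def total(values: list) -> int:
        return sum(values)

    @job
    def dynamic_job():
        total(fan_out().map(double).collect())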
\n\n\n@whitelist_for_serdes(\n storage_field_names={"metadata": "metadata_entries"},\n field_serializers={"metadata": MetadataFieldSerializer},\n)\nclass AssetObservation(\n NamedTuple(\n "_AssetObservation",\n [\n ("asset_key", PublicAttr[AssetKey]),\n ("description", PublicAttr[Optional[str]]),\n ("metadata", PublicAttr[Mapping[str, MetadataValue]]),\n ("partition", PublicAttr[Optional[str]]),\n ("tags", PublicAttr[Mapping[str, str]]),\n ],\n )\n):\n """Event that captures metadata about an asset at a point in time.\n\n Args:\n asset_key (Union[str, List[str], AssetKey]): A key to identify the asset.\n partition (Optional[str]): The name of a partition of the asset that the metadata\n corresponds to.\n tags (Optional[Mapping[str, str]]): A mapping containing system-populated tags for the\n observation. Users should not pass values into this argument.\n metadata (Optional[Dict[str, Union[str, float, int, MetadataValue]]]):\n Arbitrary metadata about the asset. Keys are displayed string labels, and values are\n one of the following: string, float, int, JSON-serializable dict, JSON-serializable\n list, and one of the data classes returned by a MetadataValue static method.\n """\n\n def __new__(\n cls,\n asset_key: CoercibleToAssetKey,\n description: Optional[str] = None,\n metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n partition: Optional[str] = None,\n tags: Optional[Mapping[str, str]] = None,\n ):\n if isinstance(asset_key, AssetKey):\n check.inst_param(asset_key, "asset_key", AssetKey)\n elif isinstance(asset_key, str):\n asset_key = AssetKey(parse_asset_key_string(asset_key))\n else:\n check.sequence_param(asset_key, "asset_key", of_type=str)\n asset_key = AssetKey(asset_key)\n\n tags = check.opt_mapping_param(tags, "tags", key_type=str, value_type=str)\n if any([not tag.startswith(SYSTEM_TAG_PREFIX) for tag in tags or {}]):\n check.failed(\n "Users should not pass values into the tags argument for AssetMaterializations. "\n "The tags argument is reserved for system-populated tags."\n )\n\n normed_metadata = normalize_metadata(\n check.opt_mapping_param(metadata, "metadata", key_type=str),\n )\n\n return super(AssetObservation, cls).__new__(\n cls,\n asset_key=asset_key,\n description=check.opt_str_param(description, "description"),\n metadata=normed_metadata,\n tags=tags,\n partition=check.opt_str_param(partition, "partition"),\n )\n\n @property\n def label(self) -> str:\n return " ".join(self.asset_key.path)\n\n @property\n def data_version(self) -> Optional[str]:\n return self.tags.get(DATA_VERSION_TAG)\n\n\nUNDEFINED_ASSET_KEY_PATH = ["__undefined__"]\n\n\nclass AssetMaterializationSerializer(NamedTupleSerializer):\n # There are old `Materialization` objects in storage. We set the default value for asset key to\n # be `AssetKey(["__undefined__"])` to ensure that we can load these objects, without needing to\n # allow for the construction of new `AssetMaterialization` objects with no defined AssetKey.\n def before_unpack(self, context, unpacked_dict: Any) -> Any:\n # cover both the case where "asset_key" is not present at all and where it is None\n if unpacked_dict.get("asset_key") is None:\n unpacked_dict["asset_key"] = AssetKey(UNDEFINED_ASSET_KEY_PATH)\n return unpacked_dict\n\n\n
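A minimal sketch of logging an ``AssetObservation`` from inside an op via ``context.log_event``; the asset key and row count are hypothetical:

.. code-block:: python

    from dagster import AssetObservation, op

    @op
    def observe_orders(context):
        row_count = 1234  # hypothetical value for illustration
        context.log_event(
            AssetObservation(asset_key="orders", metadata={"row_count": row_count})
        )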
[docs]@whitelist_for_serdes(\n old_storage_names={"Materialization"},\n serializer=AssetMaterializationSerializer,\n storage_field_names={"metadata": "metadata_entries"},\n field_serializers={"metadata": MetadataFieldSerializer},\n)\nclass AssetMaterialization(\n NamedTuple(\n "_AssetMaterialization",\n [\n ("asset_key", PublicAttr[AssetKey]),\n ("description", PublicAttr[Optional[str]]),\n ("metadata", PublicAttr[Mapping[str, MetadataValue]]),\n ("partition", PublicAttr[Optional[str]]),\n ("tags", Optional[Mapping[str, str]]),\n ],\n )\n):\n """Event indicating that an op has materialized an asset.\n\n Op compute functions may yield events of this type whenever they wish to indicate to the\n Dagster framework (and the end user) that they have produced a materialized value as a\n side effect of computation. Unlike outputs, asset materializations can not be passed to other\n ops, and their persistence is controlled by op logic, rather than by the Dagster\n framework.\n\n Op authors should use these events to organize metadata about the side effects of their\n computations, enabling tooling like the Assets dashboard in the Dagster UI.\n\n Args:\n asset_key (Union[str, List[str], AssetKey]): A key to identify the materialized asset across\n job runs\n description (Optional[str]): A longer human-readable description of the materialized value.\n partition (Optional[str]): The name of the partition\n that was materialized.\n tags (Optional[Mapping[str, str]]): A mapping containing system-populated tags for the\n materialization. Users should not pass values into this argument.\n metadata (Optional[Dict[str, RawMetadataValue]]):\n Arbitrary metadata about the asset. Keys are displayed string labels, and values are\n one of the following: string, float, int, JSON-serializable dict, JSON-serializable\n list, and one of the data classes returned by a MetadataValue static method.\n """\n\n def __new__(\n cls,\n asset_key: CoercibleToAssetKey,\n description: Optional[str] = None,\n metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n partition: Optional[str] = None,\n tags: Optional[Mapping[str, str]] = None,\n ):\n from dagster._core.definitions.multi_dimensional_partitions import MultiPartitionKey\n\n if isinstance(asset_key, AssetKey):\n check.inst_param(asset_key, "asset_key", AssetKey)\n elif isinstance(asset_key, str):\n asset_key = AssetKey(parse_asset_key_string(asset_key))\n else:\n check.sequence_param(asset_key, "asset_key", of_type=str)\n asset_key = AssetKey(asset_key)\n\n check.opt_mapping_param(tags, "tags", key_type=str, value_type=str)\n invalid_tags = [tag for tag in tags or {} if not tag.startswith(SYSTEM_TAG_PREFIX)]\n if len(invalid_tags) > 0:\n check.failed(\n f"Invalid tags: {tags} Users should not pass values into the tags argument for"\n " AssetMaterializations. 
The tags argument is reserved for system-populated tags."\n )\n\n normed_metadata = normalize_metadata(\n check.opt_mapping_param(metadata, "metadata", key_type=str),\n )\n\n partition = check.opt_str_param(partition, "partition")\n\n if not isinstance(partition, MultiPartitionKey):\n # When event log records are unpacked from storage, cast the partition key as a\n # MultiPartitionKey if multi-dimensional partition tags exist\n multi_dimensional_partitions = {\n dimension[len(MULTIDIMENSIONAL_PARTITION_PREFIX) :]: partition_key\n for dimension, partition_key in (tags or {}).items()\n if dimension.startswith(MULTIDIMENSIONAL_PARTITION_PREFIX)\n }\n if multi_dimensional_partitions:\n partition = MultiPartitionKey(multi_dimensional_partitions)\n\n return super(AssetMaterialization, cls).__new__(\n cls,\n asset_key=asset_key,\n description=check.opt_str_param(description, "description"),\n metadata=normed_metadata,\n tags=tags,\n partition=partition,\n )\n\n @property\n def label(self) -> str:\n return " ".join(self.asset_key.path)\n\n
[docs] @public\n @staticmethod\n def file(\n path: str,\n description: Optional[str] = None,\n asset_key: Optional[Union[str, Sequence[str], AssetKey]] = None,\n ) -> "AssetMaterialization":\n """Static constructor for standard materializations corresponding to files on disk.\n\n Args:\n path (str): The path to the file.\n description (Optional[str]): A human-readable description of the materialization.\n """\n if not asset_key:\n asset_key = path\n\n return AssetMaterialization(\n asset_key=cast(Union[str, AssetKey, List[str]], asset_key),\n description=description,\n metadata={"path": MetadataValue.path(path)},\n )
\n\n\n
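A minimal sketch of yielding an ``AssetMaterialization`` alongside a regular ``Output``; the asset key and file path are hypothetical:

.. code-block:: python

    from dagster import AssetMaterialization, MetadataValue, Output, op

    @op
    def export_report():
        path = "/tmp/nightly_report.csv"  # hypothetical path for illustration
        # ... write the report to `path` here ...
        yield AssetMaterialization(
            asset_key="nightly_report",
            description="Exported the nightly report to disk",
            metadata={"path": MetadataValue.path(path)},
        )
        yield Output(path)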
[docs]@deprecated(\n breaking_version="1.7",\n additional_warn_text="Please use AssetCheckResult and @asset_check instead.",\n)\n@whitelist_for_serdes(\n storage_field_names={"metadata": "metadata_entries"},\n field_serializers={"metadata": MetadataFieldSerializer},\n)\nclass ExpectationResult(\n NamedTuple(\n "_ExpectationResult",\n [\n ("success", PublicAttr[bool]),\n ("label", PublicAttr[Optional[str]]),\n ("description", PublicAttr[Optional[str]]),\n ("metadata", PublicAttr[Mapping[str, MetadataValue]]),\n ],\n )\n):\n """Event corresponding to a data quality test.\n\n Op compute functions may yield events of this type whenever they wish to indicate to the\n Dagster framework (and the end user) that a data quality test has produced a (positive or\n negative) result.\n\n Args:\n success (bool): Whether the expectation passed or not.\n label (Optional[str]): Short display name for expectation. Defaults to "result".\n description (Optional[str]): A longer human-readable description of the expectation.\n metadata (Optional[Dict[str, RawMetadataValue]]):\n Arbitrary metadata about the failure. Keys are displayed string labels, and values are\n one of the following: string, float, int, JSON-serializable dict, JSON-serializable\n list, and one of the data classes returned by a MetadataValue static method.\n """\n\n def __new__(\n cls,\n success: bool,\n label: Optional[str] = None,\n description: Optional[str] = None,\n metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n ):\n normed_metadata = normalize_metadata(\n check.opt_mapping_param(metadata, "metadata", key_type=str),\n )\n\n return super(ExpectationResult, cls).__new__(\n cls,\n success=check.bool_param(success, "success"),\n label=check.opt_str_param(label, "label", "result"),\n description=check.opt_str_param(description, "description"),\n metadata=normed_metadata,\n )
\n\n\n
[docs]@whitelist_for_serdes(\n storage_field_names={"metadata": "metadata_entries"},\n field_serializers={"metadata": MetadataFieldSerializer},\n)\n@whitelist_for_serdes\nclass TypeCheck(\n NamedTuple(\n "_TypeCheck",\n [\n ("success", PublicAttr[bool]),\n ("description", PublicAttr[Optional[str]]),\n ("metadata", PublicAttr[Mapping[str, MetadataValue]]),\n ],\n )\n):\n """Event corresponding to a successful typecheck.\n\n Events of this type should be returned by user-defined type checks when they need to encapsulate\n additional metadata about a type check's success or failure. (i.e., when using\n :py:func:`as_dagster_type`, :py:func:`@usable_as_dagster_type <dagster_type>`, or the underlying\n :py:func:`PythonObjectDagsterType` API.)\n\n Op compute functions should generally avoid yielding events of this type to avoid confusion.\n\n Args:\n success (bool): ``True`` if the type check succeeded, ``False`` otherwise.\n description (Optional[str]): A human-readable description of the type check.\n metadata (Optional[Dict[str, RawMetadataValue]]):\n Arbitrary metadata about the failure. Keys are displayed string labels, and values are\n one of the following: string, float, int, JSON-serializable dict, JSON-serializable\n list, and one of the data classes returned by a MetadataValue static method.\n """\n\n def __new__(\n cls,\n success: bool,\n description: Optional[str] = None,\n metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n ):\n normed_metadata = normalize_metadata(\n check.opt_mapping_param(metadata, "metadata", key_type=str),\n )\n\n return super(TypeCheck, cls).__new__(\n cls,\n success=check.bool_param(success, "success"),\n description=check.opt_str_param(description, "description"),\n metadata=normed_metadata,\n )
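A minimal sketch of a user-defined type check that returns ``TypeCheck`` with metadata; the type name and check logic are hypothetical:

.. code-block:: python

    from dagster import DagsterType, TypeCheck

    def non_empty_list_check(_context, value) -> TypeCheck:
        if not isinstance(value, list):
            return TypeCheck(success=False, description="Value is not a list")
        return TypeCheck(
            success=len(value) > 0,
            description="Checked that the list is non-empty",
            metadata={"length": len(value)},
        )

    NonEmptyList = DagsterType(name="NonEmptyList", type_check_fn=non_empty_list_check)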
\n\n\n
[docs]class Failure(Exception):\n """Event indicating op failure.\n\n Raise events of this type from within op compute functions or custom type checks in order to\n indicate an unrecoverable failure in user code to the Dagster machinery and return\n structured metadata about the failure.\n\n Args:\n description (Optional[str]): A human-readable description of the failure.\n metadata (Optional[Dict[str, RawMetadataValue]]):\n Arbitrary metadata about the failure. Keys are displayed string labels, and values are\n one of the following: string, float, int, JSON-serializable dict, JSON-serializable\n list, and one of the data classes returned by a MetadataValue static method.\n allow_retries (Optional[bool]):\n Whether this Failure should respect the retry policy or bypass it and immediately fail.\n Defaults to True, respecting the retry policy and allowing retries.\n """\n\n def __init__(\n self,\n description: Optional[str] = None,\n metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n allow_retries: Optional[bool] = None,\n ):\n super(Failure, self).__init__(description)\n self.description = check.opt_str_param(description, "description")\n self.metadata = normalize_metadata(\n check.opt_mapping_param(metadata, "metadata", key_type=str),\n )\n self.allow_retries = check.opt_bool_param(allow_retries, "allow_retries", True)
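A minimal sketch of raising ``Failure`` with structured metadata and retries disabled; the op name and threshold are hypothetical:

.. code-block:: python

    from dagster import Failure, op

    @op
    def validate_row_count(row_count: int) -> int:
        if row_count == 0:
            raise Failure(
                description="No rows found in the upstream table",
                metadata={"expected_minimum": 1, "found": row_count},
                allow_retries=False,  # bypass any retry policy and fail immediately
            )
        return row_count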
\n\n\n
[docs]class RetryRequested(Exception):\n """An exception to raise from an op to indicate that it should be retried.\n\n Args:\n max_retries (Optional[int]):\n The max number of retries this step should attempt before failing\n seconds_to_wait (Optional[Union[float,int]]):\n Seconds to wait before restarting the step after putting the step in\n to the up_for_retry state\n\n Example:\n .. code-block:: python\n\n @op\n def flakes():\n try:\n flakey_operation()\n except Exception as e:\n raise RetryRequested(max_retries=3) from e\n """\n\n def __init__(\n self, max_retries: Optional[int] = 1, seconds_to_wait: Optional[Union[float, int]] = None\n ):\n super(RetryRequested, self).__init__()\n self.max_retries = check.int_param(max_retries, "max_retries")\n self.seconds_to_wait = check.opt_numeric_param(seconds_to_wait, "seconds_to_wait")
\n\n\nclass ObjectStoreOperationType(Enum):\n SET_OBJECT = "SET_OBJECT"\n GET_OBJECT = "GET_OBJECT"\n RM_OBJECT = "RM_OBJECT"\n CP_OBJECT = "CP_OBJECT"\n\n\nclass ObjectStoreOperation(\n NamedTuple(\n "_ObjectStoreOperation",\n [\n ("op", ObjectStoreOperationType),\n ("key", str),\n ("dest_key", Optional[str]),\n ("obj", Any),\n ("serialization_strategy_name", Optional[str]),\n ("object_store_name", Optional[str]),\n ("value_name", Optional[str]),\n ("version", Optional[str]),\n ("mapping_key", Optional[str]),\n ],\n )\n):\n """This event is used internally by Dagster machinery when values are written to and read from\n an ObjectStore.\n\n Users should not import this class or yield events of this type from user code.\n\n Args:\n op (ObjectStoreOperationType): The type of the operation on the object store.\n key (str): The key of the object on which the operation was performed.\n dest_key (Optional[str]): The destination key, if any, to which the object was copied.\n obj (Any): The object, if any, retrieved by the operation.\n serialization_strategy_name (Optional[str]): The name of the serialization strategy, if any,\n employed by the operation\n object_store_name (Optional[str]): The name of the object store that performed the\n operation.\n value_name (Optional[str]): The name of the input/output\n version (Optional[str]): (Experimental) The version of the stored data.\n mapping_key (Optional[str]): The mapping key when a dynamic output is used.\n """\n\n def __new__(\n cls,\n op: ObjectStoreOperationType,\n key: str,\n dest_key: Optional[str] = None,\n obj: Any = None,\n serialization_strategy_name: Optional[str] = None,\n object_store_name: Optional[str] = None,\n value_name: Optional[str] = None,\n version: Optional[str] = None,\n mapping_key: Optional[str] = None,\n ):\n return super(ObjectStoreOperation, cls).__new__(\n cls,\n op=op,\n key=check.str_param(key, "key"),\n dest_key=check.opt_str_param(dest_key, "dest_key"),\n obj=obj,\n serialization_strategy_name=check.opt_str_param(\n serialization_strategy_name, "serialization_strategy_name"\n ),\n object_store_name=check.opt_str_param(object_store_name, "object_store_name"),\n value_name=check.opt_str_param(value_name, "value_name"),\n version=check.opt_str_param(version, "version"),\n mapping_key=check.opt_str_param(mapping_key, "mapping_key"),\n )\n\n @classmethod\n def serializable(cls, inst, **kwargs):\n return cls(\n **dict(\n {\n "op": inst.op.value,\n "key": inst.key,\n "dest_key": inst.dest_key,\n "obj": None,\n "serialization_strategy_name": inst.serialization_strategy_name,\n "object_store_name": inst.object_store_name,\n "value_name": inst.value_name,\n "version": inst.version,\n },\n **kwargs,\n )\n )\n\n\nclass HookExecutionResult(\n NamedTuple("_HookExecutionResult", [("hook_name", str), ("is_skipped", bool)])\n):\n """This event is used internally to indicate the execution result of a hook, e.g. whether the\n user-defined hook function is skipped.\n\n Args:\n hook_name (str): The name of the hook.\n is_skipped (bool): ``False`` if the hook_fn is executed, ``True`` otheriwse.\n """\n\n def __new__(cls, hook_name: str, is_skipped: Optional[bool] = None):\n return super(HookExecutionResult, cls).__new__(\n cls,\n hook_name=check.str_param(hook_name, "hook_name"),\n is_skipped=cast(bool, check.opt_bool_param(is_skipped, "is_skipped", default=False)),\n )\n\n\nUserEvent = Union[AssetMaterialization, AssetObservation, ExpectationResult]\n
", "current_page_name": "_modules/dagster/_core/definitions/events", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.events"}, "executor_definition": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.executor_definition

\nfrom enum import Enum as PyEnum\nfrom functools import update_wrapper\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, Mapping, Optional, Sequence, Union, overload\n\nfrom typing_extensions import Self, TypeAlias\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._builtins import Int\nfrom dagster._config import Field, Noneable, Selector, UserConfigSchema\nfrom dagster._core.definitions.configurable import (\n    ConfiguredDefinitionConfigSchema,\n    NamedConfigurableDefinition,\n)\nfrom dagster._core.definitions.job_base import IJob\nfrom dagster._core.definitions.reconstruct import ReconstructableJob\nfrom dagster._core.errors import DagsterUnmetExecutorRequirementsError\nfrom dagster._core.execution.retries import RetryMode, get_retries_config\nfrom dagster._core.execution.tags import get_tag_concurrency_limits_config\n\nfrom .definition_config_schema import (\n    IDefinitionConfigSchema,\n    convert_user_facing_definition_config_schema,\n)\n\nif TYPE_CHECKING:\n    from dagster._core.executor.base import Executor\n    from dagster._core.executor.in_process import InProcessExecutor\n    from dagster._core.executor.init import InitExecutorContext\n    from dagster._core.executor.multiprocess import MultiprocessExecutor\n    from dagster._core.instance import DagsterInstance\n\n\nclass ExecutorRequirement(PyEnum):\n    """An ExecutorDefinition can include a list of requirements that the system uses to\n    check whether the executor will be able to work for a particular job execution.\n    """\n\n    # The passed in IJob must be reconstructable across process boundaries\n    RECONSTRUCTABLE_PIPELINE = (  # This needs to still exist for folks who may have written their own executor\n        "RECONSTRUCTABLE_PIPELINE"\n    )\n    RECONSTRUCTABLE_JOB = "RECONSTRUCTABLE_PIPELINE"\n\n    # The DagsterInstance must be loadable in a different process\n    NON_EPHEMERAL_INSTANCE = "NON_EPHEMERAL_INSTANCE"\n\n    # Any op outputs on the job must be persisted\n    PERSISTENT_OUTPUTS = "PERSISTENT_OUTPUTS"\n\n\ndef multiple_process_executor_requirements() -> Sequence[ExecutorRequirement]:\n    return [\n        ExecutorRequirement.RECONSTRUCTABLE_JOB,\n        ExecutorRequirement.NON_EPHEMERAL_INSTANCE,\n        ExecutorRequirement.PERSISTENT_OUTPUTS,\n    ]\n\n\nExecutorConfig = Mapping[str, object]\nExecutorCreationFunction: TypeAlias = Callable[["InitExecutorContext"], "Executor"]\nExecutorRequirementsFunction: TypeAlias = Callable[[ExecutorConfig], Sequence[ExecutorRequirement]]\n\n\n
[docs]class ExecutorDefinition(NamedConfigurableDefinition):\n """An executor is responsible for executing the steps of a job.\n\n Args:\n name (str): The name of the executor.\n config_schema (Optional[ConfigSchema]): The schema for the config. Configuration data\n available in `init_context.executor_config`. If not set, Dagster will accept any config\n provided.\n requirements (Optional[List[ExecutorRequirement]]): Any requirements that must\n be met in order for the executor to be usable for a particular job execution.\n executor_creation_fn(Optional[Callable]): Should accept an :py:class:`InitExecutorContext`\n and return an instance of :py:class:`Executor`\n required_resource_keys (Optional[Set[str]]): Keys for the resources required by the\n executor.\n description (Optional[str]): A description of the executor.\n """\n\n def __init__(\n self,\n name: str,\n config_schema: Optional[UserConfigSchema] = None,\n requirements: Union[\n ExecutorRequirementsFunction, Optional[Sequence[ExecutorRequirement]]\n ] = None,\n executor_creation_fn: Optional[ExecutorCreationFunction] = None,\n description: Optional[str] = None,\n ):\n self._name = check.str_param(name, "name")\n self._requirements_fn: ExecutorRequirementsFunction\n if callable(requirements):\n self._requirements_fn = requirements\n else:\n requirements_lst = check.opt_list_param(\n requirements, "requirements", of_type=ExecutorRequirement\n )\n self._requirements_fn = lambda _: requirements_lst\n self._config_schema = convert_user_facing_definition_config_schema(config_schema)\n self._executor_creation_fn = check.opt_callable_param(\n executor_creation_fn, "executor_creation_fn"\n )\n self._description = check.opt_str_param(description, "description")\n\n @public\n @property\n def name(self) -> str:\n """Name of the executor."""\n return self._name\n\n @public\n @property\n def description(self) -> Optional[str]:\n """Description of executor, if provided."""\n return self._description\n\n @property\n def config_schema(self) -> IDefinitionConfigSchema:\n return self._config_schema\n\n def get_requirements(\n self, executor_config: Mapping[str, object]\n ) -> Sequence[ExecutorRequirement]:\n return self._requirements_fn(executor_config)\n\n @public\n @property\n def executor_creation_fn(self) -> Optional[ExecutorCreationFunction]:\n """Callable that takes an :py:class:`InitExecutorContext` and returns an instance of\n :py:class:`Executor`.\n """\n return self._executor_creation_fn\n\n def copy_for_configured(self, name, description, config_schema) -> "ExecutorDefinition":\n return ExecutorDefinition(\n name=name,\n config_schema=config_schema, # type: ignore\n executor_creation_fn=self.executor_creation_fn,\n description=description or self.description,\n requirements=self._requirements_fn,\n )\n\n @staticmethod\n def hardcoded_executor(executor: "Executor"):\n return ExecutorDefinition(\n # Executor name was only relevant in the pipeline/solid/mode world, so we\n # can put a dummy value\n name="__executor__",\n executor_creation_fn=lambda _init_context: executor,\n )\n\n # Backcompat: Overrides configured method to provide name as a keyword argument.\n # If no name is provided, the name is pulled off of this ExecutorDefinition.\n
[docs] @public\n def configured(\n self,\n config_or_config_fn: Any,\n name: Optional[str] = None,\n config_schema: Optional[UserConfigSchema] = None,\n description: Optional[str] = None,\n ) -> Self:\n """Wraps this object in an object of the same type that provides configuration to the inner\n object.\n\n Using ``configured`` may result in config values being displayed in\n the Dagster UI, so it is not recommended to use this API with sensitive values,\n such as secrets.\n\n Args:\n config_or_config_fn (Union[Any, Callable[[Any], Any]]): Either (1) Run configuration\n that fully satisfies this object's config schema or (2) A function that accepts run\n configuration and returns run configuration that fully satisfies this object's\n config schema. In the latter case, config_schema must be specified. When\n passing a function, it's easiest to use :py:func:`configured`.\n name (Optional[str]): Name of the new definition. If not provided, the emitted\n definition will inherit the name of the `ExecutorDefinition` upon which this\n function is called.\n config_schema (Optional[ConfigSchema]): If config_or_config_fn is a function, the config\n schema that its input must satisfy. If not set, Dagster will accept any config\n provided.\n description (Optional[str]): Description of the new definition. If not specified,\n inherits the description of the definition being configured.\n\n Returns (ConfigurableDefinition): A configured version of this object.\n """\n name = check.opt_str_param(name, "name")\n\n new_config_schema = ConfiguredDefinitionConfigSchema(\n self, convert_user_facing_definition_config_schema(config_schema), config_or_config_fn\n )\n\n return self.copy_for_configured(name or self.name, description, new_config_schema)
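As a concrete illustration of ``configured``, the following is a minimal sketch (not part of this module) that pre-configures the built-in ``multiprocess_executor``; it assumes the remaining fields of its config schema fall back to their defaults, and the name ``limited_multiprocess`` is arbitrary:

.. code-block:: python

    from dagster import multiprocess_executor

    # Produce a copy of the executor with part of its config baked in.
    # Assumes the other fields of the multiprocess config schema have defaults.
    limited_multiprocess = multiprocess_executor.configured(
        {"max_concurrent": 4},
        name="limited_multiprocess",
    )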
\n\n\n@overload\ndef executor(name: ExecutorCreationFunction) -> ExecutorDefinition: ...\n\n\n@overload\ndef executor(\n name: Optional[str] = ...,\n config_schema: Optional[UserConfigSchema] = ...,\n requirements: Optional[\n Union[ExecutorRequirementsFunction, Sequence[ExecutorRequirement]]\n ] = ...,\n) -> "_ExecutorDecoratorCallable": ...\n\n\n
[docs]def executor(\n name: Union[ExecutorCreationFunction, Optional[str]] = None,\n config_schema: Optional[UserConfigSchema] = None,\n requirements: Optional[\n Union[ExecutorRequirementsFunction, Sequence[ExecutorRequirement]]\n ] = None,\n) -> Union[ExecutorDefinition, "_ExecutorDecoratorCallable"]:\n """Define an executor.\n\n The decorated function should accept an :py:class:`InitExecutorContext` and return an instance\n of :py:class:`Executor`.\n\n Args:\n name (Optional[str]): The name of the executor.\n config_schema (Optional[ConfigSchema]): The schema for the config. Configuration data available in\n `init_context.executor_config`. If not set, Dagster will accept any config provided.\n requirements (Optional[List[ExecutorRequirement]]): Any requirements that must\n be met in order for the executor to be usable for a particular job execution.\n """\n if callable(name):\n check.invariant(config_schema is None)\n check.invariant(requirements is None)\n return _ExecutorDecoratorCallable()(name)\n\n return _ExecutorDecoratorCallable(\n name=name, config_schema=config_schema, requirements=requirements\n )
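For illustration, a minimal sketch of the decorator form, reusing the built-in in-process ``Executor`` implementation and the import paths that appear elsewhere in this module (the executor name ``my_in_process`` is arbitrary):

.. code-block:: python

    from dagster import executor
    from dagster._core.execution.retries import RetryMode
    from dagster._core.executor.in_process import InProcessExecutor

    @executor(name="my_in_process")
    def my_in_process_executor(_init_context):
        # The decorated function receives an InitExecutorContext and must
        # return an Executor instance.
        return InProcessExecutor(retries=RetryMode.ENABLED, marker_to_close=None)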
\n\n\nclass _ExecutorDecoratorCallable:\n def __init__(self, name=None, config_schema=None, requirements=None):\n self.name = check.opt_str_param(name, "name")\n self.config_schema = config_schema # type check in definition\n self.requirements = requirements\n\n def __call__(self, fn: ExecutorCreationFunction) -> ExecutorDefinition:\n check.callable_param(fn, "fn")\n\n if not self.name:\n self.name = fn.__name__\n\n executor_def = ExecutorDefinition(\n name=self.name,\n config_schema=self.config_schema,\n executor_creation_fn=fn,\n requirements=self.requirements,\n )\n\n # `update_wrapper` typing cannot currently handle a Union of Callables correctly\n update_wrapper(executor_def, wrapped=fn) # type: ignore\n\n return executor_def\n\n\ndef _core_in_process_executor_creation(config: ExecutorConfig) -> "InProcessExecutor":\n from dagster._core.executor.in_process import InProcessExecutor\n\n return InProcessExecutor(\n # shouldn't need to .get() here - issue with defaults in config setup\n retries=RetryMode.from_config(check.dict_elem(config, "retries")), # type: ignore # (possible none)\n marker_to_close=config.get("marker_to_close"), # type: ignore # (should be str)\n )\n\n\nIN_PROC_CONFIG = Field(\n {\n "retries": get_retries_config(),\n "marker_to_close": Field(\n str,\n is_required=False,\n description="[DEPRECATED]",\n ),\n },\n description="Execute all steps in a single process.",\n)\n\n\n
[docs]@executor(\n name="in_process",\n config_schema=IN_PROC_CONFIG,\n)\ndef in_process_executor(init_context):\n """The in-process executor executes all steps in a single process.\n\n To select it, include the following top-level fragment in config:\n\n .. code-block:: yaml\n\n execution:\n in_process:\n\n Execution priority can be configured using the ``dagster/priority`` tag via op metadata,\n where the higher the number the higher the priority. 0 is the default and both positive\n and negative numbers can be used.\n """\n return _core_in_process_executor_creation(init_context.executor_config)
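For example, a small sketch (the job and op names are hypothetical) of selecting this executor in code rather than via run config:

.. code-block:: python

    from dagster import in_process_executor, job, op

    @op
    def do_something():
        return 1

    # Attach the in-process executor directly to the job definition.
    @job(executor_def=in_process_executor)
    def single_process_job():
        do_something()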
\n\n\n@executor(name="execute_in_process_executor")\ndef execute_in_process_executor(_) -> "InProcessExecutor":\n """Executor used by execute_in_process.\n\n Use of this executor triggers special behavior in the config system that ignores all incoming\n executor config. This is because someone might set executor config on a job, and when we foist\n this executor onto the job for `execute_in_process`, that config becomes nonsensical.\n """\n from dagster._core.executor.in_process import InProcessExecutor\n\n return InProcessExecutor(\n retries=RetryMode.ENABLED,\n marker_to_close=None,\n )\n\n\ndef _core_multiprocess_executor_creation(config: ExecutorConfig) -> "MultiprocessExecutor":\n from dagster._core.executor.multiprocess import MultiprocessExecutor\n\n # unpack optional selector\n start_method = None\n start_cfg: Dict[str, object] = {}\n start_selector = check.opt_dict_elem(config, "start_method")\n if start_selector:\n start_method, start_cfg = next(iter(start_selector.items()))\n\n return MultiprocessExecutor(\n max_concurrent=check.opt_int_elem(config, "max_concurrent"),\n tag_concurrency_limits=check.opt_list_elem(config, "tag_concurrency_limits"),\n retries=RetryMode.from_config(check.dict_elem(config, "retries")), # type: ignore\n start_method=start_method,\n explicit_forkserver_preload=check.opt_list_elem(start_cfg, "preload_modules", of_type=str),\n )\n\n\nMULTI_PROC_CONFIG = Field(\n {\n "max_concurrent": Field(\n Noneable(Int),\n default_value=None,\n description=(\n "The number of processes that may run concurrently. "\n "By default, this is set to be the return value of `multiprocessing.cpu_count()`."\n ),\n ),\n "tag_concurrency_limits": get_tag_concurrency_limits_config(),\n "start_method": Field(\n Selector(\n fields={\n "spawn": Field(\n {},\n description=(\n "Configure the multiprocess executor to start subprocesses "\n "using `spawn`."\n ),\n ),\n "forkserver": Field(\n {\n "preload_modules": Field(\n [str],\n is_required=False,\n description=(\n "Explicitly specify the modules to preload in the forkserver."\n " Otherwise, there are two cases for default values if modules"\n " are not specified. If the Dagster job was loaded from a"\n " module, the same module will be preloaded. If not, the"\n " `dagster` module is preloaded."\n ),\n ),\n },\n description=(\n "Configure the multiprocess executor to start subprocesses "\n "using `forkserver`."\n ),\n ),\n # fork currently unsupported due to threads usage\n }\n ),\n is_required=False,\n description=(\n "Select how subprocesses are created. By default, `spawn` is selected. See "\n "https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods."\n ),\n ),\n "retries": get_retries_config(),\n },\n description="Execute each step in an individual process.",\n)\n\n\n
[docs]@executor(\n name="multiprocess",\n config_schema=MULTI_PROC_CONFIG,\n requirements=multiple_process_executor_requirements(),\n)\ndef multiprocess_executor(init_context):\n """The multiprocess executor executes each step in an individual process.\n\n Any job that does not specify custom executors will use the multiprocess_executor by default.\n To configure the multiprocess executor, include a fragment such as the following in your run\n config:\n\n .. code-block:: yaml\n\n execution:\n config:\n multiprocess:\n max_concurrent: 4\n\n The ``max_concurrent`` arg is optional and tells the execution engine how many processes may run\n concurrently. By default, or if you set ``max_concurrent`` to be None or 0, this is the return value of\n :py:func:`python:multiprocessing.cpu_count`.\n\n Execution priority can be configured using the ``dagster/priority`` tag via op metadata,\n where the higher the number the higher the priority. 0 is the default and both positive\n and negative numbers can be used.\n """\n return _core_multiprocess_executor_creation(init_context.executor_config)
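The same fragment can also be expressed as a ``run_config`` dictionary in Python; this sketch simply mirrors the YAML above and assumes the job is using the default executor selector shown later in this module:

.. code-block:: python

    # The YAML fragment above, expressed as a Python run_config dictionary.
    run_config = {
        "execution": {
            "config": {
                "multiprocess": {
                    "max_concurrent": 4,
                },
            },
        },
    }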
\n\n\ndef check_cross_process_constraints(init_context: "InitExecutorContext") -> None:\n from dagster._core.executor.init import InitExecutorContext\n\n check.inst_param(init_context, "init_context", InitExecutorContext)\n requirements_lst = init_context.executor_def.get_requirements(init_context.executor_config)\n\n if ExecutorRequirement.RECONSTRUCTABLE_JOB in requirements_lst:\n _check_intra_process_job(init_context.job)\n\n if ExecutorRequirement.NON_EPHEMERAL_INSTANCE in requirements_lst:\n _check_non_ephemeral_instance(init_context.instance)\n\n\ndef _check_intra_process_job(job: IJob) -> None:\n if not isinstance(job, ReconstructableJob):\n raise DagsterUnmetExecutorRequirementsError(\n "You have attempted to use an executor that uses multiple processes with the job"\n f' "{job.get_definition().name}" that is not reconstructable. Job must be loaded in a'\n " way that allows dagster to reconstruct them in a new process. This means: \\n *"\n " using the file, module, or workspace.yaml arguments of"\n " dagster-webserver/dagster-graphql/dagster\\n * loading the job through the"\n " reconstructable() function\\n"\n )\n\n\ndef _check_non_ephemeral_instance(instance: "DagsterInstance") -> None:\n if instance.is_ephemeral:\n raise DagsterUnmetExecutorRequirementsError(\n "You have attempted to use an executor that uses multiple processes with an ephemeral"\n " DagsterInstance. A non-ephemeral instance is needed to coordinate execution between"\n " multiple processes. You can configure your default instance via $DAGSTER_HOME or"\n " ensure a valid one is passed when invoking the python APIs. You can learn more about"\n " setting up a persistent DagsterInstance from the DagsterInstance docs here:"\n " https://docs.dagster.io/deployment/dagster-instance#default-local-behavior"\n )\n\n\ndef _get_default_executor_requirements(\n executor_config: ExecutorConfig,\n) -> Sequence[ExecutorRequirement]:\n return multiple_process_executor_requirements() if "multiprocess" in executor_config else []\n\n\n
[docs]@executor(\n name="multi_or_in_process_executor",\n config_schema=Field(\n Selector(\n {"multiprocess": MULTI_PROC_CONFIG, "in_process": IN_PROC_CONFIG},\n ),\n default_value={"multiprocess": {}},\n ),\n requirements=_get_default_executor_requirements,\n)\ndef multi_or_in_process_executor(init_context: "InitExecutorContext") -> "Executor":\n """The default executor for a job.\n\n This is the executor available by default on a :py:class:`JobDefinition`\n that does not provide custom executors. This executor has a multiprocessing-enabled mode, and a\n single-process mode. By default, multiprocessing mode is enabled. Switching between multiprocess\n mode and in-process mode can be achieved via config.\n\n .. code-block:: yaml\n\n execution:\n config:\n multiprocess:\n\n\n execution:\n config:\n in_process:\n\n When using the multiprocess mode, ``max_concurrent`` and ``retries`` can also be configured.\n\n .. code-block:: yaml\n\n execution:\n config:\n multiprocess:\n max_concurrent: 4\n retries:\n enabled:\n\n The ``max_concurrent`` arg is optional and tells the execution engine how many processes may run\n concurrently. By default, or if you set ``max_concurrent`` to be 0, this is the return value of\n :py:func:`python:multiprocessing.cpu_count`.\n\n When using the in_process mode, then only retries can be configured.\n\n Execution priority can be configured using the ``dagster/priority`` tag via op metadata,\n where the higher the number the higher the priority. 0 is the default and both positive\n and negative numbers can be used.\n """\n if "multiprocess" in init_context.executor_config:\n return _core_multiprocess_executor_creation(\n check.dict_elem(init_context.executor_config, "multiprocess")\n )\n else:\n return _core_in_process_executor_creation(\n check.dict_elem(init_context.executor_config, "in_process")\n )
\n
", "current_page_name": "_modules/dagster/_core/definitions/executor_definition", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.executor_definition"}, "freshness_policy": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.freshness_policy

\nimport datetime\nfrom typing import AbstractSet, NamedTuple, Optional\n\nimport pendulum\n\nimport dagster._check as check\nfrom dagster._annotations import experimental\nfrom dagster._core.errors import DagsterInvalidDefinitionError\nfrom dagster._serdes import whitelist_for_serdes\nfrom dagster._utils.schedules import (\n    is_valid_cron_schedule,\n    reverse_cron_string_iterator,\n)\n\nfrom .events import AssetKey\n\n\nclass FreshnessConstraint(NamedTuple):\n    asset_keys: AbstractSet[AssetKey]\n    required_data_time: datetime.datetime\n    required_by_time: datetime.datetime\n\n\nclass FreshnessMinutes(NamedTuple):\n    overdue_minutes: float\n    lag_minutes: float\n\n\n
[docs]@experimental\n@whitelist_for_serdes\nclass FreshnessPolicy(\n NamedTuple(\n "_FreshnessPolicy",\n [\n ("maximum_lag_minutes", float),\n ("cron_schedule", Optional[str]),\n ("cron_schedule_timezone", Optional[str]),\n ],\n )\n):\n """A FreshnessPolicy specifies how up-to-date you want a given asset to be.\n\n Attaching a FreshnessPolicy to an asset definition encodes an expectation on the upstream data\n that you expect to be incorporated into the current state of that asset at certain points in time.\n How this is calculated differs depending on if the asset is unpartitioned or time-partitioned\n (other partitioning schemes are not supported).\n\n For time-partitioned assets, the current data time for the asset is simple to calculate. The\n upstream data that is incorporated into the asset is exactly the set of materialized partitions\n for that asset. Thus, the current data time for the asset is simply the time up to which all\n partitions have been materialized.\n\n For unpartitioned assets, the current data time is based on the upstream materialization records\n that were read to generate the current state of the asset. More specifically,\n imagine you have two assets, where A depends on B. If `B` has a FreshnessPolicy defined, this\n means that at time T, the most recent materialization of `B` should have come after a\n materialization of `A` which was no more than `maximum_lag_minutes` ago. This calculation is\n recursive: any given asset is expected to incorporate up-to-date data from all of its upstream\n assets.\n\n It is assumed that all asset definitions with no upstream asset definitions consume from some\n always-updating source. That is, if you materialize that asset at time T, it will incorporate\n all data up to time T.\n\n If `cron_schedule` is not defined, the given asset will be expected to incorporate upstream\n data from no more than `maximum_lag_minutes` ago at all points in time. For example, "The events\n table should always have data from at most 1 hour ago".\n\n If `cron_schedule` is defined, the given asset will be expected to incorporate upstream data\n from no more than `maximum_lag_minutes` ago at each cron schedule tick. For example, "By 9AM,\n the signups table should contain all of yesterday's data".\n\n The freshness status of assets with policies defined will be visible in the UI. If you are using\n an asset reconciliation sensor, this sensor will kick off runs to help keep your assets up to\n date with respect to their FreshnessPolicy.\n\n Args:\n maximum_lag_minutes (float): An upper bound for how old the data contained within this\n asset may be.\n cron_schedule (Optional[str]): A cron schedule string (e.g. ``"0 1 * * *"``) specifying a\n series of times by which the `maximum_lag_minutes` constraint must be satisfied. If\n no cron schedule is provided, then this constraint must be satisfied at all times.\n cron_schedule_timezone (Optional[str]): Timezone in which the cron schedule should be evaluated.\n If not specified, defaults to UTC. Supported strings for timezones are the ones provided\n by the `IANA time zone database <https://www.iana.org/time-zones>` - e.g.\n "America/Los_Angeles".\n\n .. 
code-block:: python\n\n # At any point in time, this asset must incorporate all upstream data from at least 30 minutes ago.\n @asset(freshness_policy=FreshnessPolicy(maximum_lag_minutes=30))\n def fresh_asset():\n ...\n\n # At any point in time, this asset must incorporate all upstream data from at least 30 minutes ago.\n @asset(freshness_policy=FreshnessPolicy(maximum_lag_minutes=30))\n def cron_up_to_date_asset():\n ...\n\n """\n\n def __new__(\n cls,\n *,\n maximum_lag_minutes: float,\n cron_schedule: Optional[str] = None,\n cron_schedule_timezone: Optional[str] = None,\n ):\n if cron_schedule is not None:\n if not is_valid_cron_schedule(cron_schedule):\n raise DagsterInvalidDefinitionError(f"Invalid cron schedule '{cron_schedule}'.")\n check.param_invariant(\n is_valid_cron_schedule(cron_schedule),\n "cron_schedule",\n f"Invalid cron schedule '{cron_schedule}'.",\n )\n if cron_schedule_timezone is not None:\n check.param_invariant(\n cron_schedule is not None,\n "cron_schedule_timezone",\n "Cannot specify cron_schedule_timezone without a cron_schedule.",\n )\n try:\n # Verify that the timezone can be loaded\n pendulum.tz.timezone(cron_schedule_timezone) # type: ignore\n except Exception as e:\n raise DagsterInvalidDefinitionError(\n "Invalid cron schedule timezone '{cron_schedule_timezone}'. "\n ) from e\n return super(FreshnessPolicy, cls).__new__(\n cls,\n maximum_lag_minutes=float(\n check.numeric_param(maximum_lag_minutes, "maximum_lag_minutes")\n ),\n cron_schedule=check.opt_str_param(cron_schedule, "cron_schedule"),\n cron_schedule_timezone=check.opt_str_param(\n cron_schedule_timezone, "cron_schedule_timezone"\n ),\n )\n\n @classmethod\n def _create(cls, *args):\n """Pickle requires a method with positional arguments to construct\n instances of a class. Since the constructor for this class has\n keyword arguments only, we define this method to be used by pickle.\n """\n return cls(maximum_lag_minutes=args[0], cron_schedule=args[1])\n\n def __reduce__(self):\n return (self._create, (self.maximum_lag_minutes, self.cron_schedule))\n\n @property\n def maximum_lag_delta(self) -> datetime.timedelta:\n return datetime.timedelta(minutes=self.maximum_lag_minutes)\n\n def get_evaluation_tick(\n self,\n evaluation_time: datetime.datetime,\n ) -> Optional[datetime.datetime]:\n if self.cron_schedule:\n # most recent cron schedule tick\n schedule_ticks = reverse_cron_string_iterator(\n end_timestamp=evaluation_time.timestamp(),\n cron_string=self.cron_schedule,\n execution_timezone=self.cron_schedule_timezone,\n )\n return next(schedule_ticks)\n else:\n return evaluation_time\n\n def minutes_overdue(\n self,\n data_time: Optional[datetime.datetime],\n evaluation_time: datetime.datetime,\n ) -> Optional[FreshnessMinutes]:\n """Returns a number of minutes past the specified freshness policy that this asset currently\n is. If the asset is missing upstream data, or is not materialized at all, then it is unknown\n how overdue it is, and this will return None.\n\n Args:\n data_time (Optional[datetime]): The timestamp of the data that was used to create the\n current version of this asset.\n evaluation_time (datetime): The time at which we're evaluating the overdueness of this\n asset. 
Generally, this is the current time.\n """\n if data_time is None:\n return None\n evaluation_tick = self.get_evaluation_tick(evaluation_time)\n if evaluation_tick is None:\n return None\n required_time = evaluation_tick - self.maximum_lag_delta\n\n return FreshnessMinutes(\n lag_minutes=max(0.0, (evaluation_tick - data_time).total_seconds() / 60),\n overdue_minutes=max(0.0, (required_time - data_time).total_seconds() / 60),\n )
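For example, a minimal sketch of attaching a cron-based policy to an asset definition, in the spirit of the docstring above (the asset name ``signups`` is hypothetical):

.. code-block:: python

    from dagster import FreshnessPolicy, asset

    # "By 9 AM each day, signups should incorporate upstream data from
    # no more than 60 minutes before that tick."
    @asset(
        freshness_policy=FreshnessPolicy(
            maximum_lag_minutes=60,
            cron_schedule="0 9 * * *",
        )
    )
    def signups():
        ...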
\n
", "current_page_name": "_modules/dagster/_core/definitions/freshness_policy", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.freshness_policy"}, "freshness_policy_sensor_definition": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.freshness_policy_sensor_definition

\nfrom typing import Callable, Dict, Mapping, NamedTuple, Optional, Set, cast\n\nimport pendulum\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, experimental\nfrom dagster._core.definitions.asset_selection import AssetSelection\nfrom dagster._core.definitions.data_time import CachingDataTimeResolver\nfrom dagster._core.definitions.events import AssetKey\nfrom dagster._core.definitions.freshness_policy import FreshnessPolicy\nfrom dagster._core.definitions.resource_annotation import get_resource_args\nfrom dagster._core.definitions.scoped_resources_builder import Resources, ScopedResourcesBuilder\nfrom dagster._core.errors import (\n    DagsterInvalidDefinitionError,\n    DagsterInvalidInvocationError,\n    FreshnessPolicySensorExecutionError,\n    user_code_error_boundary,\n)\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._serdes import (\n    serialize_value,\n    whitelist_for_serdes,\n)\nfrom dagster._serdes.errors import DeserializationError\nfrom dagster._serdes.serdes import deserialize_value\nfrom dagster._seven import JSONDecodeError\n\nfrom .sensor_definition import (\n    DefaultSensorStatus,\n    SensorDefinition,\n    SensorEvaluationContext,\n    SensorType,\n    SkipReason,\n    get_context_param_name,\n    get_sensor_context_from_args_or_kwargs,\n    validate_and_get_resource_dict,\n)\n\n\n@whitelist_for_serdes\nclass FreshnessPolicySensorCursor(\n    NamedTuple(\n        "_FreshnessPolicySensorCursor",\n        [("minutes_late_by_key_str", Mapping[str, Optional[float]])],\n    )\n):\n    def __new__(cls, minutes_late_by_key_str: Mapping[str, Optional[float]]):\n        return super(FreshnessPolicySensorCursor, cls).__new__(\n            cls,\n            minutes_late_by_key_str=check.mapping_param(\n                minutes_late_by_key_str, "minutes_late_by_key_str", key_type=str\n            ),\n        )\n\n    @staticmethod\n    def is_valid(json_str: str) -> bool:\n        try:\n            deserialize_value(json_str, FreshnessPolicySensorCursor)\n            return True\n        except (JSONDecodeError, DeserializationError):\n            return False\n\n    @staticmethod\n    def from_dict(\n        minutes_late_by_key: Mapping[AssetKey, Optional[float]]\n    ) -> "FreshnessPolicySensorCursor":\n        return FreshnessPolicySensorCursor(\n            minutes_late_by_key_str={k.to_user_string(): v for k, v in minutes_late_by_key.items()}\n        )\n\n    @property\n    def minutes_late_by_key(self) -> Mapping[AssetKey, Optional[float]]:\n        return {AssetKey.from_user_string(k): v for k, v in self.minutes_late_by_key_str.items()}\n\n    def to_json(self) -> str:\n        return serialize_value(cast(NamedTuple, self))\n\n    @staticmethod\n    def from_json(json_str: str) -> "FreshnessPolicySensorCursor":\n        return deserialize_value(json_str, FreshnessPolicySensorCursor)\n\n\n
[docs]class FreshnessPolicySensorContext(\n NamedTuple(\n "_FreshnessPolicySensorContext",\n [\n ("sensor_name", PublicAttr[str]),\n ("asset_key", PublicAttr[AssetKey]),\n ("freshness_policy", PublicAttr[FreshnessPolicy]),\n ("minutes_overdue", PublicAttr[Optional[float]]),\n ("previous_minutes_overdue", PublicAttr[Optional[float]]),\n ("instance", PublicAttr[DagsterInstance]),\n ("resources", Resources),\n ],\n )\n):\n """The ``context`` object available to a decorated function of ``freshness_policy_sensor``.\n\n Attributes:\n sensor_name (str): the name of the sensor.\n asset_key (AssetKey): the key of the asset being monitored\n freshness_policy (FreshnessPolicy): the freshness policy of the asset being monitored\n minutes_overdue (Optional[float])\n previous_minutes_overdue (Optional[float]): the minutes_overdue value for this asset on the\n previous sensor tick.\n instance (DagsterInstance): the current instance.\n """\n\n def __new__(\n cls,\n sensor_name: str,\n asset_key: AssetKey,\n freshness_policy: FreshnessPolicy,\n minutes_overdue: Optional[float],\n previous_minutes_overdue: Optional[float],\n instance: DagsterInstance,\n resources: Optional[Resources] = None,\n ):\n minutes_overdue = check.opt_numeric_param(minutes_overdue, "minutes_overdue")\n previous_minutes_overdue = check.opt_numeric_param(\n previous_minutes_overdue, "previous_minutes_overdue"\n )\n return super(FreshnessPolicySensorContext, cls).__new__(\n cls,\n sensor_name=check.str_param(sensor_name, "sensor_name"),\n asset_key=check.inst_param(asset_key, "asset_key", AssetKey),\n freshness_policy=check.inst_param(freshness_policy, "FreshnessPolicy", FreshnessPolicy),\n minutes_overdue=float(minutes_overdue) if minutes_overdue is not None else None,\n previous_minutes_overdue=(\n float(previous_minutes_overdue) if previous_minutes_overdue is not None else None\n ),\n instance=check.inst_param(instance, "instance", DagsterInstance),\n resources=resources or ScopedResourcesBuilder.build_empty(),\n )
\n\n\n
[docs]@experimental\ndef build_freshness_policy_sensor_context(\n sensor_name: str,\n asset_key: AssetKey,\n freshness_policy: FreshnessPolicy,\n minutes_overdue: Optional[float],\n previous_minutes_overdue: Optional[float] = None,\n instance: Optional[DagsterInstance] = None,\n resources: Optional[Resources] = None,\n) -> FreshnessPolicySensorContext:\n """Builds freshness policy sensor context from provided parameters.\n\n This function can be used to provide the context argument when directly invoking a function\n decorated with `@freshness_policy_sensor`, such as when writing unit tests.\n\n Args:\n sensor_name (str): The name of the sensor the context is being constructed for.\n asset_key (AssetKey): The AssetKey for the monitored asset\n freshness_policy (FreshnessPolicy): The FreshnessPolicy for the monitored asset\n minutes_overdue (Optional[float]): How overdue the monitored asset currently is\n previous_minutes_overdue (Optional[float]): How overdue the monitored asset was on the\n previous tick.\n instance (DagsterInstance): The dagster instance configured for the context.\n\n Examples:\n .. code-block:: python\n\n context = build_freshness_policy_sensor_context(\n sensor_name="freshness_policy_sensor_to_invoke",\n asset_key=AssetKey("some_asset"),\n freshness_policy=FreshnessPolicy(maximum_lag_minutes=30),\n minutes_overdue=10.0,\n )\n freshness_policy_sensor_to_invoke(context)\n """\n return FreshnessPolicySensorContext(\n sensor_name=sensor_name,\n asset_key=asset_key,\n freshness_policy=freshness_policy,\n minutes_overdue=minutes_overdue,\n previous_minutes_overdue=previous_minutes_overdue,\n instance=instance or DagsterInstance.ephemeral(),\n resources=resources,\n )
\n\n\n
[docs]class FreshnessPolicySensorDefinition(SensorDefinition):\n """Define a sensor that reacts to the status of a given set of asset freshness policies,\n where the decorated function will be evaluated on every sensor tick.\n\n Args:\n name (str): The name of the sensor. Defaults to the name of the decorated function.\n freshness_policy_sensor_fn (Callable[[FreshnessPolicySensorContext], None]): The core\n evaluation function for the sensor. Takes a :py:class:`~dagster.FreshnessPolicySensorContext`.\n asset_selection (AssetSelection): The asset selection monitored by the sensor.\n minimum_interval_seconds (Optional[int]): The minimum number of seconds that will elapse\n between sensor evaluations.\n description (Optional[str]): A human-readable description of the sensor.\n default_status (DefaultSensorStatus): Whether the sensor starts as running or not. The default\n status can be overridden from the Dagster UI or via the GraphQL API.\n """\n\n def __init__(\n self,\n name: str,\n asset_selection: AssetSelection,\n freshness_policy_sensor_fn: Callable[..., None],\n minimum_interval_seconds: Optional[int] = None,\n description: Optional[str] = None,\n default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,\n required_resource_keys: Optional[Set[str]] = None,\n ):\n check.str_param(name, "name")\n check.inst_param(asset_selection, "asset_selection", AssetSelection)\n check.opt_int_param(minimum_interval_seconds, "minimum_interval_seconds")\n check.opt_str_param(description, "description")\n check.inst_param(default_status, "default_status", DefaultSensorStatus)\n\n self._freshness_policy_sensor_fn = check.callable_param(\n freshness_policy_sensor_fn, "freshness_policy_sensor_fn"\n )\n\n resource_arg_names: Set[str] = {\n arg.name for arg in get_resource_args(freshness_policy_sensor_fn)\n }\n\n combined_required_resource_keys = (\n check.opt_set_param(required_resource_keys, "required_resource_keys", of_type=str)\n | resource_arg_names\n )\n\n def _wrapped_fn(context: SensorEvaluationContext):\n from dagster._utils.caching_instance_queryer import (\n CachingInstanceQueryer, # expensive import\n )\n\n if context.repository_def is None:\n raise DagsterInvalidInvocationError(\n "The `repository_def` property on the `SensorEvaluationContext` passed into a "\n "`FreshnessPolicySensorDefinition` must not be None."\n )\n\n if context.cursor is None or not FreshnessPolicySensorCursor.is_valid(context.cursor):\n new_cursor = FreshnessPolicySensorCursor({})\n context.update_cursor(new_cursor.to_json())\n yield SkipReason(f"Initializing {name}.")\n return\n\n evaluation_time = pendulum.now("UTC")\n asset_graph = context.repository_def.asset_graph\n instance_queryer = CachingInstanceQueryer(\n context.instance, asset_graph, evaluation_time\n )\n data_time_resolver = CachingDataTimeResolver(instance_queryer=instance_queryer)\n monitored_keys = asset_selection.resolve(asset_graph)\n\n # get the previous status from the cursor\n previous_minutes_late_by_key = FreshnessPolicySensorCursor.from_json(\n context.cursor\n ).minutes_late_by_key\n\n minutes_late_by_key: Dict[AssetKey, Optional[float]] = {}\n for asset_key in monitored_keys:\n freshness_policy = asset_graph.freshness_policies_by_key.get(asset_key)\n if freshness_policy is None:\n continue\n\n # get the current minutes_overdue value for this asset\n result = data_time_resolver.get_minutes_overdue(\n evaluation_time=evaluation_time,\n asset_key=asset_key,\n )\n minutes_late_by_key[asset_key] = result.overdue_minutes if result else 
None\n\n resource_args_populated = validate_and_get_resource_dict(\n context.resources, name, resource_arg_names\n )\n context_param_name = get_context_param_name(freshness_policy_sensor_fn)\n freshness_context = FreshnessPolicySensorContext(\n sensor_name=name,\n asset_key=asset_key,\n freshness_policy=freshness_policy,\n minutes_overdue=minutes_late_by_key[asset_key],\n previous_minutes_overdue=previous_minutes_late_by_key.get(asset_key),\n instance=context.instance,\n resources=context.resources,\n )\n\n with user_code_error_boundary(\n FreshnessPolicySensorExecutionError,\n lambda: f'Error occurred during the execution of sensor "{name}".',\n ):\n context_param = (\n {context_param_name: freshness_context} if context_param_name else {}\n )\n result = freshness_policy_sensor_fn(\n **context_param,\n **resource_args_populated,\n )\n if result is not None:\n raise DagsterInvalidDefinitionError(\n "Functions decorated by `@freshness_policy_sensor` may not return or yield"\n " a value."\n )\n\n context.update_cursor(\n FreshnessPolicySensorCursor.from_dict(minutes_late_by_key).to_json()\n )\n\n super(FreshnessPolicySensorDefinition, self).__init__(\n name=name,\n evaluation_fn=_wrapped_fn,\n minimum_interval_seconds=minimum_interval_seconds,\n description=description,\n default_status=default_status,\n required_resource_keys=combined_required_resource_keys,\n )\n\n def __call__(self, *args, **kwargs) -> None:\n context_param_name = get_context_param_name(self._freshness_policy_sensor_fn)\n\n sensor_context = get_sensor_context_from_args_or_kwargs(\n self._freshness_policy_sensor_fn,\n args,\n kwargs,\n context_type=FreshnessPolicySensorContext,\n )\n context_param = (\n {context_param_name: sensor_context} if context_param_name and sensor_context else {}\n )\n\n resources = validate_and_get_resource_dict(\n sensor_context.resources if sensor_context else ScopedResourcesBuilder.build_empty(),\n self._name,\n self._required_resource_keys,\n )\n\n return self._freshness_policy_sensor_fn(**context_param, **resources)\n\n @property\n def sensor_type(self) -> SensorType:\n return SensorType.FRESHNESS_POLICY
\n\n\n
[docs]@experimental\ndef freshness_policy_sensor(\n asset_selection: AssetSelection,\n *,\n name: Optional[str] = None,\n minimum_interval_seconds: Optional[int] = None,\n description: Optional[str] = None,\n default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,\n) -> Callable[[Callable[..., None]], FreshnessPolicySensorDefinition,]:\n """Define a sensor that reacts to the status of a given set of asset freshness policies, where the\n decorated function will be evaluated on every tick for each asset in the selection that has a\n FreshnessPolicy defined.\n\n Note: returning or yielding a value from the annotated function will result in an error.\n\n Takes a :py:class:`~dagster.FreshnessPolicySensorContext`.\n\n Args:\n asset_selection (AssetSelection): The asset selection monitored by the sensor.\n name (Optional[str]): The name of the sensor. Defaults to the name of the decorated function.\n freshness_policy_sensor_fn (Callable[[FreshnessPolicySensorContext], None]): The core\n evaluation function for the sensor. Takes a :py:class:`~dagster.FreshnessPolicySensorContext`.\n minimum_interval_seconds (Optional[int]): The minimum number of seconds that will elapse\n between sensor evaluations.\n description (Optional[str]): A human-readable description of the sensor.\n default_status (DefaultSensorStatus): Whether the sensor starts as running or not. The default\n status can be overridden from the Dagster UI or via the GraphQL API.\n """\n\n def inner(fn: Callable[..., None]) -> FreshnessPolicySensorDefinition:\n check.callable_param(fn, "fn")\n sensor_name = name or fn.__name__\n\n return FreshnessPolicySensorDefinition(\n name=sensor_name,\n freshness_policy_sensor_fn=fn,\n asset_selection=asset_selection,\n minimum_interval_seconds=minimum_interval_seconds,\n description=description,\n default_status=default_status,\n )\n\n return inner
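A minimal sketch of the decorator in use; ``send_alert`` is a hypothetical helper and the selection and threshold are illustrative only:

.. code-block:: python

    from dagster import AssetSelection, FreshnessPolicySensorContext, freshness_policy_sensor

    def send_alert(message: str) -> None:
        # Hypothetical alerting hook; replace with your own notification logic.
        print(message)

    @freshness_policy_sensor(asset_selection=AssetSelection.all())
    def overdue_alert_sensor(context: FreshnessPolicySensorContext):
        # Evaluated per asset in the selection that has a FreshnessPolicy defined.
        if context.minutes_overdue and context.minutes_overdue > 0:
            # The decorated function must not return or yield a value.
            send_alert(
                f"{context.asset_key.to_user_string()} is "
                f"{context.minutes_overdue:.1f} minutes overdue"
            )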
\n
", "current_page_name": "_modules/dagster/_core/definitions/freshness_policy_sensor_definition", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.freshness_policy_sensor_definition"}, "graph_definition": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.graph_definition

\nfrom collections import OrderedDict, defaultdict\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Dict,\n    Iterable,\n    Iterator,\n    List,\n    Mapping,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n    TypeVar,\n    Union,\n    cast,\n)\n\nfrom toposort import CircularDependencyError, toposort_flatten\nfrom typing_extensions import Self\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.definitions.config import ConfigMapping\nfrom dagster._core.definitions.definition_config_schema import IDefinitionConfigSchema\nfrom dagster._core.definitions.policy import RetryPolicy\nfrom dagster._core.errors import DagsterInvalidDefinitionError, DagsterInvariantViolationError\nfrom dagster._core.selector.subset_selector import AssetSelectionData\nfrom dagster._core.types.dagster_type import (\n    DagsterType,\n    DagsterTypeKind,\n    construct_dagster_type_dictionary,\n)\n\nfrom .dependency import (\n    DependencyMapping,\n    DependencyStructure,\n    GraphNode,\n    Node,\n    NodeHandle,\n    NodeInput,\n    NodeInputHandle,\n    NodeInvocation,\n)\nfrom .hook_definition import HookDefinition\nfrom .input import FanInInputPointer, InputDefinition, InputMapping, InputPointer\nfrom .logger_definition import LoggerDefinition\nfrom .metadata import RawMetadataValue\nfrom .node_container import create_execution_structure, normalize_dependency_dict\nfrom .node_definition import NodeDefinition\nfrom .output import OutputDefinition, OutputMapping\nfrom .resource_requirement import ResourceRequirement\nfrom .version_strategy import VersionStrategy\n\nif TYPE_CHECKING:\n    from dagster._core.execution.execute_in_process_result import ExecuteInProcessResult\n    from dagster._core.instance import DagsterInstance\n\n    from .asset_layer import AssetLayer\n    from .composition import PendingNodeInvocation\n    from .executor_definition import ExecutorDefinition\n    from .job_definition import JobDefinition\n    from .op_definition import OpDefinition\n    from .partition import PartitionedConfig, PartitionsDefinition\n    from .run_config import RunConfig\n    from .source_asset import SourceAsset\n\nT = TypeVar("T")\n\n\ndef _check_node_defs_arg(\n    graph_name: str, node_defs: Optional[Sequence[NodeDefinition]]\n) -> Sequence[NodeDefinition]:\n    node_defs = node_defs or []\n\n    _node_defs = check.opt_sequence_param(node_defs, "node_defs")\n    for node_def in _node_defs:\n        if isinstance(node_def, NodeDefinition):\n            continue\n        elif callable(node_def):\n            raise DagsterInvalidDefinitionError(\n                """You have passed a lambda or function {func} into {name} that is\n                not a node. 
You have likely forgetten to annotate this function with\n                the @op or @graph decorators.'\n                """.format(name=graph_name, func=node_def.__name__)\n            )\n        else:\n            raise DagsterInvalidDefinitionError(f"Invalid item in node list: {node_def!r}")\n\n    return node_defs\n\n\ndef create_adjacency_lists(\n    nodes: Sequence[Node],\n    dep_structure: DependencyStructure,\n) -> Tuple[Mapping[str, Set[str]], Mapping[str, Set[str]]]:\n    visit_dict = {s.name: False for s in nodes}\n    forward_edges: Dict[str, Set[str]] = {s.name: set() for s in nodes}\n    backward_edges: Dict[str, Set[str]] = {s.name: set() for s in nodes}\n\n    def visit(node_name: str) -> None:\n        if visit_dict[node_name]:\n            return\n\n        visit_dict[node_name] = True\n\n        for node_output in dep_structure.all_upstream_outputs_from_node(node_name):\n            forward_node = node_output.node.name\n            backward_node = node_name\n            if forward_node in forward_edges:\n                forward_edges[forward_node].add(backward_node)\n                backward_edges[backward_node].add(forward_node)\n                visit(forward_node)\n\n    for s in nodes:\n        visit(s.name)\n\n    return (forward_edges, backward_edges)\n\n\n
[docs]class GraphDefinition(NodeDefinition):\n """Defines a Dagster op graph.\n\n An op graph is made up of\n\n - Nodes, which can either be an op (the functional unit of computation), or another graph.\n - Dependencies, which determine how the values produced by nodes as outputs flow from\n one node to another. This tells Dagster how to arrange nodes into a directed, acyclic graph\n (DAG) of compute.\n\n End users should prefer the :func:`@graph <graph>` decorator. GraphDefinition is generally\n intended to be used by framework authors or for programatically generated graphs.\n\n Args:\n name (str): The name of the graph. Must be unique within any :py:class:`GraphDefinition`\n or :py:class:`JobDefinition` containing the graph.\n description (Optional[str]): A human-readable description of the job.\n node_defs (Optional[Sequence[NodeDefinition]]): The set of ops / graphs used in this graph.\n dependencies (Optional[Dict[Union[str, NodeInvocation], Dict[str, DependencyDefinition]]]):\n A structure that declares the dependencies of each op's inputs on the outputs of other\n ops in the graph. Keys of the top level dict are either the string names of ops in the\n graph or, in the case of aliased ops, :py:class:`NodeInvocations <NodeInvocation>`.\n Values of the top level dict are themselves dicts, which map input names belonging to\n the op or aliased op to :py:class:`DependencyDefinitions <DependencyDefinition>`.\n input_mappings (Optional[Sequence[InputMapping]]): Defines the inputs to the nested graph, and\n how they map to the inputs of its constituent ops.\n output_mappings (Optional[Sequence[OutputMapping]]): Defines the outputs of the nested graph,\n and how they map from the outputs of its constituent ops.\n config (Optional[ConfigMapping]): Defines the config of the graph, and how its schema maps\n to the config of its constituent ops.\n tags (Optional[Dict[str, Any]]): Arbitrary metadata for any execution of the graph.\n Values that are not strings will be json encoded and must meet the criteria that\n `json.loads(json.dumps(value)) == value`. These tag values may be overwritten by tag\n values provided at invocation time.\n\n Examples:\n .. 
code-block:: python\n\n @op\n def return_one():\n return 1\n\n @op\n def add_one(num):\n return num + 1\n\n graph_def = GraphDefinition(\n name='basic',\n node_defs=[return_one, add_one],\n dependencies={'add_one': {'num': DependencyDefinition('return_one')}},\n )\n """\n\n _node_defs: Sequence[NodeDefinition]\n _dagster_type_dict: Mapping[str, DagsterType]\n _dependencies: DependencyMapping[NodeInvocation]\n _dependency_structure: DependencyStructure\n _node_dict: Mapping[str, Node]\n _input_mappings: Sequence[InputMapping]\n _output_mappings: Sequence[OutputMapping]\n _config_mapping: Optional[ConfigMapping]\n _nodes_in_topological_order: Sequence[Node]\n\n # (node name within the graph -> (input name -> SourceAsset to load that input from))\n # Does NOT include keys for:\n # - Inputs to the graph itself\n # - Inputs to nodes within sub-graphs of the graph\n _node_input_source_assets: Mapping[str, Mapping[str, "SourceAsset"]]\n\n def __init__(\n self,\n name: str,\n *,\n description: Optional[str] = None,\n node_defs: Optional[Sequence[NodeDefinition]] = None,\n dependencies: Optional[\n Union[DependencyMapping[str], DependencyMapping[NodeInvocation]]\n ] = None,\n input_mappings: Optional[Sequence[InputMapping]] = None,\n output_mappings: Optional[Sequence[OutputMapping]] = None,\n config: Optional[ConfigMapping] = None,\n tags: Optional[Mapping[str, str]] = None,\n node_input_source_assets: Optional[Mapping[str, Mapping[str, "SourceAsset"]]] = None,\n **kwargs: Any,\n ):\n self._node_defs = _check_node_defs_arg(name, node_defs)\n\n # `dependencies` will be converted to `dependency_structure` and `node_dict`, which may\n # alternatively be passed directly (useful when copying)\n self._dependencies = normalize_dependency_dict(dependencies)\n self._dependency_structure, self._node_dict = create_execution_structure(\n self._node_defs, self._dependencies, graph_definition=self\n )\n\n # Sequence[InputMapping]\n self._input_mappings = check.opt_sequence_param(input_mappings, "input_mappings")\n input_defs = _validate_in_mappings(\n self._input_mappings,\n self._node_dict,\n self._dependency_structure,\n name,\n class_name=type(self).__name__,\n )\n\n # Sequence[OutputMapping]\n self._output_mappings, output_defs = _validate_out_mappings(\n check.opt_sequence_param(output_mappings, "output_mappings"),\n self._node_dict,\n name,\n class_name=type(self).__name__,\n )\n\n self._config_mapping = check.opt_inst_param(config, "config", ConfigMapping)\n\n super(GraphDefinition, self).__init__(\n name=name,\n description=description,\n input_defs=input_defs,\n output_defs=output_defs,\n tags=tags,\n **kwargs,\n )\n\n # must happen after base class construction as properties are assumed to be there\n # eager computation to detect cycles\n self._nodes_in_topological_order = self._get_nodes_in_topological_order()\n self._dagster_type_dict = construct_dagster_type_dictionary([self])\n self._node_input_source_assets = check.opt_mapping_param(\n node_input_source_assets, "node_input_source_assets", key_type=str, value_type=dict\n )\n\n def _get_nodes_in_topological_order(self) -> Sequence[Node]:\n _forward_edges, backward_edges = create_adjacency_lists(\n self.nodes, self.dependency_structure\n )\n\n try:\n order = toposort_flatten(backward_edges)\n except CircularDependencyError as err:\n raise DagsterInvalidDefinitionError(str(err)) from err\n\n return [self.node_named(node_name) for node_name in order]\n\n def get_inputs_must_be_resolved_top_level(\n self, asset_layer: "AssetLayer", handle: 
Optional[NodeHandle] = None\n ) -> Sequence[InputDefinition]:\n unresolveable_input_defs: List[InputDefinition] = []\n for node in self.node_dict.values():\n cur_handle = NodeHandle(node.name, handle)\n for input_def in node.definition.get_inputs_must_be_resolved_top_level(\n asset_layer, cur_handle\n ):\n if self.dependency_structure.has_deps(NodeInput(node, input_def)):\n continue\n elif not node.container_maps_input(input_def.name):\n raise DagsterInvalidDefinitionError(\n f"Input '{input_def.name}' of {node.describe_node()} "\n "has no way of being resolved. Must provide a resolution to this "\n "input via another op/graph, or via a direct input value mapped from the "\n "top-level graph. To "\n "learn more, see the docs for unconnected inputs: "\n "https://docs.dagster.io/concepts/io-management/unconnected-inputs#unconnected-inputs."\n )\n else:\n mapped_input = node.container_mapped_input(input_def.name)\n unresolveable_input_defs.append(mapped_input.get_definition())\n return unresolveable_input_defs\n\n @property\n def node_type_str(self) -> str:\n return "graph"\n\n @property\n def is_graph_job_op_node(self) -> bool:\n return True\n\n @property\n def nodes(self) -> Sequence[Node]:\n return list(set(self._node_dict.values()))\n\n @property\n def node_dict(self) -> Mapping[str, Node]:\n return self._node_dict\n\n @property\n def node_defs(self) -> Sequence[NodeDefinition]:\n return self._node_defs\n\n @property\n def nodes_in_topological_order(self) -> Sequence[Node]:\n return self._nodes_in_topological_order\n\n @property\n def node_input_source_assets(self) -> Mapping[str, Mapping[str, "SourceAsset"]]:\n return self._node_input_source_assets\n\n def has_node_named(self, name: str) -> bool:\n check.str_param(name, "name")\n return name in self._node_dict\n\n def node_named(self, name: str) -> Node:\n check.str_param(name, "name")\n if name not in self._node_dict:\n raise DagsterInvariantViolationError(f"{self._name} has no op named {name}.")\n\n return self._node_dict[name]\n\n def get_node(self, handle: NodeHandle) -> Node:\n check.inst_param(handle, "handle", NodeHandle)\n current = handle\n lineage: List[str] = []\n while current:\n lineage.append(current.name)\n current = current.parent\n\n name = lineage.pop()\n node = self.node_named(name)\n while lineage:\n name = lineage.pop()\n # We know that this is a current node is a graph while ascending lineage\n definition = cast(GraphDefinition, node.definition)\n node = definition.node_named(name)\n\n return node\n\n def iterate_node_defs(self) -> Iterator[NodeDefinition]:\n yield self\n for outer_node_def in self._node_defs:\n yield from outer_node_def.iterate_node_defs()\n\n def iterate_op_defs(self) -> Iterator["OpDefinition"]:\n for outer_node_def in self._node_defs:\n yield from outer_node_def.iterate_op_defs()\n\n def iterate_node_handles(\n self, parent_node_handle: Optional[NodeHandle] = None\n ) -> Iterator[NodeHandle]:\n for node in self.node_dict.values():\n cur_node_handle = NodeHandle(node.name, parent_node_handle)\n if isinstance(node, GraphNode):\n yield from node.definition.iterate_node_handles(cur_node_handle)\n yield cur_node_handle\n\n @public\n @property\n def input_mappings(self) -> Sequence[InputMapping]:\n """Input mappings for the graph.\n\n An input mapping is a mapping from an input of the graph to an input of a child node.\n """\n return self._input_mappings\n\n @public\n @property\n def output_mappings(self) -> Sequence[OutputMapping]:\n """Output mappings for the graph.\n\n An output mapping is a 
mapping from an output of the graph to an output of a child node.\n """\n return self._output_mappings\n\n @public\n @property\n def config_mapping(self) -> Optional[ConfigMapping]:\n """The config mapping for the graph, if present.\n\n By specifying a config mapping function, you can override the configuration for the child nodes contained within a graph.\n """\n return self._config_mapping\n\n @property\n def has_config_mapping(self) -> bool:\n return self._config_mapping is not None\n\n def all_dagster_types(self) -> Iterable[DagsterType]:\n return self._dagster_type_dict.values()\n\n def has_dagster_type(self, name: str) -> bool:\n check.str_param(name, "name")\n return name in self._dagster_type_dict\n\n def dagster_type_named(self, name: str) -> DagsterType:\n check.str_param(name, "name")\n return self._dagster_type_dict[name]\n\n def get_input_mapping(self, input_name: str) -> InputMapping:\n check.str_param(input_name, "input_name")\n for mapping in self._input_mappings:\n if mapping.graph_input_name == input_name:\n return mapping\n check.failed(f"Could not find input mapping {input_name}")\n\n def input_mapping_for_pointer(\n self, pointer: Union[InputPointer, FanInInputPointer]\n ) -> Optional[InputMapping]:\n check.inst_param(pointer, "pointer", (InputPointer, FanInInputPointer))\n\n for mapping in self._input_mappings:\n if mapping.maps_to == pointer:\n return mapping\n return None\n\n def get_output_mapping(self, output_name: str) -> OutputMapping:\n check.str_param(output_name, "output_name")\n for mapping in self._output_mappings:\n if mapping.graph_output_name == output_name:\n return mapping\n check.failed(f"Could not find output mapping {output_name}")\n\n T_Handle = TypeVar("T_Handle", bound=Optional[NodeHandle])\n\n def resolve_output_to_origin(\n self, output_name: str, handle: Optional[NodeHandle]\n ) -> Tuple[OutputDefinition, Optional[NodeHandle]]:\n check.str_param(output_name, "output_name")\n check.opt_inst_param(handle, "handle", NodeHandle)\n\n mapping = self.get_output_mapping(output_name)\n check.invariant(mapping, "Can only resolve outputs for valid output names")\n mapped_node = self.node_named(mapping.maps_from.node_name)\n return mapped_node.definition.resolve_output_to_origin(\n mapping.maps_from.output_name,\n NodeHandle(mapped_node.name, handle),\n )\n\n def resolve_output_to_origin_op_def(self, output_name: str) -> "OpDefinition":\n mapping = self.get_output_mapping(output_name)\n check.invariant(mapping, "Can only resolve outputs for valid output names")\n return self.node_named(\n mapping.maps_from.node_name\n ).definition.resolve_output_to_origin_op_def(output_name)\n\n def default_value_for_input(self, input_name: str) -> object:\n check.str_param(input_name, "input_name")\n\n # base case\n if self.input_def_named(input_name).has_default_value:\n return self.input_def_named(input_name).default_value\n\n mapping = self.get_input_mapping(input_name)\n check.invariant(mapping, "Can only resolve inputs for valid input names")\n mapped_node = self.node_named(mapping.maps_to.node_name)\n\n return mapped_node.definition.default_value_for_input(mapping.maps_to.input_name)\n\n def input_has_default(self, input_name: str) -> bool:\n check.str_param(input_name, "input_name")\n\n # base case\n if self.input_def_named(input_name).has_default_value:\n return True\n\n mapping = self.get_input_mapping(input_name)\n check.invariant(mapping, "Can only resolve inputs for valid input names")\n mapped_node = self.node_named(mapping.maps_to.node_name)\n\n return 
mapped_node.definition.input_has_default(mapping.maps_to.input_name)\n\n @property\n def dependencies(self) -> DependencyMapping[NodeInvocation]:\n return self._dependencies\n\n @property\n def dependency_structure(self) -> DependencyStructure:\n return self._dependency_structure\n\n @property\n def config_schema(self) -> Optional[IDefinitionConfigSchema]:\n return self.config_mapping.config_schema if self.config_mapping is not None else None\n\n def input_supports_dynamic_output_dep(self, input_name: str) -> bool:\n mapping = self.get_input_mapping(input_name)\n target_node = mapping.maps_to.node_name\n # check if input mapped to node which is downstream of another dynamic output within\n if self.dependency_structure.is_dynamic_mapped(target_node):\n return False\n\n # check if input mapped to node which starts new dynamic downstream\n if self.dependency_structure.has_dynamic_downstreams(target_node):\n return False\n\n return self.node_named(target_node).definition.input_supports_dynamic_output_dep(\n mapping.maps_to.input_name\n )\n\n def copy(\n self,\n name: Optional[str] = None,\n description: Optional[str] = None,\n input_mappings: Optional[Sequence[InputMapping]] = None,\n output_mappings: Optional[Sequence[OutputMapping]] = None,\n config: Optional[ConfigMapping] = None,\n tags: Optional[Mapping[str, str]] = None,\n node_input_source_assets: Optional[Mapping[str, Mapping[str, "SourceAsset"]]] = None,\n ) -> Self:\n return GraphDefinition(\n node_defs=self.node_defs,\n dependencies=self.dependencies,\n name=name or self.name,\n description=description or self.description,\n input_mappings=input_mappings or self._input_mappings,\n output_mappings=output_mappings or self._output_mappings,\n config=config or self.config_mapping,\n tags=tags or self.tags,\n node_input_source_assets=node_input_source_assets or self.node_input_source_assets,\n )\n\n def copy_for_configured(\n self,\n name: str,\n description: Optional[str],\n config_schema: Any,\n ) -> "GraphDefinition":\n if not self.has_config_mapping:\n raise DagsterInvalidDefinitionError(\n "Only graphs utilizing config mapping can be pre-configured. The graph "\n f'"{self.name}" does not have a config mapping, and thus has nothing to be '\n "configured."\n )\n config_mapping = cast(ConfigMapping, self.config_mapping)\n return self.copy(\n name=name,\n description=check.opt_str_param(description, "description", default=self.description),\n config=ConfigMapping(\n config_mapping.config_fn,\n config_schema=config_schema,\n receive_processed_config_values=config_mapping.receive_processed_config_values,\n ),\n )\n\n def node_names(self) -> Sequence[str]:\n return list(self._node_dict.keys())\n\n
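A minimal sketch of the graph-level config mapping that `config_mapping`, `has_config_mapping`, and `copy_for_configured` above operate on; the `announce` op and `announce_graph` graph are hypothetical names:

```python
from dagster import ConfigMapping, graph, op

@op(config_schema={"message": str})
def announce(context):
    context.log.info(context.op_config["message"])

# A graph-level ConfigMapping exposes a simplified schema and maps it onto the
# config of the graph's constituent ops (keyed by op name).
@graph(
    config=ConfigMapping(
        config_schema={"prefix": str},
        config_fn=lambda cfg: {
            "announce": {"config": {"message": f"{cfg['prefix']}: hello"}}
        },
    )
)
def announce_graph():
    announce()
```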
[docs] @public\n def to_job(\n self,\n name: Optional[str] = None,\n description: Optional[str] = None,\n resource_defs: Optional[Mapping[str, object]] = None,\n config: Optional[\n Union["RunConfig", ConfigMapping, Mapping[str, object], "PartitionedConfig"]\n ] = None,\n tags: Optional[Mapping[str, str]] = None,\n metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n logger_defs: Optional[Mapping[str, LoggerDefinition]] = None,\n executor_def: Optional["ExecutorDefinition"] = None,\n hooks: Optional[AbstractSet[HookDefinition]] = None,\n op_retry_policy: Optional[RetryPolicy] = None,\n version_strategy: Optional[VersionStrategy] = None,\n op_selection: Optional[Sequence[str]] = None,\n partitions_def: Optional["PartitionsDefinition"] = None,\n asset_layer: Optional["AssetLayer"] = None,\n input_values: Optional[Mapping[str, object]] = None,\n _asset_selection_data: Optional[AssetSelectionData] = None,\n ) -> "JobDefinition":\n """Make this graph in to an executable Job by providing remaining components required for execution.\n\n Args:\n name (Optional[str]):\n The name for the Job. Defaults to the name of the this graph.\n resource_defs (Optional[Mapping [str, object]]):\n Resources that are required by this graph for execution.\n If not defined, `io_manager` will default to filesystem.\n config:\n Describes how the job is parameterized at runtime.\n\n If no value is provided, then the schema for the job's run config is a standard\n format based on its ops and resources.\n\n If a dictionary is provided, then it must conform to the standard config schema, and\n it will be used as the job's run config for the job whenever the job is executed.\n The values provided will be viewable and editable in the Dagster UI, so be\n careful with secrets.\n\n If a :py:class:`ConfigMapping` object is provided, then the schema for the job's run config is\n determined by the config mapping, and the ConfigMapping, which should return\n configuration in the standard format to configure the job.\n\n If a :py:class:`PartitionedConfig` object is provided, then it defines a discrete set of config\n values that can parameterize the job, as well as a function for mapping those\n values to the base config. The values provided will be viewable and editable in the\n Dagster UI, so be careful with secrets.\n tags (Optional[Mapping[str, Any]]):\n Arbitrary information that will be attached to the execution of the Job.\n Values that are not strings will be json encoded and must meet the criteria that\n `json.loads(json.dumps(value)) == value`. These tag values may be overwritten by tag\n values provided at invocation time.\n metadata (Optional[Mapping[str, RawMetadataValue]]):\n Arbitrary information that will be attached to the JobDefinition and be viewable in the Dagster UI.\n Keys must be strings, and values must be python primitive types or one of the provided\n MetadataValue types\n logger_defs (Optional[Mapping[str, LoggerDefinition]]):\n A dictionary of string logger identifiers to their implementations.\n executor_def (Optional[ExecutorDefinition]):\n How this Job will be executed. Defaults to :py:class:`multi_or_in_process_executor`,\n which can be switched between multi-process and in-process modes of execution. 
The\n default mode of execution is multi-process.\n op_retry_policy (Optional[RetryPolicy]): The default retry policy for all ops in this job.\n Only used if retry policy is not defined on the op definition or op invocation.\n version_strategy (Optional[VersionStrategy]):\n Defines how each op (and optionally, resource) in the job can be versioned. If\n provided, memoizaton will be enabled for this job.\n partitions_def (Optional[PartitionsDefinition]): Defines a discrete set of partition\n keys that can parameterize the job. If this argument is supplied, the config\n argument can't also be supplied.\n asset_layer (Optional[AssetLayer]): Top level information about the assets this job\n will produce. Generally should not be set manually.\n input_values (Optional[Mapping[str, Any]]):\n A dictionary that maps python objects to the top-level inputs of a job.\n\n Returns:\n JobDefinition\n """\n from dagster._core.execution.build_resources import wrap_resources_for_execution\n\n from .job_definition import JobDefinition\n\n wrapped_resource_defs = wrap_resources_for_execution(resource_defs)\n\n return JobDefinition.dagster_internal_init(\n name=name,\n description=description or self.description,\n graph_def=self,\n resource_defs=wrapped_resource_defs,\n logger_defs=logger_defs,\n executor_def=executor_def,\n config=config,\n partitions_def=partitions_def,\n tags=tags,\n metadata=metadata,\n hook_defs=hooks,\n version_strategy=version_strategy,\n op_retry_policy=op_retry_policy,\n asset_layer=asset_layer,\n input_values=input_values,\n _subset_selection_data=_asset_selection_data,\n _was_explicitly_provided_resources=None, # None means this is determined by whether resource_defs contains any explicitly provided resources\n ).get_subset(op_selection=op_selection)
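A minimal sketch of `to_job` under the signature documented above; the `emit_number`/`add_one` ops and the `numbers_job` name are hypothetical:

```python
from dagster import RetryPolicy, graph, op

@op
def emit_number() -> int:
    return 5

@op
def add_one(x: int) -> int:
    return x + 1

@graph
def numbers():
    add_one(emit_number())

# Bind the graph to a runnable job, attaching tags and a default op retry policy.
numbers_job = numbers.to_job(
    name="numbers_job",
    tags={"team": "data"},
    op_retry_policy=RetryPolicy(max_retries=2),
)
```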
\n\n def coerce_to_job(self) -> "JobDefinition":\n # attempt to coerce a Graph in to a Job, raising a useful error if it doesn't work\n try:\n return self.to_job()\n except DagsterInvalidDefinitionError as err:\n raise DagsterInvalidDefinitionError(\n f"Failed attempting to coerce Graph {self.name} in to a Job. "\n "Use to_job instead, passing the required information."\n ) from err\n\n
[docs] @public\n def execute_in_process(\n self,\n run_config: Any = None,\n instance: Optional["DagsterInstance"] = None,\n resources: Optional[Mapping[str, object]] = None,\n raise_on_error: bool = True,\n op_selection: Optional[Sequence[str]] = None,\n run_id: Optional[str] = None,\n input_values: Optional[Mapping[str, object]] = None,\n ) -> "ExecuteInProcessResult":\n """Execute this graph in-process, collecting results in-memory.\n\n Args:\n run_config (Optional[Mapping[str, Any]]):\n Run config to provide to execution. The configuration for the underlying graph\n should exist under the "ops" key.\n instance (Optional[DagsterInstance]):\n The instance to execute against, an ephemeral one will be used if none provided.\n resources (Optional[Mapping[str, Any]]):\n The resources needed if any are required. Can provide resource instances directly,\n or resource definitions.\n raise_on_error (Optional[bool]): Whether or not to raise exceptions when they occur.\n Defaults to ``True``.\n op_selection (Optional[List[str]]): A list of op selection queries (including single op\n names) to execute. For example:\n * ``['some_op']``: selects ``some_op`` itself.\n * ``['*some_op']``: select ``some_op`` and all its ancestors (upstream dependencies).\n * ``['*some_op+++']``: select ``some_op``, all its ancestors, and its descendants\n (downstream dependencies) within 3 levels down.\n * ``['*some_op', 'other_op_a', 'other_op_b+']``: select ``some_op`` and all its\n ancestors, ``other_op_a`` itself, and ``other_op_b`` and its direct child ops.\n input_values (Optional[Mapping[str, Any]]):\n A dictionary that maps python objects to the top-level inputs of the graph.\n\n Returns:\n :py:class:`~dagster.ExecuteInProcessResult`\n """\n from dagster._core.execution.build_resources import wrap_resources_for_execution\n from dagster._core.instance import DagsterInstance\n\n from .executor_definition import execute_in_process_executor\n from .job_definition import JobDefinition\n\n instance = check.opt_inst_param(instance, "instance", DagsterInstance)\n resources = check.opt_mapping_param(resources, "resources", key_type=str)\n input_values = check.opt_mapping_param(input_values, "input_values")\n\n resource_defs = wrap_resources_for_execution(resources)\n\n ephemeral_job = JobDefinition(\n name=self._name,\n graph_def=self,\n executor_def=execute_in_process_executor,\n resource_defs=resource_defs,\n input_values=input_values,\n ).get_subset(op_selection=op_selection)\n\n run_config = run_config if run_config is not None else {}\n op_selection = check.opt_sequence_param(op_selection, "op_selection", str)\n\n return ephemeral_job.execute_in_process(\n run_config=run_config,\n instance=instance,\n raise_on_error=raise_on_error,\n run_id=run_id,\n )
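A minimal sketch of `execute_in_process`, assuming a hypothetical `scaling` graph; as the docstring notes, graph-level run config is nested under the "ops" key:

```python
from dagster import graph, op

@op
def one() -> int:
    return 1

@op(config_schema={"factor": int})
def scale(context, x: int) -> int:
    return x * context.op_config["factor"]

@graph
def scaling():
    scale(one())

# Run config for the underlying ops lives under the "ops" key.
result = scaling.execute_in_process(
    run_config={"ops": {"scale": {"config": {"factor": 3}}}}
)
assert result.success
assert result.output_for_node("scale") == 3
```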
\n\n @property\n def parent_graph_def(self) -> Optional["GraphDefinition"]:\n return None\n\n @property\n def is_subselected(self) -> bool:\n return False\n\n def get_resource_requirements(\n self, asset_layer: Optional["AssetLayer"] = None\n ) -> Iterator[ResourceRequirement]:\n for node in self.node_dict.values():\n yield from node.get_resource_requirements(outer_container=self, asset_layer=asset_layer)\n\n for dagster_type in self.all_dagster_types():\n yield from dagster_type.get_resource_requirements()\n\n @public\n @property\n def name(self) -> str:\n """The name of the graph."""\n return super(GraphDefinition, self).name\n\n @public\n @property\n def tags(self) -> Mapping[str, str]:\n """The tags associated with the graph."""\n return super(GraphDefinition, self).tags\n\n
[docs] @public\n def alias(self, name: str) -> "PendingNodeInvocation":\n """Aliases the graph with a new name.\n\n Can only be used in the context of a :py:func:`@graph <graph>`, :py:func:`@job <job>`, or :py:func:`@asset_graph <asset_graph>` decorated function.\n\n **Examples:**\n .. code-block:: python\n\n @job\n def do_it_all():\n my_graph.alias("my_graph_alias")\n """\n return super(GraphDefinition, self).alias(name)
\n\n
[docs] @public\n def tag(self, tags: Optional[Mapping[str, str]]) -> "PendingNodeInvocation":\n """Attaches the provided tags to the graph immutably.\n\n Can only be used in the context of a :py:func:`@graph <graph>`, :py:func:`@job <job>`, or :py:func:`@asset_graph <asset_graph>` decorated function.\n\n **Examples:**\n .. code-block:: python\n\n @job\n def do_it_all():\n my_graph.tag({"my_tag": "my_value"})\n """\n return super(GraphDefinition, self).tag(tags)
\n\n
[docs] @public\n def with_hooks(self, hook_defs: AbstractSet[HookDefinition]) -> "PendingNodeInvocation":\n """Attaches the provided hooks to the graph immutably.\n\n Can only be used in the context of a :py:func:`@graph <graph>`, :py:func:`@job <job>`, or :py:func:`@asset_graph <asset_graph>` decorated function.\n\n **Examples:**\n .. code-block:: python\n\n @job\n def do_it_all():\n my_graph.with_hooks({my_hook})\n """\n return super(GraphDefinition, self).with_hooks(hook_defs)
\n\n
[docs] @public\n def with_retry_policy(self, retry_policy: RetryPolicy) -> "PendingNodeInvocation":\n """Attaches the provided retry policy to the graph immutably.\n\n Can only be used in the context of a :py:func:`@graph <graph>`, :py:func:`@job <job>`, or :py:func:`@asset_graph <asset_graph>` decorated function.\n\n **Examples:**\n .. code-block:: python\n\n @job\n def do_it_all():\n my_graph.with_retry_policy(RetryPolicy(max_retries=5))\n """\n return super(GraphDefinition, self).with_retry_policy(retry_policy)
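A minimal sketch combining the `alias`, `tag`, `with_hooks`, and `with_retry_policy` helpers above inside a `@job` body; the `ingest` graph and `notify_on_success` hook are hypothetical names:

```python
from dagster import RetryPolicy, graph, job, op, success_hook

@success_hook
def notify_on_success(context):
    context.log.info(f"{context.op.name} succeeded")

@op
def fetch():
    return "raw"

@graph
def ingest():
    fetch()

@job
def do_it_all():
    # Each helper returns a PendingNodeInvocation, so it can be applied
    # before invoking the graph within the job body.
    ingest.alias("ingest_copy")()
    ingest.tag({"team": "data"}).with_hooks({notify_on_success}).with_retry_policy(
        RetryPolicy(max_retries=3)
    )()
```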
\n\n def resolve_input_to_destinations(\n self, input_handle: NodeInputHandle\n ) -> Sequence[NodeInputHandle]:\n all_destinations: List[NodeInputHandle] = []\n for mapping in self.input_mappings:\n if mapping.graph_input_name != input_handle.input_name:\n continue\n # recurse into graph structure\n all_destinations += self.node_named(\n mapping.maps_to.node_name\n ).definition.resolve_input_to_destinations(\n NodeInputHandle(\n NodeHandle(mapping.maps_to.node_name, parent=input_handle.node_handle),\n mapping.maps_to.input_name,\n ),\n )\n\n return all_destinations
\n\n\nclass SubselectedGraphDefinition(GraphDefinition):\n """Defines a subselected graph.\n\n Args:\n parent_graph_def (GraphDefinition): The parent graph that this current graph is subselected\n from. This is used for tracking where the subselected graph originally comes from.\n Note that we allow subselecting a subselected graph, and this field refers to the direct\n parent graph of the current subselection, rather than the original root graph.\n node_defs (Optional[Sequence[NodeDefinition]]): A list of all top level nodes in the graph. A\n node can be an op or a graph that contains other nodes.\n dependencies (Optional[Mapping[Union[str, NodeInvocation], Mapping[str, IDependencyDefinition]]]):\n A structure that declares the dependencies of each op's inputs on the outputs of other\n ops in the subselected graph. Keys of the top level dict are either the string names of\n ops in the graph or, in the case of aliased ops, :py:class:`NodeInvocations <NodeInvocation>`.\n Values of the top level dict are themselves dicts, which map input names belonging to\n the op or aliased op to :py:class:`DependencyDefinitions <DependencyDefinition>`.\n input_mappings (Optional[Sequence[InputMapping]]): Define the inputs to the nested graph, and\n how they map to the inputs of its constituent ops.\n output_mappings (Optional[Sequence[OutputMapping]]): Define the outputs of the nested graph, and\n how they map from the outputs of its constituent ops.\n """\n\n def __init__(\n self,\n parent_graph_def: GraphDefinition,\n node_defs: Optional[Sequence[NodeDefinition]],\n dependencies: Optional[\n Union[\n DependencyMapping[str],\n DependencyMapping[NodeInvocation],\n ]\n ],\n input_mappings: Optional[Sequence[InputMapping]],\n output_mappings: Optional[Sequence[OutputMapping]],\n ):\n self._parent_graph_def = check.inst_param(\n parent_graph_def, "parent_graph_def", GraphDefinition\n )\n super(SubselectedGraphDefinition, self).__init__(\n name=parent_graph_def.name, # should we create special name for subselected graphs\n node_defs=node_defs,\n dependencies=dependencies,\n input_mappings=input_mappings,\n output_mappings=output_mappings,\n config=parent_graph_def.config_mapping,\n tags=parent_graph_def.tags,\n )\n\n @property\n def parent_graph_def(self) -> GraphDefinition:\n return self._parent_graph_def\n\n def get_top_level_omitted_nodes(self) -> Sequence[Node]:\n return [node for node in self.parent_graph_def.nodes if not self.has_node_named(node.name)]\n\n @property\n def is_subselected(self) -> bool:\n return True\n\n\ndef _validate_in_mappings(\n input_mappings: Sequence[InputMapping],\n nodes_by_name: Mapping[str, Node],\n dependency_structure: DependencyStructure,\n name: str,\n class_name: str,\n) -> Sequence[InputDefinition]:\n from .composition import MappedInputPlaceholder\n\n input_defs_by_name: Dict[str, InputDefinition] = OrderedDict()\n mapping_keys: Set[str] = set()\n\n target_input_types_by_graph_input_name: Dict[str, Set[DagsterType]] = defaultdict(set)\n\n for mapping in input_mappings:\n # handle incorrect objects passed in as mappings\n if not isinstance(mapping, InputMapping):\n if isinstance(mapping, InputDefinition):\n raise DagsterInvalidDefinitionError(\n f"In {class_name} '{name}' you passed an InputDefinition "\n f"named '{mapping.name}' directly in to input_mappings. 
Return "\n "an InputMapping by calling mapping_to on the InputDefinition."\n )\n else:\n raise DagsterInvalidDefinitionError(\n f"In {class_name} '{name}' received unexpected type '{type(mapping)}' in"\n " input_mappings. Provide an InputMapping using InputMapping(...)"\n )\n\n input_defs_by_name[mapping.graph_input_name] = mapping.get_definition()\n\n target_node = nodes_by_name.get(mapping.maps_to.node_name)\n if target_node is None:\n raise DagsterInvalidDefinitionError(\n f"In {class_name} '{name}' input mapping references node "\n f"'{mapping.maps_to.node_name}' which it does not contain."\n )\n if not target_node.has_input(mapping.maps_to.input_name):\n raise DagsterInvalidDefinitionError(\n f"In {class_name} '{name}' input mapping to node '{mapping.maps_to.node_name}' "\n f"which contains no input named '{mapping.maps_to.input_name}'"\n )\n\n target_input_def = target_node.input_def_named(mapping.maps_to.input_name)\n node_input = NodeInput(target_node, target_input_def)\n\n if mapping.maps_to_fan_in:\n maps_to = cast(FanInInputPointer, mapping.maps_to)\n if not dependency_structure.has_fan_in_deps(node_input):\n raise DagsterInvalidDefinitionError(\n f"In {class_name} '{name}' input mapping target"\n f' "{maps_to.node_name}.{maps_to.input_name}" (index'\n f" {maps_to.fan_in_index} of fan-in) is not a MultiDependencyDefinition."\n )\n inner_deps = dependency_structure.get_fan_in_deps(node_input)\n if (maps_to.fan_in_index >= len(inner_deps)) or (\n inner_deps[maps_to.fan_in_index] is not MappedInputPlaceholder\n ):\n raise DagsterInvalidDefinitionError(\n f"In {class_name} '{name}' input mapping target "\n f'"{maps_to.node_name}.{maps_to.input_name}" index {maps_to.fan_in_index} in '\n "the MultiDependencyDefinition is not a MappedInputPlaceholder"\n )\n mapping_keys.add(f"{maps_to.node_name}.{maps_to.input_name}.{maps_to.fan_in_index}")\n target_input_types_by_graph_input_name[mapping.graph_input_name].add(\n target_input_def.dagster_type.get_inner_type_for_fan_in()\n )\n else:\n if dependency_structure.has_deps(node_input):\n raise DagsterInvalidDefinitionError(\n f"In {class_name} '{name}' input mapping target "\n f'"{mapping.maps_to.node_name}.{mapping.maps_to.input_name}" '\n "is already satisfied by output"\n )\n\n mapping_keys.add(f"{mapping.maps_to.node_name}.{mapping.maps_to.input_name}")\n target_input_types_by_graph_input_name[mapping.graph_input_name].add(\n target_input_def.dagster_type\n )\n\n for node_input in dependency_structure.inputs():\n if dependency_structure.has_fan_in_deps(node_input):\n for idx, dep in enumerate(dependency_structure.get_fan_in_deps(node_input)):\n if dep is MappedInputPlaceholder:\n mapping_str = f"{node_input.node_name}.{node_input.input_name}.{idx}"\n if mapping_str not in mapping_keys:\n raise DagsterInvalidDefinitionError(\n f"Unsatisfied MappedInputPlaceholder at index {idx} in"\n " MultiDependencyDefinition for"\n f" '{node_input.node_name}.{node_input.input_name}'"\n )\n\n # if the dagster type on a graph input is Any and all its target inputs have the\n # same dagster type, then use that dagster type for the graph input\n for graph_input_name, graph_input_def in input_defs_by_name.items():\n if graph_input_def.dagster_type.kind == DagsterTypeKind.ANY:\n target_input_types = target_input_types_by_graph_input_name[graph_input_name]\n if len(target_input_types) == 1:\n input_defs_by_name[graph_input_name] = graph_input_def.with_dagster_type(\n next(iter(target_input_types))\n )\n\n return list(input_defs_by_name.values())\n\n\ndef 
_validate_out_mappings(\n output_mappings: Sequence[OutputMapping],\n node_dict: Mapping[str, Node],\n name: str,\n class_name: str,\n) -> Tuple[Sequence[OutputMapping], Sequence[OutputDefinition]]:\n output_defs: List[OutputDefinition] = []\n for mapping in output_mappings:\n if isinstance(mapping, OutputMapping):\n target_node = node_dict.get(mapping.maps_from.node_name)\n if target_node is None:\n raise DagsterInvalidDefinitionError(\n f"In {class_name} '{name}' output mapping references node "\n f"'{mapping.maps_from.node_name}' which it does not contain."\n )\n if not target_node.has_output(mapping.maps_from.output_name):\n raise DagsterInvalidDefinitionError(\n f"In {class_name} {name} output mapping from {target_node.describe_node()} "\n f"which contains no output named '{mapping.maps_from.output_name}'"\n )\n\n target_output = target_node.output_def_named(mapping.maps_from.output_name)\n output_def = mapping.get_definition(is_dynamic=target_output.is_dynamic)\n output_defs.append(output_def)\n\n if (\n mapping.dagster_type\n and mapping.dagster_type.kind != DagsterTypeKind.ANY\n and (target_output.dagster_type != mapping.dagster_type)\n and class_name != "GraphDefinition"\n ):\n raise DagsterInvalidDefinitionError(\n f"In {class_name} '{name}' output '{mapping.graph_output_name}' of type"\n f" {mapping.dagster_type.display_name} maps from"\n f" {mapping.maps_from.node_name}.{mapping.maps_from.output_name} of different"\n f" type {target_output.dagster_type.display_name}. OutputMapping source and"\n " destination must have the same type."\n )\n\n elif isinstance(mapping, OutputDefinition):\n raise DagsterInvalidDefinitionError(\n f"You passed an OutputDefinition named '{mapping.name}' directly "\n "in to output_mappings. Return an OutputMapping by calling "\n "mapping_from on the OutputDefinition."\n )\n else:\n raise DagsterInvalidDefinitionError(\n f"Received unexpected type '{type(mapping)}' in output_mappings. "\n "Provide an OutputMapping using OutputDefinition(...).mapping_from(...)"\n )\n return output_mappings, output_defs\n
", "current_page_name": "_modules/dagster/_core/definitions/graph_definition", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.graph_definition"}, "hook_definition": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.hook_definition

\nfrom typing import AbstractSet, Any, Callable, Iterator, NamedTuple, Optional, cast\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr\n\nfrom ..decorator_utils import get_function_params\nfrom ..errors import DagsterInvalidInvocationError\nfrom .resource_requirement import HookResourceRequirement, RequiresResources, ResourceRequirement\nfrom .utils import check_valid_name\n\n\n
[docs]class HookDefinition(\n NamedTuple(\n "_HookDefinition",\n [\n ("name", PublicAttr[str]),\n ("hook_fn", PublicAttr[Callable]),\n ("required_resource_keys", PublicAttr[AbstractSet[str]]),\n ("decorated_fn", PublicAttr[Optional[Callable]]),\n ],\n ),\n RequiresResources,\n):\n """Define a hook which can be triggered during a op execution (e.g. a callback on the step\n execution failure event during a op execution).\n\n Args:\n name (str): The name of this hook.\n hook_fn (Callable): The callback function that will be triggered.\n required_resource_keys (Optional[AbstractSet[str]]): Keys for the resources required by the\n hook.\n """\n\n def __new__(\n cls,\n *,\n name: str,\n hook_fn: Callable[..., Any],\n required_resource_keys: Optional[AbstractSet[str]] = None,\n decorated_fn: Optional[Callable[..., Any]] = None,\n ):\n return super(HookDefinition, cls).__new__(\n cls,\n name=check_valid_name(name),\n hook_fn=check.callable_param(hook_fn, "hook_fn"),\n required_resource_keys=frozenset(\n check.opt_set_param(required_resource_keys, "required_resource_keys", of_type=str)\n ),\n decorated_fn=check.opt_callable_param(decorated_fn, "decorated_fn"),\n )\n\n def __call__(self, *args, **kwargs):\n """This is invoked when the hook is used as a decorator.\n\n We currently support hooks to decorate the following:\n\n - JobDefinition: when the hook decorates a job definition, it will be added to\n all the op invocations within the job.\n\n Example:\n .. code-block:: python\n\n @success_hook\n def slack_message_on_success(_):\n ...\n\n @slack_message_on_success\n @job\n def a_job():\n foo(bar())\n\n """\n from ..execution.context.hook import HookContext\n from .graph_definition import GraphDefinition\n from .hook_invocation import hook_invocation_result\n from .job_definition import JobDefinition\n\n if len(args) > 0 and isinstance(args[0], (JobDefinition, GraphDefinition)):\n # when it decorates a job, we apply this hook to all the op invocations within\n # the job.\n return args[0].with_hooks({self})\n else:\n if not self.decorated_fn:\n raise DagsterInvalidInvocationError(\n "Only hook definitions created using one of the hook decorators can be invoked."\n )\n fxn_args = get_function_params(self.decorated_fn)\n # If decorated fxn has two arguments, then this is an event list hook fxn, and parameter\n # names are always context and event_list\n if len(fxn_args) == 2:\n context_arg_name = fxn_args[0].name\n event_list_arg_name = fxn_args[1].name\n if len(args) + len(kwargs) != 2:\n raise DagsterInvalidInvocationError(\n "Decorated function expects two parameters, context and event_list, but "\n f"{len(args) + len(kwargs)} were provided."\n )\n if args:\n context = check.opt_inst_param(args[0], "context", HookContext)\n event_list = check.opt_list_param(\n args[1] if len(args) > 1 else kwargs[event_list_arg_name],\n event_list_arg_name,\n )\n else:\n if context_arg_name not in kwargs:\n raise DagsterInvalidInvocationError(\n f"Could not find expected argument '{context_arg_name}'. Provided "\n f"kwargs: {list(kwargs.keys())}"\n )\n if event_list_arg_name not in kwargs:\n raise DagsterInvalidInvocationError(\n f"Could not find expected argument '{event_list_arg_name}'. 
Provided "\n f"kwargs: {list(kwargs.keys())}"\n )\n context = check.opt_inst_param(\n kwargs[context_arg_name], context_arg_name, HookContext\n )\n event_list = check.opt_list_param(\n kwargs[event_list_arg_name], event_list_arg_name\n )\n return hook_invocation_result(self, context, event_list)\n else:\n context_arg_name = fxn_args[0].name\n if len(args) + len(kwargs) != 1:\n raise DagsterInvalidInvocationError(\n f"Decorated function expects one parameter, {context_arg_name}, but "\n f"{len(args) + len(kwargs)} were provided."\n )\n if args:\n context = check.opt_inst_param(args[0], context_arg_name, HookContext)\n else:\n if context_arg_name not in kwargs:\n raise DagsterInvalidInvocationError(\n f"Could not find expected argument '{context_arg_name}'. Provided "\n f"kwargs: {list(kwargs.keys())}"\n )\n context = check.opt_inst_param(\n kwargs[context_arg_name], context_arg_name, HookContext\n )\n return hook_invocation_result(self, context)\n\n def get_resource_requirements(\n self, outer_context: Optional[object] = None\n ) -> Iterator[ResourceRequirement]:\n # outer_context in this case is a string of (job, job name) or (node, node name)\n attached_to = cast(Optional[str], outer_context)\n for resource_key in sorted(list(self.required_resource_keys)):\n yield HookResourceRequirement(\n key=resource_key, attached_to=attached_to, hook_name=self.name\n )
\n
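A minimal sketch of applying a `HookDefinition` to a job via the `__call__` path above; the `log_failure` hook and `guarded_job` job are hypothetical names:

```python
from dagster import failure_hook, job, op

@failure_hook
def log_failure(context):
    context.log.error(f"{context.op.name} failed")

@op
def might_fail():
    raise ValueError("boom")

# Decorating the job with the hook attaches it to every op invocation in the
# job, per the JobDefinition branch of HookDefinition.__call__.
@log_failure
@job
def guarded_job():
    might_fail()
```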
", "current_page_name": "_modules/dagster/_core/definitions/hook_definition", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.hook_definition"}, "input": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.input

\nimport inspect\nfrom types import FunctionType\nfrom typing import (\n    TYPE_CHECKING,\n    Any,\n    Callable,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Set,\n    Type,\n    TypeVar,\n    Union,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, deprecated_param, experimental_param\nfrom dagster._core.definitions.events import AssetKey\nfrom dagster._core.definitions.metadata import (\n    ArbitraryMetadataMapping,\n    MetadataValue,\n    RawMetadataValue,\n    normalize_metadata,\n)\nfrom dagster._core.errors import DagsterError, DagsterInvalidDefinitionError\nfrom dagster._core.types.dagster_type import (  # BuiltinScalarDagsterType,\n    DagsterType,\n    resolve_dagster_type,\n)\n\nfrom .inference import InferredInputProps\nfrom .utils import NoValueSentinel, check_valid_name\n\nif TYPE_CHECKING:\n    from dagster._core.execution.context.input import InputContext\n\nT = TypeVar("T")\n\n\n# unfortunately since type_check functions need TypeCheckContext which is only available\n# at runtime, we can only check basic types before runtime\ndef _check_default_value(input_name: str, dagster_type: DagsterType, default_value: T) -> T:\n    from dagster._core.types.dagster_type import BuiltinScalarDagsterType\n\n    if default_value is not NoValueSentinel:\n        if dagster_type.is_nothing:\n            raise DagsterInvalidDefinitionError(\n                "Setting a default_value is invalid on InputDefinitions of type Nothing"\n            )\n\n        if isinstance(dagster_type, BuiltinScalarDagsterType):\n            type_check = dagster_type.type_check_scalar_value(default_value)\n            if not type_check.success:\n                raise DagsterInvalidDefinitionError(\n                    "Type check failed for the default_value of InputDefinition "\n                    f"{input_name} of type {dagster_type.display_name}. "\n                    f"Received value {default_value} of type {type(default_value)}",\n                )\n\n    return default_value\n\n\n@experimental_param(param="asset_key")\n@experimental_param(param="asset_partitions")\nclass InputDefinition:\n    """Defines an argument to an op's compute function.\n\n    Inputs may flow from previous op outputs, or be stubbed using config. They may optionally\n    be typed using the Dagster type system.\n\n    Args:\n        name (str): Name of the input.\n        dagster_type (Optional[Union[Type, DagsterType]]]): The type of this input.\n            Users should provide the Python type of the objects that they expect to be passed for\n            this input, or a :py:class:`DagsterType` that defines a runtime check that they want\n            to be run on this input. Defaults to :py:class:`Any`.\n        description (Optional[str]): Human-readable description of the input.\n        default_value (Optional[Any]): The default value to use if no input is provided.\n        metadata (Optional[Dict[str, Any]]): A dict of metadata for the input.\n        asset_key (Optional[Union[AssetKey, InputContext -> AssetKey]]): (Experimental) An AssetKey\n            (or function that produces an AssetKey from the InputContext) which should be associated\n            with this InputDefinition. 
Used for tracking lineage information through Dagster.\n        asset_partitions (Optional[Union[AbstractSet[str], InputContext -> AbstractSet[str]]]): (Experimental) A\n            set of partitions of the given asset_key (or a function that produces this list of\n            partitions from the InputContext) which should be associated with this InputDefinition.\n        input_manager_key (Optional[str]): (Experimental) The resource key for the\n            :py:class:`InputManager` used for loading this input when it is not connected to an\n            upstream output.\n    """\n\n    _name: str\n    _type_not_set: bool\n    _dagster_type: DagsterType\n    _description: Optional[str]\n    _default_value: Any\n    _input_manager_key: Optional[str]\n    _raw_metadata: ArbitraryMetadataMapping\n    _metadata: Mapping[str, MetadataValue]\n    _asset_key: Optional[Union[AssetKey, Callable[["InputContext"], AssetKey]]]\n    _asset_partitions_fn: Optional[Callable[["InputContext"], Set[str]]]\n\n    def __init__(\n        self,\n        name: str,\n        dagster_type: object = None,\n        description: Optional[str] = None,\n        default_value: object = NoValueSentinel,\n        metadata: Optional[ArbitraryMetadataMapping] = None,\n        asset_key: Optional[Union[AssetKey, Callable[["InputContext"], AssetKey]]] = None,\n        asset_partitions: Optional[Union[Set[str], Callable[["InputContext"], Set[str]]]] = None,\n        input_manager_key: Optional[str] = None,\n        # when adding new params, make sure to update combine_with_inferred and with_dagster_type below\n    ):\n        self._name = check_valid_name(name, allow_list=["config"])\n\n        self._type_not_set = dagster_type is None\n        self._dagster_type = check.inst(resolve_dagster_type(dagster_type), DagsterType)\n\n        self._description = check.opt_str_param(description, "description")\n\n        self._default_value = _check_default_value(self._name, self._dagster_type, default_value)\n\n        self._input_manager_key = check.opt_str_param(input_manager_key, "input_manager_key")\n\n        self._raw_metadata = check.opt_mapping_param(metadata, "metadata", key_type=str)\n        self._metadata = normalize_metadata(self._raw_metadata, allow_invalid=True)\n\n        if not callable(asset_key):\n            check.opt_inst_param(asset_key, "asset_key", AssetKey)\n\n        self._asset_key = asset_key\n\n        if asset_partitions:\n            check.param_invariant(\n                asset_key is not None,\n                "asset_partitions",\n                'Cannot specify "asset_partitions" argument without also specifying "asset_key"',\n            )\n        if callable(asset_partitions):\n            self._asset_partitions_fn = asset_partitions\n        elif asset_partitions is not None:\n            _asset_partitions = check.set_param(asset_partitions, "asset_partitions", of_type=str)\n            self._asset_partitions_fn = lambda _: _asset_partitions\n        else:\n            self._asset_partitions_fn = None\n\n    @property\n    def name(self) -> str:\n        return self._name\n\n    @property\n    def dagster_type(self) -> DagsterType:\n        return self._dagster_type\n\n    @property\n    def description(self) -> Optional[str]:\n        return self._description\n\n    @property\n    def has_default_value(self) -> bool:\n        return self._default_value is not NoValueSentinel\n\n    @property\n    def default_value(self) -> Any:\n        check.invariant(self.has_default_value, "Can only fetch 
default_value if has_default_value")\n        return self._default_value\n\n    @property\n    def input_manager_key(self) -> Optional[str]:\n        return self._input_manager_key\n\n    @property\n    def metadata(self) -> ArbitraryMetadataMapping:\n        return self._raw_metadata\n\n    @property\n    def is_asset(self) -> bool:\n        return self._asset_key is not None\n\n    @property\n    def hardcoded_asset_key(self) -> Optional[AssetKey]:\n        if not callable(self._asset_key):\n            return self._asset_key\n        else:\n            return None\n\n    def get_asset_key(self, context: "InputContext") -> Optional[AssetKey]:\n        """Get the AssetKey associated with this InputDefinition for the given\n        :py:class:`InputContext` (if any).\n\n        Args:\n            context (InputContext): The InputContext that this InputDefinition is being evaluated\n                in\n        """\n        if callable(self._asset_key):\n            return self._asset_key(context)\n        else:\n            return self.hardcoded_asset_key\n\n    def get_asset_partitions(self, context: "InputContext") -> Optional[Set[str]]:\n        """Get the set of partitions that this op will read from this InputDefinition for the given\n        :py:class:`InputContext` (if any).\n\n        Args:\n            context (InputContext): The InputContext that this InputDefinition is being evaluated\n                in\n        """\n        if self._asset_partitions_fn is None:\n            return None\n\n        return self._asset_partitions_fn(context)\n\n    def mapping_to(\n        self, node_name: str, input_name: str, fan_in_index: Optional[int] = None\n    ) -> "InputMapping":\n        """Create an input mapping to an input of a child node.\n\n        In a GraphDefinition, you can use this helper function to construct\n        an :py:class:`InputMapping` to the input of a child node.\n\n        Args:\n            node_name (str): The name of the child node to which to map this input.\n            input_name (str): The name of the child node' input to which to map this input.\n            fan_in_index (Optional[int]): The index in to a fanned in input, else None\n\n        Examples:\n            .. 
code-block:: python\n\n                input_mapping = InputDefinition('composite_input', Int).mapping_to(\n                    'child_node', 'int_input'\n                )\n        """\n        check.str_param(node_name, "node_name")\n        check.str_param(input_name, "input_name")\n        check.opt_int_param(fan_in_index, "fan_in_index")\n\n        return InputMapping(\n            graph_input_name=self.name,\n            mapped_node_name=node_name,\n            mapped_node_input_name=input_name,\n            fan_in_index=fan_in_index,\n            graph_input_description=self.description,\n            dagster_type=self.dagster_type,\n        )\n\n    @staticmethod\n    def create_from_inferred(inferred: InferredInputProps) -> "InputDefinition":\n        return InputDefinition(\n            name=inferred.name,\n            dagster_type=_checked_inferred_type(inferred),\n            description=inferred.description,\n            default_value=inferred.default_value,\n        )\n\n    def combine_with_inferred(self, inferred: InferredInputProps) -> "InputDefinition":\n        """Return a new InputDefinition that merges this ones properties with those inferred from type signature.\n        This can update: dagster_type, description, and default_value if they are not set.\n        """\n        check.invariant(\n            self.name == inferred.name,\n            f"InferredInputProps name {inferred.name} did not align with InputDefinition name"\n            f" {self.name}",\n        )\n\n        dagster_type = self._dagster_type\n        if self._type_not_set:\n            dagster_type = _checked_inferred_type(inferred)\n\n        description = self._description\n        if description is None and inferred.description is not None:\n            description = inferred.description\n\n        default_value = self._default_value\n        if not self.has_default_value:\n            default_value = inferred.default_value\n\n        return InputDefinition(\n            name=self.name,\n            dagster_type=dagster_type,\n            description=description,\n            default_value=default_value,\n            metadata=self.metadata,\n            asset_key=self._asset_key,\n            asset_partitions=self._asset_partitions_fn,\n            input_manager_key=self._input_manager_key,\n        )\n\n    def with_dagster_type(self, dagster_type: DagsterType) -> "InputDefinition":\n        return InputDefinition(\n            name=self.name,\n            dagster_type=dagster_type,\n            description=self.description,\n            default_value=self.default_value if self.has_default_value else NoValueSentinel,\n            metadata=self.metadata,\n            asset_key=self._asset_key,\n            asset_partitions=self._asset_partitions_fn,\n            input_manager_key=self._input_manager_key,\n        )\n\n\ndef _checked_inferred_type(inferred: InferredInputProps) -> DagsterType:\n    try:\n        if inferred.annotation == inspect.Parameter.empty:\n            resolved_type = resolve_dagster_type(None)\n        elif inferred.annotation is None:\n            # When inferred.annotation is None, it means someone explicitly put "None" as the\n            # annotation, so want to map it to a DagsterType that checks for the None type\n            resolved_type = resolve_dagster_type(type(None))\n        else:\n            resolved_type = resolve_dagster_type(inferred.annotation)\n\n    except DagsterError as e:\n        raise DagsterInvalidDefinitionError(\n            f"Problem using type 
'{inferred.annotation}' from type annotation for argument "\n            f"'{inferred.name}', correct the issue or explicitly set the dagster_type "\n            "via In()."\n        ) from e\n\n    return resolved_type\n\n\nclass InputPointer(NamedTuple("_InputPointer", [("node_name", str), ("input_name", str)])):\n    def __new__(cls, node_name: str, input_name: str):\n        return super(InputPointer, cls).__new__(\n            cls,\n            check.str_param(node_name, "node_name"),\n            check.str_param(input_name, "input_name"),\n        )\n\n\nclass FanInInputPointer(\n    NamedTuple(\n        "_FanInInputPointer", [("node_name", str), ("input_name", str), ("fan_in_index", int)]\n    )\n):\n    def __new__(cls, node_name: str, input_name: str, fan_in_index: int):\n        return super(FanInInputPointer, cls).__new__(\n            cls,\n            check.str_param(node_name, "node_name"),\n            check.str_param(input_name, "input_name"),\n            check.int_param(fan_in_index, "fan_in_index"),\n        )\n\n\n
[docs]@deprecated_param(\n param="dagster_type",\n breaking_version="2.0",\n additional_warn_text="Any defined `dagster_type` should come from the upstream op `Output`.",\n # Disabling warning here since we're passing this internally and I'm not sure whether it is\n # actually used or discarded.\n emit_runtime_warning=False,\n)\nclass InputMapping(NamedTuple):\n """Defines an input mapping for a graph.\n\n Args:\n graph_input_name (str): Name of the input in the graph being mapped from.\n mapped_node_name (str): Named of the node (op/graph) that the input is being mapped to.\n mapped_node_input_name (str): Name of the input in the node (op/graph) that is being mapped to.\n fan_in_index (Optional[int]): The index in to a fanned input, otherwise None.\n graph_input_description (Optional[str]): A description of the input in the graph being mapped from.\n dagster_type (Optional[DagsterType]): The dagster type of the graph's input\n being mapped from.\n\n Examples:\n .. code-block:: python\n\n from dagster import InputMapping, GraphDefinition, op, graph\n\n @op\n def needs_input(x):\n return x + 1\n\n # The following two graph definitions are equivalent\n GraphDefinition(\n name="the_graph",\n node_defs=[needs_input],\n input_mappings=[\n InputMapping(\n graph_input_name="maps_x", mapped_node_name="needs_input",\n mapped_node_input_name="x"\n )\n ]\n )\n\n @graph\n def the_graph(maps_x):\n needs_input(maps_x)\n """\n\n graph_input_name: str\n mapped_node_name: str\n mapped_node_input_name: str\n fan_in_index: Optional[int] = None\n graph_input_description: Optional[str] = None\n dagster_type: Optional[DagsterType] = None\n\n @property\n def maps_to(self) -> Union[InputPointer, FanInInputPointer]:\n if self.fan_in_index is not None:\n return FanInInputPointer(\n self.mapped_node_name, self.mapped_node_input_name, self.fan_in_index\n )\n return InputPointer(self.mapped_node_name, self.mapped_node_input_name)\n\n @property\n def maps_to_fan_in(self) -> bool:\n return isinstance(self.maps_to, FanInInputPointer)\n\n def describe(self) -> str:\n idx = self.maps_to.fan_in_index if isinstance(self.maps_to, FanInInputPointer) else ""\n return f"{self.graph_input_name} -> {self.maps_to.node_name}:{self.maps_to.input_name}{idx}"\n\n def get_definition(self) -> "InputDefinition":\n return InputDefinition(\n name=self.graph_input_name,\n description=self.graph_input_description,\n dagster_type=self.dagster_type,\n )
\n\n\n
[docs]class In(\n NamedTuple(\n "_In",\n [\n ("dagster_type", PublicAttr[Union[DagsterType, Type[NoValueSentinel]]]),\n ("description", PublicAttr[Optional[str]]),\n ("default_value", PublicAttr[Any]),\n ("metadata", PublicAttr[Optional[Mapping[str, Any]]]),\n (\n "asset_key",\n PublicAttr[Optional[Union[AssetKey, Callable[["InputContext"], AssetKey]]]],\n ),\n (\n "asset_partitions",\n PublicAttr[Optional[Union[Set[str], Callable[["InputContext"], Set[str]]]]],\n ),\n ("input_manager_key", PublicAttr[Optional[str]]),\n ],\n )\n):\n """Defines an argument to an op's compute function.\n\n Inputs may flow from previous op's outputs, or be stubbed using config. They may optionally\n be typed using the Dagster type system.\n\n Args:\n dagster_type (Optional[Union[Type, DagsterType]]]):\n The type of this input. Should only be set if the correct type can not\n be inferred directly from the type signature of the decorated function.\n description (Optional[str]): Human-readable description of the input.\n default_value (Optional[Any]): The default value to use if no input is provided.\n metadata (Optional[Dict[str, RawMetadataValue]]): A dict of metadata for the input.\n asset_key (Optional[Union[AssetKey, InputContext -> AssetKey]]): (Experimental) An AssetKey\n (or function that produces an AssetKey from the InputContext) which should be associated\n with this In. Used for tracking lineage information through Dagster.\n asset_partitions (Optional[Union[Set[str], InputContext -> Set[str]]]): (Experimental) A\n set of partitions of the given asset_key (or a function that produces this list of\n partitions from the InputContext) which should be associated with this In.\n input_manager_key (Optional[str]): (Experimental) The resource key for the\n :py:class:`InputManager` used for loading this input when it is not connected to an\n upstream output.\n """\n\n def __new__(\n cls,\n dagster_type: Union[Type, DagsterType] = NoValueSentinel,\n description: Optional[str] = None,\n default_value: Any = NoValueSentinel,\n metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n asset_key: Optional[Union[AssetKey, Callable[["InputContext"], AssetKey]]] = None,\n asset_partitions: Optional[Union[Set[str], Callable[["InputContext"], Set[str]]]] = None,\n input_manager_key: Optional[str] = None,\n ):\n return super(In, cls).__new__(\n cls,\n dagster_type=(\n NoValueSentinel\n if dagster_type is NoValueSentinel\n else resolve_dagster_type(dagster_type)\n ),\n description=check.opt_str_param(description, "description"),\n default_value=default_value,\n metadata=check.opt_mapping_param(metadata, "metadata", key_type=str),\n asset_key=check.opt_inst_param(asset_key, "asset_key", (AssetKey, FunctionType)),\n asset_partitions=asset_partitions,\n input_manager_key=check.opt_str_param(input_manager_key, "input_manager_key"),\n )\n\n @staticmethod\n def from_definition(input_def: InputDefinition) -> "In":\n return In(\n dagster_type=input_def.dagster_type,\n description=input_def.description,\n default_value=input_def._default_value, # noqa: SLF001\n metadata=input_def.metadata,\n asset_key=input_def._asset_key, # noqa: SLF001\n asset_partitions=input_def._asset_partitions_fn, # noqa: SLF001\n input_manager_key=input_def.input_manager_key,\n )\n\n def to_definition(self, name: str) -> InputDefinition:\n dagster_type = self.dagster_type if self.dagster_type is not NoValueSentinel else None\n return InputDefinition(\n name=name,\n dagster_type=dagster_type,\n description=self.description,\n 
default_value=self.default_value,\n metadata=self.metadata,\n asset_key=self.asset_key,\n asset_partitions=self.asset_partitions,\n input_manager_key=self.input_manager_key,\n )
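A minimal sketch of `In`, assuming a hypothetical `double` op; one input carries a description and default value, and another declares an ordering-only `Nothing` dependency:

```python
from dagster import In, Nothing, op

@op(
    ins={
        "x": In(int, description="value to double", default_value=2),
        "after": In(Nothing),  # ordering-only dependency; no value is passed in
    }
)
def double(x):
    return x * 2
```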
\n\n\n
[docs]class GraphIn(NamedTuple("_GraphIn", [("description", PublicAttr[Optional[str]])])):\n """Represents information about an input that a graph maps.\n\n Args:\n description (Optional[str]): Human-readable description of the input.\n """\n\n def __new__(cls, description: Optional[str] = None):\n return super(GraphIn, cls).__new__(cls, description=description)\n\n def to_definition(self, name: str) -> InputDefinition:\n return InputDefinition(name=name, description=self.description)
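A minimal sketch of `GraphIn`, assuming a hypothetical `add_one_graph`; the graph input carries only a description and is mapped to the inner op's input:

```python
from dagster import GraphIn, graph, op

@op
def add_one(x: int) -> int:
    return x + 1

@graph(ins={"num": GraphIn(description="number fed into the graph")})
def add_one_graph(num):
    add_one(num)
```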
\n
", "current_page_name": "_modules/dagster/_core/definitions/input", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.input"}, "job_definition": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.job_definition

\nimport importlib\nimport os\nimport warnings\nfrom datetime import datetime\nfrom functools import update_wrapper\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Dict,\n    Iterable,\n    List,\n    Mapping,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n    Union,\n    cast,\n)\n\nfrom typing_extensions import Self\n\nimport dagster._check as check\nfrom dagster._annotations import deprecated, experimental_param, public\nfrom dagster._config import Field, Shape, StringSource\nfrom dagster._config.config_type import ConfigType\nfrom dagster._config.validate import validate_config\nfrom dagster._core.definitions.asset_check_spec import AssetCheckKey\nfrom dagster._core.definitions.dependency import (\n    Node,\n    NodeHandle,\n    NodeInputHandle,\n    NodeInvocation,\n)\nfrom dagster._core.definitions.events import AssetKey\nfrom dagster._core.definitions.node_definition import NodeDefinition\nfrom dagster._core.definitions.op_definition import OpDefinition\nfrom dagster._core.definitions.op_selection import OpSelection, get_graph_subset\nfrom dagster._core.definitions.partition import DynamicPartitionsDefinition\nfrom dagster._core.definitions.policy import RetryPolicy\nfrom dagster._core.definitions.resource_requirement import (\n    ResourceRequirement,\n    ensure_requirements_satisfied,\n)\nfrom dagster._core.definitions.utils import check_valid_name\nfrom dagster._core.errors import (\n    DagsterInvalidConfigError,\n    DagsterInvalidDefinitionError,\n    DagsterInvalidInvocationError,\n    DagsterInvalidSubsetError,\n    DagsterInvariantViolationError,\n)\nfrom dagster._core.selector.subset_selector import (\n    AssetSelectionData,\n    OpSelectionData,\n)\nfrom dagster._core.storage.io_manager import (\n    IOManagerDefinition,\n    dagster_maintained_io_manager,\n    io_manager,\n)\nfrom dagster._core.storage.tags import MEMOIZED_RUN_TAG\nfrom dagster._core.types.dagster_type import DagsterType\nfrom dagster._core.utils import str_format_set\nfrom dagster._utils import IHasInternalInit\nfrom dagster._utils.merger import merge_dicts\n\nfrom .asset_layer import AssetLayer, build_asset_selection_job\nfrom .config import ConfigMapping\nfrom .dependency import (\n    DependencyMapping,\n    DependencyStructure,\n    OpNode,\n)\nfrom .executor_definition import ExecutorDefinition, multi_or_in_process_executor\nfrom .graph_definition import GraphDefinition, SubselectedGraphDefinition\nfrom .hook_definition import HookDefinition\nfrom .logger_definition import LoggerDefinition\nfrom .metadata import MetadataValue, RawMetadataValue, normalize_metadata\nfrom .partition import PartitionedConfig, PartitionsDefinition\nfrom .resource_definition import ResourceDefinition\nfrom .run_request import RunRequest\nfrom .utils import DEFAULT_IO_MANAGER_KEY, validate_tags\nfrom .version_strategy import VersionStrategy\n\nif TYPE_CHECKING:\n    from dagster._config.snap import ConfigSchemaSnapshot\n    from dagster._core.definitions.run_config import RunConfig\n    from dagster._core.execution.execute_in_process_result import ExecuteInProcessResult\n    from dagster._core.execution.resources_init import InitResourceContext\n    from dagster._core.host_representation.job_index import JobIndex\n    from dagster._core.instance import DagsterInstance, DynamicPartitionsStore\n    from dagster._core.snap import JobSnapshot\n\n    from .run_config_schema import RunConfigSchema\n\nDEFAULT_EXECUTOR_DEF = multi_or_in_process_executor\n\n\n
[docs]@experimental_param(param="version_strategy")\nclass JobDefinition(IHasInternalInit):\n """Defines a Dagster job."""\n\n _name: str\n _graph_def: GraphDefinition\n _description: Optional[str]\n _tags: Mapping[str, str]\n _metadata: Mapping[str, MetadataValue]\n _current_level_node_defs: Sequence[NodeDefinition]\n _hook_defs: AbstractSet[HookDefinition]\n _op_retry_policy: Optional[RetryPolicy]\n _asset_layer: AssetLayer\n _resource_requirements: Mapping[str, AbstractSet[str]]\n _all_node_defs: Mapping[str, NodeDefinition]\n _cached_run_config_schemas: Dict[str, "RunConfigSchema"]\n _version_strategy: VersionStrategy\n _subset_selection_data: Optional[Union[OpSelectionData, AssetSelectionData]]\n input_values: Mapping[str, object]\n\n def __init__(\n self,\n *,\n graph_def: GraphDefinition,\n resource_defs: Optional[Mapping[str, ResourceDefinition]] = None,\n executor_def: Optional[ExecutorDefinition] = None,\n logger_defs: Optional[Mapping[str, LoggerDefinition]] = None,\n name: Optional[str] = None,\n config: Optional[\n Union[ConfigMapping, Mapping[str, object], PartitionedConfig, "RunConfig"]\n ] = None,\n description: Optional[str] = None,\n partitions_def: Optional[PartitionsDefinition] = None,\n tags: Optional[Mapping[str, Any]] = None,\n metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n hook_defs: Optional[AbstractSet[HookDefinition]] = None,\n op_retry_policy: Optional[RetryPolicy] = None,\n version_strategy: Optional[VersionStrategy] = None,\n _subset_selection_data: Optional[Union[OpSelectionData, AssetSelectionData]] = None,\n asset_layer: Optional[AssetLayer] = None,\n input_values: Optional[Mapping[str, object]] = None,\n _was_explicitly_provided_resources: Optional[bool] = None,\n ):\n from dagster._core.definitions.run_config import RunConfig, convert_config_input\n\n self._graph_def = graph_def\n self._current_level_node_defs = self._graph_def.node_defs\n # Recursively explore all nodes in the this job\n self._all_node_defs = _build_all_node_defs(self._current_level_node_defs)\n self._asset_layer = check.opt_inst_param(\n asset_layer, "asset_layer", AssetLayer\n ) or _infer_asset_layer_from_source_asset_deps(graph_def)\n\n # validates\n self._graph_def.get_inputs_must_be_resolved_top_level(self._asset_layer)\n\n self._name = check_valid_name(check.str_param(name, "name")) if name else graph_def.name\n self._executor_def = check.opt_inst_param(executor_def, "executor_def", ExecutorDefinition)\n self._loggers = check.opt_nullable_mapping_param(\n logger_defs,\n "logger_defs",\n key_type=str,\n value_type=LoggerDefinition,\n )\n\n config = check.opt_inst_param(\n config, "config", (Mapping, ConfigMapping, PartitionedConfig, RunConfig)\n )\n config = convert_config_input(config)\n\n partitions_def = check.opt_inst_param(\n partitions_def, "partitions_def", PartitionsDefinition\n )\n # tags and description can exist on graph as well, but since\n # same graph may be in multiple jobs, keep separate layer\n self._description = check.opt_str_param(description, "description")\n self._tags = validate_tags(tags)\n self._metadata = normalize_metadata(\n check.opt_mapping_param(metadata, "metadata", key_type=str)\n )\n self._hook_defs = check.opt_set_param(hook_defs, "hook_defs")\n self._op_retry_policy = check.opt_inst_param(\n op_retry_policy, "op_retry_policy", RetryPolicy\n )\n self.version_strategy = check.opt_inst_param(\n version_strategy, "version_strategy", VersionStrategy\n )\n\n _subset_selection_data = check.opt_inst_param(\n _subset_selection_data, 
"_subset_selection_data", (OpSelectionData, AssetSelectionData)\n )\n input_values = check.opt_mapping_param(input_values, "input_values", key_type=str)\n\n resource_defs = check.opt_mapping_param(\n resource_defs, "resource_defs", key_type=str, value_type=ResourceDefinition\n )\n for key in resource_defs.keys():\n if not key.isidentifier():\n check.failed(f"Resource key '{key}' must be a valid Python identifier.")\n was_provided_resources = (\n bool(resource_defs)\n if _was_explicitly_provided_resources is None\n else _was_explicitly_provided_resources\n )\n self._resource_defs = {\n DEFAULT_IO_MANAGER_KEY: default_job_io_manager,\n **resource_defs,\n }\n self._required_resource_keys = self._get_required_resource_keys(was_provided_resources)\n\n self._config_mapping = None\n self._partitioned_config = None\n self._run_config = None\n self._run_config_schema = None\n self._original_config_argument = config\n\n if partitions_def:\n self._partitioned_config = PartitionedConfig.from_flexible_config(\n config, partitions_def\n )\n else:\n if isinstance(config, ConfigMapping):\n self._config_mapping = config\n elif isinstance(config, PartitionedConfig):\n self._partitioned_config = config\n elif isinstance(config, dict):\n self._run_config = config\n # Using config mapping here is a trick to make it so that the preset will be used even\n # when no config is supplied for the job.\n self._config_mapping = _config_mapping_with_default_value(\n get_run_config_schema_for_job(\n graph_def,\n self.resource_defs,\n self.executor_def,\n self.loggers,\n asset_layer,\n was_explicitly_provided_resources=was_provided_resources,\n ),\n config,\n self.name,\n )\n elif config is not None:\n check.failed(\n "config param must be a ConfigMapping, a PartitionedConfig, or a dictionary,"\n f" but is an object of type {type(config)}"\n )\n\n self._subset_selection_data = _subset_selection_data\n self.input_values = input_values\n for input_name in sorted(list(self.input_values.keys())):\n if not graph_def.has_input(input_name):\n raise DagsterInvalidDefinitionError(\n f"Error when constructing JobDefinition '{self.name}': Input value provided for"\n f" key '{input_name}', but job has no top-level input with that name."\n )\n\n def dagster_internal_init(\n *,\n graph_def: GraphDefinition,\n resource_defs: Optional[Mapping[str, ResourceDefinition]],\n executor_def: Optional[ExecutorDefinition],\n logger_defs: Optional[Mapping[str, LoggerDefinition]],\n name: Optional[str],\n config: Optional[\n Union[ConfigMapping, Mapping[str, object], PartitionedConfig, "RunConfig"]\n ],\n description: Optional[str],\n partitions_def: Optional[PartitionsDefinition],\n tags: Optional[Mapping[str, Any]],\n metadata: Optional[Mapping[str, RawMetadataValue]],\n hook_defs: Optional[AbstractSet[HookDefinition]],\n op_retry_policy: Optional[RetryPolicy],\n version_strategy: Optional[VersionStrategy],\n _subset_selection_data: Optional[Union[OpSelectionData, AssetSelectionData]],\n asset_layer: Optional[AssetLayer],\n input_values: Optional[Mapping[str, object]],\n _was_explicitly_provided_resources: Optional[bool],\n ) -> "JobDefinition":\n return JobDefinition(\n graph_def=graph_def,\n resource_defs=resource_defs,\n executor_def=executor_def,\n logger_defs=logger_defs,\n name=name,\n config=config,\n description=description,\n partitions_def=partitions_def,\n tags=tags,\n metadata=metadata,\n hook_defs=hook_defs,\n op_retry_policy=op_retry_policy,\n version_strategy=version_strategy,\n _subset_selection_data=_subset_selection_data,\n 
asset_layer=asset_layer,\n input_values=input_values,\n _was_explicitly_provided_resources=_was_explicitly_provided_resources,\n )\n\n @property\n def name(self) -> str:\n return self._name\n\n @property\n def tags(self) -> Mapping[str, str]:\n return merge_dicts(self._graph_def.tags, self._tags)\n\n @property\n def metadata(self) -> Mapping[str, MetadataValue]:\n return self._metadata\n\n @property\n def description(self) -> Optional[str]:\n return self._description\n\n @property\n def graph(self) -> GraphDefinition:\n return self._graph_def\n\n @property\n def dependency_structure(self) -> DependencyStructure:\n return self._graph_def.dependency_structure\n\n @property\n def dependencies(self) -> DependencyMapping[NodeInvocation]:\n return self._graph_def.dependencies\n\n @public\n @property\n def executor_def(self) -> ExecutorDefinition:\n """Returns the default :py:class:`ExecutorDefinition` for the job.\n\n If the user has not specified an executor definition, then this will default to the :py:func:`multi_or_in_process_executor`. If a default is specified on the :py:class:`Definitions` object the job was provided to, then that will be used instead.\n """\n return self._executor_def or DEFAULT_EXECUTOR_DEF\n\n @public\n @property\n def has_specified_executor(self) -> bool:\n """Returns True if this job has explicitly specified an executor, and False if the executor was inherited through defaults or the :py:class:`Definitions` object the job was provided to."""\n return self._executor_def is not None\n\n @public\n @property\n def resource_defs(self) -> Mapping[str, ResourceDefinition]:\n """Returns the set of ResourceDefinition objects specified on the job.\n\n This may not be the complete set of resources required by the job, since those can also be provided on the :py:class:`Definitions` object the job may be provided to.\n """\n return self._resource_defs\n\n @public\n @property\n def partitioned_config(self) -> Optional[PartitionedConfig]:\n """The partitioned config for the job, if it has one.\n\n A partitioned config defines a way to map partition keys to run config for the job.\n """\n return self._partitioned_config\n\n @public\n @property\n def config_mapping(self) -> Optional[ConfigMapping]:\n """The config mapping for the job, if it has one.\n\n A config mapping defines a way to map a top-level config schema to run config for the job.\n """\n return self._config_mapping\n\n @public\n @property\n def loggers(self) -> Mapping[str, LoggerDefinition]:\n """Returns the set of LoggerDefinition objects specified on the job.\n\n If the user has not specified a mapping of :py:class:`LoggerDefinition` objects, then this will default to the :py:func:`colored_console_logger` under the key `console`. 
If a default is specified on the :py:class:`Definitions` object the job was provided to, then that will be used instead.\n """\n from dagster._loggers import default_loggers\n\n return self._loggers or default_loggers()\n\n @public\n @property\n def has_specified_loggers(self) -> bool:\n """Returns true if the job explicitly set loggers, and False if loggers were inherited through defaults or the :py:class:`Definitions` object the job was provided to."""\n return self._loggers is not None\n\n @property\n def required_resource_keys(self) -> AbstractSet[str]:\n return self._required_resource_keys\n\n @property\n def run_config(self) -> Optional[Mapping[str, Any]]:\n return self._run_config\n\n @property\n def run_config_schema(self) -> "RunConfigSchema":\n if self._run_config_schema is None:\n self._run_config_schema = _create_run_config_schema(self, self.required_resource_keys)\n return self._run_config_schema\n\n @public\n @property\n def partitions_def(self) -> Optional[PartitionsDefinition]:\n """Returns the :py:class:`PartitionsDefinition` for the job, if it has one.\n\n A partitions definition defines the set of partition keys the job operates on.\n """\n return None if not self.partitioned_config else self.partitioned_config.partitions_def\n\n @property\n def hook_defs(self) -> AbstractSet[HookDefinition]:\n return self._hook_defs\n\n @property\n def asset_layer(self) -> AssetLayer:\n return self._asset_layer\n\n @property\n def all_node_defs(self) -> Sequence[NodeDefinition]:\n return list(self._all_node_defs.values())\n\n @property\n def top_level_node_defs(self) -> Sequence[NodeDefinition]:\n return self._current_level_node_defs\n\n def node_def_named(self, name: str) -> NodeDefinition:\n check.str_param(name, "name")\n\n check.invariant(name in self._all_node_defs, f"{name} not found")\n return self._all_node_defs[name]\n\n def has_node(self, name: str) -> bool:\n check.str_param(name, "name")\n return name in self._all_node_defs\n\n def get_node(self, handle: NodeHandle) -> Node:\n return self._graph_def.get_node(handle)\n\n def get_op(self, handle: NodeHandle) -> OpNode:\n node = self.get_node(handle)\n assert isinstance(\n node, OpNode\n ), f"Tried to retrieve node {handle} as op, but it represents a nested graph."\n return node\n\n def has_node_named(self, name: str) -> bool:\n return self._graph_def.has_node_named(name)\n\n def get_node_named(self, name: str) -> Node:\n return self._graph_def.node_named(name)\n\n @property\n def nodes(self) -> Sequence[Node]:\n return self._graph_def.nodes\n\n @property\n def nodes_in_topological_order(self) -> Sequence[Node]:\n return self._graph_def.nodes_in_topological_order\n\n def all_dagster_types(self) -> Iterable[DagsterType]:\n return self._graph_def.all_dagster_types()\n\n def has_dagster_type(self, name: str) -> bool:\n return self._graph_def.has_dagster_type(name)\n\n def dagster_type_named(self, name: str) -> DagsterType:\n return self._graph_def.dagster_type_named(name)\n\n def describe_target(self) -> str:\n return f"job '{self.name}'"\n\n def is_using_memoization(self, run_tags: Mapping[str, str]) -> bool:\n tags = merge_dicts(self.tags, run_tags)\n # If someone provides a false value for memoized run tag, then they are intentionally\n # switching off memoization.\n if tags.get(MEMOIZED_RUN_TAG) == "false":\n return False\n return (\n MEMOIZED_RUN_TAG in tags and tags.get(MEMOIZED_RUN_TAG) == "true"\n ) or self.version_strategy is not None\n\n def get_required_resource_defs(self) -> Mapping[str, ResourceDefinition]:\n return 
{\n resource_key: resource\n for resource_key, resource in self.resource_defs.items()\n if resource_key in self.required_resource_keys\n }\n\n def _get_required_resource_keys(self, validate_requirements: bool = False) -> AbstractSet[str]:\n from ..execution.resources_init import get_transitive_required_resource_keys\n\n requirements = self._get_resource_requirements()\n if validate_requirements:\n ensure_requirements_satisfied(self.resource_defs, requirements)\n required_keys = {req.key for req in requirements}\n if validate_requirements:\n return required_keys.union(\n get_transitive_required_resource_keys(required_keys, self.resource_defs)\n )\n else:\n return required_keys\n\n def _get_resource_requirements(self) -> Sequence[ResourceRequirement]:\n return [\n *self._graph_def.get_resource_requirements(self.asset_layer),\n *[\n req\n for hook_def in self._hook_defs\n for req in hook_def.get_resource_requirements(outer_context=f"job '{self._name}'")\n ],\n ]\n\n def validate_resource_requirements_satisfied(self) -> None:\n resource_requirements = self._get_resource_requirements()\n ensure_requirements_satisfied(self.resource_defs, resource_requirements)\n\n def is_missing_required_resources(self) -> bool:\n requirements = self._get_resource_requirements()\n for requirement in requirements:\n if not requirement.resources_contain_key(self.resource_defs):\n return True\n return False\n\n def get_all_hooks_for_handle(self, handle: NodeHandle) -> AbstractSet[HookDefinition]:\n """Gather all the hooks for the given node from all places possibly attached with a hook.\n\n A hook can be attached to any of the following objects\n * Node (node invocation)\n * JobDefinition\n\n Args:\n handle (NodeHandle): The node's handle\n\n Returns:\n FrozenSet[HookDefinition]\n """\n check.inst_param(handle, "handle", NodeHandle)\n hook_defs: Set[HookDefinition] = set()\n\n current = handle\n lineage = []\n while current:\n lineage.append(current.name)\n current = current.parent\n\n # hooks on top-level node\n name = lineage.pop()\n node = self._graph_def.node_named(name)\n hook_defs = hook_defs.union(node.hook_defs)\n\n # hooks on non-top-level nodes\n while lineage:\n name = lineage.pop()\n # While lineage is non-empty, definition is guaranteed to be a graph\n definition = cast(GraphDefinition, node.definition)\n node = definition.node_named(name)\n hook_defs = hook_defs.union(node.hook_defs)\n\n # hooks applied to a job definition will run on every node\n hook_defs = hook_defs.union(self.hook_defs)\n\n return frozenset(hook_defs)\n\n def get_retry_policy_for_handle(self, handle: NodeHandle) -> Optional[RetryPolicy]:\n node = self.get_node(handle)\n definition = node.definition\n\n if node.retry_policy:\n return node.retry_policy\n elif isinstance(definition, OpDefinition) and definition.retry_policy:\n return definition.retry_policy\n\n # could be expanded to look in graph containers\n else:\n return self._op_retry_policy\n\n # make Callable for decorator reference updates\n def __call__(self, *args, **kwargs):\n raise DagsterInvariantViolationError(\n f"Attempted to call job '{self.name}' directly. Jobs should be invoked by "\n "using an execution API function (e.g. `job.execute_in_process`)."\n )\n\n
[docs] @public\n def execute_in_process(\n self,\n run_config: Optional[Union[Mapping[str, Any], "RunConfig"]] = None,\n instance: Optional["DagsterInstance"] = None,\n partition_key: Optional[str] = None,\n raise_on_error: bool = True,\n op_selection: Optional[Sequence[str]] = None,\n asset_selection: Optional[Sequence[AssetKey]] = None,\n run_id: Optional[str] = None,\n input_values: Optional[Mapping[str, object]] = None,\n tags: Optional[Mapping[str, str]] = None,\n resources: Optional[Mapping[str, object]] = None,\n ) -> "ExecuteInProcessResult":\n """Execute the Job in-process, gathering results in-memory.\n\n The `executor_def` on the Job will be ignored, and replaced with the in-process executor.\n If using the default `io_manager`, it will switch from filesystem to in-memory.\n\n\n Args:\n run_config (Optional[Mapping[str, Any]]:\n The configuration for the run\n instance (Optional[DagsterInstance]):\n The instance to execute against, an ephemeral one will be used if none provided.\n partition_key: (Optional[str])\n The string partition key that specifies the run config to execute. Can only be used\n to select run config for jobs with partitioned config.\n raise_on_error (Optional[bool]): Whether or not to raise exceptions when they occur.\n Defaults to ``True``.\n op_selection (Optional[Sequence[str]]): A list of op selection queries (including single op\n names) to execute. For example:\n * ``['some_op']``: selects ``some_op`` itself.\n * ``['*some_op']``: select ``some_op`` and all its ancestors (upstream dependencies).\n * ``['*some_op+++']``: select ``some_op``, all its ancestors, and its descendants\n (downstream dependencies) within 3 levels down.\n * ``['*some_op', 'other_op_a', 'other_op_b+']``: select ``some_op`` and all its\n ancestors, ``other_op_a`` itself, and ``other_op_b`` and its direct child ops.\n input_values (Optional[Mapping[str, Any]]):\n A dictionary that maps python objects to the top-level inputs of the job. Input values provided here will override input values that have been provided to the job directly.\n resources (Optional[Mapping[str, Any]]):\n The resources needed if any are required. Can provide resource instances directly,\n or resource definitions.\n\n Returns:\n :py:class:`~dagster.ExecuteInProcessResult`\n\n """\n from dagster._core.definitions.executor_definition import execute_in_process_executor\n from dagster._core.definitions.run_config import convert_config_input\n from dagster._core.execution.build_resources import wrap_resources_for_execution\n from dagster._core.execution.execute_in_process import core_execute_in_process\n\n run_config = check.opt_mapping_param(convert_config_input(run_config), "run_config")\n op_selection = check.opt_sequence_param(op_selection, "op_selection", str)\n asset_selection = check.opt_sequence_param(asset_selection, "asset_selection", AssetKey)\n resources = check.opt_mapping_param(resources, "resources", key_type=str)\n\n resource_defs = wrap_resources_for_execution(resources)\n\n check.invariant(\n not (op_selection and asset_selection),\n "op_selection and asset_selection cannot both be provided as args to"\n " execute_in_process",\n )\n\n partition_key = check.opt_str_param(partition_key, "partition_key")\n input_values = check.opt_mapping_param(input_values, "input_values")\n\n # Combine provided input values at execute_in_process with input values\n # provided to the definition. 
Input values provided at\n # execute_in_process will override those provided on the definition.\n input_values = merge_dicts(self.input_values, input_values)\n\n bound_resource_defs = dict(self.resource_defs)\n ephemeral_job = JobDefinition.dagster_internal_init(\n name=self._name,\n graph_def=self._graph_def,\n resource_defs={**_swap_default_io_man(bound_resource_defs, self), **resource_defs},\n executor_def=execute_in_process_executor,\n logger_defs=self._loggers,\n hook_defs=self.hook_defs,\n config=self.config_mapping or self.partitioned_config or self.run_config,\n tags=self.tags,\n op_retry_policy=self._op_retry_policy,\n version_strategy=self.version_strategy,\n asset_layer=self.asset_layer,\n input_values=input_values,\n description=self.description,\n partitions_def=self.partitions_def,\n metadata=self.metadata,\n _subset_selection_data=None, # this is added below\n _was_explicitly_provided_resources=True,\n )\n\n ephemeral_job = ephemeral_job.get_subset(\n op_selection=op_selection,\n asset_selection=frozenset(asset_selection) if asset_selection else None,\n )\n\n merged_tags = merge_dicts(self.tags, tags or {})\n if partition_key:\n if not (self.partitions_def and self.partitioned_config):\n check.failed("Attempted to execute a partitioned run for a non-partitioned job")\n self.partitions_def.validate_partition_key(\n partition_key, dynamic_partitions_store=instance\n )\n\n run_config = (\n run_config\n if run_config\n else self.partitioned_config.get_run_config_for_partition_key(partition_key)\n )\n merged_tags.update(\n self.partitioned_config.get_tags_for_partition_key(\n partition_key, job_name=self.name\n )\n )\n\n return core_execute_in_process(\n ephemeral_job=ephemeral_job,\n run_config=run_config,\n instance=instance,\n output_capturing_enabled=True,\n raise_on_error=raise_on_error,\n run_tags=merged_tags,\n run_id=run_id,\n asset_selection=frozenset(asset_selection),\n )
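    # --- Editor's note: illustrative usage sketch, not part of the generated module
    # source. Assumes a trivial op and job; the names `do_nothing` and `my_job` are
    # hypothetical.
    #
    #     from dagster import job, op
    #
    #     @op
    #     def do_nothing():
    #         pass
    #
    #     @job
    #     def my_job():
    #         do_nothing()
    #
    #     # Runs with the in-process executor and an in-memory IO manager by default.
    #     result = my_job.execute_in_process()
    #     assert result.success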
\n\n @property\n def op_selection_data(self) -> Optional[OpSelectionData]:\n return (\n self._subset_selection_data\n if isinstance(self._subset_selection_data, OpSelectionData)\n else None\n )\n\n @property\n def asset_selection_data(self) -> Optional[AssetSelectionData]:\n return (\n self._subset_selection_data\n if isinstance(self._subset_selection_data, AssetSelectionData)\n else None\n )\n\n @property\n def is_subset(self) -> bool:\n return bool(self._subset_selection_data)\n\n def get_subset(\n self,\n *,\n op_selection: Optional[Iterable[str]] = None,\n asset_selection: Optional[AbstractSet[AssetKey]] = None,\n asset_check_selection: Optional[AbstractSet[AssetCheckKey]] = None,\n ) -> Self:\n check.invariant(\n not (op_selection and (asset_selection or asset_check_selection)),\n "op_selection cannot be provided with asset_selection or asset_check_selection to"\n " execute_in_process",\n )\n if op_selection:\n return self._get_job_def_for_op_selection(op_selection)\n if asset_selection or asset_check_selection:\n return self._get_job_def_for_asset_selection(\n asset_selection=asset_selection, asset_check_selection=asset_check_selection\n )\n else:\n return self\n\n def _get_job_def_for_asset_selection(\n self,\n asset_selection: Optional[AbstractSet[AssetKey]] = None,\n asset_check_selection: Optional[AbstractSet[AssetCheckKey]] = None,\n ) -> Self:\n asset_selection = check.opt_set_param(asset_selection, "asset_selection", AssetKey)\n check.opt_set_param(asset_check_selection, "asset_check_selection", AssetCheckKey)\n\n nonexistent_assets = [\n asset\n for asset in asset_selection\n if asset not in self.asset_layer.asset_keys\n and asset not in self.asset_layer.source_assets_by_key\n ]\n nonexistent_asset_strings = [\n asset_str\n for asset_str in (asset.to_string() for asset in nonexistent_assets)\n if asset_str\n ]\n if nonexistent_assets:\n raise DagsterInvalidSubsetError(\n "Assets provided in asset_selection argument "\n f"{', '.join(nonexistent_asset_strings)} do not exist in parent asset group or job."\n )\n\n # Test that selected asset checks exist\n all_check_keys = self.asset_layer.node_output_handles_by_asset_check_key.keys()\n\n nonexistent_asset_checks = [\n asset_check\n for asset_check in asset_check_selection or set()\n if asset_check not in all_check_keys\n ]\n nonexistent_asset_check_strings = [\n str(asset_check) for asset_check in nonexistent_asset_checks\n ]\n if nonexistent_asset_checks:\n raise DagsterInvalidSubsetError(\n "Asset checks provided in asset_check_selection argument"\n f" {', '.join(nonexistent_asset_check_strings)} do not exist in parent asset group"\n " or job."\n )\n\n asset_selection_data = AssetSelectionData(\n asset_selection=asset_selection,\n asset_check_selection=asset_check_selection,\n parent_job_def=self,\n )\n\n check.invariant(\n self.asset_layer.assets_defs_by_key is not None,\n "Asset layer must have _asset_defs argument defined",\n )\n\n new_job = build_asset_selection_job(\n name=self.name,\n assets=set(self.asset_layer.assets_defs_by_key.values()),\n source_assets=self.asset_layer.source_assets_by_key.values(),\n executor_def=self.executor_def,\n resource_defs=self.resource_defs,\n description=self.description,\n tags=self.tags,\n asset_selection=asset_selection,\n asset_check_selection=asset_check_selection,\n asset_selection_data=asset_selection_data,\n config=self.config_mapping or self.partitioned_config,\n asset_checks=self.asset_layer.asset_checks_defs,\n )\n return new_job\n\n def _get_job_def_for_op_selection(self, 
op_selection: Iterable[str]) -> Self:\n try:\n sub_graph = get_graph_subset(self.graph, op_selection)\n\n # if explicit config was passed the config_mapping that resolves the defaults implicitly is\n # very unlikely to work. The job will still present the default config in the Dagster UI.\n config = (\n None\n if self.run_config is not None\n else self.config_mapping or self.partitioned_config\n )\n\n return self._copy(\n config=config,\n graph_def=sub_graph,\n _subset_selection_data=OpSelectionData(\n op_selection=list(op_selection),\n resolved_op_selection=OpSelection(op_selection).resolve(self.graph),\n parent_job_def=self, # used by job snapshot lineage\n ),\n # TODO: subset this structure.\n # https://github.com/dagster-io/dagster/issues/7541\n asset_layer=self.asset_layer,\n )\n except DagsterInvalidDefinitionError as exc:\n # This handles the case when you construct a subset such that an unsatisfied\n # input cannot be loaded from config. Instead of throwing a DagsterInvalidDefinitionError,\n # we re-raise a DagsterInvalidSubsetError.\n node_paths = OpSelection(op_selection).resolve(self.graph)\n raise DagsterInvalidSubsetError(\n f"The attempted subset {str_format_set(node_paths)} for graph "\n f"{self.graph.name} results in an invalid graph."\n ) from exc\n\n
[docs] @public\n @deprecated(\n breaking_version="2.0.0",\n additional_warn_text="Directly instantiate `RunRequest(partition_key=...)` instead.",\n )\n def run_request_for_partition(\n self,\n partition_key: str,\n run_key: Optional[str] = None,\n tags: Optional[Mapping[str, str]] = None,\n asset_selection: Optional[Sequence[AssetKey]] = None,\n run_config: Optional[Mapping[str, Any]] = None,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional["DynamicPartitionsStore"] = None,\n ) -> RunRequest:\n """Creates a RunRequest object for a run that processes the given partition.\n\n Args:\n partition_key: The key of the partition to request a run for.\n run_key (Optional[str]): A string key to identify this launched run. For sensors, ensures that\n only one run is created per run key across all sensor evaluations. For schedules,\n ensures that one run is created per tick, across failure recoveries. Passing in a `None`\n value means that a run will always be launched per evaluation.\n tags (Optional[Dict[str, str]]): A dictionary of tags (string key-value pairs) to attach\n to the launched run.\n run_config (Optional[Mapping[str, Any]]: Configuration for the run. If the job has\n a :py:class:`PartitionedConfig`, this value will override replace the config\n provided by it.\n current_time (Optional[datetime]): Used to determine which time-partitions exist.\n Defaults to now.\n dynamic_partitions_store (Optional[DynamicPartitionsStore]): The DynamicPartitionsStore\n object that is responsible for fetching dynamic partitions. Required when the\n partitions definition is a DynamicPartitionsDefinition with a name defined. Users\n can pass the DagsterInstance fetched via `context.instance` to this argument.\n\n\n Returns:\n RunRequest: an object that requests a run to process the given partition.\n """\n if not (self.partitions_def and self.partitioned_config):\n check.failed("Called run_request_for_partition on a non-partitioned job")\n\n if (\n isinstance(self.partitions_def, DynamicPartitionsDefinition)\n and self.partitions_def.name\n ):\n # Do not support using run_request_for_partition with dynamic partitions,\n # since this requires querying the instance once per run request for the\n # existent dynamic partitions\n check.failed(\n "run_request_for_partition is not supported for dynamic partitions. Instead, use"\n " RunRequest(partition_key=...)"\n )\n\n self.partitions_def.validate_partition_key(\n partition_key,\n current_time=current_time,\n dynamic_partitions_store=dynamic_partitions_store,\n )\n\n run_config = (\n run_config\n if run_config is not None\n else self.partitioned_config.get_run_config_for_partition_key(partition_key)\n )\n run_request_tags = {\n **(tags or {}),\n **self.partitioned_config.get_tags_for_partition_key(\n partition_key,\n job_name=self.name,\n ),\n }\n\n return RunRequest(\n run_key=run_key,\n run_config=run_config,\n tags=run_request_tags,\n job_name=self.name,\n asset_selection=asset_selection,\n partition_key=partition_key,\n )
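    # --- Editor's note: illustrative sketch, not part of the generated module source.
    # `run_request_for_partition` is deprecated; as the warning above states, construct
    # the run request directly. `my_partitioned_job` is a hypothetical partitioned job.
    #
    #     from dagster import RunRequest
    #
    #     # deprecated:
    #     #   request = my_partitioned_job.run_request_for_partition(partition_key="2023-10-01")
    #     # preferred:
    #     request = RunRequest(partition_key="2023-10-01")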
\n\n def get_config_schema_snapshot(self) -> "ConfigSchemaSnapshot":\n return self.get_job_snapshot().config_schema_snapshot\n\n def get_job_snapshot(self) -> "JobSnapshot":\n return self.get_job_index().job_snapshot\n\n def get_job_index(self) -> "JobIndex":\n from dagster._core.host_representation import JobIndex\n from dagster._core.snap import JobSnapshot\n\n return JobIndex(JobSnapshot.from_job_def(self), self.get_parent_job_snapshot())\n\n def get_job_snapshot_id(self) -> str:\n return self.get_job_index().job_snapshot_id\n\n def get_parent_job_snapshot(self) -> Optional["JobSnapshot"]:\n if self.op_selection_data:\n return self.op_selection_data.parent_job_def.get_job_snapshot()\n elif self.asset_selection_data:\n return self.asset_selection_data.parent_job_def.get_job_snapshot()\n else:\n return None\n\n def has_direct_input_value(self, input_name: str) -> bool:\n return input_name in self.input_values\n\n def get_direct_input_value(self, input_name: str) -> object:\n if input_name not in self.input_values:\n raise DagsterInvalidInvocationError(\n f"On job '{self.name}', attempted to retrieve input value for input named"\n f" '{input_name}', but no value was provided. Provided input values:"\n f" {sorted(list(self.input_values.keys()))}"\n )\n return self.input_values[input_name]\n\n def _copy(self, **kwargs: Any) -> "JobDefinition":\n # dict() calls copy dict props\n base_kwargs = dict(\n graph_def=self.graph,\n resource_defs=dict(self.resource_defs),\n executor_def=self._executor_def,\n logger_defs=self._loggers,\n config=self._original_config_argument,\n name=self._name,\n description=self.description,\n tags=self.tags,\n metadata=self._metadata,\n hook_defs=self.hook_defs,\n op_retry_policy=self._op_retry_policy,\n version_strategy=self.version_strategy,\n _subset_selection_data=self._subset_selection_data,\n asset_layer=self.asset_layer,\n input_values=self.input_values,\n partitions_def=self.partitions_def,\n _was_explicitly_provided_resources=None,\n )\n resolved_kwargs = {**base_kwargs, **kwargs} # base kwargs overwritten for conflicts\n job_def = JobDefinition.dagster_internal_init(**resolved_kwargs)\n update_wrapper(job_def, self, updated=())\n return job_def\n\n
[docs] @public\n def with_top_level_resources(\n self, resource_defs: Mapping[str, ResourceDefinition]\n ) -> "JobDefinition":\n """Apply a set of resources to all op instances within the job."""\n resource_defs = check.mapping_param(resource_defs, "resource_defs", key_type=str)\n return self._copy(resource_defs=resource_defs)
\n\n
[docs] @public\n def with_hooks(self, hook_defs: AbstractSet[HookDefinition]) -> "JobDefinition":\n """Apply a set of hooks to all op instances within the job."""\n hook_defs = check.set_param(hook_defs, "hook_defs", of_type=HookDefinition)\n return self._copy(hook_defs=(hook_defs | self.hook_defs))
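    # --- Editor's note: illustrative sketch, not part of the generated module source.
    # Shows attaching a hook to every op in an existing job via `with_hooks`; the names
    # `notify_on_failure` and `my_job` are hypothetical.
    #
    #     from dagster import HookContext, failure_hook
    #
    #     @failure_hook
    #     def notify_on_failure(context: HookContext):
    #         context.log.warning(f"Op {context.op.name} failed")
    #
    #     my_job_with_alerts = my_job.with_hooks({notify_on_failure})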
\n\n def with_executor_def(self, executor_def: ExecutorDefinition) -> "JobDefinition":\n return self._copy(executor_def=executor_def)\n\n def with_logger_defs(self, logger_defs: Mapping[str, LoggerDefinition]) -> "JobDefinition":\n return self._copy(logger_defs=logger_defs)\n\n @property\n def op_selection(self) -> Optional[AbstractSet[str]]:\n return set(self.op_selection_data.op_selection) if self.op_selection_data else None\n\n @property\n def asset_selection(self) -> Optional[AbstractSet[AssetKey]]:\n return self.asset_selection_data.asset_selection if self.asset_selection_data else None\n\n @property\n def asset_check_selection(self) -> Optional[AbstractSet[AssetCheckKey]]:\n return (\n self.asset_selection_data.asset_check_selection if self.asset_selection_data else None\n )\n\n @property\n def resolved_op_selection(self) -> Optional[AbstractSet[str]]:\n return self.op_selection_data.resolved_op_selection if self.op_selection_data else None
\n\n\ndef _swap_default_io_man(resources: Mapping[str, ResourceDefinition], job: JobDefinition):\n """Used to create the user facing experience of the default io_manager\n switching to in-memory when using execute_in_process.\n """\n from dagster._core.storage.mem_io_manager import mem_io_manager\n\n if (\n resources.get(DEFAULT_IO_MANAGER_KEY) in [default_job_io_manager]\n and job.version_strategy is None\n ):\n updated_resources = dict(resources)\n updated_resources[DEFAULT_IO_MANAGER_KEY] = mem_io_manager\n return updated_resources\n\n return resources\n\n\n@dagster_maintained_io_manager\n@io_manager(\n description="Built-in filesystem IO manager that stores and retrieves values using pickling."\n)\ndef default_job_io_manager(init_context: "InitResourceContext"):\n # support overriding the default io manager via environment variables\n module_name = os.getenv("DAGSTER_DEFAULT_IO_MANAGER_MODULE")\n attribute_name = os.getenv("DAGSTER_DEFAULT_IO_MANAGER_ATTRIBUTE")\n silence_failures = os.getenv("DAGSTER_DEFAULT_IO_MANAGER_SILENCE_FAILURES")\n\n if module_name and attribute_name:\n from dagster._core.execution.build_resources import build_resources\n\n try:\n module = importlib.import_module(module_name)\n attr = getattr(module, attribute_name)\n check.invariant(\n isinstance(attr, IOManagerDefinition),\n "DAGSTER_DEFAULT_IO_MANAGER_MODULE and DAGSTER_DEFAULT_IO_MANAGER_ATTRIBUTE"\n " must specify an IOManagerDefinition",\n )\n with build_resources({"io_manager": attr}, instance=init_context.instance) as resources:\n return resources.io_manager\n except Exception as e:\n if not silence_failures:\n raise\n else:\n warnings.warn(\n f"Failed to load io manager override with module: {module_name} attribute:"\n f" {attribute_name}: {e}\\nFalling back to default io manager."\n )\n\n # normally, default to the fs_io_manager\n from dagster._core.storage.fs_io_manager import PickledObjectFilesystemIOManager\n\n instance = check.not_none(init_context.instance)\n return PickledObjectFilesystemIOManager(base_dir=instance.storage_directory())\n\n\n@dagster_maintained_io_manager\n@io_manager(\n description="Built-in filesystem IO manager that stores and retrieves values using pickling.",\n config_schema={"base_dir": Field(StringSource, is_required=False)},\n)\ndef default_job_io_manager_with_fs_io_manager_schema(init_context: "InitResourceContext"):\n # support overriding the default io manager via environment variables\n module_name = os.getenv("DAGSTER_DEFAULT_IO_MANAGER_MODULE")\n attribute_name = os.getenv("DAGSTER_DEFAULT_IO_MANAGER_ATTRIBUTE")\n silence_failures = os.getenv("DAGSTER_DEFAULT_IO_MANAGER_SILENCE_FAILURES")\n\n if module_name and attribute_name:\n from dagster._core.execution.build_resources import build_resources\n\n try:\n module = importlib.import_module(module_name)\n attr = getattr(module, attribute_name)\n check.invariant(\n isinstance(attr, IOManagerDefinition),\n "DAGSTER_DEFAULT_IO_MANAGER_MODULE and DAGSTER_DEFAULT_IO_MANAGER_ATTRIBUTE"\n " must specify an IOManagerDefinition",\n )\n with build_resources({"io_manager": attr}, instance=init_context.instance) as resources:\n return resources.io_manager\n except Exception as e:\n if not silence_failures:\n raise\n else:\n warnings.warn(\n f"Failed to load io manager override with module: {module_name} attribute:"\n f" {attribute_name}: {e}\\nFalling back to default io manager."\n )\n from dagster._core.storage.fs_io_manager import PickledObjectFilesystemIOManager\n\n # normally, default to the fs_io_manager\n base_dir = 
init_context.resource_config.get(\n "base_dir", init_context.instance.storage_directory() if init_context.instance else None\n )\n\n return PickledObjectFilesystemIOManager(base_dir=base_dir)\n\n\ndef _config_mapping_with_default_value(\n inner_schema: ConfigType,\n default_config: Mapping[str, Any],\n job_name: str,\n) -> ConfigMapping:\n if not isinstance(inner_schema, Shape):\n check.failed("Only Shape (dictionary) config_schema allowed on Job ConfigMapping")\n\n def config_fn(x):\n return x\n\n updated_fields = {}\n field_aliases = inner_schema.field_aliases\n for name, field in inner_schema.fields.items():\n if name in default_config:\n updated_fields[name] = Field(\n config=field.config_type,\n default_value=default_config[name],\n description=field.description,\n )\n elif name in field_aliases and field_aliases[name] in default_config:\n updated_fields[name] = Field(\n config=field.config_type,\n default_value=default_config[field_aliases[name]],\n description=field.description,\n )\n else:\n updated_fields[name] = field\n\n config_schema = Shape(\n fields=updated_fields,\n description=(\n "This run config schema was automatically populated with default values "\n "from `default_config`."\n ),\n field_aliases=inner_schema.field_aliases,\n )\n\n config_evr = validate_config(config_schema, default_config)\n if not config_evr.success:\n raise DagsterInvalidConfigError(\n f"Error in config when building job '{job_name}' ",\n config_evr.errors,\n default_config,\n )\n\n return ConfigMapping(\n config_fn=config_fn, config_schema=config_schema, receive_processed_config_values=False\n )\n\n\ndef get_run_config_schema_for_job(\n graph_def: GraphDefinition,\n resource_defs: Mapping[str, ResourceDefinition],\n executor_def: "ExecutorDefinition",\n logger_defs: Mapping[str, LoggerDefinition],\n asset_layer: Optional[AssetLayer],\n was_explicitly_provided_resources: bool = False,\n) -> ConfigType:\n return JobDefinition(\n name=graph_def.name,\n graph_def=graph_def,\n resource_defs=resource_defs,\n executor_def=executor_def,\n logger_defs=logger_defs,\n asset_layer=asset_layer,\n _was_explicitly_provided_resources=was_explicitly_provided_resources,\n ).run_config_schema.run_config_schema_type\n\n\ndef _infer_asset_layer_from_source_asset_deps(job_graph_def: GraphDefinition) -> AssetLayer:\n """For non-asset jobs that have some inputs that are fed from SourceAssets, constructs an\n AssetLayer that includes those SourceAssets.\n """\n asset_keys_by_node_input_handle: Dict[NodeInputHandle, AssetKey] = {}\n source_assets_list = []\n source_asset_keys_set = set()\n io_manager_keys_by_asset_key: Mapping[AssetKey, str] = {}\n\n # each entry is a graph definition and its handle relative to the job root\n stack: List[Tuple[GraphDefinition, Optional[NodeHandle]]] = [(job_graph_def, None)]\n\n while stack:\n graph_def, parent_node_handle = stack.pop()\n\n for node_name, input_source_assets in graph_def.node_input_source_assets.items():\n node_handle = NodeHandle(node_name, parent_node_handle)\n for input_name, source_asset in input_source_assets.items():\n if source_asset.key not in source_asset_keys_set:\n source_asset_keys_set.add(source_asset.key)\n source_assets_list.append(source_asset)\n\n input_handle = NodeInputHandle(node_handle, input_name)\n asset_keys_by_node_input_handle[input_handle] = source_asset.key\n for resolved_input_handle in graph_def.node_dict[\n node_name\n ].definition.resolve_input_to_destinations(input_handle):\n asset_keys_by_node_input_handle[resolved_input_handle] = 
source_asset.key\n\n if source_asset.io_manager_key:\n io_manager_keys_by_asset_key[source_asset.key] = source_asset.io_manager_key\n\n for node_name, node in graph_def.node_dict.items():\n if isinstance(node.definition, GraphDefinition):\n stack.append((node.definition, NodeHandle(node_name, parent_node_handle)))\n\n return AssetLayer(\n assets_defs_by_node_handle={},\n asset_keys_by_node_input_handle=asset_keys_by_node_input_handle,\n asset_info_by_node_output_handle={},\n asset_deps={},\n dependency_node_handles_by_asset_key={},\n assets_defs_by_key={},\n source_assets_by_key={\n source_asset.key: source_asset for source_asset in source_assets_list\n },\n io_manager_keys_by_asset_key=io_manager_keys_by_asset_key,\n dep_asset_keys_by_node_output_handle={},\n partition_mappings_by_asset_dep={},\n asset_checks_defs_by_node_handle={},\n node_output_handles_by_asset_check_key={},\n check_names_by_asset_key_by_node_handle={},\n check_key_by_node_output_handle={},\n )\n\n\ndef _build_all_node_defs(node_defs: Sequence[NodeDefinition]) -> Mapping[str, NodeDefinition]:\n all_defs: Dict[str, NodeDefinition] = {}\n for current_level_node_def in node_defs:\n for node_def in current_level_node_def.iterate_node_defs():\n if node_def.name in all_defs:\n if all_defs[node_def.name] != node_def:\n raise DagsterInvalidDefinitionError(\n 'Detected conflicting node definitions with the same name "{name}"'.format(\n name=node_def.name\n )\n )\n else:\n all_defs[node_def.name] = node_def\n\n return all_defs\n\n\ndef _create_run_config_schema(\n job_def: JobDefinition,\n required_resources: AbstractSet[str],\n) -> "RunConfigSchema":\n from .run_config import (\n RunConfigSchemaCreationData,\n construct_config_type_dictionary,\n define_run_config_schema_type,\n )\n from .run_config_schema import RunConfigSchema\n\n # When executing with a subset job, include the missing nodes\n # from the original job as ignored to allow execution with\n # run config that is valid for the original\n ignored_nodes: Sequence[Node] = []\n if job_def.is_subset:\n if isinstance(job_def.graph, SubselectedGraphDefinition): # op selection provided\n ignored_nodes = job_def.graph.get_top_level_omitted_nodes()\n elif job_def.asset_selection_data:\n parent_job = job_def\n while parent_job.asset_selection_data:\n parent_job = parent_job.asset_selection_data.parent_job_def\n\n ignored_nodes = [\n node for node in parent_job.graph.nodes if not job_def.has_node_named(node.name)\n ]\n else:\n ignored_nodes = []\n\n run_config_schema_type = define_run_config_schema_type(\n RunConfigSchemaCreationData(\n job_name=job_def.name,\n nodes=job_def.graph.nodes,\n graph_def=job_def.graph,\n dependency_structure=job_def.graph.dependency_structure,\n executor_def=job_def.executor_def,\n resource_defs=job_def.resource_defs,\n logger_defs=job_def.loggers,\n ignored_nodes=ignored_nodes,\n required_resources=required_resources,\n direct_inputs=job_def.input_values,\n asset_layer=job_def.asset_layer,\n )\n )\n\n if job_def.config_mapping:\n outer_config_type = job_def.config_mapping.config_schema.config_type\n else:\n outer_config_type = run_config_schema_type\n\n if outer_config_type is None:\n check.failed("Unexpected outer_config_type value of None")\n\n config_type_dict_by_name, config_type_dict_by_key = construct_config_type_dictionary(\n job_def.all_node_defs,\n outer_config_type,\n )\n\n return RunConfigSchema(\n run_config_schema_type=run_config_schema_type,\n config_type_dict_by_name=config_type_dict_by_name,\n 
config_type_dict_by_key=config_type_dict_by_key,\n config_mapping=job_def.config_mapping,\n )\n
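# --- Editor's note: illustrative sketch, not part of the generated module source.
# As `default_job_io_manager` above shows, the built-in filesystem IO manager can be
# swapped for a custom one via environment variables. `my_pkg.io_managers` and
# `my_io_manager` are hypothetical; the named attribute must be an IOManagerDefinition.
#
#     import os
#
#     os.environ["DAGSTER_DEFAULT_IO_MANAGER_MODULE"] = "my_pkg.io_managers"
#     os.environ["DAGSTER_DEFAULT_IO_MANAGER_ATTRIBUTE"] = "my_io_manager"
#     # Optionally fall back to the filesystem IO manager instead of raising on errors:
#     os.environ["DAGSTER_DEFAULT_IO_MANAGER_SILENCE_FAILURES"] = "1"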
", "current_page_name": "_modules/dagster/_core/definitions/job_definition", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.job_definition"}, "load_assets_from_modules": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.load_assets_from_modules

\nimport inspect\nimport os\nimport pkgutil\nfrom importlib import import_module\nfrom types import ModuleType\nfrom typing import Dict, Generator, Iterable, List, Optional, Sequence, Set, Tuple, Union\n\nimport dagster._check as check\nfrom dagster._core.definitions.auto_materialize_policy import AutoMaterializePolicy\nfrom dagster._core.definitions.backfill_policy import BackfillPolicy\nfrom dagster._core.definitions.freshness_policy import FreshnessPolicy\nfrom dagster._core.errors import DagsterInvalidDefinitionError\n\nfrom .assets import AssetsDefinition\nfrom .cacheable_assets import CacheableAssetsDefinition\nfrom .events import (\n    AssetKey,\n    CoercibleToAssetKeyPrefix,\n    check_opt_coercible_to_asset_key_prefix_param,\n)\nfrom .source_asset import SourceAsset\n\n\ndef _find_assets_in_module(\n    module: ModuleType,\n) -> Generator[Union[AssetsDefinition, SourceAsset, CacheableAssetsDefinition], None, None]:\n    """Finds assets in the given module and adds them to the given sets of assets and source assets."""\n    for attr in dir(module):\n        value = getattr(module, attr)\n        if isinstance(value, (AssetsDefinition, SourceAsset, CacheableAssetsDefinition)):\n            yield value\n        elif isinstance(value, list) and all(\n            isinstance(el, (AssetsDefinition, SourceAsset, CacheableAssetsDefinition))\n            for el in value\n        ):\n            yield from value\n\n\ndef assets_from_modules(\n    modules: Iterable[ModuleType], extra_source_assets: Optional[Sequence[SourceAsset]] = None\n) -> Tuple[Sequence[AssetsDefinition], Sequence[SourceAsset], Sequence[CacheableAssetsDefinition]]:\n    """Constructs three lists, a list of assets, a list of source assets, and a list of cacheable\n    assets from the given modules.\n\n    Args:\n        modules (Iterable[ModuleType]): The Python modules to look for assets inside.\n        extra_source_assets (Optional[Sequence[SourceAsset]]): Source assets to include in the\n            group in addition to the source assets found in the modules.\n\n    Returns:\n        Tuple[Sequence[AssetsDefinition], Sequence[SourceAsset], Sequence[CacheableAssetsDefinition]]]:\n            A tuple containing a list of assets, a list of source assets, and a list of\n            cacheable assets defined in the given modules.\n    """\n    asset_ids: Set[int] = set()\n    asset_keys: Dict[AssetKey, ModuleType] = dict()\n    source_assets: List[SourceAsset] = list(\n        check.opt_sequence_param(extra_source_assets, "extra_source_assets", of_type=SourceAsset)\n    )\n    cacheable_assets: List[CacheableAssetsDefinition] = []\n    assets: Dict[AssetKey, AssetsDefinition] = {}\n    for module in modules:\n        for asset in _find_assets_in_module(module):\n            if id(asset) not in asset_ids:\n                asset_ids.add(id(asset))\n                if isinstance(asset, CacheableAssetsDefinition):\n                    cacheable_assets.append(asset)\n                else:\n                    keys = asset.keys if isinstance(asset, AssetsDefinition) else [asset.key]\n                    for key in keys:\n                        if key in asset_keys:\n                            modules_str = ", ".join(\n                                set([asset_keys[key].__name__, module.__name__])\n                            )\n                            error_str = (\n                                f"Asset key {key} is defined multiple times. Definitions found in"\n                                f" modules: {modules_str}. 
"\n                            )\n\n                            if key in assets and isinstance(asset, AssetsDefinition):\n                                if assets[key].node_def == asset.node_def:\n                                    error_str += (\n                                        "One possible cause of this bug is a call to with_resources"\n                                        " outside of a repository definition, causing a duplicate"\n                                        " asset definition."\n                                    )\n\n                            raise DagsterInvalidDefinitionError(error_str)\n                        else:\n                            asset_keys[key] = module\n                            if isinstance(asset, AssetsDefinition):\n                                assets[key] = asset\n                    if isinstance(asset, SourceAsset):\n                        source_assets.append(asset)\n    return list(set(assets.values())), source_assets, cacheable_assets\n\n\n
[docs]def load_assets_from_modules(\n modules: Iterable[ModuleType],\n group_name: Optional[str] = None,\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n *,\n freshness_policy: Optional[FreshnessPolicy] = None,\n auto_materialize_policy: Optional[AutoMaterializePolicy] = None,\n backfill_policy: Optional[BackfillPolicy] = None,\n source_key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n) -> Sequence[Union[AssetsDefinition, SourceAsset, CacheableAssetsDefinition]]:\n """Constructs a list of assets and source assets from the given modules.\n\n Args:\n modules (Iterable[ModuleType]): The Python modules to look for assets inside.\n group_name (Optional[str]):\n Group name to apply to the loaded assets. The returned assets will be copies of the\n loaded objects, with the group name added.\n key_prefix (Optional[Union[str, Sequence[str]]]):\n Prefix to prepend to the keys of the loaded assets. The returned assets will be copies\n of the loaded objects, with the prefix prepended.\n freshness_policy (Optional[FreshnessPolicy]): FreshnessPolicy to apply to all the loaded\n assets.\n auto_materialize_policy (Optional[AutoMaterializePolicy]): AutoMaterializePolicy to apply\n to all the loaded assets.\n backfill_policy (Optional[AutoMaterializePolicy]): BackfillPolicy to apply to all the loaded assets.\n source_key_prefix (bool): Prefix to prepend to the keys of loaded SourceAssets. The returned\n assets will be copies of the loaded objects, with the prefix prepended.\n\n Returns:\n Sequence[Union[AssetsDefinition, SourceAsset]]:\n A list containing assets and source assets defined in the given modules.\n """\n group_name = check.opt_str_param(group_name, "group_name")\n key_prefix = check_opt_coercible_to_asset_key_prefix_param(key_prefix, "key_prefix")\n freshness_policy = check.opt_inst_param(freshness_policy, "freshness_policy", FreshnessPolicy)\n auto_materialize_policy = check.opt_inst_param(\n auto_materialize_policy, "auto_materialize_policy", AutoMaterializePolicy\n )\n backfill_policy = check.opt_inst_param(backfill_policy, "backfill_policy", BackfillPolicy)\n\n (\n assets,\n source_assets,\n cacheable_assets,\n ) = assets_from_modules(modules)\n\n return assets_with_attributes(\n assets,\n source_assets,\n cacheable_assets,\n key_prefix=key_prefix,\n group_name=group_name,\n freshness_policy=freshness_policy,\n auto_materialize_policy=auto_materialize_policy,\n backfill_policy=backfill_policy,\n source_key_prefix=source_key_prefix,\n )
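# --- Editor's note: illustrative sketch, not part of the generated module source.
# Typical use of `load_assets_from_modules`; `my_package.assets_a` and
# `my_package.assets_b` are hypothetical modules containing asset definitions.
#
#     from dagster import Definitions, load_assets_from_modules
#
#     from my_package import assets_a, assets_b
#
#     defs = Definitions(
#         assets=load_assets_from_modules([assets_a, assets_b], group_name="core"),
#     )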
\n\n\n
[docs]def load_assets_from_current_module(\n group_name: Optional[str] = None,\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n *,\n freshness_policy: Optional[FreshnessPolicy] = None,\n auto_materialize_policy: Optional[AutoMaterializePolicy] = None,\n backfill_policy: Optional[BackfillPolicy] = None,\n source_key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n) -> Sequence[Union[AssetsDefinition, SourceAsset, CacheableAssetsDefinition]]:\n """Constructs a list of assets, source assets, and cacheable assets from the module where\n this function is called.\n\n Args:\n group_name (Optional[str]):\n Group name to apply to the loaded assets. The returned assets will be copies of the\n loaded objects, with the group name added.\n key_prefix (Optional[Union[str, Sequence[str]]]):\n Prefix to prepend to the keys of the loaded assets. The returned assets will be copies\n of the loaded objects, with the prefix prepended.\n freshness_policy (Optional[FreshnessPolicy]): FreshnessPolicy to apply to all the loaded\n assets.\n auto_materialize_policy (Optional[AutoMaterializePolicy]): AutoMaterializePolicy to apply\n to all the loaded assets.\n backfill_policy (Optional[AutoMaterializePolicy]): BackfillPolicy to apply to all the loaded assets.\n source_key_prefix (bool): Prefix to prepend to the keys of loaded SourceAssets. The returned\n assets will be copies of the loaded objects, with the prefix prepended.\n\n Returns:\n Sequence[Union[AssetsDefinition, SourceAsset, CachableAssetsDefinition]]:\n A list containing assets, source assets, and cacheable assets defined in the module.\n """\n caller = inspect.stack()[1]\n module = inspect.getmodule(caller[0])\n if module is None:\n check.failed("Could not find a module for the caller")\n\n return load_assets_from_modules(\n [module],\n group_name=group_name,\n key_prefix=key_prefix,\n freshness_policy=freshness_policy,\n auto_materialize_policy=auto_materialize_policy,\n backfill_policy=backfill_policy,\n )
\n\n\ndef assets_from_package_module(\n package_module: ModuleType,\n extra_source_assets: Optional[Sequence[SourceAsset]] = None,\n) -> Tuple[Sequence[AssetsDefinition], Sequence[SourceAsset], Sequence[CacheableAssetsDefinition]]:\n """Constructs three lists, a list of assets, a list of source assets, and a list of cacheable assets\n from the given package module.\n\n Args:\n package_module (ModuleType): The package module to looks for assets inside.\n extra_source_assets (Optional[Sequence[SourceAsset]]): Source assets to include in the\n group in addition to the source assets found in the modules.\n\n Returns:\n Tuple[Sequence[AssetsDefinition], Sequence[SourceAsset], Sequence[CacheableAssetsDefinition]]:\n A tuple containing a list of assets, a list of source assets, and a list of cacheable assets\n defined in the given modules.\n """\n return assets_from_modules(\n _find_modules_in_package(package_module), extra_source_assets=extra_source_assets\n )\n\n\n
[docs]def load_assets_from_package_module(\n package_module: ModuleType,\n group_name: Optional[str] = None,\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n *,\n freshness_policy: Optional[FreshnessPolicy] = None,\n auto_materialize_policy: Optional[AutoMaterializePolicy] = None,\n backfill_policy: Optional[BackfillPolicy] = None,\n source_key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n) -> Sequence[Union[AssetsDefinition, SourceAsset, CacheableAssetsDefinition]]:\n """Constructs a list of assets and source assets that includes all asset\n definitions, source assets, and cacheable assets in all sub-modules of the given package module.\n\n A package module is the result of importing a package.\n\n Args:\n package_module (ModuleType): The package module to looks for assets inside.\n group_name (Optional[str]):\n Group name to apply to the loaded assets. The returned assets will be copies of the\n loaded objects, with the group name added.\n key_prefix (Optional[Union[str, Sequence[str]]]):\n Prefix to prepend to the keys of the loaded assets. The returned assets will be copies\n of the loaded objects, with the prefix prepended.\n freshness_policy (Optional[FreshnessPolicy]): FreshnessPolicy to apply to all the loaded\n assets.\n auto_materialize_policy (Optional[AutoMaterializePolicy]): AutoMaterializePolicy to apply\n to all the loaded assets.\n backfill_policy (Optional[AutoMaterializePolicy]): BackfillPolicy to apply to all the loaded assets.\n source_key_prefix (bool): Prefix to prepend to the keys of loaded SourceAssets. The returned\n assets will be copies of the loaded objects, with the prefix prepended.\n\n Returns:\n Sequence[Union[AssetsDefinition, SourceAsset, CacheableAssetsDefinition]]:\n A list containing assets, source assets, and cacheable assets defined in the module.\n """\n group_name = check.opt_str_param(group_name, "group_name")\n key_prefix = check_opt_coercible_to_asset_key_prefix_param(key_prefix, "key_prefix")\n freshness_policy = check.opt_inst_param(freshness_policy, "freshness_policy", FreshnessPolicy)\n auto_materialize_policy = check.opt_inst_param(\n auto_materialize_policy, "auto_materialize_policy", AutoMaterializePolicy\n )\n backfill_policy = check.opt_inst_param(backfill_policy, "backfill_policy", BackfillPolicy)\n\n (\n assets,\n source_assets,\n cacheable_assets,\n ) = assets_from_package_module(package_module)\n return assets_with_attributes(\n assets,\n source_assets,\n cacheable_assets,\n key_prefix=key_prefix,\n group_name=group_name,\n freshness_policy=freshness_policy,\n auto_materialize_policy=auto_materialize_policy,\n backfill_policy=backfill_policy,\n source_key_prefix=source_key_prefix,\n )
\n\n\n
[docs]def load_assets_from_package_name(\n package_name: str,\n group_name: Optional[str] = None,\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n *,\n freshness_policy: Optional[FreshnessPolicy] = None,\n auto_materialize_policy: Optional[AutoMaterializePolicy] = None,\n backfill_policy: Optional[BackfillPolicy] = None,\n source_key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n) -> Sequence[Union[AssetsDefinition, SourceAsset, CacheableAssetsDefinition]]:\n """Constructs a list of assets, source assets, and cacheable assets that includes all asset\n definitions and source assets in all sub-modules of the given package.\n\n Args:\n package_name (str): The name of a Python package to look for assets inside.\n group_name (Optional[str]):\n Group name to apply to the loaded assets. The returned assets will be copies of the\n loaded objects, with the group name added.\n key_prefix (Optional[Union[str, Sequence[str]]]):\n Prefix to prepend to the keys of the loaded assets. The returned assets will be copies\n of the loaded objects, with the prefix prepended.\n freshness_policy (Optional[FreshnessPolicy]): FreshnessPolicy to apply to all the loaded\n assets.\n auto_materialize_policy (Optional[AutoMaterializePolicy]): AutoMaterializePolicy to apply\n to all the loaded assets.\n backfill_policy (Optional[AutoMaterializePolicy]): BackfillPolicy to apply to all the loaded assets.\n source_key_prefix (bool): Prefix to prepend to the keys of loaded SourceAssets. The returned\n assets will be copies of the loaded objects, with the prefix prepended.\n\n Returns:\n Sequence[Union[AssetsDefinition, SourceAsset, CacheableAssetsDefinition]]:\n A list containing assets, source assets, and cacheable assets defined in the module.\n """\n package_module = import_module(package_name)\n return load_assets_from_package_module(\n package_module,\n group_name=group_name,\n key_prefix=key_prefix,\n freshness_policy=freshness_policy,\n auto_materialize_policy=auto_materialize_policy,\n backfill_policy=backfill_policy,\n )
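# --- Editor's note: illustrative sketch, not part of the generated module source.
# `load_assets_from_package_name` (like `load_assets_from_package_module`) works the
# same way as `load_assets_from_modules`, but walks every sub-module of a package.
# The package name `my_package` and the prefix/group values are hypothetical.
#
#     from dagster import Definitions, load_assets_from_package_name
#
#     defs = Definitions(
#         assets=load_assets_from_package_name(
#             "my_package",
#             key_prefix="warehouse",
#             group_name="ingest",
#         ),
#     )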
\n\n\ndef _find_modules_in_package(package_module: ModuleType) -> Iterable[ModuleType]:\n yield package_module\n package_path = package_module.__file__\n if package_path:\n for _, modname, is_pkg in pkgutil.walk_packages([os.path.dirname(package_path)]):\n submodule = import_module(f"{package_module.__name__}.{modname}")\n if is_pkg:\n yield from _find_modules_in_package(submodule)\n else:\n yield submodule\n else:\n raise ValueError(\n f"Tried to find modules in package {package_module}, but its __file__ is None"\n )\n\n\ndef prefix_assets(\n assets_defs: Sequence[AssetsDefinition],\n key_prefix: CoercibleToAssetKeyPrefix,\n source_assets: Sequence[SourceAsset],\n source_key_prefix: Optional[CoercibleToAssetKeyPrefix],\n) -> Tuple[Sequence[AssetsDefinition], Sequence[SourceAsset]]:\n """Given a list of assets, prefix the input and output asset keys with key_prefix.\n The prefix is not added to source assets.\n\n Input asset keys that reference other assets within assets_defs are "brought along" -\n i.e. prefixed as well.\n\n Example with a single asset:\n\n .. code-block:: python\n\n @asset\n def asset1():\n ...\n\n result = prefixed_asset_key_replacements([asset_1], "my_prefix")\n assert result.assets[0].asset_key == AssetKey(["my_prefix", "asset1"])\n\n Example with dependencies within the list of assets:\n\n .. code-block:: python\n\n @asset\n def asset1():\n ...\n\n @asset\n def asset2(asset1):\n ...\n\n result = prefixed_asset_key_replacements([asset1, asset2], "my_prefix")\n assert result.assets[0].asset_key == AssetKey(["my_prefix", "asset1"])\n assert result.assets[1].asset_key == AssetKey(["my_prefix", "asset2"])\n assert result.assets[1].dependency_keys == {AssetKey(["my_prefix", "asset1"])}\n\n """\n asset_keys = {asset_key for assets_def in assets_defs for asset_key in assets_def.keys}\n source_asset_keys = {source_asset.key for source_asset in source_assets}\n\n if isinstance(key_prefix, str):\n key_prefix = [key_prefix]\n key_prefix = check.is_list(key_prefix, of_type=str)\n\n result_assets: List[AssetsDefinition] = []\n for assets_def in assets_defs:\n output_asset_key_replacements = {\n asset_key: AssetKey([*key_prefix, *asset_key.path]) for asset_key in assets_def.keys\n }\n input_asset_key_replacements = {}\n for dep_asset_key in assets_def.dependency_keys:\n if dep_asset_key in asset_keys:\n input_asset_key_replacements[dep_asset_key] = AssetKey(\n [*key_prefix, *dep_asset_key.path]\n )\n elif source_key_prefix and dep_asset_key in source_asset_keys:\n input_asset_key_replacements[dep_asset_key] = AssetKey(\n [*source_key_prefix, *dep_asset_key.path]\n )\n\n result_assets.append(\n assets_def.with_attributes(\n output_asset_key_replacements=output_asset_key_replacements,\n input_asset_key_replacements=input_asset_key_replacements,\n )\n )\n\n if source_key_prefix:\n result_source_assets = [\n source_asset.with_attributes(key=AssetKey([*source_key_prefix, *source_asset.key.path]))\n for source_asset in source_assets\n ]\n else:\n result_source_assets = source_assets\n\n return result_assets, result_source_assets\n\n\ndef assets_with_attributes(\n assets_defs: Sequence[AssetsDefinition],\n source_assets: Sequence[SourceAsset],\n cacheable_assets: Sequence[CacheableAssetsDefinition],\n key_prefix: Optional[Sequence[str]],\n group_name: Optional[str],\n freshness_policy: Optional[FreshnessPolicy],\n auto_materialize_policy: Optional[AutoMaterializePolicy],\n backfill_policy: Optional[BackfillPolicy],\n source_key_prefix: Optional[Sequence[str]],\n) -> 
Sequence[Union[AssetsDefinition, SourceAsset, CacheableAssetsDefinition]]:\n # There is a tricky edge case here where if a non-cacheable asset depends on a cacheable asset,\n # and the assets are prefixed, the non-cacheable asset's dependency will not be prefixed since\n # at prefix-time it is not known that its dependency is one of the cacheable assets.\n # https://github.com/dagster-io/dagster/pull/10389#pullrequestreview-1170913271\n if key_prefix:\n assets_defs, source_assets = prefix_assets(\n assets_defs, key_prefix, source_assets, source_key_prefix\n )\n cacheable_assets = [\n cached_asset.with_prefix_for_all(key_prefix) for cached_asset in cacheable_assets\n ]\n\n if group_name or freshness_policy or auto_materialize_policy or backfill_policy:\n assets_defs = [\n asset.with_attributes(\n group_names_by_key=(\n {asset_key: group_name for asset_key in asset.keys} if group_name else None\n ),\n freshness_policy=freshness_policy,\n auto_materialize_policy=auto_materialize_policy,\n backfill_policy=backfill_policy,\n )\n for asset in assets_defs\n ]\n if group_name:\n source_assets = [\n source_asset.with_attributes(group_name=group_name)\n for source_asset in source_assets\n ]\n cacheable_assets = [\n cached_asset.with_attributes_for_all(\n group_name,\n freshness_policy=freshness_policy,\n auto_materialize_policy=auto_materialize_policy,\n backfill_policy=backfill_policy,\n )\n for cached_asset in cacheable_assets\n ]\n\n return [*assets_defs, *source_assets, *cacheable_assets]\n
", "current_page_name": "_modules/dagster/_core/definitions/load_assets_from_modules", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.load_assets_from_modules"}, "logger_definition": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.logger_definition

\nfrom typing import TYPE_CHECKING, Any, Callable, Optional, Union, cast, overload\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.errors import DagsterInvalidInvocationError\n\nfrom ..decorator_utils import get_function_params\nfrom .config import is_callable_valid_config_arg\nfrom .configurable import AnonymousConfigurableDefinition\nfrom .definition_config_schema import (\n    CoercableToConfigSchema,\n    convert_user_facing_definition_config_schema,\n)\n\nif TYPE_CHECKING:\n    import logging\n\n    from dagster._core.definitions import JobDefinition\n    from dagster._core.execution.context.logger import InitLoggerContext, UnboundInitLoggerContext\n\n    InitLoggerFunction = Callable[[InitLoggerContext], logging.Logger]\n\n\n
[docs]class LoggerDefinition(AnonymousConfigurableDefinition):\n """Core class for defining loggers.\n\n Loggers are job-scoped logging handlers, which will be automatically invoked whenever\n dagster messages are logged from within a job.\n\n Args:\n logger_fn (Callable[[InitLoggerContext], logging.Logger]): User-provided function to\n instantiate the logger. This logger will be automatically invoked whenever the methods\n on ``context.log`` are called from within job compute logic.\n config_schema (Optional[ConfigSchema]): The schema for the config. Configuration data available in\n `init_context.logger_config`. If not set, Dagster will accept any config provided.\n description (Optional[str]): A human-readable description of this logger.\n """\n\n def __init__(\n self,\n logger_fn: "InitLoggerFunction",\n config_schema: Any = None,\n description: Optional[str] = None,\n ):\n self._logger_fn = check.callable_param(logger_fn, "logger_fn")\n self._config_schema = convert_user_facing_definition_config_schema(config_schema)\n self._description = check.opt_str_param(description, "description")\n\n def __call__(self, *args, **kwargs):\n from dagster._core.execution.context.logger import UnboundInitLoggerContext\n\n from .logger_invocation import logger_invocation_result\n\n if len(args) == 0 and len(kwargs) == 0:\n raise DagsterInvalidInvocationError(\n "Logger initialization function has context argument, but no context argument was "\n "provided when invoking."\n )\n if len(args) + len(kwargs) > 1:\n raise DagsterInvalidInvocationError(\n "Initialization of logger received multiple arguments. Only a first "\n "positional context parameter should be provided when invoking."\n )\n\n context_param_name = get_function_params(self.logger_fn)[0].name\n\n if args:\n context = check.opt_inst_param(\n args[0],\n context_param_name,\n UnboundInitLoggerContext,\n default=UnboundInitLoggerContext(logger_config=None, job_def=None),\n )\n return logger_invocation_result(self, context)\n else:\n if context_param_name not in kwargs:\n raise DagsterInvalidInvocationError(\n f"Logger initialization expected argument '{context_param_name}'."\n )\n context = check.opt_inst_param(\n kwargs[context_param_name],\n context_param_name,\n UnboundInitLoggerContext,\n default=UnboundInitLoggerContext(logger_config=None, job_def=None),\n )\n\n return logger_invocation_result(self, context)\n\n @public\n @property\n def logger_fn(self) -> "InitLoggerFunction":\n """Callable[[InitLoggerContext], logging.Logger]: The function that will be invoked to\n instantiate the logger.\n """\n return self._logger_fn\n\n @public\n @property\n def config_schema(self) -> Any:\n """Any: The schema for the logger's config. Configuration data available in `init_context.logger_config`."""\n return self._config_schema\n\n @public\n @property\n def description(self) -> Optional[str]:\n """Optional[str]: A human-readable description of the logger."""\n return self._description\n\n def copy_for_configured(\n self,\n description: Optional[str],\n config_schema: Any,\n ) -> "LoggerDefinition":\n return LoggerDefinition(\n config_schema=config_schema,\n description=description or self.description,\n logger_fn=self.logger_fn,\n )
\n\n\n@overload\ndef logger(\n config_schema: CoercableToConfigSchema, description: Optional[str] = ...\n) -> Callable[["InitLoggerFunction"], "LoggerDefinition"]: ...\n\n\n@overload\ndef logger(\n config_schema: "InitLoggerFunction", description: Optional[str] = ...\n) -> "LoggerDefinition": ...\n\n\n
[docs]def logger(\n config_schema: Union[CoercableToConfigSchema, "InitLoggerFunction"] = None,\n description: Optional[str] = None,\n) -> Union["LoggerDefinition", Callable[["InitLoggerFunction"], "LoggerDefinition"]]:\n """Define a logger.\n\n The decorated function should accept an :py:class:`InitLoggerContext` and return an instance of\n :py:class:`python:logging.Logger`. This function will become the ``logger_fn`` of an underlying\n :py:class:`LoggerDefinition`.\n\n Args:\n config_schema (Optional[ConfigSchema]): The schema for the config. Configuration data available in\n `init_context.logger_config`. If not set, Dagster will accept any config provided.\n description (Optional[str]): A human-readable description of the logger.\n """\n # This case is for when decorator is used bare, without arguments.\n # E.g. @logger versus @logger()\n if callable(config_schema) and not is_callable_valid_config_arg(config_schema):\n return LoggerDefinition(logger_fn=cast("InitLoggerFunction", config_schema))\n\n def _wrap(logger_fn: "InitLoggerFunction") -> "LoggerDefinition":\n return LoggerDefinition(\n logger_fn=logger_fn,\n config_schema=config_schema,\n description=description,\n )\n\n return _wrap
\n\n\n
[docs]def build_init_logger_context(\n logger_config: Any = None,\n job_def: Optional["JobDefinition"] = None,\n) -> "UnboundInitLoggerContext":\n """Builds logger initialization context from provided parameters.\n\n This function can be used to provide the context argument to the invocation of a logger\n definition.\n\n Args:\n logger_config (Any): The config to provide during initialization of logger.\n job_def (Optional[JobDefinition]): The job definition that the logger will be used with.\n\n Examples:\n .. code-block:: python\n\n context = build_init_logger_context()\n logger_to_init(context)\n """\n from dagster._core.definitions import JobDefinition\n from dagster._core.execution.context.logger import UnboundInitLoggerContext\n\n check.opt_inst_param(job_def, "job_def", JobDefinition)\n\n return UnboundInitLoggerContext(logger_config=logger_config, job_def=job_def)
\n
", "current_page_name": "_modules/dagster/_core/definitions/logger_definition", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.logger_definition"}, "materialize": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.materialize

\nfrom typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence, Set, Union\n\nimport dagster._check as check\nfrom dagster._core.definitions.unresolved_asset_job_definition import define_asset_job\nfrom dagster._utils.merger import merge_dicts\n\nfrom ..errors import DagsterInvariantViolationError\nfrom ..instance import DagsterInstance\nfrom ..storage.io_manager import IOManagerDefinition\nfrom ..storage.mem_io_manager import mem_io_manager\nfrom .assets import AssetsDefinition\nfrom .source_asset import SourceAsset\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.asset_selection import CoercibleToAssetSelection\n    from dagster._core.definitions.events import AssetKey\n\n    from ..execution.execute_in_process_result import ExecuteInProcessResult\n\nEPHEMERAL_JOB_NAME = "__ephemeral_asset_job__"\n\n\n
[docs]def materialize(\n assets: Sequence[Union[AssetsDefinition, SourceAsset]],\n run_config: Any = None,\n instance: Optional[DagsterInstance] = None,\n resources: Optional[Mapping[str, object]] = None,\n partition_key: Optional[str] = None,\n raise_on_error: bool = True,\n tags: Optional[Mapping[str, str]] = None,\n selection: Optional["CoercibleToAssetSelection"] = None,\n) -> "ExecuteInProcessResult":\n """Executes a single-threaded, in-process run which materializes provided assets.\n\n By default, will materialize assets to the local filesystem.\n\n Args:\n assets (Sequence[Union[AssetsDefinition, SourceAsset]]):\n The assets to materialize.\n\n Unless you're using `deps` or `non_argument_deps`, you must also include all assets that are\n upstream of the assets that you want to materialize. This is because those upstream\n asset definitions have information that is needed to load their contents while\n materializing the downstream assets.\n\n You can use the `selection` argument to distinguish between assets that you want to\n materialize and assets that are just present for loading.\n resources (Optional[Mapping[str, object]]):\n The resources needed for execution. Can provide resource instances\n directly, or resource definitions. Note that if provided resources\n conflict with resources directly on assets, an error will be thrown.\n run_config (Optional[Any]): The run config to use for the run that materializes the assets.\n partition_key: (Optional[str])\n The string partition key that specifies the run config to execute. Can only be used\n to select run config for assets with partitioned config.\n tags (Optional[Mapping[str, str]]): Tags for the run.\n selection (Optional[Union[str, Sequence[str], Sequence[AssetKey], Sequence[Union[AssetsDefinition, SourceAsset]], AssetSelection]]):\n A sub-selection of assets to materialize.\n\n If not provided, then all assets will be materialized.\n\n If providing a string or sequence of strings,\n https://docs.dagster.io/concepts/assets/asset-selection-syntax describes the accepted\n syntax.\n\n Returns:\n ExecuteInProcessResult: The result of the execution.\n\n Examples:\n .. code-block:: python\n\n @asset\n def asset1():\n ...\n\n @asset\n def asset2(asset1):\n ...\n\n # executes a run that materializes asset1 and then asset2\n materialize([asset1, asset2])\n\n # executes a run that materializes just asset2, loading its input from asset1\n materialize([asset1, asset2], selection=[asset2])\n """\n from dagster._core.definitions.definitions_class import Definitions\n\n assets = check.sequence_param(assets, "assets", of_type=(AssetsDefinition, SourceAsset))\n instance = check.opt_inst_param(instance, "instance", DagsterInstance)\n partition_key = check.opt_str_param(partition_key, "partition_key")\n resources = check.opt_mapping_param(resources, "resources", key_type=str)\n\n all_executable_keys: Set[AssetKey] = set()\n for asset in assets:\n if isinstance(asset, AssetsDefinition):\n all_executable_keys = all_executable_keys.union(set(asset.keys))\n\n defs = Definitions(\n jobs=[define_asset_job(name=EPHEMERAL_JOB_NAME, selection=selection)],\n assets=assets,\n resources=resources,\n )\n return check.not_none(\n defs.get_job_def(EPHEMERAL_JOB_NAME),\n "This should always return a job",\n ).execute_in_process(\n run_config=run_config,\n instance=instance,\n partition_key=partition_key,\n raise_on_error=raise_on_error,\n tags=tags,\n )
\n\n\n
[docs]def materialize_to_memory(\n assets: Sequence[Union[AssetsDefinition, SourceAsset]],\n run_config: Any = None,\n instance: Optional[DagsterInstance] = None,\n resources: Optional[Mapping[str, object]] = None,\n partition_key: Optional[str] = None,\n raise_on_error: bool = True,\n tags: Optional[Mapping[str, str]] = None,\n selection: Optional["CoercibleToAssetSelection"] = None,\n) -> "ExecuteInProcessResult":\n """Executes a single-threaded, in-process run which materializes provided assets in memory.\n\n Will explicitly use :py:func:`mem_io_manager` for all required io manager\n keys. If any io managers are directly provided using the `resources`\n argument, a :py:class:`DagsterInvariantViolationError` will be thrown.\n\n Args:\n assets (Sequence[Union[AssetsDefinition, SourceAsset]]):\n The assets to materialize. Can also provide :py:class:`SourceAsset` objects to fill dependencies for asset defs.\n run_config (Optional[Any]): The run config to use for the run that materializes the assets.\n resources (Optional[Mapping[str, object]]):\n The resources needed for execution. Can provide resource instances\n directly, or resource definitions. If provided resources\n conflict with resources directly on assets, an error will be thrown.\n partition_key: (Optional[str])\n The string partition key that specifies the run config to execute. Can only be used\n to select run config for assets with partitioned config.\n tags (Optional[Mapping[str, str]]): Tags for the run.\n selection (Optional[Union[str, Sequence[str], Sequence[AssetKey], Sequence[Union[AssetsDefinition, SourceAsset]], AssetSelection]]):\n A sub-selection of assets to materialize.\n\n If not provided, then all assets will be materialized.\n\n If providing a string or sequence of strings,\n https://docs.dagster.io/concepts/assets/asset-selection-syntax describes the accepted\n syntax.\n\n Returns:\n ExecuteInProcessResult: The result of the execution.\n\n Examples:\n .. code-block:: python\n\n @asset\n def asset1():\n ...\n\n @asset\n def asset2(asset1):\n ...\n\n # executes a run that materializes asset1 and then asset2\n materialize([asset1, asset2])\n\n # executes a run that materializes just asset1\n materialize([asset1, asset2], selection=[asset1])\n """\n assets = check.sequence_param(assets, "assets", of_type=(AssetsDefinition, SourceAsset))\n\n # Gather all resource defs for the purpose of checking io managers.\n resources_dict = resources or {}\n all_resource_keys = set(resources_dict.keys())\n for asset in assets:\n all_resource_keys = all_resource_keys.union(asset.resource_defs.keys())\n\n io_manager_keys = _get_required_io_manager_keys(assets)\n for io_manager_key in io_manager_keys:\n if io_manager_key in all_resource_keys:\n raise DagsterInvariantViolationError(\n "Attempted to call `materialize_to_memory` with a resource "\n f"provided for io manager key '{io_manager_key}'. Do not "\n "provide resources for io manager keys when calling "\n "`materialize_to_memory`, as it will override io management "\n "behavior for all keys."\n )\n\n resource_defs = merge_dicts({key: mem_io_manager for key in io_manager_keys}, resources_dict)\n\n return materialize(\n assets=assets,\n run_config=run_config,\n resources=resource_defs,\n instance=instance,\n partition_key=partition_key,\n raise_on_error=raise_on_error,\n tags=tags,\n selection=selection,\n )
\n\n\ndef _get_required_io_manager_keys(\n assets: Sequence[Union[AssetsDefinition, SourceAsset]]\n) -> Set[str]:\n io_manager_keys = set()\n for asset in assets:\n for requirement in asset.get_resource_requirements():\n if requirement.expected_type == IOManagerDefinition:\n io_manager_keys.add(requirement.key)\n return io_manager_keys\n
", "current_page_name": "_modules/dagster/_core/definitions/materialize", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.materialize"}, "metadata": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.metadata

\nimport os\nfrom abc import ABC, abstractmethod\nfrom typing import (\n    TYPE_CHECKING,\n    Any,\n    Callable,\n    Dict,\n    Generic,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Union,\n    cast,\n)\n\nfrom typing_extensions import Self, TypeAlias, TypeVar\n\nimport dagster._check as check\nimport dagster._seven as seven\nfrom dagster._annotations import PublicAttr, deprecated, deprecated_param, experimental, public\nfrom dagster._core.errors import DagsterInvalidMetadata\nfrom dagster._serdes import whitelist_for_serdes\nfrom dagster._serdes.serdes import (\n    FieldSerializer,\n    PackableValue,\n    UnpackContext,\n    WhitelistMap,\n    pack_value,\n)\nfrom dagster._utils.warnings import (\n    deprecation_warning,\n    normalize_renamed_param,\n)\n\nfrom .table import (  # re-exported\n    TableColumn as TableColumn,\n    TableColumnConstraints as TableColumnConstraints,\n    TableConstraints as TableConstraints,\n    TableRecord as TableRecord,\n    TableSchema as TableSchema,\n)\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.events import AssetKey\n\nArbitraryMetadataMapping: TypeAlias = Mapping[str, Any]\n\nRawMetadataValue = Union[\n    "MetadataValue",\n    TableSchema,\n    "AssetKey",\n    os.PathLike,\n    Dict[Any, Any],\n    float,\n    int,\n    List[Any],\n    str,\n    None,\n]\n\nMetadataMapping: TypeAlias = Mapping[str, "MetadataValue"]\nMetadataUserInput: TypeAlias = Mapping[str, RawMetadataValue]\n\nT_Packable = TypeVar("T_Packable", bound=PackableValue, default=PackableValue, covariant=True)\n\n# ########################\n# ##### NORMALIZATION\n# ########################\n\n\ndef normalize_metadata(\n    metadata: Mapping[str, RawMetadataValue],\n    allow_invalid: bool = False,\n) -> Mapping[str, "MetadataValue"]:\n    # This is a stopgap measure to deal with unsupported metadata values, which occur when we try\n    # to convert arbitrary metadata (on e.g. OutputDefinition) to a MetadataValue, which is required\n    # for serialization. This will cause unsupported values to be silently replaced with a\n    # string placeholder.\n    normalized_metadata: Dict[str, MetadataValue] = {}\n    for k, v in metadata.items():\n        try:\n            normalized_value = normalize_metadata_value(v)\n        except DagsterInvalidMetadata as e:\n            if allow_invalid:\n                deprecation_warning(\n                    "Support for arbitrary metadata values",\n                    "2.0.0",\n                    additional_warn_text=(\n                        "In the future, all user-supplied metadata values must be one of"\n                        f" {RawMetadataValue}"\n                    ),\n                    stacklevel=4,  # to get the caller of `normalize_metadata`\n                )\n                normalized_value = TextMetadataValue(f"[{v.__class__.__name__}] (unserializable)")\n            else:\n                raise DagsterInvalidMetadata(\n                    f'Could not resolve the metadata value for "{k}" to a known type. 
{e}'\n                ) from None\n        normalized_metadata[k] = normalized_value\n\n    return normalized_metadata\n\n\ndef normalize_metadata_value(raw_value: RawMetadataValue) -> "MetadataValue[Any]":\n    from dagster._core.definitions.events import AssetKey\n\n    if isinstance(raw_value, MetadataValue):\n        return raw_value\n    elif isinstance(raw_value, str):\n        return MetadataValue.text(raw_value)\n    elif isinstance(raw_value, float):\n        return MetadataValue.float(raw_value)\n    elif isinstance(raw_value, bool):\n        return MetadataValue.bool(raw_value)\n    elif isinstance(raw_value, int):\n        return MetadataValue.int(raw_value)\n    elif isinstance(raw_value, (list, dict)):\n        return MetadataValue.json(raw_value)\n    elif isinstance(raw_value, os.PathLike):\n        return MetadataValue.path(raw_value)\n    elif isinstance(raw_value, AssetKey):\n        return MetadataValue.asset(raw_value)\n    elif isinstance(raw_value, TableSchema):\n        return MetadataValue.table_schema(raw_value)\n    elif raw_value is None:\n        return MetadataValue.null()\n\n    raise DagsterInvalidMetadata(\n        f"Its type was {type(raw_value)}. Consider wrapping the value with the appropriate "\n        "MetadataValue type."\n    )\n\n\n# ########################\n# ##### METADATA VALUE\n# ########################\n\n\n
[docs]class MetadataValue(ABC, Generic[T_Packable]):\n """Utility class to wrap metadata values passed into Dagster events so that they can be\n displayed in the Dagster UI and other tooling.\n\n .. code-block:: python\n\n @op\n def emit_metadata(context, df):\n yield AssetMaterialization(\n asset_key="my_dataset",\n metadata={\n "my_text_label": "hello",\n "dashboard_url": MetadataValue.url("http://mycoolsite.com/my_dashboard"),\n "num_rows": 0,\n },\n )\n """\n\n @public\n @property\n @abstractmethod\n def value(self) -> T_Packable:\n """The wrapped value."""\n raise NotImplementedError()\n\n
[docs] @public\n @staticmethod\n def text(text: str) -> "TextMetadataValue":\n """Static constructor for a metadata value wrapping text as\n :py:class:`TextMetadataValue`. Can be used as the value type for the `metadata`\n parameter for supported events.\n\n Example:\n .. code-block:: python\n\n @op\n def emit_metadata(context, df):\n yield AssetMaterialization(\n asset_key="my_dataset",\n metadata={\n "my_text_label": MetadataValue.text("hello")\n },\n )\n\n Args:\n text (str): The text string for a metadata entry.\n """\n return TextMetadataValue(text)
\n\n
[docs] @public\n @staticmethod\n def url(url: str) -> "UrlMetadataValue":\n """Static constructor for a metadata value wrapping a URL as\n :py:class:`UrlMetadataValue`. Can be used as the value type for the `metadata`\n parameter for supported events.\n\n Example:\n .. code-block:: python\n\n @op\n def emit_metadata(context):\n yield AssetMaterialization(\n asset_key="my_dashboard",\n metadata={\n "dashboard_url": MetadataValue.url("http://mycoolsite.com/my_dashboard"),\n }\n )\n\n Args:\n url (str): The URL for a metadata entry.\n """\n return UrlMetadataValue(url)
\n\n
[docs] @public\n @staticmethod\n def path(path: Union[str, os.PathLike]) -> "PathMetadataValue":\n """Static constructor for a metadata value wrapping a path as\n :py:class:`PathMetadataValue`.\n\n Example:\n .. code-block:: python\n\n @op\n def emit_metadata(context):\n yield AssetMaterialization(\n asset_key="my_dataset",\n metadata={\n "filepath": MetadataValue.path("path/to/file"),\n }\n )\n\n Args:\n path (str): The path for a metadata entry.\n """\n return PathMetadataValue(path)
\n\n
[docs] @public\n @staticmethod\n def notebook(path: Union[str, os.PathLike]) -> "NotebookMetadataValue":\n """Static constructor for a metadata value wrapping a notebook path as\n :py:class:`NotebookMetadataValue`.\n\n Example:\n .. code-block:: python\n\n @op\n def emit_metadata(context):\n yield AssetMaterialization(\n asset_key="my_dataset",\n metadata={\n "notebook_path": MetadataValue.notebook("path/to/notebook.ipynb"),\n }\n )\n\n Args:\n path (str): The path to a notebook for a metadata entry.\n """\n return NotebookMetadataValue(path)
\n\n
[docs] @public\n @staticmethod\n def json(data: Union[Sequence[Any], Mapping[str, Any]]) -> "JsonMetadataValue":\n """Static constructor for a metadata value wrapping a json-serializable list or dict\n as :py:class:`JsonMetadataValue`. Can be used as the value type for the `metadata`\n parameter for supported events.\n\n Example:\n .. code-block:: python\n\n @op\n def emit_metadata(context):\n yield ExpectationResult(\n success=not missing_things,\n label="is_present",\n metadata={\n "about my dataset": MetadataValue.json({"missing_columns": missing_things})\n },\n )\n\n Args:\n data (Union[Sequence[Any], Mapping[str, Any]]): The JSON data for a metadata entry.\n """\n return JsonMetadataValue(data)
\n\n
[docs] @public\n @staticmethod\n def md(data: str) -> "MarkdownMetadataValue":\n """Static constructor for a metadata value wrapping markdown data as\n :py:class:`MarkdownMetadataValue`. Can be used as the value type for the `metadata`\n parameter for supported events.\n\n\n Example:\n .. code-block:: python\n\n @op\n def emit_metadata(context, md_str):\n yield AssetMaterialization(\n asset_key="info",\n metadata={\n 'Details': MetadataValue.md(md_str)\n },\n )\n\n Args:\n data (str): The markdown for a metadata entry.\n """\n return MarkdownMetadataValue(data)
\n\n
[docs] @public\n @staticmethod\n def python_artifact(python_artifact: Callable) -> "PythonArtifactMetadataValue":\n """Static constructor for a metadata value wrapping a python artifact as\n :py:class:`PythonArtifactMetadataValue`. Can be used as the value type for the\n `metadata` parameter for supported events.\n\n Example:\n .. code-block:: python\n\n @op\n def emit_metadata(context, df):\n yield AssetMaterialization(\n asset_key="my_dataset",\n metadata={\n "class": MetadataValue.python_artifact(MyClass),\n "function": MetadataValue.python_artifact(my_function),\n }\n )\n\n Args:\n value (Callable): The python class or function for a metadata entry.\n """\n check.callable_param(python_artifact, "python_artifact")\n return PythonArtifactMetadataValue(python_artifact.__module__, python_artifact.__name__)
\n\n
[docs] @public\n @staticmethod\n def float(value: float) -> "FloatMetadataValue":\n """Static constructor for a metadata value wrapping a float as\n :py:class:`FloatMetadataValue`. Can be used as the value type for the `metadata`\n parameter for supported events.\n\n Example:\n .. code-block:: python\n\n @op\n def emit_metadata(context, df):\n yield AssetMaterialization(\n asset_key="my_dataset",\n metadata={\n "size (bytes)": MetadataValue.float(calculate_bytes(df)),\n }\n )\n\n Args:\n value (float): The float value for a metadata entry.\n """\n return FloatMetadataValue(value)
\n\n
[docs] @public\n @staticmethod\n def int(value: int) -> "IntMetadataValue":\n """Static constructor for a metadata value wrapping an int as\n :py:class:`IntMetadataValue`. Can be used as the value type for the `metadata`\n parameter for supported events.\n\n Example:\n .. code-block:: python\n\n @op\n def emit_metadata(context, df):\n yield AssetMaterialization(\n asset_key="my_dataset",\n metadata={\n "number of rows": MetadataValue.int(len(df)),\n },\n )\n\n Args:\n value (int): The int value for a metadata entry.\n """\n return IntMetadataValue(value)
\n\n
[docs] @public\n @staticmethod\n def bool(value: bool) -> "BoolMetadataValue":\n """Static constructor for a metadata value wrapping a bool as\n :py:class:`BoolMetadataValue`. Can be used as the value type for the `metadata`\n parameter for supported events.\n\n Example:\n .. code-block:: python\n\n @op\n def emit_metadata(context, df):\n yield AssetMaterialization(\n asset_key="my_dataset",\n metadata={\n "num rows > 1000": MetadataValue.bool(len(df) > 1000),\n },\n )\n\n Args:\n value (bool): The bool value for a metadata entry.\n """\n return BoolMetadataValue(value)
\n\n
[docs] @public\n @staticmethod\n def dagster_run(run_id: str) -> "DagsterRunMetadataValue":\n """Static constructor for a metadata value wrapping a reference to a Dagster run.\n\n Args:\n run_id (str): The ID of the run.\n """\n return DagsterRunMetadataValue(run_id)
\n\n
[docs] @public\n @staticmethod\n def asset(asset_key: "AssetKey") -> "DagsterAssetMetadataValue":\n """Static constructor for a metadata value referencing a Dagster asset, by key.\n\n For example:\n\n .. code-block:: python\n\n @op\n def validate_table(context, df):\n yield AssetMaterialization(\n asset_key=AssetKey("my_table"),\n metadata={\n "Related asset": MetadataValue.asset(AssetKey('my_other_table')),\n },\n )\n\n Args:\n asset_key (AssetKey): The asset key referencing the asset.\n """\n from dagster._core.definitions.events import AssetKey\n\n check.inst_param(asset_key, "asset_key", AssetKey)\n return DagsterAssetMetadataValue(asset_key)
\n\n
[docs] @public\n @staticmethod\n @experimental\n def table(\n records: Sequence[TableRecord], schema: Optional[TableSchema] = None\n ) -> "TableMetadataValue":\n """Static constructor for a metadata value wrapping arbitrary tabular data as\n :py:class:`TableMetadataValue`. Can be used as the value type for the `metadata`\n parameter for supported events.\n\n Example:\n .. code-block:: python\n\n @op\n def emit_metadata(context):\n yield ExpectationResult(\n success=not has_errors,\n label="is_valid",\n metadata={\n "errors": MetadataValue.table(\n records=[\n TableRecord(code="invalid-data-type", row=2, col="name"),\n ],\n schema=TableSchema(\n columns=[\n TableColumn(name="code", type="string"),\n TableColumn(name="row", type="int"),\n TableColumn(name="col", type="string"),\n ]\n )\n ),\n },\n )\n """\n return TableMetadataValue(records, schema)
\n\n
[docs] @public\n @staticmethod\n def table_schema(\n schema: TableSchema,\n ) -> "TableSchemaMetadataValue":\n """Static constructor for a metadata value wrapping a table schema as\n :py:class:`TableSchemaMetadataValue`. Can be used as the value type\n for the `metadata` parameter for supported events.\n\n Example:\n .. code-block:: python\n\n schema = TableSchema(\n columns = [\n TableColumn(name="id", type="int"),\n TableColumn(name="status", type="bool"),\n ]\n )\n\n DagsterType(\n type_check_fn=some_validation_fn,\n name='MyTable',\n metadata={\n 'my_table_schema': MetadataValue.table_schema(schema),\n }\n )\n\n Args:\n schema (TableSchema): The table schema for a metadata entry.\n """\n return TableSchemaMetadataValue(schema)
\n\n
[docs] @public\n @staticmethod\n def null() -> "NullMetadataValue":\n """Static constructor for a metadata value representing null. Can be used as the value type\n for the `metadata` parameter for supported events.\n """\n return NullMetadataValue()
\n\n\n# ########################\n# ##### METADATA VALUE TYPES\n# ########################\n\n# NOTE: We have `type: ignore` in a few places below because mypy complains about an instance method\n# (e.g. `text`) overriding a static method on the superclass of the same name. This is not a concern\n# for us because these static methods should never be called on instances.\n\n# NOTE: `XMetadataValue` classes are serialized with a storage name of `XMetadataEntryData` to\n# maintain backward compatibility. See docstring of `whitelist_for_serdes` for more info.\n\n\n
[docs]@whitelist_for_serdes(storage_name="TextMetadataEntryData")\nclass TextMetadataValue(\n NamedTuple(\n "_TextMetadataValue",\n [\n ("text", PublicAttr[Optional[str]]),\n ],\n ),\n MetadataValue[str],\n):\n """Container class for text metadata entry data.\n\n Args:\n text (Optional[str]): The text data.\n """\n\n def __new__(cls, text: Optional[str]):\n return super(TextMetadataValue, cls).__new__(\n cls, check.opt_str_param(text, "text", default="")\n )\n\n @public\n @property\n def value(self) -> Optional[str]:\n """Optional[str]: The wrapped text data."""\n return self.text
\n\n\n
[docs]@whitelist_for_serdes(storage_name="UrlMetadataEntryData")\nclass UrlMetadataValue(\n NamedTuple(\n "_UrlMetadataValue",\n [\n ("url", PublicAttr[Optional[str]]),\n ],\n ),\n MetadataValue[str],\n):\n """Container class for URL metadata entry data.\n\n Args:\n url (Optional[str]): The URL as a string.\n """\n\n def __new__(cls, url: Optional[str]):\n return super(UrlMetadataValue, cls).__new__(\n cls, check.opt_str_param(url, "url", default="")\n )\n\n @public\n @property\n def value(self) -> Optional[str]:\n """Optional[str]: The wrapped URL."""\n return self.url
\n\n\n
[docs]@whitelist_for_serdes(storage_name="PathMetadataEntryData")\nclass PathMetadataValue(\n NamedTuple("_PathMetadataValue", [("path", PublicAttr[Optional[str]])]), MetadataValue[str]\n):\n """Container class for path metadata entry data.\n\n Args:\n path (Optional[str]): The path as a string or conforming to os.PathLike.\n """\n\n def __new__(cls, path: Optional[Union[str, os.PathLike]]):\n return super(PathMetadataValue, cls).__new__(\n cls, check.opt_path_param(path, "path", default="")\n )\n\n @public\n @property\n def value(self) -> Optional[str]:\n """Optional[str]: The wrapped path."""\n return self.path
\n\n\n
[docs]@whitelist_for_serdes(storage_name="NotebookMetadataEntryData")\nclass NotebookMetadataValue(\n NamedTuple("_NotebookMetadataValue", [("path", PublicAttr[Optional[str]])]), MetadataValue[str]\n):\n """Container class for notebook metadata entry data.\n\n Args:\n path (Optional[str]): The path to the notebook as a string or conforming to os.PathLike.\n """\n\n def __new__(cls, path: Optional[Union[str, os.PathLike]]):\n return super(NotebookMetadataValue, cls).__new__(\n cls, check.opt_path_param(path, "path", default="")\n )\n\n @public\n @property\n def value(self) -> Optional[str]:\n """Optional[str]: The wrapped path to the notebook as a string."""\n return self.path
\n\n\n
[docs]@whitelist_for_serdes(storage_name="JsonMetadataEntryData")\nclass JsonMetadataValue(\n NamedTuple(\n "_JsonMetadataValue",\n [\n ("data", PublicAttr[Optional[Union[Sequence[Any], Mapping[str, Any]]]]),\n ],\n ),\n MetadataValue[Union[Sequence[Any], Mapping[str, Any]]],\n):\n """Container class for JSON metadata entry data.\n\n Args:\n data (Union[Sequence[Any], Dict[str, Any]]): The JSON data.\n """\n\n def __new__(cls, data: Optional[Union[Sequence[Any], Mapping[str, Any]]]):\n data = check.opt_inst_param(data, "data", (Sequence, Mapping))\n try:\n # check that the value is JSON serializable\n seven.dumps(data)\n except TypeError:\n raise DagsterInvalidMetadata("Value is not JSON serializable.")\n return super(JsonMetadataValue, cls).__new__(cls, data)\n\n @public\n @property\n def value(self) -> Optional[Union[Sequence[Any], Mapping[str, Any]]]:\n """Optional[Union[Sequence[Any], Dict[str, Any]]]: The wrapped JSON data."""\n return self.data
\n\n\n
[docs]@whitelist_for_serdes(storage_name="MarkdownMetadataEntryData")\nclass MarkdownMetadataValue(\n NamedTuple(\n "_MarkdownMetadataValue",\n [\n ("md_str", PublicAttr[Optional[str]]),\n ],\n ),\n MetadataValue[str],\n):\n """Container class for markdown metadata entry data.\n\n Args:\n md_str (Optional[str]): The markdown as a string.\n """\n\n def __new__(cls, md_str: Optional[str]):\n return super(MarkdownMetadataValue, cls).__new__(\n cls, check.opt_str_param(md_str, "md_str", default="")\n )\n\n @public\n @property\n def value(self) -> Optional[str]:\n """Optional[str]: The wrapped markdown as a string."""\n return self.md_str
\n\n\n# This should be deprecated or fixed so that `value` does not return itself.\n
[docs]@whitelist_for_serdes(storage_name="PythonArtifactMetadataEntryData")\nclass PythonArtifactMetadataValue(\n NamedTuple(\n "_PythonArtifactMetadataValue",\n [\n ("module", PublicAttr[str]),\n ("name", PublicAttr[str]),\n ],\n ),\n MetadataValue["PythonArtifactMetadataValue"],\n):\n """Container class for python artifact metadata entry data.\n\n Args:\n module (str): The module where the python artifact can be found\n name (str): The name of the python artifact\n """\n\n def __new__(cls, module: str, name: str):\n return super(PythonArtifactMetadataValue, cls).__new__(\n cls, check.str_param(module, "module"), check.str_param(name, "name")\n )\n\n @public\n @property\n def value(self) -> Self:\n """PythonArtifactMetadataValue: Identity function."""\n return self
\n\n\n
[docs]@whitelist_for_serdes(storage_name="FloatMetadataEntryData")\nclass FloatMetadataValue(\n NamedTuple(\n "_FloatMetadataValue",\n [\n ("value", PublicAttr[Optional[float]]),\n ],\n ),\n MetadataValue[float],\n):\n """Container class for float metadata entry data.\n\n Args:\n value (Optional[float]): The float value.\n """\n\n def __new__(cls, value: Optional[float]):\n return super(FloatMetadataValue, cls).__new__(cls, check.opt_float_param(value, "value"))
\n\n\n
[docs]@whitelist_for_serdes(storage_name="IntMetadataEntryData")\nclass IntMetadataValue(\n NamedTuple(\n "_IntMetadataValue",\n [\n ("value", PublicAttr[Optional[int]]),\n ],\n ),\n MetadataValue[int],\n):\n """Container class for int metadata entry data.\n\n Args:\n value (Optional[int]): The int value.\n """\n\n def __new__(cls, value: Optional[int]):\n return super(IntMetadataValue, cls).__new__(cls, check.opt_int_param(value, "value"))
\n\n\n@whitelist_for_serdes(storage_name="BoolMetadataEntryData")\nclass BoolMetadataValue(\n NamedTuple("_BoolMetadataValue", [("value", PublicAttr[Optional[bool]])]),\n MetadataValue[bool],\n):\n """Container class for bool metadata entry data.\n\n Args:\n value (Optional[bool]): The bool value.\n """\n\n def __new__(cls, value: Optional[bool]):\n return super(BoolMetadataValue, cls).__new__(cls, check.opt_bool_param(value, "value"))\n\n\n
[docs]@whitelist_for_serdes(storage_name="DagsterPipelineRunMetadataEntryData")\nclass DagsterRunMetadataValue(\n NamedTuple(\n "_DagsterRunMetadataValue",\n [\n ("run_id", PublicAttr[str]),\n ],\n ),\n MetadataValue[str],\n):\n """Representation of a dagster run.\n\n Args:\n run_id (str): The run id\n """\n\n def __new__(cls, run_id: str):\n return super(DagsterRunMetadataValue, cls).__new__(cls, check.str_param(run_id, "run_id"))\n\n @public\n @property\n def value(self) -> str:\n """str: The wrapped run id."""\n return self.run_id
\n\n\n
[docs]@whitelist_for_serdes(storage_name="DagsterAssetMetadataEntryData")\nclass DagsterAssetMetadataValue(\n NamedTuple("_DagsterAssetMetadataValue", [("asset_key", PublicAttr["AssetKey"])]),\n MetadataValue["AssetKey"],\n):\n """Representation of a dagster asset.\n\n Args:\n asset_key (AssetKey): The dagster asset key\n """\n\n def __new__(cls, asset_key: "AssetKey"):\n from dagster._core.definitions.events import AssetKey\n\n return super(DagsterAssetMetadataValue, cls).__new__(\n cls, check.inst_param(asset_key, "asset_key", AssetKey)\n )\n\n @public\n @property\n def value(self) -> "AssetKey":\n """AssetKey: The wrapped :py:class:`AssetKey`."""\n return self.asset_key
\n\n\n# This should be deprecated or fixed so that `value` does not return itself.\n
[docs]@experimental\n@whitelist_for_serdes(storage_name="TableMetadataEntryData")\nclass TableMetadataValue(\n NamedTuple(\n "_TableMetadataValue",\n [\n ("records", PublicAttr[Sequence[TableRecord]]),\n ("schema", PublicAttr[TableSchema]),\n ],\n ),\n MetadataValue["TableMetadataValue"],\n):\n """Container class for table metadata entry data.\n\n Args:\n records (TableRecord): The data as a list of records (i.e. rows).\n schema (Optional[TableSchema]): A schema for the table.\n """\n\n
[docs] @public\n @staticmethod\n def infer_column_type(value: object) -> str:\n """str: Infer the :py:class:`TableSchema` column type that will be used for a value."""\n if isinstance(value, bool):\n return "bool"\n elif isinstance(value, int):\n return "int"\n elif isinstance(value, float):\n return "float"\n else:\n return "string"
\n\n def __new__(cls, records: Sequence[TableRecord], schema: Optional[TableSchema]):\n check.sequence_param(records, "records", of_type=TableRecord)\n check.opt_inst_param(schema, "schema", TableSchema)\n\n if len(records) == 0:\n schema = check.not_none(schema, "schema must be provided if records is empty")\n else:\n columns = set(records[0].data.keys())\n for record in records[1:]:\n check.invariant(\n set(record.data.keys()) == columns, "All records must have the same fields"\n )\n schema = schema or TableSchema(\n columns=[\n TableColumn(name=k, type=TableMetadataValue.infer_column_type(v))\n for k, v in records[0].data.items()\n ]\n )\n\n return super(TableMetadataValue, cls).__new__(\n cls,\n records,\n schema,\n )\n\n @public\n @property\n def value(self) -> Self:\n """TableMetadataValue: Identity function."""\n return self
\n\n\n
[docs]@whitelist_for_serdes(storage_name="TableSchemaMetadataEntryData")\nclass TableSchemaMetadataValue(\n NamedTuple("_TableSchemaMetadataValue", [("schema", PublicAttr[TableSchema])]),\n MetadataValue[TableSchema],\n):\n """Representation of a schema for arbitrary tabular data.\n\n Args:\n schema (TableSchema): The dictionary containing the schema representation.\n """\n\n def __new__(cls, schema: TableSchema):\n return super(TableSchemaMetadataValue, cls).__new__(\n cls, check.inst_param(schema, "schema", TableSchema)\n )\n\n @public\n @property\n def value(self) -> TableSchema:\n """TableSchema: The wrapped :py:class:`TableSchema`."""\n return self.schema
\n\n\n@whitelist_for_serdes(storage_name="NullMetadataEntryData")\nclass NullMetadataValue(NamedTuple("_NullMetadataValue", []), MetadataValue[None]):\n """Representation of null."""\n\n @public\n @property\n def value(self) -> None:\n """None: The wrapped null value."""\n return None\n\n\n# ########################\n# ##### METADATA BACKCOMPAT\n# ########################\n\n# Metadata used to be represented as a `List[MetadataEntry]`, but that class has been deleted. But\n# we still serialize metadata dicts to the serialized representation of `List[MetadataEntry]` for\n# backcompat purposes.\n\n\nclass MetadataFieldSerializer(FieldSerializer):\n """Converts between metadata dict (new) and metadata entries list (old)."""\n\n storage_name = "metadata_entries"\n loaded_name = "metadata"\n\n def pack(\n self,\n metadata_dict: Mapping[str, MetadataValue],\n whitelist_map: WhitelistMap,\n descent_path: str,\n ) -> Sequence[Mapping[str, Any]]:\n return [\n {\n "__class__": "EventMetadataEntry",\n "label": k,\n # MetadataValue itself can't inherit from NamedTuple and so isn't a PackableValue,\n # but one of its subclasses will always be returned here.\n "entry_data": pack_value(v, whitelist_map, descent_path), # type: ignore\n "description": None,\n }\n for k, v in metadata_dict.items()\n ]\n\n def unpack(\n self,\n metadata_entries: List["MetadataEntry"],\n whitelist_map: WhitelistMap,\n context: UnpackContext,\n ) -> Mapping[str, MetadataValue]:\n return {e.label: e.entry_data for e in metadata_entries}\n\n\nT_MetadataValue = TypeVar("T_MetadataValue", bound=MetadataValue, covariant=True)\n\n\n# NOTE: MetadataEntry is no longer accessible via the public API-- all metadata APIs use metadata\n# dicts. This clas shas only been preserved to adhere strictly to our backcompat guarantees. It is\n# still instantiated in the above `MetadataFieldSerializer` but that can easily be changed.\n
[docs]@deprecated(\n breaking_version="2.0",\n additional_warn_text="Please use a dict with `MetadataValue` values instead.",\n)\n@deprecated_param(\n param="entry_data", breaking_version="2.0", additional_warn_text="Use `value` instead."\n)\n@whitelist_for_serdes(storage_name="EventMetadataEntry")\nclass MetadataEntry(\n NamedTuple(\n "_MetadataEntry",\n [\n ("label", PublicAttr[str]),\n ("description", PublicAttr[Optional[str]]),\n ("entry_data", PublicAttr[MetadataValue]),\n ],\n ),\n Generic[T_MetadataValue],\n):\n """A structure for describing metadata for Dagster events.\n\n .. note:: This class is no longer usable in any Dagster API, and will be completely removed in 2.0.\n\n Lists of objects of this type can be passed as arguments to Dagster events and will be displayed\n in the Dagster UI and other tooling.\n\n Should be yielded from within an IO manager to append metadata for a given input/output event.\n For other event types, passing a dict with `MetadataValue` values to the `metadata` argument\n is preferred.\n\n Args:\n label (str): Short display label for this metadata entry.\n description (Optional[str]): A human-readable description of this metadata entry.\n value (MetadataValue): Typed metadata entry data. The different types allow\n for customized display in tools like the Dagster UI.\n """\n\n def __new__(\n cls,\n label: str,\n description: Optional[str] = None,\n entry_data: Optional["RawMetadataValue"] = None,\n value: Optional["RawMetadataValue"] = None,\n ):\n value = cast(\n RawMetadataValue,\n normalize_renamed_param(\n new_val=value,\n new_arg="value",\n old_val=entry_data,\n old_arg="entry_data",\n ),\n )\n value = normalize_metadata_value(value)\n\n return super(MetadataEntry, cls).__new__(\n cls,\n check.str_param(label, "label"),\n check.opt_str_param(description, "description"),\n check.inst_param(value, "value", MetadataValue),\n )\n\n @property\n def value(self):\n """Alias of `entry_data`."""\n return self.entry_data
\n
", "current_page_name": "_modules/dagster/_core/definitions/metadata", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "table": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.metadata.table

\nfrom typing import Mapping, NamedTuple, Optional, Sequence, Union, cast\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, experimental, public\nfrom dagster._serdes.serdes import (\n    whitelist_for_serdes,\n)\n\n# ########################\n# ##### TABLE RECORD\n# ########################\n\n\n
[docs]@experimental\n@whitelist_for_serdes\nclass TableRecord(\n NamedTuple("TableRecord", [("data", PublicAttr[Mapping[str, Union[str, int, float, bool]]])])\n):\n """Represents one record in a table. Field keys are arbitrary strings-- field values must be\n strings, integers, floats, or bools.\n """\n\n def __new__(cls, data: Mapping[str, Union[str, int, float, bool]]):\n check.dict_param(\n data,\n "data",\n value_type=(str, float, int, bool, type(None)),\n additional_message="Record fields must be one of types: (str, float, int, bool)",\n )\n return super(TableRecord, cls).__new__(cls, data=data)
\n\n\n# ########################\n# ##### TABLE SCHEMA\n# ########################\n\n\n
[docs]@whitelist_for_serdes\nclass TableSchema(\n NamedTuple(\n "TableSchema",\n [\n ("columns", PublicAttr[Sequence["TableColumn"]]),\n ("constraints", PublicAttr["TableConstraints"]),\n ],\n )\n):\n """Representation of a schema for tabular data.\n\n Schema is composed of two parts:\n\n - A required list of columns (`TableColumn`). Each column specifies a\n `name`, `type`, set of `constraints`, and (optional) `description`. `type`\n defaults to `string` if unspecified. Column constraints\n (`TableColumnConstraints`) consist of boolean properties `unique` and\n `nullable`, as well as a list of strings `other` containing string\n descriptions of all additional constraints (e.g. `"<= 5"`).\n - An optional list of table-level constraints (`TableConstraints`). A\n table-level constraint cannot be expressed in terms of a single column,\n e.g. col a > col b. Presently, all table-level constraints must be\n expressed as strings under the `other` attribute of a `TableConstraints`\n object.\n\n .. code-block:: python\n\n # example schema\n TableSchema(\n constraints = TableConstraints(\n other = [\n "foo > bar",\n ],\n ),\n columns = [\n TableColumn(\n name = "foo",\n type = "string",\n description = "Foo description",\n constraints = TableColumnConstraints(\n required = True,\n other = [\n "starts with the letter 'a'",\n ],\n ),\n ),\n TableColumn(\n name = "bar",\n type = "string",\n ),\n TableColumn(\n name = "baz",\n type = "custom_type",\n constraints = TableColumnConstraints(\n unique = True,\n )\n ),\n ],\n )\n\n Args:\n columns (List[TableColumn]): The columns of the table.\n constraints (Optional[TableConstraints]): The constraints of the table.\n """\n\n def __new__(\n cls,\n columns: Sequence["TableColumn"],\n constraints: Optional["TableConstraints"] = None,\n ):\n return super(TableSchema, cls).__new__(\n cls,\n columns=check.sequence_param(columns, "columns", of_type=TableColumn),\n constraints=check.opt_inst_param(\n constraints, "constraints", TableConstraints, default=_DEFAULT_TABLE_CONSTRAINTS\n ),\n )\n\n
[docs] @public\n @staticmethod\n def from_name_type_dict(name_type_dict: Mapping[str, str]):\n """Constructs a TableSchema from a dictionary whose keys are column names and values are the\n names of data types of those columns.\n """\n return TableSchema(\n columns=[\n TableColumn(name=name, type=type_str) for name, type_str in name_type_dict.items()\n ]\n )
\n\n\n# ########################\n# ##### TABLE CONSTRAINTS\n# ########################\n\n\n
[docs]@whitelist_for_serdes\nclass TableConstraints(\n NamedTuple(\n "TableConstraints",\n [\n ("other", PublicAttr[Sequence[str]]),\n ],\n )\n):\n """Descriptor for "table-level" constraints. Presently only one property,\n `other` is supported. This contains strings describing arbitrary\n table-level constraints. A table-level constraint is a constraint defined\n in terms of multiple columns (e.g. col_A > col_B) or in terms of rows.\n\n Args:\n other (List[str]): Descriptions of arbitrary table-level constraints.\n """\n\n def __new__(\n cls,\n other: Sequence[str],\n ):\n return super(TableConstraints, cls).__new__(\n cls,\n other=check.sequence_param(other, "other", of_type=str),\n )
\n\n\n_DEFAULT_TABLE_CONSTRAINTS = TableConstraints(other=[])\n\n# ########################\n# ##### TABLE COLUMN\n# ########################\n\n\n
[docs]@whitelist_for_serdes\nclass TableColumn(\n NamedTuple(\n "TableColumn",\n [\n ("name", PublicAttr[str]),\n ("type", PublicAttr[str]),\n ("description", PublicAttr[Optional[str]]),\n ("constraints", PublicAttr["TableColumnConstraints"]),\n ],\n )\n):\n """Descriptor for a table column. The only property that must be specified\n by the user is `name`. If no `type` is specified, `string` is assumed. If\n no `constraints` are specified, the column is assumed to be nullable\n (i.e. `nullable = True`) and have no other constraints beyond the data type.\n\n Args:\n name (str): The name of the column.\n type (Optional[str]): The type of the column. Can be an arbitrary\n string. Defaults to `"string"`.\n description (Optional[str]): Description of this column. Defaults to `None`.\n constraints (Optional[TableColumnConstraints]): Column-level constraints.\n If unspecified, column is nullable with no constraints.\n """\n\n def __new__(\n cls,\n name: str,\n type: str = "string", # noqa: A002\n description: Optional[str] = None,\n constraints: Optional["TableColumnConstraints"] = None,\n ):\n return super(TableColumn, cls).__new__(\n cls,\n name=check.str_param(name, "name"),\n type=check.str_param(type, "type"),\n description=check.opt_str_param(description, "description"),\n constraints=cast(\n "TableColumnConstraints",\n check.opt_inst_param(\n constraints,\n "constraints",\n TableColumnConstraints,\n default=_DEFAULT_TABLE_COLUMN_CONSTRAINTS,\n ),\n ),\n )
\n\n\n# ########################\n# ##### TABLE COLUMN CONSTRAINTS\n# ########################\n\n\n
[docs]@whitelist_for_serdes\nclass TableColumnConstraints(\n NamedTuple(\n "TableColumnConstraints",\n [\n ("nullable", PublicAttr[bool]),\n ("unique", PublicAttr[bool]),\n ("other", PublicAttr[Optional[Sequence[str]]]),\n ],\n )\n):\n """Descriptor for a table column's constraints. Nullability and uniqueness are specified with\n boolean properties. All other constraints are described using arbitrary strings under the\n `other` property.\n\n Args:\n nullable (Optional[bool]): If true, this column can hold null values.\n unique (Optional[bool]): If true, all values in this column must be unique.\n other (List[str]): Descriptions of arbitrary column-level constraints\n not expressible by the predefined properties.\n """\n\n def __new__(\n cls,\n nullable: bool = True,\n unique: bool = False,\n other: Optional[Sequence[str]] = None,\n ):\n return super(TableColumnConstraints, cls).__new__(\n cls,\n nullable=check.bool_param(nullable, "nullable"),\n unique=check.bool_param(unique, "unique"),\n other=check.opt_sequence_param(other, "other"),\n )
\n\n\n_DEFAULT_TABLE_COLUMN_CONSTRAINTS = TableColumnConstraints()\n
", "current_page_name": "_modules/dagster/_core/definitions/metadata/table", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}, {"link": "../", "title": "dagster._core.definitions.metadata"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.metadata.table"}, "title": "dagster._core.definitions.metadata"}, "multi_asset_sensor_definition": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.multi_asset_sensor_definition

\nimport inspect\nimport json\nfrom collections import OrderedDict, defaultdict\nfrom typing import (\n    TYPE_CHECKING,\n    Callable,\n    Dict,\n    Iterable,\n    Iterator,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Union,\n    cast,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import experimental, public\nfrom dagster._core.definitions.asset_selection import AssetSelection\nfrom dagster._core.definitions.assets import AssetsDefinition\nfrom dagster._core.definitions.partition import PartitionsDefinition\nfrom dagster._core.definitions.resource_annotation import get_resource_args\nfrom dagster._core.definitions.resource_definition import ResourceDefinition\nfrom dagster._core.definitions.scoped_resources_builder import ScopedResourcesBuilder\nfrom dagster._core.errors import (\n    DagsterInvalidDefinitionError,\n    DagsterInvalidInvocationError,\n    DagsterInvariantViolationError,\n)\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._core.instance.ref import InstanceRef\nfrom dagster._utils import normalize_to_repository\n\nfrom .events import AssetKey\nfrom .run_request import RunRequest, SensorResult, SkipReason\nfrom .sensor_definition import (\n    DefaultSensorStatus,\n    SensorDefinition,\n    SensorEvaluationContext,\n    SensorType,\n    get_context_param_name,\n    get_sensor_context_from_args_or_kwargs,\n    validate_and_get_resource_dict,\n)\nfrom .target import ExecutableDefinition\nfrom .utils import check_valid_name\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.definitions_class import Definitions\n    from dagster._core.definitions.repository_definition import RepositoryDefinition\n    from dagster._core.storage.event_log.base import EventLogRecord\n\nMAX_NUM_UNCONSUMED_EVENTS = 25\n\n\nclass MultiAssetSensorAssetCursorComponent(\n    NamedTuple(\n        "_MultiAssetSensorAssetCursorComponent",\n        [\n            ("latest_consumed_event_partition", Optional[str]),\n            ("latest_consumed_event_id", Optional[int]),\n            ("trailing_unconsumed_partitioned_event_ids", Dict[str, int]),\n        ],\n    )\n):\n    """A cursor component that is used to track the cursor for a particular asset in a multi-asset\n    sensor.\n\n    Here's an illustration to help explain how this representation works:\n\n    partition_1  ---|----------a----\n    partition_2  -t-----|-x---------\n    partition_3  ----t------|---a---\n\n\n    The "|", "a", "t", and "x" characters represent materialization events.\n    The x-axis is storage_id, which is basically time. The cursor has been advanced to the "|" event\n    for each partition. latest_evaluated_event_partition would be "partition_3", and\n    "latest_evaluated_event_id" would be the storage_id of the "|" event for partition_3.\n\n    The "t" events aren't directly represented in the cursor, because they trail the event that the\n    the cursor for their partition has advanced to. The "a" events aren't directly represented\n    in the cursor, because they occurred after the "latest_evaluated_event_id".  
The "x" event is\n    included in "unevaluated_partitioned_event_ids", because it's after the event that the cursor\n    for its partition has advanced to, but trails "latest_evaluated_event_id".\n\n    Attributes:\n        latest_consumed_event_partition (Optional[str]): The partition of the latest consumed event\n            for this asset.\n        latest_consumed_event_id (Optional[int]): The event ID of the latest consumed event for\n            this asset.\n        trailing_unconsumed_partitioned_event_ids (Dict[str, int]): A mapping containing\n            the partition key mapped to the latest unconsumed materialization event for this\n            partition with an ID less than latest_consumed_event_id.\n    """\n\n    def __new__(\n        cls,\n        latest_consumed_event_partition,\n        latest_consumed_event_id,\n        trailing_unconsumed_partitioned_event_ids,\n    ):\n        return super(MultiAssetSensorAssetCursorComponent, cls).__new__(\n            cls,\n            latest_consumed_event_partition=check.opt_str_param(\n                latest_consumed_event_partition, "latest_consumed_event_partition"\n            ),\n            latest_consumed_event_id=check.opt_int_param(\n                latest_consumed_event_id, "latest_consumed_event_id"\n            ),\n            trailing_unconsumed_partitioned_event_ids=check.dict_param(\n                trailing_unconsumed_partitioned_event_ids,\n                "trailing_unconsumed_partitioned_event_ids",\n                key_type=str,\n                value_type=int,\n            ),\n        )\n\n\nclass MultiAssetSensorContextCursor:\n    # Tracks the state of the cursor within the tick, created for utility purposes.\n    # Must call MultiAssetSensorEvaluationContext._update_cursor_after_evaluation at end of tick\n    # to serialize the cursor.\n    def __init__(self, cursor: Optional[str], context: "MultiAssetSensorEvaluationContext"):\n        loaded_cursor = json.loads(cursor) if cursor else {}\n        self._cursor_component_by_asset_key: Dict[str, MultiAssetSensorAssetCursorComponent] = {}\n\n        # The initial latest consumed event ID at the beginning of the tick\n        self.initial_latest_consumed_event_ids_by_asset_key: Dict[str, Optional[int]] = {}\n\n        for str_asset_key, cursor_list in loaded_cursor.items():\n            if len(cursor_list) != 3:\n                # In this case, the cursor object is not a multi asset sensor asset cursor\n                # component. 
This cursor is maintained by the asset reconciliation sensor.\n                break\n            else:\n                partition_key, event_id, trailing_unconsumed_partitioned_event_ids = cursor_list\n                self._cursor_component_by_asset_key[str_asset_key] = (\n                    MultiAssetSensorAssetCursorComponent(\n                        latest_consumed_event_partition=partition_key,\n                        latest_consumed_event_id=event_id,\n                        trailing_unconsumed_partitioned_event_ids=trailing_unconsumed_partitioned_event_ids,\n                    )\n                )\n\n                self.initial_latest_consumed_event_ids_by_asset_key[str_asset_key] = event_id\n\n        check.dict_param(self._cursor_component_by_asset_key, "unpacked_cursor", key_type=str)\n        self._context = context\n\n    def get_cursor_for_asset(self, asset_key: AssetKey) -> MultiAssetSensorAssetCursorComponent:\n        return self._cursor_component_by_asset_key.get(\n            str(asset_key), MultiAssetSensorAssetCursorComponent(None, None, {})\n        )\n\n    def get_stringified_cursor(self) -> str:\n        return json.dumps(self._cursor_component_by_asset_key)\n\n\n
[docs]@experimental\nclass MultiAssetSensorEvaluationContext(SensorEvaluationContext):\n """The context object available as the argument to the evaluation function of a\n :py:class:`dagster.MultiAssetSensorDefinition`.\n\n Users should not instantiate this object directly. To construct a\n `MultiAssetSensorEvaluationContext` for testing purposes, use :py:func:`dagster.\n build_multi_asset_sensor_context`.\n\n The `MultiAssetSensorEvaluationContext` contains a cursor object that tracks the state of\n consumed event logs for each monitored asset. For each asset, the cursor stores the storage ID\n of the latest materialization that has been marked as "consumed" (via a call to `advance_cursor`)\n in a `latest_consumed_event_id` field.\n\n For each monitored asset, the cursor will store the latest unconsumed event ID for up to 25\n partitions. Each event ID must be before the `latest_consumed_event_id` field for the asset.\n\n Events marked as consumed via `advance_cursor` will be returned in future ticks until they\n are marked as consumed.\n\n To update the cursor to the latest materialization and clear the unconsumed events, call\n `advance_all_cursors`.\n\n Attributes:\n monitored_assets (Union[Sequence[AssetKey], AssetSelection]): The assets monitored\n by the sensor. If an AssetSelection object is provided, it will only apply to assets\n within the Definitions that this sensor is part of.\n repository_def (Optional[RepositoryDefinition]): The repository that the sensor belongs to.\n If needed by the sensor top-level resource definitions will be pulled from this repository.\n You can provide either this or `definitions`.\n instance_ref (Optional[InstanceRef]): The serialized instance configured to run the schedule\n cursor (Optional[str]): The cursor, passed back from the last sensor evaluation via\n the cursor attribute of SkipReason and RunRequest. Must be a dictionary of asset key\n strings to a stringified tuple of (latest_event_partition, latest_event_storage_id,\n trailing_unconsumed_partitioned_event_ids).\n last_completion_time (float): DEPRECATED The last time that the sensor was consumed (UTC).\n last_run_key (str): DEPRECATED The run key of the RunRequest most recently created by this\n sensor. Use the preferred `cursor` attribute instead.\n repository_name (Optional[str]): The name of the repository that the sensor belongs to.\n instance (Optional[DagsterInstance]): The deserialized instance can also be passed in\n directly (primarily useful in testing contexts).\n definitions (Optional[Definitions]): `Definitions` object that the sensor is defined in.\n If needed by the sensor, top-level resource definitions will be pulled from these\n definitions. You can provide either this or `repository_def`.\n\n Example:\n .. 
code-block:: python\n\n from dagster import multi_asset_sensor, MultiAssetSensorEvaluationContext\n\n @multi_asset_sensor(monitored_assets=[AssetKey("asset_1), AssetKey("asset_2)])\n def the_sensor(context: MultiAssetSensorEvaluationContext):\n ...\n """\n\n def __init__(\n self,\n instance_ref: Optional[InstanceRef],\n last_completion_time: Optional[float],\n last_run_key: Optional[str],\n cursor: Optional[str],\n repository_name: Optional[str],\n repository_def: Optional["RepositoryDefinition"],\n monitored_assets: Union[Sequence[AssetKey], AssetSelection],\n instance: Optional[DagsterInstance] = None,\n resource_defs: Optional[Mapping[str, ResourceDefinition]] = None,\n definitions: Optional["Definitions"] = None,\n ):\n from dagster._core.definitions.definitions_class import Definitions\n from dagster._core.definitions.repository_definition import RepositoryDefinition\n\n self._repository_def = normalize_to_repository(\n check.opt_inst_param(definitions, "definitions", Definitions),\n check.opt_inst_param(repository_def, "repository_def", RepositoryDefinition),\n )\n self._monitored_asset_keys: Sequence[AssetKey]\n if isinstance(monitored_assets, AssetSelection):\n repo_assets = self._repository_def.assets_defs_by_key.values()\n repo_source_assets = self._repository_def.source_assets_by_key.values()\n self._monitored_asset_keys = list(\n monitored_assets.resolve([*repo_assets, *repo_source_assets])\n )\n else:\n self._monitored_asset_keys = monitored_assets\n\n self._assets_by_key: Dict[AssetKey, Optional[AssetsDefinition]] = {}\n self._partitions_def_by_asset_key: Dict[AssetKey, Optional[PartitionsDefinition]] = {}\n for asset_key in self._monitored_asset_keys:\n assets_def = self._repository_def.assets_defs_by_key.get(asset_key)\n self._assets_by_key[asset_key] = assets_def\n\n source_asset_def = self._repository_def.source_assets_by_key.get(asset_key)\n self._partitions_def_by_asset_key[asset_key] = (\n assets_def.partitions_def\n if assets_def\n else source_asset_def.partitions_def if source_asset_def else None\n )\n\n # Cursor object with utility methods for updating and retrieving cursor information.\n # At the end of each tick, must call update_cursor_after_evaluation to update the serialized\n # cursor.\n self._unpacked_cursor = MultiAssetSensorContextCursor(cursor, self)\n self._cursor_advance_state_mutation = MultiAssetSensorCursorAdvances()\n\n self._initial_unconsumed_events_by_id: Dict[int, EventLogRecord] = {}\n self._fetched_initial_unconsumed_events = False\n\n super(MultiAssetSensorEvaluationContext, self).__init__(\n instance_ref=instance_ref,\n last_completion_time=last_completion_time,\n last_run_key=last_run_key,\n cursor=cursor,\n repository_name=repository_name,\n instance=instance,\n repository_def=repository_def,\n resources=resource_defs,\n )\n\n def _cache_initial_unconsumed_events(self) -> None:\n from dagster._core.events import DagsterEventType\n from dagster._core.storage.event_log.base import EventRecordsFilter\n\n # This method caches the initial unconsumed events for each asset key. 
To generate the\n # current unconsumed events, call get_trailing_unconsumed_events instead.\n if self._fetched_initial_unconsumed_events:\n return\n\n for asset_key in self._monitored_asset_keys:\n unconsumed_event_ids = list(\n self._get_cursor(asset_key).trailing_unconsumed_partitioned_event_ids.values()\n )\n if unconsumed_event_ids:\n event_records = self.instance.get_event_records(\n EventRecordsFilter(\n event_type=DagsterEventType.ASSET_MATERIALIZATION,\n storage_ids=unconsumed_event_ids,\n )\n )\n self._initial_unconsumed_events_by_id.update(\n {event_record.storage_id: event_record for event_record in event_records}\n )\n\n self._fetched_initial_unconsumed_events = True\n\n def _get_unconsumed_events_with_ids(\n self, event_ids: Sequence[int]\n ) -> Sequence["EventLogRecord"]:\n self._cache_initial_unconsumed_events()\n unconsumed_events = []\n for event_id in sorted(event_ids):\n event = self._initial_unconsumed_events_by_id.get(event_id)\n unconsumed_events.extend([event] if event else [])\n\n return unconsumed_events\n\n
[docs] @public\n def get_trailing_unconsumed_events(self, asset_key: AssetKey) -> Sequence["EventLogRecord"]:\n """Fetches the unconsumed events for a given asset key. Returns only events\n before the latest consumed event ID for the given asset. To mark an event as consumed,\n pass the event to `advance_cursor`. Returns events in ascending order by storage ID.\n\n Args:\n asset_key (AssetKey): The asset key to get unconsumed events for.\n\n Returns:\n Sequence[EventLogRecord]: The unconsumed events for the given asset key.\n """\n check.inst_param(asset_key, "asset_key", AssetKey)\n\n return self._get_unconsumed_events_with_ids(\n list(self._get_cursor(asset_key).trailing_unconsumed_partitioned_event_ids.values())\n )
\n\n def _get_partitions_after_cursor(self, asset_key: AssetKey) -> Sequence[str]:\n asset_key = check.inst_param(asset_key, "asset_key", AssetKey)\n partition_key = self._get_cursor(asset_key).latest_consumed_event_partition\n\n partitions_def = self._partitions_def_by_asset_key.get(asset_key)\n\n if not isinstance(partitions_def, PartitionsDefinition):\n raise DagsterInvalidInvocationError(f"No partitions defined for asset key {asset_key}")\n\n partitions_to_fetch = list(\n partitions_def.get_partition_keys(dynamic_partitions_store=self.instance)\n )\n\n if partition_key is not None:\n # Return partitions after the cursor partition, not including the cursor partition\n partitions_to_fetch = partitions_to_fetch[\n partitions_to_fetch.index(partition_key) + 1 :\n ]\n return partitions_to_fetch\n\n def update_cursor_after_evaluation(self) -> None:\n """Updates the cursor after the sensor evaluation function has been called. This method\n should be called at most once per evaluation.\n """\n new_cursor = self._cursor_advance_state_mutation.get_cursor_with_advances(\n self, self._unpacked_cursor\n )\n\n if new_cursor is not None:\n # The cursor was advanced during this tick, so persist the new cursor and reset per-tick state\n self._cursor = new_cursor\n self._unpacked_cursor = MultiAssetSensorContextCursor(new_cursor, self)\n self._cursor_advance_state_mutation = MultiAssetSensorCursorAdvances()\n self._fetched_initial_unconsumed_events = False\n\n
[docs] @public\n def latest_materialization_records_by_key(\n self,\n asset_keys: Optional[Sequence[AssetKey]] = None,\n ) -> Mapping[AssetKey, Optional["EventLogRecord"]]:\n """Fetches the most recent materialization event record for each asset in asset_keys.\n Only fetches events after the latest consumed event ID for the given asset key.\n\n Args:\n asset_keys (Optional[Sequence[AssetKey]]): list of asset keys to fetch events for. If\n not specified, the latest materialization will be fetched for all assets the\n multi_asset_sensor monitors.\n\n Returns: Mapping of AssetKey to EventLogRecord where the EventLogRecord is the latest\n materialization event for the asset. If there is no materialization event for the asset,\n the value in the mapping will be None.\n """\n # Do not evaluate unconsumed events, only events newer than the cursor\n # if there are no new events after the cursor, the cursor points to the most\n # recent event.\n\n if asset_keys is None:\n asset_keys = self._monitored_asset_keys\n else:\n asset_keys = check.opt_sequence_param(asset_keys, "asset_keys", of_type=AssetKey)\n\n asset_records = self.instance.get_asset_records(asset_keys)\n\n asset_event_records: Dict[AssetKey, Optional[EventLogRecord]] = {\n asset_key: None for asset_key in asset_keys\n }\n for record in asset_records:\n if (\n record.asset_entry.last_materialization_record\n and record.asset_entry.last_materialization_record.storage_id\n > (self._get_cursor(record.asset_entry.asset_key).latest_consumed_event_id or 0)\n ):\n asset_event_records[record.asset_entry.asset_key] = (\n record.asset_entry.last_materialization_record\n )\n\n return asset_event_records
\n\n
[docs] @public\n def materialization_records_for_key(\n self, asset_key: AssetKey, limit: Optional[int] = None\n ) -> Iterable["EventLogRecord"]:\n """Fetches asset materialization event records for asset_key, with the earliest event first.\n\n Only fetches events after the latest consumed event ID for the given asset key.\n\n Args:\n asset_key (AssetKey): The asset to fetch materialization events for\n limit (Optional[int]): The number of events to fetch\n """\n from dagster._core.events import DagsterEventType\n from dagster._core.storage.event_log.base import EventRecordsFilter\n\n asset_key = check.inst_param(asset_key, "asset_key", AssetKey)\n if asset_key not in self._assets_by_key:\n raise DagsterInvalidInvocationError(f"Asset key {asset_key} not monitored by sensor.")\n\n events = list(\n self.instance.get_event_records(\n EventRecordsFilter(\n event_type=DagsterEventType.ASSET_MATERIALIZATION,\n asset_key=asset_key,\n after_cursor=self._get_cursor(asset_key).latest_consumed_event_id,\n ),\n ascending=True,\n limit=limit,\n )\n )\n\n return events
\n\n def _get_cursor(self, asset_key: AssetKey) -> MultiAssetSensorAssetCursorComponent:\n """Returns the MultiAssetSensorAssetCursorComponent for the asset key.\n\n For more information, view the docstring for the MultiAssetSensorAssetCursorComponent class.\n """\n check.inst_param(asset_key, "asset_key", AssetKey)\n\n return self._unpacked_cursor.get_cursor_for_asset(asset_key)\n\n
[docs] @public\n def latest_materialization_records_by_partition(\n self,\n asset_key: AssetKey,\n after_cursor_partition: Optional[bool] = False,\n ) -> Mapping[str, "EventLogRecord"]:\n """Given an asset, returns a mapping of partition key to the latest materialization event\n for that partition. Fetches only materializations that have not been marked as "consumed"\n via a call to `advance_cursor`.\n\n Args:\n asset_key (AssetKey): The asset to fetch events for.\n after_cursor_partition (Optional[bool]): If True, only materializations with partitions\n after the cursor's current partition will be returned. By default, set to False.\n\n Returns:\n Mapping[str, EventLogRecord]:\n Mapping of AssetKey to a mapping of partitions to EventLogRecords where the\n EventLogRecord is the most recent materialization event for the partition.\n The mapping preserves the order that the materializations occurred.\n\n Example:\n .. code-block:: python\n\n @asset(partitions_def=DailyPartitionsDefinition("2022-07-01"))\n def july_asset():\n return 1\n\n @multi_asset_sensor(asset_keys=[july_asset.key])\n def my_sensor(context):\n context.latest_materialization_records_by_partition(july_asset.key)\n\n # After materializing july_asset for 2022-07-05, latest_materialization_by_partition\n # returns {"2022-07-05": EventLogRecord(...)}\n\n """\n from dagster._core.events import DagsterEventType\n from dagster._core.storage.event_log.base import EventLogRecord, EventRecordsFilter\n\n asset_key = check.inst_param(asset_key, "asset_key", AssetKey)\n\n if asset_key not in self._assets_by_key:\n raise DagsterInvalidInvocationError(\n f"Asset key {asset_key} not monitored in sensor definition"\n )\n\n partitions_def = self._partitions_def_by_asset_key.get(asset_key)\n if not isinstance(partitions_def, PartitionsDefinition):\n raise DagsterInvariantViolationError(\n "Cannot get latest materialization by partition for assets with no partitions"\n )\n\n partitions_to_fetch = (\n self._get_partitions_after_cursor(asset_key)\n if after_cursor_partition\n else list(partitions_def.get_partition_keys(dynamic_partitions_store=self.instance))\n )\n\n # Retain ordering of materializations\n materialization_by_partition: Dict[str, EventLogRecord] = OrderedDict()\n\n # Add unconsumed events to the materialization by partition dictionary\n # These events came before the cursor, so should be inserted in storage ID ascending order\n for unconsumed_event in sorted(\n self._get_unconsumed_events_with_ids(\n list(self._get_cursor(asset_key).trailing_unconsumed_partitioned_event_ids.values())\n )\n ):\n partition = unconsumed_event.partition_key\n if isinstance(partition, str) and partition in partitions_to_fetch:\n if partition in materialization_by_partition:\n # Remove partition to ensure materialization_by_partition preserves\n # the order of materializations\n materialization_by_partition.pop(partition)\n # Add partition and materialization to the end of the OrderedDict\n materialization_by_partition[partition] = unconsumed_event\n\n partition_materializations = self.instance.get_event_records(\n EventRecordsFilter(\n event_type=DagsterEventType.ASSET_MATERIALIZATION,\n asset_key=asset_key,\n asset_partitions=partitions_to_fetch,\n after_cursor=self._get_cursor(asset_key).latest_consumed_event_id,\n ),\n ascending=True,\n )\n for materialization in partition_materializations:\n partition = materialization.partition_key\n\n if isinstance(partition, str):\n if partition in materialization_by_partition:\n # Remove partition to ensure 
materialization_by_partition preserves\n # the order of materializations\n materialization_by_partition.pop(partition)\n # Add partition and materialization to the end of the OrderedDict\n materialization_by_partition[partition] = materialization\n\n return materialization_by_partition
\n\n
[docs] @public\n def latest_materialization_records_by_partition_and_asset(\n self,\n ) -> Mapping[str, Mapping[AssetKey, "EventLogRecord"]]:\n """Finds the most recent unconsumed materialization for each partition for each asset\n monitored by the sensor. Aggregates all materializations into a mapping of partition key\n to a mapping of asset key to the materialization event for that partition.\n\n For example, if the sensor monitors two partitioned assets A and B that are materialized\n for partition_x after the cursor, this function returns:\n\n .. code-block:: python\n\n {\n "partition_x": {asset_a.key: EventLogRecord(...), asset_b.key: EventLogRecord(...)}\n }\n\n This method can only be called when all monitored assets are partitioned and share\n the same partition definition.\n """\n partitions_defs = list(self._partitions_def_by_asset_key.values())\n if not partitions_defs or not all(x == partitions_defs[0] for x in partitions_defs):\n raise DagsterInvalidInvocationError(\n "All assets must be partitioned and share the same partitions definition"\n )\n\n asset_and_materialization_tuple_by_partition: Dict[\n str, Dict[AssetKey, "EventLogRecord"]\n ] = defaultdict(dict)\n\n for asset_key in self._monitored_asset_keys:\n materialization_by_partition = self.latest_materialization_records_by_partition(\n asset_key\n )\n for partition, materialization in materialization_by_partition.items():\n asset_and_materialization_tuple_by_partition[partition][asset_key] = materialization\n\n return asset_and_materialization_tuple_by_partition
\n\n
[docs] @public\n def get_cursor_partition(self, asset_key: Optional[AssetKey]) -> Optional[str]:\n """A utility method to get the current partition the cursor is on."""\n asset_key = check.opt_inst_param(asset_key, "asset_key", AssetKey)\n if asset_key not in self._monitored_asset_keys:\n raise DagsterInvalidInvocationError(\n "Provided asset key must correspond to a provided asset"\n )\n if asset_key:\n partition_key = self._get_cursor(asset_key).latest_consumed_event_partition\n elif self._monitored_asset_keys is not None and len(self._monitored_asset_keys) == 1:\n partition_key = self._get_cursor(\n self._monitored_asset_keys[0]\n ).latest_consumed_event_partition\n else:\n raise DagsterInvalidInvocationError(\n "Asset key must be provided when multiple assets are defined"\n )\n\n return partition_key
\n\n
[docs] @public\n def all_partitions_materialized(\n self, asset_key: AssetKey, partitions: Optional[Sequence[str]] = None\n ) -> bool:\n """A utility method to check if a provided list of partitions have been materialized\n for a particular asset. This method ignores the cursor and checks all materializations\n for the asset.\n\n Args:\n asset_key (AssetKey): The asset to check partitions for.\n partitions (Optional[Sequence[str]]): A list of partitions to check. If not provided,\n all partitions for the asset will be checked.\n\n Returns:\n bool: True if all selected partitions have been materialized, False otherwise.\n """\n check.inst_param(asset_key, "asset_key", AssetKey)\n\n if partitions is not None:\n check.sequence_param(partitions, "partitions", of_type=str)\n if len(partitions) == 0:\n raise DagsterInvalidInvocationError("Must provide at least one partition in list")\n\n materialized_partitions = self.instance.get_materialized_partitions(asset_key)\n if not partitions:\n if asset_key not in self._monitored_asset_keys:\n raise DagsterInvariantViolationError(\n f"Asset key {asset_key} not monitored by sensor"\n )\n\n partitions_def = self._partitions_def_by_asset_key.get(asset_key)\n if not partitions_def:\n raise DagsterInvariantViolationError(\n f"Asset key {asset_key} is not partitioned. Cannot check if partitions have"\n " been materialized."\n )\n partitions = partitions_def.get_partition_keys(dynamic_partitions_store=self.instance)\n\n return all([partition in materialized_partitions for partition in partitions])
\n\n def _get_asset(self, asset_key: AssetKey, fn_name: str) -> AssetsDefinition:\n from dagster._core.definitions.repository_definition import RepositoryDefinition\n\n repo_def = cast(RepositoryDefinition, self._repository_def)\n repository_assets = repo_def.assets_defs_by_key\n if asset_key in self._assets_by_key:\n asset_def = self._assets_by_key[asset_key]\n if asset_def is None:\n raise DagsterInvalidInvocationError(\n f"Asset key {asset_key} does not have an AssetDefinition in this repository"\n f" (likely because it is a SourceAsset). fn context.{fn_name} can only be"\n " called for assets with AssetDefinitions in the repository."\n )\n else:\n return asset_def\n elif asset_key in repository_assets:\n return repository_assets[asset_key]\n else:\n raise DagsterInvalidInvocationError(\n f"Asset key {asset_key} not monitored in sensor and does not exist in target jobs"\n )\n\n
[docs] @public\n def get_downstream_partition_keys(\n self, partition_key: str, from_asset_key: AssetKey, to_asset_key: AssetKey\n ) -> Sequence[str]:\n """Converts a partition key from one asset to the corresponding partition key in a downstream\n asset. Uses the existing partition mapping between the upstream asset and the downstream\n asset if it exists, otherwise, uses the default partition mapping.\n\n Args:\n partition_key (str): The partition key to convert.\n from_asset_key (AssetKey): The asset key of the upstream asset, which the provided\n partition key belongs to.\n to_asset_key (AssetKey): The asset key of the downstream asset. The provided partition\n key will be mapped to partitions within this asset.\n\n Returns:\n Sequence[str]: A list of the corresponding downstream partitions in to_asset_key that\n partition_key maps to.\n """\n partition_key = check.str_param(partition_key, "partition_key")\n\n to_asset = self._get_asset(to_asset_key, fn_name="get_downstream_partition_keys")\n from_asset = self._get_asset(from_asset_key, fn_name="get_downstream_partition_keys")\n\n to_partitions_def = to_asset.partitions_def\n\n if not isinstance(to_partitions_def, PartitionsDefinition):\n raise DagsterInvalidInvocationError(\n f"Asset key {to_asset_key} is not partitioned. Cannot get partition keys."\n )\n if not isinstance(from_asset.partitions_def, PartitionsDefinition):\n raise DagsterInvalidInvocationError(\n f"Asset key {from_asset_key} is not partitioned. Cannot get partition keys."\n )\n\n partition_mapping = to_asset.infer_partition_mapping(\n from_asset_key, from_asset.partitions_def\n )\n downstream_partition_key_subset = (\n partition_mapping.get_downstream_partitions_for_partitions(\n from_asset.partitions_def.empty_subset().with_partition_keys([partition_key]),\n downstream_partitions_def=to_partitions_def,\n dynamic_partitions_store=self.instance,\n )\n )\n\n return list(downstream_partition_key_subset.get_partition_keys())
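A hedged sketch (not part of this generated module body) of how `get_downstream_partition_keys`, `latest_materialization_records_by_partition`, and `advance_cursor` are typically combined in a `multi_asset_sensor`; the asset keys "upstream_daily" and "downstream_weekly" are hypothetical placeholders.

from dagster import AssetKey, AssetSelection, RunRequest, multi_asset_sensor


@multi_asset_sensor(
    monitored_assets=[AssetKey("upstream_daily")],  # hypothetical partitioned asset
    request_assets=AssetSelection.keys("downstream_weekly"),  # hypothetical downstream asset
)
def fan_out_sensor(context):
    upstream_key = AssetKey("upstream_daily")
    # Latest unconsumed materialization for each upstream partition.
    for partition, record in context.latest_materialization_records_by_partition(
        upstream_key
    ).items():
        # Map the upstream partition to the downstream partitions it feeds.
        for downstream_partition in context.get_downstream_partition_keys(
            partition,
            from_asset_key=upstream_key,
            to_asset_key=AssetKey("downstream_weekly"),
        ):
            yield RunRequest(partition_key=downstream_partition)
        # Mark this upstream materialization as consumed so it is not re-handled next tick.
        context.advance_cursor({upstream_key: record})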
\n\n
[docs] @public\n def advance_cursor(\n self, materialization_records_by_key: Mapping[AssetKey, Optional["EventLogRecord"]]\n ):\n """Marks the provided materialization records as having been consumed by the sensor.\n\n At the end of the tick, the cursor will be updated to advance past all materialization\n records provided via `advance_cursor`. In the next tick, records that have been consumed\n will no longer be returned.\n\n Passing a partitioned materialization record into this function will mark prior materializations\n with the same asset key and partition as having been consumed.\n\n Args:\n materialization_records_by_key (Mapping[AssetKey, Optional[EventLogRecord]]): Mapping of\n AssetKeys to EventLogRecord or None. If an EventLogRecord is provided, the cursor\n for the AssetKey will be updated and future calls to fetch asset materialization events\n will not fetch this event again. If None is provided, the cursor for the AssetKey\n will not be updated.\n """\n self._cursor_advance_state_mutation.add_advanced_records(materialization_records_by_key)\n self._cursor_updated = True
\n\n
[docs] @public\n def advance_all_cursors(self):\n """Updates the cursor to the most recent materialization event for all assets monitored by\n the multi_asset_sensor.\n\n Marks all materialization events as consumed by the sensor, including unconsumed events.\n """\n materializations_by_key = self.latest_materialization_records_by_key()\n\n self._cursor_advance_state_mutation.add_advanced_records(materializations_by_key)\n self._cursor_advance_state_mutation.advance_all_cursors_called = True\n self._cursor_updated = True
\n\n @public\n @property\n def assets_defs_by_key(self) -> Mapping[AssetKey, Optional[AssetsDefinition]]:\n """Mapping[AssetKey, Optional[AssetsDefinition]]: A mapping from AssetKey to the\n AssetsDefinition object which produces it. If a given asset is monitored by this sensor, but\n is not produced within the same code location as this sensor, then the value will be None.\n """\n return self._assets_by_key\n\n @public\n @property\n def asset_keys(self) -> Sequence[AssetKey]:\n """Sequence[AssetKey]: The asset keys which are monitored by this sensor."""\n return self._monitored_asset_keys
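For orientation (not part of this generated module body): a minimal sketch of how the context methods documented above are used inside a `multi_asset_sensor`. The asset keys "asset_a", "asset_b", and "downstream_asset" are hypothetical placeholders.

from dagster import AssetKey, AssetSelection, RunRequest, SkipReason, multi_asset_sensor


@multi_asset_sensor(
    monitored_assets=[AssetKey("asset_a"), AssetKey("asset_b")],  # hypothetical assets
    request_assets=AssetSelection.keys("downstream_asset"),  # hypothetical downstream selection
)
def both_assets_sensor(context):
    # One record per monitored asset, or None if nothing new past the cursor.
    records = context.latest_materialization_records_by_key()
    if all(records.values()):
        # Mark everything seen so far as consumed and request a run.
        context.advance_all_cursors()
        return RunRequest()
    return SkipReason("Waiting for new materializations of both monitored assets.")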
\n\n\nclass MultiAssetSensorCursorAdvances:\n _advanced_record_ids_by_key: Dict[AssetKey, Set[int]]\n _partition_key_by_record_id: Dict[int, Optional[str]]\n advance_all_cursors_called: bool\n\n def __init__(self):\n self._advanced_record_ids_by_key = defaultdict(set)\n self._partition_key_by_record_id = {}\n self.advance_all_cursors_called = False\n\n def add_advanced_records(\n self, materialization_records_by_key: Mapping[AssetKey, Optional["EventLogRecord"]]\n ):\n for asset_key, materialization in materialization_records_by_key.items():\n if materialization:\n self._advanced_record_ids_by_key[asset_key].add(materialization.storage_id)\n\n self._partition_key_by_record_id[materialization.storage_id] = (\n materialization.partition_key\n )\n\n def get_cursor_with_advances(\n self,\n context: MultiAssetSensorEvaluationContext,\n initial_cursor: MultiAssetSensorContextCursor,\n ) -> Optional[str]:\n """Given the multi asset sensor context and the cursor at the start of the tick,\n returns the cursor that should be used in the next tick.\n\n If the cursor has not been updated, returns None\n """\n if len(self._advanced_record_ids_by_key) == 0:\n # No events marked as advanced\n return None\n\n return json.dumps(\n {\n str(asset_key): self.get_asset_cursor_with_advances(\n asset_key, context, initial_cursor\n )\n for asset_key in context.asset_keys\n }\n )\n\n def get_asset_cursor_with_advances(\n self,\n asset_key: AssetKey,\n context: MultiAssetSensorEvaluationContext,\n initial_cursor: MultiAssetSensorContextCursor,\n ) -> MultiAssetSensorAssetCursorComponent:\n from dagster._core.events import DagsterEventType\n from dagster._core.storage.event_log.base import EventRecordsFilter\n\n advanced_records: Set[int] = self._advanced_record_ids_by_key.get(asset_key, set())\n if len(advanced_records) == 0:\n # No events marked as advanced for this asset key\n return initial_cursor.get_cursor_for_asset(asset_key)\n\n initial_asset_cursor = initial_cursor.get_cursor_for_asset(asset_key)\n\n latest_consumed_event_id_at_tick_start = initial_asset_cursor.latest_consumed_event_id\n\n greatest_consumed_event_id_in_tick = max(advanced_records)\n latest_consumed_partition_in_tick = self._partition_key_by_record_id[\n greatest_consumed_event_id_in_tick\n ]\n latest_unconsumed_record_by_partition: Dict[str, int] = {}\n\n if not self.advance_all_cursors_called:\n latest_unconsumed_record_by_partition = (\n initial_asset_cursor.trailing_unconsumed_partitioned_event_ids\n )\n unconsumed_events = list(context.get_trailing_unconsumed_events(asset_key)) + list(\n context.instance.get_event_records(\n EventRecordsFilter(\n event_type=DagsterEventType.ASSET_MATERIALIZATION,\n asset_key=asset_key,\n after_cursor=latest_consumed_event_id_at_tick_start,\n before_cursor=greatest_consumed_event_id_in_tick,\n ),\n ascending=True,\n )\n if greatest_consumed_event_id_in_tick\n > (latest_consumed_event_id_at_tick_start or 0)\n else []\n )\n\n # Iterate through events in ascending order, storing the latest unconsumed\n # event for each partition. 
If an advanced event exists for a partition, clear\n # the prior unconsumed event for that partition.\n for event in unconsumed_events:\n partition = event.partition_key\n if partition is not None: # Ignore unpartitioned events\n if event.storage_id not in advanced_records:\n latest_unconsumed_record_by_partition[partition] = event.storage_id\n elif partition in latest_unconsumed_record_by_partition:\n latest_unconsumed_record_by_partition.pop(partition)\n\n if (\n latest_consumed_partition_in_tick is not None\n and latest_consumed_partition_in_tick in latest_unconsumed_record_by_partition\n ):\n latest_unconsumed_record_by_partition.pop(latest_consumed_partition_in_tick)\n\n if len(latest_unconsumed_record_by_partition.keys()) >= MAX_NUM_UNCONSUMED_EVENTS:\n raise DagsterInvariantViolationError(f"""\n You have reached the maximum number of trailing unconsumed events\n ({MAX_NUM_UNCONSUMED_EVENTS}) for asset {asset_key} and no more events can be\n added. You can access the unconsumed events by calling the\n `get_trailing_unconsumed_events` method on the sensor context, and\n mark events as consumed by passing them to `advance_cursor`.\n\n Otherwise, you can clear all unconsumed events and reset the cursor to the latest\n materialization for each asset by calling `advance_all_cursors`.\n """)\n\n return MultiAssetSensorAssetCursorComponent(\n latest_consumed_event_partition=(\n latest_consumed_partition_in_tick\n if greatest_consumed_event_id_in_tick\n > (latest_consumed_event_id_at_tick_start or 0)\n else initial_asset_cursor.latest_consumed_event_partition\n ),\n latest_consumed_event_id=(\n greatest_consumed_event_id_in_tick\n if greatest_consumed_event_id_in_tick\n > (latest_consumed_event_id_at_tick_start or 0)\n else latest_consumed_event_id_at_tick_start\n ),\n trailing_unconsumed_partitioned_event_ids=latest_unconsumed_record_by_partition,\n )\n\n\ndef get_cursor_from_latest_materializations(\n asset_keys: Sequence[AssetKey], instance: DagsterInstance\n) -> str:\n from dagster._core.events import DagsterEventType\n from dagster._core.storage.event_log.base import EventRecordsFilter\n\n cursor_dict: Dict[str, MultiAssetSensorAssetCursorComponent] = {}\n\n for asset_key in asset_keys:\n materializations = instance.get_event_records(\n EventRecordsFilter(\n DagsterEventType.ASSET_MATERIALIZATION,\n asset_key=asset_key,\n ),\n limit=1,\n )\n if materializations:\n last_materialization = list(materializations)[-1]\n\n cursor_dict[str(asset_key)] = MultiAssetSensorAssetCursorComponent(\n last_materialization.partition_key,\n last_materialization.storage_id,\n {},\n )\n\n cursor_str = json.dumps(cursor_dict)\n return cursor_str\n\n\n
[docs]@experimental\ndef build_multi_asset_sensor_context(\n *,\n monitored_assets: Union[Sequence[AssetKey], AssetSelection],\n repository_def: Optional["RepositoryDefinition"] = None,\n instance: Optional[DagsterInstance] = None,\n cursor: Optional[str] = None,\n repository_name: Optional[str] = None,\n cursor_from_latest_materializations: bool = False,\n resources: Optional[Mapping[str, object]] = None,\n definitions: Optional["Definitions"] = None,\n) -> MultiAssetSensorEvaluationContext:\n """Builds multi asset sensor execution context for testing purposes using the provided parameters.\n\n This function can be used to provide a context to the invocation of a multi asset sensor definition. If\n provided, the dagster instance must be persistent; DagsterInstance.ephemeral() will result in an\n error.\n\n Args:\n monitored_assets (Union[Sequence[AssetKey], AssetSelection]): The assets monitored\n by the sensor. If an AssetSelection object is provided, it will only apply to assets\n within the Definitions that this sensor is part of.\n repository_def (RepositoryDefinition): `RepositoryDefinition` object that\n the sensor is defined in. Must provide `definitions` if this is not provided.\n instance (Optional[DagsterInstance]): The dagster instance configured to run the sensor.\n cursor (Optional[str]): A string cursor to provide to the evaluation of the sensor. Must be\n a dictionary of asset key strings to ints that has been converted to a json string\n repository_name (Optional[str]): The name of the repository that the sensor belongs to.\n cursor_from_latest_materializations (bool): If True, the cursor will be set to the latest\n materialization for each monitored asset. By default, set to False.\n resources (Optional[Mapping[str, object]]): The resource definitions\n to provide to the sensor.\n definitions (Optional[Definitions]): `Definitions` object that the sensor is defined in.\n Must provide `repository_def` if this is not provided.\n\n Examples:\n .. 
code-block:: python\n\n with instance_for_test() as instance:\n context = build_multi_asset_sensor_context(\n monitored_assets=[AssetKey("asset_1"), AssetKey("asset_2")],\n instance=instance,\n )\n my_asset_sensor(context)\n\n """\n from dagster._core.definitions import RepositoryDefinition\n from dagster._core.definitions.definitions_class import Definitions\n from dagster._core.execution.build_resources import wrap_resources_for_execution\n\n check.opt_inst_param(instance, "instance", DagsterInstance)\n check.opt_str_param(cursor, "cursor")\n check.opt_str_param(repository_name, "repository_name")\n repository_def = normalize_to_repository(\n check.opt_inst_param(definitions, "definitions", Definitions),\n check.opt_inst_param(repository_def, "repository_def", RepositoryDefinition),\n )\n\n check.bool_param(cursor_from_latest_materializations, "cursor_from_latest_materializations")\n\n if cursor_from_latest_materializations:\n if cursor:\n raise DagsterInvalidInvocationError(\n "Cannot provide both cursor and cursor_from_latest_materializations objects."\n " Dagster will override the provided cursor based on the"\n " cursor_from_latest_materializations object."\n )\n if not instance:\n raise DagsterInvalidInvocationError(\n "Cannot provide cursor_from_latest_materializations object without a Dagster"\n " instance."\n )\n\n asset_keys: Sequence[AssetKey]\n if isinstance(monitored_assets, AssetSelection):\n asset_keys = cast(\n List[AssetKey],\n list(\n monitored_assets.resolve(list(set(repository_def.assets_defs_by_key.values())))\n ),\n )\n else:\n asset_keys = monitored_assets\n\n cursor = get_cursor_from_latest_materializations(asset_keys, instance)\n\n return MultiAssetSensorEvaluationContext(\n instance_ref=None,\n last_completion_time=None,\n last_run_key=None,\n cursor=cursor,\n repository_name=repository_name,\n instance=instance,\n monitored_assets=monitored_assets,\n repository_def=repository_def,\n resource_defs=wrap_resources_for_execution(resources),\n )
\n\n\nAssetMaterializationFunctionReturn = Union[\n Iterator[Union[RunRequest, SkipReason, SensorResult]],\n Sequence[RunRequest],\n RunRequest,\n SkipReason,\n None,\n SensorResult,\n]\nAssetMaterializationFunction = Callable[\n ...,\n AssetMaterializationFunctionReturn,\n]\n\nMultiAssetMaterializationFunction = Callable[\n ...,\n AssetMaterializationFunctionReturn,\n]\n\n\n
[docs]@experimental\nclass MultiAssetSensorDefinition(SensorDefinition):\n """Define an asset sensor that initiates a set of runs based on the materialization of a list of\n assets.\n\n Users should not instantiate this object directly. To construct a\n `MultiAssetSensorDefinition`, use :py:func:`dagster.\n multi_asset_sensor`.\n\n Args:\n name (str): The name of the sensor to create.\n asset_keys (Sequence[AssetKey]): The asset_keys this sensor monitors.\n asset_materialization_fn (Callable[[MultiAssetSensorEvaluationContext], Union[Iterator[Union[RunRequest, SkipReason]], RunRequest, SkipReason]]): The core\n evaluation function for the sensor, which is run at an interval to determine whether a\n run should be launched or not. Takes a :py:class:`~dagster.MultiAssetSensorEvaluationContext`.\n\n This function must return a generator, which must yield either a single SkipReason\n or one or more RunRequest objects.\n minimum_interval_seconds (Optional[int]): The minimum number of seconds that will elapse\n between sensor evaluations.\n description (Optional[str]): A human-readable description of the sensor.\n job (Optional[Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]]): The job\n object to target with this sensor.\n jobs (Optional[Sequence[Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]]]):\n (experimental) A list of jobs to be executed when the sensor fires.\n default_status (DefaultSensorStatus): Whether the sensor starts as running or not. The default\n status can be overridden from the Dagster UI or via the GraphQL API.\n request_assets (Optional[AssetSelection]): (Experimental) an asset selection to launch a run\n for if the sensor condition is met. This can be provided instead of specifying a job.\n """\n\n def __init__(\n self,\n name: str,\n monitored_assets: Union[Sequence[AssetKey], AssetSelection],\n job_name: Optional[str],\n asset_materialization_fn: MultiAssetMaterializationFunction,\n minimum_interval_seconds: Optional[int] = None,\n description: Optional[str] = None,\n job: Optional[ExecutableDefinition] = None,\n jobs: Optional[Sequence[ExecutableDefinition]] = None,\n default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,\n request_assets: Optional[AssetSelection] = None,\n required_resource_keys: Optional[Set[str]] = None,\n ):\n resource_arg_names: Set[str] = {\n arg.name for arg in get_resource_args(asset_materialization_fn)\n }\n\n combined_required_resource_keys = (\n check.opt_set_param(required_resource_keys, "required_resource_keys", of_type=str)\n | resource_arg_names\n )\n\n def _wrap_asset_fn(materialization_fn):\n def _fn(context):\n def _check_cursor_not_set(sensor_result: SensorResult):\n if sensor_result.cursor:\n raise DagsterInvariantViolationError(\n "Cannot set cursor in a multi_asset_sensor. 
Cursor is set automatically"\n " based on the latest materialization for each monitored asset."\n )\n\n resource_args_populated = validate_and_get_resource_dict(\n context.resources, name, resource_arg_names\n )\n\n with MultiAssetSensorEvaluationContext(\n instance_ref=context.instance_ref,\n last_completion_time=context.last_completion_time,\n last_run_key=context.last_run_key,\n cursor=context.cursor,\n repository_name=context.repository_def.name,\n repository_def=context.repository_def,\n monitored_assets=monitored_assets,\n instance=context.instance,\n resource_defs=context.resource_defs,\n ) as multi_asset_sensor_context:\n context_param_name = get_context_param_name(materialization_fn)\n context_param = (\n {context_param_name: multi_asset_sensor_context}\n if context_param_name\n else {}\n )\n result = materialization_fn(\n **context_param,\n **resource_args_populated,\n )\n if result is None:\n return\n\n # because the materialization_fn can yield results (see _wrapped_fn in multi_asset_sensor decorator),\n # even if you return None in a sensor, it will still cause in inspect.isgenerator(result) to be True.\n # So keep track to see if we actually return any values and should update the cursor\n runs_yielded = False\n if inspect.isgenerator(result) or isinstance(result, list):\n for item in result:\n if isinstance(item, RunRequest):\n runs_yielded = True\n if isinstance(item, SensorResult):\n raise DagsterInvariantViolationError(\n "Cannot yield a SensorResult from a multi_asset_sensor. Instead"\n " return the SensorResult."\n )\n yield item\n elif isinstance(result, RunRequest):\n runs_yielded = True\n yield result\n elif isinstance(result, SkipReason):\n # if result is a SkipReason, we don't update the cursor, so don't set runs_yielded = True\n yield result\n elif isinstance(result, SensorResult):\n _check_cursor_not_set(result)\n if result.run_requests:\n runs_yielded = True\n yield result\n\n if runs_yielded and not multi_asset_sensor_context.cursor_updated:\n raise DagsterInvalidDefinitionError(\n "Asset materializations have been handled in this sensor, but the cursor"\n " was not updated. This means the same materialization events will be"\n " handled in the next sensor tick. 
Use context.advance_cursor or"\n " context.advance_all_cursors to update the cursor."\n )\n\n multi_asset_sensor_context.update_cursor_after_evaluation()\n context.update_cursor(multi_asset_sensor_context.cursor)\n\n return _fn\n\n self._raw_asset_materialization_fn = asset_materialization_fn\n\n super(MultiAssetSensorDefinition, self).__init__(\n name=check_valid_name(name),\n job_name=job_name,\n evaluation_fn=_wrap_asset_fn(\n check.callable_param(asset_materialization_fn, "asset_materialization_fn")\n ),\n minimum_interval_seconds=minimum_interval_seconds,\n description=description,\n job=job,\n jobs=jobs,\n default_status=default_status,\n asset_selection=request_assets,\n required_resource_keys=combined_required_resource_keys,\n )\n\n def __call__(self, *args, **kwargs) -> AssetMaterializationFunctionReturn:\n context_param_name = get_context_param_name(self._raw_asset_materialization_fn)\n context = get_sensor_context_from_args_or_kwargs(\n self._raw_asset_materialization_fn,\n args,\n kwargs,\n context_type=MultiAssetSensorEvaluationContext,\n )\n\n resources = validate_and_get_resource_dict(\n context.resources if context else ScopedResourcesBuilder.build_empty(),\n self._name,\n self._required_resource_keys,\n )\n\n context_param = {context_param_name: context} if context_param_name and context else {}\n result = self._raw_asset_materialization_fn(**context_param, **resources)\n\n if context:\n context.update_cursor_after_evaluation()\n return result\n\n @property\n def sensor_type(self) -> SensorType:\n return SensorType.MULTI_ASSET
\n
", "current_page_name": "_modules/dagster/_core/definitions/multi_asset_sensor_definition", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.multi_asset_sensor_definition"}, "multi_dimensional_partitions": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.multi_dimensional_partitions

\nimport hashlib\nimport itertools\nfrom datetime import datetime\nfrom functools import lru_cache, reduce\nfrom typing import (\n    Dict,\n    Iterable,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n    Type,\n    Union,\n    cast,\n)\n\nimport pendulum\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.errors import (\n    DagsterInvalidDefinitionError,\n    DagsterInvalidInvocationError,\n    DagsterUnknownPartitionError,\n)\nfrom dagster._core.instance import DynamicPartitionsStore\nfrom dagster._core.storage.tags import (\n    MULTIDIMENSIONAL_PARTITION_PREFIX,\n    get_multidimensional_partition_tag,\n)\n\nfrom .partition import (\n    DefaultPartitionsSubset,\n    DynamicPartitionsDefinition,\n    PartitionsDefinition,\n    PartitionsSubset,\n    StaticPartitionsDefinition,\n)\nfrom .time_window_partitions import TimeWindow, TimeWindowPartitionsDefinition\n\nINVALID_STATIC_PARTITIONS_KEY_CHARACTERS = set(["|", ",", "[", "]"])\n\nMULTIPARTITION_KEY_DELIMITER = "|"\n\n\nclass PartitionDimensionKey(\n    NamedTuple("_PartitionDimensionKey", [("dimension_name", str), ("partition_key", str)])\n):\n    """Representation of a single dimension of a multi-dimensional partition key."""\n\n    def __new__(cls, dimension_name: str, partition_key: str):\n        return super(PartitionDimensionKey, cls).__new__(\n            cls,\n            dimension_name=check.str_param(dimension_name, "dimension_name"),\n            partition_key=check.str_param(partition_key, "partition_key"),\n        )\n\n\n
[docs]class MultiPartitionKey(str):\n """A multi-dimensional partition key stores the partition key for each dimension.\n Subclasses the string class to keep partition key type as a string.\n\n Contains additional methods to access the partition key for each dimension.\n Creates a string representation of the partition key for each dimension, separated by a pipe (|).\n Orders the dimensions by name, to ensure consistent string representation.\n """\n\n dimension_keys: List[PartitionDimensionKey] = []\n\n def __new__(cls, keys_by_dimension: Mapping[str, str]):\n check.mapping_param(\n keys_by_dimension, "partitions_by_dimension", key_type=str, value_type=str\n )\n\n dimension_keys: List[PartitionDimensionKey] = [\n PartitionDimensionKey(dimension, keys_by_dimension[dimension])\n for dimension in sorted(list(keys_by_dimension.keys()))\n ]\n\n str_key = super(MultiPartitionKey, cls).__new__(\n cls,\n MULTIPARTITION_KEY_DELIMITER.join(\n [dim_key.partition_key for dim_key in dimension_keys]\n ),\n )\n\n str_key.dimension_keys = dimension_keys\n\n return str_key\n\n def __getnewargs__(self):\n # When this instance is pickled, replace the argument to __new__ with the\n # dimension key mapping instead of the string representation.\n return ({dim_key.dimension_name: dim_key.partition_key for dim_key in self.dimension_keys},)\n\n @property\n def keys_by_dimension(self) -> Mapping[str, str]:\n return {dim_key.dimension_name: dim_key.partition_key for dim_key in self.dimension_keys}
\n\n\nclass PartitionDimensionDefinition(\n NamedTuple(\n "_PartitionDimensionDefinition",\n [\n ("name", str),\n ("partitions_def", PartitionsDefinition),\n ],\n )\n):\n def __new__(\n cls,\n name: str,\n partitions_def: PartitionsDefinition,\n ):\n return super().__new__(\n cls,\n name=check.str_param(name, "name"),\n partitions_def=check.inst_param(partitions_def, "partitions_def", PartitionsDefinition),\n )\n\n def __eq__(self, other: object) -> bool:\n return (\n isinstance(other, PartitionDimensionDefinition)\n and self.name == other.name\n and self.partitions_def == other.partitions_def\n )\n\n\nALLOWED_PARTITION_DIMENSION_TYPES = (\n StaticPartitionsDefinition,\n TimeWindowPartitionsDefinition,\n DynamicPartitionsDefinition,\n)\n\n\ndef _check_valid_partitions_dimensions(\n partitions_dimensions: Mapping[str, PartitionsDefinition]\n) -> None:\n for dim_name, partitions_def in partitions_dimensions.items():\n if not any(isinstance(partitions_def, t) for t in ALLOWED_PARTITION_DIMENSION_TYPES):\n raise DagsterInvalidDefinitionError(\n f"Invalid partitions definition type {type(partitions_def)}. "\n "Only the following partitions definition types are supported: "\n f"{ALLOWED_PARTITION_DIMENSION_TYPES}."\n )\n if isinstance(partitions_def, DynamicPartitionsDefinition) and partitions_def.name is None:\n raise DagsterInvalidDefinitionError(\n "DynamicPartitionsDefinition must have a name to be used in a"\n " MultiPartitionsDefinition."\n )\n\n if isinstance(partitions_def, StaticPartitionsDefinition):\n if any(\n [\n INVALID_STATIC_PARTITIONS_KEY_CHARACTERS & set(key)\n for key in partitions_def.get_partition_keys()\n ]\n ):\n raise DagsterInvalidDefinitionError(\n f"Invalid character in partition key for dimension {dim_name}. "\n "A multi-partitions definition cannot contain partition keys with "\n "the following characters: |, [, ], ,"\n )\n\n\n
[docs]class MultiPartitionsDefinition(PartitionsDefinition[MultiPartitionKey]):\n """Takes the cross-product of partitions from two partitions definitions.\n\n For example, with a static partitions definition where the partitions are ["a", "b", "c"]\n and a daily partitions definition, this partitions definition will have the following\n partitions:\n\n 2020-01-01|a\n 2020-01-01|b\n 2020-01-01|c\n 2020-01-02|a\n 2020-01-02|b\n ...\n\n Args:\n partitions_defs (Mapping[str, PartitionsDefinition]):\n A mapping of dimension name to partitions definition. The total set of partitions will\n be the cross-product of the partitions from each PartitionsDefinition.\n\n Attributes:\n partitions_defs (Sequence[PartitionDimensionDefinition]):\n A sequence of PartitionDimensionDefinition objects, each of which contains a dimension\n name and a PartitionsDefinition. The total set of partitions will be the cross-product\n of the partitions from each PartitionsDefinition. This sequence is ordered by\n dimension name, to ensure consistent ordering of the partitions.\n """\n\n def __init__(self, partitions_defs: Mapping[str, PartitionsDefinition]):\n if not len(partitions_defs.keys()) == 2:\n raise DagsterInvalidInvocationError(\n "Dagster currently only supports multi-partitions definitions with 2 partitions"\n " definitions. Your multi-partitions definition has"\n f" {len(partitions_defs.keys())} partitions definitions."\n )\n check.mapping_param(\n partitions_defs, "partitions_defs", key_type=str, value_type=PartitionsDefinition\n )\n\n _check_valid_partitions_dimensions(partitions_defs)\n\n self._partitions_defs: List[PartitionDimensionDefinition] = sorted(\n [\n PartitionDimensionDefinition(name, partitions_def)\n for name, partitions_def in partitions_defs.items()\n ],\n key=lambda x: x.name,\n )\n\n @property\n def partitions_subset_class(self) -> Type["PartitionsSubset"]:\n return MultiPartitionsSubset\n\n def get_serializable_unique_identifier(\n self, dynamic_partitions_store: Optional[DynamicPartitionsStore] = None\n ) -> str:\n return hashlib.sha1(\n str(\n {\n dim_def.name: dim_def.partitions_def.get_serializable_unique_identifier(\n dynamic_partitions_store\n )\n for dim_def in self.partitions_defs\n }\n ).encode("utf-8")\n ).hexdigest()\n\n @property\n def partition_dimension_names(self) -> List[str]:\n return [dim_def.name for dim_def in self._partitions_defs]\n\n @property\n def partitions_defs(self) -> Sequence[PartitionDimensionDefinition]:\n return self._partitions_defs\n\n def get_partitions_def_for_dimension(self, dimension_name: str) -> PartitionsDefinition:\n for dim_def in self._partitions_defs:\n if dim_def.name == dimension_name:\n return dim_def.partitions_def\n check.failed(f"Invalid dimension name {dimension_name}")\n\n # We override the default implementation of `has_partition_key` for performance.\n def has_partition_key(\n self,\n partition_key: Union[MultiPartitionKey, str],\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> bool:\n partition_key = (\n partition_key\n if isinstance(partition_key, MultiPartitionKey)\n else self.get_partition_key_from_str(partition_key)\n )\n if partition_key.keys_by_dimension.keys() != set(self.partition_dimension_names):\n raise DagsterUnknownPartitionError(\n f"Invalid partition key {partition_key}. 
The dimensions of the partition key are"\n " not the dimensions of the partitions definition."\n )\n\n for dimension in self.partitions_defs:\n if not dimension.partitions_def.has_partition_key(\n partition_key.keys_by_dimension[dimension.name],\n current_time=current_time,\n dynamic_partitions_store=dynamic_partitions_store,\n ):\n return False\n return True\n\n # store results for repeated calls with the same current_time\n @lru_cache(maxsize=1)\n def _get_partition_keys(\n self, current_time: datetime, dynamic_partitions_store: Optional[DynamicPartitionsStore]\n ) -> Sequence[MultiPartitionKey]:\n partition_key_sequences = [\n partition_dim.partitions_def.get_partition_keys(\n current_time=current_time, dynamic_partitions_store=dynamic_partitions_store\n )\n for partition_dim in self._partitions_defs\n ]\n\n return [\n MultiPartitionKey(\n {self._partitions_defs[i].name: key for i, key in enumerate(partition_key_tuple)}\n )\n for partition_key_tuple in itertools.product(*partition_key_sequences)\n ]\n\n
[docs] @public\n def get_partition_keys(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Sequence[MultiPartitionKey]:\n """Returns a list of MultiPartitionKeys representing the partition keys of the\n PartitionsDefinition.\n\n Args:\n current_time (Optional[datetime]): A datetime object representing the current time, only\n applicable to time-based partition dimensions.\n dynamic_partitions_store (Optional[DynamicPartitionsStore]): The DynamicPartitionsStore\n object that is responsible for fetching dynamic partitions. Required when a\n dimension is a DynamicPartitionsDefinition with a name defined. Users can pass the\n DagsterInstance fetched via `context.instance` to this argument.\n\n Returns:\n Sequence[MultiPartitionKey]\n """\n return self._get_partition_keys(\n current_time or pendulum.now("UTC"), dynamic_partitions_store\n )
\n\n def filter_valid_partition_keys(\n self, partition_keys: Set[str], dynamic_partitions_store: DynamicPartitionsStore\n ) -> Set[MultiPartitionKey]:\n partition_keys_by_dimension = {\n dim.name: dim.partitions_def.get_partition_keys(\n dynamic_partitions_store=dynamic_partitions_store\n )\n for dim in self.partitions_defs\n }\n validated_partitions = set()\n for partition_key in partition_keys:\n partition_key_strs = partition_key.split(MULTIPARTITION_KEY_DELIMITER)\n if len(partition_key_strs) != len(self.partitions_defs):\n continue\n\n multipartition_key = MultiPartitionKey(\n {dim.name: partition_key_strs[i] for i, dim in enumerate(self._partitions_defs)}\n )\n\n if all(\n key in partition_keys_by_dimension.get(dim, [])\n for dim, key in multipartition_key.keys_by_dimension.items()\n ):\n validated_partitions.add(partition_key)\n\n return validated_partitions\n\n def __eq__(self, other):\n return (\n isinstance(other, MultiPartitionsDefinition)\n and self.partitions_defs == other.partitions_defs\n )\n\n def __hash__(self):\n return hash(\n tuple(\n [\n (partitions_def.name, partitions_def.__repr__())\n for partitions_def in self.partitions_defs\n ]\n )\n )\n\n def __str__(self) -> str:\n dimension_1 = self._partitions_defs[0]\n dimension_2 = self._partitions_defs[1]\n partition_str = (\n "Multi-partitioned, with dimensions: \\n"\n f"{dimension_1.name.capitalize()}: {dimension_1.partitions_def} \\n"\n f"{dimension_2.name.capitalize()}: {dimension_2.partitions_def}"\n )\n return partition_str\n\n def __repr__(self) -> str:\n return f"{type(self).__name__}(dimensions={[str(dim) for dim in self.partitions_defs]}"\n\n def get_partition_key_from_str(self, partition_key_str: str) -> MultiPartitionKey:\n """Given a string representation of a partition key, returns a MultiPartitionKey object."""\n check.str_param(partition_key_str, "partition_key_str")\n\n partition_key_strs = partition_key_str.split(MULTIPARTITION_KEY_DELIMITER)\n check.invariant(\n len(partition_key_strs) == len(self.partitions_defs),\n f"Expected {len(self.partitions_defs)} partition keys in partition key string"\n f" {partition_key_str}, but got {len(partition_key_strs)}",\n )\n\n return MultiPartitionKey(\n {dim.name: partition_key_strs[i] for i, dim in enumerate(self._partitions_defs)}\n )\n\n def _get_primary_and_secondary_dimension(\n self,\n ) -> Tuple[PartitionDimensionDefinition, PartitionDimensionDefinition]:\n # Multipartitions subsets are serialized by primary dimension. 
If changing\n # the selection of primary/secondary dimension, will need to also update the\n # serialization of MultiPartitionsSubsets\n\n time_dimensions = [\n dim\n for dim in self.partitions_defs\n if isinstance(dim.partitions_def, TimeWindowPartitionsDefinition)\n ]\n if len(time_dimensions) == 1:\n primary_dimension, secondary_dimension = time_dimensions[0], next(\n iter([dim for dim in self.partitions_defs if dim != time_dimensions[0]])\n )\n else:\n primary_dimension, secondary_dimension = (\n self.partitions_defs[0],\n self.partitions_defs[1],\n )\n\n return primary_dimension, secondary_dimension\n\n @property\n def primary_dimension(self) -> PartitionDimensionDefinition:\n return self._get_primary_and_secondary_dimension()[0]\n\n @property\n def secondary_dimension(self) -> PartitionDimensionDefinition:\n return self._get_primary_and_secondary_dimension()[1]\n\n def get_tags_for_partition_key(self, partition_key: str) -> Mapping[str, str]:\n partition_key = cast(MultiPartitionKey, self.get_partition_key_from_str(partition_key))\n tags = {**super().get_tags_for_partition_key(partition_key)}\n tags.update(get_tags_from_multi_partition_key(partition_key))\n return tags\n\n @property\n def time_window_dimension(self) -> PartitionDimensionDefinition:\n time_window_dims = [\n dim\n for dim in self.partitions_defs\n if isinstance(dim.partitions_def, TimeWindowPartitionsDefinition)\n ]\n check.invariant(\n len(time_window_dims) == 1, "Expected exactly one time window partitioned dimension"\n )\n return next(iter(time_window_dims))\n\n def time_window_for_partition_key(self, partition_key: str) -> TimeWindow:\n if not isinstance(partition_key, MultiPartitionKey):\n partition_key = self.get_partition_key_from_str(partition_key)\n\n time_window_dimension = self.time_window_dimension\n return cast(\n TimeWindowPartitionsDefinition, time_window_dimension.partitions_def\n ).time_window_for_partition_key(\n cast(MultiPartitionKey, partition_key).keys_by_dimension[time_window_dimension.name]\n )\n\n def get_multipartition_keys_with_dimension_value(\n self,\n dimension_name: str,\n dimension_partition_key: str,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n current_time: Optional[datetime] = None,\n ) -> Sequence[MultiPartitionKey]:\n check.str_param(dimension_name, "dimension_name")\n check.str_param(dimension_partition_key, "dimension_partition_key")\n\n matching_dimensions = [\n dimension for dimension in self.partitions_defs if dimension.name == dimension_name\n ]\n other_dimensions = [\n dimension for dimension in self.partitions_defs if dimension.name != dimension_name\n ]\n\n check.invariant(\n len(matching_dimensions) == 1,\n f"Dimension {dimension_name} not found in MultiPartitionsDefinition with dimensions"\n f" {[dim.name for dim in self.partitions_defs]}",\n )\n\n partition_sequences = [\n partition_dim.partitions_def.get_partition_keys(\n current_time=current_time, dynamic_partitions_store=dynamic_partitions_store\n )\n for partition_dim in other_dimensions\n ] + [[dimension_partition_key]]\n\n # Names of partitions dimensions in the same order as partition_sequences\n partition_dim_names = [dim.name for dim in other_dimensions] + [dimension_name]\n\n return [\n MultiPartitionKey(\n {\n partition_dim_names[i]: partition_key\n for i, partition_key in enumerate(partitions_tuple)\n }\n )\n for partitions_tuple in itertools.product(*partition_sequences)\n ]\n\n def get_num_partitions(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: 
Optional[DynamicPartitionsStore] = None,\n ) -> int:\n # Static partitions definitions can contain duplicate keys (will throw error in 1.3.0)\n # In the meantime, relying on get_num_partitions to handle duplicates to display\n # correct counts in the Dagster UI.\n dimension_counts = [\n dim.partitions_def.get_num_partitions(\n current_time=current_time, dynamic_partitions_store=dynamic_partitions_store\n )\n for dim in self.partitions_defs\n ]\n return reduce(lambda x, y: x * y, dimension_counts, 1)
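Because every multi-partition key is one combination of per-dimension keys, `get_num_partitions` is the product of the per-dimension counts. A small sketch, with invented dimension names:

.. code-block:: python

    from dagster import MultiPartitionsDefinition, StaticPartitionsDefinition

    letters_by_numbers = MultiPartitionsDefinition(
        {
            "letter": StaticPartitionsDefinition(["a", "b", "c"]),
            "number": StaticPartitionsDefinition(["1", "2"]),
        }
    )

    # 3 letters x 2 numbers -> 6 multi-partition keys.
    assert letters_by_numbers.get_num_partitions() == 6
    assert len(letters_by_numbers.get_partition_keys()) == 6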
\n\n\nclass MultiPartitionsSubset(DefaultPartitionsSubset):\n def __init__(\n self,\n partitions_def: MultiPartitionsDefinition,\n subset: Optional[Set[str]] = None,\n ):\n check.inst_param(partitions_def, "partitions_def", MultiPartitionsDefinition)\n subset = (\n set(\n [\n partitions_def.get_partition_key_from_str(key)\n for key in subset\n if MULTIPARTITION_KEY_DELIMITER in key\n ]\n )\n if subset\n else set()\n )\n super(MultiPartitionsSubset, self).__init__(partitions_def, subset)\n\n def with_partition_keys(self, partition_keys: Iterable[str]) -> "MultiPartitionsSubset":\n return MultiPartitionsSubset(\n cast(MultiPartitionsDefinition, self._partitions_def),\n self._subset | set(partition_keys),\n )\n\n\ndef get_tags_from_multi_partition_key(multi_partition_key: MultiPartitionKey) -> Mapping[str, str]:\n check.inst_param(multi_partition_key, "multi_partition_key", MultiPartitionKey)\n\n return {\n get_multidimensional_partition_tag(dimension.dimension_name): dimension.partition_key\n for dimension in multi_partition_key.dimension_keys\n }\n\n\ndef get_multipartition_key_from_tags(tags: Mapping[str, str]) -> str:\n partitions_by_dimension: Dict[str, str] = {}\n for tag in tags:\n if tag.startswith(MULTIDIMENSIONAL_PARTITION_PREFIX):\n dimension = tag[len(MULTIDIMENSIONAL_PARTITION_PREFIX) :]\n partitions_by_dimension[dimension] = tags[tag]\n\n return MultiPartitionKey(partitions_by_dimension)\n
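The helpers above translate between multi-partition keys and run tags. A sketch of how a `MultiPartitionKey` might be built directly and inspected; the dimension names are made up, and the exact tag keys emitted by `get_tags_for_partition_key` should be treated as an implementation detail:

.. code-block:: python

    from dagster import (
        MultiPartitionKey,
        MultiPartitionsDefinition,
        StaticPartitionsDefinition,
    )

    composite = MultiPartitionsDefinition(
        {
            "letter": StaticPartitionsDefinition(["a", "b"]),
            "number": StaticPartitionsDefinition(["1", "2"]),
        }
    )

    key = MultiPartitionKey({"letter": "a", "number": "1"})

    # A MultiPartitionKey is a str subclass; keys_by_dimension recovers the parts.
    assert key.keys_by_dimension == {"letter": "a", "number": "1"}

    # The definition maps a key to the run tags used for partitioned runs.
    print(composite.get_tags_for_partition_key(key))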
", "current_page_name": "_modules/dagster/_core/definitions/multi_dimensional_partitions", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.multi_dimensional_partitions"}, "op_definition": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.op_definition

\nimport inspect\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Callable,\n    Iterator,\n    List,\n    Mapping,\n    Optional,\n    Sequence,\n    Tuple,\n    TypeVar,\n    Union,\n    cast,\n)\n\nfrom typing_extensions import TypeAlias, get_args, get_origin\n\nimport dagster._check as check\nfrom dagster._annotations import deprecated, deprecated_param, public\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._core.definitions.dependency import NodeHandle, NodeInputHandle\nfrom dagster._core.definitions.node_definition import NodeDefinition\nfrom dagster._core.definitions.op_invocation import direct_invocation_result\nfrom dagster._core.definitions.policy import RetryPolicy\nfrom dagster._core.definitions.resource_requirement import (\n    InputManagerRequirement,\n    OpDefinitionResourceRequirement,\n    OutputManagerRequirement,\n    ResourceRequirement,\n)\nfrom dagster._core.errors import (\n    DagsterInvalidDefinitionError,\n    DagsterInvalidInvocationError,\n    DagsterInvariantViolationError,\n)\nfrom dagster._core.types.dagster_type import DagsterType, DagsterTypeKind\nfrom dagster._utils import IHasInternalInit\nfrom dagster._utils.warnings import normalize_renamed_param\n\nfrom .definition_config_schema import (\n    IDefinitionConfigSchema,\n    convert_user_facing_definition_config_schema,\n)\nfrom .hook_definition import HookDefinition\nfrom .inference import infer_output_props\nfrom .input import In, InputDefinition\nfrom .output import Out, OutputDefinition\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.asset_layer import AssetLayer\n\n    from .composition import PendingNodeInvocation\n    from .decorators.op_decorator import DecoratedOpFunction\n\nOpComputeFunction: TypeAlias = Callable[..., Any]\n\n\n
[docs]@deprecated_param(\n param="version", breaking_version="2.0", additional_warn_text="Use `code_version` instead."\n)\nclass OpDefinition(NodeDefinition, IHasInternalInit):\n """Defines an op, the functional unit of user-defined computation.\n\n For more details on what a op is, refer to the\n `Ops Overview <../../concepts/ops-jobs-graphs/ops>`_ .\n\n End users should prefer the :func:`@op <op>` decorator. OpDefinition is generally intended to be\n used by framework authors or for programatically generated ops.\n\n Args:\n name (str): Name of the op. Must be unique within any :py:class:`GraphDefinition` or\n :py:class:`JobDefinition` that contains the op.\n input_defs (List[InputDefinition]): Inputs of the op.\n compute_fn (Callable): The core of the op, the function that performs the actual\n computation. The signature of this function is determined by ``input_defs``, and\n optionally, an injected first argument, ``context``, a collection of information\n provided by the system.\n\n This function will be coerced into a generator or an async generator, which must yield\n one :py:class:`Output` for each of the op's ``output_defs``, and additionally may\n yield other types of Dagster events, including :py:class:`AssetMaterialization` and\n :py:class:`ExpectationResult`.\n output_defs (List[OutputDefinition]): Outputs of the op.\n config_schema (Optional[ConfigSchema): The schema for the config. If set, Dagster will check\n that the config provided for the op matches this schema and will fail if it does not. If\n not set, Dagster will accept any config provided for the op.\n description (Optional[str]): Human-readable description of the op.\n tags (Optional[Dict[str, Any]]): Arbitrary metadata for the op. Frameworks may\n expect and require certain metadata to be attached to a op. Users should generally\n not set metadata directly. Values that are not strings will be json encoded and must meet\n the criteria that `json.loads(json.dumps(value)) == value`.\n required_resource_keys (Optional[Set[str]]): Set of resources handles required by this op.\n code_version (Optional[str]): (Experimental) Version of the code encapsulated by the op. If set,\n this is used as a default code version for all outputs.\n retry_policy (Optional[RetryPolicy]): The retry policy for this op.\n\n\n Examples:\n .. 
code-block:: python\n\n def _add_one(_context, inputs):\n yield Output(inputs["num"] + 1)\n\n OpDefinition(\n name="add_one",\n ins={"num": In(int)},\n outs={"result": Out(int)},\n compute_fn=_add_one,\n )\n """\n\n _compute_fn: Union[Callable[..., Any], "DecoratedOpFunction"]\n _config_schema: IDefinitionConfigSchema\n _required_resource_keys: AbstractSet[str]\n _version: Optional[str]\n _retry_policy: Optional[RetryPolicy]\n\n def __init__(\n self,\n compute_fn: Union[Callable[..., Any], "DecoratedOpFunction"],\n name: str,\n ins: Optional[Mapping[str, In]] = None,\n outs: Optional[Mapping[str, Out]] = None,\n description: Optional[str] = None,\n config_schema: Optional[Union[UserConfigSchema, IDefinitionConfigSchema]] = None,\n required_resource_keys: Optional[AbstractSet[str]] = None,\n tags: Optional[Mapping[str, Any]] = None,\n version: Optional[str] = None,\n retry_policy: Optional[RetryPolicy] = None,\n code_version: Optional[str] = None,\n ):\n from .decorators.op_decorator import DecoratedOpFunction, resolve_checked_op_fn_inputs\n\n ins = check.opt_mapping_param(ins, "ins")\n input_defs = [\n inp.to_definition(name) for name, inp in sorted(ins.items(), key=lambda inp: inp[0])\n ] # sort so that input definition order is deterministic\n\n if isinstance(compute_fn, DecoratedOpFunction):\n resolved_input_defs: Sequence[InputDefinition] = resolve_checked_op_fn_inputs(\n decorator_name="@op",\n fn_name=name,\n compute_fn=cast(DecoratedOpFunction, compute_fn),\n explicit_input_defs=input_defs,\n exclude_nothing=True,\n )\n self._compute_fn = compute_fn\n _validate_context_type_hint(self._compute_fn.decorated_fn)\n else:\n resolved_input_defs = input_defs\n self._compute_fn = check.callable_param(compute_fn, "compute_fn")\n _validate_context_type_hint(self._compute_fn)\n\n code_version = normalize_renamed_param(\n code_version,\n "code_version",\n version,\n "version",\n )\n self._version = code_version\n\n check.opt_mapping_param(outs, "outs")\n output_defs = _resolve_output_defs_from_outs(\n compute_fn=compute_fn, outs=outs, default_code_version=code_version\n )\n\n self._config_schema = convert_user_facing_definition_config_schema(config_schema)\n self._required_resource_keys = frozenset(\n check.opt_set_param(required_resource_keys, "required_resource_keys", of_type=str)\n )\n self._retry_policy = check.opt_inst_param(retry_policy, "retry_policy", RetryPolicy)\n\n positional_inputs = (\n self._compute_fn.positional_inputs()\n if isinstance(self._compute_fn, DecoratedOpFunction)\n else None\n )\n\n super(OpDefinition, self).__init__(\n name=name,\n input_defs=check.sequence_param(resolved_input_defs, "input_defs", InputDefinition),\n output_defs=check.sequence_param(output_defs, "output_defs", OutputDefinition),\n description=description,\n tags=check.opt_mapping_param(tags, "tags", key_type=str),\n positional_inputs=positional_inputs,\n )\n\n def dagster_internal_init(\n *,\n compute_fn: Union[Callable[..., Any], "DecoratedOpFunction"],\n name: str,\n ins: Optional[Mapping[str, In]],\n outs: Optional[Mapping[str, Out]],\n description: Optional[str],\n config_schema: Optional[Union[UserConfigSchema, IDefinitionConfigSchema]],\n required_resource_keys: Optional[AbstractSet[str]],\n tags: Optional[Mapping[str, Any]],\n version: Optional[str],\n retry_policy: Optional[RetryPolicy],\n code_version: Optional[str],\n ) -> "OpDefinition":\n return OpDefinition(\n compute_fn=compute_fn,\n name=name,\n ins=ins,\n outs=outs,\n description=description,\n config_schema=config_schema,\n 
required_resource_keys=required_resource_keys,\n tags=tags,\n version=version,\n retry_policy=retry_policy,\n code_version=code_version,\n )\n\n @property\n def node_type_str(self) -> str:\n return "op"\n\n @property\n def is_graph_job_op_node(self) -> bool:\n return True\n\n @public\n @property\n def name(self) -> str:\n """str: The name of this op."""\n return super(OpDefinition, self).name\n\n @public\n @property\n def ins(self) -> Mapping[str, In]:\n """Mapping[str, In]: A mapping from input name to the In object that represents that input."""\n return {input_def.name: In.from_definition(input_def) for input_def in self.input_defs}\n\n @public\n @property\n def outs(self) -> Mapping[str, Out]:\n """Mapping[str, Out]: A mapping from output name to the Out object that represents that output."""\n return {output_def.name: Out.from_definition(output_def) for output_def in self.output_defs}\n\n @property\n def compute_fn(self) -> Union[Callable[..., Any], "DecoratedOpFunction"]:\n return self._compute_fn\n\n @public\n @property\n def config_schema(self) -> IDefinitionConfigSchema:\n """IDefinitionConfigSchema: The config schema for this op."""\n return self._config_schema\n\n @public\n @property\n def required_resource_keys(self) -> AbstractSet[str]:\n """AbstractSet[str]: A set of keys for resources that must be provided to this OpDefinition."""\n return frozenset(self._required_resource_keys)\n\n @public\n @deprecated(breaking_version="2.0", additional_warn_text="Use `code_version` instead.")\n @property\n def version(self) -> Optional[str]:\n """str: Version of the code encapsulated by the op. If set, this is used as a\n default code version for all outputs.\n """\n return self._version\n\n @public\n @property\n def retry_policy(self) -> Optional[RetryPolicy]:\n """Optional[RetryPolicy]: The RetryPolicy for this op."""\n return self._retry_policy\n\n @public\n @property\n def tags(self) -> Mapping[str, str]:\n """Mapping[str, str]: The tags for this op."""\n return super(OpDefinition, self).tags\n\n
[docs] @public\n def alias(self, name: str) -> "PendingNodeInvocation":\n """Creates a copy of this op with the given name."""\n return super(OpDefinition, self).alias(name)
\n\n
[docs] @public\n def tag(self, tags: Optional[Mapping[str, str]]) -> "PendingNodeInvocation":\n """Creates a copy of this op with the given tags."""\n return super(OpDefinition, self).tag(tags)
\n\n
[docs] @public\n def with_hooks(self, hook_defs: AbstractSet[HookDefinition]) -> "PendingNodeInvocation":\n """Creates a copy of this op with the given hook definitions."""\n return super(OpDefinition, self).with_hooks(hook_defs)
\n\n
[docs] @public\n def with_retry_policy(self, retry_policy: RetryPolicy) -> "PendingNodeInvocation":\n """Creates a copy of this op with the given retry policy."""\n return super(OpDefinition, self).with_retry_policy(retry_policy)
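`alias`, `tag`, `with_hooks`, and `with_retry_policy` each return a `PendingNodeInvocation`, so they are normally chained at invocation time inside a graph or job. A hedged sketch of that usage, with invented op and job names:

.. code-block:: python

    from dagster import RetryPolicy, job, op

    @op
    def fetch_data():
        return [1, 2, 3]

    @op
    def store_data(data):
        print(len(data))

    @job
    def etl_job():
        # .alias() renames this occurrence of the op; .with_retry_policy()
        # attaches a retry policy to that invocation.
        data = fetch_data.alias("fetch_data_primary")()
        store_data.with_retry_policy(RetryPolicy(max_retries=3))(data)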
\n\n def is_from_decorator(self) -> bool:\n from .decorators.op_decorator import DecoratedOpFunction\n\n return isinstance(self._compute_fn, DecoratedOpFunction)\n\n def get_output_annotation(self) -> Any:\n if not self.is_from_decorator():\n raise DagsterInvalidInvocationError(\n f"Attempted to get output annotation for {self.node_type_str} '{self.name}', "\n "which was not constructed from a decorated function."\n )\n return cast("DecoratedOpFunction", self.compute_fn).get_output_annotation()\n\n def all_dagster_types(self) -> Iterator[DagsterType]:\n yield from self.all_input_output_types()\n\n def iterate_node_defs(self) -> Iterator[NodeDefinition]:\n yield self\n\n def iterate_op_defs(self) -> Iterator["OpDefinition"]:\n yield self\n\n T_Handle = TypeVar("T_Handle", bound=Optional[NodeHandle])\n\n def resolve_output_to_origin(\n self, output_name: str, handle: T_Handle\n ) -> Tuple[OutputDefinition, T_Handle]:\n return self.output_def_named(output_name), handle\n\n def resolve_output_to_origin_op_def(self, output_name: str) -> "OpDefinition":\n return self\n\n def get_inputs_must_be_resolved_top_level(\n self, asset_layer: "AssetLayer", handle: Optional[NodeHandle] = None\n ) -> Sequence[InputDefinition]:\n handle = cast(NodeHandle, check.inst_param(handle, "handle", NodeHandle))\n unresolveable_input_defs = []\n for input_def in self.input_defs:\n if (\n not input_def.dagster_type.loader\n and not input_def.dagster_type.kind == DagsterTypeKind.NOTHING\n and not input_def.has_default_value\n and not input_def.input_manager_key\n ):\n input_asset_key = asset_layer.asset_key_for_input(handle, input_def.name)\n # If input_asset_key is present, this input can be resolved\n # by a source asset, so input does not need to be resolved\n # at the top level.\n if input_asset_key:\n continue\n unresolveable_input_defs.append(input_def)\n return unresolveable_input_defs\n\n def input_has_default(self, input_name: str) -> bool:\n return self.input_def_named(input_name).has_default_value\n\n def default_value_for_input(self, input_name: str) -> InputDefinition:\n return self.input_def_named(input_name).default_value\n\n def input_supports_dynamic_output_dep(self, input_name: str) -> bool:\n return True\n\n def with_replaced_properties(\n self,\n name: str,\n ins: Optional[Mapping[str, In]] = None,\n outs: Optional[Mapping[str, Out]] = None,\n config_schema: Optional[IDefinitionConfigSchema] = None,\n description: Optional[str] = None,\n ) -> "OpDefinition":\n return OpDefinition.dagster_internal_init(\n name=name,\n ins=ins\n or {input_def.name: In.from_definition(input_def) for input_def in self.input_defs},\n outs=outs\n or {\n output_def.name: Out.from_definition(output_def) for output_def in self.output_defs\n },\n compute_fn=self.compute_fn,\n config_schema=config_schema or self.config_schema,\n description=description or self.description,\n tags=self.tags,\n required_resource_keys=self.required_resource_keys,\n code_version=self._version,\n retry_policy=self.retry_policy,\n version=None, # code_version replaces version\n )\n\n def copy_for_configured(\n self,\n name: str,\n description: Optional[str],\n config_schema: IDefinitionConfigSchema,\n ) -> "OpDefinition":\n return self.with_replaced_properties(\n name=name,\n description=description,\n config_schema=config_schema,\n )\n\n def get_resource_requirements(\n self,\n outer_context: Optional[object] = None,\n ) -> Iterator[ResourceRequirement]:\n # Outer requiree in this context is the outer-calling node handle. 
If not provided, then\n # just use the op name.\n outer_context = cast(Optional[Tuple[NodeHandle, Optional["AssetLayer"]]], outer_context)\n if not outer_context:\n handle = None\n asset_layer = None\n else:\n handle, asset_layer = outer_context\n node_description = f"{self.node_type_str} '{handle or self.name}'"\n for resource_key in sorted(list(self.required_resource_keys)):\n yield OpDefinitionResourceRequirement(\n key=resource_key, node_description=node_description\n )\n for input_def in self.input_defs:\n if input_def.input_manager_key:\n yield InputManagerRequirement(\n key=input_def.input_manager_key,\n node_description=node_description,\n input_name=input_def.name,\n root_input=False,\n )\n elif asset_layer and handle:\n input_asset_key = asset_layer.asset_key_for_input(handle, input_def.name)\n if input_asset_key:\n io_manager_key = asset_layer.io_manager_key_for_asset(input_asset_key)\n yield InputManagerRequirement(\n key=io_manager_key,\n node_description=node_description,\n input_name=input_def.name,\n root_input=False,\n )\n\n for output_def in self.output_defs:\n yield OutputManagerRequirement(\n key=output_def.io_manager_key,\n node_description=node_description,\n output_name=output_def.name,\n )\n\n def resolve_input_to_destinations(\n self, input_handle: NodeInputHandle\n ) -> Sequence[NodeInputHandle]:\n return [input_handle]\n\n def __call__(self, *args, **kwargs) -> Any:\n from .composition import is_in_composition\n\n if is_in_composition():\n return super(OpDefinition, self).__call__(*args, **kwargs)\n\n return direct_invocation_result(self, *args, **kwargs)
\n\n\ndef _resolve_output_defs_from_outs(\n compute_fn: Union[Callable[..., Any], "DecoratedOpFunction"],\n outs: Optional[Mapping[str, Out]],\n default_code_version: Optional[str],\n) -> Sequence[OutputDefinition]:\n from .decorators.op_decorator import DecoratedOpFunction\n\n if isinstance(compute_fn, DecoratedOpFunction):\n inferred_output_props = infer_output_props(compute_fn.decorated_fn)\n annotation = inferred_output_props.annotation\n description = inferred_output_props.description\n else:\n inferred_output_props = None\n annotation = inspect.Parameter.empty\n description = None\n\n if outs is None:\n return [OutputDefinition.create_from_inferred(inferred_output_props, default_code_version)]\n\n # If only a single entry has been provided to the out dict, then slurp the\n # annotation into the entry.\n if len(outs) == 1:\n name = next(iter(outs.keys()))\n only_out = outs[name]\n return [only_out.to_definition(annotation, name, description, default_code_version)]\n\n output_defs: List[OutputDefinition] = []\n\n # Introspection on type annotations is experimental, so checking\n # metaclass is the best we can do.\n if annotation != inspect.Parameter.empty and not get_origin(annotation) == tuple:\n raise DagsterInvariantViolationError(\n "Expected Tuple annotation for multiple outputs, but received non-tuple annotation."\n )\n if annotation != inspect.Parameter.empty and not len(get_args(annotation)) == len(outs):\n raise DagsterInvariantViolationError(\n "Expected Tuple annotation to have number of entries matching the "\n f"number of outputs for more than one output. Expected {len(outs)} "\n f"outputs but annotation has {len(get_args(annotation))}."\n )\n for idx, (name, cur_out) in enumerate(outs.items()):\n annotation_type = (\n get_args(annotation)[idx]\n if annotation != inspect.Parameter.empty\n else inspect.Parameter.empty\n )\n # Don't provide description when using multiple outputs. Introspection\n # is challenging when faced with multiple inputs.\n output_defs.append(\n cur_out.to_definition(\n annotation_type, name=name, description=None, code_version=default_code_version\n )\n )\n\n return output_defs\n\n\ndef _validate_context_type_hint(fn):\n from inspect import _empty as EmptyAnnotation\n\n from dagster._core.decorator_utils import get_function_params\n from dagster._core.definitions.decorators.op_decorator import is_context_provided\n from dagster._core.execution.context.compute import AssetExecutionContext, OpExecutionContext\n\n params = get_function_params(fn)\n if is_context_provided(params):\n if (\n params[0].annotation is not AssetExecutionContext\n and params[0].annotation is not OpExecutionContext\n and params[0].annotation is not EmptyAnnotation\n ):\n raise DagsterInvalidDefinitionError(\n f"Cannot annotate `context` parameter with type {params[0].annotation}. `context`"\n " must be annotated with AssetExecutionContext, OpExecutionContext, or left blank."\n )\n
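`_validate_context_type_hint` only accepts a `context` parameter annotated with `AssetExecutionContext`, `OpExecutionContext`, or left unannotated. A short sketch of the accepted forms (op names are arbitrary):

.. code-block:: python

    from dagster import OpExecutionContext, op

    @op
    def no_context_op():
        return 1

    @op
    def untyped_context_op(context):
        context.log.info("an unannotated context parameter is accepted")

    @op
    def typed_context_op(context: OpExecutionContext):
        context.log.info("an OpExecutionContext annotation is accepted")

    # Annotating `context` with any other type raises
    # DagsterInvalidDefinitionError at definition time.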
", "current_page_name": "_modules/dagster/_core/definitions/op_definition", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.op_definition"}, "output": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.output

\nimport inspect\nfrom typing import (\n    Any,\n    NamedTuple,\n    Optional,\n    Type,\n    TypeVar,\n    Union,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, deprecated_param\nfrom dagster._core.definitions.metadata import (\n    ArbitraryMetadataMapping,\n    MetadataUserInput,\n    normalize_metadata,\n)\nfrom dagster._core.errors import DagsterError, DagsterInvalidDefinitionError\nfrom dagster._core.types.dagster_type import (\n    DagsterType,\n    is_dynamic_output_annotation,\n    resolve_dagster_type,\n)\n\nfrom .inference import InferredOutputProps\nfrom .input import NoValueSentinel\nfrom .utils import DEFAULT_IO_MANAGER_KEY, DEFAULT_OUTPUT, check_valid_name\n\nTOutputDefinition = TypeVar("TOutputDefinition", bound="OutputDefinition")\nTOut = TypeVar("TOut", bound="Out")\n\n\nclass OutputDefinition:\n    """Defines an output from an op's compute function.\n\n    Ops can have multiple outputs, in which case outputs cannot be anonymous.\n\n    Many ops have only one output, in which case the user can provide a single output definition\n    that will be given the default name, "result".\n\n    Output definitions may be typed using the Dagster type system.\n\n    Args:\n        dagster_type (Optional[Union[Type, DagsterType]]]): The type of this output.\n            Users should provide the Python type of the objects that they expect the op to yield\n            for this output, or a :py:class:`DagsterType` that defines a runtime check that they\n            want to be run on this output. Defaults to :py:class:`Any`.\n        name (Optional[str]): Name of the output. (default: "result")\n        description (Optional[str]): Human-readable description of the output.\n        is_required (Optional[bool]): Whether the presence of this field is required. (default: True)\n        io_manager_key (Optional[str]): The resource key of the IOManager used for storing this\n            output and loading it in downstream steps (default: "io_manager").\n        metadata (Optional[Dict[str, Any]]): A dict of the metadata for the output.\n            For example, users can provide a file path if the data object will be stored in a\n            filesystem, or provide information of a database table when it is going to load the data\n            into the table.\n        code_version (Optional[str]): (Experimental) Version of the code that generates this output. 
In\n            general, versions should be set only for code that deterministically produces the same\n            output when given the same inputs.\n\n    """\n\n    def __init__(\n        self,\n        dagster_type=None,\n        name: Optional[str] = None,\n        description: Optional[str] = None,\n        is_required: bool = True,\n        io_manager_key: Optional[str] = None,\n        metadata: Optional[ArbitraryMetadataMapping] = None,\n        code_version: Optional[str] = None,\n        # make sure new parameters are updated in combine_with_inferred below\n    ):\n        self._name = check_valid_name(check.opt_str_param(name, "name", DEFAULT_OUTPUT))\n        self._type_not_set = dagster_type is None\n        self._dagster_type = resolve_dagster_type(dagster_type)\n        self._description = check.opt_str_param(description, "description")\n        self._is_required = check.bool_param(is_required, "is_required")\n        self._io_manager_key = check.opt_str_param(\n            io_manager_key,\n            "io_manager_key",\n            default=DEFAULT_IO_MANAGER_KEY,\n        )\n        self._code_version = check.opt_str_param(code_version, "code_version")\n        self._raw_metadata = check.opt_mapping_param(metadata, "metadata", key_type=str)\n        self._metadata = normalize_metadata(self._raw_metadata, allow_invalid=True)\n\n    @property\n    def name(self) -> str:\n        return self._name\n\n    @property\n    def dagster_type(self) -> DagsterType:\n        return self._dagster_type\n\n    @property\n    def description(self) -> Optional[str]:\n        return self._description\n\n    @property\n    def is_required(self) -> bool:\n        return self._is_required\n\n    @property\n    def io_manager_key(self) -> str:\n        return self._io_manager_key\n\n    @property\n    def code_version(self) -> Optional[str]:\n        return self._code_version\n\n    @property\n    def optional(self) -> bool:\n        return not self.is_required\n\n    @property\n    def metadata(self) -> ArbitraryMetadataMapping:\n        return self._raw_metadata\n\n    @property\n    def is_dynamic(self) -> bool:\n        return False\n\n    def mapping_from(\n        self, node_name: str, output_name: Optional[str] = None, from_dynamic_mapping: bool = False\n    ) -> "OutputMapping":\n        """Create an output mapping from an output of a child node.\n\n        In a GraphDefinition, you can use this helper function to construct\n        an :py:class:`OutputMapping` from the output of a child node.\n\n        Args:\n            node_name (str): The name of the child node from which to map this output.\n            output_name (str): The name of the child node's output from which to map this output.\n\n        Examples:\n            .. 
code-block:: python\n\n                output_mapping = OutputDefinition(Int).mapping_from('child_node')\n        """\n        return OutputMapping(\n            graph_output_name=self.name,\n            mapped_node_name=node_name,\n            mapped_node_output_name=output_name or DEFAULT_OUTPUT,\n            graph_output_description=self.description,\n            dagster_type=self.dagster_type,\n            from_dynamic_mapping=from_dynamic_mapping or self.is_dynamic,\n        )\n\n    @staticmethod\n    def create_from_inferred(\n        inferred: Optional[InferredOutputProps], code_version: Optional[str] = None\n    ) -> "OutputDefinition":\n        if not inferred:\n            return OutputDefinition(code_version=code_version)\n        if is_dynamic_output_annotation(inferred.annotation):\n            return DynamicOutputDefinition(\n                dagster_type=_checked_inferred_type(inferred.annotation),\n                description=inferred.description,\n                code_version=code_version,\n            )\n        else:\n            return OutputDefinition(\n                dagster_type=_checked_inferred_type(inferred.annotation),\n                description=inferred.description,\n                code_version=code_version,\n            )\n\n    def combine_with_inferred(\n        self: TOutputDefinition, inferred: InferredOutputProps\n    ) -> TOutputDefinition:\n        dagster_type = self.dagster_type\n        if self._type_not_set:\n            dagster_type = _checked_inferred_type(inferred.annotation)\n        if self.description is None:\n            description = inferred.description\n        else:\n            description = self.description\n\n        return self.__class__(\n            name=self.name,\n            dagster_type=dagster_type,\n            description=description,\n            is_required=self.is_required,\n            io_manager_key=self.io_manager_key,\n            metadata=self._metadata,\n        )\n\n\ndef _checked_inferred_type(inferred: Any) -> DagsterType:\n    try:\n        if inferred == inspect.Parameter.empty:\n            return resolve_dagster_type(None)\n        elif inferred is None:\n            # When inferred.annotation is None, it means someone explicitly put "None" as the\n            # annotation, so want to map it to a DagsterType that checks for the None type\n            return resolve_dagster_type(type(None))\n        else:\n            return resolve_dagster_type(inferred)\n\n    except DagsterError as e:\n        raise DagsterInvalidDefinitionError(\n            f"Problem using type '{inferred}' from return type annotation, correct the issue "\n            "or explicitly set the dagster_type via Out()."\n        ) from e\n\n\nclass DynamicOutputDefinition(OutputDefinition):\n    """Variant of :py:class:`OutputDefinition <dagster.OutputDefinition>` for an\n    output that will dynamically alter the graph at runtime.\n\n    When using in a composition function such as :py:func:`@job <dagster.job>`,\n    dynamic outputs must be used with either:\n\n    * ``map`` - clone downstream nodes for each separate :py:class:`DynamicOutput`\n    * ``collect`` - gather across all :py:class:`DynamicOutput` in to a list\n\n    Uses the same constructor as :py:class:`OutputDefinition <dagster.OutputDefinition>`\n\n        .. 
code-block:: python\n\n            @op(\n                config_schema={\n                    "path": Field(str, default_value=file_relative_path(__file__, "sample"))\n                },\n                output_defs=[DynamicOutputDefinition(str)],\n            )\n            def files_in_directory(context):\n                path = context.op_config["path"]\n                dirname, _, filenames = next(os.walk(path))\n                for file in filenames:\n                    yield DynamicOutput(os.path.join(dirname, file), mapping_key=_clean(file))\n\n            @job\n            def process_directory():\n                files = files_in_directory()\n\n                # use map to invoke an op on each dynamic output\n                file_results = files.map(process_file)\n\n                # use collect to gather the results in to a list\n                summarize_directory(file_results.collect())\n    """\n\n    @property\n    def is_dynamic(self) -> bool:\n        return True\n\n\nclass OutputPointer(NamedTuple("_OutputPointer", [("node_name", str), ("output_name", str)])):\n    def __new__(cls, node_name: str, output_name: Optional[str] = None):\n        return super(OutputPointer, cls).__new__(\n            cls,\n            check.str_param(node_name, "node_name"),\n            check.opt_str_param(output_name, "output_name", DEFAULT_OUTPUT),\n        )\n\n\n
[docs]@deprecated_param(\n param="dagster_type",\n breaking_version="2.0",\n additional_warn_text="Any defined `dagster_type` should come from the underlying op `Output`.",\n # Disabling warning here since we're passing this internally and I'm not sure whether it is\n # actually used or discarded.\n emit_runtime_warning=False,\n)\nclass OutputMapping(NamedTuple):\n """Defines an output mapping for a graph.\n\n Args:\n graph_output_name (str): Name of the output in the graph being mapped to.\n mapped_node_name (str): Named of the node (op/graph) that the output is being mapped from.\n mapped_node_output_name (str): Name of the output in the node (op/graph) that is being mapped from.\n graph_output_description (Optional[str]): A description of the output in the graph being mapped from.\n from_dynamic_mapping (bool): Set to true if the node being mapped to is a mapped dynamic node.\n dagster_type (Optional[DagsterType]): The dagster type of the graph's output being mapped to.\n\n Examples:\n .. code-block:: python\n\n from dagster import OutputMapping, GraphDefinition, op, graph, GraphOut\n\n @op\n def emit_five(x):\n return 5\n\n # The following two graph definitions are equivalent\n GraphDefinition(\n name="the_graph",\n node_defs=[emit_five],\n output_mappings=[\n OutputMapping(\n graph_output_name="result", # Default output name\n mapped_node_name="emit_five",\n mapped_node_output_name="result"\n )\n ]\n )\n\n @graph(out=GraphOut())\n def the_graph:\n return emit_five()\n """\n\n graph_output_name: str\n mapped_node_name: str\n mapped_node_output_name: str\n graph_output_description: Optional[str] = None\n dagster_type: Optional[DagsterType] = None\n from_dynamic_mapping: bool = False\n\n @property\n def maps_from(self) -> OutputPointer:\n return OutputPointer(self.mapped_node_name, self.mapped_node_output_name)\n\n def get_definition(self, is_dynamic: bool) -> "OutputDefinition":\n check.invariant(not is_dynamic or self.from_dynamic_mapping)\n is_dynamic = is_dynamic or self.from_dynamic_mapping\n klass = DynamicOutputDefinition if is_dynamic else OutputDefinition\n return klass(\n name=self.graph_output_name,\n description=self.graph_output_description,\n dagster_type=self.dagster_type,\n )
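In decorated code, the equivalent of the explicit `OutputMapping` in the docstring above is simply returning an inner op's result from a graph. A sketch with invented names:

.. code-block:: python

    from dagster import GraphOut, graph, op

    @op
    def emit_five() -> int:
        return 5

    @graph(out=GraphOut())
    def the_graph():
        # The op's "result" output becomes the graph's "result" output,
        # i.e. an implicit OutputMapping.
        return emit_five()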
\n\n\n
[docs]class Out(\n NamedTuple(\n "_Out",\n [\n ("dagster_type", PublicAttr[Union[DagsterType, Type[NoValueSentinel]]]),\n ("description", PublicAttr[Optional[str]]),\n ("is_required", PublicAttr[bool]),\n ("io_manager_key", PublicAttr[str]),\n ("metadata", PublicAttr[Optional[MetadataUserInput]]),\n ("code_version", PublicAttr[Optional[str]]),\n ],\n )\n):\n """Defines an output from an op's compute function.\n\n Ops can have multiple outputs, in which case outputs cannot be anonymous.\n\n Many ops have only one output, in which case the user can provide a single output definition\n that will be given the default name, "result".\n\n Outs may be typed using the Dagster type system.\n\n Args:\n dagster_type (Optional[Union[Type, DagsterType]]]):\n The type of this output. Should only be set if the correct type can not\n be inferred directly from the type signature of the decorated function.\n description (Optional[str]): Human-readable description of the output.\n is_required (bool): Whether the presence of this field is required. (default: True)\n io_manager_key (Optional[str]): The resource key of the output manager used for this output.\n (default: "io_manager").\n metadata (Optional[Dict[str, Any]]): A dict of the metadata for the output.\n For example, users can provide a file path if the data object will be stored in a\n filesystem, or provide information of a database table when it is going to load the data\n into the table.\n code_version (Optional[str]): (Experimental) Version of the code that generates this output. In\n general, versions should be set only for code that deterministically produces the same\n output when given the same inputs.\n """\n\n def __new__(\n cls,\n dagster_type: Union[Type, DagsterType] = NoValueSentinel,\n description: Optional[str] = None,\n is_required: bool = True,\n io_manager_key: Optional[str] = None,\n metadata: Optional[ArbitraryMetadataMapping] = None,\n code_version: Optional[str] = None,\n # make sure new parameters are updated in combine_with_inferred below\n ):\n return super(Out, cls).__new__(\n cls,\n dagster_type=(\n NoValueSentinel\n if dagster_type is NoValueSentinel\n else resolve_dagster_type(dagster_type)\n ),\n description=description,\n is_required=check.bool_param(is_required, "is_required"),\n io_manager_key=check.opt_str_param(\n io_manager_key, "io_manager_key", default=DEFAULT_IO_MANAGER_KEY\n ),\n metadata=metadata,\n code_version=code_version,\n )\n\n @classmethod\n def from_definition(cls, output_def: "OutputDefinition"):\n klass = Out if not output_def.is_dynamic else DynamicOut\n return klass(\n dagster_type=output_def.dagster_type,\n description=output_def.description,\n is_required=output_def.is_required,\n io_manager_key=output_def.io_manager_key,\n metadata=output_def.metadata,\n code_version=output_def.code_version,\n )\n\n def to_definition(\n self,\n annotation_type: type,\n name: Optional[str],\n description: Optional[str],\n code_version: Optional[str],\n ) -> "OutputDefinition":\n dagster_type = (\n self.dagster_type\n if self.dagster_type is not NoValueSentinel\n else _checked_inferred_type(annotation_type)\n )\n\n klass = OutputDefinition if not self.is_dynamic else DynamicOutputDefinition\n\n return klass(\n dagster_type=dagster_type,\n name=name,\n description=self.description or description,\n is_required=self.is_required,\n io_manager_key=self.io_manager_key,\n metadata=self.metadata,\n code_version=self.code_version or code_version,\n )\n\n @property\n def is_dynamic(self) -> bool:\n return False
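A brief sketch of how `Out` is typically used through the `@op` decorator, including the multi-output case where the return annotation is a tuple; the names are hypothetical:

.. code-block:: python

    from typing import Tuple

    from dagster import Out, op

    @op(out=Out(int, description="A single, typed output named 'result'."))
    def one_output() -> int:
        return 1

    @op(out={"count": Out(int), "label": Out(str)})
    def two_outputs() -> Tuple[int, str]:
        # With multiple outs, the tuple entries are matched to the outs in order.
        return 3, "widgets"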
\n\n\n
[docs]class DynamicOut(Out):\n """Variant of :py:class:`Out <dagster.Out>` for an output that will dynamically alter the graph at\n runtime.\n\n When using in a composition function such as :py:func:`@graph <dagster.graph>`,\n dynamic outputs must be used with either\n\n * ``map`` - clone downstream ops for each separate :py:class:`DynamicOut`\n * ``collect`` - gather across all :py:class:`DynamicOut` in to a list\n\n Uses the same constructor as :py:class:`Out <dagster.Out>`\n\n .. code-block:: python\n\n @op(\n config_schema={\n "path": Field(str, default_value=file_relative_path(__file__, "sample"))\n },\n out=DynamicOut(str),\n )\n def files_in_directory(context):\n path = context.op_config["path"]\n dirname, _, filenames = next(os.walk(path))\n for file in filenames:\n yield DynamicOutput(os.path.join(dirname, file), mapping_key=_clean(file))\n\n @job\n def process_directory():\n files = files_in_directory()\n\n # use map to invoke an op on each dynamic output\n file_results = files.map(process_file)\n\n # use collect to gather the results in to a list\n summarize_directory(file_results.collect())\n """\n\n def to_definition(\n self,\n annotation_type: type,\n name: Optional[str],\n description: Optional[str],\n code_version: Optional[str],\n ) -> "OutputDefinition":\n dagster_type = (\n self.dagster_type\n if self.dagster_type is not NoValueSentinel\n else _checked_inferred_type(annotation_type)\n )\n\n return DynamicOutputDefinition(\n dagster_type=dagster_type,\n name=name,\n description=self.description or description,\n is_required=self.is_required,\n io_manager_key=self.io_manager_key,\n metadata=self.metadata,\n code_version=self.code_version or code_version,\n )\n\n @property\n def is_dynamic(self) -> bool:\n return True
\n\n\n
[docs]class GraphOut(NamedTuple("_GraphOut", [("description", PublicAttr[Optional[str]])])):\n """Represents information about the outputs that a graph maps.\n\n Args:\n description (Optional[str]): Human-readable description of the output.\n """\n\n def __new__(cls, description: Optional[str] = None):\n return super(GraphOut, cls).__new__(cls, description=description)\n\n def to_definition(self, name: Optional[str]) -> "OutputDefinition":\n return OutputDefinition(name=name, description=self.description)
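`GraphOut` carries only a description; the output name comes from the key under which it is passed to `@graph`. A sketch, with invented names:

.. code-block:: python

    from dagster import GraphOut, graph, op

    @op
    def total() -> int:
        return 42

    @graph(out={"answer": GraphOut(description="The final total.")})
    def summarize():
        # The graph exposes the op's result under the output name "answer".
        return total()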
\n
", "current_page_name": "_modules/dagster/_core/definitions/output", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.output"}, "partition": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.partition

\nimport copy\nimport hashlib\nimport json\nfrom abc import ABC, abstractmethod\nfrom collections import defaultdict\nfrom datetime import (\n    datetime,\n    timedelta,\n)\nfrom enum import Enum\nfrom typing import (\n    Any,\n    Callable,\n    Dict,\n    Generic,\n    Iterable,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Type,\n    Union,\n    cast,\n)\n\nfrom dateutil.relativedelta import relativedelta\nfrom typing_extensions import TypeVar\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, deprecated, deprecated_param, public\nfrom dagster._core.definitions.partition_key_range import PartitionKeyRange\nfrom dagster._core.definitions.run_request import (\n    AddDynamicPartitionsRequest,\n    DeleteDynamicPartitionsRequest,\n)\nfrom dagster._core.instance import DagsterInstance, DynamicPartitionsStore\nfrom dagster._core.storage.tags import PARTITION_NAME_TAG, PARTITION_SET_TAG\nfrom dagster._serdes import whitelist_for_serdes\nfrom dagster._utils import xor\nfrom dagster._utils.cached_method import cached_method\nfrom dagster._utils.warnings import (\n    normalize_renamed_param,\n)\n\nfrom ..errors import (\n    DagsterInvalidDefinitionError,\n    DagsterInvalidDeserializationVersionError,\n    DagsterInvalidInvocationError,\n    DagsterUnknownPartitionError,\n)\nfrom .config import ConfigMapping\nfrom .utils import validate_tags\n\nDEFAULT_DATE_FORMAT = "%Y-%m-%d"\n\nT_cov = TypeVar("T_cov", default=Any, covariant=True)\nT_str = TypeVar("T_str", bound=str, default=str, covariant=True)\nT_PartitionsDefinition = TypeVar(\n    "T_PartitionsDefinition",\n    bound="PartitionsDefinition",\n    default="PartitionsDefinition",\n    covariant=True,\n)\n\n# In the Dagster UI users can select partition ranges following the format '2022-01-13...2022-01-14'\n# "..." is an invalid substring in partition keys\n# The other escape characters are characters that may not display in the Dagster UI.\nINVALID_PARTITION_SUBSTRINGS = ["...", "\\a", "\\b", "\\f", "\\n", "\\r", "\\t", "\\v", "\\0"]\n\n\n@deprecated(breaking_version="2.0", additional_warn_text="Use string partition keys instead.")\nclass Partition(Generic[T_cov]):\n    """A Partition represents a single slice of the entire set of a job's possible work. 
It consists\n    of a value, which is an object that represents that partition, and an optional name, which is\n    used to label the partition in a human-readable way.\n\n    Args:\n        value (Any): The object for this partition\n        name (str): Name for this partition\n    """\n\n    def __init__(self, value: Any, name: Optional[str] = None):\n        self._value = value\n        self._name = check.str_param(name or str(value), "name")\n\n    @property\n    def value(self) -> T_cov:\n        return self._value\n\n    @property\n    def name(self) -> str:\n        return self._name\n\n    def __eq__(self, other: object) -> bool:\n        if not isinstance(other, Partition):\n            return False\n        else:\n            return self.value == other.value and self.name == other.name\n\n\n@whitelist_for_serdes\nclass ScheduleType(Enum):\n    HOURLY = "HOURLY"\n    DAILY = "DAILY"\n    WEEKLY = "WEEKLY"\n    MONTHLY = "MONTHLY"\n\n    @property\n    def ordinal(self):\n        return {"HOURLY": 1, "DAILY": 2, "WEEKLY": 3, "MONTHLY": 4}[self.value]\n\n    @property\n    def delta(self):\n        if self == ScheduleType.HOURLY:\n            return timedelta(hours=1)\n        elif self == ScheduleType.DAILY:\n            return timedelta(days=1)\n        elif self == ScheduleType.WEEKLY:\n            return timedelta(weeks=1)\n        elif self == ScheduleType.MONTHLY:\n            return relativedelta(months=1)\n        else:\n            check.failed(f"Unexpected ScheduleType {self}")\n\n    def __gt__(self, other: "ScheduleType") -> bool:\n        check.inst(other, ScheduleType, "Cannot compare ScheduleType with non-ScheduleType")\n        return self.ordinal > other.ordinal\n\n    def __lt__(self, other: "ScheduleType") -> bool:\n        check.inst(other, ScheduleType, "Cannot compare ScheduleType with non-ScheduleType")\n        return self.ordinal < other.ordinal\n\n\n
[docs]class PartitionsDefinition(ABC, Generic[T_str]):\n """Defines a set of partitions, which can be attached to a software-defined asset or job.\n\n Abstract class with implementations for different kinds of partitions.\n """\n\n @property\n def partitions_subset_class(self) -> Type["PartitionsSubset[T_str]"]:\n return DefaultPartitionsSubset[T_str]\n\n
[docs] @abstractmethod\n @public\n def get_partition_keys(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Sequence[T_str]:\n """Returns a list of strings representing the partition keys of the PartitionsDefinition.\n\n Args:\n current_time (Optional[datetime]): A datetime object representing the current time, only\n applicable to time-based partitions definitions.\n dynamic_partitions_store (Optional[DynamicPartitionsStore]): The DynamicPartitionsStore\n object that is responsible for fetching dynamic partitions. Required when the\n partitions definition is a DynamicPartitionsDefinition with a name defined. Users\n can pass the DagsterInstance fetched via `context.instance` to this argument.\n\n Returns:\n Sequence[str]\n """\n ...
\n\n def __str__(self) -> str:\n joined_keys = ", ".join([f"'{key}'" for key in self.get_partition_keys()])\n return joined_keys\n\n def get_last_partition_key(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Optional[T_str]:\n partition_keys = self.get_partition_keys(current_time, dynamic_partitions_store)\n return partition_keys[-1] if partition_keys else None\n\n def get_first_partition_key(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Optional[T_str]:\n partition_keys = self.get_partition_keys(current_time, dynamic_partitions_store)\n return partition_keys[0] if partition_keys else None\n\n def get_partition_keys_in_range(\n self,\n partition_key_range: PartitionKeyRange,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Sequence[T_str]:\n keys_exist = {\n partition_key_range.start: self.has_partition_key(\n partition_key_range.start, dynamic_partitions_store=dynamic_partitions_store\n ),\n partition_key_range.end: self.has_partition_key(\n partition_key_range.end, dynamic_partitions_store=dynamic_partitions_store\n ),\n }\n if not all(keys_exist.values()):\n raise DagsterInvalidInvocationError(\n f"""Partition range {partition_key_range.start} to {partition_key_range.end} is\n not a valid range. Nonexistent partition keys:\n {list(key for key in keys_exist if keys_exist[key] is False)}"""\n )\n\n # in the simple case, simply return the single key in the range\n if partition_key_range.start == partition_key_range.end:\n return [cast(T_str, partition_key_range.start)]\n\n # defer this call as it is potentially expensive\n partition_keys = self.get_partition_keys(dynamic_partitions_store=dynamic_partitions_store)\n return partition_keys[\n partition_keys.index(partition_key_range.start) : partition_keys.index(\n partition_key_range.end\n )\n + 1\n ]\n\n def empty_subset(self) -> "PartitionsSubset[T_str]":\n return self.partitions_subset_class.empty_subset(self)\n\n def subset_with_partition_keys(\n self, partition_keys: Iterable[str]\n ) -> "PartitionsSubset[T_str]":\n return self.empty_subset().with_partition_keys(partition_keys)\n\n def subset_with_all_partitions(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> "PartitionsSubset[T_str]":\n return self.subset_with_partition_keys(\n self.get_partition_keys(\n current_time=current_time, dynamic_partitions_store=dynamic_partitions_store\n )\n )\n\n def deserialize_subset(self, serialized: str) -> "PartitionsSubset[T_str]":\n return self.partitions_subset_class.from_serialized(self, serialized)\n\n def can_deserialize_subset(\n self,\n serialized: str,\n serialized_partitions_def_unique_id: Optional[str],\n serialized_partitions_def_class_name: Optional[str],\n ) -> bool:\n return self.partitions_subset_class.can_deserialize(\n self,\n serialized,\n serialized_partitions_def_unique_id,\n serialized_partitions_def_class_name,\n )\n\n def get_serializable_unique_identifier(\n self, dynamic_partitions_store: Optional[DynamicPartitionsStore] = None\n ) -> str:\n return hashlib.sha1(\n json.dumps(\n self.get_partition_keys(dynamic_partitions_store=dynamic_partitions_store)\n ).encode("utf-8")\n ).hexdigest()\n\n def get_tags_for_partition_key(self, partition_key: str) -> Mapping[str, str]:\n tags = {PARTITION_NAME_TAG: partition_key}\n return tags\n\n def get_num_partitions(\n self,\n 
current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> int:\n return len(self.get_partition_keys(current_time, dynamic_partitions_store))\n\n def has_partition_key(\n self,\n partition_key: str,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> bool:\n return partition_key in self.get_partition_keys(\n current_time=current_time,\n dynamic_partitions_store=dynamic_partitions_store,\n )\n\n def validate_partition_key(\n self,\n partition_key: str,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> None:\n if not self.has_partition_key(partition_key, current_time, dynamic_partitions_store):\n raise DagsterUnknownPartitionError(\n f"Could not find a partition with key `{partition_key}`."\n )
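A small sketch exercising a few of the helpers above against a concrete `StaticPartitionsDefinition`; it assumes `PartitionKeyRange` is importable from the top-level `dagster` package, and the keys are arbitrary:

.. code-block:: python

    from dagster import PartitionKeyRange, StaticPartitionsDefinition

    letters = StaticPartitionsDefinition(["a", "b", "c", "d"])

    assert letters.has_partition_key("c")
    assert letters.get_first_partition_key() == "a"
    assert letters.get_last_partition_key() == "d"

    # Ranges are inclusive of both endpoints.
    assert letters.get_partition_keys_in_range(PartitionKeyRange("b", "d")) == [
        "b",
        "c",
        "d",
    ]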
\n\n\ndef raise_error_on_invalid_partition_key_substring(partition_keys: Sequence[str]) -> None:\n for partition_key in partition_keys:\n found_invalid_substrs = [\n invalid_substr\n for invalid_substr in INVALID_PARTITION_SUBSTRINGS\n if invalid_substr in partition_key\n ]\n if found_invalid_substrs:\n raise DagsterInvalidDefinitionError(\n f"{found_invalid_substrs} are invalid substrings in a partition key"\n )\n\n\ndef raise_error_on_duplicate_partition_keys(partition_keys: Sequence[str]) -> None:\n counts: Dict[str, int] = defaultdict(lambda: 0)\n for partition_key in partition_keys:\n counts[partition_key] += 1\n found_duplicates = [key for key in counts.keys() if counts[key] > 1]\n if found_duplicates:\n raise DagsterInvalidDefinitionError(\n "Partition keys must be unique. Duplicate instances of partition keys:"\n f" {found_duplicates}."\n )\n\n\n
[docs]class StaticPartitionsDefinition(PartitionsDefinition[str]):\n """A statically-defined set of partitions.\n\n Example:\n .. code-block:: python\n\n from dagster import StaticPartitionsDefinition, asset\n\n oceans_partitions_def = StaticPartitionsDefinition(\n ["arctic", "atlantic", "indian", "pacific", "southern"]\n )\n\n @asset(partitions_def=oceans_partitions_defs)\n def ml_model_for_each_ocean():\n ...\n """\n\n def __init__(self, partition_keys: Sequence[str]):\n check.sequence_param(partition_keys, "partition_keys", of_type=str)\n\n raise_error_on_invalid_partition_key_substring(partition_keys)\n raise_error_on_duplicate_partition_keys(partition_keys)\n\n self._partition_keys = partition_keys\n\n
[docs] @public\n def get_partition_keys(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Sequence[str]:\n """Returns a list of strings representing the partition keys of the PartitionsDefinition.\n\n Args:\n current_time (Optional[datetime]): A datetime object representing the current time, only\n applicable to time-based partitions definitions.\n dynamic_partitions_store (Optional[DynamicPartitionsStore]): The DynamicPartitionsStore\n object that is responsible for fetching dynamic partitions. Only applicable to\n DynamicPartitionsDefinitions.\n\n Returns:\n Sequence[str]\n\n """\n return self._partition_keys
\n\n def __hash__(self):\n return hash(self.__repr__())\n\n def __eq__(self, other) -> bool:\n return isinstance(other, StaticPartitionsDefinition) and (\n self is other or self._partition_keys == other.get_partition_keys()\n )\n\n def __repr__(self) -> str:\n return f"{type(self).__name__}(partition_keys={self._partition_keys})"\n\n def get_num_partitions(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> int:\n # We don't currently throw an error when a duplicate partition key is defined\n # in a static partitions definition, though we will at 1.3.0.\n # This ensures that partition counts are correct in the Dagster UI.\n return len(set(self.get_partition_keys(current_time, dynamic_partitions_store)))
\n\n\nclass CachingDynamicPartitionsLoader(DynamicPartitionsStore):\n """A batch loader that caches the partition keys for a given dynamic partitions definition,\n to avoid repeated calls to the database for the same partitions definition.\n """\n\n def __init__(self, instance: DagsterInstance):\n self._instance = instance\n\n @cached_method\n def get_dynamic_partitions(self, partitions_def_name: str) -> Sequence[str]:\n return self._instance.get_dynamic_partitions(partitions_def_name)\n\n @cached_method\n def has_dynamic_partition(self, partitions_def_name: str, partition_key: str) -> bool:\n return self._instance.has_dynamic_partition(partitions_def_name, partition_key)\n\n\n
[docs]@deprecated_param(\n param="partition_fn",\n breaking_version="2.0",\n additional_warn_text="Provide partition definition name instead.",\n)\nclass DynamicPartitionsDefinition(\n PartitionsDefinition,\n NamedTuple(\n "_DynamicPartitionsDefinition",\n [\n (\n "partition_fn",\n PublicAttr[\n Optional[\n Callable[[Optional[datetime]], Union[Sequence[Partition], Sequence[str]]]\n ]\n ],\n ),\n ("name", PublicAttr[Optional[str]]),\n ],\n ),\n):\n """A partitions definition whose partition keys can be dynamically added and removed.\n\n This is useful for cases where the set of partitions is not known at definition time,\n but is instead determined at runtime.\n\n Partitions can be added and removed using `instance.add_dynamic_partitions` and\n `instance.delete_dynamic_partition` methods.\n\n Args:\n name (Optional[str]): The name of the partitions definition.\n partition_fn (Optional[Callable[[Optional[datetime]], Union[Sequence[Partition], Sequence[str]]]]):\n A function that returns the current set of partitions. This argument is deprecated and\n will be removed in 2.0.0.\n\n Examples:\n .. code-block:: python\n\n fruits = DynamicPartitionsDefinition(name="fruits")\n\n @sensor(job=my_job)\n def my_sensor(context):\n return SensorResult(\n run_requests=[RunRequest(partition_key="apple")],\n dynamic_partitions_requests=[fruits.build_add_request(["apple"])]\n )\n """\n\n def __new__(\n cls,\n partition_fn: Optional[\n Callable[[Optional[datetime]], Union[Sequence[Partition], Sequence[str]]]\n ] = None,\n name: Optional[str] = None,\n ):\n partition_fn = check.opt_callable_param(partition_fn, "partition_fn")\n name = check.opt_str_param(name, "name")\n\n if partition_fn is None and name is None:\n raise DagsterInvalidDefinitionError(\n "Must provide either partition_fn or name to DynamicPartitionsDefinition."\n )\n\n if partition_fn and name:\n raise DagsterInvalidDefinitionError(\n "Cannot provide both partition_fn and name to DynamicPartitionsDefinition."\n )\n\n return super(DynamicPartitionsDefinition, cls).__new__(\n cls,\n partition_fn=check.opt_callable_param(partition_fn, "partition_fn"),\n name=check.opt_str_param(name, "name"),\n )\n\n def _validated_name(self) -> str:\n if self.name is None:\n check.failed(\n "Dynamic partitions definition must have a name to fetch dynamic partitions"\n )\n return self.name\n\n def __eq__(self, other):\n return (\n isinstance(other, DynamicPartitionsDefinition)\n and self.name == other.name\n and self.partition_fn == other.partition_fn\n )\n\n def __hash__(self):\n return hash(tuple(self.__repr__()))\n\n def __str__(self) -> str:\n if self.name:\n return f'Dynamic partitions: "{self._validated_name()}"'\n else:\n return super().__str__()\n\n
[docs] @public\n def get_partition_keys(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Sequence[str]:\n """Returns a list of strings representing the partition keys of the\n PartitionsDefinition.\n\n Args:\n current_time (Optional[datetime]): A datetime object representing the current time, only\n applicable to time-based partitions definitions.\n dynamic_partitions_store (Optional[DynamicPartitionsStore]): The DynamicPartitionsStore\n object that is responsible for fetching dynamic partitions. Required when the\n partitions definition is a DynamicPartitionsDefinition with a name defined. Users\n can pass the DagsterInstance fetched via `context.instance` to this argument.\n\n Returns:\n Sequence[str]\n """\n if self.partition_fn:\n partitions = self.partition_fn(current_time)\n if all(isinstance(partition, Partition) for partition in partitions):\n return [partition.name for partition in partitions] # type: ignore # (illegible conditional)\n else:\n return partitions # type: ignore # (illegible conditional)\n else:\n check.opt_inst_param(\n dynamic_partitions_store, "dynamic_partitions_store", DynamicPartitionsStore\n )\n\n if dynamic_partitions_store is None:\n check.failed(\n "The instance is not available to load partitions. You may be seeing this error"\n " when using dynamic partitions with a version of dagster-webserver or"\n " dagster-cloud that is older than 1.1.18."\n )\n\n return dynamic_partitions_store.get_dynamic_partitions(\n partitions_def_name=self._validated_name()\n )
\n\n def has_partition_key(\n self,\n partition_key: str,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> bool:\n if self.partition_fn:\n return partition_key in self.get_partition_keys(current_time)\n else:\n if dynamic_partitions_store is None:\n check.failed(\n "The instance is not available to load partitions. You may be seeing this error"\n " when using dynamic partitions with a version of dagster-webserver or"\n " dagster-cloud that is older than 1.1.18."\n )\n\n return dynamic_partitions_store.has_dynamic_partition(\n partitions_def_name=self._validated_name(), partition_key=partition_key\n )\n\n def build_add_request(self, partition_keys: Sequence[str]) -> AddDynamicPartitionsRequest:\n check.sequence_param(partition_keys, "partition_keys", of_type=str)\n validated_name = self._validated_name()\n return AddDynamicPartitionsRequest(validated_name, partition_keys)\n\n def build_delete_request(self, partition_keys: Sequence[str]) -> DeleteDynamicPartitionsRequest:\n check.sequence_param(partition_keys, "partition_keys", of_type=str)\n validated_name = self._validated_name()\n return DeleteDynamicPartitionsRequest(validated_name, partition_keys)
\n\n\n
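# --- Illustrative usage sketch (not part of the generated source above). ---
# A name-based DynamicPartitionsDefinition holds no keys itself; a
# DynamicPartitionsStore (e.g. the DagsterInstance, per the docstring above)
# supplies them. Using an ephemeral in-memory instance here is an assumption
# for illustration; in a sensor or asset you would use context.instance.
from dagster import DagsterInstance, DynamicPartitionsDefinition

fruits = DynamicPartitionsDefinition(name="fruits")

instance = DagsterInstance.ephemeral()
instance.add_dynamic_partitions("fruits", ["apple", "mango"])

assert set(fruits.get_partition_keys(dynamic_partitions_store=instance)) == {"apple", "mango"}
assert fruits.has_partition_key("mango", dynamic_partitions_store=instance)

# build_add_request / build_delete_request create request objects that a sensor
# can return via SensorResult(dynamic_partitions_requests=[...]).
add_request = fruits.build_add_request(["pear"])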
[docs]@deprecated_param(\n param="run_config_for_partition_fn",\n breaking_version="2.0",\n additional_warn_text="Use `run_config_for_partition_key_fn` instead.",\n)\n@deprecated_param(\n param="tags_for_partition_fn",\n breaking_version="2.0",\n additional_warn_text="Use `tags_for_partition_key_fn` instead.",\n)\nclass PartitionedConfig(Generic[T_PartitionsDefinition]):\n """Defines a way of configuring a job where the job can be run on one of a discrete set of\n partitions, and each partition corresponds to run configuration for the job.\n\n Setting PartitionedConfig as the config for a job allows you to launch backfills for that job\n and view the run history across partitions.\n """\n\n def __init__(\n self,\n partitions_def: T_PartitionsDefinition,\n run_config_for_partition_fn: Optional[Callable[[Partition], Mapping[str, Any]]] = None,\n decorated_fn: Optional[Callable[..., Mapping[str, Any]]] = None,\n tags_for_partition_fn: Optional[Callable[[Partition[Any]], Mapping[str, str]]] = None,\n run_config_for_partition_key_fn: Optional[Callable[[str], Mapping[str, Any]]] = None,\n tags_for_partition_key_fn: Optional[Callable[[str], Mapping[str, str]]] = None,\n ):\n self._partitions = check.inst_param(partitions_def, "partitions_def", PartitionsDefinition)\n self._decorated_fn = decorated_fn\n\n check.invariant(\n xor(run_config_for_partition_fn, run_config_for_partition_key_fn),\n "Must provide exactly one of run_config_for_partition_fn or"\n " run_config_for_partition_key_fn",\n )\n check.invariant(\n not (tags_for_partition_fn and tags_for_partition_key_fn),\n "Cannot provide both of tags_for_partition_fn or tags_for_partition_key_fn",\n )\n\n self._run_config_for_partition_fn = check.opt_callable_param(\n run_config_for_partition_fn, "run_config_for_partition_fn"\n )\n self._run_config_for_partition_key_fn = check.opt_callable_param(\n run_config_for_partition_key_fn, "run_config_for_partition_key_fn"\n )\n self._tags_for_partition_fn = check.opt_callable_param(\n tags_for_partition_fn, "tags_for_partition_fn"\n )\n self._tags_for_partition_key_fn = check.opt_callable_param(\n tags_for_partition_key_fn, "tags_for_partition_key_fn"\n )\n\n @public\n @property\n def partitions_def(\n self,\n ) -> T_PartitionsDefinition:\n """T_PartitionsDefinition: The partitions definition associated with this PartitionedConfig."""\n return self._partitions\n\n @deprecated(\n breaking_version="2.0",\n additional_warn_text="Use `run_config_for_partition_key_fn` instead.",\n )\n @public\n @property\n def run_config_for_partition_fn(\n self,\n ) -> Optional[Callable[[Partition], Mapping[str, Any]]]:\n """Optional[Callable[[Partition], Mapping[str, Any]]]: A function that accepts a partition\n and returns a dictionary representing the config to attach to runs for that partition.\n Deprecated as of 1.3.3.\n """\n return self._run_config_for_partition_fn\n\n @public\n @property\n def run_config_for_partition_key_fn(\n self,\n ) -> Optional[Callable[[str], Mapping[str, Any]]]:\n """Optional[Callable[[str], Mapping[str, Any]]]: A function that accepts a partition key\n and returns a dictionary representing the config to attach to runs for that partition.\n """\n\n @deprecated(\n breaking_version="2.0", additional_warn_text="Use `tags_for_partition_key_fn` instead."\n )\n @public\n @property\n def tags_for_partition_fn(self) -> Optional[Callable[[Partition], Mapping[str, str]]]:\n """Optional[Callable[[Partition], Mapping[str, str]]]: A function that\n accepts a partition and returns a dictionary of tags to 
attach to runs for\n that partition. Deprecated as of 1.3.3.\n """\n return self._tags_for_partition_fn\n\n @public\n @property\n def tags_for_partition_key_fn(\n self,\n ) -> Optional[Callable[[str], Mapping[str, str]]]:\n """Optional[Callable[[str], Mapping[str, str]]]: A function that\n accepts a partition key and returns a dictionary of tags to attach to runs for\n that partition.\n """\n return self._tags_for_partition_key_fn\n\n
[docs] @public\n def get_partition_keys(self, current_time: Optional[datetime] = None) -> Sequence[str]:\n """Returns a list of partition keys, representing the full set of partitions that\n config can be applied to.\n\n Args:\n current_time (Optional[datetime]): A datetime object representing the current time. Only\n applicable to time-based partitions definitions.\n\n Returns:\n Sequence[str]\n """\n return self.partitions_def.get_partition_keys(current_time)
\n\n # Assumes partition key already validated\n def get_run_config_for_partition_key(\n self,\n partition_key: str,\n ) -> Mapping[str, Any]:\n """Generates the run config corresponding to a partition key.\n\n Args:\n partition_key (str): the key for a partition that should be used to generate a run config.\n """\n # _run_config_for_partition_fn is deprecated, we can remove this branching logic in 2.0\n if self._run_config_for_partition_fn:\n run_config = self._run_config_for_partition_fn(Partition(partition_key))\n elif self._run_config_for_partition_key_fn:\n run_config = self._run_config_for_partition_key_fn(partition_key)\n else:\n check.failed("Unreachable.") # one of the above funcs always defined\n return copy.deepcopy(run_config)\n\n # Assumes partition key already validated\n def get_tags_for_partition_key(\n self,\n partition_key: str,\n job_name: Optional[str] = None,\n ) -> Mapping[str, str]:\n from dagster._core.host_representation.external_data import (\n external_partition_set_name_for_job_name,\n )\n\n # _tags_for_partition_fn is deprecated, we can remove this branching logic in 2.0\n if self._tags_for_partition_fn:\n user_tags = self._tags_for_partition_fn(Partition(partition_key))\n elif self._tags_for_partition_key_fn:\n user_tags = self._tags_for_partition_key_fn(partition_key)\n else:\n user_tags = {}\n user_tags = validate_tags(user_tags, allow_reserved_tags=False)\n\n system_tags = {\n **self.partitions_def.get_tags_for_partition_key(partition_key),\n **(\n # `PartitionSetDefinition` has been deleted but we still need to attach this special tag in\n # order for reexecution against partitions to work properly.\n {PARTITION_SET_TAG: external_partition_set_name_for_job_name(job_name)}\n if job_name\n else {}\n ),\n }\n\n return {**user_tags, **system_tags}\n\n @classmethod\n def from_flexible_config(\n cls,\n config: Optional[Union[ConfigMapping, Mapping[str, object], "PartitionedConfig"]],\n partitions_def: PartitionsDefinition,\n ) -> "PartitionedConfig":\n check.invariant(\n not isinstance(config, ConfigMapping),\n "Can't supply a ConfigMapping for 'config' when 'partitions_def' is supplied.",\n )\n\n if isinstance(config, PartitionedConfig):\n check.invariant(\n config.partitions_def == partitions_def,\n "Can't supply a PartitionedConfig for 'config' with a different "\n "PartitionsDefinition than supplied for 'partitions_def'.",\n )\n return config\n else:\n hardcoded_config = config if config else {}\n return cls(\n partitions_def,\n run_config_for_partition_key_fn=lambda _: cast(Mapping, hardcoded_config),\n )\n\n def __call__(self, *args, **kwargs):\n if self._decorated_fn is None:\n raise DagsterInvalidInvocationError(\n "Only PartitionedConfig objects created using one of the partitioned config "\n "decorators can be directly invoked."\n )\n else:\n return self._decorated_fn(*args, **kwargs)
\n\n\n
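# --- Illustrative usage sketch (not part of the generated source above). ---
# A PartitionedConfig built directly, mirroring what the decorators below
# produce. The op name and config shape are assumptions for the example.
from dagster import PartitionedConfig, StaticPartitionsDefinition

regions = StaticPartitionsDefinition(["us", "eu", "apac"])

region_run_config = PartitionedConfig(
    partitions_def=regions,
    run_config_for_partition_key_fn=lambda key: {"ops": {"load": {"config": {"region": key}}}},
)

assert region_run_config.get_partition_keys() == ["us", "eu", "apac"]
# Assumes the key was already validated, as noted in the method above.
assert region_run_config.get_run_config_for_partition_key("eu") == {
    "ops": {"load": {"config": {"region": "eu"}}}
}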
[docs]@deprecated_param(\n param="tags_for_partition_fn",\n breaking_version="2.0",\n additional_warn_text="Use tags_for_partition_key_fn instead.",\n)\ndef static_partitioned_config(\n partition_keys: Sequence[str],\n tags_for_partition_fn: Optional[Callable[[str], Mapping[str, str]]] = None,\n tags_for_partition_key_fn: Optional[Callable[[str], Mapping[str, str]]] = None,\n) -> Callable[[Callable[[str], Mapping[str, Any]]], PartitionedConfig[StaticPartitionsDefinition]]:\n """Creates a static partitioned config for a job.\n\n The provided partition_keys is a static list of strings identifying the set of partitions. The\n list of partitions is static, so while the run config returned by the decorated function may\n change over time, the list of valid partition keys does not.\n\n This has performance advantages over `dynamic_partitioned_config` in terms of loading different\n partition views in the Dagster UI.\n\n The decorated function takes in a partition key and returns a valid run config for a particular\n target job.\n\n Args:\n partition_keys (Sequence[str]): A list of valid partition keys, which serve as the range of\n values that can be provided to the decorated run config function.\n tags_for_partition_fn (Optional[Callable[[str], Mapping[str, str]]]): A function that\n accepts a partition key and returns a dictionary of tags to attach to runs for that\n partition.\n tags_for_partition_key_fn (Optional[Callable[[str], Mapping[str, str]]]): A function that\n accepts a partition key and returns a dictionary of tags to attach to runs for that\n partition.\n\n Returns:\n PartitionedConfig\n """\n check.sequence_param(partition_keys, "partition_keys", str)\n\n tags_for_partition_key_fn = normalize_renamed_param(\n tags_for_partition_key_fn,\n "tags_for_partition_key_fn",\n tags_for_partition_fn,\n "tags_for_partition_fn",\n )\n\n def inner(\n fn: Callable[[str], Mapping[str, Any]]\n ) -> PartitionedConfig[StaticPartitionsDefinition]:\n return PartitionedConfig(\n partitions_def=StaticPartitionsDefinition(partition_keys),\n run_config_for_partition_key_fn=fn,\n decorated_fn=fn,\n tags_for_partition_key_fn=tags_for_partition_key_fn,\n )\n\n return inner
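# --- Illustrative usage sketch (not part of the generated source above). ---
# Wiring static_partitioned_config to a job. The op/job names and config
# schema are assumptions for the example.
from dagster import job, op, static_partitioned_config

REGIONS = ["us", "eu", "apac"]


@static_partitioned_config(partition_keys=REGIONS)
def region_config(partition_key: str):
    # The decorated function maps a partition key to run config for the job.
    return {"ops": {"load_region": {"config": {"region": partition_key}}}}


@op(config_schema={"region": str})
def load_region(context):
    context.log.info(f"loading {context.op_config['region']}")


@job(config=region_config)
def region_job():
    load_region()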
\n\n\ndef partitioned_config(\n partitions_def: PartitionsDefinition,\n tags_for_partition_key_fn: Optional[Callable[[str], Mapping[str, str]]] = None,\n) -> Callable[[Callable[[str], Mapping[str, Any]]], PartitionedConfig]:\n """Creates a partitioned config for a job given a PartitionsDefinition.\n\n The partitions_def provides the set of partitions, which may change over time\n (for example, when using a DynamicPartitionsDefinition).\n\n The decorated function takes in a partition key and returns a valid run config for a particular\n target job.\n\n Args:\n partitions_def: (Optional[DynamicPartitionsDefinition]): PartitionsDefinition for the job\n tags_for_partition_key_fn (Optional[Callable[[str], Mapping[str, str]]]): A function that\n accepts a partition key and returns a dictionary of tags to attach to runs for that\n partition.\n\n Returns:\n PartitionedConfig\n """\n check.opt_callable_param(tags_for_partition_key_fn, "tags_for_partition_key_fn")\n\n def inner(fn: Callable[[str], Mapping[str, Any]]) -> PartitionedConfig:\n return PartitionedConfig(\n partitions_def=partitions_def,\n run_config_for_partition_key_fn=fn,\n decorated_fn=fn,\n tags_for_partition_key_fn=tags_for_partition_key_fn,\n )\n\n return inner\n\n\n
[docs]@deprecated_param(\n param="tags_for_partition_fn",\n breaking_version="2.0",\n additional_warn_text="Use tags_for_partition_key_fn instead.",\n)\ndef dynamic_partitioned_config(\n partition_fn: Callable[[Optional[datetime]], Sequence[str]],\n tags_for_partition_fn: Optional[Callable[[str], Mapping[str, str]]] = None,\n tags_for_partition_key_fn: Optional[Callable[[str], Mapping[str, str]]] = None,\n) -> Callable[[Callable[[str], Mapping[str, Any]]], PartitionedConfig]:\n """Creates a dynamic partitioned config for a job.\n\n The provided partition_fn returns a list of strings identifying the set of partitions, given\n an optional datetime argument (representing the current time). The list of partitions returned\n may change over time.\n\n The decorated function takes in a partition key and returns a valid run config for a particular\n target job.\n\n Args:\n partition_fn (Callable[[datetime.datetime], Sequence[str]]): A function that generates a\n list of valid partition keys, which serve as the range of values that can be provided\n to the decorated run config function.\n tags_for_partition_fn (Optional[Callable[[str], Mapping[str, str]]]): A function that\n accepts a partition key and returns a dictionary of tags to attach to runs for that\n partition.\n\n Returns:\n PartitionedConfig\n """\n check.callable_param(partition_fn, "partition_fn")\n\n tags_for_partition_key_fn = normalize_renamed_param(\n tags_for_partition_key_fn,\n "tags_for_partition_key_fn",\n tags_for_partition_fn,\n "tags_for_partition_fn",\n )\n\n def inner(fn: Callable[[str], Mapping[str, Any]]) -> PartitionedConfig:\n return PartitionedConfig(\n partitions_def=DynamicPartitionsDefinition(partition_fn),\n run_config_for_partition_key_fn=fn,\n decorated_fn=fn,\n tags_for_partition_key_fn=tags_for_partition_key_fn,\n )\n\n return inner
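# --- Illustrative usage sketch (not part of the generated source above). ---
# dynamic_partitioned_config with a partition_fn; the customer lookup is a
# placeholder assumption standing in for a call to an external system.
from datetime import datetime
from typing import Optional, Sequence

from dagster import dynamic_partitioned_config


def list_customer_ids(_current_time: Optional[datetime] = None) -> Sequence[str]:
    return ["cust_1", "cust_2"]


@dynamic_partitioned_config(partition_fn=list_customer_ids)
def customer_config(partition_key: str):
    return {"ops": {"sync_customer": {"config": {"customer_id": partition_key}}}}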
\n\n\ndef cron_schedule_from_schedule_type_and_offsets(\n schedule_type: ScheduleType,\n minute_offset: int,\n hour_offset: int,\n day_offset: Optional[int],\n) -> str:\n if schedule_type is ScheduleType.HOURLY:\n return f"{minute_offset} * * * *"\n elif schedule_type is ScheduleType.DAILY:\n return f"{minute_offset} {hour_offset} * * *"\n elif schedule_type is ScheduleType.WEEKLY:\n return f"{minute_offset} {hour_offset} * * {day_offset if day_offset is not None else 0}"\n elif schedule_type is ScheduleType.MONTHLY:\n return f"{minute_offset} {hour_offset} {day_offset if day_offset is not None else 1} * *"\n else:\n check.assert_never(schedule_type)\n\n\nclass PartitionsSubset(ABC, Generic[T_str]):\n """Represents a subset of the partitions within a PartitionsDefinition."""\n\n @abstractmethod\n def get_partition_keys_not_in_subset(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Iterable[T_str]: ...\n\n @abstractmethod\n @public\n def get_partition_keys(self, current_time: Optional[datetime] = None) -> Iterable[T_str]: ...\n\n @abstractmethod\n def get_partition_key_ranges(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Sequence[PartitionKeyRange]: ...\n\n @abstractmethod\n def with_partition_keys(self, partition_keys: Iterable[str]) -> "PartitionsSubset[T_str]": ...\n\n def with_partition_key_range(\n self,\n partition_key_range: PartitionKeyRange,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> "PartitionsSubset[T_str]":\n return self.with_partition_keys(\n self.partitions_def.get_partition_keys_in_range(\n partition_key_range, dynamic_partitions_store=dynamic_partitions_store\n )\n )\n\n def __or__(self, other: "PartitionsSubset") -> "PartitionsSubset[T_str]":\n if self is other:\n return self\n return self.with_partition_keys(other.get_partition_keys())\n\n def __sub__(self, other: "PartitionsSubset") -> "PartitionsSubset[T_str]":\n if self is other:\n return self.partitions_def.empty_subset()\n return self.partitions_def.empty_subset().with_partition_keys(\n set(self.get_partition_keys()).difference(set(other.get_partition_keys()))\n )\n\n def __and__(self, other: "PartitionsSubset") -> "PartitionsSubset[T_str]":\n if self is other:\n return self\n return self.partitions_def.empty_subset().with_partition_keys(\n set(self.get_partition_keys()) & set(other.get_partition_keys())\n )\n\n @abstractmethod\n def serialize(self) -> str: ...\n\n @classmethod\n @abstractmethod\n def from_serialized(\n cls, partitions_def: PartitionsDefinition[T_str], serialized: str\n ) -> "PartitionsSubset[T_str]": ...\n\n @classmethod\n @abstractmethod\n def can_deserialize(\n cls,\n partitions_def: PartitionsDefinition,\n serialized: str,\n serialized_partitions_def_unique_id: Optional[str],\n serialized_partitions_def_class_name: Optional[str],\n ) -> bool: ...\n\n @property\n @abstractmethod\n def partitions_def(self) -> PartitionsDefinition[T_str]: ...\n\n @abstractmethod\n def __len__(self) -> int: ...\n\n @abstractmethod\n def __contains__(self, value) -> bool: ...\n\n @classmethod\n @abstractmethod\n def empty_subset(\n cls, partitions_def: PartitionsDefinition[T_str]\n ) -> "PartitionsSubset[T_str]": ...\n\n\n@whitelist_for_serdes\nclass SerializedPartitionsSubset(NamedTuple):\n serialized_subset: str\n serialized_partitions_def_unique_id: str\n serialized_partitions_def_class_name: str\n\n @classmethod\n def 
from_subset(\n cls,\n subset: PartitionsSubset,\n partitions_def: PartitionsDefinition,\n dynamic_partitions_store: DynamicPartitionsStore,\n ):\n return cls(\n serialized_subset=subset.serialize(),\n serialized_partitions_def_unique_id=partitions_def.get_serializable_unique_identifier(\n dynamic_partitions_store\n ),\n serialized_partitions_def_class_name=partitions_def.__class__.__name__,\n )\n\n def can_deserialize(self, partitions_def: Optional[PartitionsDefinition]) -> bool:\n if not partitions_def:\n # Asset had a partitions definition at storage time, but no longer does\n return False\n\n return partitions_def.can_deserialize_subset(\n self.serialized_subset,\n serialized_partitions_def_unique_id=self.serialized_partitions_def_unique_id,\n serialized_partitions_def_class_name=self.serialized_partitions_def_class_name,\n )\n\n def deserialize(self, partitions_def: PartitionsDefinition) -> PartitionsSubset:\n return partitions_def.deserialize_subset(self.serialized_subset)\n\n\nclass DefaultPartitionsSubset(PartitionsSubset[T_str]):\n # Every time we change the serialization format, we should increment the version number.\n # This will ensure that we can gracefully degrade when deserializing old data.\n SERIALIZATION_VERSION = 1\n\n def __init__(\n self, partitions_def: PartitionsDefinition[T_str], subset: Optional[Set[T_str]] = None\n ):\n check.opt_set_param(subset, "subset")\n self._partitions_def = partitions_def\n self._subset = subset or set()\n\n def get_partition_keys_not_in_subset(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Iterable[str]:\n return (\n set(\n self._partitions_def.get_partition_keys(\n current_time=current_time, dynamic_partitions_store=dynamic_partitions_store\n )\n )\n - self._subset\n )\n\n def get_partition_keys(self, current_time: Optional[datetime] = None) -> Iterable[str]:\n return self._subset\n\n def get_partition_key_ranges(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Sequence[PartitionKeyRange]:\n partition_keys = self._partitions_def.get_partition_keys(\n current_time, dynamic_partitions_store=dynamic_partitions_store\n )\n cur_range_start = None\n cur_range_end = None\n result = []\n for partition_key in partition_keys:\n if partition_key in self._subset:\n if cur_range_start is None:\n cur_range_start = partition_key\n cur_range_end = partition_key\n else:\n if cur_range_start is not None and cur_range_end is not None:\n result.append(PartitionKeyRange(cur_range_start, cur_range_end))\n cur_range_start = cur_range_end = None\n\n if cur_range_start is not None and cur_range_end is not None:\n result.append(PartitionKeyRange(cur_range_start, cur_range_end))\n\n return result\n\n def with_partition_keys(\n self, partition_keys: Iterable[T_str]\n ) -> "DefaultPartitionsSubset[T_str]":\n return DefaultPartitionsSubset(\n self._partitions_def,\n self._subset | set(partition_keys),\n )\n\n def serialize(self) -> str:\n # Serialize version number, so attempting to deserialize old versions can be handled gracefully.\n # Any time the serialization format changes, we should increment the version number.\n return json.dumps({"version": self.SERIALIZATION_VERSION, "subset": list(self._subset)})\n\n @classmethod\n def from_serialized(\n cls, partitions_def: PartitionsDefinition[T_str], serialized: str\n ) -> "PartitionsSubset[T_str]":\n # Check the version number, so only valid versions can be 
deserialized.\n data = json.loads(serialized)\n\n if isinstance(data, list):\n # backwards compatibility\n return cls(subset=set(data), partitions_def=partitions_def)\n else:\n if data.get("version") != cls.SERIALIZATION_VERSION:\n raise DagsterInvalidDeserializationVersionError(\n f"Attempted to deserialize partition subset with version {data.get('version')},"\n f" but only version {cls.SERIALIZATION_VERSION} is supported."\n )\n return cls(subset=set(data.get("subset")), partitions_def=partitions_def)\n\n @classmethod\n def can_deserialize(\n cls,\n partitions_def: PartitionsDefinition[T_str],\n serialized: str,\n serialized_partitions_def_unique_id: Optional[str],\n serialized_partitions_def_class_name: Optional[str],\n ) -> bool:\n if serialized_partitions_def_class_name is not None:\n return serialized_partitions_def_class_name == partitions_def.__class__.__name__\n\n data = json.loads(serialized)\n return isinstance(data, list) or (\n data.get("subset") is not None and data.get("version") == cls.SERIALIZATION_VERSION\n )\n\n @property\n def partitions_def(self) -> PartitionsDefinition[T_str]:\n return self._partitions_def\n\n def __eq__(self, other: object) -> bool:\n return (\n isinstance(other, DefaultPartitionsSubset)\n and self._partitions_def == other._partitions_def\n and self._subset == other._subset\n )\n\n def __len__(self) -> int:\n return len(self._subset)\n\n def __contains__(self, value) -> bool:\n return value in self._subset\n\n def __repr__(self) -> str:\n return (\n f"DefaultPartitionsSubset(subset={self._subset}, partitions_def={self._partitions_def})"\n )\n\n @classmethod\n def empty_subset(cls, partitions_def: PartitionsDefinition[T_str]) -> "PartitionsSubset[T_str]":\n return cls(partitions_def=partitions_def)\n
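# --- Illustrative usage sketch (not part of the generated source above). ---
# PartitionsSubset objects are normally obtained from a PartitionsDefinition.
# empty_subset/with_partition_keys and the set operators are defined above.
from dagster import StaticPartitionsDefinition

letters = StaticPartitionsDefinition(["a", "b", "c", "d"])

done = letters.empty_subset().with_partition_keys(["a", "b"])
assert set(done.get_partition_keys_not_in_subset()) == {"c", "d"}

# Subsets of the same definition support set algebra.
more = letters.empty_subset().with_partition_keys(["b", "c"])
assert set((done | more).get_partition_keys()) == {"a", "b", "c"}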
", "current_page_name": "_modules/dagster/_core/definitions/partition", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.partition"}, "partition_key_range": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.partition_key_range

\nfrom typing import NamedTuple\n\nfrom dagster._annotations import PublicAttr\n\n\n
[docs]class PartitionKeyRange(NamedTuple):\n """Defines a range of partitions.\n\n Attributes:\n start (str): The starting partition key in the range (inclusive).\n end (str): The ending partition key in the range (inclusive).\n\n Examples:\n .. code-block:: python\n\n partitions_def = StaticPartitionsDefinition(["a", "b", "c", "d"])\n partition_key_range = PartitionKeyRange(start="a", end="c") # Represents ["a", "b", "c"]\n """\n\n # Inclusive on both sides\n start: PublicAttr[str]\n end: PublicAttr[str]
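# --- Illustrative usage sketch (not part of the generated source above). ---
# Expanding a PartitionKeyRange against a partitions definition via
# get_partition_keys_in_range (referenced elsewhere in this module tree).
# The import uses the private module path shown above.
from dagster import StaticPartitionsDefinition
from dagster._core.definitions.partition_key_range import PartitionKeyRange

partitions_def = StaticPartitionsDefinition(["a", "b", "c", "d"])
key_range = PartitionKeyRange(start="b", end="d")  # inclusive on both ends

assert partitions_def.get_partition_keys_in_range(key_range) == ["b", "c", "d"]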
\n
", "current_page_name": "_modules/dagster/_core/definitions/partition_key_range", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.partition_key_range"}, "partition_mapping": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.partition_mapping

\nimport collections.abc\nimport itertools\nfrom abc import ABC, abstractmethod\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom typing import (\n    Collection,\n    Dict,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Tuple,\n    Type,\n    Union,\n    cast,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, experimental, public\nfrom dagster._core.definitions.multi_dimensional_partitions import (\n    MultiPartitionKey,\n    MultiPartitionsDefinition,\n)\nfrom dagster._core.definitions.partition import (\n    PartitionsDefinition,\n    PartitionsSubset,\n    StaticPartitionsDefinition,\n)\nfrom dagster._core.definitions.time_window_partitions import TimeWindowPartitionsDefinition\nfrom dagster._core.instance import DynamicPartitionsStore\nfrom dagster._serdes import whitelist_for_serdes\nfrom dagster._utils.cached_method import cached_method\nfrom dagster._utils.warnings import disable_dagster_warnings\n\n\nclass UpstreamPartitionsResult(NamedTuple):\n    """Represents the result of mapping a PartitionsSubset to the corresponding\n    partitions in another PartitionsDefinition.\n\n    partitions_subset (PartitionsSubset): The resulting partitions subset that was\n        mapped to. Only contains partitions for existent partitions, filtering out nonexistent partitions.\n    required_but_nonexistent_partition_keys (Sequence[str]): A list containing invalid partition keys in to_partitions_def\n        that partitions in from_partitions_subset were mapped to.\n    """\n\n    partitions_subset: PartitionsSubset\n    required_but_nonexistent_partition_keys: Sequence[str]\n\n\n
[docs]class PartitionMapping(ABC):\n """Defines a correspondence between the partitions in an asset and the partitions in an asset\n that it depends on.\n\n Overriding PartitionMapping outside of Dagster is not supported. The abstract methods of this\n class may change at any time.\n """\n\n
[docs] @public\n @abstractmethod\n def get_downstream_partitions_for_partitions(\n self,\n upstream_partitions_subset: PartitionsSubset,\n downstream_partitions_def: PartitionsDefinition,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> PartitionsSubset:\n """Returns the subset of partition keys in the downstream asset that use the data in the given\n partition key subset of the upstream asset.\n\n Args:\n upstream_partitions_subset (Union[PartitionKeyRange, PartitionsSubset]): The\n subset of partition keys in the upstream asset.\n downstream_partitions_def (PartitionsDefinition): The partitions definition for the\n downstream asset.\n """
\n\n
[docs] @public\n @abstractmethod\n def get_upstream_mapped_partitions_result_for_partitions(\n self,\n downstream_partitions_subset: Optional[PartitionsSubset],\n upstream_partitions_def: PartitionsDefinition,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> UpstreamPartitionsResult:\n """Returns a UpstreamPartitionsResult object containing the partition keys the downstream\n partitions subset was mapped to in the upstream partitions definition.\n\n Valid upstream partitions will be included in UpstreamPartitionsResult.partitions_subset.\n Invalid upstream partitions will be included in UpstreamPartitionsResult.required_but_nonexistent_partition_keys.\n\n For example, if an upstream asset is time-partitioned and starts in June 2023, and the\n downstream asset is time-partitioned and starts in May 2023, this function would return a\n UpstreamPartitionsResult(PartitionsSubset("2023-06-01"), required_but_nonexistent_partition_keys=["2023-05-01"])\n when downstream_partitions_subset contains 2023-05-01 and 2023-06-01.\n """
\n\n\n
[docs]@whitelist_for_serdes\nclass IdentityPartitionMapping(PartitionMapping, NamedTuple("_IdentityPartitionMapping", [])):\n """Expects that the upstream and downstream assets are partitioned in the same way, and maps\n partitions in the downstream asset to the same partition in the upstream asset.\n """\n\n def get_upstream_mapped_partitions_result_for_partitions(\n self,\n downstream_partitions_subset: Optional[PartitionsSubset],\n upstream_partitions_def: PartitionsDefinition,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> UpstreamPartitionsResult:\n if downstream_partitions_subset is None:\n check.failed("downstream asset is not partitioned")\n\n if downstream_partitions_subset.partitions_def == upstream_partitions_def:\n return UpstreamPartitionsResult(downstream_partitions_subset, [])\n\n upstream_partition_keys = set(\n upstream_partitions_def.get_partition_keys(\n dynamic_partitions_store=dynamic_partitions_store\n )\n )\n downstream_partition_keys = set(downstream_partitions_subset.get_partition_keys())\n\n return UpstreamPartitionsResult(\n upstream_partitions_def.subset_with_partition_keys(\n list(upstream_partition_keys & downstream_partition_keys)\n ),\n list(downstream_partition_keys - upstream_partition_keys),\n )\n\n def get_downstream_partitions_for_partitions(\n self,\n upstream_partitions_subset: PartitionsSubset,\n downstream_partitions_def: PartitionsDefinition,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> PartitionsSubset:\n if upstream_partitions_subset is None:\n check.failed("upstream asset is not partitioned")\n\n if upstream_partitions_subset.partitions_def == downstream_partitions_def:\n return upstream_partitions_subset\n\n upstream_partition_keys = set(upstream_partitions_subset.get_partition_keys())\n downstream_partition_keys = set(\n downstream_partitions_def.get_partition_keys(\n dynamic_partitions_store=dynamic_partitions_store\n )\n )\n\n return downstream_partitions_def.empty_subset().with_partition_keys(\n list(downstream_partition_keys & upstream_partition_keys)\n )
\n\n\n
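# --- Illustrative usage sketch (not part of the generated source above). ---
# Calling the PartitionMapping interface directly on IdentityPartitionMapping
# to see how a downstream subset is translated, including the
# required_but_nonexistent_partition_keys reporting described above.
from dagster import IdentityPartitionMapping, StaticPartitionsDefinition

upstream_def = StaticPartitionsDefinition(["a", "b", "c"])
downstream_def = StaticPartitionsDefinition(["b", "c", "d"])

downstream_subset = downstream_def.empty_subset().with_partition_keys(["b", "d"])
result = IdentityPartitionMapping().get_upstream_mapped_partitions_result_for_partitions(
    downstream_subset, upstream_def
)

assert set(result.partitions_subset.get_partition_keys()) == {"b"}
assert result.required_but_nonexistent_partition_keys == ["d"]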
[docs]@whitelist_for_serdes\nclass AllPartitionMapping(PartitionMapping, NamedTuple("_AllPartitionMapping", [])):\n """Maps every partition in the downstream asset to every partition in the upstream asset.\n\n Commonly used in the case when the downstream asset is not partitioned, in which the entire\n downstream asset depends on all partitions of the usptream asset.\n """\n\n def get_upstream_mapped_partitions_result_for_partitions(\n self,\n downstream_partitions_subset: Optional[PartitionsSubset],\n upstream_partitions_def: PartitionsDefinition,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> UpstreamPartitionsResult:\n upstream_subset = upstream_partitions_def.subset_with_all_partitions(\n current_time=current_time, dynamic_partitions_store=dynamic_partitions_store\n )\n return UpstreamPartitionsResult(upstream_subset, [])\n\n def get_downstream_partitions_for_partitions(\n self,\n upstream_partitions_subset: PartitionsSubset,\n downstream_partitions_def: PartitionsDefinition,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> PartitionsSubset:\n raise NotImplementedError()
\n\n\n
[docs]@whitelist_for_serdes\nclass LastPartitionMapping(PartitionMapping, NamedTuple("_LastPartitionMapping", [])):\n """Maps all dependencies to the last partition in the upstream asset.\n\n Commonly used in the case when the downstream asset is not partitioned, in which the entire\n downstream asset depends on the last partition of the upstream asset.\n """\n\n def get_upstream_mapped_partitions_result_for_partitions(\n self,\n downstream_partitions_subset: Optional[PartitionsSubset],\n upstream_partitions_def: PartitionsDefinition,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> UpstreamPartitionsResult:\n last = upstream_partitions_def.get_last_partition_key(\n current_time=None, dynamic_partitions_store=dynamic_partitions_store\n )\n\n upstream_subset = upstream_partitions_def.empty_subset()\n if last is not None:\n upstream_subset = upstream_subset.with_partition_keys([last])\n\n return UpstreamPartitionsResult(upstream_subset, [])\n\n def get_downstream_partitions_for_partitions(\n self,\n upstream_partitions_subset: PartitionsSubset,\n downstream_partitions_def: PartitionsDefinition,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> PartitionsSubset:\n raise NotImplementedError()
\n\n\n
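# --- Illustrative usage sketch (not part of the generated source above). ---
# LastPartitionMapping in the common case described above: an unpartitioned
# downstream asset that only depends on the latest upstream partition.
# Asset names are sample data.
from dagster import AssetIn, LastPartitionMapping, StaticPartitionsDefinition, asset


@asset(partitions_def=StaticPartitionsDefinition(["2023-01", "2023-02", "2023-03"]))
def monthly_snapshot():
    ...


@asset(ins={"monthly_snapshot": AssetIn(partition_mapping=LastPartitionMapping())})
def latest_report(monthly_snapshot):
    # Only the last upstream partition is treated as a dependency.
    ...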
[docs]@whitelist_for_serdes\nclass SpecificPartitionsPartitionMapping(\n PartitionMapping,\n NamedTuple(\n "_SpecificPartitionsPartitionMapping", [("partition_keys", PublicAttr[Sequence[str]])]\n ),\n):\n """Maps to a specific subset of partitions in the upstream asset.\n\n Example:\n .. code-block:: python\n\n from dagster import SpecificPartitionsPartitionMapping, StaticPartitionsDefinition, asset\n\n @asset(partitions_def=StaticPartitionsDefinition(["a", "b", "c"]))\n def upstream():\n ...\n\n @asset(\n ins={\n "upstream": AssetIn(partition_mapping=SpecificPartitionsPartitionMapping(["a"]))\n }\n )\n def a_downstream(upstream):\n ...\n """\n\n def get_upstream_mapped_partitions_result_for_partitions(\n self,\n downstream_partitions_subset: Optional[PartitionsSubset],\n upstream_partitions_def: PartitionsDefinition,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> UpstreamPartitionsResult:\n return UpstreamPartitionsResult(\n upstream_partitions_def.subset_with_partition_keys(self.partition_keys), []\n )\n\n def get_downstream_partitions_for_partitions(\n self,\n upstream_partitions_subset: PartitionsSubset,\n downstream_partitions_def: PartitionsDefinition,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> PartitionsSubset:\n # if any of the partition keys in this partition mapping are contained within the upstream\n # partitions subset, then all partitions of the downstream asset are dependencies\n if any(key in upstream_partitions_subset for key in self.partition_keys):\n return downstream_partitions_def.subset_with_all_partitions(\n dynamic_partitions_store=dynamic_partitions_store\n )\n return downstream_partitions_def.empty_subset()
\n\n\nclass DimensionDependency(NamedTuple):\n partition_mapping: PartitionMapping\n upstream_dimension_name: Optional[str] = None\n downstream_dimension_name: Optional[str] = None\n\n\nclass BaseMultiPartitionMapping(ABC):\n @abstractmethod\n def get_dimension_dependencies(\n self,\n upstream_partitions_def: PartitionsDefinition,\n downstream_partitions_def: PartitionsDefinition,\n ) -> Sequence[DimensionDependency]: ...\n\n def get_partitions_def(\n self, partitions_def: PartitionsDefinition, dimension_name: Optional[str]\n ) -> PartitionsDefinition:\n if isinstance(partitions_def, MultiPartitionsDefinition):\n if not isinstance(dimension_name, str):\n check.failed("Expected dimension_name to be a string")\n return partitions_def.get_partitions_def_for_dimension(dimension_name)\n return partitions_def\n\n def _get_dependency_partitions_subset(\n self,\n a_partitions_def: PartitionsDefinition,\n a_partitions_subset: PartitionsSubset,\n b_partitions_def: PartitionsDefinition,\n a_upstream_of_b: bool,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n current_time: Optional[datetime] = None,\n ) -> Union[UpstreamPartitionsResult, PartitionsSubset]:\n """Given two partitions definitions a_partitions_def and b_partitions_def that have a dependency\n relationship (a_upstream_of_b is True if a_partitions_def is upstream of b_partitions_def),\n and a_partition_keys, a list of partition keys in a_partitions_def, returns a list of\n partition keys in the partitions definition b_partitions_def that are\n dependencies of the partition keys in a_partition_keys.\n """\n a_partition_keys_by_dimension = defaultdict(set)\n if isinstance(a_partitions_def, MultiPartitionsDefinition):\n for partition_key in a_partitions_subset.get_partition_keys():\n for dimension_name, key in cast(\n MultiPartitionKey, partition_key\n ).keys_by_dimension.items():\n a_partition_keys_by_dimension[dimension_name].add(key)\n else:\n for partition_key in a_partitions_subset.get_partition_keys():\n a_partition_keys_by_dimension[None].add(partition_key)\n\n # Maps the dimension name and key of a partition in a_partitions_def to the list of\n # partition keys in b_partitions_def that are dependencies of that partition\n dep_b_keys_by_a_dim_and_key: Dict[Optional[str], Dict[Optional[str], List[str]]] = (\n defaultdict(lambda: defaultdict(list))\n )\n required_but_nonexistent_upstream_partitions = set()\n\n b_dimension_partitions_def_by_name: Dict[Optional[str], PartitionsDefinition] = (\n {\n dimension.name: dimension.partitions_def\n for dimension in b_partitions_def.partitions_defs\n }\n if isinstance(b_partitions_def, MultiPartitionsDefinition)\n else {None: b_partitions_def}\n )\n\n if a_upstream_of_b:\n # a_partitions_def is upstream of b_partitions_def, so we need to map the\n # dimension names of a_partitions_def to the corresponding dependent dimensions of\n # b_partitions_def\n a_dim_to_dependency_b_dim = {\n dimension_mapping.upstream_dimension_name: (\n dimension_mapping.downstream_dimension_name,\n dimension_mapping.partition_mapping,\n )\n for dimension_mapping in self.get_dimension_dependencies(\n a_partitions_def, b_partitions_def\n )\n }\n\n for a_dim_name, keys in a_partition_keys_by_dimension.items():\n if a_dim_name in a_dim_to_dependency_b_dim:\n (\n b_dim_name,\n dimension_mapping,\n ) = a_dim_to_dependency_b_dim[a_dim_name]\n a_dimension_partitions_def = self.get_partitions_def(\n a_partitions_def, a_dim_name\n )\n b_dimension_partitions_def = self.get_partitions_def(\n b_partitions_def, 
b_dim_name\n )\n for key in keys:\n # if downstream dimension mapping exists, for a given key, get the list of\n # downstream partition keys that are dependencies of that key\n dep_b_keys_by_a_dim_and_key[a_dim_name][key] = list(\n dimension_mapping.get_downstream_partitions_for_partitions(\n a_dimension_partitions_def.empty_subset().with_partition_keys(\n [key]\n ),\n b_dimension_partitions_def,\n current_time=current_time,\n dynamic_partitions_store=dynamic_partitions_store,\n ).get_partition_keys()\n )\n\n else:\n # a_partitions_def is downstream of b_partitions_def, so we need to map the\n # dimension names of a_partitions_def to the corresponding dependency dimensions of\n # b_partitions_def\n a_dim_to_dependency_b_dim = {\n dimension_mapping.downstream_dimension_name: (\n dimension_mapping.upstream_dimension_name,\n dimension_mapping.partition_mapping,\n )\n for dimension_mapping in self.get_dimension_dependencies(\n b_partitions_def, a_partitions_def\n )\n }\n\n for a_dim_name, keys in a_partition_keys_by_dimension.items():\n if a_dim_name in a_dim_to_dependency_b_dim:\n (\n b_dim_name,\n partition_mapping,\n ) = a_dim_to_dependency_b_dim[a_dim_name]\n a_dimension_partitions_def = self.get_partitions_def(\n a_partitions_def, a_dim_name\n )\n b_dimension_partitions_def = self.get_partitions_def(\n b_partitions_def, b_dim_name\n )\n for key in keys:\n mapped_partitions_result = (\n partition_mapping.get_upstream_mapped_partitions_result_for_partitions(\n a_dimension_partitions_def.empty_subset().with_partition_keys(\n [key]\n ),\n b_dimension_partitions_def,\n current_time=current_time,\n dynamic_partitions_store=dynamic_partitions_store,\n )\n )\n dep_b_keys_by_a_dim_and_key[a_dim_name][key] = list(\n mapped_partitions_result.partitions_subset.get_partition_keys()\n )\n required_but_nonexistent_upstream_partitions.update(\n set(mapped_partitions_result.required_but_nonexistent_partition_keys)\n )\n\n b_partition_keys = set()\n\n mapped_a_dim_names = a_dim_to_dependency_b_dim.keys()\n mapped_b_dim_names = [mapping[0] for mapping in a_dim_to_dependency_b_dim.values()]\n unmapped_b_dim_names = list(\n set(b_dimension_partitions_def_by_name.keys()) - set(mapped_b_dim_names)\n )\n\n for key in a_partitions_subset.get_partition_keys():\n for b_key_values in itertools.product(\n *(\n [\n dep_b_keys_by_a_dim_and_key[dim_name][\n (\n cast(MultiPartitionKey, key).keys_by_dimension[dim_name]\n if dim_name\n else key\n )\n ]\n for dim_name in mapped_a_dim_names\n ]\n ),\n *[\n b_dimension_partitions_def_by_name[dim_name].get_partition_keys()\n for dim_name in unmapped_b_dim_names\n ],\n ):\n b_partition_keys.add(\n MultiPartitionKey(\n {\n cast(str, (mapped_b_dim_names + unmapped_b_dim_names)[i]): key\n for i, key in enumerate(b_key_values)\n }\n )\n if len(b_key_values) > 1\n else b_key_values[0]\n )\n\n mapped_subset = b_partitions_def.empty_subset().with_partition_keys(b_partition_keys)\n if a_upstream_of_b:\n return mapped_subset\n else:\n return UpstreamPartitionsResult(\n mapped_subset,\n required_but_nonexistent_partition_keys=list(\n required_but_nonexistent_upstream_partitions\n ),\n )\n\n def get_upstream_mapped_partitions_result_for_partitions(\n self,\n downstream_partitions_subset: Optional[PartitionsSubset],\n upstream_partitions_def: PartitionsDefinition,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> UpstreamPartitionsResult:\n if downstream_partitions_subset is None:\n check.failed("downstream asset is not 
partitioned")\n\n result = self._get_dependency_partitions_subset(\n cast(MultiPartitionsDefinition, downstream_partitions_subset.partitions_def),\n downstream_partitions_subset,\n cast(MultiPartitionsDefinition, upstream_partitions_def),\n a_upstream_of_b=False,\n dynamic_partitions_store=dynamic_partitions_store,\n current_time=current_time,\n )\n\n if not isinstance(result, UpstreamPartitionsResult):\n check.failed("Expected UpstreamPartitionsResult")\n\n return result\n\n def get_downstream_partitions_for_partitions(\n self,\n upstream_partitions_subset: PartitionsSubset,\n downstream_partitions_def: PartitionsDefinition,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> PartitionsSubset:\n if upstream_partitions_subset is None:\n check.failed("upstream asset is not partitioned")\n\n result = self._get_dependency_partitions_subset(\n cast(MultiPartitionsDefinition, upstream_partitions_subset.partitions_def),\n upstream_partitions_subset,\n cast(MultiPartitionsDefinition, downstream_partitions_def),\n a_upstream_of_b=True,\n dynamic_partitions_store=dynamic_partitions_store,\n )\n\n if isinstance(result, UpstreamPartitionsResult):\n check.failed("Expected PartitionsSubset")\n\n return result\n\n\n
[docs]@experimental\n@whitelist_for_serdes\nclass MultiToSingleDimensionPartitionMapping(\n BaseMultiPartitionMapping,\n PartitionMapping,\n NamedTuple(\n "_MultiToSingleDimensionPartitionMapping", [("partition_dimension_name", Optional[str])]\n ),\n):\n """Defines a correspondence between an single-dimensional partitions definition\n and a MultiPartitionsDefinition. The single-dimensional partitions definition must be\n a dimension of the MultiPartitionsDefinition.\n\n This class handles the case where the upstream asset is multipartitioned and the\n downstream asset is single dimensional, and vice versa.\n\n For a partition key X, this partition mapping assumes that any multi-partition key with\n X in the selected dimension is a dependency.\n\n Args:\n partition_dimension_name (Optional[str]): The name of the partition dimension in the\n MultiPartitionsDefinition that matches the single-dimension partitions definition.\n """\n\n def __new__(cls, partition_dimension_name: Optional[str] = None):\n return super(MultiToSingleDimensionPartitionMapping, cls).__new__(\n cls,\n partition_dimension_name=check.opt_str_param(\n partition_dimension_name, "partition_dimension_name"\n ),\n )\n\n def get_dimension_dependencies(\n self,\n upstream_partitions_def: PartitionsDefinition,\n downstream_partitions_def: PartitionsDefinition,\n ) -> Sequence[DimensionDependency]:\n infer_mapping_result = _get_infer_single_to_multi_dimension_deps_result(\n upstream_partitions_def, downstream_partitions_def\n )\n\n if not infer_mapping_result.can_infer:\n check.invariant(isinstance(infer_mapping_result.inference_failure_reason, str))\n check.failed(cast(str, infer_mapping_result.inference_failure_reason))\n\n return [cast(DimensionDependency, infer_mapping_result.dimension_dependency)]
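# --- Illustrative usage sketch (not part of the generated source above). ---
# MultiToSingleDimensionPartitionMapping between a multi-partitioned upstream
# and a single-dimension downstream. Definitions and asset names are sample
# data; the matching "date" dimension is inferred here because exactly one
# dimension matches the downstream partitions definition.
from dagster import (
    AssetIn,
    DailyPartitionsDefinition,
    MultiPartitionsDefinition,
    MultiToSingleDimensionPartitionMapping,
    StaticPartitionsDefinition,
    asset,
)

daily_by_region = MultiPartitionsDefinition(
    {
        "date": DailyPartitionsDefinition(start_date="2023-01-01"),
        "region": StaticPartitionsDefinition(["us", "eu"]),
    }
)


@asset(partitions_def=daily_by_region)
def raw_events():
    ...


@asset(
    partitions_def=DailyPartitionsDefinition(start_date="2023-01-01"),
    ins={"raw_events": AssetIn(partition_mapping=MultiToSingleDimensionPartitionMapping())},
)
def daily_rollup(raw_events):
    ...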
\n\n\n@whitelist_for_serdes\nclass DimensionPartitionMapping(\n NamedTuple(\n "_DimensionPartitionMapping",\n [\n ("dimension_name", str),\n ("partition_mapping", PartitionMapping),\n ],\n )\n):\n """A helper class for MultiPartitionMapping that defines a partition mapping used to calculate\n the dependent partition keys in the selected downstream MultiPartitions definition dimension.\n\n Args:\n dimension_name (str): The name of the dimension in the downstream MultiPartitionsDefinition.\n partition_mapping (PartitionMapping): The partition mapping object used to calculate\n the downstream dimension partitions from the upstream dimension partitions and vice versa.\n """\n\n def __new__(\n cls,\n dimension_name: str,\n partition_mapping: PartitionMapping,\n ):\n return super(DimensionPartitionMapping, cls).__new__(\n cls,\n dimension_name=check.str_param(dimension_name, "dimension_name"),\n partition_mapping=check.inst_param(\n partition_mapping, "partition_mapping", PartitionMapping\n ),\n )\n\n\n
[docs]@experimental\n@whitelist_for_serdes\nclass MultiPartitionMapping(\n BaseMultiPartitionMapping,\n PartitionMapping,\n NamedTuple(\n "_MultiPartitionMapping",\n [("downstream_mappings_by_upstream_dimension", Mapping[str, DimensionPartitionMapping])],\n ),\n):\n """Defines a correspondence between two MultiPartitionsDefinitions.\n\n Accepts a mapping of upstream dimension name to downstream DimensionPartitionMapping, representing\n the explicit correspondence between the upstream and downstream MultiPartitions dimensions\n and the partition mapping used to calculate the downstream partitions.\n\n Examples:\n .. code-block:: python\n\n weekly_abc = MultiPartitionsDefinition(\n {\n "abc": StaticPartitionsDefinition(["a", "b", "c"]),\n "weekly": WeeklyPartitionsDefinition("2023-01-01"),\n }\n )\n daily_123 = MultiPartitionsDefinition(\n {\n "123": StaticPartitionsDefinition(["1", "2", "3"]),\n "daily": DailyPartitionsDefinition("2023-01-01"),\n }\n )\n\n MultiPartitionsMapping(\n {\n "abc": DimensionPartitionMapping(\n dimension_name="123",\n partition_mapping=StaticPartitionMapping({"a": "1", "b": "2", "c": "3"}),\n ),\n "weekly": DimensionPartitionMapping(\n dimension_name="daily",\n partition_mapping=TimeWindowPartitionMapping(),\n )\n }\n )\n\n For upstream or downstream dimensions not explicitly defined in the mapping, Dagster will\n assume an `AllPartitionsMapping`, meaning that all upstream partitions in those dimensions\n will be mapped to all downstream partitions in those dimensions.\n\n Examples:\n .. code-block:: python\n\n weekly_abc = MultiPartitionsDefinition(\n {\n "abc": StaticPartitionsDefinition(["a", "b", "c"]),\n "daily": DailyPartitionsDefinition("2023-01-01"),\n }\n )\n daily_123 = MultiPartitionsDefinition(\n {\n "123": StaticPartitionsDefinition(["1", "2", "3"]),\n "daily": DailyPartitionsDefinition("2023-01-01"),\n }\n )\n\n MultiPartitionsMapping(\n {\n "daily": DimensionPartitionMapping(\n dimension_name="daily",\n partition_mapping=IdentityPartitionMapping(),\n )\n }\n )\n\n # Will map `daily_123` partition key {"123": "1", "daily": "2023-01-01"} to the upstream:\n # {"abc": "a", "daily": "2023-01-01"}\n # {"abc": "b", "daily": "2023-01-01"}\n # {"abc": "c", "daily": "2023-01-01"}\n\n Args:\n downstream_mappings_by_upstream_dimension (Mapping[str, DimensionPartitionMapping]): A\n mapping that defines an explicit correspondence between one dimension of the upstream\n MultiPartitionsDefinition and one dimension of the downstream MultiPartitionsDefinition.\n Maps a string representing upstream dimension name to downstream DimensionPartitionMapping,\n containing the downstream dimension name and partition mapping.\n """\n\n def __new__(\n cls, downstream_mappings_by_upstream_dimension: Mapping[str, DimensionPartitionMapping]\n ):\n return super(MultiPartitionMapping, cls).__new__(\n cls,\n downstream_mappings_by_upstream_dimension=check.mapping_param(\n downstream_mappings_by_upstream_dimension,\n "downstream_mappings_by_upstream_dimension",\n key_type=str,\n value_type=DimensionPartitionMapping,\n ),\n )\n\n def get_dimension_dependencies(\n self,\n upstream_partitions_def: PartitionsDefinition,\n downstream_partitions_def: PartitionsDefinition,\n ) -> Sequence[DimensionDependency]:\n self._check_all_dimensions_accounted_for(\n upstream_partitions_def,\n downstream_partitions_def,\n )\n\n return [\n DimensionDependency(\n mapping.partition_mapping,\n upstream_dimension_name=upstream_dimension,\n downstream_dimension_name=mapping.dimension_name,\n )\n for 
upstream_dimension, mapping in self.downstream_mappings_by_upstream_dimension.items()\n ]\n\n def _check_all_dimensions_accounted_for(\n self,\n upstream_partitions_def: PartitionsDefinition,\n downstream_partitions_def: PartitionsDefinition,\n ) -> None:\n if any(\n not isinstance(partitions_def, MultiPartitionsDefinition)\n for partitions_def in (upstream_partitions_def, downstream_partitions_def)\n ):\n check.failed(\n "Both partitions defs provided to a MultiPartitionMapping must be multi-partitioned"\n )\n\n upstream_dimension_names = {\n dim.name\n for dim in cast(MultiPartitionsDefinition, upstream_partitions_def).partitions_defs\n }\n dimension_names = {\n dim.name\n for dim in cast(MultiPartitionsDefinition, downstream_partitions_def).partitions_defs\n }\n\n for (\n upstream_dimension_name,\n dimension_mapping,\n ) in self.downstream_mappings_by_upstream_dimension.items():\n if upstream_dimension_name not in upstream_dimension_names:\n check.failed(\n "Dimension mapping has an upstream dimension name that is not in the upstream "\n "partitions def"\n )\n if dimension_mapping.dimension_name not in dimension_names:\n check.failed(\n "Dimension mapping has a downstream dimension name that is not in the"\n " downstream partitions def"\n )\n\n upstream_dimension_names.remove(upstream_dimension_name)\n dimension_names.remove(dimension_mapping.dimension_name)
\n\n\n
[docs]@whitelist_for_serdes\nclass StaticPartitionMapping(\n PartitionMapping,\n NamedTuple(\n "_StaticPartitionMapping",\n [\n (\n "downstream_partition_keys_by_upstream_partition_key",\n PublicAttr[Mapping[str, Union[str, Collection[str]]]],\n )\n ],\n ),\n):\n """Define an explicit correspondence between two StaticPartitionsDefinitions.\n\n Args:\n downstream_partition_keys_by_upstream_partition_key (Dict[str, str | Collection[str]]):\n The single or multi-valued correspondence from upstream keys to downstream keys.\n """\n\n def __init__(\n self,\n downstream_partition_keys_by_upstream_partition_key: Mapping[\n str, Union[str, Collection[str]]\n ],\n ):\n check.mapping_param(\n downstream_partition_keys_by_upstream_partition_key,\n "downstream_partition_keys_by_upstream_partition_key",\n key_type=str,\n value_type=(str, collections.abc.Collection),\n )\n\n # cache forward and reverse mappings\n self._mapping = defaultdict(set)\n for (\n upstream_key,\n downstream_keys,\n ) in downstream_partition_keys_by_upstream_partition_key.items():\n self._mapping[upstream_key] = (\n {downstream_keys} if isinstance(downstream_keys, str) else set(downstream_keys)\n )\n\n self._inverse_mapping = defaultdict(set)\n for upstream_key, downstream_keys in self._mapping.items():\n for downstream_key in downstream_keys:\n self._inverse_mapping[downstream_key].add(upstream_key)\n\n @cached_method\n def _check_upstream(self, *, upstream_partitions_def: PartitionsDefinition):\n """Validate that the mapping from upstream to downstream is only defined on upstream keys."""\n check.inst(\n upstream_partitions_def,\n StaticPartitionsDefinition,\n "StaticPartitionMapping can only be defined between two StaticPartitionsDefinitions",\n )\n upstream_keys = upstream_partitions_def.get_partition_keys()\n extra_keys = set(self._mapping.keys()).difference(upstream_keys)\n if extra_keys:\n raise ValueError(\n f"mapping source partitions not in the upstream partitions definition: {extra_keys}"\n )\n\n @cached_method\n def _check_downstream(self, *, downstream_partitions_def: PartitionsDefinition):\n """Validate that the mapping from upstream to downstream only maps to downstream keys."""\n check.inst(\n downstream_partitions_def,\n StaticPartitionsDefinition,\n "StaticPartitionMapping can only be defined between two StaticPartitionsDefinitions",\n )\n downstream_keys = downstream_partitions_def.get_partition_keys()\n extra_keys = set(self._inverse_mapping.keys()).difference(downstream_keys)\n if extra_keys:\n raise ValueError(\n "mapping target partitions not in the downstream partitions definition:"\n f" {extra_keys}"\n )\n\n def get_downstream_partitions_for_partitions(\n self,\n upstream_partitions_subset: PartitionsSubset,\n downstream_partitions_def: PartitionsDefinition,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> PartitionsSubset:\n self._check_downstream(downstream_partitions_def=downstream_partitions_def)\n\n downstream_subset = downstream_partitions_def.empty_subset()\n downstream_keys = set()\n for key in upstream_partitions_subset.get_partition_keys():\n downstream_keys.update(self._mapping[key])\n return downstream_subset.with_partition_keys(downstream_keys)\n\n def get_upstream_mapped_partitions_result_for_partitions(\n self,\n downstream_partitions_subset: Optional[PartitionsSubset],\n upstream_partitions_def: PartitionsDefinition,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = 
None,\n ) -> UpstreamPartitionsResult:\n self._check_upstream(upstream_partitions_def=upstream_partitions_def)\n\n upstream_subset = upstream_partitions_def.empty_subset()\n if downstream_partitions_subset is None:\n return UpstreamPartitionsResult(upstream_subset, [])\n\n upstream_keys = set()\n for key in downstream_partitions_subset.get_partition_keys():\n upstream_keys.update(self._inverse_mapping[key])\n\n return UpstreamPartitionsResult(upstream_subset.with_partition_keys(upstream_keys), [])
\n\n\nclass InferSingleToMultiDimensionDepsResult(\n NamedTuple(\n "_InferSingleToMultiDimensionDepsResult",\n [\n ("can_infer", bool),\n ("inference_failure_reason", Optional[str]),\n ("dimension_dependency", Optional[DimensionDependency]),\n ],\n )\n):\n def __new__(\n cls,\n can_infer: bool,\n inference_failure_reason: Optional[str] = None,\n dimension_dependency: Optional[DimensionDependency] = None,\n ):\n if can_infer and dimension_dependency is None:\n check.failed("dimension_dependency must be provided if can_infer is True")\n if not can_infer and inference_failure_reason is None:\n check.failed("inference_failure_reason must be provided if can_infer is False")\n\n return super(InferSingleToMultiDimensionDepsResult, cls).__new__(\n cls,\n can_infer,\n inference_failure_reason,\n dimension_dependency,\n )\n\n\ndef _get_infer_single_to_multi_dimension_deps_result(\n upstream_partitions_def: PartitionsDefinition,\n downstream_partitions_def: PartitionsDefinition,\n partition_dimension_name: Optional[str] = None,\n) -> InferSingleToMultiDimensionDepsResult:\n from dagster._core.definitions.time_window_partition_mapping import TimeWindowPartitionMapping\n\n upstream_is_multipartitioned = isinstance(upstream_partitions_def, MultiPartitionsDefinition)\n\n multipartitions_defs = [\n partitions_def\n for partitions_def in [upstream_partitions_def, downstream_partitions_def]\n if isinstance(partitions_def, MultiPartitionsDefinition)\n ]\n if len(multipartitions_defs) != 1:\n return InferSingleToMultiDimensionDepsResult(\n False,\n "Can only use MultiToSingleDimensionPartitionMapping when upstream asset is"\n " multipartitioned and the downstream asset is single dimensional, or vice versa."\n f" Instead received {len(multipartitions_defs)} multi-partitioned assets.",\n )\n\n multipartitions_def = cast(MultiPartitionsDefinition, next(iter(multipartitions_defs)))\n\n single_dimension_partitions_def = next(\n iter(\n {\n upstream_partitions_def,\n downstream_partitions_def,\n }\n - set(multipartitions_defs)\n )\n )\n\n filtered_multipartition_dims = (\n multipartitions_def.partitions_defs\n if partition_dimension_name is None\n else [\n dim\n for dim in multipartitions_def.partitions_defs\n if dim.name == partition_dimension_name\n ]\n )\n\n if partition_dimension_name:\n if len(filtered_multipartition_dims) != 1:\n return InferSingleToMultiDimensionDepsResult(\n False,\n f"Provided partition dimension name {partition_dimension_name} not found in"\n f" multipartitions definition {multipartitions_def}.",\n )\n\n matching_dimension_defs = [\n dimension_def\n for dimension_def in filtered_multipartition_dims\n if dimension_def.partitions_def == single_dimension_partitions_def\n ]\n\n if len(matching_dimension_defs) == 1:\n return InferSingleToMultiDimensionDepsResult(\n True,\n dimension_dependency=DimensionDependency(\n IdentityPartitionMapping(),\n upstream_dimension_name=(\n matching_dimension_defs[0].name if upstream_is_multipartitioned else None\n ),\n downstream_dimension_name=(\n matching_dimension_defs[0].name if not upstream_is_multipartitioned else None\n ),\n ),\n )\n elif len(matching_dimension_defs) > 1:\n return InferSingleToMultiDimensionDepsResult(\n False,\n "partition dimension name must be specified when multiple dimensions of the"\n " MultiPartitionsDefinition match the single dimension partitions def",\n )\n\n time_dimensions = [\n dimension_def\n for dimension_def in filtered_multipartition_dims\n if isinstance(dimension_def.partitions_def, TimeWindowPartitionsDefinition)\n 
]\n\n if len(time_dimensions) == 1 and isinstance(\n single_dimension_partitions_def, TimeWindowPartitionsDefinition\n ):\n return InferSingleToMultiDimensionDepsResult(\n True,\n dimension_dependency=DimensionDependency(\n TimeWindowPartitionMapping(),\n upstream_dimension_name=(\n time_dimensions[0].name if upstream_is_multipartitioned else None\n ),\n downstream_dimension_name=(\n time_dimensions[0].name if not upstream_is_multipartitioned else None\n ),\n ),\n )\n\n return InferSingleToMultiDimensionDepsResult(\n False,\n "MultiToSingleDimensionPartitionMapping can only be used when: \\n(a) The single dimensional"\n " partitions definition is a dimension of the MultiPartitionsDefinition.\\n(b) The single"\n " dimensional partitions definition is a TimeWindowPartitionsDefinition and the"\n " MultiPartitionsDefinition has a single time dimension.",\n )\n\n\ndef infer_partition_mapping(\n partition_mapping: Optional[PartitionMapping],\n downstream_partitions_def: Optional[PartitionsDefinition],\n upstream_partitions_def: Optional[PartitionsDefinition],\n) -> PartitionMapping:\n from .time_window_partition_mapping import TimeWindowPartitionMapping\n\n if partition_mapping is not None:\n return partition_mapping\n elif upstream_partitions_def and downstream_partitions_def:\n if _get_infer_single_to_multi_dimension_deps_result(\n upstream_partitions_def, downstream_partitions_def\n ).can_infer:\n with disable_dagster_warnings():\n return MultiToSingleDimensionPartitionMapping()\n elif isinstance(upstream_partitions_def, TimeWindowPartitionsDefinition) and isinstance(\n downstream_partitions_def, TimeWindowPartitionsDefinition\n ):\n return TimeWindowPartitionMapping()\n else:\n return IdentityPartitionMapping()\n else:\n return AllPartitionMapping()\n\n\ndef get_builtin_partition_mapping_types() -> Tuple[Type[PartitionMapping], ...]:\n from dagster._core.definitions.time_window_partition_mapping import TimeWindowPartitionMapping\n\n return (\n AllPartitionMapping,\n IdentityPartitionMapping,\n LastPartitionMapping,\n SpecificPartitionsPartitionMapping,\n StaticPartitionMapping,\n TimeWindowPartitionMapping,\n MultiToSingleDimensionPartitionMapping,\n MultiPartitionMapping,\n )\n
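The snippet below is an illustrative sketch (not part of the module above) of how ``StaticPartitionMapping`` is commonly wired into an asset dependency through ``AssetIn``; the asset names and partition keys are hypothetical.

.. code-block:: python

    from dagster import AssetIn, StaticPartitionMapping, StaticPartitionsDefinition, asset

    regions = StaticPartitionsDefinition(["us", "eu"])
    countries = StaticPartitionsDefinition(["usa", "canada", "france", "germany"])


    @asset(partitions_def=regions)
    def regional_totals():
        ...


    # Each upstream region key fans out to one or more downstream country keys.
    @asset(
        partitions_def=countries,
        ins={
            "regional_totals": AssetIn(
                partition_mapping=StaticPartitionMapping(
                    {"us": ["usa", "canada"], "eu": ["france", "germany"]}
                )
            )
        },
    )
    def country_totals(regional_totals):
        ...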
", "current_page_name": "_modules/dagster/_core/definitions/partition_mapping", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.partition_mapping"}, "partitioned_schedule": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.partitioned_schedule

\nfrom typing import Callable, Mapping, NamedTuple, Optional, Union, cast\n\nimport dagster._check as check\nfrom dagster._core.errors import DagsterInvalidDefinitionError\n\nfrom .decorators.schedule_decorator import schedule\nfrom .job_definition import JobDefinition\nfrom .multi_dimensional_partitions import MultiPartitionsDefinition\nfrom .partition import PartitionsDefinition\nfrom .run_request import RunRequest, SkipReason\nfrom .schedule_definition import (\n    DefaultScheduleStatus,\n    RunRequestIterator,\n    ScheduleDefinition,\n    ScheduleEvaluationContext,\n)\nfrom .time_window_partitions import (\n    TimeWindowPartitionsDefinition,\n    get_time_partitions_def,\n    has_one_dimension_time_window_partitioning,\n)\nfrom .unresolved_asset_job_definition import UnresolvedAssetJobDefinition\n\n\nclass UnresolvedPartitionedAssetScheduleDefinition(NamedTuple):\n    """Points to an unresolved asset job. The asset selection isn't resolved yet, so we can't resolve\n    the PartitionsDefinition, so we can't resolve the schedule cadence.\n    """\n\n    name: str\n    job: UnresolvedAssetJobDefinition\n    description: Optional[str]\n    default_status: DefaultScheduleStatus\n    minute_of_hour: Optional[int]\n    hour_of_day: Optional[int]\n    day_of_week: Optional[int]\n    day_of_month: Optional[int]\n    tags: Optional[Mapping[str, str]]\n\n    def resolve(self, resolved_job: JobDefinition) -> ScheduleDefinition:\n        partitions_def = resolved_job.partitions_def\n        if partitions_def is None:\n            check.failed(\n                f"Job '{resolved_job.name}' provided to build_schedule_from_partitioned_job must"\n                " contain partitioned assets or a partitions definition."\n            )\n\n        partitions_def = _check_valid_schedule_partitions_def(partitions_def)\n        time_partitions_def = check.not_none(get_time_partitions_def(partitions_def))\n\n        return ScheduleDefinition(\n            job=resolved_job,\n            name=self.name,\n            execution_fn=_get_schedule_evaluation_fn(partitions_def, resolved_job, self.tags),\n            execution_timezone=time_partitions_def.timezone,\n            cron_schedule=time_partitions_def.get_cron_schedule(\n                self.minute_of_hour, self.hour_of_day, self.day_of_week, self.day_of_month\n            ),\n        )\n\n\n
[docs]def build_schedule_from_partitioned_job(\n job: Union[JobDefinition, UnresolvedAssetJobDefinition],\n description: Optional[str] = None,\n name: Optional[str] = None,\n minute_of_hour: Optional[int] = None,\n hour_of_day: Optional[int] = None,\n day_of_week: Optional[int] = None,\n day_of_month: Optional[int] = None,\n default_status: DefaultScheduleStatus = DefaultScheduleStatus.STOPPED,\n tags: Optional[Mapping[str, str]] = None,\n) -> Union[UnresolvedPartitionedAssetScheduleDefinition, ScheduleDefinition]:\n """Creates a schedule from a time window-partitioned job or a job that targets\n time window-partitioned assets. The job can also be multipartitioned, as long as one\n of the partitions dimensions is time-partitioned.\n\n The schedule executes at the cadence specified by the time partitioning of the job or assets.\n\n Examples:\n .. code-block:: python\n\n ######################################\n # Job that targets partitioned assets\n ######################################\n\n from dagster import (\n DailyPartitionsDefinition,\n Definitions,\n asset,\n build_schedule_from_partitioned_job,\n define_asset_job,\n )\n\n @asset(partitions_def=DailyPartitionsDefinition(start_date="2020-01-01"))\n def asset1():\n ...\n\n asset1_job = define_asset_job("asset1_job", selection=[asset1])\n\n # The created schedule will fire daily\n asset1_job_schedule = build_schedule_from_partitioned_job(asset1_job)\n\n defs = Definitions(assets=[asset1], schedules=[asset1_job_schedule])\n\n ################\n # Non-asset job\n ################\n\n from dagster import DailyPartitionsDefinition, Definitions, build_schedule_from_partitioned_job, job\n\n\n @job(partitions_def=DailyPartitionsDefinition(start_date="2020-01-01"))\n def do_stuff_partitioned():\n ...\n\n # The created schedule will fire daily\n do_stuff_partitioned_schedule = build_schedule_from_partitioned_job(\n do_stuff_partitioned,\n )\n\n defs = Definitions(schedules=[do_stuff_partitioned_schedule])\n """\n check.invariant(\n not (day_of_week and day_of_month),\n "Cannot provide both day_of_month and day_of_week parameter to"\n " build_schedule_from_partitioned_job.",\n )\n\n if isinstance(job, UnresolvedAssetJobDefinition) and job.partitions_def is None:\n return UnresolvedPartitionedAssetScheduleDefinition(\n job=job,\n default_status=default_status,\n name=check.opt_str_param(name, "name", f"{job.name}_schedule"),\n description=check.opt_str_param(description, "description"),\n minute_of_hour=minute_of_hour,\n hour_of_day=hour_of_day,\n day_of_week=day_of_week,\n day_of_month=day_of_month,\n tags=tags,\n )\n else:\n partitions_def = job.partitions_def\n if partitions_def is None:\n check.failed("The provided job is not partitioned")\n\n partitions_def = _check_valid_schedule_partitions_def(partitions_def)\n time_partitions_def = check.not_none(get_time_partitions_def(partitions_def))\n\n return schedule(\n cron_schedule=time_partitions_def.get_cron_schedule(\n minute_of_hour, hour_of_day, day_of_week, day_of_month\n ),\n job=job,\n default_status=default_status,\n execution_timezone=time_partitions_def.timezone,\n name=check.opt_str_param(name, "name", f"{job.name}_schedule"),\n description=check.opt_str_param(description, "description"),\n )(_get_schedule_evaluation_fn(partitions_def, job, tags))
\n\n\ndef _get_schedule_evaluation_fn(\n partitions_def: PartitionsDefinition,\n job: Union[JobDefinition, UnresolvedAssetJobDefinition],\n tags: Optional[Mapping[str, str]] = None,\n) -> Callable[[ScheduleEvaluationContext], Union[SkipReason, RunRequest, RunRequestIterator]]:\n def schedule_fn(context):\n # Run for the latest partition. Prior partitions will have been handled by prior ticks.\n if isinstance(partitions_def, TimeWindowPartitionsDefinition):\n partition_key = partitions_def.get_last_partition_key(context.scheduled_execution_time)\n if partition_key is None:\n return SkipReason("The job's PartitionsDefinition has no partitions")\n\n return job.run_request_for_partition(\n partition_key=partition_key,\n run_key=partition_key,\n tags=tags,\n current_time=context.scheduled_execution_time,\n )\n else:\n check.invariant(isinstance(partitions_def, MultiPartitionsDefinition))\n time_window_dimension = partitions_def.time_window_dimension\n partition_key = time_window_dimension.partitions_def.get_last_partition_key(\n context.scheduled_execution_time\n )\n if partition_key is None:\n return SkipReason("The job's PartitionsDefinition has no partitions")\n\n return [\n job.run_request_for_partition(\n partition_key=key,\n run_key=key,\n tags=tags,\n current_time=context.scheduled_execution_time,\n dynamic_partitions_store=context.instance if context.instance_ref else None,\n )\n for key in partitions_def.get_multipartition_keys_with_dimension_value(\n time_window_dimension.name,\n partition_key,\n dynamic_partitions_store=context.instance if context.instance_ref else None,\n )\n ]\n\n return schedule_fn\n\n\ndef _check_valid_schedule_partitions_def(\n partitions_def: PartitionsDefinition,\n) -> Union[TimeWindowPartitionsDefinition, MultiPartitionsDefinition]:\n if not has_one_dimension_time_window_partitioning(partitions_def):\n raise DagsterInvalidDefinitionError(\n "Tried to build a partitioned schedule from an asset job, but received an invalid"\n " partitions definition. The permitted partitions definitions are: \\n1."\n " TimeWindowPartitionsDefinition\\n2. MultiPartitionsDefinition with a single"\n " TimeWindowPartitionsDefinition dimension"\n )\n\n return cast(Union[TimeWindowPartitionsDefinition, MultiPartitionsDefinition], partitions_def)\n\n\nschedule_from_partitions = build_schedule_from_partitioned_job\n
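As a hedged illustration of the cron-offset parameters accepted by ``build_schedule_from_partitioned_job`` (the asset and job names below are hypothetical), a daily-partitioned asset job can be scheduled to fire at 01:30 each day, one tick per newly completed partition:

.. code-block:: python

    from dagster import (
        DailyPartitionsDefinition,
        Definitions,
        asset,
        build_schedule_from_partitioned_job,
        define_asset_job,
    )


    @asset(partitions_def=DailyPartitionsDefinition(start_date="2023-01-01"))
    def daily_events():
        ...


    daily_events_job = define_asset_job("daily_events_job", selection=[daily_events])

    # Fires at 01:30 each day, targeting the most recently completed daily partition.
    daily_events_schedule = build_schedule_from_partitioned_job(
        daily_events_job, hour_of_day=1, minute_of_hour=30
    )

    defs = Definitions(assets=[daily_events], schedules=[daily_events_schedule])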
", "current_page_name": "_modules/dagster/_core/definitions/partitioned_schedule", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.partitioned_schedule"}, "policy": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.policy

\nfrom enum import Enum\nfrom random import random\nfrom typing import NamedTuple, Optional\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr\nfrom dagster._core.errors import DagsterInvalidDefinitionError\n\n\n
[docs]class Backoff(Enum):\n """A modifier for delay as a function of attempt number.\n\n LINEAR: `attempt_num * delay`\n EXPONENTIAL: `((2 ^ attempt_num) - 1) * delay`\n """\n\n LINEAR = "LINEAR"\n EXPONENTIAL = "EXPONENTIAL"
\n\n\n
[docs]class Jitter(Enum):\n """A randomizing modifier for delay, applied after backoff calculation.\n\n FULL: between 0 and the calculated delay based on backoff: `random() * backoff_delay`\n PLUS_MINUS: +/- the delay: `backoff_delay + ((2 * (random() * delay)) - delay)`\n """\n\n FULL = "FULL"\n PLUS_MINUS = "PLUS_MINUS"
\n\n\n
[docs]class RetryPolicy(\n NamedTuple(\n "_RetryPolicy",\n [\n ("max_retries", PublicAttr[int]),\n ("delay", PublicAttr[Optional[check.Numeric]]),\n # declarative time modulation to allow calc without running user function\n ("backoff", PublicAttr[Optional[Backoff]]),\n ("jitter", PublicAttr[Optional[Jitter]]),\n ],\n ),\n):\n """A declarative policy for when to request retries when an exception occurs during op execution.\n\n Args:\n max_retries (int):\n The maximum number of retries to attempt. Defaults to 1.\n delay (Optional[Union[int,float]]):\n The time in seconds to wait between the retry being requested and the next attempt\n being started. This unit of time can be modulated as a function of attempt number\n with backoff and randomly with jitter.\n backoff (Optional[Backoff]):\n A modifier for delay as a function of retry attempt number.\n jitter (Optional[Jitter]):\n A randomizing modifier for delay, applied after backoff calculation.\n """\n\n def __new__(\n cls,\n max_retries: int = 1,\n delay: Optional[check.Numeric] = None,\n backoff: Optional[Backoff] = None,\n jitter: Optional[Jitter] = None,\n ):\n if backoff is not None and delay is None:\n raise DagsterInvalidDefinitionError(\n "Can not set backoff on RetryPolicy without also setting delay"\n )\n\n if jitter is not None and delay is None:\n raise DagsterInvalidDefinitionError(\n "Can not set jitter on RetryPolicy without also setting delay"\n )\n\n return super().__new__(\n cls,\n max_retries=check.int_param(max_retries, "max_retries"),\n delay=check.opt_numeric_param(delay, "delay"),\n backoff=check.opt_inst_param(backoff, "backoff", Backoff),\n jitter=check.opt_inst_param(jitter, "jitter", Jitter),\n )\n\n def calculate_delay(self, attempt_num: int) -> check.Numeric:\n return calculate_delay(\n attempt_num=attempt_num,\n backoff=self.backoff,\n jitter=self.jitter,\n base_delay=self.delay or 0,\n )
\n\n\ndef calculate_delay(\n attempt_num: int, backoff: Optional[Backoff], jitter: Optional[Jitter], base_delay: float\n) -> float:\n if backoff is Backoff.EXPONENTIAL:\n calc_delay = ((2**attempt_num) - 1) * base_delay\n elif backoff is Backoff.LINEAR:\n calc_delay = base_delay * attempt_num\n elif backoff is None:\n calc_delay = base_delay\n else:\n check.assert_never(backoff)\n\n if jitter is Jitter.FULL:\n calc_delay = random() * calc_delay\n elif jitter is Jitter.PLUS_MINUS:\n calc_delay = calc_delay + ((2 * (random() * base_delay)) - base_delay)\n elif jitter is None:\n pass\n else:\n check.assert_never(jitter)\n\n return calc_delay\n
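A minimal sketch of attaching a ``RetryPolicy`` with backoff and jitter to an op (the op name is hypothetical): with ``Backoff.EXPONENTIAL`` and ``delay=2``, the base delays before retries 1-3 are ``((2 ** attempt_num) - 1) * 2`` seconds, i.e. 2, 6, and 14, each then scaled by a random factor in ``[0, 1)`` because of ``Jitter.FULL``.

.. code-block:: python

    from dagster import Backoff, Jitter, RetryPolicy, op


    @op(
        retry_policy=RetryPolicy(
            max_retries=3,
            delay=2,
            backoff=Backoff.EXPONENTIAL,
            jitter=Jitter.FULL,
        )
    )
    def flaky_op():
        # Transient failures raised here are retried according to the policy above.
        ...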
", "current_page_name": "_modules/dagster/_core/definitions/policy", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.policy"}, "reconstruct": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.reconstruct

\nimport inspect\nimport json\nimport os\nimport sys\nfrom functools import lru_cache\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Callable,\n    Dict,\n    Iterable,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Tuple,\n    TypeVar,\n    Union,\n    overload,\n)\n\nfrom typing_extensions import Self, TypeAlias\n\nimport dagster._check as check\nimport dagster._seven as seven\nfrom dagster._annotations import experimental\nfrom dagster._core.code_pointer import (\n    CodePointer,\n    CustomPointer,\n    FileCodePointer,\n    ModuleCodePointer,\n    get_python_file_from_target,\n)\nfrom dagster._core.definitions.asset_check_spec import AssetCheckKey\nfrom dagster._core.errors import DagsterInvariantViolationError\nfrom dagster._core.origin import (\n    DEFAULT_DAGSTER_ENTRY_POINT,\n    JobPythonOrigin,\n    RepositoryPythonOrigin,\n)\nfrom dagster._serdes import pack_value, unpack_value, whitelist_for_serdes\nfrom dagster._serdes.serdes import NamedTupleSerializer\nfrom dagster._utils import hash_collection\n\nfrom .events import AssetKey\nfrom .job_base import IJob\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.assets import AssetsDefinition\n    from dagster._core.definitions.job_definition import JobDefinition\n    from dagster._core.definitions.repository_definition import (\n        PendingRepositoryDefinition,\n        RepositoryLoadData,\n    )\n    from dagster._core.definitions.source_asset import SourceAsset\n\n    from .graph_definition import GraphDefinition\n    from .repository_definition import RepositoryDefinition\n\n\ndef get_ephemeral_repository_name(job_name: str) -> str:\n    check.str_param(job_name, "job_name")\n    return f"__repository__{job_name}"\n\n\n@whitelist_for_serdes\nclass ReconstructableRepository(\n    NamedTuple(\n        "_ReconstructableRepository",\n        [\n            ("pointer", CodePointer),\n            ("container_image", Optional[str]),\n            ("executable_path", Optional[str]),\n            ("entry_point", Sequence[str]),\n            ("container_context", Optional[Mapping[str, Any]]),\n            ("repository_load_data", Optional["RepositoryLoadData"]),\n        ],\n    )\n):\n    def __new__(\n        cls,\n        pointer: CodePointer,\n        container_image: Optional[str] = None,\n        executable_path: Optional[str] = None,\n        entry_point: Optional[Sequence[str]] = None,\n        container_context: Optional[Mapping[str, Any]] = None,\n        repository_load_data: Optional["RepositoryLoadData"] = None,\n    ):\n        from dagster._core.definitions.repository_definition import RepositoryLoadData\n\n        return super(ReconstructableRepository, cls).__new__(\n            cls,\n            pointer=check.inst_param(pointer, "pointer", CodePointer),\n            container_image=check.opt_str_param(container_image, "container_image"),\n            executable_path=check.opt_str_param(executable_path, "executable_path"),\n            entry_point=(\n                check.sequence_param(entry_point, "entry_point", of_type=str)\n                if entry_point is not None\n                else DEFAULT_DAGSTER_ENTRY_POINT\n            ),\n            container_context=(\n                check.mapping_param(container_context, "container_context")\n                if container_context is not None\n                else None\n            ),\n            repository_load_data=check.opt_inst_param(\n                repository_load_data, "repository_load_data", 
RepositoryLoadData\n            ),\n        )\n\n    def with_repository_load_data(\n        self, metadata: Optional["RepositoryLoadData"]\n    ) -> "ReconstructableRepository":\n        return self._replace(repository_load_data=metadata)\n\n    def get_definition(self) -> "RepositoryDefinition":\n        return repository_def_from_pointer(self.pointer, self.repository_load_data)\n\n    def get_reconstructable_job(self, name: str) -> "ReconstructableJob":\n        return ReconstructableJob(self, name)\n\n    @classmethod\n    def for_file(\n        cls,\n        file: str,\n        fn_name: str,\n        working_directory: Optional[str] = None,\n        container_image: Optional[str] = None,\n        container_context: Optional[Mapping[str, Any]] = None,\n    ) -> "ReconstructableRepository":\n        if not working_directory:\n            working_directory = os.getcwd()\n        return cls(\n            FileCodePointer(file, fn_name, working_directory),\n            container_image=container_image,\n            container_context=container_context,\n        )\n\n    @classmethod\n    def for_module(\n        cls,\n        module: str,\n        fn_name: str,\n        working_directory: Optional[str] = None,\n        container_image: Optional[str] = None,\n        container_context: Optional[Mapping[str, Any]] = None,\n    ) -> "ReconstructableRepository":\n        return cls(\n            ModuleCodePointer(module, fn_name, working_directory),\n            container_image=container_image,\n            container_context=container_context,\n        )\n\n    def get_python_origin(self) -> RepositoryPythonOrigin:\n        return RepositoryPythonOrigin(\n            executable_path=self.executable_path if self.executable_path else sys.executable,\n            code_pointer=self.pointer,\n            container_image=self.container_image,\n            entry_point=self.entry_point,\n            container_context=self.container_context,\n        )\n\n    def get_python_origin_id(self) -> str:\n        return self.get_python_origin().get_id()\n\n    # Allow this to be hashed for use in `lru_cache`. 
This is needed because:\n    # - `ReconstructableJob` uses `lru_cache`\n    # - `ReconstructableJob` has a `ReconstructableRepository` attribute\n    # - `ReconstructableRepository` has `Sequence` attributes that are unhashable by default\n    def __hash__(self) -> int:\n        if not hasattr(self, "_hash"):\n            self._hash = hash_collection(self)\n        return self._hash\n\n\nclass ReconstructableJobSerializer(NamedTupleSerializer):\n    def before_unpack(self, _, unpacked_dict: Dict[str, Any]) -> Dict[str, Any]:\n        solid_selection_str = unpacked_dict.get("solid_selection_str")\n        solids_to_execute = unpacked_dict.get("solids_to_execute")\n        if solid_selection_str:\n            unpacked_dict["op_selection"] = json.loads(solid_selection_str)\n        elif solids_to_execute:\n            unpacked_dict["op_selection"] = solids_to_execute\n        return unpacked_dict\n\n    def after_pack(self, **packed_dict: Any) -> Dict[str, Any]:\n        if packed_dict["op_selection"]:\n            packed_dict["solid_selection_str"] = json.dumps(packed_dict["op_selection"]["__set__"])\n        else:\n            packed_dict["solid_selection_str"] = None\n        del packed_dict["op_selection"]\n        return packed_dict\n\n\n@whitelist_for_serdes(\n    serializer=ReconstructableJobSerializer,\n    storage_name="ReconstructablePipeline",\n    storage_field_names={\n        "job_name": "pipeline_name",\n    },\n)\nclass ReconstructableJob(\n    NamedTuple(\n        "_ReconstructableJob",\n        [\n            ("repository", ReconstructableRepository),\n            ("job_name", str),\n            ("op_selection", Optional[AbstractSet[str]]),\n            ("asset_selection", Optional[AbstractSet[AssetKey]]),\n            ("asset_check_selection", Optional[AbstractSet[AssetCheckKey]]),\n        ],\n    ),\n    IJob,\n):\n    """Defines a reconstructable job. When your job must cross process boundaries, Dagster must know\n    how to reconstruct the job on the other side of the process boundary.\n\n    Args:\n        repository (ReconstructableRepository): The reconstructable representation of the repository\n            the job belongs to.\n        job_name (str): The name of the job.\n        op_selection (Optional[AbstractSet[str]]): A set of op query strings. Ops matching any of\n            these queries will be selected. None if no selection is specified.\n        asset_selection (Optional[AbstractSet[AssetKey]]) A set of assets to execute. None if no selection\n            is specified, i.e. 
the entire job will be run.\n    """\n\n    def __new__(\n        cls,\n        repository: ReconstructableRepository,\n        job_name: str,\n        op_selection: Optional[Iterable[str]] = None,\n        asset_selection: Optional[AbstractSet[AssetKey]] = None,\n        asset_check_selection: Optional[AbstractSet[AssetCheckKey]] = None,\n    ):\n        op_selection = set(op_selection) if op_selection else None\n        return super(ReconstructableJob, cls).__new__(\n            cls,\n            repository=check.inst_param(repository, "repository", ReconstructableRepository),\n            job_name=check.str_param(job_name, "job_name"),\n            op_selection=check.opt_nullable_set_param(op_selection, "op_selection", of_type=str),\n            asset_selection=check.opt_nullable_set_param(\n                asset_selection, "asset_selection", AssetKey\n            ),\n            asset_check_selection=check.opt_nullable_set_param(\n                asset_check_selection, "asset_check_selection", AssetCheckKey\n            ),\n        )\n\n    def with_repository_load_data(\n        self, metadata: Optional["RepositoryLoadData"]\n    ) -> "ReconstructableJob":\n        return self._replace(repository=self.repository.with_repository_load_data(metadata))\n\n    # Keep the most recent 1 definition (globally since this is a NamedTuple method)\n    # This allows repeated calls to get_definition in execution paths to not reload the job\n    @lru_cache(maxsize=1)\n    def get_definition(self) -> "JobDefinition":\n        return self.repository.get_definition().get_maybe_subset_job_def(\n            self.job_name,\n            self.op_selection,\n            self.asset_selection,\n            self.asset_check_selection,\n        )\n\n    def get_reconstructable_repository(self) -> ReconstructableRepository:\n        return self.repository\n\n    def get_subset(\n        self,\n        *,\n        op_selection: Optional[Iterable[str]] = None,\n        asset_selection: Optional[AbstractSet[AssetKey]] = None,\n        asset_check_selection: Optional[AbstractSet[AssetCheckKey]] = None,\n    ) -> Self:\n        if op_selection and (asset_selection or asset_check_selection):\n            check.failed(\n                "op_selection and asset_selection or asset_check_selection cannot both be provided"\n                " as arguments",\n            )\n        op_selection = set(op_selection) if op_selection else None\n        return ReconstructableJob(\n            repository=self.repository,\n            job_name=self.job_name,\n            op_selection=op_selection,\n            asset_selection=asset_selection,\n            asset_check_selection=asset_check_selection,\n        )\n\n    def describe(self) -> str:\n        return f'"{self.job_name}" in repository ({self.repository.pointer.describe})'\n\n    @staticmethod\n    def for_file(python_file: str, fn_name: str) -> "ReconstructableJob":\n        return bootstrap_standalone_recon_job(FileCodePointer(python_file, fn_name, os.getcwd()))\n\n    @staticmethod\n    def for_module(module: str, fn_name: str) -> "ReconstructableJob":\n        return bootstrap_standalone_recon_job(ModuleCodePointer(module, fn_name, os.getcwd()))\n\n    def to_dict(self) -> Mapping[str, object]:\n        return pack_value(self)\n\n    @staticmethod\n    def from_dict(val: Mapping[str, Any]) -> "ReconstructableJob":\n        check.mapping_param(val, "val")\n\n        inst = unpack_value(val)\n        check.invariant(\n            isinstance(inst, ReconstructableJob),\n       
     f"Deserialized object is not instance of ReconstructableJob, got {type(inst)}",\n        )\n        return inst  # type: ignore  # (illegible runtime check)\n\n    def get_python_origin(self) -> JobPythonOrigin:\n        return JobPythonOrigin(self.job_name, self.repository.get_python_origin())\n\n    def get_python_origin_id(self) -> str:\n        return self.get_python_origin().get_id()\n\n    def get_module(self) -> Optional[str]:\n        """Return the module the job is found in, the origin is a module code pointer."""\n        pointer = self.get_python_origin().get_repo_pointer()\n        if isinstance(pointer, ModuleCodePointer):\n            return pointer.module\n\n        return None\n\n    # Allow this to be hashed for `lru_cache` in `get_definition`\n    def __hash__(self) -> int:\n        if not hasattr(self, "_hash"):\n            self._hash = hash_collection(self)\n        return self._hash\n\n\n
[docs]def reconstructable(target: Callable[..., "JobDefinition"]) -> ReconstructableJob:\n """Create a :py:class:`~dagster._core.definitions.reconstructable.ReconstructableJob` from a\n function that returns a :py:class:`~dagster.JobDefinition`,\n or a function decorated with :py:func:`@job <dagster.job>`.\n\n When your job must cross process boundaries, e.g., for execution on multiple nodes or\n in different systems (like ``dagstermill``), Dagster must know how to reconstruct the job\n on the other side of the process boundary.\n\n Passing a job created with ``~dagster.GraphDefinition.to_job`` to ``reconstructable()``\n requires you to wrap that job's definition in a module-scoped function, and pass that function\n instead:\n\n .. code-block:: python\n\n from dagster import graph, reconstructable\n\n @graph\n def my_graph():\n ...\n\n def define_my_job():\n return my_graph.to_job()\n\n reconstructable(define_my_job)\n\n This function implements a very conservative strategy for reconstruction, so that its behavior\n is easy to predict, but as a consequence it is not able to reconstruct certain kinds of jobs,\n such as those defined by lambdas, in nested scopes (e.g., dynamically within a method\n call), or in interactive environments such as the Python REPL or Jupyter notebooks.\n\n If you need to reconstruct objects constructed in these ways, you should use\n :py:func:`~dagster.reconstructable.build_reconstructable_job` instead, which allows you to\n specify your own reconstruction strategy.\n\n Examples:\n .. code-block:: python\n\n from dagster import graph, job, reconstructable\n\n @job\n def foo_job():\n ...\n\n reconstructable_foo_job = reconstructable(foo_job)\n\n\n @graph\n def foo():\n ...\n\n def make_bar_job():\n return foo.to_job()\n\n reconstructable_bar_job = reconstructable(make_bar_job)\n """\n from dagster._core.definitions import JobDefinition\n\n if not seven.is_function_or_decorator_instance_of(target, JobDefinition):\n if isinstance(target, JobDefinition):\n raise DagsterInvariantViolationError(\n "Reconstructable target was not a function returning a job definition, or a job "\n "definition produced by a decorated function. If your job was constructed using "\n "``GraphDefinition.to_job``, you must wrap the ``to_job`` call in a function at "\n "module scope, i.e. not within any other functions. "\n "To learn more, check out the docs on ``reconstructable``: "\n "https://docs.dagster.io/_apidocs/execution#dagster.reconstructable"\n )\n raise DagsterInvariantViolationError(\n "Reconstructable target should be a function or definition produced "\n f"by a decorated function, got {type(target)}.",\n )\n\n if seven.is_lambda(target):\n raise DagsterInvariantViolationError(\n "Reconstructable target can not be a lambda. Use a function or "\n "decorated function defined at module scope instead, or use "\n "build_reconstructable_job."\n )\n\n if seven.qualname_differs(target):\n raise DagsterInvariantViolationError(\n f'Reconstructable target "{target.__name__}" has a different '\n f'__qualname__ "{target.__qualname__}" indicating it is not '\n "defined at module scope. Use a function or decorated function "\n "defined at module scope instead, or use build_reconstructable_job."\n )\n\n try:\n if (\n hasattr(target, "__module__")\n and hasattr(target, "__name__")\n and getattr(inspect.getmodule(target), "__name__", None) != "__main__"\n ):\n return ReconstructableJob.for_module(target.__module__, target.__name__)\n except:\n pass\n\n python_file = get_python_file_from_target(target)\n if not python_file:\n raise DagsterInvariantViolationError(\n "reconstructable() can not reconstruct jobs defined in interactive "\n "environments like <stdin>, IPython, or Jupyter notebooks. "\n "Use a job defined in a module or file instead, or use build_reconstructable_job."\n )\n\n pointer = FileCodePointer(\n python_file=python_file, fn_name=target.__name__, working_directory=os.getcwd()\n )\n\n return bootstrap_standalone_recon_job(pointer)
\n\n\n
[docs]@experimental\ndef build_reconstructable_job(\n reconstructor_module_name: str,\n reconstructor_function_name: str,\n reconstructable_args: Optional[Tuple[object]] = None,\n reconstructable_kwargs: Optional[Mapping[str, object]] = None,\n reconstructor_working_directory: Optional[str] = None,\n) -> ReconstructableJob:\n """Create a :py:class:`dagster._core.definitions.reconstructable.ReconstructableJob`.\n\n When your job must cross process boundaries, e.g., for execution on multiple nodes or in\n different systems (like ``dagstermill``), Dagster must know how to reconstruct the job\n on the other side of the process boundary.\n\n This function allows you to use the strategy of your choice for reconstructing jobs, so\n that you can reconstruct certain kinds of jobs that are not supported by\n :py:func:`~dagster.reconstructable`, such as those defined by lambdas, in nested scopes (e.g.,\n dynamically within a method call), or in interactive environments such as the Python REPL or\n Jupyter notebooks.\n\n If you need to reconstruct jobs constructed in these ways, use this function instead of\n :py:func:`~dagster.reconstructable`.\n\n Args:\n reconstructor_module_name (str): The name of the module containing the function to use to\n reconstruct the job.\n reconstructor_function_name (str): The name of the function to use to reconstruct the\n job.\n reconstructable_args (Tuple): Args to the function to use to reconstruct the job.\n Values of the tuple must be JSON serializable.\n reconstructable_kwargs (Dict[str, Any]): Kwargs to the function to use to reconstruct the\n job. Values of the dict must be JSON serializable.\n\n Examples:\n .. code-block:: python\n\n # module: mymodule\n\n from dagster import JobDefinition, job, build_reconstructable_job\n\n class JobFactory:\n def make_job(*args, **kwargs):\n\n @job\n def _job(...):\n ...\n\n return _job\n\n def reconstruct_job(*args):\n factory = JobFactory()\n return factory.make_job(*args)\n\n factory = JobFactory()\n\n foo_job_args = (...,...)\n\n foo_job_kwargs = {...:...}\n\n foo_job = factory.make_job(*foo_job_args, **foo_job_kwargs)\n\n reconstructable_foo_job = build_reconstructable_job(\n 'mymodule',\n 'reconstruct_job',\n foo_job_args,\n foo_job_kwargs,\n )\n """\n check.str_param(reconstructor_module_name, "reconstructor_module_name")\n check.str_param(reconstructor_function_name, "reconstructor_function_name")\n check.opt_str_param(\n reconstructor_working_directory, "reconstructor_working_directory", os.getcwd()\n )\n\n _reconstructable_args: List[object] = list(\n check.opt_tuple_param(reconstructable_args, "reconstructable_args")\n )\n _reconstructable_kwargs: List[List[Union[str, object]]] = list(\n (\n [key, value]\n for key, value in check.opt_mapping_param(\n reconstructable_kwargs, "reconstructable_kwargs", key_type=str\n ).items()\n )\n )\n\n reconstructor_pointer = ModuleCodePointer(\n reconstructor_module_name,\n reconstructor_function_name,\n working_directory=reconstructor_working_directory,\n )\n\n pointer = CustomPointer(reconstructor_pointer, _reconstructable_args, _reconstructable_kwargs)\n\n job_def = job_def_from_pointer(pointer)\n\n return ReconstructableJob(\n repository=ReconstructableRepository(pointer), # creates ephemeral repo\n job_name=job_def.name,\n )
\n\n\ndef bootstrap_standalone_recon_job(pointer: CodePointer) -> ReconstructableJob:\n # So this actually straps the the job for the sole\n # purpose of getting the job name. If we changed ReconstructableJob\n # to get the job on demand in order to get name, we could avoid this.\n job_def = job_def_from_pointer(pointer)\n return ReconstructableJob(\n repository=ReconstructableRepository(pointer), # creates ephemeral repo\n job_name=job_def.name,\n )\n\n\nLoadableDefinition: TypeAlias = Union[\n "JobDefinition",\n "RepositoryDefinition",\n "PendingRepositoryDefinition",\n "GraphDefinition",\n "Sequence[Union[AssetsDefinition, SourceAsset]]",\n]\n\nT_LoadableDefinition = TypeVar("T_LoadableDefinition", bound=LoadableDefinition)\n\n\ndef _is_list_of_assets(\n definition: LoadableDefinition,\n) -> bool:\n from dagster._core.definitions.assets import AssetsDefinition\n from dagster._core.definitions.source_asset import SourceAsset\n\n return isinstance(definition, list) and all(\n isinstance(item, (AssetsDefinition, SourceAsset)) for item in definition\n )\n\n\ndef _check_is_loadable(definition: T_LoadableDefinition) -> T_LoadableDefinition:\n from .definitions_class import Definitions\n from .graph_definition import GraphDefinition\n from .job_definition import JobDefinition\n from .repository_definition import PendingRepositoryDefinition, RepositoryDefinition\n\n if not (\n isinstance(\n definition,\n (\n JobDefinition,\n RepositoryDefinition,\n PendingRepositoryDefinition,\n GraphDefinition,\n Definitions,\n ),\n )\n or _is_list_of_assets(definition)\n ):\n raise DagsterInvariantViolationError(\n "Loadable attributes must be either a JobDefinition, GraphDefinition, "\n f"or RepositoryDefinition. Got {definition!r}."\n )\n return definition\n\n\ndef load_def_in_module(\n module_name: str, attribute: str, working_directory: Optional[str]\n) -> LoadableDefinition:\n return def_from_pointer(CodePointer.from_module(module_name, attribute, working_directory))\n\n\ndef load_def_in_package(\n package_name: str, attribute: str, working_directory: Optional[str]\n) -> LoadableDefinition:\n return def_from_pointer(\n CodePointer.from_python_package(package_name, attribute, working_directory)\n )\n\n\ndef load_def_in_python_file(\n python_file: str, attribute: str, working_directory: Optional[str]\n) -> LoadableDefinition:\n return def_from_pointer(CodePointer.from_python_file(python_file, attribute, working_directory))\n\n\ndef def_from_pointer(\n pointer: CodePointer,\n) -> LoadableDefinition:\n target = pointer.load_target()\n\n from .graph_definition import GraphDefinition\n from .job_definition import JobDefinition\n from .repository_definition import PendingRepositoryDefinition, RepositoryDefinition\n\n if isinstance(\n target,\n (\n GraphDefinition,\n JobDefinition,\n PendingRepositoryDefinition,\n RepositoryDefinition,\n ),\n ) or not callable(target):\n return _check_is_loadable(target) # type: ignore\n\n # if its a function invoke it - otherwise we are pointing to a\n # artifact in module scope, likely decorator output\n\n if seven.get_arg_names(target):\n raise DagsterInvariantViolationError(\n f"Error invoking function at {pointer.describe()} with no arguments. 
"\n "Reconstructable target must be callable with no arguments"\n )\n\n return _check_is_loadable(target())\n\n\ndef job_def_from_pointer(pointer: CodePointer) -> "JobDefinition":\n from .job_definition import JobDefinition\n\n target = def_from_pointer(pointer)\n\n if isinstance(target, JobDefinition):\n return target\n\n raise DagsterInvariantViolationError(\n "CodePointer ({str}) must resolve to a JobDefinition (or JobDefinition for legacy"\n " code). Received a {type}".format(str=pointer.describe(), type=type(target))\n )\n\n\n@overload\ndef repository_def_from_target_def(\n target: Union["RepositoryDefinition", "JobDefinition", "GraphDefinition"],\n repository_load_data: Optional["RepositoryLoadData"] = None,\n) -> "RepositoryDefinition": ...\n\n\n@overload\ndef repository_def_from_target_def(\n target: object, repository_load_data: Optional["RepositoryLoadData"] = None\n) -> None: ...\n\n\ndef repository_def_from_target_def(\n target: object, repository_load_data: Optional["RepositoryLoadData"] = None\n) -> Optional["RepositoryDefinition"]:\n from .assets import AssetsDefinition\n from .definitions_class import Definitions\n from .graph_definition import GraphDefinition\n from .job_definition import JobDefinition\n from .repository_definition import (\n SINGLETON_REPOSITORY_NAME,\n CachingRepositoryData,\n PendingRepositoryDefinition,\n RepositoryDefinition,\n )\n from .source_asset import SourceAsset\n\n if isinstance(target, Definitions):\n # reassign to handle both repository and pending repo case\n target = target.get_inner_repository_for_loading_process()\n\n # special case - we can wrap a single job in a repository\n if isinstance(target, (JobDefinition, GraphDefinition)):\n # consider including job name in generated repo name\n return RepositoryDefinition(\n name=get_ephemeral_repository_name(target.name),\n repository_data=CachingRepositoryData.from_list([target]),\n )\n elif isinstance(target, list) and all(\n isinstance(item, (AssetsDefinition, SourceAsset)) for item in target\n ):\n return RepositoryDefinition(\n name=SINGLETON_REPOSITORY_NAME,\n repository_data=CachingRepositoryData.from_list(target),\n )\n elif isinstance(target, RepositoryDefinition):\n return target\n elif isinstance(target, PendingRepositoryDefinition):\n # must load repository from scratch\n if repository_load_data is None:\n return target.compute_repository_definition()\n # can use the cached data to more efficiently load data\n return target.reconstruct_repository_definition(repository_load_data)\n else:\n return None\n\n\ndef repository_def_from_pointer(\n pointer: CodePointer, repository_load_data: Optional["RepositoryLoadData"] = None\n) -> "RepositoryDefinition":\n target = def_from_pointer(pointer)\n repo_def = repository_def_from_target_def(target, repository_load_data)\n if not repo_def:\n raise DagsterInvariantViolationError(\n f"CodePointer ({pointer.describe()}) must resolve to a "\n "RepositoryDefinition, JobDefinition, or JobDefinition. "\n f"Received a {type(target)}"\n )\n return repo_def\n
", "current_page_name": "_modules/dagster/_core/definitions/reconstruct", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.reconstruct"}, "repository_definition": {"repository_data": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.repository_definition.repository_data

\nfrom abc import ABC, abstractmethod\nfrom types import FunctionType\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Callable,\n    Dict,\n    Mapping,\n    Optional,\n    Sequence,\n    TypeVar,\n    Union,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.definitions.events import AssetKey\nfrom dagster._core.definitions.executor_definition import ExecutorDefinition\nfrom dagster._core.definitions.graph_definition import SubselectedGraphDefinition\nfrom dagster._core.definitions.job_definition import JobDefinition\nfrom dagster._core.definitions.logger_definition import LoggerDefinition\nfrom dagster._core.definitions.resource_definition import ResourceDefinition\nfrom dagster._core.definitions.schedule_definition import ScheduleDefinition\nfrom dagster._core.definitions.sensor_definition import SensorDefinition\nfrom dagster._core.definitions.source_asset import SourceAsset\nfrom dagster._core.errors import DagsterInvalidDefinitionError, DagsterInvariantViolationError\n\nfrom .caching_index import CacheingDefinitionIndex\nfrom .valid_definitions import RepositoryListDefinition\n\nif TYPE_CHECKING:\n    from dagster._core.definitions import AssetsDefinition\n\n\nT = TypeVar("T")\nResolvable = Callable[[], T]\n\n\n
[docs]class RepositoryData(ABC):\n """Users should usually rely on the :py:func:`@repository <repository>` decorator to create new\n repositories, which will in turn call the static constructors on this class. However, users may\n subclass :py:class:`RepositoryData` for fine-grained control over access to and lazy creation\n of repository members.\n """\n\n @abstractmethod\n def get_resource_key_mapping(self) -> Mapping[int, str]:\n pass\n\n @abstractmethod\n def get_top_level_resources(self) -> Mapping[str, ResourceDefinition]:\n """Return all top-level resources in the repository as a list,\n such as those provided to the Definitions constructor.\n\n Returns:\n List[ResourceDefinition]: All top-level resources in the repository.\n """\n\n @abstractmethod\n def get_env_vars_by_top_level_resource(self) -> Mapping[str, AbstractSet[str]]:\n pass\n\n
[docs] @abstractmethod\n @public\n def get_all_jobs(self) -> Sequence[JobDefinition]:\n """Return all jobs in the repository as a list.\n\n Returns:\n List[JobDefinition]: All jobs in the repository.\n """
\n\n
[docs] @public\n def get_job_names(self) -> Sequence[str]:\n """Get the names of all jobs in the repository.\n\n Returns:\n List[str]\n """\n return [job_def.name for job_def in self.get_all_jobs()]
\n\n
[docs] @public\n def has_job(self, job_name: str) -> bool:\n """Check if a job with a given name is present in the repository.\n\n Args:\n job_name (str): The name of the job.\n\n Returns:\n bool\n """\n return job_name in self.get_job_names()
\n\n
[docs] @public\n def get_job(self, job_name: str) -> JobDefinition:\n """Get a job by name.\n\n Args:\n job_name (str): Name of the job to retrieve.\n\n Returns:\n JobDefinition: The job definition corresponding to the given name.\n """\n match = next((job for job in self.get_all_jobs() if job.name == job_name), None)\n if match is None:\n raise DagsterInvariantViolationError(f"Could not find job {job_name} in repository")\n return match
\n\n
[docs] @public\n def get_schedule_names(self) -> Sequence[str]:\n """Get the names of all schedules in the repository.\n\n Returns:\n List[str]\n """\n return [schedule.name for schedule in self.get_all_schedules()]
\n\n
[docs] @public\n def get_all_schedules(self) -> Sequence[ScheduleDefinition]:\n """Return all schedules in the repository as a list.\n\n Returns:\n List[ScheduleDefinition]: All schedules in the repository.\n """\n return []
\n\n
[docs] @public\n def get_schedule(self, schedule_name: str) -> ScheduleDefinition:\n """Get a schedule by name.\n\n Args:\n schedule_name (str): name of the schedule to retrieve.\n\n Returns:\n ScheduleDefinition: The schedule definition corresponding to the given name.\n """\n schedules_with_name = [\n schedule for schedule in self.get_all_schedules() if schedule.name == schedule_name\n ]\n if not schedules_with_name:\n raise DagsterInvariantViolationError(\n f"Could not find schedule {schedule_name} in repository"\n )\n return schedules_with_name[0]
\n\n
[docs] @public\n def has_schedule(self, schedule_name: str) -> bool:\n """Check if a schedule with a given name is present in the repository."""\n return schedule_name in self.get_schedule_names()
\n\n
[docs] @public\n def get_all_sensors(self) -> Sequence[SensorDefinition]:\n """Sequence[SensorDefinition]: Return all sensors in the repository as a list."""\n return []
\n\n
[docs] @public\n def get_sensor_names(self) -> Sequence[str]:\n """Sequence[str]: Get the names of all sensors in the repository."""\n return [sensor.name for sensor in self.get_all_sensors()]
\n\n
[docs] @public\n def get_sensor(self, sensor_name: str) -> SensorDefinition:\n """Get a sensor by name.\n\n Args:\n sensor_name (str): name of the sensor to retrieve.\n\n Returns:\n SensorDefinition: The sensor definition corresponding to the given name.\n """\n sensors_with_name = [\n sensor for sensor in self.get_all_sensors() if sensor.name == sensor_name\n ]\n if not sensors_with_name:\n raise DagsterInvariantViolationError(\n f"Could not find sensor {sensor_name} in repository"\n )\n return sensors_with_name[0]
\n\n
[docs] @public\n def has_sensor(self, sensor_name: str) -> bool:\n """Check if a sensor with a given name is present in the repository."""\n return sensor_name in self.get_sensor_names()
\n\n
[docs] @public\n def get_source_assets_by_key(self) -> Mapping[AssetKey, SourceAsset]:\n """Mapping[AssetKey, SourceAsset]: Get the source assets for the repository."""\n return {}
\n\n
[docs] @public\n def get_assets_defs_by_key(self) -> Mapping[AssetKey, "AssetsDefinition"]:\n """Mapping[AssetKey, AssetsDefinition]: Get the asset definitions for the repository."""\n return {}
\n\n def load_all_definitions(self):\n # force load of all lazy constructed code artifacts\n self.get_all_jobs()\n self.get_all_schedules()\n self.get_all_sensors()\n self.get_source_assets_by_key()
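# Illustrative sketch (not part of this module): the subclassing pattern described in the
# RepositoryData docstring above, with jobs supplied lazily via callables and the remaining
# abstract methods returning empty mappings. The class and attribute names are hypothetical;
# the annotations reuse names already imported at the top of this module.
class _ExampleLazyRepositoryData(RepositoryData):
    def __init__(self, job_factories: Mapping[str, Callable[[], JobDefinition]]):
        self._job_factories = dict(job_factories)

    def get_all_jobs(self) -> Sequence[JobDefinition]:
        # Job definitions are only constructed when first requested.
        return [factory() for factory in self._job_factories.values()]

    def get_top_level_resources(self) -> Mapping[str, ResourceDefinition]:
        return {}

    def get_env_vars_by_top_level_resource(self) -> Mapping[str, AbstractSet[str]]:
        return {}

    def get_resource_key_mapping(self) -> Mapping[int, str]:
        return {}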
\n\n\nclass CachingRepositoryData(RepositoryData):\n """Default implementation of RepositoryData used by the :py:func:`@repository <repository>` decorator."""\n\n _all_jobs: Optional[Sequence[JobDefinition]]\n _all_pipelines: Optional[Sequence[JobDefinition]]\n\n def __init__(\n self,\n jobs: Mapping[str, Union[JobDefinition, Resolvable[JobDefinition]]],\n schedules: Mapping[str, Union[ScheduleDefinition, Resolvable[ScheduleDefinition]]],\n sensors: Mapping[str, Union[SensorDefinition, Resolvable[SensorDefinition]]],\n source_assets_by_key: Mapping[AssetKey, SourceAsset],\n assets_defs_by_key: Mapping[AssetKey, "AssetsDefinition"],\n top_level_resources: Mapping[str, ResourceDefinition],\n utilized_env_vars: Mapping[str, AbstractSet[str]],\n resource_key_mapping: Mapping[int, str],\n ):\n """Constructs a new CachingRepositoryData object.\n\n You may pass pipeline, job, and schedule definitions directly, or you may pass callables\n with no arguments that will be invoked to lazily construct definitions when accessed by\n name. This can be helpful for performance when there are many definitions in a repository,\n or when constructing the definitions is costly.\n\n Note that when lazily constructing a definition, the name of the definition must match its\n key in its dictionary index, or a :py:class:`DagsterInvariantViolationError` will be thrown\n at retrieval time.\n\n Args:\n jobs (Mapping[str, Union[JobDefinition, Callable[[], JobDefinition]]]):\n The job definitions belonging to the repository.\n schedules (Mapping[str, Union[ScheduleDefinition, Callable[[], ScheduleDefinition]]]):\n The schedules belonging to the repository.\n sensors (Mapping[str, Union[SensorDefinition, Callable[[], SensorDefinition]]]):\n The sensors belonging to a repository.\n source_assets_by_key (Mapping[AssetKey, SourceAsset]): The source assets belonging to a repository.\n assets_defs_by_key (Mapping[AssetKey, AssetsDefinition]): The assets definitions\n belonging to a repository.\n top_level_resources (Mapping[str, ResourceDefinition]): A dict of top-level\n resource keys to defintions, for resources which should be displayed in the UI.\n """\n from dagster._core.definitions import AssetsDefinition\n\n check.mapping_param(jobs, "jobs", key_type=str, value_type=(JobDefinition, FunctionType))\n check.mapping_param(\n schedules, "schedules", key_type=str, value_type=(ScheduleDefinition, FunctionType)\n )\n check.mapping_param(\n sensors, "sensors", key_type=str, value_type=(SensorDefinition, FunctionType)\n )\n check.mapping_param(\n source_assets_by_key, "source_assets_by_key", key_type=AssetKey, value_type=SourceAsset\n )\n check.mapping_param(\n assets_defs_by_key, "assets_defs_by_key", key_type=AssetKey, value_type=AssetsDefinition\n )\n check.mapping_param(\n top_level_resources, "top_level_resources", key_type=str, value_type=ResourceDefinition\n )\n check.mapping_param(\n utilized_env_vars,\n "utilized_resources",\n key_type=str,\n )\n check.mapping_param(\n resource_key_mapping, "resource_key_mapping", key_type=int, value_type=str\n )\n\n self._jobs = CacheingDefinitionIndex(\n JobDefinition,\n "JobDefinition",\n "job",\n jobs,\n self._validate_job,\n )\n\n self._schedules = CacheingDefinitionIndex(\n ScheduleDefinition,\n "ScheduleDefinition",\n "schedule",\n schedules,\n self._validate_schedule,\n )\n # load all schedules to force validation\n self._schedules.get_all_definitions()\n\n self._source_assets_by_key = source_assets_by_key\n self._assets_defs_by_key = assets_defs_by_key\n 
self._top_level_resources = top_level_resources\n self._utilized_env_vars = utilized_env_vars\n self._resource_key_mapping = resource_key_mapping\n\n self._sensors = CacheingDefinitionIndex(\n SensorDefinition,\n "SensorDefinition",\n "sensor",\n sensors,\n self._validate_sensor,\n )\n # load all sensors to force validation\n self._sensors.get_all_definitions()\n\n self._all_jobs = None\n\n @staticmethod\n def from_dict(repository_definitions: Dict[str, Dict[str, Any]]) -> "CachingRepositoryData":\n """Static constructor.\n\n Args:\n repository_definition (Dict[str, Dict[str, ...]]): A dict of the form:\n\n {\n 'jobs': Dict[str, Callable[[], JobDefinition]],\n 'schedules': Dict[str, Callable[[], ScheduleDefinition]]\n }\n\n This form is intended to allow definitions to be created lazily when accessed by name,\n which can be helpful for performance when there are many definitions in a repository, or\n when constructing the definitions is costly.\n """\n from .repository_data_builder import build_caching_repository_data_from_dict\n\n return build_caching_repository_data_from_dict(repository_definitions)\n\n @classmethod\n def from_list(\n cls,\n repository_definitions: Sequence[RepositoryListDefinition],\n default_executor_def: Optional[ExecutorDefinition] = None,\n default_logger_defs: Optional[Mapping[str, LoggerDefinition]] = None,\n top_level_resources: Optional[Mapping[str, ResourceDefinition]] = None,\n resource_key_mapping: Optional[Mapping[int, str]] = None,\n ) -> "CachingRepositoryData":\n """Static constructor.\n\n Args:\n repository_definitions (List[Union[JobDefinition, ScheduleDefinition, SensorDefinition, GraphDefinition]]):\n Use this constructor when you have no need to lazy load jobs or other definitions.\n top_level_resources (Optional[Mapping[str, ResourceDefinition]]): A dict of top-level\n resource keys to defintions, for resources which should be displayed in the UI.\n """\n from .repository_data_builder import build_caching_repository_data_from_list\n\n return build_caching_repository_data_from_list(\n repository_definitions=repository_definitions,\n default_executor_def=default_executor_def,\n default_logger_defs=default_logger_defs,\n top_level_resources=top_level_resources,\n resource_key_mapping=resource_key_mapping,\n )\n\n def get_env_vars_by_top_level_resource(self) -> Mapping[str, AbstractSet[str]]:\n return self._utilized_env_vars\n\n def get_resource_key_mapping(self) -> Mapping[int, str]:\n return self._resource_key_mapping\n\n def get_job_names(self) -> Sequence[str]:\n """Get the names of all jobs in the repository.\n\n Returns:\n List[str]\n """\n return self._jobs.get_definition_names()\n\n def has_job(self, job_name: str) -> bool:\n """Check if a job with a given name is present in the repository.\n\n Args:\n job_name (str): The name of the job.\n\n Returns:\n bool\n """\n check.str_param(job_name, "job_name")\n return self._jobs.has_definition(job_name)\n\n def get_top_level_resources(self) -> Mapping[str, ResourceDefinition]:\n return self._top_level_resources\n\n def get_all_jobs(self) -> Sequence[JobDefinition]:\n """Return all jobs in the repository as a list.\n\n Note that this will construct any job that has not yet been constructed.\n\n Returns:\n List[JobDefinition]: All jobs in the repository.\n """\n if self._all_jobs is not None:\n return self._all_jobs\n\n self._all_jobs = self._jobs.get_all_definitions()\n self._check_node_defs(self._all_jobs)\n return self._all_jobs\n\n def get_job(self, job_name: str) -> JobDefinition:\n """Get a job by 
name.\n\n If this job has not yet been constructed, only this job is constructed, and will\n be cached for future calls.\n\n Args:\n job_name (str): Name of the job to retrieve.\n\n Returns:\n JobDefinition: The job definition corresponding to the given name.\n """\n check.str_param(job_name, "job_name")\n return self._jobs.get_definition(job_name)\n\n def get_schedule_names(self) -> Sequence[str]:\n """Get the names of all schedules in the repository.\n\n Returns:\n List[str]\n """\n return self._schedules.get_definition_names()\n\n def get_all_schedules(self) -> Sequence[ScheduleDefinition]:\n """Return all schedules in the repository as a list.\n\n Note that this will construct any schedule that has not yet been constructed.\n\n Returns:\n List[ScheduleDefinition]: All schedules in the repository.\n """\n return self._schedules.get_all_definitions()\n\n def get_schedule(self, schedule_name: str) -> ScheduleDefinition:\n """Get a schedule by name.\n\n if this schedule has not yet been constructed, only this schedule is constructed, and will\n be cached for future calls.\n\n Args:\n schedule_name (str): name of the schedule to retrieve.\n\n Returns:\n ScheduleDefinition: The schedule definition corresponding to the given name.\n """\n check.str_param(schedule_name, "schedule_name")\n\n return self._schedules.get_definition(schedule_name)\n\n def has_schedule(self, schedule_name: str) -> bool:\n check.str_param(schedule_name, "schedule_name")\n\n return self._schedules.has_definition(schedule_name)\n\n def get_all_sensors(self) -> Sequence[SensorDefinition]:\n return self._sensors.get_all_definitions()\n\n def get_sensor_names(self) -> Sequence[str]:\n return self._sensors.get_definition_names()\n\n def get_sensor(self, sensor_name: str) -> SensorDefinition:\n return self._sensors.get_definition(sensor_name)\n\n def has_sensor(self, sensor_name: str) -> bool:\n return self._sensors.has_definition(sensor_name)\n\n def get_source_assets_by_key(self) -> Mapping[AssetKey, SourceAsset]:\n return self._source_assets_by_key\n\n def get_assets_defs_by_key(self) -> Mapping[AssetKey, "AssetsDefinition"]:\n return self._assets_defs_by_key\n\n def _check_node_defs(self, job_defs: Sequence[JobDefinition]) -> None:\n node_defs = {}\n node_to_job = {}\n for job_def in job_defs:\n for node_def in [*job_def.all_node_defs, job_def.graph]:\n # skip checks for subselected graphs because they don't have their own names\n if isinstance(node_def, SubselectedGraphDefinition):\n break\n\n if node_def.name not in node_defs:\n node_defs[node_def.name] = node_def\n node_to_job[node_def.name] = job_def.name\n\n if node_defs[node_def.name] is not node_def:\n first_name, second_name = sorted([node_to_job[node_def.name], job_def.name])\n raise DagsterInvalidDefinitionError(\n f"Conflicting definitions found in repository with name '{node_def.name}'."\n " Op/Graph definition names must be unique within a repository."\n f" {node_def.__class__.__name__} is defined in"\n f" job '{first_name}' and in"\n f" job '{second_name}'."\n )\n\n def _validate_job(self, job: JobDefinition) -> JobDefinition:\n return job\n\n def _validate_schedule(self, schedule: ScheduleDefinition) -> ScheduleDefinition:\n job_names = self.get_job_names()\n\n if schedule.job_name not in job_names:\n raise DagsterInvalidDefinitionError(\n f'ScheduleDefinition "{schedule.name}" targets job "{schedule.job_name}" '\n "which was not found in this repository."\n )\n\n return schedule\n\n def _validate_sensor(self, sensor: SensorDefinition) -> 
SensorDefinition:\n job_names = self.get_job_names()\n if len(sensor.targets) == 0:\n # skip validation when the sensor does not target a job\n return sensor\n\n for target in sensor.targets:\n if target.job_name not in job_names:\n raise DagsterInvalidDefinitionError(\n f'SensorDefinition "{sensor.name}" targets job "{target.job_name}" '\n "which was not found in this repository."\n )\n\n return sensor\n
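The lazily-evaluated dict form described in the ``from_dict`` docstring above is the same shape a ``@repository``-decorated function may return, which Dagster routes through ``CachingRepositoryData.from_dict``. A minimal sketch, assuming hypothetical ``do_nothing``/``my_job`` definitions:

.. code-block:: python

    from dagster import ScheduleDefinition, job, op, repository

    @op
    def do_nothing():
        pass

    @job
    def my_job():
        do_nothing()

    # Definitions keyed by name; each value is a zero-argument callable so the
    # definition is only constructed when first accessed by name.
    @repository
    def lazy_repo():
        return {
            "jobs": {"my_job": lambda: my_job},
            "schedules": {
                "daily": lambda: ScheduleDefinition(
                    name="daily", job_name="my_job", cron_schedule="0 0 * * *"
                )
            },
        }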
", "current_page_name": "_modules/dagster/_core/definitions/repository_definition/repository_data", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.repository_definition.repository_data"}, "repository_definition": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.repository_definition.repository_definition

\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Dict,\n    Iterable,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Type,\n    Union,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.definitions.asset_check_spec import AssetCheckKey\nfrom dagster._core.definitions.asset_graph import AssetGraph, InternalAssetGraph\nfrom dagster._core.definitions.assets_job import (\n    ASSET_BASE_JOB_PREFIX,\n)\nfrom dagster._core.definitions.cacheable_assets import AssetsDefinitionCacheableData\nfrom dagster._core.definitions.events import AssetKey, CoercibleToAssetKey\nfrom dagster._core.definitions.executor_definition import ExecutorDefinition\nfrom dagster._core.definitions.job_definition import JobDefinition\nfrom dagster._core.definitions.logger_definition import LoggerDefinition\nfrom dagster._core.definitions.metadata import MetadataMapping\nfrom dagster._core.definitions.resource_definition import ResourceDefinition\nfrom dagster._core.definitions.schedule_definition import ScheduleDefinition\nfrom dagster._core.definitions.sensor_definition import SensorDefinition\nfrom dagster._core.definitions.source_asset import SourceAsset\nfrom dagster._core.definitions.utils import check_valid_name\nfrom dagster._core.errors import DagsterInvariantViolationError\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._serdes import whitelist_for_serdes\nfrom dagster._utils import hash_collection\n\nfrom .repository_data import CachingRepositoryData, RepositoryData\nfrom .valid_definitions import (\n    RepositoryListDefinition as RepositoryListDefinition,\n)\n\nif TYPE_CHECKING:\n    from dagster._core.definitions import AssetsDefinition\n    from dagster._core.definitions.cacheable_assets import CacheableAssetsDefinition\n    from dagster._core.storage.asset_value_loader import AssetValueLoader\n\n\n@whitelist_for_serdes\nclass RepositoryLoadData(\n    NamedTuple(\n        "_RepositoryLoadData",\n        [\n            ("cached_data_by_key", Mapping[str, Sequence[AssetsDefinitionCacheableData]]),\n        ],\n    )\n):\n    def __new__(cls, cached_data_by_key: Mapping[str, Sequence[AssetsDefinitionCacheableData]]):\n        return super(RepositoryLoadData, cls).__new__(\n            cls,\n            cached_data_by_key=(\n                check.mapping_param(\n                    cached_data_by_key,\n                    "cached_data_by_key",\n                    key_type=str,\n                    value_type=list,\n                )\n            ),\n        )\n\n    # Allow this to be hashed for use in `lru_cache`. This is needed because:\n    # - `ReconstructableJob` uses `lru_cache`\n    # - `ReconstructableJob` has a `ReconstructableRepository` attribute\n    # - `ReconstructableRepository` has a `RepositoryLoadData` attribute\n    # - `RepositoryLoadData` has collection attributes that are unhashable by default\n    def __hash__(self) -> int:\n        if not hasattr(self, "_hash"):\n            self._hash = hash_collection(self)\n        return self._hash\n\n\n
[docs]class RepositoryDefinition:\n """Define a repository that contains a group of definitions.\n\n Users should typically not create objects of this class directly. Instead, use the\n :py:func:`@repository` decorator.\n\n Args:\n name (str): The name of the repository.\n repository_data (RepositoryData): Contains the definitions making up the repository.\n description (Optional[str]): A string description of the repository.\n metadata (Optional[MetadataMapping]): A map of arbitrary metadata for the repository.\n """\n\n def __init__(\n self,\n name,\n *,\n repository_data,\n description=None,\n metadata=None,\n repository_load_data=None,\n ):\n self._name = check_valid_name(name)\n self._description = check.opt_str_param(description, "description")\n self._repository_data: RepositoryData = check.inst_param(\n repository_data, "repository_data", RepositoryData\n )\n self._metadata = check.opt_mapping_param(metadata, "metadata", key_type=str)\n self._repository_load_data = check.opt_inst_param(\n repository_load_data, "repository_load_data", RepositoryLoadData\n )\n\n @property\n def repository_load_data(self) -> Optional[RepositoryLoadData]:\n return self._repository_load_data\n\n @public\n @property\n def name(self) -> str:\n """str: The name of the repository."""\n return self._name\n\n @public\n @property\n def description(self) -> Optional[str]:\n """Optional[str]: A human-readable description of the repository."""\n return self._description\n\n @public\n @property\n def metadata(self) -> Optional[MetadataMapping]:\n """Optional[MetadataMapping]: Arbitrary metadata for the repository."""\n return self._metadata\n\n def load_all_definitions(self):\n # force load of all lazy constructed code artifacts\n self._repository_data.load_all_definitions()\n\n @public\n @property\n def job_names(self) -> Sequence[str]:\n """List[str]: Names of all jobs in the repository."""\n return self._repository_data.get_job_names()\n\n def get_top_level_resources(self) -> Mapping[str, ResourceDefinition]:\n return self._repository_data.get_top_level_resources()\n\n def get_env_vars_by_top_level_resource(self) -> Mapping[str, AbstractSet[str]]:\n return self._repository_data.get_env_vars_by_top_level_resource()\n\n def get_resource_key_mapping(self) -> Mapping[int, str]:\n return self._repository_data.get_resource_key_mapping()\n\n
[docs] @public\n def has_job(self, name: str) -> bool:\n """Check if a job with a given name is present in the repository.\n\n Args:\n name (str): The name of the job.\n\n Returns:\n bool\n """\n return self._repository_data.has_job(name)
\n\n
[docs] @public\n def get_job(self, name: str) -> JobDefinition:\n """Get a job by name.\n\n If this job is present in the lazily evaluated dictionary passed to the\n constructor, but has not yet been constructed, only this job is constructed, and\n will be cached for future calls.\n\n Args:\n name (str): Name of the job to retrieve.\n\n Returns:\n JobDefinition: The job definition corresponding to\n the given name.\n """\n return self._repository_data.get_job(name)
\n\n
[docs] @public\n def get_all_jobs(self) -> Sequence[JobDefinition]:\n """Return all jobs in the repository as a list.\n\n Note that this will construct any job in the lazily evaluated dictionary that has\n not yet been constructed.\n\n Returns:\n List[JobDefinition]: All jobs in the repository.\n """\n return self._repository_data.get_all_jobs()
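As a minimal sketch of the job accessors documented above (the ``ping``/``my_job``/``my_repo`` names are hypothetical):

.. code-block:: python

    from dagster import job, op, repository

    @op
    def ping():
        return "pong"

    @job
    def my_job():
        ping()

    @repository
    def my_repo():
        return [my_job]

    assert my_repo.has_job("my_job")
    assert my_repo.get_job("my_job").name == "my_job"
    assert "my_job" in my_repo.job_names
    assert len(my_repo.get_all_jobs()) == 1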
\n\n @public\n @property\n def schedule_defs(self) -> Sequence[ScheduleDefinition]:\n """List[ScheduleDefinition]: All schedules in the repository."""\n return self._repository_data.get_all_schedules()\n\n
[docs] @public\n def get_schedule_def(self, name: str) -> ScheduleDefinition:\n """Get a schedule definition by name.\n\n Args:\n name (str): The name of the schedule.\n\n Returns:\n ScheduleDefinition: The schedule definition.\n """\n return self._repository_data.get_schedule(name)
\n\n
[docs] @public\n def has_schedule_def(self, name: str) -> bool:\n """bool: Check if a schedule with a given name is present in the repository."""\n return self._repository_data.has_schedule(name)
\n\n @public\n @property\n def sensor_defs(self) -> Sequence[SensorDefinition]:\n """Sequence[SensorDefinition]: All sensors in the repository."""\n return self._repository_data.get_all_sensors()\n\n
[docs] @public\n def get_sensor_def(self, name: str) -> SensorDefinition:\n """Get a sensor definition by name.\n\n Args:\n name (str): The name of the sensor.\n\n Returns:\n SensorDefinition: The sensor definition.\n """\n return self._repository_data.get_sensor(name)
\n\n
[docs] @public\n def has_sensor_def(self, name: str) -> bool:\n """bool: Check if a sensor with a given name is present in the repository."""\n return self._repository_data.has_sensor(name)
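The schedule and sensor accessors work the same way. A hedged sketch with hypothetical ``daily`` and ``my_sensor`` definitions attached to the same repository:

.. code-block:: python

    from dagster import RunRequest, ScheduleDefinition, job, op, repository, sensor

    @op
    def ping():
        return "pong"

    @job
    def my_job():
        ping()

    daily = ScheduleDefinition(name="daily", job=my_job, cron_schedule="0 0 * * *")

    @sensor(job=my_job)
    def my_sensor(context):
        # hypothetical: always request a run
        return RunRequest(run_key=None)

    @repository
    def my_repo():
        return [my_job, daily, my_sensor]

    assert my_repo.has_schedule_def("daily")
    assert my_repo.get_schedule_def("daily").cron_schedule == "0 0 * * *"
    assert my_repo.has_sensor_def("my_sensor")
    assert my_repo.get_sensor_def("my_sensor") in my_repo.sensor_defs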
\n\n @property\n def source_assets_by_key(self) -> Mapping[AssetKey, SourceAsset]:\n return self._repository_data.get_source_assets_by_key()\n\n @property\n def assets_defs_by_key(self) -> Mapping[AssetKey, "AssetsDefinition"]:\n return self._repository_data.get_assets_defs_by_key()\n\n def has_implicit_global_asset_job_def(self) -> bool:\n """Returns true is there is a single implicit asset job for all asset keys in a repository."""\n return self.has_job(ASSET_BASE_JOB_PREFIX)\n\n def get_implicit_global_asset_job_def(self) -> JobDefinition:\n """A useful conveninence method for repositories where there are a set of assets with\n the same partitioning schema and one wants to access their corresponding implicit job\n easily.\n """\n if not self.has_job(ASSET_BASE_JOB_PREFIX):\n raise DagsterInvariantViolationError(\n "There is no single global asset job, likely due to assets using "\n "different partitioning schemes via their partitions_def parameter. You must "\n "use get_implicit_job_def_for_assets in order to access the correct implicit job."\n )\n\n return self.get_job(ASSET_BASE_JOB_PREFIX)\n\n def get_implicit_asset_job_names(self) -> Sequence[str]:\n return [\n job_name for job_name in self.job_names if job_name.startswith(ASSET_BASE_JOB_PREFIX)\n ]\n\n def get_implicit_job_def_for_assets(\n self, asset_keys: Iterable[AssetKey]\n ) -> Optional[JobDefinition]:\n """Returns the asset base job that contains all the given assets, or None if there is no such\n job.\n """\n if self.has_job(ASSET_BASE_JOB_PREFIX):\n base_job = self.get_job(ASSET_BASE_JOB_PREFIX)\n if all(\n key in base_job.asset_layer.assets_defs_by_key\n or base_job.asset_layer.is_observable_for_asset(key)\n for key in asset_keys\n ):\n return base_job\n else:\n i = 0\n while self.has_job(f"{ASSET_BASE_JOB_PREFIX}_{i}"):\n base_job = self.get_job(f"{ASSET_BASE_JOB_PREFIX}_{i}")\n\n if all(\n key in base_job.asset_layer.assets_defs_by_key\n or base_job.asset_layer.is_observable_for_asset(key)\n for key in asset_keys\n ):\n return base_job\n\n i += 1\n\n return None\n\n def get_maybe_subset_job_def(\n self,\n job_name: str,\n op_selection: Optional[Iterable[str]] = None,\n asset_selection: Optional[AbstractSet[AssetKey]] = None,\n asset_check_selection: Optional[AbstractSet[AssetCheckKey]] = None,\n ):\n defn = self.get_job(job_name)\n return defn.get_subset(\n op_selection=op_selection,\n asset_selection=asset_selection,\n asset_check_selection=asset_check_selection,\n )\n\n
[docs] @public\n def load_asset_value(\n self,\n asset_key: CoercibleToAssetKey,\n *,\n python_type: Optional[Type] = None,\n instance: Optional[DagsterInstance] = None,\n partition_key: Optional[str] = None,\n metadata: Optional[Dict[str, Any]] = None,\n resource_config: Optional[Any] = None,\n ) -> object:\n """Load the contents of an asset as a Python object.\n\n Invokes `load_input` on the :py:class:`IOManager` associated with the asset.\n\n If you want to load the values of multiple assets, it's more efficient to use\n :py:meth:`~dagster.RepositoryDefinition.get_asset_value_loader`, which avoids spinning up\n resources separately for each asset.\n\n Args:\n asset_key (Union[AssetKey, Sequence[str], str]): The key of the asset to load.\n python_type (Optional[Type]): The python type to load the asset as. This is what will\n be returned inside `load_input` by `context.dagster_type.typing_type`.\n partition_key (Optional[str]): The partition of the asset to load.\n metadata (Optional[Dict[str, Any]]): Input metadata to pass to the :py:class:`IOManager`\n (is equivalent to setting the metadata argument in `In` or `AssetIn`).\n resource_config (Optional[Any]): A dictionary of resource configurations to be passed\n to the :py:class:`IOManager`.\n\n Returns:\n The contents of an asset as a Python object.\n """\n from dagster._core.storage.asset_value_loader import AssetValueLoader\n\n with AssetValueLoader(\n self.assets_defs_by_key, self.source_assets_by_key, instance=instance\n ) as loader:\n return loader.load_asset_value(\n asset_key,\n python_type=python_type,\n partition_key=partition_key,\n metadata=metadata,\n resource_config=resource_config,\n )
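A hedged end-to-end sketch of ``load_asset_value``; the ``my_number`` asset, the ephemeral instance, and reliance on the default filesystem IO manager are all assumptions:

.. code-block:: python

    from dagster import DagsterInstance, asset, materialize, repository

    @asset
    def my_number() -> int:
        return 5

    @repository
    def my_repo():
        return [my_number]

    # Materialize first so the default IO manager has a stored value to load back.
    instance = DagsterInstance.ephemeral()
    materialize([my_number], instance=instance)
    assert my_repo.load_asset_value("my_number", instance=instance, python_type=int) == 5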
\n\n
[docs] @public\n def get_asset_value_loader(\n self, instance: Optional[DagsterInstance] = None\n ) -> "AssetValueLoader":\n """Returns an object that can load the contents of assets as Python objects.\n\n Invokes `load_input` on the :py:class:`IOManager` associated with the assets. Avoids\n spinning up resources separately for each asset.\n\n Usage:\n\n .. code-block:: python\n\n with my_repo.get_asset_value_loader() as loader:\n asset1 = loader.load_asset_value("asset1")\n asset2 = loader.load_asset_value("asset2")\n\n """\n from dagster._core.storage.asset_value_loader import AssetValueLoader\n\n return AssetValueLoader(\n self.assets_defs_by_key, self.source_assets_by_key, instance=instance\n )
\n\n @property\n def asset_graph(self) -> InternalAssetGraph:\n return AssetGraph.from_assets(\n [*set(self.assets_defs_by_key.values()), *self.source_assets_by_key.values()]\n )\n\n # If definition comes from the @repository decorator, then the __call__ method will be\n # overwritten. Therefore, we want to maintain the call-ability of repository definitions.\n def __call__(self, *args, **kwargs):\n return self
\n\n\nclass PendingRepositoryDefinition:\n def __init__(\n self,\n name: str,\n repository_definitions: Sequence[\n Union[RepositoryListDefinition, "CacheableAssetsDefinition"]\n ],\n description: Optional[str] = None,\n metadata: Optional[MetadataMapping] = None,\n default_logger_defs: Optional[Mapping[str, LoggerDefinition]] = None,\n default_executor_def: Optional[ExecutorDefinition] = None,\n _top_level_resources: Optional[Mapping[str, ResourceDefinition]] = None,\n _resource_key_mapping: Optional[Mapping[int, str]] = None,\n ):\n self._repository_definitions = check.list_param(\n repository_definitions,\n "repository_definition",\n additional_message=(\n "PendingRepositoryDefinition supports only list-based repository data at this time."\n ),\n )\n self._name = name\n self._description = description\n self._metadata = metadata\n self._default_logger_defs = default_logger_defs\n self._default_executor_def = default_executor_def\n self._top_level_resources = _top_level_resources\n self._resource_key_mapping = _resource_key_mapping\n\n @property\n def name(self) -> str:\n return self._name\n\n def _compute_repository_load_data(self) -> RepositoryLoadData:\n from dagster._core.definitions.cacheable_assets import CacheableAssetsDefinition\n\n return RepositoryLoadData(\n cached_data_by_key={\n defn.unique_id: defn.compute_cacheable_data()\n for defn in self._repository_definitions\n if isinstance(defn, CacheableAssetsDefinition)\n }\n )\n\n def _get_repository_definition(\n self, repository_load_data: RepositoryLoadData\n ) -> RepositoryDefinition:\n from dagster._core.definitions.cacheable_assets import CacheableAssetsDefinition\n\n resolved_definitions: List[RepositoryListDefinition] = []\n for defn in self._repository_definitions:\n if isinstance(defn, CacheableAssetsDefinition):\n # should always have metadata for each cached defn at this point\n check.invariant(\n defn.unique_id in repository_load_data.cached_data_by_key,\n "No metadata found for CacheableAssetsDefinition with unique_id"\n f" {defn.unique_id}.",\n )\n # use the emtadata to generate definitions\n resolved_definitions.extend(\n defn.build_definitions(\n data=repository_load_data.cached_data_by_key[defn.unique_id]\n )\n )\n else:\n resolved_definitions.append(defn)\n\n repository_data = CachingRepositoryData.from_list(\n resolved_definitions,\n default_executor_def=self._default_executor_def,\n default_logger_defs=self._default_logger_defs,\n top_level_resources=self._top_level_resources,\n resource_key_mapping=self._resource_key_mapping,\n )\n\n return RepositoryDefinition(\n self._name,\n repository_data=repository_data,\n description=self._description,\n metadata=self._metadata,\n repository_load_data=repository_load_data,\n )\n\n def reconstruct_repository_definition(\n self, repository_load_data: RepositoryLoadData\n ) -> RepositoryDefinition:\n """Use the provided RepositoryLoadData to construct and return a RepositoryDefinition."""\n check.inst_param(repository_load_data, "repository_load_data", RepositoryLoadData)\n return self._get_repository_definition(repository_load_data)\n\n def compute_repository_definition(self) -> RepositoryDefinition:\n """Compute the required RepositoryLoadData and use it to construct and return a RepositoryDefinition."""\n repository_load_data = self._compute_repository_load_data()\n return self._get_repository_definition(repository_load_data)\n
", "current_page_name": "_modules/dagster/_core/definitions/repository_definition/repository_definition", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.repository_definition.repository_definition"}}, "resource_definition": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.resource_definition

\nfrom functools import update_wrapper\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Callable,\n    Dict,\n    Iterator,\n    Mapping,\n    Optional,\n    Union,\n    cast,\n    overload,\n)\n\nfrom typing_extensions import TypeAlias\n\nimport dagster._check as check\nfrom dagster._annotations import experimental_param, public\nfrom dagster._core.decorator_utils import format_docstring_for_description\nfrom dagster._core.definitions.config import is_callable_valid_config_arg\nfrom dagster._core.definitions.configurable import AnonymousConfigurableDefinition\nfrom dagster._core.errors import DagsterInvalidDefinitionError, DagsterInvalidInvocationError\nfrom dagster._utils import IHasInternalInit\n\nfrom ..decorator_utils import (\n    get_function_params,\n    has_at_least_one_parameter,\n    is_required_param,\n    positional_arg_name_list,\n    validate_expected_params,\n)\nfrom .definition_config_schema import (\n    CoercableToConfigSchema,\n    IDefinitionConfigSchema,\n    convert_user_facing_definition_config_schema,\n)\nfrom .resource_invocation import resource_invocation_result\nfrom .resource_requirement import (\n    RequiresResources,\n    ResourceDependencyRequirement,\n    ResourceRequirement,\n)\nfrom .scoped_resources_builder import (  # re-exported\n    IContainsGenerator as IContainsGenerator,\n    Resources as Resources,\n    ScopedResourcesBuilder as ScopedResourcesBuilder,\n)\n\nif TYPE_CHECKING:\n    from dagster._core.execution.resources_init import InitResourceContext\n\nResourceFunctionWithContext: TypeAlias = Callable[["InitResourceContext"], Any]\nResourceFunctionWithoutContext: TypeAlias = Callable[[], Any]\nResourceFunction: TypeAlias = Union[\n    ResourceFunctionWithContext,\n    ResourceFunctionWithoutContext,\n]\n\n\n
[docs]@experimental_param(param="version")\nclass ResourceDefinition(AnonymousConfigurableDefinition, RequiresResources, IHasInternalInit):\n """Core class for defining resources.\n\n Resources are scoped ways to make external resources (like database connections) available to\n ops and assets during job execution and to clean up after execution resolves.\n\n If resource_fn yields once rather than returning (in the manner of functions decorable with\n :py:func:`@contextlib.contextmanager <python:contextlib.contextmanager>`) then the body of the\n function after the yield will be run after execution resolves, allowing users to write their\n own teardown/cleanup logic.\n\n Depending on your executor, resources may be instantiated and cleaned up more than once in a\n job execution.\n\n Args:\n resource_fn (Callable[[InitResourceContext], Any]): User-provided function to instantiate\n the resource, which will be made available to executions keyed on the\n ``context.resources`` object.\n config_schema (Optional[ConfigSchema): The schema for the config. If set, Dagster will check\n that config provided for the resource matches this schema and fail if it does not. If\n not set, Dagster will accept any config provided for the resource.\n description (Optional[str]): A human-readable description of the resource.\n required_resource_keys: (Optional[Set[str]]) Keys for the resources required by this\n resource. A DagsterInvariantViolationError will be raised during initialization if\n dependencies are cyclic.\n version (Optional[str]): (Experimental) The version of the resource's definition fn. Two\n wrapped resource functions should only have the same version if they produce the same\n resource definition when provided with the same inputs.\n """\n\n def __init__(\n self,\n resource_fn: ResourceFunction,\n config_schema: CoercableToConfigSchema = None,\n description: Optional[str] = None,\n required_resource_keys: Optional[AbstractSet[str]] = None,\n version: Optional[str] = None,\n ):\n self._resource_fn = check.callable_param(resource_fn, "resource_fn")\n self._config_schema = convert_user_facing_definition_config_schema(config_schema)\n self._description = check.opt_str_param(description, "description")\n self._required_resource_keys = check.opt_set_param(\n required_resource_keys, "required_resource_keys"\n )\n self._version = check.opt_str_param(version, "version")\n\n # this attribute will be updated by the @dagster_maintained_resource and @dagster_maintained_io_manager decorators\n self._dagster_maintained = False\n self._hardcoded_resource_type = None\n\n @staticmethod\n def dagster_internal_init(\n *,\n resource_fn: ResourceFunction,\n config_schema: CoercableToConfigSchema,\n description: Optional[str],\n required_resource_keys: Optional[AbstractSet[str]],\n version: Optional[str],\n ) -> "ResourceDefinition":\n return ResourceDefinition(\n resource_fn=resource_fn,\n config_schema=config_schema,\n description=description,\n required_resource_keys=required_resource_keys,\n version=version,\n )\n\n @property\n def resource_fn(self) -> ResourceFunction:\n return self._resource_fn\n\n @property\n def config_schema(self) -> IDefinitionConfigSchema:\n return self._config_schema\n\n @public\n @property\n def description(self) -> Optional[str]:\n """A human-readable description of the resource."""\n return self._description\n\n @public\n @property\n def version(self) -> Optional[str]:\n """A string which can be used to identify a particular code version of a resource definition."""\n return 
self._version\n\n @public\n @property\n def required_resource_keys(self) -> AbstractSet[str]:\n """A set of the resource keys that this resource depends on. These keys will be made available\n to the resource's init context during execution, and the resource will not be instantiated\n until all required resources are available.\n """\n return self._required_resource_keys\n\n def _is_dagster_maintained(self) -> bool:\n return self._dagster_maintained\n\n
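The teardown behavior described in the class docstring (a ``resource_fn`` that yields once) can be sketched roughly as follows; ``_connection_fn`` and the dict standing in for a real connection are hypothetical:

.. code-block:: python

    from dagster import ResourceDefinition

    def _connection_fn(init_context):
        conn = {"open": True}  # stand-in for a real connection object
        try:
            yield conn  # made available to ops/assets via context.resources.<key>
        finally:
            conn["open"] = False  # code after the yield runs as teardown

    db_connection = ResourceDefinition(
        resource_fn=_connection_fn,
        description="Yields a connection and closes it when execution resolves.",
    )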
[docs] @public\n @staticmethod\n def none_resource(description: Optional[str] = None) -> "ResourceDefinition":\n """A helper function that returns a none resource.\n\n Args:\n description ([Optional[str]]): The description of the resource. Defaults to None.\n\n Returns:\n [ResourceDefinition]: A resource that does nothing.\n """\n return ResourceDefinition.hardcoded_resource(value=None, description=description)
\n\n
[docs] @public\n @staticmethod\n def hardcoded_resource(value: Any, description: Optional[str] = None) -> "ResourceDefinition":\n """A helper function that creates a ``ResourceDefinition`` with a hardcoded object.\n\n Args:\n value (Any): The value that will be accessible via context.resources.resource_name.\n description ([Optional[str]]): The description of the resource. Defaults to None.\n\n Returns:\n [ResourceDefinition]: A hardcoded resource.\n """\n resource_def = ResourceDefinition(\n resource_fn=lambda _init_context: value, description=description\n )\n # Make sure telemetry info gets passed in to hardcoded resources\n if hasattr(value, "_is_dagster_maintained"):\n resource_def._dagster_maintained = value._is_dagster_maintained() # noqa: SLF001\n resource_def._hardcoded_resource_type = type(value) # noqa: SLF001\n\n return resource_def
\n\n
[docs] @public\n @staticmethod\n def mock_resource(description: Optional[str] = None) -> "ResourceDefinition":\n """A helper function that creates a ``ResourceDefinition`` which wraps a ``mock.MagicMock``.\n\n Args:\n description ([Optional[str]]): The description of the resource. Defaults to None.\n\n Returns:\n [ResourceDefinition]: A resource that creates the magic methods automatically and helps\n you mock existing resources.\n """\n from unittest import mock\n\n return ResourceDefinition(\n resource_fn=lambda _init_context: mock.MagicMock(), description=description\n )
\n\n
[docs] @public\n @staticmethod\n def string_resource(description: Optional[str] = None) -> "ResourceDefinition":\n """Creates a ``ResourceDefinition`` which takes in a single string as configuration\n and returns this configured string to any ops or assets which depend on it.\n\n Args:\n description ([Optional[str]]): The description of the string resource. Defaults to None.\n\n Returns:\n [ResourceDefinition]: A resource that takes in a single string as configuration and\n returns that string.\n """\n return ResourceDefinition(\n resource_fn=lambda init_context: init_context.resource_config,\n config_schema=str,\n description=description,\n )
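For reference, the four helper constructors above can be exercised together; the values passed in are hypothetical:

.. code-block:: python

    from dagster import ResourceDefinition

    noop = ResourceDefinition.none_resource()
    settings = ResourceDefinition.hardcoded_resource({"region": "us-east-1"})
    fake_client = ResourceDefinition.mock_resource()  # backed by mock.MagicMock
    greeting = ResourceDefinition.string_resource(description="configured with a single string")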
\n\n def copy_for_configured(\n self,\n description: Optional[str],\n config_schema: CoercableToConfigSchema,\n ) -> "ResourceDefinition":\n resource_def = ResourceDefinition.dagster_internal_init(\n config_schema=config_schema,\n description=description or self.description,\n resource_fn=self.resource_fn,\n required_resource_keys=self.required_resource_keys,\n version=self.version,\n )\n\n resource_def._dagster_maintained = self._is_dagster_maintained() # noqa: SLF001\n\n return resource_def\n\n def __call__(self, *args, **kwargs):\n from dagster._core.execution.context.init import UnboundInitResourceContext\n\n if has_at_least_one_parameter(self.resource_fn):\n if len(args) + len(kwargs) == 0:\n raise DagsterInvalidInvocationError(\n "Resource initialization function has context argument, but no context was"\n " provided when invoking."\n )\n if len(args) + len(kwargs) > 1:\n raise DagsterInvalidInvocationError(\n "Initialization of resource received multiple arguments. Only a first "\n "positional context parameter should be provided when invoking."\n )\n\n context_param_name = get_function_params(self.resource_fn)[0].name\n\n if args:\n check.opt_inst_param(args[0], context_param_name, UnboundInitResourceContext)\n return resource_invocation_result(\n self, cast(Optional[UnboundInitResourceContext], args[0])\n )\n else:\n if context_param_name not in kwargs:\n raise DagsterInvalidInvocationError(\n f"Resource initialization expected argument '{context_param_name}'."\n )\n check.opt_inst_param(\n kwargs[context_param_name], context_param_name, UnboundInitResourceContext\n )\n\n return resource_invocation_result(\n self, cast(Optional[UnboundInitResourceContext], kwargs[context_param_name])\n )\n elif len(args) + len(kwargs) > 0:\n raise DagsterInvalidInvocationError(\n "Attempted to invoke resource with argument, but underlying function has no context"\n " argument. Either specify a context argument on the resource function, or remove"\n " the passed-in argument."\n )\n else:\n return resource_invocation_result(self, None)\n\n def get_resource_requirements(\n self, outer_context: Optional[object] = None\n ) -> Iterator[ResourceRequirement]:\n source_key = cast(str, outer_context)\n for resource_key in sorted(list(self.required_resource_keys)):\n yield ResourceDependencyRequirement(key=resource_key, source_key=source_key)
\n\n\ndef dagster_maintained_resource(\n resource_def: ResourceDefinition,\n) -> ResourceDefinition:\n resource_def._dagster_maintained = True # noqa: SLF001\n return resource_def\n\n\nclass _ResourceDecoratorCallable:\n def __init__(\n self,\n config_schema: Optional[Mapping[str, Any]] = None,\n description: Optional[str] = None,\n required_resource_keys: Optional[AbstractSet[str]] = None,\n version: Optional[str] = None,\n ):\n self.config_schema = config_schema # checked by underlying definition\n self.description = check.opt_str_param(description, "description")\n self.version = check.opt_str_param(version, "version")\n self.required_resource_keys = check.opt_set_param(\n required_resource_keys, "required_resource_keys"\n )\n\n def __call__(self, resource_fn: ResourceFunction) -> ResourceDefinition:\n check.callable_param(resource_fn, "resource_fn")\n\n any_name = ["*"] if has_at_least_one_parameter(resource_fn) else []\n\n params = get_function_params(resource_fn)\n\n missing_positional = validate_expected_params(params, any_name)\n if missing_positional:\n raise DagsterInvalidDefinitionError(\n f"@resource decorated function '{resource_fn.__name__}' expects a single "\n "positional argument."\n )\n\n extras = params[len(any_name) :]\n\n required_extras = list(filter(is_required_param, extras))\n if required_extras:\n raise DagsterInvalidDefinitionError(\n f"@resource decorated function '{resource_fn.__name__}' expects only a single"\n " positional required argument. Got required extra params"\n f" {', '.join(positional_arg_name_list(required_extras))}"\n )\n\n resource_def = ResourceDefinition.dagster_internal_init(\n resource_fn=resource_fn,\n config_schema=self.config_schema,\n description=self.description or format_docstring_for_description(resource_fn),\n version=self.version,\n required_resource_keys=self.required_resource_keys,\n )\n\n # `update_wrapper` typing cannot currently handle a Union of Callables correctly\n update_wrapper(resource_def, wrapped=resource_fn) # type: ignore\n\n return resource_def\n\n\n@overload\ndef resource(config_schema: ResourceFunction) -> ResourceDefinition: ...\n\n\n@overload\ndef resource(\n config_schema: CoercableToConfigSchema = ...,\n description: Optional[str] = ...,\n required_resource_keys: Optional[AbstractSet[str]] = ...,\n version: Optional[str] = ...,\n) -> Callable[[ResourceFunction], "ResourceDefinition"]: ...\n\n\n
[docs]def resource(\n config_schema: Union[ResourceFunction, CoercableToConfigSchema] = None,\n description: Optional[str] = None,\n required_resource_keys: Optional[AbstractSet[str]] = None,\n version: Optional[str] = None,\n) -> Union[Callable[[ResourceFunction], "ResourceDefinition"], "ResourceDefinition"]:\n """Define a resource.\n\n The decorated function should accept an :py:class:`InitResourceContext` and return an instance of\n the resource. This function will become the ``resource_fn`` of an underlying\n :py:class:`ResourceDefinition`.\n\n If the decorated function yields once rather than returning (in the manner of functions\n decorable with :py:func:`@contextlib.contextmanager <python:contextlib.contextmanager>`) then\n the body of the function after the yield will be run after execution resolves, allowing users\n to write their own teardown/cleanup logic.\n\n Args:\n config_schema (Optional[ConfigSchema]): The schema for the config. Configuration data available in\n `init_context.resource_config`. If not set, Dagster will accept any config provided.\n description(Optional[str]): A human-readable description of the resource.\n version (Optional[str]): (Experimental) The version of a resource function. Two wrapped\n resource functions should only have the same version if they produce the same resource\n definition when provided with the same inputs.\n required_resource_keys (Optional[Set[str]]): Keys for the resources required by this resource.\n """\n # This case is for when decorator is used bare, without arguments.\n # E.g. @resource versus @resource()\n if callable(config_schema) and not is_callable_valid_config_arg(config_schema):\n return _ResourceDecoratorCallable()(config_schema)\n\n def _wrap(resource_fn: ResourceFunction) -> "ResourceDefinition":\n return _ResourceDecoratorCallable(\n config_schema=cast(Optional[Dict[str, Any]], config_schema),\n description=description,\n required_resource_keys=required_resource_keys,\n version=version,\n )(resource_fn)\n\n return _wrap
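A minimal sketch of the decorator in use, wiring the resource into a job under the key ``db``; the ``database_client`` and ``read_table`` names are hypothetical:

.. code-block:: python

    from dagster import job, op, resource

    @resource(config_schema={"url": str})
    def database_client(init_context):
        # hypothetical client object built from the resource config
        return {"url": init_context.resource_config["url"]}

    @op(required_resource_keys={"db"})
    def read_table(context):
        context.log.info(f"reading from {context.resources.db['url']}")

    @job(resource_defs={"db": database_client})
    def read_job():
        read_table()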
\n\n\n
[docs]def make_values_resource(**kwargs: Any) -> ResourceDefinition:\n """A helper function that creates a ``ResourceDefinition`` to take in user-defined values.\n\n This is useful for sharing values between ops.\n\n Args:\n **kwargs: Arbitrary keyword arguments that will be passed to the config schema of the\n returned resource definition. If not set, Dagster will accept any config provided for\n the resource.\n\n For example:\n\n .. code-block:: python\n\n @op(required_resource_keys={"globals"})\n def my_op(context):\n print(context.resources.globals["my_str_var"])\n\n @job(resource_defs={"globals": make_values_resource(my_str_var=str, my_int_var=int)})\n def my_job():\n my_op()\n\n Returns:\n ResourceDefinition: A resource that passes in user-defined values.\n """\n return ResourceDefinition(\n resource_fn=lambda init_context: init_context.resource_config,\n config_schema=kwargs or Any,\n )
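The docstring example shows how the values resource is defined; as a hedged follow-up (reusing the same hypothetical ``my_op``/``my_job``), the values themselves are supplied through run config under the resource's ``config`` key:

.. code-block:: python

    from dagster import RunConfig, job, make_values_resource, op

    @op(required_resource_keys={"globals"})
    def my_op(context):
        context.log.info(context.resources.globals["my_str_var"])

    @job(resource_defs={"globals": make_values_resource(my_str_var=str, my_int_var=int)})
    def my_job():
        my_op()

    my_job.execute_in_process(
        run_config=RunConfig(
            resources={"globals": {"config": {"my_str_var": "hello", "my_int_var": 1}}}
        )
    )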
\n
", "current_page_name": "_modules/dagster/_core/definitions/resource_definition", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.resource_definition"}, "result": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.result

\nfrom typing import NamedTuple, Optional, Sequence\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, experimental\nfrom dagster._core.definitions.asset_check_result import AssetCheckResult\nfrom dagster._core.definitions.data_version import DataVersion\n\nfrom .events import (\n    AssetKey,\n    CoercibleToAssetKey,\n)\nfrom .metadata import MetadataUserInput\n\n\n
[docs]@experimental\nclass MaterializeResult(\n NamedTuple(\n "_MaterializeResult",\n [\n ("asset_key", PublicAttr[Optional[AssetKey]]),\n ("metadata", PublicAttr[Optional[MetadataUserInput]]),\n ("check_results", PublicAttr[Sequence[AssetCheckResult]]),\n ("data_version", PublicAttr[Optional[DataVersion]]),\n ],\n )\n):\n """An object representing a successful materialization of an asset. These can be returned from\n @asset and @multi_asset decorated functions to pass metadata or specify specific assets were\n materialized.\n\n Attributes:\n asset_key (Optional[AssetKey]): Optional in @asset, required in @multi_asset to discern which asset this refers to.\n metadata (Optional[MetadataUserInput]): Metadata to record with the corresponding AssetMaterialization event.\n """\n\n def __new__(\n cls,\n *, # enforce kwargs\n asset_key: Optional[CoercibleToAssetKey] = None,\n metadata: Optional[MetadataUserInput] = None,\n check_results: Optional[Sequence[AssetCheckResult]] = None,\n data_version: Optional[DataVersion] = None,\n ):\n asset_key = AssetKey.from_coercible(asset_key) if asset_key else None\n\n return super().__new__(\n cls,\n asset_key=asset_key,\n metadata=check.opt_nullable_mapping_param(\n metadata,\n "metadata",\n key_type=str,\n ),\n check_results=check.opt_sequence_param(\n check_results, "check_results", of_type=AssetCheckResult\n ),\n data_version=check.opt_inst_param(data_version, "data_version", DataVersion),\n )\n\n def check_result_named(self, check_name: str) -> AssetCheckResult:\n for check_result in self.check_results:\n if check_result.check_name == check_name:\n return check_result\n\n check.failed(f"Could not find check result named {check_name}")
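A minimal sketch of returning ``MaterializeResult`` from an asset to report metadata; the ``logs_table`` asset and its metadata values are hypothetical:

.. code-block:: python

    from dagster import MaterializeResult, asset, materialize

    @asset
    def logs_table() -> MaterializeResult:
        # hypothetical: the table is written out-of-band; only metadata is reported here
        return MaterializeResult(metadata={"num_rows": 10, "storage_kind": "iceberg"})

    result = materialize([logs_table])
    assert result.success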
\n
", "current_page_name": "_modules/dagster/_core/definitions/result", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.result"}, "run_config": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.run_config

\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Dict,\n    Iterator,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Tuple,\n    TypeVar,\n    Union,\n    cast,\n)\n\nfrom typing_extensions import TypeAlias\n\nfrom dagster._config import (\n    ALL_CONFIG_BUILTINS,\n    ConfigType,\n    Field,\n    Permissive,\n    Selector,\n    Shape,\n)\nfrom dagster._config.pythonic_config import Config\nfrom dagster._core.definitions.asset_layer import AssetLayer\nfrom dagster._core.definitions.executor_definition import (\n    ExecutorDefinition,\n    execute_in_process_executor,\n    in_process_executor,\n)\nfrom dagster._core.definitions.input import InputDefinition\nfrom dagster._core.definitions.output import OutputDefinition\nfrom dagster._core.errors import DagsterInvalidDefinitionError\nfrom dagster._core.storage.input_manager import IInputManagerDefinition\nfrom dagster._core.storage.output_manager import IOutputManagerDefinition\nfrom dagster._core.types.dagster_type import ALL_RUNTIME_BUILTINS, construct_dagster_type_dictionary\nfrom dagster._utils import check\n\nfrom .configurable import ConfigurableDefinition\nfrom .definition_config_schema import IDefinitionConfigSchema\nfrom .dependency import DependencyStructure, GraphNode, Node, NodeHandle, NodeInput, OpNode\nfrom .graph_definition import GraphDefinition\nfrom .logger_definition import LoggerDefinition\nfrom .op_definition import NodeDefinition, OpDefinition\nfrom .resource_definition import ResourceDefinition\n\nif TYPE_CHECKING:\n    from .source_asset import SourceAsset\n\n\ndef define_resource_dictionary_cls(\n    resource_defs: Mapping[str, ResourceDefinition],\n    required_resources: AbstractSet[str],\n) -> Shape:\n    fields = {}\n    for resource_name, resource_def in resource_defs.items():\n        if resource_def.config_schema:\n            is_required = None\n            if resource_name not in required_resources:\n                # explicitly make section not required if resource is not required\n                # for the current mode\n                is_required = False\n\n            fields[resource_name] = def_config_field(\n                resource_def,\n                is_required=is_required,\n                description=resource_def.description,\n            )\n\n    return Shape(fields=fields)\n\n\ndef remove_none_entries(ddict: Mapping[Any, Any]) -> dict:\n    return {k: v for k, v in ddict.items() if v is not None}\n\n\ndef def_config_field(\n    configurable_def: ConfigurableDefinition,\n    is_required: Optional[bool] = None,\n    description: Optional[str] = None,\n) -> Field:\n    return Field(\n        Shape(\n            {"config": configurable_def.config_field} if configurable_def.has_config_field else {}\n        ),\n        is_required=is_required,\n        description=description,\n    )\n\n\nclass RunConfigSchemaCreationData(NamedTuple):\n    job_name: str\n    nodes: Sequence[Node]\n    graph_def: GraphDefinition\n    dependency_structure: DependencyStructure\n    executor_def: ExecutorDefinition\n    resource_defs: Mapping[str, ResourceDefinition]\n    logger_defs: Mapping[str, LoggerDefinition]\n    ignored_nodes: Sequence[Node]\n    required_resources: AbstractSet[str]\n    direct_inputs: Mapping[str, Any]\n    asset_layer: AssetLayer\n\n\ndef define_logger_dictionary_cls(creation_data: RunConfigSchemaCreationData) -> Shape:\n    return Shape(\n        {\n            logger_name: def_config_field(logger_definition, is_required=False)\n            for 
logger_name, logger_definition in creation_data.logger_defs.items()\n        }\n    )\n\n\ndef define_execution_field(executor_defs: Sequence[ExecutorDefinition], description: str) -> Field:\n    default_in_process = False\n    for executor_def in executor_defs:\n        if executor_def == in_process_executor:\n            default_in_process = True\n\n    selector = selector_for_named_defs(executor_defs)\n\n    if default_in_process:\n        return Field(\n            selector, default_value={in_process_executor.name: {}}, description=description\n        )\n\n    # If we are using the execute_in_process executor, then ignore all executor config.\n    if len(executor_defs) == 1 and executor_defs[0] == execute_in_process_executor:\n        return Field(Permissive(), is_required=False, default_value={}, description=description)\n\n    return Field(selector, description=description)\n\n\ndef define_single_execution_field(executor_def: ExecutorDefinition, description: str) -> Field:\n    return def_config_field(executor_def, description=description)\n\n\ndef define_run_config_schema_type(creation_data: RunConfigSchemaCreationData) -> ConfigType:\n    execution_field = define_single_execution_field(\n        creation_data.executor_def,\n        "Configure how steps are executed within a run.",\n    )\n\n    top_level_node = GraphNode(\n        name=creation_data.graph_def.name,\n        definition=creation_data.graph_def,\n        graph_definition=creation_data.graph_def,\n    )\n\n    fields = {\n        "execution": execution_field,\n        "loggers": Field(\n            define_logger_dictionary_cls(creation_data),\n            description="Configure how loggers emit messages within a run.",\n        ),\n        "resources": Field(\n            define_resource_dictionary_cls(\n                creation_data.resource_defs,\n                creation_data.required_resources,\n            ),\n            description="Configure how shared resources are implemented within a run.",\n        ),\n        "inputs": get_inputs_field(\n            node=top_level_node,\n            handle=NodeHandle(top_level_node.name, parent=None),\n            dependency_structure=creation_data.dependency_structure,\n            resource_defs=creation_data.resource_defs,\n            node_ignored=False,\n            direct_inputs=creation_data.direct_inputs,\n            input_source_assets={},\n            asset_layer=creation_data.asset_layer,\n        ),\n    }\n\n    if creation_data.graph_def.has_config_mapping:\n        config_schema = cast(IDefinitionConfigSchema, creation_data.graph_def.config_schema)\n        nodes_field = Field(\n            {"config": config_schema.as_field()},\n            description="Configure runtime parameters for ops or assets.",\n        )\n    else:\n        nodes_field = Field(\n            define_node_shape(\n                nodes=creation_data.nodes,\n                ignored_nodes=creation_data.ignored_nodes,\n                dependency_structure=creation_data.dependency_structure,\n                resource_defs=creation_data.resource_defs,\n                asset_layer=creation_data.asset_layer,\n                node_input_source_assets=creation_data.graph_def.node_input_source_assets,\n            ),\n            description="Configure runtime parameters for ops or assets.",\n        )\n\n    fields["ops"] = nodes_field\n\n    return Shape(\n        fields=remove_none_entries(fields),\n    )\n\n\n# Common pattern for a set of named definitions (e.g. 
executors)\n# to build a selector so that one of them is selected\ndef selector_for_named_defs(named_defs) -> Selector:\n    return Selector({named_def.name: def_config_field(named_def) for named_def in named_defs})\n\n\ndef get_inputs_field(\n    node: Node,\n    handle: NodeHandle,\n    dependency_structure: DependencyStructure,\n    resource_defs: Mapping[str, ResourceDefinition],\n    node_ignored: bool,\n    asset_layer: AssetLayer,\n    input_source_assets: Mapping[str, "SourceAsset"],\n    direct_inputs: Optional[Mapping[str, Any]] = None,\n) -> Optional[Field]:\n    direct_inputs = check.opt_mapping_param(direct_inputs, "direct_inputs")\n    inputs_field_fields = {}\n    for name, inp in node.definition.input_dict.items():\n        inp_handle = NodeInput(node, inp)\n        has_upstream = input_has_upstream(dependency_structure, inp_handle, node, name)\n        if inp.input_manager_key:\n            input_field = get_input_manager_input_field(node, inp, resource_defs)\n        elif (\n            # if you have asset definitions, input will be loaded from the source asset\n            asset_layer.has_assets_defs\n            or asset_layer.has_asset_check_defs\n            and asset_layer.asset_key_for_input(handle, name)\n            and not has_upstream\n        ):\n            input_field = None\n        elif name in direct_inputs and not has_upstream:\n            input_field = None\n        elif name in input_source_assets and not has_upstream:\n            input_field = None\n        elif inp.dagster_type.loader and not has_upstream:\n            input_field = get_type_loader_input_field(node, name, inp)\n        else:\n            input_field = None\n\n        if input_field:\n            inputs_field_fields[name] = input_field\n\n    if not inputs_field_fields:\n        return None\n    if node_ignored:\n        return Field(\n            Shape(inputs_field_fields),\n            is_required=False,\n            description=(\n                "This op is not present in the current op selection, "\n                "the input config values are allowed but ignored."\n            ),\n        )\n    else:\n        return Field(Shape(inputs_field_fields))\n\n\ndef input_has_upstream(\n    dependency_structure: DependencyStructure,\n    input_handle: NodeInput,\n    node: Node,\n    input_name: str,\n) -> bool:\n    return dependency_structure.has_deps(input_handle) or node.container_maps_input(input_name)\n\n\ndef get_input_manager_input_field(\n    node: Node,\n    input_def: InputDefinition,\n    resource_defs: Mapping[str, ResourceDefinition],\n) -> Optional[Field]:\n    if input_def.input_manager_key:\n        if input_def.input_manager_key not in resource_defs:\n            raise DagsterInvalidDefinitionError(\n                f"Input '{input_def.name}' for {node.describe_node()} requires input_manager_key"\n                f" '{input_def.input_manager_key}', but no resource has been provided. 
Please"\n                " include a resource definition for that key in the provided resource_defs."\n            )\n\n        input_manager = resource_defs[input_def.input_manager_key]\n        if not isinstance(input_manager, IInputManagerDefinition):\n            raise DagsterInvalidDefinitionError(\n                f"Input '{input_def.name}' for {node.describe_node()} requires input_manager_key "\n                f"'{input_def.input_manager_key}', but the resource definition provided is not an "\n                "IInputManagerDefinition"\n            )\n\n        input_config_schema = input_manager.input_config_schema\n        if input_config_schema:\n            return input_config_schema.as_field()\n        return None\n\n    return None\n\n\ndef get_type_loader_input_field(node: Node, input_name: str, input_def: InputDefinition) -> Field:\n    loader = check.not_none(input_def.dagster_type.loader)\n    return Field(\n        loader.schema_type,\n        is_required=(not node.definition.input_has_default(input_name)),\n    )\n\n\ndef get_outputs_field(\n    node: Node,\n    resource_defs: Mapping[str, ResourceDefinition],\n) -> Optional[Field]:\n    output_manager_fields = {}\n    for name, output_def in node.definition.output_dict.items():\n        output_manager_output_field = get_output_manager_output_field(\n            node, output_def, resource_defs\n        )\n        if output_manager_output_field:\n            output_manager_fields[name] = output_manager_output_field\n\n    return Field(Shape(output_manager_fields)) if output_manager_fields else None\n\n\ndef get_output_manager_output_field(\n    node: Node, output_def: OutputDefinition, resource_defs: Mapping[str, ResourceDefinition]\n) -> Optional[ConfigType]:\n    if output_def.io_manager_key not in resource_defs:\n        raise DagsterInvalidDefinitionError(\n            f'Output "{output_def.name}" for {node.describe_node()} requires io_manager_key '\n            f'"{output_def.io_manager_key}", but no resource has been provided. 
Please include a '\n            "resource definition for that key in the provided resource_defs."\n        )\n    if not isinstance(resource_defs[output_def.io_manager_key], IOutputManagerDefinition):\n        raise DagsterInvalidDefinitionError(\n            f'Output "{output_def.name}" for {node.describe_node()} requires io_manager_key '\n            f'"{output_def.io_manager_key}", but the resource definition provided is not an '\n            "IOutputManagerDefinition"\n        )\n    output_manager_def = resource_defs[output_def.io_manager_key]\n    if (\n        output_manager_def\n        and isinstance(output_manager_def, IOutputManagerDefinition)\n        and output_manager_def.output_config_schema\n    ):\n        return output_manager_def.output_config_schema.as_field()\n\n    return None\n\n\ndef node_config_field(fields: Mapping[str, Optional[Field]], ignored: bool) -> Optional[Field]:\n    trimmed_fields = remove_none_entries(fields)\n    if trimmed_fields:\n        if ignored:\n            return Field(\n                Shape(trimmed_fields),\n                is_required=False,\n                description=(\n                    "This op is not present in the current op selection, "\n                    "the config values are allowed but ignored."\n                ),\n            )\n        else:\n            return Field(Shape(trimmed_fields))\n    else:\n        return None\n\n\ndef construct_leaf_node_config(\n    node: Node,\n    handle: NodeHandle,\n    dependency_structure: DependencyStructure,\n    config_schema: Optional[IDefinitionConfigSchema],\n    resource_defs: Mapping[str, ResourceDefinition],\n    ignored: bool,\n    asset_layer: AssetLayer,\n    input_source_assets: Mapping[str, "SourceAsset"],\n) -> Optional[Field]:\n    return node_config_field(\n        {\n            "inputs": get_inputs_field(\n                node,\n                handle,\n                dependency_structure,\n                resource_defs,\n                ignored,\n                asset_layer,\n                input_source_assets,\n            ),\n            "outputs": get_outputs_field(node, resource_defs),\n            "config": config_schema.as_field() if config_schema else None,\n        },\n        ignored=ignored,\n    )\n\n\ndef define_node_field(\n    node: Node,\n    handle: NodeHandle,\n    dependency_structure: DependencyStructure,\n    resource_defs: Mapping[str, ResourceDefinition],\n    ignored: bool,\n    asset_layer: AssetLayer,\n    input_source_assets: Mapping[str, "SourceAsset"],\n) -> Optional[Field]:\n    # All nodes regardless of compositing status get the same inputs and outputs\n    # config. 
The only thing the varies is on extra element of configuration\n    # 1) Vanilla op definition: a 'config' key with the config_schema as the value\n    # 2) Graph with field mapping: a 'config' key with the config_schema of\n    #    the config mapping (via GraphDefinition#config_schema)\n    # 3) Graph without field mapping: an 'ops' key with recursively defined\n    #    ops dictionary\n    # 4) `configured` graph with field mapping: a 'config' key with the config_schema that was\n    #    provided when `configured` was called (via GraphDefinition#config_schema)\n\n    assert isinstance(node, (OpNode, GraphNode)), f"Invalid node type: {type(node)}"\n\n    if isinstance(node, OpNode):\n        return construct_leaf_node_config(\n            node,\n            handle,\n            dependency_structure,\n            node.definition.config_schema,\n            resource_defs,\n            ignored,\n            asset_layer,\n            input_source_assets,\n        )\n\n    graph_def = node.definition\n\n    if graph_def.has_config_mapping:\n        # has_config_mapping covers cases 2 & 4 from above (only config mapped graphs can\n        # be `configured`)...\n        return construct_leaf_node_config(\n            node,\n            handle,\n            dependency_structure,\n            # ...and in both cases, the correct schema for 'config' key is exposed by this property:\n            graph_def.config_schema,\n            resource_defs,\n            ignored,\n            asset_layer,\n            input_source_assets,\n        )\n        # This case omits an 'ops' key, thus if a graph is `configured` or has a field\n        # mapping, the user cannot stub any config, inputs, or outputs for inner (child) nodes.\n    else:\n        fields = {\n            "inputs": get_inputs_field(\n                node,\n                handle,\n                dependency_structure,\n                resource_defs,\n                ignored,\n                asset_layer,\n                input_source_assets,\n            ),\n            "outputs": get_outputs_field(node, resource_defs),\n            "ops": Field(\n                define_node_shape(\n                    nodes=graph_def.nodes,\n                    ignored_nodes=None,\n                    dependency_structure=graph_def.dependency_structure,\n                    parent_handle=handle,\n                    resource_defs=resource_defs,\n                    asset_layer=asset_layer,\n                    node_input_source_assets=graph_def.node_input_source_assets,\n                )\n            ),\n        }\n\n        return node_config_field(fields, ignored=ignored)\n\n\ndef define_node_shape(\n    nodes: Sequence[Node],\n    ignored_nodes: Optional[Sequence[Node]],\n    dependency_structure: DependencyStructure,\n    resource_defs: Mapping[str, ResourceDefinition],\n    asset_layer: AssetLayer,\n    node_input_source_assets: Mapping[str, Mapping[str, "SourceAsset"]],\n    parent_handle: Optional[NodeHandle] = None,\n) -> Shape:\n    """Examples of what this method is used to generate the schema for:\n    1.\n        inputs: ...\n        ops:\n      >    op1: ...\n      >    op2: ...\n\n    2.\n        inputs:\n        ops:\n          graph1: ...\n            inputs: ...\n            ops:\n      >       op1: ...\n      >       inner_graph: ...\n\n\n    """\n    ignored_nodes = check.opt_sequence_param(ignored_nodes, "ignored_nodes", of_type=Node)\n\n    fields = {}\n    for node in nodes:\n        node_field = define_node_field(\n            
node,\n            NodeHandle(node.name, parent_handle),\n            dependency_structure,\n            resource_defs,\n            ignored=False,\n            asset_layer=asset_layer,\n            input_source_assets=node_input_source_assets.get(node.name, {}),\n        )\n\n        if node_field:\n            fields[node.name] = node_field\n\n    for node in ignored_nodes:\n        node_field = define_node_field(\n            node,\n            NodeHandle(node.name, parent_handle),\n            dependency_structure,\n            resource_defs,\n            ignored=True,\n            asset_layer=asset_layer,\n            input_source_assets=node_input_source_assets.get(node.name, {}),\n        )\n        if node_field:\n            fields[node.name] = node_field\n\n    return Shape(fields)\n\n\ndef iterate_node_def_config_types(node_def: NodeDefinition) -> Iterator[ConfigType]:\n    if isinstance(node_def, OpDefinition):\n        if node_def.has_config_field:\n            yield from node_def.get_config_field().config_type.type_iterator()\n    elif isinstance(node_def, GraphDefinition):\n        for node in node_def.nodes:\n            yield from iterate_node_def_config_types(node.definition)\n\n    else:\n        check.invariant(f"Unexpected NodeDefinition type {type(node_def)}")\n\n\ndef _gather_all_schemas(node_defs: Sequence[NodeDefinition]) -> Iterator[ConfigType]:\n    dagster_types = construct_dagster_type_dictionary(node_defs)\n    for dagster_type in list(dagster_types.values()) + list(ALL_RUNTIME_BUILTINS):\n        if dagster_type.loader:\n            yield from dagster_type.loader.schema_type.type_iterator()\n\n\ndef _gather_all_config_types(\n    node_defs: Sequence[NodeDefinition], run_config_schema_type: ConfigType\n) -> Iterator[ConfigType]:\n    for node_def in node_defs:\n        yield from iterate_node_def_config_types(node_def)\n\n    yield from run_config_schema_type.type_iterator()\n\n\ndef construct_config_type_dictionary(\n    node_defs: Sequence[NodeDefinition],\n    run_config_schema_type: ConfigType,\n) -> Tuple[Mapping[str, ConfigType], Mapping[str, ConfigType]]:\n    type_dict_by_name = {t.given_name: t for t in ALL_CONFIG_BUILTINS if t.given_name}\n    type_dict_by_key = {t.key: t for t in ALL_CONFIG_BUILTINS}\n    all_types = list(_gather_all_config_types(node_defs, run_config_schema_type)) + list(\n        _gather_all_schemas(node_defs)\n    )\n\n    for config_type in all_types:\n        name = config_type.given_name\n        if name and name in type_dict_by_name:\n            if type(config_type) is not type(type_dict_by_name[name]):\n                raise DagsterInvalidDefinitionError(\n                    "Type names must be unique. You have constructed two different "\n                    f'instances of types with the same name "{name}".'\n                )\n        elif name:\n            type_dict_by_name[name] = config_type\n\n        type_dict_by_key[config_type.key] = config_type\n\n    return type_dict_by_name, type_dict_by_key\n\n\ndef _convert_config_classes_inner(configs: Any) -> Any:\n    if not isinstance(configs, dict):\n        return configs\n\n    return {\n        k: (\n            {"config": v._convert_to_config_dictionary()}  # noqa: SLF001\n            if isinstance(v, Config)\n            else _convert_config_classes_inner(v)\n        )\n        for k, v in configs.items()\n    }\n\n\ndef _convert_config_classes(configs: Dict[str, Any]) -> Dict[str, Any]:\n    return _convert_config_classes_inner(configs)\n\n\n
[docs]class RunConfig:\n """Container for all the configuration that can be passed to a run. Accepts Pythonic definitions\n for op and asset config and resources and converts them under the hood to the appropriate config dictionaries.\n\n Example usage:\n\n .. code-block:: python\n\n class MyAssetConfig(Config):\n a_str: str\n\n @asset\n def my_asset(config: MyAssetConfig):\n assert config.a_str == "foo"\n\n materialize(\n [my_asset],\n run_config=RunConfig(\n ops={"my_asset": MyAssetConfig(a_str="foo")}\n )\n )\n\n """\n\n def __init__(\n self,\n ops: Optional[Dict[str, Any]] = None,\n resources: Optional[Dict[str, Any]] = None,\n loggers: Optional[Dict[str, Any]] = None,\n execution: Optional[Dict[str, Any]] = None,\n ):\n self.ops = check.opt_dict_param(ops, "ops")\n self.resources = check.opt_dict_param(resources, "resources")\n self.loggers = check.opt_dict_param(loggers, "loggers")\n self.execution = check.opt_dict_param(execution, "execution")\n\n def to_config_dict(self):\n return {\n "loggers": self.loggers,\n "resources": _convert_config_classes(self.resources),\n "ops": _convert_config_classes(self.ops),\n "execution": self.execution,\n }
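# Hedged illustration, not part of the rendered module source: a minimal sketch of passing
# a Pythonic Config object through RunConfig when launching a run in-process. The op and
# job names ("double_op", "doubling_job") and the config shape are hypothetical.
from dagster import Config, RunConfig, job, op


class DoubleConfig(Config):
    value: int


@op
def double_op(config: DoubleConfig) -> int:
    return config.value * 2


@job
def doubling_job():
    double_op()


if __name__ == "__main__":
    # RunConfig converts the Config instance into the underlying config dictionary.
    result = doubling_job.execute_in_process(
        run_config=RunConfig(ops={"double_op": DoubleConfig(value=21)})
    )
    assert result.success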
\n\n\nCoercibleToRunConfig: TypeAlias = Union[Dict[str, Any], RunConfig]\n\nT = TypeVar("T")\n\n\ndef convert_config_input(inp: Union[CoercibleToRunConfig, T]) -> Union[T, Mapping[str, Any]]:\n if isinstance(inp, RunConfig):\n return inp.to_config_dict()\n else:\n return inp\n
", "current_page_name": "_modules/dagster/_core/definitions/run_config", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.run_config"}, "run_request": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.run_request

\nfrom datetime import datetime\nfrom enum import Enum\nfrom typing import (\n    TYPE_CHECKING,\n    Any,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Union,\n    cast,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, experimental_param\nfrom dagster._core.definitions.asset_check_evaluation import AssetCheckEvaluation\nfrom dagster._core.definitions.events import AssetKey, AssetMaterialization, AssetObservation\nfrom dagster._core.definitions.utils import validate_tags\nfrom dagster._core.instance import DynamicPartitionsStore\nfrom dagster._core.storage.dagster_run import DagsterRun, DagsterRunStatus\nfrom dagster._core.storage.tags import PARTITION_NAME_TAG\nfrom dagster._serdes.serdes import whitelist_for_serdes\nfrom dagster._utils.error import SerializableErrorInfo\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.job_definition import JobDefinition\n    from dagster._core.definitions.partition import PartitionsDefinition\n    from dagster._core.definitions.run_config import RunConfig\n    from dagster._core.definitions.unresolved_asset_job_definition import (\n        UnresolvedAssetJobDefinition,\n    )\n\n\n@whitelist_for_serdes(old_storage_names={"JobType"})\nclass InstigatorType(Enum):\n    SCHEDULE = "SCHEDULE"\n    SENSOR = "SENSOR"\n    AUTO_MATERIALIZE = "AUTO_MATERIALIZE"\n\n\n
[docs]@whitelist_for_serdes\nclass SkipReason(NamedTuple("_SkipReason", [("skip_message", PublicAttr[Optional[str]])])):\n """Represents a skipped evaluation, where no runs are requested. May contain a message to indicate\n why no runs were requested.\n\n Attributes:\n skip_message (Optional[str]): A message displayed in the Dagster UI for why this evaluation resulted\n in no requested runs.\n """\n\n def __new__(cls, skip_message: Optional[str] = None):\n return super(SkipReason, cls).__new__(\n cls,\n skip_message=check.opt_str_param(skip_message, "skip_message"),\n )
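# Hedged illustration, not part of the rendered module source: a sensor sketch that returns
# a SkipReason when there is nothing to launch and a RunRequest otherwise. The job, op, and
# trigger-file path are hypothetical.
import os

from dagster import RunRequest, SkipReason, job, op, sensor


@op
def process_file():
    ...


@job
def file_job():
    process_file()


@sensor(job=file_job)
def file_sensor():
    if not os.path.exists("/tmp/trigger-file"):
        # The skip message is surfaced in the UI as the reason no runs were requested.
        return SkipReason("No trigger file found at /tmp/trigger-file.")
    return RunRequest(run_key="/tmp/trigger-file")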
\n\n\n
[docs]@whitelist_for_serdes\nclass AddDynamicPartitionsRequest(\n NamedTuple(\n "_AddDynamicPartitionsRequest",\n [\n ("partitions_def_name", str),\n ("partition_keys", Sequence[str]),\n ],\n )\n):\n """A request to add partitions to a dynamic partitions definition, to be evaluated by a sensor or schedule."""\n\n def __new__(\n cls,\n partitions_def_name: str,\n partition_keys: Sequence[str],\n ):\n return super(AddDynamicPartitionsRequest, cls).__new__(\n cls,\n partitions_def_name=check.str_param(partitions_def_name, "partitions_def_name"),\n partition_keys=check.list_param(partition_keys, "partition_keys", of_type=str),\n )
\n\n\n
[docs]@whitelist_for_serdes\nclass DeleteDynamicPartitionsRequest(\n NamedTuple(\n "_AddDynamicPartitionsRequest",\n [\n ("partitions_def_name", str),\n ("partition_keys", Sequence[str]),\n ],\n )\n):\n """A request to delete partitions to a dynamic partitions definition, to be evaluated by a sensor or schedule."""\n\n def __new__(\n cls,\n partitions_def_name: str,\n partition_keys: Sequence[str],\n ):\n return super(DeleteDynamicPartitionsRequest, cls).__new__(\n cls,\n partitions_def_name=check.str_param(partitions_def_name, "partitions_def_name"),\n partition_keys=check.list_param(partition_keys, "partition_keys", of_type=str),\n )
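# Hedged illustration, not part of the rendered module source: a sensor sketch that adds
# dynamic partitions and requests runs for them in one SensorResult, using the request
# classes above. The partitions definition, asset, and partition keys are hypothetical.
from dagster import (
    AddDynamicPartitionsRequest,
    DynamicPartitionsDefinition,
    RunRequest,
    SensorResult,
    asset,
    define_asset_job,
    sensor,
)

regions = DynamicPartitionsDefinition(name="regions")


@asset(partitions_def=regions)
def region_report():
    ...


region_job = define_asset_job("region_job", selection="region_report")


@sensor(job=region_job)
def region_sensor():
    new_keys = ["us-east", "eu-west"]  # hypothetical: normally discovered from an external system
    return SensorResult(
        run_requests=[RunRequest(partition_key=key) for key in new_keys],
        # Run requests are evaluated against the partition set with these additions applied.
        dynamic_partitions_requests=[
            AddDynamicPartitionsRequest(partitions_def_name="regions", partition_keys=new_keys)
        ],
    )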
\n\n\n
[docs]@whitelist_for_serdes\nclass RunRequest(\n NamedTuple(\n "_RunRequest",\n [\n ("run_key", PublicAttr[Optional[str]]),\n ("run_config", PublicAttr[Mapping[str, Any]]),\n ("tags", PublicAttr[Mapping[str, str]]),\n ("job_name", PublicAttr[Optional[str]]),\n ("asset_selection", PublicAttr[Optional[Sequence[AssetKey]]]),\n ("stale_assets_only", PublicAttr[bool]),\n ("partition_key", PublicAttr[Optional[str]]),\n ],\n )\n):\n """Represents all the information required to launch a single run. Must be returned by a\n SensorDefinition or ScheduleDefinition's evaluation function for a run to be launched.\n\n Attributes:\n run_key (Optional[str]): A string key to identify this launched run. For sensors, ensures that\n only one run is created per run key across all sensor evaluations. For schedules,\n ensures that one run is created per tick, across failure recoveries. Passing in a `None`\n value means that a run will always be launched per evaluation.\n run_config (Optional[Mapping[str, Any]]: Configuration for the run. If the job has\n a :py:class:`PartitionedConfig`, this value will override replace the config\n provided by it.\n tags (Optional[Dict[str, Any]]): A dictionary of tags (string key-value pairs) to attach\n to the launched run.\n job_name (Optional[str]): (Experimental) The name of the job this run request will launch.\n Required for sensors that target multiple jobs.\n asset_selection (Optional[Sequence[AssetKey]]): A sequence of AssetKeys that should be\n launched with this run.\n stale_assets_only (bool): Set to true to further narrow the asset\n selection to stale assets. If passed without an asset selection, all stale assets in the\n job will be materialized. If the job does not materialize assets, this flag is ignored.\n partition_key (Optional[str]): The partition key for this run request.\n """\n\n def __new__(\n cls,\n run_key: Optional[str] = None,\n run_config: Optional[Union["RunConfig", Mapping[str, Any]]] = None,\n tags: Optional[Mapping[str, Any]] = None,\n job_name: Optional[str] = None,\n asset_selection: Optional[Sequence[AssetKey]] = None,\n stale_assets_only: bool = False,\n partition_key: Optional[str] = None,\n ):\n from dagster._core.definitions.run_config import convert_config_input\n\n return super(RunRequest, cls).__new__(\n cls,\n run_key=check.opt_str_param(run_key, "run_key"),\n run_config=check.opt_mapping_param(\n convert_config_input(run_config), "run_config", key_type=str\n ),\n tags=validate_tags(check.opt_mapping_param(tags, "tags", key_type=str)),\n job_name=check.opt_str_param(job_name, "job_name"),\n asset_selection=check.opt_nullable_sequence_param(\n asset_selection, "asset_selection", of_type=AssetKey\n ),\n stale_assets_only=check.bool_param(stale_assets_only, "stale_assets_only"),\n partition_key=check.opt_str_param(partition_key, "partition_key"),\n )\n\n def with_replaced_attrs(self, **kwargs: Any) -> "RunRequest":\n fields = self._asdict()\n for k in fields.keys():\n if k in kwargs:\n fields[k] = kwargs[k]\n return RunRequest(**fields)\n\n def with_resolved_tags_and_config(\n self,\n target_definition: Union["JobDefinition", "UnresolvedAssetJobDefinition"],\n dynamic_partitions_requests: Sequence[\n Union[AddDynamicPartitionsRequest, DeleteDynamicPartitionsRequest]\n ],\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> "RunRequest":\n from dagster._core.definitions.job_definition import JobDefinition\n from dagster._core.definitions.partition import (\n 
PartitionedConfig,\n PartitionsDefinition,\n )\n\n if self.partition_key is None:\n check.failed(\n "Cannot resolve partition for run request without partition key",\n )\n\n partitions_def = target_definition.partitions_def\n if partitions_def is None:\n check.failed(\n "Cannot resolve partition for run request when target job"\n f" '{target_definition.name}' is unpartitioned.",\n )\n partitions_def = cast(PartitionsDefinition, partitions_def)\n\n partitioned_config = (\n target_definition.partitioned_config\n if isinstance(target_definition, JobDefinition)\n else PartitionedConfig.from_flexible_config(target_definition.config, partitions_def)\n )\n if partitioned_config is None:\n check.failed(\n "Cannot resolve partition for run request on unpartitioned job",\n )\n\n _check_valid_partition_key_after_dynamic_partitions_requests(\n self.partition_key,\n partitions_def,\n dynamic_partitions_requests,\n current_time,\n dynamic_partitions_store,\n )\n\n tags = {\n **(self.tags or {}),\n **partitioned_config.get_tags_for_partition_key(\n self.partition_key,\n job_name=target_definition.name,\n ),\n }\n\n return self.with_replaced_attrs(\n run_config=(\n self.run_config\n if self.run_config\n else partitioned_config.get_run_config_for_partition_key(self.partition_key)\n ),\n tags=tags,\n )\n\n def has_resolved_partition(self) -> bool:\n # Backcompat run requests yielded via `run_request_for_partition` already have resolved\n # partitioning\n return self.tags.get(PARTITION_NAME_TAG) is not None if self.partition_key else True
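# Hedged illustration, not part of the rendered module source: a schedule sketch that builds
# a RunRequest with an explicit run_key, run_config, and tags. The job, op, and config
# schema are hypothetical.
from dagster import RunRequest, ScheduleEvaluationContext, job, op, schedule


@op(config_schema={"date": str})
def export_table(context):
    context.log.info(f"exporting for {context.op_config['date']}")


@job
def nightly_job():
    export_table()


@schedule(cron_schedule="0 2 * * *", job=nightly_job)
def nightly_schedule(context: ScheduleEvaluationContext):
    date_str = context.scheduled_execution_time.strftime("%Y-%m-%d")
    return RunRequest(
        # For schedules, the run_key ensures one run per tick, across failure recoveries.
        run_key=date_str,
        run_config={"ops": {"export_table": {"config": {"date": date_str}}}},
        tags={"date": date_str},
    )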
\n\n\ndef _check_valid_partition_key_after_dynamic_partitions_requests(\n partition_key: str,\n partitions_def: "PartitionsDefinition",\n dynamic_partitions_requests: Sequence[\n Union[AddDynamicPartitionsRequest, DeleteDynamicPartitionsRequest]\n ],\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n):\n from dagster._core.definitions.multi_dimensional_partitions import MultiPartitionsDefinition\n from dagster._core.definitions.partition import (\n DynamicPartitionsDefinition,\n )\n\n if isinstance(partitions_def, MultiPartitionsDefinition):\n multipartition_key = partitions_def.get_partition_key_from_str(partition_key)\n\n for dimension in partitions_def.partitions_defs:\n _check_valid_partition_key_after_dynamic_partitions_requests(\n multipartition_key.keys_by_dimension[dimension.name],\n dimension.partitions_def,\n dynamic_partitions_requests,\n current_time,\n dynamic_partitions_store,\n )\n\n elif isinstance(partitions_def, DynamicPartitionsDefinition) and partitions_def.name:\n if not dynamic_partitions_store:\n check.failed(\n "Cannot resolve partition for run request on dynamic partitions without"\n " dynamic_partitions_store"\n )\n\n add_partition_keys: Set[str] = set()\n delete_partition_keys: Set[str] = set()\n for req in dynamic_partitions_requests:\n if isinstance(req, AddDynamicPartitionsRequest):\n if req.partitions_def_name == partitions_def.name:\n add_partition_keys.update(set(req.partition_keys))\n elif isinstance(req, DeleteDynamicPartitionsRequest):\n if req.partitions_def_name == partitions_def.name:\n delete_partition_keys.update(set(req.partition_keys))\n\n partition_keys_after_requests_resolved = (\n set(\n dynamic_partitions_store.get_dynamic_partitions(\n partitions_def_name=partitions_def.name\n )\n )\n | add_partition_keys\n ) - delete_partition_keys\n\n if partition_key not in partition_keys_after_requests_resolved:\n check.failed(\n f"Dynamic partition key {partition_key} for partitions def"\n f" '{partitions_def.name}' is invalid. After dynamic partitions requests are"\n " applied, it does not exist in the set of valid partition keys."\n )\n\n else:\n partitions_def.validate_partition_key(\n partition_key,\n dynamic_partitions_store=dynamic_partitions_store,\n current_time=current_time,\n )\n\n\n@whitelist_for_serdes(\n storage_name="PipelineRunReaction",\n storage_field_names={\n "dagster_run": "pipeline_run",\n },\n)\nclass DagsterRunReaction(\n NamedTuple(\n "_DagsterRunReaction",\n [\n ("dagster_run", Optional[DagsterRun]),\n ("error", Optional[SerializableErrorInfo]),\n ("run_status", Optional[DagsterRunStatus]),\n ],\n )\n):\n """Represents a request that reacts to an existing dagster run. If success, it will report logs\n back to the run.\n\n Attributes:\n dagster_run (Optional[DagsterRun]): The dagster run that originates this reaction.\n error (Optional[SerializableErrorInfo]): user code execution error.\n run_status: (Optional[DagsterRunStatus]): The run status that triggered the reaction.\n """\n\n def __new__(\n cls,\n dagster_run: Optional[DagsterRun],\n error: Optional[SerializableErrorInfo] = None,\n run_status: Optional[DagsterRunStatus] = None,\n ):\n return super(DagsterRunReaction, cls).__new__(\n cls,\n dagster_run=check.opt_inst_param(dagster_run, "dagster_run", DagsterRun),\n error=check.opt_inst_param(error, "error", SerializableErrorInfo),\n run_status=check.opt_inst_param(run_status, "run_status", DagsterRunStatus),\n )\n\n\n
[docs]@experimental_param(\n param="asset_events", additional_warn_text="Runless asset events are experimental"\n)\nclass SensorResult(\n NamedTuple(\n "_SensorResult",\n [\n ("run_requests", Optional[Sequence[RunRequest]]),\n ("skip_reason", Optional[SkipReason]),\n ("cursor", Optional[str]),\n (\n "dynamic_partitions_requests",\n Optional[\n Sequence[Union[DeleteDynamicPartitionsRequest, AddDynamicPartitionsRequest]]\n ],\n ),\n (\n "asset_events",\n List[Union[AssetObservation, AssetMaterialization, AssetCheckEvaluation]],\n ),\n ],\n )\n):\n """The result of a sensor evaluation.\n\n Attributes:\n run_requests (Optional[Sequence[RunRequest]]): A list\n of run requests to be executed.\n skip_reason (Optional[Union[str, SkipReason]]): A skip message indicating why sensor\n evaluation was skipped.\n cursor (Optional[str]): The cursor value for this sensor, which will be provided on the\n context for the next sensor evaluation.\n dynamic_partitions_requests (Optional[Sequence[Union[DeleteDynamicPartitionsRequest,\n AddDynamicPartitionsRequest]]]): A list of dynamic partition requests to request dynamic\n partition addition and deletion. Run requests will be evaluated using the state of the\n partitions with these changes applied.\n asset_events (Optional[Sequence[Union[AssetObservation, AssetMaterialization, AssetCheckEvaluation]]]): (Experimental) A\n list of materializations, observations, and asset check evaluations that the system\n will persist on your behalf at the end of sensor evaluation. These events will be not\n be associated with any particular run, but will be queryable and viewable in the asset catalog.\n\n\n """\n\n def __new__(\n cls,\n run_requests: Optional[Sequence[RunRequest]] = None,\n skip_reason: Optional[Union[str, SkipReason]] = None,\n cursor: Optional[str] = None,\n dynamic_partitions_requests: Optional[\n Sequence[Union[DeleteDynamicPartitionsRequest, AddDynamicPartitionsRequest]]\n ] = None,\n asset_events: Optional[\n Sequence[Union[AssetObservation, AssetMaterialization, AssetCheckEvaluation]]\n ] = None,\n ):\n if skip_reason and len(run_requests if run_requests else []) > 0:\n check.failed(\n "Expected a single skip reason or one or more run requests: received values for "\n "both run_requests and skip_reason"\n )\n\n skip_reason = check.opt_inst_param(skip_reason, "skip_reason", (SkipReason, str))\n if isinstance(skip_reason, str):\n skip_reason = SkipReason(skip_reason)\n\n return super(SensorResult, cls).__new__(\n cls,\n run_requests=check.opt_sequence_param(run_requests, "run_requests", RunRequest),\n skip_reason=skip_reason,\n cursor=check.opt_str_param(cursor, "cursor"),\n dynamic_partitions_requests=check.opt_sequence_param(\n dynamic_partitions_requests,\n "dynamic_partitions_requests",\n (AddDynamicPartitionsRequest, DeleteDynamicPartitionsRequest),\n ),\n asset_events=list(\n check.opt_sequence_param(\n asset_events,\n "asset_check_evaluations",\n (AssetObservation, AssetMaterialization, AssetCheckEvaluation),\n )\n ),\n )
\n
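# Hedged illustration, not part of the rendered module source: a sensor sketch that uses the
# experimental `asset_events` field to record a runless materialization for an asset that is
# updated outside of Dagster. The asset key and the polling helper are hypothetical.
from dagster import AssetKey, AssetMaterialization, SensorResult, SkipReason, sensor


def _external_table_was_updated() -> bool:
    # Hypothetical: poll a warehouse, object store, or other external system here.
    return True


@sensor()
def external_table_sensor():
    if not _external_table_was_updated():
        return SensorResult(skip_reason=SkipReason("No new external update detected."))
    return SensorResult(
        # Persisted at the end of the evaluation without being associated with a run.
        asset_events=[AssetMaterialization(asset_key=AssetKey("external_table"))]
    )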
", "current_page_name": "_modules/dagster/_core/definitions/run_request", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.run_request"}, "run_status_sensor_definition": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.run_status_sensor_definition

\nimport functools\nimport logging\nfrom contextlib import ExitStack\nfrom datetime import datetime\nfrom typing import (\n    TYPE_CHECKING,\n    Any,\n    Callable,\n    Iterator,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Union,\n    cast,\n    overload,\n)\n\nimport pendulum\nfrom typing_extensions import TypeAlias\n\nimport dagster._check as check\nfrom dagster._annotations import deprecated_param, public\nfrom dagster._core.definitions.instigation_logger import InstigationLogger\nfrom dagster._core.definitions.resource_annotation import get_resource_args\nfrom dagster._core.definitions.scoped_resources_builder import Resources, ScopedResourcesBuilder\nfrom dagster._core.errors import (\n    DagsterInvalidDefinitionError,\n    DagsterInvariantViolationError,\n    RunStatusSensorExecutionError,\n    user_code_error_boundary,\n)\nfrom dagster._core.events import PIPELINE_RUN_STATUS_TO_EVENT_TYPE, DagsterEvent, DagsterEventType\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._core.storage.dagster_run import DagsterRun, DagsterRunStatus, RunsFilter\nfrom dagster._serdes import (\n    serialize_value,\n    whitelist_for_serdes,\n)\nfrom dagster._serdes.errors import DeserializationError\nfrom dagster._serdes.serdes import deserialize_value\nfrom dagster._seven import JSONDecodeError\nfrom dagster._utils import utc_datetime_from_timestamp\nfrom dagster._utils.error import serializable_error_info_from_exc_info\n\nfrom .graph_definition import GraphDefinition\nfrom .job_definition import JobDefinition\nfrom .sensor_definition import (\n    DagsterRunReaction,\n    DefaultSensorStatus,\n    RawSensorEvaluationFunctionReturn,\n    RunRequest,\n    SensorDefinition,\n    SensorEvaluationContext,\n    SensorResult,\n    SensorType,\n    SkipReason,\n    get_context_param_name,\n    get_sensor_context_from_args_or_kwargs,\n    validate_and_get_resource_dict,\n)\nfrom .target import ExecutableDefinition\nfrom .unresolved_asset_job_definition import UnresolvedAssetJobDefinition\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.resource_definition import ResourceDefinition\n    from dagster._core.definitions.selector import (\n        CodeLocationSelector,\n        JobSelector,\n        RepositorySelector,\n    )\n\nRunStatusSensorEvaluationFunction: TypeAlias = Union[\n    Callable[..., RawSensorEvaluationFunctionReturn],\n    Callable[..., RawSensorEvaluationFunctionReturn],\n]\nRunFailureSensorEvaluationFn: TypeAlias = Union[\n    Callable[..., RawSensorEvaluationFunctionReturn],\n    Callable[..., RawSensorEvaluationFunctionReturn],\n]\n\n\n@whitelist_for_serdes(old_storage_names={"PipelineSensorCursor"})\nclass RunStatusSensorCursor(\n    NamedTuple(\n        "_RunStatusSensorCursor",\n        [("record_id", int), ("update_timestamp", str)],\n    )\n):\n    def __new__(cls, record_id, update_timestamp):\n        return super(RunStatusSensorCursor, cls).__new__(\n            cls,\n            record_id=check.int_param(record_id, "record_id"),\n            update_timestamp=check.str_param(update_timestamp, "update_timestamp"),\n        )\n\n    @staticmethod\n    def is_valid(json_str: str) -> bool:\n        try:\n            obj = deserialize_value(json_str, RunStatusSensorCursor)\n            return isinstance(obj, RunStatusSensorCursor)\n        except (JSONDecodeError, DeserializationError):\n            return False\n\n    def to_json(self) -> str:\n        return serialize_value(cast(NamedTuple, self))\n\n    @staticmethod\n    def 
from_json(json_str: str) -> "RunStatusSensorCursor":\n        return deserialize_value(json_str, RunStatusSensorCursor)\n\n\n
[docs]class RunStatusSensorContext:\n """The ``context`` object available to a decorated function of ``run_status_sensor``."""\n\n def __init__(\n self,\n sensor_name,\n dagster_run,\n dagster_event,\n instance,\n context: Optional[\n SensorEvaluationContext\n ] = None, # deprecated arg, but we need to keep it for backcompat\n resource_defs: Optional[Mapping[str, "ResourceDefinition"]] = None,\n logger: Optional[logging.Logger] = None,\n partition_key: Optional[str] = None,\n _resources: Optional[Resources] = None,\n _cm_scope_entered: bool = False,\n ) -> None:\n self._exit_stack = ExitStack()\n self._sensor_name = check.str_param(sensor_name, "sensor_name")\n self._dagster_run = check.inst_param(dagster_run, "dagster_run", DagsterRun)\n self._dagster_event = check.inst_param(dagster_event, "dagster_event", DagsterEvent)\n self._instance = check.inst_param(instance, "instance", DagsterInstance)\n self._logger: Optional[logging.Logger] = logger or (context.log if context else None)\n self._partition_key = check.opt_str_param(partition_key, "partition_key")\n\n # Wait to set resources unless they're accessed\n self._resource_defs = resource_defs\n self._resources = _resources\n self._cm_scope_entered = _cm_scope_entered\n\n def for_run_failure(self) -> "RunFailureSensorContext":\n """Converts RunStatusSensorContext to RunFailureSensorContext."""\n return RunFailureSensorContext(\n sensor_name=self._sensor_name,\n dagster_run=self._dagster_run,\n dagster_event=self._dagster_event,\n instance=self._instance,\n logger=self._logger,\n partition_key=self._partition_key,\n resource_defs=self._resource_defs,\n _resources=self._resources,\n _cm_scope_entered=self._cm_scope_entered,\n )\n\n @property\n def resource_defs(self) -> Optional[Mapping[str, "ResourceDefinition"]]:\n return self._resource_defs\n\n @property\n def resources(self) -> Resources:\n from dagster._core.definitions.scoped_resources_builder import (\n IContainsGenerator,\n )\n from dagster._core.execution.build_resources import build_resources\n\n if not self._resources:\n """\n This is similar to what we do in e.g. the op context - we set up a resource\n building context manager, and immediately enter it. This is so that in cases\n where a user is not using any context-manager based resources, they don't\n need to enter this SensorEvaluationContext themselves.\n\n For example:\n\n my_sensor(build_sensor_context(resources={"my_resource": my_non_cm_resource})\n\n will work ok, but for a CM resource we must do\n\n with build_sensor_context(resources={"my_resource": my_cm_resource}) as context:\n my_sensor(context)\n """\n\n instance = self.instance if self._instance else None\n\n resources_cm = build_resources(resources=self._resource_defs or {}, instance=instance)\n self._resources = self._exit_stack.enter_context(resources_cm)\n\n if isinstance(self._resources, IContainsGenerator) and not self._cm_scope_entered:\n self._exit_stack.close()\n raise DagsterInvariantViolationError(\n "At least one provided resource is a generator, but attempting to access"\n " resources outside of context manager scope. You can use the following syntax"\n " to open a context manager: `with build_schedule_context(...) 
as context:`"\n )\n\n return self._resources\n\n @public\n @property\n def sensor_name(self) -> str:\n """The name of the sensor."""\n return self._sensor_name\n\n @public\n @property\n def dagster_run(self) -> DagsterRun:\n """The run of the job."""\n return self._dagster_run\n\n @public\n @property\n def dagster_event(self) -> DagsterEvent:\n """The event associated with the job run status."""\n return self._dagster_event\n\n @public\n @property\n def instance(self) -> DagsterInstance:\n """The current instance."""\n return self._instance\n\n @public\n @property\n def log(self) -> logging.Logger:\n """The logger for the current sensor evaluation."""\n if not self._logger:\n self._logger = InstigationLogger()\n\n return self._logger\n\n @public\n @property\n def partition_key(self) -> Optional[str]:\n """Optional[str]: The partition key of the relevant run."""\n return self._partition_key\n\n def __enter__(self) -> "RunStatusSensorContext":\n self._cm_scope_entered = True\n return self\n\n def __exit__(self, *exc) -> None:\n self._exit_stack.close()\n self._logger = None
\n\n\n
[docs]class RunFailureSensorContext(RunStatusSensorContext):\n """The ``context`` object available to a decorated function of ``run_failure_sensor``.\n\n Attributes:\n sensor_name (str): the name of the sensor.\n dagster_run (DagsterRun): the failed run.\n """\n\n @public\n @property\n def failure_event(self) -> DagsterEvent:\n """The run failure event.\n\n If the run failed because of an error inside a step, get_step_failure_events will have more\n details on the step failure.\n """\n return self.dagster_event\n\n
[docs] @public\n def get_step_failure_events(self) -> Sequence[DagsterEvent]:\n """The step failure event for each step in the run that failed.\n\n Examples:\n .. code-block:: python\n\n error_strings_by_step_key = {\n # includes the stack trace\n event.step_key: event.event_specific_data.error.to_string()\n for event in context.get_step_failure_events()\n }\n """\n records = self.instance.get_records_for_run(\n run_id=self.dagster_run.run_id, of_type=DagsterEventType.STEP_FAILURE\n ).records\n return [cast(DagsterEvent, record.event_log_entry.dagster_event) for record in records]
\n\n\n
[docs]def build_run_status_sensor_context(\n sensor_name: str,\n dagster_event: DagsterEvent,\n dagster_instance: DagsterInstance,\n dagster_run: DagsterRun,\n context: Optional[SensorEvaluationContext] = None,\n resources: Optional[Mapping[str, object]] = None,\n partition_key: Optional[str] = None,\n) -> RunStatusSensorContext:\n """Builds run status sensor context from provided parameters.\n\n This function can be used to provide the context argument when directly invoking a function\n decorated with `@run_status_sensor` or `@run_failure_sensor`, such as when writing unit tests.\n\n Args:\n sensor_name (str): The name of the sensor the context is being constructed for.\n dagster_event (DagsterEvent): A DagsterEvent with the same event type as the one that\n triggers the run_status_sensor\n dagster_instance (DagsterInstance): The dagster instance configured for the context.\n dagster_run (DagsterRun): DagsterRun object from running a job\n resources (Optional[Mapping[str, object]]): A dictionary of resources to be made available\n to the sensor.\n\n Examples:\n .. code-block:: python\n\n instance = DagsterInstance.ephemeral()\n result = my_job.execute_in_process(instance=instance)\n\n dagster_run = result.dagster_run\n dagster_event = result.get_job_success_event() # or get_job_failure_event()\n\n context = build_run_status_sensor_context(\n sensor_name="run_status_sensor_to_invoke",\n dagster_instance=instance,\n dagster_run=dagster_run,\n dagster_event=dagster_event,\n )\n run_status_sensor_to_invoke(context)\n """\n from dagster._core.execution.build_resources import wrap_resources_for_execution\n\n return RunStatusSensorContext(\n sensor_name=sensor_name,\n instance=dagster_instance,\n dagster_run=dagster_run,\n dagster_event=dagster_event,\n resource_defs=wrap_resources_for_execution(resources),\n logger=context.log if context else None,\n partition_key=partition_key,\n )
\n\n\n@overload\ndef run_failure_sensor(\n name: RunFailureSensorEvaluationFn,\n) -> SensorDefinition: ...\n\n\n@overload\ndef run_failure_sensor(\n name: Optional[str] = None,\n minimum_interval_seconds: Optional[int] = None,\n description: Optional[str] = None,\n monitored_jobs: Optional[\n Sequence[\n Union[\n JobDefinition,\n GraphDefinition,\n UnresolvedAssetJobDefinition,\n "RepositorySelector",\n "JobSelector",\n "CodeLocationSelector",\n ]\n ]\n ] = None,\n job_selection: Optional[\n Sequence[\n Union[\n JobDefinition,\n GraphDefinition,\n UnresolvedAssetJobDefinition,\n "RepositorySelector",\n "JobSelector",\n "CodeLocationSelector",\n ]\n ]\n ] = None,\n monitor_all_repositories: bool = False,\n default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,\n request_job: Optional[ExecutableDefinition] = None,\n request_jobs: Optional[Sequence[ExecutableDefinition]] = None,\n) -> Callable[[RunFailureSensorEvaluationFn], SensorDefinition,]: ...\n\n\n
[docs]@deprecated_param(\n param="job_selection",\n breaking_version="2.0",\n additional_warn_text="Use `monitored_jobs` instead.",\n)\ndef run_failure_sensor(\n name: Optional[Union[RunFailureSensorEvaluationFn, str]] = None,\n minimum_interval_seconds: Optional[int] = None,\n description: Optional[str] = None,\n monitored_jobs: Optional[\n Sequence[\n Union[\n JobDefinition,\n GraphDefinition,\n UnresolvedAssetJobDefinition,\n "RepositorySelector",\n "JobSelector",\n "CodeLocationSelector",\n ]\n ]\n ] = None,\n job_selection: Optional[\n Sequence[\n Union[\n JobDefinition,\n GraphDefinition,\n UnresolvedAssetJobDefinition,\n "RepositorySelector",\n "JobSelector",\n "CodeLocationSelector",\n ]\n ]\n ] = None,\n monitor_all_repositories: bool = False,\n default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,\n request_job: Optional[ExecutableDefinition] = None,\n request_jobs: Optional[Sequence[ExecutableDefinition]] = None,\n) -> Union[SensorDefinition, Callable[[RunFailureSensorEvaluationFn], SensorDefinition,]]:\n """Creates a sensor that reacts to job failure events, where the decorated function will be\n run when a run fails.\n\n Takes a :py:class:`~dagster.RunFailureSensorContext`.\n\n Args:\n name (Optional[str]): The name of the job failure sensor. Defaults to the name of the\n decorated function.\n minimum_interval_seconds (Optional[int]): The minimum number of seconds that will elapse\n between sensor evaluations.\n description (Optional[str]): A human-readable description of the sensor.\n monitored_jobs (Optional[List[Union[JobDefinition, GraphDefinition, UnresolvedAssetJobDefinition, RepositorySelector, JobSelector, CodeLocationSelector]]]):\n The jobs in the current repository that will be monitored by this failure sensor.\n Defaults to None, which means the alert will be sent when any job in the current\n repository fails.\n monitor_all_repositories (bool): If set to True, the sensor will monitor all runs in the\n Dagster instance. If set to True, an error will be raised if you also specify\n monitored_jobs or job_selection. Defaults to False.\n job_selection (Optional[List[Union[JobDefinition, GraphDefinition, RepositorySelector, JobSelector, CodeLocationSelector]]]):\n (deprecated in favor of monitored_jobs) The jobs in the current repository that will be\n monitored by this failure sensor. Defaults to None, which means the alert will be sent\n when any job in the repository fails.\n default_status (DefaultSensorStatus): Whether the sensor starts as running or not. 
The default\n status can be overridden from the Dagster UI or via the GraphQL API.\n request_job (Optional[Union[GraphDefinition, JobDefinition, UnresolvedAssetJob]]): The job a RunRequest should\n execute if yielded from the sensor.\n request_jobs (Optional[Sequence[Union[GraphDefinition, JobDefinition, UnresolvedAssetJob]]]): (experimental)\n A list of jobs to be executed if RunRequests are yielded from the sensor.\n """\n\n def inner(\n fn: RunFailureSensorEvaluationFn,\n ) -> SensorDefinition:\n check.callable_param(fn, "fn")\n if name is None or callable(name):\n sensor_name = fn.__name__\n else:\n sensor_name = name\n\n jobs = monitored_jobs if monitored_jobs else job_selection\n\n @run_status_sensor(\n run_status=DagsterRunStatus.FAILURE,\n name=sensor_name,\n minimum_interval_seconds=minimum_interval_seconds,\n description=description,\n monitored_jobs=jobs,\n monitor_all_repositories=monitor_all_repositories,\n default_status=default_status,\n request_job=request_job,\n request_jobs=request_jobs,\n )\n @functools.wraps(fn)\n def _run_failure_sensor(*args, **kwargs) -> Any:\n args_modified = [\n arg.for_run_failure() if isinstance(arg, RunStatusSensorContext) else arg\n for arg in args\n ]\n kwargs_modified = {\n k: v.for_run_failure() if isinstance(v, RunStatusSensorContext) else v\n for k, v in kwargs.items()\n }\n return fn(*args_modified, **kwargs_modified)\n\n return _run_failure_sensor\n\n # This case is for when decorator is used bare, without arguments\n if callable(name):\n return inner(name)\n\n return inner
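# Hedged illustration, not part of the rendered module source: a minimal run-failure sensor
# sketch. With monitored_jobs left unset it reacts to any failed run in the current
# repository and logs per-step stack traces via the context helpers above.
from dagster import RunFailureSensorContext, run_failure_sensor


@run_failure_sensor
def log_failures_sensor(context: RunFailureSensorContext):
    # failure_event is the run-level failure; step-level details come from get_step_failure_events().
    context.log.info(f"Run {context.dagster_run.run_id} failed: {context.failure_event.message}")
    for event in context.get_step_failure_events():
        context.log.error(
            f"Step {event.step_key} failed:\n{event.event_specific_data.error.to_string()}"
        )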
\n\n\n
[docs]class RunStatusSensorDefinition(SensorDefinition):\n """Define a sensor that reacts to a given status of job execution, where the decorated\n function will be evaluated when a run is at the given status.\n\n Args:\n name (str): The name of the sensor. Defaults to the name of the decorated function.\n run_status (DagsterRunStatus): The status of a run which will be\n monitored by the sensor.\n run_status_sensor_fn (Callable[[RunStatusSensorContext], Union[SkipReason, DagsterRunReaction]]): The core\n evaluation function for the sensor. Takes a :py:class:`~dagster.RunStatusSensorContext`.\n minimum_interval_seconds (Optional[int]): The minimum number of seconds that will elapse\n between sensor evaluations.\n description (Optional[str]): A human-readable description of the sensor.\n monitored_jobs (Optional[List[Union[JobDefinition, GraphDefinition, UnresolvedAssetJobDefinition, JobSelector, RepositorySelector, CodeLocationSelector]]]):\n The jobs in the current repository that will be monitored by this sensor. Defaults to\n None, which means the alert will be sent when any job in the repository fails.\n monitor_all_repositories (bool): If set to True, the sensor will monitor all runs in the\n Dagster instance. If set to True, an error will be raised if you also specify\n monitored_jobs or job_selection. Defaults to False.\n default_status (DefaultSensorStatus): Whether the sensor starts as running or not. The default\n status can be overridden from the Dagster UI or via the GraphQL API.\n request_job (Optional[Union[GraphDefinition, JobDefinition]]): The job a RunRequest should\n execute if yielded from the sensor.\n request_jobs (Optional[Sequence[Union[GraphDefinition, JobDefinition]]]): (experimental)\n A list of jobs to be executed if RunRequests are yielded from the sensor.\n """\n\n def __init__(\n self,\n name: str,\n run_status: DagsterRunStatus,\n run_status_sensor_fn: RunStatusSensorEvaluationFunction,\n minimum_interval_seconds: Optional[int] = None,\n description: Optional[str] = None,\n monitored_jobs: Optional[\n Sequence[\n Union[\n JobDefinition,\n GraphDefinition,\n UnresolvedAssetJobDefinition,\n "RepositorySelector",\n "JobSelector",\n "CodeLocationSelector",\n ]\n ]\n ] = None,\n monitor_all_repositories: bool = False,\n default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,\n request_job: Optional[ExecutableDefinition] = None,\n request_jobs: Optional[Sequence[ExecutableDefinition]] = None,\n required_resource_keys: Optional[Set[str]] = None,\n ):\n from dagster._core.definitions.selector import (\n CodeLocationSelector,\n JobSelector,\n RepositorySelector,\n )\n from dagster._core.event_api import RunShardedEventsCursor\n from dagster._core.storage.event_log.base import EventRecordsFilter\n\n check.str_param(name, "name")\n check.inst_param(run_status, "run_status", DagsterRunStatus)\n check.callable_param(run_status_sensor_fn, "run_status_sensor_fn")\n check.opt_int_param(minimum_interval_seconds, "minimum_interval_seconds")\n check.opt_str_param(description, "description")\n check.opt_list_param(\n monitored_jobs,\n "monitored_jobs",\n (\n JobDefinition,\n GraphDefinition,\n UnresolvedAssetJobDefinition,\n RepositorySelector,\n JobSelector,\n CodeLocationSelector,\n ),\n )\n check.inst_param(default_status, "default_status", DefaultSensorStatus)\n\n resource_arg_names: Set[str] = {arg.name for arg in get_resource_args(run_status_sensor_fn)}\n\n combined_required_resource_keys = (\n check.opt_set_param(required_resource_keys, 
"required_resource_keys", of_type=str)\n | resource_arg_names\n )\n\n # coerce CodeLocationSelectors to RepositorySelectors with repo name "__repository__"\n monitored_jobs = [\n job.to_repository_selector() if isinstance(job, CodeLocationSelector) else job\n for job in (monitored_jobs or [])\n ]\n\n self._run_status_sensor_fn = check.callable_param(\n run_status_sensor_fn, "run_status_sensor_fn"\n )\n event_type = PIPELINE_RUN_STATUS_TO_EVENT_TYPE[run_status]\n\n # split monitored_jobs into external repos, external jobs, and jobs in the current repo\n other_repos = (\n [x for x in monitored_jobs if isinstance(x, RepositorySelector)]\n if monitored_jobs\n else []\n )\n\n other_repo_jobs = (\n [x for x in monitored_jobs if isinstance(x, JobSelector)] if monitored_jobs else []\n )\n\n current_repo_jobs = (\n [x for x in monitored_jobs if not isinstance(x, (JobSelector, RepositorySelector))]\n if monitored_jobs\n else []\n )\n\n def _wrapped_fn(\n context: SensorEvaluationContext,\n ) -> Iterator[Union[RunRequest, SkipReason, DagsterRunReaction, SensorResult]]:\n # initiate the cursor to (most recent event id, current timestamp) when:\n # * it's the first time starting the sensor\n # * or, the cursor isn't in valid format (backcompt)\n if context.cursor is None or not RunStatusSensorCursor.is_valid(context.cursor):\n most_recent_event_records = list(\n context.instance.get_event_records(\n EventRecordsFilter(event_type=event_type), ascending=False, limit=1\n )\n )\n most_recent_event_id = (\n most_recent_event_records[0].storage_id\n if len(most_recent_event_records) == 1\n else -1\n )\n\n new_cursor = RunStatusSensorCursor(\n update_timestamp=pendulum.now("UTC").isoformat(),\n record_id=most_recent_event_id,\n )\n context.update_cursor(new_cursor.to_json())\n yield SkipReason(f"Initiating {name}. 
Set cursor to {new_cursor}")\n return\n\n record_id, update_timestamp = RunStatusSensorCursor.from_json(context.cursor)\n\n # Fetch events after the cursor id\n # * we move the cursor forward to the latest visited event's id to avoid revisits\n # * when the daemon is down, bc we persist the cursor info, we can go back to where we\n # left and backfill alerts for the qualified events (up to 5 at a time) during the downtime\n # Note: this is a cross-run query which requires extra handling in sqlite, see details in SqliteEventLogStorage.\n event_records = context.instance.get_event_records(\n EventRecordsFilter(\n after_cursor=RunShardedEventsCursor(\n id=record_id,\n run_updated_after=cast(datetime, pendulum.parse(update_timestamp)),\n ),\n event_type=event_type,\n ),\n ascending=True,\n limit=5,\n )\n\n for event_record in event_records:\n event_log_entry = event_record.event_log_entry\n storage_id = event_record.storage_id\n\n # get run info\n run_records = context.instance.get_run_records(\n filters=RunsFilter(run_ids=[event_log_entry.run_id])\n )\n\n # skip if we couldn't find the right run\n if len(run_records) != 1:\n # bc we couldn't find the run, we use the event timestamp as the approximate\n # run update timestamp\n approximate_update_timestamp = utc_datetime_from_timestamp(\n event_log_entry.timestamp\n )\n context.update_cursor(\n RunStatusSensorCursor(\n record_id=storage_id,\n update_timestamp=approximate_update_timestamp.isoformat(),\n ).to_json()\n )\n continue\n\n dagster_run = run_records[0].dagster_run\n update_timestamp = run_records[0].update_timestamp\n\n job_match = False\n\n # if monitor_all_repositories is provided, then we want to run the sensor for all jobs in all repositories\n if monitor_all_repositories:\n job_match = True\n\n # check if the run is in the current repository and (if provided) one of jobs specified in monitored_jobs\n if (\n not job_match\n and\n # the job has a repository (not manually executed)\n dagster_run.external_job_origin\n and\n # the job belongs to the current repository\n dagster_run.external_job_origin.external_repository_origin.repository_name\n == context.repository_name\n ):\n if monitored_jobs:\n if dagster_run.job_name in map(lambda x: x.name, current_repo_jobs):\n job_match = True\n else:\n job_match = True\n\n if not job_match:\n # check if the run is one of the jobs specified by JobSelector or RepositorySelector (ie in another repo)\n # make a JobSelector for the run in question\n external_repository_origin = check.not_none(\n dagster_run.external_job_origin\n ).external_repository_origin\n run_job_selector = JobSelector(\n location_name=external_repository_origin.code_location_origin.location_name,\n repository_name=external_repository_origin.repository_name,\n job_name=dagster_run.job_name,\n )\n if run_job_selector in other_repo_jobs:\n job_match = True\n\n # make a RepositorySelector for the run in question\n run_repo_selector = RepositorySelector(\n location_name=external_repository_origin.code_location_origin.location_name,\n repository_name=external_repository_origin.repository_name,\n )\n if run_repo_selector in other_repos:\n job_match = True\n\n if not job_match:\n # the run in question doesn't match any of the criteria for we advance the cursor and move on\n context.update_cursor(\n RunStatusSensorCursor(\n record_id=storage_id, update_timestamp=update_timestamp.isoformat()\n ).to_json()\n )\n continue\n\n serializable_error = None\n\n resource_args_populated = validate_and_get_resource_dict(\n context.resources, 
name, resource_arg_names\n )\n\n try:\n with RunStatusSensorContext(\n sensor_name=name,\n dagster_run=dagster_run,\n dagster_event=event_log_entry.dagster_event,\n instance=context.instance,\n resource_defs=context.resource_defs,\n logger=context.log,\n partition_key=dagster_run.tags.get("dagster/partition"),\n ) as sensor_context, user_code_error_boundary(\n RunStatusSensorExecutionError,\n lambda: f'Error occurred during the execution sensor "{name}".',\n ):\n context_param_name = get_context_param_name(run_status_sensor_fn)\n context_param = (\n {context_param_name: sensor_context} if context_param_name else {}\n )\n\n sensor_return = run_status_sensor_fn(\n **context_param,\n **resource_args_populated,\n )\n\n if sensor_return is not None:\n context.update_cursor(\n RunStatusSensorCursor(\n record_id=storage_id,\n update_timestamp=update_timestamp.isoformat(),\n ).to_json()\n )\n\n if isinstance(sensor_return, SensorResult):\n if sensor_return.cursor:\n raise DagsterInvariantViolationError(\n f"Error in run status sensor {name}: Sensor returned a"\n " SensorResult with a cursor value. The cursor is managed"\n " by the sensor and should not be modified by a user."\n )\n yield sensor_return\n elif isinstance(\n sensor_return,\n (RunRequest, SkipReason, DagsterRunReaction),\n ):\n yield sensor_return\n else:\n yield from sensor_return\n return\n except RunStatusSensorExecutionError as run_status_sensor_execution_error:\n # When the user code errors, we report error to the sensor tick not the original run.\n serializable_error = serializable_error_info_from_exc_info(\n run_status_sensor_execution_error.original_exc_info\n )\n\n context.update_cursor(\n RunStatusSensorCursor(\n record_id=storage_id, update_timestamp=update_timestamp.isoformat()\n ).to_json()\n )\n\n # Yield DagsterRunReaction to indicate the execution success/failure.\n # The sensor machinery would\n # * report back to the original run if success\n # * update cursor and job state\n yield DagsterRunReaction(\n dagster_run=dagster_run,\n run_status=run_status,\n error=serializable_error,\n )\n\n super(RunStatusSensorDefinition, self).__init__(\n name=name,\n evaluation_fn=_wrapped_fn,\n minimum_interval_seconds=minimum_interval_seconds,\n description=description,\n default_status=default_status,\n job=request_job,\n jobs=request_jobs,\n required_resource_keys=combined_required_resource_keys,\n )\n\n def __call__(self, *args, **kwargs) -> RawSensorEvaluationFunctionReturn:\n context_param_name = get_context_param_name(self._run_status_sensor_fn)\n context = get_sensor_context_from_args_or_kwargs(\n self._run_status_sensor_fn,\n args,\n kwargs,\n context_type=RunStatusSensorContext,\n )\n context_param = {context_param_name: context} if context_param_name and context else {}\n\n resources = validate_and_get_resource_dict(\n context.resources if context else ScopedResourcesBuilder.build_empty(),\n self._name,\n self._required_resource_keys,\n )\n return self._run_status_sensor_fn(**context_param, **resources)\n\n @property\n def sensor_type(self) -> SensorType:\n return SensorType.RUN_STATUS
\n\n\n
[docs]@deprecated_param(\n param="job_selection",\n breaking_version="2.0",\n additional_warn_text="Use `monitored_jobs` instead.",\n)\ndef run_status_sensor(\n run_status: DagsterRunStatus,\n name: Optional[str] = None,\n minimum_interval_seconds: Optional[int] = None,\n description: Optional[str] = None,\n monitored_jobs: Optional[\n Sequence[\n Union[\n JobDefinition,\n GraphDefinition,\n UnresolvedAssetJobDefinition,\n "RepositorySelector",\n "JobSelector",\n "CodeLocationSelector",\n ]\n ]\n ] = None,\n job_selection: Optional[\n Sequence[\n Union[\n JobDefinition,\n GraphDefinition,\n UnresolvedAssetJobDefinition,\n "RepositorySelector",\n "JobSelector",\n "CodeLocationSelector",\n ]\n ]\n ] = None,\n monitor_all_repositories: bool = False,\n default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,\n request_job: Optional[ExecutableDefinition] = None,\n request_jobs: Optional[Sequence[ExecutableDefinition]] = None,\n) -> Callable[[RunStatusSensorEvaluationFunction], RunStatusSensorDefinition,]:\n """Creates a sensor that reacts to a given status of job execution, where the decorated\n function will be run when a job is at the given status.\n\n Takes a :py:class:`~dagster.RunStatusSensorContext`.\n\n Args:\n run_status (DagsterRunStatus): The status of run execution which will be\n monitored by the sensor.\n name (Optional[str]): The name of the sensor. Defaults to the name of the decorated function.\n minimum_interval_seconds (Optional[int]): The minimum number of seconds that will elapse\n between sensor evaluations.\n description (Optional[str]): A human-readable description of the sensor.\n monitored_jobs (Optional[List[Union[JobDefinition, GraphDefinition, UnresolvedAssetJobDefinition, RepositorySelector, JobSelector, CodeLocationSelector]]]):\n Jobs in the current repository that will be monitored by this sensor. Defaults to None, which means the alert will\n be sent when any job in the repository matches the requested run_status. Jobs in external repositories can be monitored by using\n RepositorySelector or JobSelector.\n monitor_all_repositories (bool): If set to True, the sensor will monitor all runs in the Dagster instance.\n If set to True, an error will be raised if you also specify monitored_jobs or job_selection.\n Defaults to False.\n job_selection (Optional[List[Union[JobDefinition, GraphDefinition, RepositorySelector, JobSelector, CodeLocationSelector]]]):\n (deprecated in favor of monitored_jobs) Jobs in the current repository that will be\n monitored by this sensor. Defaults to None, which means the alert will be sent when\n any job in the repository matches the requested run_status.\n default_status (DefaultSensorStatus): Whether the sensor starts as running or not. 
The default\n status can be overridden from the Dagster UI or via the GraphQL API.\n request_job (Optional[Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]]): The job that should be\n executed if a RunRequest is yielded from the sensor.\n request_jobs (Optional[Sequence[Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]]]): (experimental)\n A list of jobs to be executed if RunRequests are yielded from the sensor.\n """\n\n def inner(\n fn: RunStatusSensorEvaluationFunction,\n ) -> RunStatusSensorDefinition:\n check.callable_param(fn, "fn")\n sensor_name = name or fn.__name__\n\n jobs = monitored_jobs if monitored_jobs else job_selection\n\n if jobs and monitor_all_repositories:\n DagsterInvalidDefinitionError(\n "Cannot specify both monitor_all_repositories and"\n f" {'monitored_jobs' if monitored_jobs else 'job_selection'}."\n )\n\n return RunStatusSensorDefinition(\n name=sensor_name,\n run_status=run_status,\n run_status_sensor_fn=fn,\n minimum_interval_seconds=minimum_interval_seconds,\n description=description,\n monitored_jobs=jobs,\n monitor_all_repositories=monitor_all_repositories,\n default_status=default_status,\n request_job=request_job,\n request_jobs=request_jobs,\n )\n\n return inner
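# Hedged illustration, not part of the rendered module source: a run-status sensor sketch
# that watches one job for successful runs and requests a downstream job. The job and op
# names are hypothetical.
from dagster import (
    DagsterRunStatus,
    RunRequest,
    RunStatusSensorContext,
    job,
    op,
    run_status_sensor,
)


@op
def build_tables():
    ...


@op
def send_report():
    ...


@job
def upstream_job():
    build_tables()


@job
def reporting_job():
    send_report()


@run_status_sensor(
    run_status=DagsterRunStatus.SUCCESS,
    monitored_jobs=[upstream_job],
    request_job=reporting_job,
)
def report_on_success(context: RunStatusSensorContext):
    # Keyed on the upstream run id so repeated evaluations do not double-request runs.
    return RunRequest(run_key=context.dagster_run.run_id)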
\n
", "current_page_name": "_modules/dagster/_core/definitions/run_status_sensor_definition", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.run_status_sensor_definition"}, "schedule_definition": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.schedule_definition

\nimport copy\nimport logging\nfrom contextlib import ExitStack\nfrom datetime import datetime\nfrom enum import Enum\nfrom typing import (\n    TYPE_CHECKING,\n    Any,\n    Callable,\n    Dict,\n    Iterator,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    TypeVar,\n    Union,\n    cast,\n)\n\nimport pendulum\nfrom typing_extensions import TypeAlias\n\nimport dagster._check as check\nfrom dagster._annotations import deprecated, deprecated_param, public\nfrom dagster._core.definitions.instigation_logger import InstigationLogger\nfrom dagster._core.definitions.resource_annotation import get_resource_args\nfrom dagster._core.definitions.scoped_resources_builder import Resources, ScopedResourcesBuilder\nfrom dagster._serdes import whitelist_for_serdes\nfrom dagster._utils import IHasInternalInit, ensure_gen\nfrom dagster._utils.merger import merge_dicts\nfrom dagster._utils.schedules import is_valid_cron_schedule\n\nfrom ..decorator_utils import has_at_least_one_parameter\nfrom ..errors import (\n    DagsterInvalidDefinitionError,\n    DagsterInvalidInvocationError,\n    DagsterInvariantViolationError,\n    ScheduleExecutionError,\n    user_code_error_boundary,\n)\nfrom ..instance import DagsterInstance\nfrom ..instance.ref import InstanceRef\nfrom ..storage.dagster_run import DagsterRun\nfrom .graph_definition import GraphDefinition\nfrom .job_definition import JobDefinition\nfrom .run_request import RunRequest, SkipReason\nfrom .target import DirectTarget, ExecutableDefinition, RepoRelativeTarget\nfrom .unresolved_asset_job_definition import UnresolvedAssetJobDefinition\nfrom .utils import check_valid_name, validate_tags\n\nif TYPE_CHECKING:\n    from dagster import ResourceDefinition\n    from dagster._core.definitions.repository_definition import RepositoryDefinition\nT = TypeVar("T")\n\nRunConfig: TypeAlias = Mapping[str, Any]\nRunRequestIterator: TypeAlias = Iterator[Union[RunRequest, SkipReason]]\n\nScheduleEvaluationFunctionReturn: TypeAlias = Union[\n    RunRequest, SkipReason, RunConfig, RunRequestIterator, Sequence[RunRequest]\n]\nRawScheduleEvaluationFunction: TypeAlias = Callable[..., ScheduleEvaluationFunctionReturn]\n\nScheduleRunConfigFunction: TypeAlias = Union[\n    Callable[["ScheduleEvaluationContext"], RunConfig],\n    Callable[[], RunConfig],\n]\n\nScheduleTagsFunction: TypeAlias = Callable[["ScheduleEvaluationContext"], Mapping[str, str]]\nScheduleShouldExecuteFunction: TypeAlias = Callable[["ScheduleEvaluationContext"], bool]\nScheduleExecutionFunction: TypeAlias = Union[\n    Callable[["ScheduleEvaluationContext"], Any],\n    "DecoratedScheduleFunction",\n]\n\n\n@whitelist_for_serdes\nclass DefaultScheduleStatus(Enum):\n    RUNNING = "RUNNING"\n    STOPPED = "STOPPED"\n\n\ndef get_or_create_schedule_context(\n    fn: Callable, *args: Any, **kwargs: Any\n) -> "ScheduleEvaluationContext":\n    """Based on the passed resource function and the arguments passed to it, returns the\n    user-passed ScheduleEvaluationContext or creates one if it is not passed.\n\n    Raises an exception if the user passes more than one argument or if the user-provided\n    function requires a context parameter but none is passed.\n    """\n    from dagster._config.pythonic_config import is_coercible_to_resource\n    from dagster._core.definitions.sensor_definition import get_context_param_name\n\n    context_param_name = get_context_param_name(fn)\n\n    kwarg_keys_non_resource = set(kwargs.keys()) - {param.name for param in get_resource_args(fn)}\n    
if len(args) + len(kwarg_keys_non_resource) > 1:\n        raise DagsterInvalidInvocationError(\n            "Schedule invocation received multiple non-resource arguments. Only a first "\n            "positional context parameter should be provided when invoking."\n        )\n\n    if any(is_coercible_to_resource(arg) for arg in args):\n        raise DagsterInvalidInvocationError(\n            "If directly invoking a schedule, you may not provide resources as"\n            " positional arguments, only as keyword arguments."\n        )\n\n    context: Optional[ScheduleEvaluationContext] = None\n\n    if len(args) > 0:\n        context = check.opt_inst(args[0], ScheduleEvaluationContext)\n    elif len(kwargs) > 0:\n        if context_param_name and context_param_name not in kwargs:\n            raise DagsterInvalidInvocationError(\n                f"Schedule invocation expected argument '{context_param_name}'."\n            )\n        context = check.opt_inst(\n            kwargs.get(context_param_name or "context"), ScheduleEvaluationContext\n        )\n    elif context_param_name:\n        # If the context parameter is present but no value was provided, we error\n        raise DagsterInvalidInvocationError(\n            "Schedule evaluation function expected context argument, but no context argument "\n            "was provided when invoking."\n        )\n\n    context = context or build_schedule_context()\n    resource_args_from_kwargs = {}\n\n    resource_args = {param.name for param in get_resource_args(fn)}\n    for resource_arg in resource_args:\n        if resource_arg in kwargs:\n            resource_args_from_kwargs[resource_arg] = kwargs[resource_arg]\n\n    if resource_args_from_kwargs:\n        return context.merge_resources(resource_args_from_kwargs)\n\n    return context\n\n\n
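# --- Illustrative sketch (not part of this module) ---
# Shows the invocation paths that get_or_create_schedule_context supports when a
# decorated schedule is called directly, e.g. in a unit test. The job name "my_job"
# and the resource class MyResource are hypothetical placeholders.
from dagster import ConfigurableResource, RunRequest, build_schedule_context, schedule


class MyResource(ConfigurableResource):
    prefix: str


@schedule(job_name="my_job", cron_schedule="0 * * * *")
def my_schedule(context, my_resource: MyResource):
    return RunRequest(run_key=None, tags={"prefix": my_resource.prefix})


# The context may be passed positionally (or omitted entirely); resources must be
# supplied as keyword arguments, never positionally.
result = my_schedule(build_schedule_context(), my_resource=MyResource(prefix="test"))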
[docs]class ScheduleEvaluationContext:\n """The context object available as the first argument various functions defined on a :py:class:`dagster.ScheduleDefinition`.\n\n A `ScheduleEvaluationContext` object is passed as the first argument to ``run_config_fn``, ``tags_fn``,\n and ``should_execute``.\n\n Users should not instantiate this object directly. To construct a `ScheduleEvaluationContext` for testing purposes, use :py:func:`dagster.build_schedule_context`.\n\n Example:\n .. code-block:: python\n\n from dagster import schedule, ScheduleEvaluationContext\n\n @schedule\n def the_schedule(context: ScheduleEvaluationContext):\n ...\n\n """\n\n __slots__ = [\n "_instance_ref",\n "_scheduled_execution_time",\n "_exit_stack",\n "_instance",\n "_log_key",\n "_logger",\n "_repository_name",\n "_resource_defs",\n "_schedule_name",\n "_resources_cm",\n "_resources",\n "_cm_scope_entered",\n "_repository_def",\n ]\n\n def __init__(\n self,\n instance_ref: Optional[InstanceRef],\n scheduled_execution_time: Optional[datetime],\n repository_name: Optional[str] = None,\n schedule_name: Optional[str] = None,\n resources: Optional[Mapping[str, "ResourceDefinition"]] = None,\n repository_def: Optional["RepositoryDefinition"] = None,\n ):\n from dagster._core.definitions.repository_definition import RepositoryDefinition\n\n self._exit_stack = ExitStack()\n self._instance = None\n\n self._instance_ref = check.opt_inst_param(instance_ref, "instance_ref", InstanceRef)\n self._scheduled_execution_time = check.opt_inst_param(\n scheduled_execution_time, "scheduled_execution_time", datetime\n )\n self._log_key = (\n [\n repository_name,\n schedule_name,\n scheduled_execution_time.strftime("%Y%m%d_%H%M%S"),\n ]\n if repository_name and schedule_name and scheduled_execution_time\n else None\n )\n self._logger = None\n self._repository_name = repository_name\n self._schedule_name = schedule_name\n\n # Wait to set resources unless they're accessed\n self._resource_defs = resources\n self._resources = None\n self._cm_scope_entered = False\n self._repository_def = check.opt_inst_param(\n repository_def, "repository_def", RepositoryDefinition\n )\n\n def __enter__(self) -> "ScheduleEvaluationContext":\n self._cm_scope_entered = True\n return self\n\n def __exit__(self, *exc) -> None:\n self._exit_stack.close()\n self._logger = None\n\n @property\n def resource_defs(self) -> Optional[Mapping[str, "ResourceDefinition"]]:\n return self._resource_defs\n\n @public\n @property\n def resources(self) -> Resources:\n """Mapping of resource key to resource definition to be made available\n during schedule execution.\n """\n from dagster._core.definitions.scoped_resources_builder import (\n IContainsGenerator,\n )\n from dagster._core.execution.build_resources import build_resources\n\n if not self._resources:\n # Early exit if no resources are defined. This skips unnecessary initialization\n # entirely. This allows users to run user code servers in cases where they\n # do not have access to the instance if they use a subset of features do\n # that do not require instance access. 
In this case, if they do not use\n # resources on schedules they do not require the instance, so we do not\n # instantiate it\n #\n # Tracking at https://github.com/dagster-io/dagster/issues/14345\n if not self._resource_defs:\n self._resources = ScopedResourcesBuilder.build_empty()\n return self._resources\n\n instance = self.instance if self._instance or self._instance_ref else None\n\n resources_cm = build_resources(resources=self._resource_defs, instance=instance)\n self._resources = self._exit_stack.enter_context(resources_cm)\n\n if isinstance(self._resources, IContainsGenerator) and not self._cm_scope_entered:\n self._exit_stack.close()\n raise DagsterInvariantViolationError(\n "At least one provided resource is a generator, but attempting to access"\n " resources outside of context manager scope. You can use the following syntax"\n " to open a context manager: `with build_sensor_context(...) as context:`"\n )\n\n return self._resources\n\n def merge_resources(self, resources_dict: Mapping[str, Any]) -> "ScheduleEvaluationContext":\n """Merge the specified resources into this context.\n This method is intended to be used by the Dagster framework, and should not be called by user code.\n\n Args:\n resources_dict (Mapping[str, Any]): The resources to replace in the context.\n """\n check.invariant(\n self._resources is None, "Cannot merge resources in context that has been initialized."\n )\n from dagster._core.execution.build_resources import wrap_resources_for_execution\n\n return ScheduleEvaluationContext(\n instance_ref=self._instance_ref,\n scheduled_execution_time=self._scheduled_execution_time,\n repository_name=self._repository_name,\n schedule_name=self._schedule_name,\n resources={\n **(self._resource_defs or {}),\n **wrap_resources_for_execution(resources_dict),\n },\n repository_def=self._repository_def,\n )\n\n @public\n @property\n def instance(self) -> "DagsterInstance":\n """DagsterInstance: The current DagsterInstance."""\n # self._instance_ref should only ever be None when this ScheduleEvaluationContext was\n # constructed under test.\n if not self._instance_ref:\n raise DagsterInvariantViolationError(\n "Attempted to initialize dagster instance, but no instance reference was provided."\n )\n if not self._instance:\n self._instance = self._exit_stack.enter_context(\n DagsterInstance.from_ref(self._instance_ref)\n )\n return cast(DagsterInstance, self._instance)\n\n @property\n def instance_ref(self) -> Optional[InstanceRef]:\n """The serialized instance configured to run the schedule."""\n return self._instance_ref\n\n @public\n @property\n def scheduled_execution_time(self) -> datetime:\n """The time in which the execution was scheduled to happen. 
May differ slightly\n from both the actual execution time and the time at which the run config is computed.\n """\n if self._scheduled_execution_time is None:\n check.failed(\n "Attempting to access scheduled_execution_time, but no scheduled_execution_time was"\n " set on this context"\n )\n\n return self._scheduled_execution_time\n\n @property\n def log(self) -> logging.Logger:\n if self._logger:\n return self._logger\n\n if not self._instance_ref:\n self._logger = self._exit_stack.enter_context(\n InstigationLogger(\n self._log_key,\n repository_name=self._repository_name,\n name=self._schedule_name,\n )\n )\n\n self._logger = self._exit_stack.enter_context(\n InstigationLogger(\n self._log_key,\n self.instance,\n repository_name=self._repository_name,\n name=self._schedule_name,\n )\n )\n return cast(InstigationLogger, self._logger)\n\n def has_captured_logs(self):\n return self._logger and self._logger.has_captured_logs()\n\n @property\n def log_key(self) -> Optional[List[str]]:\n return self._log_key\n\n @property\n def repository_def(self) -> "RepositoryDefinition":\n if not self._repository_def:\n raise DagsterInvariantViolationError(\n "Attempted to access repository_def, but no repository_def was provided."\n )\n return self._repository_def
\n\n\nclass DecoratedScheduleFunction(NamedTuple):\n """Wrapper around the decorated schedule function. Keeps track of both to better support the\n optimal return value for direct invocation of the evaluation function.\n """\n\n decorated_fn: RawScheduleEvaluationFunction\n wrapped_fn: Callable[[ScheduleEvaluationContext], RunRequestIterator]\n has_context_arg: bool\n\n\n
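# --- Illustrative sketch (not part of this module) ---
# Generator-backed (context-manager style) resources must be accessed inside the
# context-manager scope of the evaluation context, as enforced in
# ScheduleEvaluationContext.resources. The resource below is a hypothetical placeholder.
from dagster import build_schedule_context, resource


@resource
def my_cm_resource(_init_context):
    # Yielding makes this a context-manager style resource.
    yield "connection-handle"


with build_schedule_context(resources={"my_cm_resource": my_cm_resource}) as context:
    handle = context.resources.my_cm_resource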
[docs]def build_schedule_context(\n instance: Optional[DagsterInstance] = None,\n scheduled_execution_time: Optional[datetime] = None,\n resources: Optional[Mapping[str, object]] = None,\n repository_def: Optional["RepositoryDefinition"] = None,\n instance_ref: Optional["InstanceRef"] = None,\n) -> ScheduleEvaluationContext:\n """Builds schedule execution context using the provided parameters.\n\n The instance provided to ``build_schedule_context`` must be persistent;\n DagsterInstance.ephemeral() will result in an error.\n\n Args:\n instance (Optional[DagsterInstance]): The dagster instance configured to run the schedule.\n scheduled_execution_time (datetime): The time in which the execution was scheduled to\n happen. May differ slightly from both the actual execution time and the time at which\n the run config is computed.\n\n Examples:\n .. code-block:: python\n\n context = build_schedule_context(instance)\n\n """\n from dagster._core.execution.build_resources import wrap_resources_for_execution\n\n check.opt_inst_param(instance, "instance", DagsterInstance)\n\n return ScheduleEvaluationContext(\n instance_ref=(\n instance_ref\n if instance_ref\n else instance.get_ref() if instance and instance.is_persistent else None\n ),\n scheduled_execution_time=check.opt_inst_param(\n scheduled_execution_time, "scheduled_execution_time", datetime\n ),\n resources=wrap_resources_for_execution(resources),\n repository_def=repository_def,\n )
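# --- Illustrative sketch (not part of this module) ---
# A common testing pattern built on build_schedule_context: evaluate a tick of a
# hypothetical schedule `my_schedule` without providing an instance, which is
# sufficient as long as the schedule does not emit partitioned run requests.
from datetime import datetime

from dagster import build_schedule_context

context = build_schedule_context(scheduled_execution_time=datetime(2023, 10, 12))
execution_data = my_schedule.evaluate_tick(context)
run_requests = execution_data.run_requests or []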
\n\n\n@whitelist_for_serdes\nclass ScheduleExecutionData(\n NamedTuple(\n "_ScheduleExecutionData",\n [\n ("run_requests", Optional[Sequence[RunRequest]]),\n ("skip_message", Optional[str]),\n ("captured_log_key", Optional[Sequence[str]]),\n ],\n )\n):\n def __new__(\n cls,\n run_requests: Optional[Sequence[RunRequest]] = None,\n skip_message: Optional[str] = None,\n captured_log_key: Optional[Sequence[str]] = None,\n ):\n check.opt_sequence_param(run_requests, "run_requests", RunRequest)\n check.opt_str_param(skip_message, "skip_message")\n check.opt_list_param(captured_log_key, "captured_log_key", str)\n check.invariant(\n not (run_requests and skip_message), "Found both skip data and run request data"\n )\n return super(ScheduleExecutionData, cls).__new__(\n cls,\n run_requests=run_requests,\n skip_message=skip_message,\n captured_log_key=captured_log_key,\n )\n\n\ndef validate_and_get_schedule_resource_dict(\n resources: Resources, schedule_name: str, required_resource_keys: Set[str]\n) -> Dict[str, Any]:\n """Validates that the context has all the required resources and returns a dictionary of\n resource key to resource object.\n """\n for k in required_resource_keys:\n if not hasattr(resources, k):\n raise DagsterInvalidDefinitionError(\n f"Resource with key '{k}' required by schedule '{schedule_name}' was not provided."\n )\n\n return {k: getattr(resources, k) for k in required_resource_keys}\n\n\n
[docs]@deprecated_param(\n param="environment_vars",\n breaking_version="2.0",\n additional_warn_text=(\n "It is no longer necessary. Schedules will have access to all environment variables set in"\n " the containing environment, and can safely be deleted."\n ),\n)\nclass ScheduleDefinition(IHasInternalInit):\n """Define a schedule that targets a job.\n\n Args:\n name (Optional[str]): The name of the schedule to create. Defaults to the job name plus\n "_schedule".\n cron_schedule (Union[str, Sequence[str]]): A valid cron string or sequence of cron strings\n specifying when the schedule will run, e.g., ``'45 23 * * 6'`` for a schedule that runs\n at 11:45 PM every Saturday. If a sequence is provided, then the schedule will run for\n the union of all execution times for the provided cron strings, e.g.,\n ``['45 23 * * 6', '30 9 * * 0]`` for a schedule that runs at 11:45 PM every Saturday and\n 9:30 AM every Sunday.\n execution_fn (Callable[ScheduleEvaluationContext]): The core evaluation function for the\n schedule, which is run at an interval to determine whether a run should be launched or\n not. Takes a :py:class:`~dagster.ScheduleEvaluationContext`.\n\n This function must return a generator, which must yield either a single SkipReason\n or one or more RunRequest objects.\n run_config (Optional[Mapping]): The config that parameterizes this execution,\n as a dict.\n run_config_fn (Optional[Callable[[ScheduleEvaluationContext], [Mapping]]]): A function that\n takes a ScheduleEvaluationContext object and returns the run configuration that\n parameterizes this execution, as a dict. You may set only one of ``run_config``,\n ``run_config_fn``, and ``execution_fn``.\n tags (Optional[Mapping[str, str]]): A dictionary of tags (string key-value pairs) to attach\n to the scheduled runs.\n tags_fn (Optional[Callable[[ScheduleEvaluationContext], Optional[Mapping[str, str]]]]): A\n function that generates tags to attach to the schedules runs. Takes a\n :py:class:`~dagster.ScheduleEvaluationContext` and returns a dictionary of tags (string\n key-value pairs). You may set only one of ``tags``, ``tags_fn``, and ``execution_fn``.\n should_execute (Optional[Callable[[ScheduleEvaluationContext], bool]]): A function that runs\n at schedule execution time to determine whether a schedule should execute or skip. Takes\n a :py:class:`~dagster.ScheduleEvaluationContext` and returns a boolean (``True`` if the\n schedule should execute). Defaults to a function that always returns ``True``.\n execution_timezone (Optional[str]): Timezone in which the schedule should run.\n Supported strings for timezones are the ones provided by the\n `IANA time zone database <https://www.iana.org/time-zones>` - e.g. "America/Los_Angeles".\n description (Optional[str]): A human-readable description of the schedule.\n job (Optional[Union[GraphDefinition, JobDefinition]]): The job that should execute when this\n schedule runs.\n default_status (DefaultScheduleStatus): Whether the schedule starts as running or not. 
The default\n status can be overridden from the Dagster UI or via the GraphQL API.\n required_resource_keys (Optional[Set[str]]): The set of resource keys required by the schedule.\n """\n\n def with_updated_job(self, new_job: ExecutableDefinition) -> "ScheduleDefinition":\n """Returns a copy of this schedule with the job replaced.\n\n Args:\n job (ExecutableDefinition): The job that should execute when this\n schedule runs.\n """\n return ScheduleDefinition.dagster_internal_init(\n name=self.name,\n cron_schedule=self._cron_schedule,\n job_name=self.job_name,\n execution_timezone=self.execution_timezone,\n execution_fn=self._execution_fn,\n description=self.description,\n job=new_job,\n default_status=self.default_status,\n environment_vars=self._environment_vars,\n required_resource_keys=self._raw_required_resource_keys,\n run_config=None, # run_config, tags, should_execute encapsulated in execution_fn\n run_config_fn=None,\n tags=None,\n tags_fn=None,\n should_execute=None,\n )\n\n def __init__(\n self,\n name: Optional[str] = None,\n *,\n cron_schedule: Optional[Union[str, Sequence[str]]] = None,\n job_name: Optional[str] = None,\n run_config: Optional[Any] = None,\n run_config_fn: Optional[ScheduleRunConfigFunction] = None,\n tags: Optional[Mapping[str, str]] = None,\n tags_fn: Optional[ScheduleTagsFunction] = None,\n should_execute: Optional[ScheduleShouldExecuteFunction] = None,\n environment_vars: Optional[Mapping[str, str]] = None,\n execution_timezone: Optional[str] = None,\n execution_fn: Optional[ScheduleExecutionFunction] = None,\n description: Optional[str] = None,\n job: Optional[ExecutableDefinition] = None,\n default_status: DefaultScheduleStatus = DefaultScheduleStatus.STOPPED,\n required_resource_keys: Optional[Set[str]] = None,\n ):\n self._cron_schedule = check.inst_param(cron_schedule, "cron_schedule", (str, Sequence))\n if not isinstance(self._cron_schedule, str):\n check.sequence_param(self._cron_schedule, "cron_schedule", of_type=str) # type: ignore\n\n if not is_valid_cron_schedule(self._cron_schedule): # type: ignore\n raise DagsterInvalidDefinitionError(\n f"Found invalid cron schedule '{self._cron_schedule}' for schedule '{name}''. "\n "Dagster recognizes standard cron expressions consisting of 5 fields."\n )\n\n if job is not None:\n self._target: Union[DirectTarget, RepoRelativeTarget] = DirectTarget(job)\n else:\n self._target = RepoRelativeTarget(\n job_name=check.str_param(job_name, "job_name"),\n op_selection=None,\n )\n\n if name:\n self._name = check_valid_name(name)\n elif job_name:\n self._name = job_name + "_schedule"\n elif job:\n self._name = job.name + "_schedule"\n\n self._description = check.opt_str_param(description, "description")\n\n self._environment_vars = check.opt_mapping_param(\n environment_vars, "environment_vars", key_type=str, value_type=str\n )\n\n self._execution_timezone = check.opt_str_param(execution_timezone, "execution_timezone")\n\n if execution_fn and (run_config_fn or tags_fn or should_execute or tags or run_config):\n raise DagsterInvalidDefinitionError(\n "Attempted to provide both execution_fn and individual run_config/tags arguments "\n "to ScheduleDefinition. 
Must provide only one of the two."\n )\n elif execution_fn:\n self._execution_fn: Optional[Union[Callable[..., Any], DecoratedScheduleFunction]] = (\n None\n )\n if isinstance(execution_fn, DecoratedScheduleFunction):\n self._execution_fn = execution_fn\n else:\n self._execution_fn = check.opt_callable_param(execution_fn, "execution_fn")\n self._run_config_fn = None\n else:\n if run_config_fn and run_config:\n raise DagsterInvalidDefinitionError(\n "Attempted to provide both run_config_fn and run_config as arguments"\n " to ScheduleDefinition. Must provide only one of the two."\n )\n\n def _default_run_config_fn(context: ScheduleEvaluationContext) -> RunConfig:\n return check.opt_dict_param(run_config, "run_config")\n\n self._run_config_fn = check.opt_callable_param(\n run_config_fn, "run_config_fn", default=_default_run_config_fn\n )\n\n if tags_fn and tags:\n raise DagsterInvalidDefinitionError(\n "Attempted to provide both tags_fn and tags as arguments"\n " to ScheduleDefinition. Must provide only one of the two."\n )\n elif tags:\n tags = validate_tags(tags, allow_reserved_tags=False)\n tags_fn = lambda _context: tags\n else:\n tags_fn = check.opt_callable_param(\n tags_fn, "tags_fn", default=lambda _context: cast(Mapping[str, str], {})\n )\n self._tags_fn = tags_fn\n self._tags = tags\n\n self._should_execute: ScheduleShouldExecuteFunction = check.opt_callable_param(\n should_execute, "should_execute", default=lambda _context: True\n )\n\n # Several type-ignores are present in this function to work around bugs in mypy\n # inference.\n def _execution_fn(context: ScheduleEvaluationContext) -> RunRequestIterator:\n with user_code_error_boundary(\n ScheduleExecutionError,\n lambda: (\n f"Error occurred during the execution of should_execute for schedule {name}"\n ),\n ):\n if not self._should_execute(context):\n yield SkipReason(f"should_execute function for {name} returned false.")\n return\n\n with user_code_error_boundary(\n ScheduleExecutionError,\n lambda: (\n f"Error occurred during the execution of run_config_fn for schedule {name}"\n ),\n ):\n _run_config_fn = check.not_none(self._run_config_fn)\n evaluated_run_config = copy.deepcopy(\n _run_config_fn(context)\n if has_at_least_one_parameter(_run_config_fn)\n else _run_config_fn() # type: ignore # (strict type guard)\n )\n\n with user_code_error_boundary(\n ScheduleExecutionError,\n lambda: f"Error occurred during the execution of tags_fn for schedule {name}",\n ):\n evaluated_tags = validate_tags(tags_fn(context), allow_reserved_tags=False)\n\n yield RunRequest(\n run_key=None,\n run_config=evaluated_run_config,\n tags=evaluated_tags,\n )\n\n self._execution_fn = _execution_fn\n\n if self._execution_timezone:\n try:\n # Verify that the timezone can be loaded\n pendulum.tz.timezone(self._execution_timezone) # type: ignore\n except Exception as e:\n raise DagsterInvalidDefinitionError(\n f"Invalid execution timezone {self._execution_timezone} for {name}"\n ) from e\n\n self._default_status = check.inst_param(\n default_status, "default_status", DefaultScheduleStatus\n )\n\n resource_arg_names: Set[str] = (\n {arg.name for arg in get_resource_args(self._execution_fn.decorated_fn)}\n if isinstance(self._execution_fn, DecoratedScheduleFunction)\n else set()\n )\n\n check.param_invariant(\n len(required_resource_keys or []) == 0 or len(resource_arg_names) == 0,\n "Cannot specify resource requirements in both @schedule decorator and as arguments to"\n " the decorated function",\n )\n\n self._raw_required_resource_keys = 
check.opt_set_param(\n required_resource_keys, "required_resource_keys", of_type=str\n )\n self._required_resource_keys = self._raw_required_resource_keys or resource_arg_names\n\n @staticmethod\n def dagster_internal_init(\n *,\n name: Optional[str],\n cron_schedule: Optional[Union[str, Sequence[str]]],\n job_name: Optional[str],\n run_config: Optional[Any],\n run_config_fn: Optional[ScheduleRunConfigFunction],\n tags: Optional[Mapping[str, str]],\n tags_fn: Optional[ScheduleTagsFunction],\n should_execute: Optional[ScheduleShouldExecuteFunction],\n environment_vars: Optional[Mapping[str, str]],\n execution_timezone: Optional[str],\n execution_fn: Optional[ScheduleExecutionFunction],\n description: Optional[str],\n job: Optional[ExecutableDefinition],\n default_status: DefaultScheduleStatus,\n required_resource_keys: Optional[Set[str]],\n ) -> "ScheduleDefinition":\n return ScheduleDefinition(\n name=name,\n cron_schedule=cron_schedule,\n job_name=job_name,\n run_config=run_config,\n run_config_fn=run_config_fn,\n tags=tags,\n tags_fn=tags_fn,\n should_execute=should_execute,\n environment_vars=environment_vars,\n execution_timezone=execution_timezone,\n execution_fn=execution_fn,\n description=description,\n job=job,\n default_status=default_status,\n required_resource_keys=required_resource_keys,\n )\n\n def __call__(self, *args, **kwargs) -> ScheduleEvaluationFunctionReturn:\n from dagster._core.definitions.sensor_definition import get_context_param_name\n\n from .decorators.schedule_decorator import DecoratedScheduleFunction\n\n if not isinstance(self._execution_fn, DecoratedScheduleFunction):\n raise DagsterInvalidInvocationError(\n "Schedule invocation is only supported for schedules created via the schedule "\n "decorators."\n )\n\n context_param_name = get_context_param_name(self._execution_fn.decorated_fn)\n context = get_or_create_schedule_context(self._execution_fn.decorated_fn, *args, **kwargs)\n context_param = {context_param_name: context} if context_param_name else {}\n\n resources = validate_and_get_schedule_resource_dict(\n context.resources, self._name, self._required_resource_keys\n )\n result = self._execution_fn.decorated_fn(**context_param, **resources)\n\n if isinstance(result, dict):\n return copy.deepcopy(result)\n else:\n return result\n\n @public\n @property\n def name(self) -> str:\n """str: The name of the schedule."""\n return self._name\n\n @public\n @property\n def job_name(self) -> str:\n """str: The name of the job targeted by this schedule."""\n return self._target.job_name\n\n @public\n @property\n def description(self) -> Optional[str]:\n """Optional[str]: A description for this schedule."""\n return self._description\n\n @public\n @property\n def cron_schedule(self) -> Union[str, Sequence[str]]:\n """Union[str, Sequence[str]]: The cron schedule representing when this schedule will be evaluated."""\n return self._cron_schedule # type: ignore\n\n @public\n @deprecated(\n breaking_version="2.0",\n additional_warn_text="Setting this property no longer has any effect.",\n )\n @property\n def environment_vars(self) -> Mapping[str, str]:\n """Mapping[str, str]: Environment variables to export to the cron schedule."""\n return self._environment_vars\n\n @public\n @property\n def required_resource_keys(self) -> Set[str]:\n """Set[str]: The set of keys for resources that must be provided to this schedule."""\n return self._required_resource_keys\n\n @public\n @property\n def execution_timezone(self) -> Optional[str]:\n """Optional[str]: The timezone in which 
this schedule will be evaluated."""\n return self._execution_timezone\n\n @public\n @property\n def job(self) -> Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]:\n """Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]: The job that is\n targeted by this schedule.\n """\n if isinstance(self._target, DirectTarget):\n return self._target.target\n raise DagsterInvalidDefinitionError("No job was provided to ScheduleDefinition.")\n\n def evaluate_tick(self, context: "ScheduleEvaluationContext") -> ScheduleExecutionData:\n """Evaluate schedule using the provided context.\n\n Args:\n context (ScheduleEvaluationContext): The context with which to evaluate this schedule.\n\n Returns:\n ScheduleExecutionData: Contains list of run requests, or skip message if present.\n\n """\n from dagster._core.definitions.partition import CachingDynamicPartitionsLoader\n\n check.inst_param(context, "context", ScheduleEvaluationContext)\n execution_fn: Callable[..., "ScheduleEvaluationFunctionReturn"]\n if isinstance(self._execution_fn, DecoratedScheduleFunction):\n execution_fn = self._execution_fn.wrapped_fn\n else:\n execution_fn = cast(\n Callable[..., "ScheduleEvaluationFunctionReturn"],\n self._execution_fn,\n )\n\n result = list(ensure_gen(execution_fn(context)))\n\n skip_message: Optional[str] = None\n\n run_requests: List[RunRequest] = []\n if not result or result == [None]:\n run_requests = []\n skip_message = "Schedule function returned an empty result"\n elif len(result) == 1:\n item = check.inst(result[0], (SkipReason, RunRequest))\n if isinstance(item, RunRequest):\n run_requests = [item]\n skip_message = None\n elif isinstance(item, SkipReason):\n run_requests = []\n skip_message = item.skip_message\n else:\n # NOTE: mypy is not correctly reading this cast-- not sure why\n # (pyright reads it fine). 
Hence the type-ignores below.\n result = cast(List[RunRequest], check.is_list(result, of_type=RunRequest))\n check.invariant(\n not any(not request.run_key for request in result),\n "Schedules that return multiple RunRequests must specify a run_key in each"\n " RunRequest",\n )\n run_requests = result\n skip_message = None\n\n dynamic_partitions_store = (\n CachingDynamicPartitionsLoader(context.instance) if context.instance_ref else None\n )\n\n # clone all the run requests with resolved tags and config\n resolved_run_requests = []\n for run_request in run_requests:\n if run_request.partition_key and not run_request.has_resolved_partition():\n if context.repository_def is None:\n raise DagsterInvariantViolationError(\n "Must provide repository def to build_schedule_context when yielding"\n " partitioned run requests"\n )\n\n scheduled_target = context.repository_def.get_job(self._target.job_name)\n resolved_request = run_request.with_resolved_tags_and_config(\n target_definition=scheduled_target,\n dynamic_partitions_requests=[],\n current_time=context.scheduled_execution_time,\n dynamic_partitions_store=dynamic_partitions_store,\n )\n else:\n resolved_request = run_request\n\n resolved_run_requests.append(\n resolved_request.with_replaced_attrs(\n tags=merge_dicts(resolved_request.tags, DagsterRun.tags_for_schedule(self))\n )\n )\n\n return ScheduleExecutionData(\n run_requests=resolved_run_requests,\n skip_message=skip_message,\n captured_log_key=context.log_key if context.has_captured_logs() else None,\n )\n\n def has_loadable_target(self):\n return isinstance(self._target, DirectTarget)\n\n @property\n def targets_unresolved_asset_job(self) -> bool:\n return self.has_loadable_target() and isinstance(\n self.load_target(), UnresolvedAssetJobDefinition\n )\n\n def load_target(\n self,\n ) -> Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]:\n if isinstance(self._target, DirectTarget):\n return self._target.load()\n\n check.failed("Target is not loadable")\n\n @public\n @property\n def default_status(self) -> DefaultScheduleStatus:\n """DefaultScheduleStatus: The default status for this schedule when it is first loaded in\n a code location.\n """\n return self._default_status
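# --- Illustrative sketch (not part of this module) ---
# ScheduleDefinition can also be constructed directly rather than via the @schedule
# decorator. `my_job` is a hypothetical JobDefinition (named "my_job") assumed to be
# defined elsewhere.
from dagster import ScheduleDefinition

nightly_schedule = ScheduleDefinition(
    job=my_job,
    cron_schedule="45 23 * * 6",
    run_config={"ops": {}},
    execution_timezone="America/Los_Angeles",
)
# With no explicit name, the schedule name defaults to the job name plus "_schedule".
assert nightly_schedule.name == "my_job_schedule"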
\n
", "current_page_name": "_modules/dagster/_core/definitions/schedule_definition", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.schedule_definition"}, "selector": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.selector

\nfrom typing import AbstractSet, Iterable, NamedTuple, Optional, Sequence\n\nfrom typing_extensions import Self\n\nimport dagster._check as check\nfrom dagster._core.definitions.asset_check_spec import AssetCheckKey\nfrom dagster._core.definitions.events import AssetKey\nfrom dagster._core.definitions.repository_definition import SINGLETON_REPOSITORY_NAME\nfrom dagster._serdes import create_snapshot_id, whitelist_for_serdes\n\n\nclass JobSubsetSelector(\n    NamedTuple(\n        "_JobSubsetSelector",\n        [\n            ("location_name", str),\n            ("repository_name", str),\n            ("job_name", str),\n            ("op_selection", Optional[Sequence[str]]),\n            ("asset_selection", Optional[AbstractSet[AssetKey]]),\n            ("asset_check_selection", Optional[AbstractSet[AssetCheckKey]]),\n        ],\n    )\n):\n    """The information needed to resolve a job within a host process."""\n\n    def __new__(\n        cls,\n        location_name: str,\n        repository_name: str,\n        job_name: str,\n        op_selection: Optional[Sequence[str]],\n        asset_selection: Optional[Iterable[AssetKey]] = None,\n        asset_check_selection: Optional[Iterable[AssetCheckKey]] = None,\n    ):\n        asset_selection = set(asset_selection) if asset_selection else None\n        asset_check_selection = (\n            set(asset_check_selection) if asset_check_selection is not None else None\n        )\n        return super(JobSubsetSelector, cls).__new__(\n            cls,\n            location_name=check.str_param(location_name, "location_name"),\n            repository_name=check.str_param(repository_name, "repository_name"),\n            job_name=check.str_param(job_name, "job_name"),\n            op_selection=check.opt_nullable_sequence_param(op_selection, "op_selection", str),\n            asset_selection=check.opt_nullable_set_param(\n                asset_selection, "asset_selection", AssetKey\n            ),\n            asset_check_selection=check.opt_nullable_set_param(\n                asset_check_selection, "asset_check_selection", AssetCheckKey\n            ),\n        )\n\n    def to_graphql_input(self):\n        return {\n            "repositoryLocationName": self.location_name,\n            "repositoryName": self.repository_name,\n            "pipelineName": self.job_name,\n            "solidSelection": self.op_selection,\n        }\n\n    def with_op_selection(self, op_selection: Optional[Sequence[str]]) -> Self:\n        check.invariant(\n            self.op_selection is None,\n            f"Can not invoke with_op_selection when op_selection={self.op_selection} is"\n            " already set",\n        )\n        return JobSubsetSelector(\n            self.location_name, self.repository_name, self.job_name, op_selection\n        )\n\n\n
[docs]@whitelist_for_serdes\nclass JobSelector(\n NamedTuple(\n "_JobSelector", [("location_name", str), ("repository_name", str), ("job_name", str)]\n )\n):\n def __new__(\n cls,\n location_name: str,\n repository_name: Optional[str] = None,\n job_name: Optional[str] = None,\n ):\n return super(JobSelector, cls).__new__(\n cls,\n location_name=check.str_param(location_name, "location_name"),\n repository_name=check.opt_str_param(\n repository_name,\n "repository_name",\n default=SINGLETON_REPOSITORY_NAME,\n ),\n job_name=check.str_param(\n job_name,\n "job_name",\n "Must provide job_name argument even though it is marked as optional in the "\n "function signature. repository_name, a truly optional parameter, is before "\n "that argument and actually optional. Use of keyword arguments is "\n "recommended to avoid confusion.",\n ),\n )\n\n def to_graphql_input(self):\n return {\n "repositoryLocationName": self.location_name,\n "repositoryName": self.repository_name,\n "jobName": self.job_name,\n }\n\n @property\n def selector_id(self):\n return create_snapshot_id(self)\n\n @staticmethod\n def from_graphql_input(graphql_data):\n return JobSelector(\n location_name=graphql_data["repositoryLocationName"],\n repository_name=graphql_data["repositoryName"],\n job_name=graphql_data["jobName"],\n )
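# --- Illustrative sketch (not part of this module) ---
# JobSelector round-trips through the GraphQL input shape used by the host process.
# The location and job names are placeholders; the private import path mirrors this
# module and is shown for illustration only.
from dagster._core.definitions.selector import JobSelector

selector = JobSelector(location_name="my_location", job_name="my_job")
restored = JobSelector.from_graphql_input(selector.to_graphql_input())
assert restored == selector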
\n\n\n
[docs]@whitelist_for_serdes\nclass RepositorySelector(\n NamedTuple("_RepositorySelector", [("location_name", str), ("repository_name", str)])\n):\n def __new__(cls, location_name: str, repository_name: str):\n return super(RepositorySelector, cls).__new__(\n cls,\n location_name=check.str_param(location_name, "location_name"),\n repository_name=check.str_param(repository_name, "repository_name"),\n )\n\n def to_graphql_input(self):\n return {\n "repositoryLocationName": self.location_name,\n "repositoryName": self.repository_name,\n }\n\n @property\n def selector_id(self):\n return create_snapshot_id(self)\n\n @staticmethod\n def from_graphql_input(graphql_data):\n return RepositorySelector(\n location_name=graphql_data["repositoryLocationName"],\n repository_name=graphql_data["repositoryName"],\n )
\n\n\nclass CodeLocationSelector(NamedTuple("_CodeLocationSelector", [("location_name", str)])):\n def __new__(cls, location_name: str):\n return super(CodeLocationSelector, cls).__new__(\n cls,\n location_name=check.str_param(location_name, "location_name"),\n )\n\n def to_repository_selector(self) -> RepositorySelector:\n return RepositorySelector(\n location_name=self.location_name, repository_name=SINGLETON_REPOSITORY_NAME\n )\n\n\nclass ScheduleSelector(\n NamedTuple(\n "_ScheduleSelector",\n [("location_name", str), ("repository_name", str), ("schedule_name", str)],\n )\n):\n def __new__(cls, location_name: str, repository_name: str, schedule_name: str):\n return super(ScheduleSelector, cls).__new__(\n cls,\n location_name=check.str_param(location_name, "location_name"),\n repository_name=check.str_param(repository_name, "repository_name"),\n schedule_name=check.str_param(schedule_name, "schedule_name"),\n )\n\n def to_graphql_input(self):\n return {\n "repositoryLocationName": self.location_name,\n "repositoryName": self.repository_name,\n "scheduleName": self.schedule_name,\n }\n\n @staticmethod\n def from_graphql_input(graphql_data):\n return ScheduleSelector(\n location_name=graphql_data["repositoryLocationName"],\n repository_name=graphql_data["repositoryName"],\n schedule_name=graphql_data["scheduleName"],\n )\n\n\nclass ResourceSelector(NamedTuple):\n location_name: str\n repository_name: str\n resource_name: str\n\n def to_graphql_input(self):\n return {\n "repositoryLocationName": self.location_name,\n "repositoryName": self.repository_name,\n "resourceName": self.resource_name,\n }\n\n @staticmethod\n def from_graphql_input(graphql_data):\n return ResourceSelector(\n location_name=graphql_data["repositoryLocationName"],\n repository_name=graphql_data["repositoryName"],\n resource_name=graphql_data["resourceName"],\n )\n\n\nclass SensorSelector(\n NamedTuple(\n "_SensorSelector", [("location_name", str), ("repository_name", str), ("sensor_name", str)]\n )\n):\n def __new__(cls, location_name: str, repository_name: str, sensor_name: str):\n return super(SensorSelector, cls).__new__(\n cls,\n location_name=check.str_param(location_name, "location_name"),\n repository_name=check.str_param(repository_name, "repository_name"),\n sensor_name=check.str_param(sensor_name, "sensor_name"),\n )\n\n def to_graphql_input(self):\n return {\n "repositoryLocationName": self.location_name,\n "repositoryName": self.repository_name,\n "sensorName": self.sensor_name,\n }\n\n @staticmethod\n def from_graphql_input(graphql_data):\n return SensorSelector(\n location_name=graphql_data["repositoryLocationName"],\n repository_name=graphql_data["repositoryName"],\n sensor_name=graphql_data["sensorName"],\n )\n\n\n@whitelist_for_serdes\nclass InstigatorSelector(\n NamedTuple(\n "_InstigatorSelector", [("location_name", str), ("repository_name", str), ("name", str)]\n )\n):\n def __new__(cls, location_name: str, repository_name: str, name: str):\n return super(InstigatorSelector, cls).__new__(\n cls,\n location_name=check.str_param(location_name, "location_name"),\n repository_name=check.str_param(repository_name, "repository_name"),\n name=check.str_param(name, "name"),\n )\n\n def to_graphql_input(self):\n return {\n "repositoryLocationName": self.location_name,\n "repositoryName": self.repository_name,\n "name": self.name,\n }\n\n @staticmethod\n def from_graphql_input(graphql_data):\n return InstigatorSelector(\n location_name=graphql_data["repositoryLocationName"],\n 
repository_name=graphql_data["repositoryName"],\n name=graphql_data["name"],\n )\n\n\nclass GraphSelector(\n NamedTuple(\n "_GraphSelector", [("location_name", str), ("repository_name", str), ("graph_name", str)]\n )\n):\n """The information needed to resolve a graph within a host process."""\n\n def __new__(cls, location_name: str, repository_name: str, graph_name: str):\n return super(GraphSelector, cls).__new__(\n cls,\n location_name=check.str_param(location_name, "location_name"),\n repository_name=check.str_param(repository_name, "repository_name"),\n graph_name=check.str_param(graph_name, "graph_name"),\n )\n\n def to_graphql_input(self):\n return {\n "repositoryLocationName": self.location_name,\n "repositoryName": self.repository_name,\n "graphName": self.graph_name,\n }\n\n\n@whitelist_for_serdes\nclass PartitionSetSelector(\n NamedTuple(\n "_PartitionSetSelector",\n [("location_name", str), ("repository_name", str), ("partition_set_name", str)],\n )\n):\n """The information needed to resolve a partition set within a host process."""\n\n def __new__(cls, location_name: str, repository_name: str, partition_set_name: str):\n return super(PartitionSetSelector, cls).__new__(\n cls,\n location_name=check.str_param(location_name, "location_name"),\n repository_name=check.str_param(repository_name, "repository_name"),\n partition_set_name=check.str_param(partition_set_name, "partition_set_name"),\n )\n\n def to_graphql_input(self):\n return {\n "repositoryLocationName": self.location_name,\n "repositoryName": self.repository_name,\n "partitionSetName": self.partition_set_name,\n }\n\n\nclass PartitionRangeSelector(\n NamedTuple(\n "_PartitionRangeSelector",\n [("start", str), ("end", str)],\n )\n):\n """The information needed to resolve a partition range."""\n\n def __new__(cls, start: str, end: str):\n return super(PartitionRangeSelector, cls).__new__(\n cls,\n start=check.inst_param(start, "start", str),\n end=check.inst_param(end, "end", str),\n )\n\n def to_graphql_input(self):\n return {\n "start": self.start,\n "end": self.end,\n }\n\n @staticmethod\n def from_graphql_input(graphql_data):\n return PartitionRangeSelector(\n start=graphql_data["start"],\n end=graphql_data["end"],\n )\n\n\nclass PartitionsSelector(\n NamedTuple(\n "_PartitionsSelector",\n [("partition_range", PartitionRangeSelector)],\n )\n):\n """The information needed to define selection partitions.\n Using partition_range as property name to avoid shadowing Python 'range' builtin .\n """\n\n def __new__(cls, partition_range: PartitionRangeSelector):\n return super(PartitionsSelector, cls).__new__(\n cls,\n partition_range=check.inst_param(partition_range, "range", PartitionRangeSelector),\n )\n\n def to_graphql_input(self):\n return {\n "range": self.partition_range.to_graphql_input(),\n }\n\n @staticmethod\n def from_graphql_input(graphql_data):\n return PartitionsSelector(\n partition_range=PartitionRangeSelector.from_graphql_input(graphql_data["range"])\n )\n\n\nclass PartitionsByAssetSelector(\n NamedTuple(\n "PartitionsByAssetSelector",\n [\n ("asset_key", AssetKey),\n ("partitions", Optional[PartitionsSelector]),\n ],\n )\n):\n """The information needed to define partitions selection for a given asset key."""\n\n def __new__(cls, asset_key: AssetKey, partitions: Optional[PartitionsSelector] = None):\n return super(PartitionsByAssetSelector, cls).__new__(\n cls,\n asset_key=check.inst_param(asset_key, "asset_key", AssetKey),\n partitions=check.opt_inst_param(partitions, "partitions", PartitionsSelector),\n 
)\n\n def to_graphql_input(self):\n return {\n "assetKey": self.asset_key.to_graphql_input(),\n "partitions": self.partitions.to_graphql_input() if self.partitions else None,\n }\n\n @staticmethod\n def from_graphql_input(graphql_data):\n asset_key = graphql_data["assetKey"]\n partitions = graphql_data.get("partitions")\n return PartitionsByAssetSelector(\n asset_key=AssetKey.from_graphql_input(asset_key),\n partitions=PartitionsSelector.from_graphql_input(partitions) if partitions else None,\n )\n
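# --- Illustrative sketch (not part of this module) ---
# Selecting a partition range for one asset key and converting it to the GraphQL
# input shape. The asset key and date range are placeholders; the private import
# path mirrors this module and is shown for illustration only.
from dagster import AssetKey
from dagster._core.definitions.selector import (
    PartitionRangeSelector,
    PartitionsByAssetSelector,
    PartitionsSelector,
)

selection = PartitionsByAssetSelector(
    asset_key=AssetKey("daily_events"),
    partitions=PartitionsSelector(PartitionRangeSelector(start="2023-10-01", end="2023-10-07")),
)
graphql_input = selection.to_graphql_input()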
", "current_page_name": "_modules/dagster/_core/definitions/selector", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.selector"}, "sensor_definition": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.sensor_definition

\nimport inspect\nimport logging\nfrom collections import defaultdict\nfrom contextlib import ExitStack\nfrom enum import Enum\nfrom typing import (\n    TYPE_CHECKING,\n    Any,\n    Callable,\n    Dict,\n    Iterable,\n    Iterator,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n    Type,\n    TypeVar,\n    Union,\n    cast,\n)\n\nimport pendulum\nfrom typing_extensions import TypeAlias\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.definitions.asset_check_evaluation import AssetCheckEvaluation\nfrom dagster._core.definitions.events import (\n    AssetMaterialization,\n    AssetObservation,\n)\nfrom dagster._core.definitions.instigation_logger import InstigationLogger\nfrom dagster._core.definitions.job_definition import JobDefinition\nfrom dagster._core.definitions.partition import (\n    CachingDynamicPartitionsLoader,\n)\nfrom dagster._core.definitions.resource_annotation import (\n    get_resource_args,\n)\nfrom dagster._core.definitions.resource_definition import (\n    Resources,\n)\nfrom dagster._core.definitions.scoped_resources_builder import ScopedResourcesBuilder\nfrom dagster._core.errors import (\n    DagsterInvalidDefinitionError,\n    DagsterInvalidInvocationError,\n    DagsterInvalidSubsetError,\n    DagsterInvariantViolationError,\n)\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._core.instance.ref import InstanceRef\nfrom dagster._serdes import whitelist_for_serdes\nfrom dagster._utils import IHasInternalInit, normalize_to_repository\n\nfrom ..decorator_utils import (\n    get_function_params,\n)\nfrom .asset_selection import AssetSelection\nfrom .graph_definition import GraphDefinition\nfrom .run_request import (\n    AddDynamicPartitionsRequest,\n    DagsterRunReaction,\n    DeleteDynamicPartitionsRequest,\n    RunRequest,\n    SensorResult,\n    SkipReason,\n)\nfrom .target import DirectTarget, ExecutableDefinition, RepoRelativeTarget\nfrom .unresolved_asset_job_definition import UnresolvedAssetJobDefinition\nfrom .utils import check_valid_name\n\nif TYPE_CHECKING:\n    from dagster import ResourceDefinition\n    from dagster._core.definitions.definitions_class import Definitions\n    from dagster._core.definitions.repository_definition import RepositoryDefinition\n\n\n@whitelist_for_serdes\nclass DefaultSensorStatus(Enum):\n    RUNNING = "RUNNING"\n    STOPPED = "STOPPED"\n\n\n@whitelist_for_serdes\nclass SensorType(Enum):\n    STANDARD = "STANDARD"\n    RUN_STATUS = "RUN_STATUS"\n    ASSET = "ASSET"\n    MULTI_ASSET = "MULTI_ASSET"\n    FRESHNESS_POLICY = "FRESHNESS_POLICY"\n    UNKNOWN = "UNKNOWN"\n\n\nDEFAULT_SENSOR_DAEMON_INTERVAL = 30\n\n\n
[docs]class SensorEvaluationContext:\n """The context object available as the argument to the evaluation function of a :py:class:`dagster.SensorDefinition`.\n\n Users should not instantiate this object directly. To construct a\n `SensorEvaluationContext` for testing purposes, use :py:func:`dagster.\n build_sensor_context`.\n\n Attributes:\n instance_ref (Optional[InstanceRef]): The serialized instance configured to run the schedule\n cursor (Optional[str]): The cursor, passed back from the last sensor evaluation via\n the cursor attribute of SkipReason and RunRequest\n last_completion_time (float): DEPRECATED The last time that the sensor was evaluated (UTC).\n last_run_key (str): DEPRECATED The run key of the RunRequest most recently created by this\n sensor. Use the preferred `cursor` attribute instead.\n repository_name (Optional[str]): The name of the repository that the sensor belongs to.\n repository_def (Optional[RepositoryDefinition]): The repository or that\n the sensor belongs to. If needed by the sensor top-level resource definitions will be\n pulled from this repository. You can provide either this or `definitions`.\n instance (Optional[DagsterInstance]): The deserialized instance can also be passed in\n directly (primarily useful in testing contexts).\n definitions (Optional[Definitions]): `Definitions` object that the sensor is defined in.\n If needed by the sensor, top-level resource definitions will be pulled from these\n definitions. You can provide either this or `repository_def`.\n resources (Optional[Dict[str, Any]]): A dict of resource keys to resource\n definitions to be made available during sensor execution.\n\n Example:\n .. code-block:: python\n\n from dagster import sensor, SensorEvaluationContext\n\n @sensor\n def the_sensor(context: SensorEvaluationContext):\n ...\n\n """\n\n def __init__(\n self,\n instance_ref: Optional[InstanceRef],\n last_completion_time: Optional[float],\n last_run_key: Optional[str],\n cursor: Optional[str],\n repository_name: Optional[str],\n repository_def: Optional["RepositoryDefinition"] = None,\n instance: Optional[DagsterInstance] = None,\n sensor_name: Optional[str] = None,\n resources: Optional[Mapping[str, "ResourceDefinition"]] = None,\n definitions: Optional["Definitions"] = None,\n ):\n from dagster._core.definitions.definitions_class import Definitions\n from dagster._core.definitions.repository_definition import RepositoryDefinition\n\n self._exit_stack = ExitStack()\n self._instance_ref = check.opt_inst_param(instance_ref, "instance_ref", InstanceRef)\n self._last_completion_time = check.opt_float_param(\n last_completion_time, "last_completion_time"\n )\n self._last_run_key = check.opt_str_param(last_run_key, "last_run_key")\n self._cursor = check.opt_str_param(cursor, "cursor")\n self._repository_name = check.opt_str_param(repository_name, "repository_name")\n self._repository_def = normalize_to_repository(\n check.opt_inst_param(definitions, "definitions", Definitions),\n check.opt_inst_param(repository_def, "repository_def", RepositoryDefinition),\n error_on_none=False,\n )\n self._instance = check.opt_inst_param(instance, "instance", DagsterInstance)\n self._sensor_name = sensor_name\n\n # Wait to set resources unless they're accessed\n self._resource_defs = resources\n self._resources = None\n self._cm_scope_entered = False\n\n self._log_key = (\n [\n repository_name,\n sensor_name,\n pendulum.now("UTC").strftime("%Y%m%d_%H%M%S"),\n ]\n if repository_name and sensor_name\n else None\n )\n self._logger: 
Optional[InstigationLogger] = None\n self._cursor_updated = False\n\n def __enter__(self) -> "SensorEvaluationContext":\n self._cm_scope_entered = True\n return self\n\n def __exit__(self, *exc) -> None:\n self._exit_stack.close()\n self._logger = None\n\n @property\n def resource_defs(self) -> Optional[Mapping[str, "ResourceDefinition"]]:\n return self._resource_defs\n\n def merge_resources(self, resources_dict: Mapping[str, Any]) -> "SensorEvaluationContext":\n """Merge the specified resources into this context.\n\n This method is intended to be used by the Dagster framework, and should not be called by user code.\n\n Args:\n resources_dict (Mapping[str, Any]): The resources to replace in the context.\n """\n check.invariant(\n self._resources is None, "Cannot merge resources in context that has been initialized."\n )\n from dagster._core.execution.build_resources import wrap_resources_for_execution\n\n return SensorEvaluationContext(\n instance_ref=self._instance_ref,\n last_completion_time=self._last_completion_time,\n last_run_key=self._last_run_key,\n cursor=self._cursor,\n repository_name=self._repository_name,\n repository_def=self._repository_def,\n instance=self._instance,\n sensor_name=self._sensor_name,\n resources={\n **(self._resource_defs or {}),\n **wrap_resources_for_execution(resources_dict),\n },\n )\n\n @public\n @property\n def resources(self) -> Resources:\n """Resources: A mapping from resource key to instantiated resources for this sensor."""\n from dagster._core.definitions.scoped_resources_builder import (\n IContainsGenerator,\n )\n from dagster._core.execution.build_resources import build_resources\n\n if not self._resources:\n """\n This is similar to what we do in e.g. the op context - we set up a resource\n building context manager, and immediately enter it. This is so that in cases\n where a user is not using any context-manager based resources, they don't\n need to enter this SensorEvaluationContext themselves.\n\n For example:\n\n my_sensor(build_sensor_context(resources={"my_resource": my_non_cm_resource})\n\n will work ok, but for a CM resource we must do\n\n with build_sensor_context(resources={"my_resource": my_cm_resource}) as context:\n my_sensor(context)\n """\n\n # Early exit if no resources are defined. This skips unnecessary initialization\n # entirely. This allows users to run user code servers in cases where they\n # do not have access to the instance if they use a subset of features do\n # that do not require instance access. In this case, if they do not use\n # resources on sensors they do not require the instance, so we do not\n # instantiate it\n #\n # Tracking at https://github.com/dagster-io/dagster/issues/14345\n if not self._resource_defs:\n self._resources = ScopedResourcesBuilder.build_empty()\n return self._resources\n\n instance = self.instance if self._instance or self._instance_ref else None\n\n resources_cm = build_resources(resources=self._resource_defs or {}, instance=instance)\n self._resources = self._exit_stack.enter_context(resources_cm)\n\n if isinstance(self._resources, IContainsGenerator) and not self._cm_scope_entered:\n self._exit_stack.close()\n raise DagsterInvariantViolationError(\n "At least one provided resource is a generator, but attempting to access"\n " resources outside of context manager scope. You can use the following syntax"\n " to open a context manager: `with build_schedule_context(...) 
as context:`"\n )\n\n return self._resources\n\n @public\n @property\n def instance(self) -> DagsterInstance:\n """DagsterInstance: The current DagsterInstance."""\n # self._instance_ref should only ever be None when this SensorEvaluationContext was\n # constructed under test.\n if not self._instance:\n if not self._instance_ref:\n raise DagsterInvariantViolationError(\n "Attempted to initialize dagster instance, but no instance reference was"\n " provided."\n )\n self._instance = self._exit_stack.enter_context(\n DagsterInstance.from_ref(self._instance_ref)\n )\n return cast(DagsterInstance, self._instance)\n\n @property\n def instance_ref(self) -> Optional[InstanceRef]:\n return self._instance_ref\n\n @public\n @property\n def last_completion_time(self) -> Optional[float]:\n """Optional[float]: Timestamp representing the last time this sensor completed an evaluation."""\n return self._last_completion_time\n\n @public\n @property\n def last_run_key(self) -> Optional[str]:\n """Optional[str]: The run key supplied to the most recent RunRequest produced by this sensor."""\n return self._last_run_key\n\n @public\n @property\n def cursor(self) -> Optional[str]:\n """The cursor value for this sensor, which was set in an earlier sensor evaluation."""\n return self._cursor\n\n
[docs] @public\n def update_cursor(self, cursor: Optional[str]) -> None:\n """Updates the cursor value for this sensor, which will be provided on the context for the\n next sensor evaluation.\n\n This can be used to keep track of progress and avoid duplicate work across sensor\n evaluations.\n\n Args:\n cursor (Optional[str]):\n """\n self._cursor = check.opt_str_param(cursor, "cursor")\n self._cursor_updated = True
\n\n @property\n def cursor_updated(self) -> bool:\n return self._cursor_updated\n\n @public\n @property\n def repository_name(self) -> Optional[str]:\n """Optional[str]: The name of the repository that this sensor resides in."""\n return self._repository_name\n\n @public\n @property\n def repository_def(self) -> Optional["RepositoryDefinition"]:\n """Optional[RepositoryDefinition]: The RepositoryDefinition that this sensor resides in."""\n return self._repository_def\n\n @property\n def log(self) -> logging.Logger:\n if self._logger:\n return self._logger\n\n if not self._instance_ref:\n self._logger = self._exit_stack.enter_context(\n InstigationLogger(\n self._log_key,\n repository_name=self._repository_name,\n name=self._sensor_name,\n )\n )\n return cast(logging.Logger, self._logger)\n\n self._logger = self._exit_stack.enter_context(\n InstigationLogger(\n self._log_key,\n self.instance,\n repository_name=self._repository_name,\n name=self._sensor_name,\n )\n )\n return cast(logging.Logger, self._logger)\n\n def has_captured_logs(self):\n return self._logger and self._logger.has_captured_logs()\n\n @property\n def log_key(self) -> Optional[List[str]]:\n return self._log_key
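# --- Illustrative sketch (not part of this module) ---
# A cursor-driven sensor built on SensorEvaluationContext.cursor and update_cursor.
# `fetch_records_after` and the job name "my_job" are hypothetical placeholders.
from dagster import RunRequest, SensorEvaluationContext, SkipReason, sensor


@sensor(job_name="my_job")
def records_sensor(context: SensorEvaluationContext):
    last_id = int(context.cursor) if context.cursor else 0
    records = fetch_records_after(last_id)  # hypothetical external lookup
    if not records:
        yield SkipReason(f"No records newer than id {last_id}")
        return
    for record in records:
        yield RunRequest(run_key=str(record["id"]))
    # Persist progress for the next evaluation.
    context.update_cursor(str(records[-1]["id"]))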
\n\n\nRawSensorEvaluationFunctionReturn = Union[\n Iterator[Union[SkipReason, RunRequest, DagsterRunReaction, SensorResult]],\n Sequence[RunRequest],\n SkipReason,\n RunRequest,\n DagsterRunReaction,\n SensorResult,\n]\nRawSensorEvaluationFunction: TypeAlias = Callable[..., RawSensorEvaluationFunctionReturn]\n\nSensorEvaluationFunction: TypeAlias = Callable[..., Sequence[Union[SkipReason, RunRequest]]]\n\n\ndef get_context_param_name(fn: Callable) -> Optional[str]:\n """Determines the sensor's context parameter name by excluding all resource parameters."""\n resource_params = {param.name for param in get_resource_args(fn)}\n\n return next(\n (param.name for param in get_function_params(fn) if param.name not in resource_params), None\n )\n\n\ndef validate_and_get_resource_dict(\n resources: Resources, sensor_name: str, required_resource_keys: Set[str]\n) -> Dict[str, Any]:\n """Validates that the context has all the required resources and returns a dictionary of\n resource key to resource object.\n """\n for k in required_resource_keys:\n if not hasattr(resources, k):\n raise DagsterInvalidDefinitionError(\n f"Resource with key '{k}' required by sensor '{sensor_name}' was not provided."\n )\n\n return {k: getattr(resources, k) for k in required_resource_keys}\n\n\ndef _check_dynamic_partitions_requests(\n dynamic_partitions_requests: Sequence[\n Union[AddDynamicPartitionsRequest, DeleteDynamicPartitionsRequest]\n ],\n) -> None:\n req_keys_to_add_by_partitions_def_name = defaultdict(set)\n req_keys_to_delete_by_partitions_def_name = defaultdict(set)\n\n for req in dynamic_partitions_requests:\n duplicate_req_keys_to_delete = req_keys_to_delete_by_partitions_def_name.get(\n req.partitions_def_name, set()\n ).intersection(req.partition_keys)\n duplicate_req_keys_to_add = req_keys_to_add_by_partitions_def_name.get(\n req.partitions_def_name, set()\n ).intersection(req.partition_keys)\n if isinstance(req, AddDynamicPartitionsRequest):\n if duplicate_req_keys_to_delete:\n raise DagsterInvariantViolationError(\n "Dynamic partition requests cannot contain both add and delete requests for"\n " the same partition keys.Invalid request: partitions_def_name"\n f" '{req.partitions_def_name}', partition_keys: {duplicate_req_keys_to_delete}"\n )\n elif duplicate_req_keys_to_add:\n raise DagsterInvariantViolationError(\n "Cannot request to add duplicate dynamic partition keys: \\npartitions_def_name"\n f" '{req.partitions_def_name}', partition_keys: {duplicate_req_keys_to_add}"\n )\n req_keys_to_add_by_partitions_def_name[req.partitions_def_name].update(\n req.partition_keys\n )\n elif isinstance(req, DeleteDynamicPartitionsRequest):\n if duplicate_req_keys_to_delete:\n raise DagsterInvariantViolationError(\n "Cannot request to add duplicate dynamic partition keys: \\npartitions_def_name"\n f" '{req.partitions_def_name}', partition_keys:"\n f" {req_keys_to_add_by_partitions_def_name}"\n )\n elif duplicate_req_keys_to_add:\n raise DagsterInvariantViolationError(\n "Dynamic partition requests cannot contain both add and delete requests for"\n " the same partition keys.Invalid request: partitions_def_name"\n f" '{req.partitions_def_name}', partition_keys: {duplicate_req_keys_to_add}"\n )\n req_keys_to_delete_by_partitions_def_name[req.partitions_def_name].update(\n req.partition_keys\n )\n else:\n check.failed(f"Unexpected dynamic partition request type: {req}")\n\n\n
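# --- Illustrative sketch (not part of this module) ---
# Dynamic-partition requests of the kind validated above are typically returned from
# a sensor via SensorResult. The partitions definition name "customers", the job name,
# and the hard-coded keys are placeholders.
from dagster import AddDynamicPartitionsRequest, RunRequest, SensorResult, sensor


@sensor(job_name="my_partitioned_job")
def customers_sensor(context):
    new_keys = ["acme", "globex"]  # placeholder for an external listing
    return SensorResult(
        run_requests=[RunRequest(partition_key=key) for key in new_keys],
        dynamic_partitions_requests=[
            AddDynamicPartitionsRequest(partitions_def_name="customers", partition_keys=new_keys)
        ],
    )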
[docs]class SensorDefinition(IHasInternalInit):\n """Define a sensor that initiates a set of runs based on some external state.\n\n Args:\n evaluation_fn (Callable[[SensorEvaluationContext]]): The core evaluation function for the\n sensor, which is run at an interval to determine whether a run should be launched or\n not. Takes a :py:class:`~dagster.SensorEvaluationContext`.\n\n This function must return a generator, which must yield either a single SkipReason\n or one or more RunRequest objects.\n name (Optional[str]): The name of the sensor to create. Defaults to name of evaluation_fn\n minimum_interval_seconds (Optional[int]): The minimum number of seconds that will elapse\n between sensor evaluations.\n description (Optional[str]): A human-readable description of the sensor.\n job (Optional[GraphDefinition, JobDefinition, UnresolvedAssetJob]): The job to execute when this sensor fires.\n jobs (Optional[Sequence[GraphDefinition, JobDefinition, UnresolvedAssetJob]]): (experimental) A list of jobs to execute when this sensor fires.\n default_status (DefaultSensorStatus): Whether the sensor starts as running or not. The default\n status can be overridden from the Dagster UI or via the GraphQL API.\n asset_selection (AssetSelection): (Experimental) an asset selection to launch a run for if\n the sensor condition is met. This can be provided instead of specifying a job.\n """\n\n def with_updated_jobs(self, new_jobs: Sequence[ExecutableDefinition]) -> "SensorDefinition":\n """Returns a copy of this sensor with the jobs replaced.\n\n Args:\n job (ExecutableDefinition): The job that should execute when this\n schedule runs.\n """\n return SensorDefinition.dagster_internal_init(\n name=self.name,\n evaluation_fn=self._raw_fn,\n minimum_interval_seconds=self.minimum_interval_seconds,\n description=self.description,\n job_name=None, # if original init was passed job name, was resolved to a job\n jobs=new_jobs if len(new_jobs) > 1 else None,\n job=new_jobs[0] if len(new_jobs) == 1 else None,\n default_status=self.default_status,\n asset_selection=self.asset_selection,\n required_resource_keys=self._raw_required_resource_keys,\n )\n\n def with_updated_job(self, new_job: ExecutableDefinition) -> "SensorDefinition":\n """Returns a copy of this sensor with the job replaced.\n\n Args:\n job (ExecutableDefinition): The job that should execute when this\n schedule runs.\n """\n return self.with_updated_jobs([new_job])\n\n def __init__(\n self,\n name: Optional[str] = None,\n *,\n evaluation_fn: Optional[RawSensorEvaluationFunction] = None,\n job_name: Optional[str] = None,\n minimum_interval_seconds: Optional[int] = None,\n description: Optional[str] = None,\n job: Optional[ExecutableDefinition] = None,\n jobs: Optional[Sequence[ExecutableDefinition]] = None,\n default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,\n asset_selection: Optional[AssetSelection] = None,\n required_resource_keys: Optional[Set[str]] = None,\n ):\n from dagster._config.pythonic_config import validate_resource_annotated_function\n\n if evaluation_fn is None:\n raise DagsterInvalidDefinitionError("Must provide evaluation_fn to SensorDefinition.")\n\n if (\n sum(\n [\n int(job is not None),\n int(jobs is not None),\n int(job_name is not None),\n int(asset_selection is not None),\n ]\n )\n > 1\n ):\n raise DagsterInvalidDefinitionError(\n "Attempted to provide more than one of 'job', 'jobs', 'job_name', and "\n "'asset_selection' params to SensorDefinition. 
Must provide only one."\n )\n\n jobs = jobs if jobs else [job] if job else None\n\n targets: Optional[List[Union[RepoRelativeTarget, DirectTarget]]] = None\n if job_name:\n targets = [\n RepoRelativeTarget(\n job_name=check.str_param(job_name, "job_name"),\n op_selection=None,\n )\n ]\n elif job:\n targets = [DirectTarget(job)]\n elif jobs:\n targets = [DirectTarget(job) for job in jobs]\n elif asset_selection:\n targets = []\n\n if name:\n self._name = check_valid_name(name)\n else:\n self._name = evaluation_fn.__name__\n\n self._raw_fn: RawSensorEvaluationFunction = check.callable_param(\n evaluation_fn, "evaluation_fn"\n )\n self._evaluation_fn: Union[\n SensorEvaluationFunction,\n Callable[\n [SensorEvaluationContext],\n List[Union[SkipReason, RunRequest, DagsterRunReaction]],\n ],\n ] = wrap_sensor_evaluation(self._name, evaluation_fn)\n self._min_interval = check.opt_int_param(\n minimum_interval_seconds, "minimum_interval_seconds", DEFAULT_SENSOR_DAEMON_INTERVAL\n )\n self._description = check.opt_str_param(description, "description")\n self._targets: Sequence[Union[RepoRelativeTarget, DirectTarget]] = check.opt_list_param(\n targets, "targets", (DirectTarget, RepoRelativeTarget)\n )\n self._default_status = check.inst_param(\n default_status, "default_status", DefaultSensorStatus\n )\n self._asset_selection = check.opt_inst_param(\n asset_selection, "asset_selection", AssetSelection\n )\n validate_resource_annotated_function(self._raw_fn)\n resource_arg_names: Set[str] = {arg.name for arg in get_resource_args(self._raw_fn)}\n\n check.param_invariant(\n len(required_resource_keys or []) == 0 or len(resource_arg_names) == 0,\n "Cannot specify resource requirements in both @sensor decorator and as arguments to"\n " the decorated function",\n )\n self._raw_required_resource_keys = check.opt_set_param(\n required_resource_keys, "required_resource_keys", of_type=str\n )\n self._required_resource_keys = self._raw_required_resource_keys or resource_arg_names\n\n @staticmethod\n def dagster_internal_init(\n *,\n name: Optional[str],\n evaluation_fn: Optional[RawSensorEvaluationFunction],\n job_name: Optional[str],\n minimum_interval_seconds: Optional[int],\n description: Optional[str],\n job: Optional[ExecutableDefinition],\n jobs: Optional[Sequence[ExecutableDefinition]],\n default_status: DefaultSensorStatus,\n asset_selection: Optional[AssetSelection],\n required_resource_keys: Optional[Set[str]],\n ) -> "SensorDefinition":\n return SensorDefinition(\n name=name,\n evaluation_fn=evaluation_fn,\n job_name=job_name,\n minimum_interval_seconds=minimum_interval_seconds,\n description=description,\n job=job,\n jobs=jobs,\n default_status=default_status,\n asset_selection=asset_selection,\n required_resource_keys=required_resource_keys,\n )\n\n def __call__(self, *args, **kwargs) -> RawSensorEvaluationFunctionReturn:\n context_param_name_if_present = get_context_param_name(self._raw_fn)\n context = get_or_create_sensor_context(self._raw_fn, *args, **kwargs)\n\n context_param = (\n {context_param_name_if_present: context} if context_param_name_if_present else {}\n )\n\n resources = validate_and_get_resource_dict(\n context.resources, self.name, self._required_resource_keys\n )\n return self._raw_fn(**context_param, **resources)\n\n @public\n @property\n def required_resource_keys(self) -> Set[str]:\n """Set[str]: The set of keys for resources that must be provided to this sensor."""\n return self._required_resource_keys\n\n @public\n @property\n def name(self) -> str:\n """str: The name of this 
sensor."""\n return self._name\n\n @public\n @property\n def description(self) -> Optional[str]:\n """Optional[str]: A description for this sensor."""\n return self._description\n\n @public\n @property\n def minimum_interval_seconds(self) -> Optional[int]:\n """Optional[int]: The minimum number of seconds between sequential evaluations of this sensor."""\n return self._min_interval\n\n @property\n def targets(self) -> Sequence[Union[DirectTarget, RepoRelativeTarget]]:\n return self._targets\n\n @public\n @property\n def job(self) -> Union[JobDefinition, GraphDefinition, UnresolvedAssetJobDefinition]:\n """Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]: The job that is\n targeted by this schedule.\n """\n if self._targets:\n if len(self._targets) == 1 and isinstance(self._targets[0], DirectTarget):\n return self._targets[0].target\n elif len(self._targets) > 1:\n raise DagsterInvalidDefinitionError(\n "Job property not available when SensorDefinition has multiple jobs."\n )\n raise DagsterInvalidDefinitionError("No job was provided to SensorDefinition.")\n\n @public\n @property\n def jobs(self) -> List[Union[JobDefinition, GraphDefinition, UnresolvedAssetJobDefinition]]:\n """List[Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]]: A list of jobs\n that are targeted by this schedule.\n """\n if self._targets and all(isinstance(target, DirectTarget) for target in self._targets):\n return [target.target for target in self._targets] # type: ignore # (illegible conditional)\n raise DagsterInvalidDefinitionError("No job was provided to SensorDefinition.")\n\n @property\n def sensor_type(self) -> SensorType:\n return SensorType.STANDARD\n\n def evaluate_tick(self, context: "SensorEvaluationContext") -> "SensorExecutionData":\n """Evaluate sensor using the provided context.\n\n Args:\n context (SensorEvaluationContext): The context with which to evaluate this sensor.\n\n Returns:\n SensorExecutionData: Contains list of run requests, or skip message if present.\n\n """\n context = check.inst_param(context, "context", SensorEvaluationContext)\n\n result = self._evaluation_fn(context)\n\n skip_message: Optional[str] = None\n run_requests: List[RunRequest] = []\n dagster_run_reactions: List[DagsterRunReaction] = []\n dynamic_partitions_requests: Optional[\n Sequence[Union[AddDynamicPartitionsRequest, DeleteDynamicPartitionsRequest]]\n ] = []\n updated_cursor = context.cursor\n asset_events = []\n\n if not result or result == [None]:\n skip_message = "Sensor function returned an empty result"\n elif len(result) == 1:\n item = result[0]\n check.inst(item, (SkipReason, RunRequest, DagsterRunReaction, SensorResult))\n\n if isinstance(item, SensorResult):\n run_requests = list(item.run_requests) if item.run_requests else []\n skip_message = (\n item.skip_reason.skip_message\n if item.skip_reason\n else (None if run_requests else "Sensor function returned an empty result")\n )\n\n _check_dynamic_partitions_requests(\n item.dynamic_partitions_requests or [],\n )\n dynamic_partitions_requests = item.dynamic_partitions_requests or []\n\n if item.cursor and context.cursor_updated:\n raise DagsterInvariantViolationError(\n "SensorResult.cursor cannot be set if context.update_cursor() was called."\n )\n updated_cursor = item.cursor\n asset_events = item.asset_events\n\n elif isinstance(item, RunRequest):\n run_requests = [item]\n elif isinstance(item, SkipReason):\n skip_message = item.skip_message if isinstance(item, SkipReason) else None\n elif isinstance(item, 
DagsterRunReaction):\n dagster_run_reactions = (\n [cast(DagsterRunReaction, item)] if isinstance(item, DagsterRunReaction) else []\n )\n else:\n check.failed(f"Unexpected type {type(item)} in sensor result")\n else:\n if any(isinstance(item, SensorResult) for item in result):\n check.failed(\n "When a SensorResult is returned from a sensor, it must be the only object"\n " returned."\n )\n\n check.is_list(result, (SkipReason, RunRequest, DagsterRunReaction))\n has_skip = any(map(lambda x: isinstance(x, SkipReason), result))\n run_requests = [item for item in result if isinstance(item, RunRequest)]\n dagster_run_reactions = [\n item for item in result if isinstance(item, DagsterRunReaction)\n ]\n\n if has_skip:\n if len(run_requests) > 0:\n check.failed(\n "Expected a single SkipReason or one or more RunRequests: received both "\n "RunRequest and SkipReason"\n )\n elif len(dagster_run_reactions) > 0:\n check.failed(\n "Expected a single SkipReason or one or more DagsterRunReaction: "\n "received both DagsterRunReaction and SkipReason"\n )\n else:\n check.failed("Expected a single SkipReason: received multiple SkipReasons")\n\n _check_dynamic_partitions_requests(dynamic_partitions_requests)\n resolved_run_requests = self.resolve_run_requests(\n run_requests, context, self._asset_selection, dynamic_partitions_requests\n )\n\n return SensorExecutionData(\n resolved_run_requests,\n skip_message,\n updated_cursor,\n dagster_run_reactions,\n captured_log_key=context.log_key if context.has_captured_logs() else None,\n dynamic_partitions_requests=dynamic_partitions_requests,\n asset_events=asset_events,\n )\n\n def has_loadable_targets(self) -> bool:\n for target in self._targets:\n if isinstance(target, DirectTarget):\n return True\n return False\n\n def load_targets(\n self,\n ) -> Sequence[Union[JobDefinition, GraphDefinition, UnresolvedAssetJobDefinition]]:\n """Returns job/graph definitions that have been directly passed into the sensor definition.\n Any jobs or graphs that are referenced by name will not be loaded.\n """\n targets = []\n for target in self._targets:\n if isinstance(target, DirectTarget):\n targets.append(target.load())\n return targets\n\n def resolve_run_requests(\n self,\n run_requests: Sequence[RunRequest],\n context: SensorEvaluationContext,\n asset_selection: Optional[AssetSelection],\n dynamic_partitions_requests: Sequence[\n Union[AddDynamicPartitionsRequest, DeleteDynamicPartitionsRequest]\n ],\n ) -> Sequence[RunRequest]:\n def _get_repo_job_by_name(context: SensorEvaluationContext, job_name: str) -> JobDefinition:\n if context.repository_def is None:\n raise DagsterInvariantViolationError(\n "Must provide repository def to build_sensor_context when yielding partitioned"\n " run requests"\n )\n return context.repository_def.get_job(job_name)\n\n has_multiple_targets = len(self._targets) > 1\n target_names = [target.job_name for target in self._targets]\n\n if run_requests and len(self._targets) == 0 and not self._asset_selection:\n raise Exception(\n f"Error in sensor {self._name}: Sensor evaluation function returned a RunRequest "\n "for a sensor lacking a specified target (job_name, job, or jobs). 
Targets "\n "can be specified by providing job, jobs, or job_name to the @sensor "\n "decorator."\n )\n\n if asset_selection:\n run_requests = [\n *_run_requests_with_base_asset_jobs(run_requests, context, asset_selection)\n ]\n\n dynamic_partitions_store = (\n CachingDynamicPartitionsLoader(context.instance) if context.instance_ref else None\n )\n\n # Run requests may contain an invalid target, or a partition key that does not exist.\n # We will resolve these run requests, applying the target and partition config/tags.\n resolved_run_requests = []\n for run_request in run_requests:\n if run_request.job_name is None and has_multiple_targets:\n raise Exception(\n f"Error in sensor {self._name}: Sensor returned a RunRequest that did not"\n " specify job_name for the requested run. Expected one of:"\n f" {target_names}"\n )\n elif (\n run_request.job_name\n and run_request.job_name not in target_names\n and not asset_selection\n ):\n raise Exception(\n f"Error in sensor {self._name}: Sensor returned a RunRequest with job_name "\n f"{run_request.job_name}. Expected one of: {target_names}"\n )\n\n if run_request.partition_key and not run_request.has_resolved_partition():\n selected_job = _get_repo_job_by_name(\n context, run_request.job_name if run_request.job_name else target_names[0]\n )\n resolved_run_requests.append(\n run_request.with_resolved_tags_and_config(\n target_definition=selected_job,\n current_time=None,\n dynamic_partitions_store=dynamic_partitions_store,\n dynamic_partitions_requests=dynamic_partitions_requests,\n )\n )\n else:\n resolved_run_requests.append(run_request)\n\n return resolved_run_requests\n\n @property\n def _target(self) -> Optional[Union[DirectTarget, RepoRelativeTarget]]:\n return self._targets[0] if self._targets else None\n\n @public\n @property\n def job_name(self) -> Optional[str]:\n """Optional[str]: The name of the job that is targeted by this sensor."""\n if len(self._targets) > 1:\n raise DagsterInvalidInvocationError(\n f"Cannot use `job_name` property for sensor {self.name}, which targets multiple"\n " jobs."\n )\n return self._targets[0].job_name\n\n @public\n @property\n def default_status(self) -> DefaultSensorStatus:\n """DefaultSensorStatus: The default status for this sensor when it is first loaded in\n a code location.\n """\n return self._default_status\n\n @property\n def asset_selection(self) -> Optional[AssetSelection]:\n return self._asset_selection
\n\n\n@whitelist_for_serdes(\n storage_field_names={"dagster_run_reactions": "pipeline_run_reactions"},\n)\nclass SensorExecutionData(\n NamedTuple(\n "_SensorExecutionData",\n [\n ("run_requests", Optional[Sequence[RunRequest]]),\n ("skip_message", Optional[str]),\n ("cursor", Optional[str]),\n ("dagster_run_reactions", Optional[Sequence[DagsterRunReaction]]),\n ("captured_log_key", Optional[Sequence[str]]),\n (\n "dynamic_partitions_requests",\n Optional[\n Sequence[Union[AddDynamicPartitionsRequest, DeleteDynamicPartitionsRequest]]\n ],\n ),\n (\n "asset_events",\n Sequence[Union[AssetMaterialization, AssetObservation, AssetCheckEvaluation]],\n ),\n ],\n )\n):\n dagster_run_reactions: Optional[Sequence[DagsterRunReaction]]\n\n def __new__(\n cls,\n run_requests: Optional[Sequence[RunRequest]] = None,\n skip_message: Optional[str] = None,\n cursor: Optional[str] = None,\n dagster_run_reactions: Optional[Sequence[DagsterRunReaction]] = None,\n captured_log_key: Optional[Sequence[str]] = None,\n dynamic_partitions_requests: Optional[\n Sequence[Union[AddDynamicPartitionsRequest, DeleteDynamicPartitionsRequest]]\n ] = None,\n asset_events: Optional[\n Sequence[Union[AssetMaterialization, AssetObservation, AssetCheckEvaluation]]\n ] = None,\n ):\n check.opt_sequence_param(run_requests, "run_requests", RunRequest)\n check.opt_str_param(skip_message, "skip_message")\n check.opt_str_param(cursor, "cursor")\n check.opt_sequence_param(dagster_run_reactions, "dagster_run_reactions", DagsterRunReaction)\n check.opt_list_param(captured_log_key, "captured_log_key", str)\n check.opt_sequence_param(\n dynamic_partitions_requests,\n "dynamic_partitions_requests",\n (AddDynamicPartitionsRequest, DeleteDynamicPartitionsRequest),\n )\n check.opt_sequence_param(\n asset_events,\n "asset_events",\n (AssetMaterialization, AssetObservation, AssetCheckEvaluation),\n )\n check.invariant(\n not (run_requests and skip_message), "Found both skip data and run request data"\n )\n return super(SensorExecutionData, cls).__new__(\n cls,\n run_requests=run_requests,\n skip_message=skip_message,\n cursor=cursor,\n dagster_run_reactions=dagster_run_reactions,\n captured_log_key=captured_log_key,\n dynamic_partitions_requests=dynamic_partitions_requests,\n asset_events=asset_events or [],\n )\n\n\ndef wrap_sensor_evaluation(\n sensor_name: str,\n fn: RawSensorEvaluationFunction,\n) -> SensorEvaluationFunction:\n resource_arg_names: Set[str] = {arg.name for arg in get_resource_args(fn)}\n\n def _wrapped_fn(context: SensorEvaluationContext):\n resource_args_populated = validate_and_get_resource_dict(\n context.resources, sensor_name, resource_arg_names\n )\n\n context_param_name_if_present = get_context_param_name(fn)\n context_param = (\n {context_param_name_if_present: context} if context_param_name_if_present else {}\n )\n raw_evaluation_result = fn(**context_param, **resource_args_populated)\n\n def check_returned_scalar(scalar):\n if isinstance(scalar, (SkipReason, RunRequest, SensorResult)):\n return scalar\n elif scalar is not None:\n raise Exception(\n f"Error in sensor {sensor_name}: Sensor unexpectedly returned output "\n f"{scalar} of type {type(scalar)}. 
Should only return SkipReason or "\n "RunRequest objects."\n )\n\n if inspect.isgenerator(raw_evaluation_result):\n result = []\n try:\n while True:\n result.append(next(raw_evaluation_result))\n except StopIteration as e:\n # captures the case where the evaluation function has a yield and also returns a\n # value\n if e.value is not None:\n result.append(check_returned_scalar(e.value))\n\n return result\n elif isinstance(raw_evaluation_result, list):\n return raw_evaluation_result\n else:\n return [check_returned_scalar(raw_evaluation_result)]\n\n return _wrapped_fn\n\n\n
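# Hedged sketch of the return shapes that wrap_sensor_evaluation above normalizes
# into a list: a generator yielding RunRequests, a single SkipReason, or a
# SensorResult. "hypothetical_job" is an assumed job name defined elsewhere.
from dagster import RunRequest, SensorResult, SkipReason, sensor


@sensor(job_name="hypothetical_job")
def yielding_sensor(context):
    yield RunRequest(run_key="a")
    yield RunRequest(run_key="b")


@sensor(job_name="hypothetical_job")
def skipping_sensor(context):
    return SkipReason("nothing new upstream")


@sensor(job_name="hypothetical_job")
def result_sensor(context):
    return SensorResult(run_requests=[RunRequest(run_key="c")], cursor="c")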
[docs]def build_sensor_context(\n instance: Optional[DagsterInstance] = None,\n cursor: Optional[str] = None,\n repository_name: Optional[str] = None,\n repository_def: Optional["RepositoryDefinition"] = None,\n sensor_name: Optional[str] = None,\n resources: Optional[Mapping[str, object]] = None,\n definitions: Optional["Definitions"] = None,\n instance_ref: Optional["InstanceRef"] = None,\n) -> SensorEvaluationContext:\n """Builds sensor execution context using the provided parameters.\n\n This function can be used to provide a context to the invocation of a sensor definition.If\n provided, the dagster instance must be persistent; DagsterInstance.ephemeral() will result in an\n error.\n\n Args:\n instance (Optional[DagsterInstance]): The dagster instance configured to run the sensor.\n cursor (Optional[str]): A cursor value to provide to the evaluation of the sensor.\n repository_name (Optional[str]): The name of the repository that the sensor belongs to.\n repository_def (Optional[RepositoryDefinition]): The repository that the sensor belongs to.\n If needed by the sensor top-level resource definitions will be pulled from this repository.\n You can provide either this or `definitions`.\n resources (Optional[Mapping[str, ResourceDefinition]]): A set of resource definitions\n to provide to the sensor. If passed, these will override any resource definitions\n provided by the repository.\n definitions (Optional[Definitions]): `Definitions` object that the sensor is defined in.\n If needed by the sensor, top-level resource definitions will be pulled from these\n definitions. You can provide either this or `repository_def`.\n\n Examples:\n .. code-block:: python\n\n context = build_sensor_context()\n my_sensor(context)\n\n """\n from dagster._core.definitions.definitions_class import Definitions\n from dagster._core.definitions.repository_definition import RepositoryDefinition\n from dagster._core.execution.build_resources import wrap_resources_for_execution\n\n check.opt_inst_param(instance, "instance", DagsterInstance)\n check.opt_str_param(cursor, "cursor")\n check.opt_str_param(repository_name, "repository_name")\n repository_def = normalize_to_repository(\n check.opt_inst_param(definitions, "definitions", Definitions),\n check.opt_inst_param(repository_def, "repository_def", RepositoryDefinition),\n error_on_none=False,\n )\n\n return SensorEvaluationContext(\n instance_ref=instance_ref,\n last_completion_time=None,\n last_run_key=None,\n cursor=cursor,\n repository_name=repository_name,\n instance=instance,\n repository_def=repository_def,\n sensor_name=sensor_name,\n resources=wrap_resources_for_execution(resources),\n )
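# Hedged sketch expanding on the docstring example above: invoking a sensor
# directly in a test, supplying a cursor and a plain-object resource through
# build_sensor_context. ResourceParam usage, the "api_url" resource key, and the
# job name are assumptions for illustration.
from dagster import ResourceParam, RunRequest, SkipReason, build_sensor_context, sensor


@sensor(job_name="hypothetical_job")
def release_sensor(context, api_url: ResourceParam[str]):
    if context.cursor == "up-to-date":
        return SkipReason(f"nothing new at {api_url}")
    return RunRequest(run_key="refresh")


context = build_sensor_context(
    cursor="up-to-date", resources={"api_url": "https://example.com"}
)
assert isinstance(release_sensor(context), SkipReason)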
\n\n\nT = TypeVar("T")\n\n\ndef get_sensor_context_from_args_or_kwargs(\n fn: Callable,\n args: Tuple[Any, ...],\n kwargs: Dict[str, Any],\n context_type: Type[T],\n) -> Optional[T]:\n from dagster._config.pythonic_config import is_coercible_to_resource\n\n context_param_name = get_context_param_name(fn)\n\n kwarg_keys_non_resource = set(kwargs.keys()) - {param.name for param in get_resource_args(fn)}\n if len(args) + len(kwarg_keys_non_resource) > 1:\n raise DagsterInvalidInvocationError(\n "Sensor invocation received multiple non-resource arguments. Only a first "\n "positional context parameter should be provided when invoking."\n )\n\n if any(is_coercible_to_resource(arg) for arg in args):\n raise DagsterInvalidInvocationError(\n "If directly invoking a sensor, you may not provide resources as"\n " positional"\n " arguments, only as keyword arguments."\n )\n\n context: Optional[T] = None\n\n if len(args) > 0:\n context = check.opt_inst(args[0], context_type)\n elif len(kwargs) > 0:\n if context_param_name and context_param_name not in kwargs:\n raise DagsterInvalidInvocationError(\n f"Sensor invocation expected argument '{context_param_name}'."\n )\n context = check.opt_inst(kwargs.get(context_param_name or "context"), context_type)\n elif context_param_name:\n # If the context parameter is present but no value was provided, we error\n raise DagsterInvalidInvocationError(\n "Sensor evaluation function expected context argument, but no context argument "\n "was provided when invoking."\n )\n\n return context\n\n\ndef get_or_create_sensor_context(\n fn: Callable,\n *args: Any,\n **kwargs: Any,\n) -> SensorEvaluationContext:\n """Based on the passed resource function and the arguments passed to it, returns the\n user-passed SensorEvaluationContext or creates one if it is not passed.\n\n Raises an exception if the user passes more than one argument or if the user-provided\n function requires a context parameter but none is passed.\n """\n context = (\n get_sensor_context_from_args_or_kwargs(\n fn,\n args,\n kwargs,\n context_type=SensorEvaluationContext,\n )\n or build_sensor_context()\n )\n resource_args_from_kwargs = {}\n\n resource_args = {param.name for param in get_resource_args(fn)}\n for resource_arg in resource_args:\n if resource_arg in kwargs:\n resource_args_from_kwargs[resource_arg] = kwargs[resource_arg]\n\n if resource_args_from_kwargs:\n return context.merge_resources(resource_args_from_kwargs)\n\n return context\n\n\ndef _run_requests_with_base_asset_jobs(\n run_requests: Iterable[RunRequest],\n context: SensorEvaluationContext,\n outer_asset_selection: AssetSelection,\n) -> Sequence[RunRequest]:\n """For sensors that target asset selections instead of jobs, finds the corresponding base asset\n for a selected set of assets.\n """\n asset_graph = context.repository_def.asset_graph # type: ignore # (possible none)\n result = []\n for run_request in run_requests:\n if run_request.asset_selection:\n asset_keys = run_request.asset_selection\n\n unexpected_asset_keys = (\n AssetSelection.keys(*asset_keys) - outer_asset_selection\n ).resolve(asset_graph)\n if unexpected_asset_keys:\n raise DagsterInvalidSubsetError(\n "RunRequest includes asset keys that are not part of sensor's asset_selection:"\n f" {unexpected_asset_keys}"\n )\n else:\n asset_keys = outer_asset_selection.resolve(asset_graph)\n\n base_job = context.repository_def.get_implicit_job_def_for_assets(asset_keys) # type: ignore # (possible none)\n result.append(\n run_request.with_replaced_attrs(\n 
job_name=base_job.name, asset_selection=list(asset_keys) # type: ignore # (possible none)\n )\n )\n\n return result\n
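# Hedged sketch of the asset-selection path handled by
# _run_requests_with_base_asset_jobs above: a sensor that targets an asset
# selection and requests a run for a subset of those assets. Asset names are
# hypothetical.
from dagster import AssetKey, AssetSelection, RunRequest, asset, sensor


@asset
def raw_events():
    ...


@asset
def cleaned_events(raw_events):
    ...


@sensor(asset_selection=AssetSelection.keys("raw_events", "cleaned_events"))
def events_sensor(context):
    # requesting a key outside the sensor's asset_selection would raise
    # DagsterInvalidSubsetError when the run request is resolved
    return RunRequest(asset_selection=[AssetKey("raw_events")])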
", "current_page_name": "_modules/dagster/_core/definitions/sensor_definition", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.sensor_definition"}, "source_asset": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.source_asset

\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Callable,\n    Dict,\n    Iterator,\n    Mapping,\n    Optional,\n    cast,\n)\n\nfrom typing_extensions import TypeAlias\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, experimental_param, public\nfrom dagster._core.decorator_utils import get_function_params\nfrom dagster._core.definitions.data_version import (\n    DATA_VERSION_TAG,\n    DataVersion,\n    DataVersionsByPartition,\n)\nfrom dagster._core.definitions.events import AssetKey, AssetObservation, CoercibleToAssetKey\nfrom dagster._core.definitions.metadata import (\n    ArbitraryMetadataMapping,\n    MetadataMapping,\n    normalize_metadata,\n)\nfrom dagster._core.definitions.op_definition import OpDefinition\nfrom dagster._core.definitions.partition import PartitionsDefinition\nfrom dagster._core.definitions.resource_annotation import get_resource_args\nfrom dagster._core.definitions.resource_definition import ResourceDefinition\nfrom dagster._core.definitions.resource_requirement import (\n    ResourceAddable,\n    ResourceRequirement,\n    SourceAssetIOManagerRequirement,\n    ensure_requirements_satisfied,\n    get_resource_key_conflicts,\n)\nfrom dagster._core.definitions.utils import (\n    DEFAULT_GROUP_NAME,\n    DEFAULT_IO_MANAGER_KEY,\n    validate_group_name,\n)\nfrom dagster._core.errors import (\n    DagsterInvalidDefinitionError,\n    DagsterInvalidInvocationError,\n    DagsterInvalidObservationError,\n)\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.decorators.op_decorator import (\n        DecoratedOpFunction,\n    )\nfrom dagster._core.storage.io_manager import IOManagerDefinition\nfrom dagster._utils.merger import merge_dicts\nfrom dagster._utils.warnings import disable_dagster_warnings\n\n# Going with this catch-all for the time-being to permit pythonic resources\nSourceAssetObserveFunction: TypeAlias = Callable[..., Any]\n\n\ndef wrap_source_asset_observe_fn_in_op_compute_fn(\n    source_asset: "SourceAsset",\n) -> "DecoratedOpFunction":\n    from dagster._core.definitions.decorators.op_decorator import (\n        DecoratedOpFunction,\n        is_context_provided,\n    )\n    from dagster._core.execution.context.compute import (\n        OpExecutionContext,\n    )\n\n    check.not_none(source_asset.observe_fn, "Must be an observable source asset")\n    assert source_asset.observe_fn  # for type checker\n\n    observe_fn = source_asset.observe_fn\n\n    observe_fn_has_context = is_context_provided(get_function_params(observe_fn))\n\n    def fn(context: OpExecutionContext) -> None:\n        resource_kwarg_keys = [param.name for param in get_resource_args(observe_fn)]\n        resource_kwargs = {key: getattr(context.resources, key) for key in resource_kwarg_keys}\n        observe_fn_return_value = (\n            observe_fn(context, **resource_kwargs)\n            if observe_fn_has_context\n            else observe_fn(**resource_kwargs)\n        )\n\n        if isinstance(observe_fn_return_value, DataVersion):\n            if source_asset.partitions_def is not None:\n                raise DagsterInvalidObservationError(\n                    f"{source_asset.key} is partitioned, so its observe function should return a"\n                    " DataVersionsByPartition, not a DataVersion"\n                )\n\n            context.log_event(\n                AssetObservation(\n                    asset_key=source_asset.key,\n                    tags={DATA_VERSION_TAG: observe_fn_return_value.value},\n    
            )\n            )\n        elif isinstance(observe_fn_return_value, DataVersionsByPartition):\n            if source_asset.partitions_def is None:\n                raise DagsterInvalidObservationError(\n                    f"{source_asset.key} is not partitioned, so its observe function should return"\n                    " a DataVersion, not a DataVersionsByPartition"\n                )\n\n            for (\n                partition_key,\n                data_version,\n            ) in observe_fn_return_value.data_versions_by_partition.items():\n                context.log_event(\n                    AssetObservation(\n                        asset_key=source_asset.key,\n                        tags={DATA_VERSION_TAG: data_version.value},\n                        partition=partition_key,\n                    )\n                )\n        else:\n            raise DagsterInvalidObservationError(\n                f"Observe function for {source_asset.key} must return a DataVersion or"\n                " DataVersionsByPartition, but returned a value of type"\n                f" {type(observe_fn_return_value)}"\n            )\n\n    return DecoratedOpFunction(fn)\n\n\n
[docs]@experimental_param(param="resource_defs")\n@experimental_param(param="io_manager_def")\nclass SourceAsset(ResourceAddable):\n """A SourceAsset represents an asset that will be loaded by (but not updated by) Dagster.\n\n Attributes:\n key (Union[AssetKey, Sequence[str], str]): The key of the asset.\n metadata (Mapping[str, MetadataValue]): Metadata associated with the asset.\n io_manager_key (Optional[str]): The key for the IOManager that will be used to load the contents of\n the asset when it's used as an input to other assets inside a job.\n io_manager_def (Optional[IOManagerDefinition]): (Experimental) The definition of the IOManager that will be used to load the contents of\n the asset when it's used as an input to other assets inside a job.\n resource_defs (Optional[Mapping[str, ResourceDefinition]]): (Experimental) resource definitions that may be required by the :py:class:`dagster.IOManagerDefinition` provided in the `io_manager_def` argument.\n description (Optional[str]): The description of the asset.\n partitions_def (Optional[PartitionsDefinition]): Defines the set of partition keys that\n compose the asset.\n observe_fn (Optional[SourceAssetObserveFunction]) Observation function for the source asset.\n """\n\n key: PublicAttr[AssetKey]\n metadata: PublicAttr[MetadataMapping]\n raw_metadata: PublicAttr[ArbitraryMetadataMapping]\n io_manager_key: PublicAttr[Optional[str]]\n _io_manager_def: PublicAttr[Optional[IOManagerDefinition]]\n description: PublicAttr[Optional[str]]\n partitions_def: PublicAttr[Optional[PartitionsDefinition]]\n group_name: PublicAttr[str]\n resource_defs: PublicAttr[Dict[str, ResourceDefinition]]\n observe_fn: PublicAttr[Optional[SourceAssetObserveFunction]]\n _node_def: Optional[OpDefinition] # computed lazily\n auto_observe_interval_minutes: Optional[float]\n\n def __init__(\n self,\n key: CoercibleToAssetKey,\n metadata: Optional[ArbitraryMetadataMapping] = None,\n io_manager_key: Optional[str] = None,\n io_manager_def: Optional[object] = None,\n description: Optional[str] = None,\n partitions_def: Optional[PartitionsDefinition] = None,\n group_name: Optional[str] = None,\n resource_defs: Optional[Mapping[str, object]] = None,\n observe_fn: Optional[SourceAssetObserveFunction] = None,\n *,\n auto_observe_interval_minutes: Optional[float] = None,\n # This is currently private because it is necessary for source asset observation functions,\n # but we have not yet decided on a final API for associated one or more ops with a source\n # asset. 
If we were to make this public, then we would have a canonical public\n # `required_resource_keys` used for observation that might end up conflicting with a set of\n # required resource keys for a different operation.\n _required_resource_keys: Optional[AbstractSet[str]] = None,\n # Add additional fields to with_resources and with_group below\n ):\n from dagster._core.execution.build_resources import (\n wrap_resources_for_execution,\n )\n\n self.key = AssetKey.from_coercible(key)\n metadata = check.opt_mapping_param(metadata, "metadata", key_type=str)\n self.raw_metadata = metadata\n self.metadata = normalize_metadata(metadata, allow_invalid=True)\n\n resource_defs_dict = dict(check.opt_mapping_param(resource_defs, "resource_defs"))\n if io_manager_def:\n if not io_manager_key:\n io_manager_key = self.key.to_python_identifier("io_manager")\n\n if (\n io_manager_key in resource_defs_dict\n and resource_defs_dict[io_manager_key] != io_manager_def\n ):\n raise DagsterInvalidDefinitionError(\n f"Provided conflicting definitions for io manager key '{io_manager_key}'."\n " Please provide only one definition per key."\n )\n\n resource_defs_dict[io_manager_key] = io_manager_def\n\n self.resource_defs = wrap_resources_for_execution(resource_defs_dict)\n\n self.io_manager_key = check.opt_str_param(io_manager_key, "io_manager_key")\n self.partitions_def = check.opt_inst_param(\n partitions_def, "partitions_def", PartitionsDefinition\n )\n self.group_name = validate_group_name(group_name)\n self.description = check.opt_str_param(description, "description")\n self.observe_fn = check.opt_callable_param(observe_fn, "observe_fn")\n self._required_resource_keys = check.opt_set_param(\n _required_resource_keys, "_required_resource_keys", of_type=str\n )\n self._node_def = None\n self.auto_observe_interval_minutes = check.opt_numeric_param(\n auto_observe_interval_minutes, "auto_observe_interval_minutes"\n )\n\n def get_io_manager_key(self) -> str:\n return self.io_manager_key or DEFAULT_IO_MANAGER_KEY\n\n @property\n def io_manager_def(self) -> Optional[IOManagerDefinition]:\n io_manager_key = self.get_io_manager_key()\n return cast(\n Optional[IOManagerDefinition],\n self.resource_defs.get(io_manager_key) if io_manager_key else None,\n )\n\n @public\n @property\n def op(self) -> OpDefinition:\n """OpDefinition: The OpDefinition associated with the observation function of an observable\n source asset.\n\n Throws an error if the asset is not observable.\n """\n check.invariant(\n isinstance(self.node_def, OpDefinition),\n "The NodeDefinition for this AssetsDefinition is not of type OpDefinition.",\n )\n return cast(OpDefinition, self.node_def)\n\n @public\n @property\n def is_observable(self) -> bool:\n """bool: Whether the asset is observable."""\n return self.node_def is not None\n\n @property\n def required_resource_keys(self) -> AbstractSet[str]:\n return {requirement.key for requirement in self.get_resource_requirements()}\n\n @property\n def node_def(self) -> Optional[OpDefinition]:\n """Op that generates observation metadata for a source asset."""\n if self.observe_fn is None:\n return None\n\n if self._node_def is None:\n self._node_def = OpDefinition(\n compute_fn=wrap_source_asset_observe_fn_in_op_compute_fn(self),\n name=self.key.to_python_identifier(),\n description=self.description,\n required_resource_keys=self._required_resource_keys,\n )\n return self._node_def\n\n def with_resources(self, resource_defs) -> "SourceAsset":\n from dagster._core.execution.resources_init import 
get_transitive_required_resource_keys\n\n overlapping_keys = get_resource_key_conflicts(self.resource_defs, resource_defs)\n if overlapping_keys:\n raise DagsterInvalidInvocationError(\n f"SourceAsset with key {self.key} has conflicting resource "\n "definitions with provided resources for the following keys: "\n f"{sorted(list(overlapping_keys))}. Either remove the existing "\n "resources from the asset or change the resource keys so that "\n "they don't overlap."\n )\n\n merged_resource_defs = merge_dicts(resource_defs, self.resource_defs)\n\n # Ensure top-level resource requirements are met - except for\n # io_manager, since that is a default it can be resolved later.\n ensure_requirements_satisfied(merged_resource_defs, list(self.get_resource_requirements()))\n\n io_manager_def = merged_resource_defs.get(self.get_io_manager_key())\n if not io_manager_def and self.get_io_manager_key() != DEFAULT_IO_MANAGER_KEY:\n raise DagsterInvalidDefinitionError(\n f"SourceAsset with asset key {self.key} requires IO manager with key"\n f" '{self.get_io_manager_key()}', but none was provided."\n )\n relevant_keys = get_transitive_required_resource_keys(\n {*self._required_resource_keys, self.get_io_manager_key()}, merged_resource_defs\n )\n\n relevant_resource_defs = {\n key: resource_def\n for key, resource_def in merged_resource_defs.items()\n if key in relevant_keys\n }\n\n io_manager_key = (\n self.get_io_manager_key()\n if self.get_io_manager_key() != DEFAULT_IO_MANAGER_KEY\n else None\n )\n with disable_dagster_warnings():\n return SourceAsset(\n key=self.key,\n io_manager_key=io_manager_key,\n description=self.description,\n partitions_def=self.partitions_def,\n metadata=self.raw_metadata,\n resource_defs=relevant_resource_defs,\n group_name=self.group_name,\n observe_fn=self.observe_fn,\n auto_observe_interval_minutes=self.auto_observe_interval_minutes,\n _required_resource_keys=self._required_resource_keys,\n )\n\n def with_attributes(\n self, group_name: Optional[str] = None, key: Optional[AssetKey] = None\n ) -> "SourceAsset":\n if group_name is not None and self.group_name != DEFAULT_GROUP_NAME:\n raise DagsterInvalidDefinitionError(\n "A group name has already been provided to source asset"\n f" {self.key.to_user_string()}"\n )\n\n with disable_dagster_warnings():\n return SourceAsset(\n key=key or self.key,\n metadata=self.raw_metadata,\n io_manager_key=self.io_manager_key,\n io_manager_def=self.io_manager_def,\n description=self.description,\n partitions_def=self.partitions_def,\n group_name=group_name,\n resource_defs=self.resource_defs,\n observe_fn=self.observe_fn,\n auto_observe_interval_minutes=self.auto_observe_interval_minutes,\n _required_resource_keys=self._required_resource_keys,\n )\n\n def get_resource_requirements(self) -> Iterator[ResourceRequirement]:\n if self.node_def is not None:\n yield from self.node_def.get_resource_requirements()\n yield SourceAssetIOManagerRequirement(\n key=self.get_io_manager_key(), asset_key=self.key.to_string()\n )\n for source_key, resource_def in self.resource_defs.items():\n yield from resource_def.get_resource_requirements(outer_context=source_key)\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, SourceAsset):\n return False\n else:\n return (\n self.key == other.key\n and self.raw_metadata == other.raw_metadata\n and self.io_manager_key == other.io_manager_key\n and self.description == other.description\n and self.group_name == other.group_name\n and self.resource_defs == other.resource_defs\n and self.observe_fn == 
other.observe_fn\n )
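# Hedged sketch (not part of the module above): a SourceAsset whose observe_fn
# reports a DataVersion, which wrap_source_asset_observe_fn_in_op_compute_fn turns
# into an AssetObservation at observation time. The key, description, and version
# value are hypothetical.
from dagster import DataVersion, SourceAsset


def observe_exports() -> DataVersion:
    # a real observation would hash the external data, e.g. a file or table snapshot
    return DataVersion("2023-10-12")


exports_source_asset = SourceAsset(
    key="warehouse_exports",
    description="Files exported by an upstream system outside of Dagster.",
    observe_fn=observe_exports,
)

assert exports_source_asset.is_observable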
\n
", "current_page_name": "_modules/dagster/_core/definitions/source_asset", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.source_asset"}, "step_launcher": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.step_launcher

\nfrom abc import ABC, abstractmethod\nfrom typing import TYPE_CHECKING, Iterator, Mapping, NamedTuple, Optional\n\nimport dagster._check as check\nfrom dagster._core.definitions.reconstruct import ReconstructableJob\nfrom dagster._core.execution.retries import RetryMode\nfrom dagster._core.storage.dagster_run import DagsterRun\n\nif TYPE_CHECKING:\n    from dagster._core.events import DagsterEvent\n    from dagster._core.execution.context.system import StepExecutionContext\n    from dagster._core.execution.plan.state import KnownExecutionState\n\n\n
[docs]class StepRunRef(\n NamedTuple(\n "_StepRunRef",\n [\n ("run_config", Mapping[str, object]),\n ("dagster_run", DagsterRun),\n ("run_id", str),\n ("retry_mode", RetryMode),\n ("step_key", str),\n ("recon_job", ReconstructableJob),\n ("known_state", Optional["KnownExecutionState"]),\n ],\n )\n):\n """A serializable object that specifies what's needed to hydrate a step so\n that it can be executed in a process outside the plan process.\n\n Users should not instantiate this class directly.\n """\n\n def __new__(\n cls,\n run_config: Mapping[str, object],\n dagster_run: DagsterRun,\n run_id: str,\n retry_mode: RetryMode,\n step_key: str,\n recon_job: ReconstructableJob,\n known_state: Optional["KnownExecutionState"],\n ):\n from dagster._core.execution.plan.state import KnownExecutionState\n\n return super(StepRunRef, cls).__new__(\n cls,\n check.mapping_param(run_config, "run_config", key_type=str),\n check.inst_param(dagster_run, "dagster_run", DagsterRun),\n check.str_param(run_id, "run_id"),\n check.inst_param(retry_mode, "retry_mode", RetryMode),\n check.str_param(step_key, "step_key"),\n check.inst_param(recon_job, "recon_job", ReconstructableJob),\n check.opt_inst_param(known_state, "known_state", KnownExecutionState),\n )
\n\n\n
[docs]class StepLauncher(ABC):\n """A StepLauncher is responsible for executing steps, either in-process or in an external process."""\n\n @abstractmethod\n def launch_step(self, step_context: "StepExecutionContext") -> Iterator["DagsterEvent"]:\n """Args:\n step_context (StepExecutionContext): The context that we're executing the step in.\n\n Returns:\n Iterator[DagsterEvent]: The events for the step.\n """
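# Hedged skeleton (not part of the module above) showing only the shape of a
# custom StepLauncher: a real implementation would package a StepRunRef for the
# external process and yield the DagsterEvents it produces back to the plan
# process. The class name is hypothetical and the body is intentionally a stub.
from typing import Iterator

from dagster import StepLauncher
from dagster._core.events import DagsterEvent
from dagster._core.execution.context.system import StepExecutionContext


class ExternalProcessStepLauncher(StepLauncher):
    def launch_step(self, step_context: StepExecutionContext) -> Iterator[DagsterEvent]:
        # placeholder: ship the step to an external process and stream its events back
        raise NotImplementedError("sketch only")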
\n
", "current_page_name": "_modules/dagster/_core/definitions/step_launcher", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.step_launcher"}, "time_window_partition_mapping": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.time_window_partition_mapping

\nfrom datetime import datetime\nfrom typing import NamedTuple, Optional, cast\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, experimental_param\nfrom dagster._core.definitions.partition import PartitionsDefinition, PartitionsSubset\nfrom dagster._core.definitions.partition_mapping import PartitionMapping, UpstreamPartitionsResult\nfrom dagster._core.definitions.time_window_partitions import (\n    TimeWindow,\n    TimeWindowPartitionsDefinition,\n    TimeWindowPartitionsSubset,\n)\nfrom dagster._core.errors import DagsterInvalidDefinitionError\nfrom dagster._core.instance import DynamicPartitionsStore\nfrom dagster._serdes import whitelist_for_serdes\n\n\n
[docs]@whitelist_for_serdes\n@experimental_param(param="allow_nonexistent_upstream_partitions")\nclass TimeWindowPartitionMapping(\n PartitionMapping,\n NamedTuple(\n "_TimeWindowPartitionMapping",\n [\n ("start_offset", PublicAttr[int]),\n ("end_offset", PublicAttr[int]),\n ("allow_nonexistent_upstream_partitions", PublicAttr[bool]),\n ],\n ),\n):\n """The default mapping between two TimeWindowPartitionsDefinitions.\n\n A partition in the downstream partitions definition is mapped to all partitions in the upstream\n asset whose time windows overlap it.\n\n This means that, if the upstream and downstream partitions definitions share the same time\n period, then this mapping is essentially the identity partition mapping - plus conversion of\n datetime formats.\n\n If the upstream time period is coarser than the downstream time period, then each partition in\n the downstream asset will map to a single (larger) upstream partition. E.g. if the downstream is\n hourly and the upstream is daily, then each hourly partition in the downstream will map to the\n daily partition in the upstream that contains that hour.\n\n If the upstream time period is finer than the downstream time period, then each partition in the\n downstream asset will map to multiple upstream partitions. E.g. if the downstream is daily and\n the upstream is hourly, then each daily partition in the downstream asset will map to the 24\n hourly partitions in the upstream that occur on that day.\n\n Attributes:\n start_offset (int): If not 0, then the starts of the upstream windows are shifted by this\n offset relative to the starts of the downstream windows. For example, if start_offset=-1\n and end_offset=0, then the downstream partition "2022-07-04" would map to the upstream\n partitions "2022-07-03" and "2022-07-04". Only permitted to be non-zero when the\n upstream and downstream PartitionsDefinitions are the same. Defaults to 0.\n end_offset (int): If not 0, then the ends of the upstream windows are shifted by this\n offset relative to the ends of the downstream windows. For example, if start_offset=0\n and end_offset=1, then the downstream partition "2022-07-04" would map to the upstream\n partitions "2022-07-04" and "2022-07-05". Only permitted to be non-zero when the\n upstream and downstream PartitionsDefinitions are the same. Defaults to 0.\n allow_nonexistent_upstream_partitions (bool): Defaults to false. If true, does not\n raise an error when mapped upstream partitions fall outside the start-end time window of the\n partitions def. For example, if the upstream partitions def starts on "2023-01-01" but\n the downstream starts on "2022-01-01", setting this bool to true would return no\n partition keys when get_upstream_partitions_for_partitions is called with "2022-06-01".\n When set to false, would raise an error.\n\n Examples:\n .. 
code-block:: python\n\n from dagster import DailyPartitionsDefinition, TimeWindowPartitionMapping, AssetIn, asset\n\n partitions_def = DailyPartitionsDefinition(start_date="2020-01-01")\n\n @asset(partitions_def=partitions_def)\n def asset1():\n ...\n\n @asset(\n partitions_def=partitions_def,\n ins={\n "asset1": AssetIn(\n partition_mapping=TimeWindowPartitionMapping(start_offset=-1)\n )\n }\n )\n def asset2(asset1):\n ...\n """\n\n def __new__(\n cls,\n start_offset: int = 0,\n end_offset: int = 0,\n allow_nonexistent_upstream_partitions: bool = False,\n ):\n return super(TimeWindowPartitionMapping, cls).__new__(\n cls,\n start_offset=check.int_param(start_offset, "start_offset"),\n end_offset=check.int_param(end_offset, "end_offset"),\n allow_nonexistent_upstream_partitions=check.bool_param(\n allow_nonexistent_upstream_partitions,\n "allow_nonexistent_upstream_partitions",\n ),\n )\n\n def get_upstream_mapped_partitions_result_for_partitions(\n self,\n downstream_partitions_subset: Optional[PartitionsSubset],\n upstream_partitions_def: PartitionsDefinition,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> UpstreamPartitionsResult:\n if not isinstance(downstream_partitions_subset, TimeWindowPartitionsSubset):\n check.failed("downstream_partitions_subset must be a TimeWindowPartitionsSubset")\n\n return self._map_partitions(\n downstream_partitions_subset.partitions_def,\n upstream_partitions_def,\n downstream_partitions_subset,\n start_offset=self.start_offset,\n end_offset=self.end_offset,\n current_time=current_time,\n )\n\n def get_downstream_partitions_for_partitions(\n self,\n upstream_partitions_subset: PartitionsSubset,\n downstream_partitions_def: Optional[PartitionsDefinition],\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> PartitionsSubset:\n """Returns the partitions in the downstream asset that map to the given upstream partitions.\n\n Filters for partitions that exist at the given current_time, fetching the current time\n if not provided.\n """\n return self._map_partitions(\n upstream_partitions_subset.partitions_def,\n downstream_partitions_def,\n upstream_partitions_subset,\n end_offset=-self.start_offset,\n start_offset=-self.end_offset,\n current_time=current_time,\n ).partitions_subset\n\n def _map_partitions(\n self,\n from_partitions_def: PartitionsDefinition,\n to_partitions_def: Optional[PartitionsDefinition],\n from_partitions_subset: PartitionsSubset,\n start_offset: int,\n end_offset: int,\n current_time: Optional[datetime] = None,\n ) -> UpstreamPartitionsResult:\n """Maps the partitions in from_partitions_subset to partitions in to_partitions_def.\n\n If partitions in from_partitions_subset represent time windows that do not exist in\n to_partitions_def, raises an error if raise_error_on_invalid_mapped_partition is True.\n Otherwise, filters out the partitions that do not exist in to_partitions_def and returns\n the filtered subset, also returning a bool indicating whether there were mapped time windows\n that did not exist in to_partitions_def.\n """\n if not isinstance(from_partitions_subset, TimeWindowPartitionsSubset):\n check.failed("from_partitions_subset must be a TimeWindowPartitionsSubset")\n\n if not isinstance(from_partitions_def, TimeWindowPartitionsDefinition):\n check.failed("from_partitions_def must be a TimeWindowPartitionsDefinition")\n\n if not isinstance(to_partitions_def, 
TimeWindowPartitionsDefinition):\n check.failed("to_partitions_def must be a TimeWindowPartitionsDefinition")\n\n if (start_offset != 0 or end_offset != 0) and (\n from_partitions_def.cron_schedule != to_partitions_def.cron_schedule\n ):\n raise DagsterInvalidDefinitionError(\n "Can't use the start_offset or end_offset parameters of"\n " TimeWindowPartitionMapping when the cron schedule of the upstream"\n " PartitionsDefinition is different than the cron schedule of the downstream"\n f" one. Attempted to map from cron schedule '{from_partitions_def.cron_schedule}' "\n f"to cron schedule '{to_partitions_def.cron_schedule}'."\n )\n\n if to_partitions_def.timezone != from_partitions_def.timezone:\n raise DagsterInvalidDefinitionError("Timezones don't match")\n\n # skip fancy mapping logic in the simple case\n if from_partitions_def == to_partitions_def and start_offset == 0 and end_offset == 0:\n return UpstreamPartitionsResult(from_partitions_subset, [])\n\n time_windows = []\n for from_partition_time_window in from_partitions_subset.included_time_windows:\n from_start_dt, from_end_dt = from_partition_time_window\n offsetted_start_dt = _offsetted_datetime(\n from_partitions_def, from_start_dt, start_offset\n )\n offsetted_end_dt = _offsetted_datetime(from_partitions_def, from_end_dt, end_offset)\n\n to_start_partition_key = (\n to_partitions_def.get_partition_key_for_timestamp(\n offsetted_start_dt.timestamp(), end_closed=False\n )\n if offsetted_start_dt is not None\n else None\n )\n to_end_partition_key = (\n to_partitions_def.get_partition_key_for_timestamp(\n offsetted_end_dt.timestamp(), end_closed=True\n )\n if offsetted_end_dt is not None\n else None\n )\n\n if to_start_partition_key is not None or to_end_partition_key is not None:\n window_start = (\n to_partitions_def.start_time_for_partition_key(to_start_partition_key)\n if to_start_partition_key\n else cast(TimeWindow, to_partitions_def.get_first_partition_window()).start\n )\n window_end = (\n to_partitions_def.end_time_for_partition_key(to_end_partition_key)\n if to_end_partition_key\n else cast(TimeWindow, to_partitions_def.get_last_partition_window()).end\n )\n\n if window_start < window_end:\n time_windows.append(TimeWindow(window_start, window_end))\n\n first_window = to_partitions_def.get_first_partition_window(current_time=current_time)\n last_window = to_partitions_def.get_last_partition_window(current_time=current_time)\n\n filtered_time_windows = []\n required_but_nonexistent_partition_keys = set()\n\n for time_window in time_windows:\n if (\n first_window\n and last_window\n and time_window.start <= last_window.start\n and time_window.end >= first_window.end\n ):\n window_start = max(time_window.start, first_window.start)\n window_end = min(time_window.end, last_window.end)\n filtered_time_windows.append(TimeWindow(window_start, window_end))\n\n if self.allow_nonexistent_upstream_partitions:\n # If allowed to have nonexistent upstream partitions, do not consider\n # out of range partitions to be invalid\n continue\n else:\n invalid_time_window = None\n if not (first_window and last_window) or (\n time_window.start < first_window.start and time_window.end > last_window.end\n ):\n invalid_time_window = time_window\n elif time_window.start < first_window.start:\n invalid_time_window = TimeWindow(\n time_window.start, min(time_window.end, first_window.start)\n )\n elif time_window.end > last_window.end:\n invalid_time_window = TimeWindow(\n max(time_window.start, last_window.end), time_window.end\n )\n\n if 
invalid_time_window:\n required_but_nonexistent_partition_keys.update(\n set(\n to_partitions_def.get_partition_keys_in_time_window(\n time_window=invalid_time_window\n )\n )\n )\n\n return UpstreamPartitionsResult(\n TimeWindowPartitionsSubset(\n to_partitions_def,\n num_partitions=sum(\n len(to_partitions_def.get_partition_keys_in_time_window(time_window))\n for time_window in filtered_time_windows\n ),\n included_time_windows=filtered_time_windows,\n ),\n sorted(list(required_but_nonexistent_partition_keys)),\n )
\n\n\ndef _offsetted_datetime(\n partitions_def: TimeWindowPartitionsDefinition, dt: datetime, offset: int\n) -> Optional[datetime]:\n for _ in range(abs(offset)):\n if offset < 0:\n prev_window = partitions_def.get_prev_partition_window(dt)\n if prev_window is None:\n return None\n\n dt = prev_window.start\n else:\n # TODO: what if we're at the end of the line?\n next_window = partitions_def.get_next_partition_window(dt)\n if next_window is None:\n return None\n\n dt = next_window.end\n\n return dt\n
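# Hedged sketch of the allow_nonexistent_upstream_partitions behavior described in
# the TimeWindowPartitionMapping docstring above: the upstream partitions
# definition starts later than the downstream one, and out-of-range mapped
# partitions are dropped instead of raising. Asset names are hypothetical.
from dagster import AssetIn, DailyPartitionsDefinition, TimeWindowPartitionMapping, asset


@asset(partitions_def=DailyPartitionsDefinition(start_date="2023-01-01"))
def upstream():
    ...


@asset(
    partitions_def=DailyPartitionsDefinition(start_date="2022-01-01"),
    ins={
        "upstream": AssetIn(
            partition_mapping=TimeWindowPartitionMapping(
                allow_nonexistent_upstream_partitions=True
            )
        )
    },
)
def downstream(upstream):
    ...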
", "current_page_name": "_modules/dagster/_core/definitions/time_window_partition_mapping", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.time_window_partition_mapping"}, "time_window_partitions": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.time_window_partitions

\nimport functools\nimport hashlib\nimport json\nimport re\nfrom datetime import datetime\nfrom enum import Enum\nfrom typing import (\n    AbstractSet,\n    Any,\n    Callable,\n    FrozenSet,\n    Iterable,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Tuple,\n    Type,\n    Union,\n    cast,\n)\n\nimport pendulum\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, public\nfrom dagster._core.instance import DynamicPartitionsStore\nfrom dagster._utils.partitions import DEFAULT_HOURLY_FORMAT_WITHOUT_TIMEZONE\nfrom dagster._utils.schedules import (\n    cron_string_iterator,\n    is_valid_cron_schedule,\n    reverse_cron_string_iterator,\n)\n\nfrom ..errors import (\n    DagsterInvalidDefinitionError,\n    DagsterInvalidDeserializationVersionError,\n)\nfrom .partition import (\n    DEFAULT_DATE_FORMAT,\n    PartitionedConfig,\n    PartitionsDefinition,\n    PartitionsSubset,\n    ScheduleType,\n    cron_schedule_from_schedule_type_and_offsets,\n)\nfrom .partition_key_range import PartitionKeyRange\n\n\n
[docs]class TimeWindow(NamedTuple):\n """An interval that is closed at the start and open at the end.\n\n Attributes:\n start (datetime): A pendulum datetime that marks the start of the window.\n end (datetime): A pendulum datetime that marks the end of the window.\n """\n\n start: PublicAttr[datetime]\n end: PublicAttr[datetime]
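Because each window is closed at the start and open at the end, consecutive windows tile time without overlap. A small sketch (importing TimeWindow from this internal module, whose path may change between releases):

import pendulum

from dagster._core.definitions.time_window_partitions import TimeWindow

# The daily window for 2022-03-12 spans [midnight, next midnight).
window = TimeWindow(
    start=pendulum.datetime(2022, 3, 12, tz="UTC"),
    end=pendulum.datetime(2022, 3, 13, tz="UTC"),
)
assert window.start <= pendulum.datetime(2022, 3, 12, 23, 59, tz="UTC") < window.end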
\n\n\n
[docs]class TimeWindowPartitionsDefinition(\n PartitionsDefinition,\n NamedTuple(\n "_TimeWindowPartitionsDefinition",\n [\n ("start", PublicAttr[datetime]),\n ("timezone", PublicAttr[str]),\n ("end", PublicAttr[Optional[datetime]]),\n ("fmt", PublicAttr[str]),\n ("end_offset", PublicAttr[int]),\n ("cron_schedule", PublicAttr[str]),\n ],\n ),\n):\n r"""A set of partitions where each partitions corresponds to a time window.\n\n The provided cron_schedule determines the bounds of the time windows. E.g. a cron_schedule of\n "0 0 \\\\* \\\\* \\\\*" will result in daily partitions that start at midnight and end at midnight of the\n following day.\n\n The string partition_key associated with each partition corresponds to the start of the\n partition's time window.\n\n The first partition in the set will start on at the first cron_schedule tick that is equal to\n or after the given start datetime. The last partition in the set will end before the current\n time, unless the end_offset argument is set to a positive number.\n\n Args:\n cron_schedule (str): Determines the bounds of the time windows.\n start (datetime): The first partition in the set will start on at the first cron_schedule\n tick that is equal to or after this value.\n timezone (Optional[str]): The timezone in which each time should exist.\n Supported strings for timezones are the ones provided by the\n `IANA time zone database <https://www.iana.org/time-zones>` - e.g. "America/Los_Angeles".\n end (datetime): The last partition (excluding) in the set.\n fmt (str): The date format to use for partition_keys.\n end_offset (int): Extends the partition set by a number of partitions equal to the value\n passed. If end_offset is 0 (the default), the last partition ends before the current\n time. If end_offset is 1, the second-to-last partition ends before the current time,\n and so on.\n """\n\n def __new__(\n cls,\n start: Union[datetime, str],\n fmt: str,\n end: Union[datetime, str, None] = None,\n schedule_type: Optional[ScheduleType] = None,\n timezone: Optional[str] = None,\n end_offset: int = 0,\n minute_offset: Optional[int] = None,\n hour_offset: Optional[int] = None,\n day_offset: Optional[int] = None,\n cron_schedule: Optional[str] = None,\n ):\n check.opt_str_param(timezone, "timezone")\n timezone = timezone or "UTC"\n\n if isinstance(start, datetime):\n start_dt = pendulum.instance(start, tz=timezone)\n else:\n start_dt = pendulum.instance(datetime.strptime(start, fmt), tz=timezone)\n\n if not end:\n end_dt = None\n elif isinstance(end, datetime):\n end_dt = pendulum.instance(end, tz=timezone)\n else:\n end_dt = pendulum.instance(datetime.strptime(end, fmt), tz=timezone)\n\n if cron_schedule is not None:\n check.invariant(\n schedule_type is None and not minute_offset and not hour_offset and not day_offset,\n "If cron_schedule argument is provided, then schedule_type, minute_offset, "\n "hour_offset, and day_offset can't also be provided",\n )\n else:\n if schedule_type is None:\n check.failed("One of schedule_type and cron_schedule must be provided")\n\n cron_schedule = cron_schedule_from_schedule_type_and_offsets(\n schedule_type=schedule_type,\n minute_offset=minute_offset or 0,\n hour_offset=hour_offset or 0,\n day_offset=day_offset or 0,\n )\n\n if not is_valid_cron_schedule(cron_schedule):\n raise DagsterInvalidDefinitionError(\n f"Found invalid cron schedule '{cron_schedule}' for a"\n " TimeWindowPartitionsDefinition."\n )\n\n return super(TimeWindowPartitionsDefinition, cls).__new__(\n cls, start_dt, timezone, end_dt, 
fmt, end_offset, cron_schedule\n )\n\n def get_current_timestamp(self, current_time: Optional[datetime] = None) -> float:\n return (\n pendulum.instance(current_time, tz=self.timezone)\n if current_time\n else pendulum.now(self.timezone)\n ).timestamp()\n\n def get_num_partitions(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> int:\n # Method added for performance reasons.\n # Fetching partition keys requires significantly more compute time to\n # string format datetimes.\n current_timestamp = self.get_current_timestamp(current_time=current_time)\n\n partitions_past_current_time = 0\n\n num_partitions = 0\n for time_window in self._iterate_time_windows(self.start):\n if self.end and time_window.end.timestamp() > self.end.timestamp():\n break\n if (\n time_window.end.timestamp() <= current_timestamp\n or partitions_past_current_time < self.end_offset\n ):\n num_partitions += 1\n\n if time_window.end.timestamp() > current_timestamp:\n partitions_past_current_time += 1\n else:\n break\n\n if self.end_offset < 0:\n num_partitions += self.end_offset\n\n return num_partitions\n\n def get_partition_keys_between_indexes(\n self, start_idx: int, end_idx: int, current_time: Optional[datetime] = None\n ) -> List[str]:\n # Fetches the partition keys between the given start and end indices.\n # Start index is inclusive, end index is exclusive.\n # Method added for performance reasons, to only string format\n # partition keys included within the indices.\n current_timestamp = self.get_current_timestamp(current_time=current_time)\n\n partitions_past_current_time = 0\n partition_keys = []\n reached_end = False\n\n for idx, time_window in enumerate(self._iterate_time_windows(self.start)):\n if time_window.end.timestamp() >= current_timestamp:\n reached_end = True\n if self.end and time_window.end.timestamp() > self.end.timestamp():\n reached_end = True\n if (\n time_window.end.timestamp() <= current_timestamp\n or partitions_past_current_time < self.end_offset\n ):\n if idx >= start_idx and idx < end_idx:\n partition_keys.append(time_window.start.strftime(self.fmt))\n if time_window.end.timestamp() > current_timestamp:\n partitions_past_current_time += 1\n else:\n break\n if len(partition_keys) >= end_idx - start_idx:\n break\n\n if reached_end and self.end_offset < 0:\n partition_keys = partition_keys[: self.end_offset]\n\n return partition_keys\n\n def get_partition_keys(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Sequence[str]:\n current_timestamp = self.get_current_timestamp(current_time=current_time)\n\n partitions_past_current_time = 0\n partition_keys: List[str] = []\n for time_window in self._iterate_time_windows(self.start):\n if self.end and time_window.end.timestamp() > self.end.timestamp():\n break\n if (\n time_window.end.timestamp() <= current_timestamp\n or partitions_past_current_time < self.end_offset\n ):\n partition_keys.append(time_window.start.strftime(self.fmt))\n\n if time_window.end.timestamp() > current_timestamp:\n partitions_past_current_time += 1\n else:\n break\n\n if self.end_offset < 0:\n partition_keys = partition_keys[: self.end_offset]\n\n return partition_keys\n\n def _get_validated_time_window_for_partition_key(\n self, partition_key: str, current_time: Optional[datetime] = None\n ) -> Optional[TimeWindow]:\n """Returns a TimeWindow for the given partition key if it is valid, otherwise returns None."""\n 
try:\n time_window = self.time_window_for_partition_key(partition_key)\n except ValueError:\n return None\n\n first_partition_window = self.get_first_partition_window(current_time=current_time)\n last_partition_window = self.get_last_partition_window(current_time=current_time)\n if (\n first_partition_window is None\n or last_partition_window is None\n or time_window.start < first_partition_window.start\n or time_window.start > last_partition_window.start\n or time_window.start.strftime(self.fmt) != partition_key\n ):\n return None\n\n return time_window\n\n def __str__(self) -> str:\n schedule_str = (\n self.schedule_type.value.capitalize() if self.schedule_type else self.cron_schedule\n )\n partition_def_str = (\n f"{schedule_str}, starting {self.start.strftime(self.fmt)} {self.timezone}."\n )\n if self.end_offset != 0:\n partition_def_str += (\n " End offsetted by"\n f" {self.end_offset} partition{'' if self.end_offset == 1 else 's'}."\n )\n return partition_def_str\n\n def __repr__(self):\n # Between python 3.8 and 3.9 the repr of a datetime object changed.\n # Replaces start time with timestamp as a workaround to make sure the repr is consistent across versions.\n return (\n f"TimeWindowPartitionsDefinition(start={self.start.timestamp()},"\n f" timezone='{self.timezone}', fmt='{self.fmt}', end_offset={self.end_offset},"\n f" cron_schedule='{self.cron_schedule}')"\n )\n\n def __hash__(self):\n return hash(tuple(self.__repr__()))\n\n @functools.lru_cache(maxsize=100)\n def _time_window_for_partition_key(self, *, partition_key: str) -> TimeWindow:\n partition_key_dt = pendulum.instance(\n datetime.strptime(partition_key, self.fmt), tz=self.timezone\n )\n return next(iter(self._iterate_time_windows(partition_key_dt)))\n\n def time_window_for_partition_key(self, partition_key: str) -> TimeWindow:\n return self._time_window_for_partition_key(partition_key=partition_key)\n\n @functools.lru_cache(maxsize=5)\n def time_windows_for_partition_keys(\n self,\n partition_keys: FrozenSet[str],\n validate: bool = True,\n ) -> Sequence[TimeWindow]:\n if len(partition_keys) == 0:\n return []\n\n sorted_pks = sorted(partition_keys, key=lambda pk: datetime.strptime(pk, self.fmt))\n cur_windows_iterator = iter(\n self._iterate_time_windows(\n pendulum.instance(datetime.strptime(sorted_pks[0], self.fmt), tz=self.timezone)\n )\n )\n partition_key_time_windows: List[TimeWindow] = []\n for partition_key in sorted_pks:\n next_window = next(cur_windows_iterator)\n if next_window.start.strftime(self.fmt) == partition_key:\n partition_key_time_windows.append(next_window)\n else:\n cur_windows_iterator = iter(\n self._iterate_time_windows(\n pendulum.instance(\n datetime.strptime(partition_key, self.fmt), tz=self.timezone\n )\n )\n )\n partition_key_time_windows.append(next(cur_windows_iterator))\n\n if validate:\n start_time_window = self.get_first_partition_window()\n end_time_window = self.get_last_partition_window()\n\n if start_time_window is None or end_time_window is None:\n check.failed("No partitions in the PartitionsDefinition")\n\n start_timestamp = start_time_window.start.timestamp()\n end_timestamp = end_time_window.end.timestamp()\n\n partition_key_time_windows = [\n tw\n for tw in partition_key_time_windows\n if tw.start.timestamp() >= start_timestamp and tw.end.timestamp() <= end_timestamp\n ]\n return partition_key_time_windows\n\n def start_time_for_partition_key(self, partition_key: str) -> datetime:\n partition_key_dt = pendulum.instance(\n datetime.strptime(partition_key, self.fmt), 
tz=self.timezone\n )\n # the datetime format might not include granular components, so we need to recover them\n # we make the assumption that the parsed partition key is <= the start datetime\n return next(iter(self._iterate_time_windows(partition_key_dt))).start\n\n def get_next_partition_key(\n self, partition_key: str, current_time: Optional[datetime] = None\n ) -> Optional[str]:\n last_partition_window = self.get_last_partition_window(current_time)\n if last_partition_window is None:\n return None\n\n partition_key_dt = pendulum.instance(\n datetime.strptime(partition_key, self.fmt), tz=self.timezone\n )\n windows_iter = iter(self._iterate_time_windows(partition_key_dt))\n next(windows_iter)\n start_time = next(windows_iter).start\n if start_time >= last_partition_window.end:\n return None\n else:\n return start_time.strftime(self.fmt)\n\n def get_next_partition_window(\n self, end_dt: datetime, current_time: Optional[datetime] = None\n ) -> Optional[TimeWindow]:\n last_partition_window = self.get_last_partition_window(current_time)\n if last_partition_window is None:\n return None\n\n windows_iter = iter(self._iterate_time_windows(end_dt))\n next_window = next(windows_iter)\n if next_window.start >= last_partition_window.end:\n return None\n else:\n return next_window\n\n def get_prev_partition_window(self, start_dt: datetime) -> Optional[TimeWindow]:\n windows_iter = iter(self._reverse_iterate_time_windows(start_dt))\n prev_window = next(windows_iter)\n first_partition_window = self.get_first_partition_window()\n if first_partition_window is None or prev_window.start < first_partition_window.start:\n return None\n else:\n return prev_window\n\n @functools.lru_cache(maxsize=5)\n def _get_first_partition_window(self, *, current_time: datetime) -> Optional[TimeWindow]:\n current_timestamp = current_time.timestamp()\n\n time_window = next(iter(self._iterate_time_windows(self.start)))\n\n if self.end_offset == 0:\n return time_window if time_window.end.timestamp() <= current_timestamp else None\n elif self.end_offset > 0:\n iterator = iter(self._iterate_time_windows(current_time))\n # first returned time window is time window of current time\n curr_window_plus_offset = next(iterator)\n for _ in range(self.end_offset):\n curr_window_plus_offset = next(iterator)\n return (\n time_window\n if time_window.end.timestamp() <= curr_window_plus_offset.start.timestamp()\n else None\n )\n else:\n # end offset < 0\n end_window = None\n iterator = iter(self._reverse_iterate_time_windows(current_time))\n for _ in range(abs(self.end_offset)):\n end_window = next(iterator)\n\n if end_window is None:\n check.failed("end_window should not be None")\n\n return (\n time_window if time_window.end.timestamp() <= end_window.start.timestamp() else None\n )\n\n def get_first_partition_window(\n self, current_time: Optional[datetime] = None\n ) -> Optional[TimeWindow]:\n current_time = cast(\n datetime,\n (\n pendulum.instance(current_time, tz=self.timezone)\n if current_time\n else pendulum.now(self.timezone)\n ),\n )\n return self._get_first_partition_window(current_time=current_time)\n\n @functools.lru_cache(maxsize=5)\n def _get_last_partition_window(self, *, current_time: datetime) -> Optional[TimeWindow]:\n if self.get_first_partition_window(current_time) is None:\n return None\n\n current_time = (\n pendulum.instance(current_time, tz=self.timezone)\n if current_time\n else pendulum.now(self.timezone)\n )\n\n if self.end and self.end < current_time:\n current_time = self.end\n\n if self.end_offset == 0:\n 
return next(iter(self._reverse_iterate_time_windows(current_time)))\n else:\n # TODO: make this efficient\n last_partition_key = super().get_last_partition_key(current_time)\n return (\n self.time_window_for_partition_key(last_partition_key)\n if last_partition_key\n else None\n )\n\n def get_last_partition_window(\n self, current_time: Optional[datetime] = None\n ) -> Optional[TimeWindow]:\n current_time = cast(\n datetime,\n (\n pendulum.instance(current_time, tz=self.timezone)\n if current_time\n else pendulum.now(self.timezone)\n ),\n )\n return self._get_last_partition_window(current_time=current_time)\n\n def get_first_partition_key(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Optional[str]:\n first_window = self.get_first_partition_window(current_time)\n if first_window is None:\n return None\n\n return first_window.start.strftime(self.fmt)\n\n def get_last_partition_key(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Optional[str]:\n last_window = self.get_last_partition_window(current_time)\n if last_window is None:\n return None\n\n return last_window.start.strftime(self.fmt)\n\n def end_time_for_partition_key(self, partition_key: str) -> datetime:\n return self.time_window_for_partition_key(partition_key).end\n\n @functools.lru_cache(maxsize=5)\n def get_partition_keys_in_time_window(self, time_window: TimeWindow) -> Sequence[str]:\n result: List[str] = []\n for partition_time_window in self._iterate_time_windows(time_window.start):\n if partition_time_window.start < time_window.end:\n result.append(partition_time_window.start.strftime(self.fmt))\n else:\n break\n return result\n\n def get_partition_key_range_for_time_window(self, time_window: TimeWindow) -> PartitionKeyRange:\n start_partition_key = self.get_partition_key_for_timestamp(time_window.start.timestamp())\n end_partition_key = self.get_partition_key_for_timestamp(\n cast(TimeWindow, self.get_prev_partition_window(time_window.end)).start.timestamp()\n )\n\n return PartitionKeyRange(start_partition_key, end_partition_key)\n\n def get_partition_keys_in_range(\n self,\n partition_key_range: PartitionKeyRange,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Sequence[str]:\n start_time = self.start_time_for_partition_key(partition_key_range.start)\n end_time = self.end_time_for_partition_key(partition_key_range.end)\n\n return self.get_partition_keys_in_time_window(TimeWindow(start_time, end_time))\n\n @public\n @property\n def schedule_type(self) -> Optional[ScheduleType]:\n """Optional[ScheduleType]: An enum representing the partition cadence (hourly, daily,\n weekly, or monthly).\n """\n if re.fullmatch(r"\\d+ \\* \\* \\* \\*", self.cron_schedule):\n return ScheduleType.HOURLY\n elif re.fullmatch(r"\\d+ \\d+ \\* \\* \\*", self.cron_schedule):\n return ScheduleType.DAILY\n elif re.fullmatch(r"\\d+ \\d+ \\* \\* \\d+", self.cron_schedule):\n return ScheduleType.WEEKLY\n elif re.fullmatch(r"\\d+ \\d+ \\d+ \\* \\*", self.cron_schedule):\n return ScheduleType.MONTHLY\n else:\n return None\n\n @public\n @property\n def minute_offset(self) -> int:\n """int: Number of minutes past the hour to "split" partitions. 
Defaults to 0.\n\n For example, returns 15 if each partition starts at 15 minutes past the hour.\n """\n match = re.fullmatch(r"(\\d+) (\\d+|\\*) (\\d+|\\*) (\\d+|\\*) (\\d+|\\*)", self.cron_schedule)\n if match is None:\n check.failed(f"{self.cron_schedule} has no minute offset")\n return int(match.groups()[0])\n\n @public\n @property\n def hour_offset(self) -> int:\n """int: Number of hours past 00:00 to "split" partitions. Defaults to 0.\n\n For example, returns 1 if each partition starts at 01:00.\n """\n match = re.fullmatch(r"(\\d+|\\*) (\\d+) (\\d+|\\*) (\\d+|\\*) (\\d+|\\*)", self.cron_schedule)\n if match is None:\n check.failed(f"{self.cron_schedule} has no hour offset")\n return int(match.groups()[1])\n\n @public\n @property\n def day_offset(self) -> int:\n """int: For a weekly or monthly partitions definition, returns the day to "split" partitions\n by. Each partition will start on this day, and end before this day in the following\n week/month. Returns 0 if the day_offset parameter is unset in the\n WeeklyPartitionsDefinition, MonthlyPartitionsDefinition, or the provided cron schedule.\n\n For weekly partitions, returns a value between 0 (representing Sunday) and 6 (representing\n Saturday). Providing a value of 1 means that a partition will exist weekly from Monday to\n the following Sunday.\n\n For monthly partitions, returns a value between 0 (the first day of the month) and 31 (the\n last possible day of the month).\n """\n schedule_type = self.schedule_type\n if schedule_type == ScheduleType.WEEKLY:\n match = re.fullmatch(r"(\\d+|\\*) (\\d+|\\*) (\\d+|\\*) (\\d+|\\*) (\\d+)", self.cron_schedule)\n if match is None:\n check.failed(f"{self.cron_schedule} has no day offset")\n return int(match.groups()[4])\n elif schedule_type == ScheduleType.MONTHLY:\n match = re.fullmatch(r"(\\d+|\\*) (\\d+|\\*) (\\d+) (\\d+|\\*) (\\d+|\\*)", self.cron_schedule)\n if match is None:\n check.failed(f"{self.cron_schedule} has no day offset")\n return int(match.groups()[2])\n else:\n check.failed(f"Unsupported schedule type for day_offset: {schedule_type}")\n\n
[docs] @public\n def get_cron_schedule(\n self,\n minute_of_hour: Optional[int] = None,\n hour_of_day: Optional[int] = None,\n day_of_week: Optional[int] = None,\n day_of_month: Optional[int] = None,\n ) -> str:\n """The schedule executes at the cadence specified by the partitioning, but may overwrite\n the minute/hour/day offset of the partitioning.\n\n This is useful e.g. if you have partitions that span midnight to midnight but you want to\n schedule a job that runs at 2 am.\n """\n if (\n minute_of_hour is None\n and hour_of_day is None\n and day_of_week is None\n and day_of_month is None\n ):\n return self.cron_schedule\n\n schedule_type = self.schedule_type\n if schedule_type is None:\n check.failed(\n f"{self.cron_schedule} does not support"\n " minute_of_hour/hour_of_day/day_of_week/day_of_month arguments"\n )\n\n minute_of_hour = cast(\n int,\n check.opt_int_param(minute_of_hour, "minute_of_hour", default=self.minute_offset),\n )\n\n if schedule_type == ScheduleType.HOURLY:\n check.invariant(\n hour_of_day is None, "Cannot set hour parameter with hourly partitions."\n )\n else:\n hour_of_day = cast(\n int, check.opt_int_param(hour_of_day, "hour_of_day", default=self.hour_offset)\n )\n\n if schedule_type == ScheduleType.DAILY:\n check.invariant(\n day_of_week is None, "Cannot set day of week parameter with daily partitions."\n )\n check.invariant(\n day_of_month is None, "Cannot set day of month parameter with daily partitions."\n )\n\n if schedule_type == ScheduleType.MONTHLY:\n default = self.day_offset or 1\n day_offset = check.opt_int_param(day_of_month, "day_of_month", default=default)\n elif schedule_type == ScheduleType.WEEKLY:\n default = self.day_offset or 0\n day_offset = check.opt_int_param(day_of_week, "day_of_week", default=default)\n else:\n day_offset = 0\n\n return cron_schedule_from_schedule_type_and_offsets(\n schedule_type,\n minute_offset=minute_of_hour,\n hour_offset=hour_of_day or 0,\n day_offset=day_offset,\n )
\n\n def _iterate_time_windows(self, start: datetime) -> Iterable[TimeWindow]:\n """Returns an infinite generator of time windows that start after the given start time."""\n start_timestamp = pendulum.instance(start, tz=self.timezone).timestamp()\n iterator = cron_string_iterator(\n start_timestamp=start_timestamp,\n cron_string=self.cron_schedule,\n execution_timezone=self.timezone,\n )\n prev_time = next(iterator)\n while prev_time.timestamp() < start_timestamp:\n prev_time = next(iterator)\n\n while True:\n next_time = next(iterator)\n yield TimeWindow(prev_time, next_time)\n prev_time = next_time\n\n def _reverse_iterate_time_windows(self, end: datetime) -> Iterable[TimeWindow]:\n """Returns an infinite generator of time windows that end before the given end time."""\n end_timestamp = pendulum.instance(end, tz=self.timezone).timestamp()\n iterator = reverse_cron_string_iterator(\n end_timestamp=end_timestamp,\n cron_string=self.cron_schedule,\n execution_timezone=self.timezone,\n )\n\n prev_time = next(iterator)\n while prev_time.timestamp() > end_timestamp:\n prev_time = next(iterator)\n\n while True:\n next_time = next(iterator)\n yield TimeWindow(next_time, prev_time)\n prev_time = next_time\n\n def get_partition_key_for_timestamp(self, timestamp: float, end_closed: bool = False) -> str:\n """Args:\n timestamp (float): Timestamp from the unix epoch, UTC.\n end_closed (bool): Whether the interval is closed at the end or at the beginning.\n """\n iterator = cron_string_iterator(\n timestamp, self.cron_schedule, self.timezone, start_offset=-1\n )\n # prev will be < timestamp\n prev = next(iterator)\n # prev_next will be >= timestamp\n prev_next = next(iterator)\n\n if end_closed or prev_next.timestamp() > timestamp:\n return prev.strftime(self.fmt)\n else:\n return prev_next.strftime(self.fmt)\n\n def less_than(self, partition_key1: str, partition_key2: str) -> bool:\n """Returns true if the partition_key1 is earlier than partition_key2."""\n return self.start_time_for_partition_key(\n partition_key1\n ) < self.start_time_for_partition_key(partition_key2)\n\n @property\n def partitions_subset_class(self) -> Type["PartitionsSubset"]:\n return TimeWindowPartitionsSubset\n\n def empty_subset(self) -> "PartitionsSubset":\n return self.partitions_subset_class.empty_subset(self)\n\n def is_valid_partition_key(self, partition_key: str) -> bool:\n try:\n partition_time = pendulum.instance(\n datetime.strptime(partition_key, self.fmt), tz=self.timezone\n )\n return partition_time >= self.start\n except ValueError:\n return False\n\n def get_serializable_unique_identifier(\n self, dynamic_partitions_store: Optional[DynamicPartitionsStore] = None\n ) -> str:\n return hashlib.sha1(self.__repr__().encode("utf-8")).hexdigest()\n\n def has_partition_key(\n self,\n partition_key: str,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> bool:\n return bool(self._get_validated_time_window_for_partition_key(partition_key, current_time))
\n\n\n
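A hedged sketch of constructing this definition directly from a cron schedule, using only the constructor arguments and methods shown above:

from datetime import datetime

from dagster import TimeWindowPartitionsDefinition

# Daily windows starting at midnight UTC; each partition key is the window start,
# formatted with fmt.
partitions_def = TimeWindowPartitionsDefinition(
    start="2023-01-01",
    fmt="%Y-%m-%d",
    cron_schedule="0 0 * * *",
)

# Only windows that have fully elapsed by current_time become partitions:
# ["2023-01-01", "2023-01-02", "2023-01-03"]
keys = partitions_def.get_partition_keys(current_time=datetime(2023, 1, 4))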
[docs]class DailyPartitionsDefinition(TimeWindowPartitionsDefinition):\n """A set of daily partitions.\n\n The first partition in the set will start at the start_date at midnight. The last partition\n in the set will end before the current time, unless the end_offset argument is set to a\n positive number. If minute_offset and/or hour_offset are used, the start and end times of\n each partition will be hour_offset:minute_offset of each day.\n\n Args:\n start_date (Union[datetime.datetime, str]): The first date in the set of partitions. Can\n provide in either a datetime or string format.\n end_date (Union[datetime.datetime, str, None]): The last date(excluding) in the set of partitions.\n Default is None. Can provide in either a datetime or string format.\n minute_offset (int): Number of minutes past the hour to "split" the partition. Defaults\n to 0.\n hour_offset (int): Number of hours past 00:00 to "split" the partition. Defaults to 0.\n timezone (Optional[str]): The timezone in which each date should exist.\n Supported strings for timezones are the ones provided by the\n `IANA time zone database <https://www.iana.org/time-zones>` - e.g. "America/Los_Angeles".\n fmt (Optional[str]): The date format to use. Defaults to `%Y-%m-%d`.\n end_offset (int): Extends the partition set by a number of partitions equal to the value\n passed. If end_offset is 0 (the default), the last partition ends before the current\n time. If end_offset is 1, the second-to-last partition ends before the current time,\n and so on.\n\n .. code-block:: python\n\n DailyPartitionsDefinition(start_date="2022-03-12")\n # creates partitions (2022-03-12-00:00, 2022-03-13-00:00), (2022-03-13-00:00, 2022-03-14-00:00), ...\n\n DailyPartitionsDefinition(start_date="2022-03-12", minute_offset=15, hour_offset=16)\n # creates partitions (2022-03-12-16:15, 2022-03-13-16:15), (2022-03-13-16:15, 2022-03-14-16:15), ...\n """\n\n def __new__(\n cls,\n start_date: Union[datetime, str],\n end_date: Union[datetime, str, None] = None,\n minute_offset: int = 0,\n hour_offset: int = 0,\n timezone: Optional[str] = None,\n fmt: Optional[str] = None,\n end_offset: int = 0,\n ):\n _fmt = fmt or DEFAULT_DATE_FORMAT\n\n return super(DailyPartitionsDefinition, cls).__new__(\n cls,\n schedule_type=ScheduleType.DAILY,\n start=start_date,\n end=end_date,\n minute_offset=minute_offset,\n hour_offset=hour_offset,\n timezone=timezone,\n fmt=_fmt,\n end_offset=end_offset,\n )
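In practice this definition is usually attached to an asset rather than queried directly. A minimal sketch (the asset name daily_events is hypothetical; assumes AssetExecutionContext is available in the installed dagster version):

from dagster import AssetExecutionContext, DailyPartitionsDefinition, asset

@asset(partitions_def=DailyPartitionsDefinition(start_date="2022-03-12"))
def daily_events(context: AssetExecutionContext) -> None:
    # The partition key is the window start formatted with fmt, e.g. "2022-03-12".
    context.log.info(f"processing partition {context.partition_key}")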
\n\n\ndef wrap_time_window_run_config_fn(\n run_config_fn: Optional[Callable[[datetime, datetime], Mapping[str, Any]]],\n partitions_def: TimeWindowPartitionsDefinition,\n) -> Callable[[str], Mapping[str, Any]]:\n def _run_config_wrapper(key: str) -> Mapping[str, Any]:\n if not run_config_fn:\n return {}\n time_window = partitions_def.time_window_for_partition_key(key)\n return run_config_fn(time_window.start, time_window.end)\n\n return _run_config_wrapper\n\n\ndef wrap_time_window_tags_fn(\n tags_fn: Optional[Callable[[datetime, datetime], Mapping[str, str]]],\n partitions_def: TimeWindowPartitionsDefinition,\n) -> Callable[[str], Mapping[str, str]]:\n def _tag_wrapper(key: str) -> Mapping[str, str]:\n if not tags_fn:\n return {}\n time_window = partitions_def.time_window_for_partition_key(key)\n return tags_fn(time_window.start, time_window.end)\n\n return _tag_wrapper\n\n\n
[docs]def daily_partitioned_config(\n start_date: Union[datetime, str],\n minute_offset: int = 0,\n hour_offset: int = 0,\n timezone: Optional[str] = None,\n fmt: Optional[str] = None,\n end_offset: int = 0,\n tags_for_partition_fn: Optional[Callable[[datetime, datetime], Mapping[str, str]]] = None,\n) -> Callable[\n [Callable[[datetime, datetime], Mapping[str, Any]]],\n PartitionedConfig[DailyPartitionsDefinition],\n]:\n """Defines run config over a set of daily partitions.\n\n The decorated function should accept a start datetime and end datetime, which represent the bounds\n of the date partition the config should delineate.\n\n The decorated function should return a run config dictionary.\n\n The resulting object created by this decorator can be provided to the config argument of a Job.\n The first partition in the set will start at the start_date at midnight. The last partition in\n the set will end before the current time, unless the end_offset argument is set to a positive\n number. If minute_offset and/or hour_offset are used, the start and end times of each partition\n will be hour_offset:minute_offset of each day.\n\n Args:\n start_date (Union[datetime.datetime, str]): The first date in the set of partitions. Can\n provide in either a datetime or string format.\n minute_offset (int): Number of minutes past the hour to "split" the partition. Defaults\n to 0.\n hour_offset (int): Number of hours past 00:00 to "split" the partition. Defaults to 0.\n timezone (Optional[str]): The timezone in which each date should exist.\n Supported strings for timezones are the ones provided by the\n `IANA time zone database <https://www.iana.org/time-zones>` - e.g. "America/Los_Angeles".\n fmt (Optional[str]): The date format to use. Defaults to `%Y-%m-%d`.\n end_offset (int): Extends the partition set by a number of partitions equal to the value\n passed. If end_offset is 0 (the default), the last partition ends before the current\n time. If end_offset is 1, the second-to-last partition ends before the current time,\n and so on.\n tags_for_partition_fn (Optional[Callable[[str], Mapping[str, str]]]): A function that\n accepts a partition time window and returns a dictionary of tags to attach to runs for\n that partition.\n\n .. code-block:: python\n\n @daily_partitioned_config(start_date="2022-03-12")\n # creates partitions (2022-03-12-00:00, 2022-03-13-00:00), (2022-03-13-00:00, 2022-03-14-00:00), ...\n\n @daily_partitioned_config(start_date="2022-03-12", minute_offset=15, hour_offset=16)\n # creates partitions (2022-03-12-16:15, 2022-03-13-16:15), (2022-03-13-16:15, 2022-03-14-16:15), ...\n """\n\n def inner(\n fn: Callable[[datetime, datetime], Mapping[str, Any]]\n ) -> PartitionedConfig[DailyPartitionsDefinition]:\n check.callable_param(fn, "fn")\n\n partitions_def = DailyPartitionsDefinition(\n start_date=start_date,\n minute_offset=minute_offset,\n hour_offset=hour_offset,\n timezone=timezone,\n fmt=fmt,\n end_offset=end_offset,\n )\n\n return PartitionedConfig(\n run_config_for_partition_key_fn=wrap_time_window_run_config_fn(fn, partitions_def),\n partitions_def=partitions_def,\n decorated_fn=fn,\n tags_for_partition_key_fn=wrap_time_window_tags_fn(\n tags_for_partition_fn, partitions_def\n ),\n )\n\n return inner
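A hedged usage sketch: the decorated function receives the window bounds, and its return value becomes the run config for the corresponding partitioned run (the op and job names below are hypothetical). The hourly_, weekly_, and monthly_partitioned_config decorators that follow work the same way at their respective cadences.

from datetime import datetime

from dagster import OpExecutionContext, daily_partitioned_config, job, op

@daily_partitioned_config(start_date="2022-03-12")
def my_daily_config(start: datetime, end: datetime):
    # Run config keyed by the op that consumes it.
    return {
        "ops": {
            "process_window": {
                "config": {"start": start.isoformat(), "end": end.isoformat()}
            }
        }
    }

@op(config_schema={"start": str, "end": str})
def process_window(context: OpExecutionContext) -> None:
    context.log.info(f"{context.op_config['start']} -> {context.op_config['end']}")

@job(config=my_daily_config)
def daily_job():
    process_window()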
\n\n\n
[docs]class HourlyPartitionsDefinition(TimeWindowPartitionsDefinition):\n """A set of hourly partitions.\n\n The first partition in the set will start on the start_date at midnight. The last partition\n in the set will end before the current time, unless the end_offset argument is set to a\n positive number. If minute_offset is provided, the start and end times of each partition\n will be minute_offset past the hour.\n\n Args:\n start_date (Union[datetime.datetime, str]): The first date in the set of partitions. Can\n provide in either a datetime or string format.\n end_date (Union[datetime.datetime, str, None]): The last date(excluding) in the set of partitions.\n Default is None. Can provide in either a datetime or string format.\n minute_offset (int): Number of minutes past the hour to "split" the partition. Defaults\n to 0.\n fmt (Optional[str]): The date format to use. Defaults to `%Y-%m-%d`.\n timezone (Optional[str]): The timezone in which each date should exist.\n Supported strings for timezones are the ones provided by the\n `IANA time zone database <https://www.iana.org/time-zones>` - e.g. "America/Los_Angeles".\n end_offset (int): Extends the partition set by a number of partitions equal to the value\n passed. If end_offset is 0 (the default), the last partition ends before the current\n time. If end_offset is 1, the second-to-last partition ends before the current time,\n and so on.\n\n .. code-block:: python\n\n HourlyPartitionsDefinition(start_date=datetime(2022, 03, 12))\n # creates partitions (2022-03-12-00:00, 2022-03-12-01:00), (2022-03-12-01:00, 2022-03-12-02:00), ...\n\n HourlyPartitionsDefinition(start_date=datetime(2022, 03, 12), minute_offset=15)\n # creates partitions (2022-03-12-00:15, 2022-03-12-01:15), (2022-03-12-01:15, 2022-03-12-02:15), ...\n """\n\n def __new__(\n cls,\n start_date: Union[datetime, str],\n end_date: Union[datetime, str, None] = None,\n minute_offset: int = 0,\n timezone: Optional[str] = None,\n fmt: Optional[str] = None,\n end_offset: int = 0,\n ):\n _fmt = fmt or DEFAULT_HOURLY_FORMAT_WITHOUT_TIMEZONE\n\n return super(HourlyPartitionsDefinition, cls).__new__(\n cls,\n schedule_type=ScheduleType.HOURLY,\n start=start_date,\n end=end_date,\n minute_offset=minute_offset,\n timezone=timezone,\n fmt=_fmt,\n end_offset=end_offset,\n )
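Unlike the daily case, hourly partition keys include the hour and minute of the window start, since fmt falls back to DEFAULT_HOURLY_FORMAT_WITHOUT_TIMEZONE. A small sketch (the expected keys below assume that default format):

from datetime import datetime

from dagster import HourlyPartitionsDefinition

hourly = HourlyPartitionsDefinition(start_date=datetime(2022, 3, 12))

# Windows that have fully elapsed by 03:00 on the start date:
# ["2022-03-12-00:00", "2022-03-12-01:00", "2022-03-12-02:00"]
keys = hourly.get_partition_keys(current_time=datetime(2022, 3, 12, 3))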
\n\n\n
[docs]def hourly_partitioned_config(\n start_date: Union[datetime, str],\n minute_offset: int = 0,\n timezone: Optional[str] = None,\n fmt: Optional[str] = None,\n end_offset: int = 0,\n tags_for_partition_fn: Optional[Callable[[datetime, datetime], Mapping[str, str]]] = None,\n) -> Callable[\n [Callable[[datetime, datetime], Mapping[str, Any]]],\n PartitionedConfig[HourlyPartitionsDefinition],\n]:\n """Defines run config over a set of hourly partitions.\n\n The decorated function should accept a start datetime and end datetime, which represent the date\n partition the config should delineate.\n\n The decorated function should return a run config dictionary.\n\n The resulting object created by this decorator can be provided to the config argument of a Job.\n The first partition in the set will start at the start_date at midnight. The last partition in\n the set will end before the current time, unless the end_offset argument is set to a positive\n number. If minute_offset is provided, the start and end times of each partition will be\n minute_offset past the hour.\n\n Args:\n start_date (Union[datetime.datetime, str]): The first date in the set of partitions. Can\n provide in either a datetime or string format.\n minute_offset (int): Number of minutes past the hour to "split" the partition. Defaults\n to 0.\n fmt (Optional[str]): The date format to use. Defaults to `%Y-%m-%d`.\n timezone (Optional[str]): The timezone in which each date should exist.\n Supported strings for timezones are the ones provided by the\n `IANA time zone database <https://www.iana.org/time-zones>` - e.g. "America/Los_Angeles".\n end_offset (int): Extends the partition set by a number of partitions equal to the value\n passed. If end_offset is 0 (the default), the last partition ends before the current\n time. If end_offset is 1, the second-to-last partition ends before the current time,\n and so on.\n tags_for_partition_fn (Optional[Callable[[str], Mapping[str, str]]]): A function that\n accepts a partition time window and returns a dictionary of tags to attach to runs for\n that partition.\n\n .. code-block:: python\n\n @hourly_partitioned_config(start_date=datetime(2022, 03, 12))\n # creates partitions (2022-03-12-00:00, 2022-03-12-01:00), (2022-03-12-01:00, 2022-03-12-02:00), ...\n\n @hourly_partitioned_config(start_date=datetime(2022, 03, 12), minute_offset=15)\n # creates partitions (2022-03-12-00:15, 2022-03-12-01:15), (2022-03-12-01:15, 2022-03-12-02:15), ...\n """\n\n def inner(\n fn: Callable[[datetime, datetime], Mapping[str, Any]]\n ) -> PartitionedConfig[HourlyPartitionsDefinition]:\n check.callable_param(fn, "fn")\n\n partitions_def = HourlyPartitionsDefinition(\n start_date=start_date,\n minute_offset=minute_offset,\n timezone=timezone,\n fmt=fmt,\n end_offset=end_offset,\n )\n return PartitionedConfig(\n run_config_for_partition_key_fn=wrap_time_window_run_config_fn(fn, partitions_def),\n partitions_def=partitions_def,\n decorated_fn=fn,\n tags_for_partition_key_fn=wrap_time_window_tags_fn(\n tags_for_partition_fn, partitions_def\n ),\n )\n\n return inner
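Note that although the docstring above describes tags_for_partition_fn as Callable[[str], Mapping[str, str]], the signature (and wrap_time_window_tags_fn) pass it the window's start and end datetimes. A hedged sketch (the op and job names are hypothetical):

from datetime import datetime

from dagster import hourly_partitioned_config, job, op

def window_tags(start: datetime, end: datetime) -> dict:
    # Tags attached to each run launched for the matching partition.
    return {"window_start": start.isoformat(), "window_end": end.isoformat()}

@hourly_partitioned_config(
    start_date=datetime(2022, 3, 12), tags_for_partition_fn=window_tags
)
def hourly_config(start: datetime, end: datetime):
    return {"ops": {"echo_window": {"config": {"start": start.isoformat()}}}}

@op(config_schema={"start": str})
def echo_window(context) -> None:
    context.log.info(context.op_config["start"])

@job(config=hourly_config)
def hourly_job():
    echo_window()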
\n\n\n
[docs]class MonthlyPartitionsDefinition(TimeWindowPartitionsDefinition):\n """A set of monthly partitions.\n\n The first partition in the set will start at the soonest first of the month after start_date\n at midnight. The last partition in the set will end before the current time, unless the\n end_offset argument is set to a positive number. If day_offset is provided, the start and\n end date of each partition will be day_offset. If minute_offset and/or hour_offset are used,\n the start and end times of each partition will be hour_offset:minute_offset of each day.\n\n Args:\n start_date (Union[datetime.datetime, str]): The first date in the set of partitions will be\n midnight the sonnest first of the month following start_date. Can provide in either a\n datetime or string format.\n end_date (Union[datetime.datetime, str, None]): The last date(excluding) in the set of partitions.\n Default is None. Can provide in either a datetime or string format.\n minute_offset (int): Number of minutes past the hour to "split" the partition. Defaults\n to 0.\n hour_offset (int): Number of hours past 00:00 to "split" the partition. Defaults to 0.\n day_offset (int): Day of the month to "split" the partition. Defaults to 1.\n timezone (Optional[str]): The timezone in which each date should exist.\n Supported strings for timezones are the ones provided by the\n `IANA time zone database <https://www.iana.org/time-zones>` - e.g. "America/Los_Angeles".\n fmt (Optional[str]): The date format to use. Defaults to `%Y-%m-%d`.\n end_offset (int): Extends the partition set by a number of partitions equal to the value\n passed. If end_offset is 0 (the default), the last partition ends before the current\n time. If end_offset is 1, the second-to-last partition ends before the current time,\n and so on.\n\n .. code-block:: python\n\n MonthlyPartitionsDefinition(start_date="2022-03-12")\n # creates partitions (2022-04-01-00:00, 2022-05-01-00:00), (2022-05-01-00:00, 2022-06-01-00:00), ...\n\n MonthlyPartitionsDefinition(start_date="2022-03-12", minute_offset=15, hour_offset=3, day_offset=5)\n # creates partitions (2022-04-05-03:15, 2022-05-05-03:15), (2022-05-05-03:15, 2022-06-05-03:15), ...\n """\n\n def __new__(\n cls,\n start_date: Union[datetime, str],\n end_date: Union[datetime, str, None] = None,\n minute_offset: int = 0,\n hour_offset: int = 0,\n day_offset: int = 1,\n timezone: Optional[str] = None,\n fmt: Optional[str] = None,\n end_offset: int = 0,\n ):\n _fmt = fmt or DEFAULT_DATE_FORMAT\n\n return super(MonthlyPartitionsDefinition, cls).__new__(\n cls,\n schedule_type=ScheduleType.MONTHLY,\n start=start_date,\n end=end_date,\n minute_offset=minute_offset,\n hour_offset=hour_offset,\n day_offset=day_offset,\n timezone=timezone,\n fmt=_fmt,\n end_offset=end_offset,\n )
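A small sketch of how day_offset shifts the monthly boundary (expected keys assume the default %Y-%m-%d fmt):

from datetime import datetime

from dagster import MonthlyPartitionsDefinition

# With day_offset=5, each window runs from the 5th of one month to the 5th of the next;
# the first window starts at the first boundary on or after start_date.
monthly = MonthlyPartitionsDefinition(start_date="2022-03-12", day_offset=5)

# Windows fully elapsed by 2022-06-10: ["2022-04-05", "2022-05-05"]
keys = monthly.get_partition_keys(current_time=datetime(2022, 6, 10))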
\n\n\n
[docs]def monthly_partitioned_config(\n start_date: Union[datetime, str],\n minute_offset: int = 0,\n hour_offset: int = 0,\n day_offset: int = 1,\n timezone: Optional[str] = None,\n fmt: Optional[str] = None,\n end_offset: int = 0,\n tags_for_partition_fn: Optional[Callable[[datetime, datetime], Mapping[str, str]]] = None,\n) -> Callable[\n [Callable[[datetime, datetime], Mapping[str, Any]]],\n PartitionedConfig[MonthlyPartitionsDefinition],\n]:\n """Defines run config over a set of monthly partitions.\n\n The decorated function should accept a start datetime and end datetime, which represent the date\n partition the config should delineate.\n\n The decorated function should return a run config dictionary.\n\n The resulting object created by this decorator can be provided to the config argument of a Job.\n The first partition in the set will start at midnight on the soonest first of the month after\n start_date. The last partition in the set will end before the current time, unless the\n end_offset argument is set to a positive number. If day_offset is provided, the start and end\n date of each partition will be day_offset. If minute_offset and/or hour_offset are used, the\n start and end times of each partition will be hour_offset:minute_offset of each day.\n\n Args:\n start_date (Union[datetime.datetime, str]): The first date in the set of partitions will be\n midnight the sonnest first of the month following start_date. Can provide in either a\n datetime or string format.\n minute_offset (int): Number of minutes past the hour to "split" the partition. Defaults\n to 0.\n hour_offset (int): Number of hours past 00:00 to "split" the partition. Defaults to 0.\n day_offset (int): Day of the month to "split" the partition. Defaults to 1.\n timezone (Optional[str]): The timezone in which each date should exist.\n Supported strings for timezones are the ones provided by the\n `IANA time zone database <https://www.iana.org/time-zones>` - e.g. "America/Los_Angeles".\n fmt (Optional[str]): The date format to use. Defaults to `%Y-%m-%d`.\n end_offset (int): Extends the partition set by a number of partitions equal to the value\n passed. If end_offset is 0 (the default), the last partition ends before the current\n time. If end_offset is 1, the second-to-last partition ends before the current time,\n and so on.\n tags_for_partition_fn (Optional[Callable[[str], Mapping[str, str]]]): A function that\n accepts a partition time window and returns a dictionary of tags to attach to runs for\n that partition.\n\n .. 
code-block:: python\n\n @monthly_partitioned_config(start_date="2022-03-12")\n # creates partitions (2022-04-01-00:00, 2022-05-01-00:00), (2022-05-01-00:00, 2022-06-01-00:00), ...\n\n @monthly_partitioned_config(start_date="2022-03-12", minute_offset=15, hour_offset=3, day_offset=5)\n # creates partitions (2022-04-05-03:15, 2022-05-05-03:15), (2022-05-05-03:15, 2022-06-05-03:15), ...\n """\n\n def inner(\n fn: Callable[[datetime, datetime], Mapping[str, Any]]\n ) -> PartitionedConfig[MonthlyPartitionsDefinition]:\n check.callable_param(fn, "fn")\n\n partitions_def = MonthlyPartitionsDefinition(\n start_date=start_date,\n minute_offset=minute_offset,\n hour_offset=hour_offset,\n day_offset=day_offset,\n timezone=timezone,\n fmt=fmt,\n end_offset=end_offset,\n )\n\n return PartitionedConfig(\n run_config_for_partition_key_fn=wrap_time_window_run_config_fn(fn, partitions_def),\n partitions_def=partitions_def,\n decorated_fn=fn,\n tags_for_partition_key_fn=wrap_time_window_tags_fn(\n tags_for_partition_fn, partitions_def\n ),\n )\n\n return inner
\n\n\n
[docs]class WeeklyPartitionsDefinition(TimeWindowPartitionsDefinition):\n """Defines a set of weekly partitions.\n\n The first partition in the set will start at the start_date. The last partition in the set will\n end before the current time, unless the end_offset argument is set to a positive number. If\n day_offset is provided, the start and end date of each partition will be day of the week\n corresponding to day_offset (0 indexed with Sunday as the start of the week). If\n minute_offset and/or hour_offset are used, the start and end times of each partition will be\n hour_offset:minute_offset of each day.\n\n Args:\n start_date (Union[datetime.datetime, str]): The first date in the set of partitions will\n Sunday at midnight following start_date. Can provide in either a datetime or string\n format.\n end_date (Union[datetime.datetime, str, None]): The last date(excluding) in the set of partitions.\n Default is None. Can provide in either a datetime or string format.\n minute_offset (int): Number of minutes past the hour to "split" the partition. Defaults\n to 0.\n hour_offset (int): Number of hours past 00:00 to "split" the partition. Defaults to 0.\n day_offset (int): Day of the week to "split" the partition. Defaults to 0 (Sunday).\n timezone (Optional[str]): The timezone in which each date should exist.\n Supported strings for timezones are the ones provided by the\n `IANA time zone database <https://www.iana.org/time-zones>` - e.g. "America/Los_Angeles".\n fmt (Optional[str]): The date format to use. Defaults to `%Y-%m-%d`.\n end_offset (int): Extends the partition set by a number of partitions equal to the value\n passed. If end_offset is 0 (the default), the last partition ends before the current\n time. If end_offset is 1, the second-to-last partition ends before the current time,\n and so on.\n\n .. code-block:: python\n\n WeeklyPartitionsDefinition(start_date="2022-03-12")\n # creates partitions (2022-03-13-00:00, 2022-03-20-00:00), (2022-03-20-00:00, 2022-03-27-00:00), ...\n\n WeeklyPartitionsDefinition(start_date="2022-03-12", minute_offset=15, hour_offset=3, day_offset=6)\n # creates partitions (2022-03-12-03:15, 2022-03-19-03:15), (2022-03-19-03:15, 2022-03-26-03:15), ...\n """\n\n def __new__(\n cls,\n start_date: Union[datetime, str],\n end_date: Union[datetime, str, None] = None,\n minute_offset: int = 0,\n hour_offset: int = 0,\n day_offset: int = 0,\n timezone: Optional[str] = None,\n fmt: Optional[str] = None,\n end_offset: int = 0,\n ):\n _fmt = fmt or DEFAULT_DATE_FORMAT\n\n return super(WeeklyPartitionsDefinition, cls).__new__(\n cls,\n schedule_type=ScheduleType.WEEKLY,\n start=start_date,\n end=end_date,\n minute_offset=minute_offset,\n hour_offset=hour_offset,\n day_offset=day_offset,\n timezone=timezone,\n fmt=_fmt,\n end_offset=end_offset,\n )
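A small sketch of the default weekly boundary, where day_offset=0 means each window starts on Sunday at midnight (expected keys assume the default %Y-%m-%d fmt):

from datetime import datetime

from dagster import WeeklyPartitionsDefinition

# 2022-03-12 is a Saturday, so the first window starts on Sunday 2022-03-13.
weekly = WeeklyPartitionsDefinition(start_date="2022-03-12")

# Windows fully elapsed by 2022-04-01: ["2022-03-13", "2022-03-20"]
keys = weekly.get_partition_keys(current_time=datetime(2022, 4, 1))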
\n\n\n
[docs]def weekly_partitioned_config(\n start_date: Union[datetime, str],\n minute_offset: int = 0,\n hour_offset: int = 0,\n day_offset: int = 0,\n timezone: Optional[str] = None,\n fmt: Optional[str] = None,\n end_offset: int = 0,\n tags_for_partition_fn: Optional[Callable[[datetime, datetime], Mapping[str, str]]] = None,\n) -> Callable[\n [Callable[[datetime, datetime], Mapping[str, Any]]],\n PartitionedConfig[WeeklyPartitionsDefinition],\n]:\n """Defines run config over a set of weekly partitions.\n\n The decorated function should accept a start datetime and end datetime, which represent the date\n partition the config should delineate.\n\n The decorated function should return a run config dictionary.\n\n The resulting object created by this decorator can be provided to the config argument of a Job.\n The first partition in the set will start at the start_date. The last partition in the set will\n end before the current time, unless the end_offset argument is set to a positive number. If\n day_offset is provided, the start and end date of each partition will be day of the week\n corresponding to day_offset (0 indexed with Sunday as the start of the week). If\n minute_offset and/or hour_offset are used, the start and end times of each partition will be\n hour_offset:minute_offset of each day.\n\n Args:\n start_date (Union[datetime.datetime, str]): The first date in the set of partitions will\n Sunday at midnight following start_date. Can provide in either a datetime or string\n format.\n minute_offset (int): Number of minutes past the hour to "split" the partition. Defaults\n to 0.\n hour_offset (int): Number of hours past 00:00 to "split" the partition. Defaults to 0.\n day_offset (int): Day of the week to "split" the partition. Defaults to 0 (Sunday).\n timezone (Optional[str]): The timezone in which each date should exist.\n Supported strings for timezones are the ones provided by the\n `IANA time zone database <https://www.iana.org/time-zones>` - e.g. "America/Los_Angeles".\n fmt (Optional[str]): The date format to use. Defaults to `%Y-%m-%d`.\n end_offset (int): Extends the partition set by a number of partitions equal to the value\n passed. If end_offset is 0 (the default), the last partition ends before the current\n time. If end_offset is 1, the second-to-last partition ends before the current time,\n and so on.\n tags_for_partition_fn (Optional[Callable[[str], Mapping[str, str]]]): A function that\n accepts a partition time window and returns a dictionary of tags to attach to runs for\n that partition.\n\n .. 
code-block:: python\n\n @weekly_partitioned_config(start_date="2022-03-12")\n # creates partitions (2022-03-13-00:00, 2022-03-20-00:00), (2022-03-20-00:00, 2022-03-27-00:00), ...\n\n @weekly_partitioned_config(start_date="2022-03-12", minute_offset=15, hour_offset=3, day_offset=6)\n # creates partitions (2022-03-12-03:15, 2022-03-19-03:15), (2022-03-19-03:15, 2022-03-26-03:15), ...\n """\n\n def inner(\n fn: Callable[[datetime, datetime], Mapping[str, Any]]\n ) -> PartitionedConfig[WeeklyPartitionsDefinition]:\n check.callable_param(fn, "fn")\n\n partitions_def = WeeklyPartitionsDefinition(\n start_date=start_date,\n minute_offset=minute_offset,\n hour_offset=hour_offset,\n day_offset=day_offset,\n timezone=timezone,\n fmt=fmt,\n end_offset=end_offset,\n )\n return PartitionedConfig(\n run_config_for_partition_key_fn=wrap_time_window_run_config_fn(fn, partitions_def),\n partitions_def=partitions_def,\n decorated_fn=fn,\n tags_for_partition_key_fn=wrap_time_window_tags_fn(\n tags_for_partition_fn, partitions_def\n ),\n )\n\n return inner
\n\n\nclass TimeWindowPartitionsSubset(PartitionsSubset):\n # Every time we change the serialization format, we should increment the version number.\n # This will ensure that we can gracefully degrade when deserializing old data.\n SERIALIZATION_VERSION = 1\n\n def __init__(\n self,\n partitions_def: TimeWindowPartitionsDefinition,\n num_partitions: int,\n included_time_windows: Optional[Sequence[TimeWindow]] = None,\n included_partition_keys: Optional[AbstractSet[str]] = None,\n ):\n self._partitions_def = check.inst_param(\n partitions_def, "partitions_def", TimeWindowPartitionsDefinition\n )\n self._included_time_windows = included_time_windows\n self._num_partitions = num_partitions\n\n check.param_invariant(\n not (included_partition_keys and included_time_windows),\n "Cannot specify both included_partition_keys and included_time_windows",\n )\n self._included_time_windows = check.opt_nullable_sequence_param(\n included_time_windows, "included_time_windows", of_type=TimeWindow\n )\n\n self._included_partition_keys = check.opt_nullable_set_param(\n included_partition_keys, "included_partition_keys", of_type=str\n )\n\n @property\n def included_time_windows(self) -> Sequence[TimeWindow]:\n if self._included_time_windows is None:\n result_time_windows, _ = self._add_partitions_to_time_windows(\n initial_windows=[],\n partition_keys=list(check.not_none(self._included_partition_keys)),\n validate=False,\n )\n self._included_time_windows = result_time_windows\n return self._included_time_windows\n\n def _get_partition_time_windows_not_in_subset(\n self,\n current_time: Optional[datetime] = None,\n ) -> Sequence[TimeWindow]:\n """Returns a list of partition time windows that are not in the subset.\n Each time window is a single partition.\n """\n first_tw = self._partitions_def.get_first_partition_window(current_time=current_time)\n last_tw = self._partitions_def.get_last_partition_window(current_time=current_time)\n\n if not first_tw or not last_tw:\n check.failed("No partitions found")\n\n if len(self.included_time_windows) == 0:\n return [TimeWindow(first_tw.start, last_tw.end)]\n\n time_windows = []\n if first_tw.start < self.included_time_windows[0].start:\n time_windows.append(TimeWindow(first_tw.start, self.included_time_windows[0].start))\n\n for i in range(len(self.included_time_windows) - 1):\n if self.included_time_windows[i].start >= last_tw.end:\n break\n if self.included_time_windows[i].end < last_tw.end:\n if self.included_time_windows[i + 1].start <= last_tw.end:\n time_windows.append(\n TimeWindow(\n self.included_time_windows[i].end,\n self.included_time_windows[i + 1].start,\n )\n )\n else:\n time_windows.append(\n TimeWindow(\n self.included_time_windows[i].end,\n last_tw.end,\n )\n )\n\n if last_tw.end > self.included_time_windows[-1].end:\n time_windows.append(TimeWindow(self.included_time_windows[-1].end, last_tw.end))\n\n return time_windows\n\n def get_partition_keys_not_in_subset(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Iterable[str]:\n partition_keys: List[str] = []\n for tw in self._get_partition_time_windows_not_in_subset(current_time):\n partition_keys.extend(self._partitions_def.get_partition_keys_in_time_window(tw))\n return partition_keys\n\n @public\n def get_partition_keys(self, current_time: Optional[datetime] = None) -> Iterable[str]:\n if self._included_partition_keys is None:\n return [\n pk\n for time_window in self.included_time_windows\n for pk in 
self._partitions_def.get_partition_keys_in_time_window(time_window)\n ]\n return list(self._included_partition_keys) if self._included_partition_keys else []\n\n def get_partition_key_ranges(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Sequence[PartitionKeyRange]:\n return [\n self._partitions_def.get_partition_key_range_for_time_window(window)\n for window in self.included_time_windows\n ]\n\n def _add_partitions_to_time_windows(\n self,\n initial_windows: Sequence[TimeWindow],\n partition_keys: Sequence[str],\n validate: bool = True,\n ) -> Tuple[Sequence[TimeWindow], int]:\n """Merges a set of partition keys into an existing set of time windows, returning the\n minimized set of time windows and the number of partitions added.\n """\n result_windows = [*initial_windows]\n time_windows = self._partitions_def.time_windows_for_partition_keys(\n frozenset(partition_keys), validate=validate\n )\n num_added_partitions = 0\n for window in sorted(time_windows):\n # go in reverse order because it's more common to add partitions at the end than the\n # beginning\n for i in reversed(range(len(result_windows))):\n included_window = result_windows[i]\n lt_end_of_range = window.start < included_window.end\n gte_start_of_range = window.start >= included_window.start\n\n if lt_end_of_range and gte_start_of_range:\n break\n\n if not lt_end_of_range:\n merge_with_range = included_window.end == window.start\n merge_with_later_range = i + 1 < len(result_windows) and (\n window.end == result_windows[i + 1].start\n )\n\n if merge_with_range and merge_with_later_range:\n result_windows[i] = TimeWindow(\n included_window.start, result_windows[i + 1].end\n )\n del result_windows[i + 1]\n elif merge_with_range:\n result_windows[i] = TimeWindow(included_window.start, window.end)\n elif merge_with_later_range:\n result_windows[i + 1] = TimeWindow(window.start, result_windows[i + 1].end)\n else:\n result_windows.insert(i + 1, window)\n\n num_added_partitions += 1\n break\n else:\n if result_windows and window.start == result_windows[0].start:\n result_windows[0] = TimeWindow(window.start, included_window.end) # type: ignore\n else:\n result_windows.insert(0, window)\n\n num_added_partitions += 1\n\n return result_windows, num_added_partitions\n\n def with_partition_keys(self, partition_keys: Iterable[str]) -> "TimeWindowPartitionsSubset":\n # if we are representing things as a static set of keys, continue doing so\n if self._included_partition_keys is not None:\n new_partitions = {*self._included_partition_keys, *partition_keys}\n return TimeWindowPartitionsSubset(\n self._partitions_def,\n num_partitions=len(new_partitions),\n included_partition_keys=new_partitions,\n )\n\n result_windows, added_partitions = self._add_partitions_to_time_windows(\n self.included_time_windows, list(partition_keys)\n )\n\n return TimeWindowPartitionsSubset(\n self._partitions_def,\n num_partitions=self._num_partitions + added_partitions,\n included_time_windows=result_windows,\n )\n\n @classmethod\n def from_serialized(\n cls, partitions_def: PartitionsDefinition, serialized: str\n ) -> "PartitionsSubset":\n if not isinstance(partitions_def, TimeWindowPartitionsDefinition):\n check.failed("Partitions definition must be a TimeWindowPartitionsDefinition")\n partitions_def = cast(TimeWindowPartitionsDefinition, partitions_def)\n\n loaded = json.loads(serialized)\n\n def tuples_to_time_windows(tuples):\n return [\n TimeWindow(\n 
pendulum.from_timestamp(tup[0], tz=partitions_def.timezone),\n pendulum.from_timestamp(tup[1], tz=partitions_def.timezone),\n )\n for tup in tuples\n ]\n\n if isinstance(loaded, list):\n # backwards compatibility\n time_windows = tuples_to_time_windows(loaded)\n num_partitions = sum(\n len(partitions_def.get_partition_keys_in_time_window(time_window))\n for time_window in time_windows\n )\n elif isinstance(loaded, dict) and (\n "version" not in loaded or loaded["version"] == cls.SERIALIZATION_VERSION\n ): # version 1\n time_windows = tuples_to_time_windows(loaded["time_windows"])\n num_partitions = loaded["num_partitions"]\n else:\n raise DagsterInvalidDeserializationVersionError(\n f"Attempted to deserialize partition subset with version {loaded.get('version')},"\n f" but only version {cls.SERIALIZATION_VERSION} is supported."\n )\n\n return TimeWindowPartitionsSubset(\n partitions_def, num_partitions=num_partitions, included_time_windows=time_windows\n )\n\n @classmethod\n def can_deserialize(\n cls,\n partitions_def: PartitionsDefinition,\n serialized: str,\n serialized_partitions_def_unique_id: Optional[str],\n serialized_partitions_def_class_name: Optional[str],\n ) -> bool:\n if serialized_partitions_def_unique_id:\n return (\n partitions_def.get_serializable_unique_identifier()\n == serialized_partitions_def_unique_id\n )\n\n if (\n serialized_partitions_def_class_name\n # note: all TimeWindowPartitionsDefinition subclasses will get serialized as raw\n # TimeWindowPartitionsDefinitions, so this class name check will not always pass,\n # hence the unique id check above\n and serialized_partitions_def_class_name != partitions_def.__class__.__name__\n ):\n return False\n\n data = json.loads(serialized)\n return isinstance(data, list) or (\n isinstance(data, dict)\n and data.get("time_windows") is not None\n and data.get("num_partitions") is not None\n )\n\n @classmethod\n def empty_subset(cls, partitions_def: PartitionsDefinition) -> "PartitionsSubset":\n if not isinstance(partitions_def, TimeWindowPartitionsDefinition):\n check.failed("Partitions definition must be a TimeWindowPartitionsDefinition")\n partitions_def = cast(TimeWindowPartitionsDefinition, partitions_def)\n return cls(partitions_def, 0, [], set())\n\n def serialize(self) -> str:\n return json.dumps(\n {\n "version": self.SERIALIZATION_VERSION,\n "time_windows": [\n (window.start.timestamp(), window.end.timestamp())\n for window in self.included_time_windows\n ],\n "num_partitions": self._num_partitions,\n }\n )\n\n @property\n def partitions_def(self) -> PartitionsDefinition:\n return self._partitions_def\n\n def __eq__(self, other):\n return (\n isinstance(other, TimeWindowPartitionsSubset)\n and self._partitions_def == other._partitions_def\n and (\n # faster comparison, but will not catch all cases\n (\n self._included_time_windows == other._included_time_windows\n and self._included_partition_keys == other._included_partition_keys\n )\n # slower comparison, catches all cases\n or self.included_time_windows == other.included_time_windows\n )\n )\n\n def __len__(self) -> int:\n return self._num_partitions\n\n def __contains__(self, partition_key: str) -> bool:\n if self._included_partition_keys is not None:\n return partition_key in self._included_partition_keys\n\n time_window = self._partitions_def.time_window_for_partition_key(partition_key)\n\n return any(\n time_window.start >= included_time_window.start\n and time_window.start < included_time_window.end\n for included_time_window in self.included_time_windows\n 
)\n\n def __repr__(self) -> str:\n return f"TimeWindowPartitionsSubset({self.get_partition_key_ranges()})"\n\n\nclass PartitionRangeStatus(Enum):\n MATERIALIZING = "MATERIALIZING"\n MATERIALIZED = "MATERIALIZED"\n FAILED = "FAILED"\n\n\nPARTITION_RANGE_STATUS_PRIORITY = [\n PartitionRangeStatus.MATERIALIZING,\n PartitionRangeStatus.FAILED,\n PartitionRangeStatus.MATERIALIZED,\n]\n\n\nclass PartitionTimeWindowStatus:\n def __init__(self, time_window: TimeWindow, status: PartitionRangeStatus):\n self.time_window = time_window\n self.status = status\n\n def __repr__(self):\n return f"({self.time_window.start} - {self.time_window.end}): {self.status.value}"\n\n def __eq__(self, other):\n return (\n isinstance(other, PartitionTimeWindowStatus)\n and self.time_window == other.time_window\n and self.status == other.status\n )\n\n\ndef _flatten(\n high_pri_time_windows: List[PartitionTimeWindowStatus],\n low_pri_time_windows: List[PartitionTimeWindowStatus],\n) -> List[PartitionTimeWindowStatus]:\n high_pri_time_windows = sorted(high_pri_time_windows, key=lambda t: t.time_window.start)\n low_pri_time_windows = sorted(low_pri_time_windows, key=lambda t: t.time_window.start)\n\n high_pri_idx = 0\n low_pri_idx = 0\n\n filtered_low_pri: List[PartitionTimeWindowStatus] = []\n\n # slice and dice the low pri time windows so there's no overlap with high pri\n while True:\n if low_pri_idx >= len(low_pri_time_windows):\n # reached end of materialized\n break\n if high_pri_idx >= len(high_pri_time_windows):\n # reached end of failed, add all remaining materialized bc there's no overlap\n filtered_low_pri.extend(low_pri_time_windows[low_pri_idx:])\n break\n\n low_pri_tw = low_pri_time_windows[low_pri_idx]\n high_pri_tw = high_pri_time_windows[high_pri_idx]\n\n if low_pri_tw.time_window.start < high_pri_tw.time_window.start:\n if low_pri_tw.time_window.end <= high_pri_tw.time_window.start:\n # low_pri_tw is entirely before high pri\n filtered_low_pri.append(low_pri_tw)\n low_pri_idx += 1\n else:\n # high pri cuts the low pri short\n filtered_low_pri.append(\n PartitionTimeWindowStatus(\n TimeWindow(\n low_pri_tw.time_window.start,\n high_pri_tw.time_window.start,\n ),\n low_pri_tw.status,\n )\n )\n\n if low_pri_tw.time_window.end > high_pri_tw.time_window.end:\n # the low pri time window will continue on the other end of the high pri\n # and get split in two. Modify low_pri[low_pri_idx] to be\n # the second half of the low pri time window. It will be added in the next iteration.\n # (don't add it now, because we need to check if it overlaps with the next high pri)\n low_pri_time_windows[low_pri_idx] = PartitionTimeWindowStatus(\n TimeWindow(high_pri_tw.time_window.end, low_pri_tw.time_window.end),\n low_pri_tw.status,\n )\n high_pri_idx += 1\n else:\n # the rest of the low pri time window is inside the high pri time window\n low_pri_idx += 1\n else:\n if low_pri_tw.time_window.start >= high_pri_tw.time_window.end:\n # high pri is entirely before low pri. The next high pri may overlap\n high_pri_idx += 1\n elif low_pri_tw.time_window.end <= high_pri_tw.time_window.end:\n # low pri is entirely within high pri, skip it\n low_pri_idx += 1\n else:\n # high pri cuts out the start of the low pri. It will continue on the other end.\n # Modify low_pri[low_pri_idx] to shorten the start. It will be added\n # in the next iteration. 
(don't add it now, because we need to check if it overlaps with the next high pri)\n low_pri_time_windows[low_pri_idx] = PartitionTimeWindowStatus(\n TimeWindow(high_pri_tw.time_window.end, low_pri_tw.time_window.end),\n low_pri_tw.status,\n )\n high_pri_idx += 1\n\n # combine the high pri windwos with the filtered low pri windows\n flattened_time_windows = high_pri_time_windows\n flattened_time_windows.extend(filtered_low_pri)\n flattened_time_windows.sort(key=lambda t: t.time_window.start)\n return flattened_time_windows\n\n\ndef fetch_flattened_time_window_ranges(\n subsets: Mapping[PartitionRangeStatus, TimeWindowPartitionsSubset]\n) -> Sequence[PartitionTimeWindowStatus]:\n """Given potentially overlapping subsets, return a flattened list of timewindows where the highest priority status wins\n on overlaps.\n """\n prioritized_subsets = sorted(\n [(status, subset) for status, subset in subsets.items()],\n key=lambda t: PARTITION_RANGE_STATUS_PRIORITY.index(t[0]),\n )\n\n # progressively add lower priority time windows to the list of higher priority time windows\n flattened_time_window_statuses = []\n for status, subset in prioritized_subsets:\n subset_time_window_statuses = [\n PartitionTimeWindowStatus(tw, status) for tw in subset.included_time_windows\n ]\n flattened_time_window_statuses = _flatten(\n flattened_time_window_statuses, subset_time_window_statuses\n )\n\n return flattened_time_window_statuses\n\n\ndef has_one_dimension_time_window_partitioning(\n partitions_def: Optional[PartitionsDefinition],\n) -> bool:\n from .multi_dimensional_partitions import MultiPartitionsDefinition\n\n if isinstance(partitions_def, TimeWindowPartitionsDefinition):\n return True\n elif isinstance(partitions_def, MultiPartitionsDefinition):\n time_window_dims = [\n dim\n for dim in partitions_def.partitions_defs\n if isinstance(dim.partitions_def, TimeWindowPartitionsDefinition)\n ]\n if len(time_window_dims) == 1:\n return True\n\n return False\n\n\ndef get_time_partitions_def(\n partitions_def: Optional[PartitionsDefinition],\n) -> Optional[TimeWindowPartitionsDefinition]:\n """For a given PartitionsDefinition, return the associated TimeWindowPartitionsDefinition if it\n exists.\n """\n from .multi_dimensional_partitions import MultiPartitionsDefinition\n\n if partitions_def is None:\n return None\n elif isinstance(partitions_def, TimeWindowPartitionsDefinition):\n return partitions_def\n elif isinstance(\n partitions_def, MultiPartitionsDefinition\n ) and has_one_dimension_time_window_partitioning(partitions_def):\n return cast(\n TimeWindowPartitionsDefinition, partitions_def.time_window_dimension.partitions_def\n )\n else:\n return None\n\n\ndef get_time_partition_key(\n partitions_def: Optional[PartitionsDefinition], partition_key: Optional[str]\n) -> str:\n from .multi_dimensional_partitions import MultiPartitionsDefinition\n\n if partitions_def is None or partition_key is None:\n check.failed(\n "Cannot get time partitions key from when partitions def is None or partition key is"\n " None"\n )\n elif isinstance(partitions_def, TimeWindowPartitionsDefinition):\n return partition_key\n elif isinstance(partitions_def, MultiPartitionsDefinition):\n return partitions_def.get_partition_key_from_str(partition_key).keys_by_dimension[\n partitions_def.time_window_dimension.name\n ]\n else:\n check.failed(f"Cannot get time partition from non-time partitions def {partitions_def}")\n
", "current_page_name": "_modules/dagster/_core/definitions/time_window_partitions", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.time_window_partitions"}, "unresolved_asset_job_definition": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.unresolved_asset_job_definition

\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom typing import TYPE_CHECKING, AbstractSet, Any, Mapping, NamedTuple, Optional, Sequence, Union\n\nimport dagster._check as check\nfrom dagster._annotations import deprecated\nfrom dagster._core.definitions import AssetKey\nfrom dagster._core.definitions.run_request import RunRequest\nfrom dagster._core.errors import DagsterInvalidDefinitionError\nfrom dagster._core.instance import DynamicPartitionsStore\n\nfrom .asset_layer import build_asset_selection_job\nfrom .config import ConfigMapping\nfrom .metadata import RawMetadataValue\n\nif TYPE_CHECKING:\n    from dagster._core.definitions import (\n        AssetSelection,\n        ExecutorDefinition,\n        HookDefinition,\n        JobDefinition,\n        PartitionedConfig,\n        PartitionsDefinition,\n        ResourceDefinition,\n    )\n    from dagster._core.definitions.asset_graph import InternalAssetGraph\n    from dagster._core.definitions.asset_selection import CoercibleToAssetSelection\n    from dagster._core.definitions.run_config import RunConfig\n\n\nclass UnresolvedAssetJobDefinition(\n    NamedTuple(\n        "_UnresolvedAssetJobDefinition",\n        [\n            ("name", str),\n            ("selection", "AssetSelection"),\n            (\n                "config",\n                Optional[Union[ConfigMapping, Mapping[str, Any], "PartitionedConfig"]],\n            ),\n            ("description", Optional[str]),\n            ("tags", Optional[Mapping[str, Any]]),\n            ("metadata", Optional[Mapping[str, RawMetadataValue]]),\n            ("partitions_def", Optional["PartitionsDefinition"]),\n            ("executor_def", Optional["ExecutorDefinition"]),\n            ("hooks", Optional[AbstractSet["HookDefinition"]]),\n        ],\n    )\n):\n    def __new__(\n        cls,\n        name: str,\n        selection: "AssetSelection",\n        config: Optional[\n            Union[ConfigMapping, Mapping[str, Any], "PartitionedConfig", "RunConfig"]\n        ] = None,\n        description: Optional[str] = None,\n        tags: Optional[Mapping[str, Any]] = None,\n        metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n        partitions_def: Optional["PartitionsDefinition"] = None,\n        executor_def: Optional["ExecutorDefinition"] = None,\n        hooks: Optional[AbstractSet["HookDefinition"]] = None,\n    ):\n        from dagster._core.definitions import (\n            AssetSelection,\n            ExecutorDefinition,\n            HookDefinition,\n            PartitionsDefinition,\n        )\n        from dagster._core.definitions.run_config import convert_config_input\n\n        return super(UnresolvedAssetJobDefinition, cls).__new__(\n            cls,\n            name=check.str_param(name, "name"),\n            selection=check.inst_param(selection, "selection", AssetSelection),\n            config=convert_config_input(config),\n            description=check.opt_str_param(description, "description"),\n            tags=check.opt_mapping_param(tags, "tags"),\n            metadata=check.opt_mapping_param(metadata, "metadata"),\n            partitions_def=check.opt_inst_param(\n                partitions_def, "partitions_def", PartitionsDefinition\n            ),\n            executor_def=check.opt_inst_param(executor_def, "partitions_def", ExecutorDefinition),\n            hooks=check.opt_nullable_set_param(hooks, "hooks", of_type=HookDefinition),\n        )\n\n    @deprecated(\n        breaking_version="2.0.0",\n        
additional_warn_text="Directly instantiate `RunRequest(partition_key=...)` instead.",\n    )\n    def run_request_for_partition(\n        self,\n        partition_key: str,\n        run_key: Optional[str] = None,\n        tags: Optional[Mapping[str, str]] = None,\n        asset_selection: Optional[Sequence[AssetKey]] = None,\n        run_config: Optional[Mapping[str, Any]] = None,\n        current_time: Optional[datetime] = None,\n        dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n    ) -> RunRequest:\n        """Creates a RunRequest object for a run that processes the given partition.\n\n        Args:\n            partition_key: The key of the partition to request a run for.\n            run_key (Optional[str]): A string key to identify this launched run. For sensors, ensures that\n                only one run is created per run key across all sensor evaluations.  For schedules,\n                ensures that one run is created per tick, across failure recoveries. Passing in a `None`\n                value means that a run will always be launched per evaluation.\n            tags (Optional[Dict[str, str]]): A dictionary of tags (string key-value pairs) to attach\n                to the launched run.\n            run_config (Optional[Mapping[str, Any]]: Configuration for the run. If the job has\n                a :py:class:`PartitionedConfig`, this value will override replace the config\n                provided by it.\n            current_time (Optional[datetime]): Used to determine which time-partitions exist.\n                Defaults to now.\n            dynamic_partitions_store (Optional[DynamicPartitionsStore]): The DynamicPartitionsStore\n                object that is responsible for fetching dynamic partitions. Required when the\n                partitions definition is a DynamicPartitionsDefinition with a name defined. Users\n                can pass the DagsterInstance fetched via `context.instance` to this argument.\n\n        Returns:\n            RunRequest: an object that requests a run to process the given partition.\n        """\n        from dagster._core.definitions.partition import (\n            DynamicPartitionsDefinition,\n            PartitionedConfig,\n        )\n\n        if not self.partitions_def:\n            check.failed("Called run_request_for_partition on a non-partitioned job")\n\n        partitioned_config = PartitionedConfig.from_flexible_config(\n            self.config, self.partitions_def\n        )\n\n        if (\n            isinstance(self.partitions_def, DynamicPartitionsDefinition)\n            and self.partitions_def.name\n        ):\n            # Do not support using run_request_for_partition with dynamic partitions,\n            # since this requires querying the instance once per run request for the\n            # existent dynamic partitions\n            check.failed(\n                "run_request_for_partition is not supported for dynamic partitions. 
Instead, use"\n                " RunRequest(partition_key=...)"\n            )\n\n        self.partitions_def.validate_partition_key(\n            partition_key,\n            current_time=current_time,\n            dynamic_partitions_store=dynamic_partitions_store,\n        )\n\n        run_config = (\n            run_config\n            if run_config is not None\n            else partitioned_config.get_run_config_for_partition_key(partition_key)\n        )\n        run_request_tags = {\n            **(tags or {}),\n            **partitioned_config.get_tags_for_partition_key(partition_key),\n        }\n\n        return RunRequest(\n            job_name=self.name,\n            run_key=run_key,\n            run_config=run_config,\n            tags=run_request_tags,\n            asset_selection=asset_selection,\n            partition_key=partition_key,\n        )\n\n    def resolve(\n        self,\n        asset_graph: "InternalAssetGraph",\n        default_executor_def: Optional["ExecutorDefinition"] = None,\n        resource_defs: Optional[Mapping[str, "ResourceDefinition"]] = None,\n    ) -> "JobDefinition":\n        """Resolve this UnresolvedAssetJobDefinition into a JobDefinition."""\n        assets = asset_graph.assets\n        source_assets = asset_graph.source_assets\n        selected_asset_keys = self.selection.resolve(asset_graph)\n        selected_asset_checks = self.selection.resolve_checks(asset_graph)\n\n        asset_keys_by_partitions_def = defaultdict(set)\n        for asset_key in selected_asset_keys:\n            partitions_def = asset_graph.get_partitions_def(asset_key)\n            if partitions_def is not None:\n                asset_keys_by_partitions_def[partitions_def].add(asset_key)\n\n        if len(asset_keys_by_partitions_def) > 1:\n            keys_by_partitions_def_str = "\\n".join(\n                f"{partitions_def}: {asset_keys}"\n                for partitions_def, asset_keys in asset_keys_by_partitions_def.items()\n            )\n            raise DagsterInvalidDefinitionError(\n                f"Multiple partitioned assets exist in assets job '{self.name}'. 
Selected assets"\n                " must have the same partitions definitions, but the selected assets have"\n                f" different partitions definitions: \\n{keys_by_partitions_def_str}"\n            )\n\n        inferred_partitions_def = (\n            next(iter(asset_keys_by_partitions_def.keys()))\n            if asset_keys_by_partitions_def\n            else None\n        )\n        if (\n            inferred_partitions_def\n            and self.partitions_def != inferred_partitions_def\n            and self.partitions_def is not None\n        ):\n            raise DagsterInvalidDefinitionError(\n                f"Job '{self.name}' received a partitions_def of {self.partitions_def}, but the"\n                f" selected assets {next(iter(asset_keys_by_partitions_def.values()))} have a"\n                f" non-matching partitions_def of {inferred_partitions_def}"\n            )\n\n        return build_asset_selection_job(\n            name=self.name,\n            assets=assets,\n            asset_checks=asset_graph.asset_checks,\n            config=self.config,\n            source_assets=source_assets,\n            description=self.description,\n            tags=self.tags,\n            metadata=self.metadata,\n            asset_selection=selected_asset_keys,\n            asset_check_selection=selected_asset_checks,\n            partitions_def=self.partitions_def if self.partitions_def else inferred_partitions_def,\n            executor_def=self.executor_def or default_executor_def,\n            hooks=self.hooks,\n            resource_defs=resource_defs,\n        )\n\n\n
[docs]def define_asset_job(\n name: str,\n selection: Optional["CoercibleToAssetSelection"] = None,\n config: Optional[\n Union[ConfigMapping, Mapping[str, Any], "PartitionedConfig", "RunConfig"]\n ] = None,\n description: Optional[str] = None,\n tags: Optional[Mapping[str, Any]] = None,\n metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n partitions_def: Optional["PartitionsDefinition"] = None,\n executor_def: Optional["ExecutorDefinition"] = None,\n hooks: Optional[AbstractSet["HookDefinition"]] = None,\n) -> UnresolvedAssetJobDefinition:\n """Creates a definition of a job which will either materialize a selection of assets or observe\n a selection of source assets. This will only be resolved to a JobDefinition once placed in a\n code location.\n\n Args:\n name (str):\n The name for the job.\n selection (Union[str, Sequence[str], Sequence[AssetKey], Sequence[Union[AssetsDefinition, SourceAsset]], AssetSelection]):\n The assets that will be materialized or observed when the job is run.\n\n The selected assets must all be included in the assets that are passed to the assets\n argument of the Definitions object that this job is included on.\n\n The string "my_asset*" selects my_asset and all downstream assets within the code\n location. A list of strings represents the union of all assets selected by strings\n within the list.\n\n The selection will be resolved to a set of assets when the location is loaded. If the\n selection resolves to all source assets, the created job will perform source asset\n observations. If the selection resolves to all regular assets, the created job will\n materialize assets. If the selection resolves to a mixed set of source assets and\n regular assets, an error will be thrown.\n\n config:\n Describes how the Job is parameterized at runtime.\n\n If no value is provided, then the schema for the job's run config is a standard\n format based on its ops and resources.\n\n If a dictionary is provided, then it must conform to the standard config schema, and\n it will be used as the job's run config for the job whenever the job is executed.\n The values provided will be viewable and editable in the Dagster UI, so be\n careful with secrets.\n\n If a :py:class:`ConfigMapping` object is provided, then the schema for the job's run config is\n determined by the config mapping, and the ConfigMapping, which should return\n configuration in the standard format to configure the job.\n tags (Optional[Mapping[str, Any]]):\n Arbitrary information that will be attached to the execution of the Job.\n Values that are not strings will be json encoded and must meet the criteria that\n `json.loads(json.dumps(value)) == value`. These tag values may be overwritten by tag\n values provided at invocation time.\n metadata (Optional[Mapping[str, RawMetadataValue]]): Arbitrary metadata about the job.\n Keys are displayed string labels, and values are one of the following: string, float,\n int, JSON-serializable dict, JSON-serializable list, and one of the data classes\n returned by a MetadataValue static method.\n description (Optional[str]):\n A description for the Job.\n partitions_def (Optional[PartitionsDefinition]):\n Defines the set of partitions for this job. All AssetDefinitions selected for this job\n must have a matching PartitionsDefinition. If no PartitionsDefinition is provided, the\n PartitionsDefinition will be inferred from the selected AssetDefinitions.\n executor_def (Optional[ExecutorDefinition]):\n How this Job will be executed. 
Defaults to :py:class:`multi_or_in_process_executor`,\n which can be switched between multi-process and in-process modes of execution. The\n default mode of execution is multi-process.\n\n\n Returns:\n UnresolvedAssetJobDefinition: The job, which can be placed inside a code location.\n\n Examples:\n .. code-block:: python\n\n # A job that targets all assets in the code location:\n @asset\n def asset1():\n ...\n\n defs = Definitions(\n assets=[asset1],\n jobs=[define_asset_job("all_assets")],\n )\n\n # A job that targets a single asset\n @asset\n def asset1():\n ...\n\n defs = Definitions(\n assets=[asset1],\n jobs=[define_asset_job("all_assets", selection=[asset1])],\n )\n\n # A job that targets all the assets in a group:\n defs = Definitions(\n assets=assets,\n jobs=[define_asset_job("marketing_job", selection=AssetSelection.groups("marketing"))],\n )\n\n @observable_source_asset\n def source_asset():\n ...\n\n # A job that observes a source asset:\n defs = Definitions(\n assets=assets,\n jobs=[define_asset_job("observation_job", selection=[source_asset])],\n )\n\n # Resources are supplied to the assets, not the job:\n @asset(required_resource_keys={"slack_client"})\n def asset1():\n ...\n\n defs = Definitions(\n assets=[asset1],\n jobs=[define_asset_job("all_assets")],\n resources={"slack_client": prod_slack_client},\n )\n\n """\n from dagster._core.definitions import AssetSelection\n\n # convert string-based selections to AssetSelection objects\n if selection is None:\n resolved_selection = AssetSelection.all()\n else:\n resolved_selection = AssetSelection.from_coercible(selection)\n\n return UnresolvedAssetJobDefinition(\n name=name,\n selection=resolved_selection,\n config=config,\n description=description,\n tags=tags,\n metadata=metadata,\n partitions_def=partitions_def,\n executor_def=executor_def,\n hooks=hooks,\n )
\n
", "current_page_name": "_modules/dagster/_core/definitions/unresolved_asset_job_definition", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.unresolved_asset_job_definition"}, "utils": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.utils

\nimport keyword\nimport os\nimport re\nfrom glob import glob\nfrom typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, cast\n\nimport yaml\n\nimport dagster._check as check\nimport dagster._seven as seven\nfrom dagster._core.errors import DagsterInvalidDefinitionError, DagsterInvariantViolationError\nfrom dagster._core.storage.tags import check_reserved_tags\nfrom dagster._utils.yaml_utils import merge_yaml_strings, merge_yamls\n\nDEFAULT_OUTPUT = "result"\nDEFAULT_GROUP_NAME = "default"  # asset group_name used when none is provided\nDEFAULT_IO_MANAGER_KEY = "io_manager"\n\nDISALLOWED_NAMES = set(\n    [\n        "context",\n        "conf",\n        "config",\n        "meta",\n        "arg_dict",\n        "dict",\n        "input_arg_dict",\n        "output_arg_dict",\n        "int",\n        "str",\n        "float",\n        "bool",\n        "input",\n        "output",\n        "type",\n    ]\n    + list(keyword.kwlist)  # just disallow all python keywords\n)\n\nVALID_NAME_REGEX_STR = r"^[A-Za-z0-9_]+$"\nVALID_NAME_REGEX = re.compile(VALID_NAME_REGEX_STR)\n\n\nclass NoValueSentinel:\n    """Sentinel value to distinguish unset from None."""\n\n\ndef has_valid_name_chars(name: str) -> bool:\n    return bool(VALID_NAME_REGEX.match(name))\n\n\ndef check_valid_name(name: str, allow_list: Optional[List[str]] = None) -> str:\n    check.str_param(name, "name")\n\n    if allow_list and name in allow_list:\n        return name\n\n    if name in DISALLOWED_NAMES:\n        raise DagsterInvalidDefinitionError(\n            f'"{name}" is not a valid name in Dagster. It conflicts with a Dagster or python'\n            " reserved keyword."\n        )\n\n    check_valid_chars(name)\n\n    check.invariant(is_valid_name(name))\n    return name\n\n\ndef check_valid_chars(name: str):\n    if not has_valid_name_chars(name):\n        raise DagsterInvalidDefinitionError(\n            f'"{name}" is not a valid name in Dagster. Names must be in regex'\n            f" {VALID_NAME_REGEX_STR}."\n        )\n\n\ndef is_valid_name(name: str) -> bool:\n    check.str_param(name, "name")\n\n    return name not in DISALLOWED_NAMES and has_valid_name_chars(name)\n\n\ndef _kv_str(key: object, value: object) -> str:\n    return f'{key}="{value!r}"'\n\n\ndef struct_to_string(name: str, **kwargs: object) -> str:\n    # Sort the kwargs to ensure consistent representations across Python versions\n    props_str = ", ".join([_kv_str(key, value) for key, value in sorted(kwargs.items())])\n    return f"{name}({props_str})"\n\n\ndef validate_tags(\n    tags: Optional[Mapping[str, Any]], allow_reserved_tags: bool = True\n) -> Mapping[str, str]:\n    valid_tags: Dict[str, str] = {}\n    for key, value in check.opt_mapping_param(tags, "tags", key_type=str).items():\n        if not isinstance(value, str):\n            valid = False\n            err_reason = f'Could not JSON encode value "{value}"'\n            str_val = None\n            try:\n                str_val = seven.json.dumps(value)\n                err_reason = (\n                    'JSON encoding "{json}" of value "{val}" is not equivalent to original value'\n                    .format(json=str_val, val=value)\n                )\n\n                valid = seven.json.loads(str_val) == value\n            except Exception:\n                pass\n\n            if not valid:\n                raise DagsterInvalidDefinitionError(\n                    f'Invalid value for tag "{key}", {err_reason}. 
Tag values must be strings '\n                    "or meet the constraint that json.loads(json.dumps(value)) == value."\n                )\n\n            valid_tags[key] = str_val  # type: ignore  # (possible none)\n        else:\n            valid_tags[key] = value\n\n    if not allow_reserved_tags:\n        check_reserved_tags(valid_tags)\n\n    return valid_tags\n\n\ndef validate_group_name(group_name: Optional[str]) -> str:\n    """Ensures a string name is valid and returns a default if no name provided."""\n    if group_name:\n        check_valid_chars(group_name)\n        return group_name\n    return DEFAULT_GROUP_NAME\n\n\n
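The rule enforced by validate_tags() above reduces to a JSON round trip for non-string values. A small illustration of that constraint; the tag values shown are hypothetical.

# Illustration of the tag-value constraint: non-string values are accepted
# only if json.loads(json.dumps(value)) == value, and are stored as the JSON string.
import json

ok = {"max_retries": 3}            # json.loads(json.dumps(3)) == 3, stored as "3"
not_ok = {"when": (2023, 10, 12)}  # tuples decode back as lists, so the round trip fails

for tags in (ok, not_ok):
    for key, value in tags.items():
        round_trips = json.loads(json.dumps(value)) == value
        print(key, "valid" if isinstance(value, str) or round_trips else "invalid")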
[docs]def config_from_files(config_files: Sequence[str]) -> Mapping[str, Any]:\n """Constructs run config from YAML files.\n\n Args:\n config_files (List[str]): List of paths or glob patterns for yaml files\n to load and parse as the run config.\n\n Returns:\n Dict[str, Any]: A run config dictionary constructed from provided YAML files.\n\n Raises:\n FileNotFoundError: When a config file produces no results\n DagsterInvariantViolationError: When one of the YAML files is invalid and has a parse\n error.\n """\n config_files = check.opt_sequence_param(config_files, "config_files")\n\n filenames = []\n for file_glob in config_files or []:\n globbed_files = glob(file_glob)\n if not globbed_files:\n raise DagsterInvariantViolationError(\n f'File or glob pattern "{file_glob}" for "config_files" produced no results.'\n )\n\n filenames += [os.path.realpath(globbed_file) for globbed_file in globbed_files]\n\n try:\n run_config = merge_yamls(filenames)\n except yaml.YAMLError as err:\n raise DagsterInvariantViolationError(\n f"Encountered error attempting to parse yaml. Parsing files {filenames} "\n f"loaded by file/patterns {config_files}."\n ) from err\n\n return check.is_dict(cast(Dict[str, object], run_config), key_type=str)
\n\n\n
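A hedged usage sketch for config_from_files: the YAML paths are hypothetical and are expected to supply the ops config for the op below (ops.greet.config.greeting).

# Sketch: building run config for an in-process execution from YAML files.
from dagster import config_from_files, job, op

@op(config_schema={"greeting": str})
def greet(context):
    context.log.info(context.op_config["greeting"])

@job
def greeting_job():
    greet()

# Paths/globs are hypothetical; each must resolve to at least one file.
run_config = config_from_files(["run_config/base.yaml", "run_config/overrides_*.yaml"])
result = greeting_job.execute_in_process(run_config=run_config)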
[docs]def config_from_yaml_strings(yaml_strings: Sequence[str]) -> Mapping[str, Any]:\n """Static constructor for run configs from YAML strings.\n\n Args:\n yaml_strings (List[str]): List of yaml strings to parse as the run config.\n\n Returns:\n Dict[Str, Any]: A run config dictionary constructed from the provided yaml strings\n\n Raises:\n DagsterInvariantViolationError: When one of the YAML documents is invalid and has a\n parse error.\n """\n yaml_strings = check.sequence_param(yaml_strings, "yaml_strings", of_type=str)\n\n try:\n run_config = merge_yaml_strings(yaml_strings)\n except yaml.YAMLError as err:\n raise DagsterInvariantViolationError(\n f"Encountered error attempting to parse yaml. Parsing YAMLs {yaml_strings} "\n ) from err\n\n return check.is_dict(cast(Dict[str, object], run_config), key_type=str)
\n\n\n
[docs]def config_from_pkg_resources(pkg_resource_defs: Sequence[Tuple[str, str]]) -> Mapping[str, Any]:\n """Load a run config from a package resource, using :py:func:`pkg_resources.resource_string`.\n\n Example:\n .. code-block:: python\n\n config_from_pkg_resources(\n pkg_resource_defs=[\n ('dagster_examples.airline_demo.environments', 'local_base.yaml'),\n ('dagster_examples.airline_demo.environments', 'local_warehouse.yaml'),\n ],\n )\n\n\n Args:\n pkg_resource_defs (List[(str, str)]): List of pkg_resource modules/files to\n load as the run config.\n\n Returns:\n Dict[Str, Any]: A run config dictionary constructed from the provided yaml strings\n\n Raises:\n DagsterInvariantViolationError: When one of the YAML documents is invalid and has a\n parse error.\n """\n import pkg_resources # expensive, import only on use\n\n pkg_resource_defs = check.sequence_param(pkg_resource_defs, "pkg_resource_defs", of_type=tuple)\n\n try:\n yaml_strings = [\n pkg_resources.resource_string(*pkg_resource_def).decode("utf-8")\n for pkg_resource_def in pkg_resource_defs\n ]\n except (ModuleNotFoundError, FileNotFoundError, UnicodeDecodeError) as err:\n raise DagsterInvariantViolationError(\n "Encountered error attempting to parse yaml. Loading YAMLs from "\n f"package resources {pkg_resource_defs}."\n ) from err\n\n return config_from_yaml_strings(yaml_strings=yaml_strings)
\n
", "current_page_name": "_modules/dagster/_core/definitions/utils", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.utils"}, "version_strategy": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.version_strategy

\nimport hashlib\nimport inspect\nfrom abc import ABC, abstractmethod\nfrom typing import TYPE_CHECKING, Any, NamedTuple, Optional\n\nfrom dagster._annotations import public\n\nif TYPE_CHECKING:\n    from .op_definition import OpDefinition\n    from .resource_definition import ResourceDefinition\n\n\n
[docs]class OpVersionContext(NamedTuple):\n """Provides execution-time information for computing the version for an op.\n\n Attributes:\n op_def (OpDefinition): The definition of the op to compute a version for.\n op_config (Any): The parsed config to be passed to the op during execution.\n """\n\n op_def: "OpDefinition"\n op_config: Any
\n\n\n
[docs]class ResourceVersionContext(NamedTuple):\n """Provides execution-time information for computing the version for a resource.\n\n Attributes:\n resource_def (ResourceDefinition): The definition of the resource whose version will be computed.\n resource_config (Any): The parsed config to be passed to the resource during execution.\n """\n\n resource_def: "ResourceDefinition"\n resource_config: Any
\n\n\n
[docs]class VersionStrategy(ABC):\n """Abstract class for defining a strategy to version ops and resources.\n\n When subclassing, `get_op_version` must be implemented, and\n `get_resource_version` can be optionally implemented.\n\n `get_op_version` should ingest an OpVersionContext, and `get_resource_version` should ingest a\n ResourceVersionContext. From that, each synthesizes a unique string called a `version`, which\n will be tagged to outputs of that op in the job. Providing a `VersionStrategy` instance to a\n job will enable memoization on that job, such that only steps whose\n outputs do not have an up-to-date version will run.\n """\n\n

[docs] @public\n @abstractmethod\n def get_op_version(self, context: OpVersionContext) -> str:\n """Computes a version for an op.\n\n Args:\n context (OpVersionContext): The context for computing the version.\n\n Returns:\n str: The version for the op.\n """\n raise NotImplementedError()
\n\n
[docs] @public\n def get_resource_version(self, context: ResourceVersionContext) -> Optional[str]:\n """Computes a version for a resource.\n\n Args:\n context (ResourceVersionContext): The context for computing the version.\n\n Returns:\n Optional[str]: The version for the resource. If None, the resource will not be\n memoized.\n """\n return None
\n\n\n
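A sketch of a custom strategy built only on the context attributes documented above (op_def, op_config): the version is a hash of the op name plus its parsed config, so a config change invalidates memoized outputs. The class name is hypothetical, and the context types are imported from the private module shown on this page.

# Sketch of a custom VersionStrategy (names are illustrative).
import hashlib
from typing import Optional

from dagster import VersionStrategy
from dagster._core.definitions.version_strategy import (
    OpVersionContext,
    ResourceVersionContext,
)

class NameAndConfigVersionStrategy(VersionStrategy):
    def get_op_version(self, context: OpVersionContext) -> str:
        payload = f"{context.op_def.name}:{context.op_config!r}"
        return hashlib.sha1(payload.encode("utf-8")).hexdigest()

    def get_resource_version(self, context: ResourceVersionContext) -> Optional[str]:
        # Returning None means the resource is not memoized.
        return None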
[docs]class SourceHashVersionStrategy(VersionStrategy):\n """VersionStrategy that checks for changes to the source code of ops and resources.\n\n Only checks for changes within the immediate body of the op/resource's\n decorated function (or compute function, if the op/resource was\n constructed directly from a definition).\n """\n\n def _get_source_hash(self, fn):\n code_as_str = inspect.getsource(fn)\n return hashlib.sha1(code_as_str.encode("utf-8")).hexdigest()\n\n
[docs] @public\n def get_op_version(self, context: OpVersionContext) -> str:\n """Computes a version for an op by hashing its source code.\n\n Args:\n context (OpVersionContext): The context for computing the version.\n\n Returns:\n str: The version for the op.\n """\n compute_fn = context.op_def.compute_fn\n if callable(compute_fn):\n return self._get_source_hash(compute_fn)\n else:\n return self._get_source_hash(compute_fn.decorated_fn)
\n\n
[docs] @public\n def get_resource_version(self, context: ResourceVersionContext) -> Optional[str]:\n """Computes a version for a resource by hashing its source code.\n\n Args:\n context (ResourceVersionContext): The context for computing the version.\n\n Returns:\n Optional[str]: The version for the resource. If None, the resource will not be\n memoized.\n """\n return self._get_source_hash(context.resource_def.resource_fn)
\n
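A short usage sketch for the built-in strategy, assuming the version_strategy argument on @job (the memoization API); memoized step skipping additionally depends on an IO manager that supports memoization.

# Sketch: attaching SourceHashVersionStrategy to a job (names are illustrative).
from dagster import SourceHashVersionStrategy, job, op

@op
def compute_answer():
    return 42

@job(version_strategy=SourceHashVersionStrategy())
def memoized_job():
    compute_answer()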
", "current_page_name": "_modules/dagster/_core/definitions/version_strategy", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.version_strategy"}}, "errors": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.errors

\n"""Core Dagster error classes.\n\nAll errors thrown by the Dagster framework inherit from :py:class:`~dagster.DagsterError`. Users\nshould not subclass this base class for their own exceptions.\n\nThere is another exception base class, :py:class:`~dagster.DagsterUserCodeExecutionError`, which is\nused by the framework in concert with the :py:func:`~dagster._core.errors.user_code_error_boundary`.\n\nDagster uses this construct to wrap user code into which it calls. User code can perform arbitrary\ncomputations and may itself throw exceptions. The error boundary catches these user code-generated\nexceptions, and then reraises them wrapped in a subclass of\n:py:class:`~dagster.DagsterUserCodeExecutionError`.\n\nThe wrapped exceptions include additional context for the original exceptions, injected by the\nDagster runtime.\n"""\n\nimport sys\nfrom contextlib import contextmanager\nfrom typing import TYPE_CHECKING, Any, Callable, Iterator, Optional, Type\n\nimport dagster._check as check\nfrom dagster._utils.interrupts import raise_interrupts_as\n\nif TYPE_CHECKING:\n    from dagster._core.log_manager import DagsterLogManager\n\n\nclass DagsterExecutionInterruptedError(BaseException):\n    """Pipeline execution was interrupted during the execution process.\n\n    Just like KeyboardInterrupt this inherits from BaseException\n    as to not be accidentally caught by code that catches Exception\n    and thus prevent the interpreter from exiting.\n    """\n\n\n
[docs]class DagsterError(Exception):\n """Base class for all errors thrown by the Dagster framework.\n\n Users should not subclass this base class for their own exceptions.\n """\n\n @property\n def is_user_code_error(self):\n """Returns true if this error is attributable to user code."""\n return False
\n\n\n
[docs]class DagsterInvalidDefinitionError(DagsterError):\n """Indicates that the rules for a definition have been violated by the user."""
\n\n\nclass DagsterInvalidObservationError(DagsterError):\n """Indicates that an invalid value was returned from a source asset observation function."""\n\n\n
[docs]class DagsterInvalidSubsetError(DagsterError):\n """Indicates that a subset of a pipeline is invalid because either:\n - One or more ops in the specified subset do not exist on the job.\n - The subset produces an invalid job.\n """
\n\n\nclass DagsterInvalidDeserializationVersionError(DagsterError):\n """Indicates that a serialized value has an unsupported version and cannot be deserialized."""\n\n\nPYTHONIC_CONFIG_ERROR_VERBIAGE = """\nThis config type can be a:\n - Python primitive type\n - int, float, bool, str, list\n - A Python Dict or List type containing other valid types\n - Custom data classes extending dagster.Config\n - A Pydantic discriminated union type (https://docs.pydantic.dev/usage/types/#discriminated-unions-aka-tagged-unions)\n"""\n\nPYTHONIC_RESOURCE_ADDITIONAL_TYPES = """\n\nIf this config type represents a resource dependency, its annotation must either:\n - Extend dagster.ConfigurableResource, dagster.ConfigurableIOManager, or\n - Be wrapped in a ResourceDependency annotation, e.g. ResourceDependency[{invalid_type_str}]\n"""\n\n\ndef _generate_pythonic_config_error_message(\n config_class: Optional[Type],\n field_name: Optional[str],\n invalid_type: Any,\n is_resource: bool = False,\n) -> str:\n invalid_type_name = getattr(invalid_type, "__name__", "<my type>")\n pythonic_config_error_verbiage = (\n PYTHONIC_CONFIG_ERROR_VERBIAGE + (PYTHONIC_RESOURCE_ADDITIONAL_TYPES if is_resource else "")\n ).format(invalid_type_str=invalid_type_name)\n\n return ("""\nError defining Dagster config class{config_class}{field_name}.\nUnable to resolve config type {invalid_type} to a supported Dagster config type.\n\n{PYTHONIC_CONFIG_ERROR_VERBIAGE}""").format(\n config_class=f" {config_class!r}" if config_class else "",\n field_name=f" on field '{field_name}'" if field_name else "",\n invalid_type=repr(invalid_type),\n PYTHONIC_CONFIG_ERROR_VERBIAGE=pythonic_config_error_verbiage,\n )\n\n\nclass DagsterInvalidPythonicConfigDefinitionError(DagsterError):\n """Indicates that you have attempted to construct a Pythonic config or resource class with an invalid value."""\n\n def __init__(\n self,\n config_class: Optional[Type],\n field_name: Optional[str],\n invalid_type: Any,\n is_resource: bool = False,\n **kwargs,\n ):\n self.invalid_type = invalid_type\n self.field_name = field_name\n self.config_class = config_class\n super(DagsterInvalidPythonicConfigDefinitionError, self).__init__(\n _generate_pythonic_config_error_message(\n config_class=config_class,\n field_name=field_name,\n invalid_type=invalid_type,\n is_resource=is_resource,\n ),\n **kwargs,\n )\n\n\nclass DagsterInvalidDagsterTypeInPythonicConfigDefinitionError(DagsterError):\n """Indicates that you have attempted to construct a Pythonic config or resource class with a DagsterType\n annotated field.\n """\n\n def __init__(\n self,\n config_class_name: str,\n field_name: Optional[str],\n **kwargs,\n ):\n self.field_name = field_name\n super(DagsterInvalidDagsterTypeInPythonicConfigDefinitionError, self).__init__(\n f"""Error defining Dagster config class '{config_class_name}' on field '{field_name}'. DagsterTypes cannot be used to annotate a config type. DagsterType is meant only for type checking and coercion in op and asset inputs and outputs.\n{PYTHONIC_CONFIG_ERROR_VERBIAGE}""",\n **kwargs,\n )\n\n\nCONFIG_ERROR_VERBIAGE = """\nThis value can be a:\n - Field\n - Python primitive types that resolve to dagster config types\n - int, float, bool, str, list.\n - A dagster config type: Int, Float, Bool, Array, Optional, Selector, Shape, Permissive, Map\n - A bare python dictionary, which is wrapped in Field(Shape(...)). 
Any values\n in the dictionary get resolved by the same rules, recursively.\n - A python list with a single entry that can resolve to a type, e.g. [int]\n"""\n\n\n
[docs]class DagsterInvalidConfigDefinitionError(DagsterError):\n """Indicates that you have attempted to construct a config with an invalid value.\n\n Acceptable values for config types are any of:\n 1. A Python primitive type that resolves to a Dagster config type\n (:py:class:`~python:int`, :py:class:`~python:float`, :py:class:`~python:bool`,\n :py:class:`~python:str`, or :py:class:`~python:list`).\n\n 2. A Dagster config type: :py:data:`~dagster.Int`, :py:data:`~dagster.Float`,\n :py:data:`~dagster.Bool`, :py:data:`~dagster.String`,\n :py:data:`~dagster.StringSource`, :py:data:`~dagster.Any`,\n :py:class:`~dagster.Array`, :py:data:`~dagster.Noneable`, :py:data:`~dagster.Enum`,\n :py:class:`~dagster.Selector`, :py:class:`~dagster.Shape`, or\n :py:class:`~dagster.Permissive`.\n\n 3. A bare python dictionary, which will be automatically wrapped in\n :py:class:`~dagster.Shape`. Values of the dictionary are resolved recursively\n according to the same rules.\n\n 4. A bare python list of length one which itself is config type.\n Becomes :py:class:`Array` with list element as an argument.\n\n 5. An instance of :py:class:`~dagster.Field`.\n """\n\n def __init__(self, original_root, current_value, stack, reason=None, **kwargs):\n self.original_root = original_root\n self.current_value = current_value\n self.stack = stack\n super(DagsterInvalidConfigDefinitionError, self).__init__(\n (\n "Error defining config. Original value passed: {original_root}. "\n "{stack_str}{current_value} "\n "cannot be resolved.{reason_str}"\n + CONFIG_ERROR_VERBIAGE\n ).format(\n original_root=repr(original_root),\n stack_str="Error at stack path :" + ":".join(stack) + ". " if stack else "",\n current_value=repr(current_value),\n reason_str=f" Reason: {reason}." if reason else "",\n ),\n **kwargs,\n )
\n\n\n
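The acceptable config forms enumerated above, shown together in one hypothetical op's config_schema as a small sketch.

# Sketch: a bare dict becomes Shape, a one-element list becomes Array,
# primitives map to config types, and Field can be used directly.
from dagster import Field, op

@op(
    config_schema={
        "limit": int,                  # primitive -> Int
        "tags": [str],                 # one-element list -> Array(str)
        "options": {"verbose": bool},  # bare dict -> Shape({"verbose": Bool})
        "name": Field(str, default_value="job"),
    }
)
def configured_op(context):
    context.log.info(str(context.op_config))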
[docs]class DagsterInvariantViolationError(DagsterError):\n """Indicates the user has violated a well-defined invariant that can only be enforced\n at runtime.\n """
\n\n\n
[docs]class DagsterExecutionStepNotFoundError(DagsterError):\n """Thrown when the user specifies execution step keys that do not exist."""\n\n def __init__(self, *args, **kwargs):\n self.step_keys = check.list_param(kwargs.pop("step_keys"), "step_keys", str)\n super(DagsterExecutionStepNotFoundError, self).__init__(*args, **kwargs)
\n\n\nclass DagsterExecutionPlanSnapshotNotFoundError(DagsterError):\n """Thrown when an expected execution plan snapshot could not be found on a PipelineRun."""\n\n\n
[docs]class DagsterRunNotFoundError(DagsterError):\n """Thrown when a run cannot be found in run storage."""\n\n def __init__(self, *args, **kwargs):\n self.invalid_run_id = check.str_param(kwargs.pop("invalid_run_id"), "invalid_run_id")\n super(DagsterRunNotFoundError, self).__init__(*args, **kwargs)
\n\n\n
[docs]class DagsterStepOutputNotFoundError(DagsterError):\n """Indicates that previous step outputs required for an execution step to proceed are not\n available.\n """\n\n def __init__(self, *args, **kwargs):\n self.step_key = check.str_param(kwargs.pop("step_key"), "step_key")\n self.output_name = check.str_param(kwargs.pop("output_name"), "output_name")\n super(DagsterStepOutputNotFoundError, self).__init__(*args, **kwargs)
\n\n\n@contextmanager\ndef raise_execution_interrupts() -> Iterator[None]:\n with raise_interrupts_as(DagsterExecutionInterruptedError):\n yield\n\n\n
[docs]@contextmanager\ndef user_code_error_boundary(\n error_cls: Type["DagsterUserCodeExecutionError"],\n msg_fn: Callable[[], str],\n log_manager: Optional["DagsterLogManager"] = None,\n **kwargs: object,\n) -> Iterator[None]:\n """Wraps the execution of user-space code in an error boundary. This places a uniform\n policy around any user code invoked by the framework. This ensures that all user\n errors are wrapped in an exception derived from DagsterUserCodeExecutionError,\n and that the original stack trace of the user error is preserved, so that it\n can be reported without confusing framework code in the stack trace, if a\n tool author wishes to do so.\n\n Examples:\n .. code-block:: python\n\n with user_code_error_boundary(\n # Pass a class that inherits from DagsterUserCodeExecutionError\n DagsterExecutionStepExecutionError,\n # Pass a function that produces a message\n "Error occurred during step execution"\n ):\n call_user_provided_function()\n\n """\n check.callable_param(msg_fn, "msg_fn")\n check.class_param(error_cls, "error_cls", superclass=DagsterUserCodeExecutionError)\n\n with raise_execution_interrupts():\n if log_manager:\n log_manager.begin_python_log_capture()\n try:\n yield\n except DagsterError as de:\n # The system has thrown an error that is part of the user-framework contract\n raise de\n except Exception as e:\n # An exception has been thrown by user code and computation should cease\n # with the error reported further up the stack\n raise error_cls(\n msg_fn(), user_exception=e, original_exc_info=sys.exc_info(), **kwargs\n ) from e\n finally:\n if log_manager:\n log_manager.end_python_log_capture()
\n\n\n
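A usage sketch for the boundary, grounded in the signature above; note that msg_fn must be a callable (see check.callable_param), so a lambda is passed rather than a bare string. The helper name and its arguments are hypothetical.

# Sketch: wrapping a user-supplied callable so any exception it raises is
# re-raised as a DagsterUserCodeExecutionError subclass, with the extra kwargs
# forwarded to the error class constructor as shown above.
from dagster._core.errors import (
    DagsterExecutionStepExecutionError,
    user_code_error_boundary,
)

def run_user_fn(user_fn, step_key, op_name, op_def_name):
    with user_code_error_boundary(
        DagsterExecutionStepExecutionError,
        lambda: f"Error occurred while executing {op_name}",
        step_key=step_key,
        op_name=op_name,
        op_def_name=op_def_name,
    ):
        return user_fn()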
[docs]class DagsterUserCodeExecutionError(DagsterError):\n """This is the base class for any exception that is meant to wrap an\n :py:class:`~python:Exception` thrown by user code. It wraps that existing user code.\n The ``original_exc_info`` argument to the constructor is meant to be a tuple of the type\n returned by :py:func:`sys.exc_info <python:sys.exc_info>` at the call site of the constructor.\n\n Users should not subclass this base class for their own exceptions and should instead throw\n freely from user code. User exceptions will be automatically wrapped and rethrown.\n """\n\n def __init__(self, *args, **kwargs):\n # original_exc_info should be gotten from a sys.exc_info() call at the\n # callsite inside of the exception handler. this will allow consuming\n # code to *re-raise* the user error in its original format\n # for cleaner error reporting that does not have framework code in it\n user_exception = check.inst_param(kwargs.pop("user_exception"), "user_exception", Exception)\n original_exc_info = check.tuple_param(kwargs.pop("original_exc_info"), "original_exc_info")\n\n check.invariant(original_exc_info[0] is not None)\n\n super(DagsterUserCodeExecutionError, self).__init__(args[0], *args[1:], **kwargs)\n\n self.user_exception = check.opt_inst_param(user_exception, "user_exception", Exception)\n self.original_exc_info = original_exc_info\n\n @property\n def is_user_code_error(self) -> bool:\n return True
\n\n\n
[docs]class DagsterTypeCheckError(DagsterUserCodeExecutionError):\n """Indicates an error in the op type system at runtime. E.g. an op receives an\n unexpected input, or produces an output that does not match the type of the output definition.\n """
\n\n\nclass DagsterExecutionLoadInputError(DagsterUserCodeExecutionError):\n """Indicates an error occurred while loading an input for a step."""\n\n def __init__(self, *args, **kwargs):\n self.step_key = check.str_param(kwargs.pop("step_key"), "step_key")\n self.input_name = check.str_param(kwargs.pop("input_name"), "input_name")\n super(DagsterExecutionLoadInputError, self).__init__(*args, **kwargs)\n\n\nclass DagsterExecutionHandleOutputError(DagsterUserCodeExecutionError):\n """Indicates an error occurred while handling an output for a step."""\n\n def __init__(self, *args, **kwargs):\n self.step_key = check.str_param(kwargs.pop("step_key"), "step_key")\n self.output_name = check.str_param(kwargs.pop("output_name"), "output_name")\n super(DagsterExecutionHandleOutputError, self).__init__(*args, **kwargs)\n\n\n
[docs]class DagsterExecutionStepExecutionError(DagsterUserCodeExecutionError):\n """Indicates an error occurred while executing the body of an execution step."""\n\n def __init__(self, *args, **kwargs):\n self.step_key = check.str_param(kwargs.pop("step_key"), "step_key")\n self.op_name = check.str_param(kwargs.pop("op_name"), "op_name")\n self.op_def_name = check.str_param(kwargs.pop("op_def_name"), "op_def_name")\n super(DagsterExecutionStepExecutionError, self).__init__(*args, **kwargs)
\n\n\n
[docs]class DagsterResourceFunctionError(DagsterUserCodeExecutionError):\n """Indicates an error occurred while executing the body of the ``resource_fn`` in a\n :py:class:`~dagster.ResourceDefinition` during resource initialization.\n """
\n\n\n
[docs]class DagsterConfigMappingFunctionError(DagsterUserCodeExecutionError):\n """Indicates that an unexpected error occurred while executing the body of a config mapping\n function defined in a :py:class:`~dagster.JobDefinition` or `~dagster.GraphDefinition` during\n config parsing.\n """
\n\n\nclass DagsterTypeLoadingError(DagsterUserCodeExecutionError):\n """Indicates that an unexpected error occurred while executing the body of a type load\n function defined in a :py:class:`~dagster.DagsterTypeLoader` during loading of a custom type.\n """\n\n\n
[docs]class DagsterUnknownResourceError(DagsterError, AttributeError):\n # inherits from AttributeError as it is raised within a __getattr__ call... used to support\n # object hasattr method\n """Indicates that an unknown resource was accessed in the body of an execution step. May often\n happen by accessing a resource in the compute function of an op without first supplying the\n op with the correct `required_resource_keys` argument.\n """\n\n def __init__(self, resource_name, *args, **kwargs):\n self.resource_name = check.str_param(resource_name, "resource_name")\n msg = (\n f"Unknown resource `{resource_name}`. Specify `{resource_name}` as a required resource "\n "on the compute / config function that accessed it."\n )\n super(DagsterUnknownResourceError, self).__init__(msg, *args, **kwargs)
\n\n\nclass DagsterInvalidInvocationError(DagsterError):\n """Indicates that an error has occurred when an op has been invoked, but before the actual\n core compute has been reached.\n """\n\n\n
[docs]class DagsterInvalidConfigError(DagsterError):\n """Thrown when provided config is invalid (does not type check against the relevant config\n schema).\n """\n\n def __init__(self, preamble, errors, config_value, *args, **kwargs):\n from dagster._config import EvaluationError\n\n check.str_param(preamble, "preamble")\n self.errors = check.list_param(errors, "errors", of_type=EvaluationError)\n self.config_value = config_value\n\n error_msg = preamble\n error_messages = []\n\n for i_error, error in enumerate(self.errors):\n error_messages.append(error.message)\n error_msg += f"\\n Error {i_error + 1}: {error.message}"\n\n self.message = error_msg\n self.error_messages = error_messages\n\n super(DagsterInvalidConfigError, self).__init__(error_msg, *args, **kwargs)
\n\n\n
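A sketch of where this error typically surfaces: executing a job in process with run config that fails schema validation. The job and op names are hypothetical.

# Sketch: invalid run config raises DagsterInvalidConfigError before execution.
from dagster import DagsterInvalidConfigError, job, op

@op(config_schema={"limit": int})
def fetch(context):
    context.log.info(f"limit={context.op_config['limit']}")

@job
def fetch_job():
    fetch()

try:
    fetch_job.execute_in_process(
        run_config={"ops": {"fetch": {"config": {"limit": "not-an-int"}}}}
    )
except DagsterInvalidConfigError as err:
    print(err.message)  # preamble plus one numbered entry per evaluation error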
[docs]class DagsterUnmetExecutorRequirementsError(DagsterError):\n """Indicates the resolved executor is incompatible with the state of other systems\n such as the :py:class:`~dagster._core.instance.DagsterInstance` or system storage configuration.\n """
\n\n\n
[docs]class DagsterSubprocessError(DagsterError):\n """An exception has occurred in one or more of the child processes dagster manages.\n This error forwards the message and stack trace for all of the collected errors.\n """\n\n def __init__(self, *args, **kwargs):\n from dagster._utils.error import SerializableErrorInfo\n\n self.subprocess_error_infos = check.list_param(\n kwargs.pop("subprocess_error_infos"), "subprocess_error_infos", SerializableErrorInfo\n )\n super(DagsterSubprocessError, self).__init__(*args, **kwargs)
\n\n\nclass DagsterUserCodeUnreachableError(DagsterError):\n """Dagster was unable to reach a user code server to fetch information about user code."""\n\n\nclass DagsterUserCodeProcessError(DagsterError):\n """An exception has occurred in a user code process that the host process raising this error\n was communicating with.\n """\n\n @staticmethod\n def from_error_info(error_info):\n from dagster._utils.error import SerializableErrorInfo\n\n check.inst_param(error_info, "error_info", SerializableErrorInfo)\n return DagsterUserCodeProcessError(\n error_info.to_string(), user_code_process_error_infos=[error_info]\n )\n\n def __init__(self, *args, **kwargs):\n from dagster._utils.error import SerializableErrorInfo\n\n self.user_code_process_error_infos = check.list_param(\n kwargs.pop("user_code_process_error_infos"),\n "user_code_process_error_infos",\n SerializableErrorInfo,\n )\n super(DagsterUserCodeProcessError, self).__init__(*args, **kwargs)\n\n\nclass DagsterMaxRetriesExceededError(DagsterError):\n """Raised when raise_on_error is true, and retries were exceeded, this error should be raised."""\n\n def __init__(self, *args, **kwargs):\n from dagster._utils.error import SerializableErrorInfo\n\n self.user_code_process_error_infos = check.list_param(\n kwargs.pop("user_code_process_error_infos"),\n "user_code_process_error_infos",\n SerializableErrorInfo,\n )\n super(DagsterMaxRetriesExceededError, self).__init__(*args, **kwargs)\n\n @staticmethod\n def from_error_info(error_info):\n from dagster._utils.error import SerializableErrorInfo\n\n check.inst_param(error_info, "error_info", SerializableErrorInfo)\n return DagsterMaxRetriesExceededError(\n error_info.to_string(), user_code_process_error_infos=[error_info]\n )\n\n\nclass DagsterCodeLocationNotFoundError(DagsterError):\n pass\n\n\nclass DagsterCodeLocationLoadError(DagsterError):\n def __init__(self, *args, **kwargs):\n from dagster._utils.error import SerializableErrorInfo\n\n self.load_error_infos = check.list_param(\n kwargs.pop("load_error_infos"),\n "load_error_infos",\n SerializableErrorInfo,\n )\n super(DagsterCodeLocationLoadError, self).__init__(*args, **kwargs)\n\n\nclass DagsterLaunchFailedError(DagsterError):\n """Indicates an error while attempting to launch a pipeline run."""\n\n def __init__(self, *args, **kwargs):\n from dagster._utils.error import SerializableErrorInfo\n\n self.serializable_error_info = check.opt_inst_param(\n kwargs.pop("serializable_error_info", None),\n "serializable_error_info",\n SerializableErrorInfo,\n )\n super(DagsterLaunchFailedError, self).__init__(*args, **kwargs)\n\n\nclass DagsterBackfillFailedError(DagsterError):\n """Indicates an error while attempting to launch a backfill."""\n\n def __init__(self, *args, **kwargs):\n from dagster._utils.error import SerializableErrorInfo\n\n self.serializable_error_info = check.opt_inst_param(\n kwargs.pop("serializable_error_info", None),\n "serializable_error_info",\n SerializableErrorInfo,\n )\n super(DagsterBackfillFailedError, self).__init__(*args, **kwargs)\n\n\nclass DagsterRunAlreadyExists(DagsterError):\n """Indicates that a pipeline run already exists in a run storage."""\n\n\nclass DagsterSnapshotDoesNotExist(DagsterError):\n """Indicates you attempted to create a pipeline run with a nonexistent snapshot id."""\n\n\nclass DagsterRunConflict(DagsterError):\n """Indicates that a conflicting pipeline run exists in a run storage."""\n\n\n
[docs]class DagsterTypeCheckDidNotPass(DagsterError):\n """Indicates that a type check failed.\n\n This is raised when ``raise_on_error`` is ``True`` in calls to the synchronous job and\n graph execution APIs (e.g. `graph.execute_in_process()`, `job.execute_in_process()` -- typically\n within a test), and a :py:class:`~dagster.DagsterType`'s type check fails by returning either\n ``False`` or an instance of :py:class:`~dagster.TypeCheck` whose ``success`` member is ``False``.\n """\n\n def __init__(self, description=None, metadata=None, dagster_type=None):\n from dagster import DagsterType\n from dagster._core.definitions.metadata import normalize_metadata\n\n super(DagsterTypeCheckDidNotPass, self).__init__(description)\n self.description = check.opt_str_param(description, "description")\n self.metadata = normalize_metadata(\n check.opt_mapping_param(metadata, "metadata", key_type=str)\n )\n self.dagster_type = check.opt_inst_param(dagster_type, "dagster_type", DagsterType)
\n\n\nclass DagsterAssetCheckFailedError(DagsterError):\n    """Indicates that an asset check failed."""\n\n\n
[docs]class DagsterEventLogInvalidForRun(DagsterError):\n """Raised when the event logs for a historical run are malformed or invalid."""\n\n def __init__(self, run_id):\n self.run_id = check.str_param(run_id, "run_id")\n super(DagsterEventLogInvalidForRun, self).__init__(\n f"Event logs invalid for run id {run_id}"\n )
\n\n\nclass ScheduleExecutionError(DagsterUserCodeExecutionError):\n    """Errors raised in a user process during the execution of a schedule."""\n\n\nclass SensorExecutionError(DagsterUserCodeExecutionError):\n    """Errors raised in a user process during the execution of a sensor (or its job)."""\n\n\nclass PartitionExecutionError(DagsterUserCodeExecutionError):\n    """Errors raised during the execution of user-provided functions of a partition set schedule."""\n\n\nclass DagsterInvalidAssetKey(DagsterError):\n    """Error raised by invalid asset key."""\n\n\nclass DagsterInvalidMetadata(DagsterError):\n    """Error raised by invalid metadata parameters."""\n\n\nclass HookExecutionError(DagsterUserCodeExecutionError):\n    """Error raised during the execution of a user-defined hook."""\n\n\nclass RunStatusSensorExecutionError(DagsterUserCodeExecutionError):\n    """Error raised during the execution of a user-defined run status sensor."""\n\n\nclass FreshnessPolicySensorExecutionError(DagsterUserCodeExecutionError):\n    """Error raised during the execution of a user-defined freshness policy sensor."""\n\n\nclass DagsterImportError(DagsterError):\n    """Import error raised while importing user-code."""\n\n\nclass JobError(DagsterUserCodeExecutionError):\n    """Errors raised during the execution of user-provided functions for a defined Job."""\n\n\nclass DagsterUnknownStepStateError(DagsterError):\n    """When job execution completes with steps in an unknown state."""\n\n\nclass DagsterObjectStoreError(DagsterError):\n    """Errors during an object store operation."""\n\n\nclass DagsterInvalidPropertyError(DagsterError):\n    """Indicates that an invalid property was accessed. May often happen by accessing a property\n    that no longer exists after breaking changes.\n    """\n\n\nclass DagsterHomeNotSetError(DagsterError):\n    """The user has tried to use a command that requires an instance or invoke DagsterInstance.get()\n    without setting the DAGSTER_HOME env var.\n    """\n\n\nclass DagsterUnknownPartitionError(DagsterError):\n    """The user has tried to access run config for a partition name that does not exist."""\n\n\nclass DagsterUndefinedDataVersionError(DagsterError):\n    """The user attempted to retrieve the most recent logical version for an asset, but no logical version is defined."""\n\n\nclass DagsterAssetBackfillDataLoadError(DagsterError):\n    """Indicates that an asset backfill is now unloadable. May happen when (1) a code location containing\n    targeted assets is unloadable or (2) an asset or an asset's partitions definition has been removed.\n    """\n\n\nclass DagsterDefinitionChangedDeserializationError(DagsterError):\n    """Indicates that a stored value can't be deserialized because the definition needed to interpret\n    it has changed.\n    """\n\n\nclass DagsterPipesExecutionError(DagsterError):\n    """Indicates that an error occurred during the execution of an external process."""\n
", "current_page_name": "_modules/dagster/_core/errors", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.errors"}, "event_api": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.event_api

\nfrom datetime import datetime\nfrom typing import Callable, Mapping, NamedTuple, Optional, Sequence, Union\n\nfrom typing_extensions import TypeAlias\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr\nfrom dagster._core.definitions.events import AssetKey, AssetMaterialization, AssetObservation\nfrom dagster._core.errors import DagsterInvalidInvocationError\nfrom dagster._core.events import DagsterEventType\nfrom dagster._core.events.log import EventLogEntry\nfrom dagster._serdes import whitelist_for_serdes\n\nEventHandlerFn: TypeAlias = Callable[[EventLogEntry, str], None]\n\n\n
[docs]class RunShardedEventsCursor(NamedTuple):\n """Pairs an id-based event log cursor with a timestamp-based run cursor, for improved\n performance on run-sharded event log storages (e.g. the default SqliteEventLogStorage). For\n run-sharded storages, the id field is ignored, since they may not be unique across shards.\n """\n\n id: int\n run_updated_after: datetime
\n\n\n
[docs]@whitelist_for_serdes\nclass EventLogRecord(NamedTuple):\n """Internal representation of an event record, as stored in a\n :py:class:`~dagster._core.storage.event_log.EventLogStorage`.\n\n Users should not instantiate this class directly.\n """\n\n storage_id: PublicAttr[int]\n event_log_entry: PublicAttr[EventLogEntry]\n\n @property\n def run_id(self) -> str:\n return self.event_log_entry.run_id\n\n @property\n def timestamp(self) -> float:\n return self.event_log_entry.timestamp\n\n @property\n def asset_key(self) -> Optional[AssetKey]:\n dagster_event = self.event_log_entry.dagster_event\n if dagster_event:\n return dagster_event.asset_key\n\n return None\n\n @property\n def partition_key(self) -> Optional[str]:\n dagster_event = self.event_log_entry.dagster_event\n if dagster_event:\n return dagster_event.partition\n\n return None\n\n @property\n def asset_materialization(self) -> Optional[AssetMaterialization]:\n return self.event_log_entry.asset_materialization\n\n @property\n def asset_observation(self) -> Optional[AssetObservation]:\n return self.event_log_entry.asset_observation
\n\n\n
[docs]@whitelist_for_serdes\nclass EventRecordsFilter(\n NamedTuple(\n "_EventRecordsFilter",\n [\n ("event_type", DagsterEventType),\n ("asset_key", Optional[AssetKey]),\n ("asset_partitions", Optional[Sequence[str]]),\n ("after_cursor", Optional[Union[int, RunShardedEventsCursor]]),\n ("before_cursor", Optional[Union[int, RunShardedEventsCursor]]),\n ("after_timestamp", Optional[float]),\n ("before_timestamp", Optional[float]),\n ("storage_ids", Optional[Sequence[int]]),\n ("tags", Optional[Mapping[str, Union[str, Sequence[str]]]]),\n ],\n )\n):\n """Defines a set of filter fields for fetching a set of event log entries or event log records.\n\n Args:\n event_type (DagsterEventType): Filter argument for dagster event type\n asset_key (Optional[AssetKey]): Asset key for which to get asset materialization event\n entries / records.\n asset_partitions (Optional[List[str]]): Filter parameter such that only asset\n events with a partition value matching one of the provided values. Only\n valid when the `asset_key` parameter is provided.\n after_cursor (Optional[Union[int, RunShardedEventsCursor]]): Filter parameter such that only\n records with storage_id greater than the provided value are returned. Using a\n run-sharded events cursor will result in a significant performance gain when run against\n a SqliteEventLogStorage implementation (which is run-sharded)\n before_cursor (Optional[Union[int, RunShardedEventsCursor]]): Filter parameter such that\n records with storage_id less than the provided value are returned. Using a run-sharded\n events cursor will result in a significant performance gain when run against\n a SqliteEventLogStorage implementation (which is run-sharded)\n after_timestamp (Optional[float]): Filter parameter such that only event records for\n events with timestamp greater than the provided value are returned.\n before_timestamp (Optional[float]): Filter parameter such that only event records for\n events with timestamp less than the provided value are returned.\n """\n\n def __new__(\n cls,\n event_type: DagsterEventType,\n asset_key: Optional[AssetKey] = None,\n asset_partitions: Optional[Sequence[str]] = None,\n after_cursor: Optional[Union[int, RunShardedEventsCursor]] = None,\n before_cursor: Optional[Union[int, RunShardedEventsCursor]] = None,\n after_timestamp: Optional[float] = None,\n before_timestamp: Optional[float] = None,\n storage_ids: Optional[Sequence[int]] = None,\n tags: Optional[Mapping[str, Union[str, Sequence[str]]]] = None,\n ):\n check.opt_sequence_param(asset_partitions, "asset_partitions", of_type=str)\n check.inst_param(event_type, "event_type", DagsterEventType)\n\n tags = check.opt_mapping_param(tags, "tags", key_type=str)\n if tags and event_type is not DagsterEventType.ASSET_MATERIALIZATION:\n raise DagsterInvalidInvocationError(\n "Can only filter by tags for asset materialization events"\n )\n\n # type-ignores work around mypy type inference bug\n return super(EventRecordsFilter, cls).__new__(\n cls,\n event_type=event_type,\n asset_key=check.opt_inst_param(asset_key, "asset_key", AssetKey),\n asset_partitions=asset_partitions,\n after_cursor=check.opt_inst_param(\n after_cursor, "after_cursor", (int, RunShardedEventsCursor)\n ),\n before_cursor=check.opt_inst_param(\n before_cursor, "before_cursor", (int, RunShardedEventsCursor)\n ),\n after_timestamp=check.opt_float_param(after_timestamp, "after_timestamp"),\n before_timestamp=check.opt_float_param(before_timestamp, "before_timestamp"),\n 
storage_ids=check.opt_nullable_sequence_param(storage_ids, "storage_ids", of_type=int),\n tags=check.opt_mapping_param(tags, "tags", key_type=str),\n )
\n
", "current_page_name": "_modules/dagster/_core/event_api", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.event_api"}, "events": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.events

\n"""Structured representations of system events."""\nimport logging\nimport os\nimport sys\nfrom enum import Enum\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Dict,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Tuple,\n    Union,\n    cast,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.definitions import (\n    AssetKey,\n    AssetMaterialization,\n    AssetObservation,\n    ExpectationResult,\n    HookDefinition,\n    NodeHandle,\n)\nfrom dagster._core.definitions.asset_check_evaluation import (\n    AssetCheckEvaluation,\n    AssetCheckEvaluationPlanned,\n)\nfrom dagster._core.definitions.events import AssetLineageInfo, ObjectStoreOperationType\nfrom dagster._core.definitions.metadata import (\n    MetadataFieldSerializer,\n    MetadataValue,\n    RawMetadataValue,\n    normalize_metadata,\n)\nfrom dagster._core.errors import HookExecutionError\nfrom dagster._core.execution.context.system import IPlanContext, IStepContext, StepExecutionContext\nfrom dagster._core.execution.plan.handle import ResolvedFromDynamicStepHandle, StepHandle\nfrom dagster._core.execution.plan.inputs import StepInputData\nfrom dagster._core.execution.plan.objects import StepFailureData, StepRetryData, StepSuccessData\nfrom dagster._core.execution.plan.outputs import StepOutputData\nfrom dagster._core.log_manager import DagsterLogManager\nfrom dagster._core.storage.captured_log_manager import CapturedLogContext\nfrom dagster._core.storage.dagster_run import DagsterRunStatus\nfrom dagster._serdes import (\n    NamedTupleSerializer,\n    whitelist_for_serdes,\n)\nfrom dagster._serdes.serdes import UnpackContext\nfrom dagster._utils.error import SerializableErrorInfo, serializable_error_info_from_exc_info\nfrom dagster._utils.timing import format_duration\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.events import ObjectStoreOperation\n    from dagster._core.execution.plan.plan import ExecutionPlan\n    from dagster._core.execution.plan.step import StepKind\n\n\nEventSpecificData = Union[\n    StepOutputData,\n    StepFailureData,\n    StepSuccessData,\n    "StepMaterializationData",\n    "StepExpectationResultData",\n    StepInputData,\n    "EngineEventData",\n    "HookErroredData",\n    StepRetryData,\n    "JobFailureData",\n    "JobCanceledData",\n    "ObjectStoreOperationResultData",\n    "HandledOutputData",\n    "LoadedInputData",\n    "ComputeLogsCaptureData",\n    "AssetObservationData",\n    "AssetMaterializationPlannedData",\n    "AssetCheckEvaluation",\n    "AssetCheckEvaluationPlanned",\n]\n\n\n
[docs]class DagsterEventType(str, Enum):\n """The types of events that may be yielded by op and job execution."""\n\n STEP_OUTPUT = "STEP_OUTPUT"\n STEP_INPUT = "STEP_INPUT"\n STEP_FAILURE = "STEP_FAILURE"\n STEP_START = "STEP_START"\n STEP_SUCCESS = "STEP_SUCCESS"\n STEP_SKIPPED = "STEP_SKIPPED"\n\n # The process carrying out step execution is starting/started. Shown as a\n # marker start/end in the Dagster UI.\n STEP_WORKER_STARTING = "STEP_WORKER_STARTING"\n STEP_WORKER_STARTED = "STEP_WORKER_STARTED"\n\n # Resource initialization for execution has started/succeede/failed. Shown\n # as a marker start/end in the Dagster UI.\n RESOURCE_INIT_STARTED = "RESOURCE_INIT_STARTED"\n RESOURCE_INIT_SUCCESS = "RESOURCE_INIT_SUCCESS"\n RESOURCE_INIT_FAILURE = "RESOURCE_INIT_FAILURE"\n\n STEP_UP_FOR_RETRY = "STEP_UP_FOR_RETRY" # "failed" but want to retry\n STEP_RESTARTED = "STEP_RESTARTED"\n\n ASSET_MATERIALIZATION = "ASSET_MATERIALIZATION"\n ASSET_MATERIALIZATION_PLANNED = "ASSET_MATERIALIZATION_PLANNED"\n ASSET_OBSERVATION = "ASSET_OBSERVATION"\n STEP_EXPECTATION_RESULT = "STEP_EXPECTATION_RESULT"\n ASSET_CHECK_EVALUATION_PLANNED = "ASSET_CHECK_EVALUATION_PLANNED"\n ASSET_CHECK_EVALUATION = "ASSET_CHECK_EVALUATION"\n\n # We want to display RUN_* events in the Dagster UI and in our LogManager output, but in order to\n # support backcompat for our storage layer, we need to keep the persisted value to be strings\n # of the form "PIPELINE_*". We may have user code that pass in the DagsterEventType\n # enum values into storage APIs (like get_event_records, which takes in an EventRecordsFilter).\n RUN_ENQUEUED = "PIPELINE_ENQUEUED"\n RUN_DEQUEUED = "PIPELINE_DEQUEUED"\n RUN_STARTING = "PIPELINE_STARTING" # Launch is happening, execution hasn't started yet\n RUN_START = "PIPELINE_START" # Execution has started\n RUN_SUCCESS = "PIPELINE_SUCCESS"\n RUN_FAILURE = "PIPELINE_FAILURE"\n RUN_CANCELING = "PIPELINE_CANCELING"\n RUN_CANCELED = "PIPELINE_CANCELED"\n\n # Keep these legacy enum values around, to keep back-compatability for user code that might be\n # using these constants to filter event records\n PIPELINE_ENQUEUED = RUN_ENQUEUED\n PIPELINE_DEQUEUED = RUN_DEQUEUED\n PIPELINE_STARTING = RUN_STARTING\n PIPELINE_START = RUN_START\n PIPELINE_SUCCESS = RUN_SUCCESS\n PIPELINE_FAILURE = RUN_FAILURE\n PIPELINE_CANCELING = RUN_CANCELING\n PIPELINE_CANCELED = RUN_CANCELED\n\n OBJECT_STORE_OPERATION = "OBJECT_STORE_OPERATION"\n ASSET_STORE_OPERATION = "ASSET_STORE_OPERATION"\n LOADED_INPUT = "LOADED_INPUT"\n HANDLED_OUTPUT = "HANDLED_OUTPUT"\n\n ENGINE_EVENT = "ENGINE_EVENT"\n\n HOOK_COMPLETED = "HOOK_COMPLETED"\n HOOK_ERRORED = "HOOK_ERRORED"\n HOOK_SKIPPED = "HOOK_SKIPPED"\n\n ALERT_START = "ALERT_START"\n ALERT_SUCCESS = "ALERT_SUCCESS"\n ALERT_FAILURE = "ALERT_FAILURE"\n\n LOGS_CAPTURED = "LOGS_CAPTURED"
\n\n\nEVENT_TYPE_VALUE_TO_DISPLAY_STRING = {\n "PIPELINE_ENQUEUED": "RUN_ENQUEUED",\n "PIPELINE_DEQUEUED": "RUN_DEQUEUED",\n "PIPELINE_STARTING": "RUN_STARTING",\n "PIPELINE_START": "RUN_START",\n "PIPELINE_SUCCESS": "RUN_SUCCESS",\n "PIPELINE_FAILURE": "RUN_FAILURE",\n "PIPELINE_CANCELING": "RUN_CANCELING",\n "PIPELINE_CANCELED": "RUN_CANCELED",\n}\n\nSTEP_EVENTS = {\n DagsterEventType.STEP_INPUT,\n DagsterEventType.STEP_START,\n DagsterEventType.STEP_OUTPUT,\n DagsterEventType.STEP_FAILURE,\n DagsterEventType.STEP_SUCCESS,\n DagsterEventType.STEP_SKIPPED,\n DagsterEventType.ASSET_MATERIALIZATION,\n DagsterEventType.ASSET_OBSERVATION,\n DagsterEventType.STEP_EXPECTATION_RESULT,\n DagsterEventType.ASSET_CHECK_EVALUATION,\n DagsterEventType.OBJECT_STORE_OPERATION,\n DagsterEventType.HANDLED_OUTPUT,\n DagsterEventType.LOADED_INPUT,\n DagsterEventType.STEP_RESTARTED,\n DagsterEventType.STEP_UP_FOR_RETRY,\n}\n\nFAILURE_EVENTS = {\n DagsterEventType.RUN_FAILURE,\n DagsterEventType.STEP_FAILURE,\n DagsterEventType.RUN_CANCELED,\n}\n\nPIPELINE_EVENTS = {\n DagsterEventType.RUN_ENQUEUED,\n DagsterEventType.RUN_DEQUEUED,\n DagsterEventType.RUN_STARTING,\n DagsterEventType.RUN_START,\n DagsterEventType.RUN_SUCCESS,\n DagsterEventType.RUN_FAILURE,\n DagsterEventType.RUN_CANCELING,\n DagsterEventType.RUN_CANCELED,\n}\n\nHOOK_EVENTS = {\n DagsterEventType.HOOK_COMPLETED,\n DagsterEventType.HOOK_ERRORED,\n DagsterEventType.HOOK_SKIPPED,\n}\n\nALERT_EVENTS = {\n DagsterEventType.ALERT_START,\n DagsterEventType.ALERT_SUCCESS,\n DagsterEventType.ALERT_FAILURE,\n}\n\nMARKER_EVENTS = {\n DagsterEventType.ENGINE_EVENT,\n DagsterEventType.STEP_WORKER_STARTING,\n DagsterEventType.STEP_WORKER_STARTED,\n DagsterEventType.RESOURCE_INIT_STARTED,\n DagsterEventType.RESOURCE_INIT_SUCCESS,\n DagsterEventType.RESOURCE_INIT_FAILURE,\n}\n\n\nEVENT_TYPE_TO_PIPELINE_RUN_STATUS = {\n DagsterEventType.RUN_START: DagsterRunStatus.STARTED,\n DagsterEventType.RUN_SUCCESS: DagsterRunStatus.SUCCESS,\n DagsterEventType.RUN_FAILURE: DagsterRunStatus.FAILURE,\n DagsterEventType.RUN_ENQUEUED: DagsterRunStatus.QUEUED,\n DagsterEventType.RUN_STARTING: DagsterRunStatus.STARTING,\n DagsterEventType.RUN_CANCELING: DagsterRunStatus.CANCELING,\n DagsterEventType.RUN_CANCELED: DagsterRunStatus.CANCELED,\n}\n\nPIPELINE_RUN_STATUS_TO_EVENT_TYPE = {v: k for k, v in EVENT_TYPE_TO_PIPELINE_RUN_STATUS.items()}\n\nASSET_EVENTS = {\n DagsterEventType.ASSET_MATERIALIZATION,\n DagsterEventType.ASSET_OBSERVATION,\n DagsterEventType.ASSET_MATERIALIZATION_PLANNED,\n}\n\nASSET_CHECK_EVENTS = {\n DagsterEventType.ASSET_CHECK_EVALUATION,\n DagsterEventType.ASSET_CHECK_EVALUATION_PLANNED,\n}\n\n\ndef _assert_type(\n method: str,\n expected_type: Union[DagsterEventType, Sequence[DagsterEventType]],\n actual_type: DagsterEventType,\n) -> None:\n _expected_type = (\n [expected_type] if isinstance(expected_type, DagsterEventType) else expected_type\n )\n check.invariant(\n actual_type in _expected_type,\n f"{method} only callable when event_type is"\n f" {','.join([t.value for t in _expected_type])}, called on {actual_type}",\n )\n\n\ndef _validate_event_specific_data(\n event_type: DagsterEventType, event_specific_data: Optional["EventSpecificData"]\n) -> Optional["EventSpecificData"]:\n if event_type == DagsterEventType.STEP_OUTPUT:\n check.inst_param(event_specific_data, "event_specific_data", StepOutputData)\n elif event_type == DagsterEventType.STEP_FAILURE:\n check.inst_param(event_specific_data, "event_specific_data", StepFailureData)\n elif event_type == 
DagsterEventType.STEP_SUCCESS:\n check.inst_param(event_specific_data, "event_specific_data", StepSuccessData)\n elif event_type == DagsterEventType.ASSET_MATERIALIZATION:\n check.inst_param(event_specific_data, "event_specific_data", StepMaterializationData)\n elif event_type == DagsterEventType.STEP_EXPECTATION_RESULT:\n check.inst_param(event_specific_data, "event_specific_data", StepExpectationResultData)\n elif event_type == DagsterEventType.STEP_INPUT:\n check.inst_param(event_specific_data, "event_specific_data", StepInputData)\n elif event_type in (\n DagsterEventType.ENGINE_EVENT,\n DagsterEventType.STEP_WORKER_STARTING,\n DagsterEventType.STEP_WORKER_STARTED,\n DagsterEventType.RESOURCE_INIT_STARTED,\n DagsterEventType.RESOURCE_INIT_SUCCESS,\n DagsterEventType.RESOURCE_INIT_FAILURE,\n ):\n check.inst_param(event_specific_data, "event_specific_data", EngineEventData)\n elif event_type == DagsterEventType.HOOK_ERRORED:\n check.inst_param(event_specific_data, "event_specific_data", HookErroredData)\n elif event_type == DagsterEventType.ASSET_MATERIALIZATION_PLANNED:\n check.inst_param(\n event_specific_data, "event_specific_data", AssetMaterializationPlannedData\n )\n elif event_type == DagsterEventType.ASSET_CHECK_EVALUATION_PLANNED:\n check.inst_param(event_specific_data, "event_specific_data", AssetCheckEvaluationPlanned)\n elif event_type == DagsterEventType.ASSET_CHECK_EVALUATION:\n check.inst_param(event_specific_data, "event_specific_data", AssetCheckEvaluation)\n\n return event_specific_data\n\n\ndef log_step_event(step_context: IStepContext, event: "DagsterEvent") -> None:\n event_type = DagsterEventType(event.event_type_value)\n log_level = logging.ERROR if event_type in FAILURE_EVENTS else logging.DEBUG\n\n step_context.log.log_dagster_event(\n level=log_level,\n msg=event.message or f"{event_type} for step {step_context.step.key}",\n dagster_event=event,\n )\n\n\ndef log_job_event(job_context: IPlanContext, event: "DagsterEvent") -> None:\n event_type = DagsterEventType(event.event_type_value)\n log_level = logging.ERROR if event_type in FAILURE_EVENTS else logging.DEBUG\n\n job_context.log.log_dagster_event(\n level=log_level,\n msg=event.message or f"{event_type} for pipeline {job_context.job_name}",\n dagster_event=event,\n )\n\n\ndef log_resource_event(log_manager: DagsterLogManager, event: "DagsterEvent") -> None:\n event_specific_data = cast(EngineEventData, event.event_specific_data)\n\n log_level = logging.ERROR if event_specific_data.error else logging.DEBUG\n log_manager.log_dagster_event(level=log_level, msg=event.message or "", dagster_event=event)\n\n\nclass DagsterEventSerializer(NamedTupleSerializer["DagsterEvent"]):\n def before_unpack(self, context, unpacked_dict: Any) -> Dict[str, Any]:\n event_type_value, event_specific_data = _handle_back_compat(\n unpacked_dict["event_type_value"], unpacked_dict.get("event_specific_data")\n )\n unpacked_dict["event_type_value"] = event_type_value\n unpacked_dict["event_specific_data"] = event_specific_data\n\n return unpacked_dict\n\n def handle_unpack_error(\n self,\n exc: Exception,\n context: UnpackContext,\n storage_dict: Dict[str, Any],\n ) -> "DagsterEvent":\n event_type_value, _ = _handle_back_compat(\n storage_dict["event_type_value"], storage_dict.get("event_specific_data")\n )\n step_key = storage_dict.get("step_key")\n orig_message = storage_dict.get("message")\n new_message = (\n f"Could not deserialize event of type {event_type_value}. 
This event may have been"\n " written by a newer version of Dagster."\n + (f' Original message: "{orig_message}"' if orig_message else "")\n )\n return DagsterEvent(\n event_type_value=DagsterEventType.ENGINE_EVENT.value,\n job_name=storage_dict["pipeline_name"],\n message=new_message,\n step_key=step_key,\n event_specific_data=EngineEventData(\n error=serializable_error_info_from_exc_info(sys.exc_info())\n ),\n )\n\n\n
[docs]@whitelist_for_serdes(\n serializer=DagsterEventSerializer,\n storage_field_names={\n "node_handle": "solid_handle",\n "job_name": "pipeline_name",\n },\n)\nclass DagsterEvent(\n NamedTuple(\n "_DagsterEvent",\n [\n ("event_type_value", str),\n ("job_name", str),\n ("step_handle", Optional[Union[StepHandle, ResolvedFromDynamicStepHandle]]),\n ("node_handle", Optional[NodeHandle]),\n ("step_kind_value", Optional[str]),\n ("logging_tags", Optional[Mapping[str, str]]),\n ("event_specific_data", Optional["EventSpecificData"]),\n ("message", Optional[str]),\n ("pid", Optional[int]),\n ("step_key", Optional[str]),\n ],\n )\n):\n """Events yielded by op and job execution.\n\n Users should not instantiate this class.\n\n Attributes:\n event_type_value (str): Value for a DagsterEventType.\n job_name (str)\n node_handle (NodeHandle)\n step_kind_value (str): Value for a StepKind.\n logging_tags (Dict[str, str])\n event_specific_data (Any): Type must correspond to event_type_value.\n message (str)\n pid (int)\n step_key (Optional[str]): DEPRECATED\n """\n\n @staticmethod\n def from_step(\n event_type: "DagsterEventType",\n step_context: IStepContext,\n event_specific_data: Optional["EventSpecificData"] = None,\n message: Optional[str] = None,\n ) -> "DagsterEvent":\n event = DagsterEvent(\n event_type_value=check.inst_param(event_type, "event_type", DagsterEventType).value,\n job_name=step_context.job_name,\n step_handle=step_context.step.handle,\n node_handle=step_context.step.node_handle,\n step_kind_value=step_context.step.kind.value,\n logging_tags=step_context.event_tags,\n event_specific_data=_validate_event_specific_data(event_type, event_specific_data),\n message=check.opt_str_param(message, "message"),\n pid=os.getpid(),\n )\n\n log_step_event(step_context, event)\n\n return event\n\n @staticmethod\n def from_job(\n event_type: DagsterEventType,\n job_context: IPlanContext,\n message: Optional[str] = None,\n event_specific_data: Optional["EventSpecificData"] = None,\n step_handle: Optional[Union[StepHandle, ResolvedFromDynamicStepHandle]] = None,\n ) -> "DagsterEvent":\n check.opt_inst_param(\n step_handle, "step_handle", (StepHandle, ResolvedFromDynamicStepHandle)\n )\n\n event = DagsterEvent(\n event_type_value=check.inst_param(event_type, "event_type", DagsterEventType).value,\n job_name=job_context.job_name,\n message=check.opt_str_param(message, "message"),\n event_specific_data=_validate_event_specific_data(event_type, event_specific_data),\n step_handle=step_handle,\n pid=os.getpid(),\n )\n\n log_job_event(job_context, event)\n\n return event\n\n @staticmethod\n def from_resource(\n event_type: DagsterEventType,\n job_name: str,\n execution_plan: "ExecutionPlan",\n log_manager: DagsterLogManager,\n message: Optional[str] = None,\n event_specific_data: Optional["EngineEventData"] = None,\n ) -> "DagsterEvent":\n event = DagsterEvent(\n event_type_value=check.inst_param(event_type, "event_type", DagsterEventType).value,\n job_name=job_name,\n message=check.opt_str_param(message, "message"),\n event_specific_data=_validate_event_specific_data(\n DagsterEventType.ENGINE_EVENT, event_specific_data\n ),\n step_handle=execution_plan.step_handle_for_single_step_plans(),\n pid=os.getpid(),\n )\n log_resource_event(log_manager, event)\n return event\n\n def __new__(\n cls,\n event_type_value: str,\n job_name: str,\n step_handle: Optional[Union[StepHandle, ResolvedFromDynamicStepHandle]] = None,\n node_handle: Optional[NodeHandle] = None,\n step_kind_value: Optional[str] = None,\n 
logging_tags: Optional[Mapping[str, str]] = None,\n event_specific_data: Optional["EventSpecificData"] = None,\n message: Optional[str] = None,\n pid: Optional[int] = None,\n # legacy\n step_key: Optional[str] = None,\n ):\n # old events may contain node_handle but not step_handle\n if node_handle is not None and step_handle is None:\n step_handle = StepHandle(node_handle)\n\n # Legacy events may have step_key set directly, preserve those to stay in sync\n # with legacy execution plan snapshots.\n if step_handle is not None and step_key is None:\n step_key = step_handle.to_key()\n\n return super(DagsterEvent, cls).__new__(\n cls,\n check.str_param(event_type_value, "event_type_value"),\n check.str_param(job_name, "job_name"),\n check.opt_inst_param(\n step_handle, "step_handle", (StepHandle, ResolvedFromDynamicStepHandle)\n ),\n check.opt_inst_param(node_handle, "node_handle", NodeHandle),\n check.opt_str_param(step_kind_value, "step_kind_value"),\n check.opt_mapping_param(logging_tags, "logging_tags"),\n _validate_event_specific_data(DagsterEventType(event_type_value), event_specific_data),\n check.opt_str_param(message, "message"),\n check.opt_int_param(pid, "pid"),\n check.opt_str_param(step_key, "step_key"),\n )\n\n @property\n def node_name(self) -> str:\n check.invariant(self.node_handle is not None)\n node_handle = cast(NodeHandle, self.node_handle)\n return node_handle.name\n\n @public\n @property\n def event_type(self) -> DagsterEventType:\n """DagsterEventType: The type of this event."""\n return DagsterEventType(self.event_type_value)\n\n @public\n @property\n def is_step_event(self) -> bool:\n """bool: If this event relates to a specific step."""\n return self.event_type in STEP_EVENTS\n\n @public\n @property\n def is_hook_event(self) -> bool:\n """bool: If this event relates to the execution of a hook."""\n return self.event_type in HOOK_EVENTS\n\n @property\n def is_alert_event(self) -> bool:\n return self.event_type in ALERT_EVENTS\n\n @property\n def step_kind(self) -> "StepKind":\n from dagster._core.execution.plan.step import StepKind\n\n return StepKind(self.step_kind_value)\n\n @public\n @property\n def is_step_success(self) -> bool:\n """bool: If this event is of type STEP_SUCCESS."""\n return self.event_type == DagsterEventType.STEP_SUCCESS\n\n @public\n @property\n def is_successful_output(self) -> bool:\n """bool: If this event is of type STEP_OUTPUT."""\n return self.event_type == DagsterEventType.STEP_OUTPUT\n\n @public\n @property\n def is_step_start(self) -> bool:\n """bool: If this event is of type STEP_START."""\n return self.event_type == DagsterEventType.STEP_START\n\n @public\n @property\n def is_step_failure(self) -> bool:\n """bool: If this event is of type STEP_FAILURE."""\n return self.event_type == DagsterEventType.STEP_FAILURE\n\n @public\n @property\n def is_resource_init_failure(self) -> bool:\n """bool: If this event is of type RESOURCE_INIT_FAILURE."""\n return self.event_type == DagsterEventType.RESOURCE_INIT_FAILURE\n\n @public\n @property\n def is_step_skipped(self) -> bool:\n """bool: If this event is of type STEP_SKIPPED."""\n return self.event_type == DagsterEventType.STEP_SKIPPED\n\n @public\n @property\n def is_step_up_for_retry(self) -> bool:\n """bool: If this event is of type STEP_UP_FOR_RETRY."""\n return self.event_type == DagsterEventType.STEP_UP_FOR_RETRY\n\n @public\n @property\n def is_step_restarted(self) -> bool:\n """bool: If this event is of type STEP_RESTARTED."""\n return self.event_type == 
DagsterEventType.STEP_RESTARTED\n\n @property\n def is_job_success(self) -> bool:\n return self.event_type == DagsterEventType.RUN_SUCCESS\n\n @property\n def is_job_failure(self) -> bool:\n return self.event_type == DagsterEventType.RUN_FAILURE\n\n @property\n def is_run_failure(self) -> bool:\n return self.event_type == DagsterEventType.RUN_FAILURE\n\n @public\n @property\n def is_failure(self) -> bool:\n """bool: If this event represents the failure of a run or step."""\n return self.event_type in FAILURE_EVENTS\n\n @property\n def is_job_event(self) -> bool:\n return self.event_type in PIPELINE_EVENTS\n\n @public\n @property\n def is_engine_event(self) -> bool:\n """bool: If this event is of type ENGINE_EVENT."""\n return self.event_type == DagsterEventType.ENGINE_EVENT\n\n @public\n @property\n def is_handled_output(self) -> bool:\n """bool: If this event is of type HANDLED_OUTPUT."""\n return self.event_type == DagsterEventType.HANDLED_OUTPUT\n\n @public\n @property\n def is_loaded_input(self) -> bool:\n """bool: If this event is of type LOADED_INPUT."""\n return self.event_type == DagsterEventType.LOADED_INPUT\n\n @public\n @property\n def is_step_materialization(self) -> bool:\n """bool: If this event is of type ASSET_MATERIALIZATION."""\n return self.event_type == DagsterEventType.ASSET_MATERIALIZATION\n\n @public\n @property\n def is_expectation_result(self) -> bool:\n """bool: If this event is of type STEP_EXPECTATION_RESULT."""\n return self.event_type == DagsterEventType.STEP_EXPECTATION_RESULT\n\n @public\n @property\n def is_asset_observation(self) -> bool:\n """bool: If this event is of type ASSET_OBSERVATION."""\n return self.event_type == DagsterEventType.ASSET_OBSERVATION\n\n @public\n @property\n def is_asset_materialization_planned(self) -> bool:\n """bool: If this event is of type ASSET_MATERIALIZATION_PLANNED."""\n return self.event_type == DagsterEventType.ASSET_MATERIALIZATION_PLANNED\n\n @public\n @property\n def asset_key(self) -> Optional[AssetKey]:\n """Optional[AssetKey]: For events that correspond to a specific asset_key / partition\n (ASSET_MATERIALIZTION, ASSET_OBSERVATION, ASSET_MATERIALIZATION_PLANNED), returns that\n asset key. Otherwise, returns None.\n """\n if self.event_type == DagsterEventType.ASSET_MATERIALIZATION:\n return self.step_materialization_data.materialization.asset_key\n elif self.event_type == DagsterEventType.ASSET_OBSERVATION:\n return self.asset_observation_data.asset_observation.asset_key\n elif self.event_type == DagsterEventType.ASSET_MATERIALIZATION_PLANNED:\n return self.asset_materialization_planned_data.asset_key\n else:\n return None\n\n @public\n @property\n def partition(self) -> Optional[str]:\n """Optional[AssetKey]: For events that correspond to a specific asset_key / partition\n (ASSET_MATERIALIZTION, ASSET_OBSERVATION, ASSET_MATERIALIZATION_PLANNED), returns that\n partition. 
Otherwise, returns None.\n """\n if self.event_type == DagsterEventType.ASSET_MATERIALIZATION:\n return self.step_materialization_data.materialization.partition\n elif self.event_type == DagsterEventType.ASSET_OBSERVATION:\n return self.asset_observation_data.asset_observation.partition\n elif self.event_type == DagsterEventType.ASSET_MATERIALIZATION_PLANNED:\n return self.asset_materialization_planned_data.partition\n else:\n return None\n\n @property\n def step_input_data(self) -> "StepInputData":\n _assert_type("step_input_data", DagsterEventType.STEP_INPUT, self.event_type)\n return cast(StepInputData, self.event_specific_data)\n\n @property\n def step_output_data(self) -> StepOutputData:\n _assert_type("step_output_data", DagsterEventType.STEP_OUTPUT, self.event_type)\n return cast(StepOutputData, self.event_specific_data)\n\n @property\n def step_success_data(self) -> "StepSuccessData":\n _assert_type("step_success_data", DagsterEventType.STEP_SUCCESS, self.event_type)\n return cast(StepSuccessData, self.event_specific_data)\n\n @property\n def step_failure_data(self) -> "StepFailureData":\n _assert_type("step_failure_data", DagsterEventType.STEP_FAILURE, self.event_type)\n return cast(StepFailureData, self.event_specific_data)\n\n @property\n def step_retry_data(self) -> "StepRetryData":\n _assert_type("step_retry_data", DagsterEventType.STEP_UP_FOR_RETRY, self.event_type)\n return cast(StepRetryData, self.event_specific_data)\n\n @property\n def step_materialization_data(self) -> "StepMaterializationData":\n _assert_type(\n "step_materialization_data", DagsterEventType.ASSET_MATERIALIZATION, self.event_type\n )\n return cast(StepMaterializationData, self.event_specific_data)\n\n @property\n def asset_observation_data(self) -> "AssetObservationData":\n _assert_type("asset_observation_data", DagsterEventType.ASSET_OBSERVATION, self.event_type)\n return cast(AssetObservationData, self.event_specific_data)\n\n @property\n def asset_materialization_planned_data(self) -> "AssetMaterializationPlannedData":\n _assert_type(\n "asset_materialization_planned",\n DagsterEventType.ASSET_MATERIALIZATION_PLANNED,\n self.event_type,\n )\n return cast(AssetMaterializationPlannedData, self.event_specific_data)\n\n @property\n def asset_check_planned_data(self) -> "AssetCheckEvaluationPlanned":\n _assert_type(\n "asset_check_planned",\n DagsterEventType.ASSET_CHECK_EVALUATION_PLANNED,\n self.event_type,\n )\n return cast(AssetCheckEvaluationPlanned, self.event_specific_data)\n\n @property\n def step_expectation_result_data(self) -> "StepExpectationResultData":\n _assert_type(\n "step_expectation_result_data",\n DagsterEventType.STEP_EXPECTATION_RESULT,\n self.event_type,\n )\n return cast(StepExpectationResultData, self.event_specific_data)\n\n @property\n def materialization(self) -> AssetMaterialization:\n _assert_type(\n "step_materialization_data", DagsterEventType.ASSET_MATERIALIZATION, self.event_type\n )\n return cast(StepMaterializationData, self.event_specific_data).materialization\n\n @property\n def asset_check_evaluation_data(self) -> AssetCheckEvaluation:\n _assert_type(\n "asset_check_evaluation", DagsterEventType.ASSET_CHECK_EVALUATION, self.event_type\n )\n return cast(AssetCheckEvaluation, self.event_specific_data)\n\n @property\n def job_failure_data(self) -> "JobFailureData":\n _assert_type("job_failure_data", DagsterEventType.RUN_FAILURE, self.event_type)\n return cast(JobFailureData, self.event_specific_data)\n\n @property\n def engine_event_data(self) -> "EngineEventData":\n 
_assert_type(\n "engine_event_data",\n [\n DagsterEventType.ENGINE_EVENT,\n DagsterEventType.RESOURCE_INIT_STARTED,\n DagsterEventType.RESOURCE_INIT_SUCCESS,\n DagsterEventType.RESOURCE_INIT_FAILURE,\n DagsterEventType.STEP_WORKER_STARTED,\n DagsterEventType.STEP_WORKER_STARTING,\n ],\n self.event_type,\n )\n return cast(EngineEventData, self.event_specific_data)\n\n @property\n def hook_completed_data(self) -> Optional["EventSpecificData"]:\n _assert_type("hook_completed_data", DagsterEventType.HOOK_COMPLETED, self.event_type)\n return self.event_specific_data\n\n @property\n def hook_errored_data(self) -> "HookErroredData":\n _assert_type("hook_errored_data", DagsterEventType.HOOK_ERRORED, self.event_type)\n return cast(HookErroredData, self.event_specific_data)\n\n @property\n def hook_skipped_data(self) -> Optional["EventSpecificData"]:\n _assert_type("hook_skipped_data", DagsterEventType.HOOK_SKIPPED, self.event_type)\n return self.event_specific_data\n\n @property\n def logs_captured_data(self) -> "ComputeLogsCaptureData":\n _assert_type("logs_captured_data", DagsterEventType.LOGS_CAPTURED, self.event_type)\n return cast(ComputeLogsCaptureData, self.event_specific_data)\n\n @staticmethod\n def step_output_event(\n step_context: StepExecutionContext, step_output_data: StepOutputData\n ) -> "DagsterEvent":\n output_def = step_context.op.output_def_named(\n step_output_data.step_output_handle.output_name\n )\n\n return DagsterEvent.from_step(\n event_type=DagsterEventType.STEP_OUTPUT,\n step_context=step_context,\n event_specific_data=step_output_data,\n message=(\n 'Yielded output "{output_name}"{mapping_clause} of type'\n ' "{output_type}".{type_check_clause}'.format(\n output_name=step_output_data.step_output_handle.output_name,\n output_type=output_def.dagster_type.display_name,\n type_check_clause=(\n (\n " Warning! 
Type check failed."\n if not step_output_data.type_check_data.success\n else " (Type check passed)."\n )\n if step_output_data.type_check_data\n else " (No type check)."\n ),\n mapping_clause=(\n f' mapping key "{step_output_data.step_output_handle.mapping_key}"'\n if step_output_data.step_output_handle.mapping_key\n else ""\n ),\n )\n ),\n )\n\n @staticmethod\n def step_failure_event(\n step_context: IStepContext,\n step_failure_data: "StepFailureData",\n message=None,\n ) -> "DagsterEvent":\n return DagsterEvent.from_step(\n event_type=DagsterEventType.STEP_FAILURE,\n step_context=step_context,\n event_specific_data=step_failure_data,\n message=(message or f'Execution of step "{step_context.step.key}" failed.'),\n )\n\n @staticmethod\n def step_retry_event(\n step_context: IStepContext, step_retry_data: "StepRetryData"\n ) -> "DagsterEvent":\n return DagsterEvent.from_step(\n event_type=DagsterEventType.STEP_UP_FOR_RETRY,\n step_context=step_context,\n event_specific_data=step_retry_data,\n message=(\n 'Execution of step "{step_key}" failed and has requested a retry{wait_str}.'.format(\n step_key=step_context.step.key,\n wait_str=(\n f" in {step_retry_data.seconds_to_wait} seconds"\n if step_retry_data.seconds_to_wait\n else ""\n ),\n )\n ),\n )\n\n @staticmethod\n def step_input_event(\n step_context: StepExecutionContext, step_input_data: "StepInputData"\n ) -> "DagsterEvent":\n input_def = step_context.op_def.input_def_named(step_input_data.input_name)\n\n return DagsterEvent.from_step(\n event_type=DagsterEventType.STEP_INPUT,\n step_context=step_context,\n event_specific_data=step_input_data,\n message='Got input "{input_name}" of type "{input_type}".{type_check_clause}'.format(\n input_name=step_input_data.input_name,\n input_type=input_def.dagster_type.display_name,\n type_check_clause=(\n (\n " Warning! 
Type check failed."\n if not step_input_data.type_check_data.success\n else " (Type check passed)."\n )\n if step_input_data.type_check_data\n else " (No type check)."\n ),\n ),\n )\n\n @staticmethod\n def step_start_event(step_context: IStepContext) -> "DagsterEvent":\n return DagsterEvent.from_step(\n event_type=DagsterEventType.STEP_START,\n step_context=step_context,\n message=f'Started execution of step "{step_context.step.key}".',\n )\n\n @staticmethod\n def step_restarted_event(step_context: IStepContext, previous_attempts: int) -> "DagsterEvent":\n return DagsterEvent.from_step(\n event_type=DagsterEventType.STEP_RESTARTED,\n step_context=step_context,\n message='Started re-execution (attempt # {n}) of step "{step_key}".'.format(\n step_key=step_context.step.key, n=previous_attempts + 1\n ),\n )\n\n @staticmethod\n def step_success_event(\n step_context: IStepContext, success: "StepSuccessData"\n ) -> "DagsterEvent":\n return DagsterEvent.from_step(\n event_type=DagsterEventType.STEP_SUCCESS,\n step_context=step_context,\n event_specific_data=success,\n message='Finished execution of step "{step_key}" in {duration}.'.format(\n step_key=step_context.step.key,\n duration=format_duration(success.duration_ms),\n ),\n )\n\n @staticmethod\n def step_skipped_event(step_context: IStepContext) -> "DagsterEvent":\n return DagsterEvent.from_step(\n event_type=DagsterEventType.STEP_SKIPPED,\n step_context=step_context,\n message=f'Skipped execution of step "{step_context.step.key}".',\n )\n\n @staticmethod\n def asset_materialization(\n step_context: IStepContext,\n materialization: AssetMaterialization,\n ) -> "DagsterEvent":\n return DagsterEvent.from_step(\n event_type=DagsterEventType.ASSET_MATERIALIZATION,\n step_context=step_context,\n event_specific_data=StepMaterializationData(materialization),\n message=(\n materialization.description\n if materialization.description\n else "Materialized value{label_clause}.".format(\n label_clause=f" {materialization.label}" if materialization.label else ""\n )\n ),\n )\n\n @staticmethod\n def asset_observation(\n step_context: IStepContext, observation: AssetObservation\n ) -> "DagsterEvent":\n return DagsterEvent.from_step(\n event_type=DagsterEventType.ASSET_OBSERVATION,\n step_context=step_context,\n event_specific_data=AssetObservationData(observation),\n )\n\n @staticmethod\n def asset_check_evaluation(\n step_context: IStepContext, asset_check_evaluation: AssetCheckEvaluation\n ) -> "DagsterEvent":\n return DagsterEvent.from_step(\n event_type=DagsterEventType.ASSET_CHECK_EVALUATION,\n step_context=step_context,\n event_specific_data=asset_check_evaluation,\n )\n\n @staticmethod\n def step_expectation_result(\n step_context: IStepContext, expectation_result: ExpectationResult\n ) -> "DagsterEvent":\n def _msg():\n if expectation_result.description:\n return expectation_result.description\n\n return "Expectation{label_clause} {result_verb}".format(\n label_clause=" " + expectation_result.label if expectation_result.label else "",\n result_verb="passed" if expectation_result.success else "failed",\n )\n\n return DagsterEvent.from_step(\n event_type=DagsterEventType.STEP_EXPECTATION_RESULT,\n step_context=step_context,\n event_specific_data=StepExpectationResultData(expectation_result),\n message=_msg(),\n )\n\n @staticmethod\n def job_start(job_context: IPlanContext) -> "DagsterEvent":\n return DagsterEvent.from_job(\n DagsterEventType.RUN_START,\n job_context,\n message=f'Started execution of run for "{job_context.job_name}".',\n )\n\n 
@staticmethod\n def job_success(job_context: IPlanContext) -> "DagsterEvent":\n return DagsterEvent.from_job(\n DagsterEventType.RUN_SUCCESS,\n job_context,\n message=f'Finished execution of run for "{job_context.job_name}".',\n )\n\n @staticmethod\n def job_failure(\n job_context_or_name: Union[IPlanContext, str],\n context_msg: str,\n error_info: Optional[SerializableErrorInfo] = None,\n ) -> "DagsterEvent":\n check.str_param(context_msg, "context_msg")\n if isinstance(job_context_or_name, IPlanContext):\n return DagsterEvent.from_job(\n DagsterEventType.RUN_FAILURE,\n job_context_or_name,\n message=(\n f'Execution of run for "{job_context_or_name.job_name}" failed. {context_msg}'\n ),\n event_specific_data=JobFailureData(error_info),\n )\n else:\n # when the failure happens trying to bring up context, the job_context hasn't been\n # built and so can't use from_pipeline\n check.str_param(job_context_or_name, "pipeline_name")\n event = DagsterEvent(\n event_type_value=DagsterEventType.RUN_FAILURE.value,\n job_name=job_context_or_name,\n event_specific_data=JobFailureData(error_info),\n message=f'Execution of run for "{job_context_or_name}" failed. {context_msg}',\n pid=os.getpid(),\n )\n return event\n\n @staticmethod\n def job_canceled(\n job_context: IPlanContext, error_info: Optional[SerializableErrorInfo] = None\n ) -> "DagsterEvent":\n return DagsterEvent.from_job(\n DagsterEventType.RUN_CANCELED,\n job_context,\n message=f'Execution of run for "{job_context.job_name}" canceled.',\n event_specific_data=JobCanceledData(\n check.opt_inst_param(error_info, "error_info", SerializableErrorInfo)\n ),\n )\n\n @staticmethod\n def step_worker_starting(\n step_context: IStepContext,\n message: str,\n metadata: Mapping[str, MetadataValue],\n ) -> "DagsterEvent":\n return DagsterEvent.from_step(\n DagsterEventType.STEP_WORKER_STARTING,\n step_context,\n message=message,\n event_specific_data=EngineEventData(\n metadata=metadata, marker_start="step_process_start"\n ),\n )\n\n @staticmethod\n def step_worker_started(\n log_manager: DagsterLogManager,\n job_name: str,\n message: str,\n metadata: Mapping[str, MetadataValue],\n step_key: Optional[str],\n ) -> "DagsterEvent":\n event = DagsterEvent(\n DagsterEventType.STEP_WORKER_STARTED.value,\n job_name=job_name,\n message=message,\n event_specific_data=EngineEventData(metadata=metadata, marker_end="step_process_start"),\n pid=os.getpid(),\n step_key=step_key,\n )\n log_manager.log_dagster_event(\n level=logging.DEBUG,\n msg=message,\n dagster_event=event,\n )\n return event\n\n @staticmethod\n def resource_init_start(\n job_name: str,\n execution_plan: "ExecutionPlan",\n log_manager: DagsterLogManager,\n resource_keys: AbstractSet[str],\n ) -> "DagsterEvent":\n return DagsterEvent.from_resource(\n DagsterEventType.RESOURCE_INIT_STARTED,\n job_name=job_name,\n execution_plan=execution_plan,\n log_manager=log_manager,\n message="Starting initialization of resources [{}].".format(\n ", ".join(sorted(resource_keys))\n ),\n event_specific_data=EngineEventData(metadata={}, marker_start="resources"),\n )\n\n @staticmethod\n def resource_init_success(\n job_name: str,\n execution_plan: "ExecutionPlan",\n log_manager: DagsterLogManager,\n resource_instances: Mapping[str, Any],\n resource_init_times: Mapping[str, str],\n ) -> "DagsterEvent":\n metadata = {}\n for key in resource_instances.keys():\n metadata[key] = MetadataValue.python_artifact(resource_instances[key].__class__)\n metadata[f"{key}:init_time"] = resource_init_times[key]\n\n return 
DagsterEvent.from_resource(\n DagsterEventType.RESOURCE_INIT_SUCCESS,\n job_name=job_name,\n execution_plan=execution_plan,\n log_manager=log_manager,\n message="Finished initialization of resources [{}].".format(\n ", ".join(sorted(resource_init_times.keys()))\n ),\n event_specific_data=EngineEventData(\n metadata=metadata,\n marker_end="resources",\n ),\n )\n\n @staticmethod\n def resource_init_failure(\n job_name: str,\n execution_plan: "ExecutionPlan",\n log_manager: DagsterLogManager,\n resource_keys: AbstractSet[str],\n error: SerializableErrorInfo,\n ) -> "DagsterEvent":\n return DagsterEvent.from_resource(\n DagsterEventType.RESOURCE_INIT_FAILURE,\n job_name=job_name,\n execution_plan=execution_plan,\n log_manager=log_manager,\n message="Initialization of resources [{}] failed.".format(", ".join(resource_keys)),\n event_specific_data=EngineEventData(\n metadata={},\n marker_end="resources",\n error=error,\n ),\n )\n\n @staticmethod\n def resource_teardown_failure(\n job_name: str,\n execution_plan: "ExecutionPlan",\n log_manager: DagsterLogManager,\n resource_keys: AbstractSet[str],\n error: SerializableErrorInfo,\n ) -> "DagsterEvent":\n return DagsterEvent.from_resource(\n DagsterEventType.ENGINE_EVENT,\n job_name=job_name,\n execution_plan=execution_plan,\n log_manager=log_manager,\n message="Teardown of resources [{}] failed.".format(", ".join(resource_keys)),\n event_specific_data=EngineEventData(\n metadata={},\n marker_start=None,\n marker_end=None,\n error=error,\n ),\n )\n\n @staticmethod\n def engine_event(\n plan_context: IPlanContext,\n message: str,\n event_specific_data: Optional["EngineEventData"] = None,\n ) -> "DagsterEvent":\n if isinstance(plan_context, IStepContext):\n return DagsterEvent.from_step(\n DagsterEventType.ENGINE_EVENT,\n step_context=plan_context,\n event_specific_data=event_specific_data,\n message=message,\n )\n else:\n return DagsterEvent.from_job(\n DagsterEventType.ENGINE_EVENT,\n plan_context,\n message,\n event_specific_data=event_specific_data,\n )\n\n @staticmethod\n def object_store_operation(\n step_context: IStepContext, object_store_operation_result: "ObjectStoreOperation"\n ) -> "DagsterEvent":\n object_store_name = (\n f"{object_store_operation_result.object_store_name} "\n if object_store_operation_result.object_store_name\n else ""\n )\n\n serialization_strategy_modifier = (\n f" using {object_store_operation_result.serialization_strategy_name}"\n if object_store_operation_result.serialization_strategy_name\n else ""\n )\n\n value_name = object_store_operation_result.value_name\n\n if (\n ObjectStoreOperationType(object_store_operation_result.op)\n == ObjectStoreOperationType.SET_OBJECT\n ):\n message = (\n f"Stored intermediate object for output {value_name} in "\n f"{object_store_name}object store{serialization_strategy_modifier}."\n )\n elif (\n ObjectStoreOperationType(object_store_operation_result.op)\n == ObjectStoreOperationType.GET_OBJECT\n ):\n message = (\n f"Retrieved intermediate object for input {value_name} in "\n f"{object_store_name}object store{serialization_strategy_modifier}."\n )\n elif (\n ObjectStoreOperationType(object_store_operation_result.op)\n == ObjectStoreOperationType.CP_OBJECT\n ):\n message = (\n "Copied intermediate object for input {value_name} from {key} to {dest_key}"\n ).format(\n value_name=value_name,\n key=object_store_operation_result.key,\n dest_key=object_store_operation_result.dest_key,\n )\n else:\n message = ""\n\n return DagsterEvent.from_step(\n 
DagsterEventType.OBJECT_STORE_OPERATION,\n step_context,\n event_specific_data=ObjectStoreOperationResultData(\n op=object_store_operation_result.op,\n value_name=value_name,\n address=object_store_operation_result.key,\n metadata={"key": MetadataValue.path(object_store_operation_result.key)},\n version=object_store_operation_result.version,\n mapping_key=object_store_operation_result.mapping_key,\n ),\n message=message,\n )\n\n @staticmethod\n def handled_output(\n step_context: IStepContext,\n output_name: str,\n manager_key: str,\n message_override: Optional[str] = None,\n metadata: Optional[Mapping[str, MetadataValue]] = None,\n ) -> "DagsterEvent":\n message = f'Handled output "{output_name}" using IO manager "{manager_key}"'\n return DagsterEvent.from_step(\n event_type=DagsterEventType.HANDLED_OUTPUT,\n step_context=step_context,\n event_specific_data=HandledOutputData(\n output_name=output_name,\n manager_key=manager_key,\n metadata=metadata if metadata else {},\n ),\n message=message_override or message,\n )\n\n @staticmethod\n def loaded_input(\n step_context: IStepContext,\n input_name: str,\n manager_key: str,\n upstream_output_name: Optional[str] = None,\n upstream_step_key: Optional[str] = None,\n message_override: Optional[str] = None,\n metadata: Optional[Mapping[str, MetadataValue]] = None,\n ) -> "DagsterEvent":\n message = f'Loaded input "{input_name}" using input manager "{manager_key}"'\n if upstream_output_name:\n message += f', from output "{upstream_output_name}" of step "{upstream_step_key}"'\n\n return DagsterEvent.from_step(\n event_type=DagsterEventType.LOADED_INPUT,\n step_context=step_context,\n event_specific_data=LoadedInputData(\n input_name=input_name,\n manager_key=manager_key,\n upstream_output_name=upstream_output_name,\n upstream_step_key=upstream_step_key,\n metadata=metadata if metadata else {},\n ),\n message=message_override or message,\n )\n\n @staticmethod\n def hook_completed(\n step_context: StepExecutionContext, hook_def: HookDefinition\n ) -> "DagsterEvent":\n event_type = DagsterEventType.HOOK_COMPLETED\n\n event = DagsterEvent(\n event_type_value=event_type.value,\n job_name=step_context.job_name,\n step_handle=step_context.step.handle,\n node_handle=step_context.step.node_handle,\n step_kind_value=step_context.step.kind.value,\n logging_tags=step_context.event_tags,\n message=(\n f'Finished the execution of hook "{hook_def.name}" triggered for'\n f' "{step_context.op.name}".'\n ),\n )\n\n step_context.log.log_dagster_event(\n level=logging.DEBUG, msg=event.message or "", dagster_event=event\n )\n\n return event\n\n @staticmethod\n def hook_errored(\n step_context: StepExecutionContext, error: HookExecutionError\n ) -> "DagsterEvent":\n event_type = DagsterEventType.HOOK_ERRORED\n\n event = DagsterEvent(\n event_type_value=event_type.value,\n job_name=step_context.job_name,\n step_handle=step_context.step.handle,\n node_handle=step_context.step.node_handle,\n step_kind_value=step_context.step.kind.value,\n logging_tags=step_context.event_tags,\n event_specific_data=_validate_event_specific_data(\n event_type,\n HookErroredData(\n error=serializable_error_info_from_exc_info(error.original_exc_info)\n ),\n ),\n )\n\n step_context.log.log_dagster_event(level=logging.ERROR, msg=str(error), dagster_event=event)\n\n return event\n\n @staticmethod\n def hook_skipped(\n step_context: StepExecutionContext, hook_def: HookDefinition\n ) -> "DagsterEvent":\n event_type = DagsterEventType.HOOK_SKIPPED\n\n event = DagsterEvent(\n 
event_type_value=event_type.value,\n job_name=step_context.job_name,\n step_handle=step_context.step.handle,\n node_handle=step_context.step.node_handle,\n step_kind_value=step_context.step.kind.value,\n logging_tags=step_context.event_tags,\n message=(\n f'Skipped the execution of hook "{hook_def.name}". It did not meet its triggering '\n f'condition during the execution of "{step_context.op.name}".'\n ),\n )\n\n step_context.log.log_dagster_event(\n level=logging.DEBUG, msg=event.message or "", dagster_event=event\n )\n\n return event\n\n @staticmethod\n def legacy_compute_log_step_event(step_context: StepExecutionContext):\n step_key = step_context.step.key\n return DagsterEvent.from_step(\n DagsterEventType.LOGS_CAPTURED,\n step_context,\n message=f"Started capturing logs for step: {step_key}.",\n event_specific_data=ComputeLogsCaptureData(\n step_keys=[step_key],\n file_key=step_key,\n ),\n )\n\n @staticmethod\n def capture_logs(\n job_context: IPlanContext,\n step_keys: Sequence[str],\n log_key: Sequence[str],\n log_context: CapturedLogContext,\n ):\n file_key = log_key[-1]\n return DagsterEvent.from_job(\n DagsterEventType.LOGS_CAPTURED,\n job_context,\n message=f"Started capturing logs in process (pid: {os.getpid()}).",\n event_specific_data=ComputeLogsCaptureData(\n step_keys=step_keys,\n file_key=file_key,\n external_stdout_url=log_context.external_stdout_url,\n external_stderr_url=log_context.external_stderr_url,\n external_url=log_context.external_url,\n ),\n )
\n\n\ndef get_step_output_event(\n events: Sequence[DagsterEvent], step_key: str, output_name: Optional[str] = "result"\n) -> Optional["DagsterEvent"]:\n check.sequence_param(events, "events", of_type=DagsterEvent)\n check.str_param(step_key, "step_key")\n check.str_param(output_name, "output_name")\n for event in events:\n if (\n event.event_type == DagsterEventType.STEP_OUTPUT\n and event.step_key == step_key\n and event.step_output_data.output_name == output_name\n ):\n return event\n return None\n\n\n@whitelist_for_serdes\nclass AssetObservationData(\n NamedTuple("_AssetObservation", [("asset_observation", AssetObservation)])\n):\n def __new__(cls, asset_observation: AssetObservation):\n return super(AssetObservationData, cls).__new__(\n cls,\n asset_observation=check.inst_param(\n asset_observation, "asset_observation", AssetObservation\n ),\n )\n\n\n@whitelist_for_serdes\nclass StepMaterializationData(\n NamedTuple(\n "_StepMaterializationData",\n [\n ("materialization", AssetMaterialization),\n ("asset_lineage", Sequence[AssetLineageInfo]),\n ],\n )\n):\n def __new__(\n cls,\n materialization: AssetMaterialization,\n asset_lineage: Optional[Sequence[AssetLineageInfo]] = None,\n ):\n return super(StepMaterializationData, cls).__new__(\n cls,\n materialization=check.inst_param(\n materialization, "materialization", AssetMaterialization\n ),\n asset_lineage=check.opt_sequence_param(\n asset_lineage, "asset_lineage", of_type=AssetLineageInfo\n ),\n )\n\n\n@whitelist_for_serdes\nclass AssetMaterializationPlannedData(\n NamedTuple(\n "_AssetMaterializationPlannedData",\n [("asset_key", AssetKey), ("partition", Optional[str])],\n )\n):\n def __new__(cls, asset_key: AssetKey, partition: Optional[str] = None):\n return super(AssetMaterializationPlannedData, cls).__new__(\n cls,\n asset_key=check.inst_param(asset_key, "asset_key", AssetKey),\n partition=check.opt_str_param(partition, "partition"),\n )\n\n\n@whitelist_for_serdes\nclass StepExpectationResultData(\n NamedTuple(\n "_StepExpectationResultData",\n [\n ("expectation_result", ExpectationResult),\n ],\n )\n):\n def __new__(cls, expectation_result: ExpectationResult):\n return super(StepExpectationResultData, cls).__new__(\n cls,\n expectation_result=check.inst_param(\n expectation_result, "expectation_result", ExpectationResult\n ),\n )\n\n\n@whitelist_for_serdes(\n storage_field_names={"metadata": "metadata_entries"},\n field_serializers={"metadata": MetadataFieldSerializer},\n)\nclass ObjectStoreOperationResultData(\n NamedTuple(\n "_ObjectStoreOperationResultData",\n [\n ("op", ObjectStoreOperationType),\n ("value_name", Optional[str]),\n ("metadata", Mapping[str, MetadataValue]),\n ("address", Optional[str]),\n ("version", Optional[str]),\n ("mapping_key", Optional[str]),\n ],\n )\n):\n def __new__(\n cls,\n op: ObjectStoreOperationType,\n value_name: Optional[str] = None,\n metadata: Optional[Mapping[str, MetadataValue]] = None,\n address: Optional[str] = None,\n version: Optional[str] = None,\n mapping_key: Optional[str] = None,\n ):\n return super(ObjectStoreOperationResultData, cls).__new__(\n cls,\n op=cast(ObjectStoreOperationType, check.str_param(op, "op")),\n value_name=check.opt_str_param(value_name, "value_name"),\n metadata=normalize_metadata(\n check.opt_mapping_param(metadata, "metadata", key_type=str)\n ),\n address=check.opt_str_param(address, "address"),\n version=check.opt_str_param(version, "version"),\n mapping_key=check.opt_str_param(mapping_key, "mapping_key"),\n )\n\n\n@whitelist_for_serdes(\n 
storage_field_names={"metadata": "metadata_entries"},\n field_serializers={"metadata": MetadataFieldSerializer},\n)\nclass EngineEventData(\n NamedTuple(\n "_EngineEventData",\n [\n ("metadata", Mapping[str, MetadataValue]),\n ("error", Optional[SerializableErrorInfo]),\n ("marker_start", Optional[str]),\n ("marker_end", Optional[str]),\n ],\n )\n):\n # serdes log\n # * added optional error\n # * added marker_start / marker_end\n #\n def __new__(\n cls,\n metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n error: Optional[SerializableErrorInfo] = None,\n marker_start: Optional[str] = None,\n marker_end: Optional[str] = None,\n ):\n return super(EngineEventData, cls).__new__(\n cls,\n metadata=normalize_metadata(\n check.opt_mapping_param(metadata, "metadata", key_type=str)\n ),\n error=check.opt_inst_param(error, "error", SerializableErrorInfo),\n marker_start=check.opt_str_param(marker_start, "marker_start"),\n marker_end=check.opt_str_param(marker_end, "marker_end"),\n )\n\n @staticmethod\n def in_process(\n pid: int, step_keys_to_execute: Optional[Sequence[str]] = None\n ) -> "EngineEventData":\n return EngineEventData(\n metadata={\n "pid": MetadataValue.text(str(pid)),\n **(\n {"step_keys": MetadataValue.text(str(step_keys_to_execute))}\n if step_keys_to_execute\n else {}\n ),\n }\n )\n\n @staticmethod\n def multiprocess(\n pid: int, step_keys_to_execute: Optional[Sequence[str]] = None\n ) -> "EngineEventData":\n return EngineEventData(\n metadata={\n "pid": MetadataValue.text(str(pid)),\n **(\n {"step_keys": MetadataValue.text(str(step_keys_to_execute))}\n if step_keys_to_execute\n else {}\n ),\n }\n )\n\n @staticmethod\n def interrupted(steps_interrupted: Sequence[str]) -> "EngineEventData":\n return EngineEventData(\n metadata={"steps_interrupted": MetadataValue.text(str(steps_interrupted))}\n )\n\n @staticmethod\n def engine_error(error: SerializableErrorInfo) -> "EngineEventData":\n return EngineEventData(metadata={}, error=error)\n\n\n@whitelist_for_serdes(storage_name="PipelineFailureData")\nclass JobFailureData(\n NamedTuple(\n "_JobFailureData",\n [\n ("error", Optional[SerializableErrorInfo]),\n ],\n )\n):\n def __new__(cls, error: Optional[SerializableErrorInfo]):\n return super(JobFailureData, cls).__new__(\n cls, error=check.opt_inst_param(error, "error", SerializableErrorInfo)\n )\n\n\n@whitelist_for_serdes(storage_name="PipelineCanceledData")\nclass JobCanceledData(\n NamedTuple(\n "_JobCanceledData",\n [\n ("error", Optional[SerializableErrorInfo]),\n ],\n )\n):\n def __new__(cls, error: Optional[SerializableErrorInfo]):\n return super(JobCanceledData, cls).__new__(\n cls, error=check.opt_inst_param(error, "error", SerializableErrorInfo)\n )\n\n\n@whitelist_for_serdes\nclass HookErroredData(\n NamedTuple(\n "_HookErroredData",\n [\n ("error", SerializableErrorInfo),\n ],\n )\n):\n def __new__(cls, error: SerializableErrorInfo):\n return super(HookErroredData, cls).__new__(\n cls, error=check.inst_param(error, "error", SerializableErrorInfo)\n )\n\n\n@whitelist_for_serdes(\n storage_field_names={"metadata": "metadata_entries"},\n field_serializers={"metadata": MetadataFieldSerializer},\n)\nclass HandledOutputData(\n NamedTuple(\n "_HandledOutputData",\n [\n ("output_name", str),\n ("manager_key", str),\n ("metadata", Mapping[str, MetadataValue]),\n ],\n )\n):\n def __new__(\n cls,\n output_name: str,\n manager_key: str,\n metadata: Optional[Mapping[str, MetadataValue]] = None,\n ):\n return super(HandledOutputData, cls).__new__(\n cls,\n 
output_name=check.str_param(output_name, "output_name"),\n manager_key=check.str_param(manager_key, "manager_key"),\n metadata=normalize_metadata(\n check.opt_mapping_param(metadata, "metadata", key_type=str)\n ),\n )\n\n\n@whitelist_for_serdes(\n storage_field_names={"metadata": "metadata_entries"},\n field_serializers={"metadata": MetadataFieldSerializer},\n)\nclass LoadedInputData(\n NamedTuple(\n "_LoadedInputData",\n [\n ("input_name", str),\n ("manager_key", str),\n ("upstream_output_name", Optional[str]),\n ("upstream_step_key", Optional[str]),\n ("metadata", Mapping[str, MetadataValue]),\n ],\n )\n):\n def __new__(\n cls,\n input_name: str,\n manager_key: str,\n upstream_output_name: Optional[str] = None,\n upstream_step_key: Optional[str] = None,\n metadata: Optional[Mapping[str, MetadataValue]] = None,\n ):\n return super(LoadedInputData, cls).__new__(\n cls,\n input_name=check.str_param(input_name, "input_name"),\n manager_key=check.str_param(manager_key, "manager_key"),\n upstream_output_name=check.opt_str_param(upstream_output_name, "upstream_output_name"),\n upstream_step_key=check.opt_str_param(upstream_step_key, "upstream_step_key"),\n metadata=normalize_metadata(\n check.opt_mapping_param(metadata, "metadata", key_type=str)\n ),\n )\n\n\n@whitelist_for_serdes(storage_field_names={"file_key": "log_key"})\nclass ComputeLogsCaptureData(\n NamedTuple(\n "_ComputeLogsCaptureData",\n [\n ("file_key", str), # renamed log_key => file_key to avoid confusion\n ("step_keys", Sequence[str]),\n ("external_url", Optional[str]),\n ("external_stdout_url", Optional[str]),\n ("external_stderr_url", Optional[str]),\n ],\n )\n):\n def __new__(\n cls,\n file_key: str,\n step_keys: Sequence[str],\n external_url: Optional[str] = None,\n external_stdout_url: Optional[str] = None,\n external_stderr_url: Optional[str] = None,\n ):\n return super(ComputeLogsCaptureData, cls).__new__(\n cls,\n file_key=check.str_param(file_key, "file_key"),\n step_keys=check.opt_list_param(step_keys, "step_keys", of_type=str),\n external_url=check.opt_str_param(external_url, "external_url"),\n external_stdout_url=check.opt_str_param(external_stdout_url, "external_stdout_url"),\n external_stderr_url=check.opt_str_param(external_stderr_url, "external_stderr_url"),\n )\n\n\n###################################################################################################\n# THE GRAVEYARD\n#\n# -|- -|- -|-\n# | | |\n# _-'~~~~~`-_ . _-'~~~~~`-_ _-'~~~~~`-_\n# .' '. .' '. .' 
'.\n# | R I P | | R I P | | R I P |\n# | | | | | |\n# | Synthetic | | Asset | | Pipeline |\n# | Process | | Store | | Init |\n# | Events | | Operations | | Failures |\n# | | | | | |\n###################################################################################################\n\n\n# Old data structures referenced below\n# class AssetStoreOperationData(NamedTuple):\n# op: str\n# step_key: str\n# output_name: str\n# asset_store_key: str\n#\n#\n# class AssetStoreOperationType(Enum):\n# SET_ASSET = "SET_ASSET"\n# GET_ASSET = "GET_ASSET"\n#\n#\n# class PipelineInitFailureData(NamedTuple):\n# error: SerializableErrorInfo\n\n\ndef _handle_back_compat(\n event_type_value: str,\n event_specific_data: Optional[Dict[str, Any]],\n) -> Tuple[str, Optional[Dict[str, Any]]]:\n # transform old specific process events in to engine events\n if event_type_value in [\n "PIPELINE_PROCESS_START",\n "PIPELINE_PROCESS_STARTED",\n "PIPELINE_PROCESS_EXITED",\n ]:\n return "ENGINE_EVENT", {"__class__": "EngineEventData"}\n\n # changes asset store ops in to get/set asset\n elif event_type_value == "ASSET_STORE_OPERATION":\n assert (\n event_specific_data is not None\n ), "ASSET_STORE_OPERATION event must have specific data"\n if event_specific_data["op"] in (\n "GET_ASSET",\n '{"__enum__": "AssetStoreOperationType.GET_ASSET"}',\n ):\n return (\n "LOADED_INPUT",\n {\n "__class__": "LoadedInputData",\n "input_name": event_specific_data["output_name"],\n "manager_key": event_specific_data["asset_store_key"],\n },\n )\n if event_specific_data["op"] in (\n "SET_ASSET",\n '{"__enum__": "AssetStoreOperationType.SET_ASSET"}',\n ):\n return (\n "HANDLED_OUTPUT",\n {\n "__class__": "HandledOutputData",\n "output_name": event_specific_data["output_name"],\n "manager_key": event_specific_data["asset_store_key"],\n },\n )\n\n # previous name for ASSET_MATERIALIZATION was STEP_MATERIALIZATION\n if event_type_value == "STEP_MATERIALIZATION":\n assert event_specific_data is not None, "STEP_MATERIALIZATION event must have specific data"\n return "ASSET_MATERIALIZATION", event_specific_data\n\n # transform PIPELINE_INIT_FAILURE to PIPELINE_FAILURE\n if event_type_value == "PIPELINE_INIT_FAILURE":\n assert (\n event_specific_data is not None\n ), "PIPELINE_INIT_FAILURE event must have specific data"\n return "PIPELINE_FAILURE", {\n "__class__": "PipelineFailureData",\n "error": event_specific_data.get("error"),\n }\n\n return event_type_value, event_specific_data\n
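As an aside on the `_handle_back_compat` helper above: the sketch below is an illustrative, standalone re-implementation of just its event-type renaming rules (the real helper also rewrites `event_specific_data`). The function and dict names are hypothetical and nothing is imported from dagster.

# Illustrative only: the event-type renames that the back-compat path above applies
# when reading old event log entries. Unknown names pass through unchanged.
_LEGACY_EVENT_TYPE_RENAMES = {
    # old process-level events were folded into the generic engine event
    "PIPELINE_PROCESS_START": "ENGINE_EVENT",
    "PIPELINE_PROCESS_STARTED": "ENGINE_EVENT",
    "PIPELINE_PROCESS_EXITED": "ENGINE_EVENT",
    # previous name for ASSET_MATERIALIZATION
    "STEP_MATERIALIZATION": "ASSET_MATERIALIZATION",
    # init failures are now reported as plain job failures
    "PIPELINE_INIT_FAILURE": "PIPELINE_FAILURE",
}


def remap_legacy_event_type(event_type_value: str) -> str:
    """Return the modern event type name for a possibly-legacy stored value."""
    return _LEGACY_EVENT_TYPE_RENAMES.get(event_type_value, event_type_value)


assert remap_legacy_event_type("STEP_MATERIALIZATION") == "ASSET_MATERIALIZATION"
assert remap_legacy_event_type("STEP_OUTPUT") == "STEP_OUTPUT"  # unknown names pass through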
", "current_page_name": "_modules/dagster/_core/events", "customsidebar": null, "favicon_url": null, "log": {"alabaster_version": "0.7.13", "body": "
Source code for dagster._core.events.log
\nfrom typing import Mapping, NamedTuple, Optional, Union\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, public\nfrom dagster._core.definitions.events import AssetMaterialization, AssetObservation\nfrom dagster._core.events import DagsterEvent, DagsterEventType\nfrom dagster._core.utils import coerce_valid_log_level\nfrom dagster._serdes.serdes import (\n    deserialize_value,\n    serialize_value,\n    whitelist_for_serdes,\n)\nfrom dagster._utils.error import SerializableErrorInfo\nfrom dagster._utils.log import (\n    JsonEventLoggerHandler,\n    StructuredLoggerHandler,\n    StructuredLoggerMessage,\n    construct_single_handler_logger,\n)\n\n\n
[docs]@whitelist_for_serdes(\n # These were originally distinguished from each other but ended up being empty subclasses\n # of EventLogEntry -- instead of using the subclasses we were relying on\n # EventLogEntry.is_dagster_event to distinguish events that originate in the logging\n # machinery from events that are yielded by user code\n old_storage_names={"DagsterEventRecord", "LogMessageRecord", "EventRecord"},\n old_fields={"message": ""},\n storage_field_names={"job_name": "pipeline_name"},\n)\nclass EventLogEntry(\n NamedTuple(\n "_EventLogEntry",\n [\n ("error_info", PublicAttr[Optional[SerializableErrorInfo]]),\n ("level", PublicAttr[Union[str, int]]),\n ("user_message", PublicAttr[str]),\n ("run_id", PublicAttr[str]),\n ("timestamp", PublicAttr[float]),\n ("step_key", PublicAttr[Optional[str]]),\n ("job_name", PublicAttr[Optional[str]]),\n ("dagster_event", PublicAttr[Optional[DagsterEvent]]),\n ],\n )\n):\n """Entries in the event log.\n\n Users should not instantiate this object directly. These entries may originate from the logging machinery (DagsterLogManager/context.log), from\n framework events (e.g. EngineEvent), or they may correspond to events yielded by user code\n (e.g. Output).\n\n Args:\n error_info (Optional[SerializableErrorInfo]): Error info for an associated exception, if\n any, as generated by serializable_error_info_from_exc_info and friends.\n level (Union[str, int]): The Python log level at which to log this event. Note that\n framework and user code events are also logged to Python logging. This value may be an\n integer or a (case-insensitive) string member of PYTHON_LOGGING_LEVELS_NAMES.\n user_message (str): For log messages, this is the user-generated message.\n run_id (str): The id of the run which generated this event.\n timestamp (float): The Unix timestamp of this event.\n step_key (Optional[str]): The step key for the step which generated this event. Some events\n are generated outside of a step context.\n job_name (Optional[str]): The job which generated this event. Some events are\n generated outside of a job context.\n dagster_event (Optional[DagsterEvent]): For framework and user events, the associated\n structured event.\n """\n\n def __new__(\n cls,\n error_info,\n level,\n user_message,\n run_id,\n timestamp,\n step_key=None,\n job_name=None,\n dagster_event=None,\n ):\n return super(EventLogEntry, cls).__new__(\n cls,\n check.opt_inst_param(error_info, "error_info", SerializableErrorInfo),\n coerce_valid_log_level(level),\n check.str_param(user_message, "user_message"),\n check.str_param(run_id, "run_id"),\n check.float_param(timestamp, "timestamp"),\n check.opt_str_param(step_key, "step_key"),\n check.opt_str_param(job_name, "job_name"),\n check.opt_inst_param(dagster_event, "dagster_event", DagsterEvent),\n )\n\n @public\n @property\n def is_dagster_event(self) -> bool:\n """bool: If this entry contains a DagsterEvent."""\n return bool(self.dagster_event)\n\n
[docs] @public\n def get_dagster_event(self) -> DagsterEvent:\n """DagsterEvent: Returns the DagsterEvent contained within this entry. If this entry does not\n contain a DagsterEvent, an error will be raised.\n """\n if not isinstance(self.dagster_event, DagsterEvent):\n check.failed(\n "Not a dagster event, check is_dagster_event before calling get_dagster_event",\n )\n\n return self.dagster_event
\n\n def to_json(self):\n return serialize_value(self)\n\n @staticmethod\n def from_json(json_str: str):\n return deserialize_value(json_str, EventLogEntry)\n\n @public\n @property\n def dagster_event_type(self) -> Optional[DagsterEventType]:\n """Optional[DagsterEventType]: The type of the DagsterEvent contained by this entry, if any."""\n return self.dagster_event.event_type if self.dagster_event else None\n\n @public\n @property\n def message(self) -> str:\n """Return the message from the structured DagsterEvent if present, fallback to user_message."""\n if self.is_dagster_event:\n msg = self.get_dagster_event().message\n if msg is not None:\n return msg\n\n return self.user_message\n\n @property\n def asset_materialization(self) -> Optional[AssetMaterialization]:\n if (\n self.dagster_event\n and self.dagster_event.event_type_value == DagsterEventType.ASSET_MATERIALIZATION\n ):\n materialization = self.dagster_event.step_materialization_data.materialization\n if isinstance(materialization, AssetMaterialization):\n return materialization\n\n return None\n\n @property\n def asset_observation(self) -> Optional[AssetObservation]:\n if (\n self.dagster_event\n and self.dagster_event.event_type_value == DagsterEventType.ASSET_OBSERVATION\n ):\n observation = self.dagster_event.asset_observation_data.asset_observation\n if isinstance(observation, AssetObservation):\n return observation\n\n return None\n\n @property\n def tags(self) -> Optional[Mapping[str, str]]:\n materialization = self.asset_materialization\n if materialization:\n return materialization.tags\n\n observation = self.asset_observation\n if observation:\n return observation.tags\n\n return None
\n\n\ndef construct_event_record(logger_message: StructuredLoggerMessage) -> EventLogEntry:\n check.inst_param(logger_message, "logger_message", StructuredLoggerMessage)\n\n return EventLogEntry(\n level=logger_message.level,\n user_message=logger_message.meta["orig_message"],\n run_id=logger_message.meta["run_id"],\n timestamp=logger_message.record.created,\n step_key=logger_message.meta.get("step_key"),\n job_name=logger_message.meta.get("job_name"),\n dagster_event=logger_message.meta.get("dagster_event"),\n error_info=None,\n )\n\n\ndef construct_event_logger(event_record_callback):\n """Callback receives a stream of event_records. Piggybacks on the logging machinery."""\n check.callable_param(event_record_callback, "event_record_callback")\n\n return construct_single_handler_logger(\n "event-logger",\n "debug",\n StructuredLoggerHandler(\n lambda logger_message: event_record_callback(construct_event_record(logger_message))\n ),\n )\n\n\ndef construct_json_event_logger(json_path):\n """Record a stream of event records to json."""\n check.str_param(json_path, "json_path")\n return construct_single_handler_logger(\n "json-event-record-logger",\n "debug",\n JsonEventLoggerHandler(\n json_path,\n lambda record: construct_event_record(\n StructuredLoggerMessage(\n name=record.name,\n message=record.msg,\n level=record.levelno,\n meta=record.dagster_meta,\n record=record,\n )\n ),\n ),\n )\n
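A minimal usage sketch for `EventLogEntry` as defined above, assuming a working `dagster` install: build an entry for a plain (non-framework) log message, then round-trip it through `to_json()` / `from_json()`. The run id and message are placeholders.

# Sketch only: exercises the EventLogEntry fields and helpers shown in the listing above.
import time

from dagster._core.events.log import EventLogEntry

entry = EventLogEntry(
    error_info=None,
    level="INFO",                       # str or int; coerced by coerce_valid_log_level
    user_message="hello from user code",
    run_id="1234-abcd",                 # placeholder run id for illustration
    timestamp=time.time(),
)

assert not entry.is_dagster_event       # no structured DagsterEvent attached
assert entry.message == "hello from user code"  # falls back to user_message

round_tripped = EventLogEntry.from_json(entry.to_json())
assert round_tripped == entry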
", "current_page_name": "_modules/dagster/_core/events/log", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}, {"link": "../", "title": "dagster._core.events"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.events.log"}, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.events"}, "execution": {"api": {"alabaster_version": "0.7.13", "body": "
Source code for dagster._core.execution.api
\nimport sys\nfrom contextlib import contextmanager\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Callable,\n    Dict,\n    Iterator,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Tuple,\n    Union,\n    cast,\n)\n\nimport dagster._check as check\nfrom dagster._core.definitions import IJob, JobDefinition\nfrom dagster._core.definitions.events import AssetKey\nfrom dagster._core.definitions.job_base import InMemoryJob\nfrom dagster._core.definitions.reconstruct import ReconstructableJob\nfrom dagster._core.definitions.repository_definition import RepositoryLoadData\nfrom dagster._core.errors import DagsterExecutionInterruptedError, DagsterInvariantViolationError\nfrom dagster._core.events import DagsterEvent, EngineEventData\nfrom dagster._core.execution.context.system import PlanOrchestrationContext\nfrom dagster._core.execution.plan.execute_plan import inner_plan_execution_iterator\nfrom dagster._core.execution.plan.plan import ExecutionPlan\nfrom dagster._core.execution.plan.state import KnownExecutionState\nfrom dagster._core.execution.retries import RetryMode\nfrom dagster._core.instance import DagsterInstance, InstanceRef\nfrom dagster._core.selector import parse_step_selection\nfrom dagster._core.storage.dagster_run import DagsterRun, DagsterRunStatus\nfrom dagster._core.system_config.objects import ResolvedRunConfig\nfrom dagster._core.telemetry import log_dagster_event, log_repo_stats, telemetry_wrapper\nfrom dagster._utils.error import serializable_error_info_from_exc_info\nfrom dagster._utils.interrupts import capture_interrupts\nfrom dagster._utils.merger import merge_dicts\n\nfrom .context_creation_job import (\n    ExecutionContextManager,\n    PlanExecutionContextManager,\n    PlanOrchestrationContextManager,\n    orchestration_context_event_generator,\n    scoped_job_context,\n)\nfrom .job_execution_result import JobExecutionResult\n\nif TYPE_CHECKING:\n    from dagster._core.execution.plan.outputs import StepOutputHandle\n\n## Brief guide to the execution APIs\n# | function name               | operates over      | sync  | supports    | creates new DagsterRun  |\n# |                             |                    |       | reexecution | in instance             |\n# | --------------------------- | ------------------ | ----- | ----------- | ----------------------- |\n# | execute_job                 | ReconstructableJob | sync  | yes         | yes                     |\n# | execute_run_iterator        | DagsterRun         | async | (1)         | no                      |\n# | execute_run                 | DagsterRun         | sync  | (1)         | no                      |\n# | execute_plan_iterator       | ExecutionPlan      | async | (2)         | no                      |\n# | execute_plan                | ExecutionPlan      | sync  | (2)         | no                      |\n#\n# Notes on reexecution support:\n# (1) The appropriate bits must be set on the DagsterRun passed to this function. 
Specifically,\n#     parent_run_id and root_run_id must be set and consistent, and if a resolved_op_selection or\n#     step_keys_to_execute are set they must be consistent with the parent and root runs.\n# (2) As for (1), but the ExecutionPlan passed must also agree in all relevant bits.\n\n\ndef execute_run_iterator(\n    job: IJob,\n    dagster_run: DagsterRun,\n    instance: DagsterInstance,\n    resume_from_failure: bool = False,\n) -> Iterator[DagsterEvent]:\n    check.inst_param(job, "job", IJob)\n    check.inst_param(dagster_run, "dagster_run", DagsterRun)\n    check.inst_param(instance, "instance", DagsterInstance)\n\n    if dagster_run.status == DagsterRunStatus.CANCELED:\n        # This can happen if the run was force-terminated while it was starting\n        def gen_execute_on_cancel():\n            yield instance.report_engine_event(\n                "Not starting execution since the run was canceled before execution could start",\n                dagster_run,\n            )\n\n        return gen_execute_on_cancel()\n\n    if not resume_from_failure:\n        if dagster_run.status not in (DagsterRunStatus.NOT_STARTED, DagsterRunStatus.STARTING):\n            if dagster_run.is_finished:\n\n                def gen_ignore_duplicate_run_worker():\n                    yield instance.report_engine_event(\n                        "Ignoring a run worker that started after the run had already finished.",\n                        dagster_run,\n                    )\n\n                return gen_ignore_duplicate_run_worker()\n            elif instance.run_monitoring_enabled:\n                # This can happen if the pod was unexpectedly restarted by the cluster - ignore it since\n                # the run monitoring daemon will also spin up a new pod\n                def gen_ignore_duplicate_run_worker():\n                    yield instance.report_engine_event(\n                        "Ignoring a duplicate run that was started from somewhere other than"\n                        " the run monitor daemon",\n                        dagster_run,\n                    )\n\n                return gen_ignore_duplicate_run_worker()\n            else:\n\n                def gen_fail_restarted_run_worker():\n                    yield instance.report_engine_event(\n                        f"{dagster_run.job_name} ({dagster_run.run_id}) started a new"\n                        f" run worker while the run was already in state {dagster_run.status}."\n                        " This most frequently happens when the run worker unexpectedly stops"\n                        " and is restarted by the cluster. 
Marking the run as failed.",\n                        dagster_run,\n                    )\n                    yield instance.report_run_failed(dagster_run)\n\n                return gen_fail_restarted_run_worker()\n\n    else:\n        check.invariant(\n            dagster_run.status == DagsterRunStatus.STARTED\n            or dagster_run.status == DagsterRunStatus.STARTING,\n            desc=(\n                "Run of {} ({}) in state {}, expected STARTED or STARTING because it's "\n                "resuming from a run worker failure".format(\n                    dagster_run.job_name, dagster_run.run_id, dagster_run.status\n                )\n            ),\n        )\n\n    if (\n        dagster_run.resolved_op_selection\n        or dagster_run.asset_selection\n        or dagster_run.asset_check_selection\n    ):\n        # when `execute_run_iterator` is directly called, the sub pipeline hasn't been created\n        # note that when we receive the solids to execute via DagsterRun, it won't support\n        # solid selection query syntax\n        job = job.get_subset(\n            op_selection=(\n                list(dagster_run.resolved_op_selection)\n                if dagster_run.resolved_op_selection\n                else None\n            ),\n            asset_selection=dagster_run.asset_selection,\n            asset_check_selection=dagster_run.asset_check_selection,\n        )\n\n    execution_plan = _get_execution_plan_from_run(job, dagster_run, instance)\n    if isinstance(job, ReconstructableJob):\n        job = job.with_repository_load_data(execution_plan.repository_load_data)\n\n    return iter(\n        ExecuteRunWithPlanIterable(\n            execution_plan=execution_plan,\n            iterator=job_execution_iterator,\n            execution_context_manager=PlanOrchestrationContextManager(\n                context_event_generator=orchestration_context_event_generator,\n                job=job,\n                execution_plan=execution_plan,\n                dagster_run=dagster_run,\n                instance=instance,\n                run_config=dagster_run.run_config,\n                raise_on_error=False,\n                executor_defs=None,\n                output_capture=None,\n                resume_from_failure=resume_from_failure,\n            ),\n        )\n    )\n\n\ndef execute_run(\n    job: IJob,\n    dagster_run: DagsterRun,\n    instance: DagsterInstance,\n    raise_on_error: bool = False,\n) -> JobExecutionResult:\n    """Executes an existing job run synchronously.\n\n    Synchronous version of execute_run_iterator.\n\n    Args:\n        job (IJob): The pipeline to execute.\n        dagster_run (DagsterRun): The run to execute\n        instance (DagsterInstance): The instance in which the run has been created.\n        raise_on_error (Optional[bool]): Whether or not to raise exceptions when they occur.\n            Defaults to ``False``.\n\n    Returns:\n        JobExecutionResult: The result of the execution.\n    """\n    if isinstance(job, JobDefinition):\n        raise DagsterInvariantViolationError(\n            "execute_run requires a reconstructable job but received job definition directly"\n            " instead. To support hand-off to other processes please wrap your definition in a call"\n            " to reconstructable(). 
Learn more about reconstructable here:"\n            " https://docs.dagster.io/_apidocs/execution#dagster.reconstructable"\n        )\n\n    check.inst_param(job, "job", IJob)\n    check.inst_param(dagster_run, "dagster_run", DagsterRun)\n    check.inst_param(instance, "instance", DagsterInstance)\n\n    if dagster_run.status == DagsterRunStatus.CANCELED:\n        message = "Not starting execution since the run was canceled before execution could start"\n        instance.report_engine_event(\n            message,\n            dagster_run,\n        )\n        raise DagsterInvariantViolationError(message)\n\n    check.invariant(\n        dagster_run.status == DagsterRunStatus.NOT_STARTED\n        or dagster_run.status == DagsterRunStatus.STARTING,\n        desc="Run {} ({}) in state {}, expected NOT_STARTED or STARTING".format(\n            dagster_run.job_name, dagster_run.run_id, dagster_run.status\n        ),\n    )\n    if (\n        dagster_run.resolved_op_selection\n        or dagster_run.asset_selection\n        or dagster_run.asset_check_selection\n    ):\n        # when `execute_run` is directly called, the sub job hasn't been created\n        # note that when we receive the solids to execute via DagsterRun, it won't support\n        # solid selection query syntax\n        job = job.get_subset(\n            op_selection=(\n                list(dagster_run.resolved_op_selection)\n                if dagster_run.resolved_op_selection\n                else None\n            ),\n            asset_selection=dagster_run.asset_selection,\n            asset_check_selection=dagster_run.asset_check_selection,\n        )\n\n    execution_plan = _get_execution_plan_from_run(job, dagster_run, instance)\n    if isinstance(job, ReconstructableJob):\n        job = job.with_repository_load_data(execution_plan.repository_load_data)\n\n    output_capture: Optional[Dict[StepOutputHandle, Any]] = {}\n\n    _execute_run_iterable = ExecuteRunWithPlanIterable(\n        execution_plan=execution_plan,\n        iterator=job_execution_iterator,\n        execution_context_manager=PlanOrchestrationContextManager(\n            context_event_generator=orchestration_context_event_generator,\n            job=job,\n            execution_plan=execution_plan,\n            dagster_run=dagster_run,\n            instance=instance,\n            run_config=dagster_run.run_config,\n            raise_on_error=raise_on_error,\n            executor_defs=None,\n            output_capture=output_capture,\n        ),\n    )\n    event_list = list(_execute_run_iterable)\n\n    # We need to reload the run object after execution for it to be accurate\n    reloaded_dagster_run = check.not_none(instance.get_run_by_id(dagster_run.run_id))\n\n    return JobExecutionResult(\n        job.get_definition(),\n        scoped_job_context(\n            execution_plan,\n            job,\n            reloaded_dagster_run.run_config,\n            reloaded_dagster_run,\n            instance,\n        ),\n        event_list,\n        reloaded_dagster_run,\n    )\n\n\n@contextmanager\ndef ephemeral_instance_if_missing(\n    instance: Optional[DagsterInstance],\n) -> Iterator[DagsterInstance]:\n    if instance:\n        yield instance\n    else:\n        with DagsterInstance.ephemeral() as ephemeral_instance:\n            yield ephemeral_instance\n\n\n
[docs]class ReexecutionOptions(NamedTuple):\n """Reexecution options for python-based execution in Dagster.\n\n Args:\n parent_run_id (str): The run_id of the run to reexecute.\n step_selection (Sequence[str]):\n The list of step selections to reexecute. Must be a subset or match of the\n set of steps executed in the original run. For example:\n\n - ``['some_op']``: selects ``some_op`` itself.\n - ``['*some_op']``: select ``some_op`` and all its ancestors (upstream dependencies).\n - ``['*some_op+++']``: select ``some_op``, all its ancestors, and its descendants\n (downstream dependencies) within 3 levels down.\n - ``['*some_op', 'other_op_a', 'other_op_b+']``: select ``some_op`` and all its\n ancestors, ``other_op_a`` itself, and ``other_op_b`` and its direct child ops.\n """\n\n parent_run_id: str\n step_selection: Sequence[str] = []\n\n @staticmethod\n def from_failure(run_id: str, instance: DagsterInstance) -> "ReexecutionOptions":\n """Creates reexecution options from a failed run.\n\n Args:\n run_id (str): The run_id of the failed run. Run must fail in order to be reexecuted.\n instance (DagsterInstance): The DagsterInstance that the original run occurred in.\n\n Returns:\n ReexecutionOptions: Reexecution options to pass to a python execution.\n """\n from dagster._core.execution.plan.state import KnownExecutionState\n\n parent_run = check.not_none(instance.get_run_by_id(run_id))\n check.invariant(\n parent_run.status == DagsterRunStatus.FAILURE,\n "Cannot reexecute from failure a run that is not failed",\n )\n # Tried to thread through KnownExecutionState to execution plan creation, but little benefit.\n # It is recalculated later by the re-execution machinery.\n step_keys_to_execute, _ = KnownExecutionState.build_resume_retry_reexecution(\n instance, parent_run=cast(DagsterRun, instance.get_run_by_id(run_id))\n )\n return ReexecutionOptions(parent_run_id=run_id, step_selection=step_keys_to_execute)
\n\n\n
[docs]def execute_job(\n job: ReconstructableJob,\n instance: "DagsterInstance",\n run_config: Any = None,\n tags: Optional[Mapping[str, Any]] = None,\n raise_on_error: bool = False,\n op_selection: Optional[Sequence[str]] = None,\n reexecution_options: Optional[ReexecutionOptions] = None,\n asset_selection: Optional[Sequence[AssetKey]] = None,\n) -> JobExecutionResult:\n """Execute a job synchronously.\n\n This API represents dagster's python entrypoint for out-of-process\n execution. For most testing purposes, :py:meth:`~dagster.JobDefinition.\n execute_in_process` will be more suitable, but when wanting to run\n execution using an out-of-process executor (such as :py:class:`dagster.\n multiprocess_executor`), then `execute_job` is suitable.\n\n `execute_job` expects a persistent :py:class:`DagsterInstance` for\n execution, meaning the `$DAGSTER_HOME` environment variable must be set.\n It also expects a reconstructable pointer to a :py:class:`JobDefinition` so\n that it can be reconstructed in separate processes. This can be done by\n wrapping the ``JobDefinition`` in a call to :py:func:`dagster.\n reconstructable`.\n\n .. code-block:: python\n\n from dagster import DagsterInstance, execute_job, job, reconstructable\n\n @job\n def the_job():\n ...\n\n instance = DagsterInstance.get()\n result = execute_job(reconstructable(the_job), instance=instance)\n assert result.success\n\n\n If using the :py:meth:`~dagster.GraphDefinition.to_job` method to\n construct the ``JobDefinition``, then the invocation must be wrapped in a\n module-scope function, which can be passed to ``reconstructable``.\n\n .. code-block:: python\n\n from dagster import graph, reconstructable\n\n @graph\n def the_graph():\n ...\n\n def define_job():\n return the_graph.to_job(...)\n\n result = execute_job(reconstructable(define_job), ...)\n\n Since `execute_job` is potentially executing outside of the current\n process, output objects need to be retrieved by use of the provided job's\n io managers. Output objects can be retrieved by opening the result of\n `execute_job` as a context manager.\n\n .. code-block:: python\n\n from dagster import execute_job\n\n with execute_job(...) as result:\n output_obj = result.output_for_node("some_op")\n\n ``execute_job`` can also be used to reexecute a run, by providing a :py:class:`ReexecutionOptions` object.\n\n .. code-block:: python\n\n from dagster import ReexecutionOptions, execute_job\n\n instance = DagsterInstance.get()\n\n options = ReexecutionOptions.from_failure(run_id=failed_run_id, instance)\n execute_job(reconstructable(job), instance, reexecution_options=options)\n\n Parameters:\n job (ReconstructableJob): A reconstructable pointer to a :py:class:`JobDefinition`.\n instance (DagsterInstance): The instance to execute against.\n run_config (Optional[dict]): The configuration that parametrizes this run, as a dict.\n tags (Optional[Dict[str, Any]]): Arbitrary key-value pairs that will be added to run logs.\n raise_on_error (Optional[bool]): Whether or not to raise exceptions when they occur.\n Defaults to ``False``.\n op_selection (Optional[List[str]]): A list of op selection queries (including single\n op names) to execute. 
For example:\n\n - ``['some_op']``: selects ``some_op`` itself.\n - ``['*some_op']``: select ``some_op`` and all its ancestors (upstream dependencies).\n - ``['*some_op+++']``: select ``some_op``, all its ancestors, and its descendants\n (downstream dependencies) within 3 levels down.\n - ``['*some_op', 'other_op_a', 'other_op_b+']``: select ``some_op`` and all its\n ancestors, ``other_op_a`` itself, and ``other_op_b`` and its direct child ops.\n reexecution_options (Optional[ReexecutionOptions]):\n Reexecution options to provide to the run, if this run is\n intended to be a reexecution of a previous run. Cannot be used in\n tandem with the ``op_selection`` argument.\n\n Returns:\n :py:class:`JobExecutionResult`: The result of job execution.\n """\n check.inst_param(job, "job", ReconstructableJob)\n check.inst_param(instance, "instance", DagsterInstance)\n check.opt_sequence_param(asset_selection, "asset_selection", of_type=AssetKey)\n\n # get the repository load data here because we call job.get_definition() later in this fn\n job_def, _ = _job_with_repository_load_data(job)\n\n if reexecution_options is not None and op_selection is not None:\n raise DagsterInvariantViolationError(\n "re-execution and op selection cannot be used together at this time."\n )\n\n if reexecution_options:\n if run_config is None:\n run = check.not_none(instance.get_run_by_id(reexecution_options.parent_run_id))\n run_config = run.run_config\n return _reexecute_job(\n job_arg=job_def,\n parent_run_id=reexecution_options.parent_run_id,\n run_config=run_config,\n step_selection=list(reexecution_options.step_selection),\n tags=tags,\n instance=instance,\n raise_on_error=raise_on_error,\n )\n else:\n return _logged_execute_job(\n job_arg=job_def,\n instance=instance,\n run_config=run_config,\n tags=tags,\n op_selection=op_selection,\n raise_on_error=raise_on_error,\n asset_selection=asset_selection,\n )
\n\n\n@telemetry_wrapper\ndef _logged_execute_job(\n job_arg: Union[IJob, JobDefinition],\n instance: DagsterInstance,\n run_config: Optional[Mapping[str, object]] = None,\n tags: Optional[Mapping[str, str]] = None,\n op_selection: Optional[Sequence[str]] = None,\n raise_on_error: bool = True,\n asset_selection: Optional[Sequence[AssetKey]] = None,\n) -> JobExecutionResult:\n check.inst_param(instance, "instance", DagsterInstance)\n\n job_arg, repository_load_data = _job_with_repository_load_data(job_arg)\n\n (\n job_arg,\n run_config,\n tags,\n resolved_op_selection,\n op_selection,\n ) = _check_execute_job_args(\n job_arg=job_arg,\n run_config=run_config,\n tags=tags,\n op_selection=op_selection,\n )\n\n log_repo_stats(instance=instance, job=job_arg, source="execute_pipeline")\n\n dagster_run = instance.create_run_for_job(\n job_def=job_arg.get_definition(),\n run_config=run_config,\n op_selection=op_selection,\n resolved_op_selection=resolved_op_selection,\n tags=tags,\n job_code_origin=(\n job_arg.get_python_origin() if isinstance(job_arg, ReconstructableJob) else None\n ),\n repository_load_data=repository_load_data,\n asset_selection=frozenset(asset_selection) if asset_selection else None,\n )\n\n return execute_run(\n job_arg,\n dagster_run,\n instance,\n raise_on_error=raise_on_error,\n )\n\n\ndef _reexecute_job(\n job_arg: Union[IJob, JobDefinition],\n parent_run_id: str,\n run_config: Optional[Mapping[str, object]] = None,\n step_selection: Optional[Sequence[str]] = None,\n tags: Optional[Mapping[str, str]] = None,\n instance: Optional[DagsterInstance] = None,\n raise_on_error: bool = True,\n) -> JobExecutionResult:\n """Reexecute an existing job run."""\n check.opt_sequence_param(step_selection, "step_selection", of_type=str)\n\n check.str_param(parent_run_id, "parent_run_id")\n\n with ephemeral_instance_if_missing(instance) as execute_instance:\n job_arg, repository_load_data = _job_with_repository_load_data(job_arg)\n\n (job_arg, run_config, tags, _, _) = _check_execute_job_args(\n job_arg=job_arg,\n run_config=run_config,\n tags=tags,\n )\n\n parent_dagster_run = execute_instance.get_run_by_id(parent_run_id)\n if parent_dagster_run is None:\n check.failed(\n f"No parent run with id {parent_run_id} found in instance.",\n )\n\n execution_plan: Optional[ExecutionPlan] = None\n # resolve step selection DSL queries using parent execution information\n if step_selection:\n execution_plan = _resolve_reexecute_step_selection(\n execute_instance,\n job_arg,\n run_config,\n cast(DagsterRun, parent_dagster_run),\n step_selection,\n )\n\n if parent_dagster_run.asset_selection:\n job_arg = job_arg.get_subset(\n op_selection=None, asset_selection=parent_dagster_run.asset_selection\n )\n\n dagster_run = execute_instance.create_run_for_job(\n job_def=job_arg.get_definition(),\n execution_plan=execution_plan,\n run_config=run_config,\n tags=tags,\n op_selection=parent_dagster_run.op_selection,\n asset_selection=parent_dagster_run.asset_selection,\n resolved_op_selection=parent_dagster_run.resolved_op_selection,\n root_run_id=parent_dagster_run.root_run_id or parent_dagster_run.run_id,\n parent_run_id=parent_dagster_run.run_id,\n job_code_origin=(\n job_arg.get_python_origin() if isinstance(job_arg, ReconstructableJob) else None\n ),\n repository_load_data=repository_load_data,\n )\n\n return execute_run(\n job_arg,\n dagster_run,\n execute_instance,\n raise_on_error=raise_on_error,\n )\n check.failed("Should not reach here.")\n\n\ndef execute_plan_iterator(\n execution_plan: ExecutionPlan,\n 
job: IJob,\n dagster_run: DagsterRun,\n instance: DagsterInstance,\n retry_mode: Optional[RetryMode] = None,\n run_config: Optional[Mapping[str, object]] = None,\n) -> Iterator[DagsterEvent]:\n check.inst_param(execution_plan, "execution_plan", ExecutionPlan)\n check.inst_param(job, "job", IJob)\n check.inst_param(dagster_run, "dagster_run", DagsterRun)\n check.inst_param(instance, "instance", DagsterInstance)\n retry_mode = check.opt_inst_param(retry_mode, "retry_mode", RetryMode, RetryMode.DISABLED)\n run_config = check.opt_mapping_param(run_config, "run_config")\n\n if isinstance(job, ReconstructableJob):\n job = job.with_repository_load_data(execution_plan.repository_load_data)\n\n return iter(\n ExecuteRunWithPlanIterable(\n execution_plan=execution_plan,\n iterator=inner_plan_execution_iterator,\n execution_context_manager=PlanExecutionContextManager(\n job=job,\n retry_mode=retry_mode,\n execution_plan=execution_plan,\n run_config=run_config,\n dagster_run=dagster_run,\n instance=instance,\n ),\n )\n )\n\n\ndef execute_plan(\n execution_plan: ExecutionPlan,\n job: IJob,\n instance: DagsterInstance,\n dagster_run: DagsterRun,\n run_config: Optional[Mapping[str, object]] = None,\n retry_mode: Optional[RetryMode] = None,\n) -> Sequence[DagsterEvent]:\n """This is the entry point of dagster-graphql executions. For the dagster CLI entry point, see\n execute_job() above.\n """\n check.inst_param(execution_plan, "execution_plan", ExecutionPlan)\n check.inst_param(job, "job", IJob)\n check.inst_param(instance, "instance", DagsterInstance)\n check.inst_param(dagster_run, "dagster_run", DagsterRun)\n run_config = check.opt_mapping_param(run_config, "run_config")\n check.opt_inst_param(retry_mode, "retry_mode", RetryMode)\n\n return list(\n execute_plan_iterator(\n execution_plan=execution_plan,\n job=job,\n run_config=run_config,\n dagster_run=dagster_run,\n instance=instance,\n retry_mode=retry_mode,\n )\n )\n\n\ndef _get_execution_plan_from_run(\n job: IJob,\n dagster_run: DagsterRun,\n instance: DagsterInstance,\n) -> ExecutionPlan:\n execution_plan_snapshot = (\n instance.get_execution_plan_snapshot(dagster_run.execution_plan_snapshot_id)\n if dagster_run.execution_plan_snapshot_id\n else None\n )\n\n # Rebuild from snapshot if able and selection has not changed\n if (\n execution_plan_snapshot is not None\n and execution_plan_snapshot.can_reconstruct_plan\n and job.resolved_op_selection == dagster_run.resolved_op_selection\n and job.asset_selection == dagster_run.asset_selection\n and job.asset_check_selection == dagster_run.asset_check_selection\n ):\n return ExecutionPlan.rebuild_from_snapshot(\n dagster_run.job_name,\n execution_plan_snapshot,\n )\n\n return create_execution_plan(\n job,\n run_config=dagster_run.run_config,\n step_keys_to_execute=dagster_run.step_keys_to_execute,\n instance_ref=instance.get_ref() if instance.is_persistent else None,\n repository_load_data=(\n execution_plan_snapshot.repository_load_data if execution_plan_snapshot else None\n ),\n known_state=(\n execution_plan_snapshot.initial_known_state if execution_plan_snapshot else None\n ),\n )\n\n\ndef create_execution_plan(\n job: Union[IJob, JobDefinition],\n run_config: Optional[Mapping[str, object]] = None,\n step_keys_to_execute: Optional[Sequence[str]] = None,\n known_state: Optional[KnownExecutionState] = None,\n instance_ref: Optional[InstanceRef] = None,\n tags: Optional[Mapping[str, str]] = None,\n repository_load_data: Optional[RepositoryLoadData] = None,\n) -> ExecutionPlan:\n if isinstance(job, 
IJob):\n # If you have repository_load_data, make sure to use it when building plan\n if isinstance(job, ReconstructableJob) and repository_load_data is not None:\n job = job.with_repository_load_data(repository_load_data)\n job_def = job.get_definition()\n else:\n job_def = job\n\n run_config = check.opt_mapping_param(run_config, "run_config", key_type=str)\n check.opt_nullable_sequence_param(step_keys_to_execute, "step_keys_to_execute", of_type=str)\n check.opt_inst_param(instance_ref, "instance_ref", InstanceRef)\n tags = check.opt_mapping_param(tags, "tags", key_type=str, value_type=str)\n known_state = check.opt_inst_param(\n known_state,\n "known_state",\n KnownExecutionState,\n default=KnownExecutionState(),\n )\n repository_load_data = check.opt_inst_param(\n repository_load_data, "repository_load_data", RepositoryLoadData\n )\n\n resolved_run_config = ResolvedRunConfig.build(job_def, run_config)\n\n return ExecutionPlan.build(\n job_def,\n resolved_run_config,\n step_keys_to_execute=step_keys_to_execute,\n known_state=known_state,\n instance_ref=instance_ref,\n tags=tags,\n repository_load_data=repository_load_data,\n )\n\n\ndef job_execution_iterator(\n job_context: PlanOrchestrationContext, execution_plan: ExecutionPlan\n) -> Iterator[DagsterEvent]:\n """A complete execution of a pipeline. Yields pipeline start, success,\n and failure events.\n\n Args:\n pipeline_context (PlanOrchestrationContext):\n execution_plan (ExecutionPlan):\n """\n # TODO: restart event?\n if not job_context.resume_from_failure:\n yield DagsterEvent.job_start(job_context)\n\n job_exception_info = None\n job_canceled_info = None\n failed_steps = []\n generator_closed = False\n try:\n for event in job_context.executor.execute(job_context, execution_plan):\n if event.is_step_failure:\n failed_steps.append(event.step_key)\n elif event.is_resource_init_failure and event.step_key:\n failed_steps.append(event.step_key)\n\n # Telemetry\n log_dagster_event(event, job_context)\n\n yield event\n except GeneratorExit:\n # Shouldn't happen, but avoid runtime-exception in case this generator gets GC-ed\n # (see https://amir.rachum.com/blog/2017/03/03/generator-cleanup/).\n generator_closed = True\n job_exception_info = serializable_error_info_from_exc_info(sys.exc_info())\n if job_context.raise_on_error:\n raise\n except (KeyboardInterrupt, DagsterExecutionInterruptedError):\n job_canceled_info = serializable_error_info_from_exc_info(sys.exc_info())\n if job_context.raise_on_error:\n raise\n except BaseException:\n job_exception_info = serializable_error_info_from_exc_info(sys.exc_info())\n if job_context.raise_on_error:\n raise # finally block will run before this is re-raised\n finally:\n if job_canceled_info:\n reloaded_run = job_context.instance.get_run_by_id(job_context.run_id)\n if reloaded_run and reloaded_run.status == DagsterRunStatus.CANCELING:\n event = DagsterEvent.job_canceled(job_context, job_canceled_info)\n elif reloaded_run and reloaded_run.status == DagsterRunStatus.CANCELED:\n # This happens if the run was force-terminated but was still able to send\n # a cancellation request\n event = DagsterEvent.engine_event(\n job_context,\n "Computational resources were cleaned up after the run was forcibly marked"\n " as canceled.",\n EngineEventData(),\n )\n elif job_context.instance.run_will_resume(job_context.run_id):\n event = DagsterEvent.engine_event(\n job_context,\n "Execution was interrupted unexpectedly. 
No user initiated termination"\n " request was found, not treating as failure because run will be resumed.",\n EngineEventData(),\n )\n elif reloaded_run and reloaded_run.status == DagsterRunStatus.FAILURE:\n event = DagsterEvent.engine_event(\n job_context,\n "Execution was interrupted for a run that was already in a failure state.",\n EngineEventData(),\n )\n else:\n event = DagsterEvent.job_failure(\n job_context,\n "Execution was interrupted unexpectedly. "\n "No user initiated termination request was found, treating as failure.",\n job_canceled_info,\n )\n elif job_exception_info:\n event = DagsterEvent.job_failure(\n job_context,\n "An exception was thrown during execution.",\n job_exception_info,\n )\n elif failed_steps:\n event = DagsterEvent.job_failure(\n job_context,\n f"Steps failed: {failed_steps}.",\n )\n else:\n event = DagsterEvent.job_success(job_context)\n if not generator_closed:\n yield event\n\n\nclass ExecuteRunWithPlanIterable:\n """Utility class to consolidate execution logic.\n\n This is a class and not a function because, e.g., in constructing a `scoped_pipeline_context`\n for `JobExecutionResult`, we need to pull out the `pipeline_context` after we're done\n yielding events. This broadly follows a pattern we make use of in other places,\n cf. `dagster._utils.EventGenerationManager`.\n """\n\n def __init__(\n self,\n execution_plan: ExecutionPlan,\n iterator: Callable[..., Iterator[DagsterEvent]],\n execution_context_manager: ExecutionContextManager[Any],\n ):\n self.execution_plan = check.inst_param(execution_plan, "execution_plan", ExecutionPlan)\n self.iterator = check.callable_param(iterator, "iterator")\n self.execution_context_manager = check.inst_param(\n execution_context_manager, "execution_context_manager", ExecutionContextManager\n )\n\n self.job_context = None\n\n def __iter__(self) -> Iterator[DagsterEvent]:\n # Since interrupts can't be raised at arbitrary points safely, delay them until designated\n # checkpoints during the execution.\n # To be maximally certain that interrupts are always caught during an execution process,\n # you can safely add an additional `with capture_interrupts()` at the very beginning of the\n # process that performs the execution.\n with capture_interrupts():\n yield from self.execution_context_manager.prepare_context()\n self.job_context = self.execution_context_manager.get_context()\n generator_closed = False\n try:\n if self.job_context: # False if we had a pipeline init failure\n yield from self.iterator(\n execution_plan=self.execution_plan,\n job_context=self.job_context,\n )\n except GeneratorExit:\n # Shouldn't happen, but avoid runtime-exception in case this generator gets GC-ed\n # (see https://amir.rachum.com/blog/2017/03/03/generator-cleanup/).\n generator_closed = True\n raise\n finally:\n for event in self.execution_context_manager.shutdown_context():\n if not generator_closed:\n yield event\n\n\ndef _check_execute_job_args(\n job_arg: Union[JobDefinition, IJob],\n run_config: Optional[Mapping[str, object]],\n tags: Optional[Mapping[str, str]],\n op_selection: Optional[Sequence[str]] = None,\n) -> Tuple[\n IJob,\n Optional[Mapping],\n Mapping[str, str],\n Optional[AbstractSet[str]],\n Optional[Sequence[str]],\n]:\n ijob = InMemoryJob(job_arg) if isinstance(job_arg, JobDefinition) else job_arg\n job_def = job_arg if isinstance(job_arg, JobDefinition) else job_arg.get_definition()\n\n run_config = check.opt_mapping_param(run_config, "run_config")\n\n tags = check.opt_mapping_param(tags, "tags", key_type=str)\n 
check.opt_sequence_param(op_selection, "op_selection", of_type=str)\n\n tags = merge_dicts(job_def.tags, tags)\n\n # generate job subset from the given op_selection\n if op_selection:\n ijob = ijob.get_subset(op_selection=op_selection)\n\n return (\n ijob,\n run_config,\n tags,\n ijob.resolved_op_selection,\n op_selection,\n )\n\n\ndef _resolve_reexecute_step_selection(\n instance: DagsterInstance,\n job: IJob,\n run_config: Optional[Mapping],\n parent_dagster_run: DagsterRun,\n step_selection: Sequence[str],\n) -> ExecutionPlan:\n if parent_dagster_run.op_selection:\n job = job.get_subset(op_selection=parent_dagster_run.op_selection)\n\n state = KnownExecutionState.build_for_reexecution(instance, parent_dagster_run)\n\n parent_plan = create_execution_plan(\n job,\n parent_dagster_run.run_config,\n known_state=state,\n )\n step_keys_to_execute = parse_step_selection(parent_plan.get_all_step_deps(), step_selection)\n execution_plan = create_execution_plan(\n job,\n run_config,\n step_keys_to_execute=list(step_keys_to_execute),\n known_state=state.update_for_step_selection(step_keys_to_execute),\n tags=parent_dagster_run.tags,\n )\n return execution_plan\n\n\ndef _job_with_repository_load_data(\n job_arg: Union[JobDefinition, IJob],\n) -> Tuple[Union[JobDefinition, IJob], Optional[RepositoryLoadData]]:\n """For ReconstructableJob, generate and return any required RepositoryLoadData, alongside\n a ReconstructableJob with this repository load data baked in.\n """\n if isinstance(job_arg, ReconstructableJob):\n # Unless this ReconstructableJob alread has repository_load_data attached, this will\n # force the repository_load_data to be computed from scratch.\n repository_load_data = job_arg.repository.get_definition().repository_load_data\n return job_arg.with_repository_load_data(repository_load_data), repository_load_data\n return job_arg, None\n
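A usage sketch tying together `execute_job`, `reconstructable`, and `ReexecutionOptions.from_failure` from the listing above. It assumes `DAGSTER_HOME` points at a persistent `DagsterInstance`, and the op and job are illustrative. Note that `instance` is passed to `from_failure` by keyword here; the docstring's inline example passes it positionally after a keyword argument, which is not valid Python.

# Sketch only: out-of-process execution plus reexecution-from-failure, adapted from
# the execute_job / ReexecutionOptions docstrings above.
from dagster import (
    DagsterInstance,
    ReexecutionOptions,
    execute_job,
    job,
    op,
    reconstructable,
)


@op
def example_op():
    return 1


@job
def the_job():
    example_op()


if __name__ == "__main__":
    instance = DagsterInstance.get()  # requires DAGSTER_HOME to be set
    result = execute_job(reconstructable(the_job), instance=instance)

    if not result.success:
        # Re-run only the failed steps (and their downstream steps) of the failed run.
        options = ReexecutionOptions.from_failure(run_id=result.run_id, instance=instance)
        with execute_job(
            reconstructable(the_job), instance=instance, reexecution_options=options
        ) as reexecution_result:
            assert reexecution_result.success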
", "current_page_name": "_modules/dagster/_core/execution/api", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.execution.api"}, "build_resources": {"alabaster_version": "0.7.13", "body": "
Source code for dagster._core.execution.build_resources
\nfrom contextlib import contextmanager\nfrom typing import Any, Dict, Generator, Mapping, Optional, cast\n\nimport dagster._check as check\nfrom dagster._config import process_config\nfrom dagster._core.definitions.resource_definition import (\n    ResourceDefinition,\n    Resources,\n    ScopedResourcesBuilder,\n)\nfrom dagster._core.definitions.run_config import define_resource_dictionary_cls\nfrom dagster._core.errors import DagsterInvalidConfigError\nfrom dagster._core.execution.resources_init import resource_initialization_manager\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._core.log_manager import DagsterLogManager\nfrom dagster._core.storage.dagster_run import DagsterRun\nfrom dagster._core.storage.io_manager import IOManager, IOManagerDefinition\nfrom dagster._core.system_config.objects import ResourceConfig, config_map_resources\n\nfrom .api import ephemeral_instance_if_missing\nfrom .context_creation_job import initialize_console_manager\n\n\ndef get_mapped_resource_config(\n    resource_defs: Mapping[str, ResourceDefinition], resource_config: Mapping[str, Any]\n) -> Mapping[str, ResourceConfig]:\n    resource_config_schema = define_resource_dictionary_cls(\n        resource_defs, set(resource_defs.keys())\n    )\n    config_evr = process_config(resource_config_schema, resource_config)\n    if not config_evr.success:\n        raise DagsterInvalidConfigError(\n            "Error in config for resources ",\n            config_evr.errors,\n            resource_config,\n        )\n    config_value = cast(Dict[str, Any], config_evr.value)\n    return config_map_resources(resource_defs, config_value)\n\n\n
[docs]@contextmanager\ndef build_resources(\n resources: Mapping[str, Any],\n instance: Optional[DagsterInstance] = None,\n resource_config: Optional[Mapping[str, Any]] = None,\n dagster_run: Optional[DagsterRun] = None,\n log_manager: Optional[DagsterLogManager] = None,\n) -> Generator[Resources, None, None]:\n """Context manager that yields resources using provided resource definitions and run config.\n\n This API allows for using resources in an independent context. Resources will be initialized\n with the provided run config, and optionally, dagster_run. The resulting resources will be\n yielded on a dictionary keyed identically to that provided for `resource_defs`. Upon exiting the\n context, resources will also be torn down safely.\n\n Args:\n resources (Mapping[str, Any]): Resource instances or definitions to build. All\n required resource dependencies to a given resource must be contained within this\n dictionary, or the resource build will fail.\n instance (Optional[DagsterInstance]): The dagster instance configured to instantiate\n resources on.\n resource_config (Optional[Mapping[str, Any]]): A dict representing the config to be\n provided to each resource during initialization and teardown.\n dagster_run (Optional[PipelineRun]): The pipeline run to provide during resource\n initialization and teardown. If the provided resources require either the `dagster_run`\n or `run_id` attributes of the provided context during resource initialization and/or\n teardown, this must be provided, or initialization will fail.\n log_manager (Optional[DagsterLogManager]): Log Manager to use during resource\n initialization. Defaults to system log manager.\n\n Examples:\n .. code-block:: python\n\n from dagster import resource, build_resources\n\n @resource\n def the_resource():\n return "foo"\n\n with build_resources(resources={"from_def": the_resource, "from_val": "bar"}) as resources:\n assert resources.from_def == "foo"\n assert resources.from_val == "bar"\n\n """\n resources = check.mapping_param(resources, "resource_defs", key_type=str)\n instance = check.opt_inst_param(instance, "instance", DagsterInstance)\n resource_config = check.opt_mapping_param(resource_config, "resource_config", key_type=str)\n log_manager = check.opt_inst_param(log_manager, "log_manager", DagsterLogManager)\n resource_defs = wrap_resources_for_execution(resources)\n mapped_resource_config = get_mapped_resource_config(resource_defs, resource_config)\n\n with ephemeral_instance_if_missing(instance) as dagster_instance:\n resources_manager = resource_initialization_manager(\n resource_defs=resource_defs,\n resource_configs=mapped_resource_config,\n log_manager=log_manager if log_manager else initialize_console_manager(dagster_run),\n execution_plan=None,\n dagster_run=dagster_run,\n resource_keys_to_init=set(resource_defs.keys()),\n instance=dagster_instance,\n emit_persistent_events=False,\n )\n try:\n list(resources_manager.generate_setup_events())\n instantiated_resources = check.inst(\n resources_manager.get_object(), ScopedResourcesBuilder\n )\n yield instantiated_resources.build(\n set(instantiated_resources.resource_instance_dict.keys())\n )\n finally:\n list(resources_manager.generate_teardown_events())
\n\n\ndef wrap_resources_for_execution(\n resources: Optional[Mapping[str, Any]] = None\n) -> Dict[str, ResourceDefinition]:\n return (\n {\n resource_key: wrap_resource_for_execution(resource)\n for resource_key, resource in resources.items()\n }\n if resources\n else {}\n )\n\n\ndef wrap_resource_for_execution(resource: Any) -> ResourceDefinition:\n from dagster._config.pythonic_config import ConfigurableResourceFactory, PartialResource\n\n # Wrap instantiated resource values in a resource definition.\n # If an instantiated IO manager is provided, wrap it in an IO manager definition.\n if isinstance(resource, (ConfigurableResourceFactory, PartialResource)):\n return resource.get_resource_definition()\n elif isinstance(resource, ResourceDefinition):\n return resource\n elif isinstance(resource, IOManager):\n return IOManagerDefinition.hardcoded_io_manager(resource)\n else:\n return ResourceDefinition.hardcoded_resource(resource)\n
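# --- Illustrative sketch (not part of the library source) ---
# Demonstrates the wrapping rules implemented above: plain values and IOManager
# instances are coerced into definitions, while existing ResourceDefinitions pass
# through unchanged. `MyIOManager` is a hypothetical stand-in.
from dagster import IOManager, IOManagerDefinition, ResourceDefinition
from dagster._core.execution.build_resources import wrap_resources_for_execution


class MyIOManager(IOManager):
    def handle_output(self, context, obj):
        pass

    def load_input(self, context):
        return None


defs = wrap_resources_for_execution(
    {
        "plain_value": "a string",
        "io": MyIOManager(),
        "already_wrapped": ResourceDefinition.hardcoded_resource(1),
    }
)
assert isinstance(defs["plain_value"], ResourceDefinition)
assert isinstance(defs["io"], IOManagerDefinition)
assert isinstance(defs["already_wrapped"], ResourceDefinition)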
", "current_page_name": "_modules/dagster/_core/execution/build_resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.execution.build_resources"}, "context": {"compute": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.execution.context.compute

\nfrom abc import ABC, ABCMeta, abstractmethod\nfrom inspect import _empty as EmptyAnnotation\nfrom typing import (\n    AbstractSet,\n    Any,\n    Dict,\n    Iterator,\n    List,\n    Mapping,\n    Optional,\n    Sequence,\n    Set,\n    Union,\n    cast,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import deprecated, experimental, public\nfrom dagster._core.definitions.asset_check_spec import AssetCheckKey, AssetCheckSpec\nfrom dagster._core.definitions.asset_checks import AssetChecksDefinition\nfrom dagster._core.definitions.assets import AssetsDefinition\nfrom dagster._core.definitions.data_version import (\n    DataProvenance,\n    DataVersion,\n    extract_data_provenance_from_entry,\n)\nfrom dagster._core.definitions.decorators.op_decorator import DecoratedOpFunction\nfrom dagster._core.definitions.dependency import Node, NodeHandle\nfrom dagster._core.definitions.events import (\n    AssetKey,\n    AssetMaterialization,\n    AssetObservation,\n    ExpectationResult,\n    UserEvent,\n)\nfrom dagster._core.definitions.job_definition import JobDefinition\nfrom dagster._core.definitions.op_definition import OpDefinition\nfrom dagster._core.definitions.partition import PartitionsDefinition\nfrom dagster._core.definitions.partition_key_range import PartitionKeyRange\nfrom dagster._core.definitions.step_launcher import StepLauncher\nfrom dagster._core.definitions.time_window_partitions import TimeWindow\nfrom dagster._core.errors import (\n    DagsterInvalidDefinitionError,\n    DagsterInvalidPropertyError,\n    DagsterInvariantViolationError,\n)\nfrom dagster._core.events import DagsterEvent\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._core.log_manager import DagsterLogManager\nfrom dagster._core.storage.dagster_run import DagsterRun\nfrom dagster._utils.forked_pdb import ForkedPdb\nfrom dagster._utils.warnings import (\n    deprecation_warning,\n)\n\nfrom .system import StepExecutionContext\n\n\n# This metaclass has to exist for OpExecutionContext to have a metaclass\nclass AbstractComputeMetaclass(ABCMeta):\n    pass\n\n\nclass AbstractComputeExecutionContext(ABC, metaclass=AbstractComputeMetaclass):\n    """Base class for op context implemented by OpExecutionContext and DagstermillExecutionContext."""\n\n    @abstractmethod\n    def has_tag(self, key: str) -> bool:\n        """Implement this method to check if a logging tag is set."""\n\n    @abstractmethod\n    def get_tag(self, key: str) -> Optional[str]:\n        """Implement this method to get a logging tag."""\n\n    @property\n    @abstractmethod\n    def run_id(self) -> str:\n        """The run id for the context."""\n\n    @property\n    @abstractmethod\n    def op_def(self) -> OpDefinition:\n        """The op definition corresponding to the execution step being executed."""\n\n    @property\n    @abstractmethod\n    def job_def(self) -> JobDefinition:\n        """The job being executed."""\n\n    @property\n    @abstractmethod\n    def run(self) -> DagsterRun:\n        """The DagsterRun object corresponding to the execution."""\n\n    @property\n    @abstractmethod\n    def resources(self) -> Any:\n        """Resources available in the execution context."""\n\n    @property\n    @abstractmethod\n    def log(self) -> DagsterLogManager:\n        """The log manager available in the execution context."""\n\n    @property\n    @abstractmethod\n    def op_config(self) -> Any:\n        """The parsed config specific to this op."""\n\n\nclass 
OpExecutionContextMetaClass(AbstractComputeMetaclass):\n    def __instancecheck__(cls, instance) -> bool:\n        # This makes isinstance(context, OpExecutionContext) throw a deprecation warning when\n        # context is an AssetExecutionContext. This metaclass can be deleted once AssetExecutionContext\n        # has been split into its own class in 1.7.0.\n        if type(instance) is AssetExecutionContext and cls is not AssetExecutionContext:\n            deprecation_warning(\n                subject="AssetExecutionContext",\n                additional_warn_text=(\n                    "Starting in version 1.7.0 AssetExecutionContext will no longer be a subclass"\n                    " of OpExecutionContext."\n                ),\n                breaking_version="1.7.0",\n                stacklevel=1,\n            )\n        return super().__instancecheck__(instance)\n\n\n
[docs]class OpExecutionContext(AbstractComputeExecutionContext, metaclass=OpExecutionContextMetaClass):\n """The ``context`` object that can be made available as the first argument to the function\n used for computing an op or asset.\n\n This context object provides system information such as resources, config, and logging.\n\n To construct an execution context for testing purposes, use :py:func:`dagster.build_op_context`.\n\n Example:\n .. code-block:: python\n\n from dagster import op, OpExecutionContext\n\n @op\n def hello_world(context: OpExecutionContext):\n context.log.info("Hello, world!")\n """\n\n __slots__ = ["_step_execution_context"]\n\n def __init__(self, step_execution_context: StepExecutionContext):\n self._step_execution_context = check.inst_param(\n step_execution_context,\n "step_execution_context",\n StepExecutionContext,\n )\n self._pdb: Optional[ForkedPdb] = None\n self._events: List[DagsterEvent] = []\n self._output_metadata: Dict[str, Any] = {}\n\n @public\n @property\n def op_config(self) -> Any:\n """Any: The parsed config specific to this op."""\n return self._step_execution_context.op_config\n\n @property\n def dagster_run(self) -> DagsterRun:\n """PipelineRun: The current pipeline run."""\n return self._step_execution_context.dagster_run\n\n @property\n def run(self) -> DagsterRun:\n """DagsterRun: The current run."""\n return self.dagster_run\n\n @public\n @property\n def instance(self) -> DagsterInstance:\n """DagsterInstance: The current Dagster instance."""\n return self._step_execution_context.instance\n\n @public\n @property\n def pdb(self) -> ForkedPdb:\n """dagster.utils.forked_pdb.ForkedPdb: Gives access to pdb debugging from within the op.\n\n Example:\n .. code-block:: python\n\n @op\n def debug(context):\n context.pdb.set_trace()\n """\n if self._pdb is None:\n self._pdb = ForkedPdb()\n\n return self._pdb\n\n @property\n def file_manager(self):\n """Deprecated access to the file manager.\n\n :meta private:\n """\n raise DagsterInvalidPropertyError(\n "You have attempted to access the file manager which has been moved to resources in"\n " 0.10.0. 
Please access it via `context.resources.file_manager` instead."\n )\n\n @public\n @property\n def resources(self) -> Any:\n """Resources: The currently available resources."""\n return self._step_execution_context.resources\n\n @property\n def step_launcher(self) -> Optional[StepLauncher]:\n """Optional[StepLauncher]: The current step launcher, if any."""\n return self._step_execution_context.step_launcher\n\n @public\n @property\n def run_id(self) -> str:\n """str: The id of the current execution's run."""\n return self._step_execution_context.run_id\n\n @public\n @property\n def run_config(self) -> Mapping[str, object]:\n """dict: The run config for the current execution."""\n return self._step_execution_context.run_config\n\n @public\n @property\n def job_def(self) -> JobDefinition:\n """JobDefinition: The currently executing pipeline."""\n return self._step_execution_context.job_def\n\n @public\n @property\n def job_name(self) -> str:\n """str: The name of the currently executing pipeline."""\n return self._step_execution_context.job_name\n\n @public\n @property\n def log(self) -> DagsterLogManager:\n """DagsterLogManager: The log manager available in the execution context."""\n return self._step_execution_context.log\n\n @property\n def node_handle(self) -> NodeHandle:\n """NodeHandle: The current op's handle.\n\n :meta private:\n """\n return self._step_execution_context.node_handle\n\n @property\n def op_handle(self) -> NodeHandle:\n """NodeHandle: The current op's handle.\n\n :meta private:\n """\n return self.node_handle\n\n @property\n def op(self) -> Node:\n """Node: The object representing the invoked op within the graph.\n\n :meta private:\n\n """\n return self._step_execution_context.job_def.get_node(self.node_handle)\n\n @public\n @property\n def op_def(self) -> OpDefinition:\n """OpDefinition: The current op definition."""\n return cast(OpDefinition, self.op.definition)\n\n @public\n @property\n def has_partition_key(self) -> bool:\n """Whether the current run is a partitioned run."""\n return self._step_execution_context.has_partition_key\n\n @public\n @property\n def partition_key(self) -> str:\n """The partition key for the current run.\n\n Raises an error if the current run is not a partitioned run. Or if the current run is operating\n over a range of partitions (ie. a backfill of several partitions executed in a single run).\n\n Examples:\n .. code-block:: python\n\n partitions_def = DailyPartitionsDefinition("2023-08-20")\n\n @asset(\n partitions_def=partitions_def\n )\n def my_asset(context: AssetExecutionContext):\n context.log.info(context.partition_key)\n\n # materializing the 2023-08-21 partition of this asset will log:\n # "2023-08-21"\n """\n return self._step_execution_context.partition_key\n\n @deprecated(breaking_version="2.0", additional_warn_text="Use `partition_key_range` instead.")\n @public\n @property\n def asset_partition_key_range(self) -> PartitionKeyRange:\n """The range of partition keys for the current run.\n\n If run is for a single partition key, return a `PartitionKeyRange` with the same start and\n end. Raises an error if the current run is not a partitioned run.\n """\n return self.partition_key_range\n\n @public\n @property\n def partition_key_range(self) -> PartitionKeyRange:\n """The range of partition keys for the current run.\n\n If run is for a single partition key, returns a `PartitionKeyRange` with the same start and\n end. Raises an error if the current run is not a partitioned run.\n\n Examples:\n .. 
code-block:: python\n\n partitions_def = DailyPartitionsDefinition("2023-08-20")\n\n @asset(\n partitions_def=partitions_def\n )\n def my_asset(context: AssetExecutionContext):\n context.log.info(context.partition_key_range)\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # PartitionKeyRange(start="2023-08-21", end="2023-08-25")\n """\n return self._step_execution_context.asset_partition_key_range\n\n @public\n @property\n def partition_time_window(self) -> TimeWindow:\n """The partition time window for the current run.\n\n Raises an error if the current run is not a partitioned run, or if the job's partition\n definition is not a TimeWindowPartitionsDefinition.\n\n Examples:\n .. code-block:: python\n\n partitions_def = DailyPartitionsDefinition("2023-08-20")\n\n @asset(\n partitions_def=partitions_def\n )\n def my_asset(context: AssetExecutionContext):\n context.log.info(context.partition_time_window)\n\n # materializing the 2023-08-21 partition of this asset will log:\n # TimeWindow("2023-08-21", "2023-08-22")\n """\n return self._step_execution_context.partition_time_window\n\n
[docs] @public\n def has_tag(self, key: str) -> bool:\n """Check if a logging tag is set.\n\n Args:\n key (str): The tag to check.\n\n Returns:\n bool: Whether the tag is set.\n """\n return self._step_execution_context.has_tag(key)
\n\n
[docs] @public\n def get_tag(self, key: str) -> Optional[str]:\n """Get a logging tag.\n\n Args:\n key (str): The tag to get.\n\n Returns:\n Optional[str]: The value of the tag, if present.\n """\n return self._step_execution_context.get_tag(key)
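# --- Illustrative sketch (not part of the library source) ---
# Shows has_tag/get_tag reading run tags from inside an op. The
# "dagster/schedule_name" tag is set by Dagster on schedule-launched runs;
# "team" is an assumed user-provided tag.
from dagster import OpExecutionContext, op


@op
def tag_aware_op(context: OpExecutionContext):
    if context.has_tag("dagster/schedule_name"):
        context.log.info(f"Launched by schedule {context.get_tag('dagster/schedule_name')}")
    owner = context.get_tag("team") or "unknown"
    context.log.info(f"Owning team: {owner}")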
\n\n @property\n def run_tags(self) -> Mapping[str, str]:\n """Mapping[str, str]: The tags for the current run."""\n return self._step_execution_context.run_tags\n\n def has_events(self) -> bool:\n return bool(self._events)\n\n def consume_events(self) -> Iterator[DagsterEvent]:\n """Pops and yields all user-generated events that have been recorded from this context.\n\n If consume_events has not yet been called, this will yield all logged events since the beginning of the op's computation. If consume_events has been called, it will yield all events since the last time consume_events was called. Designed for internal use. Users should never need to invoke this method.\n """\n events = self._events\n self._events = []\n yield from events\n\n
[docs] @public\n def log_event(self, event: UserEvent) -> None:\n """Log an AssetMaterialization, AssetObservation, or ExpectationResult from within the body of an op.\n\n Events logged with this method will appear in the list of DagsterEvents, as well as the event log.\n\n Args:\n event (Union[AssetMaterialization, AssetObservation, ExpectationResult]): The event to log.\n\n **Examples:**\n\n .. code-block:: python\n\n from dagster import op, AssetMaterialization\n\n @op\n def log_materialization(context):\n context.log_event(AssetMaterialization("foo"))\n """\n if isinstance(event, AssetMaterialization):\n self._events.append(\n DagsterEvent.asset_materialization(self._step_execution_context, event)\n )\n elif isinstance(event, AssetObservation):\n self._events.append(DagsterEvent.asset_observation(self._step_execution_context, event))\n elif isinstance(event, ExpectationResult):\n self._events.append(\n DagsterEvent.step_expectation_result(self._step_execution_context, event)\n )\n else:\n check.failed(f"Unexpected event {event}")
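# --- Illustrative sketch (not part of the library source) ---
# Complements the docstring example above by logging the other supported event
# types. The asset key and metadata values are assumptions for this sketch.
from dagster import AssetObservation, ExpectationResult, OpExecutionContext, op


@op
def log_other_events(context: OpExecutionContext):
    context.log_event(
        AssetObservation(asset_key="my_table", metadata={"row_count": 100})
    )
    context.log_event(
        ExpectationResult(success=True, description="row_count is positive")
    )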
\n\n
[docs] @public\n def add_output_metadata(\n self,\n metadata: Mapping[str, Any],\n output_name: Optional[str] = None,\n mapping_key: Optional[str] = None,\n ) -> None:\n """Add metadata to one of the outputs of an op.\n\n This can be invoked multiple times per output in the body of an op. If the same key is\n passed multiple times, the value associated with the last call will be used.\n\n Args:\n metadata (Mapping[str, Any]): The metadata to attach to the output.\n output_name (Optional[str]): The name of the output to attach metadata to. If there is only one output on the op, then this argument does not need to be provided. The metadata will automatically be attached to the only output.\n mapping_key (Optional[str]): The mapping key of the output to attach metadata to. If the\n output is not dynamic, this argument does not need to be provided.\n\n **Examples:**\n\n .. code-block:: python\n\n from dagster import Out, op\n from typing import Tuple\n\n @op\n def add_metadata(context):\n context.add_output_metadata({"foo": "bar"})\n return 5 # Since the default output is called "result", metadata will be attached to the output "result".\n\n @op(out={"a": Out(), "b": Out()})\n def add_metadata_two_outputs(context) -> Tuple[str, int]:\n context.add_output_metadata({"foo": "bar"}, output_name="b")\n context.add_output_metadata({"baz": "bat"}, output_name="a")\n\n return ("dog", 5)\n\n """\n metadata = check.mapping_param(metadata, "metadata", key_type=str)\n output_name = check.opt_str_param(output_name, "output_name")\n mapping_key = check.opt_str_param(mapping_key, "mapping_key")\n\n self._step_execution_context.add_output_metadata(\n metadata=metadata, output_name=output_name, mapping_key=mapping_key\n )
\n\n def get_output_metadata(\n self, output_name: str, mapping_key: Optional[str] = None\n ) -> Optional[Mapping[str, Any]]:\n return self._step_execution_context.get_output_metadata(\n output_name=output_name, mapping_key=mapping_key\n )\n\n def get_step_execution_context(self) -> StepExecutionContext:\n """Allows advanced users (e.g. framework authors) to punch through to the underlying\n step execution context.\n\n :meta private:\n\n Returns:\n StepExecutionContext: The underlying system context.\n """\n return self._step_execution_context\n\n @public\n @property\n def retry_number(self) -> int:\n """Which retry attempt is currently executing i.e. 0 for initial attempt, 1 for first retry, etc."""\n return self._step_execution_context.previous_attempt_count\n\n def describe_op(self):\n return self._step_execution_context.describe_op()\n\n
[docs] @public\n def get_mapping_key(self) -> Optional[str]:\n """Which mapping_key this execution is for if downstream of a DynamicOutput, otherwise None."""\n return self._step_execution_context.step.get_mapping_key()
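# --- Illustrative sketch (not part of the library source) ---
# Shows get_mapping_key downstream of a DynamicOutput; op and job names are
# assumptions for this sketch.
from dagster import DynamicOut, DynamicOutput, OpExecutionContext, job, op


@op(out=DynamicOut())
def fan_out():
    for key in ["a", "b"]:
        yield DynamicOutput(value=key.upper(), mapping_key=key)


@op
def process(context: OpExecutionContext, item: str) -> str:
    # Inside a mapped step this returns "a" or "b"; it is None for unmapped steps.
    context.log.info(f"mapping_key={context.get_mapping_key()}")
    return item


@job
def fan_out_job():
    fan_out().map(process)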
\n\n #############################################################################################\n # asset related methods\n #############################################################################################\n\n @public\n @property\n def asset_key(self) -> AssetKey:\n """The AssetKey for the current asset. In a multi_asset, use asset_key_for_output instead."""\n if self.has_assets_def and len(self.assets_def.keys_by_output_name.keys()) > 1:\n raise DagsterInvariantViolationError(\n "Cannot call `context.asset_key` in a multi_asset with more than one asset. Use"\n " `context.asset_key_for_output` instead."\n )\n return next(iter(self.assets_def.keys_by_output_name.values()))\n\n @public\n @property\n def has_assets_def(self) -> bool:\n """If there is a backing AssetsDefinition for what is currently executing."""\n assets_def = self.job_def.asset_layer.assets_def_for_node(self.node_handle)\n return assets_def is not None\n\n @public\n @property\n def assets_def(self) -> AssetsDefinition:\n """The backing AssetsDefinition for what is currently executing, errors if not available."""\n assets_def = self.job_def.asset_layer.assets_def_for_node(self.node_handle)\n if assets_def is None:\n raise DagsterInvalidPropertyError(\n f"Op '{self.op.name}' does not have an assets definition."\n )\n return assets_def\n\n @public\n @property\n def selected_asset_keys(self) -> AbstractSet[AssetKey]:\n """Get the set of AssetKeys this execution is expected to materialize."""\n if not self.has_assets_def:\n return set()\n return self.assets_def.keys\n\n @public\n @property\n def has_asset_checks_def(self) -> bool:\n """Return a boolean indicating the presence of a backing AssetChecksDefinition\n for the current execution.\n\n Returns:\n bool: True if there is a backing AssetChecksDefinition for the current execution, otherwise False.\n """\n return self.job_def.asset_layer.asset_checks_def_for_node(self.node_handle) is not None\n\n @public\n @property\n def asset_checks_def(self) -> AssetChecksDefinition:\n """The backing AssetChecksDefinition for what is currently executing, errors if not\n available.\n\n Returns:\n AssetChecksDefinition.\n """\n asset_checks_def = self.job_def.asset_layer.asset_checks_def_for_node(self.node_handle)\n if asset_checks_def is None:\n raise DagsterInvalidPropertyError(\n f"Op '{self.op.name}' does not have an asset checks definition."\n )\n\n return asset_checks_def\n\n @public\n @property\n def selected_asset_check_keys(self) -> AbstractSet[AssetCheckKey]:\n if self.has_assets_def:\n return self.assets_def.check_keys\n\n if self.has_asset_checks_def:\n check.failed("Subset selection is not yet supported within an AssetChecksDefinition")\n\n return set()\n\n @public\n @property\n def selected_output_names(self) -> AbstractSet[str]:\n """Get the output names that correspond to the current selection of assets this execution is expected to materialize."""\n # map selected asset keys to the output names they correspond to\n selected_asset_keys = self.selected_asset_keys\n selected_outputs: Set[str] = set()\n for output_name in self.op.output_dict.keys():\n asset_info = self.job_def.asset_layer.asset_info_for_output(\n self.node_handle, output_name\n )\n if any( # For graph-backed assets, check if a downstream asset is selected\n [\n asset_key in selected_asset_keys\n for asset_key in self.job_def.asset_layer.downstream_dep_assets(\n self.node_handle, output_name\n )\n ]\n ) or (asset_info and asset_info.key in selected_asset_keys):\n selected_outputs.add(output_name)\n\n 
return selected_outputs\n\n
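# --- Illustrative sketch (not part of the library source) ---
# Uses selected_asset_keys in a subsettable multi_asset so that only the
# selected outputs are yielded. Asset names are assumptions for this sketch.
from dagster import AssetExecutionContext, AssetKey, AssetOut, Output, multi_asset


@multi_asset(
    outs={"alpha": AssetOut(), "beta": AssetOut()},
    can_subset=True,
)
def subsettable_assets(context: AssetExecutionContext):
    if AssetKey("alpha") in context.selected_asset_keys:
        yield Output(1, output_name="alpha")
    if AssetKey("beta") in context.selected_asset_keys:
        yield Output(2, output_name="beta")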
[docs] @public\n def asset_key_for_output(self, output_name: str = "result") -> AssetKey:\n """Return the AssetKey for the corresponding output."""\n asset_output_info = self.job_def.asset_layer.asset_info_for_output(\n node_handle=self.op_handle, output_name=output_name\n )\n if asset_output_info is None:\n check.failed(f"Output '{output_name}' has no asset")\n else:\n return asset_output_info.key
\n\n
[docs] @public\n def output_for_asset_key(self, asset_key: AssetKey) -> str:\n """Return the output name for the corresponding asset key."""\n node_output_handle = self.job_def.asset_layer.node_output_handle_for_asset(asset_key)\n if node_output_handle is None:\n check.failed(f"Asset key '{asset_key}' has no output")\n else:\n return node_output_handle.output_name
\n\n
[docs] @public\n def asset_key_for_input(self, input_name: str) -> AssetKey:\n """Return the AssetKey for the corresponding input."""\n key = self.job_def.asset_layer.asset_key_for_input(\n node_handle=self.op_handle, input_name=input_name\n )\n if key is None:\n check.failed(f"Input '{input_name}' has no asset")\n else:\n return key
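# --- Illustrative sketch (not part of the library source) ---
# Ties asset_key_for_output and asset_key_for_input together for a simple
# @asset dependency. Asset names are assumptions for this sketch.
from dagster import AssetExecutionContext, asset


@asset
def upstream():
    return 1


@asset
def downstream(context: AssetExecutionContext, upstream):
    context.log.info(context.asset_key_for_input("upstream"))  # AssetKey(["upstream"])
    context.log.info(context.asset_key_for_output())  # AssetKey(["downstream"])
    return upstream + 1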
\n\n
[docs] @public\n def asset_partition_key_for_output(self, output_name: str = "result") -> str:\n """Returns the asset partition key for the given output.\n\n Args:\n output_name (str): For assets defined with the ``@asset`` decorator, the name of the output\n will be automatically provided. For assets defined with ``@multi_asset``, ``output_name``\n should be the op output associated with the asset key (as determined by AssetOut)\n to get the partition key for.\n\n Examples:\n .. code-block:: python\n\n partitions_def = DailyPartitionsDefinition("2023-08-20")\n\n @asset(\n partitions_def=partitions_def\n )\n def an_asset(context: AssetExecutionContext):\n context.log.info(context.asset_partition_key_for_output())\n\n\n # materializing the 2023-08-21 partition of this asset will log:\n # "2023-08-21"\n\n @multi_asset(\n outs={\n "first_asset": AssetOut(key=["my_assets", "first_asset"]),\n "second_asset": AssetOut(key=["my_assets", "second_asset"])\n }\n partitions_def=partitions_def,\n )\n def a_multi_asset(context: AssetExecutionContext):\n context.log.info(context.asset_partition_key_for_output("first_asset"))\n context.log.info(context.asset_partition_key_for_output("second_asset"))\n\n\n # materializing the 2023-08-21 partition of this asset will log:\n # "2023-08-21"\n # "2023-08-21"\n\n\n @asset(\n partitions_def=partitions_def,\n ins={\n "self_dependent_asset": AssetIn(partition_mapping=TimeWindowPartitionMapping(start_offset=-1, end_offset=-1))\n }\n )\n def self_dependent_asset(context: AssetExecutionContext, self_dependent_asset):\n context.log.info(context.asset_partition_key_for_output())\n\n # materializing the 2023-08-21 partition of this asset will log:\n # "2023-08-21"\n\n """\n return self._step_execution_context.asset_partition_key_for_output(output_name)
\n\n
[docs] @public\n def asset_partitions_time_window_for_output(self, output_name: str = "result") -> TimeWindow:\n """The time window for the partitions of the output asset.\n\n If you want to write your asset to support running a backfill of several partitions in a single run,\n you can use ``asset_partitions_time_window_for_output`` to get the TimeWindow of all of the partitions\n being materialized by the backfill.\n\n Raises an error if either of the following are true:\n - The output asset has no partitioning.\n - The output asset is not partitioned with a TimeWindowPartitionsDefinition or a\n MultiPartitionsDefinition with one time-partitioned dimension.\n\n Args:\n output_name (str): For assets defined with the ``@asset`` decorator, the name of the output\n will be automatically provided. For assets defined with ``@multi_asset``, ``output_name``\n should be the op output associated with the asset key (as determined by AssetOut)\n to get the time window for.\n\n Examples:\n .. code-block:: python\n\n partitions_def = DailyPartitionsDefinition("2023-08-20")\n\n @asset(\n partitions_def=partitions_def\n )\n def an_asset(context: AssetExecutionContext):\n context.log.info(context.asset_partitions_time_window_for_output())\n\n\n # materializing the 2023-08-21 partition of this asset will log:\n # TimeWindow("2023-08-21", "2023-08-22")\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # TimeWindow("2023-08-21", "2023-08-26")\n\n @multi_asset(\n outs={\n "first_asset": AssetOut(key=["my_assets", "first_asset"]),\n "second_asset": AssetOut(key=["my_assets", "second_asset"])\n }\n partitions_def=partitions_def,\n )\n def a_multi_asset(context: AssetExecutionContext):\n context.log.info(context.asset_partitions_time_window_for_output("first_asset"))\n context.log.info(context.asset_partitions_time_window_for_output("second_asset"))\n\n # materializing the 2023-08-21 partition of this asset will log:\n # TimeWindow("2023-08-21", "2023-08-22")\n # TimeWindow("2023-08-21", "2023-08-22")\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # TimeWindow("2023-08-21", "2023-08-26")\n # TimeWindow("2023-08-21", "2023-08-26")\n\n\n @asset(\n partitions_def=partitions_def,\n ins={\n "self_dependent_asset": AssetIn(partition_mapping=TimeWindowPartitionMapping(start_offset=-1, end_offset=-1))\n }\n )\n def self_dependent_asset(context: AssetExecutionContext, self_dependent_asset):\n context.log.info(context.asset_partitions_time_window_for_output())\n\n # materializing the 2023-08-21 partition of this asset will log:\n # TimeWindow("2023-08-21", "2023-08-22")\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # TimeWindow("2023-08-21", "2023-08-26")\n\n """\n return self._step_execution_context.asset_partitions_time_window_for_output(output_name)
\n\n
[docs] @public\n def asset_partition_key_range_for_output(\n self, output_name: str = "result"\n ) -> PartitionKeyRange:\n """Return the PartitionKeyRange for the corresponding output. Errors if the run is not partitioned.\n\n If you want to write your asset to support running a backfill of several partitions in a single run,\n you can use ``asset_partition_key_range_for_output`` to get all of the partitions being materialized\n by the backfill.\n\n Args:\n output_name (str): For assets defined with the ``@asset`` decorator, the name of the output\n will be automatically provided. For assets defined with ``@multi_asset``, ``output_name``\n should be the op output associated with the asset key (as determined by AssetOut)\n to get the partition key range for.\n\n Examples:\n .. code-block:: python\n\n partitions_def = DailyPartitionsDefinition("2023-08-20")\n\n @asset(\n partitions_def=partitions_def\n )\n def an_asset(context: AssetExecutionContext):\n context.log.info(context.asset_partition_key_range_for_output())\n\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # PartitionKeyRange(start="2023-08-21", end="2023-08-25")\n\n @multi_asset(\n outs={\n "first_asset": AssetOut(key=["my_assets", "first_asset"]),\n "second_asset": AssetOut(key=["my_assets", "second_asset"])\n }\n partitions_def=partitions_def,\n )\n def a_multi_asset(context: AssetExecutionContext):\n context.log.info(context.asset_partition_key_range_for_output("first_asset"))\n context.log.info(context.asset_partition_key_range_for_output("second_asset"))\n\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # PartitionKeyRange(start="2023-08-21", end="2023-08-25")\n # PartitionKeyRange(start="2023-08-21", end="2023-08-25")\n\n\n @asset(\n partitions_def=partitions_def,\n ins={\n "self_dependent_asset": AssetIn(partition_mapping=TimeWindowPartitionMapping(start_offset=-1, end_offset=-1))\n }\n )\n def self_dependent_asset(context: AssetExecutionContext, self_dependent_asset):\n context.log.info(context.asset_partition_key_range_for_output())\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # PartitionKeyRange(start="2023-08-21", end="2023-08-25")\n\n """\n return self._step_execution_context.asset_partition_key_range_for_output(output_name)
\n\n
[docs] @public\n def asset_partition_key_range_for_input(self, input_name: str) -> PartitionKeyRange:\n """Return the PartitionKeyRange for the corresponding input. Errors if the asset depends on a\n non-contiguous chunk of the input.\n\n If you want to write your asset to support running a backfill of several partitions in a single run,\n you can use ``asset_partition_key_range_for_input`` to get the range of partitions keys of the input that\n are relevant to that backfill.\n\n Args:\n input_name (str): The name of the input to get the time window for.\n\n Examples:\n .. code-block:: python\n\n partitions_def = DailyPartitionsDefinition("2023-08-20")\n\n @asset(\n partitions_def=partitions_def\n )\n def upstream_asset():\n ...\n\n @asset(\n partitions_def=partitions_def\n )\n def an_asset(context: AssetExecutionContext, upstream_asset):\n context.log.info(context.asset_partition_key_range_for_input("upstream_asset"))\n\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # PartitionKeyRange(start="2023-08-21", end="2023-08-25")\n\n @asset(\n ins={\n "upstream_asset": AssetIn(partition_mapping=TimeWindowPartitionMapping(start_offset=-1, end_offset=-1))\n }\n partitions_def=partitions_def,\n )\n def another_asset(context: AssetExecutionContext, upstream_asset):\n context.log.info(context.asset_partition_key_range_for_input("upstream_asset"))\n\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # PartitionKeyRange(start="2023-08-20", end="2023-08-24")\n\n\n @asset(\n partitions_def=partitions_def,\n ins={\n "self_dependent_asset": AssetIn(partition_mapping=TimeWindowPartitionMapping(start_offset=-1, end_offset=-1))\n }\n )\n def self_dependent_asset(context: AssetExecutionContext, self_dependent_asset):\n context.log.info(context.asset_partition_key_range_for_input("self_dependent_asset"))\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # PartitionKeyRange(start="2023-08-20", end="2023-08-24")\n\n\n """\n return self._step_execution_context.asset_partition_key_range_for_input(input_name)
\n\n
[docs] @public\n def asset_partition_key_for_input(self, input_name: str) -> str:\n """Returns the partition key of the upstream asset corresponding to the given input.\n\n Args:\n input_name (str): The name of the input to get the partition key for.\n\n Examples:\n .. code-block:: python\n\n partitions_def = DailyPartitionsDefinition("2023-08-20")\n\n @asset(\n partitions_def=partitions_def\n )\n def upstream_asset():\n ...\n\n @asset(\n partitions_def=partitions_def\n )\n def an_asset(context: AssetExecutionContext, upstream_asset):\n context.log.info(context.asset_partition_key_for_input("upstream_asset"))\n\n # materializing the 2023-08-21 partition of this asset will log:\n # "2023-08-21"\n\n\n @asset(\n partitions_def=partitions_def,\n ins={\n "self_dependent_asset": AssetIn(partition_mapping=TimeWindowPartitionMapping(start_offset=-1, end_offset=-1))\n }\n )\n def self_dependent_asset(context: AssetExecutionContext, self_dependent_asset):\n context.log.info(context.asset_partition_key_for_input("self_dependent_asset"))\n\n # materializing the 2023-08-21 partition of this asset will log:\n # "2023-08-20"\n\n """\n return self._step_execution_context.asset_partition_key_for_input(input_name)
\n\n
[docs] @public\n def asset_partitions_def_for_output(self, output_name: str = "result") -> PartitionsDefinition:\n """The PartitionsDefinition on the asset corresponding to this output.\n\n Args:\n output_name (str): For assets defined with the ``@asset`` decorator, the name of the output\n will be automatically provided. For assets defined with ``@multi_asset``, ``output_name``\n should be the op output associated with the asset key (as determined by AssetOut)\n to get the PartitionsDefinition for.\n\n Examples:\n .. code-block:: python\n\n partitions_def = DailyPartitionsDefinition("2023-08-20")\n\n @asset(\n partitions_def=partitions_def\n )\n def upstream_asset(context: AssetExecutionContext):\n context.log.info(context.asset_partitions_def_for_output())\n\n # materializing the 2023-08-21 partition of this asset will log:\n # DailyPartitionsDefinition("2023-08-20")\n\n @multi_asset(\n outs={\n "first_asset": AssetOut(key=["my_assets", "first_asset"]),\n "second_asset": AssetOut(key=["my_assets", "second_asset"])\n }\n partitions_def=partitions_def,\n )\n def a_multi_asset(context: AssetExecutionContext):\n context.log.info(context.asset_partitions_def_for_output("first_asset"))\n context.log.info(context.asset_partitions_def_for_output("second_asset"))\n\n # materializing the 2023-08-21 partition of this asset will log:\n # DailyPartitionsDefinition("2023-08-20")\n # DailyPartitionsDefinition("2023-08-20")\n\n """\n asset_key = self.asset_key_for_output(output_name)\n result = self._step_execution_context.job_def.asset_layer.partitions_def_for_asset(\n asset_key\n )\n if result is None:\n raise DagsterInvariantViolationError(\n f"Attempting to access partitions def for asset {asset_key}, but it is not"\n " partitioned"\n )\n\n return result
\n\n
[docs] @public\n def asset_partitions_def_for_input(self, input_name: str) -> PartitionsDefinition:\n """The PartitionsDefinition on the upstream asset corresponding to this input.\n\n Args:\n input_name (str): The name of the input to get the PartitionsDefinition for.\n\n Examples:\n .. code-block:: python\n\n partitions_def = DailyPartitionsDefinition("2023-08-20")\n\n @asset(\n partitions_def=partitions_def\n )\n def upstream_asset():\n ...\n\n @asset(\n partitions_def=partitions_def\n )\n def upstream_asset(context: AssetExecutionContext, upstream_asset):\n context.log.info(context.asset_partitions_def_for_input("upstream_asset"))\n\n # materializing the 2023-08-21 partition of this asset will log:\n # DailyPartitionsDefinition("2023-08-20")\n\n """\n asset_key = self.asset_key_for_input(input_name)\n result = self._step_execution_context.job_def.asset_layer.partitions_def_for_asset(\n asset_key\n )\n if result is None:\n raise DagsterInvariantViolationError(\n f"Attempting to access partitions def for asset {asset_key}, but it is not"\n " partitioned"\n )\n\n return result
\n\n
[docs] @public\n def asset_partition_keys_for_output(self, output_name: str = "result") -> Sequence[str]:\n """Returns a list of the partition keys for the given output.\n\n If you want to write your asset to support running a backfill of several partitions in a single run,\n you can use ``asset_partition_keys_for_output`` to get all of the partitions being materialized\n by the backfill.\n\n Args:\n output_name (str): For assets defined with the ``@asset`` decorator, the name of the output\n will be automatically provided. For assets defined with ``@multi_asset``, ``output_name``\n should be the op output associated with the asset key (as determined by AssetOut)\n to get the partition keys for.\n\n Examples:\n .. code-block:: python\n\n partitions_def = DailyPartitionsDefinition("2023-08-20")\n\n @asset(\n partitions_def=partitions_def\n )\n def an_asset(context: AssetExecutionContext):\n context.log.info(context.asset_partition_keys_for_output())\n\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # ["2023-08-21", "2023-08-22", "2023-08-23", "2023-08-24", "2023-08-25"]\n\n @multi_asset(\n outs={\n "first_asset": AssetOut(key=["my_assets", "first_asset"]),\n "second_asset": AssetOut(key=["my_assets", "second_asset"])\n }\n partitions_def=partitions_def,\n )\n def a_multi_asset(context: AssetExecutionContext):\n context.log.info(context.asset_partition_keys_for_output("first_asset"))\n context.log.info(context.asset_partition_keys_for_output("second_asset"))\n\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # ["2023-08-21", "2023-08-22", "2023-08-23", "2023-08-24", "2023-08-25"]\n # ["2023-08-21", "2023-08-22", "2023-08-23", "2023-08-24", "2023-08-25"]\n\n\n @asset(\n partitions_def=partitions_def,\n ins={\n "self_dependent_asset": AssetIn(partition_mapping=TimeWindowPartitionMapping(start_offset=-1, end_offset=-1))\n }\n )\n def self_dependent_asset(context: AssetExecutionContext, self_dependent_asset):\n context.log.info(context.asset_partition_keys_for_output())\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # ["2023-08-21", "2023-08-22", "2023-08-23", "2023-08-24", "2023-08-25"]\n """\n return self.asset_partitions_def_for_output(output_name).get_partition_keys_in_range(\n self._step_execution_context.asset_partition_key_range_for_output(output_name),\n dynamic_partitions_store=self.instance,\n )
\n\n
[docs] @public\n def asset_partition_keys_for_input(self, input_name: str) -> Sequence[str]:\n """Returns a list of the partition keys of the upstream asset corresponding to the\n given input.\n\n If you want to write your asset to support running a backfill of several partitions in a single run,\n you can use ``asset_partition_keys_for_input`` to get all of the partition keys of the input that\n are relevant to that backfill.\n\n Args:\n input_name (str): The name of the input to get the time window for.\n\n Examples:\n .. code-block:: python\n\n partitions_def = DailyPartitionsDefinition("2023-08-20")\n\n @asset(\n partitions_def=partitions_def\n )\n def upstream_asset():\n ...\n\n @asset(\n partitions_def=partitions_def\n )\n def an_asset(context: AssetExecutionContext, upstream_asset):\n context.log.info(context.asset_partition_keys_for_input("upstream_asset"))\n\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # ["2023-08-21", "2023-08-22", "2023-08-23", "2023-08-24", "2023-08-25"]\n\n @asset(\n ins={\n "upstream_asset": AssetIn(partition_mapping=TimeWindowPartitionMapping(start_offset=-1, end_offset=-1))\n }\n partitions_def=partitions_def,\n )\n def another_asset(context: AssetExecutionContext, upstream_asset):\n context.log.info(context.asset_partition_keys_for_input("upstream_asset"))\n\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # ["2023-08-20", "2023-08-21", "2023-08-22", "2023-08-23", "2023-08-24"]\n\n\n @asset(\n partitions_def=partitions_def,\n ins={\n "self_dependent_asset": AssetIn(partition_mapping=TimeWindowPartitionMapping(start_offset=-1, end_offset=-1))\n }\n )\n def self_dependent_asset(context: AssetExecutionContext, self_dependent_asset):\n context.log.info(context.asset_partition_keys_for_input("self_dependent_asset"))\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # ["2023-08-20", "2023-08-21", "2023-08-22", "2023-08-23", "2023-08-24"]\n """\n return list(\n self._step_execution_context.asset_partitions_subset_for_input(\n input_name\n ).get_partition_keys()\n )
\n\n
[docs] @public\n def asset_partitions_time_window_for_input(self, input_name: str = "result") -> TimeWindow:\n """The time window for the partitions of the input asset.\n\n If you want to write your asset to support running a backfill of several partitions in a single run,\n you can use ``asset_partitions_time_window_for_input`` to get the time window of the input that\n are relevant to that backfill.\n\n Raises an error if either of the following are true:\n - The input asset has no partitioning.\n - The input asset is not partitioned with a TimeWindowPartitionsDefinition or a\n MultiPartitionsDefinition with one time-partitioned dimension.\n\n Args:\n input_name (str): The name of the input to get the partition key for.\n\n Examples:\n .. code-block:: python\n\n partitions_def = DailyPartitionsDefinition("2023-08-20")\n\n @asset(\n partitions_def=partitions_def\n )\n def upstream_asset():\n ...\n\n @asset(\n partitions_def=partitions_def\n )\n def an_asset(context: AssetExecutionContext, upstream_asset):\n context.log.info(context.asset_partitions_time_window_for_input("upstream_asset"))\n\n\n # materializing the 2023-08-21 partition of this asset will log:\n # TimeWindow("2023-08-21", "2023-08-22")\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # TimeWindow("2023-08-21", "2023-08-26")\n\n\n @asset(\n ins={\n "upstream_asset": AssetIn(partition_mapping=TimeWindowPartitionMapping(start_offset=-1, end_offset=-1))\n }\n partitions_def=partitions_def,\n )\n def another_asset(context: AssetExecutionContext, upstream_asset):\n context.log.info(context.asset_partitions_time_window_for_input("upstream_asset"))\n\n\n # materializing the 2023-08-21 partition of this asset will log:\n # TimeWindow("2023-08-20", "2023-08-21")\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # TimeWindow("2023-08-21", "2023-08-26")\n\n\n @asset(\n partitions_def=partitions_def,\n ins={\n "self_dependent_asset": AssetIn(partition_mapping=TimeWindowPartitionMapping(start_offset=-1, end_offset=-1))\n }\n )\n def self_dependent_asset(context: AssetExecutionContext, self_dependent_asset):\n context.log.info(context.asset_partitions_time_window_for_input("self_dependent_asset"))\n\n # materializing the 2023-08-21 partition of this asset will log:\n # TimeWindow("2023-08-20", "2023-08-21")\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # TimeWindow("2023-08-20", "2023-08-25")\n\n """\n return self._step_execution_context.asset_partitions_time_window_for_input(input_name)
\n\n
[docs] @public\n @experimental\n def get_asset_provenance(self, asset_key: AssetKey) -> Optional[DataProvenance]:\n """Return the provenance information for the most recent materialization of an asset.\n\n Args:\n asset_key (AssetKey): Key of the asset for which to retrieve provenance.\n\n Returns:\n Optional[DataProvenance]: Provenance information for the most recent\n materialization of the asset. Returns `None` if the asset was never materialized or\n the materialization record is too old to contain provenance information.\n """\n record = self.instance.get_latest_data_version_record(asset_key)\n\n return (\n None if record is None else extract_data_provenance_from_entry(record.event_log_entry)\n )
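# --- Illustrative sketch (not part of the library source) ---
# Reads provenance for an upstream asset. Asset names are assumptions, and this
# assumes DataProvenance exposes `code_version` and `input_data_versions`.
from dagster import AssetExecutionContext, asset


@asset
def upstream():
    return 1


@asset
def provenance_aware(context: AssetExecutionContext, upstream):
    provenance = context.get_asset_provenance(context.asset_key_for_input("upstream"))
    if provenance is not None:
        context.log.info(f"upstream code version: {provenance.code_version}")
        context.log.info(f"upstream input data versions: {provenance.input_data_versions}")
    return upstream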
\n\n def set_data_version(self, asset_key: AssetKey, data_version: DataVersion) -> None:\n """Set the data version for an asset being materialized by the currently executing step.\n This is useful for external execution situations where it is not possible to return\n an `Output`.\n\n Args:\n asset_key (AssetKey): Key of the asset for which to set the data version.\n data_version (DataVersion): The data version to set.\n """\n self._step_execution_context.set_data_version(asset_key, data_version)\n\n @property\n def asset_check_spec(self) -> AssetCheckSpec:\n asset_checks_def = check.not_none(\n self.job_def.asset_layer.asset_checks_def_for_node(self.node_handle),\n "This context does not correspond to an AssetChecksDefinition",\n )\n return asset_checks_def.spec\n\n # In this mode no conversion is done on returned values and missing but expected outputs are not\n # allowed.\n @property\n def requires_typed_event_stream(self) -> bool:\n return self._step_execution_context.requires_typed_event_stream\n\n @property\n def typed_event_stream_error_message(self) -> Optional[str]:\n return self._step_execution_context.typed_event_stream_error_message\n\n def set_requires_typed_event_stream(self, *, error_message: Optional[str] = None) -> None:\n self._step_execution_context.set_requires_typed_event_stream(error_message=error_message)
\n\n\n
[docs]class AssetExecutionContext(OpExecutionContext):\n def __init__(self, step_execution_context: StepExecutionContext):\n super().__init__(step_execution_context=step_execution_context)
\n\n\ndef build_execution_context(\n step_context: StepExecutionContext,\n) -> Union[OpExecutionContext, AssetExecutionContext]:\n """Get the correct context based on the type of step (op or asset) and the user provided context\n type annotation. Follows these rules.\n\n step type annotation result\n asset AssetExecutionContext AssetExecutionContext\n asset OpExecutionContext OpExecutionContext\n asset None AssetExecutionContext\n op AssetExecutionContext Error - we cannot init an AssetExecutionContext w/o an AssetsDefinition\n op OpExecutionContext OpExecutionContext\n op None OpExecutionContext\n For ops in graph-backed assets\n step type annotation result\n op AssetExecutionContext AssetExecutionContext\n op OpExecutionContext OpExecutionContext\n op None OpExecutionContext\n """\n is_sda_step = step_context.is_sda_step\n is_op_in_graph_asset = is_sda_step and step_context.is_op_in_graph\n context_annotation = EmptyAnnotation\n compute_fn = step_context.op_def._compute_fn # noqa: SLF001\n compute_fn = (\n compute_fn\n if isinstance(compute_fn, DecoratedOpFunction)\n else DecoratedOpFunction(compute_fn)\n )\n if compute_fn.has_context_arg():\n context_param = compute_fn.get_context_arg()\n context_annotation = context_param.annotation\n\n # It would be nice to do this check at definition time, rather than at run time, but we don't\n # know if the op is part of an op job or a graph-backed asset until we have the step execution context\n if context_annotation is AssetExecutionContext and not is_sda_step:\n # AssetExecutionContext requires an AssetsDefinition during init, so an op in an op job\n # cannot be annotated with AssetExecutionContext\n raise DagsterInvalidDefinitionError(\n "Cannot annotate @op `context` parameter with type AssetExecutionContext unless the"\n " op is part of a graph-backed asset. `context` must be annotated with"\n " OpExecutionContext, or left blank."\n )\n\n if context_annotation is EmptyAnnotation:\n # if no type hint has been given, default to:\n # * AssetExecutionContext for sda steps, not in graph-backed assets\n # * OpExecutionContext for non sda steps\n # * OpExecutionContext for ops in graph-backed assets\n if is_op_in_graph_asset or not is_sda_step:\n return OpExecutionContext(step_context)\n return AssetExecutionContext(step_context)\n if context_annotation is AssetExecutionContext:\n return AssetExecutionContext(step_context)\n return OpExecutionContext(step_context)\n
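# --- Illustrative sketch (not part of the library source) ---
# Demonstrates the annotation rules documented above: on an asset step the
# context type handed to the function follows the type hint, and defaults to
# AssetExecutionContext when no hint is given. Asset names are assumptions.
from dagster import AssetExecutionContext, OpExecutionContext, asset


@asset
def hinted_with_asset_context(context: AssetExecutionContext):
    return 1


@asset
def hinted_with_op_context(context: OpExecutionContext):
    # Receives an OpExecutionContext, per the table above.
    return 2


@asset
def unhinted(context):
    # No hint: defaults to AssetExecutionContext for asset steps.
    return 3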
", "current_page_name": "_modules/dagster/_core/execution/context/compute", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.execution.context.compute"}, "hook": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.execution.context.hook

\nimport warnings\nfrom typing import TYPE_CHECKING, AbstractSet, Any, Dict, Mapping, Optional, Set, Union\n\nimport dagster._check as check\nfrom dagster._annotations import public\n\nfrom ...definitions.composition import PendingNodeInvocation\nfrom ...definitions.decorators.graph_decorator import graph\nfrom ...definitions.dependency import Node\nfrom ...definitions.hook_definition import HookDefinition\nfrom ...definitions.op_definition import OpDefinition\nfrom ...definitions.resource_definition import IContainsGenerator, Resources\nfrom ...errors import DagsterInvalidPropertyError, DagsterInvariantViolationError\nfrom ...log_manager import DagsterLogManager\nfrom ..plan.step import ExecutionStep\nfrom ..plan.utils import RetryRequestedFromPolicy\nfrom .system import StepExecutionContext\n\nif TYPE_CHECKING:\n    from dagster._core.instance import DagsterInstance\n\n\ndef _property_msg(prop_name: str, method_name: str) -> str:\n    return (\n        f"The {prop_name} {method_name} is not set when a `HookContext` is constructed from "\n        "`build_hook_context`."\n    )\n\n\ndef _check_property_on_test_context(\n    context: "HookContext", attr_str: str, user_facing_name: str, param_on_builder: str\n):\n    """Check if attribute is not None on context. If none, error, and point user in direction of\n    how to specify the parameter on the context object.\n    """\n    value = getattr(context, attr_str)\n    if value is None:\n        raise DagsterInvalidPropertyError(\n            f"Attribute '{user_facing_name}' was not provided when "\n            f"constructing context. Provide a value for the '{param_on_builder}' parameter on "\n            "'build_hook_context'. To learn more, check out the testing hooks section of Dagster's "\n            "concepts docs: https://docs.dagster.io/concepts/ops-jobs-graphs/op-hooks#testing-hooks"\n        )\n    else:\n        return value\n\n\n
[docs]class HookContext:\n """The ``context`` object available to a hook function on an DagsterEvent."""\n\n def __init__(\n self,\n step_execution_context: StepExecutionContext,\n hook_def: HookDefinition,\n ):\n self._step_execution_context = step_execution_context\n self._hook_def = check.inst_param(hook_def, "hook_def", HookDefinition)\n self._required_resource_keys = hook_def.required_resource_keys\n self._resources = step_execution_context.scoped_resources_builder.build(\n self._required_resource_keys\n )\n\n @public\n @property\n def job_name(self) -> str:\n """The name of the job where this hook is being triggered."""\n return self._step_execution_context.job_name\n\n @public\n @property\n def run_id(self) -> str:\n """The id of the run where this hook is being triggered."""\n return self._step_execution_context.run_id\n\n @public\n @property\n def hook_def(self) -> HookDefinition:\n """The hook that the context object belongs to."""\n return self._hook_def\n\n @public\n @property\n def instance(self) -> "DagsterInstance":\n """The instance configured to run the current job."""\n return self._step_execution_context.instance\n\n @property\n def op(self) -> Node:\n """The op instance associated with the hook."""\n return self._step_execution_context.op\n\n @property\n def step(self) -> ExecutionStep:\n warnings.warn(\n "The step property of HookContext has been deprecated, and will be removed "\n "in a future release."\n )\n return self._step_execution_context.step\n\n @public\n @property\n def step_key(self) -> str:\n """The key for the step where this hook is being triggered."""\n return self._step_execution_context.step.key\n\n @public\n @property\n def required_resource_keys(self) -> AbstractSet[str]:\n """Resources required by this hook."""\n return self._required_resource_keys\n\n @public\n @property\n def resources(self) -> "Resources":\n """Resources available in the hook context."""\n return self._resources\n\n @property\n def solid_config(self) -> Any:\n solid_config = self._step_execution_context.resolved_run_config.ops.get(\n str(self._step_execution_context.step.node_handle)\n )\n return solid_config.config if solid_config else None\n\n @public\n @property\n def op_config(self) -> Any:\n """The parsed config specific to this op."""\n return self.solid_config\n\n # Because of the fact that we directly use the log manager of the step, if a user calls\n # hook_context.log.with_tags, then they will end up mutating the step's logging tags as well.\n # This is not problematic because the hook only runs after the step has been completed.\n @public\n @property\n def log(self) -> DagsterLogManager:\n """Centralized log dispatch from user code."""\n return self._step_execution_context.log\n\n @property\n def solid_exception(self) -> Optional[BaseException]:\n """The thrown exception in a failed solid.\n\n Returns:\n Optional[BaseException]: the exception object, None if the solid execution succeeds.\n """\n return self.op_exception\n\n @public\n @property\n def op_exception(self) -> Optional[BaseException]:\n """The thrown exception in a failed op."""\n exc = self._step_execution_context.step_exception\n\n if isinstance(exc, RetryRequestedFromPolicy):\n return exc.__cause__\n\n return exc\n\n @property\n def solid_output_values(self) -> Mapping[str, Union[Any, Mapping[str, Any]]]:\n """The computed output values.\n\n Returns a dictionary where keys are output names and the values are:\n * the output values in the normal case\n * a dictionary from mapping key to corresponding value 
in the mapped case\n """\n results: Dict[str, Union[Any, Dict[str, Any]]] = {}\n captured = self._step_execution_context.step_output_capture\n\n if captured is None:\n check.failed("Outputs were unexpectedly not captured for hook")\n\n # make the returned values more user-friendly\n for step_output_handle, value in captured.items():\n if step_output_handle.mapping_key:\n if results.get(step_output_handle.output_name) is None:\n results[step_output_handle.output_name] = {\n step_output_handle.mapping_key: value\n }\n else:\n results[step_output_handle.output_name][step_output_handle.mapping_key] = value\n else:\n results[step_output_handle.output_name] = value\n\n return results\n\n @public\n @property\n def op_output_values(self):\n """Computed output values in an op."""\n return self.solid_output_values
\n\n\nclass UnboundHookContext(HookContext):\n def __init__(\n self,\n resources: Mapping[str, Any],\n op: Optional[Union[OpDefinition, PendingNodeInvocation]],\n run_id: Optional[str],\n job_name: Optional[str],\n op_exception: Optional[Exception],\n instance: Optional["DagsterInstance"],\n ):\n from ..build_resources import build_resources, wrap_resources_for_execution\n from ..context_creation_job import initialize_console_manager\n\n self._op = None\n if op is not None:\n\n @graph(name="hook_context_container")\n def temp_graph():\n op()\n\n self._op = temp_graph.nodes[0]\n\n # Open resource context manager\n self._resource_defs = wrap_resources_for_execution(resources)\n self._resources_cm = build_resources(self._resource_defs)\n self._resources = self._resources_cm.__enter__()\n self._resources_contain_cm = isinstance(self._resources, IContainsGenerator)\n\n self._run_id = run_id\n self._job_name = job_name\n self._op_exception = op_exception\n self._instance = instance\n\n self._log = initialize_console_manager(None)\n\n self._cm_scope_entered = False\n\n def __enter__(self):\n self._cm_scope_entered = True\n return self\n\n def __exit__(self, *exc: Any):\n self._resources_cm.__exit__(*exc)\n\n def __del__(self):\n if self._resources_contain_cm and not self._cm_scope_entered:\n self._resources_cm.__exit__(None, None, None)\n\n @property\n def job_name(self) -> str:\n return _check_property_on_test_context(\n self, attr_str="_job_name", user_facing_name="job_name", param_on_builder="job_name"\n )\n\n @property\n def run_id(self) -> str:\n return _check_property_on_test_context(\n self, attr_str="_run_id", user_facing_name="run_id", param_on_builder="run_id"\n )\n\n @property\n def hook_def(self) -> HookDefinition:\n raise DagsterInvalidPropertyError(_property_msg("hook_def", "property"))\n\n @property\n def op(self) -> Node:\n return _check_property_on_test_context(\n self, attr_str="_op", user_facing_name="op", param_on_builder="op"\n )\n\n @property\n def step(self) -> ExecutionStep:\n raise DagsterInvalidPropertyError(_property_msg("step", "property"))\n\n @property\n def step_key(self) -> str:\n raise DagsterInvalidPropertyError(_property_msg("step_key", "property"))\n\n @property\n def required_resource_keys(self) -> Set[str]:\n raise DagsterInvalidPropertyError(_property_msg("hook_def", "property"))\n\n @property\n def resources(self) -> "Resources":\n if self._resources_contain_cm and not self._cm_scope_entered:\n raise DagsterInvariantViolationError(\n "At least one provided resource is a generator, but attempting to access "\n "resources outside of context manager scope. You can use the following syntax to "\n "open a context manager: `with build_hook_context(...) 
as context:`"\n )\n return self._resources\n\n @property\n def solid_config(self) -> Any:\n raise DagsterInvalidPropertyError(_property_msg("solid_config", "property"))\n\n @property\n def log(self) -> DagsterLogManager:\n return self._log\n\n @property\n def op_exception(self) -> Optional[BaseException]:\n return self._op_exception\n\n @property\n def solid_output_values(self) -> Mapping[str, Union[Any, Mapping[str, Any]]]:\n """The computed output values.\n\n Returns a dictionary where keys are output names and the values are:\n * the output values in the normal case\n * a dictionary from mapping key to corresponding value in the mapped case\n """\n raise DagsterInvalidPropertyError(_property_msg("solid_output_values", "method"))\n\n @property\n def instance(self) -> "DagsterInstance":\n if not self._instance:\n raise DagsterInvariantViolationError(\n "Tried to access the HookContext instance, but no instance was provided to"\n " `build_hook_context`."\n )\n\n return self._instance\n\n\nclass BoundHookContext(HookContext):\n def __init__(\n self,\n hook_def: HookDefinition,\n resources: Resources,\n op: Optional[Node],\n log_manager: DagsterLogManager,\n run_id: Optional[str],\n job_name: Optional[str],\n op_exception: Optional[Exception],\n instance: Optional["DagsterInstance"],\n ):\n self._hook_def = hook_def\n self._resources = resources\n self._op = op\n self._log_manager = log_manager\n self._run_id = run_id\n self._job_name = job_name\n self._op_exception = op_exception\n self._instance = instance\n\n @property\n def job_name(self) -> str:\n return _check_property_on_test_context(\n self, attr_str="_job_name", user_facing_name="job_name", param_on_builder="job_name"\n )\n\n @property\n def run_id(self) -> str:\n return _check_property_on_test_context(\n self, attr_str="_run_id", user_facing_name="run_id", param_on_builder="run_id"\n )\n\n @property\n def hook_def(self) -> HookDefinition:\n return self._hook_def\n\n @property\n def op(self) -> Node:\n return _check_property_on_test_context(\n self, attr_str="_op", user_facing_name="op", param_on_builder="op"\n )\n\n @property\n def step(self) -> ExecutionStep:\n raise DagsterInvalidPropertyError(_property_msg("step", "property"))\n\n @property\n def step_key(self) -> str:\n raise DagsterInvalidPropertyError(_property_msg("step_key", "property"))\n\n @property\n def required_resource_keys(self) -> AbstractSet[str]:\n return self._hook_def.required_resource_keys\n\n @property\n def resources(self) -> "Resources":\n return self._resources\n\n @property\n def solid_config(self) -> Any:\n raise DagsterInvalidPropertyError(_property_msg("solid_config", "property"))\n\n @property\n def log(self) -> DagsterLogManager:\n return self._log_manager\n\n @property\n def op_exception(self):\n return self._op_exception\n\n @property\n def solid_output_values(self) -> Mapping[str, Union[Any, Mapping[str, Any]]]:\n """The computed output values.\n\n Returns a dictionary where keys are output names and the values are:\n * the output values in the normal case\n * a dictionary from mapping key to corresponding value in the mapped case\n """\n raise DagsterInvalidPropertyError(_property_msg("solid_output_values", "method"))\n\n @property\n def instance(self) -> "DagsterInstance":\n if not self._instance:\n raise DagsterInvariantViolationError(\n "Tried to access the HookContext instance, but no instance was provided to"\n " `build_hook_context`."\n )\n\n return self._instance\n\n\n
[docs]def build_hook_context(\n resources: Optional[Mapping[str, Any]] = None,\n op: Optional[Union[OpDefinition, PendingNodeInvocation]] = None,\n run_id: Optional[str] = None,\n job_name: Optional[str] = None,\n op_exception: Optional[Exception] = None,\n instance: Optional["DagsterInstance"] = None,\n) -> UnboundHookContext:\n """Builds hook context from provided parameters.\n\n ``build_hook_context`` can be used as either a function or a context manager. If there is a\n provided resource to ``build_hook_context`` that is a context manager, then it must be used as a\n context manager. This function can be used to provide the context argument to the invocation of\n a hook definition.\n\n Args:\n resources (Optional[Dict[str, Any]]): The resources to provide to the context. These can\n either be values or resource definitions.\n op (Optional[OpDefinition, PendingNodeInvocation]): The op definition which the\n hook may be associated with.\n run_id (Optional[str]): The id of the run in which the hook is invoked (provided for mocking purposes).\n job_name (Optional[str]): The name of the job in which the hook is used (provided for mocking purposes).\n op_exception (Optional[Exception]): The exception that caused the hook to be triggered.\n instance (Optional[DagsterInstance]): The Dagster instance configured to run the hook.\n\n Examples:\n .. code-block:: python\n\n context = build_hook_context()\n hook_to_invoke(context)\n\n with build_hook_context(resources={"foo": context_manager_resource}) as context:\n hook_to_invoke(context)\n """\n op = check.opt_inst_param(op, "op", (OpDefinition, PendingNodeInvocation))\n\n from dagster._core.instance import DagsterInstance\n\n return UnboundHookContext(\n resources=check.opt_mapping_param(resources, "resources", key_type=str),\n op=op,\n run_id=check.opt_str_param(run_id, "run_id"),\n job_name=check.opt_str_param(job_name, "job_name"),\n op_exception=check.opt_inst_param(op_exception, "op_exception", Exception),\n instance=check.opt_inst_param(instance, "instance", DagsterInstance),\n )
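A hedged test sketch for direct hook invocation with ``build_hook_context``; ``notify_on_failure`` and ``FakeSlack`` are the hypothetical names from the sketch above.

.. code-block:: python

    from dagster import build_hook_context

    class FakeSlack:
        def __init__(self):
            self.messages = []

        def send(self, text):
            self.messages.append(text)

    def test_notify_on_failure():
        slack = FakeSlack()
        with build_hook_context(
            resources={"slack": slack},
            op_exception=Exception("boom"),
            run_id="test-run",
            job_name="test_job",
        ) as context:
            notify_on_failure(context)
        assert "boom" in slack.messages[0]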
\n
", "current_page_name": "_modules/dagster/_core/execution/context/hook", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.execution.context.hook"}, "init": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.execution.context.init

\nfrom typing import Any, Mapping, Optional, Union\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.definitions.resource_definition import (\n    IContainsGenerator,\n    ResourceDefinition,\n    Resources,\n)\nfrom dagster._core.errors import DagsterInvariantViolationError\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._core.log_manager import DagsterLogManager\nfrom dagster._core.storage.dagster_run import DagsterRun\n\n\n
[docs]class InitResourceContext:\n """The context object available as the argument to the initialization function of a :py:class:`dagster.ResourceDefinition`.\n\n Users should not instantiate this object directly. To construct an `InitResourceContext` for testing purposes, use :py:func:`dagster.build_init_resource_context`.\n\n Example:\n .. code-block:: python\n\n from dagster import resource, InitResourceContext\n\n @resource\n def the_resource(init_context: InitResourceContext):\n init_context.log.info("Hello, world!")\n """\n\n def __init__(\n self,\n resource_config: Any,\n resources: Resources,\n resource_def: Optional[ResourceDefinition] = None,\n instance: Optional[DagsterInstance] = None,\n dagster_run: Optional[DagsterRun] = None,\n log_manager: Optional[DagsterLogManager] = None,\n ):\n self._resource_config = resource_config\n self._resource_def = resource_def\n self._log_manager = log_manager\n self._instance = instance\n self._resources = resources\n self._dagster_run = dagster_run\n\n @public\n @property\n def resource_config(self) -> Any:\n """The configuration data provided by the run config. The schema\n for this data is defined by the ``config_field`` argument to\n :py:class:`ResourceDefinition`.\n """\n return self._resource_config\n\n @public\n @property\n def resource_def(self) -> Optional[ResourceDefinition]:\n """The definition of the resource currently being constructed."""\n return self._resource_def\n\n @public\n @property\n def resources(self) -> Resources:\n """The resources that are available to the resource that we are initalizing."""\n return self._resources\n\n @public\n @property\n def instance(self) -> Optional[DagsterInstance]:\n """The Dagster instance configured for the current execution context."""\n return self._instance\n\n @property\n def dagster_run(self) -> Optional[DagsterRun]:\n """The dagster run to use. When initializing resources outside of execution context, this will be None."""\n return self._dagster_run\n\n @public\n @property\n def log(self) -> Optional[DagsterLogManager]:\n """The Dagster log manager configured for the current execution context."""\n return self._log_manager\n\n # backcompat: keep around this property from when InitResourceContext used to be a NamedTuple\n @public\n @property\n def log_manager(self) -> Optional[DagsterLogManager]:\n """The log manager for this run of the job."""\n return self._log_manager\n\n @public\n @property\n def run_id(self) -> Optional[str]:\n """The id for this run of the job or pipeline. When initializing resources outside of\n execution context, this will be None.\n """\n return self.dagster_run.run_id if self.dagster_run else None\n\n def replace_config(self, config: Any) -> "InitResourceContext":\n return InitResourceContext(\n resource_config=config,\n resources=self.resources,\n instance=self.instance,\n resource_def=self.resource_def,\n dagster_run=self.dagster_run,\n log_manager=self.log,\n )
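A hedged sketch of a resource initialization function reading ``resource_config`` from an ``InitResourceContext``; the ``table`` config field and return shape are hypothetical.

.. code-block:: python

    from dagster import InitResourceContext, resource

    @resource(config_schema={"table": str})
    def table_client(init_context: InitResourceContext):
        # resource_config carries the validated config for this resource
        table = init_context.resource_config["table"]
        init_context.log.info(f"Initializing client for table {table}")
        return {"table": table}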
\n\n\nclass UnboundInitResourceContext(InitResourceContext):\n """Resource initialization context outputted by ``build_init_resource_context``.\n\n Represents a context whose config has not yet been validated against a resource definition,\n hence the inability to access the `resource_def` attribute. When an instance of\n ``UnboundInitResourceContext`` is passed to a resource invocation, config is validated,\n and it is subsumed into an `InitResourceContext`, which contains the resource_def validated\n against.\n """\n\n def __init__(\n self,\n resource_config: Any,\n resources: Optional[Union[Resources, Mapping[str, Any]]],\n instance: Optional[DagsterInstance],\n ):\n from dagster._core.execution.api import ephemeral_instance_if_missing\n from dagster._core.execution.build_resources import (\n build_resources,\n wrap_resources_for_execution,\n )\n from dagster._core.execution.context_creation_job import initialize_console_manager\n\n self._instance_provided = (\n check.opt_inst_param(instance, "instance", DagsterInstance) is not None\n )\n # Construct ephemeral instance if missing\n self._instance_cm = ephemeral_instance_if_missing(instance)\n # Pylint can't infer that the ephemeral_instance context manager has an __enter__ method,\n # so ignore lint error\n instance = self._instance_cm.__enter__()\n\n if isinstance(resources, Resources):\n check.failed("Should not have a Resources object directly from this initialization")\n\n self._resource_defs = wrap_resources_for_execution(\n check.opt_mapping_param(resources, "resources")\n )\n\n self._resources_cm = build_resources(self._resource_defs, instance=instance)\n resources = self._resources_cm.__enter__()\n self._resources_contain_cm = isinstance(resources, IContainsGenerator)\n\n self._cm_scope_entered = False\n super(UnboundInitResourceContext, self).__init__(\n resource_config=resource_config,\n resources=resources,\n resource_def=None,\n instance=instance,\n dagster_run=None,\n log_manager=initialize_console_manager(None),\n )\n\n def __enter__(self):\n self._cm_scope_entered = True\n return self\n\n def __exit__(self, *exc):\n self._resources_cm.__exit__(*exc)\n if self._instance_provided:\n self._instance_cm.__exit__(*exc)\n\n def __del__(self):\n if self._resources_cm and self._resources_contain_cm and not self._cm_scope_entered:\n self._resources_cm.__exit__(None, None, None)\n if self._instance_provided and not self._cm_scope_entered:\n self._instance_cm.__exit__(None, None, None)\n\n @property\n def resource_config(self) -> Any:\n return self._resource_config\n\n @property\n def resource_def(self) -> Optional[ResourceDefinition]:\n raise DagsterInvariantViolationError(\n "UnboundInitLoggerContext has not been validated against a logger definition."\n )\n\n @property\n def resources(self) -> Resources:\n """The resources that are available to the resource that we are initalizing."""\n if self._resources_cm and self._resources_contain_cm and not self._cm_scope_entered:\n raise DagsterInvariantViolationError(\n "At least one provided resource is a generator, but attempting to access "\n "resources outside of context manager scope. You can use the following syntax to "\n "open a context manager: `with build_init_resource_context(...) 
as context:`"\n )\n return self._resources\n\n @property\n def instance(self) -> Optional[DagsterInstance]:\n return self._instance\n\n @property\n def log(self) -> Optional[DagsterLogManager]:\n return self._log_manager\n\n # backcompat: keep around this property from when InitResourceContext used to be a NamedTuple\n @property\n def log_manager(self) -> Optional[DagsterLogManager]:\n return self._log_manager\n\n @property\n def run_id(self) -> Optional[str]:\n return None\n\n\n
[docs]def build_init_resource_context(\n config: Optional[Mapping[str, Any]] = None,\n resources: Optional[Mapping[str, Any]] = None,\n instance: Optional[DagsterInstance] = None,\n) -> InitResourceContext:\n """Builds resource initialization context from provided parameters.\n\n ``build_init_resource_context`` can be used as either a function or context manager. If there is a\n provided resource to ``build_init_resource_context`` that is a context manager, then it must be\n used as a context manager. This function can be used to provide the context argument to the\n invocation of a resource.\n\n Args:\n resources (Optional[Dict[str, Any]]): The resources to provide to the context. These can be\n either values or resource definitions.\n config (Optional[Any]): The resource config to provide to the context.\n instance (Optional[DagsterInstance]): The dagster instance configured for the context.\n Defaults to DagsterInstance.ephemeral().\n\n Examples:\n .. code-block:: python\n\n context = build_init_resource_context()\n resource_to_init(context)\n\n with build_init_resource_context(\n resources={"foo": context_manager_resource}\n ) as context:\n resource_to_init(context)\n\n """\n return UnboundInitResourceContext(\n resource_config=check.opt_mapping_param(config, "config", key_type=str),\n instance=check.opt_inst_param(instance, "instance", DagsterInstance),\n resources=check.opt_mapping_param(resources, "resources", key_type=str),\n )
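A hedged test sketch using ``build_init_resource_context`` to invoke the hypothetical ``table_client`` resource above directly.

.. code-block:: python

    from dagster import build_init_resource_context

    def test_table_client():
        context = build_init_resource_context(config={"table": "events"})
        client = table_client(context)
        assert client["table"] == "events"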
\n
", "current_page_name": "_modules/dagster/_core/execution/context/init", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.execution.context.init"}, "input": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.execution.context.input

\nfrom datetime import datetime\nfrom typing import (\n    TYPE_CHECKING,\n    Any,\n    Iterable,\n    Iterator,\n    List,\n    Mapping,\n    Optional,\n    Sequence,\n    Union,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.definitions.events import AssetKey, AssetObservation, CoercibleToAssetKey\nfrom dagster._core.definitions.metadata import (\n    ArbitraryMetadataMapping,\n    MetadataValue,\n)\nfrom dagster._core.definitions.partition import PartitionsSubset\nfrom dagster._core.definitions.partition_key_range import PartitionKeyRange\nfrom dagster._core.definitions.time_window_partitions import TimeWindow, TimeWindowPartitionsSubset\nfrom dagster._core.errors import DagsterInvariantViolationError\nfrom dagster._core.instance import DagsterInstance, DynamicPartitionsStore\n\nif TYPE_CHECKING:\n    from dagster._core.definitions import PartitionsDefinition\n    from dagster._core.definitions.op_definition import OpDefinition\n    from dagster._core.definitions.resource_definition import Resources\n    from dagster._core.events import DagsterEvent\n    from dagster._core.execution.context.system import StepExecutionContext\n    from dagster._core.log_manager import DagsterLogManager\n    from dagster._core.types.dagster_type import DagsterType\n\n    from .output import OutputContext\n\n\n
[docs]class InputContext:\n """The ``context`` object available to the load_input method of :py:class:`InputManager`.\n\n Users should not instantiate this object directly. In order to construct\n an `InputContext` for testing an IO Manager's `load_input` method, use\n :py:func:`dagster.build_input_context`.\n\n Example:\n .. code-block:: python\n\n from dagster import IOManager, InputContext\n\n class MyIOManager(IOManager):\n def load_input(self, context: InputContext):\n ...\n """\n\n def __init__(\n self,\n *,\n name: Optional[str] = None,\n job_name: Optional[str] = None,\n op_def: Optional["OpDefinition"] = None,\n config: Optional[Any] = None,\n metadata: Optional[ArbitraryMetadataMapping] = None,\n upstream_output: Optional["OutputContext"] = None,\n dagster_type: Optional["DagsterType"] = None,\n log_manager: Optional["DagsterLogManager"] = None,\n resource_config: Optional[Mapping[str, Any]] = None,\n resources: Optional[Union["Resources", Mapping[str, Any]]] = None,\n step_context: Optional["StepExecutionContext"] = None,\n asset_key: Optional[AssetKey] = None,\n partition_key: Optional[str] = None,\n asset_partitions_subset: Optional[PartitionsSubset] = None,\n asset_partitions_def: Optional["PartitionsDefinition"] = None,\n instance: Optional[DagsterInstance] = None,\n ):\n from dagster._core.definitions.resource_definition import IContainsGenerator, Resources\n from dagster._core.execution.build_resources import build_resources\n\n self._name = name\n self._job_name = job_name\n self._op_def = op_def\n self._config = config\n self._metadata = metadata or {}\n self._upstream_output = upstream_output\n self._dagster_type = dagster_type\n self._log = log_manager\n self._resource_config = resource_config\n self._step_context = step_context\n self._asset_key = asset_key\n if self._step_context and self._step_context.has_partition_key:\n self._partition_key: Optional[str] = self._step_context.partition_key\n else:\n self._partition_key = partition_key\n\n self._asset_partitions_subset = asset_partitions_subset\n self._asset_partitions_def = asset_partitions_def\n\n if isinstance(resources, Resources):\n self._resources_cm = None\n self._resources = resources\n else:\n self._resources_cm = build_resources(\n check.opt_mapping_param(resources, "resources", key_type=str)\n )\n self._resources = self._resources_cm.__enter__()\n self._resources_contain_cm = isinstance(self._resources, IContainsGenerator)\n self._cm_scope_entered = False\n\n self._events: List["DagsterEvent"] = []\n self._observations: List[AssetObservation] = []\n self._instance = instance\n\n def __enter__(self):\n if self._resources_cm:\n self._cm_scope_entered = True\n return self\n\n def __exit__(self, *exc):\n if self._resources_cm:\n self._resources_cm.__exit__(*exc)\n\n def __del__(self):\n if self._resources_cm and self._resources_contain_cm and not self._cm_scope_entered:\n self._resources_cm.__exit__(None, None, None)\n\n @property\n def instance(self) -> DagsterInstance:\n if self._instance is None:\n raise DagsterInvariantViolationError(\n "Attempting to access instance, "\n "but it was not provided when constructing the InputContext"\n )\n return self._instance\n\n @public\n @property\n def has_input_name(self) -> bool:\n """If we're the InputContext is being used to load the result of a run from outside the run,\n then it won't have an input name.\n """\n return self._name is not None\n\n @public\n @property\n def name(self) -> str:\n """The name of the input that we're loading."""\n if self._name is 
None:\n raise DagsterInvariantViolationError(\n "Attempting to access name, "\n "but it was not provided when constructing the InputContext"\n )\n\n return self._name\n\n @property\n def job_name(self) -> str:\n if self._job_name is None:\n raise DagsterInvariantViolationError(\n "Attempting to access job_name, "\n "but it was not provided when constructing the InputContext"\n )\n return self._job_name\n\n @public\n @property\n def op_def(self) -> "OpDefinition":\n """The definition of the op that's loading the input."""\n if self._op_def is None:\n raise DagsterInvariantViolationError(\n "Attempting to access op_def, "\n "but it was not provided when constructing the InputContext"\n )\n\n return self._op_def\n\n @public\n @property\n def config(self) -> Any:\n """The config attached to the input that we're loading."""\n return self._config\n\n @public\n @property\n def metadata(self) -> Optional[ArbitraryMetadataMapping]:\n """A dict of metadata that is assigned to the InputDefinition that we're loading for.\n This property only contains metadata passed in explicitly with :py:class:`AssetIn`\n or :py:class:`In`. To access metadata of an upstream asset or operation definition,\n use the metadata in :py:attr:`.InputContext.upstream_output`.\n """\n return self._metadata\n\n @public\n @property\n def upstream_output(self) -> Optional["OutputContext"]:\n """Info about the output that produced the object we're loading."""\n return self._upstream_output\n\n @public\n @property\n def dagster_type(self) -> "DagsterType":\n """The type of this input.\n Dagster types do not propagate from an upstream output to downstream inputs,\n and this property only captures type information for the input that is either\n passed in explicitly with :py:class:`AssetIn` or :py:class:`In`, or can be\n infered from type hints. For an asset input, the Dagster type from the upstream\n asset definition is ignored.\n """\n if self._dagster_type is None:\n raise DagsterInvariantViolationError(\n "Attempting to access dagster_type, "\n "but it was not provided when constructing the InputContext"\n )\n\n return self._dagster_type\n\n @public\n @property\n def log(self) -> "DagsterLogManager":\n """The log manager to use for this input."""\n if self._log is None:\n raise DagsterInvariantViolationError(\n "Attempting to access log, "\n "but it was not provided when constructing the InputContext"\n )\n\n return self._log\n\n @public\n @property\n def resource_config(self) -> Optional[Mapping[str, Any]]:\n """The config associated with the resource that initializes the InputManager."""\n return self._resource_config\n\n @public\n @property\n def resources(self) -> Any:\n """The resources required by the resource that initializes the\n input manager. If using the :py:func:`@input_manager` decorator, these resources\n correspond to those requested with the `required_resource_keys` parameter.\n """\n if self._resources is None:\n raise DagsterInvariantViolationError(\n "Attempting to access resources, "\n "but it was not provided when constructing the InputContext"\n )\n\n if self._resources_cm and self._resources_contain_cm and not self._cm_scope_entered:\n raise DagsterInvariantViolationError(\n "At least one provided resource is a generator, but attempting to access "\n "resources outside of context manager scope. You can use the following syntax to "\n "open a context manager: `with build_input_context(...) 
as context:`"\n )\n return self._resources\n\n @public\n @property\n def has_asset_key(self) -> bool:\n """Returns True if an asset is being loaded as input, otherwise returns False. A return value of False\n indicates that an output from an op is being loaded as the input.\n """\n return self._asset_key is not None\n\n @public\n @property\n def asset_key(self) -> AssetKey:\n """The ``AssetKey`` of the asset that is being loaded as an input."""\n if self._asset_key is None:\n raise DagsterInvariantViolationError(\n "Attempting to access asset_key, but no asset is associated with this input"\n )\n\n return self._asset_key\n\n @public\n @property\n def asset_partitions_def(self) -> "PartitionsDefinition":\n """The PartitionsDefinition on the upstream asset corresponding to this input."""\n if self._asset_partitions_def is None:\n if self.asset_key:\n raise DagsterInvariantViolationError(\n f"Attempting to access partitions def for asset {self.asset_key}, but it is not"\n " partitioned"\n )\n else:\n raise DagsterInvariantViolationError(\n "Attempting to access partitions def for asset, but input does not correspond"\n " to an asset"\n )\n\n return self._asset_partitions_def\n\n @property\n def step_context(self) -> "StepExecutionContext":\n if self._step_context is None:\n raise DagsterInvariantViolationError(\n "Attempting to access step_context, "\n "but it was not provided when constructing the InputContext"\n )\n\n return self._step_context\n\n @public\n @property\n def has_partition_key(self) -> bool:\n """Whether the current run is a partitioned run."""\n return self._partition_key is not None\n\n @public\n @property\n def partition_key(self) -> str:\n """The partition key for the current run.\n\n Raises an error if the current run is not a partitioned run.\n """\n if self._partition_key is None:\n check.failed(\n "Tried to access partition_key on a non-partitioned run.",\n )\n\n return self._partition_key\n\n @public\n @property\n def has_asset_partitions(self) -> bool:\n """Returns True if the asset being loaded as input is partitioned."""\n return self._asset_partitions_subset is not None\n\n @public\n @property\n def asset_partition_key(self) -> str:\n """The partition key for input asset.\n\n Raises an error if the input asset has no partitioning, or if the run covers a partition\n range for the input asset.\n """\n subset = self._asset_partitions_subset\n\n if subset is None:\n check.failed("The input does not correspond to a partitioned asset.")\n\n partition_keys = list(subset.get_partition_keys())\n if len(partition_keys) == 1:\n return partition_keys[0]\n else:\n check.failed(\n f"Tried to access partition key for asset '{self.asset_key}', "\n f"but the number of input partitions != 1: '{subset}'."\n )\n\n @public\n @property\n def asset_partition_key_range(self) -> PartitionKeyRange:\n """The partition key range for input asset.\n\n Raises an error if the input asset has no partitioning.\n """\n subset = self._asset_partitions_subset\n\n if subset is None:\n check.failed(\n "Tried to access asset_partition_key_range, but the asset is not partitioned.",\n )\n\n partition_key_ranges = subset.get_partition_key_ranges(\n dynamic_partitions_store=self.instance\n )\n if len(partition_key_ranges) != 1:\n check.failed(\n "Tried to access asset_partition_key_range, but there are "\n f"({len(partition_key_ranges)}) key ranges associated with this input.",\n )\n\n return partition_key_ranges[0]\n\n @public\n @property\n def asset_partition_keys(self) -> Sequence[str]:\n """The 
partition keys for input asset.\n\n Raises an error if the input asset has no partitioning.\n """\n if self._asset_partitions_subset is None:\n check.failed(\n "Tried to access asset_partition_keys, but the asset is not partitioned.",\n )\n\n return list(self._asset_partitions_subset.get_partition_keys())\n\n @public\n @property\n def asset_partitions_time_window(self) -> TimeWindow:\n """The time window for the partitions of the input asset.\n\n Raises an error if either of the following are true:\n - The input asset has no partitioning.\n - The input asset is not partitioned with a TimeWindowPartitionsDefinition.\n """\n subset = self._asset_partitions_subset\n\n if subset is None:\n check.failed(\n "Tried to access asset_partitions_time_window, but the asset is not partitioned.",\n )\n\n if not isinstance(subset, TimeWindowPartitionsSubset):\n check.failed(\n "Tried to access asset_partitions_time_window, but the asset is not partitioned"\n " with time windows.",\n )\n\n time_windows = subset.included_time_windows\n if len(time_windows) != 1:\n check.failed(\n "Tried to access asset_partitions_time_window, but there are "\n f"({len(time_windows)}) time windows associated with this input.",\n )\n\n return time_windows[0]\n\n
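A hedged sketch of an ``IOManager.load_input`` that uses ``asset_partitions_time_window``; the SQL text and ``run_query`` helper are hypothetical stand-ins.

.. code-block:: python

    from dagster import IOManager, InputContext

    def run_query(sql: str):
        # placeholder for a real database call
        raise NotImplementedError

    class TimeWindowQueryIOManager(IOManager):
        def handle_output(self, context, obj):
            ...  # write path omitted in this sketch

        def load_input(self, context: InputContext):
            if context.has_asset_partitions:
                # a single contiguous time window covering the partitions being loaded
                window = context.asset_partitions_time_window
                return run_query(
                    f"SELECT * FROM events WHERE ts >= '{window.start}' AND ts < '{window.end}'"
                )
            return run_query("SELECT * FROM events")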
[docs] @public\n def get_identifier(self) -> Sequence[str]:\n """Utility method to get a collection of identifiers that as a whole represent a unique\n step input.\n\n If not using memoization, the unique identifier collection consists of\n\n - ``run_id``: the id of the run which generates the input.\n Note: This method also handles the re-execution memoization logic. If the step that\n generates the input is skipped in the re-execution, the ``run_id`` will be the id\n of its parent run.\n - ``step_key``: the key for a compute step.\n - ``name``: the name of the output. (default: 'result').\n\n If using memoization, the ``version`` corresponding to the step output is used in place of\n the ``run_id``.\n\n Returns:\n List[str, ...]: A list of identifiers, i.e. (run_id or version), step_key, and output_name\n """\n if self.upstream_output is None:\n raise DagsterInvariantViolationError(\n "InputContext.upstream_output not defined. Cannot compute an identifier"\n )\n\n return self.upstream_output.get_identifier()
\n\n
[docs] @public\n def get_asset_identifier(self) -> Sequence[str]:\n """The sequence of strings making up the AssetKey for the asset being loaded as an input.\n If the asset is partitioned, the identifier contains the partition key as the final element in the\n sequence. For example, for the asset key ``AssetKey(["foo", "bar", "baz"])``, materialized with\n partition key "2023-06-01", ``get_asset_identifier`` will return ``["foo", "bar", "baz", "2023-06-01"]``.\n """\n if self.asset_key is not None:\n if self.has_asset_partitions:\n return [*self.asset_key.path, self.asset_partition_key]\n else:\n return self.asset_key.path\n else:\n check.failed("Can't get asset identifier for an input with no asset key")
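As a hedged illustration, ``get_asset_identifier`` can be used to key storage for asset inputs; the base directory below is hypothetical.

.. code-block:: python

    import os

    from dagster import IOManager, InputContext, OutputContext

    class LocalFileIOManager(IOManager):
        BASE_DIR = "/tmp/dagster_storage"  # hypothetical location

        def handle_output(self, context: OutputContext, obj):
            ...  # symmetric write path omitted in this sketch

        def load_input(self, context: InputContext):
            # e.g. ["foo", "bar", "baz", "2023-06-01"] for a partitioned asset input
            path = os.path.join(self.BASE_DIR, *context.get_asset_identifier())
            with open(path) as f:
                return f.read()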
\n\n def consume_events(self) -> Iterator["DagsterEvent"]:\n """Pops and yields all user-generated events that have been recorded from this context.\n\n If consume_events has not yet been called, this will yield all logged events since the call to `handle_input`. If consume_events has been called, it will yield all events since the last time consume_events was called. Designed for internal use. Users should never need to invoke this method.\n """\n events = self._events\n self._events = []\n yield from events\n\n def add_input_metadata(\n self,\n metadata: Mapping[str, Any],\n description: Optional[str] = None,\n ) -> None:\n """Accepts a dictionary of metadata. Metadata entries will appear on the LOADED_INPUT event.\n If the input is an asset, metadata will be attached to an asset observation.\n\n The asset observation will be yielded from the run and appear in the event log.\n Only valid if the context has an asset key.\n """\n from dagster._core.definitions.metadata import normalize_metadata\n from dagster._core.events import DagsterEvent\n\n metadata = check.mapping_param(metadata, "metadata", key_type=str)\n self._metadata = {**self._metadata, **normalize_metadata(metadata)}\n if self.has_asset_key:\n check.opt_str_param(description, "description")\n\n observation = AssetObservation(\n asset_key=self.asset_key,\n description=description,\n partition=self.asset_partition_key if self.has_asset_partitions else None,\n metadata=metadata,\n )\n self._observations.append(observation)\n if self._step_context:\n self._events.append(DagsterEvent.asset_observation(self._step_context, observation))\n\n def get_observations(\n self,\n ) -> Sequence[AssetObservation]:\n """Retrieve the list of user-generated asset observations that were observed via the context.\n\n User-generated events that were yielded will not appear in this list.\n\n **Examples:**\n\n .. code-block:: python\n\n from dagster import IOManager, build_input_context, AssetObservation\n\n class MyIOManager(IOManager):\n def load_input(self, context, obj):\n ...\n\n def test_load_input():\n mgr = MyIOManager()\n context = build_input_context()\n mgr.load_input(context)\n observations = context.get_observations()\n ...\n """\n return self._observations\n\n def consume_metadata(self) -> Mapping[str, MetadataValue]:\n result = self._metadata\n self._metadata = {}\n return result
\n\n\n
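A hedged sketch of attaching metadata while loading an input via ``add_input_metadata``; the loaded rows are illustrative placeholders.

.. code-block:: python

    from dagster import IOManager, InputContext

    class MetadataLoggingIOManager(IOManager):
        def handle_output(self, context, obj):
            ...  # write path omitted in this sketch

        def load_input(self, context: InputContext):
            rows = [{"id": 1}, {"id": 2}]  # stand-in for data loaded from storage
            if context.has_asset_key:
                # recorded as an AssetObservation and attached to the LOADED_INPUT event
                context.add_input_metadata({"num_rows": len(rows)})
            return rows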
[docs]def build_input_context(\n name: Optional[str] = None,\n config: Optional[Any] = None,\n metadata: Optional[ArbitraryMetadataMapping] = None,\n upstream_output: Optional["OutputContext"] = None,\n dagster_type: Optional["DagsterType"] = None,\n resource_config: Optional[Mapping[str, Any]] = None,\n resources: Optional[Mapping[str, Any]] = None,\n op_def: Optional["OpDefinition"] = None,\n step_context: Optional["StepExecutionContext"] = None,\n asset_key: Optional[CoercibleToAssetKey] = None,\n partition_key: Optional[str] = None,\n asset_partition_key_range: Optional[PartitionKeyRange] = None,\n asset_partitions_def: Optional["PartitionsDefinition"] = None,\n instance: Optional[DagsterInstance] = None,\n) -> "InputContext":\n """Builds input context from provided parameters.\n\n ``build_input_context`` can be used as either a function, or a context manager. If resources\n that are also context managers are provided, then ``build_input_context`` must be used as a\n context manager.\n\n Args:\n name (Optional[str]): The name of the input that we're loading.\n config (Optional[Any]): The config attached to the input that we're loading.\n metadata (Optional[Dict[str, Any]]): A dict of metadata that is assigned to the\n InputDefinition that we're loading for.\n upstream_output (Optional[OutputContext]): Info about the output that produced the object\n we're loading.\n dagster_type (Optional[DagsterType]): The type of this input.\n resource_config (Optional[Dict[str, Any]]): The resource config to make available from the\n input context. This usually corresponds to the config provided to the resource that\n loads the input manager.\n resources (Optional[Dict[str, Any]]): The resources to make available from the context.\n For a given key, you can provide either an actual instance of an object, or a resource\n definition.\n asset_key (Optional[Union[AssetKey, Sequence[str], str]]): The asset key attached to the InputDefinition.\n op_def (Optional[OpDefinition]): The definition of the op that's loading the input.\n step_context (Optional[StepExecutionContext]): For internal use.\n partition_key (Optional[str]): String value representing partition key to execute with.\n asset_partition_key_range (Optional[str]): The range of asset partition keys to load.\n asset_partitions_def: Optional[PartitionsDefinition]: The PartitionsDefinition of the asset\n being loaded.\n\n Examples:\n .. 
code-block:: python\n\n build_input_context()\n\n with build_input_context(resources={"foo": context_manager_resource}) as context:\n do_something\n """\n from dagster._core.definitions import OpDefinition, PartitionsDefinition\n from dagster._core.execution.context.output import OutputContext\n from dagster._core.execution.context.system import StepExecutionContext\n from dagster._core.execution.context_creation_job import initialize_console_manager\n from dagster._core.types.dagster_type import DagsterType\n\n name = check.opt_str_param(name, "name")\n metadata = check.opt_mapping_param(metadata, "metadata", key_type=str)\n upstream_output = check.opt_inst_param(upstream_output, "upstream_output", OutputContext)\n dagster_type = check.opt_inst_param(dagster_type, "dagster_type", DagsterType)\n resource_config = check.opt_mapping_param(resource_config, "resource_config", key_type=str)\n resources = check.opt_mapping_param(resources, "resources", key_type=str)\n op_def = check.opt_inst_param(op_def, "op_def", OpDefinition)\n step_context = check.opt_inst_param(step_context, "step_context", StepExecutionContext)\n asset_key = AssetKey.from_coercible(asset_key) if asset_key else None\n partition_key = check.opt_str_param(partition_key, "partition_key")\n asset_partition_key_range = check.opt_inst_param(\n asset_partition_key_range, "asset_partition_key_range", PartitionKeyRange\n )\n asset_partitions_def = check.opt_inst_param(\n asset_partitions_def, "asset_partitions_def", PartitionsDefinition\n )\n if asset_partitions_def and asset_partition_key_range:\n asset_partitions_subset = asset_partitions_def.empty_subset().with_partition_key_range(\n asset_partition_key_range, dynamic_partitions_store=instance\n )\n elif asset_partition_key_range:\n asset_partitions_subset = KeyRangeNoPartitionsDefPartitionsSubset(asset_partition_key_range)\n else:\n asset_partitions_subset = None\n\n return InputContext(\n name=name,\n job_name=None,\n config=config,\n metadata=metadata,\n upstream_output=upstream_output,\n dagster_type=dagster_type,\n log_manager=initialize_console_manager(None),\n resource_config=resource_config,\n resources=resources,\n step_context=step_context,\n op_def=op_def,\n asset_key=asset_key,\n partition_key=partition_key,\n asset_partitions_subset=asset_partitions_subset,\n asset_partitions_def=asset_partitions_def,\n instance=instance,\n )
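A hedged test sketch exercising ``load_input`` directly with ``build_input_context``; ``MetadataLoggingIOManager`` is the hypothetical manager from the sketch above.

.. code-block:: python

    from dagster import build_input_context

    def test_load_input_records_observation():
        context = build_input_context(
            asset_key=["events"],  # coercible to an AssetKey
            partition_key="2023-06-01",
        )
        result = MetadataLoggingIOManager().load_input(context)
        assert len(result) == 2
        assert len(context.get_observations()) == 1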
\n\n\nclass KeyRangeNoPartitionsDefPartitionsSubset(PartitionsSubset):\n """For build_input_context when no PartitionsDefinition has been provided."""\n\n def __init__(self, key_range: PartitionKeyRange):\n self._key_range = key_range\n\n def get_partition_keys_not_in_subset(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Iterable[str]:\n raise NotImplementedError()\n\n def get_partition_keys(self, current_time: Optional[datetime] = None) -> Iterable[str]:\n if self._key_range.start == self._key_range.end:\n return self._key_range.start\n else:\n raise NotImplementedError()\n\n def get_partition_key_ranges(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Sequence[PartitionKeyRange]:\n return [self._key_range]\n\n def with_partition_keys(self, partition_keys: Iterable[str]) -> "PartitionsSubset":\n raise NotImplementedError()\n\n def with_partition_key_range(\n self,\n partition_key_range: PartitionKeyRange,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> "PartitionsSubset":\n raise NotImplementedError()\n\n def serialize(self) -> str:\n raise NotImplementedError()\n\n @property\n def partitions_def(self) -> "PartitionsDefinition":\n raise NotImplementedError()\n\n def __len__(self) -> int:\n raise NotImplementedError()\n\n def __contains__(self, value) -> bool:\n raise NotImplementedError()\n\n @classmethod\n def from_serialized(\n cls, partitions_def: "PartitionsDefinition", serialized: str\n ) -> "PartitionsSubset":\n raise NotImplementedError()\n\n @classmethod\n def can_deserialize(\n cls,\n partitions_def: "PartitionsDefinition",\n serialized: str,\n serialized_partitions_def_unique_id: Optional[str],\n serialized_partitions_def_class_name: Optional[str],\n ) -> bool:\n raise NotImplementedError()\n\n @classmethod\n def empty_subset(cls, partitions_def: "PartitionsDefinition") -> "PartitionsSubset":\n raise NotImplementedError()\n
", "current_page_name": "_modules/dagster/_core/execution/context/input", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.execution.context.input"}, "invocation": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.execution.context.invocation

\nfrom contextlib import ExitStack\nfrom typing import (\n    AbstractSet,\n    Any,\n    Dict,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Union,\n    cast,\n)\n\nimport dagster._check as check\nfrom dagster._core.definitions.assets import AssetsDefinition\nfrom dagster._core.definitions.composition import PendingNodeInvocation\nfrom dagster._core.definitions.decorators.op_decorator import DecoratedOpFunction\nfrom dagster._core.definitions.dependency import Node, NodeHandle\nfrom dagster._core.definitions.events import (\n    AssetMaterialization,\n    AssetObservation,\n    ExpectationResult,\n    UserEvent,\n)\nfrom dagster._core.definitions.hook_definition import HookDefinition\nfrom dagster._core.definitions.job_definition import JobDefinition\nfrom dagster._core.definitions.multi_dimensional_partitions import MultiPartitionsDefinition\nfrom dagster._core.definitions.op_definition import OpDefinition\nfrom dagster._core.definitions.partition_key_range import PartitionKeyRange\nfrom dagster._core.definitions.resource_definition import (\n    IContainsGenerator,\n    ResourceDefinition,\n    Resources,\n    ScopedResourcesBuilder,\n)\nfrom dagster._core.definitions.resource_requirement import ensure_requirements_satisfied\nfrom dagster._core.definitions.step_launcher import StepLauncher\nfrom dagster._core.definitions.time_window_partitions import (\n    TimeWindow,\n    TimeWindowPartitionsDefinition,\n    has_one_dimension_time_window_partitioning,\n)\nfrom dagster._core.errors import (\n    DagsterInvalidInvocationError,\n    DagsterInvalidPropertyError,\n    DagsterInvariantViolationError,\n)\nfrom dagster._core.execution.build_resources import build_resources, wrap_resources_for_execution\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._core.log_manager import DagsterLogManager\nfrom dagster._core.storage.dagster_run import DagsterRun\nfrom dagster._core.types.dagster_type import DagsterType\nfrom dagster._utils.forked_pdb import ForkedPdb\nfrom dagster._utils.merger import merge_dicts\n\nfrom .compute import OpExecutionContext\nfrom .system import StepExecutionContext, TypeCheckContext\n\n\ndef _property_msg(prop_name: str, method_name: str) -> str:\n    return (\n        f"The {prop_name} {method_name} is not set on the context when a solid is directly invoked."\n    )\n\n\nclass UnboundOpExecutionContext(OpExecutionContext):\n    """The ``context`` object available as the first argument to a solid's compute function when\n    being invoked directly. 
Can also be used as a context manager.\n    """\n\n    def __init__(\n        self,\n        op_config: Any,\n        resources_dict: Mapping[str, Any],\n        resources_config: Mapping[str, Any],\n        instance: Optional[DagsterInstance],\n        partition_key: Optional[str],\n        partition_key_range: Optional[PartitionKeyRange],\n        mapping_key: Optional[str],\n        assets_def: Optional[AssetsDefinition],\n    ):\n        from dagster._core.execution.api import ephemeral_instance_if_missing\n        from dagster._core.execution.context_creation_job import initialize_console_manager\n\n        self._op_config = op_config\n        self._mapping_key = mapping_key\n\n        self._exit_stack = ExitStack()\n\n        # Construct ephemeral instance if missing\n        self._instance = self._exit_stack.enter_context(ephemeral_instance_if_missing(instance))\n\n        self._resources_config = resources_config\n        # Open resource context manager\n        self._resources_contain_cm = False\n        self._resource_defs = wrap_resources_for_execution(resources_dict)\n        self._resources = self._exit_stack.enter_context(\n            build_resources(\n                resources=self._resource_defs,\n                instance=self._instance,\n                resource_config=resources_config,\n            )\n        )\n        self._resources_contain_cm = isinstance(self._resources, IContainsGenerator)\n\n        self._log = initialize_console_manager(None)\n        self._pdb: Optional[ForkedPdb] = None\n        self._cm_scope_entered = False\n        check.invariant(\n            not (partition_key and partition_key_range),\n            "Must supply at most one of partition_key or partition_key_range",\n        )\n        self._partition_key = partition_key\n        self._partition_key_range = partition_key_range\n        self._user_events: List[UserEvent] = []\n        self._output_metadata: Dict[str, Any] = {}\n\n        self._assets_def = check.opt_inst_param(assets_def, "assets_def", AssetsDefinition)\n\n    def __enter__(self):\n        self._cm_scope_entered = True\n        return self\n\n    def __exit__(self, *exc):\n        self._exit_stack.close()\n\n    def __del__(self):\n        self._exit_stack.close()\n\n    @property\n    def op_config(self) -> Any:\n        return self._op_config\n\n    @property\n    def resource_keys(self) -> AbstractSet[str]:\n        return self._resource_defs.keys()\n\n    @property\n    def resources(self) -> Resources:\n        if self._resources_contain_cm and not self._cm_scope_entered:\n            raise DagsterInvariantViolationError(\n                "At least one provided resource is a generator, but attempting to access "\n                "resources outside of context manager scope. You can use the following syntax to "\n                "open a context manager: `with build_op_context(...) as context:`"\n            )\n        return self._resources\n\n    @property\n    def dagster_run(self) -> DagsterRun:\n        raise DagsterInvalidPropertyError(_property_msg("pipeline_run", "property"))\n\n    @property\n    def instance(self) -> DagsterInstance:\n        return self._instance\n\n    @property\n    def pdb(self) -> ForkedPdb:\n        """dagster.utils.forked_pdb.ForkedPdb: Gives access to pdb debugging from within the solid.\n\n        Example:\n        .. 
code-block:: python\n\n            @solid\n            def debug_solid(context):\n                context.pdb.set_trace()\n\n        """\n        if self._pdb is None:\n            self._pdb = ForkedPdb()\n\n        return self._pdb\n\n    @property\n    def step_launcher(self) -> Optional[StepLauncher]:\n        raise DagsterInvalidPropertyError(_property_msg("step_launcher", "property"))\n\n    @property\n    def run_id(self) -> str:\n        """str: Hard-coded value to indicate that we are directly invoking solid."""\n        return "EPHEMERAL"\n\n    @property\n    def run_config(self) -> dict:\n        raise DagsterInvalidPropertyError(_property_msg("run_config", "property"))\n\n    @property\n    def job_def(self) -> JobDefinition:\n        raise DagsterInvalidPropertyError(_property_msg("job_def", "property"))\n\n    @property\n    def job_name(self) -> str:\n        raise DagsterInvalidPropertyError(_property_msg("job_name", "property"))\n\n    @property\n    def log(self) -> DagsterLogManager:\n        """DagsterLogManager: A console manager constructed for this context."""\n        return self._log\n\n    @property\n    def node_handle(self) -> NodeHandle:\n        raise DagsterInvalidPropertyError(_property_msg("solid_handle", "property"))\n\n    @property\n    def op(self) -> JobDefinition:\n        raise DagsterInvalidPropertyError(_property_msg("op", "property"))\n\n    @property\n    def solid(self) -> Node:\n        raise DagsterInvalidPropertyError(_property_msg("solid", "property"))\n\n    @property\n    def op_def(self) -> OpDefinition:\n        raise DagsterInvalidPropertyError(_property_msg("op_def", "property"))\n\n    @property\n    def assets_def(self) -> AssetsDefinition:\n        raise DagsterInvalidPropertyError(_property_msg("assets_def", "property"))\n\n    @property\n    def has_partition_key(self) -> bool:\n        return self._partition_key is not None\n\n    @property\n    def partition_key(self) -> str:\n        if self._partition_key:\n            return self._partition_key\n        check.failed("Tried to access partition_key for a non-partitioned run")\n\n    @property\n    def partition_key_range(self) -> PartitionKeyRange:\n        """The range of partition keys for the current run.\n\n        If run is for a single partition key, return a `PartitionKeyRange` with the same start and\n        end. 
Raises an error if the current run is not a partitioned run.\n        """\n        if self._partition_key_range:\n            return self._partition_key_range\n        elif self._partition_key:\n            return PartitionKeyRange(self._partition_key, self._partition_key)\n        else:\n            check.failed("Tried to access partition_key range for a non-partitioned run")\n\n    def asset_partition_key_for_output(self, output_name: str = "result") -> str:\n        return self.partition_key\n\n    def has_tag(self, key: str) -> bool:\n        raise DagsterInvalidPropertyError(_property_msg("has_tag", "method"))\n\n    def get_tag(self, key: str) -> str:\n        raise DagsterInvalidPropertyError(_property_msg("get_tag", "method"))\n\n    def get_step_execution_context(self) -> StepExecutionContext:\n        raise DagsterInvalidPropertyError(_property_msg("get_step_execution_context", "methods"))\n\n    def bind(\n        self,\n        op_def: OpDefinition,\n        pending_invocation: Optional[PendingNodeInvocation[OpDefinition]],\n        assets_def: Optional[AssetsDefinition],\n        config_from_args: Optional[Mapping[str, Any]],\n        resources_from_args: Optional[Mapping[str, Any]],\n    ) -> "BoundOpExecutionContext":\n        from dagster._core.definitions.resource_invocation import resolve_bound_config\n\n        if resources_from_args:\n            if self._resource_defs:\n                raise DagsterInvalidInvocationError(\n                    "Cannot provide resources in both context and kwargs"\n                )\n            resource_defs = wrap_resources_for_execution(resources_from_args)\n            # add new resources context to the stack to be cleared on exit\n            resources = self._exit_stack.enter_context(\n                build_resources(resource_defs, self.instance)\n            )\n        elif assets_def and assets_def.resource_defs:\n            for key in sorted(list(assets_def.resource_defs.keys())):\n                if key in self._resource_defs:\n                    raise DagsterInvalidInvocationError(\n                        f"Error when invoking {assets_def!s} resource '{key}' "\n                        "provided on both the definition and invocation context. 
Please "\n                        "provide on only one or the other."\n                    )\n            resource_defs = wrap_resources_for_execution(\n                {**self._resource_defs, **assets_def.resource_defs}\n            )\n            # add new resources context to the stack to be cleared on exit\n            resources = self._exit_stack.enter_context(\n                build_resources(resource_defs, self.instance, self._resources_config)\n            )\n        else:\n            resources = self.resources\n            resource_defs = self._resource_defs\n\n        _validate_resource_requirements(resource_defs, op_def)\n\n        if self.op_config and config_from_args:\n            raise DagsterInvalidInvocationError("Cannot provide config in both context and kwargs")\n        op_config = resolve_bound_config(config_from_args or self.op_config, op_def)\n\n        return BoundOpExecutionContext(\n            op_def=op_def,\n            op_config=op_config,\n            resources=resources,\n            resources_config=self._resources_config,\n            instance=self.instance,\n            log_manager=self.log,\n            pdb=self.pdb,\n            tags=(\n                pending_invocation.tags\n                if isinstance(pending_invocation, PendingNodeInvocation)\n                else None\n            ),\n            hook_defs=(\n                pending_invocation.hook_defs\n                if isinstance(pending_invocation, PendingNodeInvocation)\n                else None\n            ),\n            alias=(\n                pending_invocation.given_alias\n                if isinstance(pending_invocation, PendingNodeInvocation)\n                else None\n            ),\n            user_events=self._user_events,\n            output_metadata=self._output_metadata,\n            mapping_key=self._mapping_key,\n            partition_key=self._partition_key,\n            partition_key_range=self._partition_key_range,\n            assets_def=assets_def,\n        )\n\n    def get_events(self) -> Sequence[UserEvent]:\n        """Retrieve the list of user-generated events that were logged via the context.\n\n        **Examples:**\n\n        .. 
code-block:: python\n\n            from dagster import op, build_op_context, AssetMaterialization, ExpectationResult\n\n            @op\n            def my_op(context):\n                ...\n\n            def test_my_op():\n                context = build_op_context()\n                my_op(context)\n                all_user_events = context.get_events()\n                materializations = [event for event in all_user_events if isinstance(event, AssetMaterialization)]\n                expectation_results = [event for event in all_user_events if isinstance(event, ExpectationResult)]\n                ...\n        """\n        return self._user_events\n\n    def get_output_metadata(\n        self, output_name: str, mapping_key: Optional[str] = None\n    ) -> Optional[Mapping[str, Any]]:\n        """Retrieve metadata that was logged for an output and mapping_key, if it exists.\n\n        If metadata cannot be found for the particular output_name/mapping_key combination, None will be returned.\n\n        Args:\n            output_name (str): The name of the output to retrieve logged metadata for.\n            mapping_key (Optional[str]): The mapping key to retrieve metadata for (only applies when using dynamic outputs).\n\n        Returns:\n            Optional[Mapping[str, Any]]: The metadata values present for the output_name/mapping_key combination, if present.\n        """\n        metadata = self._output_metadata.get(output_name)\n        if mapping_key and metadata:\n            return metadata.get(mapping_key)\n        return metadata\n\n    def get_mapping_key(self) -> Optional[str]:\n        return self._mapping_key\n\n\ndef _validate_resource_requirements(\n    resource_defs: Mapping[str, ResourceDefinition], op_def: OpDefinition\n) -> None:\n    """Validate correctness of resources against required resource keys."""\n    if cast(DecoratedOpFunction, op_def.compute_fn).has_context_arg():\n        for requirement in op_def.get_resource_requirements():\n            if not requirement.is_io_manager_requirement:\n                ensure_requirements_satisfied(resource_defs, [requirement])\n\n\nclass BoundOpExecutionContext(OpExecutionContext):\n    """The op execution context that is passed to the compute function during invocation.\n\n    This context is bound to a specific op definition, for which the resources and config have\n    been validated.\n    """\n\n    _op_def: OpDefinition\n    _op_config: Any\n    _resources: "Resources"\n    _resources_config: Mapping[str, Any]\n    _instance: DagsterInstance\n    _log_manager: DagsterLogManager\n    _pdb: Optional[ForkedPdb]\n    _tags: Mapping[str, str]\n    _hook_defs: Optional[AbstractSet[HookDefinition]]\n    _alias: str\n    _user_events: List[UserEvent]\n    _seen_outputs: Dict[str, Union[str, Set[str]]]\n    _output_metadata: Dict[str, Any]\n    _mapping_key: Optional[str]\n    _partition_key: Optional[str]\n    _partition_key_range: Optional[PartitionKeyRange]\n    _assets_def: Optional[AssetsDefinition]\n\n    def __init__(\n        self,\n        op_def: OpDefinition,\n        op_config: Any,\n        resources: "Resources",\n        resources_config: Mapping[str, Any],\n        instance: DagsterInstance,\n        log_manager: DagsterLogManager,\n        pdb: Optional[ForkedPdb],\n        tags: Optional[Mapping[str, str]],\n        hook_defs: Optional[AbstractSet[HookDefinition]],\n        alias: Optional[str],\n        user_events: List[UserEvent],\n        output_metadata: Dict[str, Any],\n        mapping_key: Optional[str],\n  
      partition_key: Optional[str],\n        partition_key_range: Optional[PartitionKeyRange],\n        assets_def: Optional[AssetsDefinition],\n    ):\n        self._op_def = op_def\n        self._op_config = op_config\n        self._resources = resources\n        self._instance = instance\n        self._log = log_manager\n        self._pdb = pdb\n        self._tags = merge_dicts(self._op_def.tags, tags) if tags else self._op_def.tags\n        self._hook_defs = hook_defs\n        self._alias = alias if alias else self._op_def.name\n        self._resources_config = resources_config\n        self._user_events = user_events\n        self._seen_outputs = {}\n        self._output_metadata = output_metadata\n        self._mapping_key = mapping_key\n        self._partition_key = partition_key\n        self._partition_key_range = partition_key_range\n        self._assets_def = assets_def\n        self._requires_typed_event_stream = False\n        self._typed_event_stream_error_message = None\n\n    @property\n    def op_config(self) -> Any:\n        return self._op_config\n\n    @property\n    def resources(self) -> Resources:\n        return self._resources\n\n    @property\n    def dagster_run(self) -> DagsterRun:\n        raise DagsterInvalidPropertyError(_property_msg("pipeline_run", "property"))\n\n    @property\n    def instance(self) -> DagsterInstance:\n        return self._instance\n\n    @property\n    def pdb(self) -> ForkedPdb:\n        """dagster.utils.forked_pdb.ForkedPdb: Gives access to pdb debugging from within the solid.\n\n        Example:\n        .. code-block:: python\n\n            @solid\n            def debug_solid(context):\n                context.pdb.set_trace()\n\n        """\n        if self._pdb is None:\n            self._pdb = ForkedPdb()\n\n        return self._pdb\n\n    @property\n    def step_launcher(self) -> Optional[StepLauncher]:\n        raise DagsterInvalidPropertyError(_property_msg("step_launcher", "property"))\n\n    @property\n    def run_id(self) -> str:\n        """str: Hard-coded value to indicate that we are directly invoking solid."""\n        return "EPHEMERAL"\n\n    @property\n    def run_config(self) -> Mapping[str, object]:\n        run_config: Dict[str, object] = {}\n        if self._op_config:\n            run_config["ops"] = {self._op_def.name: {"config": self._op_config}}\n        run_config["resources"] = self._resources_config\n        return run_config\n\n    @property\n    def job_def(self) -> JobDefinition:\n        raise DagsterInvalidPropertyError(_property_msg("job_def", "property"))\n\n    @property\n    def job_name(self) -> str:\n        raise DagsterInvalidPropertyError(_property_msg("job_name", "property"))\n\n    @property\n    def log(self) -> DagsterLogManager:\n        """DagsterLogManager: A console manager constructed for this context."""\n        return self._log\n\n    @property\n    def node_handle(self) -> NodeHandle:\n        raise DagsterInvalidPropertyError(_property_msg("node_handle", "property"))\n\n    @property\n    def op(self) -> Node:\n        raise DagsterInvalidPropertyError(_property_msg("op", "property"))\n\n    @property\n    def op_def(self) -> OpDefinition:\n        return self._op_def\n\n    @property\n    def has_assets_def(self) -> bool:\n        return self._assets_def is not None\n\n    @property\n    def assets_def(self) -> AssetsDefinition:\n        if self._assets_def is None:\n            raise DagsterInvalidPropertyError(\n                f"Op {self.op_def.name} does not have an assets 
definition."\n            )\n        return self._assets_def\n\n    @property\n    def has_partition_key(self) -> bool:\n        return self._partition_key is not None\n\n    def has_tag(self, key: str) -> bool:\n        return key in self._tags\n\n    def get_tag(self, key: str) -> Optional[str]:\n        return self._tags.get(key)\n\n    @property\n    def alias(self) -> str:\n        return self._alias\n\n    def get_step_execution_context(self) -> StepExecutionContext:\n        raise DagsterInvalidPropertyError(_property_msg("get_step_execution_context", "methods"))\n\n    def for_type(self, dagster_type: DagsterType) -> TypeCheckContext:\n        resources = cast(NamedTuple, self.resources)\n        return TypeCheckContext(\n            self.run_id,\n            self.log,\n            ScopedResourcesBuilder(resources._asdict()),\n            dagster_type,\n        )\n\n    def get_mapping_key(self) -> Optional[str]:\n        return self._mapping_key\n\n    def describe_op(self) -> str:\n        if isinstance(self.op_def, OpDefinition):\n            return f'op "{self.op_def.name}"'\n\n        return f'solid "{self.op_def.name}"'\n\n    def log_event(self, event: UserEvent) -> None:\n        check.inst_param(\n            event,\n            "event",\n            (AssetMaterialization, AssetObservation, ExpectationResult),\n        )\n        self._user_events.append(event)\n\n    def observe_output(self, output_name: str, mapping_key: Optional[str] = None) -> None:\n        if mapping_key:\n            if output_name not in self._seen_outputs:\n                self._seen_outputs[output_name] = set()\n            cast(Set[str], self._seen_outputs[output_name]).add(mapping_key)\n        else:\n            self._seen_outputs[output_name] = "seen"\n\n    def has_seen_output(self, output_name: str, mapping_key: Optional[str] = None) -> bool:\n        if mapping_key:\n            return (\n                output_name in self._seen_outputs and mapping_key in self._seen_outputs[output_name]\n            )\n        return output_name in self._seen_outputs\n\n    @property\n    def partition_key(self) -> str:\n        if self._partition_key is not None:\n            return self._partition_key\n        check.failed("Tried to access partition_key for a non-partitioned asset")\n\n    @property\n    def partition_key_range(self) -> PartitionKeyRange:\n        """The range of partition keys for the current run.\n\n        If run is for a single partition key, return a `PartitionKeyRange` with the same start and\n        end. 
Raises an error if the current run is not a partitioned run.\n        """\n        if self._partition_key_range:\n            return self._partition_key_range\n        elif self._partition_key:\n            return PartitionKeyRange(self._partition_key, self._partition_key)\n        else:\n            check.failed("Tried to access partition_key range for a non-partitioned run")\n\n    def asset_partition_key_for_output(self, output_name: str = "result") -> str:\n        return self.partition_key\n\n    def asset_partitions_time_window_for_output(self, output_name: str = "result") -> TimeWindow:\n        partitions_def = self.assets_def.partitions_def\n        if partitions_def is None:\n            check.failed("Tried to access partition_key for a non-partitioned asset")\n\n        if not has_one_dimension_time_window_partitioning(partitions_def=partitions_def):\n            raise DagsterInvariantViolationError(\n                "Expected a TimeWindowPartitionsDefinition or MultiPartitionsDefinition with a"\n                f" single time dimension, but instead found {type(partitions_def)}"\n            )\n\n        return cast(\n            Union[MultiPartitionsDefinition, TimeWindowPartitionsDefinition], partitions_def\n        ).time_window_for_partition_key(self.partition_key)\n\n    def add_output_metadata(\n        self,\n        metadata: Mapping[str, Any],\n        output_name: Optional[str] = None,\n        mapping_key: Optional[str] = None,\n    ) -> None:\n        """Add metadata to one of the outputs of an op.\n\n        This can only be used once per output in the body of an op. Using this method with the same output_name more than once within an op will result in an error.\n\n        Args:\n            metadata (Mapping[str, Any]): The metadata to attach to the output\n            output_name (Optional[str]): The name of the output to attach metadata to. If there is only one output on the op, then this argument does not need to be provided. The metadata will automatically be attached to the only output.\n\n        **Examples:**\n\n        .. code-block:: python\n\n            from dagster import Out, op\n            from typing import Tuple\n\n            @op\n            def add_metadata(context):\n                context.add_output_metadata({"foo", "bar"})\n                return 5 # Since the default output is called "result", metadata will be attached to the output "result".\n\n            @op(out={"a": Out(), "b": Out()})\n            def add_metadata_two_outputs(context) -> Tuple[str, int]:\n                context.add_output_metadata({"foo": "bar"}, output_name="b")\n                context.add_output_metadata({"baz": "bat"}, output_name="a")\n\n                return ("dog", 5)\n\n        """\n        metadata = check.mapping_param(metadata, "metadata", key_type=str)\n        output_name = check.opt_str_param(output_name, "output_name")\n        mapping_key = check.opt_str_param(mapping_key, "mapping_key")\n\n        if output_name is None and len(self.op_def.output_defs) == 1:\n            output_def = self.op_def.output_defs[0]\n            output_name = output_def.name\n        elif output_name is None:\n            raise DagsterInvariantViolationError(\n                "Attempted to log metadata without providing output_name, but multiple outputs"\n                " exist. 
Please provide an output_name to the invocation of"\n                " `context.add_output_metadata`."\n            )\n        else:\n            output_def = self.op_def.output_def_named(output_name)\n\n        if self.has_seen_output(output_name, mapping_key):\n            output_desc = (\n                f"output '{output_def.name}'"\n                if not mapping_key\n                else f"output '{output_def.name}' with mapping_key '{mapping_key}'"\n            )\n            raise DagsterInvariantViolationError(\n                f"In {self.op_def.node_type_str} '{self.op_def.name}', attempted to log output"\n                f" metadata for {output_desc} which has already been yielded. Metadata must be"\n                " logged before the output is yielded."\n            )\n        if output_def.is_dynamic and not mapping_key:\n            raise DagsterInvariantViolationError(\n                f"In {self.op_def.node_type_str} '{self.op_def.name}', attempted to log metadata"\n                f" for dynamic output '{output_def.name}' without providing a mapping key. When"\n                " logging metadata for a dynamic output, it is necessary to provide a mapping key."\n            )\n\n        output_name = output_def.name\n        if output_name in self._output_metadata:\n            if not mapping_key or mapping_key in self._output_metadata[output_name]:\n                raise DagsterInvariantViolationError(\n                    f"In {self.op_def.node_type_str} '{self.op_def.name}', attempted to log"\n                    f" metadata for output '{output_name}' more than once."\n                )\n        if mapping_key:\n            if output_name not in self._output_metadata:\n                self._output_metadata[output_name] = {}\n            self._output_metadata[output_name][mapping_key] = metadata\n\n        else:\n            self._output_metadata[output_name] = metadata\n\n    # In this mode no conversion is done on returned values and missing but expected outputs are not\n    # allowed.\n    @property\n    def requires_typed_event_stream(self) -> bool:\n        return self._requires_typed_event_stream\n\n    @property\n    def typed_event_stream_error_message(self) -> Optional[str]:\n        return self._typed_event_stream_error_message\n\n    def set_requires_typed_event_stream(self, *, error_message: Optional[str]) -> None:\n        self._requires_typed_event_stream = True\n        self._typed_event_stream_error_message = error_message\n\n\n
[docs]def build_op_context(\n resources: Optional[Mapping[str, Any]] = None,\n op_config: Any = None,\n resources_config: Optional[Mapping[str, Any]] = None,\n instance: Optional[DagsterInstance] = None,\n config: Any = None,\n partition_key: Optional[str] = None,\n partition_key_range: Optional[PartitionKeyRange] = None,\n mapping_key: Optional[str] = None,\n _assets_def: Optional[AssetsDefinition] = None,\n) -> UnboundOpExecutionContext:\n """Builds op execution context from provided parameters.\n\n ``build_op_context`` can be used as either a function or context manager. If there is a\n provided resource that is a context manager, then ``build_op_context`` must be used as a\n context manager. This function can be used to provide the context argument when directly\n invoking a op.\n\n Args:\n resources (Optional[Dict[str, Any]]): The resources to provide to the context. These can be\n either values or resource definitions.\n op_config (Optional[Mapping[str, Any]]): The config to provide to the op.\n resources_config (Optional[Mapping[str, Any]]): The config to provide to the resources.\n instance (Optional[DagsterInstance]): The dagster instance configured for the context.\n Defaults to DagsterInstance.ephemeral().\n mapping_key (Optional[str]): A key representing the mapping key from an upstream dynamic\n output. Can be accessed using ``context.get_mapping_key()``.\n partition_key (Optional[str]): String value representing partition key to execute with.\n partition_key_range (Optional[PartitionKeyRange]): Partition key range to execute with.\n _assets_def (Optional[AssetsDefinition]): Internal argument that populates the op's assets\n definition, not meant to be populated by users.\n\n Examples:\n .. code-block:: python\n\n context = build_op_context()\n op_to_invoke(context)\n\n with build_op_context(resources={"foo": context_manager_resource}) as context:\n op_to_invoke(context)\n """\n if op_config and config:\n raise DagsterInvalidInvocationError(\n "Attempted to invoke ``build_op_context`` with both ``op_config``, and its "\n "legacy version, ``config``. Please provide one or the other."\n )\n\n op_config = op_config if op_config else config\n return UnboundOpExecutionContext(\n resources_dict=check.opt_mapping_param(resources, "resources", key_type=str),\n resources_config=check.opt_mapping_param(\n resources_config, "resources_config", key_type=str\n ),\n op_config=op_config,\n instance=check.opt_inst_param(instance, "instance", DagsterInstance),\n partition_key=check.opt_str_param(partition_key, "partition_key"),\n partition_key_range=check.opt_inst_param(\n partition_key_range, "partition_key_range", PartitionKeyRange\n ),\n mapping_key=check.opt_str_param(mapping_key, "mapping_key"),\n assets_def=check.opt_inst_param(_assets_def, "_assets_def", AssetsDefinition),\n )
\n\n\n
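The docstrings above describe direct op invocation with ``build_op_context`` and reading logged output metadata back via ``get_output_metadata``. A minimal sketch of that testing pattern, assuming a hypothetical op named ``emits_metadata`` and assuming the context returned by ``build_op_context`` exposes the metadata recorded during the invocation:

.. code-block:: python

    from dagster import op, build_op_context

    @op
    def emits_metadata(context):
        # Attach metadata to the default "result" output before returning.
        context.add_output_metadata({"row_count": 5})
        return 5

    def test_emits_metadata():
        context = build_op_context()
        emits_metadata(context)
        # Metadata logged during direct invocation can be read back off the context.
        assert context.get_output_metadata("result") == {"row_count": 5}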
[docs]def build_asset_context(\n resources: Optional[Mapping[str, Any]] = None,\n resources_config: Optional[Mapping[str, Any]] = None,\n asset_config: Optional[Mapping[str, Any]] = None,\n instance: Optional[DagsterInstance] = None,\n partition_key: Optional[str] = None,\n partition_key_range: Optional[PartitionKeyRange] = None,\n):\n """Builds asset execution context from provided parameters.\n\n ``build_asset_context`` can be used as either a function or context manager. If there is a\n provided resource that is a context manager, then ``build_asset_context`` must be used as a\n context manager. This function can be used to provide the context argument when directly\n invoking an asset.\n\n Args:\n resources (Optional[Dict[str, Any]]): The resources to provide to the context. These can be\n either values or resource definitions.\n resources_config (Optional[Mapping[str, Any]]): The config to provide to the resources.\n asset_config (Optional[Mapping[str, Any]]): The config to provide to the asset.\n instance (Optional[DagsterInstance]): The dagster instance configured for the context.\n Defaults to DagsterInstance.ephemeral().\n partition_key (Optional[str]): String value representing partition key to execute with.\n partition_key_range (Optional[PartitionKeyRange]): Partition key range to execute with.\n\n Examples:\n .. code-block:: python\n\n context = build_asset_context()\n asset_to_invoke(context)\n\n with build_asset_context(resources={"foo": context_manager_resource}) as context:\n asset_to_invoke(context)\n """\n return build_op_context(\n op_config=asset_config,\n resources=resources,\n resources_config=resources_config,\n partition_key=partition_key,\n partition_key_range=partition_key_range,\n instance=instance,\n )
\n
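``build_asset_context`` mirrors ``build_op_context`` for direct asset invocation. A minimal sketch of supplying a ``partition_key`` when testing a partitioned asset; the asset name ``events`` and the partitions definition are illustrative:

.. code-block:: python

    from dagster import DailyPartitionsDefinition, asset, build_asset_context

    @asset(partitions_def=DailyPartitionsDefinition(start_date="2023-01-01"))
    def events(context):
        # The partition being computed is available on the context.
        return context.partition_key

    def test_events_partition():
        context = build_asset_context(partition_key="2023-06-01")
        assert events(context) == "2023-06-01"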
", "current_page_name": "_modules/dagster/_core/execution/context/invocation", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.execution.context.invocation"}, "logger": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.execution.context.logger

\nfrom typing import Any, Optional\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.definitions.job_definition import JobDefinition\nfrom dagster._core.definitions.logger_definition import LoggerDefinition\nfrom dagster._core.errors import DagsterInvariantViolationError\n\nfrom .output import RUN_ID_PLACEHOLDER\n\n\n
[docs]class InitLoggerContext:\n """The context object available as the argument to the initialization function of a :py:class:`dagster.LoggerDefinition`.\n\n Users should not instantiate this object directly. To construct an\n `InitLoggerContext` for testing purposes, use :py:func:`dagster.\n build_init_logger_context`.\n\n Example:\n .. code-block:: python\n\n from dagster import logger, InitLoggerContext\n\n @logger\n def hello_world(init_context: InitLoggerContext):\n ...\n\n """\n\n def __init__(\n self,\n logger_config: Any,\n logger_def: Optional[LoggerDefinition] = None,\n job_def: Optional[JobDefinition] = None,\n run_id: Optional[str] = None,\n ):\n self._logger_config = logger_config\n self._job_def = check.opt_inst_param(job_def, "job_def", JobDefinition)\n self._logger_def = check.opt_inst_param(logger_def, "logger_def", LoggerDefinition)\n self._run_id = check.opt_str_param(run_id, "run_id")\n\n @public\n @property\n def logger_config(self) -> Any:\n """The configuration data provided by the run config. The\n schema for this data is defined by ``config_schema`` on the :py:class:`LoggerDefinition`.\n """\n return self._logger_config\n\n @property\n def job_def(self) -> Optional[JobDefinition]:\n """The job definition currently being executed."""\n return self._job_def\n\n @public\n @property\n def logger_def(self) -> Optional[LoggerDefinition]:\n """The logger definition for the logger being constructed."""\n return self._logger_def\n\n @public\n @property\n def run_id(self) -> Optional[str]:\n """The ID for this run of the job."""\n return self._run_id
\n\n\nclass UnboundInitLoggerContext(InitLoggerContext):\n """Logger initialization context outputted by ``build_init_logger_context``.\n\n Represents a context whose config has not yet been validated against a logger definition, hence\n the inability to access the `logger_def` attribute. When an instance of\n ``UnboundInitLoggerContext`` is passed to ``LoggerDefinition.initialize``, config is validated,\n and it is subsumed into an `InitLoggerContext`, which contains the logger_def validated against.\n """\n\n def __init__(self, logger_config: Any, job_def: Optional[JobDefinition]):\n super(UnboundInitLoggerContext, self).__init__(\n logger_config, logger_def=None, job_def=job_def, run_id=None\n )\n\n @property\n def logger_def(self) -> LoggerDefinition:\n raise DagsterInvariantViolationError(\n "UnboundInitLoggerContext has not been validated against a logger definition."\n )\n\n @property\n def run_id(self) -> Optional[str]:\n return RUN_ID_PLACEHOLDER\n
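``InitLoggerContext`` above is the argument passed to a ``@logger`` initialization function. A minimal sketch of a logger definition that reads its validated ``logger_config``; the logger name and config schema are chosen for illustration, and for testing the docstring points to ``dagster.build_init_logger_context``:

.. code-block:: python

    import logging

    from dagster import InitLoggerContext, logger

    @logger({"log_level": str}, description="Console logger configured via logger_config.")
    def console_logger(init_context: InitLoggerContext):
        # logger_config has been validated against the config schema declared above.
        level = init_context.logger_config["log_level"]
        python_logger = logging.getLogger("console_logger")
        python_logger.setLevel(level)
        python_logger.addHandler(logging.StreamHandler())
        return python_logger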
", "current_page_name": "_modules/dagster/_core/execution/context/logger", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.execution.context.logger"}, "output": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.execution.context.output

\nimport warnings\nfrom typing import (\n    TYPE_CHECKING,\n    Any,\n    ContextManager,\n    Iterator,\n    List,\n    Mapping,\n    Optional,\n    Sequence,\n    Union,\n    cast,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.definitions.asset_layer import AssetOutputInfo\nfrom dagster._core.definitions.events import (\n    AssetKey,\n    AssetMaterialization,\n    AssetObservation,\n    CoercibleToAssetKey,\n)\nfrom dagster._core.definitions.metadata import (\n    ArbitraryMetadataMapping,\n    MetadataValue,\n    RawMetadataValue,\n)\nfrom dagster._core.definitions.partition_key_range import PartitionKeyRange\nfrom dagster._core.definitions.time_window_partitions import TimeWindow\nfrom dagster._core.errors import DagsterInvalidMetadata, DagsterInvariantViolationError\nfrom dagster._core.execution.plan.utils import build_resources_for_manager\n\nif TYPE_CHECKING:\n    from dagster._core.definitions import JobDefinition, PartitionsDefinition\n    from dagster._core.definitions.op_definition import OpDefinition\n    from dagster._core.definitions.resource_definition import Resources\n    from dagster._core.events import DagsterEvent\n    from dagster._core.execution.context.system import StepExecutionContext\n    from dagster._core.execution.plan.outputs import StepOutputHandle\n    from dagster._core.execution.plan.plan import ExecutionPlan\n    from dagster._core.log_manager import DagsterLogManager\n    from dagster._core.system_config.objects import ResolvedRunConfig\n    from dagster._core.types.dagster_type import DagsterType\n\nRUN_ID_PLACEHOLDER = "__EPHEMERAL_RUN_ID"\n\n\n
[docs]class OutputContext:\n """The context object that is available to the `handle_output` method of an :py:class:`IOManager`.\n\n Users should not instantiate this object directly. To construct an\n `OutputContext` for testing an IO Manager's `handle_output` method, use\n :py:func:`dagster.build_output_context`.\n\n Example:\n .. code-block:: python\n\n from dagster import IOManager, OutputContext\n\n class MyIOManager(IOManager):\n def handle_output(self, context: OutputContext, obj):\n ...\n """\n\n _step_key: Optional[str]\n _name: Optional[str]\n _job_name: Optional[str]\n _run_id: Optional[str]\n _metadata: ArbitraryMetadataMapping\n _user_generated_metadata: Mapping[str, MetadataValue]\n _mapping_key: Optional[str]\n _config: object\n _op_def: Optional["OpDefinition"]\n _dagster_type: Optional["DagsterType"]\n _log: Optional["DagsterLogManager"]\n _version: Optional[str]\n _resource_config: Optional[Mapping[str, object]]\n _step_context: Optional["StepExecutionContext"]\n _asset_info: Optional[AssetOutputInfo]\n _warn_on_step_context_use: bool\n _resources: Optional["Resources"]\n _resources_cm: Optional[ContextManager["Resources"]]\n _resources_contain_cm: Optional[bool]\n _cm_scope_entered: Optional[bool]\n _events: List["DagsterEvent"]\n _user_events: List[Union[AssetMaterialization, AssetObservation]]\n\n def __init__(\n self,\n step_key: Optional[str] = None,\n name: Optional[str] = None,\n job_name: Optional[str] = None,\n run_id: Optional[str] = None,\n metadata: Optional[ArbitraryMetadataMapping] = None,\n mapping_key: Optional[str] = None,\n config: object = None,\n dagster_type: Optional["DagsterType"] = None,\n log_manager: Optional["DagsterLogManager"] = None,\n version: Optional[str] = None,\n resource_config: Optional[Mapping[str, object]] = None,\n resources: Optional[Union["Resources", Mapping[str, object]]] = None,\n step_context: Optional["StepExecutionContext"] = None,\n op_def: Optional["OpDefinition"] = None,\n asset_info: Optional[AssetOutputInfo] = None,\n warn_on_step_context_use: bool = False,\n partition_key: Optional[str] = None,\n ):\n from dagster._core.definitions.resource_definition import IContainsGenerator, Resources\n from dagster._core.execution.build_resources import build_resources\n\n self._step_key = step_key\n self._name = name\n self._job_name = job_name\n self._run_id = run_id\n self._metadata = metadata or {}\n self._mapping_key = mapping_key\n self._config = config\n self._op_def = op_def\n self._dagster_type = dagster_type\n self._log = log_manager\n self._version = version\n self._resource_config = resource_config\n self._step_context = step_context\n self._asset_info = asset_info\n self._warn_on_step_context_use = warn_on_step_context_use\n if self._step_context and self._step_context.has_partition_key:\n self._partition_key: Optional[str] = self._step_context.partition_key\n else:\n self._partition_key = partition_key\n\n if isinstance(resources, Resources):\n self._resources_cm = None\n self._resources = resources\n else:\n self._resources_cm = build_resources(\n check.opt_mapping_param(resources, "resources", key_type=str)\n )\n self._resources = self._resources_cm.__enter__()\n self._resources_contain_cm = isinstance(self._resources, IContainsGenerator)\n self._cm_scope_entered = False\n\n self._events = []\n self._user_events = []\n self._user_generated_metadata = {}\n\n def __enter__(self):\n if self._resources_cm:\n self._cm_scope_entered = True\n return self\n\n def __exit__(self, *exc):\n if self._resources_cm:\n 
self._resources_cm.__exit__(*exc)\n\n def __del__(self):\n if (\n hasattr(self, "_resources_cm")\n and self._resources_cm\n and self._resources_contain_cm\n and not self._cm_scope_entered\n ):\n self._resources_cm.__exit__(None, None, None)\n\n @public\n @property\n def step_key(self) -> str:\n """The step_key for the compute step that produced the output."""\n if self._step_key is None:\n raise DagsterInvariantViolationError(\n "Attempting to access step_key, "\n "but it was not provided when constructing the OutputContext"\n )\n\n return self._step_key\n\n @public\n @property\n def name(self) -> str:\n """The name of the output that produced the output."""\n if self._name is None:\n raise DagsterInvariantViolationError(\n "Attempting to access name, "\n "but it was not provided when constructing the OutputContext"\n )\n\n return self._name\n\n @property\n def job_name(self) -> str:\n if self._job_name is None:\n raise DagsterInvariantViolationError(\n "Attempting to access pipeline_name, "\n "but it was not provided when constructing the OutputContext"\n )\n\n return self._job_name\n\n @public\n @property\n def run_id(self) -> str:\n """The id of the run that produced the output."""\n if self._run_id is None:\n raise DagsterInvariantViolationError(\n "Attempting to access run_id, "\n "but it was not provided when constructing the OutputContext"\n )\n\n return self._run_id\n\n @public\n @property\n def metadata(self) -> Optional[ArbitraryMetadataMapping]:\n """A dict of the metadata that is assigned to the OutputDefinition that produced\n the output.\n """\n return self._metadata\n\n @public\n @property\n def mapping_key(self) -> Optional[str]:\n """The key that identifies a unique mapped output. None for regular outputs."""\n return self._mapping_key\n\n @public\n @property\n def config(self) -> Any:\n """The configuration for the output."""\n return self._config\n\n @public\n @property\n def op_def(self) -> "OpDefinition":\n """The definition of the op that produced the output."""\n from dagster._core.definitions import OpDefinition\n\n if self._op_def is None:\n raise DagsterInvariantViolationError(\n "Attempting to access op_def, "\n "but it was not provided when constructing the OutputContext"\n )\n\n return cast(OpDefinition, self._op_def)\n\n @public\n @property\n def dagster_type(self) -> "DagsterType":\n """The type of this output."""\n if self._dagster_type is None:\n raise DagsterInvariantViolationError(\n "Attempting to access dagster_type, "\n "but it was not provided when constructing the OutputContext"\n )\n\n return self._dagster_type\n\n @public\n @property\n def log(self) -> "DagsterLogManager":\n """The log manager to use for this output."""\n if self._log is None:\n raise DagsterInvariantViolationError(\n "Attempting to access log, "\n "but it was not provided when constructing the OutputContext"\n )\n\n return self._log\n\n @public\n @property\n def version(self) -> Optional[str]:\n """(Experimental) The version of the output."""\n return self._version\n\n @public\n @property\n def resource_config(self) -> Optional[Mapping[str, object]]:\n """The config associated with the resource that initializes the InputManager."""\n return self._resource_config\n\n @public\n @property\n def resources(self) -> Any:\n """The resources required by the output manager, specified by the `required_resource_keys`\n parameter.\n """\n if self._resources is None:\n raise DagsterInvariantViolationError(\n "Attempting to access resources, "\n "but it was not provided when constructing the 
OutputContext"\n )\n\n if self._resources_cm and self._resources_contain_cm and not self._cm_scope_entered:\n raise DagsterInvariantViolationError(\n "At least one provided resource is a generator, but attempting to access "\n "resources outside of context manager scope. You can use the following syntax to "\n "open a context manager: `with build_output_context(...) as context:`"\n )\n return self._resources\n\n @property\n def asset_info(self) -> Optional[AssetOutputInfo]:\n """(Experimental) Asset info corresponding to the output."""\n return self._asset_info\n\n @public\n @property\n def has_asset_key(self) -> bool:\n """Returns True if an asset is being stored, otherwise returns False. A return value of False\n indicates that an output from an op is being stored.\n """\n return self._asset_info is not None\n\n @public\n @property\n def asset_key(self) -> AssetKey:\n """The ``AssetKey`` of the asset that is being stored as an output."""\n if self._asset_info is None:\n raise DagsterInvariantViolationError(\n "Attempting to access asset_key, "\n "but it was not provided when constructing the OutputContext"\n )\n\n return self._asset_info.key\n\n @public\n @property\n def asset_partitions_def(self) -> "PartitionsDefinition":\n """The PartitionsDefinition on the asset corresponding to this output."""\n asset_key = self.asset_key\n result = self.step_context.job_def.asset_layer.partitions_def_for_asset(asset_key)\n if result is None:\n raise DagsterInvariantViolationError(\n f"Attempting to access partitions def for asset {asset_key}, but it is not"\n " partitioned"\n )\n\n return result\n\n @property\n def step_context(self) -> "StepExecutionContext":\n if self._warn_on_step_context_use:\n warnings.warn(\n "You are using InputContext.upstream_output.step_context"\n "This use on upstream_output is deprecated and will fail in the future"\n "Try to obtain what you need directly from InputContext"\n "For more details: https://github.com/dagster-io/dagster/issues/7900"\n )\n\n if self._step_context is None:\n raise DagsterInvariantViolationError(\n "Attempting to access step_context, "\n "but it was not provided when constructing the OutputContext"\n )\n\n return self._step_context\n\n @public\n @property\n def has_partition_key(self) -> bool:\n """Whether the current run is a partitioned run."""\n if self._warn_on_step_context_use:\n warnings.warn(\n "You are using InputContext.upstream_output.has_partition_key"\n "This use on upstream_output is deprecated and will fail in the future"\n "Try to obtain what you need directly from InputContext"\n "For more details: https://github.com/dagster-io/dagster/issues/7900"\n )\n\n return self._partition_key is not None\n\n @public\n @property\n def partition_key(self) -> str:\n """The partition key for the current run.\n\n Raises an error if the current run is not a partitioned run.\n """\n if self._warn_on_step_context_use:\n warnings.warn(\n "You are using InputContext.upstream_output.partition_key"\n "This use on upstream_output is deprecated and will fail in the future"\n "Try to obtain what you need directly from InputContext"\n "For more details: https://github.com/dagster-io/dagster/issues/7900"\n )\n\n if self._partition_key is None:\n check.failed(\n "Tried to access partition_key on a non-partitioned run.",\n )\n\n return self._partition_key\n\n @public\n @property\n def has_asset_partitions(self) -> bool:\n """Returns True if the asset being stored is partitioned."""\n if self._warn_on_step_context_use:\n warnings.warn(\n "You are using 
InputContext.upstream_output.has_asset_partitions"\n "This use on upstream_output is deprecated and will fail in the future"\n "Try to obtain what you need directly from InputContext"\n "For more details: https://github.com/dagster-io/dagster/issues/7900"\n )\n\n if self._step_context is not None:\n return self._step_context.has_asset_partitions_for_output(self.name)\n else:\n return False\n\n @public\n @property\n def asset_partition_key(self) -> str:\n """The partition key for output asset.\n\n Raises an error if the output asset has no partitioning, or if the run covers a partition\n range for the output asset.\n """\n if self._warn_on_step_context_use:\n warnings.warn(\n "You are using InputContext.upstream_output.asset_partition_key"\n "This use on upstream_output is deprecated and will fail in the future"\n "Try to obtain what you need directly from InputContext"\n "For more details: https://github.com/dagster-io/dagster/issues/7900"\n )\n\n return self.step_context.asset_partition_key_for_output(self.name)\n\n @public\n @property\n def asset_partition_key_range(self) -> PartitionKeyRange:\n """The partition key range for output asset.\n\n Raises an error if the output asset has no partitioning.\n """\n if self._warn_on_step_context_use:\n warnings.warn(\n "You are using InputContext.upstream_output.asset_partition_key_range"\n "This use on upstream_output is deprecated and will fail in the future"\n "Try to obtain what you need directly from InputContext"\n "For more details: https://github.com/dagster-io/dagster/issues/7900"\n )\n\n return self.step_context.asset_partition_key_range_for_output(self.name)\n\n @public\n @property\n def asset_partition_keys(self) -> Sequence[str]:\n """The partition keys for the output asset.\n\n Raises an error if the output asset has no partitioning.\n """\n if self._warn_on_step_context_use:\n warnings.warn(\n "You are using InputContext.upstream_output.asset_partition_keys"\n "This use on upstream_output is deprecated and will fail in the future"\n "Try to obtain what you need directly from InputContext"\n "For more details: https://github.com/dagster-io/dagster/issues/7900"\n )\n\n return self.asset_partitions_def.get_partition_keys_in_range(\n self.step_context.asset_partition_key_range_for_output(self.name),\n dynamic_partitions_store=self.step_context.instance,\n )\n\n @public\n @property\n def asset_partitions_time_window(self) -> TimeWindow:\n """The time window for the partitions of the output asset.\n\n Raises an error if either of the following are true:\n - The output asset has no partitioning.\n - The output asset is not partitioned with a TimeWindowPartitionsDefinition or a\n MultiPartitionsDefinition with one time-partitioned dimension.\n """\n if self._warn_on_step_context_use:\n warnings.warn(\n "You are using InputContext.upstream_output.asset_partitions_time_window"\n "This use on upstream_output is deprecated and will fail in the future"\n "Try to obtain what you need directly from InputContext"\n "For more details: https://github.com/dagster-io/dagster/issues/7900"\n )\n\n return self.step_context.asset_partitions_time_window_for_output(self.name)\n\n def get_run_scoped_output_identifier(self) -> Sequence[str]:\n """Utility method to get a collection of identifiers that as a whole represent a unique\n step output.\n\n The unique identifier collection consists of\n\n - ``run_id``: the id of the run which generates the output.\n Note: This method also handles the re-execution memoization logic. 
If the step that\n generates the output is skipped in the re-execution, the ``run_id`` will be the id\n of its parent run.\n - ``step_key``: the key for a compute step.\n - ``name``: the name of the output. (default: 'result').\n\n Returns:\n Sequence[str, ...]: A list of identifiers, i.e. run id, step key, and output name\n """\n warnings.warn(\n "`OutputContext.get_run_scoped_output_identifier` is deprecated. Use "\n "`OutputContext.get_identifier` instead."\n )\n # if run_id is None and this is a re-execution, it means we failed to find its source run id\n check.invariant(\n self.run_id is not None,\n "Unable to find the run scoped output identifier: run_id is None on OutputContext.",\n )\n check.invariant(\n self.step_key is not None,\n "Unable to find the run scoped output identifier: step_key is None on OutputContext.",\n )\n check.invariant(\n self.name is not None,\n "Unable to find the run scoped output identifier: name is None on OutputContext.",\n )\n run_id = cast(str, self.run_id)\n step_key = cast(str, self.step_key)\n name = cast(str, self.name)\n\n if self.mapping_key:\n return [run_id, step_key, name, self.mapping_key]\n\n return [run_id, step_key, name]\n\n
[docs] @public\n def get_identifier(self) -> Sequence[str]:\n """Utility method to get a collection of identifiers that as a whole represent a unique\n step output.\n\n If not using memoization, the unique identifier collection consists of\n\n - ``run_id``: the id of the run which generates the output.\n Note: This method also handles the re-execution memoization logic. If the step that\n generates the output is skipped in the re-execution, the ``run_id`` will be the id\n of its parent run.\n - ``step_key``: the key for a compute step.\n - ``name``: the name of the output. (default: 'result').\n\n If using memoization, the ``version`` corresponding to the step output is used in place of\n the ``run_id``.\n\n Returns:\n Sequence[str, ...]: A list of identifiers, i.e. (run_id or version), step_key, and output_name\n """\n version = self.version\n step_key = self.step_key\n name = self.name\n if version is not None:\n check.invariant(\n self.mapping_key is None,\n f"Mapping key and version both provided for output '{name}' of step"\n f" '{step_key}'. Dynamic mapping is not supported when using versioning.",\n )\n identifier = ["versioned_outputs", version, step_key, name]\n else:\n run_id = self.run_id\n identifier = [run_id, step_key, name]\n if self.mapping_key:\n identifier.append(self.mapping_key)\n\n return identifier
\n\n def get_output_identifier(self) -> Sequence[str]:\n warnings.warn(\n "`OutputContext.get_output_identifier` is deprecated. Use "\n "`OutputContext.get_identifier` instead."\n )\n\n return self.get_identifier()\n\n
[docs] @public\n def get_asset_identifier(self) -> Sequence[str]:\n """The sequence of strings making up the AssetKey for the asset being stored as an output.\n If the asset is partitioned, the identifier contains the partition key as the final element in the\n sequence. For example, for the asset key ``AssetKey(["foo", "bar", "baz"])`` materialized with\n partition key "2023-06-01", ``get_asset_identifier`` will return ``["foo", "bar", "baz", "2023-06-01"]``.\n """\n if self.asset_key is not None:\n if self.has_asset_partitions:\n return [*self.asset_key.path, self.asset_partition_key]\n else:\n return self.asset_key.path\n else:\n check.failed("Can't get asset output identifier for an output with no asset key")
\n\n def get_asset_output_identifier(self) -> Sequence[str]:\n warnings.warn(\n "`OutputContext.get_asset_output_identifier` is deprecated. Use "\n "`OutputContext.get_asset_identifier` instead."\n )\n\n return self.get_asset_identifier()\n\n
[docs] @public\n def log_event(self, event: Union[AssetObservation, AssetMaterialization]) -> None:\n """Log an AssetMaterialization or AssetObservation from within the body of an io manager's `handle_output` method.\n\n Events logged with this method will appear in the event log.\n\n Args:\n event (Union[AssetMaterialization, AssetObservation]): The event to log.\n\n Examples:\n .. code-block:: python\n\n from dagster import IOManager, AssetMaterialization\n\n class MyIOManager(IOManager):\n def handle_output(self, context, obj):\n context.log_event(AssetMaterialization("foo"))\n """\n from dagster._core.events import DagsterEvent\n\n if isinstance(event, (AssetMaterialization)):\n if self._step_context:\n self._events.append(DagsterEvent.asset_materialization(self._step_context, event))\n self._user_events.append(event)\n elif isinstance(event, AssetObservation):\n if self._step_context:\n self._events.append(DagsterEvent.asset_observation(self._step_context, event))\n self._user_events.append(event)\n else:\n check.failed(f"Unexpected event {event}")
\n\n def consume_events(self) -> Iterator["DagsterEvent"]:\n """Pops and yields all user-generated events that have been recorded from this context.\n\n If consume_events has not yet been called, this will yield all logged events since the call to `handle_output`. If consume_events has been called, it will yield all events since the last time consume_events was called. Designed for internal use. Users should never need to invoke this method.\n """\n events = self._events\n self._events = []\n yield from events\n\n def get_logged_events(\n self,\n ) -> Sequence[Union[AssetMaterialization, AssetObservation]]:\n """Retrieve the list of user-generated events that were logged via the context.\n\n\n User-generated events that were yielded will not appear in this list.\n\n **Examples:**\n\n .. code-block:: python\n\n from dagster import IOManager, build_output_context, AssetMaterialization\n\n class MyIOManager(IOManager):\n def handle_output(self, context, obj):\n ...\n\n def test_handle_output():\n mgr = MyIOManager()\n context = build_output_context()\n mgr.handle_output(context)\n all_user_events = context.get_logged_events()\n materializations = [event for event in all_user_events if isinstance(event, AssetMaterialization)]\n ...\n """\n return self._user_events\n\n
[docs] @public\n def add_output_metadata(self, metadata: Mapping[str, RawMetadataValue]) -> None:\n """Add a dictionary of metadata to the handled output.\n\n Metadata entries added will show up in the HANDLED_OUTPUT and ASSET_MATERIALIZATION events for the run.\n\n Args:\n metadata (Mapping[str, RawMetadataValue]): A metadata dictionary to log\n\n Examples:\n .. code-block:: python\n\n from dagster import IOManager\n\n class MyIOManager(IOManager):\n def handle_output(self, context, obj):\n context.add_output_metadata({"foo": "bar"})\n """\n from dagster._core.definitions.metadata import normalize_metadata\n\n overlapping_labels = set(self._user_generated_metadata.keys()) & metadata.keys()\n if overlapping_labels:\n raise DagsterInvalidMetadata(\n f"Tried to add metadata for key(s) that already have metadata: {overlapping_labels}"\n )\n\n self._user_generated_metadata = {\n **self._user_generated_metadata,\n **normalize_metadata(metadata),\n }
\n\n def get_logged_metadata(\n self,\n ) -> Mapping[str, MetadataValue]:\n """Get the mapping of metadata entries that have been logged for use with this output."""\n return self._user_generated_metadata\n\n def consume_logged_metadata(\n self,\n ) -> Mapping[str, MetadataValue]:\n """Pops and yields all user-generated metadata entries that have been recorded from this context.\n\n If consume_logged_metadata has not yet been called, this will yield all logged events since\n the call to `handle_output`. If consume_logged_metadata has been called, it will yield all\n events since the last time consume_logged_metadata_entries was called. Designed for internal\n use. Users should never need to invoke this method.\n """\n result = self._user_generated_metadata\n self._user_generated_metadata = {}\n return result or {}
\n\n\ndef get_output_context(\n execution_plan: "ExecutionPlan",\n job_def: "JobDefinition",\n resolved_run_config: "ResolvedRunConfig",\n step_output_handle: "StepOutputHandle",\n run_id: Optional[str],\n log_manager: Optional["DagsterLogManager"],\n step_context: Optional["StepExecutionContext"],\n resources: Optional["Resources"],\n version: Optional[str],\n warn_on_step_context_use: bool = False,\n) -> "OutputContext":\n """Args:\n run_id (str): The run ID of the run that produced the output, not necessarily the run that\n the context will be used in.\n """\n step = execution_plan.get_step_by_key(step_output_handle.step_key)\n # get config\n op_config = resolved_run_config.ops[step.node_handle.to_string()]\n outputs_config = op_config.outputs\n\n if outputs_config:\n output_config = outputs_config.get_output_manager_config(step_output_handle.output_name)\n else:\n output_config = None\n\n step_output = execution_plan.get_step_output(step_output_handle)\n output_def = job_def.get_node(step_output.node_handle).output_def_named(step_output.name)\n\n io_manager_key = output_def.io_manager_key\n resource_config = resolved_run_config.resources[io_manager_key].config\n\n node_handle = execution_plan.get_step_by_key(step.key).node_handle\n asset_info = job_def.asset_layer.asset_info_for_output(\n node_handle=node_handle, output_name=step_output.name\n )\n if asset_info is not None:\n metadata = job_def.asset_layer.metadata_for_asset(asset_info.key) or output_def.metadata\n else:\n metadata = output_def.metadata\n\n if step_context:\n check.invariant(\n not resources,\n "Expected either resources or step context to be set, but "\n "received both. If step context is provided, resources for IO manager will be "\n "retrieved off of that.",\n )\n resources = build_resources_for_manager(io_manager_key, step_context)\n\n return OutputContext(\n step_key=step_output_handle.step_key,\n name=step_output_handle.output_name,\n job_name=job_def.name,\n run_id=run_id,\n metadata=metadata,\n mapping_key=step_output_handle.mapping_key,\n config=output_config,\n op_def=job_def.get_node(step.node_handle).definition, # type: ignore # (should be OpDefinition not NodeDefinition)\n dagster_type=output_def.dagster_type,\n log_manager=log_manager,\n version=version,\n step_context=step_context,\n resource_config=resource_config,\n resources=resources,\n asset_info=asset_info,\n warn_on_step_context_use=warn_on_step_context_use,\n )\n\n\ndef step_output_version(\n job_def: "JobDefinition",\n execution_plan: "ExecutionPlan",\n resolved_run_config: "ResolvedRunConfig",\n step_output_handle: "StepOutputHandle",\n) -> Optional[str]:\n from dagster._core.execution.resolve_versions import resolve_step_output_versions\n\n step_output_versions = resolve_step_output_versions(\n job_def, execution_plan, resolved_run_config\n )\n return (\n step_output_versions[step_output_handle]\n if step_output_handle in step_output_versions\n else None\n )\n\n\n
[docs]def build_output_context(\n step_key: Optional[str] = None,\n name: Optional[str] = None,\n metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n run_id: Optional[str] = None,\n mapping_key: Optional[str] = None,\n config: Optional[Any] = None,\n dagster_type: Optional["DagsterType"] = None,\n version: Optional[str] = None,\n resource_config: Optional[Mapping[str, object]] = None,\n resources: Optional[Mapping[str, object]] = None,\n op_def: Optional["OpDefinition"] = None,\n asset_key: Optional[CoercibleToAssetKey] = None,\n partition_key: Optional[str] = None,\n) -> "OutputContext":\n """Builds output context from provided parameters.\n\n ``build_output_context`` can be used as either a function, or a context manager. If resources\n that are also context managers are provided, then ``build_output_context`` must be used as a\n context manager.\n\n Args:\n step_key (Optional[str]): The step_key for the compute step that produced the output.\n name (Optional[str]): The name of the output that produced the output.\n metadata (Optional[Mapping[str, Any]]): A dict of the metadata that is assigned to the\n OutputDefinition that produced the output.\n mapping_key (Optional[str]): The key that identifies a unique mapped output. None for regular outputs.\n config (Optional[Any]): The configuration for the output.\n dagster_type (Optional[DagsterType]): The type of this output.\n version (Optional[str]): (Experimental) The version of the output.\n resource_config (Optional[Mapping[str, Any]]): The resource config to make available from the\n input context. This usually corresponds to the config provided to the resource that\n loads the output manager.\n resources (Optional[Resources]): The resources to make available from the context.\n For a given key, you can provide either an actual instance of an object, or a resource\n definition.\n op_def (Optional[OpDefinition]): The definition of the op that produced the output.\n asset_key: Optional[Union[AssetKey, Sequence[str], str]]: The asset key corresponding to the\n output.\n partition_key: Optional[str]: String value representing partition key to execute with.\n\n Examples:\n .. 
code-block:: python\n\n build_output_context()\n\n with build_output_context(resources={"foo": context_manager_resource}) as context:\n do_something\n\n """\n from dagster._core.definitions import OpDefinition\n from dagster._core.execution.context_creation_job import initialize_console_manager\n from dagster._core.types.dagster_type import DagsterType\n\n step_key = check.opt_str_param(step_key, "step_key")\n name = check.opt_str_param(name, "name")\n metadata = check.opt_mapping_param(metadata, "metadata", key_type=str)\n run_id = check.opt_str_param(run_id, "run_id", default=RUN_ID_PLACEHOLDER)\n mapping_key = check.opt_str_param(mapping_key, "mapping_key")\n dagster_type = check.opt_inst_param(dagster_type, "dagster_type", DagsterType)\n version = check.opt_str_param(version, "version")\n resource_config = check.opt_mapping_param(resource_config, "resource_config", key_type=str)\n resources = check.opt_mapping_param(resources, "resources", key_type=str)\n op_def = check.opt_inst_param(op_def, "op_def", OpDefinition)\n asset_key = AssetKey.from_coercible(asset_key) if asset_key else None\n partition_key = check.opt_str_param(partition_key, "partition_key")\n\n return OutputContext(\n step_key=step_key,\n name=name,\n job_name=None,\n run_id=run_id,\n metadata=metadata,\n mapping_key=mapping_key,\n config=config,\n dagster_type=dagster_type,\n log_manager=initialize_console_manager(None),\n version=version,\n resource_config=resource_config,\n resources=resources,\n step_context=None,\n op_def=op_def,\n asset_info=AssetOutputInfo(key=asset_key) if asset_key else None,\n partition_key=partition_key,\n )
\n
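``build_output_context`` is intended for exercising an IO manager's ``handle_output`` outside of a run. A minimal sketch, assuming a hypothetical ``MyIOManager`` and illustrative step/output names, that checks metadata logged through ``OutputContext.add_output_metadata`` via ``get_logged_metadata``:

.. code-block:: python

    from dagster import IOManager, build_output_context

    class MyIOManager(IOManager):
        def handle_output(self, context, obj):
            # Record a simple statistic about the stored object as output metadata.
            context.add_output_metadata({"num_rows": len(obj)})

        def load_input(self, context):
            ...

    def test_handle_output_metadata():
        context = build_output_context(name="result", step_key="my_step")
        MyIOManager().handle_output(context, [1, 2, 3])
        assert "num_rows" in context.get_logged_metadata()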
", "current_page_name": "_modules/dagster/_core/execution/context/output", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.execution.context.output"}, "system": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.execution.context.system

\n"""This module contains the execution context objects that are internal to the system.\nNot every property on these should be exposed to random Jane or Joe dagster user\nso we have a different layer of objects that encode the explicit public API\nin the user_context module.\n"""\nfrom abc import ABC, abstractmethod\nfrom dataclasses import dataclass\nfrom hashlib import sha256\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Dict,\n    Iterable,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Union,\n    cast,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.definitions.data_version import (\n    DATA_VERSION_TAG,\n    SKIP_PARTITION_DATA_VERSION_DEPENDENCY_THRESHOLD,\n    extract_data_version_from_entry,\n)\nfrom dagster._core.definitions.dependency import OpNode\nfrom dagster._core.definitions.events import AssetKey, AssetLineageInfo\nfrom dagster._core.definitions.hook_definition import HookDefinition\nfrom dagster._core.definitions.job_base import IJob\nfrom dagster._core.definitions.job_definition import JobDefinition\nfrom dagster._core.definitions.multi_dimensional_partitions import MultiPartitionsDefinition\nfrom dagster._core.definitions.op_definition import OpDefinition\nfrom dagster._core.definitions.partition import PartitionsDefinition, PartitionsSubset\nfrom dagster._core.definitions.partition_key_range import PartitionKeyRange\nfrom dagster._core.definitions.partition_mapping import (\n    PartitionMapping,\n    infer_partition_mapping,\n)\nfrom dagster._core.definitions.policy import RetryPolicy\nfrom dagster._core.definitions.reconstruct import ReconstructableJob\nfrom dagster._core.definitions.resource_definition import ScopedResourcesBuilder\nfrom dagster._core.definitions.step_launcher import StepLauncher\nfrom dagster._core.definitions.time_window_partitions import (\n    TimeWindow,\n    TimeWindowPartitionsDefinition,\n    has_one_dimension_time_window_partitioning,\n)\nfrom dagster._core.errors import DagsterInvariantViolationError\nfrom dagster._core.execution.plan.handle import ResolvedFromDynamicStepHandle, StepHandle\nfrom dagster._core.execution.plan.outputs import StepOutputHandle\nfrom dagster._core.execution.plan.step import ExecutionStep\nfrom dagster._core.execution.retries import RetryMode\nfrom dagster._core.executor.base import Executor\nfrom dagster._core.log_manager import DagsterLogManager\nfrom dagster._core.storage.dagster_run import DagsterRun\nfrom dagster._core.storage.io_manager import IOManager\nfrom dagster._core.storage.tags import (\n    ASSET_PARTITION_RANGE_END_TAG,\n    ASSET_PARTITION_RANGE_START_TAG,\n    MULTIDIMENSIONAL_PARTITION_PREFIX,\n    PARTITION_NAME_TAG,\n)\nfrom dagster._core.system_config.objects import ResolvedRunConfig\nfrom dagster._core.types.dagster_type import DagsterType\n\nfrom .input import InputContext\nfrom .output import OutputContext, get_output_context\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.data_version import (\n        DataVersion,\n    )\n    from dagster._core.definitions.dependency import NodeHandle\n    from dagster._core.definitions.resource_definition import Resources\n    from dagster._core.event_api import EventLogRecord\n    from dagster._core.execution.plan.plan import ExecutionPlan\n    from dagster._core.execution.plan.state import KnownExecutionState\n    from dagster._core.instance import DagsterInstance\n\n    from .hook import HookContext\n\n\ndef 
is_iterable(obj: Any) -> bool:\n    try:\n        iter(obj)\n    except:\n        return False\n    return True\n\n\nclass IPlanContext(ABC):\n    """Context interface to represent run information that does not require access to user code.\n\n    The information available via this interface is accessible to the system throughout a run.\n    """\n\n    @property\n    @abstractmethod\n    def plan_data(self) -> "PlanData":\n        raise NotImplementedError()\n\n    @property\n    def job(self) -> IJob:\n        return self.plan_data.job\n\n    @property\n    def dagster_run(self) -> DagsterRun:\n        return self.plan_data.dagster_run\n\n    @property\n    def run_id(self) -> str:\n        return self.dagster_run.run_id\n\n    @property\n    def run_config(self) -> Mapping[str, object]:\n        return self.dagster_run.run_config\n\n    @property\n    def job_name(self) -> str:\n        return self.dagster_run.job_name\n\n    @property\n    def instance(self) -> "DagsterInstance":\n        return self.plan_data.instance\n\n    @property\n    def raise_on_error(self) -> bool:\n        return self.plan_data.raise_on_error\n\n    @property\n    def retry_mode(self) -> RetryMode:\n        return self.plan_data.retry_mode\n\n    @property\n    def execution_plan(self) -> "ExecutionPlan":\n        return self.plan_data.execution_plan\n\n    @property\n    @abstractmethod\n    def output_capture(self) -> Optional[Mapping[StepOutputHandle, Any]]:\n        raise NotImplementedError()\n\n    @property\n    def log(self) -> DagsterLogManager:\n        raise NotImplementedError()\n\n    @property\n    def logging_tags(self) -> Mapping[str, str]:\n        return self.log.logging_metadata.all_tags()\n\n    @property\n    def event_tags(self) -> Mapping[str, str]:\n        return self.log.logging_metadata.event_tags()\n\n    def has_tag(self, key: str) -> bool:\n        check.str_param(key, "key")\n        return key in self.dagster_run.tags\n\n    def get_tag(self, key: str) -> Optional[str]:\n        check.str_param(key, "key")\n        return self.dagster_run.tags.get(key)\n\n    @property\n    def run_tags(self) -> Mapping[str, str]:\n        return self.dagster_run.tags\n\n\nclass PlanData(NamedTuple):\n    """The data about a run that is available during both orchestration and execution.\n\n    This object does not contain any information that requires access to user code, such as the\n    pipeline definition and resources.\n    """\n\n    job: IJob\n    dagster_run: DagsterRun\n    instance: "DagsterInstance"\n    execution_plan: "ExecutionPlan"\n    raise_on_error: bool = False\n    retry_mode: RetryMode = RetryMode.DISABLED\n\n\nclass ExecutionData(NamedTuple):\n    """The data that is available to the system during execution.\n\n    This object contains information that requires access to user code, such as the pipeline\n    definition and resources.\n    """\n\n    scoped_resources_builder: ScopedResourcesBuilder\n    resolved_run_config: ResolvedRunConfig\n    job_def: JobDefinition\n\n\nclass IStepContext(IPlanContext):\n    """Interface to represent data to be available during either step orchestration or execution."""\n\n    @property\n    @abstractmethod\n    def step(self) -> ExecutionStep:\n        raise NotImplementedError()\n\n    @property\n    @abstractmethod\n    def node_handle(self) -> "NodeHandle":\n        raise NotImplementedError()\n\n\nclass PlanOrchestrationContext(IPlanContext):\n    """Context for the orchestration of a run.\n\n    This context assumes inability to run 
user code directly.\n    """\n\n    def __init__(\n        self,\n        plan_data: PlanData,\n        log_manager: DagsterLogManager,\n        executor: Executor,\n        output_capture: Optional[Dict[StepOutputHandle, Any]],\n        resume_from_failure: bool = False,\n    ):\n        self._plan_data = plan_data\n        self._log_manager = log_manager\n        self._executor = executor\n        self._output_capture = output_capture\n        self._resume_from_failure = resume_from_failure\n\n    @property\n    def plan_data(self) -> PlanData:\n        return self._plan_data\n\n    @property\n    def reconstructable_job(self) -> ReconstructableJob:\n        if not isinstance(self.job, ReconstructableJob):\n            raise DagsterInvariantViolationError(\n                "reconstructable_pipeline property must be a ReconstructableJob"\n            )\n        return self.job\n\n    @property\n    def log(self) -> DagsterLogManager:\n        return self._log_manager\n\n    @property\n    def executor(self) -> Executor:\n        return self._executor\n\n    @property\n    def output_capture(self) -> Optional[Dict[StepOutputHandle, Any]]:\n        return self._output_capture\n\n    def for_step(self, step: ExecutionStep) -> "IStepContext":\n        return StepOrchestrationContext(\n            plan_data=self.plan_data,\n            log_manager=self._log_manager.with_tags(**step.logging_tags),\n            executor=self.executor,\n            step=step,\n            output_capture=self.output_capture,\n        )\n\n    @property\n    def resume_from_failure(self) -> bool:\n        return self._resume_from_failure\n\n\nclass StepOrchestrationContext(PlanOrchestrationContext, IStepContext):\n    """Context for the orchestration of a step.\n\n    This context assumes inability to run user code directly. 
Thus, it does not include any resource\n    information.\n    """\n\n    def __init__(\n        self,\n        plan_data: PlanData,\n        log_manager: DagsterLogManager,\n        executor: Executor,\n        step: ExecutionStep,\n        output_capture: Optional[Dict[StepOutputHandle, Any]],\n    ):\n        super(StepOrchestrationContext, self).__init__(\n            plan_data, log_manager, executor, output_capture\n        )\n        self._step = step\n\n    @property\n    def step(self) -> ExecutionStep:\n        return self._step\n\n    @property\n    def node_handle(self) -> "NodeHandle":\n        return self.step.node_handle\n\n\nclass PlanExecutionContext(IPlanContext):\n    """Context for the execution of a plan.\n\n    This context assumes that user code can be run directly, and thus includes resource and\n    information.\n    """\n\n    def __init__(\n        self,\n        plan_data: PlanData,\n        execution_data: ExecutionData,\n        log_manager: DagsterLogManager,\n        output_capture: Optional[Dict[StepOutputHandle, Any]] = None,\n    ):\n        self._plan_data = plan_data\n        self._execution_data = execution_data\n        self._log_manager = log_manager\n        self._output_capture = output_capture\n\n    @property\n    def plan_data(self) -> PlanData:\n        return self._plan_data\n\n    @property\n    def output_capture(self) -> Optional[Dict[StepOutputHandle, Any]]:\n        return self._output_capture\n\n    def for_step(\n        self,\n        step: ExecutionStep,\n        known_state: Optional["KnownExecutionState"] = None,\n    ) -> IStepContext:\n        return StepExecutionContext(\n            plan_data=self.plan_data,\n            execution_data=self._execution_data,\n            log_manager=self._log_manager.with_tags(**step.logging_tags),\n            step=step,\n            output_capture=self.output_capture,\n            known_state=known_state,\n        )\n\n    @property\n    def job_def(self) -> JobDefinition:\n        return self._execution_data.job_def\n\n    @property\n    def resolved_run_config(self) -> ResolvedRunConfig:\n        return self._execution_data.resolved_run_config\n\n    @property\n    def scoped_resources_builder(self) -> ScopedResourcesBuilder:\n        return self._execution_data.scoped_resources_builder\n\n    @property\n    def log(self) -> DagsterLogManager:\n        return self._log_manager\n\n    @property\n    def partitions_def(self) -> Optional[PartitionsDefinition]:\n        from dagster._core.definitions.job_definition import JobDefinition\n\n        job_def = self._execution_data.job_def\n        if not isinstance(job_def, JobDefinition):\n            check.failed(\n                "Can only call 'partitions_def', when using jobs, not legacy pipelines",\n            )\n        partitions_def = job_def.partitions_def\n        return partitions_def\n\n    @property\n    def has_partitions(self) -> bool:\n        tags = self._plan_data.dagster_run.tags\n        return bool(\n            PARTITION_NAME_TAG in tags\n            or any([tag.startswith(MULTIDIMENSIONAL_PARTITION_PREFIX) for tag in tags.keys()])\n            or (\n                tags.get(ASSET_PARTITION_RANGE_START_TAG)\n                and tags.get(ASSET_PARTITION_RANGE_END_TAG)\n            )\n        )\n\n    @property\n    def partition_key(self) -> str:\n        from dagster._core.definitions.multi_dimensional_partitions import (\n            MultiPartitionsDefinition,\n            get_multipartition_key_from_tags,\n        )\n\n        
if not self.has_partitions:\n            raise DagsterInvariantViolationError(\n                "Cannot access partition_key for a non-partitioned run"\n            )\n\n        tags = self._plan_data.dagster_run.tags\n        if any([tag.startswith(MULTIDIMENSIONAL_PARTITION_PREFIX) for tag in tags.keys()]):\n            return get_multipartition_key_from_tags(tags)\n        elif PARTITION_NAME_TAG in tags:\n            return tags[PARTITION_NAME_TAG]\n        else:\n            range_start = tags[ASSET_PARTITION_RANGE_START_TAG]\n            range_end = tags[ASSET_PARTITION_RANGE_END_TAG]\n\n            if range_start != range_end:\n                raise DagsterInvariantViolationError(\n                    "Cannot access partition_key for a partitioned run with a range of partitions."\n                    " Call partition_key_range instead."\n                )\n            else:\n                if isinstance(self.partitions_def, MultiPartitionsDefinition):\n                    return self.partitions_def.get_partition_key_from_str(cast(str, range_start))\n                return cast(str, range_start)\n\n    @property\n    def asset_partition_key_range(self) -> PartitionKeyRange:\n        from dagster._core.definitions.multi_dimensional_partitions import (\n            MultiPartitionsDefinition,\n            get_multipartition_key_from_tags,\n        )\n\n        if not self.has_partitions:\n            raise DagsterInvariantViolationError(\n                "Cannot access partition_key for a non-partitioned run"\n            )\n\n        tags = self._plan_data.dagster_run.tags\n        if any([tag.startswith(MULTIDIMENSIONAL_PARTITION_PREFIX) for tag in tags.keys()]):\n            multipartition_key = get_multipartition_key_from_tags(tags)\n            return PartitionKeyRange(multipartition_key, multipartition_key)\n        elif PARTITION_NAME_TAG in tags:\n            partition_key = tags[PARTITION_NAME_TAG]\n            return PartitionKeyRange(partition_key, partition_key)\n        else:\n            partition_key_range_start = tags[ASSET_PARTITION_RANGE_START_TAG]\n            if partition_key_range_start is not None:\n                if isinstance(self.partitions_def, MultiPartitionsDefinition):\n                    return PartitionKeyRange(\n                        self.partitions_def.get_partition_key_from_str(partition_key_range_start),\n                        self.partitions_def.get_partition_key_from_str(\n                            tags[ASSET_PARTITION_RANGE_END_TAG]\n                        ),\n                    )\n            return PartitionKeyRange(partition_key_range_start, tags[ASSET_PARTITION_RANGE_END_TAG])\n\n    @property\n    def partition_time_window(self) -> TimeWindow:\n        partitions_def = self.partitions_def\n\n        if partitions_def is None:\n            raise DagsterInvariantViolationError("Partitions definition is not defined")\n\n        if not has_one_dimension_time_window_partitioning(partitions_def=partitions_def):\n            raise DagsterInvariantViolationError(\n                "Expected a TimeWindowPartitionsDefinition or MultiPartitionsDefinition with a"\n                f" single time dimension, but instead found {type(partitions_def)}"\n            )\n\n        if self.has_partition_key:\n            return cast(\n                Union[MultiPartitionsDefinition, TimeWindowPartitionsDefinition], partitions_def\n            ).time_window_for_partition_key(self.partition_key)\n        elif self.has_partition_key_range:\n            
partition_key_range = self.asset_partition_key_range\n            partitions_def = cast(\n                Union[TimeWindowPartitionsDefinition, MultiPartitionsDefinition], partitions_def\n            )\n            return TimeWindow(\n                partitions_def.time_window_for_partition_key(partition_key_range.start).start,\n                partitions_def.time_window_for_partition_key(partition_key_range.end).end,\n            )\n\n        else:\n            check.failed(\n                "Has a PartitionsDefinition, so should either have a partition key or a partition"\n                " key range"\n            )\n\n    @property\n    def has_partition_key(self) -> bool:\n        return PARTITION_NAME_TAG in self._plan_data.dagster_run.tags\n\n    @property\n    def has_partition_key_range(self) -> bool:\n        return ASSET_PARTITION_RANGE_START_TAG in self._plan_data.dagster_run.tags\n\n    def for_type(self, dagster_type: DagsterType) -> "TypeCheckContext":\n        return TypeCheckContext(\n            self.run_id, self.log, self._execution_data.scoped_resources_builder, dagster_type\n        )\n\n\n@dataclass\nclass InputAssetVersionInfo:\n    # This is the storage id of the last materialization of any partition of an asset. Thus it is\n    # computed the same way for both partitioned and non-partitioned assets.\n    storage_id: int\n\n    # If the input asset is partitioned, this is a hash of the sorted data versions of each dependency\n    # partition. If the input asset is not partitioned, this is the data version of the asset. It\n    # can be none if we are sourcing a materialization from before data versions.\n    data_version: Optional["DataVersion"]\n\n    # This is the run_id on the event that the storage_id references\n    run_id: str\n\n    # This is the timestamp on the event that the storage_id references\n    timestamp: float\n\n\n
[docs]class StepExecutionContext(PlanExecutionContext, IStepContext):\n """Context for the execution of a step. Users should not instantiate this class directly.\n\n This context assumes that user code can be run directly, and thus includes resource and information.\n """\n\n def __init__(\n self,\n plan_data: PlanData,\n execution_data: ExecutionData,\n log_manager: DagsterLogManager,\n step: ExecutionStep,\n output_capture: Optional[Dict[StepOutputHandle, Any]],\n known_state: Optional["KnownExecutionState"],\n ):\n from dagster._core.execution.resources_init import get_required_resource_keys_for_step\n\n super(StepExecutionContext, self).__init__(\n plan_data=plan_data,\n execution_data=execution_data,\n log_manager=log_manager,\n output_capture=output_capture,\n )\n self._step = step\n self._required_resource_keys = get_required_resource_keys_for_step(\n plan_data.job.get_definition(),\n step,\n plan_data.execution_plan,\n )\n self._resources = execution_data.scoped_resources_builder.build(\n self._required_resource_keys\n )\n self._known_state = known_state\n self._input_lineage: List[AssetLineageInfo] = []\n\n resources_iter = cast(Iterable, self._resources)\n\n step_launcher_resources = [\n resource for resource in resources_iter if isinstance(resource, StepLauncher)\n ]\n\n self._step_launcher: Optional[StepLauncher] = None\n if len(step_launcher_resources) > 1:\n raise DagsterInvariantViolationError(\n "Multiple required resources for {described_op} have inherited StepLauncher"\n "There should be at most one step launcher resource per {node_type}.".format(\n described_op=self.describe_op(), node_type=self.op_def.node_type_str\n )\n )\n elif len(step_launcher_resources) == 1:\n self._step_launcher = step_launcher_resources[0]\n\n self._step_exception: Optional[BaseException] = None\n\n self._step_output_capture: Optional[Dict[StepOutputHandle, Any]] = None\n # Enable step output capture if there are any hooks which will receive them.\n # Expect in the future that hooks may control whether or not they get outputs,\n # but for now presence of any will cause output capture.\n if self.job_def.get_all_hooks_for_handle(self.node_handle):\n self._step_output_capture = {}\n\n self._output_metadata: Dict[str, Any] = {}\n self._seen_outputs: Dict[str, Union[str, Set[str]]] = {}\n\n self._input_asset_version_info: Dict[AssetKey, Optional["InputAssetVersionInfo"]] = {}\n self._is_external_input_asset_version_info_loaded = False\n self._data_version_cache: Dict[AssetKey, "DataVersion"] = {}\n\n self._requires_typed_event_stream = False\n self._typed_event_stream_error_message = None\n\n # In this mode no conversion is done on returned values and missing but expected outputs are not\n # allowed.\n @property\n def requires_typed_event_stream(self) -> bool:\n return self._requires_typed_event_stream\n\n @property\n def typed_event_stream_error_message(self) -> Optional[str]:\n return self._typed_event_stream_error_message\n\n # Error message will be appended to the default error message.\n def set_requires_typed_event_stream(self, *, error_message: Optional[str] = None):\n self._requires_typed_event_stream = True\n self._typed_event_stream_error_message = error_message\n\n @property\n def step(self) -> ExecutionStep:\n return self._step\n\n @property\n def node_handle(self) -> "NodeHandle":\n return self.step.node_handle\n\n @property\n def required_resource_keys(self) -> AbstractSet[str]:\n return self._required_resource_keys\n\n @property\n def resources(self) -> "Resources":\n return 
self._resources\n\n @property\n def step_launcher(self) -> Optional[StepLauncher]:\n return self._step_launcher\n\n @property\n def op_def(self) -> OpDefinition:\n return self.op.definition\n\n @property\n def job_def(self) -> "JobDefinition":\n return self._execution_data.job_def\n\n @property\n def op(self) -> OpNode:\n return self.job_def.get_op(self._step.node_handle)\n\n @property\n def op_retry_policy(self) -> Optional[RetryPolicy]:\n return self.job_def.get_retry_policy_for_handle(self.node_handle)\n\n def describe_op(self) -> str:\n return f'op "{self.node_handle}"'\n\n def get_io_manager(self, step_output_handle: StepOutputHandle) -> IOManager:\n step_output = self.execution_plan.get_step_output(step_output_handle)\n io_manager_key = (\n self.job_def.get_node(step_output.node_handle)\n .output_def_named(step_output.name)\n .io_manager_key\n )\n\n output_manager = getattr(self.resources, io_manager_key)\n return check.inst(output_manager, IOManager)\n\n def get_output_context(self, step_output_handle: StepOutputHandle) -> OutputContext:\n return get_output_context(\n self.execution_plan,\n self.job_def,\n self.resolved_run_config,\n step_output_handle,\n self._get_source_run_id(step_output_handle),\n log_manager=self.log,\n step_context=self,\n resources=None,\n version=self.execution_plan.get_version_for_step_output_handle(step_output_handle),\n )\n\n def for_input_manager(\n self,\n name: str,\n config: Any,\n metadata: Any,\n dagster_type: DagsterType,\n source_handle: Optional[StepOutputHandle] = None,\n resource_config: Any = None,\n resources: Optional["Resources"] = None,\n artificial_output_context: Optional["OutputContext"] = None,\n ) -> InputContext:\n if source_handle and artificial_output_context:\n check.failed("Cannot specify both source_handle and artificial_output_context.")\n\n upstream_output: Optional[OutputContext] = None\n\n if source_handle is not None:\n version = self.execution_plan.get_version_for_step_output_handle(source_handle)\n\n # NOTE: this is using downstream step_context for upstream OutputContext. 
step_context\n # will be set to None for 0.15 release.\n upstream_output = get_output_context(\n self.execution_plan,\n self.job_def,\n self.resolved_run_config,\n source_handle,\n self._get_source_run_id(source_handle),\n log_manager=self.log,\n step_context=self,\n resources=None,\n version=version,\n warn_on_step_context_use=True,\n )\n else:\n upstream_output = artificial_output_context\n\n asset_key = self.job_def.asset_layer.asset_key_for_input(\n node_handle=self.node_handle, input_name=name\n )\n asset_partitions_subset = (\n self.asset_partitions_subset_for_input(name)\n if self.has_asset_partitions_for_input(name)\n else None\n )\n\n asset_partitions_def = (\n self.job_def.asset_layer.partitions_def_for_asset(asset_key) if asset_key else None\n )\n return InputContext(\n job_name=self.job_def.name,\n name=name,\n op_def=self.op_def,\n config=config,\n metadata=metadata,\n upstream_output=upstream_output,\n dagster_type=dagster_type,\n log_manager=self.log,\n step_context=self,\n resource_config=resource_config,\n resources=resources,\n asset_key=asset_key,\n asset_partitions_subset=asset_partitions_subset,\n asset_partitions_def=asset_partitions_def,\n instance=self.instance,\n )\n\n def for_hook(self, hook_def: HookDefinition) -> "HookContext":\n from .hook import HookContext\n\n return HookContext(self, hook_def)\n\n def get_known_state(self) -> "KnownExecutionState":\n if not self._known_state:\n check.failed(\n "Attempted to access KnownExecutionState but it was not provided at context"\n " creation"\n )\n return self._known_state\n\n def can_load(\n self,\n step_output_handle: StepOutputHandle,\n ) -> bool:\n # can load from upstream in the same run\n if step_output_handle in self.get_known_state().ready_outputs:\n return True\n\n if (\n self._should_load_from_previous_runs(step_output_handle)\n # should and can load from a previous run\n and self._get_source_run_id_from_logs(step_output_handle)\n ):\n return True\n\n return False\n\n def observe_output(self, output_name: str, mapping_key: Optional[str] = None) -> None:\n if mapping_key:\n if output_name not in self._seen_outputs:\n self._seen_outputs[output_name] = set()\n cast(Set[str], self._seen_outputs[output_name]).add(mapping_key)\n else:\n self._seen_outputs[output_name] = "seen"\n\n def has_seen_output(self, output_name: str, mapping_key: Optional[str] = None) -> bool:\n if mapping_key:\n return (\n output_name in self._seen_outputs and mapping_key in self._seen_outputs[output_name]\n )\n return output_name in self._seen_outputs\n\n def add_output_metadata(\n self,\n metadata: Mapping[str, Any],\n output_name: Optional[str] = None,\n mapping_key: Optional[str] = None,\n ) -> None:\n if output_name is None and len(self.op_def.output_defs) == 1:\n output_def = self.op_def.output_defs[0]\n output_name = output_def.name\n elif output_name is None:\n raise DagsterInvariantViolationError(\n "Attempted to log metadata without providing output_name, but multiple outputs"\n " exist. Please provide an output_name to the invocation of"\n " `context.add_output_metadata`."\n )\n else:\n output_def = self.op_def.output_def_named(output_name)\n\n if self.has_seen_output(output_name, mapping_key):\n output_desc = (\n f"output '{output_def.name}'"\n if not mapping_key\n else f"output '{output_def.name}' with mapping_key '{mapping_key}'"\n )\n raise DagsterInvariantViolationError(\n f"In {self.op_def.node_type_str} '{self.op.name}', attempted to log output"\n f" metadata for {output_desc} which has already been yielded. 
Metadata must be"\n " logged before the output is yielded."\n )\n if output_def.is_dynamic and not mapping_key:\n raise DagsterInvariantViolationError(\n f"In {self.op_def.node_type_str} '{self.op.name}', attempted to log metadata"\n f" for dynamic output '{output_def.name}' without providing a mapping key. When"\n " logging metadata for a dynamic output, it is necessary to provide a mapping key."\n )\n\n if mapping_key:\n if output_name not in self._output_metadata:\n self._output_metadata[output_name] = {}\n if mapping_key in self._output_metadata[output_name]:\n self._output_metadata[output_name][mapping_key].update(metadata)\n else:\n self._output_metadata[output_name][mapping_key] = metadata\n else:\n if output_name in self._output_metadata:\n self._output_metadata[output_name].update(metadata)\n else:\n self._output_metadata[output_name] = metadata\n\n def get_output_metadata(\n self, output_name: str, mapping_key: Optional[str] = None\n ) -> Optional[Mapping[str, Any]]:\n metadata = self._output_metadata.get(output_name)\n if mapping_key and metadata:\n return metadata.get(mapping_key)\n return metadata\n\n def _get_source_run_id_from_logs(self, step_output_handle: StepOutputHandle) -> Optional[str]:\n # walk through event logs to find the right run_id based on the run lineage\n\n parent_state = self.get_known_state().parent_state\n while parent_state:\n # if the parent run has yielded an StepOutput event for the given step output,\n # we find the source run id\n if step_output_handle in parent_state.produced_outputs:\n return parent_state.run_id\n\n # else, keep looking backwards\n parent_state = parent_state.get_parent_state()\n\n # When a fixed path is provided via io manager, it's able to run step subset using an execution\n # plan when the ascendant outputs were not previously created by dagster-controlled\n # computations. for example, in backfills, with fixed path io manager, we allow users to\n # "re-execute" runs with steps where the outputs weren't previously stored by dagster.\n\n # Warn about this special case because it will also reach here when all previous runs have\n # skipped yielding this output. From the logs, we have no easy way to differentiate the fixed\n # path case and the skipping case, until we record the skipping info in KnownExecutionState,\n # i.e. resolve https://github.com/dagster-io/dagster/issues/3511\n self.log.warning(\n f"No previously stored outputs found for source {step_output_handle}. 
"\n "This is either because you are using an IO Manager that does not depend on run ID, "\n "or because all the previous runs have skipped the output in conditional execution."\n )\n return None\n\n def _should_load_from_previous_runs(self, step_output_handle: StepOutputHandle) -> bool:\n # should not load if not a re-execution\n if self.dagster_run.parent_run_id is None:\n return False\n # should not load if re-executing the entire pipeline\n if self.dagster_run.step_keys_to_execute is None:\n return False\n\n # should not load if the entire dynamic step is being executed in the current run\n handle = StepHandle.parse_from_key(step_output_handle.step_key)\n if (\n isinstance(handle, ResolvedFromDynamicStepHandle)\n and handle.unresolved_form.to_key() in self.dagster_run.step_keys_to_execute\n ):\n return False\n\n # should not load if this step is being executed in the current run\n return step_output_handle.step_key not in self.dagster_run.step_keys_to_execute\n\n def _get_source_run_id(self, step_output_handle: StepOutputHandle) -> Optional[str]:\n if self._should_load_from_previous_runs(step_output_handle):\n return self._get_source_run_id_from_logs(step_output_handle)\n else:\n return self.dagster_run.run_id\n\n def capture_step_exception(self, exception: BaseException):\n self._step_exception = check.inst_param(exception, "exception", BaseException)\n\n @property\n def step_exception(self) -> Optional[BaseException]:\n return self._step_exception\n\n @property\n def step_output_capture(self) -> Optional[Dict[StepOutputHandle, Any]]:\n return self._step_output_capture\n\n @property\n def previous_attempt_count(self) -> int:\n return self.get_known_state().get_retry_state().get_attempt_count(self._step.key)\n\n @property\n def op_config(self) -> Any:\n op_config = self.resolved_run_config.ops.get(str(self.node_handle))\n return op_config.config if op_config else None\n\n @property\n def is_op_in_graph(self) -> bool:\n """Whether this step corresponds to an op within a graph (either @graph, or @graph_asset)."""\n return self.step.node_handle.parent is not None\n\n @property\n def is_sda_step(self) -> bool:\n """Whether this step corresponds to a software define asset, inferred by presence of asset info on outputs.\n\n note: ops can materialize assets as well.\n """\n for output in self.step.step_outputs:\n asset_info = self.job_def.asset_layer.asset_info_for_output(\n self.node_handle, output.name\n )\n if asset_info is not None:\n return True\n return False\n\n def set_data_version(self, asset_key: AssetKey, data_version: "DataVersion") -> None:\n self._data_version_cache[asset_key] = data_version\n\n def has_data_version(self, asset_key: AssetKey) -> bool:\n return asset_key in self._data_version_cache\n\n def get_data_version(self, asset_key: AssetKey) -> "DataVersion":\n return self._data_version_cache[asset_key]\n\n @property\n def input_asset_records(self) -> Optional[Mapping[AssetKey, Optional["InputAssetVersionInfo"]]]:\n return self._input_asset_version_info\n\n @property\n def is_external_input_asset_version_info_loaded(self) -> bool:\n return self._is_external_input_asset_version_info_loaded\n\n def get_input_asset_version_info(self, key: AssetKey) -> Optional["InputAssetVersionInfo"]:\n if key not in self._input_asset_version_info:\n self._fetch_input_asset_version_info(key)\n return self._input_asset_version_info[key]\n\n # "external" refers to records for inputs generated outside of this step\n def fetch_external_input_asset_version_info(self) -> None:\n output_keys = 
self.get_output_asset_keys()\n\n all_dep_keys: List[AssetKey] = []\n for output_key in output_keys:\n if output_key not in self.job_def.asset_layer.asset_deps:\n continue\n dep_keys = self.job_def.asset_layer.upstream_assets_for_asset(output_key)\n for key in dep_keys:\n if key not in all_dep_keys and key not in output_keys:\n all_dep_keys.append(key)\n\n self._input_asset_version_info = {}\n for key in all_dep_keys:\n self._fetch_input_asset_version_info(key)\n self._is_external_input_asset_version_info_loaded = True\n\n def _fetch_input_asset_version_info(self, key: AssetKey) -> None:\n from dagster._core.definitions.data_version import (\n extract_data_version_from_entry,\n )\n\n event = self._get_input_asset_event(key)\n if event is None:\n self._input_asset_version_info[key] = None\n else:\n storage_id = event.storage_id\n # Input name will be none if this is an internal dep\n input_name = self.job_def.asset_layer.input_for_asset_key(self.node_handle, key)\n # Exclude AllPartitionMapping for now to avoid huge queries\n if input_name and self.has_asset_partitions_for_input(input_name):\n subset = self.asset_partitions_subset_for_input(\n input_name, require_valid_partitions=False\n )\n input_keys = list(subset.get_partition_keys())\n\n # This check represents a temporary constraint that prevents huge query results for upstream\n # partition data versions from timing out runs. If a partitioned dependency (a) uses an\n # AllPartitionMapping; and (b) has greater than or equal to\n # SKIP_PARTITION_DATA_VERSION_DEPENDENCY_THRESHOLD dependency partitions, then we\n # process it as a non-partitioned dependency (note that this was the behavior for\n # all partition dependencies prior to 2023-08). This means that stale status\n # results cannot be accurately computed for the dependency, and there is thus\n # corresponding logic in the CachingStaleStatusResolver to account for this. 
This\n # constraint should be removed when we have thoroughly examined the performance of\n # the data version retrieval query and can guarantee decent performance.\n if len(input_keys) < SKIP_PARTITION_DATA_VERSION_DEPENDENCY_THRESHOLD:\n data_version = self._get_partitions_data_version_from_keys(key, input_keys)\n else:\n data_version = extract_data_version_from_entry(event.event_log_entry)\n else:\n data_version = extract_data_version_from_entry(event.event_log_entry)\n self._input_asset_version_info[key] = InputAssetVersionInfo(\n storage_id, data_version, event.run_id, event.timestamp\n )\n\n def partition_mapping_for_input(self, input_name: str) -> Optional[PartitionMapping]:\n asset_layer = self.job_def.asset_layer\n upstream_asset_key = asset_layer.asset_key_for_input(self.node_handle, input_name)\n if upstream_asset_key:\n upstream_asset_partitions_def = asset_layer.partitions_def_for_asset(upstream_asset_key)\n assets_def = asset_layer.assets_def_for_node(self.node_handle)\n partitions_def = assets_def.partitions_def if assets_def else None\n explicit_partition_mapping = self.job_def.asset_layer.partition_mapping_for_node_input(\n self.node_handle, upstream_asset_key\n )\n return infer_partition_mapping(\n explicit_partition_mapping,\n partitions_def,\n upstream_asset_partitions_def,\n )\n else:\n return None\n\n def _get_input_asset_event(self, key: AssetKey) -> Optional["EventLogRecord"]:\n event = self.instance.get_latest_data_version_record(key)\n if event:\n self._check_input_asset_event(key, event)\n return event\n\n def _check_input_asset_event(self, key: AssetKey, event: "EventLogRecord") -> None:\n assert event.event_log_entry\n event_data_version = extract_data_version_from_entry(event.event_log_entry)\n if key in self._data_version_cache and self._data_version_cache[key] != event_data_version:\n self.log.warning(\n f"Data version mismatch for asset {key}. Data version from materialization within"\n f" current step is `{self._data_version_cache[key]}`. Data version from most recent"\n f" materialization is `{event_data_version}`. Most recent materialization will be"\n " used for provenance tracking."\n )\n\n def _get_partitions_data_version_from_keys(\n self, key: AssetKey, partition_keys: Sequence[str]\n ) -> "DataVersion":\n from dagster._core.definitions.data_version import (\n DataVersion,\n )\n from dagster._core.events import DagsterEventType\n\n # TODO: this needs to account for observations also\n event_type = DagsterEventType.ASSET_MATERIALIZATION\n tags_by_partition = (\n self.instance._event_storage.get_latest_tags_by_partition( # noqa: SLF001\n key, event_type, [DATA_VERSION_TAG], asset_partitions=list(partition_keys)\n )\n )\n partition_data_versions = [\n pair[1][DATA_VERSION_TAG]\n for pair in sorted(tags_by_partition.items(), key=lambda x: x[0])\n ]\n hash_sig = sha256()\n hash_sig.update(bytearray("".join(partition_data_versions), "utf8"))\n return DataVersion(hash_sig.hexdigest())\n\n # Call this to clear the cache for an input asset record. This is necessary when an old\n # materialization for an asset was loaded during `fetch_external_input_asset_records` because an\n # intrastep asset is not required, but then that asset is materialized during the step. 
If we\n # don't clear the cache for this asset, then we won't use the most up-to-date asset record.\n def wipe_input_asset_version_info(self, key: AssetKey) -> None:\n if key in self._input_asset_version_info:\n del self._input_asset_version_info[key]\n\n def get_output_asset_keys(self) -> AbstractSet[AssetKey]:\n output_keys: Set[AssetKey] = set()\n for step_output in self.step.step_outputs:\n asset_info = self.job_def.asset_layer.asset_info_for_output(\n self.node_handle, step_output.name\n )\n if asset_info is None or not asset_info.is_required:\n continue\n output_keys.add(asset_info.key)\n return output_keys\n\n def has_asset_partitions_for_input(self, input_name: str) -> bool:\n asset_layer = self.job_def.asset_layer\n upstream_asset_key = asset_layer.asset_key_for_input(self.node_handle, input_name)\n\n return (\n upstream_asset_key is not None\n and asset_layer.partitions_def_for_asset(upstream_asset_key) is not None\n )\n\n def asset_partition_key_range_for_input(self, input_name: str) -> PartitionKeyRange:\n subset = self.asset_partitions_subset_for_input(input_name)\n partition_key_ranges = subset.get_partition_key_ranges(\n dynamic_partitions_store=self.instance\n )\n\n if len(partition_key_ranges) != 1:\n check.failed(\n "Tried to access asset partition key range, but there are "\n f"({len(partition_key_ranges)}) key ranges associated with this input.",\n )\n\n return partition_key_ranges[0]\n\n def asset_partitions_subset_for_input(\n self, input_name: str, *, require_valid_partitions: bool = True\n ) -> PartitionsSubset:\n asset_layer = self.job_def.asset_layer\n assets_def = asset_layer.assets_def_for_node(self.node_handle)\n upstream_asset_key = asset_layer.asset_key_for_input(self.node_handle, input_name)\n\n if upstream_asset_key is not None:\n upstream_asset_partitions_def = asset_layer.partitions_def_for_asset(upstream_asset_key)\n\n if upstream_asset_partitions_def is not None:\n partitions_def = assets_def.partitions_def if assets_def else None\n partitions_subset = (\n partitions_def.empty_subset().with_partition_key_range(\n self.asset_partition_key_range, dynamic_partitions_store=self.instance\n )\n if partitions_def\n else None\n )\n partition_mapping = infer_partition_mapping(\n asset_layer.partition_mapping_for_node_input(\n self.node_handle, upstream_asset_key\n ),\n partitions_def,\n upstream_asset_partitions_def,\n )\n mapped_partitions_result = (\n partition_mapping.get_upstream_mapped_partitions_result_for_partitions(\n partitions_subset,\n upstream_asset_partitions_def,\n dynamic_partitions_store=self.instance,\n )\n )\n\n if (\n require_valid_partitions\n and mapped_partitions_result.required_but_nonexistent_partition_keys\n ):\n raise DagsterInvariantViolationError(\n f"Partition key range {self.asset_partition_key_range} in"\n f" {self.node_handle.name} depends on invalid partition keys"\n f" {mapped_partitions_result.required_but_nonexistent_partition_keys} in"\n f" upstream asset {upstream_asset_key}"\n )\n\n return mapped_partitions_result.partitions_subset\n\n check.failed("The input has no asset partitions")\n\n def asset_partition_key_for_input(self, input_name: str) -> str:\n start, end = self.asset_partition_key_range_for_input(input_name)\n if start == end:\n return start\n else:\n check.failed(\n f"Tried to access partition key for input '{input_name}' of step '{self.step.key}',"\n f" but the step input has a partition range: '{start}' to '{end}'."\n )\n\n def _partitions_def_for_output(self, output_name: str) -> 
Optional[PartitionsDefinition]:\n asset_info = self.job_def.asset_layer.asset_info_for_output(\n node_handle=self.node_handle, output_name=output_name\n )\n if asset_info:\n return asset_info.partitions_def\n else:\n return None\n\n def partitions_def_for_output(self, output_name: str) -> Optional[PartitionsDefinition]:\n return self._partitions_def_for_output(output_name)\n\n def has_asset_partitions_for_output(self, output_name: str) -> bool:\n return self._partitions_def_for_output(output_name) is not None\n\n def asset_partition_key_range_for_output(self, output_name: str) -> PartitionKeyRange:\n if self._partitions_def_for_output(output_name) is not None:\n return self.asset_partition_key_range\n\n check.failed("The output has no asset partitions")\n\n def asset_partition_key_for_output(self, output_name: str) -> str:\n start, end = self.asset_partition_key_range_for_output(output_name)\n if start == end:\n return start\n else:\n check.failed(\n f"Tried to access partition key for output '{output_name}' of step"\n f" '{self.step.key}', but the step output has a partition range: '{start}' to"\n f" '{end}'."\n )\n\n def asset_partitions_time_window_for_output(self, output_name: str) -> TimeWindow:\n """The time window for the partitions of the asset correponding to the given output.\n\n Raises an error if either of the following are true:\n - The output asset has no partitioning.\n - The output asset is not partitioned with a TimeWindowPartitionsDefinition or a\n MultiPartitionsDefinition with one time-partitioned dimension.\n """\n partitions_def = self._partitions_def_for_output(output_name)\n\n if not partitions_def:\n raise ValueError(\n "Tried to get asset partitions for an output that does not correspond to a "\n "partitioned asset."\n )\n\n if not has_one_dimension_time_window_partitioning(partitions_def):\n raise ValueError(\n "Tried to get asset partitions for an output that correponds to a partitioned "\n "asset that is not time-partitioned."\n )\n\n partitions_def = cast(\n Union[TimeWindowPartitionsDefinition, MultiPartitionsDefinition], partitions_def\n )\n partition_key_range = self.asset_partition_key_range_for_output(output_name)\n return TimeWindow(\n # mypy thinks partitions_def is <nothing> here because ????\n partitions_def.time_window_for_partition_key(partition_key_range.start).start,\n partitions_def.time_window_for_partition_key(partition_key_range.end).end,\n )\n\n def asset_partitions_time_window_for_input(self, input_name: str) -> TimeWindow:\n """The time window for the partitions of the asset correponding to the given input.\n\n Raises an error if either of the following are true:\n - The input asset has no partitioning.\n - The input asset is not partitioned with a TimeWindowPartitionsDefinition or a\n MultiPartitionsDefinition with one time-partitioned dimension.\n """\n asset_layer = self.job_def.asset_layer\n upstream_asset_key = asset_layer.asset_key_for_input(self.node_handle, input_name)\n\n if upstream_asset_key is None:\n raise ValueError("The input has no corresponding asset")\n\n upstream_asset_partitions_def = asset_layer.partitions_def_for_asset(upstream_asset_key)\n\n if not upstream_asset_partitions_def:\n raise ValueError(\n "Tried to get asset partitions for an input that does not correspond to a "\n "partitioned asset."\n )\n\n if not has_one_dimension_time_window_partitioning(upstream_asset_partitions_def):\n raise ValueError(\n "Tried to get asset partitions for an input that correponds to a partitioned "\n "asset that is not 
time-partitioned."\n )\n\n upstream_asset_partitions_def = cast(\n Union[TimeWindowPartitionsDefinition, MultiPartitionsDefinition],\n upstream_asset_partitions_def,\n )\n partition_key_range = self.asset_partition_key_range_for_input(input_name)\n\n return TimeWindow(\n upstream_asset_partitions_def.time_window_for_partition_key(\n partition_key_range.start\n ).start,\n upstream_asset_partitions_def.time_window_for_partition_key(\n partition_key_range.end\n ).end,\n )\n\n def get_type_loader_context(self) -> "DagsterTypeLoaderContext":\n return DagsterTypeLoaderContext(\n plan_data=self.plan_data,\n execution_data=self._execution_data,\n log_manager=self._log_manager,\n step=self.step,\n output_capture=self._output_capture,\n known_state=self._known_state,\n )\n\n def output_observes_source_asset(self, output_name: str) -> bool:\n """Returns True if this step observes a source asset."""\n asset_layer = self.job_def.asset_layer\n if asset_layer is None:\n return False\n asset_key = asset_layer.asset_key_for_output(self.node_handle, output_name)\n if asset_key is None:\n return False\n return asset_layer.is_observable_for_asset(asset_key)
\n\n\n
[docs]class TypeCheckContext:\n """The ``context`` object available to a type check function on a DagsterType."""\n\n def __init__(\n self,\n run_id: str,\n log_manager: DagsterLogManager,\n scoped_resources_builder: ScopedResourcesBuilder,\n dagster_type: DagsterType,\n ):\n self._run_id = run_id\n self._log = log_manager\n self._resources = scoped_resources_builder.build(dagster_type.required_resource_keys)\n\n @public\n @property\n def resources(self) -> "Resources":\n """An object whose attributes contain the resources available to this op."""\n return self._resources\n\n @public\n @property\n def run_id(self) -> str:\n """The id of this job run."""\n return self._run_id\n\n @public\n @property\n def log(self) -> DagsterLogManager:\n """Centralized log dispatch from user code."""\n return self._log
\n\n\n
[docs]class DagsterTypeLoaderContext(StepExecutionContext):\n """The context object provided to a :py:class:`@dagster_type_loader <dagster_type_loader>`-decorated function during execution.\n\n Users should not construct this object directly.\n """\n\n @public\n @property\n def resources(self) -> "Resources":\n """The resources available to the type loader, specified by the `required_resource_keys` argument of the decorator."""\n return super(DagsterTypeLoaderContext, self).resources\n\n @public\n @property\n def job_def(self) -> "JobDefinition":\n """The underlying job definition being executed."""\n return super(DagsterTypeLoaderContext, self).job_def\n\n @public\n @property\n def op_def(self) -> "OpDefinition":\n """The op for which type loading is occurring."""\n return super(DagsterTypeLoaderContext, self).op_def
\n
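The partition accessors defined above on `PlanExecutionContext` and `StepExecutionContext` (`partition_key`, `partition_time_window`, the asset partition key ranges) are what ultimately back the partition information exposed to user code. A minimal sketch, assuming an illustrative daily-partitioned asset named `daily_events` that is not part of this patch, of how those values surface through the op/asset execution context:

```python
from dagster import DailyPartitionsDefinition, OpExecutionContext, asset, materialize


@asset(partitions_def=DailyPartitionsDefinition(start_date="2023-01-01"))
def daily_events(context: OpExecutionContext) -> None:
    # Both values are derived from the run's partition tags, which the plan
    # execution context above resolves against the job's PartitionsDefinition.
    context.log.info(f"partition_key={context.partition_key}")
    context.log.info(f"time_window={context.partition_time_window}")


if __name__ == "__main__":
    # Materialize a single partition in process.
    materialize([daily_events], partition_key="2023-06-01")
```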
", "current_page_name": "_modules/dagster/_core/execution/context/system", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.execution.context.system"}}, "execute_in_process_result": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.execution.execute_in_process_result

\nfrom typing import Any, Mapping, Optional, Sequence\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.definitions import JobDefinition, NodeHandle\nfrom dagster._core.definitions.events import AssetKey, CoercibleToAssetKey\nfrom dagster._core.definitions.utils import DEFAULT_OUTPUT\nfrom dagster._core.errors import DagsterInvariantViolationError\nfrom dagster._core.events import DagsterEvent\nfrom dagster._core.execution.plan.outputs import StepOutputHandle\nfrom dagster._core.storage.dagster_run import DagsterRun\n\nfrom .execution_result import ExecutionResult\n\n\n
[docs]class ExecuteInProcessResult(ExecutionResult):\n """Result object returned by in-process testing APIs.\n\n Users should not instantiate this object directly. Used for retrieving run success, events, and outputs from execution methods that return this object.\n\n This object is returned by:\n - :py:meth:`dagster.GraphDefinition.execute_in_process`\n - :py:meth:`dagster.JobDefinition.execute_in_process`\n - :py:meth:`dagster.materialize_to_memory`\n - :py:meth:`dagster.materialize`\n """\n\n _handle: NodeHandle\n _event_list: Sequence[DagsterEvent]\n _dagster_run: DagsterRun\n _output_capture: Mapping[StepOutputHandle, Any]\n _job_def: JobDefinition\n\n def __init__(\n self,\n event_list: Sequence[DagsterEvent],\n dagster_run: DagsterRun,\n output_capture: Optional[Mapping[StepOutputHandle, Any]],\n job_def: JobDefinition,\n ):\n self._job_def = job_def\n\n self._event_list = event_list\n self._dagster_run = dagster_run\n\n self._output_capture = check.opt_mapping_param(\n output_capture, "output_capture", key_type=StepOutputHandle\n )\n\n @public\n @property\n def job_def(self) -> JobDefinition:\n """JobDefinition: The job definition that was executed."""\n return self._job_def\n\n @public\n @property\n def dagster_run(self) -> DagsterRun:\n """DagsterRun: The Dagster run that was executed."""\n return self._dagster_run\n\n @public\n @property\n def all_events(self) -> Sequence[DagsterEvent]:\n """List[DagsterEvent]: All dagster events emitted during execution."""\n return self._event_list\n\n @public\n @property\n def run_id(self) -> str:\n """str: The run ID of the executed :py:class:`DagsterRun`."""\n return self.dagster_run.run_id\n\n def _get_output_for_handle(self, handle: NodeHandle, output_name: str) -> Any:\n mapped_outputs = {}\n step_key = str(handle)\n output_found = False\n for step_output_handle, value in self._output_capture.items():\n # For the mapped output case, where step keys are in the format\n # "step_key[upstream_mapped_output_name]" within the step output handle.\n if (\n step_output_handle.step_key.startswith(f"{step_key}[")\n and step_output_handle.output_name == output_name\n ):\n output_found = True\n key_start = step_output_handle.step_key.find("[")\n key_end = step_output_handle.step_key.find("]")\n upstream_mapped_output_name = step_output_handle.step_key[key_start + 1 : key_end]\n mapped_outputs[upstream_mapped_output_name] = value\n\n # For all other cases, search for exact match.\n elif (\n step_key == step_output_handle.step_key\n and step_output_handle.output_name == output_name\n ):\n output_found = True\n if not step_output_handle.mapping_key:\n return self._output_capture[step_output_handle]\n mapped_outputs[step_output_handle.mapping_key] = value\n\n if not output_found:\n raise DagsterInvariantViolationError(\n f"No outputs found for output '{output_name}' from node '{handle}'."\n )\n return mapped_outputs\n\n
[docs] @public\n def output_for_node(self, node_str: str, output_name: str = DEFAULT_OUTPUT) -> Any:\n """Retrieves output value with a particular name from the in-process run of the job.\n\n Args:\n node_str (str): Name of the op/graph whose output should be retrieved. If the intended\n graph/op is nested within another graph, the syntax is `outer_graph.inner_node`.\n output_name (Optional[str]): Name of the output on the op/graph to retrieve. Defaults to\n `result`, the default output name in dagster.\n\n Returns:\n Any: The value of the retrieved output.\n """\n return super(ExecuteInProcessResult, self).output_for_node(\n node_str, output_name=output_name\n )
\n\n
[docs] @public\n def asset_value(self, asset_key: CoercibleToAssetKey) -> Any:\n """Retrieves the value of an asset that was materialized during the execution of the job.\n\n Args:\n asset_key (CoercibleToAssetKey): The key of the asset to retrieve.\n\n Returns:\n Any: The value of the retrieved asset.\n """\n node_output_handle = self._job_def.asset_layer.node_output_handle_for_asset(\n AssetKey.from_coercible(asset_key)\n )\n return self.output_for_node(\n node_str=str(node_output_handle.node_handle), output_name=node_output_handle.output_name\n )
\n\n
[docs] @public\n def output_value(self, output_name: str = DEFAULT_OUTPUT) -> Any:\n """Retrieves output of top-level job, if an output is returned.\n\n Args:\n output_name (Optional[str]): The name of the output to retrieve. Defaults to `result`,\n the default output name in dagster.\n\n Returns:\n Any: The value of the retrieved output.\n """\n return super(ExecuteInProcessResult, self).output_value(output_name=output_name)
\n
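As a usage note, `ExecuteInProcessResult` is the object handed back by `execute_in_process` and `materialize`. A small sketch (the op, job, and asset names here are illustrative, not from this patch) of pulling values back out with `output_for_node` and `asset_value`:

```python
from dagster import asset, job, materialize, op


@op
def add_one() -> int:
    return 1 + 1


@job
def math_job():
    add_one()


@asset
def my_asset() -> int:
    return 7


if __name__ == "__main__":
    job_result = math_job.execute_in_process()
    assert job_result.success
    # Outputs are served from the in-memory output capture shown above.
    assert job_result.output_for_node("add_one") == 2

    asset_result = materialize([my_asset])
    assert asset_result.asset_value("my_asset") == 7
```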
", "current_page_name": "_modules/dagster/_core/execution/execute_in_process_result", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.execution.execute_in_process_result"}, "job_execution_result": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.execution.job_execution_result

\nfrom typing import Any, Sequence\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.definitions import JobDefinition, NodeHandle\nfrom dagster._core.definitions.utils import DEFAULT_OUTPUT\nfrom dagster._core.errors import DagsterInvariantViolationError\nfrom dagster._core.events import DagsterEvent\nfrom dagster._core.execution.plan.utils import build_resources_for_manager\nfrom dagster._core.storage.dagster_run import DagsterRun\n\nfrom .execution_result import ExecutionResult\n\n\n
[docs]class JobExecutionResult(ExecutionResult):\n """Result object returned by :py:func:`dagster.execute_job`.\n\n Used for retrieving run success, events, and outputs from `execute_job`.\n Users should not directly instantiate this class.\n\n Events and run information can be retrieved off of the object directly. In\n order to access outputs, the `ExecuteJobResult` object needs to be opened\n as a context manager, which will re-initialize the resources from\n execution.\n """\n\n def __init__(self, job_def, reconstruct_context, event_list, dagster_run):\n self._job_def = job_def\n self._reconstruct_context = reconstruct_context\n self._context = None\n self._event_list = event_list\n self._dagster_run = dagster_run\n\n def __enter__(self) -> "JobExecutionResult":\n context = self._reconstruct_context.__enter__()\n self._context = context\n return self\n\n def __exit__(self, *exc):\n exit_result = self._reconstruct_context.__exit__(*exc)\n self._context = None\n return exit_result\n\n @public\n @property\n def job_def(self) -> JobDefinition:\n """JobDefinition: The job definition that was executed."""\n return self._job_def\n\n @public\n @property\n def dagster_run(self) -> DagsterRun:\n """DagsterRun: The Dagster run that was executed."""\n return self._dagster_run\n\n @public\n @property\n def all_events(self) -> Sequence[DagsterEvent]:\n """Sequence[DagsterEvent]: List of all events yielded by the job execution."""\n return self._event_list\n\n @public\n @property\n def run_id(self) -> str:\n """str: The id of the Dagster run that was executed."""\n return self.dagster_run.run_id\n\n
[docs] @public\n def output_value(self, output_name: str = DEFAULT_OUTPUT) -> Any:\n """Retrieves output of top-level job, if an output is returned.\n\n In order to use this method, the `JobExecutionResult` object must be opened as a context manager. If this method is used without opening the context manager, it will result in a :py:class:`DagsterInvariantViolationError`. If the top-level job has no output, calling this method will also result in a :py:class:`DagsterInvariantViolationError`.\n\n Args:\n output_name (Optional[str]): The name of the output to retrieve. Defaults to `result`,\n the default output name in dagster.\n\n Returns:\n Any: The value of the retrieved output.\n """\n return super(JobExecutionResult, self).output_value(output_name=output_name)
\n\n
[docs] @public\n def output_for_node(self, node_str: str, output_name: str = DEFAULT_OUTPUT) -> Any:\n """Retrieves output value with a particular name from the run of the job.\n\n In order to use this method, the `JobExecutionResult` object must be opened as a context manager. If this method is used without opening the context manager, it will result in a :py:class:`DagsterInvariantViolationError`.\n\n Args:\n node_str (str): Name of the op/graph whose output should be retrieved. If the intended\n graph/op is nested within another graph, the syntax is `outer_graph.inner_node`.\n output_name (Optional[str]): Name of the output on the op/graph to retrieve. Defaults to\n `result`, the default output name in dagster.\n\n Returns:\n Any: The value of the retrieved output.\n """\n return super(JobExecutionResult, self).output_for_node(node_str, output_name=output_name)
\n\n def _get_output_for_handle(self, handle: NodeHandle, output_name: str) -> Any:\n if not self._context:\n raise DagsterInvariantViolationError(\n "In order to access output objects, the result of `execute_job` must be opened as a"\n " context manager: 'with execute_job(...) as result:"\n )\n found = False\n result = None\n for compute_step_event in self.compute_events_for_handle(handle):\n if (\n compute_step_event.is_successful_output\n and compute_step_event.step_output_data.output_name == output_name\n ):\n found = True\n output = compute_step_event.step_output_data\n step = self._context.execution_plan.get_step_by_key(compute_step_event.step_key)\n dagster_type = (\n self.job_def.get_node(handle).output_def_named(output_name).dagster_type\n )\n value = self._get_value(self._context.for_step(step), output, dagster_type)\n check.invariant(\n not (output.mapping_key and step.get_mapping_key()),\n "Not set up to handle mapped outputs downstream of mapped steps",\n )\n mapping_key = output.mapping_key or step.get_mapping_key()\n if mapping_key:\n if result is None:\n result = {mapping_key: value}\n else:\n result[mapping_key] = (\n value # pylint:disable=unsupported-assignment-operation\n )\n else:\n result = value\n\n if found:\n return result\n\n node = self.job_def.get_node(handle)\n raise DagsterInvariantViolationError(\n f"Did not find result {output_name} in {node.describe_node()}"\n )\n\n def _get_value(self, context, step_output_data, dagster_type):\n step_output_handle = step_output_data.step_output_handle\n manager = context.get_io_manager(step_output_handle)\n manager_key = context.execution_plan.get_manager_key(step_output_handle, self.job_def)\n res = manager.load_input(\n context.for_input_manager(\n name=None,\n config=None,\n metadata=None,\n dagster_type=dagster_type,\n source_handle=step_output_handle,\n resource_config=context.resolved_run_config.resources[manager_key].config,\n resources=build_resources_for_manager(manager_key, context),\n )\n )\n return res
\n
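Unlike the in-process result, `JobExecutionResult` re-initializes resources in order to load outputs, so it has to be opened as a context manager. A rough sketch, assuming a standalone script where `reconstructable` can locate the job, using an ephemeral instance and in-process execution (all names illustrative):

```python
from dagster import DagsterInstance, execute_job, job, op, reconstructable


@op
def emit() -> int:
    return 5


@job
def emit_job():
    emit()


if __name__ == "__main__":
    instance = DagsterInstance.ephemeral()
    # Outputs can only be re-loaded while the result is open as a context manager.
    with execute_job(
        reconstructable(emit_job),
        instance=instance,
        run_config={"execution": {"config": {"in_process": {}}}},
    ) as result:
        assert result.success
        assert result.output_for_node("emit") == 5
```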
", "current_page_name": "_modules/dagster/_core/execution/job_execution_result", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.execution.job_execution_result"}, "validate_run_config": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.execution.validate_run_config

\nfrom typing import Any, Mapping, Optional, Union\n\nimport dagster._check as check\nfrom dagster._core.definitions import JobDefinition\nfrom dagster._core.definitions.run_config import RunConfig, convert_config_input\nfrom dagster._core.system_config.objects import ResolvedRunConfig\n\n\n
[docs]def validate_run_config(\n job_def: JobDefinition,\n run_config: Optional[Union[Mapping[str, Any], RunConfig]] = None,\n) -> Mapping[str, Any]:\n """Function to validate a provided run config blob against a given job.\n\n If validation is successful, this function will return a dictionary representation of the\n validated config actually used during execution.\n\n Args:\n job_def (JobDefinition): The job definition to validate run\n config against\n run_config (Optional[Dict[str, Any]]): The run config to validate\n\n Returns:\n Dict[str, Any]: A dictionary representation of the validated config.\n """\n check.inst_param(job_def, "job_def", JobDefinition)\n run_config = check.opt_mapping_param(\n convert_config_input(run_config), "run_config", key_type=str\n )\n\n return ResolvedRunConfig.build(job_def, run_config).to_dict()
\n
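A short sketch of `validate_run_config` in use (the op and config names are illustrative): it returns the fully resolved config on success and raises `DagsterInvalidConfigError` when the blob does not satisfy the job's config schema.

```python
from dagster import job, op, validate_run_config


@op(config_schema={"name": str})
def greet(context) -> str:
    return f"hello, {context.op_config['name']}"


@job
def greet_job():
    greet()


if __name__ == "__main__":
    # Fully resolved config dict, including any defaults filled in.
    validated = validate_run_config(
        greet_job,
        run_config={"ops": {"greet": {"config": {"name": "world"}}}},
    )
    print(validated)
```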
", "current_page_name": "_modules/dagster/_core/execution/validate_run_config", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.execution.validate_run_config"}, "with_resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.execution.with_resources

\nfrom typing import Any, Iterable, List, Mapping, Optional, Sequence, TypeVar, cast\n\nfrom dagster import _check as check\nfrom dagster._core.execution.build_resources import wrap_resources_for_execution\nfrom dagster._utils.merger import merge_dicts\n\nfrom ..._config import Shape\nfrom ..definitions.resource_requirement import ResourceAddable\nfrom ..definitions.utils import DEFAULT_IO_MANAGER_KEY\nfrom ..errors import DagsterInvalidConfigError, DagsterInvalidInvocationError\n\nT = TypeVar("T", bound=ResourceAddable)\n\n\n
[docs]def with_resources(\n definitions: Iterable[T],\n resource_defs: Mapping[str, object],\n resource_config_by_key: Optional[Mapping[str, Any]] = None,\n) -> Sequence[T]:\n """Adds dagster resources to copies of resource-requiring dagster definitions.\n\n An error will be thrown if any provided definitions have a conflicting\n resource definition provided for a key provided to resource_defs. Resource\n config can be provided, with keys in the config dictionary corresponding to\n the keys for each resource definition. If any definition has unsatisfied\n resource keys after applying with_resources, an error will be thrown.\n\n Args:\n definitions (Iterable[ResourceAddable]): Dagster definitions to provide resources to.\n resource_defs (Mapping[str, object]):\n Mapping of resource keys to objects to satisfy\n resource requirements of provided dagster definitions.\n resource_config_by_key (Optional[Mapping[str, Any]]):\n Specifies config for provided resources. The key in this dictionary\n corresponds to configuring the same key in the resource_defs\n dictionary.\n\n Examples:\n .. code-block:: python\n\n from dagster import asset, resource, with_resources\n\n @resource(config_schema={"bar": str})\n def foo_resource():\n ...\n\n @asset(required_resource_keys={"foo"})\n def asset1(context):\n foo = context.resources.foo\n ...\n\n @asset(required_resource_keys={"foo"})\n def asset2(context):\n foo = context.resources.foo\n ...\n\n asset1_with_foo, asset2_with_foo = with_resources(\n [the_asset, other_asset],\n resource_config_by_key={\n "foo": {\n "config": {"bar": ...}\n }\n }\n )\n """\n from dagster._config import validate_config\n from dagster._core.definitions.job_definition import (\n default_job_io_manager_with_fs_io_manager_schema,\n )\n\n check.mapping_param(resource_defs, "resource_defs")\n resource_config_by_key = check.opt_mapping_param(\n resource_config_by_key, "resource_config_by_key"\n )\n\n resource_defs = wrap_resources_for_execution(\n merge_dicts(\n {DEFAULT_IO_MANAGER_KEY: default_job_io_manager_with_fs_io_manager_schema},\n resource_defs,\n )\n )\n\n for key, resource_def in resource_defs.items():\n if key in resource_config_by_key:\n resource_config = resource_config_by_key[key]\n if not isinstance(resource_config, dict) or "config" not in resource_config:\n raise DagsterInvalidInvocationError(\n f"Error with config for resource key '{key}': Expected a "\n "dictionary of the form {'config': ...}, but received "\n f"{resource_config}"\n )\n\n outer_config_shape = Shape({"config": resource_def.get_config_field()})\n config_evr = validate_config(outer_config_shape, resource_config)\n if not config_evr.success:\n raise DagsterInvalidConfigError(\n f"Error when applying config for resource with key '{key}' ",\n config_evr.errors,\n resource_config,\n )\n resource_defs[key] = resource_defs[key].configured(resource_config["config"])\n\n transformed_defs: List[T] = []\n for definition in definitions:\n transformed_defs.append(cast(T, definition.with_resources(resource_defs)))\n\n return transformed_defs
\n
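Since the docstring example above elides the `resource_defs` argument, here is a self-contained sketch of the full call shape (the resource and asset names are illustrative): config is supplied per resource key under a `"config"` entry, matching the validation performed above.

```python
from dagster import asset, materialize, resource, with_resources


@resource(config_schema={"prefix": str})
def prefix_resource(init_context):
    return init_context.resource_config["prefix"]


@asset(required_resource_keys={"prefix"})
def greeting(context) -> str:
    return f"{context.resources.prefix}, world"


if __name__ == "__main__":
    (greeting_bound,) = with_resources(
        [greeting],
        resource_defs={"prefix": prefix_resource},
        resource_config_by_key={"prefix": {"config": {"prefix": "hello"}}},
    )
    result = materialize([greeting_bound])
    assert result.output_for_node("greeting") == "hello, world"
```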
", "current_page_name": "_modules/dagster/_core/execution/with_resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.execution.with_resources"}}, "executor": {"base": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.executor.base

\nfrom abc import ABC, abstractmethod\nfrom typing import TYPE_CHECKING, Iterator\n\nfrom dagster._annotations import public\nfrom dagster._core.execution.retries import RetryMode\n\nif TYPE_CHECKING:\n    from dagster._core.events import DagsterEvent\n    from dagster._core.execution.context.system import PlanOrchestrationContext\n    from dagster._core.execution.plan.plan import ExecutionPlan\n\n\n
[docs]class Executor(ABC):\n
[docs] @public\n @abstractmethod\n def execute(\n self, plan_context: "PlanOrchestrationContext", execution_plan: "ExecutionPlan"\n ) -> Iterator["DagsterEvent"]:\n """For the given context and execution plan, orchestrate a series of sub plan executions in a way that satisfies the whole plan being executed.\n\n Args:\n plan_context (PlanOrchestrationContext): The plan's orchestration context.\n execution_plan (ExecutionPlan): The plan to execute.\n\n Returns:\n A stream of dagster events.\n """
\n\n @public\n @property\n @abstractmethod\n def retries(self) -> RetryMode:\n """Whether retries are enabled or disabled for this instance of the executor.\n\n Executors should allow this to be controlled via configuration if possible.\n\n Returns: RetryMode\n """
\n
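The `Executor` ABC above is what the built-in executors (for example `in_process_executor` and `multiprocess_executor`) implement; the configured instance is constructed from the `InitExecutorContext` defined in the next module. A minimal, hedged sketch of selecting a built-in executor on a job (names illustrative):

```python
from dagster import in_process_executor, job, op


@op
def noop() -> None:
    pass


# Runs all steps serially in the launching process; retry behavior is governed
# by the executor's RetryMode, as described above.
@job(executor_def=in_process_executor)
def single_process_job():
    noop()


if __name__ == "__main__":
    assert single_process_job.execute_in_process().success
```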
", "current_page_name": "_modules/dagster/_core/executor/base", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.executor.base"}, "init": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.executor.init

\nfrom typing import Mapping, NamedTuple\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr\nfrom dagster._core.definitions import ExecutorDefinition, IJob\nfrom dagster._core.instance import DagsterInstance\n\n\n
[docs]class InitExecutorContext(\n NamedTuple(\n "InitExecutorContext",\n [\n ("job", PublicAttr[IJob]),\n ("executor_def", PublicAttr[ExecutorDefinition]),\n ("executor_config", PublicAttr[Mapping[str, object]]),\n ("instance", PublicAttr[DagsterInstance]),\n ],\n )\n):\n """Executor-specific initialization context.\n\n Attributes:\n job (IJob): The job to be executed.\n executor_def (ExecutorDefinition): The definition of the executor currently being\n constructed.\n executor_config (dict): The parsed config passed to the executor.\n instance (DagsterInstance): The current instance.\n """\n\n def __new__(\n cls,\n job: IJob,\n executor_def: ExecutorDefinition,\n executor_config: Mapping[str, object],\n instance: DagsterInstance,\n ):\n return super(InitExecutorContext, cls).__new__(\n cls,\n job=check.inst_param(job, "job", IJob),\n executor_def=check.inst_param(executor_def, "executor_def", ExecutorDefinition),\n executor_config=check.mapping_param(executor_config, "executor_config", key_type=str),\n instance=check.inst_param(instance, "instance", DagsterInstance),\n )
\n
", "current_page_name": "_modules/dagster/_core/executor/init", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.executor.init"}}, "instance": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.instance

\nimport logging\nimport logging.config\nimport os\nimport sys\nimport time\nimport weakref\nfrom abc import abstractmethod\nfrom collections import defaultdict\nfrom enum import Enum\nfrom tempfile import TemporaryDirectory\nfrom types import TracebackType\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Callable,\n    Dict,\n    Generic,\n    Iterable,\n    List,\n    Mapping,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n    Type,\n    Union,\n    cast,\n)\n\nimport yaml\nfrom typing_extensions import Protocol, Self, TypeAlias, TypeVar, runtime_checkable\n\nimport dagster._check as check\nfrom dagster._annotations import experimental, public\nfrom dagster._core.definitions.asset_check_evaluation import (\n    AssetCheckEvaluation,\n    AssetCheckEvaluationPlanned,\n)\nfrom dagster._core.definitions.data_version import extract_data_provenance_from_entry\nfrom dagster._core.definitions.events import AssetKey, AssetObservation\nfrom dagster._core.errors import (\n    DagsterHomeNotSetError,\n    DagsterInvalidInvocationError,\n    DagsterInvariantViolationError,\n    DagsterRunAlreadyExists,\n    DagsterRunConflict,\n)\nfrom dagster._core.log_manager import DagsterLogRecord\nfrom dagster._core.origin import JobPythonOrigin\nfrom dagster._core.storage.dagster_run import (\n    IN_PROGRESS_RUN_STATUSES,\n    DagsterRun,\n    DagsterRunStatsSnapshot,\n    DagsterRunStatus,\n    JobBucket,\n    RunPartitionData,\n    RunRecord,\n    RunsFilter,\n    TagBucket,\n)\nfrom dagster._core.storage.tags import (\n    ASSET_PARTITION_RANGE_END_TAG,\n    ASSET_PARTITION_RANGE_START_TAG,\n    PARENT_RUN_ID_TAG,\n    PARTITION_NAME_TAG,\n    RESUME_RETRY_TAG,\n    ROOT_RUN_ID_TAG,\n)\nfrom dagster._serdes import ConfigurableClass\nfrom dagster._seven import get_current_datetime_in_utc\nfrom dagster._utils import PrintFn, traced\nfrom dagster._utils.error import serializable_error_info_from_exc_info\nfrom dagster._utils.merger import merge_dicts\nfrom dagster._utils.warnings import (\n    deprecation_warning,\n    experimental_warning,\n)\n\nfrom .config import (\n    DAGSTER_CONFIG_YAML_FILENAME,\n    DEFAULT_LOCAL_CODE_SERVER_STARTUP_TIMEOUT,\n    get_default_tick_retention_settings,\n    get_tick_retention_settings,\n)\nfrom .ref import InstanceRef\n\n# 'airflow_execution_date' and 'is_airflow_ingest_pipeline' are hardcoded tags used in the\n# airflow ingestion logic (see: dagster_pipeline_factory.py). 
'airflow_execution_date' stores the\n# 'execution_date' used in Airflow operator execution and 'is_airflow_ingest_pipeline' determines\n# whether 'airflow_execution_date' is needed.\n# https://github.com/dagster-io/dagster/issues/2403\nAIRFLOW_EXECUTION_DATE_STR = "airflow_execution_date"\nIS_AIRFLOW_INGEST_PIPELINE_STR = "is_airflow_ingest_pipeline"\n\n# Our internal guts can handle empty strings for job name and run id\n# However making these named constants for documentation, to encode where we are making the assumption,\n# and to allow us to change this more easily in the future, provided we are disciplined about\n# actually using this constants.\nRUNLESS_RUN_ID = ""\nRUNLESS_JOB_NAME = ""\n\nif TYPE_CHECKING:\n    from dagster._core.debug import DebugRunPayload\n    from dagster._core.definitions.asset_check_spec import AssetCheckKey\n    from dagster._core.definitions.job_definition import (\n        JobDefinition,\n    )\n    from dagster._core.definitions.partition import PartitionsDefinition\n    from dagster._core.definitions.repository_definition.repository_definition import (\n        RepositoryLoadData,\n    )\n    from dagster._core.definitions.run_request import InstigatorType\n    from dagster._core.event_api import EventHandlerFn\n    from dagster._core.events import (\n        AssetMaterialization,\n        DagsterEvent,\n        DagsterEventType,\n        EngineEventData,\n    )\n    from dagster._core.events.log import EventLogEntry\n    from dagster._core.execution.backfill import BulkActionStatus, PartitionBackfill\n    from dagster._core.execution.plan.plan import ExecutionPlan\n    from dagster._core.execution.plan.resume_retry import ReexecutionStrategy\n    from dagster._core.execution.stats import RunStepKeyStatsSnapshot\n    from dagster._core.host_representation import (\n        CodeLocation,\n        ExternalJob,\n        ExternalJobOrigin,\n        ExternalSensor,\n        HistoricalJob,\n    )\n    from dagster._core.host_representation.external import ExternalSchedule\n    from dagster._core.launcher import RunLauncher\n    from dagster._core.run_coordinator import RunCoordinator\n    from dagster._core.scheduler import Scheduler, SchedulerDebugInfo\n    from dagster._core.scheduler.instigation import (\n        InstigatorState,\n        InstigatorStatus,\n        InstigatorTick,\n        TickData,\n        TickStatus,\n    )\n    from dagster._core.secrets import SecretsLoader\n    from dagster._core.snap import ExecutionPlanSnapshot, JobSnapshot\n    from dagster._core.storage.asset_check_execution_record import AssetCheckInstanceSupport\n    from dagster._core.storage.compute_log_manager import ComputeLogManager\n    from dagster._core.storage.daemon_cursor import DaemonCursorStorage\n    from dagster._core.storage.event_log import EventLogStorage\n    from dagster._core.storage.event_log.base import (\n        AssetRecord,\n        EventLogConnection,\n        EventLogRecord,\n        EventRecordsFilter,\n    )\n    from dagster._core.storage.partition_status_cache import (\n        AssetPartitionStatus,\n        AssetStatusCacheValue,\n    )\n    from dagster._core.storage.root import LocalArtifactStorage\n    from dagster._core.storage.runs import RunStorage\n    from dagster._core.storage.schedules import ScheduleStorage\n    from dagster._core.storage.sql import AlembicVersion\n    from dagster._core.workspace.workspace import IWorkspace\n    from dagster._daemon.types import DaemonHeartbeat, DaemonStatus\n\n\nDagsterInstanceOverrides: TypeAlias = 
Mapping[str, Any]\n\n\ndef _check_run_equality(\n    pipeline_run: DagsterRun, candidate_run: DagsterRun\n) -> Mapping[str, Tuple[Any, Any]]:\n    field_diff: Dict[str, Tuple[Any, Any]] = {}\n    for field in pipeline_run._fields:\n        expected_value = getattr(pipeline_run, field)\n        candidate_value = getattr(candidate_run, field)\n        if expected_value != candidate_value:\n            field_diff[field] = (expected_value, candidate_value)\n\n    return field_diff\n\n\ndef _format_field_diff(field_diff: Mapping[str, Tuple[Any, Any]]) -> str:\n    return "\\n".join(\n        [\n            (\n                "    {field_name}:\\n"\n                + "        Expected: {expected_value}\\n"\n                + "        Received: {candidate_value}"\n            ).format(\n                field_name=field_name,\n                expected_value=expected_value,\n                candidate_value=candidate_value,\n            )\n            for field_name, (\n                expected_value,\n                candidate_value,\n            ) in field_diff.items()\n        ]\n    )\n\n\nclass _EventListenerLogHandler(logging.Handler):\n    def __init__(self, instance: "DagsterInstance"):\n        self._instance = instance\n        super(_EventListenerLogHandler, self).__init__()\n\n    def emit(self, record: DagsterLogRecord) -> None:\n        from dagster._core.events import EngineEventData\n        from dagster._core.events.log import StructuredLoggerMessage, construct_event_record\n\n        event = construct_event_record(\n            StructuredLoggerMessage(\n                name=record.name,\n                message=record.msg,\n                level=record.levelno,\n                meta=record.dagster_meta,  # type: ignore\n                record=record,\n            )\n        )\n\n        try:\n            self._instance.handle_new_event(event)\n        except Exception as e:\n            sys.stderr.write(f"Exception while writing logger call to event log: {e}\\n")\n            if event.dagster_event:\n                # Swallow user-generated log failures so that the entire step/run doesn't fail, but\n                # raise failures writing system-generated log events since they are the source of\n                # truth for the state of the run\n                raise\n            elif event.run_id:\n                self._instance.report_engine_event(\n                    "Exception while writing logger call to event log",\n                    job_name=event.job_name,\n                    run_id=event.run_id,\n                    step_key=event.step_key,\n                    engine_event_data=EngineEventData(\n                        error=serializable_error_info_from_exc_info(sys.exc_info()),\n                    ),\n                )\n\n\nclass InstanceType(Enum):\n    PERSISTENT = "PERSISTENT"\n    EPHEMERAL = "EPHEMERAL"\n\n\nT_DagsterInstance = TypeVar("T_DagsterInstance", bound="DagsterInstance", default="DagsterInstance")\n\n\nclass MayHaveInstanceWeakref(Generic[T_DagsterInstance]):\n    """Mixin for classes that can have a weakref back to a Dagster instance."""\n\n    _instance_weakref: "Optional[weakref.ReferenceType[T_DagsterInstance]]"\n\n    def __init__(self):\n        self._instance_weakref = None\n\n    @property\n    def has_instance(self) -> bool:\n        return hasattr(self, "_instance_weakref") and (self._instance_weakref is not None)\n\n    @property\n    def _instance(self) -> T_DagsterInstance:\n        instance = (\n            self._instance_weakref()\n     
       # Backcompat with custom subclasses that don't call super().__init__()\n            # in their own __init__ implementations\n            if (hasattr(self, "_instance_weakref") and self._instance_weakref is not None)\n            else None\n        )\n        if instance is None:\n            raise DagsterInvariantViolationError(\n                "Attempted to resolve undefined DagsterInstance weakref."\n            )\n        else:\n            return instance\n\n    def register_instance(self, instance: T_DagsterInstance) -> None:\n        check.invariant(\n            # Backcompat with custom subclasses that don't call super().__init__()\n            # in their own __init__ implementations\n            (not hasattr(self, "_instance_weakref") or self._instance_weakref is None),\n            "Must only call initialize once",\n        )\n\n        # Store a weakref to avoid a circular reference / enable GC\n        self._instance_weakref = weakref.ref(instance)\n\n\n@runtime_checkable\nclass DynamicPartitionsStore(Protocol):\n    @abstractmethod\n    def get_dynamic_partitions(self, partitions_def_name: str) -> Sequence[str]: ...\n\n    @abstractmethod\n    def has_dynamic_partition(self, partitions_def_name: str, partition_key: str) -> bool: ...\n\n\n
[docs]class DagsterInstance(DynamicPartitionsStore):\n """Core abstraction for managing Dagster's access to storage and other resources.\n\n Use DagsterInstance.get() to grab the current DagsterInstance which will load based on\n the values in the ``dagster.yaml`` file in ``$DAGSTER_HOME``.\n\n Alternatively, DagsterInstance.ephemeral() can use used which provides a set of\n transient in-memory components.\n\n Configuration of this class should be done by setting values in ``$DAGSTER_HOME/dagster.yaml``.\n For example, to use Postgres for dagster storage, you can write a ``dagster.yaml`` such as the\n following:\n\n .. literalinclude:: ../../../../../examples/docs_snippets/docs_snippets/deploying/dagster-pg.yaml\n :caption: dagster.yaml\n :language: YAML\n\n Args:\n instance_type (InstanceType): Indicates whether the instance is ephemeral or persistent.\n Users should not attempt to set this value directly or in their ``dagster.yaml`` files.\n local_artifact_storage (LocalArtifactStorage): The local artifact storage is used to\n configure storage for any artifacts that require a local disk, such as schedules, or\n when using the filesystem system storage to manage files and intermediates. By default,\n this will be a :py:class:`dagster._core.storage.root.LocalArtifactStorage`. Configurable\n in ``dagster.yaml`` using the :py:class:`~dagster.serdes.ConfigurableClass`\n machinery.\n run_storage (RunStorage): The run storage is used to store metadata about ongoing and past\n pipeline runs. By default, this will be a\n :py:class:`dagster._core.storage.runs.SqliteRunStorage`. Configurable in ``dagster.yaml``\n using the :py:class:`~dagster.serdes.ConfigurableClass` machinery.\n event_storage (EventLogStorage): Used to store the structured event logs generated by\n pipeline runs. By default, this will be a\n :py:class:`dagster._core.storage.event_log.SqliteEventLogStorage`. Configurable in\n ``dagster.yaml`` using the :py:class:`~dagster.serdes.ConfigurableClass` machinery.\n compute_log_manager (Optional[ComputeLogManager]): The compute log manager handles stdout\n and stderr logging for op compute functions. By default, this will be a\n :py:class:`dagster._core.storage.local_compute_log_manager.LocalComputeLogManager`.\n Configurable in ``dagster.yaml`` using the\n :py:class:`~dagster.serdes.ConfigurableClass` machinery.\n run_coordinator (Optional[RunCoordinator]): A runs coordinator may be used to manage the execution\n of pipeline runs.\n run_launcher (Optional[RunLauncher]): Optionally, a run launcher may be used to enable\n a Dagster instance to launch pipeline runs, e.g. on a remote Kubernetes cluster, in\n addition to running them locally.\n settings (Optional[Dict]): Specifies certain per-instance settings,\n such as feature flags. 
These are set in the ``dagster.yaml`` under a set of whitelisted\n keys.\n ref (Optional[InstanceRef]): Used by internal machinery to pass instances across process\n boundaries.\n """\n\n # Stores TemporaryDirectory instances that were created for DagsterInstance.local_temp() calls\n # to be removed once the instance is garbage collected.\n _TEMP_DIRS: "weakref.WeakKeyDictionary[DagsterInstance, TemporaryDirectory]" = (\n weakref.WeakKeyDictionary()\n )\n\n def __init__(\n self,\n instance_type: InstanceType,\n local_artifact_storage: "LocalArtifactStorage",\n run_storage: "RunStorage",\n event_storage: "EventLogStorage",\n run_coordinator: Optional["RunCoordinator"],\n compute_log_manager: Optional["ComputeLogManager"],\n run_launcher: Optional["RunLauncher"],\n scheduler: Optional["Scheduler"] = None,\n schedule_storage: Optional["ScheduleStorage"] = None,\n settings: Optional[Mapping[str, Any]] = None,\n secrets_loader: Optional["SecretsLoader"] = None,\n ref: Optional[InstanceRef] = None,\n **_kwargs: Any, # we accept kwargs for forward-compat of custom instances\n ):\n from dagster._core.launcher import RunLauncher\n from dagster._core.run_coordinator import RunCoordinator\n from dagster._core.scheduler import Scheduler\n from dagster._core.secrets import SecretsLoader\n from dagster._core.storage.captured_log_manager import CapturedLogManager\n from dagster._core.storage.compute_log_manager import ComputeLogManager\n from dagster._core.storage.event_log import EventLogStorage\n from dagster._core.storage.root import LocalArtifactStorage\n from dagster._core.storage.runs import RunStorage\n from dagster._core.storage.schedules import ScheduleStorage\n\n self._instance_type = check.inst_param(instance_type, "instance_type", InstanceType)\n self._local_artifact_storage = check.inst_param(\n local_artifact_storage, "local_artifact_storage", LocalArtifactStorage\n )\n self._event_storage = check.inst_param(event_storage, "event_storage", EventLogStorage)\n self._event_storage.register_instance(self)\n\n self._run_storage = check.inst_param(run_storage, "run_storage", RunStorage)\n self._run_storage.register_instance(self)\n\n if compute_log_manager:\n self._compute_log_manager = check.inst_param(\n compute_log_manager, "compute_log_manager", ComputeLogManager\n )\n if not isinstance(self._compute_log_manager, CapturedLogManager):\n deprecation_warning(\n "ComputeLogManager",\n "1.2.0",\n "Implement the CapturedLogManager interface instead.",\n )\n self._compute_log_manager.register_instance(self)\n else:\n check.invariant(\n ref, "Compute log manager must be provided if instance is not from a ref"\n )\n self._compute_log_manager = None\n\n self._scheduler = check.opt_inst_param(scheduler, "scheduler", Scheduler)\n\n self._schedule_storage = check.opt_inst_param(\n schedule_storage, "schedule_storage", ScheduleStorage\n )\n if self._schedule_storage:\n self._schedule_storage.register_instance(self)\n\n if run_coordinator:\n self._run_coordinator = check.inst_param(\n run_coordinator, "run_coordinator", RunCoordinator\n )\n self._run_coordinator.register_instance(self)\n else:\n check.invariant(ref, "Run coordinator must be provided if instance is not from a ref")\n self._run_coordinator = None\n\n if run_launcher:\n self._run_launcher: Optional[RunLauncher] = check.inst_param(\n run_launcher, "run_launcher", RunLauncher\n )\n run_launcher.register_instance(self)\n else:\n check.invariant(ref, "Run launcher must be provided if instance is not from a ref")\n self._run_launcher = None\n\n 
self._settings = check.opt_mapping_param(settings, "settings")\n\n self._secrets_loader = check.opt_inst_param(secrets_loader, "secrets_loader", SecretsLoader)\n\n if self._secrets_loader:\n self._secrets_loader.register_instance(self)\n\n self._ref = check.opt_inst_param(ref, "ref", InstanceRef)\n\n self._subscribers: Dict[str, List[Callable]] = defaultdict(list)\n\n run_monitoring_enabled = self.run_monitoring_settings.get("enabled", False)\n self._run_monitoring_enabled = run_monitoring_enabled\n if self.run_monitoring_enabled and self.run_monitoring_max_resume_run_attempts:\n check.invariant(\n self.run_launcher.supports_resume_run,\n "The configured run launcher does not support resuming runs. Set"\n " max_resume_run_attempts to 0 to use run monitoring. Any runs with a failed"\n " run worker will be marked as failed, but will not be resumed.",\n )\n\n if self.run_retries_enabled:\n check.invariant(\n self.event_log_storage.supports_event_consumer_queries(),\n "Run retries are enabled, but the configured event log storage does not support"\n " them. Consider switching to Postgres or Mysql.",\n )\n\n # ctors\n\n
[docs] @public\n @staticmethod\n def ephemeral(\n tempdir: Optional[str] = None,\n preload: Optional[Sequence["DebugRunPayload"]] = None,\n settings: Optional[Dict] = None,\n ) -> "DagsterInstance":\n """Create a `DagsterInstance` suitable for ephemeral execution, useful in test contexts. An\n ephemeral instance uses mostly in-memory components. Use `local_temp` to create a test\n instance that is fully persistent.\n\n Args:\n tempdir (Optional[str]): The path of a directory to be used for local artifact storage.\n preload (Optional[Sequence[DebugRunPayload]]): A sequence of payloads to load into the\n instance's run storage. Useful for debugging.\n settings (Optional[Dict]): Settings for the instance.\n\n Returns:\n DagsterInstance: An ephemeral DagsterInstance.\n """\n from dagster._core.launcher.sync_in_memory_run_launcher import SyncInMemoryRunLauncher\n from dagster._core.run_coordinator import DefaultRunCoordinator\n from dagster._core.storage.event_log import InMemoryEventLogStorage\n from dagster._core.storage.noop_compute_log_manager import NoOpComputeLogManager\n from dagster._core.storage.root import LocalArtifactStorage, TemporaryLocalArtifactStorage\n from dagster._core.storage.runs import InMemoryRunStorage\n\n if tempdir is not None:\n local_storage = LocalArtifactStorage(tempdir)\n else:\n local_storage = TemporaryLocalArtifactStorage()\n\n return DagsterInstance(\n instance_type=InstanceType.EPHEMERAL,\n local_artifact_storage=local_storage,\n run_storage=InMemoryRunStorage(preload=preload),\n event_storage=InMemoryEventLogStorage(preload=preload),\n compute_log_manager=NoOpComputeLogManager(),\n run_coordinator=DefaultRunCoordinator(),\n run_launcher=SyncInMemoryRunLauncher(),\n settings=settings,\n )
\n\n
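A minimal test-style usage of `DagsterInstance.ephemeral()` as documented above; `my_asset` and the test function name are hypothetical.

from dagster import DagsterInstance, asset, materialize


@asset
def my_asset() -> int:
    return 1


def test_materialize_on_ephemeral_instance() -> None:
    # Ephemeral instances are backed by in-memory storage, so nothing persists
    # after the test completes.
    instance = DagsterInstance.ephemeral()
    result = materialize([my_asset], instance=instance)
    assert result.success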
[docs] @public\n @staticmethod\n def get() -> "DagsterInstance":\n """Get the current `DagsterInstance` as specified by the ``DAGSTER_HOME`` environment variable.\n\n Returns:\n DagsterInstance: The current DagsterInstance.\n """\n dagster_home_path = os.getenv("DAGSTER_HOME")\n\n if not dagster_home_path:\n raise DagsterHomeNotSetError(\n "The environment variable $DAGSTER_HOME is not set. \\nDagster requires this"\n " environment variable to be set to an existing directory in your filesystem. This"\n " directory is used to store metadata across sessions, or load the dagster.yaml"\n " file which can configure storing metadata in an external database.\\nYou can"\n " resolve this error by exporting the environment variable. For example, you can"\n " run the following command in your shell or include it in your shell configuration"\n ' file:\\n\\texport DAGSTER_HOME=~"/dagster_home"\\nor PowerShell\\n$env:DAGSTER_HOME'\n " = ($home + '\\\\dagster_home')or batchset"\n " DAGSTER_HOME=%UserProfile%/dagster_homeAlternatively, DagsterInstance.ephemeral()"\n " can be used for a transient instance.\\n"\n )\n\n dagster_home_path = os.path.expanduser(dagster_home_path)\n\n if not os.path.isabs(dagster_home_path):\n raise DagsterInvariantViolationError(\n (\n '$DAGSTER_HOME "{}" must be an absolute path. Dagster requires this '\n "environment variable to be set to an existing directory in your filesystem."\n ).format(dagster_home_path)\n )\n\n if not (os.path.exists(dagster_home_path) and os.path.isdir(dagster_home_path)):\n raise DagsterInvariantViolationError(\n (\n '$DAGSTER_HOME "{}" is not a directory or does not exist. Dagster requires this'\n " environment variable to be set to an existing directory in your filesystem"\n ).format(dagster_home_path)\n )\n\n return DagsterInstance.from_config(dagster_home_path)
\n\n
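A hedged sketch of `DagsterInstance.get()`: it resolves the instance from `$DAGSTER_HOME`, which must point at an existing absolute directory. The path used here is hypothetical.

import os

from dagster import DagsterInstance

# Hypothetical path; the directory must already exist for get() to succeed.
os.environ["DAGSTER_HOME"] = os.path.expanduser("~/dagster_home")

instance = DagsterInstance.get()
print(instance.info_str())  # summarizes configured storages, launcher, etc.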
[docs] @public\n @staticmethod\n def local_temp(\n tempdir: Optional[str] = None,\n overrides: Optional[DagsterInstanceOverrides] = None,\n ) -> "DagsterInstance":\n """Create a DagsterInstance that uses a temporary directory for local storage. This is a\n regular, fully persistent instance. Use `ephemeral` to get an ephemeral instance with\n in-memory components.\n\n Args:\n tempdir (Optional[str]): The path of a directory to be used for local artifact storage.\n overrides (Optional[DagsterInstanceOverrides]): Override settings for the instance.\n\n Returns:\n DagsterInstance\n """\n if tempdir is None:\n created_dir = TemporaryDirectory()\n i = DagsterInstance.from_ref(\n InstanceRef.from_dir(created_dir.name, overrides=overrides)\n )\n DagsterInstance._TEMP_DIRS[i] = created_dir\n return i\n\n return DagsterInstance.from_ref(InstanceRef.from_dir(tempdir, overrides=overrides))
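A small sketch of `DagsterInstance.local_temp()`, which, unlike `ephemeral()`, returns a fully persistent instance rooted in a temporary directory; the `telemetry` override shown is a standard `dagster.yaml` settings key.

from dagster import DagsterInstance

instance = DagsterInstance.local_temp(overrides={"telemetry": {"enabled": False}})
assert instance.is_persistent
print(instance.root_directory)  # the temporary directory backing this instance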
\n\n @staticmethod\n def from_config(\n config_dir: str,\n config_filename: str = DAGSTER_CONFIG_YAML_FILENAME,\n ) -> "DagsterInstance":\n instance_ref = InstanceRef.from_dir(config_dir, config_filename=config_filename)\n return DagsterInstance.from_ref(instance_ref)\n\n @staticmethod\n def from_ref(instance_ref: InstanceRef) -> "DagsterInstance":\n check.inst_param(instance_ref, "instance_ref", InstanceRef)\n\n # DagsterInstance doesn't implement ConfigurableClass, but we may still sometimes want to\n # have custom subclasses of DagsterInstance. This machinery allows for those custom\n # subclasses to receive additional keyword arguments passed through the config YAML.\n klass = instance_ref.custom_instance_class or DagsterInstance\n kwargs = instance_ref.custom_instance_class_config\n\n unified_storage = instance_ref.storage\n run_storage = unified_storage.run_storage if unified_storage else instance_ref.run_storage\n event_storage = (\n unified_storage.event_log_storage if unified_storage else instance_ref.event_storage\n )\n schedule_storage = (\n unified_storage.schedule_storage if unified_storage else instance_ref.schedule_storage\n )\n\n return klass(\n instance_type=InstanceType.PERSISTENT,\n local_artifact_storage=instance_ref.local_artifact_storage,\n run_storage=run_storage, # type: ignore # (possible none)\n event_storage=event_storage, # type: ignore # (possible none)\n schedule_storage=schedule_storage,\n compute_log_manager=None, # lazy load\n scheduler=instance_ref.scheduler,\n run_coordinator=None, # lazy load\n run_launcher=None, # lazy load\n settings=instance_ref.settings,\n secrets_loader=instance_ref.secrets_loader,\n ref=instance_ref,\n **kwargs,\n )\n\n # flags\n\n @property\n def is_persistent(self) -> bool:\n return self._instance_type == InstanceType.PERSISTENT\n\n @property\n def is_ephemeral(self) -> bool:\n return self._instance_type == InstanceType.EPHEMERAL\n\n def get_ref(self) -> InstanceRef:\n if self._ref:\n return self._ref\n\n check.failed(\n "Attempted to prepare an ineligible DagsterInstance ({inst_type}) for cross "\n "process communication.{dagster_home_msg}".format(\n inst_type=self._instance_type,\n dagster_home_msg=(\n "\\nDAGSTER_HOME environment variable is not set, set it to "\n "a directory on the filesystem for dagster to use for storage and cross "\n "process coordination."\n if os.getenv("DAGSTER_HOME") is None\n else ""\n ),\n )\n )\n\n @property\n def root_directory(self) -> str:\n return self._local_artifact_storage.base_dir\n\n def _info(self, component: object) -> Union[str, Mapping[Any, Any]]:\n # ConfigurableClass may not have inst_data if it's a direct instantiation\n # which happens for ephemeral instances\n if isinstance(component, ConfigurableClass) and component.inst_data:\n return component.inst_data.info_dict()\n if type(component) is dict:\n return component\n return component.__class__.__name__\n\n def _info_str_for_component(self, component_name: str, component: object) -> str:\n return yaml.dump(\n {component_name: self._info(component)}, default_flow_style=False, sort_keys=False\n )\n\n def info_dict(self) -> Mapping[str, object]:\n settings: Mapping[str, object] = self._settings if self._settings else {}\n\n ret = {\n "local_artifact_storage": self._info(self._local_artifact_storage),\n "run_storage": self._info(self._run_storage),\n "event_log_storage": self._info(self._event_storage),\n "compute_logs": self._info(self._compute_log_manager),\n "schedule_storage": self._info(self._schedule_storage),\n "scheduler": 
self._info(self._scheduler),\n "run_coordinator": self._info(self._run_coordinator),\n "run_launcher": self._info(self.run_launcher),\n }\n ret.update(\n {\n settings_key: self._info(settings_value)\n for settings_key, settings_value in settings.items()\n }\n )\n\n return ret\n\n def info_str(self) -> str:\n return yaml.dump(self.info_dict(), default_flow_style=False, sort_keys=False)\n\n def schema_str(self) -> str:\n def _schema_dict(alembic_version: "AlembicVersion") -> Optional[Mapping[str, object]]:\n if not alembic_version:\n return None\n db_revision, head_revision = alembic_version\n return {\n "current": db_revision,\n "latest": head_revision,\n }\n\n return yaml.dump(\n {\n "schema": {\n "event_log_storage": _schema_dict(self._event_storage.alembic_version()), # type: ignore # (possible none)\n "run_storage": _schema_dict(self._event_storage.alembic_version()), # type: ignore # (possible none)\n "schedule_storage": _schema_dict(self._event_storage.alembic_version()), # type: ignore # (possible none)\n }\n },\n default_flow_style=False,\n sort_keys=False,\n )\n\n @property\n def run_storage(self) -> "RunStorage":\n return self._run_storage\n\n @property\n def event_log_storage(self) -> "EventLogStorage":\n return self._event_storage\n\n @property\n def daemon_cursor_storage(self) -> "DaemonCursorStorage":\n return self._run_storage\n\n # schedule storage\n\n @property\n def schedule_storage(self) -> Optional["ScheduleStorage"]:\n return self._schedule_storage\n\n @property\n def scheduler(self) -> Optional["Scheduler"]:\n return self._scheduler\n\n @property\n def scheduler_class(self) -> Optional[str]:\n return self.scheduler.__class__.__name__ if self.scheduler else None\n\n # run coordinator\n\n @property\n def run_coordinator(self) -> "RunCoordinator":\n # Lazily load in case the run coordinator requires dependencies that are not available\n # everywhere that loads the instance\n if not self._run_coordinator:\n check.invariant(\n self._ref, "Run coordinator not provided, and no instance ref available"\n )\n run_coordinator = cast(InstanceRef, self._ref).run_coordinator\n check.invariant(run_coordinator, "Run coordinator not configured in instance ref")\n self._run_coordinator = cast("RunCoordinator", run_coordinator)\n self._run_coordinator.register_instance(self)\n return self._run_coordinator\n\n # run launcher\n\n @property\n def run_launcher(self) -> "RunLauncher":\n # Lazily load in case the launcher requires dependencies that are not available everywhere\n # that loads the instance (e.g. 
The EcsRunLauncher requires boto3)\n if not self._run_launcher:\n check.invariant(self._ref, "Run launcher not provided, and no instance ref available")\n launcher = cast(InstanceRef, self._ref).run_launcher\n check.invariant(launcher, "Run launcher not configured in instance ref")\n self._run_launcher = cast("RunLauncher", launcher)\n self._run_launcher.register_instance(self)\n return self._run_launcher\n\n # compute logs\n\n @property\n def compute_log_manager(self) -> "ComputeLogManager":\n if not self._compute_log_manager:\n check.invariant(\n self._ref, "Compute log manager not provided, and no instance ref available"\n )\n compute_log_manager = cast(InstanceRef, self._ref).compute_log_manager\n check.invariant(\n compute_log_manager, "Compute log manager not configured in instance ref"\n )\n self._compute_log_manager = cast("ComputeLogManager", compute_log_manager)\n self._compute_log_manager.register_instance(self)\n return self._compute_log_manager\n\n def get_settings(self, settings_key: str) -> Any:\n check.str_param(settings_key, "settings_key")\n if self._settings and settings_key in self._settings:\n return self._settings.get(settings_key)\n return {}\n\n @property\n def telemetry_enabled(self) -> bool:\n if self.is_ephemeral:\n return False\n\n dagster_telemetry_enabled_default = True\n\n telemetry_settings = self.get_settings("telemetry")\n\n if not telemetry_settings:\n return dagster_telemetry_enabled_default\n\n if "enabled" in telemetry_settings:\n return telemetry_settings["enabled"]\n else:\n return dagster_telemetry_enabled_default\n\n @property\n def nux_enabled(self) -> bool:\n if self.is_ephemeral:\n return False\n\n nux_enabled_by_default = True\n\n nux_settings = self.get_settings("nux")\n if not nux_settings:\n return nux_enabled_by_default\n\n if "enabled" in nux_settings:\n return nux_settings["enabled"]\n else:\n return nux_enabled_by_default\n\n # run monitoring\n\n @property\n def run_monitoring_enabled(self) -> bool:\n return self._run_monitoring_enabled\n\n @property\n def run_monitoring_settings(self) -> Any:\n return self.get_settings("run_monitoring")\n\n @property\n def run_monitoring_start_timeout_seconds(self) -> int:\n return self.run_monitoring_settings.get("start_timeout_seconds", 180)\n\n @property\n def run_monitoring_cancel_timeout_seconds(self) -> int:\n return self.run_monitoring_settings.get("cancel_timeout_seconds", 180)\n\n @property\n def code_server_settings(self) -> Any:\n return self.get_settings("code_servers")\n\n @property\n def code_server_process_startup_timeout(self) -> int:\n return self.code_server_settings.get(\n "local_startup_timeout", DEFAULT_LOCAL_CODE_SERVER_STARTUP_TIMEOUT\n )\n\n @property\n def code_server_reload_timeout(self) -> int:\n return self.code_server_settings.get(\n "reload_timeout", DEFAULT_LOCAL_CODE_SERVER_STARTUP_TIMEOUT\n )\n\n @property\n def wait_for_local_code_server_processes_on_shutdown(self) -> bool:\n return self.code_server_settings.get("wait_for_local_processes_on_shutdown", False)\n\n @property\n def run_monitoring_max_resume_run_attempts(self) -> int:\n return self.run_monitoring_settings.get("max_resume_run_attempts", 0)\n\n @property\n def run_monitoring_poll_interval_seconds(self) -> int:\n return self.run_monitoring_settings.get("poll_interval_seconds", 120)\n\n @property\n def cancellation_thread_poll_interval_seconds(self) -> int:\n return self.get_settings("run_monitoring").get(\n "cancellation_thread_poll_interval_seconds", 10\n )\n\n @property\n def run_retries_enabled(self) -> 
bool:\n return self.get_settings("run_retries").get("enabled", False)\n\n @property\n def run_retries_max_retries(self) -> int:\n return self.get_settings("run_retries").get("max_retries")\n\n @property\n def auto_materialize_enabled(self) -> bool:\n return self.get_settings("auto_materialize").get("enabled", True)\n\n @property\n def auto_materialize_minimum_interval_seconds(self) -> int:\n return self.get_settings("auto_materialize").get("minimum_interval_seconds")\n\n @property\n def auto_materialize_run_tags(self) -> Dict[str, str]:\n return self.get_settings("auto_materialize").get("run_tags", {})\n\n @property\n def auto_materialize_respect_materialization_data_versions(self) -> bool:\n return self.get_settings("auto_materialize").get(\n "respect_materialization_data_versions", False\n )\n\n # python logs\n\n @property\n def managed_python_loggers(self) -> Sequence[str]:\n python_log_settings = self.get_settings("python_logs") or {}\n loggers: Sequence[str] = python_log_settings.get("managed_python_loggers", [])\n return loggers\n\n @property\n def python_log_level(self) -> Optional[str]:\n python_log_settings = self.get_settings("python_logs") or {}\n return python_log_settings.get("python_log_level")\n\n def upgrade(self, print_fn: Optional[PrintFn] = None) -> None:\n from dagster._core.storage.migration.utils import upgrading_instance\n\n with upgrading_instance(self):\n if print_fn:\n print_fn("Updating run storage...")\n self._run_storage.upgrade() # type: ignore # (unknown method on run storage)\n self._run_storage.migrate(print_fn)\n\n if print_fn:\n print_fn("Updating event storage...")\n self._event_storage.upgrade()\n self._event_storage.reindex_assets(print_fn=print_fn)\n\n if print_fn:\n print_fn("Updating schedule storage...")\n self._schedule_storage.upgrade() # type: ignore # (possible none)\n self._schedule_storage.migrate(print_fn) # type: ignore # (possible none)\n\n def optimize_for_webserver(self, statement_timeout: int, pool_recycle: int) -> None:\n if self._schedule_storage:\n self._schedule_storage.optimize_for_webserver(\n statement_timeout=statement_timeout, pool_recycle=pool_recycle\n )\n self._run_storage.optimize_for_webserver(\n statement_timeout=statement_timeout, pool_recycle=pool_recycle\n )\n self._event_storage.optimize_for_webserver(\n statement_timeout=statement_timeout, pool_recycle=pool_recycle\n )\n\n def reindex(self, print_fn: PrintFn = lambda _: None) -> None:\n print_fn("Checking for reindexing...")\n self._event_storage.reindex_events(print_fn)\n self._event_storage.reindex_assets(print_fn)\n self._run_storage.optimize(print_fn)\n self._schedule_storage.optimize(print_fn) # type: ignore # (possible none)\n print_fn("Done.")\n\n def dispose(self) -> None:\n self._local_artifact_storage.dispose()\n self._run_storage.dispose()\n if self._run_coordinator:\n self._run_coordinator.dispose()\n if self._run_launcher:\n self._run_launcher.dispose()\n self._event_storage.dispose()\n if self._compute_log_manager:\n self._compute_log_manager.dispose()\n if self._secrets_loader:\n self._secrets_loader.dispose()\n\n if self in DagsterInstance._TEMP_DIRS:\n DagsterInstance._TEMP_DIRS[self].cleanup()\n del DagsterInstance._TEMP_DIRS[self]\n\n # run storage\n
[docs] @public\n def get_run_by_id(self, run_id: str) -> Optional[DagsterRun]:\n """Get a :py:class:`DagsterRun` matching the provided `run_id`.\n\n Args:\n run_id (str): The id of the run to retrieve.\n\n Returns:\n Optional[DagsterRun]: The run corresponding to the given id. If no run matching the id\n is found, return `None`.\n """\n record = self.get_run_record_by_id(run_id)\n if record is None:\n return None\n return record.dagster_run
\n\n
[docs] @public\n @traced\n def get_run_record_by_id(self, run_id: str) -> Optional[RunRecord]:\n """Get a :py:class:`RunRecord` matching the provided `run_id`.\n\n Args:\n run_id (str): The id of the run record to retrieve.\n\n Returns:\n Optional[RunRecord]: The run record corresponding to the given id. If no run matching\n the id is found, return `None`.\n """\n records = self._run_storage.get_run_records(RunsFilter(run_ids=[run_id]))\n if not records:\n return None\n return records[0]
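A hedged example of the run lookups documented above (`get_run_by_id` and `get_run_record_by_id`), reusing the ephemeral-instance pattern; `my_asset` is hypothetical.

from dagster import DagsterInstance, DagsterRunStatus, asset, materialize


@asset
def my_asset() -> int:
    return 1


instance = DagsterInstance.ephemeral()
result = materialize([my_asset], instance=instance)

run = instance.get_run_by_id(result.run_id)            # Optional[DagsterRun]
assert run is not None and run.status == DagsterRunStatus.SUCCESS

record = instance.get_run_record_by_id(result.run_id)  # Optional[RunRecord]
assert record is not None and record.dagster_run.run_id == result.run_id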
\n\n @traced\n def get_job_snapshot(self, snapshot_id: str) -> "JobSnapshot":\n return self._run_storage.get_job_snapshot(snapshot_id)\n\n @traced\n def has_job_snapshot(self, snapshot_id: str) -> bool:\n return self._run_storage.has_job_snapshot(snapshot_id)\n\n @traced\n def has_snapshot(self, snapshot_id: str) -> bool:\n return self._run_storage.has_snapshot(snapshot_id)\n\n @traced\n def get_historical_job(self, snapshot_id: str) -> "HistoricalJob":\n from dagster._core.host_representation import HistoricalJob\n\n snapshot = self._run_storage.get_job_snapshot(snapshot_id)\n parent_snapshot = (\n self._run_storage.get_job_snapshot(snapshot.lineage_snapshot.parent_snapshot_id)\n if snapshot.lineage_snapshot\n else None\n )\n return HistoricalJob(snapshot, snapshot_id, parent_snapshot)\n\n @traced\n def has_historical_job(self, snapshot_id: str) -> bool:\n return self._run_storage.has_job_snapshot(snapshot_id)\n\n @traced\n def get_execution_plan_snapshot(self, snapshot_id: str) -> "ExecutionPlanSnapshot":\n return self._run_storage.get_execution_plan_snapshot(snapshot_id)\n\n @traced\n def get_run_stats(self, run_id: str) -> DagsterRunStatsSnapshot:\n return self._event_storage.get_stats_for_run(run_id)\n\n @traced\n def get_run_step_stats(\n self, run_id: str, step_keys: Optional[Sequence[str]] = None\n ) -> Sequence["RunStepKeyStatsSnapshot"]:\n return self._event_storage.get_step_stats_for_run(run_id, step_keys)\n\n @traced\n def get_run_tags(\n self,\n tag_keys: Optional[Sequence[str]] = None,\n value_prefix: Optional[str] = None,\n limit: Optional[int] = None,\n ) -> Sequence[Tuple[str, Set[str]]]:\n return self._run_storage.get_run_tags(\n tag_keys=tag_keys, value_prefix=value_prefix, limit=limit\n )\n\n @traced\n def get_run_tag_keys(self) -> Sequence[str]:\n return self._run_storage.get_run_tag_keys()\n\n @traced\n def get_run_group(self, run_id: str) -> Optional[Tuple[str, Sequence[DagsterRun]]]:\n return self._run_storage.get_run_group(run_id)\n\n def create_run_for_job(\n self,\n job_def: "JobDefinition",\n execution_plan: Optional["ExecutionPlan"] = None,\n run_id: Optional[str] = None,\n run_config: Optional[Mapping[str, object]] = None,\n resolved_op_selection: Optional[AbstractSet[str]] = None,\n status: Optional[Union[DagsterRunStatus, str]] = None,\n tags: Optional[Mapping[str, str]] = None,\n root_run_id: Optional[str] = None,\n parent_run_id: Optional[str] = None,\n op_selection: Optional[Sequence[str]] = None,\n asset_selection: Optional[AbstractSet[AssetKey]] = None,\n external_job_origin: Optional["ExternalJobOrigin"] = None,\n job_code_origin: Optional[JobPythonOrigin] = None,\n repository_load_data: Optional["RepositoryLoadData"] = None,\n ) -> DagsterRun:\n from dagster._core.definitions.job_definition import JobDefinition\n from dagster._core.execution.api import create_execution_plan\n from dagster._core.execution.plan.plan import ExecutionPlan\n from dagster._core.snap import snapshot_from_execution_plan\n\n check.inst_param(job_def, "pipeline_def", JobDefinition)\n check.opt_inst_param(execution_plan, "execution_plan", ExecutionPlan)\n\n # note that op_selection is required to execute the solid subset, which is the\n # frozenset version of the previous solid_subset.\n # op_selection is not required and will not be converted to op_selection here.\n # i.e. 
this function doesn't handle solid queries.\n # op_selection is only used to pass the user queries further down.\n check.opt_set_param(resolved_op_selection, "resolved_op_selection", of_type=str)\n check.opt_list_param(op_selection, "op_selection", of_type=str)\n check.opt_set_param(asset_selection, "asset_selection", of_type=AssetKey)\n\n # op_selection never provided\n if asset_selection or op_selection:\n # for cases when `create_run_for_pipeline` is directly called\n job_def = job_def.get_subset(\n asset_selection=asset_selection,\n op_selection=op_selection,\n )\n step_keys_to_execute = None\n\n if execution_plan:\n step_keys_to_execute = execution_plan.step_keys_to_execute\n\n else:\n execution_plan = create_execution_plan(\n job=job_def,\n run_config=run_config,\n instance_ref=self.get_ref() if self.is_persistent else None,\n tags=tags,\n repository_load_data=repository_load_data,\n )\n\n return self.create_run(\n job_name=job_def.name,\n run_id=run_id,\n run_config=run_config,\n op_selection=op_selection,\n asset_selection=asset_selection,\n asset_check_selection=None,\n resolved_op_selection=resolved_op_selection,\n step_keys_to_execute=step_keys_to_execute,\n status=DagsterRunStatus(status) if status else None,\n tags=tags,\n root_run_id=root_run_id,\n parent_run_id=parent_run_id,\n job_snapshot=job_def.get_job_snapshot(),\n execution_plan_snapshot=snapshot_from_execution_plan(\n execution_plan,\n job_def.get_job_snapshot_id(),\n ),\n parent_job_snapshot=job_def.get_parent_job_snapshot(),\n external_job_origin=external_job_origin,\n job_code_origin=job_code_origin,\n )\n\n def _construct_run_with_snapshots(\n self,\n job_name: str,\n run_id: str,\n run_config: Optional[Mapping[str, object]],\n resolved_op_selection: Optional[AbstractSet[str]],\n step_keys_to_execute: Optional[Sequence[str]],\n status: Optional[DagsterRunStatus],\n tags: Mapping[str, str],\n root_run_id: Optional[str],\n parent_run_id: Optional[str],\n job_snapshot: Optional["JobSnapshot"],\n execution_plan_snapshot: Optional["ExecutionPlanSnapshot"],\n parent_job_snapshot: Optional["JobSnapshot"],\n asset_selection: Optional[AbstractSet[AssetKey]] = None,\n asset_check_selection: Optional[AbstractSet["AssetCheckKey"]] = None,\n op_selection: Optional[Sequence[str]] = None,\n external_job_origin: Optional["ExternalJobOrigin"] = None,\n job_code_origin: Optional[JobPythonOrigin] = None,\n ) -> DagsterRun:\n # https://github.com/dagster-io/dagster/issues/2403\n if tags and IS_AIRFLOW_INGEST_PIPELINE_STR in tags:\n if AIRFLOW_EXECUTION_DATE_STR not in tags:\n tags = {\n **tags,\n AIRFLOW_EXECUTION_DATE_STR: get_current_datetime_in_utc().isoformat(),\n }\n\n check.invariant(\n not (not job_snapshot and execution_plan_snapshot),\n "It is illegal to have an execution plan snapshot and not have a pipeline snapshot."\n " It is possible to have no execution plan snapshot since we persist runs that do"\n " not successfully compile execution plans in the scheduled case.",\n )\n\n job_snapshot_id = (\n self._ensure_persisted_job_snapshot(job_snapshot, parent_job_snapshot)\n if job_snapshot\n else None\n )\n\n execution_plan_snapshot_id = (\n self._ensure_persisted_execution_plan_snapshot(\n execution_plan_snapshot, job_snapshot_id, step_keys_to_execute\n )\n if execution_plan_snapshot and job_snapshot_id\n else None\n )\n\n return DagsterRun(\n job_name=job_name,\n run_id=run_id,\n run_config=run_config,\n asset_selection=asset_selection,\n asset_check_selection=asset_check_selection,\n op_selection=op_selection,\n 
resolved_op_selection=resolved_op_selection,\n step_keys_to_execute=step_keys_to_execute,\n status=status,\n tags=tags,\n root_run_id=root_run_id,\n parent_run_id=parent_run_id,\n job_snapshot_id=job_snapshot_id,\n execution_plan_snapshot_id=execution_plan_snapshot_id,\n external_job_origin=external_job_origin,\n job_code_origin=job_code_origin,\n has_repository_load_data=execution_plan_snapshot is not None\n and execution_plan_snapshot.repository_load_data is not None,\n )\n\n def _ensure_persisted_job_snapshot(\n self,\n job_snapshot: "JobSnapshot",\n parent_job_snapshot: "Optional[JobSnapshot]",\n ) -> str:\n from dagster._core.snap import JobSnapshot, create_job_snapshot_id\n\n check.inst_param(job_snapshot, "job_snapshot", JobSnapshot)\n check.opt_inst_param(parent_job_snapshot, "parent_job_snapshot", JobSnapshot)\n\n if job_snapshot.lineage_snapshot:\n if not self._run_storage.has_job_snapshot(\n job_snapshot.lineage_snapshot.parent_snapshot_id\n ):\n check.invariant(\n create_job_snapshot_id(parent_job_snapshot) # type: ignore # (possible none)\n == job_snapshot.lineage_snapshot.parent_snapshot_id,\n "Parent pipeline snapshot id out of sync with passed parent pipeline snapshot",\n )\n\n returned_job_snapshot_id = self._run_storage.add_job_snapshot(\n parent_job_snapshot # type: ignore # (possible none)\n )\n check.invariant(\n job_snapshot.lineage_snapshot.parent_snapshot_id == returned_job_snapshot_id\n )\n\n job_snapshot_id = create_job_snapshot_id(job_snapshot)\n if not self._run_storage.has_job_snapshot(job_snapshot_id):\n returned_job_snapshot_id = self._run_storage.add_job_snapshot(job_snapshot)\n check.invariant(job_snapshot_id == returned_job_snapshot_id)\n\n return job_snapshot_id\n\n def _ensure_persisted_execution_plan_snapshot(\n self,\n execution_plan_snapshot: "ExecutionPlanSnapshot",\n job_snapshot_id: str,\n step_keys_to_execute: Optional[Sequence[str]],\n ) -> str:\n from dagster._core.snap.execution_plan_snapshot import (\n ExecutionPlanSnapshot,\n create_execution_plan_snapshot_id,\n )\n\n check.inst_param(execution_plan_snapshot, "execution_plan_snapshot", ExecutionPlanSnapshot)\n check.str_param(job_snapshot_id, "job_snapshot_id")\n check.opt_nullable_sequence_param(step_keys_to_execute, "step_keys_to_execute", of_type=str)\n\n check.invariant(\n execution_plan_snapshot.job_snapshot_id == job_snapshot_id,\n "Snapshot mismatch: Snapshot ID in execution plan snapshot is "\n f'"{execution_plan_snapshot.job_snapshot_id}" and snapshot_id created in memory is '\n f'"{job_snapshot_id}"',\n )\n\n execution_plan_snapshot_id = create_execution_plan_snapshot_id(execution_plan_snapshot)\n\n if not self._run_storage.has_execution_plan_snapshot(execution_plan_snapshot_id):\n returned_execution_plan_snapshot_id = self._run_storage.add_execution_plan_snapshot(\n execution_plan_snapshot\n )\n\n check.invariant(execution_plan_snapshot_id == returned_execution_plan_snapshot_id)\n\n return execution_plan_snapshot_id\n\n def _log_asset_planned_events(\n self, dagster_run: DagsterRun, execution_plan_snapshot: "ExecutionPlanSnapshot"\n ) -> None:\n from dagster._core.events import (\n AssetMaterializationPlannedData,\n DagsterEvent,\n DagsterEventType,\n )\n\n job_name = dagster_run.job_name\n\n for step in execution_plan_snapshot.steps:\n if step.key in execution_plan_snapshot.step_keys_to_execute:\n for output in step.outputs:\n asset_key = check.not_none(output.properties).asset_key\n if asset_key:\n # Logs and stores asset_materialization_planned event\n partition_tag = 
dagster_run.tags.get(PARTITION_NAME_TAG)\n partition_range_start, partition_range_end = dagster_run.tags.get(\n ASSET_PARTITION_RANGE_START_TAG\n ), dagster_run.tags.get(ASSET_PARTITION_RANGE_END_TAG)\n\n if partition_tag and (partition_range_start or partition_range_end):\n raise DagsterInvariantViolationError(\n f"Cannot have {ASSET_PARTITION_RANGE_START_TAG} or"\n f" {ASSET_PARTITION_RANGE_END_TAG} set along with"\n f" {PARTITION_NAME_TAG}"\n )\n\n if partition_range_start or partition_range_end:\n if not partition_range_start or not partition_range_end:\n raise DagsterInvariantViolationError(\n f"Cannot have {ASSET_PARTITION_RANGE_START_TAG} or"\n f" {ASSET_PARTITION_RANGE_END_TAG} set without the other"\n )\n\n # TODO: resolve which partitions are in the range, and emit an event for each\n\n partition = (\n partition_tag\n if check.not_none(output.properties).is_asset_partitioned\n else None\n )\n\n event = DagsterEvent(\n event_type_value=DagsterEventType.ASSET_MATERIALIZATION_PLANNED.value,\n job_name=job_name,\n message=(\n f"{job_name} intends to materialize asset {asset_key.to_string()}"\n ),\n event_specific_data=AssetMaterializationPlannedData(\n asset_key, partition=partition\n ),\n step_key=step.key,\n )\n self.report_dagster_event(event, dagster_run.run_id, logging.DEBUG)\n\n if check.not_none(output.properties).asset_check_key:\n asset_check_key = check.not_none(\n check.not_none(output.properties).asset_check_key\n )\n target_asset_key = asset_check_key.asset_key\n check_name = asset_check_key.name\n\n event = DagsterEvent(\n event_type_value=DagsterEventType.ASSET_CHECK_EVALUATION_PLANNED.value,\n job_name=job_name,\n message=(\n f"{job_name} intends to execute asset check {check_name} on"\n f" asset {target_asset_key.to_string()}"\n ),\n event_specific_data=AssetCheckEvaluationPlanned(\n target_asset_key,\n check_name=check_name,\n ),\n step_key=step.key,\n )\n self.report_dagster_event(event, dagster_run.run_id, logging.DEBUG)\n\n def create_run(\n self,\n *,\n job_name: str,\n run_id: Optional[str],\n run_config: Optional[Mapping[str, object]],\n status: Optional[DagsterRunStatus],\n tags: Optional[Mapping[str, Any]],\n root_run_id: Optional[str],\n parent_run_id: Optional[str],\n step_keys_to_execute: Optional[Sequence[str]],\n execution_plan_snapshot: Optional["ExecutionPlanSnapshot"],\n job_snapshot: Optional["JobSnapshot"],\n parent_job_snapshot: Optional["JobSnapshot"],\n asset_selection: Optional[AbstractSet[AssetKey]],\n asset_check_selection: Optional[AbstractSet["AssetCheckKey"]],\n resolved_op_selection: Optional[AbstractSet[str]],\n op_selection: Optional[Sequence[str]],\n external_job_origin: Optional["ExternalJobOrigin"],\n job_code_origin: Optional[JobPythonOrigin],\n ) -> DagsterRun:\n from dagster._core.definitions.asset_check_spec import AssetCheckKey\n from dagster._core.definitions.utils import validate_tags\n from dagster._core.host_representation.origin import ExternalJobOrigin\n from dagster._core.snap import ExecutionPlanSnapshot, JobSnapshot\n\n check.str_param(job_name, "job_name")\n check.opt_str_param(\n run_id, "run_id"\n ) # will be assigned to make_new_run_id() lower in callstack\n check.opt_mapping_param(run_config, "run_config", key_type=str)\n\n check.opt_inst_param(status, "status", DagsterRunStatus)\n check.opt_mapping_param(tags, "tags", key_type=str)\n\n validated_tags = validate_tags(tags)\n\n check.opt_str_param(root_run_id, "root_run_id")\n check.opt_str_param(parent_run_id, "parent_run_id")\n\n # If step_keys_to_execute is None, 
then everything is executed. In some cases callers\n # are still exploding and sending the full list of step keys even though that is\n # unnecessary.\n\n check.opt_sequence_param(step_keys_to_execute, "step_keys_to_execute")\n check.opt_inst_param(\n execution_plan_snapshot, "execution_plan_snapshot", ExecutionPlanSnapshot\n )\n\n if root_run_id or parent_run_id:\n check.invariant(\n root_run_id and parent_run_id,\n "If root_run_id or parent_run_id is passed, this is a re-execution scenario and"\n " root_run_id and parent_run_id must both be passed.",\n )\n\n # The job_snapshot should always be set in production scenarios. In tests\n # we have sometimes omitted it out of convenience.\n\n check.opt_inst_param(job_snapshot, "job_snapshot", JobSnapshot)\n check.opt_inst_param(parent_job_snapshot, "parent_job_snapshot", JobSnapshot)\n\n if parent_job_snapshot:\n check.invariant(\n job_snapshot,\n "If parent_job_snapshot is set, job_snapshot should also be.",\n )\n\n # op_selection is a sequence of selection queries assigned by the user.\n # *Most* callers expand the op_selection into an explicit set of\n # resolved_op_selection via accessing external_job.resolved_op_selection\n # but not all do. Some (launch execution mutation in graphql and backfill run\n # creation, for example) actually pass the solid *selection* into the\n # resolved_op_selection parameter, but just as a frozen set, rather than\n # fully resolving the selection, as the daemon launchers do. Given the\n # state of callers we just check to ensure that the arguments are well-formed.\n #\n # asset_selection adds another dimension to this lovely dance. op_selection\n # and asset_selection are mutually exclusive and should never both be set.\n # This is invariant is checked in a sporadic fashion around\n # the codebase, but is never enforced in a typed fashion.\n #\n # Additionally, the way that callsites currently behave *if* asset selection\n # is set (i.e., not None) then *neither* op_selection *nor*\n # resolved_op_selection is passed. In the asset selection case resolving\n # the set of assets into the canonical resolved_op_selection is done in\n # the user process, and the exact resolution is never persisted in the run.\n # We are asserting that invariant here to maintain that behavior.\n #\n # Finally, asset_check_selection can be passed along with asset_selection. It\n # is mutually exclusive with op_selection and resolved_op_selection. A `None`\n # value will include any asset checks that target selected assets. An empty set\n # will include no asset checks.\n\n check.opt_set_param(resolved_op_selection, "resolved_op_selection", of_type=str)\n check.opt_sequence_param(op_selection, "op_selection", of_type=str)\n check.opt_set_param(asset_selection, "asset_selection", of_type=AssetKey)\n check.opt_set_param(asset_check_selection, "asset_check_selection", of_type=AssetCheckKey)\n\n if asset_selection is not None or asset_check_selection is not None:\n check.invariant(\n op_selection is None,\n "Cannot pass op_selection with either of asset_selection or asset_check_selection",\n )\n\n check.invariant(\n resolved_op_selection is None,\n "Cannot pass resolved_op_selection with either of asset_selection or"\n " asset_check_selection",\n )\n\n # The "python origin" arguments exist so a job can be reconstructed in memory\n # after a DagsterRun has been fetched from the database.\n #\n # There are cases (notably in _logged_execute_job with Reconstructable jobs)\n # where job_code_origin and is not. 
In some cloud test cases only\n # external_job_origin is passed But they are almost always passed together.\n # If these are not set the created run will never be able to be relaunched from\n # the information just in the run or in another process.\n\n check.opt_inst_param(external_job_origin, "external_job_origin", ExternalJobOrigin)\n check.opt_inst_param(job_code_origin, "job_code_origin", JobPythonOrigin)\n\n dagster_run = self._construct_run_with_snapshots(\n job_name=job_name,\n run_id=run_id, # type: ignore # (possible none)\n run_config=run_config,\n asset_selection=asset_selection,\n asset_check_selection=asset_check_selection,\n op_selection=op_selection,\n resolved_op_selection=resolved_op_selection,\n step_keys_to_execute=step_keys_to_execute,\n status=status,\n tags=validated_tags,\n root_run_id=root_run_id,\n parent_run_id=parent_run_id,\n job_snapshot=job_snapshot,\n execution_plan_snapshot=execution_plan_snapshot,\n parent_job_snapshot=parent_job_snapshot,\n external_job_origin=external_job_origin,\n job_code_origin=job_code_origin,\n )\n\n dagster_run = self._run_storage.add_run(dagster_run)\n\n if execution_plan_snapshot:\n self._log_asset_planned_events(dagster_run, execution_plan_snapshot)\n\n return dagster_run\n\n def create_reexecuted_run(\n self,\n *,\n parent_run: DagsterRun,\n code_location: "CodeLocation",\n external_job: "ExternalJob",\n strategy: "ReexecutionStrategy",\n extra_tags: Optional[Mapping[str, Any]] = None,\n run_config: Optional[Mapping[str, Any]] = None,\n use_parent_run_tags: bool = False,\n ) -> DagsterRun:\n from dagster._core.execution.plan.resume_retry import (\n ReexecutionStrategy,\n )\n from dagster._core.execution.plan.state import KnownExecutionState\n from dagster._core.host_representation import CodeLocation, ExternalJob\n\n check.inst_param(parent_run, "parent_run", DagsterRun)\n check.inst_param(code_location, "code_location", CodeLocation)\n check.inst_param(external_job, "external_job", ExternalJob)\n check.inst_param(strategy, "strategy", ReexecutionStrategy)\n check.opt_mapping_param(extra_tags, "extra_tags", key_type=str)\n check.opt_mapping_param(run_config, "run_config", key_type=str)\n\n check.bool_param(use_parent_run_tags, "use_parent_run_tags")\n\n root_run_id = parent_run.root_run_id or parent_run.run_id\n parent_run_id = parent_run.run_id\n\n tags = merge_dicts(\n external_job.tags,\n (\n # these can differ from external_job.tags if tags were added at launch time\n parent_run.tags\n if use_parent_run_tags\n else {}\n ),\n extra_tags or {},\n {\n PARENT_RUN_ID_TAG: parent_run_id,\n ROOT_RUN_ID_TAG: root_run_id,\n },\n )\n\n run_config = run_config if run_config is not None else parent_run.run_config\n\n if strategy == ReexecutionStrategy.FROM_FAILURE:\n check.invariant(\n parent_run.status == DagsterRunStatus.FAILURE,\n "Cannot reexecute from failure a run that is not failed",\n )\n\n (\n step_keys_to_execute,\n known_state,\n ) = KnownExecutionState.build_resume_retry_reexecution(\n self,\n parent_run=parent_run,\n )\n tags[RESUME_RETRY_TAG] = "true"\n elif strategy == ReexecutionStrategy.ALL_STEPS:\n step_keys_to_execute = None\n known_state = None\n else:\n raise DagsterInvariantViolationError(f"Unknown reexecution strategy: {strategy}")\n\n external_execution_plan = code_location.get_external_execution_plan(\n external_job,\n run_config,\n step_keys_to_execute=step_keys_to_execute,\n known_state=known_state,\n instance=self,\n )\n\n return self.create_run(\n job_name=parent_run.job_name,\n run_id=None,\n 
run_config=run_config,\n resolved_op_selection=parent_run.resolved_op_selection,\n step_keys_to_execute=step_keys_to_execute,\n status=DagsterRunStatus.NOT_STARTED,\n tags=tags,\n root_run_id=root_run_id,\n parent_run_id=parent_run_id,\n job_snapshot=external_job.job_snapshot,\n execution_plan_snapshot=external_execution_plan.execution_plan_snapshot,\n parent_job_snapshot=external_job.parent_job_snapshot,\n op_selection=parent_run.op_selection,\n asset_selection=parent_run.asset_selection,\n asset_check_selection=parent_run.asset_check_selection,\n external_job_origin=external_job.get_external_origin(),\n job_code_origin=external_job.get_python_origin(),\n )\n\n def register_managed_run(\n self,\n job_name: str,\n run_id: str,\n run_config: Optional[Mapping[str, object]],\n resolved_op_selection: Optional[AbstractSet[str]],\n step_keys_to_execute: Optional[Sequence[str]],\n tags: Mapping[str, str],\n root_run_id: Optional[str],\n parent_run_id: Optional[str],\n job_snapshot: Optional["JobSnapshot"],\n execution_plan_snapshot: Optional["ExecutionPlanSnapshot"],\n parent_job_snapshot: Optional["JobSnapshot"],\n op_selection: Optional[Sequence[str]] = None,\n job_code_origin: Optional[JobPythonOrigin] = None,\n ) -> DagsterRun:\n # The usage of this method is limited to dagster-airflow, specifically in Dagster\n # Operators that are executed in Airflow. Because a common workflow in Airflow is to\n # retry dags from arbitrary tasks, we need any node to be capable of creating a\n # DagsterRun.\n #\n # The try-except DagsterRunAlreadyExists block handles the race when multiple "root" tasks\n # simultaneously execute self._run_storage.add_run(dagster_run). When this happens, only\n # one task succeeds in creating the run, while the others get DagsterRunAlreadyExists\n # error; at this point, the failed tasks try again to fetch the existing run.\n # https://github.com/dagster-io/dagster/issues/2412\n\n dagster_run = self._construct_run_with_snapshots(\n job_name=job_name,\n run_id=run_id,\n run_config=run_config,\n op_selection=op_selection,\n resolved_op_selection=resolved_op_selection,\n step_keys_to_execute=step_keys_to_execute,\n status=DagsterRunStatus.MANAGED,\n tags=tags,\n root_run_id=root_run_id,\n parent_run_id=parent_run_id,\n job_snapshot=job_snapshot,\n execution_plan_snapshot=execution_plan_snapshot,\n parent_job_snapshot=parent_job_snapshot,\n job_code_origin=job_code_origin,\n )\n\n def get_run() -> DagsterRun:\n candidate_run = self.get_run_by_id(dagster_run.run_id)\n\n field_diff = _check_run_equality(dagster_run, candidate_run) # type: ignore # (possible none)\n\n if field_diff:\n raise DagsterRunConflict(\n "Found conflicting existing run with same id {run_id}. 
Runs differ in:"\n "\\n{field_diff}".format(\n run_id=dagster_run.run_id,\n field_diff=_format_field_diff(field_diff),\n ),\n )\n return candidate_run # type: ignore # (possible none)\n\n if self.has_run(dagster_run.run_id):\n return get_run()\n\n try:\n return self._run_storage.add_run(dagster_run)\n except DagsterRunAlreadyExists:\n return get_run()\n\n @traced\n def add_run(self, dagster_run: DagsterRun) -> DagsterRun:\n return self._run_storage.add_run(dagster_run)\n\n @traced\n def add_snapshot(\n self,\n snapshot: Union["JobSnapshot", "ExecutionPlanSnapshot"],\n snapshot_id: Optional[str] = None,\n ) -> None:\n return self._run_storage.add_snapshot(snapshot, snapshot_id)\n\n @traced\n def handle_run_event(self, run_id: str, event: "DagsterEvent") -> None:\n return self._run_storage.handle_run_event(run_id, event)\n\n @traced\n def add_run_tags(self, run_id: str, new_tags: Mapping[str, str]) -> None:\n return self._run_storage.add_run_tags(run_id, new_tags)\n\n @traced\n def has_run(self, run_id: str) -> bool:\n return self._run_storage.has_run(run_id)\n\n @traced\n def get_runs(\n self,\n filters: Optional[RunsFilter] = None,\n cursor: Optional[str] = None,\n limit: Optional[int] = None,\n bucket_by: Optional[Union[JobBucket, TagBucket]] = None,\n ) -> Sequence[DagsterRun]:\n return self._run_storage.get_runs(filters, cursor, limit, bucket_by)\n\n @traced\n def get_run_ids(\n self,\n filters: Optional[RunsFilter] = None,\n cursor: Optional[str] = None,\n limit: Optional[int] = None,\n ) -> Sequence[str]:\n return self._run_storage.get_run_ids(filters, cursor=cursor, limit=limit)\n\n @traced\n def get_runs_count(self, filters: Optional[RunsFilter] = None) -> int:\n return self._run_storage.get_runs_count(filters)\n\n
[docs] @public\n @traced\n def get_run_records(\n self,\n filters: Optional[RunsFilter] = None,\n limit: Optional[int] = None,\n order_by: Optional[str] = None,\n ascending: bool = False,\n cursor: Optional[str] = None,\n bucket_by: Optional[Union[JobBucket, TagBucket]] = None,\n ) -> Sequence[RunRecord]:\n """Return a list of run records stored in the run storage, sorted by the given column in given order.\n\n Args:\n filters (Optional[RunsFilter]): the filter by which to filter runs.\n limit (Optional[int]): Number of results to get. Defaults to infinite.\n order_by (Optional[str]): Name of the column to sort by. Defaults to id.\n ascending (Optional[bool]): Sort the result in ascending order if True, descending\n otherwise. Defaults to descending.\n\n Returns:\n List[RunRecord]: List of run records stored in the run storage.\n """\n return self._run_storage.get_run_records(\n filters, limit, order_by, ascending, cursor, bucket_by\n )
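# Illustrative sketch (not part of the module source above) of querying run records
# from user code. The status filter, limit, and ephemeral instance are assumptions
# chosen for the example.
from dagster import DagsterInstance, DagsterRunStatus, RunsFilter

with DagsterInstance.ephemeral() as instance:
    # Up to five most recent failed runs; results are newest-first because
    # ascending defaults to False.
    records = instance.get_run_records(
        filters=RunsFilter(statuses=[DagsterRunStatus.FAILURE]),
        limit=5,
    )
    for record in records:
        print(record.dagster_run.run_id, record.dagster_run.status)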
\n\n @traced\n def get_run_partition_data(self, runs_filter: RunsFilter) -> Sequence[RunPartitionData]:\n """Get run partition data for a given partitioned job."""\n return self._run_storage.get_run_partition_data(runs_filter)\n\n def wipe(self) -> None:\n self._run_storage.wipe()\n self._event_storage.wipe()\n\n
[docs] @public\n @traced\n def delete_run(self, run_id: str) -> None:\n """Delete a run and all events generated by that from storage.\n\n Args:\n run_id (str): The id of the run to delete.\n """\n self._run_storage.delete_run(run_id)\n self._event_storage.delete_events(run_id)
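# Minimal sketch of delete_run; the run id is a hypothetical placeholder and the
# has_run guard only keeps the example safe to run against an empty instance.
from dagster import DagsterInstance

with DagsterInstance.ephemeral() as instance:
    run_id = "00000000-0000-0000-0000-000000000000"  # hypothetical run id
    if instance.has_run(run_id):
        # Irreversibly removes the run row and every event logged against it.
        instance.delete_run(run_id)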
\n\n # event storage\n @traced\n def logs_after(\n self,\n run_id: str,\n cursor: Optional[int] = None,\n of_type: Optional["DagsterEventType"] = None,\n limit: Optional[int] = None,\n ) -> Sequence["EventLogEntry"]:\n return self._event_storage.get_logs_for_run(\n run_id,\n cursor=cursor,\n of_type=of_type,\n limit=limit,\n )\n\n @traced\n def all_logs(\n self,\n run_id: str,\n of_type: Optional[Union["DagsterEventType", Set["DagsterEventType"]]] = None,\n ) -> Sequence["EventLogEntry"]:\n return self._event_storage.get_logs_for_run(run_id, of_type=of_type)\n\n @traced\n def get_records_for_run(\n self,\n run_id: str,\n cursor: Optional[str] = None,\n of_type: Optional[Union["DagsterEventType", Set["DagsterEventType"]]] = None,\n limit: Optional[int] = None,\n ascending: bool = True,\n ) -> "EventLogConnection":\n return self._event_storage.get_records_for_run(run_id, cursor, of_type, limit, ascending)\n\n def watch_event_logs(self, run_id: str, cursor: Optional[str], cb: "EventHandlerFn") -> None:\n return self._event_storage.watch(run_id, cursor, cb)\n\n def end_watch_event_logs(self, run_id: str, cb: "EventHandlerFn") -> None:\n return self._event_storage.end_watch(run_id, cb)\n\n # asset storage\n\n @traced\n def can_cache_asset_status_data(self) -> bool:\n return self._event_storage.can_cache_asset_status_data()\n\n @traced\n def update_asset_cached_status_data(\n self, asset_key: AssetKey, cache_values: "AssetStatusCacheValue"\n ) -> None:\n self._event_storage.update_asset_cached_status_data(asset_key, cache_values)\n\n @traced\n def wipe_asset_cached_status(self, asset_keys: Sequence[AssetKey]) -> None:\n check.list_param(asset_keys, "asset_keys", of_type=AssetKey)\n for asset_key in asset_keys:\n self._event_storage.wipe_asset_cached_status(asset_key)\n\n @traced\n def all_asset_keys(self) -> Sequence[AssetKey]:\n return self._event_storage.all_asset_keys()\n\n
[docs] @public\n @traced\n def get_asset_keys(\n self,\n prefix: Optional[Sequence[str]] = None,\n limit: Optional[int] = None,\n cursor: Optional[str] = None,\n ) -> Sequence[AssetKey]:\n """Return a filtered subset of asset keys managed by this instance.\n\n Args:\n prefix (Optional[Sequence[str]]): Return only assets having this key prefix.\n limit (Optional[int]): Maximum number of keys to return.\n cursor (Optional[str]): Cursor to use for pagination.\n\n Returns:\n Sequence[AssetKey]: List of asset keys.\n """\n return self._event_storage.get_asset_keys(prefix=prefix, limit=limit, cursor=cursor)
\n\n
[docs] @public\n @traced\n def has_asset_key(self, asset_key: AssetKey) -> bool:\n """Return true if this instance manages the given asset key.\n\n Args:\n asset_key (AssetKey): Asset key to check.\n """\n return self._event_storage.has_asset_key(asset_key)
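# Sketch of the asset-key lookup APIs above; the "warehouse" prefix and "orders"
# key are assumed example values, not anything defined in this module.
from dagster import AssetKey, DagsterInstance

with DagsterInstance.ephemeral() as instance:
    # All asset keys under the "warehouse" prefix, at most 100 of them.
    keys = instance.get_asset_keys(prefix=["warehouse"], limit=100)
    print(keys)

    # Membership check for a single key.
    if instance.has_asset_key(AssetKey(["warehouse", "orders"])):
        print("orders has been materialized or observed on this instance")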
\n\n @traced\n def get_latest_materialization_events(\n self, asset_keys: Iterable[AssetKey]\n ) -> Mapping[AssetKey, Optional["EventLogEntry"]]:\n return self._event_storage.get_latest_materialization_events(asset_keys)\n\n
[docs] @public\n @traced\n def get_latest_materialization_event(self, asset_key: AssetKey) -> Optional["EventLogEntry"]:\n """Fetch the latest materialization event for the given asset key.\n\n Args:\n asset_key (AssetKey): Asset key to return materialization for.\n\n Returns:\n Optional[EventLogEntry]: The latest materialization event for the given asset\n key, or `None` if the asset has not been materialized.\n """\n return self._event_storage.get_latest_materialization_events([asset_key]).get(asset_key)
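# Sketch of reading the latest materialization for one asset; the "orders" key is
# an assumed example value.
from dagster import AssetKey, DagsterInstance

with DagsterInstance.ephemeral() as instance:
    entry = instance.get_latest_materialization_event(AssetKey("orders"))
    if entry is None:
        print("orders has never been materialized")
    else:
        # entry is an EventLogEntry; timestamp and run_id identify when/where it happened.
        print(entry.timestamp, entry.run_id)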
\n\n
[docs] @public\n @traced\n def get_event_records(\n self,\n event_records_filter: "EventRecordsFilter",\n limit: Optional[int] = None,\n ascending: bool = False,\n ) -> Sequence["EventLogRecord"]:\n """Return a list of event records stored in the event log storage.\n\n Args:\n event_records_filter (EventRecordsFilter): the filter by which to filter event\n records.\n limit (Optional[int]): Number of results to get. Defaults to infinite.\n ascending (Optional[bool]): Sort the result in ascending order if True, descending\n otherwise. Defaults to descending.\n\n Returns:\n List[EventLogRecord]: List of event log records stored in the event log storage.\n """\n return self._event_storage.get_event_records(event_records_filter, limit, ascending)
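# Sketch of an event-record query using the method above. EventRecordsFilter and
# DagsterEventType are public dagster exports; the asset key is an assumed example.
from dagster import AssetKey, DagsterEventType, DagsterInstance, EventRecordsFilter

with DagsterInstance.ephemeral() as instance:
    records = instance.get_event_records(
        EventRecordsFilter(
            event_type=DagsterEventType.ASSET_MATERIALIZATION,
            asset_key=AssetKey("orders"),
        ),
        limit=10,
        ascending=False,  # newest first, matching the documented default
    )
    for record in records:
        print(record.storage_id, record.event_log_entry.timestamp)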
\n\n
[docs] @public\n @traced\n def get_status_by_partition(\n self,\n asset_key: AssetKey,\n partition_keys: Sequence[str],\n partitions_def: "PartitionsDefinition",\n ) -> Optional[Mapping[str, "AssetPartitionStatus"]]:\n """Get the current status of provided partition_keys for the provided asset.\n\n Args:\n asset_key (AssetKey): The asset to get per-partition status for.\n partition_keys (Sequence[str]): The partitions to get status for.\n partitions_def (PartitionsDefinition): The PartitionsDefinition of the asset to get\n per-partition status for.\n\n Returns:\n Optional[Mapping[str, AssetPartitionStatus]]: status for each partition key\n\n """\n from dagster._core.storage.partition_status_cache import (\n AssetPartitionStatus,\n AssetStatusCacheValue,\n get_and_update_asset_status_cache_value,\n )\n\n cached_value = get_and_update_asset_status_cache_value(self, asset_key, partitions_def)\n\n if isinstance(cached_value, AssetStatusCacheValue):\n materialized_partitions = cached_value.deserialize_materialized_partition_subsets(\n partitions_def\n )\n failed_partitions = cached_value.deserialize_failed_partition_subsets(partitions_def)\n in_progress_partitions = cached_value.deserialize_in_progress_partition_subsets(\n partitions_def\n )\n\n status_by_partition = {}\n\n for partition_key in partition_keys:\n if partition_key in in_progress_partitions:\n status_by_partition[partition_key] = AssetPartitionStatus.IN_PROGRESS\n elif partition_key in failed_partitions:\n status_by_partition[partition_key] = AssetPartitionStatus.FAILED\n elif partition_key in materialized_partitions:\n status_by_partition[partition_key] = AssetPartitionStatus.MATERIALIZED\n else:\n status_by_partition[partition_key] = None\n\n return status_by_partition
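# Sketch of a per-partition status query. The daily partitions definition, asset key,
# and partition keys are assumptions for the example; on an instance with no events
# for the asset the method may simply return None.
from dagster import AssetKey, DagsterInstance, DailyPartitionsDefinition

partitions_def = DailyPartitionsDefinition(start_date="2023-01-01")

with DagsterInstance.ephemeral() as instance:
    status_by_partition = instance.get_status_by_partition(
        AssetKey("orders"),
        partition_keys=["2023-10-01", "2023-10-02"],
        partitions_def=partitions_def,
    )
    # Values are AssetPartitionStatus members (MATERIALIZED / FAILED / IN_PROGRESS) or None.
    print(status_by_partition)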
\n\n
[docs] @public\n @traced\n def get_asset_records(\n self, asset_keys: Optional[Sequence[AssetKey]] = None\n ) -> Sequence["AssetRecord"]:\n """Return an `AssetRecord` for each of the given asset keys.\n\n Args:\n asset_keys (Optional[Sequence[AssetKey]]): List of asset keys to retrieve records for.\n\n Returns:\n Sequence[AssetRecord]: List of asset records.\n """\n return self._event_storage.get_asset_records(asset_keys)
\n\n @traced\n def get_event_tags_for_asset(\n self,\n asset_key: AssetKey,\n filter_tags: Optional[Mapping[str, str]] = None,\n filter_event_id: Optional[int] = None,\n ) -> Sequence[Mapping[str, str]]:\n """Fetches asset event tags for the given asset key.\n\n If filter_tags is provided, searches for events containing all of the filter tags. Then,\n returns all tags for those events. This enables searching for multipartitioned asset\n partition tags with a fixed dimension value, e.g. all of the tags for events where\n "country" == "US".\n\n If filter_event_id is provided, searches for the event with the provided event_id.\n\n Returns a list of dicts, where each dict is a mapping of tag key to tag value for a\n single event.\n """\n return self._event_storage.get_event_tags_for_asset(asset_key, filter_tags, filter_event_id)\n\n
[docs] @public\n @traced\n def wipe_assets(self, asset_keys: Sequence[AssetKey]) -> None:\n """Wipes asset event history from the event log for the given asset keys.\n\n Args:\n asset_keys (Sequence[AssetKey]): Asset keys to wipe.\n """\n check.list_param(asset_keys, "asset_keys", of_type=AssetKey)\n for asset_key in asset_keys:\n self._event_storage.wipe_asset(asset_key)
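# Sketch pairing get_asset_records with wipe_assets. Wiping is destructive, so the
# "orders" key here is purely an assumed example value.
from dagster import AssetKey, DagsterInstance

with DagsterInstance.ephemeral() as instance:
    for record in instance.get_asset_records([AssetKey("orders")]):
        print(record.asset_entry.asset_key)

    # Remove all event history for the asset; it disappears from the catalog until
    # it is materialized or observed again.
    instance.wipe_assets([AssetKey("orders")])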
\n\n @traced\n def get_materialization_count_by_partition(\n self, asset_keys: Sequence[AssetKey], after_cursor: Optional[int] = None\n ) -> Mapping[AssetKey, Mapping[str, int]]:\n return self._event_storage.get_materialization_count_by_partition(asset_keys, after_cursor)\n\n @traced\n def get_materialized_partitions(\n self,\n asset_key: AssetKey,\n before_cursor: Optional[int] = None,\n after_cursor: Optional[int] = None,\n ) -> Set[str]:\n return self._event_storage.get_materialized_partitions(\n asset_key, before_cursor=before_cursor, after_cursor=after_cursor\n )\n\n @traced\n def get_latest_storage_id_by_partition(\n self, asset_key: AssetKey, event_type: "DagsterEventType"\n ) -> Mapping[str, int]:\n """Fetch the latest materialization storage id for each partition for a given asset key.\n\n Returns a mapping of partition to storage id.\n """\n return self._event_storage.get_latest_storage_id_by_partition(asset_key, event_type)\n\n
[docs] @public\n @traced\n def get_dynamic_partitions(self, partitions_def_name: str) -> Sequence[str]:\n """Get the set of partition keys for the specified :py:class:`DynamicPartitionsDefinition`.\n\n Args:\n partitions_def_name (str): The name of the `DynamicPartitionsDefinition`.\n """\n check.str_param(partitions_def_name, "partitions_def_name")\n return self._event_storage.get_dynamic_partitions(partitions_def_name)
\n\n
[docs] @public\n @traced\n def add_dynamic_partitions(\n self, partitions_def_name: str, partition_keys: Sequence[str]\n ) -> None:\n """Add partitions to the specified :py:class:`DynamicPartitionsDefinition` idempotently.\n Does not add any partitions that already exist.\n\n Args:\n partitions_def_name (str): The name of the `DynamicPartitionsDefinition`.\n partition_keys (Sequence[str]): Partition keys to add.\n """\n from dagster._core.definitions.partition import (\n raise_error_on_invalid_partition_key_substring,\n )\n\n check.str_param(partitions_def_name, "partitions_def_name")\n check.sequence_param(partition_keys, "partition_keys", of_type=str)\n if isinstance(partition_keys, str):\n # Guard against a single string being passed in `partition_keys`\n raise DagsterInvalidInvocationError("partition_keys must be a sequence of strings")\n raise_error_on_invalid_partition_key_substring(partition_keys)\n return self._event_storage.add_dynamic_partitions(partitions_def_name, partition_keys)
\n\n
[docs] @public\n @traced\n def delete_dynamic_partition(self, partitions_def_name: str, partition_key: str) -> None:\n """Delete a partition for the specified :py:class:`DynamicPartitionsDefinition`.\n If the partition does not exist, exits silently.\n\n Args:\n partitions_def_name (str): The name of the `DynamicPartitionsDefinition`.\n partition_key (str): Partition key to delete.\n """\n check.str_param(partitions_def_name, "partitions_def_name")\n check.sequence_param(partition_key, "partition_key", of_type=str)\n self._event_storage.delete_dynamic_partition(partitions_def_name, partition_key)
\n\n
[docs] @public\n @traced\n def has_dynamic_partition(self, partitions_def_name: str, partition_key: str) -> bool:\n """Check if a partition key exists for the :py:class:`DynamicPartitionsDefinition`.\n\n Args:\n partitions_def_name (str): The name of the `DynamicPartitionsDefinition`.\n partition_key (str): Partition key to check.\n """\n check.str_param(partitions_def_name, "partitions_def_name")\n check.str_param(partition_key, "partition_key")\n return self._event_storage.has_dynamic_partition(partitions_def_name, partition_key)
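# End-to-end sketch of the dynamic-partition helpers above; "customers" is an assumed
# DynamicPartitionsDefinition name and the partition keys are example values.
from dagster import DagsterInstance

with DagsterInstance.ephemeral() as instance:
    # Idempotently register partition keys for the "customers" dynamic partitions set.
    instance.add_dynamic_partitions("customers", ["acme", "globex"])

    assert instance.has_dynamic_partition("customers", "acme")
    print(instance.get_dynamic_partitions("customers"))  # the registered keys

    # Deleting a key is a no-op if it does not exist.
    instance.delete_dynamic_partition("customers", "globex")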
\n\n # event subscriptions\n\n def _get_yaml_python_handlers(self) -> Sequence[logging.Handler]:\n if self._settings:\n logging_config = self.get_settings("python_logs").get("dagster_handler_config", {})\n\n if logging_config:\n experimental_warning("Handling yaml-defined logging configuration")\n\n # Handlers can only be retrieved from dictConfig configuration if they are attached\n # to a logger. We add a dummy logger to the configuration that allows us to access user\n # defined handlers.\n handler_names = logging_config.get("handlers", {}).keys()\n\n dagster_dummy_logger_name = "dagster_dummy_logger"\n\n processed_dict_conf = {\n "version": 1,\n "disable_existing_loggers": False,\n "loggers": {dagster_dummy_logger_name: {"handlers": handler_names}},\n }\n processed_dict_conf.update(logging_config)\n\n logging.config.dictConfig(processed_dict_conf)\n\n dummy_logger = logging.getLogger(dagster_dummy_logger_name)\n return dummy_logger.handlers\n return []\n\n def _get_event_log_handler(self) -> _EventListenerLogHandler:\n event_log_handler = _EventListenerLogHandler(self)\n event_log_handler.setLevel(10)\n return event_log_handler\n\n def get_handlers(self) -> Sequence[logging.Handler]:\n handlers: List[logging.Handler] = [self._get_event_log_handler()]\n handlers.extend(self._get_yaml_python_handlers())\n return handlers\n\n def store_event(self, event: "EventLogEntry") -> None:\n self._event_storage.store_event(event)\n\n def handle_new_event(self, event: "EventLogEntry") -> None:\n run_id = event.run_id\n\n self._event_storage.store_event(event)\n\n if event.is_dagster_event and event.get_dagster_event().is_job_event:\n self._run_storage.handle_run_event(run_id, event.get_dagster_event())\n\n for sub in self._subscribers[run_id]:\n sub(event)\n\n def add_event_listener(self, run_id: str, cb) -> None:\n self._subscribers[run_id].append(cb)\n\n def report_engine_event(\n self,\n message: str,\n dagster_run: Optional[DagsterRun] = None,\n engine_event_data: Optional["EngineEventData"] = None,\n cls: Optional[Type[object]] = None,\n step_key: Optional[str] = None,\n job_name: Optional[str] = None,\n run_id: Optional[str] = None,\n ) -> "DagsterEvent":\n """Report a EngineEvent that occurred outside of a job execution context."""\n from dagster._core.events import DagsterEvent, DagsterEventType, EngineEventData\n\n check.opt_class_param(cls, "cls")\n check.str_param(message, "message")\n check.opt_inst_param(dagster_run, "dagster_run", DagsterRun)\n check.opt_str_param(run_id, "run_id")\n check.opt_str_param(job_name, "job_name")\n\n check.invariant(\n dagster_run or (job_name and run_id),\n "Must include either dagster_run or job_name and run_id",\n )\n\n run_id = run_id if run_id else dagster_run.run_id # type: ignore\n job_name = job_name if job_name else dagster_run.job_name # type: ignore\n\n engine_event_data = check.opt_inst_param(\n engine_event_data,\n "engine_event_data",\n EngineEventData,\n EngineEventData({}),\n )\n\n if cls:\n message = f"[{cls.__name__}] {message}"\n\n log_level = logging.INFO\n if engine_event_data and engine_event_data.error:\n log_level = logging.ERROR\n\n dagster_event = DagsterEvent(\n event_type_value=DagsterEventType.ENGINE_EVENT.value,\n job_name=job_name,\n message=message,\n event_specific_data=engine_event_data,\n step_key=step_key,\n )\n self.report_dagster_event(dagster_event, run_id=run_id, log_level=log_level)\n return dagster_event\n\n def report_dagster_event(\n self,\n dagster_event: "DagsterEvent",\n run_id: str,\n log_level: Union[str, 
int] = logging.INFO,\n ) -> None:\n """Takes a DagsterEvent and stores it in persistent storage for the corresponding DagsterRun."""\n from dagster._core.events.log import EventLogEntry\n\n event_record = EventLogEntry(\n user_message="",\n level=log_level,\n job_name=dagster_event.job_name,\n run_id=run_id,\n error_info=None,\n timestamp=time.time(),\n step_key=dagster_event.step_key,\n dagster_event=dagster_event,\n )\n self.handle_new_event(event_record)\n\n def report_run_canceling(self, run: DagsterRun, message: Optional[str] = None):\n from dagster._core.events import DagsterEvent, DagsterEventType\n\n check.inst_param(run, "run", DagsterRun)\n message = check.opt_str_param(\n message,\n "message",\n "Sending run termination request.",\n )\n canceling_event = DagsterEvent(\n event_type_value=DagsterEventType.PIPELINE_CANCELING.value,\n job_name=run.job_name,\n message=message,\n )\n self.report_dagster_event(canceling_event, run_id=run.run_id)\n\n def report_run_canceled(\n self,\n dagster_run: DagsterRun,\n message: Optional[str] = None,\n ) -> "DagsterEvent":\n from dagster._core.events import DagsterEvent, DagsterEventType\n\n check.inst_param(dagster_run, "dagster_run", DagsterRun)\n\n message = check.opt_str_param(\n message,\n "mesage",\n "This run has been marked as canceled from outside the execution context.",\n )\n\n dagster_event = DagsterEvent(\n event_type_value=DagsterEventType.PIPELINE_CANCELED.value,\n job_name=dagster_run.job_name,\n message=message,\n )\n self.report_dagster_event(dagster_event, run_id=dagster_run.run_id, log_level=logging.ERROR)\n return dagster_event\n\n def report_run_failed(\n self, dagster_run: DagsterRun, message: Optional[str] = None\n ) -> "DagsterEvent":\n from dagster._core.events import DagsterEvent, DagsterEventType\n\n check.inst_param(dagster_run, "dagster_run", DagsterRun)\n\n message = check.opt_str_param(\n message,\n "message",\n "This run has been marked as failed from outside the execution context.",\n )\n\n dagster_event = DagsterEvent(\n event_type_value=DagsterEventType.PIPELINE_FAILURE.value,\n job_name=dagster_run.job_name,\n message=message,\n )\n self.report_dagster_event(dagster_event, run_id=dagster_run.run_id, log_level=logging.ERROR)\n return dagster_event\n\n # directories\n\n def file_manager_directory(self, run_id: str) -> str:\n return self._local_artifact_storage.file_manager_dir(run_id)\n\n def storage_directory(self) -> str:\n return self._local_artifact_storage.storage_dir\n\n def schedules_directory(self) -> str:\n return self._local_artifact_storage.schedules_dir\n\n # Runs coordinator\n\n def submit_run(self, run_id: str, workspace: "IWorkspace") -> DagsterRun:\n """Submit a pipeline run to the coordinator.\n\n This method delegates to the ``RunCoordinator``, configured on the instance, and will\n call its implementation of ``RunCoordinator.submit_run()`` to send the run to the\n coordinator for execution. Runs should be created in the instance (e.g., by calling\n ``DagsterInstance.create_run()``) *before* this method is called, and\n should be in the ``PipelineRunStatus.NOT_STARTED`` state. 
They also must have a non-null\n ExternalPipelineOrigin.\n\n Args:\n run_id (str): The id of the run.\n """\n from dagster._core.host_representation import ExternalJobOrigin\n from dagster._core.run_coordinator import SubmitRunContext\n\n run = self.get_run_by_id(run_id)\n if run is None:\n raise DagsterInvariantViolationError(\n f"Could not load run {run_id} that was passed to submit_run"\n )\n\n check.inst(\n run.external_job_origin,\n ExternalJobOrigin,\n "External pipeline origin must be set for submitted runs",\n )\n check.inst(\n run.job_code_origin,\n JobPythonOrigin,\n "Python origin must be set for submitted runs",\n )\n\n try:\n submitted_run = self.run_coordinator.submit_run(\n SubmitRunContext(run, workspace=workspace)\n )\n except:\n from dagster._core.events import EngineEventData\n\n error = serializable_error_info_from_exc_info(sys.exc_info())\n self.report_engine_event(\n error.message,\n run,\n EngineEventData.engine_error(error),\n )\n self.report_run_failed(run)\n raise\n\n return submitted_run\n\n # Run launcher\n\n def launch_run(self, run_id: str, workspace: "IWorkspace") -> DagsterRun:\n """Launch a pipeline run.\n\n This method is typically called using `instance.submit_run` rather than being invoked\n directly. This method delegates to the ``RunLauncher``, if any, configured on the instance,\n and will call its implementation of ``RunLauncher.launch_run()`` to begin the execution of\n the specified run. Runs should be created in the instance (e.g., by calling\n ``DagsterInstance.create_run()``) *before* this method is called, and should be in the\n ``PipelineRunStatus.NOT_STARTED`` state.\n\n Args:\n run_id (str): The id of the run the launch.\n """\n from dagster._core.events import DagsterEvent, DagsterEventType, EngineEventData\n from dagster._core.launcher import LaunchRunContext\n\n run = self.get_run_by_id(run_id)\n if run is None:\n raise DagsterInvariantViolationError(\n f"Could not load run {run_id} that was passed to launch_run"\n )\n\n launch_started_event = DagsterEvent(\n event_type_value=DagsterEventType.PIPELINE_STARTING.value,\n job_name=run.job_name,\n )\n self.report_dagster_event(launch_started_event, run_id=run.run_id)\n\n run = self.get_run_by_id(run_id)\n if run is None:\n check.failed(f"Failed to reload run {run_id}")\n\n try:\n self.run_launcher.launch_run(LaunchRunContext(dagster_run=run, workspace=workspace))\n except:\n error = serializable_error_info_from_exc_info(sys.exc_info())\n self.report_engine_event(\n error.message,\n run,\n EngineEventData.engine_error(error),\n )\n self.report_run_failed(run)\n raise\n\n return run\n\n def resume_run(self, run_id: str, workspace: "IWorkspace", attempt_number: int) -> DagsterRun:\n """Resume a pipeline run.\n\n This method should be called on runs which have already been launched, but whose run workers\n have died.\n\n Args:\n run_id (str): The id of the run the launch.\n """\n from dagster._core.events import EngineEventData\n from dagster._core.launcher import ResumeRunContext\n from dagster._daemon.monitoring import RESUME_RUN_LOG_MESSAGE\n\n run = self.get_run_by_id(run_id)\n if run is None:\n raise DagsterInvariantViolationError(\n f"Could not load run {run_id} that was passed to resume_run"\n )\n if run.status not in IN_PROGRESS_RUN_STATUSES:\n raise DagsterInvariantViolationError(\n f"Run {run_id} is not in a state that can be resumed"\n )\n\n self.report_engine_event(\n RESUME_RUN_LOG_MESSAGE,\n run,\n )\n\n try:\n self.run_launcher.resume_run(\n ResumeRunContext(\n dagster_run=run,\n 
workspace=workspace,\n resume_attempt_number=attempt_number,\n )\n )\n except:\n error = serializable_error_info_from_exc_info(sys.exc_info())\n self.report_engine_event(\n error.message,\n run,\n EngineEventData.engine_error(error),\n )\n self.report_run_failed(run)\n raise\n\n return run\n\n def count_resume_run_attempts(self, run_id: str) -> int:\n from dagster._daemon.monitoring import count_resume_run_attempts\n\n return count_resume_run_attempts(self, run_id)\n\n def run_will_resume(self, run_id: str) -> bool:\n if not self.run_monitoring_enabled:\n return False\n return self.count_resume_run_attempts(run_id) < self.run_monitoring_max_resume_run_attempts\n\n # Scheduler\n\n def start_schedule(self, external_schedule: "ExternalSchedule") -> "InstigatorState":\n return self._scheduler.start_schedule(self, external_schedule) # type: ignore\n\n def stop_schedule(\n self,\n schedule_origin_id: str,\n schedule_selector_id: str,\n external_schedule: Optional["ExternalSchedule"],\n ) -> "InstigatorState":\n return self._scheduler.stop_schedule( # type: ignore\n self, schedule_origin_id, schedule_selector_id, external_schedule\n )\n\n def scheduler_debug_info(self) -> "SchedulerDebugInfo":\n from dagster._core.definitions.run_request import InstigatorType\n from dagster._core.scheduler import SchedulerDebugInfo\n\n errors = []\n\n schedules: List[str] = []\n for schedule_state in self.all_instigator_state(instigator_type=InstigatorType.SCHEDULE):\n schedule_info: Mapping[str, Mapping[str, object]] = {\n schedule_state.instigator_name: {\n "status": schedule_state.status.value,\n "cron_schedule": schedule_state.instigator_data.cron_schedule,\n "schedule_origin_id": schedule_state.instigator_origin_id,\n "repository_origin_id": schedule_state.repository_origin_id,\n }\n }\n\n schedules.append(yaml.safe_dump(schedule_info, default_flow_style=False))\n\n return SchedulerDebugInfo(\n scheduler_config_info=self._info_str_for_component("Scheduler", self.scheduler),\n scheduler_info=self.scheduler.debug_info(), # type: ignore\n schedule_storage=schedules,\n errors=errors,\n )\n\n # Schedule / Sensor Storage\n\n def start_sensor(self, external_sensor: "ExternalSensor") -> "InstigatorState":\n from dagster._core.definitions.run_request import InstigatorType\n from dagster._core.scheduler.instigation import (\n InstigatorState,\n InstigatorStatus,\n SensorInstigatorData,\n )\n\n stored_state = self.get_instigator_state(\n external_sensor.get_external_origin_id(), external_sensor.selector_id\n )\n\n computed_state = external_sensor.get_current_instigator_state(stored_state)\n if computed_state.is_running:\n return computed_state\n\n if not stored_state:\n return self.add_instigator_state(\n InstigatorState(\n external_sensor.get_external_origin(),\n InstigatorType.SENSOR,\n InstigatorStatus.RUNNING,\n SensorInstigatorData(min_interval=external_sensor.min_interval_seconds),\n )\n )\n else:\n return self.update_instigator_state(stored_state.with_status(InstigatorStatus.RUNNING))\n\n def stop_sensor(\n self,\n instigator_origin_id: str,\n selector_id: str,\n external_sensor: Optional["ExternalSensor"],\n ) -> "InstigatorState":\n from dagster._core.definitions.run_request import InstigatorType\n from dagster._core.scheduler.instigation import (\n InstigatorState,\n InstigatorStatus,\n SensorInstigatorData,\n )\n\n stored_state = self.get_instigator_state(instigator_origin_id, selector_id)\n computed_state: InstigatorState\n if external_sensor:\n computed_state = 
external_sensor.get_current_instigator_state(stored_state)\n else:\n computed_state = check.not_none(stored_state)\n\n if not computed_state.is_running:\n return computed_state\n\n if not stored_state:\n assert external_sensor\n return self.add_instigator_state(\n InstigatorState(\n external_sensor.get_external_origin(),\n InstigatorType.SENSOR,\n InstigatorStatus.STOPPED,\n SensorInstigatorData(min_interval=external_sensor.min_interval_seconds),\n )\n )\n else:\n return self.update_instigator_state(stored_state.with_status(InstigatorStatus.STOPPED))\n\n @traced\n def all_instigator_state(\n self,\n repository_origin_id: Optional[str] = None,\n repository_selector_id: Optional[str] = None,\n instigator_type: Optional["InstigatorType"] = None,\n instigator_statuses: Optional[Set["InstigatorStatus"]] = None,\n ):\n if not self._schedule_storage:\n check.failed("Schedule storage not available")\n return self._schedule_storage.all_instigator_state(\n repository_origin_id, repository_selector_id, instigator_type, instigator_statuses\n )\n\n @traced\n def get_instigator_state(self, origin_id: str, selector_id: str) -> Optional["InstigatorState"]:\n if not self._schedule_storage:\n check.failed("Schedule storage not available")\n return self._schedule_storage.get_instigator_state(origin_id, selector_id)\n\n def add_instigator_state(self, state: "InstigatorState") -> "InstigatorState":\n if not self._schedule_storage:\n check.failed("Schedule storage not available")\n return self._schedule_storage.add_instigator_state(state)\n\n def update_instigator_state(self, state: "InstigatorState") -> "InstigatorState":\n if not self._schedule_storage:\n check.failed("Schedule storage not available")\n return self._schedule_storage.update_instigator_state(state)\n\n def delete_instigator_state(self, origin_id: str, selector_id: str) -> None:\n return self._schedule_storage.delete_instigator_state(origin_id, selector_id) # type: ignore # (possible none)\n\n @property\n def supports_batch_tick_queries(self) -> bool:\n return self._schedule_storage and self._schedule_storage.supports_batch_queries # type: ignore # (possible none)\n\n @traced\n def get_batch_ticks(\n self,\n selector_ids: Sequence[str],\n limit: Optional[int] = None,\n statuses: Optional[Sequence["TickStatus"]] = None,\n ) -> Mapping[str, Sequence["InstigatorTick"]]:\n if not self._schedule_storage:\n return {}\n return self._schedule_storage.get_batch_ticks(selector_ids, limit, statuses)\n\n @traced\n def get_tick(\n self, origin_id: str, selector_id: str, timestamp: float\n ) -> Optional["InstigatorTick"]:\n matches = self._schedule_storage.get_ticks( # type: ignore # (possible none)\n origin_id, selector_id, before=timestamp + 1, after=timestamp - 1, limit=1\n )\n return matches[0] if len(matches) else None\n\n @traced\n def get_ticks(\n self,\n origin_id: str,\n selector_id: str,\n before: Optional[float] = None,\n after: Optional[float] = None,\n limit: Optional[int] = None,\n statuses: Optional[Sequence["TickStatus"]] = None,\n ) -> Sequence["InstigatorTick"]:\n return self._schedule_storage.get_ticks( # type: ignore # (possible none)\n origin_id, selector_id, before=before, after=after, limit=limit, statuses=statuses\n )\n\n def create_tick(self, tick_data: "TickData") -> "InstigatorTick":\n return check.not_none(self._schedule_storage).create_tick(tick_data)\n\n def update_tick(self, tick: "InstigatorTick"):\n return check.not_none(self._schedule_storage).update_tick(tick)\n\n def purge_ticks(\n self,\n origin_id: str,\n selector_id: 
str,\n before: float,\n tick_statuses: Optional[Sequence["TickStatus"]] = None,\n ) -> None:\n self._schedule_storage.purge_ticks(origin_id, selector_id, before, tick_statuses) # type: ignore # (possible none)\n\n def wipe_all_schedules(self) -> None:\n if self._scheduler:\n self._scheduler.wipe(self) # type: ignore # (possible none)\n\n self._schedule_storage.wipe() # type: ignore # (possible none)\n\n def logs_path_for_schedule(self, schedule_origin_id: str) -> str:\n return self._scheduler.get_logs_path(self, schedule_origin_id) # type: ignore # (possible none)\n\n def __enter__(self) -> Self:\n return self\n\n def __exit__(\n self,\n exception_type: Optional[Type[BaseException]],\n exception_value: Optional[BaseException],\n traceback: Optional[TracebackType],\n ) -> None:\n self.dispose()\n\n # dagster daemon\n def add_daemon_heartbeat(self, daemon_heartbeat: "DaemonHeartbeat") -> None:\n """Called on a regular interval by the daemon."""\n self._run_storage.add_daemon_heartbeat(daemon_heartbeat)\n\n def get_daemon_heartbeats(self) -> Mapping[str, "DaemonHeartbeat"]:\n """Latest heartbeats of all daemon types."""\n return self._run_storage.get_daemon_heartbeats()\n\n def wipe_daemon_heartbeats(self) -> None:\n self._run_storage.wipe_daemon_heartbeats()\n\n def get_required_daemon_types(self) -> Sequence[str]:\n from dagster._core.run_coordinator import QueuedRunCoordinator\n from dagster._core.scheduler import DagsterDaemonScheduler\n from dagster._daemon.asset_daemon import AssetDaemon\n from dagster._daemon.auto_run_reexecution.event_log_consumer import EventLogConsumerDaemon\n from dagster._daemon.daemon import (\n BackfillDaemon,\n MonitoringDaemon,\n SchedulerDaemon,\n SensorDaemon,\n )\n from dagster._daemon.run_coordinator.queued_run_coordinator_daemon import (\n QueuedRunCoordinatorDaemon,\n )\n\n if self.is_ephemeral:\n return []\n\n daemons = [SensorDaemon.daemon_type(), BackfillDaemon.daemon_type()]\n if isinstance(self.scheduler, DagsterDaemonScheduler):\n daemons.append(SchedulerDaemon.daemon_type())\n if isinstance(self.run_coordinator, QueuedRunCoordinator):\n daemons.append(QueuedRunCoordinatorDaemon.daemon_type())\n if self.run_monitoring_enabled:\n daemons.append(MonitoringDaemon.daemon_type())\n if self.run_retries_enabled:\n daemons.append(EventLogConsumerDaemon.daemon_type())\n if self.auto_materialize_enabled:\n daemons.append(AssetDaemon.daemon_type())\n return daemons\n\n def get_daemon_statuses(\n self, daemon_types: Optional[Sequence[str]] = None\n ) -> Mapping[str, "DaemonStatus"]:\n """Get the current status of the daemons. If daemon_types aren't provided, defaults to all\n required types. Returns a dict of daemon type to status.\n """\n from dagster._daemon.controller import get_daemon_statuses\n\n check.opt_sequence_param(daemon_types, "daemon_types", of_type=str)\n return get_daemon_statuses(\n self, daemon_types=daemon_types or self.get_required_daemon_types(), ignore_errors=True\n )\n\n @property\n def daemon_skip_heartbeats_without_errors(self) -> bool:\n # If enabled, daemon threads won't write heartbeats unless they encounter an error. This is\n # enabled in cloud, where we don't need to use heartbeats to check if daemons are running, but\n # do need to surface errors to users. 
This is an optimization to reduce DB writes.\n return False\n\n # backfill\n def get_backfills(\n self,\n status: Optional["BulkActionStatus"] = None,\n cursor: Optional[str] = None,\n limit: Optional[int] = None,\n ) -> Sequence["PartitionBackfill"]:\n return self._run_storage.get_backfills(status=status, cursor=cursor, limit=limit)\n\n def get_backfill(self, backfill_id: str) -> Optional["PartitionBackfill"]:\n return self._run_storage.get_backfill(backfill_id)\n\n def add_backfill(self, partition_backfill: "PartitionBackfill") -> None:\n self._run_storage.add_backfill(partition_backfill)\n\n def update_backfill(self, partition_backfill: "PartitionBackfill") -> None:\n self._run_storage.update_backfill(partition_backfill)\n\n @property\n def should_start_background_run_thread(self) -> bool:\n """Gate on an experimental feature to start a thread that monitors for if the run should be canceled."""\n return False\n\n def get_tick_retention_settings(\n self, instigator_type: "InstigatorType"\n ) -> Mapping["TickStatus", int]:\n from dagster._core.definitions.run_request import InstigatorType\n\n retention_settings = self.get_settings("retention")\n\n if instigator_type == InstigatorType.SCHEDULE:\n tick_settings = retention_settings.get("schedule")\n elif instigator_type == InstigatorType.SENSOR:\n tick_settings = retention_settings.get("sensor")\n elif instigator_type == InstigatorType.AUTO_MATERIALIZE:\n tick_settings = retention_settings.get("auto_materialize")\n else:\n raise Exception(f"Unexpected instigator type {instigator_type}")\n\n default_tick_settings = get_default_tick_retention_settings(instigator_type)\n return get_tick_retention_settings(tick_settings, default_tick_settings)\n\n def inject_env_vars(self, location_name: Optional[str]) -> None:\n if not self._secrets_loader:\n return\n\n new_env = self._secrets_loader.get_secrets_for_environment(location_name)\n for k, v in new_env.items():\n os.environ[k] = v\n\n def get_latest_data_version_record(\n self,\n key: AssetKey,\n is_source: Optional[bool] = None,\n partition_key: Optional[str] = None,\n before_cursor: Optional[int] = None,\n after_cursor: Optional[int] = None,\n ) -> Optional["EventLogRecord"]:\n from dagster._core.event_api import EventRecordsFilter\n from dagster._core.events import DagsterEventType\n\n # When we cant don't know whether the requested key corresponds to a source or regular\n # asset, we need to retrieve both the latest observation and materialization for all assets.\n # If there is a materialization, it's a regular asset and we can ignore the observation.\n\n observation: Optional[EventLogRecord] = None\n if is_source or is_source is None:\n observations = self.get_event_records(\n EventRecordsFilter(\n event_type=DagsterEventType.ASSET_OBSERVATION,\n asset_key=key,\n asset_partitions=[partition_key] if partition_key else None,\n before_cursor=before_cursor,\n after_cursor=after_cursor,\n ),\n limit=1,\n )\n observation = next(iter(observations), None)\n\n materialization: Optional[EventLogRecord] = None\n if not is_source:\n materializations = self.get_event_records(\n EventRecordsFilter(\n event_type=DagsterEventType.ASSET_MATERIALIZATION,\n asset_key=key,\n asset_partitions=[partition_key] if partition_key else None,\n before_cursor=before_cursor,\n after_cursor=after_cursor,\n ),\n limit=1,\n )\n materialization = next(iter(materializations), None)\n\n return materialization or observation\n\n
[docs] @public\n def get_latest_materialization_code_versions(\n self, asset_keys: Iterable[AssetKey]\n ) -> Mapping[AssetKey, Optional[str]]:\n """Returns the code version used for the latest materialization of each of the provided\n assets.\n\n Args:\n asset_keys (Iterable[AssetKey]): The asset keys to find latest materialization code\n versions for.\n\n Returns:\n Mapping[AssetKey, Optional[str]]: A dictionary with a key for each of the provided asset\n keys. The values will be None if the asset has no materializations. If an asset does\n not have a code version explicitly assigned to its definitions, but was\n materialized, Dagster assigns the run ID as its code version.\n """\n result: Dict[AssetKey, Optional[str]] = {}\n latest_materialization_events = self.get_latest_materialization_events(asset_keys)\n for asset_key in asset_keys:\n event_log_entry = latest_materialization_events.get(asset_key)\n if event_log_entry is None:\n result[asset_key] = None\n else:\n data_provenance = extract_data_provenance_from_entry(event_log_entry)\n result[asset_key] = data_provenance.code_version if data_provenance else None\n\n return result
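# Sketch of a staleness check built on the method above; the asset keys and the
# "current" code-version mapping are assumptions for the example.
from dagster import AssetKey, DagsterInstance

current_code_versions = {AssetKey("orders"): "v2", AssetKey("users"): "v7"}  # hypothetical

with DagsterInstance.ephemeral() as instance:
    latest = instance.get_latest_materialization_code_versions(current_code_versions.keys())
    for key, materialized_version in latest.items():
        if materialized_version != current_code_versions[key]:
            print(f"{key.to_user_string()} was last materialized with a different code version")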
\n\n @experimental\n def report_runless_asset_event(\n self,\n asset_event: Union["AssetMaterialization", "AssetObservation", "AssetCheckEvaluation"],\n ):\n """Record an event log entry related to assets that does not belong to a Dagster run."""\n from dagster._core.events import (\n AssetMaterialization,\n AssetObservationData,\n DagsterEvent,\n DagsterEventType,\n StepMaterializationData,\n )\n\n if isinstance(asset_event, AssetMaterialization):\n event_type_value = DagsterEventType.ASSET_MATERIALIZATION.value\n data_payload = StepMaterializationData(asset_event)\n elif isinstance(asset_event, AssetCheckEvaluation):\n event_type_value = DagsterEventType.ASSET_CHECK_EVALUATION.value\n data_payload = asset_event\n elif isinstance(asset_event, AssetObservation):\n event_type_value = DagsterEventType.ASSET_OBSERVATION.value\n data_payload = AssetObservationData(asset_event)\n else:\n raise DagsterInvariantViolationError(\n f"Received unexpected asset event type {asset_event}, expected"\n " AssetMaterialization, AssetObservation or AssetCheckEvaluation"\n )\n\n return self.report_dagster_event(\n run_id=RUNLESS_RUN_ID,\n dagster_event=DagsterEvent(\n event_type_value=event_type_value,\n event_specific_data=data_payload,\n job_name=RUNLESS_JOB_NAME,\n ),\n )\n\n def get_asset_check_support(self) -> "AssetCheckInstanceSupport":\n from dagster._core.storage.asset_check_execution_record import AssetCheckInstanceSupport\n\n return (\n AssetCheckInstanceSupport.SUPPORTED\n if self.event_log_storage.supports_asset_checks\n else AssetCheckInstanceSupport.NEEDS_MIGRATION\n )
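# Sketch of recording a runless materialization with the experimental method above,
# e.g. for an asset materialized by a system outside Dagster. The asset key and
# metadata values are assumptions.
from dagster import AssetKey, AssetMaterialization, DagsterInstance

with DagsterInstance.ephemeral() as instance:
    instance.report_runless_asset_event(
        AssetMaterialization(
            asset_key=AssetKey("orders"),
            metadata={"source": "external_system", "num_rows": 4500},
        )
    )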
\n
", "current_page_name": "_modules/dagster/_core/instance", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "ref": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.instance.ref

\nimport os\nfrom typing import TYPE_CHECKING, Any, Mapping, NamedTuple, Optional, Sequence, Type\n\nimport yaml\n\nimport dagster._check as check\nfrom dagster._serdes import ConfigurableClassData, class_from_code_pointer, whitelist_for_serdes\n\nfrom .config import DAGSTER_CONFIG_YAML_FILENAME, dagster_instance_config\n\nif TYPE_CHECKING:\n    from dagster._core.instance import DagsterInstance, DagsterInstanceOverrides\n    from dagster._core.launcher.base import RunLauncher\n    from dagster._core.run_coordinator.base import RunCoordinator\n    from dagster._core.scheduler.scheduler import Scheduler\n    from dagster._core.secrets.loader import SecretsLoader\n    from dagster._core.storage.base_storage import DagsterStorage\n    from dagster._core.storage.compute_log_manager import ComputeLogManager\n    from dagster._core.storage.event_log.base import EventLogStorage\n    from dagster._core.storage.root import LocalArtifactStorage\n    from dagster._core.storage.runs.base import RunStorage\n    from dagster._core.storage.schedules.base import ScheduleStorage\n\n\ndef compute_logs_directory(base: str) -> str:\n    return os.path.join(base, "storage")\n\n\ndef _runs_directory(base: str) -> str:\n    return os.path.join(base, "history", "")\n\n\ndef _event_logs_directory(base: str) -> str:\n    return os.path.join(base, "history", "runs", "")\n\n\ndef _schedule_directory(base: str) -> str:\n    return os.path.join(base, "schedules")\n\n\ndef configurable_class_data(config_field: Mapping[str, Any]) -> ConfigurableClassData:\n    return ConfigurableClassData(\n        check.str_elem(config_field, "module"),\n        check.str_elem(config_field, "class"),\n        yaml.dump(check.opt_dict_elem(config_field, "config"), default_flow_style=False),\n    )\n\n\ndef configurable_class_data_or_default(\n    config_value: Mapping[str, Any], field_name: str, default: Optional[ConfigurableClassData]\n) -> Optional[ConfigurableClassData]:\n    return (\n        configurable_class_data(config_value[field_name])\n        if config_value.get(field_name)\n        else default\n    )\n\n\ndef configurable_secrets_loader_data(\n    config_field: Mapping[str, Any], default: Optional[ConfigurableClassData]\n) -> Optional[ConfigurableClassData]:\n    if not config_field:\n        return default\n    elif "custom" in config_field:\n        return configurable_class_data(config_field["custom"])\n    else:\n        return None\n\n\ndef configurable_storage_data(\n    config_field: Mapping[str, Any], defaults: Mapping[str, Optional[ConfigurableClassData]]\n) -> Sequence[Optional[ConfigurableClassData]]:\n    storage_data: ConfigurableClassData\n    run_storage_data: Optional[ConfigurableClassData]\n    event_storage_data: Optional[ConfigurableClassData]\n    schedule_storage_data: Optional[ConfigurableClassData]\n\n    if not config_field:\n        storage_data = check.not_none(defaults.get("storage"))\n        run_storage_data = check.not_none(defaults.get("run_storage"))\n        event_storage_data = check.not_none(defaults.get("event_log_storage"))\n        schedule_storage_data = check.not_none(defaults.get("schedule_storage"))\n    elif "postgres" in config_field:\n        config_yaml = yaml.dump(config_field["postgres"], default_flow_style=False)\n        storage_data = ConfigurableClassData(\n            module_name="dagster_postgres",\n            class_name="DagsterPostgresStorage",\n            config_yaml=config_yaml,\n        )\n        # for backwards compatibility\n        run_storage_data = 
ConfigurableClassData(\n            module_name="dagster_postgres",\n            class_name="PostgresRunStorage",\n            config_yaml=config_yaml,\n        )\n        event_storage_data = ConfigurableClassData(\n            module_name="dagster_postgres",\n            class_name="PostgresEventLogStorage",\n            config_yaml=config_yaml,\n        )\n        schedule_storage_data = ConfigurableClassData(\n            module_name="dagster_postgres",\n            class_name="PostgresScheduleStorage",\n            config_yaml=config_yaml,\n        )\n\n    elif "mysql" in config_field:\n        config_yaml = yaml.dump(config_field["mysql"], default_flow_style=False)\n        storage_data = ConfigurableClassData(\n            module_name="dagster_mysql",\n            class_name="DagsterMySQLStorage",\n            config_yaml=config_yaml,\n        )\n        # for backwards compatibility\n        run_storage_data = ConfigurableClassData(\n            module_name="dagster_mysql",\n            class_name="MySQLRunStorage",\n            config_yaml=config_yaml,\n        )\n        event_storage_data = ConfigurableClassData(\n            module_name="dagster_mysql",\n            class_name="MySQLEventLogStorage",\n            config_yaml=config_yaml,\n        )\n        schedule_storage_data = ConfigurableClassData(\n            module_name="dagster_mysql",\n            class_name="MySQLScheduleStorage",\n            config_yaml=config_yaml,\n        )\n\n    elif "sqlite" in config_field:\n        base_dir = config_field["sqlite"]["base_dir"]\n        storage_data = ConfigurableClassData(\n            "dagster._core.storage.sqlite_storage",\n            "DagsterSqliteStorage",\n            yaml.dump({"base_dir": base_dir}, default_flow_style=False),\n        )\n\n        # Back-compat fo the legacy storage field only works if the base_dir is a string\n        # (env var doesn't work since each storage has a different value for the base_dir field)\n        if isinstance(base_dir, str):\n            run_storage_data = ConfigurableClassData(\n                "dagster._core.storage.runs",\n                "SqliteRunStorage",\n                yaml.dump({"base_dir": _runs_directory(base_dir)}, default_flow_style=False),\n            )\n\n            event_storage_data = ConfigurableClassData(\n                "dagster._core.storage.event_log",\n                "SqliteEventLogStorage",\n                yaml.dump({"base_dir": _event_logs_directory(base_dir)}, default_flow_style=False),\n            )\n\n            schedule_storage_data = ConfigurableClassData(\n                "dagster._core.storage.schedules",\n                "SqliteScheduleStorage",\n                yaml.dump({"base_dir": _schedule_directory(base_dir)}, default_flow_style=False),\n            )\n        else:\n            run_storage_data = None\n            event_storage_data = None\n            schedule_storage_data = None\n    else:\n        storage_data = configurable_class_data(config_field["custom"])\n        storage_config_yaml = yaml.dump(\n            {\n                "module_name": storage_data.module_name,\n                "class_name": storage_data.class_name,\n                "config_yaml": storage_data.config_yaml,\n            },\n            default_flow_style=False,\n        )\n        run_storage_data = ConfigurableClassData(\n            "dagster._core.storage.legacy_storage", "LegacyRunStorage", storage_config_yaml\n        )\n        event_storage_data = ConfigurableClassData(\n            
"dagster._core.storage.legacy_storage", "LegacyEventLogStorage", storage_config_yaml\n        )\n        schedule_storage_data = ConfigurableClassData(\n            "dagster._core.storage.legacy_storage", "LegacyScheduleStorage", storage_config_yaml\n        )\n\n    return [storage_data, run_storage_data, event_storage_data, schedule_storage_data]\n\n\n
[docs]@whitelist_for_serdes\nclass InstanceRef(\n NamedTuple(\n "_InstanceRef",\n [\n ("local_artifact_storage_data", ConfigurableClassData),\n ("compute_logs_data", ConfigurableClassData),\n ("scheduler_data", Optional[ConfigurableClassData]),\n ("run_coordinator_data", Optional[ConfigurableClassData]),\n ("run_launcher_data", Optional[ConfigurableClassData]),\n ("settings", Mapping[str, object]),\n # Required for backwards compatibility, but going forward will be unused by new versions\n # of DagsterInstance, which instead will instead grab the constituent storages from the\n # unified `storage_data`, if it is populated.\n ("run_storage_data", Optional[ConfigurableClassData]),\n ("event_storage_data", Optional[ConfigurableClassData]),\n ("schedule_storage_data", Optional[ConfigurableClassData]),\n ("custom_instance_class_data", Optional[ConfigurableClassData]),\n # unified storage field\n ("storage_data", Optional[ConfigurableClassData]),\n ("secrets_loader_data", Optional[ConfigurableClassData]),\n ],\n )\n):\n """Serializable representation of a :py:class:`DagsterInstance`.\n\n Users should not instantiate this class directly.\n """\n\n def __new__(\n cls,\n local_artifact_storage_data: ConfigurableClassData,\n compute_logs_data: ConfigurableClassData,\n scheduler_data: Optional[ConfigurableClassData],\n run_coordinator_data: Optional[ConfigurableClassData],\n run_launcher_data: Optional[ConfigurableClassData],\n settings: Mapping[str, object],\n run_storage_data: Optional[ConfigurableClassData],\n event_storage_data: Optional[ConfigurableClassData],\n schedule_storage_data: Optional[ConfigurableClassData],\n custom_instance_class_data: Optional[ConfigurableClassData] = None,\n storage_data: Optional[ConfigurableClassData] = None,\n secrets_loader_data: Optional[ConfigurableClassData] = None,\n ):\n return super(cls, InstanceRef).__new__(\n cls,\n local_artifact_storage_data=check.inst_param(\n local_artifact_storage_data, "local_artifact_storage_data", ConfigurableClassData\n ),\n compute_logs_data=check.inst_param(\n compute_logs_data, "compute_logs_data", ConfigurableClassData\n ),\n scheduler_data=check.opt_inst_param(\n scheduler_data, "scheduler_data", ConfigurableClassData\n ),\n run_coordinator_data=check.opt_inst_param(\n run_coordinator_data, "run_coordinator_data", ConfigurableClassData\n ),\n run_launcher_data=check.opt_inst_param(\n run_launcher_data, "run_launcher_data", ConfigurableClassData\n ),\n settings=check.opt_mapping_param(settings, "settings", key_type=str),\n run_storage_data=check.opt_inst_param(\n run_storage_data, "run_storage_data", ConfigurableClassData\n ),\n event_storage_data=check.opt_inst_param(\n event_storage_data, "event_storage_data", ConfigurableClassData\n ),\n schedule_storage_data=check.opt_inst_param(\n schedule_storage_data, "schedule_storage_data", ConfigurableClassData\n ),\n custom_instance_class_data=check.opt_inst_param(\n custom_instance_class_data,\n "instance_class",\n ConfigurableClassData,\n ),\n storage_data=check.opt_inst_param(storage_data, "storage_data", ConfigurableClassData),\n secrets_loader_data=check.opt_inst_param(\n secrets_loader_data, "secrets_loader_data", ConfigurableClassData\n ),\n )\n\n @staticmethod\n def config_defaults(base_dir: str) -> Mapping[str, Optional[ConfigurableClassData]]:\n default_run_storage_data = ConfigurableClassData(\n "dagster._core.storage.runs",\n "SqliteRunStorage",\n yaml.dump({"base_dir": _runs_directory(base_dir)}, default_flow_style=False),\n )\n default_event_log_storage_data = 
ConfigurableClassData(\n "dagster._core.storage.event_log",\n "SqliteEventLogStorage",\n yaml.dump({"base_dir": _event_logs_directory(base_dir)}, default_flow_style=False),\n )\n default_schedule_storage_data = ConfigurableClassData(\n "dagster._core.storage.schedules",\n "SqliteScheduleStorage",\n yaml.dump({"base_dir": _schedule_directory(base_dir)}, default_flow_style=False),\n )\n\n return {\n "local_artifact_storage": ConfigurableClassData(\n "dagster._core.storage.root",\n "LocalArtifactStorage",\n yaml.dump({"base_dir": base_dir}, default_flow_style=False),\n ),\n "storage": ConfigurableClassData(\n "dagster._core.storage.sqlite_storage",\n "DagsterSqliteStorage",\n yaml.dump({"base_dir": base_dir}, default_flow_style=False),\n ),\n "compute_logs": ConfigurableClassData(\n "dagster._core.storage.local_compute_log_manager",\n "LocalComputeLogManager",\n yaml.dump({"base_dir": compute_logs_directory(base_dir)}, default_flow_style=False),\n ),\n "scheduler": ConfigurableClassData(\n "dagster._core.scheduler",\n "DagsterDaemonScheduler",\n yaml.dump({}),\n ),\n "run_coordinator": ConfigurableClassData(\n "dagster._core.run_coordinator", "DefaultRunCoordinator", yaml.dump({})\n ),\n "run_launcher": ConfigurableClassData(\n "dagster",\n "DefaultRunLauncher",\n yaml.dump({}),\n ),\n # For back-compat, the default is actually set in the secrets_loader property above,\n # so that old clients loading new config don't try to load a class that they\n # don't recognize\n "secrets": None,\n # LEGACY DEFAULTS\n "run_storage": default_run_storage_data,\n "event_log_storage": default_event_log_storage_data,\n "schedule_storage": default_schedule_storage_data,\n }\n\n @staticmethod\n def from_dir(\n base_dir: str,\n *,\n config_dir: Optional[str] = None,\n config_filename: str = DAGSTER_CONFIG_YAML_FILENAME,\n overrides: Optional["DagsterInstanceOverrides"] = None,\n ) -> "InstanceRef":\n if config_dir is None:\n config_dir = base_dir\n\n overrides = check.opt_mapping_param(overrides, "overrides")\n config_value, custom_instance_class = dagster_instance_config(\n config_dir, config_filename=config_filename, overrides=overrides\n )\n\n if custom_instance_class:\n config_keys = set(custom_instance_class.config_schema().keys()) # type: ignore # (undefined method)\n custom_instance_class_config = {\n key: val for key, val in config_value.items() if key in config_keys\n }\n custom_instance_class_data = ConfigurableClassData(\n config_value["instance_class"]["module"],\n config_value["instance_class"]["class"],\n yaml.dump(custom_instance_class_config, default_flow_style=False),\n )\n defaults = custom_instance_class.config_defaults(base_dir) # type: ignore # (undefined method)\n else:\n custom_instance_class_data = None\n defaults = InstanceRef.config_defaults(base_dir)\n\n local_artifact_storage_data = configurable_class_data_or_default(\n config_value, "local_artifact_storage", defaults["local_artifact_storage"]\n )\n\n compute_logs_data = configurable_class_data_or_default(\n config_value,\n "compute_logs",\n defaults["compute_logs"],\n )\n\n if (\n config_value.get("run_storage")\n or config_value.get("event_log_storage")\n or config_value.get("schedule_storage")\n ):\n # using legacy config, specifying config for each of the constituent storages, make sure\n # to create a composite storage\n run_storage_data = configurable_class_data_or_default(\n config_value, "run_storage", defaults["run_storage"]\n )\n event_storage_data = configurable_class_data_or_default(\n config_value, "event_log_storage", 
defaults["event_log_storage"]\n )\n schedule_storage_data = configurable_class_data_or_default(\n config_value, "schedule_storage", defaults["schedule_storage"]\n )\n storage_data = ConfigurableClassData(\n module_name="dagster._core.storage.legacy_storage",\n class_name="CompositeStorage",\n config_yaml=yaml.dump(\n {\n "run_storage": {\n "module_name": run_storage_data.module_name, # type: ignore # (possible none)\n "class_name": run_storage_data.class_name, # type: ignore # (possible none)\n "config_yaml": run_storage_data.config_yaml, # type: ignore # (possible none)\n },\n "event_log_storage": {\n "module_name": event_storage_data.module_name, # type: ignore # (possible none)\n "class_name": event_storage_data.class_name, # type: ignore # (possible none)\n "config_yaml": event_storage_data.config_yaml, # type: ignore # (possible none)\n },\n "schedule_storage": {\n "module_name": schedule_storage_data.module_name, # type: ignore # (possible none)\n "class_name": schedule_storage_data.class_name, # type: ignore # (possible none)\n "config_yaml": schedule_storage_data.config_yaml, # type: ignore # (possible none)\n },\n },\n default_flow_style=False,\n ),\n )\n\n else:\n [\n storage_data,\n run_storage_data,\n event_storage_data,\n schedule_storage_data,\n ] = configurable_storage_data(\n config_value.get("storage"), defaults # type: ignore # (possible none)\n )\n\n scheduler_data = configurable_class_data_or_default(\n config_value, "scheduler", defaults["scheduler"]\n )\n\n if config_value.get("run_queue"):\n run_coordinator_data = configurable_class_data(\n {\n "module": "dagster.core.run_coordinator",\n "class": "QueuedRunCoordinator",\n "config": config_value["run_queue"],\n }\n )\n else:\n run_coordinator_data = configurable_class_data_or_default(\n config_value,\n "run_coordinator",\n defaults["run_coordinator"],\n )\n\n run_launcher_data = configurable_class_data_or_default(\n config_value,\n "run_launcher",\n defaults["run_launcher"],\n )\n\n secrets_loader_data = configurable_secrets_loader_data(\n config_value.get("secrets"), defaults["secrets"] # type: ignore # (possible none)\n )\n\n settings_keys = {\n "telemetry",\n "python_logs",\n "run_monitoring",\n "run_retries",\n "code_servers",\n "retention",\n "sensors",\n "schedules",\n "nux",\n "auto_materialize",\n }\n settings = {key: config_value.get(key) for key in settings_keys if config_value.get(key)}\n\n return InstanceRef(\n local_artifact_storage_data=local_artifact_storage_data, # type: ignore # (possible none)\n run_storage_data=run_storage_data,\n event_storage_data=event_storage_data,\n compute_logs_data=compute_logs_data, # type: ignore # (possible none)\n schedule_storage_data=schedule_storage_data,\n scheduler_data=scheduler_data,\n run_coordinator_data=run_coordinator_data,\n run_launcher_data=run_launcher_data,\n settings=settings,\n custom_instance_class_data=custom_instance_class_data,\n storage_data=storage_data,\n secrets_loader_data=secrets_loader_data,\n )\n\n @staticmethod\n def from_dict(instance_ref_dict):\n def value_for_ref_item(k, v):\n if v is None:\n return None\n if k == "settings":\n return v\n return ConfigurableClassData(*v)\n\n return InstanceRef(**{k: value_for_ref_item(k, v) for k, v in instance_ref_dict.items()})\n\n @property\n def local_artifact_storage(self) -> "LocalArtifactStorage":\n from dagster._core.storage.root import LocalArtifactStorage\n\n return self.local_artifact_storage_data.rehydrate(as_type=LocalArtifactStorage)\n\n @property\n def storage(self) -> 
Optional["DagsterStorage"]:\n from dagster._core.storage.base_storage import DagsterStorage\n\n return self.storage_data.rehydrate(as_type=DagsterStorage) if self.storage_data else None\n\n @property\n def run_storage(self) -> Optional["RunStorage"]:\n from dagster._core.storage.runs.base import RunStorage\n\n return (\n self.run_storage_data.rehydrate(as_type=RunStorage) if self.run_storage_data else None\n )\n\n @property\n def event_storage(self) -> Optional["EventLogStorage"]:\n from dagster._core.storage.event_log.base import EventLogStorage\n\n return (\n self.event_storage_data.rehydrate(as_type=EventLogStorage)\n if self.event_storage_data\n else None\n )\n\n @property\n def schedule_storage(self) -> Optional["ScheduleStorage"]:\n from dagster._core.storage.schedules.base import ScheduleStorage\n\n return (\n self.schedule_storage_data.rehydrate(as_type=ScheduleStorage)\n if self.schedule_storage_data\n else None\n )\n\n @property\n def compute_log_manager(self) -> "ComputeLogManager":\n from dagster._core.storage.compute_log_manager import ComputeLogManager\n\n return self.compute_logs_data.rehydrate(as_type=ComputeLogManager)\n\n @property\n def scheduler(self) -> Optional["Scheduler"]:\n from dagster._core.scheduler.scheduler import Scheduler\n\n return self.scheduler_data.rehydrate(as_type=Scheduler) if self.scheduler_data else None\n\n @property\n def run_coordinator(self) -> Optional["RunCoordinator"]:\n from dagster._core.run_coordinator.base import RunCoordinator\n\n return (\n self.run_coordinator_data.rehydrate(as_type=RunCoordinator)\n if self.run_coordinator_data\n else None\n )\n\n @property\n def run_launcher(self) -> Optional["RunLauncher"]:\n from dagster._core.launcher.base import RunLauncher\n\n return (\n self.run_launcher_data.rehydrate(as_type=RunLauncher)\n if self.run_launcher_data\n else None\n )\n\n @property\n def secrets_loader(self) -> Optional["SecretsLoader"]:\n from dagster._core.secrets.loader import SecretsLoader\n\n # Defining a default here rather than in stored config to avoid\n # back-compat issues when loading the config on older versions where\n # EnvFileLoader was not defined\n return (\n self.secrets_loader_data.rehydrate(as_type=SecretsLoader)\n if self.secrets_loader_data\n else None\n )\n\n @property\n def custom_instance_class(self) -> Type["DagsterInstance"]:\n return ( # type: ignore # (ambiguous return type)\n class_from_code_pointer(\n self.custom_instance_class_data.module_name,\n self.custom_instance_class_data.class_name,\n )\n if self.custom_instance_class_data\n else None\n )\n\n @property\n def custom_instance_class_config(self) -> Mapping[str, Any]:\n return (\n self.custom_instance_class_data.config_dict if self.custom_instance_class_data else {}\n )\n\n def to_dict(self) -> Mapping[str, Any]:\n return self._asdict()
\n
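# --- Hedged usage sketch (not part of the source above): InstanceRef.from_dir builds a
# serializable ref from a directory that may contain a dagster.yaml, falling back to the
# documented defaults. InstanceRef is internal ("users should not instantiate this class
# directly"), so this is illustrative only and the path below is a placeholder.
from dagster._core.instance.ref import InstanceRef

ref = InstanceRef.from_dir("/path/to/dagster_home")
# Components are rehydrated lazily from their ConfigurableClassData, e.g.:
launcher = ref.run_launcher          # DefaultRunLauncher unless overridden in dagster.yaml
log_manager = ref.compute_log_manager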
", "current_page_name": "_modules/dagster/_core/instance/ref", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}, {"link": "../", "title": "dagster._core.instance"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.instance.ref"}, "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.instance"}, "instance_for_test": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.instance_for_test

\nimport os\nimport sys\nimport tempfile\nfrom contextlib import ExitStack, contextmanager\nfrom typing import Any, Iterator, Mapping, Optional\n\nimport yaml\n\nfrom dagster._utils.error import serializable_error_info_from_exc_info\n\nfrom .._utils.env import environ\nfrom .._utils.merger import merge_dicts\nfrom .instance import DagsterInstance\n\n\n
[docs]@contextmanager\ndef instance_for_test(\n overrides: Optional[Mapping[str, Any]] = None,\n set_dagster_home: bool = True,\n temp_dir: Optional[str] = None,\n) -> Iterator[DagsterInstance]:\n """Creates a persistent :py:class:`~dagster.DagsterInstance` available within a context manager.\n\n When a context manager is opened, if no `temp_dir` parameter is set, a new\n temporary directory will be created for the duration of the context\n manager's opening. If the `set_dagster_home` parameter is set to True\n (True by default), the `$DAGSTER_HOME` environment variable will be\n overridden to be this directory (or the directory passed in by `temp_dir`)\n for the duration of the context manager being open.\n\n Args:\n overrides (Optional[Mapping[str, Any]]):\n Config to provide to instance (config format follows that typically found in an `instance.yaml` file).\n set_dagster_home (Optional[bool]):\n If set to True, the `$DAGSTER_HOME` environment variable will be\n overridden to be the directory used by this instance for the\n duration that the context manager is open. Upon the context\n manager closing, the `$DAGSTER_HOME` variable will be re-set to the original value. (Defaults to True).\n temp_dir (Optional[str]):\n The directory to use for storing local artifacts produced by the\n instance. If not set, a temporary directory will be created for\n the duration of the context manager being open, and all artifacts\n will be torn down afterward.\n """\n with ExitStack() as stack:\n if not temp_dir:\n temp_dir = stack.enter_context(tempfile.TemporaryDirectory())\n\n # wait for any grpc processes that created runs during test disposal to finish,\n # since they might also be using this instance's tempdir (and to keep each test\n # isolated / avoid race conditions in newer versions of grpcio when servers are\n # shutting down and spinning up at the same time)\n instance_overrides = merge_dicts(\n {\n "telemetry": {"enabled": False},\n "code_servers": {"wait_for_local_processes_on_shutdown": True},\n },\n (overrides if overrides else {}),\n )\n\n if set_dagster_home:\n stack.enter_context(\n environ({"DAGSTER_HOME": temp_dir, "DAGSTER_DISABLE_TELEMETRY": "yes"})\n )\n\n with open(os.path.join(temp_dir, "dagster.yaml"), "w", encoding="utf8") as fd:\n yaml.dump(instance_overrides, fd, default_flow_style=False)\n\n with DagsterInstance.from_config(temp_dir) as instance:\n try:\n yield instance\n except:\n sys.stderr.write(\n "Test raised an exception, attempting to clean up instance:"\n + serializable_error_info_from_exc_info(sys.exc_info()).to_string()\n + "\\n"\n )\n raise\n finally:\n cleanup_test_instance(instance)
\n\n\ndef cleanup_test_instance(instance: DagsterInstance) -> None:\n # To avoid filesystem contention when we close the temporary directory, wait for\n # all runs to reach a terminal state, and close any subprocesses or threads\n # that might be accessing the run history DB.\n\n # Since launcher is lazy loaded, we don't need to do anything if it's None\n if instance._run_launcher: # noqa: SLF001\n instance._run_launcher.join() # noqa: SLF001\n
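# --- Hedged test sketch: instance_for_test yields a throwaway DagsterInstance rooted in a
# temporary directory, with $DAGSTER_HOME pointed at it for the duration of the block.
# The assertion is illustrative; any instance API could be exercised here.
from dagster._core.instance_for_test import instance_for_test


def test_instance_starts_empty():
    with instance_for_test(overrides={"telemetry": {"enabled": False}}) as instance:
        # All artifacts live in a temp dir that is cleaned up when the context exits.
        assert len(instance.get_runs()) == 0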
", "current_page_name": "_modules/dagster/_core/instance_for_test", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.instance_for_test"}, "launcher": {"base": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.launcher.base

\nfrom abc import ABC, abstractmethod\nfrom enum import Enum\nfrom typing import NamedTuple, Optional\n\nfrom dagster._core.instance import MayHaveInstanceWeakref, T_DagsterInstance\nfrom dagster._core.origin import JobPythonOrigin\nfrom dagster._core.storage.dagster_run import DagsterRun\nfrom dagster._core.workspace.workspace import IWorkspace\nfrom dagster._serdes import whitelist_for_serdes\n\n\nclass LaunchRunContext(NamedTuple):\n    """Context available within a run launcher's launch_run call."""\n\n    dagster_run: DagsterRun\n    workspace: Optional[IWorkspace]\n\n    @property\n    def job_code_origin(self) -> Optional[JobPythonOrigin]:\n        return self.dagster_run.job_code_origin\n\n\nclass ResumeRunContext(NamedTuple):\n    """Context available within a run launcher's resume_run call."""\n\n    dagster_run: DagsterRun\n    workspace: Optional[IWorkspace]\n    resume_attempt_number: Optional[int] = None\n\n    @property\n    def job_code_origin(self) -> Optional[JobPythonOrigin]:\n        return self.dagster_run.job_code_origin\n\n\n@whitelist_for_serdes\nclass WorkerStatus(Enum):\n    RUNNING = "RUNNING"\n    NOT_FOUND = "NOT_FOUND"\n    FAILED = "FAILED"\n    SUCCESS = "SUCCESS"\n    UNKNOWN = "UNKNOWN"\n\n\nclass CheckRunHealthResult(NamedTuple):\n    """Result of a check_run_worker_health call."""\n\n    status: WorkerStatus\n    msg: Optional[str] = None\n    transient: Optional[bool] = None\n    run_worker_id: Optional[str] = None  # Identifier for a particular run worker\n\n    def __str__(self) -> str:\n        return f"{self.status.value}: '{self.msg}'"\n\n\n
[docs]class RunLauncher(ABC, MayHaveInstanceWeakref[T_DagsterInstance]):\n @abstractmethod\n def launch_run(self, context: LaunchRunContext) -> None:\n """Launch a run.\n\n This method should begin the execution of the specified run, and may emit engine events.\n Runs should be created in the instance (e.g., by calling\n ``DagsterInstance.create_run()``) *before* this method is called, and\n should be in the ``PipelineRunStatus.STARTING`` state. Typically, this method will\n not be invoked directly, but should be invoked through ``DagsterInstance.launch_run()``.\n\n Args:\n context (LaunchRunContext): information about the launch - every run launcher\n will need the PipelineRun, and some run launchers may need information from the\n IWorkspace from which the run was launched.\n """\n\n @abstractmethod\n def terminate(self, run_id: str) -> bool:\n """Terminates a process.\n\n Returns False if the process was already terminated. Returns True if\n the process was alive and was successfully terminated.\n """\n\n def dispose(self) -> None:\n """Do any resource cleanup that should happen when the DagsterInstance is\n cleaning itself up.\n """\n\n def join(self, timeout: int = 30) -> None:\n pass\n\n @property\n def supports_check_run_worker_health(self) -> bool:\n """Whether the run launcher supports check_run_worker_health."""\n return False\n\n def check_run_worker_health(self, run: DagsterRun) -> CheckRunHealthResult:\n raise NotImplementedError(\n "This run launcher does not support run monitoring. Please disable it on your instance."\n )\n\n def get_run_worker_debug_info(self, run: DagsterRun) -> Optional[str]:\n return None\n\n @property\n def supports_resume_run(self) -> bool:\n """Whether the run launcher supports resume_run."""\n return False\n\n def resume_run(self, context: ResumeRunContext) -> None:\n raise NotImplementedError(\n "This run launcher does not support resuming runs. If using "\n "run monitoring, set max_resume_run_attempts to 0."\n )
\n
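# --- Hedged skeleton of a custom RunLauncher, covering just the abstract surface defined
# above (launch_run and terminate). A real launcher would start and stop an external
# process or container; this one only records run ids, for illustration.
from dagster._core.launcher.base import LaunchRunContext, RunLauncher


class RecordingRunLauncher(RunLauncher):
    def __init__(self):
        self._launched_run_ids = []
        super().__init__()

    def launch_run(self, context: LaunchRunContext) -> None:
        # The run already exists in the instance and is in the STARTING state.
        self._launched_run_ids.append(context.dagster_run.run_id)

    def terminate(self, run_id: str) -> bool:
        # Nothing is actually running, so there is nothing to terminate.
        return False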
", "current_page_name": "_modules/dagster/_core/launcher/base", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.launcher.base"}, "default_run_launcher": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.launcher.default_run_launcher

\nimport time\nfrom typing import TYPE_CHECKING, Any, Mapping, Optional, cast\n\nfrom typing_extensions import Self\n\nimport dagster._seven as seven\nfrom dagster import (\n    _check as check,\n)\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._core.errors import (\n    DagsterInvariantViolationError,\n    DagsterLaunchFailedError,\n    DagsterUserCodeProcessError,\n)\nfrom dagster._core.storage.dagster_run import DagsterRun\nfrom dagster._core.storage.tags import GRPC_INFO_TAG\nfrom dagster._serdes import (\n    ConfigurableClass,\n    deserialize_value,\n)\nfrom dagster._serdes.config_class import ConfigurableClassData\nfrom dagster._utils.merger import merge_dicts\n\nfrom .base import LaunchRunContext, RunLauncher\n\nif TYPE_CHECKING:\n    from dagster._core.instance import DagsterInstance\n    from dagster._grpc.client import DagsterGrpcClient\n\n\n# note: this class is a top level export, so we defer many imports til use for performance\n
[docs]class DefaultRunLauncher(RunLauncher, ConfigurableClass):\n """Launches runs against running GRPC servers."""\n\n def __init__(\n self,\n inst_data: Optional[ConfigurableClassData] = None,\n ):\n self._inst_data = inst_data\n\n self._run_ids = set()\n\n super().__init__()\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n @classmethod\n def config_type(cls) -> UserConfigSchema:\n return {}\n\n @classmethod\n def from_config_value(\n cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any]\n ) -> Self:\n return DefaultRunLauncher(inst_data=inst_data)\n\n @staticmethod\n def launch_run_from_grpc_client(\n instance: "DagsterInstance", run: DagsterRun, grpc_client: "DagsterGrpcClient"\n ):\n # defer for perf\n from dagster._grpc.types import ExecuteExternalJobArgs, StartRunResult\n\n instance.add_run_tags(\n run.run_id,\n {\n GRPC_INFO_TAG: seven.json.dumps(\n merge_dicts(\n {"host": grpc_client.host},\n (\n {"port": grpc_client.port}\n if grpc_client.port\n else {"socket": grpc_client.socket}\n ),\n ({"use_ssl": True} if grpc_client.use_ssl else {}),\n )\n )\n },\n )\n\n res = deserialize_value(\n grpc_client.start_run(\n ExecuteExternalJobArgs(\n job_origin=run.external_job_origin, # type: ignore # (possible none)\n run_id=run.run_id,\n instance_ref=instance.get_ref(),\n )\n ),\n StartRunResult,\n )\n if not res.success:\n raise (\n DagsterLaunchFailedError(\n res.message, serializable_error_info=res.serializable_error_info\n )\n )\n\n def launch_run(self, context: LaunchRunContext) -> None:\n # defer for perf\n from dagster._core.host_representation.code_location import (\n GrpcServerCodeLocation,\n )\n\n run = context.dagster_run\n\n check.inst_param(run, "run", DagsterRun)\n\n if not context.workspace:\n raise DagsterInvariantViolationError(\n "DefaultRunLauncher requires a workspace to be included in its LaunchRunContext"\n )\n\n external_job_origin = check.not_none(run.external_job_origin)\n code_location = context.workspace.get_code_location(\n external_job_origin.external_repository_origin.code_location_origin.location_name\n )\n\n check.inst(\n code_location,\n GrpcServerCodeLocation,\n "DefaultRunLauncher: Can't launch runs for pipeline not loaded from a GRPC server",\n )\n\n DefaultRunLauncher.launch_run_from_grpc_client(\n self._instance, run, cast(GrpcServerCodeLocation, code_location).client\n )\n\n self._run_ids.add(run.run_id)\n\n def _get_grpc_client_for_termination(self, run_id):\n # defer for perf\n from dagster._grpc.client import DagsterGrpcClient\n\n if not self.has_instance:\n return None\n\n run = self._instance.get_run_by_id(run_id)\n if not run or run.is_finished:\n return None\n\n tags = run.tags\n\n if GRPC_INFO_TAG not in tags:\n return None\n\n grpc_info = seven.json.loads(tags.get(GRPC_INFO_TAG))\n\n return DagsterGrpcClient(\n port=grpc_info.get("port"),\n socket=grpc_info.get("socket"),\n host=grpc_info.get("host"),\n use_ssl=bool(grpc_info.get("use_ssl", False)),\n )\n\n def terminate(self, run_id):\n # defer for perf\n from dagster._grpc.types import CancelExecutionRequest, CancelExecutionResult\n\n check.str_param(run_id, "run_id")\n if not self.has_instance:\n return False\n\n run = self._instance.get_run_by_id(run_id)\n if not run:\n return False\n\n self._instance.report_run_canceling(run)\n\n client = self._get_grpc_client_for_termination(run_id)\n\n if not client:\n self._instance.report_engine_event(\n message="Unable to get grpc client to send termination request to.",\n 
dagster_run=run,\n cls=self.__class__,\n )\n return False\n\n res = deserialize_value(\n client.cancel_execution(CancelExecutionRequest(run_id=run_id)), CancelExecutionResult\n )\n\n if res.serializable_error_info:\n raise DagsterUserCodeProcessError.from_error_info(res.serializable_error_info)\n\n return res.success\n\n def join(self, timeout=30):\n # If this hasn't been initialized at all, we can just do a noop\n if not self.has_instance:\n return\n\n total_time = 0\n interval = 0.01\n\n while True:\n active_run_ids = [\n run_id\n for run_id in self._run_ids\n if (\n self._instance.get_run_by_id(run_id)\n and not self._instance.get_run_by_id(run_id).is_finished\n )\n ]\n\n if len(active_run_ids) == 0:\n return\n\n if total_time >= timeout:\n raise Exception(f"Timed out waiting for these runs to finish: {active_run_ids!r}")\n\n total_time += interval\n time.sleep(interval)\n interval = interval * 2
\n
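# --- Hedged sketch tying this launcher back to the instance defaults shown earlier: the
# default instance config registers DefaultRunLauncher as a ConfigurableClassData entry,
# which can be rehydrated into a launcher object.
import yaml

from dagster._core.launcher.base import RunLauncher
from dagster._serdes.config_class import ConfigurableClassData

run_launcher_data = ConfigurableClassData("dagster", "DefaultRunLauncher", yaml.dump({}))
launcher = run_launcher_data.rehydrate(as_type=RunLauncher)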
", "current_page_name": "_modules/dagster/_core/launcher/default_run_launcher", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.launcher.default_run_launcher"}}, "log_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.log_manager

\nimport datetime\nimport logging\nfrom typing import TYPE_CHECKING, Any, Mapping, NamedTuple, Optional, Sequence, Union, cast\n\nfrom typing_extensions import Protocol\n\nimport dagster._check as check\nfrom dagster._core.utils import coerce_valid_log_level, make_new_run_id\nfrom dagster._utils.log import get_dagster_logger\n\nif TYPE_CHECKING:\n    from dagster import DagsterInstance\n    from dagster._core.events import DagsterEvent\n    from dagster._core.storage.dagster_run import DagsterRun\n\nDAGSTER_META_KEY = "dagster_meta"\n\n\nclass IDagsterMeta(Protocol):\n    @property\n    def dagster_meta(self) -> "DagsterLoggingMetadata": ...\n\n\n# The type-checker complains here that DagsterLogRecord does not implement the `dagster_meta`\n# property of `IDagsterMeta`. We ignore this error because we don't need to implement this method--\n# `DagsterLogRecord` is a stub class that is never instantiated. We only ever cast\n# `logging.LogRecord` objects to `DagsterLogRecord`, because it gives us typed access to the\n# `dagster_meta` property. `dagster_meta` itself is set on these `logging.LogRecord` objects via the\n# `extra` argument to `logging.Logger.log` (see `DagsterLogManager.log_dagster_event`), but\n# `logging.LogRecord` has no way of exposing to the type-checker the attributes that are dynamically\n# defined via `extra`.\nclass DagsterLogRecord(logging.LogRecord, IDagsterMeta):  # type: ignore\n    pass\n\n\nclass DagsterMessageProps(\n    NamedTuple(\n        "_DagsterMessageProps",\n        [\n            ("orig_message", Optional[str]),\n            ("log_message_id", Optional[str]),\n            ("log_timestamp", Optional[str]),\n            ("dagster_event", Optional[Any]),\n        ],\n    )\n):\n    """Internal class used to represent specific attributes about a logged message."""\n\n    def __new__(\n        cls,\n        orig_message: str,\n        log_message_id: Optional[str] = None,\n        log_timestamp: Optional[str] = None,\n        dagster_event: Optional["DagsterEvent"] = None,\n    ):\n        return super().__new__(\n            cls,\n            orig_message=check.str_param(orig_message, "orig_message"),\n            log_message_id=check.opt_str_param(\n                log_message_id, "log_message_id", default=make_new_run_id()\n            ),\n            log_timestamp=check.opt_str_param(\n                log_timestamp,\n                "log_timestamp",\n                default=datetime.datetime.utcnow().isoformat(),\n            ),\n            dagster_event=dagster_event,\n        )\n\n    @property\n    def error_str(self) -> Optional[str]:\n        if self.dagster_event is None:\n            return None\n\n        event_specific_data = self.dagster_event.event_specific_data\n        if not event_specific_data:\n            return None\n\n        error = getattr(event_specific_data, "error", None)\n        if error:\n            return f'\\n\\n{getattr(event_specific_data, "error_display_string", error.to_string())}'\n        return None\n\n    @property\n    def pid(self) -> Optional[str]:\n        if self.dagster_event is None or self.dagster_event.pid is None:\n            return None\n        return str(self.dagster_event.pid)\n\n    @property\n    def step_key(self) -> Optional[str]:\n        if self.dagster_event is None:\n            return None\n        return self.dagster_event.step_key\n\n    @property\n    def event_type_value(self) -> Optional[str]:\n        if self.dagster_event is None:\n            return None\n        return 
self.dagster_event.event_type_value\n\n\nclass DagsterLoggingMetadata(\n    NamedTuple(\n        "_DagsterLoggingMetadata",\n        [\n            ("run_id", Optional[str]),\n            ("job_name", Optional[str]),\n            ("job_tags", Mapping[str, str]),\n            ("step_key", Optional[str]),\n            ("op_name", Optional[str]),\n            ("resource_name", Optional[str]),\n            ("resource_fn_name", Optional[str]),\n        ],\n    )\n):\n    """Internal class used to represent the context in which a given message was logged (i.e. the\n    step, pipeline run, resource, etc.).\n    """\n\n    def __new__(\n        cls,\n        run_id: Optional[str] = None,\n        job_name: Optional[str] = None,\n        job_tags: Optional[Mapping[str, str]] = None,\n        step_key: Optional[str] = None,\n        op_name: Optional[str] = None,\n        resource_name: Optional[str] = None,\n        resource_fn_name: Optional[str] = None,\n    ):\n        return super().__new__(\n            cls,\n            run_id=run_id,\n            job_name=job_name,\n            job_tags=job_tags or {},\n            step_key=step_key,\n            op_name=op_name,\n            resource_name=resource_name,\n            resource_fn_name=resource_fn_name,\n        )\n\n    @property\n    def log_source(self) -> str:\n        if self.resource_name is None:\n            return self.job_name or "system"\n        return f"resource:{self.resource_name}"\n\n    def all_tags(self) -> Mapping[str, str]:\n        # converts all values into strings\n        return {k: str(v) for k, v in self._asdict().items()}\n\n    def event_tags(self) -> Mapping[str, str]:\n        # Exclude pipeline_tags since it can be quite large and can be found on the run\n        return {k: str(v) for k, v in self._asdict().items() if k != "job_tags"}\n\n\ndef construct_log_string(\n    logging_metadata: DagsterLoggingMetadata, message_props: DagsterMessageProps\n) -> str:\n    from dagster._core.events import EVENT_TYPE_VALUE_TO_DISPLAY_STRING\n\n    event_type_str = (\n        EVENT_TYPE_VALUE_TO_DISPLAY_STRING[message_props.event_type_value]\n        if message_props.event_type_value in EVENT_TYPE_VALUE_TO_DISPLAY_STRING\n        else message_props.event_type_value\n    )\n    return " - ".join(\n        filter(\n            None,\n            (\n                logging_metadata.log_source,\n                logging_metadata.run_id,\n                message_props.pid,\n                logging_metadata.step_key,\n                event_type_str,\n                message_props.orig_message,\n            ),\n        )\n    ) + (message_props.error_str or "")\n\n\ndef get_dagster_meta_dict(\n    logging_metadata: DagsterLoggingMetadata, dagster_message_props: DagsterMessageProps\n) -> Mapping[str, object]:\n    # combine all dagster meta information into a single dictionary\n    meta_dict = {\n        **logging_metadata._asdict(),\n        **dagster_message_props._asdict(),\n    }\n    # step-level events can be logged from a pipeline context. 
for these cases, pull the step\n    # key from the underlying DagsterEvent\n    if meta_dict["step_key"] is None:\n        meta_dict["step_key"] = dagster_message_props.step_key\n\n    return meta_dict\n\n\nclass DagsterLogHandler(logging.Handler):\n    """Internal class used to turn regular logs into Dagster logs by adding Dagster-specific\n    metadata (such as pipeline_name or step_key), as well as reformatting the underlying message.\n\n    Note: The `loggers` argument will be populated with the set of @loggers supplied to the current\n    pipeline run. These essentially work as handlers (they do not create their own log messages,\n    they simply re-log messages that are created from context.log.x() calls), which is why they are\n    referenced from within this handler class.\n    """\n\n    def __init__(\n        self,\n        logging_metadata: DagsterLoggingMetadata,\n        loggers: Sequence[logging.Logger],\n        handlers: Sequence[logging.Handler],\n    ):\n        self._logging_metadata = logging_metadata\n        self._loggers = loggers\n        self._handlers = handlers\n        self._should_capture = True\n        super().__init__()\n\n    @property\n    def logging_metadata(self) -> DagsterLoggingMetadata:\n        return self._logging_metadata\n\n    def with_tags(self, **new_tags: str) -> "DagsterLogHandler":\n        return DagsterLogHandler(\n            logging_metadata=self.logging_metadata._replace(**new_tags),\n            loggers=self._loggers,\n            handlers=self._handlers,\n        )\n\n    def _extract_extra(self, record: logging.LogRecord) -> Mapping[str, Any]:\n        """In the logging.Logger log() implementation, the elements of the `extra` dictionary\n        argument are smashed into the __dict__ of the underlying logging.LogRecord.\n        This function figures out what the original `extra` values of the log call were by\n        comparing the set of attributes in the received record to those of a default record.\n        """\n        ref_attrs = list(logging.makeLogRecord({}).__dict__.keys()) + [\n            "message",\n            "asctime",\n        ]\n        return {k: v for k, v in record.__dict__.items() if k not in ref_attrs}\n\n    def _convert_record(self, record: logging.LogRecord) -> DagsterLogRecord:\n        # we store the originating DagsterEvent in the DAGSTER_META_KEY field, if applicable\n        dagster_meta = getattr(record, DAGSTER_META_KEY, None)\n\n        # generate some properties for this specific record\n        dagster_message_props = DagsterMessageProps(\n            orig_message=record.getMessage(), dagster_event=dagster_meta\n        )\n\n        # set the dagster meta info for the record\n        setattr(\n            record,\n            DAGSTER_META_KEY,\n            get_dagster_meta_dict(self._logging_metadata, dagster_message_props),\n        )\n\n        # update the message to be formatted like other dagster logs\n        record.msg = construct_log_string(self._logging_metadata, dagster_message_props)\n        record.args = ()\n\n        # DagsterLogRecord is a LogRecord with a `dagster_meta` field\n        return cast(DagsterLogRecord, record)\n\n    def filter(self, record: logging.LogRecord) -> bool:\n        """If you list multiple levels of a python logging hierarchy as managed loggers, and do not\n        set the propagate attribute to False, this will result in that record getting logged\n        multiple times, as the DagsterLogHandler will be invoked at each level of the hierarchy as\n        the 
message is propagated. This filter prevents this from happening.\n        """\n        return self._should_capture and not isinstance(\n            getattr(record, DAGSTER_META_KEY, None), dict\n        )\n\n    def emit(self, record: logging.LogRecord) -> None:\n        """For any received record, add Dagster metadata, and have handlers handle it."""\n        try:\n            # to prevent the potential for infinite loops in which a handler produces log messages\n            # which are then captured and then handled by that same handler (etc.), do not capture\n            # any log messages while one is currently being emitted\n            self._should_capture = False\n            dagster_record = self._convert_record(record)\n            # built-in handlers\n            for handler in self._handlers:\n                if dagster_record.levelno >= handler.level:\n                    handler.handle(dagster_record)\n            # user-defined @loggers\n            for logger in self._loggers:\n                logger.log(\n                    dagster_record.levelno,\n                    dagster_record.msg,\n                    exc_info=dagster_record.exc_info,\n                    extra=self._extract_extra(record),\n                )\n        finally:\n            self._should_capture = True\n\n\n
[docs]class DagsterLogManager(logging.Logger):\n """Centralized dispatch for logging from user code.\n\n Handles the construction of uniform structured log messages and passes them through to the\n underlying loggers/handlers.\n\n An instance of the log manager is made available to ops as ``context.log``. Users should not\n initialize instances of the log manager directly. To configure custom loggers, set the\n ``logger_defs`` argument in an `@job` decorator or when calling the `to_job()` method on a\n :py:class:`GraphDefinition`.\n\n The log manager inherits standard convenience methods like those exposed by the Python standard\n library :py:mod:`python:logging` module (i.e., within the body of an op,\n ``context.log.{debug, info, warning, warn, error, critical, fatal}``).\n\n The underlying integer API can also be called directly using, e.g.\n ``context.log.log(5, msg)``, and the log manager will delegate to the ``log`` method\n defined on each of the loggers it manages.\n\n User-defined custom log levels are not supported, and calls to, e.g.,\n ``context.log.trace`` or ``context.log.notice`` will result in hard exceptions **at runtime**.\n """\n\n def __init__(\n self,\n dagster_handler: DagsterLogHandler,\n level: int = logging.NOTSET,\n managed_loggers: Optional[Sequence[logging.Logger]] = None,\n ):\n super().__init__(name="dagster", level=coerce_valid_log_level(level))\n self._managed_loggers = check.opt_sequence_param(\n managed_loggers, "managed_loggers", of_type=logging.Logger\n )\n self._dagster_handler = dagster_handler\n self.addHandler(dagster_handler)\n\n @classmethod\n def create(\n cls,\n loggers: Sequence[logging.Logger],\n handlers: Optional[Sequence[logging.Handler]] = None,\n instance: Optional["DagsterInstance"] = None,\n dagster_run: Optional["DagsterRun"] = None,\n ) -> "DagsterLogManager":\n """Create a DagsterLogManager with a set of subservient loggers."""\n handlers = check.opt_sequence_param(handlers, "handlers", of_type=logging.Handler)\n\n managed_loggers = [get_dagster_logger()]\n python_log_level = logging.NOTSET\n\n if instance:\n handlers = [*handlers, *instance.get_handlers()]\n managed_loggers += [\n logging.getLogger(lname) if lname != "root" else logging.getLogger()\n for lname in instance.managed_python_loggers\n ]\n if instance.python_log_level is not None:\n python_log_level = coerce_valid_log_level(instance.python_log_level)\n\n # set all loggers to the declared logging level\n for logger in managed_loggers:\n logger.setLevel(python_log_level)\n\n if dagster_run:\n logging_metadata = DagsterLoggingMetadata(\n run_id=dagster_run.run_id,\n job_name=dagster_run.job_name,\n job_tags=dagster_run.tags,\n )\n else:\n logging_metadata = DagsterLoggingMetadata()\n\n return cls(\n dagster_handler=DagsterLogHandler(\n logging_metadata=logging_metadata,\n loggers=loggers,\n handlers=handlers,\n ),\n level=python_log_level,\n managed_loggers=managed_loggers,\n )\n\n @property\n def logging_metadata(self) -> DagsterLoggingMetadata:\n return self._dagster_handler.logging_metadata\n\n def begin_python_log_capture(self) -> None:\n for logger in self._managed_loggers:\n logger.addHandler(self._dagster_handler)\n\n def end_python_log_capture(self) -> None:\n for logger in self._managed_loggers:\n logger.removeHandler(self._dagster_handler)\n\n def log_dagster_event(\n self, level: Union[str, int], msg: str, dagster_event: "DagsterEvent"\n ) -> None:\n """Log a DagsterEvent at the given level. 
Attributes about the context it was logged in\n (such as the solid name or pipeline name) will be automatically attached to the created record.\n\n Args:\n level (str, int): either a string representing the desired log level ("INFO", "WARN"),\n or an integer level such as logging.INFO or logging.DEBUG.\n msg (str): message describing the event\n dagster_event (DagsterEvent): DagsterEvent that will be logged\n """\n self.log(level=level, msg=msg, extra={DAGSTER_META_KEY: dagster_event})\n\n def log(self, level: Union[str, int], msg: object, *args: Any, **kwargs: Any) -> None:\n """Log a message at the given level. Attributes about the context it was logged in (such as\n the solid name or pipeline name) will be automatically attached to the created record.\n\n Args:\n level (str, int): either a string representing the desired log level ("INFO", "WARN"),\n or an integer level such as logging.INFO or logging.DEBUG.\n msg (str): the message to be logged\n *args: the logged message will be msg % args\n """\n level = coerce_valid_log_level(level)\n # log DagsterEvents regardless of level\n if self.isEnabledFor(level) or ("extra" in kwargs and DAGSTER_META_KEY in kwargs["extra"]):\n self._log(level, msg, args, **kwargs)\n\n def with_tags(self, **new_tags: str) -> "DagsterLogManager":\n """Add new tags in "new_tags" to the set of tags attached to this log manager instance, and\n return a new DagsterLogManager with the merged set of tags.\n\n Args:\n new_tags (Dict[str,str]): Dictionary of tags\n\n Returns:\n DagsterLogManager: a new DagsterLogManager namedtuple with updated tags for the same\n run ID and loggers.\n """\n return DagsterLogManager(\n dagster_handler=self._dagster_handler.with_tags(**new_tags),\n managed_loggers=self._managed_loggers,\n level=self.level,\n )
\n
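# --- Hedged usage sketch: user code reaches the DagsterLogManager as `context.log` inside
# an op, where the standard logging convenience methods described above are available.
from dagster import OpExecutionContext, op


@op
def emit_greeting(context: OpExecutionContext) -> None:
    # context.log is a DagsterLogManager; debug/info/warning/error/critical all route
    # through the DagsterLogHandler to every managed handler and @logger.
    context.log.info("starting work")
    context.log.warning("something worth flagging")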
", "current_page_name": "_modules/dagster/_core/log_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.log_manager"}, "pipes": {"client": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.pipes.client

\nfrom abc import ABC, abstractmethod\nfrom contextlib import contextmanager\nfrom typing import TYPE_CHECKING, Iterator, List, Optional, Sequence\n\nfrom dagster_pipes import (\n    DagsterPipesError,\n    PipesContextData,\n    PipesExtras,\n    PipesParams,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import experimental, public\nfrom dagster._core.definitions.asset_check_result import AssetCheckResult\nfrom dagster._core.definitions.result import MaterializeResult\nfrom dagster._core.execution.context.compute import OpExecutionContext\n\nfrom .context import PipesExecutionResult\n\nif TYPE_CHECKING:\n    from .context import PipesMessageHandler\n\n\n
[docs]@experimental\nclass PipesClient(ABC):\n """Pipes client base class.\n\n Pipes clients for specific external environments should subclass this.\n """\n\n
[docs] @public\n @abstractmethod\n def run(\n self,\n *,\n context: OpExecutionContext,\n extras: Optional[PipesExtras] = None,\n **kwargs,\n ) -> "PipesClientCompletedInvocation":\n """Synchronously execute an external process with the pipes protocol. Derived\n clients must have `context` and `extras` arguments, but also can add arbitrary\n arguments that are appropriate for their own implementation.\n\n Args:\n context (OpExecutionContext): The context from the executing op/asset.\n extras (Optional[PipesExtras]): Arbitrary data to pass to the external environment.\n\n Returns:\n PipesClientCompletedInvocation: Wrapper containing results reported by the external\n process.\n """
\n\n\n@experimental\nclass PipesClientCompletedInvocation:\n def __init__(self, results: Sequence["PipesExecutionResult"]):\n self._results = results\n\n def get_results(self) -> Sequence["PipesExecutionResult"]:\n """Get the stream of results as a Sequence of a completed pipes\n client invocation. For each "report" call in the external process,\n one result object will be in the list.\n\n Returns: Sequence[PipesExecutionResult]\n """\n return tuple(self._results)\n\n def get_materialize_result(self) -> MaterializeResult:\n """Get a single materialize result for a pipes invocation. This coalesces\n the materialization result and any separately reported asset check results from\n the external process.\n\n This does not work on invocations that materialize multiple assets and will fail\n in that case. For multiple assets use `get_results` instead to get the result stream.\n\n Returns: MaterializeResult\n """\n return materialize_result_from_pipes_results(self.get_results())\n\n def get_asset_check_result(self) -> AssetCheckResult:\n """Get a single asset check result for a pipes invocation.\n\n This does not work on invocations that have anything except a single asset check result.\n Use `get_results` instead to get the result stream in those cases.\n\n Returns: AssetCheckResult\n """\n return _check_result_from_pipes_results(self.get_results())\n\n\n
[docs]@experimental\nclass PipesContextInjector(ABC):\n @abstractmethod\n @contextmanager\n def inject_context(self, context_data: "PipesContextData") -> Iterator[PipesParams]:\n """A `@contextmanager` that injects context data into the external process.\n\n This method should write the context data to a location accessible to the external\n process. It should yield parameters that the external process can use to locate and load the\n context data.\n\n Args:\n context_data (PipesContextData): The context data to inject.\n\n Yields:\n PipesParams: A JSON-serializable dict of parameters to be used by the external\n process to locate and load the injected context data.\n """\n\n @abstractmethod\n def no_messages_debug_text(self) -> str:\n """A message to be displayed when no messages are received from the external process to aid with debugging.\n\n Example: "Attempted to inject context using a magic portal. Expected PipesMagicPortalContextLoader to be\n explicitly passed to open_dagster_pipes in the external process."\n """
\n\n\n
[docs]@experimental\nclass PipesMessageReader(ABC):\n @abstractmethod\n @contextmanager\n def read_messages(self, handler: "PipesMessageHandler") -> Iterator[PipesParams]:\n """A `@contextmanager` that reads messages reported by an external process.\n\n This method should start a thread to continuously read messages from some location\n accessible to the external process. It should yield parameters that the external process\n can use to direct its message output.\n\n Args:\n handler (PipesMessageHandler): The message handler to use to process messages read from\n the external process.\n\n Yields:\n PipesParams: A dict of parameters that can be used by the external process to determine\n where to write messages.\n """\n\n @abstractmethod\n def no_messages_debug_text(self) -> str:\n """A message to be displayed when no messages are received from the external process to aid with\n debugging.\n\n Example: "Attempted to read messages using a magic portal. Expected PipesMagicPortalMessageWriter\n to be explicitly passed to open_dagster_pipes in the external process."\n """
\n\n\ndef materialize_result_from_pipes_results(\n all_results: Sequence[PipesExecutionResult],\n) -> MaterializeResult:\n mat_results: List[MaterializeResult] = [\n mat_result for mat_result in all_results if isinstance(mat_result, MaterializeResult)\n ]\n check_results: List[AssetCheckResult] = [\n check_result for check_result in all_results if isinstance(check_result, AssetCheckResult)\n ]\n\n check.invariant(len(mat_results) > 0, "No materialization results received. Internal error?")\n if len(mat_results) > 1:\n raise DagsterPipesError(\n "Multiple materialize results returned with asset keys"\n f" {sorted([check.not_none(mr.asset_key).to_user_string() for mr in mat_results])}."\n " If you are materializing multiple assets in a pipes invocation, use"\n " get_results() instead.",\n )\n mat_result = next(iter(mat_results))\n for check_result in check_results:\n if check_result.asset_key:\n check.invariant(\n mat_result.asset_key == check_result.asset_key,\n "Check result specified an asset key that is not part of the returned"\n " materialization. If this was deliberate, use get_results() instead.",\n )\n\n if check_results:\n return mat_result._replace(\n check_results=[*(mat_result.check_results or []), *check_results]\n )\n else:\n return mat_result\n\n\ndef _check_result_from_pipes_results(\n all_results: Sequence[PipesExecutionResult],\n) -> AssetCheckResult:\n mat_results: List[MaterializeResult] = [\n mat_result for mat_result in all_results if isinstance(mat_result, MaterializeResult)\n ]\n check_results: List[AssetCheckResult] = [\n check_result for check_result in all_results if isinstance(check_result, AssetCheckResult)\n ]\n\n # return the single asset check result if thats what we got\n if len(mat_results) == 0 and len(check_results) == 1:\n return next(iter(check_results))\n\n # otherwise error\n raise DagsterPipesError(\n f"Did not find singular AssetCheckResult, got {len(mat_results)} MaterializeResults and"\n f" {len(check_results)} AssetCheckResults. Correct the reported results or use"\n " get_results() instead.",\n )\n
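# --- Hedged skeleton of a PipesClient subclass, following the contract documented above:
# `run` takes keyword-only `context` and `extras` arguments and returns a
# PipesClientCompletedInvocation. No external process is actually launched here; the body
# is a placeholder and the class name is illustrative.
from typing import Optional

from dagster import OpExecutionContext
from dagster._core.pipes.client import PipesClient, PipesClientCompletedInvocation
from dagster_pipes import PipesExtras


class MyExternalEnvPipesClient(PipesClient):
    def run(
        self,
        *,
        context: OpExecutionContext,
        extras: Optional[PipesExtras] = None,
        **kwargs,
    ) -> PipesClientCompletedInvocation:
        # A real implementation would open a pipes session, inject context, start the
        # external process with the bootstrap env vars, and collect the reported results.
        results = []  # placeholder: nothing is launched in this sketch
        return PipesClientCompletedInvocation(results)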
", "current_page_name": "_modules/dagster/_core/pipes/client", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.pipes.client"}, "context": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.pipes.context

\nfrom contextlib import contextmanager\nfrom dataclasses import dataclass\nfrom queue import Queue\nfrom typing import TYPE_CHECKING, Any, Dict, Iterator, Mapping, Optional, Set, Union\n\nfrom dagster_pipes import (\n    DAGSTER_PIPES_CONTEXT_ENV_VAR,\n    DAGSTER_PIPES_MESSAGES_ENV_VAR,\n    PIPES_METADATA_TYPE_INFER,\n    PipesContextData,\n    PipesDataProvenance,\n    PipesExtras,\n    PipesMessage,\n    PipesMetadataType,\n    PipesMetadataValue,\n    PipesParams,\n    PipesTimeWindow,\n    encode_env_var,\n)\nfrom typing_extensions import TypeAlias\n\nimport dagster._check as check\nfrom dagster._annotations import experimental, public\nfrom dagster._core.definitions.asset_check_result import AssetCheckResult\nfrom dagster._core.definitions.asset_check_spec import AssetCheckSeverity\nfrom dagster._core.definitions.data_version import DataProvenance, DataVersion\nfrom dagster._core.definitions.events import AssetKey\nfrom dagster._core.definitions.metadata import MetadataValue, normalize_metadata_value\nfrom dagster._core.definitions.partition_key_range import PartitionKeyRange\nfrom dagster._core.definitions.result import MaterializeResult\nfrom dagster._core.definitions.time_window_partitions import (\n    TimeWindow,\n    has_one_dimension_time_window_partitioning,\n)\nfrom dagster._core.errors import DagsterPipesExecutionError\nfrom dagster._core.execution.context.compute import OpExecutionContext\nfrom dagster._core.execution.context.invocation import BoundOpExecutionContext\n\nif TYPE_CHECKING:\n    from dagster._core.pipes.client import PipesMessageReader\n\nPipesExecutionResult: TypeAlias = Union[MaterializeResult, AssetCheckResult]\n\n\n
[docs]@experimental\nclass PipesMessageHandler:\n """Class to process :py:obj:`PipesMessage` objects received from a pipes process.\n\n Args:\n context (OpExecutionContext): The context for the executing op/asset.\n """\n\n def __init__(self, context: OpExecutionContext) -> None:\n self._context = context\n # Queue is thread-safe\n self._result_queue: Queue[PipesExecutionResult] = Queue()\n # Only read by the main thread after all messages are handled, so no need for a lock\n self._unmaterialized_assets: Set[AssetKey] = set(context.selected_asset_keys)\n self._received_any_msg = False\n self._received_closed_msg = False\n\n @contextmanager\n def handle_messages(self, message_reader: "PipesMessageReader") -> Iterator[PipesParams]:\n with message_reader.read_messages(self) as params:\n yield params\n for key in self._unmaterialized_assets:\n self._result_queue.put(MaterializeResult(asset_key=key))\n\n def clear_result_queue(self) -> Iterator[PipesExecutionResult]:\n while not self._result_queue.empty():\n yield self._result_queue.get()\n\n @property\n def received_any_message(self) -> bool:\n return self._received_any_msg\n\n @property\n def received_closed_message(self) -> bool:\n return self._received_closed_msg\n\n def _resolve_metadata(\n self, metadata: Mapping[str, PipesMetadataValue]\n ) -> Mapping[str, MetadataValue]:\n return {\n k: self._resolve_metadata_value(v["raw_value"], v["type"]) for k, v in metadata.items()\n }\n\n def _resolve_metadata_value(\n self, value: Any, metadata_type: PipesMetadataType\n ) -> MetadataValue:\n if metadata_type == PIPES_METADATA_TYPE_INFER:\n return normalize_metadata_value(value)\n elif metadata_type == "text":\n return MetadataValue.text(value)\n elif metadata_type == "url":\n return MetadataValue.url(value)\n elif metadata_type == "path":\n return MetadataValue.path(value)\n elif metadata_type == "notebook":\n return MetadataValue.notebook(value)\n elif metadata_type == "json":\n return MetadataValue.json(value)\n elif metadata_type == "md":\n return MetadataValue.md(value)\n elif metadata_type == "float":\n return MetadataValue.float(value)\n elif metadata_type == "int":\n return MetadataValue.int(value)\n elif metadata_type == "bool":\n return MetadataValue.bool(value)\n elif metadata_type == "dagster_run":\n return MetadataValue.dagster_run(value)\n elif metadata_type == "asset":\n return MetadataValue.asset(AssetKey.from_user_string(value))\n elif metadata_type == "table":\n return MetadataValue.table(value)\n elif metadata_type == "null":\n return MetadataValue.null()\n else:\n check.failed(f"Unexpected metadata type {metadata_type}")\n\n # Type ignores because we currently validate in individual handlers\n def handle_message(self, message: PipesMessage) -> None:\n if self._received_closed_msg:\n self._context.log.warn(f"[pipes] unexpected message received after closed: `{message}`")\n\n if not self._received_any_msg:\n self._received_any_msg = True\n self._context.log.info("[pipes] external process successfully opened dagster pipes.")\n\n if message["method"] == "opened":\n pass\n elif message["method"] == "closed":\n self._handle_closed()\n elif message["method"] == "report_asset_materialization":\n self._handle_report_asset_materialization(**message["params"]) # type: ignore\n elif message["method"] == "report_asset_check":\n self._handle_report_asset_check(**message["params"]) # type: ignore\n elif message["method"] == "log":\n self._handle_log(**message["params"]) # type: ignore\n else:\n raise DagsterPipesExecutionError(f"Unknown 
message method: {message['method']}")\n\n def _handle_closed(self) -> None:\n self._received_closed_msg = True\n\n def _handle_report_asset_materialization(\n self,\n asset_key: str,\n metadata: Optional[Mapping[str, PipesMetadataValue]],\n data_version: Optional[str],\n ) -> None:\n check.str_param(asset_key, "asset_key")\n check.opt_str_param(data_version, "data_version")\n metadata = check.opt_mapping_param(metadata, "metadata", key_type=str)\n resolved_asset_key = AssetKey.from_user_string(asset_key)\n resolved_metadata = self._resolve_metadata(metadata)\n resolved_data_version = None if data_version is None else DataVersion(data_version)\n result = MaterializeResult(\n asset_key=resolved_asset_key,\n metadata=resolved_metadata,\n data_version=resolved_data_version,\n )\n self._result_queue.put(result)\n self._unmaterialized_assets.remove(resolved_asset_key)\n\n def _handle_report_asset_check(\n self,\n asset_key: str,\n check_name: str,\n passed: bool,\n severity: str,\n metadata: Mapping[str, PipesMetadataValue],\n ) -> None:\n check.str_param(asset_key, "asset_key")\n check.str_param(check_name, "check_name")\n check.bool_param(passed, "passed")\n check.literal_param(severity, "severity", [x.value for x in AssetCheckSeverity])\n metadata = check.opt_mapping_param(metadata, "metadata", key_type=str)\n resolved_asset_key = AssetKey.from_user_string(asset_key)\n resolved_metadata = self._resolve_metadata(metadata)\n resolved_severity = AssetCheckSeverity(severity)\n result = AssetCheckResult(\n asset_key=resolved_asset_key,\n check_name=check_name,\n passed=passed,\n severity=resolved_severity,\n metadata=resolved_metadata,\n )\n self._result_queue.put(result)\n\n def _handle_log(self, message: str, level: str = "info") -> None:\n check.str_param(message, "message")\n self._context.log.log(level, message)
\n\n\n
[docs]@experimental\n@dataclass\nclass PipesSession:\n """Object representing a pipes session.\n\n A pipes session is defined by a pair of :py:class:`PipesContextInjector` and\n :py:class:`PipesMessageReader` objects. At the opening of the session, the context injector\n writes context data to an externally accessible location, and the message reader starts\n monitoring an externally accessible location. These locations are encoded in parameters stored\n on a `PipesSession` object.\n\n During the session, an external process should be started and the parameters injected into its\n environment. The typical way to do this is to call :py:meth:`PipesSession.get_bootstrap_env_vars`\n and pass the result as environment variables.\n\n During execution, results (e.g. asset materializations) are reported by the external process and\n buffered on the `PipesSession` object. The buffer can periodically be cleared and yielded to\n Dagster machinery by calling `yield from PipesSession.get_results()`.\n\n When the external process exits, the session can be closed. Closing consists of handling any\n unprocessed messages written by the external process and cleaning up any resources used for\n context injection and message reading.\n\n Args:\n context_data (PipesContextData): The context for the executing op/asset.\n message_handler (PipesMessageHandler): The message handler to use for processing messages\n context_injector_params (PipesParams): Parameters yielded by the context injector,\n indicating the location from which the external process should load context data.\n message_reader_params (PipesParams): Parameters yielded by the message reader, indicating\n the location to which the external process should write messages.\n """\n\n context_data: PipesContextData\n message_handler: PipesMessageHandler\n context_injector_params: PipesParams\n message_reader_params: PipesParams\n\n
[docs] @public\n def get_bootstrap_env_vars(self) -> Dict[str, str]:\n """Encode context injector and message reader params as environment variables.\n\n Passing environment variables is the typical way to expose the pipes I/O parameters\n to a pipes process.\n\n Returns:\n Mapping[str, str]: Environment variables to pass to the external process. The values are\n serialized as json, compressed with gzip, and then base-64-encoded.\n """\n return {\n param_name: encode_env_var(param_value)\n for param_name, param_value in self.get_bootstrap_params().items()\n }
\n\n
[docs] @public\n def get_bootstrap_params(self) -> Dict[str, Any]:\n """Get the params necessary to bootstrap a launched pipes process. These parameters are typically\n passed as environment variables. See `get_bootstrap_env_vars`. It is the context injector's\n responsibility to decide how to pass these parameters to the external environment.\n\n Returns:\n Mapping[str, str]: Parameters to pass to the external process and their corresponding\n values that must be passed by the context injector.\n """\n return {\n DAGSTER_PIPES_CONTEXT_ENV_VAR: self.context_injector_params,\n DAGSTER_PIPES_MESSAGES_ENV_VAR: self.message_reader_params,\n }
\n\n
[docs] @public\n def get_results(self) -> Iterator[PipesExecutionResult]:\n """Iterator over buffered :py:class:`PipesExecutionResult` objects received from the\n external process.\n\n When this is called it clears the results buffer.\n\n Yields:\n ExtResult: Result reported by external process.\n """\n yield from self.message_handler.clear_result_queue()
\n\n\ndef build_external_execution_context_data(\n context: OpExecutionContext,\n extras: Optional[PipesExtras],\n) -> "PipesContextData":\n asset_keys = (\n [_convert_asset_key(key) for key in sorted(context.selected_asset_keys)]\n if context.has_assets_def\n else None\n )\n code_version_by_asset_key = (\n {\n _convert_asset_key(key): context.assets_def.code_versions_by_key[key]\n for key in context.selected_asset_keys\n }\n if context.has_assets_def\n else None\n )\n provenance_by_asset_key = (\n {\n _convert_asset_key(key): _convert_data_provenance(context.get_asset_provenance(key))\n for key in context.selected_asset_keys\n }\n if context.has_assets_def\n else None\n )\n partition_key = context.partition_key if context.has_partition_key else None\n partition_key_range = context.partition_key_range if context.has_partition_key else None\n partition_time_window = (\n context.partition_time_window\n if context.has_partition_key\n and has_one_dimension_time_window_partitioning(\n context.get_step_execution_context().partitions_def\n )\n else None\n )\n return PipesContextData(\n asset_keys=asset_keys,\n code_version_by_asset_key=code_version_by_asset_key,\n provenance_by_asset_key=provenance_by_asset_key,\n partition_key=partition_key,\n partition_key_range=(\n _convert_partition_key_range(partition_key_range) if partition_key_range else None\n ),\n partition_time_window=(\n _convert_time_window(partition_time_window) if partition_time_window else None\n ),\n run_id=context.run_id,\n job_name=None if isinstance(context, BoundOpExecutionContext) else context.job_name,\n retry_number=0 if isinstance(context, BoundOpExecutionContext) else context.retry_number,\n extras=extras or {},\n )\n\n\ndef _convert_asset_key(asset_key: AssetKey) -> str:\n return asset_key.to_user_string()\n\n\ndef _convert_data_provenance(\n provenance: Optional[DataProvenance],\n) -> Optional["PipesDataProvenance"]:\n return (\n None\n if provenance is None\n else PipesDataProvenance(\n code_version=provenance.code_version,\n input_data_versions={\n _convert_asset_key(k): v.value for k, v in provenance.input_data_versions.items()\n },\n is_user_provided=provenance.is_user_provided,\n )\n )\n\n\ndef _convert_time_window(\n time_window: TimeWindow,\n) -> "PipesTimeWindow":\n return PipesTimeWindow(\n start=time_window.start.isoformat(),\n end=time_window.end.isoformat(),\n )\n\n\ndef _convert_partition_key_range(\n partition_key_range: PartitionKeyRange,\n) -> "PipesTimeWindow":\n return PipesTimeWindow(\n start=partition_key_range.start,\n end=partition_key_range.end,\n )\n
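# --- Hedged sketch of the external-process side that a PipesSession communicates with
# (assumes the separate `dagster_pipes` package is importable in that process). The
# bootstrap env vars produced by the session tell the pipes machinery where to load
# context data and where to write messages; the metadata values are illustrative.
from dagster_pipes import PipesContext, open_dagster_pipes


def main() -> None:
    pipes = PipesContext.get()
    pipes.log.info("running in the external process")
    # Reported results are buffered by the orchestration-side PipesMessageHandler.
    pipes.report_asset_materialization(
        metadata={"row_count": {"raw_value": 42, "type": "int"}}
    )


if __name__ == "__main__":
    with open_dagster_pipes():
        main()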
", "current_page_name": "_modules/dagster/_core/pipes/context", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.pipes.context"}, "utils": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.pipes.utils

\nimport datetime\nimport json\nimport os\nimport sys\nimport tempfile\nimport time\nfrom abc import ABC, abstractmethod\nfrom contextlib import contextmanager\nfrom threading import Event, Thread\nfrom typing import Iterator, Optional, TextIO\n\nfrom dagster_pipes import (\n    PIPES_PROTOCOL_VERSION_FIELD,\n    PipesContextData,\n    PipesDefaultContextLoader,\n    PipesDefaultMessageWriter,\n    PipesExtras,\n    PipesParams,\n)\n\nfrom dagster import (\n    OpExecutionContext,\n    _check as check,\n)\nfrom dagster._annotations import experimental\nfrom dagster._core.pipes.client import (\n    PipesContextInjector,\n    PipesMessageReader,\n)\nfrom dagster._core.pipes.context import (\n    PipesMessageHandler,\n    PipesSession,\n    build_external_execution_context_data,\n)\nfrom dagster._utils import tail_file\n\n_CONTEXT_INJECTOR_FILENAME = "context"\n_MESSAGE_READER_FILENAME = "messages"\n\n\n
[docs]@experimental\nclass PipesFileContextInjector(PipesContextInjector):\n """Context injector that injects context data into the external process by writing it to a\n specified file.\n\n Args:\n path (str): The path of a file to which to write context data. The file will be deleted on\n close of the pipes session.\n """\n\n def __init__(self, path: str):\n self._path = check.str_param(path, "path")\n\n @contextmanager\n def inject_context(self, context_data: "PipesContextData") -> Iterator[PipesParams]:\n """Inject context to external environment by writing it to a file as JSON and exposing the\n path to the file.\n\n Args:\n context_data (PipesContextData): The context data to inject.\n\n Yields:\n PipesParams: A dict of parameters that can be used by the external process to locate and\n load the injected context data.\n """\n with open(self._path, "w") as input_stream:\n json.dump(context_data, input_stream)\n try:\n yield {PipesDefaultContextLoader.FILE_PATH_KEY: self._path}\n finally:\n if os.path.exists(self._path):\n os.remove(self._path)\n\n def no_messages_debug_text(self) -> str:\n return f"Attempted to inject context via file {self._path}"
\n\n\n
[docs]@experimental\nclass PipesTempFileContextInjector(PipesContextInjector):\n """Context injector that injects context data into the external process by writing it to an\n automatically-generated temporary file.\n """\n\n @contextmanager\n def inject_context(self, context: "PipesContextData") -> Iterator[PipesParams]:\n """Inject context to external environment by writing it to an automatically-generated\n temporary file as JSON and exposing the path to the file.\n\n Args:\n context_data (PipesContextData): The context data to inject.\n\n Yields:\n PipesParams: A dict of parameters that can be used by the external process to locate and\n load the injected context data.\n """\n with tempfile.TemporaryDirectory() as tempdir:\n with PipesFileContextInjector(\n os.path.join(tempdir, _CONTEXT_INJECTOR_FILENAME)\n ).inject_context(context) as params:\n yield params\n\n def no_messages_debug_text(self) -> str:\n return "Attempted to inject context via a temporary file."
\n\n\n
[docs]class PipesEnvContextInjector(PipesContextInjector):\n """Context injector that injects context data into the external process by injecting it directly into the external process environment."""\n\n @contextmanager\n def inject_context(\n self,\n context_data: "PipesContextData",\n ) -> Iterator[PipesParams]:\n """Inject context to external environment by embedding directly in the parameters that will\n be passed to the external process (typically as environment variables).\n\n Args:\n context_data (PipesContextData): The context data to inject.\n\n Yields:\n PipesParams: A dict of parameters that can be used by the external process to locate and\n load the injected context data.\n """\n yield {PipesDefaultContextLoader.DIRECT_KEY: context_data}\n\n def no_messages_debug_text(self) -> str:\n return "Attempted to inject context directly, typically as an environment variable."
\n\n\n
[docs]@experimental\nclass PipesFileMessageReader(PipesMessageReader):\n """Message reader that reads messages by tailing a specified file.\n\n Args:\n path (str): The path of the file to which messages will be written. The file will be deleted\n on close of the pipes session.\n """\n\n def __init__(self, path: str):\n self._path = check.str_param(path, "path")\n\n @contextmanager\n def read_messages(\n self,\n handler: "PipesMessageHandler",\n ) -> Iterator[PipesParams]:\n """Set up a thread to read streaming messages from the external process by tailing the\n target file.\n\n Args:\n handler (PipesMessageHandler): object to process incoming messages\n\n Yields:\n PipesParams: A dict of parameters that specifies where a pipes process should write\n pipes protocol messages.\n """\n is_task_complete = Event()\n thread = None\n try:\n open(self._path, "w").close() # create file\n thread = Thread(\n target=self._reader_thread, args=(handler, is_task_complete), daemon=True\n )\n thread.start()\n yield {PipesDefaultMessageWriter.FILE_PATH_KEY: self._path}\n finally:\n is_task_complete.set()\n if os.path.exists(self._path):\n os.remove(self._path)\n if thread:\n thread.join()\n\n def _reader_thread(self, handler: "PipesMessageHandler", is_resource_complete: Event) -> None:\n for line in tail_file(self._path, lambda: is_resource_complete.is_set()):\n message = json.loads(line)\n handler.handle_message(message)\n\n def no_messages_debug_text(self) -> str:\n return f"Attempted to read messages from file {self._path}."
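The file-based injector and reader above can be combined with `open_pipes_session` (defined later in this module). The following is a minimal sketch rather than a canonical recipe: the asset name, the /tmp paths, and the external script path are hypothetical placeholders.

# Minimal sketch: file-based Pipes context injection and message reading.
# The asset name, the /tmp paths, and the external script path are hypothetical.
import os
import subprocess

from dagster import OpExecutionContext, asset, open_pipes_session
from dagster._core.pipes.utils import PipesFileContextInjector, PipesFileMessageReader


@asset
def file_based_pipes_asset(context: OpExecutionContext):
    with open_pipes_session(
        context=context,
        context_injector=PipesFileContextInjector("/tmp/pipes/context.json"),
        message_reader=PipesFileMessageReader("/tmp/pipes/messages.jsonl"),
    ) as session:
        # The bootstrap env vars tell the external process where to read the
        # injected context and where to write pipes protocol messages.
        subprocess.run(
            ["python", "/path/to/external_script.py"],
            env={**os.environ, **session.get_bootstrap_env_vars()},
            check=True,
        )
    # Yield any results buffered from the external process after the session closes.
    yield from session.get_results()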
\n\n\n
[docs]@experimental\nclass PipesTempFileMessageReader(PipesMessageReader):\n """Message reader that reads messages by tailing an automatically-generated temporary file."""\n\n @contextmanager\n def read_messages(\n self,\n handler: "PipesMessageHandler",\n ) -> Iterator[PipesParams]:\n """Set up a thread to read streaming messages from the external process by tailing an\n automatically-generated temporary file.\n\n Args:\n handler (PipesMessageHandler): object to process incoming messages\n\n Yields:\n PipesParams: A dict of parameters that specifies where a pipes process should write\n pipes protocol messages.\n """\n with tempfile.TemporaryDirectory() as tempdir:\n with PipesFileMessageReader(\n os.path.join(tempdir, _MESSAGE_READER_FILENAME)\n ).read_messages(handler) as params:\n yield params\n\n def no_messages_debug_text(self) -> str:\n return "Attempted to read messages from a local temporary file."
\n\n\n# Number of seconds to wait after an external process has completed for stdio logs to become\n# available. If this is exceeded, proceed with exiting without picking up logs.\nWAIT_FOR_STDIO_LOGS_TIMEOUT = 60\n\n\n
[docs]@experimental\nclass PipesBlobStoreMessageReader(PipesMessageReader):\n """Message reader that reads a sequence of message chunks written by an external process into a\n blob store such as S3, Azure blob storage, or GCS.\n\n The reader maintains a counter, starting at 1, that is synchronized with a message writer in\n some pipes process. The reader starts a thread that periodically attempts to read a chunk\n indexed by the counter at some location expected to be written by the pipes process. The chunk\n should be a file with each line corresponding to a JSON-encoded pipes message. When a chunk is\n successfully read, the messages are processed and the counter is incremented. The\n :py:class:`PipesBlobStoreMessageWriter` on the other end is expected to similarly increment a\n counter (starting from 1) on successful write, keeping counters on the read and write end in\n sync.\n\n If `stdout_reader` or `stderr_reader` are passed, this reader will also start them when\n `read_messages` is called. If they are not passed, then the reader performs no stdout/stderr\n forwarding.\n\n Args:\n interval (float): interval in seconds between attempts to download a chunk\n stdout_reader (Optional[PipesBlobStoreStdioReader]): A reader for reading stdout logs.\n stderr_reader (Optional[PipesBlobStoreStdioReader]): A reader for reading stderr logs.\n """\n\n interval: float\n counter: int\n stdout_reader: "PipesBlobStoreStdioReader"\n stderr_reader: "PipesBlobStoreStdioReader"\n\n def __init__(\n self,\n interval: float = 10,\n stdout_reader: Optional["PipesBlobStoreStdioReader"] = None,\n stderr_reader: Optional["PipesBlobStoreStdioReader"] = None,\n ):\n self.interval = interval\n self.counter = 1\n self.stdout_reader = (\n check.opt_inst_param(stdout_reader, "stdout_reader", PipesBlobStoreStdioReader)\n or PipesNoOpStdioReader()\n )\n self.stderr_reader = (\n check.opt_inst_param(stderr_reader, "stderr_reader", PipesBlobStoreStdioReader)\n or PipesNoOpStdioReader()\n )\n\n @contextmanager\n def read_messages(\n self,\n handler: "PipesMessageHandler",\n ) -> Iterator[PipesParams]:\n """Set up a thread to read streaming messages by periodically reading message chunks from a\n target location.\n\n Args:\n handler (PipesMessageHandler): object to process incoming messages\n\n Yields:\n PipesParams: A dict of parameters that specifies where a pipes process should write\n pipes protocol message chunks.\n """\n with self.get_params() as params:\n is_task_complete = Event()\n messages_thread = None\n try:\n messages_thread = Thread(\n target=self._messages_thread, args=(handler, params, is_task_complete)\n )\n messages_thread.start()\n self.stdout_reader.start(params, is_task_complete)\n self.stderr_reader.start(params, is_task_complete)\n yield params\n finally:\n self.wait_for_stdio_logs(params)\n is_task_complete.set()\n if messages_thread:\n messages_thread.join()\n self.stdout_reader.stop()\n self.stderr_reader.stop()\n\n # In cases where we are forwarding logs, in some cases the logs might not be written out until\n # after the run completes. 
We wait for them to exist.\n def wait_for_stdio_logs(self, params):\n start_or_last_download = datetime.datetime.now()\n while (\n datetime.datetime.now() - start_or_last_download\n ).seconds <= WAIT_FOR_STDIO_LOGS_TIMEOUT and (\n (self.stdout_reader and not self.stdout_reader.is_ready(params))\n or (self.stderr_reader and not self.stderr_reader.is_ready(params))\n ):\n time.sleep(5)\n\n @abstractmethod\n @contextmanager\n def get_params(self) -> Iterator[PipesParams]:\n """Yield a set of parameters to be passed to a message writer in a pipes process.\n\n Yields:\n PipesParams: A dict of parameters that specifies where a pipes process should write\n pipes protocol message chunks.\n """\n\n @abstractmethod\n def download_messages_chunk(self, index: int, params: PipesParams) -> Optional[str]: ...\n\n def _messages_thread(\n self,\n handler: "PipesMessageHandler",\n params: PipesParams,\n is_task_complete: Event,\n ) -> None:\n start_or_last_download = datetime.datetime.now()\n while True:\n now = datetime.datetime.now()\n if (now - start_or_last_download).seconds > self.interval or is_task_complete.is_set():\n start_or_last_download = now\n chunk = self.download_messages_chunk(self.counter, params)\n if chunk:\n for line in chunk.split("\\n"):\n message = json.loads(line)\n handler.handle_message(message)\n self.counter += 1\n elif is_task_complete.is_set():\n break\n time.sleep(1)
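Concrete subclasses of `PipesBlobStoreMessageReader` only need to supply `get_params` (where the external writer should upload chunks) and `download_messages_chunk` (how to fetch chunk N). The sketch below stands in a local directory for the blob store; the directory layout and chunk naming are assumptions made for illustration, not the behavior of any shipped integration.

# Illustrative subclass: a local directory plays the role of the blob store.
# Chunk files are assumed to be written by the external process as "1", "2", ...
import os
from contextlib import contextmanager
from typing import Iterator, Optional

from dagster_pipes import PipesParams

from dagster._core.pipes.utils import PipesBlobStoreMessageReader


class LocalDirMessageReader(PipesBlobStoreMessageReader):
    def __init__(self, dir_path: str, interval: float = 10):
        super().__init__(interval=interval)
        self._dir_path = dir_path

    @contextmanager
    def get_params(self) -> Iterator[PipesParams]:
        # Handed to the message writer in the external process so it knows
        # where to upload message chunks.
        yield {"path": self._dir_path}

    def download_messages_chunk(self, index: int, params: PipesParams) -> Optional[str]:
        chunk_path = os.path.join(params["path"], str(index))
        if not os.path.exists(chunk_path):
            return None
        with open(chunk_path, "r") as f:
            return f.read()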
\n\n\nclass PipesBlobStoreStdioReader(ABC):\n @abstractmethod\n def start(self, params: PipesParams, is_task_complete: Event) -> None: ...\n\n @abstractmethod\n def stop(self) -> None: ...\n\n @abstractmethod\n def is_ready(self, params: PipesParams) -> bool: ...\n\n\n@experimental\nclass PipesChunkedStdioReader(PipesBlobStoreStdioReader):\n """Reader for reading stdout/stderr logs from a blob store such as S3, Azure blob storage, or GCS.\n\n Args:\n interval (float): interval in seconds between attempts to download a chunk.\n target_stream (TextIO): The stream to which to write the logs. Typcially `sys.stdout` or `sys.stderr`.\n """\n\n def __init__(self, *, interval: float = 10, target_stream: TextIO):\n self.interval = interval\n self.target_stream = target_stream\n self.thread: Optional[Thread] = None\n\n @abstractmethod\n def download_log_chunk(self, params: PipesParams) -> Optional[str]: ...\n\n def start(self, params: PipesParams, is_task_complete: Event) -> None:\n self.thread = Thread(target=self._reader_thread, args=(params, is_task_complete))\n self.thread.start()\n\n def stop(self) -> None:\n if self.thread:\n self.thread.join()\n\n def _reader_thread(\n self,\n params: PipesParams,\n is_task_complete: Event,\n ) -> None:\n start_or_last_download = datetime.datetime.now()\n while True:\n now = datetime.datetime.now()\n if (\n (now - start_or_last_download).seconds > self.interval or is_task_complete.is_set()\n ) and self.is_ready(params):\n start_or_last_download = now\n chunk = self.download_log_chunk(params)\n if chunk:\n self.target_stream.write(chunk)\n elif is_task_complete.is_set():\n break\n time.sleep(self.interval)\n\n\nclass PipesNoOpStdioReader(PipesBlobStoreStdioReader):\n """Default implementation for a pipes stdio reader that does nothing."""\n\n def start(self, params: PipesParams, is_task_complete: Event) -> None:\n pass\n\n def stop(self) -> None:\n pass\n\n def is_ready(self, params: PipesParams) -> bool:\n return True\n\n\ndef extract_message_or_forward_to_stdout(handler: "PipesMessageHandler", log_line: str):\n # exceptions as control flow, you love to see it\n try:\n message = json.loads(log_line)\n if PIPES_PROTOCOL_VERSION_FIELD in message.keys():\n handler.handle_message(message)\n else:\n sys.stdout.writelines((log_line, "\\n"))\n except Exception:\n # move non-message logs in to stdout for compute log capture\n sys.stdout.writelines((log_line, "\\n"))\n\n\n_FAIL_TO_YIELD_ERROR_MESSAGE = (\n "Did you forget to `yield from pipes_session.get_results()` or `return"\n " <PipesClient>.run(...).get_results`? If using `open_pipes_session`,"\n " `pipes_session.get_results` should be called once after the `open_pipes_session` block has"\n " exited to yield any remaining buffered results via `<PipesSession>.get_results()`."\n " If using `<PipesClient>.run`, you should always return"\n " `<PipesClient>.run(...).get_results()` or `<PipesClient>.run(...).get_materialize_result()`."\n)\n\n\n
[docs]@experimental\n@contextmanager\ndef open_pipes_session(\n context: OpExecutionContext,\n context_injector: PipesContextInjector,\n message_reader: PipesMessageReader,\n extras: Optional[PipesExtras] = None,\n) -> Iterator[PipesSession]:\n """Context manager that opens and closes a pipes session.\n\n This context manager should be used to wrap the launch of an external process using the pipe\n protocol to report results back to Dagster. The yielded :py:class:`PipesSession` should be used\n to (a) obtain the environment variables that need to be provided to the external process; (b)\n access results streamed back from the external process.\n\n This method is an alternative to :py:class:`PipesClient` subclasses for users who want more\n control over how pipes processes are launched. When using `open_pipes_session`, it is the user's\n responsibility to inject the message reader and context injector parameters available on the\n yielded `PipesSession` and pass them to the appropriate API when launching the external process.\n Typically these parameters should be set as environment variables.\n\n\n Args:\n context (OpExecutionContext): The context for the current op/asset execution.\n context_injector (PipesContextInjector): The context injector to use to inject context into the external process.\n message_reader (PipesMessageReader): The message reader to use to read messages from the external process.\n extras (Optional[PipesExtras]): Optional extras to pass to the external process via the injected context.\n\n Yields:\n PipesSession: Interface for interacting with the external process.\n\n .. code-block:: python\n\n import subprocess\n from dagster import open_pipes_session\n\n extras = {"foo": "bar"}\n\n @asset\n def ext_asset(context: OpExecutionContext):\n with open_pipes_session(\n context=context,\n extras={"foo": "bar"},\n context_injector=ExtTempFileContextInjector(),\n message_reader=ExtTempFileMessageReader(),\n ) as pipes_session:\n subprocess.Popen(\n ["/bin/python", "/path/to/script.py"],\n env={**pipes_session.get_bootstrap_env_vars()}\n )\n while process.poll() is None:\n yield from pipes_session.get_results()\n\n yield from pipes_session.get_results()\n """\n context.set_requires_typed_event_stream(error_message=_FAIL_TO_YIELD_ERROR_MESSAGE)\n context_data = build_external_execution_context_data(context, extras)\n message_handler = PipesMessageHandler(context)\n try:\n with context_injector.inject_context(\n context_data\n ) as ci_params, message_handler.handle_messages(message_reader) as mr_params:\n yield PipesSession(\n context_data=context_data,\n message_handler=message_handler,\n context_injector_params=ci_params,\n message_reader_params=mr_params,\n )\n finally:\n if not message_handler.received_any_message:\n context.log.warn(\n "[pipes] did not receive any messages from external process. Check stdout / stderr"\n " logs from the external process if"\n f" possible.\\n{context_injector.__class__.__name__}:"\n f" {context_injector.no_messages_debug_text()}\\n{message_reader.__class__.__name__}:"\n f" {message_reader.no_messages_debug_text()}\\n"\n )\n elif not message_handler.received_closed_message:\n context.log.warn(\n "[pipes] did not receive closed message from external process. Buffered messages"\n " may have been discarded without being delivered. Use `open_dagster_pipes` as a"\n " context manager (a with block) to ensure that cleanup is successfully completed."\n " If that is not possible, manually call `PipesContext.close()` before process"\n " exit."\n )
\n
", "current_page_name": "_modules/dagster/_core/pipes/utils", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.pipes.utils"}}, "run_coordinator": {"default_run_coordinator": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.run_coordinator.default_run_coordinator

\nimport logging\nfrom typing import Mapping, Optional\n\nfrom typing_extensions import Self\n\nimport dagster._check as check\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._core.storage.dagster_run import DagsterRun, DagsterRunStatus\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData\n\nfrom .base import RunCoordinator, SubmitRunContext\n\n\n
[docs]class DefaultRunCoordinator(RunCoordinator, ConfigurableClass):\n """Immediately send runs to the run launcher."""\n\n def __init__(self, inst_data: Optional[ConfigurableClassData] = None):\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n self._logger = logging.getLogger("dagster.run_coordinator.default_run_coordinator")\n super().__init__()\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n @classmethod\n def config_type(cls) -> UserConfigSchema:\n return {}\n\n @classmethod\n def from_config_value(\n cls, inst_data: Optional[ConfigurableClassData], config_value: Mapping[str, object]\n ) -> Self:\n return cls(inst_data=inst_data, **config_value)\n\n def submit_run(self, context: SubmitRunContext) -> DagsterRun:\n dagster_run = context.dagster_run\n\n if dagster_run.status == DagsterRunStatus.NOT_STARTED:\n self._instance.launch_run(dagster_run.run_id, context.workspace)\n else:\n self._logger.warning(\n f"submit_run called for run {dagster_run.run_id} with status "\n f"{dagster_run.status.value}, skipping launch."\n )\n\n run = self._instance.get_run_by_id(dagster_run.run_id)\n if run is None:\n check.failed(f"Failed to reload run {dagster_run.run_id}")\n return run\n\n def cancel_run(self, run_id: str) -> bool:\n return self._instance.run_launcher.terminate(run_id)
\n
", "current_page_name": "_modules/dagster/_core/run_coordinator/default_run_coordinator", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.run_coordinator.default_run_coordinator"}, "queued_run_coordinator": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.run_coordinator.queued_run_coordinator

\nimport logging\nfrom typing import Any, Mapping, NamedTuple, Optional, Sequence\n\nfrom typing_extensions import Self\n\nfrom dagster import (\n    DagsterEvent,\n    DagsterEventType,\n    IntSource,\n    String,\n    _check as check,\n)\nfrom dagster._builtins import Bool\nfrom dagster._config import Array, Field, Noneable, ScalarUnion, Shape\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._core.instance import T_DagsterInstance\nfrom dagster._core.storage.dagster_run import DagsterRun, DagsterRunStatus\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData\n\nfrom .base import RunCoordinator, SubmitRunContext\n\n\nclass RunQueueConfig(\n    NamedTuple(\n        "_RunQueueConfig",\n        [\n            ("max_concurrent_runs", int),\n            ("tag_concurrency_limits", Sequence[Mapping[str, Any]]),\n            ("max_user_code_failure_retries", int),\n            ("user_code_failure_retry_delay", int),\n        ],\n    )\n):\n    def __new__(\n        cls,\n        max_concurrent_runs: int,\n        tag_concurrency_limits: Optional[Sequence[Mapping[str, Any]]],\n        max_user_code_failure_retries: int = 0,\n        user_code_failure_retry_delay: int = 60,\n    ):\n        return super(RunQueueConfig, cls).__new__(\n            cls,\n            check.int_param(max_concurrent_runs, "max_concurrent_runs"),\n            check.opt_sequence_param(tag_concurrency_limits, "tag_concurrency_limits"),\n            check.int_param(max_user_code_failure_retries, "max_user_code_failure_retries"),\n            check.int_param(user_code_failure_retry_delay, "user_code_failure_retry_delay"),\n        )\n\n\n
[docs]class QueuedRunCoordinator(RunCoordinator[T_DagsterInstance], ConfigurableClass):\n """Enqueues runs via the run storage, to be deqeueued by the Dagster Daemon process. Requires\n the Dagster Daemon process to be alive in order for runs to be launched.\n """\n\n def __init__(\n self,\n max_concurrent_runs: Optional[int] = None,\n tag_concurrency_limits: Optional[Sequence[Mapping[str, Any]]] = None,\n dequeue_interval_seconds: Optional[int] = None,\n dequeue_use_threads: Optional[bool] = None,\n dequeue_num_workers: Optional[int] = None,\n max_user_code_failure_retries: Optional[int] = None,\n user_code_failure_retry_delay: Optional[int] = None,\n inst_data: Optional[ConfigurableClassData] = None,\n ):\n self._inst_data: Optional[ConfigurableClassData] = check.opt_inst_param(\n inst_data, "inst_data", ConfigurableClassData\n )\n self._max_concurrent_runs: int = check.opt_int_param(\n max_concurrent_runs, "max_concurrent_runs", 10\n )\n check.invariant(\n self._max_concurrent_runs >= -1,\n "Negative values other than -1 (which disables the limit) for max_concurrent_runs"\n " are disallowed.",\n )\n self._tag_concurrency_limits: Sequence[Mapping[str, Any]] = check.opt_list_param(\n tag_concurrency_limits,\n "tag_concurrency_limits",\n )\n self._dequeue_interval_seconds: int = check.opt_int_param(\n dequeue_interval_seconds, "dequeue_interval_seconds", 5\n )\n self._dequeue_use_threads: bool = check.opt_bool_param(\n dequeue_use_threads, "dequeue_use_threads", False\n )\n self._dequeue_num_workers: Optional[int] = check.opt_int_param(\n dequeue_num_workers, "dequeue_num_workers"\n )\n self._max_user_code_failure_retries: int = check.opt_int_param(\n max_user_code_failure_retries, "max_user_code_failure_retries", 0\n )\n self._user_code_failure_retry_delay: int = check.opt_int_param(\n user_code_failure_retry_delay, "user_code_failure_retry_delay", 60\n )\n self._logger = logging.getLogger("dagster.run_coordinator.queued_run_coordinator")\n super().__init__()\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n def get_run_queue_config(self) -> RunQueueConfig:\n return RunQueueConfig(\n max_concurrent_runs=self._max_concurrent_runs,\n tag_concurrency_limits=self._tag_concurrency_limits,\n max_user_code_failure_retries=self._max_user_code_failure_retries,\n user_code_failure_retry_delay=self._user_code_failure_retry_delay,\n )\n\n @property\n def dequeue_interval_seconds(self) -> int:\n return self._dequeue_interval_seconds\n\n @property\n def dequeue_use_threads(self) -> bool:\n return self._dequeue_use_threads\n\n @property\n def dequeue_num_workers(self) -> Optional[int]:\n return self._dequeue_num_workers\n\n @classmethod\n def config_type(cls) -> UserConfigSchema:\n return {\n "max_concurrent_runs": Field(\n config=IntSource,\n is_required=False,\n description=(\n "The maximum number of runs that are allowed to be in progress at once."\n " Defaults to 10. Set to -1 to disable the limit. Set to 0 to stop any runs"\n " from launching. Any other negative values are disallowed."\n ),\n ),\n "tag_concurrency_limits": Field(\n config=Noneable(\n Array(\n Shape(\n {\n "key": String,\n "value": Field(\n ScalarUnion(\n scalar_type=String,\n non_scalar_schema=Shape({"applyLimitPerUniqueValue": Bool}),\n ),\n is_required=False,\n ),\n "limit": Field(int),\n }\n )\n )\n ),\n is_required=False,\n description=(\n "A set of limits that are applied to runs with particular tags. If a value is"\n " set, the limit is applied to only that key-value pair. 
If no value is set,"\n " the limit is applied across all values of that key. If the value is set to a"\n " dict with `applyLimitPerUniqueValue: true`, the limit will apply to the"\n " number of unique values for that key."\n ),\n ),\n "dequeue_interval_seconds": Field(\n config=IntSource,\n is_required=False,\n description=(\n "The interval in seconds at which the Dagster Daemon "\n "should periodically check the run queue for new runs to launch."\n ),\n ),\n "dequeue_use_threads": Field(\n config=bool,\n is_required=False,\n description=(\n "Whether or not to use threads for concurrency when launching dequeued runs."\n ),\n ),\n "dequeue_num_workers": Field(\n config=IntSource,\n is_required=False,\n description=(\n "If dequeue_use_threads is true, limit the number of concurrent worker threads."\n ),\n ),\n "max_user_code_failure_retries": Field(\n config=IntSource,\n is_required=False,\n default_value=0,\n description=(\n "If there is an error reaching a Dagster gRPC server while dequeuing the run,"\n " how many times to retry the dequeue before failing it. The only run launcher"\n " that requires the gRPC server to be running is the DefaultRunLauncher, so"\n " setting this will have no effect unless that run launcher is being used."\n ),\n ),\n "user_code_failure_retry_delay": Field(\n config=IntSource,\n is_required=False,\n default_value=60,\n description=(\n "If there is an error reaching a Dagster gRPC server while dequeuing the run,"\n " how long to wait before retrying any runs from that same code location. The"\n " only run launcher that requires the gRPC server to be running is the"\n " DefaultRunLauncher, so setting this will have no effect unless that run"\n " launcher is being used."\n ),\n ),\n }\n\n @classmethod\n def from_config_value(\n cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any]\n ) -> Self:\n return cls(\n inst_data=inst_data,\n max_concurrent_runs=config_value.get("max_concurrent_runs"),\n tag_concurrency_limits=config_value.get("tag_concurrency_limits"),\n dequeue_interval_seconds=config_value.get("dequeue_interval_seconds"),\n dequeue_use_threads=config_value.get("dequeue_use_threads"),\n dequeue_num_workers=config_value.get("dequeue_num_workers"),\n max_user_code_failure_retries=config_value.get("max_user_code_failure_retries"),\n user_code_failure_retry_delay=config_value.get("user_code_failure_retry_delay"),\n )\n\n def submit_run(self, context: SubmitRunContext) -> DagsterRun:\n dagster_run = context.dagster_run\n\n if dagster_run.status == DagsterRunStatus.NOT_STARTED:\n enqueued_event = DagsterEvent(\n event_type_value=DagsterEventType.PIPELINE_ENQUEUED.value,\n job_name=dagster_run.job_name,\n )\n self._instance.report_dagster_event(enqueued_event, run_id=dagster_run.run_id)\n else:\n # the run was already submitted, this is a no-op\n self._logger.warning(\n f"submit_run called for run {dagster_run.run_id} with status "\n f"{dagster_run.status.value}, skipping enqueue."\n )\n\n run = self._instance.get_run_by_id(dagster_run.run_id)\n if run is None:\n check.failed(f"Failed to reload run {dagster_run.run_id}")\n return run\n\n def cancel_run(self, run_id: str) -> bool:\n run = self._instance.get_run_by_id(run_id)\n if not run:\n return False\n # NOTE: possible race condition if the dequeuer acts on this run at the same time\n # https://github.com/dagster-io/dagster/issues/3323\n if run.status == DagsterRunStatus.QUEUED:\n self._instance.report_run_canceling(\n run,\n message="Canceling run from the queue.",\n )\n 
self._instance.report_run_canceled(run)\n return True\n else:\n return self._instance.run_launcher.terminate(run_id)
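In a deployment, `QueuedRunCoordinator` is configured through the `run_coordinator` block of `dagster.yaml` using the config schema above. The snippet below constructs it directly only to illustrate the shape of `tag_concurrency_limits`; the tag keys, values, and limits are hypothetical.

# Illustration of the tag_concurrency_limits shape. In practice this is set in
# dagster.yaml rather than constructed directly; the tags below are hypothetical.
from dagster._core.run_coordinator.queued_run_coordinator import QueuedRunCoordinator

coordinator = QueuedRunCoordinator(
    max_concurrent_runs=10,
    tag_concurrency_limits=[
        # At most 2 in-progress runs tagged database=redshift.
        {"key": "database", "value": "redshift", "limit": 2},
        # At most 1 in-progress run per unique value of the "team" tag.
        {"key": "team", "value": {"applyLimitPerUniqueValue": True}, "limit": 1},
    ],
)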
\n
", "current_page_name": "_modules/dagster/_core/run_coordinator/queued_run_coordinator", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.run_coordinator.queued_run_coordinator"}}, "scheduler": {"scheduler": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.scheduler.scheduler

\nimport abc\nimport os\nfrom typing import Any, Mapping, NamedTuple, Optional, Sequence\n\nfrom typing_extensions import Self\n\nimport dagster._check as check\nfrom dagster._config import Field, IntSource\nfrom dagster._core.definitions.run_request import InstigatorType\nfrom dagster._core.errors import DagsterError\nfrom dagster._core.host_representation import ExternalSchedule\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._core.scheduler.instigation import (\n    InstigatorState,\n    InstigatorStatus,\n    ScheduleInstigatorData,\n)\nfrom dagster._serdes import ConfigurableClass\nfrom dagster._serdes.config_class import ConfigurableClassData\nfrom dagster._seven import get_current_datetime_in_utc\nfrom dagster._utils import mkdir_p\n\n\nclass DagsterSchedulerError(DagsterError):\n    """Base class for all Dagster Scheduler errors."""\n\n\nclass DagsterScheduleDoesNotExist(DagsterSchedulerError):\n    """Errors raised when fetching a schedule."""\n\n\nclass SchedulerDebugInfo(\n    NamedTuple(\n        "SchedulerDebugInfo",\n        [\n            ("errors", Sequence[str]),\n            ("scheduler_config_info", str),\n            ("scheduler_info", str),\n            ("schedule_storage", Sequence[str]),\n        ],\n    )\n):\n    def __new__(\n        cls,\n        errors: Sequence[str],\n        scheduler_config_info: str,\n        scheduler_info: str,\n        schedule_storage: Sequence[str],\n    ):\n        return super(SchedulerDebugInfo, cls).__new__(\n            cls,\n            errors=check.sequence_param(errors, "errors", of_type=str),\n            scheduler_config_info=check.str_param(scheduler_config_info, "scheduler_config_info"),\n            scheduler_info=check.str_param(scheduler_info, "scheduler_info"),\n            schedule_storage=check.sequence_param(\n                schedule_storage, "schedule_storage", of_type=str\n            ),\n        )\n\n\n
[docs]class Scheduler(abc.ABC):\n """Abstract base class for a scheduler. This component is responsible for interfacing with\n an external system such as cron to ensure scheduled repeated execution according.\n """\n\n def start_schedule(\n self, instance: DagsterInstance, external_schedule: ExternalSchedule\n ) -> InstigatorState:\n """Updates the status of the given schedule to `InstigatorStatus.RUNNING` in schedule storage,.\n\n This should not be overridden by subclasses.\n\n Args:\n instance (DagsterInstance): The current instance.\n external_schedule (ExternalSchedule): The schedule to start\n\n """\n check.inst_param(instance, "instance", DagsterInstance)\n check.inst_param(external_schedule, "external_schedule", ExternalSchedule)\n\n stored_state = instance.get_instigator_state(\n external_schedule.get_external_origin_id(), external_schedule.selector_id\n )\n computed_state = external_schedule.get_current_instigator_state(stored_state)\n if computed_state.is_running:\n return computed_state\n\n new_instigator_data = ScheduleInstigatorData(\n external_schedule.cron_schedule,\n get_current_datetime_in_utc().timestamp(),\n )\n\n if not stored_state:\n started_state = InstigatorState(\n external_schedule.get_external_origin(),\n InstigatorType.SCHEDULE,\n InstigatorStatus.RUNNING,\n new_instigator_data,\n )\n instance.add_instigator_state(started_state)\n else:\n started_state = stored_state.with_status(InstigatorStatus.RUNNING).with_data(\n new_instigator_data\n )\n instance.update_instigator_state(started_state)\n return started_state\n\n def stop_schedule(\n self,\n instance: DagsterInstance,\n schedule_origin_id: str,\n schedule_selector_id: str,\n external_schedule: Optional[ExternalSchedule],\n ) -> InstigatorState:\n """Updates the status of the given schedule to `InstigatorStatus.STOPPED` in schedule storage,.\n\n This should not be overridden by subclasses.\n\n Args:\n schedule_origin_id (string): The id of the schedule target to stop running.\n """\n check.str_param(schedule_origin_id, "schedule_origin_id")\n check.opt_inst_param(external_schedule, "external_schedule", ExternalSchedule)\n\n stored_state = instance.get_instigator_state(schedule_origin_id, schedule_selector_id)\n\n if not external_schedule:\n computed_state = stored_state\n else:\n computed_state = external_schedule.get_current_instigator_state(stored_state)\n\n if computed_state and not computed_state.is_running:\n return computed_state\n\n if not stored_state:\n assert external_schedule\n stopped_state = InstigatorState(\n external_schedule.get_external_origin(),\n InstigatorType.SCHEDULE,\n InstigatorStatus.STOPPED,\n ScheduleInstigatorData(\n external_schedule.cron_schedule,\n ),\n )\n instance.add_instigator_state(stopped_state)\n else:\n stopped_state = stored_state.with_status(InstigatorStatus.STOPPED).with_data(\n ScheduleInstigatorData(\n cron_schedule=computed_state.instigator_data.cron_schedule, # type: ignore\n )\n )\n instance.update_instigator_state(stopped_state)\n\n return stopped_state\n\n @abc.abstractmethod\n def debug_info(self) -> str:\n """Returns debug information about the scheduler."""\n\n @abc.abstractmethod\n def get_logs_path(self, instance: DagsterInstance, schedule_origin_id: str) -> str:\n """Get path to store logs for schedule.\n\n Args:\n schedule_origin_id (string): The id of the schedule target to retrieve the log path for\n """
\n\n\nDEFAULT_MAX_CATCHUP_RUNS = 5\n\n\n
[docs]class DagsterDaemonScheduler(Scheduler, ConfigurableClass):\n """Default scheduler implementation that submits runs from the `dagster-daemon`\n long-lived process. Periodically checks each running schedule for execution times that don't\n have runs yet and launches them.\n """\n\n def __init__(\n self,\n max_catchup_runs: int = DEFAULT_MAX_CATCHUP_RUNS,\n max_tick_retries: int = 0,\n inst_data: Optional[ConfigurableClassData] = None,\n ):\n self.max_catchup_runs = check.opt_int_param(\n max_catchup_runs, "max_catchup_runs", DEFAULT_MAX_CATCHUP_RUNS\n )\n self.max_tick_retries = check.opt_int_param(max_tick_retries, "max_tick_retries", 0)\n self._inst_data = inst_data\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n @classmethod\n def config_type(cls):\n return {\n "max_catchup_runs": Field(\n IntSource,\n is_required=False,\n default_value=DEFAULT_MAX_CATCHUP_RUNS,\n description="""For partitioned schedules, controls the maximum number of past\n partitions for each schedule that will be considered when looking for missing\n runs . Generally this parameter will only come into play if the scheduler\n falls behind or launches after experiencing downtime. This parameter will not be checked for\n schedules without partition sets (for example, schedules created using the @schedule\n decorator) - only the most recent execution time will be considered for those schedules.\n\n Note that no matter what this value is, the scheduler will never launch a run from a time\n before the schedule was turned on (even if the start_date on the schedule is earlier) - if\n you want to launch runs for earlier partitions, launch a backfill.\n """,\n ),\n "max_tick_retries": Field(\n IntSource,\n default_value=0,\n is_required=False,\n description=(\n "For each schedule tick that raises an error, how many times to retry that tick"\n ),\n ),\n }\n\n @classmethod\n def from_config_value(\n cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any]\n ) -> Self:\n return DagsterDaemonScheduler(inst_data=inst_data, **config_value)\n\n def debug_info(self) -> str:\n return ""\n\n def wipe(self, instance: DagsterInstance) -> None:\n pass\n\n def _get_or_create_logs_directory(\n self, instance: DagsterInstance, schedule_origin_id: str\n ) -> str:\n check.inst_param(instance, "instance", DagsterInstance)\n check.str_param(schedule_origin_id, "schedule_origin_id")\n\n logs_directory = os.path.join(instance.schedules_directory(), "logs", schedule_origin_id)\n if not os.path.isdir(logs_directory):\n mkdir_p(logs_directory)\n\n return logs_directory\n\n def get_logs_path(self, instance: DagsterInstance, schedule_origin_id: str) -> str:\n check.inst_param(instance, "instance", DagsterInstance)\n check.str_param(schedule_origin_id, "schedule_origin_id")\n\n logs_directory = self._get_or_create_logs_directory(instance, schedule_origin_id)\n return os.path.join(logs_directory, "scheduler.log")
\n
", "current_page_name": "_modules/dagster/_core/scheduler/scheduler", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.scheduler.scheduler"}}, "storage": {"asset_value_loader": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.asset_value_loader

\nfrom contextlib import ExitStack\nfrom typing import Any, Dict, Mapping, Optional, Type, cast\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.definitions.assets import AssetsDefinition\nfrom dagster._core.definitions.events import AssetKey, CoercibleToAssetKey\nfrom dagster._core.definitions.job_definition import (\n    default_job_io_manager_with_fs_io_manager_schema,\n)\nfrom dagster._core.definitions.partition_key_range import PartitionKeyRange\nfrom dagster._core.definitions.resource_definition import ResourceDefinition\nfrom dagster._core.definitions.source_asset import SourceAsset\nfrom dagster._core.definitions.utils import DEFAULT_IO_MANAGER_KEY\nfrom dagster._core.execution.build_resources import build_resources, get_mapped_resource_config\nfrom dagster._core.execution.context.input import build_input_context\nfrom dagster._core.execution.context.output import build_output_context\nfrom dagster._core.execution.resources_init import get_transitive_required_resource_keys\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._core.instance.config import is_dagster_home_set\nfrom dagster._core.types.dagster_type import resolve_dagster_type\nfrom dagster._utils.merger import merge_dicts\n\nfrom .io_manager import IOManager\n\n\n
[docs]class AssetValueLoader:\n """Caches resource definitions that are used to load asset values across multiple load\n invocations.\n\n Should not be instantiated directly. Instead, use\n :py:meth:`~dagster.RepositoryDefinition.get_asset_value_loader`.\n """\n\n def __init__(\n self,\n assets_defs_by_key: Mapping[AssetKey, AssetsDefinition],\n source_assets_by_key: Mapping[AssetKey, SourceAsset],\n instance: Optional[DagsterInstance] = None,\n ):\n self._assets_defs_by_key = assets_defs_by_key\n self._source_assets_by_key = source_assets_by_key\n self._resource_instance_cache: Dict[str, object] = {}\n self._exit_stack: ExitStack = ExitStack().__enter__()\n if not instance and is_dagster_home_set():\n self._instance = self._exit_stack.enter_context(DagsterInstance.get())\n else:\n self._instance = instance\n\n def _ensure_resource_instances_in_cache(\n self,\n resource_defs: Mapping[str, ResourceDefinition],\n resource_config: Optional[Mapping[str, Any]] = None,\n ):\n for built_resource_key, built_resource in (\n self._exit_stack.enter_context(\n build_resources(\n resources={\n resource_key: self._resource_instance_cache.get(resource_key, resource_def)\n for resource_key, resource_def in resource_defs.items()\n },\n instance=self._instance,\n resource_config=resource_config,\n )\n )\n ._asdict()\n .items()\n ):\n self._resource_instance_cache[built_resource_key] = built_resource\n\n
[docs] @public\n def load_asset_value(\n self,\n asset_key: CoercibleToAssetKey,\n *,\n python_type: Optional[Type[object]] = None,\n partition_key: Optional[str] = None,\n metadata: Optional[Dict[str, Any]] = None,\n resource_config: Optional[Mapping[str, Any]] = None,\n ) -> object:\n """Loads the contents of an asset as a Python object.\n\n Invokes `load_input` on the :py:class:`IOManager` associated with the asset.\n\n Args:\n asset_key (Union[AssetKey, Sequence[str], str]): The key of the asset to load.\n python_type (Optional[Type]): The python type to load the asset as. This is what will\n be returned inside `load_input` by `context.dagster_type.typing_type`.\n partition_key (Optional[str]): The partition of the asset to load.\n metadata (Optional[Dict[str, Any]]): Input metadata to pass to the :py:class:`IOManager`\n (is equivalent to setting the metadata argument in `In` or `AssetIn`).\n resource_config (Optional[Any]): A dictionary of resource configurations to be passed\n to the :py:class:`IOManager`.\n\n Returns:\n The contents of an asset as a Python object.\n """\n asset_key = AssetKey.from_coercible(asset_key)\n resource_config = resource_config or {}\n output_metadata = {}\n\n if asset_key in self._assets_defs_by_key:\n assets_def = self._assets_defs_by_key[asset_key]\n\n resource_defs = merge_dicts(\n {DEFAULT_IO_MANAGER_KEY: default_job_io_manager_with_fs_io_manager_schema},\n assets_def.resource_defs,\n )\n io_manager_key = assets_def.get_io_manager_key_for_asset_key(asset_key)\n io_manager_def = resource_defs[io_manager_key]\n name = assets_def.get_output_name_for_asset_key(asset_key)\n output_metadata = assets_def.metadata_by_key[asset_key]\n op_def = assets_def.get_op_def_for_asset_key(asset_key)\n asset_partitions_def = assets_def.partitions_def\n elif asset_key in self._source_assets_by_key:\n source_asset = self._source_assets_by_key[asset_key]\n\n resource_defs = merge_dicts(\n {DEFAULT_IO_MANAGER_KEY: default_job_io_manager_with_fs_io_manager_schema},\n source_asset.resource_defs,\n )\n io_manager_key = source_asset.get_io_manager_key()\n io_manager_def = resource_defs[io_manager_key]\n name = asset_key.path[-1]\n output_metadata = source_asset.raw_metadata\n op_def = None\n asset_partitions_def = source_asset.partitions_def\n else:\n check.failed(f"Asset key {asset_key} not found")\n\n required_resource_keys = get_transitive_required_resource_keys(\n io_manager_def.required_resource_keys, resource_defs\n ) | {io_manager_key}\n\n self._ensure_resource_instances_in_cache(\n {k: v for k, v in resource_defs.items() if k in required_resource_keys},\n resource_config=resource_config,\n )\n io_manager = cast(IOManager, self._resource_instance_cache[io_manager_key])\n\n io_config = resource_config.get(io_manager_key)\n io_resource_config = {io_manager_key: io_config} if io_config else {}\n\n io_manager_config = get_mapped_resource_config(\n {io_manager_key: io_manager_def}, io_resource_config\n )\n\n input_context = build_input_context(\n name=None,\n asset_key=asset_key,\n dagster_type=resolve_dagster_type(python_type),\n upstream_output=build_output_context(\n name=name,\n metadata=output_metadata,\n asset_key=asset_key,\n op_def=op_def,\n resource_config=resource_config,\n ),\n resources=self._resource_instance_cache,\n resource_config=io_manager_config[io_manager_key].config,\n partition_key=partition_key,\n asset_partition_key_range=(\n PartitionKeyRange(partition_key, partition_key)\n if partition_key is not None\n else None\n ),\n 
asset_partitions_def=asset_partitions_def,\n instance=self._instance,\n metadata=metadata,\n )\n\n return io_manager.load_input(input_context)
\n\n def __enter__(self):\n return self\n\n def __exit__(self, *exc):\n self._exit_stack.close()
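As the class docstring notes, an `AssetValueLoader` is obtained via `RepositoryDefinition.get_asset_value_loader` rather than constructed directly. A minimal usage sketch, assuming a `RepositoryDefinition` named `repo` and hypothetical asset keys:

# Sketch: load stored asset values while reusing resource/IO manager instances.
# `repo` and the asset keys are hypothetical placeholders.
with repo.get_asset_value_loader() as loader:
    value = loader.load_asset_value("my_asset")
    partitioned_value = loader.load_asset_value(
        "my_partitioned_asset", partition_key="2023-10-12"
    )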
\n
", "current_page_name": "_modules/dagster/_core/storage/asset_value_loader", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.asset_value_loader"}, "base_storage": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.base_storage

\nfrom abc import ABC, abstractmethod\n\nfrom dagster._core.instance import MayHaveInstanceWeakref, T_DagsterInstance\n\nfrom .event_log.base import EventLogStorage\nfrom .runs.base import RunStorage\nfrom .schedules.base import ScheduleStorage\n\n\n
[docs]class DagsterStorage(ABC, MayHaveInstanceWeakref[T_DagsterInstance]):\n """Abstract base class for Dagster persistent storage, for reading and writing data for runs,\n events, and schedule/sensor state.\n\n Users should not directly instantiate concrete subclasses of this class; they are instantiated\n by internal machinery when ``dagster-webserver`` and ``dagster-daemon`` load, based on the values in the\n ``dagster.yaml`` file in ``$DAGSTER_HOME``. Configuration of concrete subclasses of this class\n should be done by setting values in that file.\n """\n\n @property\n @abstractmethod\n def event_log_storage(self) -> EventLogStorage[T_DagsterInstance]:\n raise NotImplementedError()\n\n @property\n @abstractmethod\n def run_storage(self) -> RunStorage[T_DagsterInstance]:\n raise NotImplementedError()\n\n @property\n @abstractmethod\n def schedule_storage(self) -> ScheduleStorage[T_DagsterInstance]:\n raise NotImplementedError()
\n
", "current_page_name": "_modules/dagster/_core/storage/base_storage", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.base_storage"}, "captured_log_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.captured_log_manager

\nfrom abc import ABC, abstractmethod\nfrom contextlib import contextmanager\nfrom typing import IO, Callable, Generator, Iterator, NamedTuple, Optional, Sequence\n\nfrom typing_extensions import Final, Self\n\nimport dagster._check as check\nfrom dagster._core.storage.compute_log_manager import ComputeIOType\n\nMAX_BYTES_CHUNK_READ: Final = 4194304  # 4 MB\n\n\nclass CapturedLogContext(\n    NamedTuple(\n        "_CapturedLogContext",\n        [\n            ("log_key", Sequence[str]),\n            ("external_url", Optional[str]),\n            ("external_stdout_url", Optional[str]),\n            ("external_stderr_url", Optional[str]),\n        ],\n    )\n):\n    """Object representing the context in which logs are captured.  Can be used by external logging\n    sidecar implementations to point the Dagster UI to an external url to view compute logs instead of a\n    Dagster-managed location.\n    """\n\n    def __new__(\n        cls,\n        log_key: Sequence[str],\n        external_stdout_url: Optional[str] = None,\n        external_stderr_url: Optional[str] = None,\n        external_url: Optional[str] = None,\n    ):\n        if external_url and (external_stdout_url or external_stderr_url):\n            check.failed(\n                "Cannot specify both `external_url` and one of"\n                " `external_stdout_url`/`external_stderr_url`"\n            )\n\n        return super(CapturedLogContext, cls).__new__(\n            cls,\n            log_key,\n            external_stdout_url=external_stdout_url,\n            external_stderr_url=external_stderr_url,\n            external_url=external_url,\n        )\n\n\nclass CapturedLogData(\n    NamedTuple(\n        "_CapturedLogData",\n        [\n            ("log_key", Sequence[str]),\n            ("stdout", Optional[bytes]),\n            ("stderr", Optional[bytes]),\n            ("cursor", Optional[str]),\n        ],\n    )\n):\n    """Object representing captured log data, either a partial chunk of the log data or the full\n    capture.  
Contains the raw bytes and optionally the cursor offset for the partial chunk.\n    """\n\n    def __new__(\n        cls,\n        log_key: Sequence[str],\n        stdout: Optional[bytes] = None,\n        stderr: Optional[bytes] = None,\n        cursor: Optional[str] = None,\n    ):\n        return super(CapturedLogData, cls).__new__(cls, log_key, stdout, stderr, cursor)\n\n\nclass CapturedLogMetadata(\n    NamedTuple(\n        "_CapturedLogMetadata",\n        [\n            ("stdout_location", Optional[str]),\n            ("stderr_location", Optional[str]),\n            ("stdout_download_url", Optional[str]),\n            ("stderr_download_url", Optional[str]),\n        ],\n    )\n):\n    """Object representing metadata info for the captured log data, containing a display string for\n    the location of the log data and a URL for direct download of the captured log data.\n    """\n\n    def __new__(\n        cls,\n        stdout_location: Optional[str] = None,\n        stderr_location: Optional[str] = None,\n        stdout_download_url: Optional[str] = None,\n        stderr_download_url: Optional[str] = None,\n    ):\n        return super(CapturedLogMetadata, cls).__new__(\n            cls,\n            stdout_location=stdout_location,\n            stderr_location=stderr_location,\n            stdout_download_url=stdout_download_url,\n            stderr_download_url=stderr_download_url,\n        )\n\n\nclass CapturedLogSubscription:\n    def __init__(\n        self, manager: "CapturedLogManager", log_key: Sequence[str], cursor: Optional[str]\n    ):\n        self._manager = manager\n        self._log_key = log_key\n        self._cursor = cursor\n        self._observer: Optional[Callable[[CapturedLogData], None]] = None\n        self.is_complete = False\n\n    def __call__(self, observer: Optional[Callable[[CapturedLogData], None]]) -> Self:\n        self._observer = observer\n        self.fetch()\n        if self._manager.is_capture_complete(self._log_key):\n            self.complete()\n        return self\n\n    @property\n    def log_key(self) -> Sequence[str]:\n        return self._log_key\n\n    def dispose(self) -> None:\n        self._observer = None\n        self._manager.unsubscribe(self)\n\n    def fetch(self) -> None:\n        if not self._observer:\n            return\n\n        should_fetch = True\n        while should_fetch:\n            log_data = self._manager.get_log_data(\n                self._log_key,\n                self._cursor,\n                max_bytes=MAX_BYTES_CHUNK_READ,\n            )\n            if not self._cursor or log_data.cursor != self._cursor:\n                self._observer(log_data)\n                self._cursor = log_data.cursor\n            should_fetch = _has_max_data(log_data.stdout) or _has_max_data(log_data.stderr)\n\n    def complete(self) -> None:\n        self.is_complete = True\n\n\ndef _has_max_data(chunk: Optional[bytes]) -> bool:\n    # function is used as predicate but does not actually return a boolean\n    return chunk and len(chunk) >= MAX_BYTES_CHUNK_READ  # type: ignore\n\n\n
[docs]class CapturedLogManager(ABC):\n """Abstract base class for capturing the unstructured logs (stdout/stderr) in the current\n process, stored / retrieved with a provided log_key.\n """\n\n @abstractmethod\n @contextmanager\n def capture_logs(self, log_key: Sequence[str]) -> Generator[CapturedLogContext, None, None]:\n """Context manager for capturing the stdout/stderr within the current process, and persisting\n it under the given log key.\n\n Args:\n log_key (List[String]): The log key identifying the captured logs\n """\n\n @abstractmethod\n @contextmanager\n def open_log_stream(\n self, log_key: Sequence[str], io_type: ComputeIOType\n ) -> Iterator[Optional[IO[bytes]]]:\n """Context manager for providing an IO stream that enables the caller to write to a log stream\n managed by the captured log manager, to be read later using the given log key.\n\n Args:\n log_key (List[String]): The log key identifying the captured logs\n """\n\n @abstractmethod\n def is_capture_complete(self, log_key: Sequence[str]) -> bool:\n """Flag indicating when the log capture for a given log key has completed.\n\n Args:\n log_key (List[String]): The log key identifying the captured logs\n\n Returns:\n Boolean\n """\n\n @abstractmethod\n def get_log_data(\n self,\n log_key: Sequence[str],\n cursor: Optional[str] = None,\n max_bytes: Optional[int] = None,\n ) -> CapturedLogData:\n """Returns a chunk of the captured stdout logs for a given log key.\n\n Args:\n log_key (List[String]): The log key identifying the captured logs\n cursor (Optional[str]): A cursor representing the position of the log chunk to fetch\n max_bytes (Optional[int]): A limit on the size of the log chunk to fetch\n\n Returns:\n CapturedLogData\n """\n\n @abstractmethod\n def get_log_metadata(self, log_key: Sequence[str]) -> CapturedLogMetadata:\n """Returns the metadata of the captured logs for a given log key, including\n displayable information on where the logs are persisted.\n\n Args:\n log_key (List[String]): The log key identifying the captured logs\n\n Returns:\n CapturedLogMetadata\n """\n\n @abstractmethod\n def delete_logs(\n self, log_key: Optional[Sequence[str]] = None, prefix: Optional[Sequence[str]] = None\n ) -> None:\n """Deletes the captured logs for a given log key.\n\n Args:\n log_key(Optional[List[String]]): The log key of the logs to delete\n prefix(Optional[List[String]]): The prefix of the log keys to delete\n """\n\n @abstractmethod\n def subscribe(\n self, log_key: Sequence[str], cursor: Optional[str] = None\n ) -> CapturedLogSubscription:\n """Registers an observable object for log data.\n\n Args:\n log_key (List[String]): The log key identifying the captured logs\n cursor (Optional[String]): The string cursor marking the position within the log stream\n Returns:\n ComputeLogSubscription\n """\n\n @abstractmethod\n def unsubscribe(self, subscription: CapturedLogSubscription) -> None:\n """Deregisters an observable object from receiving log updates.\n\n Args:\n subscription (CapturedLogSubscription): subscription object which manages when to send\n back data to the subscriber\n """\n\n def build_log_key_for_run(self, run_id: str, step_key: str) -> Sequence[str]:\n """Legacy adapter to translate run_id/key to captured log manager-based log_key."""\n return [run_id, "compute_logs", step_key]
\n
", "current_page_name": "_modules/dagster/_core/storage/captured_log_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.captured_log_manager"}, "compute_log_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.compute_log_manager

\nfrom abc import ABC, abstractmethod\nfrom contextlib import contextmanager\nfrom enum import Enum\nfrom typing import Callable, Iterator, NamedTuple, Optional\n\nfrom typing_extensions import Self\n\nimport dagster._check as check\nfrom dagster._core.instance import MayHaveInstanceWeakref, T_DagsterInstance\nfrom dagster._core.storage.dagster_run import DagsterRun\n\nMAX_BYTES_FILE_READ = 33554432  # 32 MB\nMAX_BYTES_CHUNK_READ = 4194304  # 4 MB\n\n\nclass ComputeIOType(Enum):\n    STDOUT = "stdout"\n    STDERR = "stderr"\n\n\nclass ComputeLogFileData(\n    NamedTuple(\n        "ComputeLogFileData",\n        [\n            ("path", str),\n            ("data", Optional[str]),\n            ("cursor", int),\n            ("size", int),\n            ("download_url", Optional[str]),\n        ],\n    )\n):\n    """Representation of a chunk of compute execution log data."""\n\n    def __new__(\n        cls, path: str, data: Optional[str], cursor: int, size: int, download_url: Optional[str]\n    ):\n        return super(ComputeLogFileData, cls).__new__(\n            cls,\n            path=check.str_param(path, "path"),\n            data=check.opt_str_param(data, "data"),\n            cursor=check.int_param(cursor, "cursor"),\n            size=check.int_param(size, "size"),\n            download_url=check.opt_str_param(download_url, "download_url"),\n        )\n\n\n
[docs]class ComputeLogManager(ABC, MayHaveInstanceWeakref[T_DagsterInstance]):\n """Abstract base class for storing unstructured compute logs (stdout/stderr) from the compute\n steps of pipeline solids.\n """\n\n @contextmanager\n def watch(self, dagster_run: DagsterRun, step_key: Optional[str] = None) -> Iterator[None]:\n """Watch the stdout/stderr for a given execution for a given run_id / step_key and persist it.\n\n Args:\n dagster_run (DagsterRun): The run config\n step_key (Optional[String]): The step_key for a compute step\n """\n check.inst_param(dagster_run, "dagster_run", DagsterRun)\n check.opt_str_param(step_key, "step_key")\n\n if not self.enabled(dagster_run, step_key):\n yield\n return\n\n self.on_watch_start(dagster_run, step_key)\n with self._watch_logs(dagster_run, step_key):\n yield\n self.on_watch_finish(dagster_run, step_key)\n\n @contextmanager\n @abstractmethod\n def _watch_logs(\n self, dagster_run: DagsterRun, step_key: Optional[str] = None\n ) -> Iterator[None]:\n """Method to watch the stdout/stderr logs for a given run_id / step_key. Kept separate from\n blessed `watch` method, which triggers all the start/finish hooks that are necessary to\n implement the different remote implementations.\n\n Args:\n dagster_run (DagsterRun): The run config\n step_key (Optional[String]): The step_key for a compute step\n """\n\n @abstractmethod\n def get_local_path(self, run_id: str, key: str, io_type: ComputeIOType) -> str:\n """Get the local path of the logfile for a given execution step. This determines the\n location on the local filesystem to which stdout/stderr will be rerouted.\n\n Args:\n run_id (str): The id of the pipeline run.\n key (str): The unique descriptor of the execution step (e.g. `solid_invocation.compute`)\n io_type (ComputeIOType): Flag indicating the I/O type, either ComputeIOType.STDOUT or\n ComputeIOType.STDERR\n\n Returns:\n str\n """\n ...\n\n @abstractmethod\n def is_watch_completed(self, run_id: str, key: str) -> bool:\n """Flag indicating when computation for a given execution step has completed.\n\n Args:\n run_id (str): The id of the pipeline run.\n key (str): The unique descriptor of the execution step (e.g. `solid_invocation.compute`)\n\n Returns:\n Boolean\n """\n\n @abstractmethod\n def on_watch_start(self, dagster_run: DagsterRun, step_key: Optional[str]) -> None:\n """Hook called when starting to watch compute logs.\n\n Args:\n pipeline_run (PipelineRun): The pipeline run config\n step_key (Optional[String]): The step_key for a compute step\n """\n\n @abstractmethod\n def on_watch_finish(self, dagster_run: DagsterRun, step_key: Optional[str]) -> None:\n """Hook called when computation for a given execution step is finished.\n\n Args:\n pipeline_run (PipelineRun): The pipeline run config\n step_key (Optional[String]): The step_key for a compute step\n """\n\n @abstractmethod\n def download_url(self, run_id: str, key: str, io_type: ComputeIOType) -> str:\n """Get a URL where the logs can be downloaded.\n\n Args:\n run_id (str): The id of the pipeline run.\n key (str): The unique descriptor of the execution step (e.g. 
`solid_invocation.compute`)\n io_type (ComputeIOType): Flag indicating the I/O type, either stdout or stderr\n\n Returns:\n String\n """\n\n @abstractmethod\n def read_logs_file(\n self,\n run_id: str,\n key: str,\n io_type: ComputeIOType,\n cursor: int = 0,\n max_bytes: int = MAX_BYTES_FILE_READ,\n ) -> ComputeLogFileData:\n """Get compute log data for a given compute step.\n\n Args:\n run_id (str): The id of the pipeline run.\n key (str): The unique descriptor of the execution step (e.g. `solid_invocation.compute`)\n io_type (ComputeIOType): Flag indicating the I/O type, either stdout or stderr\n cursor (Optional[Int]): Starting cursor (byte) of log file\n max_bytes (Optional[Int]): Maximum number of bytes to be read and returned\n\n Returns:\n ComputeLogFileData\n """\n\n def enabled(self, _dagster_run: DagsterRun, _step_key: Optional[str]) -> bool:\n """Hook for disabling compute log capture.\n\n Args:\n _step_key (Optional[String]): The step_key for a compute step\n\n Returns:\n Boolean\n """\n return True\n\n @abstractmethod\n def on_subscribe(self, subscription: "ComputeLogSubscription") -> None:\n """Hook for managing streaming subscriptions for log data from `dagster-webserver`.\n\n Args:\n subscription (ComputeLogSubscription): subscription object which manages when to send\n back data to the subscriber\n """\n\n def on_unsubscribe(self, subscription: "ComputeLogSubscription") -> None:\n pass\n\n def observable(\n self, run_id: str, key: str, io_type: ComputeIOType, cursor: Optional[str] = None\n ) -> "ComputeLogSubscription":\n """Return a ComputeLogSubscription which streams back log data from the execution logs for a given\n compute step.\n\n Args:\n run_id (str): The id of the pipeline run.\n key (str): The unique descriptor of the execution step (e.g. `solid_invocation.compute`)\n io_type (ComputeIOType): Flag indicating the I/O type, either stdout or stderr\n cursor (Optional[Int]): Starting cursor (byte) of log file\n\n Returns:\n Observable\n """\n check.str_param(run_id, "run_id")\n check.str_param(key, "key")\n check.inst_param(io_type, "io_type", ComputeIOType)\n check.opt_str_param(cursor, "cursor")\n\n if cursor:\n cursor = int(cursor) # type: ignore # (var reassigned diff type)\n else:\n cursor = 0 # type: ignore # (var reassigned diff type)\n\n subscription = ComputeLogSubscription(self, run_id, key, io_type, cursor) # type: ignore # (var reassigned diff type)\n self.on_subscribe(subscription)\n return subscription\n\n def dispose(self):\n pass
\n\n\nclass ComputeLogSubscription:\n """Observable object that generates ComputeLogFileData objects as compute step execution logs\n are written.\n """\n\n def __init__(\n self,\n manager: ComputeLogManager,\n run_id: str,\n key: str,\n io_type: ComputeIOType,\n cursor: int,\n ):\n self.manager = manager\n self.run_id = run_id\n self.key = key\n self.io_type = io_type\n self.cursor = cursor\n self.observer: Optional[Callable[[ComputeLogFileData], None]] = None\n self.is_complete = False\n\n def __call__(self, observer: Callable[[ComputeLogFileData], None]) -> Self:\n self.observer = observer\n self.fetch()\n if self.manager.is_watch_completed(self.run_id, self.key):\n self.complete()\n return self\n\n def dispose(self) -> None:\n # called when the connection gets closed, allowing the observer to get GC'ed\n self.observer = None\n self.manager.on_unsubscribe(self)\n\n def fetch(self) -> None:\n if not self.observer:\n return\n\n should_fetch = True\n while should_fetch:\n update = self.manager.read_logs_file(\n self.run_id,\n self.key,\n self.io_type,\n self.cursor,\n max_bytes=MAX_BYTES_CHUNK_READ,\n )\n if not self.cursor or update.cursor != self.cursor:\n self.observer(update)\n self.cursor = update.cursor\n should_fetch = update.data and len(update.data.encode("utf-8")) >= MAX_BYTES_CHUNK_READ\n\n def complete(self) -> None:\n self.is_complete = True\n if not self.observer:\n return\n
", "current_page_name": "_modules/dagster/_core/storage/compute_log_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.compute_log_manager"}, "dagster_run": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.dagster_run

\nfrom datetime import datetime\nfrom enum import Enum\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Dict,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Union,\n)\n\nfrom typing_extensions import Self\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, public\nfrom dagster._core.definitions.asset_check_spec import AssetCheckKey\nfrom dagster._core.definitions.events import AssetKey\nfrom dagster._core.origin import JobPythonOrigin\nfrom dagster._core.storage.tags import PARENT_RUN_ID_TAG, ROOT_RUN_ID_TAG\nfrom dagster._core.utils import make_new_run_id\nfrom dagster._serdes.serdes import (\n    NamedTupleSerializer,\n    whitelist_for_serdes,\n)\n\nfrom .tags import (\n    BACKFILL_ID_TAG,\n    REPOSITORY_LABEL_TAG,\n    RESUME_RETRY_TAG,\n    SCHEDULE_NAME_TAG,\n    SENSOR_NAME_TAG,\n)\n\nif TYPE_CHECKING:\n    from dagster._core.host_representation.external import ExternalSchedule, ExternalSensor\n    from dagster._core.host_representation.origin import ExternalJobOrigin\n\n\n
[docs]@whitelist_for_serdes(storage_name="PipelineRunStatus")\nclass DagsterRunStatus(Enum):\n """The status of run execution."""\n\n # Runs waiting to be launched by the Dagster Daemon.\n QUEUED = "QUEUED"\n\n # Runs that have been launched, but execution has not yet started."""\n NOT_STARTED = "NOT_STARTED"\n\n # Runs that are managed outside of the Dagster control plane.\n MANAGED = "MANAGED"\n\n # Runs that have been launched, but execution has not yet started.\n STARTING = "STARTING"\n\n # Runs that have been launched and execution has started.\n STARTED = "STARTED"\n\n # Runs that have successfully completed.\n SUCCESS = "SUCCESS"\n\n # Runs that have failed to complete.\n FAILURE = "FAILURE"\n\n # Runs that are in-progress and pending to be canceled.\n CANCELING = "CANCELING"\n\n # Runs that have been canceled before completion.\n CANCELED = "CANCELED"
\n\n\n# These statuses that indicate a run may be using compute resources\nIN_PROGRESS_RUN_STATUSES = [\n DagsterRunStatus.STARTING,\n DagsterRunStatus.STARTED,\n DagsterRunStatus.CANCELING,\n]\n\n# This serves as an explicit list of run statuses that indicate that the run is not using compute\n# resources. This and the enum above should cover all run statuses.\nNON_IN_PROGRESS_RUN_STATUSES = [\n DagsterRunStatus.QUEUED,\n DagsterRunStatus.NOT_STARTED,\n DagsterRunStatus.SUCCESS,\n DagsterRunStatus.FAILURE,\n DagsterRunStatus.MANAGED,\n DagsterRunStatus.CANCELED,\n]\n\nFINISHED_STATUSES = [\n DagsterRunStatus.SUCCESS,\n DagsterRunStatus.FAILURE,\n DagsterRunStatus.CANCELED,\n]\n\n# Run statuses for runs that can be safely canceled.\n# Does not include the other unfinished statuses for the following reasons:\n# STARTING: Control has been ceded to the run worker, which will eventually move the run to a STARTED.\n# NOT_STARTED: Mostly replaced with STARTING. Runs are only here in the the brief window between\n# creating the run and launching or enqueueing it.\nCANCELABLE_RUN_STATUSES = [DagsterRunStatus.STARTED, DagsterRunStatus.QUEUED]\n\n\n@whitelist_for_serdes(storage_name="PipelineRunStatsSnapshot")\nclass DagsterRunStatsSnapshot(\n NamedTuple(\n "_DagsterRunStatsSnapshot",\n [\n ("run_id", str),\n ("steps_succeeded", int),\n ("steps_failed", int),\n ("materializations", int),\n ("expectations", int),\n ("enqueued_time", Optional[float]),\n ("launch_time", Optional[float]),\n ("start_time", Optional[float]),\n ("end_time", Optional[float]),\n ],\n )\n):\n def __new__(\n cls,\n run_id: str,\n steps_succeeded: int,\n steps_failed: int,\n materializations: int,\n expectations: int,\n enqueued_time: Optional[float],\n launch_time: Optional[float],\n start_time: Optional[float],\n end_time: Optional[float],\n ):\n return super(DagsterRunStatsSnapshot, cls).__new__(\n cls,\n run_id=check.str_param(run_id, "run_id"),\n steps_succeeded=check.int_param(steps_succeeded, "steps_succeeded"),\n steps_failed=check.int_param(steps_failed, "steps_failed"),\n materializations=check.int_param(materializations, "materializations"),\n expectations=check.int_param(expectations, "expectations"),\n enqueued_time=check.opt_float_param(enqueued_time, "enqueued_time"),\n launch_time=check.opt_float_param(launch_time, "launch_time"),\n start_time=check.opt_float_param(start_time, "start_time"),\n end_time=check.opt_float_param(end_time, "end_time"),\n )\n\n\nclass DagsterRunSerializer(NamedTupleSerializer["DagsterRun"]):\n # serdes log\n # * removed reexecution_config - serdes logic expected to strip unknown keys so no need to preserve\n # * added pipeline_snapshot_id\n # * renamed previous_run_id -> parent_run_id, added root_run_id\n # * added execution_plan_snapshot_id\n # * removed selector\n # * added solid_subset\n # * renamed solid_subset -> solid_selection, added solids_to_execute\n # * renamed environment_dict -> run_config\n # * added asset_selection\n # * added has_repository_load_data\n def before_unpack(self, context, unpacked_dict: Dict[str, Any]) -> Dict[str, Any]:\n # back compat for environment dict => run_config\n if "environment_dict" in unpacked_dict:\n check.invariant(\n unpacked_dict.get("run_config") is None,\n "Cannot set both run_config and environment_dict. 
Use run_config parameter.",\n )\n unpacked_dict["run_config"] = unpacked_dict["environment_dict"]\n del unpacked_dict["environment_dict"]\n\n # back compat for previous_run_id => parent_run_id, root_run_id\n if "previous_run_id" in unpacked_dict and not (\n "parent_run_id" in unpacked_dict and "root_run_id" in unpacked_dict\n ):\n unpacked_dict["parent_run_id"] = unpacked_dict["previous_run_id"]\n unpacked_dict["root_run_id"] = unpacked_dict["previous_run_id"]\n del unpacked_dict["previous_run_id"]\n\n # back compat for selector => pipeline_name, solids_to_execute\n if "selector" in unpacked_dict:\n selector = unpacked_dict["selector"]\n\n if not isinstance(selector, ExecutionSelector):\n check.failed(f"unexpected entry for 'select', {selector}")\n selector_name = selector.name\n selector_subset = selector.solid_subset\n\n job_name = unpacked_dict.get("pipeline_name")\n check.invariant(\n job_name is None or selector_name == job_name,\n f"Conflicting pipeline name {job_name} in arguments to PipelineRun: "\n f"selector was passed with pipeline {selector_name}",\n )\n if job_name is None:\n unpacked_dict["pipeline_name"] = selector_name\n\n solids_to_execute = unpacked_dict.get("solids_to_execute")\n check.invariant(\n solids_to_execute is None\n or (selector_subset and set(selector_subset) == solids_to_execute),\n f"Conflicting solids_to_execute {solids_to_execute} in arguments to"\n f" PipelineRun: selector was passed with subset {selector_subset}",\n )\n # for old runs that only have selector but no solids_to_execute\n if solids_to_execute is None:\n solids_to_execute = frozenset(selector_subset) if selector_subset else None\n\n # back compat for solid_subset => solids_to_execute\n if "solid_subset" in unpacked_dict:\n unpacked_dict["solids_to_execute"] = unpacked_dict["solid_subset"]\n del unpacked_dict["solid_subset"]\n\n return unpacked_dict\n\n\n
[docs]@whitelist_for_serdes(\n serializer=DagsterRunSerializer,\n # DagsterRun is serialized as PipelineRun so that it can be read by older (pre 0.13.x) version\n # of Dagster, but is read back in as a DagsterRun.\n storage_name="PipelineRun",\n old_fields={"mode": None},\n storage_field_names={\n "job_name": "pipeline_name",\n "job_snapshot_id": "pipeline_snapshot_id",\n "external_job_origin": "external_pipeline_origin",\n "job_code_origin": "pipeline_code_origin",\n "op_selection": "solid_selection",\n "resolved_op_selection": "solids_to_execute",\n },\n)\nclass DagsterRun(\n NamedTuple(\n "_DagsterRun",\n [\n ("job_name", PublicAttr[str]),\n ("run_id", str),\n ("run_config", Mapping[str, object]),\n ("asset_selection", Optional[AbstractSet[AssetKey]]),\n ("asset_check_selection", Optional[AbstractSet[AssetCheckKey]]),\n ("op_selection", Optional[Sequence[str]]),\n ("resolved_op_selection", Optional[AbstractSet[str]]),\n ("step_keys_to_execute", Optional[Sequence[str]]),\n ("status", DagsterRunStatus),\n ("tags", Mapping[str, str]),\n ("root_run_id", Optional[str]),\n ("parent_run_id", Optional[str]),\n ("job_snapshot_id", Optional[str]),\n ("execution_plan_snapshot_id", Optional[str]),\n ("external_job_origin", Optional["ExternalJobOrigin"]),\n ("job_code_origin", Optional[JobPythonOrigin]),\n ("has_repository_load_data", bool),\n ],\n )\n):\n """Serializable internal representation of a dagster run, as stored in a\n :py:class:`~dagster._core.storage.runs.RunStorage`.\n """\n\n def __new__(\n cls,\n job_name: str,\n run_id: Optional[str] = None,\n run_config: Optional[Mapping[str, object]] = None,\n asset_selection: Optional[AbstractSet[AssetKey]] = None,\n asset_check_selection: Optional[AbstractSet[AssetCheckKey]] = None,\n op_selection: Optional[Sequence[str]] = None,\n resolved_op_selection: Optional[AbstractSet[str]] = None,\n step_keys_to_execute: Optional[Sequence[str]] = None,\n status: Optional[DagsterRunStatus] = None,\n tags: Optional[Mapping[str, str]] = None,\n root_run_id: Optional[str] = None,\n parent_run_id: Optional[str] = None,\n job_snapshot_id: Optional[str] = None,\n execution_plan_snapshot_id: Optional[str] = None,\n external_job_origin: Optional["ExternalJobOrigin"] = None,\n job_code_origin: Optional[JobPythonOrigin] = None,\n has_repository_load_data: Optional[bool] = None,\n ):\n check.invariant(\n (root_run_id is not None and parent_run_id is not None)\n or (root_run_id is None and parent_run_id is None),\n "Must set both root_run_id and parent_run_id when creating a PipelineRun that "\n "belongs to a run group",\n )\n # a set which contains the names of the ops to execute\n resolved_op_selection = check.opt_nullable_set_param(\n resolved_op_selection, "resolved_op_selection", of_type=str\n )\n # a list of op queries provided by the user\n # possible to be None when resolved_op_selection is set by the user directly\n op_selection = check.opt_nullable_sequence_param(op_selection, "op_selection", of_type=str)\n check.opt_nullable_sequence_param(step_keys_to_execute, "step_keys_to_execute", of_type=str)\n\n asset_selection = check.opt_nullable_set_param(\n asset_selection, "asset_selection", of_type=AssetKey\n )\n asset_check_selection = check.opt_nullable_set_param(\n asset_check_selection, "asset_check_selection", of_type=AssetCheckKey\n )\n\n # Placing this with the other imports causes a cyclic import\n # https://github.com/dagster-io/dagster/issues/3181\n from dagster._core.host_representation.origin import ExternalJobOrigin\n\n if status == 
DagsterRunStatus.QUEUED:\n check.inst_param(\n external_job_origin,\n "external_job_origin",\n ExternalJobOrigin,\n "external_job_origin is required for queued runs",\n )\n\n if run_id is None:\n run_id = make_new_run_id()\n\n return super(DagsterRun, cls).__new__(\n cls,\n job_name=check.str_param(job_name, "job_name"),\n run_id=check.str_param(run_id, "run_id"),\n run_config=check.opt_mapping_param(run_config, "run_config", key_type=str),\n op_selection=op_selection,\n asset_selection=asset_selection,\n asset_check_selection=asset_check_selection,\n resolved_op_selection=resolved_op_selection,\n step_keys_to_execute=step_keys_to_execute,\n status=check.opt_inst_param(\n status, "status", DagsterRunStatus, DagsterRunStatus.NOT_STARTED\n ),\n tags=check.opt_mapping_param(tags, "tags", key_type=str, value_type=str),\n root_run_id=check.opt_str_param(root_run_id, "root_run_id"),\n parent_run_id=check.opt_str_param(parent_run_id, "parent_run_id"),\n job_snapshot_id=check.opt_str_param(job_snapshot_id, "job_snapshot_id"),\n execution_plan_snapshot_id=check.opt_str_param(\n execution_plan_snapshot_id, "execution_plan_snapshot_id"\n ),\n external_job_origin=check.opt_inst_param(\n external_job_origin, "external_job_origin", ExternalJobOrigin\n ),\n job_code_origin=check.opt_inst_param(\n job_code_origin, "job_code_origin", JobPythonOrigin\n ),\n has_repository_load_data=check.opt_bool_param(\n has_repository_load_data, "has_repository_load_data", default=False\n ),\n )\n\n def with_status(self, status: DagsterRunStatus) -> Self:\n if status == DagsterRunStatus.QUEUED:\n # Placing this with the other imports causes a cyclic import\n # https://github.com/dagster-io/dagster/issues/3181\n from dagster._core.host_representation.origin import ExternalJobOrigin\n\n check.inst(\n self.external_job_origin,\n ExternalJobOrigin,\n "external_pipeline_origin is required for queued runs",\n )\n\n return self._replace(status=status)\n\n def with_job_origin(self, origin: "ExternalJobOrigin") -> Self:\n from dagster._core.host_representation.origin import ExternalJobOrigin\n\n check.inst_param(origin, "origin", ExternalJobOrigin)\n return self._replace(external_job_origin=origin)\n\n def with_tags(self, tags: Mapping[str, str]) -> Self:\n return self._replace(tags=tags)\n\n def get_root_run_id(self) -> Optional[str]:\n return self.tags.get(ROOT_RUN_ID_TAG)\n\n def get_parent_run_id(self) -> Optional[str]:\n return self.tags.get(PARENT_RUN_ID_TAG)\n\n def tags_for_storage(self) -> Mapping[str, str]:\n repository_tags = {}\n if self.external_job_origin:\n # tag the run with a label containing the repository name / location name, to allow for\n # per-repository filtering of runs from the Dagster UI.\n repository_tags[REPOSITORY_LABEL_TAG] = (\n self.external_job_origin.external_repository_origin.get_label()\n )\n\n if not self.tags:\n return repository_tags\n\n return {**repository_tags, **self.tags}\n\n @public\n @property\n def is_finished(self) -> bool:\n """bool: If this run has completely finished execution."""\n return self.status in FINISHED_STATUSES\n\n @public\n @property\n def is_success(self) -> bool:\n """bool: If this run has successfully finished executing."""\n return self.status == DagsterRunStatus.SUCCESS\n\n @public\n @property\n def is_failure(self) -> bool:\n """bool: If this run has failed."""\n return self.status == DagsterRunStatus.FAILURE\n\n @public\n @property\n def is_failure_or_canceled(self) -> bool:\n """bool: If this run has either failed or was canceled."""\n return self.status == 
DagsterRunStatus.FAILURE or self.status == DagsterRunStatus.CANCELED\n\n @public\n @property\n def is_resume_retry(self) -> bool:\n """bool: If this run was created from retrying another run from the point of failure."""\n return self.tags.get(RESUME_RETRY_TAG) == "true"\n\n @property\n def previous_run_id(self) -> Optional[str]:\n # Compat\n return self.parent_run_id\n\n @staticmethod\n def tags_for_schedule(schedule) -> Mapping[str, str]:\n return {SCHEDULE_NAME_TAG: schedule.name}\n\n @staticmethod\n def tags_for_sensor(sensor) -> Mapping[str, str]:\n return {SENSOR_NAME_TAG: sensor.name}\n\n @staticmethod\n def tags_for_backfill_id(backfill_id: str) -> Mapping[str, str]:\n return {BACKFILL_ID_TAG: backfill_id}
\n\n\nclass RunsFilterSerializer(NamedTupleSerializer["RunsFilter"]):\n def before_unpack(\n self,\n context,\n unpacked_dict: Dict[str, Any],\n ) -> Dict[str, Any]:\n # We store empty run ids as [] but only accept None\n if "run_ids" in unpacked_dict and unpacked_dict["run_ids"] == []:\n unpacked_dict["run_ids"] = None\n return unpacked_dict\n\n\n
[docs]@whitelist_for_serdes(\n serializer=RunsFilterSerializer,\n old_storage_names={"PipelineRunsFilter"},\n storage_field_names={"job_name": "pipeline_name"},\n)\nclass RunsFilter(\n NamedTuple(\n "_RunsFilter",\n [\n ("run_ids", Sequence[str]),\n ("job_name", Optional[str]),\n ("statuses", Sequence[DagsterRunStatus]),\n ("tags", Mapping[str, Union[str, Sequence[str]]]),\n ("snapshot_id", Optional[str]),\n ("updated_after", Optional[datetime]),\n ("updated_before", Optional[datetime]),\n ("created_after", Optional[datetime]),\n ("created_before", Optional[datetime]),\n ],\n )\n):\n """Defines a filter across job runs, for use when querying storage directly.\n\n Each field of the RunsFilter represents a logical AND with each other. For\n example, if you specify job_name and tags, then you will receive only runs\n with the specified job_name AND the specified tags. If left blank, then\n all values will be permitted for that field.\n\n Args:\n run_ids (Optional[List[str]]): A list of job run_id values.\n job_name (Optional[str]):\n Name of the job to query for. If blank, all job_names will be accepted.\n statuses (Optional[List[DagsterRunStatus]]):\n A list of run statuses to filter by. If blank, all run statuses will be allowed.\n tags (Optional[Dict[str, Union[str, List[str]]]]):\n A dictionary of run tags to query by. All tags specified here must be present for a given run to pass the filter.\n snapshot_id (Optional[str]): The ID of the job snapshot to query for. Intended for internal use.\n updated_after (Optional[DateTime]): Filter by runs that were last updated before this datetime.\n created_before (Optional[DateTime]): Filter by runs that were created before this datetime.\n\n """\n\n def __new__(\n cls,\n run_ids: Optional[Sequence[str]] = None,\n job_name: Optional[str] = None,\n statuses: Optional[Sequence[DagsterRunStatus]] = None,\n tags: Optional[Mapping[str, Union[str, Sequence[str]]]] = None,\n snapshot_id: Optional[str] = None,\n updated_after: Optional[datetime] = None,\n updated_before: Optional[datetime] = None,\n created_after: Optional[datetime] = None,\n created_before: Optional[datetime] = None,\n ):\n check.invariant(run_ids != [], "When filtering on run ids, a non-empty list must be used.")\n\n return super(RunsFilter, cls).__new__(\n cls,\n run_ids=check.opt_sequence_param(run_ids, "run_ids", of_type=str),\n job_name=check.opt_str_param(job_name, "job_name"),\n statuses=check.opt_sequence_param(statuses, "statuses", of_type=DagsterRunStatus),\n tags=check.opt_mapping_param(tags, "tags", key_type=str),\n snapshot_id=check.opt_str_param(snapshot_id, "snapshot_id"),\n updated_after=check.opt_inst_param(updated_after, "updated_after", datetime),\n updated_before=check.opt_inst_param(updated_before, "updated_before", datetime),\n created_after=check.opt_inst_param(created_after, "created_after", datetime),\n created_before=check.opt_inst_param(created_before, "created_before", datetime),\n )\n\n @staticmethod\n def for_schedule(schedule: "ExternalSchedule") -> "RunsFilter":\n return RunsFilter(tags=DagsterRun.tags_for_schedule(schedule))\n\n @staticmethod\n def for_sensor(sensor: "ExternalSensor") -> "RunsFilter":\n return RunsFilter(tags=DagsterRun.tags_for_sensor(sensor))\n\n @staticmethod\n def for_backfill(backfill_id: str) -> "RunsFilter":\n return RunsFilter(tags=DagsterRun.tags_for_backfill_id(backfill_id))
\n\n\nclass JobBucket(NamedTuple):\n job_names: List[str]\n bucket_limit: Optional[int]\n\n\nclass TagBucket(NamedTuple):\n tag_key: str\n tag_values: List[str]\n bucket_limit: Optional[int]\n\n\n
[docs]class RunRecord(\n NamedTuple(\n "_RunRecord",\n [\n ("storage_id", int),\n ("dagster_run", DagsterRun),\n ("create_timestamp", datetime),\n ("update_timestamp", datetime),\n ("start_time", Optional[float]),\n ("end_time", Optional[float]),\n ],\n )\n):\n """Internal representation of a run record, as stored in a\n :py:class:`~dagster._core.storage.runs.RunStorage`.\n\n Users should not invoke this class directly.\n """\n\n def __new__(\n cls,\n storage_id: int,\n dagster_run: DagsterRun,\n create_timestamp: datetime,\n update_timestamp: datetime,\n start_time: Optional[float] = None,\n end_time: Optional[float] = None,\n ):\n return super(RunRecord, cls).__new__(\n cls,\n storage_id=check.int_param(storage_id, "storage_id"),\n dagster_run=check.inst_param(dagster_run, "dagster_run", DagsterRun),\n create_timestamp=check.inst_param(create_timestamp, "create_timestamp", datetime),\n update_timestamp=check.inst_param(update_timestamp, "update_timestamp", datetime),\n # start_time and end_time fields will be populated once the run has started and ended, respectively, but will be None beforehand.\n start_time=check.opt_float_param(start_time, "start_time"),\n end_time=check.opt_float_param(end_time, "end_time"),\n )
\n\n\n@whitelist_for_serdes\nclass RunPartitionData(\n NamedTuple(\n "_RunPartitionData",\n [\n ("run_id", str),\n ("partition", str),\n ("status", DagsterRunStatus),\n ("start_time", Optional[float]),\n ("end_time", Optional[float]),\n ],\n )\n):\n def __new__(\n cls,\n run_id: str,\n partition: str,\n status: DagsterRunStatus,\n start_time: Optional[float],\n end_time: Optional[float],\n ):\n return super(RunPartitionData, cls).__new__(\n cls,\n run_id=check.str_param(run_id, "run_id"),\n partition=check.str_param(partition, "partition"),\n status=check.inst_param(status, "status", DagsterRunStatus),\n start_time=check.opt_inst(start_time, float),\n end_time=check.opt_inst(end_time, float),\n )\n\n\n###################################################################################################\n# GRAVEYARD\n#\n# -|-\n# |\n# _-'~~~~~`-_\n# .' '.\n# | R I P |\n# | |\n# | Execution |\n# | Selector |\n# | |\n# | |\n###################################################################################################\n\n\n@whitelist_for_serdes\nclass ExecutionSelector(\n NamedTuple("_ExecutionSelector", [("name", str), ("solid_subset", Optional[Sequence[str]])])\n):\n """Kept here to maintain loading of PipelineRuns from when it was still alive."""\n\n def __new__(cls, name: str, solid_subset: Optional[Sequence[str]] = None):\n return super(ExecutionSelector, cls).__new__(\n cls,\n name=check.str_param(name, "name"),\n solid_subset=(\n None\n if solid_subset is None\n else check.sequence_param(solid_subset, "solid_subset", of_type=str)\n ),\n )\n
", "current_page_name": "_modules/dagster/_core/storage/dagster_run", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.dagster_run"}, "event_log": {"base": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.event_log.base

\nimport base64\nfrom abc import ABC, abstractmethod\nfrom enum import Enum\nfrom typing import (\n    TYPE_CHECKING,\n    Iterable,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n    Union,\n)\n\nimport dagster._check as check\nfrom dagster._core.assets import AssetDetails\nfrom dagster._core.definitions.asset_check_spec import AssetCheckKey\nfrom dagster._core.definitions.events import AssetKey\nfrom dagster._core.event_api import EventHandlerFn, EventLogRecord, EventRecordsFilter\nfrom dagster._core.events import DagsterEventType\nfrom dagster._core.execution.stats import (\n    RunStepKeyStatsSnapshot,\n    build_run_stats_from_events,\n    build_run_step_stats_from_events,\n)\nfrom dagster._core.instance import MayHaveInstanceWeakref, T_DagsterInstance\nfrom dagster._core.storage.asset_check_execution_record import AssetCheckExecutionRecord\nfrom dagster._core.storage.dagster_run import DagsterRunStatsSnapshot\nfrom dagster._core.storage.sql import AlembicVersion\nfrom dagster._seven import json\nfrom dagster._utils import PrintFn\nfrom dagster._utils.concurrency import ConcurrencyClaimStatus, ConcurrencyKeyInfo\n\nif TYPE_CHECKING:\n    from dagster._core.events.log import EventLogEntry\n    from dagster._core.storage.partition_status_cache import AssetStatusCacheValue\n\n\nclass EventLogConnection(NamedTuple):\n    records: Sequence[EventLogRecord]\n    cursor: str\n    has_more: bool\n\n\nclass EventLogCursorType(Enum):\n    OFFSET = "OFFSET"\n    STORAGE_ID = "STORAGE_ID"\n\n\nclass EventLogCursor(NamedTuple):\n    """Representation of an event record cursor, keeping track of the log query state."""\n\n    cursor_type: EventLogCursorType\n    value: int\n\n    def is_offset_cursor(self) -> bool:\n        return self.cursor_type == EventLogCursorType.OFFSET\n\n    def is_id_cursor(self) -> bool:\n        return self.cursor_type == EventLogCursorType.STORAGE_ID\n\n    def offset(self) -> int:\n        check.invariant(self.cursor_type == EventLogCursorType.OFFSET)\n        return max(0, int(self.value))\n\n    def storage_id(self) -> int:\n        check.invariant(self.cursor_type == EventLogCursorType.STORAGE_ID)\n        return int(self.value)\n\n    def __str__(self) -> str:\n        return self.to_string()\n\n    def to_string(self) -> str:\n        raw = json.dumps({"type": self.cursor_type.value, "value": self.value})\n        return base64.b64encode(bytes(raw, encoding="utf-8")).decode("utf-8")\n\n    @staticmethod\n    def parse(cursor_str: str) -> "EventLogCursor":\n        raw = json.loads(base64.b64decode(cursor_str).decode("utf-8"))\n        return EventLogCursor(EventLogCursorType(raw["type"]), raw["value"])\n\n    @staticmethod\n    def from_offset(offset: int) -> "EventLogCursor":\n        return EventLogCursor(EventLogCursorType.OFFSET, offset)\n\n    @staticmethod\n    def from_storage_id(storage_id: int) -> "EventLogCursor":\n        return EventLogCursor(EventLogCursorType.STORAGE_ID, storage_id)\n\n\nclass AssetEntry(\n    NamedTuple(\n        "_AssetEntry",\n        [\n            ("asset_key", AssetKey),\n            ("last_materialization_record", Optional[EventLogRecord]),\n            ("last_run_id", Optional[str]),\n            ("asset_details", Optional[AssetDetails]),\n            ("cached_status", Optional["AssetStatusCacheValue"]),\n        ],\n    )\n):\n    def __new__(\n        cls,\n        asset_key: AssetKey,\n        last_materialization_record: Optional[EventLogRecord] = None,\n        last_run_id: 
Optional[str] = None,\n        asset_details: Optional[AssetDetails] = None,\n        cached_status: Optional["AssetStatusCacheValue"] = None,\n    ):\n        from dagster._core.storage.partition_status_cache import AssetStatusCacheValue\n\n        return super(AssetEntry, cls).__new__(\n            cls,\n            asset_key=check.inst_param(asset_key, "asset_key", AssetKey),\n            last_materialization_record=check.opt_inst_param(\n                last_materialization_record, "last_materialization_record", EventLogRecord\n            ),\n            last_run_id=check.opt_str_param(last_run_id, "last_run_id"),\n            asset_details=check.opt_inst_param(asset_details, "asset_details", AssetDetails),\n            cached_status=check.opt_inst_param(\n                cached_status, "cached_status", AssetStatusCacheValue\n            ),\n        )\n\n    @property\n    def last_materialization(self) -> Optional["EventLogEntry"]:\n        if self.last_materialization_record is None:\n            return None\n        return self.last_materialization_record.event_log_entry\n\n    @property\n    def last_materialization_storage_id(self) -> Optional[int]:\n        if self.last_materialization_record is None:\n            return None\n        return self.last_materialization_record.storage_id\n\n\n
[docs]class AssetRecord(NamedTuple):\n """Internal representation of an asset record, as stored in a :py:class:`~dagster._core.storage.event_log.EventLogStorage`.\n\n Users should not invoke this class directly.\n """\n\n storage_id: int\n asset_entry: AssetEntry
\n\n\n
[docs]class EventLogStorage(ABC, MayHaveInstanceWeakref[T_DagsterInstance]):\n """Abstract base class for storing structured event logs from pipeline runs.\n\n Note that event log storages using SQL databases as backing stores should implement\n :py:class:`~dagster._core.storage.event_log.SqlEventLogStorage`.\n\n Users should not directly instantiate concrete subclasses of this class; they are instantiated\n by internal machinery when ``dagster-webserver`` and ``dagster-graphql`` load, based on the values in the\n ``dagster.yaml`` file in ``$DAGSTER_HOME``. Configuration of concrete subclasses of this class\n should be done by setting values in that file.\n """\n\n def get_logs_for_run(\n self,\n run_id: str,\n cursor: Optional[Union[str, int]] = None,\n of_type: Optional[Union[DagsterEventType, Set[DagsterEventType]]] = None,\n limit: Optional[int] = None,\n ascending: bool = True,\n ) -> Sequence["EventLogEntry"]:\n """Get all of the logs corresponding to a run.\n\n Args:\n run_id (str): The id of the run for which to fetch logs.\n cursor (Optional[Union[str, int]]): Cursor value to track paginated queries. Legacy\n support for integer offset cursors.\n of_type (Optional[DagsterEventType]): the dagster event type to filter the logs.\n limit (Optional[int]): Max number of records to return.\n """\n if isinstance(cursor, int):\n cursor = EventLogCursor.from_offset(cursor + 1).to_string()\n records = self.get_records_for_run(\n run_id, cursor, of_type, limit, ascending=ascending\n ).records\n return [record.event_log_entry for record in records]\n\n @abstractmethod\n def get_records_for_run(\n self,\n run_id: str,\n cursor: Optional[str] = None,\n of_type: Optional[Union[DagsterEventType, Set[DagsterEventType]]] = None,\n limit: Optional[int] = None,\n ascending: bool = True,\n ) -> EventLogConnection:\n """Get all of the event log records corresponding to a run.\n\n Args:\n run_id (str): The id of the run for which to fetch logs.\n cursor (Optional[str]): Cursor value to track paginated queries.\n of_type (Optional[DagsterEventType]): the dagster event type to filter the logs.\n limit (Optional[int]): Max number of records to return.\n """\n\n def get_stats_for_run(self, run_id: str) -> DagsterRunStatsSnapshot:\n """Get a summary of events that have ocurred in a run."""\n return build_run_stats_from_events(run_id, self.get_logs_for_run(run_id))\n\n def get_step_stats_for_run(\n self, run_id: str, step_keys: Optional[Sequence[str]] = None\n ) -> Sequence[RunStepKeyStatsSnapshot]:\n """Get per-step stats for a pipeline run."""\n logs = self.get_logs_for_run(run_id)\n if step_keys:\n logs = [\n event\n for event in logs\n if event.is_dagster_event and event.get_dagster_event().step_key in step_keys\n ]\n\n return build_run_step_stats_from_events(run_id, logs)\n\n @abstractmethod\n def store_event(self, event: "EventLogEntry") -> None:\n """Store an event corresponding to a pipeline run.\n\n Args:\n event (EventLogEntry): The event to store.\n """\n\n @abstractmethod\n def delete_events(self, run_id: str) -> None:\n """Remove events for a given run id."""\n\n @abstractmethod\n def upgrade(self) -> None:\n """This method should perform any schema migrations necessary to bring an\n out-of-date instance of the storage up to date.\n """\n\n @abstractmethod\n def reindex_events(self, print_fn: Optional[PrintFn] = None, force: bool = False) -> None:\n """Call this method to run any data migrations across the event_log tables."""\n\n @abstractmethod\n def reindex_assets(self, print_fn: 
Optional[PrintFn] = None, force: bool = False) -> None:\n """Call this method to run any data migrations across the asset tables."""\n\n @abstractmethod\n def wipe(self) -> None:\n """Clear the log storage."""\n\n @abstractmethod\n def watch(self, run_id: str, cursor: Optional[str], callback: EventHandlerFn) -> None:\n """Call this method to start watching."""\n\n @abstractmethod\n def end_watch(self, run_id: str, handler: EventHandlerFn) -> None:\n """Call this method to stop watching."""\n\n @property\n @abstractmethod\n def is_persistent(self) -> bool:\n """bool: Whether the storage is persistent."""\n\n def dispose(self) -> None:\n """Explicit lifecycle management."""\n\n def optimize_for_webserver(self, statement_timeout: int, pool_recycle: int) -> None:\n """Allows for optimizing database connection / use in the context of a long lived webserver process."""\n\n @abstractmethod\n def get_event_records(\n self,\n event_records_filter: EventRecordsFilter,\n limit: Optional[int] = None,\n ascending: bool = False,\n ) -> Sequence[EventLogRecord]:\n pass\n\n def supports_event_consumer_queries(self) -> bool:\n return False\n\n def get_logs_for_all_runs_by_log_id(\n self,\n after_cursor: int = -1,\n dagster_event_type: Optional[Union[DagsterEventType, Set[DagsterEventType]]] = None,\n limit: Optional[int] = None,\n ) -> Mapping[int, "EventLogEntry"]:\n """Get event records across all runs. Only supported for non sharded sql storage."""\n raise NotImplementedError()\n\n def get_maximum_record_id(self) -> Optional[int]:\n """Get the current greatest record id in the event log. Only supported for non sharded sql storage."""\n raise NotImplementedError()\n\n @abstractmethod\n def can_cache_asset_status_data(self) -> bool:\n pass\n\n @abstractmethod\n def wipe_asset_cached_status(self, asset_key: AssetKey) -> None:\n pass\n\n @abstractmethod\n def get_asset_records(\n self, asset_keys: Optional[Sequence[AssetKey]] = None\n ) -> Sequence[AssetRecord]:\n pass\n\n @abstractmethod\n def has_asset_key(self, asset_key: AssetKey) -> bool:\n pass\n\n @abstractmethod\n def all_asset_keys(self) -> Sequence[AssetKey]:\n pass\n\n @abstractmethod\n def update_asset_cached_status_data(\n self, asset_key: AssetKey, cache_values: "AssetStatusCacheValue"\n ) -> None:\n pass\n\n def get_asset_keys(\n self,\n prefix: Optional[Sequence[str]] = None,\n limit: Optional[int] = None,\n cursor: Optional[str] = None,\n ) -> Sequence[AssetKey]:\n # base implementation of get_asset_keys, using the existing `all_asset_keys` and doing the\n # filtering in-memory\n asset_keys = sorted(self.all_asset_keys(), key=str)\n if prefix:\n asset_keys = [\n asset_key for asset_key in asset_keys if asset_key.path[: len(prefix)] == prefix\n ]\n if cursor:\n cursor_asset = AssetKey.from_db_string(cursor)\n if cursor_asset and cursor_asset in asset_keys:\n idx = asset_keys.index(cursor_asset)\n asset_keys = asset_keys[idx + 1 :]\n if limit:\n asset_keys = asset_keys[:limit]\n return asset_keys\n\n @abstractmethod\n def get_latest_materialization_events(\n self, asset_keys: Iterable[AssetKey]\n ) -> Mapping[AssetKey, Optional["EventLogEntry"]]:\n pass\n\n def supports_add_asset_event_tags(self) -> bool:\n return False\n\n def add_asset_event_tags(\n self,\n event_id: int,\n event_timestamp: float,\n asset_key: AssetKey,\n new_tags: Mapping[str, str],\n ) -> None:\n raise NotImplementedError()\n\n @abstractmethod\n def get_event_tags_for_asset(\n self,\n asset_key: AssetKey,\n filter_tags: Optional[Mapping[str, str]] = None,\n 
filter_event_id: Optional[int] = None,\n ) -> Sequence[Mapping[str, str]]:\n pass\n\n @abstractmethod\n def wipe_asset(self, asset_key: AssetKey) -> None:\n """Remove asset index history from event log for given asset_key."""\n\n @abstractmethod\n def get_materialized_partitions(\n self,\n asset_key: AssetKey,\n before_cursor: Optional[int] = None,\n after_cursor: Optional[int] = None,\n ) -> Set[str]:\n pass\n\n @abstractmethod\n def get_materialization_count_by_partition(\n self, asset_keys: Sequence[AssetKey], after_cursor: Optional[int] = None\n ) -> Mapping[AssetKey, Mapping[str, int]]:\n pass\n\n @abstractmethod\n def get_latest_storage_id_by_partition(\n self, asset_key: AssetKey, event_type: DagsterEventType\n ) -> Mapping[str, int]:\n pass\n\n @abstractmethod\n def get_latest_tags_by_partition(\n self,\n asset_key: AssetKey,\n event_type: DagsterEventType,\n tag_keys: Sequence[str],\n asset_partitions: Optional[Sequence[str]] = None,\n before_cursor: Optional[int] = None,\n after_cursor: Optional[int] = None,\n ) -> Mapping[str, Mapping[str, str]]:\n pass\n\n @abstractmethod\n def get_latest_asset_partition_materialization_attempts_without_materializations(\n self, asset_key: AssetKey\n ) -> Mapping[str, Tuple[str, int]]:\n pass\n\n @abstractmethod\n def get_dynamic_partitions(self, partitions_def_name: str) -> Sequence[str]:\n """Get the list of partition keys for a dynamic partitions definition."""\n raise NotImplementedError()\n\n @abstractmethod\n def has_dynamic_partition(self, partitions_def_name: str, partition_key: str) -> bool:\n """Check if a dynamic partition exists."""\n raise NotImplementedError()\n\n @abstractmethod\n def add_dynamic_partitions(\n self, partitions_def_name: str, partition_keys: Sequence[str]\n ) -> None:\n """Add a partition for the specified dynamic partitions definition."""\n raise NotImplementedError()\n\n @abstractmethod\n def delete_dynamic_partition(self, partitions_def_name: str, partition_key: str) -> None:\n """Delete a partition for the specified dynamic partitions definition."""\n raise NotImplementedError()\n\n def alembic_version(self) -> Optional[AlembicVersion]:\n return None\n\n @property\n def is_run_sharded(self) -> bool:\n """Indicates that the EventLogStoarge is sharded."""\n return False\n\n @property\n def supports_global_concurrency_limits(self) -> bool:\n """Indicates that the EventLogStorage supports global concurrency limits."""\n return False\n\n @abstractmethod\n def set_concurrency_slots(self, concurrency_key: str, num: int) -> None:\n """Allocate concurrency slots for the given concurrency key."""\n raise NotImplementedError()\n\n @abstractmethod\n def get_concurrency_keys(self) -> Set[str]:\n """Get the set of concurrency limited keys."""\n raise NotImplementedError()\n\n @abstractmethod\n def get_concurrency_info(self, concurrency_key: str) -> ConcurrencyKeyInfo:\n """Get concurrency info for key."""\n raise NotImplementedError()\n\n @abstractmethod\n def claim_concurrency_slot(\n self, concurrency_key: str, run_id: str, step_key: str, priority: Optional[int] = None\n ) -> ConcurrencyClaimStatus:\n """Claim concurrency slots for step."""\n raise NotImplementedError()\n\n @abstractmethod\n def check_concurrency_claim(\n self, concurrency_key: str, run_id: str, step_key: str\n ) -> ConcurrencyClaimStatus:\n """Claim concurrency slots for step."""\n raise NotImplementedError()\n\n @abstractmethod\n def get_concurrency_run_ids(self) -> Set[str]:\n """Get a list of run_ids that are occupying or waiting for a concurrency key 
slot."""\n raise NotImplementedError()\n\n @abstractmethod\n def free_concurrency_slots_for_run(self, run_id: str) -> None:\n """Frees concurrency slots for a given run."""\n raise NotImplementedError()\n\n @abstractmethod\n def free_concurrency_slot_for_step(self, run_id: str, step_key: str) -> None:\n """Frees concurrency slots for a given run/step."""\n raise NotImplementedError()\n\n @property\n def supports_asset_checks(self):\n return True\n\n @abstractmethod\n def get_asset_check_execution_history(\n self,\n check_key: AssetCheckKey,\n limit: int,\n cursor: Optional[int] = None,\n ) -> Sequence[AssetCheckExecutionRecord]:\n """Get executions for one asset check, sorted by recency."""\n pass\n\n @abstractmethod\n def get_latest_asset_check_execution_by_key(\n self, check_keys: Sequence[AssetCheckKey]\n ) -> Mapping[AssetCheckKey, AssetCheckExecutionRecord]:\n """Get the latest executions for a list of asset checks."""\n pass
\n
", "current_page_name": "_modules/dagster/_core/storage/event_log/base", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.event_log.base"}, "sql_event_log": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.event_log.sql_event_log

\nimport logging\nfrom abc import abstractmethod\nfrom collections import OrderedDict, defaultdict\nfrom datetime import datetime\nfrom typing import (\n    TYPE_CHECKING,\n    Any,\n    ContextManager,\n    Dict,\n    Iterable,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n    Union,\n    cast,\n)\n\nimport pendulum\nimport sqlalchemy as db\nimport sqlalchemy.exc as db_exc\nfrom sqlalchemy.engine import Connection\nfrom typing_extensions import TypeAlias\n\nimport dagster._check as check\nimport dagster._seven as seven\nfrom dagster._core.assets import AssetDetails\nfrom dagster._core.definitions.asset_check_evaluation import (\n    AssetCheckEvaluation,\n    AssetCheckEvaluationPlanned,\n)\nfrom dagster._core.definitions.asset_check_spec import AssetCheckKey\nfrom dagster._core.definitions.events import AssetKey, AssetMaterialization\nfrom dagster._core.errors import (\n    DagsterEventLogInvalidForRun,\n    DagsterInvalidInvocationError,\n    DagsterInvariantViolationError,\n)\nfrom dagster._core.event_api import RunShardedEventsCursor\nfrom dagster._core.events import ASSET_CHECK_EVENTS, ASSET_EVENTS, MARKER_EVENTS, DagsterEventType\nfrom dagster._core.events.log import EventLogEntry\nfrom dagster._core.execution.stats import RunStepKeyStatsSnapshot, build_run_step_stats_from_events\nfrom dagster._core.storage.asset_check_execution_record import (\n    AssetCheckExecutionRecord,\n    AssetCheckExecutionRecordStatus,\n)\nfrom dagster._core.storage.sql import SqlAlchemyQuery, SqlAlchemyRow\nfrom dagster._core.storage.sqlalchemy_compat import (\n    db_case,\n    db_fetch_mappings,\n    db_select,\n    db_subquery,\n)\nfrom dagster._serdes import (\n    deserialize_value,\n    serialize_value,\n)\nfrom dagster._serdes.errors import DeserializationError\nfrom dagster._utils import (\n    PrintFn,\n    datetime_as_float,\n    utc_datetime_from_naive,\n    utc_datetime_from_timestamp,\n)\nfrom dagster._utils.concurrency import (\n    ConcurrencyClaimStatus,\n    ConcurrencyKeyInfo,\n    ConcurrencySlotStatus,\n)\n\nfrom ..dagster_run import DagsterRunStatsSnapshot\nfrom .base import (\n    AssetEntry,\n    AssetRecord,\n    EventLogConnection,\n    EventLogCursor,\n    EventLogRecord,\n    EventLogStorage,\n    EventRecordsFilter,\n)\nfrom .migration import ASSET_DATA_MIGRATIONS, ASSET_KEY_INDEX_COLS, EVENT_LOG_DATA_MIGRATIONS\nfrom .schema import (\n    AssetCheckExecutionsTable,\n    AssetEventTagsTable,\n    AssetKeyTable,\n    ConcurrencySlotsTable,\n    DynamicPartitionsTable,\n    PendingStepsTable,\n    SecondaryIndexMigrationTable,\n    SqlEventLogStorageTable,\n)\n\nif TYPE_CHECKING:\n    from dagster._core.storage.partition_status_cache import AssetStatusCacheValue\n\nMAX_CONCURRENCY_SLOTS = 1000\nMIN_ASSET_ROWS = 25\n\n# We are using third-party library objects for DB connections-- at this time, these libraries are\n# untyped. When/if we upgrade to typed variants, the `Any` here can be replaced or the alias as a\n# whole can be dropped.\nSqlDbConnection: TypeAlias = Any\n\n\n
[docs]class SqlEventLogStorage(EventLogStorage):\n """Base class for SQL backed event log storages.\n\n Distinguishes between run-based connections and index connections in order to support run-level\n sharding, while maintaining the ability to do cross-run queries\n """\n\n @abstractmethod\n def run_connection(self, run_id: Optional[str]) -> ContextManager[Connection]:\n """Context manager yielding a connection to access the event logs for a specific run.\n\n Args:\n run_id (Optional[str]): Enables those storages which shard based on run_id, e.g.,\n SqliteEventLogStorage, to connect appropriately.\n """\n\n @abstractmethod\n def index_connection(self) -> ContextManager[Connection]:\n """Context manager yielding a connection to access cross-run indexed tables."""\n\n @abstractmethod\n def upgrade(self) -> None:\n """This method should perform any schema migrations necessary to bring an\n out-of-date instance of the storage up to date.\n """\n\n @abstractmethod\n def has_table(self, table_name: str) -> bool:\n """This method checks if a table exists in the database."""\n\n def prepare_insert_event(self, event):\n """Helper method for preparing the event log SQL insertion statement. Abstracted away to\n have a single place for the logical table representation of the event, while having a way\n for SQL backends to implement different execution implementations for `store_event`. See\n the `dagster-postgres` implementation which overrides the generic SQL implementation of\n `store_event`.\n """\n dagster_event_type = None\n asset_key_str = None\n partition = None\n step_key = event.step_key\n if event.is_dagster_event:\n dagster_event_type = event.dagster_event.event_type_value\n step_key = event.dagster_event.step_key\n if event.dagster_event.asset_key:\n check.inst_param(event.dagster_event.asset_key, "asset_key", AssetKey)\n asset_key_str = event.dagster_event.asset_key.to_string()\n if event.dagster_event.partition:\n partition = event.dagster_event.partition\n\n # https://stackoverflow.com/a/54386260/324449\n return SqlEventLogStorageTable.insert().values(\n run_id=event.run_id,\n event=serialize_value(event),\n dagster_event_type=dagster_event_type,\n # Postgres requires a datetime that is in UTC but has no timezone info set\n # in order to be stored correctly\n timestamp=datetime.utcfromtimestamp(event.timestamp),\n step_key=step_key,\n asset_key=asset_key_str,\n partition=partition,\n )\n\n def has_asset_key_col(self, column_name: str) -> bool:\n with self.index_connection() as conn:\n column_names = [x.get("name") for x in db.inspect(conn).get_columns(AssetKeyTable.name)]\n return column_name in column_names\n\n def has_asset_key_index_cols(self) -> bool:\n return self.has_asset_key_col("last_materialization_timestamp")\n\n def store_asset_event(self, event: EventLogEntry, event_id: int):\n check.inst_param(event, "event", EventLogEntry)\n\n if not (event.dagster_event and event.dagster_event.asset_key):\n return\n\n # We switched to storing the entire event record of the last materialization instead of just\n # the AssetMaterialization object, so that we have access to metadata like timestamp,\n # pipeline, run_id, etc.\n #\n # This should make certain asset queries way more performant, without having to do extra\n # queries against the event log.\n #\n # This should be accompanied by a schema change in 0.12.0, renaming `last_materialization`\n # to `last_materialization_event`, for clarity. 
For now, we should do some back-compat.\n #\n # https://github.com/dagster-io/dagster/issues/3945\n\n values = self._get_asset_entry_values(event, event_id, self.has_asset_key_index_cols())\n insert_statement = AssetKeyTable.insert().values(\n asset_key=event.dagster_event.asset_key.to_string(), **values\n )\n update_statement = (\n AssetKeyTable.update()\n .values(**values)\n .where(\n AssetKeyTable.c.asset_key == event.dagster_event.asset_key.to_string(),\n )\n )\n\n with self.index_connection() as conn:\n try:\n conn.execute(insert_statement)\n except db_exc.IntegrityError:\n conn.execute(update_statement)\n\n def _get_asset_entry_values(\n self, event: EventLogEntry, event_id: int, has_asset_key_index_cols: bool\n ) -> Dict[str, Any]:\n # The AssetKeyTable contains a `last_materialization_timestamp` column that is exclusively\n # used to determine if an asset exists (last materialization timestamp > wipe timestamp).\n # This column is used nowhere else, and as of AssetObservation/AssetMaterializationPlanned\n # event creation, we want to extend this functionality to ensure that assets with any event\n # (observation, materialization, or materialization planned) yielded with timestamp\n # > wipe timestamp display in the Dagster UI.\n\n # As of the following PRs, we update last_materialization_timestamp to store the timestamp\n # of the latest asset observation, materialization, or materialization_planned that has occurred.\n # https://github.com/dagster-io/dagster/pull/6885\n # https://github.com/dagster-io/dagster/pull/7319\n\n entry_values: Dict[str, Any] = {}\n dagster_event = check.not_none(event.dagster_event)\n if dagster_event.is_step_materialization:\n entry_values.update(\n {\n "last_materialization": serialize_value(\n EventLogRecord(\n storage_id=event_id,\n event_log_entry=event,\n )\n ),\n "last_run_id": event.run_id,\n }\n )\n if has_asset_key_index_cols:\n entry_values.update(\n {\n "last_materialization_timestamp": utc_datetime_from_timestamp(\n event.timestamp\n ),\n }\n )\n elif dagster_event.is_asset_materialization_planned:\n # The AssetKeyTable also contains a `last_run_id` column that is updated upon asset\n # materialization. This column was not being used until the below PR. 
This new change\n # writes to the column upon `ASSET_MATERIALIZATION_PLANNED` events to fetch the last\n # run id for a set of assets in one roundtrip call to event log storage.\n # https://github.com/dagster-io/dagster/pull/7319\n entry_values.update({"last_run_id": event.run_id})\n if has_asset_key_index_cols:\n entry_values.update(\n {\n "last_materialization_timestamp": utc_datetime_from_timestamp(\n event.timestamp\n ),\n }\n )\n elif dagster_event.is_asset_observation:\n if has_asset_key_index_cols:\n entry_values.update(\n {\n "last_materialization_timestamp": utc_datetime_from_timestamp(\n event.timestamp\n ),\n }\n )\n\n return entry_values\n\n def supports_add_asset_event_tags(self) -> bool:\n return self.has_table(AssetEventTagsTable.name)\n\n def add_asset_event_tags(\n self,\n event_id: int,\n event_timestamp: float,\n asset_key: AssetKey,\n new_tags: Mapping[str, str],\n ) -> None:\n check.int_param(event_id, "event_id")\n check.float_param(event_timestamp, "event_timestamp")\n check.inst_param(asset_key, "asset_key", AssetKey)\n check.mapping_param(new_tags, "new_tags", key_type=str, value_type=str)\n\n if not self.supports_add_asset_event_tags():\n raise DagsterInvalidInvocationError(\n "In order to add asset event tags, you must run `dagster instance migrate` to "\n "create the AssetEventTags table."\n )\n\n current_tags_list = self.get_event_tags_for_asset(asset_key, filter_event_id=event_id)\n\n asset_key_str = asset_key.to_string()\n\n if len(current_tags_list) == 0:\n current_tags: Mapping[str, str] = {}\n else:\n current_tags = current_tags_list[0]\n\n with self.index_connection() as conn:\n current_tags_set = set(current_tags.keys())\n new_tags_set = set(new_tags.keys())\n\n existing_tags = current_tags_set & new_tags_set\n added_tags = new_tags_set.difference(existing_tags)\n\n for tag in existing_tags:\n conn.execute(\n AssetEventTagsTable.update()\n .where(\n db.and_(\n AssetEventTagsTable.c.event_id == event_id,\n AssetEventTagsTable.c.asset_key == asset_key_str,\n AssetEventTagsTable.c.key == tag,\n )\n )\n .values(value=new_tags[tag])\n )\n\n if added_tags:\n conn.execute(\n AssetEventTagsTable.insert(),\n [\n dict(\n event_id=event_id,\n asset_key=asset_key_str,\n key=tag,\n value=new_tags[tag],\n # Postgres requires a datetime that is in UTC but has no timezone info\n # set in order to be stored correctly\n event_timestamp=datetime.utcfromtimestamp(event_timestamp),\n )\n for tag in added_tags\n ],\n )\n\n def store_asset_event_tags(self, event: EventLogEntry, event_id: int) -> None:\n check.inst_param(event, "event", EventLogEntry)\n check.int_param(event_id, "event_id")\n\n if event.dagster_event and event.dagster_event.asset_key:\n if event.dagster_event.is_step_materialization:\n tags = event.dagster_event.step_materialization_data.materialization.tags\n elif event.dagster_event.is_asset_observation:\n tags = event.dagster_event.asset_observation_data.asset_observation.tags\n else:\n tags = None\n\n if not tags or not self.has_table(AssetEventTagsTable.name):\n # If tags table does not exist, silently exit. 
This is to support OSS\n # users who have not yet run the migration to create the table.\n # On read, we will throw an error if the table does not exist.\n return\n\n check.inst_param(event.dagster_event.asset_key, "asset_key", AssetKey)\n asset_key_str = event.dagster_event.asset_key.to_string()\n\n with self.index_connection() as conn:\n conn.execute(\n AssetEventTagsTable.insert(),\n [\n dict(\n event_id=event_id,\n asset_key=asset_key_str,\n key=key,\n value=value,\n # Postgres requires a datetime that is in UTC but has no timezone info\n # set in order to be stored correctly\n event_timestamp=datetime.utcfromtimestamp(event.timestamp),\n )\n for key, value in tags.items()\n ],\n )\n\n def store_event(self, event: EventLogEntry) -> None:\n """Store an event corresponding to a pipeline run.\n\n Args:\n event (EventLogEntry): The event to store.\n """\n check.inst_param(event, "event", EventLogEntry)\n insert_event_statement = self.prepare_insert_event(event)\n run_id = event.run_id\n\n event_id = None\n\n with self.run_connection(run_id) as conn:\n result = conn.execute(insert_event_statement)\n event_id = result.inserted_primary_key[0]\n\n if (\n event.is_dagster_event\n and event.dagster_event_type in ASSET_EVENTS\n and event.dagster_event.asset_key # type: ignore\n ):\n self.store_asset_event(event, event_id)\n\n if event_id is None:\n raise DagsterInvariantViolationError(\n "Cannot store asset event tags for null event id."\n )\n\n self.store_asset_event_tags(event, event_id)\n\n if event.is_dagster_event and event.dagster_event_type in ASSET_CHECK_EVENTS:\n self.store_asset_check_event(event, event_id)\n\n def get_records_for_run(\n self,\n run_id,\n cursor: Optional[str] = None,\n of_type: Optional[Union[DagsterEventType, Set[DagsterEventType]]] = None,\n limit: Optional[int] = None,\n ascending: bool = True,\n ) -> EventLogConnection:\n """Get all of the logs corresponding to a run.\n\n Args:\n run_id (str): The id of the run for which to fetch logs.\n cursor (Optional[int]): Zero-indexed logs will be returned starting from cursor + 1,\n i.e., if cursor is -1, all logs will be returned. 
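# --- Hedged illustrative sketch (not part of the module above) ----------------------
# get_records_for_run (below) supports two cursor styles: an offset cursor, which maps
# to SQL OFFSET, and an id cursor, which maps to a keyset filter on the autoincrement
# id. A self-contained version of that query shaping against a toy table; names here
# are illustrative, not Dagster's real schema.
import sqlalchemy as db

_metadata = db.MetaData()
_event_logs = db.Table(
    "event_logs_sketch",
    _metadata,
    db.Column("id", db.Integer, primary_key=True, autoincrement=True),
    db.Column("event", db.Text),
)

def _records_for_run(conn, ascending=True, offset=None, after_id=None, limit=None):
    query = _event_logs.select().order_by(
        _event_logs.c.id.asc() if ascending else _event_logs.c.id.desc()
    )
    if offset is not None:  # offset-style cursor
        query = query.offset(offset)
    elif after_id is not None:  # id-style (keyset) cursor
        query = query.where(
            _event_logs.c.id > after_id if ascending else _event_logs.c.id < after_id
        )
    if limit:
        query = query.limit(limit)
    return conn.execute(query).fetchall()

_engine = db.create_engine("sqlite://")
_metadata.create_all(_engine)
with _engine.begin() as conn:
    conn.execute(_event_logs.insert(), [{"event": f"e{i}"} for i in range(5)])
    assert [row.id for row in _records_for_run(conn, after_id=2, limit=2)] == [3, 4]
# -------------------------------------------------------------------------------------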
(default: -1)\n of_type (Optional[DagsterEventType]): the dagster event type to filter the logs.\n limit (Optional[int]): the maximum number of events to fetch\n """\n check.str_param(run_id, "run_id")\n check.opt_str_param(cursor, "cursor")\n\n check.invariant(not of_type or isinstance(of_type, (DagsterEventType, frozenset, set)))\n\n dagster_event_types = (\n {of_type}\n if isinstance(of_type, DagsterEventType)\n else check.opt_set_param(of_type, "dagster_event_type", of_type=DagsterEventType)\n )\n\n query = (\n db_select([SqlEventLogStorageTable.c.id, SqlEventLogStorageTable.c.event])\n .where(SqlEventLogStorageTable.c.run_id == run_id)\n .order_by(\n SqlEventLogStorageTable.c.id.asc()\n if ascending\n else SqlEventLogStorageTable.c.id.desc()\n )\n )\n if dagster_event_types:\n query = query.where(\n SqlEventLogStorageTable.c.dagster_event_type.in_(\n [dagster_event_type.value for dagster_event_type in dagster_event_types]\n )\n )\n\n # adjust 0 based index cursor to SQL offset\n if cursor is not None:\n cursor_obj = EventLogCursor.parse(cursor)\n if cursor_obj.is_offset_cursor():\n query = query.offset(cursor_obj.offset())\n elif cursor_obj.is_id_cursor():\n if ascending:\n query = query.where(SqlEventLogStorageTable.c.id > cursor_obj.storage_id())\n else:\n query = query.where(SqlEventLogStorageTable.c.id < cursor_obj.storage_id())\n\n if limit:\n query = query.limit(limit)\n\n with self.run_connection(run_id) as conn:\n results = conn.execute(query).fetchall()\n\n last_record_id = None\n try:\n records = []\n for (\n record_id,\n json_str,\n ) in results:\n records.append(\n EventLogRecord(\n storage_id=record_id,\n event_log_entry=deserialize_value(json_str, EventLogEntry),\n )\n )\n last_record_id = record_id\n except (seven.JSONDecodeError, DeserializationError) as err:\n raise DagsterEventLogInvalidForRun(run_id=run_id) from err\n\n if last_record_id is not None:\n next_cursor = EventLogCursor.from_storage_id(last_record_id).to_string()\n elif cursor:\n # record fetch returned no new logs, return the same cursor\n next_cursor = cursor\n else:\n # rely on the fact that all storage ids will be positive integers\n next_cursor = EventLogCursor.from_storage_id(-1).to_string()\n\n return EventLogConnection(\n records=records,\n cursor=next_cursor,\n has_more=bool(limit and len(results) == limit),\n )\n\n def get_stats_for_run(self, run_id: str) -> DagsterRunStatsSnapshot:\n check.str_param(run_id, "run_id")\n\n query = (\n db_select(\n [\n SqlEventLogStorageTable.c.dagster_event_type,\n db.func.count().label("n_events_of_type"),\n db.func.max(SqlEventLogStorageTable.c.timestamp).label("last_event_timestamp"),\n ]\n )\n .where(\n db.and_(\n SqlEventLogStorageTable.c.run_id == run_id,\n SqlEventLogStorageTable.c.dagster_event_type != None, # noqa: E711\n )\n )\n .group_by("dagster_event_type")\n )\n\n with self.run_connection(run_id) as conn:\n results = conn.execute(query).fetchall()\n\n try:\n counts = {}\n times = {}\n for result in results:\n (dagster_event_type, n_events_of_type, last_event_timestamp) = result\n check.invariant(dagster_event_type is not None)\n counts[dagster_event_type] = n_events_of_type\n times[dagster_event_type] = last_event_timestamp\n\n enqueued_time = times.get(DagsterEventType.PIPELINE_ENQUEUED.value, None)\n launch_time = times.get(DagsterEventType.PIPELINE_STARTING.value, None)\n start_time = times.get(DagsterEventType.PIPELINE_START.value, None)\n end_time = times.get(\n DagsterEventType.PIPELINE_SUCCESS.value,\n times.get(\n 
DagsterEventType.PIPELINE_FAILURE.value,\n times.get(DagsterEventType.PIPELINE_CANCELED.value, None),\n ),\n )\n\n return DagsterRunStatsSnapshot(\n run_id=run_id,\n steps_succeeded=counts.get(DagsterEventType.STEP_SUCCESS.value, 0),\n steps_failed=counts.get(DagsterEventType.STEP_FAILURE.value, 0),\n materializations=counts.get(DagsterEventType.ASSET_MATERIALIZATION.value, 0),\n expectations=counts.get(DagsterEventType.STEP_EXPECTATION_RESULT.value, 0),\n enqueued_time=datetime_as_float(enqueued_time) if enqueued_time else None,\n launch_time=datetime_as_float(launch_time) if launch_time else None,\n start_time=datetime_as_float(start_time) if start_time else None,\n end_time=datetime_as_float(end_time) if end_time else None,\n )\n except (seven.JSONDecodeError, DeserializationError) as err:\n raise DagsterEventLogInvalidForRun(run_id=run_id) from err\n\n def get_step_stats_for_run(\n self, run_id: str, step_keys: Optional[Sequence[str]] = None\n ) -> Sequence[RunStepKeyStatsSnapshot]:\n check.str_param(run_id, "run_id")\n check.opt_list_param(step_keys, "step_keys", of_type=str)\n\n # Originally, this was two different queries:\n # 1) one query which aggregated top-level step stats by grouping by event type / step_key in\n # a single query, using pure SQL (e.g. start_time, end_time, status, attempt counts).\n # 2) one query which fetched all the raw events for a specific event type and then inspected\n # the deserialized event object to aggregate stats derived from sequences of events.\n # (e.g. marker events, materializations, expectations resuls, attempts timing, etc.)\n #\n # For simplicity, we now just do the second type of query and derive the stats in Python\n # from the raw events. This has the benefit of being easier to read and also the benefit of\n # being able to share code with the in-memory event log storage implementation. 
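# --- Hedged illustrative sketch (not part of the module above) ----------------------
# As the comment above explains, get_step_stats_for_run fetches raw event rows and
# derives step stats in Python rather than in SQL. A stripped-down version of that
# fold, using (step_key, event_type) tuples as a stand-in for deserialized
# EventLogEntry objects:
from collections import defaultdict

def _fold_step_events(events):
    counts = defaultdict(lambda: {"attempts": 0, "failures": 0, "successes": 0})
    for step_key, event_type in events:  # assumed to be in storage-id (chronological) order
        if event_type in ("STEP_START", "STEP_RESTARTED"):
            counts[step_key]["attempts"] += 1
        elif event_type == "STEP_FAILURE":
            counts[step_key]["failures"] += 1
        elif event_type == "STEP_SUCCESS":
            counts[step_key]["successes"] += 1
    return dict(counts)

_stats = _fold_step_events(
    [
        ("load_table", "STEP_START"),
        ("load_table", "STEP_FAILURE"),
        ("load_table", "STEP_RESTARTED"),
        ("load_table", "STEP_SUCCESS"),
    ]
)
assert _stats["load_table"] == {"attempts": 2, "failures": 1, "successes": 1}
# -------------------------------------------------------------------------------------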
We may\n # choose to revisit this in the future, especially if we are able to do JSON-column queries\n # in SQL as a way of bypassing the serdes layer in all cases.\n raw_event_query = (\n db_select([SqlEventLogStorageTable.c.event])\n .where(SqlEventLogStorageTable.c.run_id == run_id)\n .where(SqlEventLogStorageTable.c.step_key != None) # noqa: E711\n .where(\n SqlEventLogStorageTable.c.dagster_event_type.in_(\n [\n DagsterEventType.STEP_START.value,\n DagsterEventType.STEP_SUCCESS.value,\n DagsterEventType.STEP_SKIPPED.value,\n DagsterEventType.STEP_FAILURE.value,\n DagsterEventType.STEP_RESTARTED.value,\n DagsterEventType.ASSET_MATERIALIZATION.value,\n DagsterEventType.STEP_EXPECTATION_RESULT.value,\n DagsterEventType.STEP_RESTARTED.value,\n DagsterEventType.STEP_UP_FOR_RETRY.value,\n ]\n + [marker_event.value for marker_event in MARKER_EVENTS]\n )\n )\n .order_by(SqlEventLogStorageTable.c.id.asc())\n )\n if step_keys:\n raw_event_query = raw_event_query.where(\n SqlEventLogStorageTable.c.step_key.in_(step_keys)\n )\n\n with self.run_connection(run_id) as conn:\n results = conn.execute(raw_event_query).fetchall()\n\n try:\n records = [deserialize_value(json_str, EventLogEntry) for (json_str,) in results]\n return build_run_step_stats_from_events(run_id, records)\n except (seven.JSONDecodeError, DeserializationError) as err:\n raise DagsterEventLogInvalidForRun(run_id=run_id) from err\n\n def _apply_migration(self, migration_name, migration_fn, print_fn, force):\n if self.has_secondary_index(migration_name):\n if not force:\n if print_fn:\n print_fn(f"Skipping already applied data migration: {migration_name}")\n return\n if print_fn:\n print_fn(f"Starting data migration: {migration_name}")\n migration_fn()(self, print_fn)\n self.enable_secondary_index(migration_name)\n if print_fn:\n print_fn(f"Finished data migration: {migration_name}")\n\n def reindex_events(self, print_fn: Optional[PrintFn] = None, force: bool = False) -> None:\n """Call this method to run any data migrations across the event_log table."""\n for migration_name, migration_fn in EVENT_LOG_DATA_MIGRATIONS.items():\n self._apply_migration(migration_name, migration_fn, print_fn, force)\n\n def reindex_assets(self, print_fn: Optional[PrintFn] = None, force: bool = False) -> None:\n """Call this method to run any data migrations across the asset_keys table."""\n for migration_name, migration_fn in ASSET_DATA_MIGRATIONS.items():\n self._apply_migration(migration_name, migration_fn, print_fn, force)\n\n def wipe(self) -> None:\n """Clears the event log storage."""\n # Should be overridden by SqliteEventLogStorage and other storages that shard based on\n # run_id\n\n # https://stackoverflow.com/a/54386260/324449\n with self.run_connection(run_id=None) as conn:\n conn.execute(SqlEventLogStorageTable.delete())\n conn.execute(AssetKeyTable.delete())\n\n if self.has_table("asset_event_tags"):\n conn.execute(AssetEventTagsTable.delete())\n\n if self.has_table("dynamic_partitions"):\n conn.execute(DynamicPartitionsTable.delete())\n\n if self.has_table("concurrency_slots"):\n conn.execute(ConcurrencySlotsTable.delete())\n\n if self.has_table("pending_steps"):\n conn.execute(PendingStepsTable.delete())\n\n if self.has_table("asset_check_executions"):\n conn.execute(AssetCheckExecutionsTable.delete())\n\n self._wipe_index()\n\n def _wipe_index(self):\n with self.index_connection() as conn:\n conn.execute(SqlEventLogStorageTable.delete())\n conn.execute(AssetKeyTable.delete())\n\n if self.has_table("asset_event_tags"):\n 
conn.execute(AssetEventTagsTable.delete())\n\n if self.has_table("dynamic_partitions"):\n conn.execute(DynamicPartitionsTable.delete())\n\n if self.has_table("concurrency_slots"):\n conn.execute(ConcurrencySlotsTable.delete())\n\n if self.has_table("pending_steps"):\n conn.execute(PendingStepsTable.delete())\n\n if self.has_table("asset_check_executions"):\n conn.execute(AssetCheckExecutionsTable.delete())\n\n def delete_events(self, run_id: str) -> None:\n with self.run_connection(run_id) as conn:\n self.delete_events_for_run(conn, run_id)\n with self.index_connection() as conn:\n self.delete_events_for_run(conn, run_id)\n self.free_concurrency_slots_for_run(run_id)\n\n def delete_events_for_run(self, conn: Connection, run_id: str) -> None:\n check.str_param(run_id, "run_id")\n conn.execute(\n SqlEventLogStorageTable.delete().where(SqlEventLogStorageTable.c.run_id == run_id)\n )\n\n @property\n def is_persistent(self) -> bool:\n return True\n\n def update_event_log_record(self, record_id: int, event: EventLogEntry) -> None:\n """Utility method for migration scripts to update SQL representation of event records."""\n check.int_param(record_id, "record_id")\n check.inst_param(event, "event", EventLogEntry)\n dagster_event_type = None\n asset_key_str = None\n if event.is_dagster_event:\n dagster_event_type = event.dagster_event.event_type_value # type: ignore\n if event.dagster_event.asset_key: # type: ignore\n check.inst_param(event.dagster_event.asset_key, "asset_key", AssetKey) # type: ignore\n asset_key_str = event.dagster_event.asset_key.to_string() # type: ignore\n\n with self.run_connection(run_id=event.run_id) as conn:\n conn.execute(\n SqlEventLogStorageTable.update()\n .where(SqlEventLogStorageTable.c.id == record_id)\n .values(\n event=serialize_value(event),\n dagster_event_type=dagster_event_type,\n timestamp=datetime.utcfromtimestamp(event.timestamp),\n step_key=event.step_key,\n asset_key=asset_key_str,\n )\n )\n\n def get_event_log_table_data(self, run_id: str, record_id: int) -> Optional[SqlAlchemyRow]:\n """Utility method to test representation of the record in the SQL table. Returns all of\n the columns stored in the event log storage (as opposed to the deserialized `EventLogEntry`).\n This allows checking that certain fields are extracted to support performant lookups (e.g.\n extracting `step_key` for fast filtering).\n """\n with self.run_connection(run_id=run_id) as conn:\n query = (\n db_select([SqlEventLogStorageTable])\n .where(SqlEventLogStorageTable.c.id == record_id)\n .order_by(SqlEventLogStorageTable.c.id.asc())\n )\n return conn.execute(query).fetchone()\n\n def has_secondary_index(self, name: str) -> bool:\n """This method uses a checkpoint migration table to see if summary data has been constructed\n in a secondary index table. 
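# --- Hedged illustrative sketch (not part of the module above) ----------------------
# _apply_migration above, together with has_secondary_index / enable_secondary_index,
# checkpoints each data migration by name so that reindexing is idempotent: skip if the
# name is already recorded, otherwise run the migration and record it. The same control
# flow with an in-memory set standing in for SecondaryIndexMigrationTable:
def _apply_migration_sketch(completed: set, name: str, migration_fn, force: bool = False) -> bool:
    if name in completed and not force:
        return False  # already applied; skip
    migration_fn()
    completed.add(name)  # checkpoint so the next reindex skips it
    return True

_completed: set = set()
assert _apply_migration_sketch(_completed, "asset_key_index_cols", lambda: None) is True
assert _apply_migration_sketch(_completed, "asset_key_index_cols", lambda: None) is False
# -------------------------------------------------------------------------------------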
Can be used to checkpoint event_log data migrations.\n """\n query = (\n db_select([1])\n .where(SecondaryIndexMigrationTable.c.name == name)\n .where(SecondaryIndexMigrationTable.c.migration_completed != None) # noqa: E711\n .limit(1)\n )\n with self.index_connection() as conn:\n results = conn.execute(query).fetchall()\n\n return len(results) > 0\n\n def enable_secondary_index(self, name: str) -> None:\n """This method marks an event_log data migration as complete, to indicate that a summary\n data migration is complete.\n """\n query = SecondaryIndexMigrationTable.insert().values(\n name=name,\n migration_completed=datetime.now(),\n )\n with self.index_connection() as conn:\n try:\n conn.execute(query)\n except db_exc.IntegrityError:\n conn.execute(\n SecondaryIndexMigrationTable.update()\n .where(SecondaryIndexMigrationTable.c.name == name)\n .values(migration_completed=datetime.now())\n )\n\n def _apply_filter_to_query(\n self,\n query: SqlAlchemyQuery,\n event_records_filter: EventRecordsFilter,\n asset_details: Optional[AssetDetails] = None,\n apply_cursor_filters: bool = True,\n ) -> SqlAlchemyQuery:\n query = query.where(\n SqlEventLogStorageTable.c.dagster_event_type == event_records_filter.event_type.value\n )\n\n if event_records_filter.asset_key:\n query = query.where(\n SqlEventLogStorageTable.c.asset_key == event_records_filter.asset_key.to_string(),\n )\n\n if event_records_filter.asset_partitions:\n query = query.where(\n SqlEventLogStorageTable.c.partition.in_(event_records_filter.asset_partitions)\n )\n\n if asset_details and asset_details.last_wipe_timestamp:\n query = query.where(\n SqlEventLogStorageTable.c.timestamp\n > datetime.utcfromtimestamp(asset_details.last_wipe_timestamp)\n )\n\n if apply_cursor_filters:\n # allow the run-sharded sqlite implementation to disable this cursor filtering so that\n # it can implement its own custom cursor logic, as cursor ids are not unique across run\n # shards\n if event_records_filter.before_cursor is not None:\n before_cursor_id = (\n event_records_filter.before_cursor.id\n if isinstance(event_records_filter.before_cursor, RunShardedEventsCursor)\n else event_records_filter.before_cursor\n )\n query = query.where(SqlEventLogStorageTable.c.id < before_cursor_id)\n\n if event_records_filter.after_cursor is not None:\n after_cursor_id = (\n event_records_filter.after_cursor.id\n if isinstance(event_records_filter.after_cursor, RunShardedEventsCursor)\n else event_records_filter.after_cursor\n )\n query = query.where(SqlEventLogStorageTable.c.id > after_cursor_id)\n\n if event_records_filter.before_timestamp:\n query = query.where(\n SqlEventLogStorageTable.c.timestamp\n < datetime.utcfromtimestamp(event_records_filter.before_timestamp)\n )\n\n if event_records_filter.after_timestamp:\n query = query.where(\n SqlEventLogStorageTable.c.timestamp\n > datetime.utcfromtimestamp(event_records_filter.after_timestamp)\n )\n\n if event_records_filter.storage_ids:\n query = query.where(SqlEventLogStorageTable.c.id.in_(event_records_filter.storage_ids))\n\n if event_records_filter.tags and self.has_table(AssetEventTagsTable.name):\n # If we don't have the tags table, we'll filter the results after the query\n check.invariant(\n isinstance(event_records_filter.asset_key, AssetKey),\n "Asset key must be set in event records filter to filter by tags.",\n )\n if self.supports_intersect:\n intersections = [\n db_select([AssetEventTagsTable.c.event_id]).where(\n db.and_(\n AssetEventTagsTable.c.asset_key\n == 
event_records_filter.asset_key.to_string(), # type: ignore # (bad sig?)\n AssetEventTagsTable.c.key == key,\n (\n AssetEventTagsTable.c.value == value\n if isinstance(value, str)\n else AssetEventTagsTable.c.value.in_(value)\n ),\n )\n )\n for key, value in event_records_filter.tags.items()\n ]\n query = query.where(SqlEventLogStorageTable.c.id.in_(db.intersect(*intersections)))\n\n return query\n\n def _apply_tags_table_joins(\n self,\n table: db.Table,\n tags: Mapping[str, Union[str, Sequence[str]]],\n asset_key: Optional[AssetKey],\n ) -> db.Table:\n event_id_col = table.c.id if table == SqlEventLogStorageTable else table.c.event_id\n i = 0\n for key, value in tags.items():\n i += 1\n tags_table = db_subquery(\n db_select([AssetEventTagsTable]), f"asset_event_tags_subquery_{i}"\n )\n table = table.join(\n tags_table,\n db.and_(\n event_id_col == tags_table.c.event_id,\n not asset_key or tags_table.c.asset_key == asset_key.to_string(),\n tags_table.c.key == key,\n (\n tags_table.c.value == value\n if isinstance(value, str)\n else tags_table.c.value.in_(value)\n ),\n ),\n )\n return table\n\n def get_event_records(\n self,\n event_records_filter: EventRecordsFilter,\n limit: Optional[int] = None,\n ascending: bool = False,\n ) -> Sequence[EventLogRecord]:\n """Returns a list of (record_id, record)."""\n check.inst_param(event_records_filter, "event_records_filter", EventRecordsFilter)\n check.opt_int_param(limit, "limit")\n check.bool_param(ascending, "ascending")\n\n if event_records_filter.asset_key:\n asset_details = next(iter(self._get_assets_details([event_records_filter.asset_key])))\n else:\n asset_details = None\n\n if (\n event_records_filter.tags\n and not self.supports_intersect\n and self.has_table(AssetEventTagsTable.name)\n ):\n table = self._apply_tags_table_joins(\n SqlEventLogStorageTable, event_records_filter.tags, event_records_filter.asset_key\n )\n else:\n table = SqlEventLogStorageTable\n\n query = db_select(\n [SqlEventLogStorageTable.c.id, SqlEventLogStorageTable.c.event]\n ).select_from(table)\n\n query = self._apply_filter_to_query(\n query=query,\n event_records_filter=event_records_filter,\n asset_details=asset_details,\n )\n if limit:\n query = query.limit(limit)\n\n if ascending:\n query = query.order_by(SqlEventLogStorageTable.c.id.asc())\n else:\n query = query.order_by(SqlEventLogStorageTable.c.id.desc())\n\n with self.index_connection() as conn:\n results = conn.execute(query).fetchall()\n\n event_records = []\n for row_id, json_str in results:\n try:\n event_record = deserialize_value(json_str, NamedTuple)\n if not isinstance(event_record, EventLogEntry):\n logging.warning(\n "Could not resolve event record as EventLogEntry for id `%s`.", row_id\n )\n continue\n\n if event_records_filter.tags and not self.has_table(AssetEventTagsTable.name):\n # If we can't filter tags via the tags table, filter the returned records\n if limit is not None:\n raise DagsterInvalidInvocationError(\n "Cannot filter events on tags with a limit, without the asset event "\n "tags table. 
To fix, run `dagster instance migrate`."\n )\n\n event_record_tags = event_record.tags\n if not event_record_tags or any(\n event_record_tags.get(k) != v for k, v in event_records_filter.tags.items()\n ):\n continue\n\n event_records.append(\n EventLogRecord(storage_id=row_id, event_log_entry=event_record)\n )\n except seven.JSONDecodeError:\n logging.warning("Could not parse event record id `%s`.", row_id)\n\n return event_records\n\n def supports_event_consumer_queries(self) -> bool:\n return True\n\n @property\n def supports_intersect(self) -> bool:\n return True\n\n def get_logs_for_all_runs_by_log_id(\n self,\n after_cursor: int = -1,\n dagster_event_type: Optional[Union[DagsterEventType, Set[DagsterEventType]]] = None,\n limit: Optional[int] = None,\n ) -> Mapping[int, EventLogEntry]:\n check.int_param(after_cursor, "after_cursor")\n check.invariant(\n after_cursor >= -1,\n f"Don't know what to do with negative cursor {after_cursor}",\n )\n dagster_event_types = (\n {dagster_event_type}\n if isinstance(dagster_event_type, DagsterEventType)\n else check.opt_set_param(\n dagster_event_type, "dagster_event_type", of_type=DagsterEventType\n )\n )\n\n query = (\n db_select([SqlEventLogStorageTable.c.id, SqlEventLogStorageTable.c.event])\n .where(SqlEventLogStorageTable.c.id > after_cursor)\n .order_by(SqlEventLogStorageTable.c.id.asc())\n )\n\n if dagster_event_types:\n query = query.where(\n SqlEventLogStorageTable.c.dagster_event_type.in_(\n [dagster_event_type.value for dagster_event_type in dagster_event_types]\n )\n )\n\n if limit:\n query = query.limit(limit)\n\n with self.index_connection() as conn:\n results = conn.execute(query).fetchall()\n\n events = {}\n record_id = None\n try:\n for (\n record_id,\n json_str,\n ) in results:\n events[record_id] = deserialize_value(json_str, EventLogEntry)\n except (seven.JSONDecodeError, DeserializationError):\n logging.warning("Could not parse event record id `%s`.", record_id)\n\n return events\n\n def get_maximum_record_id(self) -> Optional[int]:\n with self.index_connection() as conn:\n result = conn.execute(db_select([db.func.max(SqlEventLogStorageTable.c.id)])).fetchone()\n return result[0] # type: ignore\n\n def _construct_asset_record_from_row(\n self,\n row,\n last_materialization_record: Optional[EventLogRecord],\n can_cache_asset_status_data: bool,\n ) -> AssetRecord:\n from dagster._core.storage.partition_status_cache import AssetStatusCacheValue\n\n asset_key = AssetKey.from_db_string(row["asset_key"])\n if asset_key:\n return AssetRecord(\n storage_id=row["id"],\n asset_entry=AssetEntry(\n asset_key=asset_key,\n last_materialization_record=last_materialization_record,\n last_run_id=row["last_run_id"],\n asset_details=AssetDetails.from_db_string(row["asset_details"]),\n cached_status=(\n AssetStatusCacheValue.from_db_string(row["cached_status_data"])\n if can_cache_asset_status_data\n else None\n ),\n ),\n )\n else:\n check.failed("Row did not contain asset key.")\n\n def _get_latest_materialization_records(\n self, raw_asset_rows\n ) -> Mapping[AssetKey, Optional[EventLogRecord]]:\n # Given a list of raw asset rows, returns a mapping of asset key to latest asset materialization\n # event log entry. 
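# --- Hedged illustrative sketch (not part of the module above) ----------------------
# The backcompat fetch in _get_latest_materialization_records (below) locates the
# latest materialization row per asset key with a MAX(id) GROUP BY subquery joined back
# to the event table. The same query shape against a toy table (SQLAlchemy 1.4+ select
# style; names are illustrative, not Dagster's real schema):
import sqlalchemy as db

_metadata = db.MetaData()
_events = db.Table(
    "events_sketch",
    _metadata,
    db.Column("id", db.Integer, primary_key=True, autoincrement=True),
    db.Column("asset_key", db.String),
    db.Column("event", db.Text),
)

def _latest_event_per_asset(conn):
    latest_ids = (
        db.select(_events.c.asset_key, db.func.max(_events.c.id).label("id"))
        .group_by(_events.c.asset_key)
        .subquery("latest_ids")
    )
    query = db.select(_events.c.asset_key, _events.c.event).select_from(
        _events.join(latest_ids, _events.c.id == latest_ids.c.id)
    )
    return dict(conn.execute(query).fetchall())

_engine = db.create_engine("sqlite://")
_metadata.create_all(_engine)
with _engine.begin() as conn:
    conn.execute(
        _events.insert(),
        [
            {"asset_key": "a", "event": "old"},
            {"asset_key": "a", "event": "new"},
            {"asset_key": "b", "event": "only"},
        ],
    )
    assert _latest_event_per_asset(conn) == {"a": "new", "b": "only"}
# -------------------------------------------------------------------------------------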
Fetches backcompat EventLogEntry records when the last_materialization\n # in the raw asset row is an AssetMaterialization.\n to_backcompat_fetch = set()\n results: Dict[AssetKey, Optional[EventLogRecord]] = {}\n for row in raw_asset_rows:\n asset_key = AssetKey.from_db_string(row["asset_key"])\n if not asset_key:\n continue\n event_or_materialization = (\n deserialize_value(row["last_materialization"], NamedTuple)\n if row["last_materialization"]\n else None\n )\n if isinstance(event_or_materialization, EventLogRecord):\n results[asset_key] = event_or_materialization\n else:\n to_backcompat_fetch.add(asset_key)\n\n latest_event_subquery = db_subquery(\n db_select(\n [\n SqlEventLogStorageTable.c.asset_key,\n db.func.max(SqlEventLogStorageTable.c.id).label("id"),\n ]\n )\n .where(\n db.and_(\n SqlEventLogStorageTable.c.asset_key.in_(\n [asset_key.to_string() for asset_key in to_backcompat_fetch]\n ),\n SqlEventLogStorageTable.c.dagster_event_type\n == DagsterEventType.ASSET_MATERIALIZATION.value,\n )\n )\n .group_by(SqlEventLogStorageTable.c.asset_key),\n "latest_event_subquery",\n )\n backcompat_query = db_select(\n [\n SqlEventLogStorageTable.c.asset_key,\n SqlEventLogStorageTable.c.id,\n SqlEventLogStorageTable.c.event,\n ]\n ).select_from(\n latest_event_subquery.join(\n SqlEventLogStorageTable,\n db.and_(\n SqlEventLogStorageTable.c.asset_key == latest_event_subquery.c.asset_key,\n SqlEventLogStorageTable.c.id == latest_event_subquery.c.id,\n ),\n )\n )\n with self.index_connection() as conn:\n event_rows = db_fetch_mappings(conn, backcompat_query)\n\n for row in event_rows:\n asset_key = AssetKey.from_db_string(cast(Optional[str], row["asset_key"]))\n if asset_key:\n results[asset_key] = EventLogRecord(\n storage_id=cast(int, row["id"]),\n event_log_entry=deserialize_value(cast(str, row["event"]), EventLogEntry),\n )\n return results\n\n def can_cache_asset_status_data(self) -> bool:\n return self.has_asset_key_col("cached_status_data")\n\n def wipe_asset_cached_status(self, asset_key: AssetKey) -> None:\n if self.can_cache_asset_status_data():\n check.inst_param(asset_key, "asset_key", AssetKey)\n with self.index_connection() as conn:\n conn.execute(\n AssetKeyTable.update()\n .values(dict(cached_status_data=None))\n .where(\n AssetKeyTable.c.asset_key == asset_key.to_string(),\n )\n )\n\n def get_asset_records(\n self, asset_keys: Optional[Sequence[AssetKey]] = None\n ) -> Sequence[AssetRecord]:\n rows = self._fetch_asset_rows(asset_keys=asset_keys)\n latest_materialization_records = self._get_latest_materialization_records(rows)\n can_cache_asset_status_data = self.can_cache_asset_status_data()\n\n asset_records: List[AssetRecord] = []\n for row in rows:\n asset_key = AssetKey.from_db_string(row["asset_key"])\n if asset_key:\n asset_records.append(\n self._construct_asset_record_from_row(\n row,\n latest_materialization_records.get(asset_key),\n can_cache_asset_status_data,\n )\n )\n\n return asset_records\n\n def has_asset_key(self, asset_key: AssetKey) -> bool:\n check.inst_param(asset_key, "asset_key", AssetKey)\n rows = self._fetch_asset_rows(asset_keys=[asset_key])\n return bool(rows)\n\n def all_asset_keys(self):\n rows = self._fetch_asset_rows()\n asset_keys = [\n AssetKey.from_db_string(row["asset_key"])\n for row in sorted(rows, key=lambda x: x["asset_key"])\n ]\n return [asset_key for asset_key in asset_keys if asset_key]\n\n def get_asset_keys(\n self,\n prefix: Optional[Sequence[str]] = None,\n limit: Optional[int] = None,\n cursor: Optional[str] = None,\n ) -> 
Sequence[AssetKey]:\n rows = self._fetch_asset_rows(prefix=prefix, limit=limit, cursor=cursor)\n asset_keys = [\n AssetKey.from_db_string(row["asset_key"])\n for row in sorted(rows, key=lambda x: x["asset_key"])\n ]\n return [asset_key for asset_key in asset_keys if asset_key]\n\n def get_latest_materialization_events(\n self, asset_keys: Iterable[AssetKey]\n ) -> Mapping[AssetKey, Optional[EventLogEntry]]:\n check.iterable_param(asset_keys, "asset_keys", AssetKey)\n rows = self._fetch_asset_rows(asset_keys=asset_keys)\n return {\n asset_key: event_log_record.event_log_entry if event_log_record is not None else None\n for asset_key, event_log_record in self._get_latest_materialization_records(\n rows\n ).items()\n }\n\n def _fetch_asset_rows(\n self,\n asset_keys=None,\n prefix: Optional[Sequence[str]] = None,\n limit: Optional[int] = None,\n cursor: Optional[str] = None,\n ) -> Sequence[SqlAlchemyRow]:\n # fetches rows containing asset_key, last_materialization, and asset_details from the DB,\n # applying the filters specified in the arguments.\n #\n # Differs from _fetch_raw_asset_rows, in that it loops through to make sure enough rows are\n # returned to satisfy the limit.\n #\n # returns a list of rows where each row is a tuple of serialized asset_key, materialization,\n # and asset_details\n should_query = True\n current_cursor = cursor\n if self.has_secondary_index(ASSET_KEY_INDEX_COLS):\n # if we have migrated, we can limit using SQL\n fetch_limit = limit\n else:\n # if we haven't migrated, overfetch in case the first N results are wiped\n fetch_limit = max(limit, MIN_ASSET_ROWS) if limit else None\n result = []\n\n while should_query:\n rows, has_more, current_cursor = self._fetch_raw_asset_rows(\n asset_keys=asset_keys, prefix=prefix, limit=fetch_limit, cursor=current_cursor\n )\n result.extend(rows)\n should_query = bool(has_more) and bool(limit) and len(result) < cast(int, limit)\n\n is_partial_query = asset_keys is not None or bool(prefix) or bool(limit) or bool(cursor)\n if not is_partial_query and self._can_mark_assets_as_migrated(rows): # type: ignore\n self.enable_secondary_index(ASSET_KEY_INDEX_COLS)\n\n return result[:limit] if limit else result\n\n def _fetch_raw_asset_rows(\n self,\n asset_keys: Optional[Sequence[AssetKey]] = None,\n prefix: Optional[Sequence[str]] = None,\n limit: Optional[int] = None,\n cursor=None,\n ) -> Tuple[Iterable[SqlAlchemyRow], bool, Optional[str]]:\n # fetches rows containing asset_key, last_materialization, and asset_details from the DB,\n # applying the filters specified in the arguments. Does not guarantee that the number of\n # rows returned will match the limit specified. 
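# --- Hedged illustrative sketch (not part of the module above) ----------------------
# _fetch_asset_rows above loops over _fetch_raw_asset_rows because a raw chunk can
# contain wiped rows that get filtered out, so a single fetch may come back short of
# the requested limit. The loop shape, with a fake chunked fetcher standing in for the
# database call:
def _fetch_until_limit(fetch_chunk, limit):
    # fetch_chunk(cursor) -> (rows, has_more, next_cursor), rows possibly post-filtered
    result, cursor, should_query = [], None, True
    while should_query:
        rows, has_more, cursor = fetch_chunk(cursor)
        result.extend(rows)
        should_query = bool(has_more) and limit is not None and len(result) < limit
    return result[:limit] if limit else result

_chunks = {None: ([1, 2], True, "c1"), "c1": ([3], True, "c2"), "c2": ([4, 5], False, None)}
assert _fetch_until_limit(lambda cursor: _chunks[cursor], limit=4) == [1, 2, 3, 4]
# -------------------------------------------------------------------------------------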
This helper function is used to fetch a\n # chunk of asset key rows, which may or may not be wiped.\n #\n # Returns a tuple of (rows, has_more, cursor), where each row is a tuple of serialized\n # asset_key, materialization, and asset_details\n # TODO update comment\n\n columns = [\n AssetKeyTable.c.id,\n AssetKeyTable.c.asset_key,\n AssetKeyTable.c.last_materialization,\n AssetKeyTable.c.last_run_id,\n AssetKeyTable.c.asset_details,\n ]\n if self.can_cache_asset_status_data():\n columns.extend([AssetKeyTable.c.cached_status_data])\n\n is_partial_query = asset_keys is not None or bool(prefix) or bool(limit) or bool(cursor)\n if self.has_asset_key_index_cols() and not is_partial_query:\n # if the schema has been migrated, fetch the last_materialization_timestamp to see if\n # we can lazily migrate the data table\n columns.append(AssetKeyTable.c.last_materialization_timestamp)\n columns.append(AssetKeyTable.c.wipe_timestamp)\n\n query = db_select(columns).order_by(AssetKeyTable.c.asset_key.asc())\n query = self._apply_asset_filter_to_query(query, asset_keys, prefix, limit, cursor)\n\n if self.has_secondary_index(ASSET_KEY_INDEX_COLS):\n query = query.where(\n db.or_(\n AssetKeyTable.c.wipe_timestamp.is_(None),\n AssetKeyTable.c.last_materialization_timestamp > AssetKeyTable.c.wipe_timestamp,\n )\n )\n with self.index_connection() as conn:\n rows = db_fetch_mappings(conn, query)\n\n return rows, False, None\n\n with self.index_connection() as conn:\n rows = db_fetch_mappings(conn, query)\n\n wiped_timestamps_by_asset_key: Dict[AssetKey, float] = {}\n row_by_asset_key: Dict[AssetKey, SqlAlchemyRow] = OrderedDict()\n\n for row in rows:\n asset_key = AssetKey.from_db_string(cast(str, row["asset_key"]))\n if not asset_key:\n continue\n asset_details = AssetDetails.from_db_string(row["asset_details"])\n if not asset_details or not asset_details.last_wipe_timestamp:\n row_by_asset_key[asset_key] = row\n continue\n materialization_or_event_or_record = (\n deserialize_value(cast(str, row["last_materialization"]), NamedTuple)\n if row["last_materialization"]\n else None\n )\n if isinstance(materialization_or_event_or_record, (EventLogRecord, EventLogEntry)):\n if isinstance(materialization_or_event_or_record, EventLogRecord):\n event_timestamp = materialization_or_event_or_record.event_log_entry.timestamp\n else:\n event_timestamp = materialization_or_event_or_record.timestamp\n\n if asset_details.last_wipe_timestamp > event_timestamp:\n # this asset has not been materialized since being wiped, skip\n continue\n else:\n # add the key\n row_by_asset_key[asset_key] = row\n else:\n row_by_asset_key[asset_key] = row\n wiped_timestamps_by_asset_key[asset_key] = asset_details.last_wipe_timestamp\n\n if wiped_timestamps_by_asset_key:\n materialization_times = self._fetch_backcompat_materialization_times(\n wiped_timestamps_by_asset_key.keys() # type: ignore\n )\n for asset_key, wiped_timestamp in wiped_timestamps_by_asset_key.items():\n materialization_time = materialization_times.get(asset_key)\n if not materialization_time or utc_datetime_from_naive(\n materialization_time\n ) < utc_datetime_from_timestamp(wiped_timestamp):\n # remove rows that have not been materialized since being wiped\n row_by_asset_key.pop(asset_key)\n\n has_more = limit and len(rows) == limit\n new_cursor = rows[-1]["id"] if rows else None\n\n return row_by_asset_key.values(), has_more, new_cursor # type: ignore\n\n def update_asset_cached_status_data(\n self, asset_key: AssetKey, cache_values: "AssetStatusCacheValue"\n ) -> None:\n 
if self.can_cache_asset_status_data():\n with self.index_connection() as conn:\n conn.execute(\n AssetKeyTable.update()\n .where(\n AssetKeyTable.c.asset_key == asset_key.to_string(),\n )\n .values(cached_status_data=serialize_value(cache_values))\n )\n\n def _fetch_backcompat_materialization_times(\n self, asset_keys: Sequence[AssetKey]\n ) -> Mapping[AssetKey, datetime]:\n # fetches the latest materialization timestamp for the given asset_keys. Uses the (slower)\n # raw event log table.\n backcompat_query = (\n db_select(\n [\n SqlEventLogStorageTable.c.asset_key,\n db.func.max(SqlEventLogStorageTable.c.timestamp).label("timestamp"),\n ]\n )\n .where(\n SqlEventLogStorageTable.c.asset_key.in_(\n [asset_key.to_string() for asset_key in asset_keys]\n )\n )\n .group_by(SqlEventLogStorageTable.c.asset_key)\n .order_by(db.func.max(SqlEventLogStorageTable.c.timestamp).asc())\n )\n with self.index_connection() as conn:\n backcompat_rows = db_fetch_mappings(conn, backcompat_query)\n return {AssetKey.from_db_string(row["asset_key"]): row["timestamp"] for row in backcompat_rows} # type: ignore\n\n def _can_mark_assets_as_migrated(self, rows):\n if not self.has_asset_key_index_cols():\n return False\n\n if self.has_secondary_index(ASSET_KEY_INDEX_COLS):\n # we have already migrated\n return False\n\n for row in rows:\n if not _get_from_row(row, "last_materialization_timestamp"):\n return False\n\n if _get_from_row(row, "asset_details") and not _get_from_row(row, "wipe_timestamp"):\n return False\n\n return True\n\n def _apply_asset_filter_to_query(\n self,\n query: SqlAlchemyQuery,\n asset_keys: Optional[Sequence[AssetKey]] = None,\n prefix=None,\n limit: Optional[int] = None,\n cursor: Optional[str] = None,\n ) -> SqlAlchemyQuery:\n if asset_keys is not None:\n query = query.where(\n AssetKeyTable.c.asset_key.in_([asset_key.to_string() for asset_key in asset_keys])\n )\n\n if prefix:\n prefix_str = seven.dumps(prefix)[:-1]\n query = query.where(AssetKeyTable.c.asset_key.startswith(prefix_str))\n\n if cursor:\n query = query.where(AssetKeyTable.c.asset_key > cursor)\n\n if limit:\n query = query.limit(limit)\n return query\n\n def _get_assets_details(\n self, asset_keys: Sequence[AssetKey]\n ) -> Sequence[Optional[AssetDetails]]:\n check.sequence_param(asset_keys, "asset_key", AssetKey)\n rows = None\n with self.index_connection() as conn:\n rows = db_fetch_mappings(\n conn,\n db_select([AssetKeyTable.c.asset_key, AssetKeyTable.c.asset_details]).where(\n AssetKeyTable.c.asset_key.in_(\n [asset_key.to_string() for asset_key in asset_keys]\n ),\n ),\n )\n\n asset_key_to_details = {\n cast(str, row["asset_key"]): (\n deserialize_value(cast(str, row["asset_details"]), AssetDetails)\n if row["asset_details"]\n else None\n )\n for row in rows\n }\n\n # returns a list of the corresponding asset_details to provided asset_keys\n return [\n asset_key_to_details.get(asset_key.to_string(), None) for asset_key in asset_keys\n ]\n\n def _add_assets_wipe_filter_to_query(\n self,\n query: SqlAlchemyQuery,\n assets_details: Sequence[Optional[AssetDetails]],\n asset_keys: Sequence[AssetKey],\n ) -> SqlAlchemyQuery:\n check.invariant(\n len(assets_details) == len(asset_keys),\n "asset_details and asset_keys must be the same length",\n )\n for i in range(len(assets_details)):\n asset_key, asset_details = asset_keys[i], assets_details[i]\n if asset_details and asset_details.last_wipe_timestamp:\n asset_key_in_row = SqlEventLogStorageTable.c.asset_key == asset_key.to_string()\n # If asset key is in row, keep the row if 
the timestamp > wipe timestamp, else remove the row.\n # If asset key is not in row, keep the row.\n query = query.where(\n db.or_(\n db.and_(\n asset_key_in_row,\n SqlEventLogStorageTable.c.timestamp\n > datetime.utcfromtimestamp(asset_details.last_wipe_timestamp),\n ),\n db.not_(asset_key_in_row),\n )\n )\n\n return query\n\n def get_event_tags_for_asset(\n self,\n asset_key: AssetKey,\n filter_tags: Optional[Mapping[str, str]] = None,\n filter_event_id: Optional[int] = None,\n ) -> Sequence[Mapping[str, str]]:\n """Fetches asset event tags for the given asset key.\n\n If filter_tags is provided, searches for events containing all of the filter tags. Then,\n returns all tags for those events. This enables searching for multipartitioned asset\n partition tags with a fixed dimension value, e.g. all of the tags for events where\n "country" == "US".\n\n If filter_event_id is provided, fetches only tags applied to the given event.\n\n Returns a list of dicts, where each dict is a mapping of tag key to tag value for a\n single event.\n """\n asset_key = check.inst_param(asset_key, "asset_key", AssetKey)\n filter_tags = check.opt_mapping_param(\n filter_tags, "filter_tags", key_type=str, value_type=str\n )\n filter_event_id = check.opt_int_param(filter_event_id, "filter_event_id")\n\n if not self.has_table(AssetEventTagsTable.name):\n raise DagsterInvalidInvocationError(\n "In order to search for asset event tags, you must run "\n "`dagster instance migrate` to create the AssetEventTags table."\n )\n\n asset_details = self._get_assets_details([asset_key])[0]\n if not filter_tags:\n tags_query = db_select(\n [\n AssetEventTagsTable.c.key,\n AssetEventTagsTable.c.value,\n AssetEventTagsTable.c.event_id,\n ]\n ).where(AssetEventTagsTable.c.asset_key == asset_key.to_string())\n if asset_details and asset_details.last_wipe_timestamp:\n tags_query = tags_query.where(\n AssetEventTagsTable.c.event_timestamp\n > datetime.utcfromtimestamp(asset_details.last_wipe_timestamp)\n )\n elif self.supports_intersect:\n\n def get_tag_filter_query(tag_key, tag_value):\n filter_query = db_select([AssetEventTagsTable.c.event_id]).where(\n db.and_(\n AssetEventTagsTable.c.asset_key == asset_key.to_string(),\n AssetEventTagsTable.c.key == tag_key,\n AssetEventTagsTable.c.value == tag_value,\n )\n )\n if asset_details and asset_details.last_wipe_timestamp:\n filter_query = filter_query.where(\n AssetEventTagsTable.c.event_timestamp\n > datetime.utcfromtimestamp(asset_details.last_wipe_timestamp)\n )\n return filter_query\n\n intersections = [\n get_tag_filter_query(tag_key, tag_value)\n for tag_key, tag_value in filter_tags.items()\n ]\n\n tags_query = db_select(\n [\n AssetEventTagsTable.c.key,\n AssetEventTagsTable.c.value,\n AssetEventTagsTable.c.event_id,\n ]\n ).where(\n db.and_(\n AssetEventTagsTable.c.event_id.in_(db.intersect(*intersections)),\n )\n )\n else:\n table = self._apply_tags_table_joins(AssetEventTagsTable, filter_tags, asset_key)\n tags_query = db_select(\n [\n AssetEventTagsTable.c.key,\n AssetEventTagsTable.c.value,\n AssetEventTagsTable.c.event_id,\n ]\n ).select_from(table)\n\n if asset_details and asset_details.last_wipe_timestamp:\n tags_query = tags_query.where(\n AssetEventTagsTable.c.event_timestamp\n > datetime.utcfromtimestamp(asset_details.last_wipe_timestamp)\n )\n\n if filter_event_id is not None:\n tags_query = tags_query.where(AssetEventTagsTable.c.event_id == filter_event_id)\n\n with self.index_connection() as conn:\n results = conn.execute(tags_query).fetchall()\n\n 
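# --- Hedged illustrative sketch (not part of the module above) ----------------------
# The supports_intersect branch of get_event_tags_for_asset above narrows to events
# carrying *all* of the requested tags by intersecting one event_id SELECT per
# (key, value) pair. The same query shape against a toy table (SQLAlchemy 1.4+ select
# style; names are illustrative, not Dagster's real schema):
import sqlalchemy as db

_metadata = db.MetaData()
_tags = db.Table(
    "asset_event_tags_sketch",
    _metadata,
    db.Column("event_id", db.Integer),
    db.Column("key", db.String),
    db.Column("value", db.String),
)

def _event_ids_with_all_tags(conn, tags: dict):
    intersections = [
        db.select(_tags.c.event_id).where(db.and_(_tags.c.key == key, _tags.c.value == value))
        for key, value in tags.items()
    ]
    return {row[0] for row in conn.execute(db.intersect(*intersections)).fetchall()}

_engine = db.create_engine("sqlite://")
_metadata.create_all(_engine)
with _engine.begin() as conn:
    conn.execute(
        _tags.insert(),
        [
            {"event_id": 1, "key": "country", "value": "US"},
            {"event_id": 1, "key": "date", "value": "2023-10-12"},
            {"event_id": 2, "key": "country", "value": "US"},
        ],
    )
    assert _event_ids_with_all_tags(conn, {"country": "US", "date": "2023-10-12"}) == {1}
# -------------------------------------------------------------------------------------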
tags_by_event_id: Dict[int, Dict[str, str]] = defaultdict(dict)\n for row in results:\n key, value, event_id = row\n tags_by_event_id[event_id][key] = value\n\n return list(tags_by_event_id.values())\n\n def _asset_materialization_from_json_column(\n self, json_str: str\n ) -> Optional[AssetMaterialization]:\n if not json_str:\n return None\n\n # We switched to storing the entire event record of the last materialization instead of just\n # the AssetMaterialization object, so that we have access to metadata like timestamp,\n # pipeline, run_id, etc.\n #\n # This should make certain asset queries way more performant, without having to do extra\n # queries against the event log.\n #\n # This should be accompanied by a schema change in 0.12.0, renaming `last_materialization`\n # to `last_materialization_event`, for clarity. For now, we should do some back-compat.\n #\n # https://github.com/dagster-io/dagster/issues/3945\n\n event_or_materialization = deserialize_value(json_str, NamedTuple)\n if isinstance(event_or_materialization, AssetMaterialization):\n return event_or_materialization\n\n if (\n not isinstance(event_or_materialization, EventLogEntry)\n or not event_or_materialization.is_dagster_event\n or not event_or_materialization.dagster_event.asset_key # type: ignore\n ):\n return None\n\n return event_or_materialization.dagster_event.step_materialization_data.materialization # type: ignore\n\n def _get_asset_key_values_on_wipe(self) -> Mapping[str, Any]:\n wipe_timestamp = pendulum.now("UTC").timestamp()\n values = {\n "asset_details": serialize_value(AssetDetails(last_wipe_timestamp=wipe_timestamp)),\n "last_run_id": None,\n }\n if self.has_asset_key_index_cols():\n values.update(\n dict(\n wipe_timestamp=utc_datetime_from_timestamp(wipe_timestamp),\n )\n )\n if self.can_cache_asset_status_data():\n values.update(dict(cached_status_data=None))\n return values\n\n def wipe_asset(self, asset_key: AssetKey) -> None:\n check.inst_param(asset_key, "asset_key", AssetKey)\n wiped_values = self._get_asset_key_values_on_wipe()\n\n with self.index_connection() as conn:\n conn.execute(\n AssetKeyTable.update()\n .values(**wiped_values)\n .where(\n AssetKeyTable.c.asset_key == asset_key.to_string(),\n )\n )\n\n def get_materialized_partitions(\n self,\n asset_key: AssetKey,\n before_cursor: Optional[int] = None,\n after_cursor: Optional[int] = None,\n ) -> Set[str]:\n query = (\n db_select(\n [\n SqlEventLogStorageTable.c.partition,\n db.func.max(SqlEventLogStorageTable.c.id),\n ]\n )\n .where(\n db.and_(\n SqlEventLogStorageTable.c.asset_key == asset_key.to_string(),\n SqlEventLogStorageTable.c.partition != None, # noqa: E711\n SqlEventLogStorageTable.c.dagster_event_type\n == DagsterEventType.ASSET_MATERIALIZATION.value,\n )\n )\n .group_by(SqlEventLogStorageTable.c.partition)\n )\n\n assets_details = self._get_assets_details([asset_key])\n query = self._add_assets_wipe_filter_to_query(query, assets_details, [asset_key])\n\n if after_cursor:\n query = query.where(SqlEventLogStorageTable.c.id > after_cursor)\n if before_cursor:\n query = query.where(SqlEventLogStorageTable.c.id < before_cursor)\n\n with self.index_connection() as conn:\n results = conn.execute(query).fetchall()\n\n return set([cast(str, row[0]) for row in results])\n\n def get_materialization_count_by_partition(\n self,\n asset_keys: Sequence[AssetKey],\n after_cursor: Optional[int] = None,\n before_cursor: Optional[int] = None,\n ) -> Mapping[AssetKey, Mapping[str, int]]:\n check.sequence_param(asset_keys, "asset_keys", 
AssetKey)\n\n query = (\n db_select(\n [\n SqlEventLogStorageTable.c.asset_key,\n SqlEventLogStorageTable.c.partition,\n db.func.count(SqlEventLogStorageTable.c.id),\n ]\n )\n .where(\n db.and_(\n SqlEventLogStorageTable.c.asset_key.in_(\n [asset_key.to_string() for asset_key in asset_keys]\n ),\n SqlEventLogStorageTable.c.partition != None, # noqa: E711\n SqlEventLogStorageTable.c.dagster_event_type\n == DagsterEventType.ASSET_MATERIALIZATION.value,\n )\n )\n .group_by(SqlEventLogStorageTable.c.asset_key, SqlEventLogStorageTable.c.partition)\n )\n\n assets_details = self._get_assets_details(asset_keys)\n query = self._add_assets_wipe_filter_to_query(query, assets_details, asset_keys)\n\n if after_cursor:\n query = query.where(SqlEventLogStorageTable.c.id > after_cursor)\n\n with self.index_connection() as conn:\n results = conn.execute(query).fetchall()\n\n materialization_count_by_partition: Dict[AssetKey, Dict[str, int]] = {\n asset_key: {} for asset_key in asset_keys\n }\n for row in results:\n asset_key = AssetKey.from_db_string(cast(Optional[str], row[0]))\n if asset_key:\n materialization_count_by_partition[asset_key][cast(str, row[1])] = cast(int, row[2])\n\n return materialization_count_by_partition\n\n def _latest_event_ids_by_partition_subquery(\n self,\n asset_key: AssetKey,\n event_types: Sequence[DagsterEventType],\n asset_partitions: Optional[Sequence[str]] = None,\n before_cursor: Optional[int] = None,\n after_cursor: Optional[int] = None,\n ):\n """Subquery for locating the latest event ids by partition for a given asset key and set\n of event types.\n """\n query = db_select(\n [\n SqlEventLogStorageTable.c.dagster_event_type,\n SqlEventLogStorageTable.c.partition,\n db.func.max(SqlEventLogStorageTable.c.id).label("id"),\n ]\n ).where(\n db.and_(\n SqlEventLogStorageTable.c.asset_key == asset_key.to_string(),\n SqlEventLogStorageTable.c.partition != None, # noqa: E711\n SqlEventLogStorageTable.c.dagster_event_type.in_(\n [event_type.value for event_type in event_types]\n ),\n )\n )\n if asset_partitions is not None:\n query = query.where(SqlEventLogStorageTable.c.partition.in_(asset_partitions))\n if before_cursor is not None:\n query = query.where(SqlEventLogStorageTable.c.id < before_cursor)\n if after_cursor is not None:\n query = query.where(SqlEventLogStorageTable.c.id > after_cursor)\n\n latest_event_ids_subquery = query.group_by(\n SqlEventLogStorageTable.c.dagster_event_type, SqlEventLogStorageTable.c.partition\n )\n\n assets_details = self._get_assets_details([asset_key])\n return db_subquery(\n self._add_assets_wipe_filter_to_query(\n latest_event_ids_subquery, assets_details, [asset_key]\n ),\n "latest_event_ids_by_partition_subquery",\n )\n\n def get_latest_storage_id_by_partition(\n self, asset_key: AssetKey, event_type: DagsterEventType\n ) -> Mapping[str, int]:\n """Fetch the latest materialzation storage id for each partition for a given asset key.\n\n Returns a mapping of partition to storage id.\n """\n check.inst_param(asset_key, "asset_key", AssetKey)\n\n latest_event_ids_by_partition_subquery = self._latest_event_ids_by_partition_subquery(\n asset_key, [event_type]\n )\n latest_event_ids_by_partition = db_select(\n [\n latest_event_ids_by_partition_subquery.c.partition,\n latest_event_ids_by_partition_subquery.c.id,\n ]\n )\n\n with self.index_connection() as conn:\n rows = conn.execute(latest_event_ids_by_partition).fetchall()\n\n latest_materialization_storage_id_by_partition: Dict[str, int] = {}\n for row in rows:\n 
latest_materialization_storage_id_by_partition[cast(str, row[0])] = cast(int, row[1])\n return latest_materialization_storage_id_by_partition\n\n def get_latest_tags_by_partition(\n self,\n asset_key: AssetKey,\n event_type: DagsterEventType,\n tag_keys: Sequence[str],\n asset_partitions: Optional[Sequence[str]] = None,\n before_cursor: Optional[int] = None,\n after_cursor: Optional[int] = None,\n ) -> Mapping[str, Mapping[str, str]]:\n check.inst_param(asset_key, "asset_key", AssetKey)\n check.inst_param(event_type, "event_type", DagsterEventType)\n check.sequence_param(tag_keys, "tag_keys", of_type=str)\n check.opt_nullable_sequence_param(asset_partitions, "asset_partitions", of_type=str)\n check.opt_int_param(before_cursor, "before_cursor")\n check.opt_int_param(after_cursor, "after_cursor")\n\n latest_event_ids_subquery = self._latest_event_ids_by_partition_subquery(\n asset_key=asset_key,\n event_types=[event_type],\n asset_partitions=asset_partitions,\n before_cursor=before_cursor,\n after_cursor=after_cursor,\n )\n\n latest_tags_by_partition_query = (\n db_select(\n [\n latest_event_ids_subquery.c.partition,\n AssetEventTagsTable.c.key,\n AssetEventTagsTable.c.value,\n ]\n )\n .select_from(\n latest_event_ids_subquery.join(\n AssetEventTagsTable,\n AssetEventTagsTable.c.event_id == latest_event_ids_subquery.c.id,\n )\n )\n .where(AssetEventTagsTable.c.key.in_(tag_keys))\n )\n\n latest_tags_by_partition: Dict[str, Dict[str, str]] = defaultdict(dict)\n with self.index_connection() as conn:\n rows = conn.execute(latest_tags_by_partition_query).fetchall()\n\n for row in rows:\n latest_tags_by_partition[cast(str, row[0])][cast(str, row[1])] = cast(str, row[2])\n\n # convert defaultdict to dict\n return dict(latest_tags_by_partition)\n\n def get_latest_asset_partition_materialization_attempts_without_materializations(\n self, asset_key: AssetKey\n ) -> Mapping[str, Tuple[str, int]]:\n """Fetch the latest materialzation and materialization planned events for each partition of the given asset.\n Return the partitions that have a materialization planned event but no matching (same run) materialization event.\n These materializations could be in progress, or they could have failed. 
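# --- Hedged illustrative sketch (not part of the module above) ----------------------
# The method below keeps, per partition, the latest ASSET_MATERIALIZATION_PLANNED
# (run_id, event_id) pair and then drops any partition whose latest materialization
# came from that same run. The matching step in plain Python, with dict rows standing
# in for the fetched mappings:
def _planned_without_materialization(planned_rows, materialization_rows):
    planned_by_partition = {
        row["partition"]: (row["run_id"], row["id"]) for row in planned_rows
    }
    for row in materialization_rows:
        planned = planned_by_partition.get(row["partition"])
        if planned and planned[0] == row["run_id"]:
            # the planned run did materialize this partition; not an outstanding attempt
            planned_by_partition.pop(row["partition"])
    return planned_by_partition

_result = _planned_without_materialization(
    planned_rows=[
        {"partition": "2023-10-01", "run_id": "run_a", "id": 10},
        {"partition": "2023-10-02", "run_id": "run_b", "id": 11},
    ],
    materialization_rows=[{"partition": "2023-10-01", "run_id": "run_a"}],
)
assert _result == {"2023-10-02": ("run_b", 11)}
# -------------------------------------------------------------------------------------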
A separate query checking the run status\n is required to know.\n\n Returns a mapping of partition to [run id, event id].\n """\n check.inst_param(asset_key, "asset_key", AssetKey)\n\n latest_event_ids_subquery = self._latest_event_ids_by_partition_subquery(\n asset_key,\n [\n DagsterEventType.ASSET_MATERIALIZATION,\n DagsterEventType.ASSET_MATERIALIZATION_PLANNED,\n ],\n )\n\n latest_events_subquery = db_subquery(\n db_select(\n [\n SqlEventLogStorageTable.c.dagster_event_type,\n SqlEventLogStorageTable.c.partition,\n SqlEventLogStorageTable.c.run_id,\n SqlEventLogStorageTable.c.id,\n ]\n ).select_from(\n latest_event_ids_subquery.join(\n SqlEventLogStorageTable,\n SqlEventLogStorageTable.c.id == latest_event_ids_subquery.c.id,\n ),\n ),\n "latest_events_subquery",\n )\n\n materialization_planned_events = db_select(\n [\n latest_events_subquery.c.dagster_event_type,\n latest_events_subquery.c.partition,\n latest_events_subquery.c.run_id,\n latest_events_subquery.c.id,\n ]\n ).where(\n latest_events_subquery.c.dagster_event_type\n == DagsterEventType.ASSET_MATERIALIZATION_PLANNED.value\n )\n\n materialization_events = db_select(\n [\n latest_events_subquery.c.dagster_event_type,\n latest_events_subquery.c.partition,\n latest_events_subquery.c.run_id,\n ]\n ).where(\n latest_events_subquery.c.dagster_event_type\n == DagsterEventType.ASSET_MATERIALIZATION.value\n )\n\n with self.index_connection() as conn:\n materialization_planned_rows = db_fetch_mappings(conn, materialization_planned_events)\n materialization_rows = db_fetch_mappings(conn, materialization_events)\n\n materialization_planned_rows_by_partition = {\n cast(str, row["partition"]): (cast(str, row["run_id"]), cast(int, row["id"]))\n for row in materialization_planned_rows\n }\n for row in materialization_rows:\n if (\n row["partition"] in materialization_planned_rows_by_partition\n and materialization_planned_rows_by_partition[cast(str, row["partition"])][0]\n == row["run_id"]\n ):\n materialization_planned_rows_by_partition.pop(cast(str, row["partition"]))\n\n return materialization_planned_rows_by_partition\n\n def _check_partitions_table(self) -> None:\n # Guards against cases where the user is not running the latest migration for\n # partitions storage. Should be updated when the partitions storage schema changes.\n if not self.has_table("dynamic_partitions"):\n raise DagsterInvalidInvocationError(\n "Using dynamic partitions definitions requires the dynamic partitions table, which"\n " currently does not exist. 
Add this table by running `dagster"\n " instance migrate`."\n )\n\n def get_dynamic_partitions(self, partitions_def_name: str) -> Sequence[str]:\n """Get the list of partition keys for a partition definition."""\n self._check_partitions_table()\n columns = [\n DynamicPartitionsTable.c.partitions_def_name,\n DynamicPartitionsTable.c.partition,\n ]\n query = (\n db_select(columns)\n .where(DynamicPartitionsTable.c.partitions_def_name == partitions_def_name)\n .order_by(DynamicPartitionsTable.c.id)\n )\n with self.index_connection() as conn:\n rows = conn.execute(query).fetchall()\n\n return [cast(str, row[1]) for row in rows]\n\n def has_dynamic_partition(self, partitions_def_name: str, partition_key: str) -> bool:\n self._check_partitions_table()\n query = (\n db_select([DynamicPartitionsTable.c.partition])\n .where(\n db.and_(\n DynamicPartitionsTable.c.partitions_def_name == partitions_def_name,\n DynamicPartitionsTable.c.partition == partition_key,\n )\n )\n .limit(1)\n )\n with self.index_connection() as conn:\n results = conn.execute(query).fetchall()\n\n return len(results) > 0\n\n def add_dynamic_partitions(\n self, partitions_def_name: str, partition_keys: Sequence[str]\n ) -> None:\n self._check_partitions_table()\n with self.index_connection() as conn:\n existing_rows = conn.execute(\n db_select([DynamicPartitionsTable.c.partition]).where(\n db.and_(\n DynamicPartitionsTable.c.partition.in_(partition_keys),\n DynamicPartitionsTable.c.partitions_def_name == partitions_def_name,\n )\n )\n ).fetchall()\n existing_keys = set([row[0] for row in existing_rows])\n new_keys = [\n partition_key\n for partition_key in partition_keys\n if partition_key not in existing_keys\n ]\n\n if new_keys:\n conn.execute(\n DynamicPartitionsTable.insert(),\n [\n dict(partitions_def_name=partitions_def_name, partition=partition_key)\n for partition_key in new_keys\n ],\n )\n\n def delete_dynamic_partition(self, partitions_def_name: str, partition_key: str) -> None:\n self._check_partitions_table()\n with self.index_connection() as conn:\n conn.execute(\n DynamicPartitionsTable.delete().where(\n db.and_(\n DynamicPartitionsTable.c.partitions_def_name == partitions_def_name,\n DynamicPartitionsTable.c.partition == partition_key,\n )\n )\n )\n\n @property\n def supports_global_concurrency_limits(self) -> bool:\n return self.has_table(ConcurrencySlotsTable.name)\n\n def set_concurrency_slots(self, concurrency_key: str, num: int) -> None:\n """Allocate a set of concurrency slots.\n\n Args:\n concurrency_key (str): The key to allocate the slots for.\n num (int): The number of slots to allocate.\n """\n if num > MAX_CONCURRENCY_SLOTS:\n raise DagsterInvalidInvocationError(\n f"Cannot have more than {MAX_CONCURRENCY_SLOTS} slots per concurrency key."\n )\n if num < 0:\n raise DagsterInvalidInvocationError("Cannot have a negative number of slots.")\n\n keys_to_assign = None\n with self.index_connection() as conn:\n count_row = conn.execute(\n db_select([db.func.count()])\n .select_from(ConcurrencySlotsTable)\n .where(\n db.and_(\n ConcurrencySlotsTable.c.concurrency_key == concurrency_key,\n ConcurrencySlotsTable.c.deleted == False, # noqa: E712\n )\n )\n ).fetchone()\n existing = cast(int, count_row[0]) if count_row else 0\n\n if existing > num:\n # need to delete some slots, favoring ones where the slot is unallocated\n rows = conn.execute(\n db_select([ConcurrencySlotsTable.c.id])\n .select_from(ConcurrencySlotsTable)\n .where(\n db.and_(\n ConcurrencySlotsTable.c.concurrency_key == concurrency_key,\n 
ConcurrencySlotsTable.c.deleted == False, # noqa: E712\n )\n )\n .order_by(\n db_case([(ConcurrencySlotsTable.c.run_id.is_(None), 1)], else_=0).desc(),\n ConcurrencySlotsTable.c.id.desc(),\n )\n .limit(existing - num)\n ).fetchall()\n\n if rows:\n # mark rows as deleted\n conn.execute(\n ConcurrencySlotsTable.update()\n .values(deleted=True)\n .where(ConcurrencySlotsTable.c.id.in_([row[0] for row in rows]))\n )\n\n # actually delete rows that are marked as deleted and are not claimed... the rest\n # will be deleted when the slots are released by the free_concurrency_slots\n conn.execute(\n ConcurrencySlotsTable.delete().where(\n db.and_(\n ConcurrencySlotsTable.c.deleted == True, # noqa: E712\n ConcurrencySlotsTable.c.run_id == None, # noqa: E711\n )\n )\n )\n elif num > existing:\n # need to add some slots\n rows = [\n {\n "concurrency_key": concurrency_key,\n "run_id": None,\n "step_key": None,\n "deleted": False,\n }\n for _ in range(existing, num)\n ]\n conn.execute(ConcurrencySlotsTable.insert().values(rows))\n keys_to_assign = [concurrency_key for _ in range(existing, num)]\n\n if keys_to_assign:\n # we've added some slots... if there are any pending steps, we can assign them now or\n # they will be unutilized until free_concurrency_slots is called\n self.assign_pending_steps(keys_to_assign)\n\n def has_unassigned_slots(self, concurrency_key: str) -> bool:\n with self.index_connection() as conn:\n pending_row = conn.execute(\n db_select([db.func.count()])\n .select_from(PendingStepsTable)\n .where(\n db.and_(\n PendingStepsTable.c.concurrency_key == concurrency_key,\n PendingStepsTable.c.assigned_timestamp != None, # noqa: E711\n )\n )\n ).fetchone()\n slots = conn.execute(\n db_select([db.func.count()])\n .select_from(ConcurrencySlotsTable)\n .where(\n db.and_(\n ConcurrencySlotsTable.c.concurrency_key == concurrency_key,\n ConcurrencySlotsTable.c.deleted == False, # noqa: E712\n )\n )\n ).fetchone()\n pending_count = cast(int, pending_row[0]) if pending_row else 0\n slots_count = cast(int, slots[0]) if slots else 0\n return slots_count > pending_count\n\n def check_concurrency_claim(\n self, concurrency_key: str, run_id: str, step_key: str\n ) -> ConcurrencyClaimStatus:\n with self.index_connection() as conn:\n pending_row = conn.execute(\n db_select(\n [\n PendingStepsTable.c.assigned_timestamp,\n PendingStepsTable.c.priority,\n PendingStepsTable.c.create_timestamp,\n ]\n ).where(\n db.and_(\n PendingStepsTable.c.run_id == run_id,\n PendingStepsTable.c.step_key == step_key,\n PendingStepsTable.c.concurrency_key == concurrency_key,\n )\n )\n ).fetchone()\n\n if not pending_row:\n # no pending step pending_row exists, the slot is blocked and the enqueued timestamp is None\n return ConcurrencyClaimStatus(\n concurrency_key=concurrency_key,\n slot_status=ConcurrencySlotStatus.BLOCKED,\n priority=None,\n assigned_timestamp=None,\n enqueued_timestamp=None,\n )\n\n priority = cast(int, pending_row[1]) if pending_row[1] else None\n assigned_timestamp = cast(datetime, pending_row[0]) if pending_row[0] else None\n create_timestamp = cast(datetime, pending_row[2]) if pending_row[2] else None\n if assigned_timestamp is None:\n return ConcurrencyClaimStatus(\n concurrency_key=concurrency_key,\n slot_status=ConcurrencySlotStatus.BLOCKED,\n priority=priority,\n assigned_timestamp=None,\n enqueued_timestamp=create_timestamp,\n )\n\n # pending step is assigned, check to see if it's been claimed\n slot_row = conn.execute(\n db_select([db.func.count()]).where(\n db.and_(\n 
ConcurrencySlotsTable.c.concurrency_key == concurrency_key,\n ConcurrencySlotsTable.c.run_id == run_id,\n ConcurrencySlotsTable.c.step_key == step_key,\n )\n )\n ).fetchone()\n\n return ConcurrencyClaimStatus(\n concurrency_key=concurrency_key,\n slot_status=(\n ConcurrencySlotStatus.CLAIMED\n if slot_row and slot_row[0]\n else ConcurrencySlotStatus.BLOCKED\n ),\n priority=priority,\n assigned_timestamp=assigned_timestamp,\n enqueued_timestamp=create_timestamp,\n )\n\n def can_claim_from_pending(self, concurrency_key: str, run_id: str, step_key: str):\n with self.index_connection() as conn:\n row = conn.execute(\n db_select([PendingStepsTable.c.assigned_timestamp]).where(\n db.and_(\n PendingStepsTable.c.run_id == run_id,\n PendingStepsTable.c.step_key == step_key,\n PendingStepsTable.c.concurrency_key == concurrency_key,\n )\n )\n ).fetchone()\n return row and row[0] is not None\n\n def has_pending_step(self, concurrency_key: str, run_id: str, step_key: str):\n with self.index_connection() as conn:\n row = conn.execute(\n db_select([db.func.count()])\n .select_from(PendingStepsTable)\n .where(\n db.and_(\n PendingStepsTable.c.concurrency_key == concurrency_key,\n PendingStepsTable.c.run_id == run_id,\n PendingStepsTable.c.step_key == step_key,\n )\n )\n ).fetchone()\n return row and cast(int, row[0]) > 0\n\n def assign_pending_steps(self, concurrency_keys: Sequence[str]):\n if not concurrency_keys:\n return\n\n with self.index_connection() as conn:\n for key in concurrency_keys:\n row = conn.execute(\n db_select([PendingStepsTable.c.id])\n .where(\n db.and_(\n PendingStepsTable.c.concurrency_key == key,\n PendingStepsTable.c.assigned_timestamp == None, # noqa: E711\n )\n )\n .order_by(\n PendingStepsTable.c.priority.desc(),\n PendingStepsTable.c.create_timestamp.asc(),\n )\n .limit(1)\n ).fetchone()\n if row:\n conn.execute(\n PendingStepsTable.update()\n .where(PendingStepsTable.c.id == row[0])\n .values(assigned_timestamp=db.func.now())\n )\n\n def add_pending_step(\n self,\n concurrency_key: str,\n run_id: str,\n step_key: str,\n priority: Optional[int] = None,\n should_assign: bool = False,\n ):\n with self.index_connection() as conn:\n try:\n conn.execute(\n PendingStepsTable.insert().values(\n [\n dict(\n run_id=run_id,\n step_key=step_key,\n concurrency_key=concurrency_key,\n priority=priority or 0,\n assigned_timestamp=db.func.now() if should_assign else None,\n )\n ]\n )\n )\n except db_exc.IntegrityError:\n # do nothing\n pass\n\n def _remove_pending_steps(self, run_id: str, step_key: Optional[str] = None):\n query = PendingStepsTable.delete().where(PendingStepsTable.c.run_id == run_id)\n if step_key:\n query = query.where(PendingStepsTable.c.step_key == step_key)\n with self.index_connection() as conn:\n conn.execute(query)\n\n def claim_concurrency_slot(\n self, concurrency_key: str, run_id: str, step_key: str, priority: Optional[int] = None\n ) -> ConcurrencyClaimStatus:\n """Claim concurrency slot for step.\n\n Args:\n concurrency_keys (str): The concurrency key to claim.\n run_id (str): The run id to claim for.\n step_key (str): The step key to claim for.\n """\n # first, register the step by adding to pending queue\n if not self.has_pending_step(\n concurrency_key=concurrency_key, run_id=run_id, step_key=step_key\n ):\n has_unassigned_slots = self.has_unassigned_slots(concurrency_key)\n self.add_pending_step(\n concurrency_key=concurrency_key,\n run_id=run_id,\n step_key=step_key,\n priority=priority,\n should_assign=has_unassigned_slots,\n )\n\n # if the step is not 
assigned (i.e. has not been popped from queue), block the claim\n claim_status = self.check_concurrency_claim(\n concurrency_key=concurrency_key, run_id=run_id, step_key=step_key\n )\n if claim_status.is_claimed or not claim_status.is_assigned:\n return claim_status\n\n # attempt to claim a concurrency slot... this should generally work because we only assign\n # based on the number of unclaimed slots, but this should act as a safeguard, using the slot\n # rows as a semaphore\n slot_status = self._claim_concurrency_slot(\n concurrency_key=concurrency_key, run_id=run_id, step_key=step_key\n )\n return claim_status.with_slot_status(slot_status)\n\n def _claim_concurrency_slot(\n self, concurrency_key: str, run_id: str, step_key: str\n ) -> ConcurrencySlotStatus:\n """Claim a concurrency slot for the step. Helper method that is called for steps that are\n popped off the priority queue.\n\n Args:\n concurrency_key (str): The concurrency key to claim.\n run_id (str): The run id to claim a slot for.\n step_key (str): The step key to claim a slot for.\n """\n with self.index_connection() as conn:\n result = conn.execute(\n db_select([ConcurrencySlotsTable.c.id])\n .select_from(ConcurrencySlotsTable)\n .where(\n db.and_(\n ConcurrencySlotsTable.c.concurrency_key == concurrency_key,\n ConcurrencySlotsTable.c.step_key == None, # noqa: E711\n ConcurrencySlotsTable.c.deleted == False, # noqa: E712\n )\n )\n .with_for_update(skip_locked=True)\n .limit(1)\n ).fetchone()\n if not result or not result[0]:\n return ConcurrencySlotStatus.BLOCKED\n if not conn.execute(\n ConcurrencySlotsTable.update()\n .values(run_id=run_id, step_key=step_key)\n .where(ConcurrencySlotsTable.c.id == result[0])\n ).rowcount:\n return ConcurrencySlotStatus.BLOCKED\n\n return ConcurrencySlotStatus.CLAIMED\n\n def get_concurrency_keys(self) -> Set[str]:\n """Get the set of concurrency limited keys."""\n with self.index_connection() as conn:\n rows = conn.execute(\n db_select([ConcurrencySlotsTable.c.concurrency_key])\n .select_from(ConcurrencySlotsTable)\n .where(ConcurrencySlotsTable.c.deleted == False) # noqa: E712\n .distinct()\n ).fetchall()\n return {cast(str, row[0]) for row in rows}\n\n def get_concurrency_info(self, concurrency_key: str) -> ConcurrencyKeyInfo:\n """Get the list of concurrency slots for a given concurrency key.\n\n Args:\n concurrency_key (str): The concurrency key to get the slots for.\n\n Returns:\n List[Tuple[str, int]]: A list of tuples of run_id and the number of slots it is\n occupying for the given concurrency key.\n """\n with self.index_connection() as conn:\n slot_query = (\n db_select(\n [\n ConcurrencySlotsTable.c.run_id,\n ConcurrencySlotsTable.c.deleted,\n db.func.count().label("count"),\n ]\n )\n .select_from(ConcurrencySlotsTable)\n .where(ConcurrencySlotsTable.c.concurrency_key == concurrency_key)\n .group_by(ConcurrencySlotsTable.c.run_id, ConcurrencySlotsTable.c.deleted)\n )\n slot_rows = db_fetch_mappings(conn, slot_query)\n pending_query = (\n db_select(\n [\n PendingStepsTable.c.run_id,\n db_case(\n [(PendingStepsTable.c.assigned_timestamp.is_(None), False)],\n else_=True,\n ).label("is_assigned"),\n db.func.count().label("count"),\n ]\n )\n .select_from(PendingStepsTable)\n .where(PendingStepsTable.c.concurrency_key == concurrency_key)\n .group_by(PendingStepsTable.c.run_id, "is_assigned")\n )\n pending_rows = db_fetch_mappings(conn, pending_query)\n\n return ConcurrencyKeyInfo(\n concurrency_key=concurrency_key,\n slot_count=sum(\n [\n cast(int, slot_row["count"])\n for slot_row in 
slot_rows\n if not slot_row["deleted"]\n ]\n ),\n active_slot_count=sum(\n [cast(int, slot_row["count"]) for slot_row in slot_rows if slot_row["run_id"]]\n ),\n active_run_ids={\n cast(str, slot_row["run_id"]) for slot_row in slot_rows if slot_row["run_id"]\n },\n pending_step_count=sum(\n [cast(int, row["count"]) for row in pending_rows if not row["is_assigned"]]\n ),\n pending_run_ids={\n cast(str, row["run_id"]) for row in pending_rows if not row["is_assigned"]\n },\n assigned_step_count=sum(\n [cast(int, row["count"]) for row in pending_rows if row["is_assigned"]]\n ),\n assigned_run_ids={\n cast(str, row["run_id"]) for row in pending_rows if row["is_assigned"]\n },\n )\n\n def get_concurrency_run_ids(self) -> Set[str]:\n with self.index_connection() as conn:\n rows = conn.execute(db_select([PendingStepsTable.c.run_id]).distinct()).fetchall()\n return set([cast(str, row[0]) for row in rows])\n\n def free_concurrency_slots_for_run(self, run_id: str) -> None:\n freed_concurrency_keys = self._free_concurrency_slots(run_id=run_id)\n self._remove_pending_steps(run_id=run_id)\n if freed_concurrency_keys:\n # assign any pending steps that can now claim a slot\n self.assign_pending_steps(freed_concurrency_keys)\n\n def free_concurrency_slot_for_step(self, run_id: str, step_key: str) -> None:\n freed_concurrency_keys = self._free_concurrency_slots(run_id=run_id, step_key=step_key)\n self._remove_pending_steps(run_id=run_id, step_key=step_key)\n if freed_concurrency_keys:\n # assign any pending steps that can now claim a slot\n self.assign_pending_steps(freed_concurrency_keys)\n\n def _free_concurrency_slots(self, run_id: str, step_key: Optional[str] = None) -> Sequence[str]:\n """Frees concurrency slots for a given run/step.\n\n Args:\n run_id (str): The run id to free the slots for.\n step_key (Optional[str]): The step key to free the slots for. If not provided, all the\n slots for all the steps of the run will be freed.\n """\n with self.index_connection() as conn:\n # first delete any rows that apply and are marked as deleted. 
This happens when the\n # configured number of slots has been reduced, and some of the pruned slots included\n # ones that were already allocated to the run/step\n delete_query = ConcurrencySlotsTable.delete().where(\n db.and_(\n ConcurrencySlotsTable.c.run_id == run_id,\n ConcurrencySlotsTable.c.deleted == True, # noqa: E712\n )\n )\n if step_key:\n delete_query = delete_query.where(ConcurrencySlotsTable.c.step_key == step_key)\n conn.execute(delete_query)\n\n # next, fetch the slots to free up, while grabbing the concurrency keys so that we can\n # allocate any pending steps from the queue for the freed slots, if necessary\n select_query = (\n db_select([ConcurrencySlotsTable.c.id, ConcurrencySlotsTable.c.concurrency_key])\n .select_from(ConcurrencySlotsTable)\n .where(ConcurrencySlotsTable.c.run_id == run_id)\n .with_for_update(skip_locked=True)\n )\n if step_key:\n select_query = select_query.where(ConcurrencySlotsTable.c.step_key == step_key)\n rows = conn.execute(select_query).fetchall()\n if not rows:\n return []\n\n # now, actually free the slots\n conn.execute(\n ConcurrencySlotsTable.update()\n .values(run_id=None, step_key=None)\n .where(\n db.and_(\n ConcurrencySlotsTable.c.id.in_([row[0] for row in rows]),\n )\n )\n )\n\n # return the concurrency keys for the freed slots\n return [cast(str, row[1]) for row in rows]\n\n def store_asset_check_event(self, event: EventLogEntry, event_id: Optional[int]) -> None:\n check.inst_param(event, "event", EventLogEntry)\n check.opt_int_param(event_id, "event_id")\n\n check.invariant(\n self.supports_asset_checks,\n "Asset checks require a database schema migration. Run `dagster instance migrate`.",\n )\n\n if event.dagster_event_type == DagsterEventType.ASSET_CHECK_EVALUATION_PLANNED:\n self._store_asset_check_evaluation_planned(event, event_id)\n if event.dagster_event_type == DagsterEventType.ASSET_CHECK_EVALUATION:\n if event.run_id == "" or event.run_id is None:\n self._store_runless_asset_check_evaluation(event, event_id)\n else:\n self._update_asset_check_evaluation(event, event_id)\n\n def _store_asset_check_evaluation_planned(\n self, event: EventLogEntry, event_id: Optional[int]\n ) -> None:\n planned = cast(\n AssetCheckEvaluationPlanned, check.not_none(event.dagster_event).event_specific_data\n )\n with self.index_connection() as conn:\n conn.execute(\n AssetCheckExecutionsTable.insert().values(\n asset_key=planned.asset_key.to_string(),\n check_name=planned.check_name,\n run_id=event.run_id,\n execution_status=AssetCheckExecutionRecordStatus.PLANNED.value,\n evaluation_event=serialize_value(event),\n evaluation_event_timestamp=datetime.utcfromtimestamp(event.timestamp),\n )\n )\n\n def _store_runless_asset_check_evaluation(\n self, event: EventLogEntry, event_id: Optional[int]\n ) -> None:\n evaluation = cast(\n AssetCheckEvaluation, check.not_none(event.dagster_event).event_specific_data\n )\n with self.index_connection() as conn:\n conn.execute(\n AssetCheckExecutionsTable.insert().values(\n asset_key=evaluation.asset_key.to_string(),\n check_name=evaluation.check_name,\n run_id=event.run_id,\n execution_status=(\n AssetCheckExecutionRecordStatus.SUCCEEDED.value\n if evaluation.passed\n else AssetCheckExecutionRecordStatus.FAILED.value\n ),\n evaluation_event=serialize_value(event),\n evaluation_event_timestamp=datetime.utcfromtimestamp(event.timestamp),\n evaluation_event_storage_id=event_id,\n materialization_event_storage_id=(\n evaluation.target_materialization_data.storage_id\n if evaluation.target_materialization_data\n 
else None\n ),\n )\n )\n\n def _update_asset_check_evaluation(self, event: EventLogEntry, event_id: Optional[int]) -> None:\n evaluation = cast(\n AssetCheckEvaluation, check.not_none(event.dagster_event).event_specific_data\n )\n with self.index_connection() as conn:\n rows_updated = conn.execute(\n AssetCheckExecutionsTable.update()\n .where(\n # (asset_key, check_name, run_id) uniquely identifies the row created for the planned event\n db.and_(\n AssetCheckExecutionsTable.c.asset_key == evaluation.asset_key.to_string(),\n AssetCheckExecutionsTable.c.check_name == evaluation.check_name,\n AssetCheckExecutionsTable.c.run_id == event.run_id,\n )\n )\n .values(\n execution_status=(\n AssetCheckExecutionRecordStatus.SUCCEEDED.value\n if evaluation.passed\n else AssetCheckExecutionRecordStatus.FAILED.value\n ),\n evaluation_event=serialize_value(event),\n evaluation_event_timestamp=datetime.utcfromtimestamp(event.timestamp),\n evaluation_event_storage_id=event_id,\n materialization_event_storage_id=(\n evaluation.target_materialization_data.storage_id\n if evaluation.target_materialization_data\n else None\n ),\n )\n ).rowcount\n if rows_updated != 1:\n raise DagsterInvariantViolationError(\n "Expected to update one row for asset check evaluation, but updated"\n f" {rows_updated}."\n )\n\n def get_asset_check_execution_history(\n self,\n check_key: AssetCheckKey,\n limit: int,\n cursor: Optional[int] = None,\n ) -> Sequence[AssetCheckExecutionRecord]:\n check.inst_param(check_key, "key", AssetCheckKey)\n check.int_param(limit, "limit")\n check.opt_int_param(cursor, "cursor")\n\n query = (\n db_select(\n [\n AssetCheckExecutionsTable.c.id,\n AssetCheckExecutionsTable.c.run_id,\n AssetCheckExecutionsTable.c.execution_status,\n AssetCheckExecutionsTable.c.evaluation_event,\n AssetCheckExecutionsTable.c.create_timestamp,\n ]\n )\n .where(\n db.and_(\n AssetCheckExecutionsTable.c.asset_key == check_key.asset_key.to_string(),\n AssetCheckExecutionsTable.c.check_name == check_key.name,\n )\n )\n .order_by(AssetCheckExecutionsTable.c.id.desc())\n ).limit(limit)\n\n if cursor:\n query = query.where(AssetCheckExecutionsTable.c.id < cursor)\n\n with self.index_connection() as conn:\n rows = db_fetch_mappings(conn, query)\n\n return [AssetCheckExecutionRecord.from_db_row(row) for row in rows]\n\n def get_latest_asset_check_execution_by_key(\n self, check_keys: Sequence[AssetCheckKey]\n ) -> Mapping[AssetCheckKey, AssetCheckExecutionRecord]:\n if not check_keys:\n return {}\n\n latest_ids_subquery = db_subquery(\n db_select(\n [\n db.func.max(AssetCheckExecutionsTable.c.id).label("id"),\n ]\n )\n .where(\n db.and_(\n AssetCheckExecutionsTable.c.asset_key.in_(\n [key.asset_key.to_string() for key in check_keys]\n ),\n AssetCheckExecutionsTable.c.check_name.in_([key.name for key in check_keys]),\n )\n )\n .group_by(\n AssetCheckExecutionsTable.c.asset_key,\n AssetCheckExecutionsTable.c.check_name,\n )\n )\n\n query = db_select(\n [\n AssetCheckExecutionsTable.c.id,\n AssetCheckExecutionsTable.c.asset_key,\n AssetCheckExecutionsTable.c.check_name,\n AssetCheckExecutionsTable.c.run_id,\n AssetCheckExecutionsTable.c.execution_status,\n AssetCheckExecutionsTable.c.evaluation_event,\n AssetCheckExecutionsTable.c.create_timestamp,\n ]\n ).select_from(\n AssetCheckExecutionsTable.join(\n latest_ids_subquery,\n db.and_(\n AssetCheckExecutionsTable.c.id == latest_ids_subquery.c.id,\n ),\n )\n )\n\n with self.index_connection() as conn:\n rows = db_fetch_mappings(conn, query)\n\n return {\n AssetCheckKey(\n 
asset_key=check.not_none(AssetKey.from_db_string(cast(str, row["asset_key"]))),\n name=cast(str, row["check_name"]),\n ): AssetCheckExecutionRecord.from_db_row(row)\n for row in rows\n }\n\n @property\n def supports_asset_checks(self):\n return self.has_table(AssetCheckExecutionsTable.name)
\n\n\ndef _get_from_row(row: SqlAlchemyRow, column: str) -> object:\n """Utility function for extracting a column from a sqlalchemy row proxy, since '_asdict' is not\n supported in sqlalchemy 1.3.\n """\n if column not in row.keys():\n return None\n return row[column]\n
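Taken together, the concurrency-slot methods shown earlier in this module (``set_concurrency_slots``, ``claim_concurrency_slot``, ``free_concurrency_slot_for_step``) implement a pending-queue-plus-semaphore protocol over the slots table. The sketch below shows how they are intended to compose; it is illustrative only: the helper name and the ``"database"`` key are made up, and ``storage`` is assumed to be a SQL-backed ``SqlEventLogStorage`` whose schema includes the concurrency tables (the SQLite storages later in this listing report ``supports_global_concurrency_limits`` as ``False``).

.. code-block:: python

    from dagster._core.storage.event_log.sql_event_log import SqlEventLogStorage


    def run_step_with_concurrency_slot(
        storage: SqlEventLogStorage, run_id: str, step_key: str
    ) -> bool:
        # Size the slot pool for this key (resizes existing slots; no-op if unchanged).
        storage.set_concurrency_slots("database", 2)

        # Registers the step in the pending queue and tries to claim a slot.
        claim = storage.claim_concurrency_slot("database", run_id, step_key)
        if not claim.is_claimed:
            # Blocked: the step stays queued and is assigned when a slot frees up.
            return False

        try:
            pass  # ... do the work guarded by the "database" concurrency key ...
        finally:
            # Frees the slot so assign_pending_steps can hand it to a waiting step.
            storage.free_concurrency_slot_for_step(run_id, step_key)
        return True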
", "current_page_name": "_modules/dagster/_core/storage/event_log/sql_event_log", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.event_log.sql_event_log"}, "sqlite": {"consolidated_sqlite_event_log": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.event_log.sqlite.consolidated_sqlite_event_log

\nimport logging\nimport os\nfrom collections import defaultdict\nfrom contextlib import contextmanager\nfrom typing import Any, Mapping, Optional\n\nimport sqlalchemy as db\nfrom sqlalchemy.pool import NullPool\nfrom typing_extensions import Self\nfrom watchdog.events import PatternMatchingEventHandler\nfrom watchdog.observers import Observer\n\nimport dagster._check as check\nfrom dagster._config import StringSource\nfrom dagster._core.storage.dagster_run import DagsterRunStatus\nfrom dagster._core.storage.event_log.base import EventLogCursor\nfrom dagster._core.storage.sql import (\n    check_alembic_revision,\n    create_engine,\n    get_alembic_config,\n    run_alembic_upgrade,\n    stamp_alembic_rev,\n)\nfrom dagster._core.storage.sqlite import create_db_conn_string\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData\nfrom dagster._utils import mkdir_p\n\nfrom ..schema import SqlEventLogStorageMetadata\nfrom ..sql_event_log import SqlDbConnection, SqlEventLogStorage\n\nSQLITE_EVENT_LOG_FILENAME = "event_log"\n\n\n
[docs]class ConsolidatedSqliteEventLogStorage(SqlEventLogStorage, ConfigurableClass):\n """SQLite-backed consolidated event log storage intended for test cases only.\n\n Users should not directly instantiate this class; it is instantiated by internal machinery when\n ``dagster-webserver`` and ``dagster-graphql`` load, based on the values in the ``dagster.yaml`` file in\n ``$DAGSTER_HOME``. Configuration of this class should be done by setting values in that file.\n\n To explicitly specify the consolidated SQLite for event log storage, you can add a block such as\n the following to your ``dagster.yaml``:\n\n .. code-block:: YAML\n\n run_storage:\n module: dagster._core.storage.event_log\n class: ConsolidatedSqliteEventLogStorage\n config:\n base_dir: /path/to/dir\n\n The ``base_dir`` param tells the event log storage where on disk to store the database.\n """\n\n def __init__(self, base_dir, inst_data: Optional[ConfigurableClassData] = None):\n self._base_dir = check.str_param(base_dir, "base_dir")\n self._conn_string = create_db_conn_string(base_dir, SQLITE_EVENT_LOG_FILENAME)\n self._secondary_index_cache = {}\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n self._watchers = defaultdict(dict)\n self._obs = None\n\n if not os.path.exists(self.get_db_path()):\n self._init_db()\n\n super().__init__()\n\n @property\n def inst_data(self):\n return self._inst_data\n\n @classmethod\n def config_type(cls):\n return {"base_dir": StringSource}\n\n @classmethod\n def from_config_value(\n cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any]\n ) -> Self:\n return ConsolidatedSqliteEventLogStorage(inst_data=inst_data, **config_value)\n\n def _init_db(self):\n mkdir_p(self._base_dir)\n engine = create_engine(self._conn_string, poolclass=NullPool)\n alembic_config = get_alembic_config(__file__)\n\n should_mark_indexes = False\n with engine.connect() as connection:\n db_revision, head_revision = check_alembic_revision(alembic_config, connection)\n if not (db_revision and head_revision):\n SqlEventLogStorageMetadata.create_all(engine)\n connection.execute(db.text("PRAGMA journal_mode=WAL;"))\n stamp_alembic_rev(alembic_config, connection)\n should_mark_indexes = True\n\n if should_mark_indexes:\n # mark all secondary indexes\n self.reindex_events()\n self.reindex_assets()\n\n @contextmanager\n def _connect(self):\n engine = create_engine(self._conn_string, poolclass=NullPool)\n with engine.connect() as conn:\n with conn.begin():\n yield conn\n\n def run_connection(self, run_id: Optional[str]) -> SqlDbConnection:\n return self._connect()\n\n def index_connection(self):\n return self._connect()\n\n def has_table(self, table_name: str) -> bool:\n engine = create_engine(self._conn_string, poolclass=NullPool)\n return bool(engine.dialect.has_table(engine.connect(), table_name))\n\n def get_db_path(self):\n return os.path.join(self._base_dir, f"{SQLITE_EVENT_LOG_FILENAME}.db")\n\n def upgrade(self):\n alembic_config = get_alembic_config(__file__)\n with self._connect() as conn:\n run_alembic_upgrade(alembic_config, conn)\n\n def has_secondary_index(self, name):\n if name not in self._secondary_index_cache:\n self._secondary_index_cache[name] = super(\n ConsolidatedSqliteEventLogStorage, self\n ).has_secondary_index(name)\n return self._secondary_index_cache[name]\n\n def enable_secondary_index(self, name):\n super(ConsolidatedSqliteEventLogStorage, self).enable_secondary_index(name)\n if name in self._secondary_index_cache:\n del 
self._secondary_index_cache[name]\n\n def watch(self, run_id, cursor, callback):\n if not self._obs:\n self._obs = Observer()\n self._obs.start()\n self._obs.schedule(\n ConsolidatedSqliteEventLogStorageWatchdog(self), self._base_dir, True\n )\n\n self._watchers[run_id][callback] = cursor\n\n @property\n def supports_global_concurrency_limits(self) -> bool:\n return False\n\n def on_modified(self):\n keys = [\n (run_id, callback)\n for run_id, callback_dict in self._watchers.items()\n for callback, _ in callback_dict.items()\n ]\n for run_id, callback in keys:\n cursor = self._watchers[run_id][callback]\n\n # fetch events\n connection = self.get_records_for_run(run_id, cursor)\n\n # update cursor\n if connection.cursor:\n self._watchers[run_id][callback] = connection.cursor\n\n for record in connection.records:\n status = None\n try:\n status = callback(\n record.event_log_entry,\n str(EventLogCursor.from_storage_id(record.storage_id)),\n )\n except Exception:\n logging.exception("Exception in callback for event watch on run %s.", run_id)\n\n if (\n status == DagsterRunStatus.SUCCESS\n or status == DagsterRunStatus.FAILURE\n or status == DagsterRunStatus.CANCELED\n ):\n self.end_watch(run_id, callback)\n\n def end_watch(self, run_id, handler):\n if run_id in self._watchers and handler in self._watchers[run_id]:\n del self._watchers[run_id][handler]\n\n def dispose(self):\n if self._obs:\n self._obs.stop()\n self._obs.join(timeout=15)
\n\n\nclass ConsolidatedSqliteEventLogStorageWatchdog(PatternMatchingEventHandler):\n def __init__(self, event_log_storage, **kwargs):\n self._event_log_storage = check.inst_param(\n event_log_storage, "event_log_storage", ConsolidatedSqliteEventLogStorage\n )\n self._log_path = event_log_storage.get_db_path()\n super(ConsolidatedSqliteEventLogStorageWatchdog, self).__init__(\n patterns=[self._log_path], **kwargs\n )\n\n def on_modified(self, event):\n check.invariant(event.src_path == self._log_path)\n self._event_log_storage.on_modified()\n
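Because ``ConsolidatedSqliteEventLogStorage`` keeps every run in a single ``event_log.db`` file, it can also be constructed directly, which matches the test-only use case the docstring above calls out. A minimal sketch (assuming only that ``dagster`` is importable; the temporary directory stands in for ``base_dir``):

.. code-block:: python

    import tempfile

    from dagster._core.storage.event_log.sqlite.consolidated_sqlite_event_log import (
        ConsolidatedSqliteEventLogStorage,
    )

    with tempfile.TemporaryDirectory() as base_dir:
        storage = ConsolidatedSqliteEventLogStorage(base_dir=base_dir)
        try:
            # All runs share one SQLite database under base_dir.
            assert storage.get_db_path().startswith(base_dir)
            # This storage does not support global concurrency limits.
            assert storage.supports_global_concurrency_limits is False
        finally:
            storage.dispose()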
", "current_page_name": "_modules/dagster/_core/storage/event_log/sqlite/consolidated_sqlite_event_log", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.event_log.sqlite.consolidated_sqlite_event_log"}, "sqlite_event_log": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.event_log.sqlite.sqlite_event_log

\nimport contextlib\nimport glob\nimport logging\nimport os\nimport re\nimport sqlite3\nimport threading\nimport time\nfrom collections import defaultdict\nfrom contextlib import contextmanager\nfrom typing import TYPE_CHECKING, Any, ContextManager, Iterable, Iterator, Optional, Sequence\n\nimport sqlalchemy as db\nimport sqlalchemy.exc as db_exc\nfrom sqlalchemy.engine import Connection, Engine\nfrom sqlalchemy.pool import NullPool\nfrom tqdm import tqdm\nfrom watchdog.events import FileSystemEvent, PatternMatchingEventHandler\nfrom watchdog.observers import Observer\n\nimport dagster._check as check\nimport dagster._seven as seven\nfrom dagster._config import StringSource\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._core.definitions.events import AssetKey\nfrom dagster._core.errors import DagsterInvariantViolationError\nfrom dagster._core.event_api import EventHandlerFn\nfrom dagster._core.events import ASSET_CHECK_EVENTS, ASSET_EVENTS, EVENT_TYPE_TO_PIPELINE_RUN_STATUS\nfrom dagster._core.events.log import EventLogEntry\nfrom dagster._core.storage.dagster_run import DagsterRunStatus, RunsFilter\nfrom dagster._core.storage.event_log.base import EventLogCursor, EventLogRecord, EventRecordsFilter\nfrom dagster._core.storage.sql import (\n    AlembicVersion,\n    check_alembic_revision,\n    create_engine,\n    get_alembic_config,\n    run_alembic_upgrade,\n    stamp_alembic_rev,\n)\nfrom dagster._core.storage.sqlalchemy_compat import db_select\nfrom dagster._core.storage.sqlite import create_db_conn_string\nfrom dagster._serdes import (\n    ConfigurableClass,\n    ConfigurableClassData,\n)\nfrom dagster._serdes.errors import DeserializationError\nfrom dagster._serdes.serdes import deserialize_value\nfrom dagster._utils import mkdir_p\n\nfrom ..schema import SqlEventLogStorageMetadata, SqlEventLogStorageTable\nfrom ..sql_event_log import RunShardedEventsCursor, SqlEventLogStorage\n\nif TYPE_CHECKING:\n    from dagster._core.storage.sqlite_storage import SqliteStorageConfig\nINDEX_SHARD_NAME = "index"\n\n\n
[docs]class SqliteEventLogStorage(SqlEventLogStorage, ConfigurableClass):\n """SQLite-backed event log storage.\n\n Users should not directly instantiate this class; it is instantiated by internal machinery when\n ``dagster-webserver`` and ``dagster-graphql`` load, based on the values in the ``dagster.yaml`` file insqliteve\n ``$DAGSTER_HOME``. Configuration of this class should be done by setting values in that file.\n\n This is the default event log storage when none is specified in the ``dagster.yaml``.\n\n To explicitly specify SQLite for event log storage, you can add a block such as the following\n to your ``dagster.yaml``:\n\n .. code-block:: YAML\n\n event_log_storage:\n module: dagster._core.storage.event_log\n class: SqliteEventLogStorage\n config:\n base_dir: /path/to/dir\n\n The ``base_dir`` param tells the event log storage where on disk to store the databases. To\n improve concurrent performance, event logs are stored in a separate SQLite database for each\n run.\n """\n\n def __init__(self, base_dir: str, inst_data: Optional[ConfigurableClassData] = None):\n """Note that idempotent initialization of the SQLite database is done on a per-run_id\n basis in the body of connect, since each run is stored in a separate database.\n """\n self._base_dir = os.path.abspath(check.str_param(base_dir, "base_dir"))\n mkdir_p(self._base_dir)\n\n self._obs = None\n\n self._watchers = defaultdict(dict)\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n\n # Used to ensure that each run ID attempts to initialize its DB the first time it connects,\n # ensuring that the database will be created if it doesn't exist\n self._initialized_dbs = set()\n\n # Ensure that multiple threads (like the event log watcher) interact safely with each other\n self._db_lock = threading.Lock()\n\n if not os.path.exists(self.path_for_shard(INDEX_SHARD_NAME)):\n conn_string = self.conn_string_for_shard(INDEX_SHARD_NAME)\n engine = create_engine(conn_string, poolclass=NullPool)\n self._initdb(engine)\n self.reindex_events()\n self.reindex_assets()\n\n super().__init__()\n\n def upgrade(self) -> None:\n all_run_ids = self.get_all_run_ids()\n print(f"Updating event log storage for {len(all_run_ids)} runs on disk...") # noqa: T201\n alembic_config = get_alembic_config(__file__)\n if all_run_ids:\n for run_id in tqdm(all_run_ids):\n with self.run_connection(run_id) as conn:\n run_alembic_upgrade(alembic_config, conn, run_id)\n\n print("Updating event log storage for index db on disk...") # noqa: T201\n with self.index_connection() as conn:\n run_alembic_upgrade(alembic_config, conn, "index")\n\n self._initialized_dbs = set()\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n @classmethod\n def config_type(cls) -> UserConfigSchema:\n return {"base_dir": StringSource}\n\n @classmethod\n def from_config_value(\n cls, inst_data: Optional[ConfigurableClassData], config_value: "SqliteStorageConfig"\n ) -> "SqliteEventLogStorage":\n return SqliteEventLogStorage(inst_data=inst_data, **config_value)\n\n def get_all_run_ids(self) -> Sequence[str]:\n all_filenames = glob.glob(os.path.join(self._base_dir, "*.db"))\n return [\n os.path.splitext(os.path.basename(filename))[0]\n for filename in all_filenames\n if os.path.splitext(os.path.basename(filename))[0] != INDEX_SHARD_NAME\n ]\n\n def has_table(self, table_name: str) -> bool:\n conn_string = self.conn_string_for_shard(INDEX_SHARD_NAME)\n engine = create_engine(conn_string, 
poolclass=NullPool)\n with engine.connect() as conn:\n return bool(engine.dialect.has_table(conn, table_name))\n\n def path_for_shard(self, run_id: str) -> str:\n return os.path.join(self._base_dir, f"{run_id}.db")\n\n def conn_string_for_shard(self, shard_name: str) -> str:\n check.str_param(shard_name, "shard_name")\n return create_db_conn_string(self._base_dir, shard_name)\n\n def _initdb(self, engine: Engine) -> None:\n alembic_config = get_alembic_config(__file__)\n\n retry_limit = 10\n\n while True:\n try:\n with engine.connect() as connection:\n db_revision, head_revision = check_alembic_revision(alembic_config, connection)\n\n if not (db_revision and head_revision):\n SqlEventLogStorageMetadata.create_all(engine)\n connection.execute(db.text("PRAGMA journal_mode=WAL;"))\n stamp_alembic_rev(alembic_config, connection)\n\n break\n except (db_exc.DatabaseError, sqlite3.DatabaseError, sqlite3.OperationalError) as exc:\n # This is SQLite-specific handling for concurrency issues that can arise when\n # multiple processes (e.g. the dagster-webserver process and user code process) contend with\n # each other to init the db. When we hit the following errors, we know that another\n # process is on the case and we should retry.\n err_msg = str(exc)\n\n if not (\n re.search(r"table [A-Za-z_]* already exists", err_msg)\n or "database is locked" in err_msg\n or "UNIQUE constraint failed: alembic_version.version_num" in err_msg\n ):\n raise\n\n if retry_limit == 0:\n raise\n else:\n logging.info(\n "SqliteEventLogStorage._initdb: Encountered apparent concurrent init, "\n "retrying (%s retries left). Exception: %s",\n retry_limit,\n err_msg,\n )\n time.sleep(0.2)\n retry_limit -= 1\n\n @contextmanager\n def _connect(self, shard: str) -> Iterator[Connection]:\n with self._db_lock:\n check.str_param(shard, "shard")\n\n conn_string = self.conn_string_for_shard(shard)\n engine = create_engine(conn_string, poolclass=NullPool)\n\n if shard not in self._initialized_dbs:\n self._initdb(engine)\n self._initialized_dbs.add(shard)\n\n with engine.connect() as conn:\n with conn.begin():\n yield conn\n engine.dispose()\n\n def run_connection(self, run_id: Optional[str] = None) -> Any:\n return self._connect(run_id) # type: ignore # bad sig\n\n def index_connection(self) -> ContextManager[Connection]:\n return self._connect(INDEX_SHARD_NAME)\n\n def store_event(self, event: EventLogEntry) -> None:\n """Overridden method to replicate asset events in a central assets.db sqlite shard, enabling\n cross-run asset queries.\n\n Args:\n event (EventLogEntry): The event to store.\n """\n check.inst_param(event, "event", EventLogEntry)\n insert_event_statement = self.prepare_insert_event(event)\n run_id = event.run_id\n\n with self.run_connection(run_id) as conn:\n conn.execute(insert_event_statement)\n\n if event.is_dagster_event and event.dagster_event.asset_key: # type: ignore\n check.invariant(\n event.dagster_event_type in ASSET_EVENTS,\n "Can only store asset materializations, materialization_planned, and"\n " observations in index database",\n )\n\n event_id = None\n\n # mirror the event in the cross-run index database\n with self.index_connection() as conn:\n result = conn.execute(insert_event_statement)\n event_id = result.inserted_primary_key[0]\n\n self.store_asset_event(event, event_id)\n\n if event_id is None:\n raise DagsterInvariantViolationError(\n "Cannot store asset event tags for null event id."\n )\n\n self.store_asset_event_tags(event, event_id)\n\n if event.is_dagster_event and 
event.dagster_event_type in ASSET_CHECK_EVENTS:\n self.store_asset_check_event(event, None)\n\n if event.is_dagster_event and event.dagster_event_type in EVENT_TYPE_TO_PIPELINE_RUN_STATUS:\n # should mirror run status change events in the index shard\n with self.index_connection() as conn:\n result = conn.execute(insert_event_statement)\n\n def get_event_records(\n self,\n event_records_filter: EventRecordsFilter,\n limit: Optional[int] = None,\n ascending: bool = False,\n ) -> Iterable[EventLogRecord]:\n """Overridden method to enable cross-run event queries in sqlite.\n\n The record id in sqlite does not auto increment cross runs, so instead of fetching events\n after record id, we only fetch events whose runs updated after update_timestamp.\n """\n check.opt_inst_param(event_records_filter, "event_records_filter", EventRecordsFilter)\n check.opt_int_param(limit, "limit")\n check.bool_param(ascending, "ascending")\n\n is_asset_query = event_records_filter and event_records_filter.event_type in ASSET_EVENTS\n if is_asset_query:\n # asset materializations, observations and materialization planned events get mirrored\n # into the index shard, so no custom run shard-aware cursor logic needed\n return super(SqliteEventLogStorage, self).get_event_records(\n event_records_filter=event_records_filter, limit=limit, ascending=ascending\n )\n\n query = db_select([SqlEventLogStorageTable.c.id, SqlEventLogStorageTable.c.event])\n if event_records_filter.asset_key:\n asset_details = next(iter(self._get_assets_details([event_records_filter.asset_key])))\n else:\n asset_details = None\n\n if event_records_filter.after_cursor is not None and not isinstance(\n event_records_filter.after_cursor, RunShardedEventsCursor\n ):\n raise Exception("""\n Called `get_event_records` on a run-sharded event log storage with a cursor that\n is not run-aware. Add a RunShardedEventsCursor to your query filter\n or switch your instance configuration to use a non-run-sharded event log storage\n (e.g. 
PostgresEventLogStorage, ConsolidatedSqliteEventLogStorage)\n """)\n\n query = self._apply_filter_to_query(\n query=query,\n event_records_filter=event_records_filter,\n asset_details=asset_details,\n apply_cursor_filters=False, # run-sharded cursor filters don't really make sense\n )\n if limit:\n query = query.limit(limit)\n if ascending:\n query = query.order_by(SqlEventLogStorageTable.c.timestamp.asc())\n else:\n query = query.order_by(SqlEventLogStorageTable.c.timestamp.desc())\n\n # workaround for the run-shard sqlite to enable cross-run queries: get a list of run_ids\n # whose events may qualify the query, and then open run_connection per run_id at a time.\n run_updated_after = (\n event_records_filter.after_cursor.run_updated_after\n if isinstance(event_records_filter.after_cursor, RunShardedEventsCursor)\n else None\n )\n run_records = self._instance.get_run_records(\n filters=RunsFilter(updated_after=run_updated_after),\n order_by="update_timestamp",\n ascending=ascending,\n )\n\n event_records = []\n for run_record in run_records:\n run_id = run_record.dagster_run.run_id\n with self.run_connection(run_id) as conn:\n results = conn.execute(query).fetchall()\n\n for row_id, json_str in results:\n try:\n event_record = deserialize_value(json_str, EventLogEntry)\n event_records.append(\n EventLogRecord(storage_id=row_id, event_log_entry=event_record)\n )\n if limit and len(event_records) >= limit:\n break\n except DeserializationError:\n logging.warning(\n "Could not resolve event record as EventLogEntry for id `%s`.", row_id\n )\n except seven.JSONDecodeError:\n logging.warning("Could not parse event record id `%s`.", row_id)\n\n if limit and len(event_records) >= limit:\n break\n\n return event_records[:limit]\n\n def supports_event_consumer_queries(self) -> bool:\n return False\n\n def delete_events(self, run_id: str) -> None:\n with self.run_connection(run_id) as conn:\n self.delete_events_for_run(conn, run_id)\n\n # delete the mirrored event in the cross-run index database\n with self.index_connection() as conn:\n self.delete_events_for_run(conn, run_id)\n\n def wipe(self) -> None:\n # should delete all the run-sharded db files and drop the contents of the index\n for filename in (\n glob.glob(os.path.join(self._base_dir, "*.db"))\n + glob.glob(os.path.join(self._base_dir, "*.db-wal"))\n + glob.glob(os.path.join(self._base_dir, "*.db-shm"))\n ):\n if (\n not filename.endswith(f"{INDEX_SHARD_NAME}.db")\n and not filename.endswith(f"{INDEX_SHARD_NAME}.db-wal")\n and not filename.endswith(f"{INDEX_SHARD_NAME}.db-shm")\n ):\n with contextlib.suppress(FileNotFoundError):\n os.unlink(filename)\n\n self._initialized_dbs = set()\n self._wipe_index()\n\n def _delete_mirrored_events_for_asset_key(self, asset_key: AssetKey) -> None:\n with self.index_connection() as conn:\n conn.execute(\n SqlEventLogStorageTable.delete().where(\n SqlEventLogStorageTable.c.asset_key == asset_key.to_string(),\n )\n )\n\n def wipe_asset(self, asset_key: AssetKey) -> None:\n # default implementation will update the event_logs in the sharded dbs, and the asset_key\n # table in the asset shard, but will not remove the mirrored event_log events in the asset\n # shard\n super(SqliteEventLogStorage, self).wipe_asset(asset_key)\n self._delete_mirrored_events_for_asset_key(asset_key)\n\n def watch(self, run_id: str, cursor: Optional[str], callback: EventHandlerFn) -> None:\n if not self._obs:\n self._obs = Observer()\n self._obs.start()\n\n watchdog = SqliteEventLogStorageWatchdog(self, run_id, callback, cursor)\n 
self._watchers[run_id][callback] = (\n watchdog,\n self._obs.schedule(watchdog, self._base_dir, True),\n )\n\n def end_watch(self, run_id: str, handler: EventHandlerFn) -> None:\n if handler in self._watchers[run_id]:\n event_handler, watch = self._watchers[run_id][handler]\n self._obs.remove_handler_for_watch(event_handler, watch) # type: ignore # (possible none)\n del self._watchers[run_id][handler]\n\n def dispose(self) -> None:\n if self._obs:\n self._obs.stop()\n self._obs.join(timeout=15)\n\n def alembic_version(self) -> AlembicVersion:\n alembic_config = get_alembic_config(__file__)\n with self.index_connection() as conn:\n return check_alembic_revision(alembic_config, conn)\n\n @property\n def is_run_sharded(self) -> bool:\n return True\n\n @property\n def supports_global_concurrency_limits(self) -> bool:\n return False
\n\n\nclass SqliteEventLogStorageWatchdog(PatternMatchingEventHandler):\n def __init__(\n self,\n event_log_storage: SqliteEventLogStorage,\n run_id: str,\n callback: EventHandlerFn,\n cursor: Optional[str],\n **kwargs: Any,\n ):\n self._event_log_storage = check.inst_param(\n event_log_storage, "event_log_storage", SqliteEventLogStorage\n )\n self._run_id = check.str_param(run_id, "run_id")\n self._cb = check.callable_param(callback, "callback")\n self._log_path = event_log_storage.path_for_shard(run_id)\n self._cursor = cursor\n super(SqliteEventLogStorageWatchdog, self).__init__(patterns=[self._log_path], **kwargs)\n\n def _process_log(self) -> None:\n connection = self._event_log_storage.get_records_for_run(self._run_id, self._cursor)\n if connection.cursor:\n self._cursor = connection.cursor\n for record in connection.records:\n status = None\n try:\n status = self._cb(\n record.event_log_entry, str(EventLogCursor.from_storage_id(record.storage_id))\n )\n except Exception:\n logging.exception("Exception in callback for event watch on run %s.", self._run_id)\n\n if (\n status == DagsterRunStatus.SUCCESS\n or status == DagsterRunStatus.FAILURE\n or status == DagsterRunStatus.CANCELED\n ):\n self._event_log_storage.end_watch(self._run_id, self._cb)\n\n def on_modified(self, event: FileSystemEvent) -> None:\n check.invariant(event.src_path == self._log_path)\n self._process_log()\n
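The run-sharding scheme described in the class docstring and in the ``get_event_records`` override above is easiest to see on disk: every run writes to its own ``<run_id>.db`` shard while asset events are mirrored into a shared ``index.db`` shard. A small sketch (the run id is illustrative; only ``dagster`` is assumed to be installed):

.. code-block:: python

    import tempfile

    from dagster._core.storage.event_log.sqlite.sqlite_event_log import (
        SqliteEventLogStorage,
    )

    with tempfile.TemporaryDirectory() as base_dir:
        storage = SqliteEventLogStorage(base_dir=base_dir)
        try:
            assert storage.is_run_sharded
            # Each run gets its own "<run_id>.db" file next to the shared index shard.
            assert storage.path_for_shard("some_run_id").endswith("some_run_id.db")
            # Nothing has run yet, so only the index shard exists on disk.
            assert storage.get_all_run_ids() == []
        finally:
            storage.dispose()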
", "current_page_name": "_modules/dagster/_core/storage/event_log/sqlite/sqlite_event_log", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.event_log.sqlite.sqlite_event_log"}}}, "file_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.file_manager

\nimport io\nimport os\nimport shutil\nimport uuid\nfrom abc import ABC, abstractmethod\nfrom contextlib import contextmanager\nfrom typing import BinaryIO, ContextManager, Iterator, Optional, TextIO, Union\n\nfrom typing_extensions import TypeAlias\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._config import Field, StringSource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource, resource\nfrom dagster._core.execution.context.init import InitResourceContext\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._utils import mkdir_p\n\nfrom .temp_file_manager import TempfileManager\n\nIOStream: TypeAlias = Union[TextIO, BinaryIO]\n\n\n
[docs]class FileHandle(ABC):\n """A reference to a file as manipulated by a FileManager.\n\n Subclasses may handle files that are resident on the local file system, in an object store, or\n in any arbitrary place where a file can be stored.\n\n This exists to handle the very common case where you wish to write a computation that reads,\n transforms, and writes files, but where you also want the same code to work in local development\n as well as on a cluster where the files will be stored in a globally available object store\n such as S3.\n """\n\n @public\n @property\n @abstractmethod\n def path_desc(self) -> str:\n """A representation of the file path for display purposes only."""\n raise NotImplementedError()
\n\n\n
[docs]class LocalFileHandle(FileHandle):\n """A reference to a file on a local filesystem."""\n\n def __init__(self, path: str):\n self._path = check.str_param(path, "path")\n\n @public\n @property\n def path(self) -> str:\n """The file's path."""\n return self._path\n\n @public\n @property\n def path_desc(self) -> str:\n """A representation of the file path for display purposes only."""\n return self._path
\n\n\n
[docs]class FileManager(ABC):\n """Base class for all file managers in dagster.\n\n The file manager is an interface that can be implemented by resources to provide abstract\n access to a file system such as local disk, S3, or other cloud storage.\n\n For examples of usage, see the documentation of the concrete file manager implementations.\n """\n\n
[docs] @public\n @abstractmethod\n def copy_handle_to_local_temp(self, file_handle: FileHandle) -> str:\n """Copy a file represented by a file handle to a temp file.\n\n In an implementation built around an object store such as S3, this method would be expected\n to download the file from S3 to local filesystem in a location assigned by the standard\n library's :py:mod:`python:tempfile` module.\n\n Temp files returned by this method are *not* guaranteed to be reusable across solid\n boundaries. For files that must be available across solid boundaries, use the\n :py:meth:`~dagster._core.storage.file_manager.FileManager.read`,\n :py:meth:`~dagster._core.storage.file_manager.FileManager.read_data`,\n :py:meth:`~dagster._core.storage.file_manager.FileManager.write`, and\n :py:meth:`~dagster._core.storage.file_manager.FileManager.write_data` methods.\n\n Args:\n file_handle (FileHandle): The handle to the file to make available as a local temp file.\n\n Returns:\n str: Path to the local temp file.\n """\n raise NotImplementedError()
\n\n
[docs] @public\n @abstractmethod\n def delete_local_temp(self) -> None:\n """Delete all local temporary files created by previous calls to\n :py:meth:`~dagster._core.storage.file_manager.FileManager.copy_handle_to_local_temp`.\n\n Should typically only be called by framework implementors.\n """\n raise NotImplementedError()
\n\n
[docs] @public\n @abstractmethod\n def read(self, file_handle: FileHandle, mode: str = "rb") -> ContextManager[IOStream]:\n """Return a file-like stream for the file handle.\n\n This may incur an expensive network call for file managers backed by object stores\n such as S3.\n\n Args:\n file_handle (FileHandle): The file handle to make available as a stream.\n mode (str): The mode in which to open the file. Default: ``"rb"``.\n\n Returns:\n Union[TextIO, BinaryIO]: A file-like stream.\n """\n raise NotImplementedError()
\n\n
[docs] @public\n @abstractmethod\n def read_data(self, file_handle: FileHandle) -> bytes:\n """Return the bytes for a given file handle. This may incur an expensive network\n call for file managers backed by object stores such as s3.\n\n Args:\n file_handle (FileHandle): The file handle for which to return bytes.\n\n Returns:\n bytes: Bytes for a given file handle.\n """\n raise NotImplementedError()
\n\n
[docs] @public\n @abstractmethod\n def write(self, file_obj: IOStream, mode: str = "wb", ext: Optional[str] = None) -> FileHandle:\n """Write the bytes contained within the given file object into the file manager.\n\n Args:\n file_obj (Union[TextIO, StringIO]): A file-like object.\n mode (Optional[str]): The mode in which to write the file into the file manager.\n Default: ``"wb"``.\n ext (Optional[str]): For file managers that support file extensions, the extension with\n which to write the file. Default: ``None``.\n\n Returns:\n FileHandle: A handle to the newly created file.\n """\n raise NotImplementedError()
\n\n
[docs] @public\n @abstractmethod\n def write_data(self, data: bytes, ext: Optional[str] = None) -> FileHandle:\n """Write raw bytes into the file manager.\n\n Args:\n data (bytes): The bytes to write into the file manager.\n ext (Optional[str]): For file managers that support file extensions, the extension with\n which to write the file. Default: ``None``.\n\n Returns:\n FileHandle: A handle to the newly created file.\n """\n raise NotImplementedError()
\n\n\n
[docs]@dagster_maintained_resource\n@resource(config_schema={"base_dir": Field(StringSource, is_required=False)})\ndef local_file_manager(init_context: InitResourceContext) -> "LocalFileManager":\n """FileManager that provides abstract access to a local filesystem.\n\n By default, files will be stored in `<local_artifact_storage>/storage/file_manager` where\n `<local_artifact_storage>` can be configured the ``dagster.yaml`` file in ``$DAGSTER_HOME``.\n\n Implements the :py:class:`~dagster._core.storage.file_manager.FileManager` API.\n\n Examples:\n .. code-block:: python\n\n import tempfile\n\n from dagster import job, local_file_manager, op\n\n\n @op(required_resource_keys={"file_manager"})\n def write_files(context):\n fh_1 = context.resources.file_manager.write_data(b"foo")\n\n with tempfile.NamedTemporaryFile("w+") as fd:\n fd.write("bar")\n fd.seek(0)\n fh_2 = context.resources.file_manager.write(fd, mode="w", ext=".txt")\n\n return (fh_1, fh_2)\n\n\n @op(required_resource_keys={"file_manager"})\n def read_files(context, file_handles):\n fh_1, fh_2 = file_handles\n assert context.resources.file_manager.read_data(fh_2) == b"bar"\n fd = context.resources.file_manager.read(fh_2, mode="r")\n assert fd.read() == "foo"\n fd.close()\n\n\n @job(resource_defs={"file_manager": local_file_manager})\n def files_pipeline():\n read_files(write_files())\n\n Or to specify the file directory:\n\n .. code-block:: python\n\n @job(\n resource_defs={\n "file_manager": local_file_manager.configured({"base_dir": "/my/base/dir"})\n }\n )\n def files_pipeline():\n read_files(write_files())\n """\n return LocalFileManager(\n base_dir=init_context.resource_config.get(\n "base_dir", os.path.join(init_context.instance.storage_directory(), "file_manager") # type: ignore # (possible none)\n )\n )
\n\n\ndef check_file_like_obj(obj: object) -> None:\n check.invariant(obj and hasattr(obj, "read") and hasattr(obj, "write"))\n\n\nclass LocalFileManager(FileManager):\n def __init__(self, base_dir: str):\n self.base_dir = base_dir\n self._base_dir_ensured = False\n self._temp_file_manager = TempfileManager()\n\n @staticmethod\n def for_instance(instance: DagsterInstance, run_id: str) -> "LocalFileManager":\n check.inst_param(instance, "instance", DagsterInstance)\n return LocalFileManager(instance.file_manager_directory(run_id))\n\n def ensure_base_dir_exists(self) -> None:\n if self._base_dir_ensured:\n return\n\n mkdir_p(self.base_dir)\n\n self._base_dir_ensured = True\n\n def copy_handle_to_local_temp(self, file_handle: FileHandle) -> str:\n check.inst_param(file_handle, "file_handle", FileHandle)\n with self.read(file_handle, "rb") as handle_obj: # type: ignore # (??)\n temp_file_obj = self._temp_file_manager.tempfile()\n temp_file_obj.write(handle_obj.read())\n temp_name = temp_file_obj.name\n temp_file_obj.close()\n return temp_name\n\n @contextmanager\n def read(self, file_handle: LocalFileHandle, mode: str = "rb") -> Iterator[IOStream]:\n check.inst_param(file_handle, "file_handle", LocalFileHandle)\n check.str_param(mode, "mode")\n check.param_invariant(mode in {"r", "rb"}, "mode")\n\n encoding = None if mode == "rb" else "utf8"\n with open(file_handle.path, mode, encoding=encoding) as file_obj:\n yield file_obj # type: ignore # (??)\n\n def read_data(self, file_handle: LocalFileHandle) -> bytes:\n with self.read(file_handle, mode="rb") as file_obj:\n return file_obj.read() # type: ignore # (??)\n\n def write_data(self, data: bytes, ext: Optional[str] = None):\n check.inst_param(data, "data", bytes)\n return self.write(io.BytesIO(data), mode="wb", ext=ext)\n\n def write(\n self, file_obj: IOStream, mode: str = "wb", ext: Optional[str] = None\n ) -> LocalFileHandle:\n check_file_like_obj(file_obj)\n check.opt_str_param(ext, "ext")\n\n self.ensure_base_dir_exists()\n\n dest_file_path = os.path.join(\n self.base_dir, str(uuid.uuid4()) + (("." + ext) if ext is not None else "")\n )\n\n encoding = None if "b" in mode else "utf8"\n with open(dest_file_path, mode, encoding=encoding) as dest_file_obj:\n shutil.copyfileobj(file_obj, dest_file_obj) # type: ignore # (??)\n return LocalFileHandle(dest_file_path)\n\n def delete_local_temp(self) -> None:\n self._temp_file_manager.close()\n
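``LocalFileManager`` is the concrete implementation backing ``local_file_manager``; outside of a resource context it can be exercised directly. A brief sketch of the round trip through ``write_data`` and ``read_data`` (the directory and payload are illustrative):

.. code-block:: python

    import tempfile

    from dagster._core.storage.file_manager import LocalFileManager

    with tempfile.TemporaryDirectory() as base_dir:
        manager = LocalFileManager(base_dir=base_dir)
        handle = manager.write_data(b"hello")          # returns a LocalFileHandle
        assert handle.path.startswith(base_dir)        # stored under base_dir
        assert manager.read_data(handle) == b"hello"   # bytes round-trip
        manager.delete_local_temp()                    # clears any local temp copies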
", "current_page_name": "_modules/dagster/_core/storage/file_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.file_manager"}, "fs_io_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.fs_io_manager

\nimport os\nimport pickle\nfrom typing import TYPE_CHECKING, Any, Optional\n\nfrom pydantic import Field\n\nimport dagster._check as check\nfrom dagster import (\n    DagsterInvariantViolationError,\n    Field as DagsterField,\n)\nfrom dagster._annotations import experimental\nfrom dagster._config import StringSource\nfrom dagster._config.pythonic_config import ConfigurableIOManagerFactory\nfrom dagster._core.definitions.events import AssetKey, AssetMaterialization\nfrom dagster._core.definitions.metadata import MetadataValue\nfrom dagster._core.execution.context.init import InitResourceContext\nfrom dagster._core.execution.context.input import InputContext\nfrom dagster._core.execution.context.output import OutputContext\nfrom dagster._core.storage.io_manager import IOManager, dagster_maintained_io_manager, io_manager\nfrom dagster._core.storage.upath_io_manager import UPathIOManager\nfrom dagster._utils import PICKLE_PROTOCOL, mkdir_p\n\nif TYPE_CHECKING:\n    from typing_extensions import Literal\n    from upath import UPath\n\n\n
[docs]class FilesystemIOManager(ConfigurableIOManagerFactory["PickledObjectFilesystemIOManager"]):\n """Built-in filesystem IO manager that stores and retrieves values using pickling.\n\n The base directory that the pickle files live inside is determined by:\n\n * The IO manager's "base_dir" configuration value, if specified. Otherwise...\n * A "storage/" directory underneath the value for "local_artifact_storage" in your dagster.yaml\n file, if specified. Otherwise...\n * A "storage/" directory underneath the directory that the DAGSTER_HOME environment variable\n points to, if that environment variable is specified. Otherwise...\n * A temporary directory.\n\n Assigns each op output to a unique filepath containing run ID, step key, and output name.\n Assigns each asset to a single filesystem path, at "<base_dir>/<asset_key>". If the asset key\n has multiple components, the final component is used as the name of the file, and the preceding\n components as parent directories under the base_dir.\n\n Subsequent materializations of an asset will overwrite previous materializations of that asset.\n So, with a base directory of "/my/base/path", an asset with key\n `AssetKey(["one", "two", "three"])` would be stored in a file called "three" in a directory\n with path "/my/base/path/one/two/".\n\n Example usage:\n\n\n 1. Attach an IO manager to a set of assets using the reserved resource key ``"io_manager"``.\n\n .. code-block:: python\n\n from dagster import Definitions, asset, FilesystemIOManager\n\n @asset\n def asset1():\n # create df ...\n return df\n\n @asset\n def asset2(asset1):\n return asset1[:5]\n\n defs = Definitions(\n assets=[asset1, asset2],\n resources={\n "io_manager": FilesystemIOManager(base_dir="/my/base/path")\n },\n )\n\n\n 2. Specify a job-level IO manager using the reserved resource key ``"io_manager"``,\n which will set the given IO manager on all ops in a job.\n\n .. code-block:: python\n\n from dagster import FilesystemIOManager, job, op\n\n @op\n def op_a():\n # create df ...\n return df\n\n @op\n def op_b(df):\n return df[:5]\n\n @job(\n resource_defs={\n "io_manager": FilesystemIOManager(base_dir="/my/base/path")\n }\n )\n def job():\n op_b(op_a())\n\n\n 3. Specify IO manager on :py:class:`Out`, which allows you to set different IO managers on\n different step outputs.\n\n .. code-block:: python\n\n from dagster import FilesystemIOManager, job, op, Out\n\n @op(out=Out(io_manager_key="my_io_manager"))\n def op_a():\n # create df ...\n return df\n\n @op\n def op_b(df):\n return df[:5]\n\n @job(resource_defs={"my_io_manager": FilesystemIOManager()})\n def job():\n op_b(op_a())\n\n """\n\n base_dir: Optional[str] = Field(default=None, description="Base directory for storing files.")\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def create_io_manager(self, context: InitResourceContext) -> "PickledObjectFilesystemIOManager":\n base_dir = self.base_dir or check.not_none(context.instance).storage_directory()\n return PickledObjectFilesystemIOManager(base_dir=base_dir)
\n\n\n
[docs]@dagster_maintained_io_manager\n@io_manager(\n config_schema=FilesystemIOManager.to_config_schema(),\n description="Built-in filesystem IO manager that stores and retrieves values using pickling.",\n)\ndef fs_io_manager(init_context: InitResourceContext) -> "PickledObjectFilesystemIOManager":\n """Built-in filesystem IO manager that stores and retrieves values using pickling.\n\n The base directory that the pickle files live inside is determined by:\n\n * The IO manager's "base_dir" configuration value, if specified. Otherwise...\n * A "storage/" directory underneath the value for "local_artifact_storage" in your dagster.yaml\n file, if specified. Otherwise...\n * A "storage/" directory underneath the directory that the DAGSTER_HOME environment variable\n points to, if that environment variable is specified. Otherwise...\n * A temporary directory.\n\n Assigns each op output to a unique filepath containing run ID, step key, and output name.\n Assigns each asset to a single filesystem path, at "<base_dir>/<asset_key>". If the asset key\n has multiple components, the final component is used as the name of the file, and the preceding\n components as parent directories under the base_dir.\n\n Subsequent materializations of an asset will overwrite previous materializations of that asset.\n So, with a base directory of "/my/base/path", an asset with key\n `AssetKey(["one", "two", "three"])` would be stored in a file called "three" in a directory\n with path "/my/base/path/one/two/".\n\n Example usage:\n\n\n 1. Attach an IO manager to a set of assets using the reserved resource key ``"io_manager"``.\n\n .. code-block:: python\n\n from dagster import Definitions, asset, fs_io_manager\n\n @asset\n def asset1():\n # create df ...\n return df\n\n @asset\n def asset2(asset1):\n return asset1[:5]\n\n defs = Definitions(\n assets=[asset1, asset2],\n resources={\n "io_manager": fs_io_manager.configured({"base_dir": "/my/base/path"})\n },\n )\n\n\n 2. Specify a job-level IO manager using the reserved resource key ``"io_manager"``,\n which will set the given IO manager on all ops in a job.\n\n .. code-block:: python\n\n from dagster import fs_io_manager, job, op\n\n @op\n def op_a():\n # create df ...\n return df\n\n @op\n def op_b(df):\n return df[:5]\n\n @job(\n resource_defs={\n "io_manager": fs_io_manager.configured({"base_dir": "/my/base/path"})\n }\n )\n def job():\n op_b(op_a())\n\n\n 3. Specify IO manager on :py:class:`Out`, which allows you to set different IO managers on\n different step outputs.\n\n .. code-block:: python\n\n from dagster import fs_io_manager, job, op, Out\n\n @op(out=Out(io_manager_key="my_io_manager"))\n def op_a():\n # create df ...\n return df\n\n @op\n def op_b(df):\n return df[:5]\n\n @job(resource_defs={"my_io_manager": fs_io_manager})\n def job():\n op_b(op_a())\n\n """\n return FilesystemIOManager.from_resource_context(init_context)
\n\n\nclass PickledObjectFilesystemIOManager(UPathIOManager):\n """Built-in filesystem IO manager that stores and retrieves values using pickling.\n It is compatible with local and remote filesystems via `universal-pathlib` and `fsspec`.\n Learn more about how to use remote filesystems here: https://github.com/fsspec/universal_pathlib.\n\n Args:\n base_dir (Optional[str]): base directory where all step outputs that use this IO\n manager will be stored.\n **kwargs: additional keyword arguments for `universal_pathlib.UPath`.\n """\n\n extension: str = "" # TODO: maybe change this to .pickle? Leaving blank for compatibility.\n\n def __init__(self, base_dir=None, **kwargs):\n from upath import UPath\n\n self.base_dir = check.opt_str_param(base_dir, "base_dir")\n\n super().__init__(base_path=UPath(base_dir, **kwargs))\n\n def dump_to_path(self, context: OutputContext, obj: Any, path: "UPath"):\n try:\n with path.open("wb") as file:\n pickle.dump(obj, file, PICKLE_PROTOCOL)\n except (AttributeError, RecursionError, ImportError, pickle.PicklingError) as e:\n executor = context.step_context.job_def.executor_def\n\n if isinstance(e, RecursionError):\n # if obj can't be pickled because of RecursionError then __str__() will also\n # throw a RecursionError\n obj_repr = f"{obj.__class__} exceeds recursion limit and"\n else:\n obj_repr = obj.__str__()\n\n raise DagsterInvariantViolationError(\n f"Object {obj_repr} is not picklable. You are currently using the "\n f"fs_io_manager and the {executor.name}. You will need to use a different "\n "io manager to continue using this output. For example, you can use the "\n "mem_io_manager with the in_process_executor.\\n"\n "For more information on io managers, visit "\n "https://docs.dagster.io/concepts/io-management/io-managers \\n"\n "For more information on executors, visit "\n "https://docs.dagster.io/deployment/executors#overview"\n ) from e\n\n def load_from_path(self, context: InputContext, path: "UPath") -> Any:\n with path.open("rb") as file:\n return pickle.load(file)\n\n\nclass CustomPathPickledObjectFilesystemIOManager(IOManager):\n """Built-in filesystem IO manager that stores and retrieves values using pickling and\n allows users to specify a file path for each output.\n\n Args:\n base_dir (Optional[str]): base directory where all step outputs that use this IO\n manager will be stored.\n """\n\n def __init__(self, base_dir: Optional[str] = None):\n self.base_dir = check.opt_str_param(base_dir, "base_dir")\n self.write_mode: Literal["wb"] = "wb"\n self.read_mode: Literal["rb"] = "rb"\n\n def _get_path(self, path: str) -> str:\n return os.path.join(self.base_dir, path) # type: ignore # (possible none)\n\n def handle_output(self, context: OutputContext, obj: object):\n """Pickle the data and store the object to a custom file path.\n\n This method emits an AssetMaterialization event so the assets will be tracked by the\n Asset Catalog.\n """\n check.inst_param(context, "context", OutputContext)\n metadata = context.metadata\n path = check.str_param(metadata.get("path"), "metadata.path") # type: ignore # (possible none)\n\n filepath = self._get_path(path)\n\n # Ensure path exists\n mkdir_p(os.path.dirname(filepath))\n context.log.debug(f"Writing file at: {filepath}")\n\n with open(filepath, self.write_mode) as write_obj:\n pickle.dump(obj, write_obj, PICKLE_PROTOCOL)\n\n return AssetMaterialization(\n asset_key=AssetKey([context.job_name, context.step_key, context.name]),\n metadata={"path": MetadataValue.path(os.path.abspath(filepath))},\n
)\n\n def load_input(self, context: InputContext) -> object:\n """Unpickle the file from a given file path and load it into a data object."""\n check.inst_param(context, "context", InputContext)\n metadata = context.upstream_output.metadata # type: ignore # (possible none)\n path = check.str_param(metadata.get("path"), "metadata.path") # type: ignore # (possible none)\n filepath = self._get_path(path)\n context.log.debug(f"Loading file from: {filepath}")\n\n with open(filepath, self.read_mode) as read_obj:\n return pickle.load(read_obj)\n\n\n@dagster_maintained_io_manager\n@io_manager(config_schema={"base_dir": DagsterField(StringSource, is_required=True)})\n@experimental\ndef custom_path_fs_io_manager(\n init_context: InitResourceContext,\n) -> CustomPathPickledObjectFilesystemIOManager:\n """Built-in IO manager that allows users to customize the output file path for each output definition.\n\n It requires users to specify a base directory where all step outputs will be stored. It\n serializes and deserializes output values (assets) using pickling and stores the pickled object\n in the user-provided file paths.\n\n Example usage:\n\n .. code-block:: python\n\n from dagster import Out, custom_path_fs_io_manager, job, op\n\n @op(out=Out(metadata={"path": "path/to/sample_output"}))\n def sample_data(df):\n return df[:5]\n\n my_custom_path_fs_io_manager = custom_path_fs_io_manager.configured(\n {"base_dir": "path/to/basedir"}\n )\n\n @job(resource_defs={"io_manager": my_custom_path_fs_io_manager})\n def my_job():\n sample_data()\n\n """\n return CustomPathPickledObjectFilesystemIOManager(\n base_dir=init_context.resource_config.get("base_dir")\n )\n
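Because the pickled object lands at ``<base_dir>/<path metadata>``, it can be read back directly once the job finishes. A hedged sketch, with hypothetical op, job, and path names:

.. code-block:: python

    import pickle

    from dagster import Out, custom_path_fs_io_manager, job, op

    @op(out=Out(metadata={"path": "sample_output"}))
    def emit_numbers():
        return [1, 2, 3]

    @job(
        resource_defs={
            "io_manager": custom_path_fs_io_manager.configured({"base_dir": "/tmp/custom_io"})
        }
    )
    def numbers_job():
        emit_numbers()

    numbers_job.execute_in_process()
    # The output was pickled to <base_dir>/<path>, so it can be unpickled directly.
    with open("/tmp/custom_io/sample_output", "rb") as f:
        print(pickle.load(f))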
", "current_page_name": "_modules/dagster/_core/storage/fs_io_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.fs_io_manager"}, "input_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.input_manager

\nfrom abc import ABC, abstractmethod\nfrom functools import update_wrapper\nfrom typing import TYPE_CHECKING, AbstractSet, Callable, Optional, Union, cast, overload\n\nfrom typing_extensions import TypeAlias, TypeGuard\n\nimport dagster._check as check\nfrom dagster._core.decorator_utils import has_at_least_one_parameter\nfrom dagster._core.definitions.config import is_callable_valid_config_arg\nfrom dagster._core.definitions.definition_config_schema import (\n    CoercableToConfigSchema,\n    IDefinitionConfigSchema,\n    convert_user_facing_definition_config_schema,\n)\nfrom dagster._core.definitions.resource_definition import ResourceDefinition, ResourceFunction\n\nif TYPE_CHECKING:\n    from dagster._core.execution.context.input import InputContext\n\nInputLoadFn: TypeAlias = Union[\n    Callable[["InputContext"], object],\n    Callable[[], object],\n]\n\n\n
[docs]class InputManager(ABC):\n """Base interface for classes that are responsible for loading op inputs."""\n\n @abstractmethod\n def load_input(self, context: "InputContext") -> object:\n """The user-defined read method that loads an input to an op.\n\n Args:\n context (InputContext): The input context.\n\n Returns:\n Any: The data object.\n """
\n\n\nclass IInputManagerDefinition:\n @property\n @abstractmethod\n def input_config_schema(self) -> IDefinitionConfigSchema:\n """The schema for per-input configuration for inputs that are managed by this\n input manager.\n """\n\n\n
[docs]class InputManagerDefinition(ResourceDefinition, IInputManagerDefinition):\n """Definition of an input manager resource.\n\n Input managers load op inputs.\n\n An InputManagerDefinition is a :py:class:`ResourceDefinition` whose resource_fn returns an\n :py:class:`InputManager`.\n\n The easiest way to create an InputManagerDefinition is with the\n :py:func:`@input_manager <input_manager>` decorator.\n """\n\n def __init__(\n self,\n resource_fn: ResourceFunction,\n config_schema: Optional[CoercableToConfigSchema] = None,\n description: Optional[str] = None,\n input_config_schema: Optional[CoercableToConfigSchema] = None,\n required_resource_keys: Optional[AbstractSet[str]] = None,\n version: Optional[str] = None,\n ):\n self._input_config_schema = convert_user_facing_definition_config_schema(\n input_config_schema\n )\n super(InputManagerDefinition, self).__init__(\n resource_fn=resource_fn,\n config_schema=config_schema,\n description=description,\n required_resource_keys=required_resource_keys,\n version=version,\n )\n\n @property\n def input_config_schema(self) -> IDefinitionConfigSchema:\n return self._input_config_schema\n\n def copy_for_configured(\n self,\n description: Optional[str],\n config_schema: CoercableToConfigSchema,\n ) -> "InputManagerDefinition":\n return InputManagerDefinition(\n config_schema=config_schema,\n description=description or self.description,\n resource_fn=self.resource_fn,\n required_resource_keys=self.required_resource_keys,\n input_config_schema=self.input_config_schema,\n )
\n\n\n@overload\ndef input_manager(\n config_schema: InputLoadFn,\n) -> InputManagerDefinition: ...\n\n\n@overload\ndef input_manager(\n config_schema: Optional[CoercableToConfigSchema] = None,\n description: Optional[str] = None,\n input_config_schema: Optional[CoercableToConfigSchema] = None,\n required_resource_keys: Optional[AbstractSet[str]] = None,\n version: Optional[str] = None,\n) -> Callable[[InputLoadFn], InputManagerDefinition]: ...\n\n\n
[docs]def input_manager(\n config_schema: Union[InputLoadFn, Optional[CoercableToConfigSchema]] = None,\n description: Optional[str] = None,\n input_config_schema: Optional[CoercableToConfigSchema] = None,\n required_resource_keys: Optional[AbstractSet[str]] = None,\n version: Optional[str] = None,\n) -> Union[InputManagerDefinition, Callable[[InputLoadFn], InputManagerDefinition]]:\n """Define an input manager.\n\n Input managers load op inputs, either from upstream outputs or by providing default values.\n\n The decorated function should accept a :py:class:`InputContext` and resource config, and return\n a loaded object that will be passed into one of the inputs of an op.\n\n The decorator produces an :py:class:`InputManagerDefinition`.\n\n Args:\n config_schema (Optional[ConfigSchema]): The schema for the resource-level config. If not\n set, Dagster will accept any config provided.\n description (Optional[str]): A human-readable description of the resource.\n input_config_schema (Optional[ConfigSchema]): A schema for the input-level config. Each\n input that uses this input manager can be configured separately using this config.\n If not set, Dagster will accept any config provided.\n required_resource_keys (Optional[Set[str]]): Keys for the resources required by the input\n manager.\n version (Optional[str]): (Experimental) the version of the input manager definition.\n\n **Examples:**\n\n .. code-block:: python\n\n from dagster import input_manager, op, job, In\n\n @input_manager\n def csv_loader(_):\n return read_csv("some/path")\n\n @op(ins={"input1": In(input_manager_key="csv_loader_key")})\n def my_op(_, input1):\n do_stuff(input1)\n\n @job(resource_defs={"csv_loader_key": csv_loader})\n def my_job():\n my_op()\n\n @input_manager(config_schema={"base_dir": str})\n def csv_loader(context):\n return read_csv(context.resource_config["base_dir"] + "/some/path")\n\n @input_manager(input_config_schema={"path": str})\n def csv_loader(context):\n return read_csv(context.config["path"])\n """\n if _is_input_load_fn(config_schema):\n return _InputManagerDecoratorCallable()(config_schema)\n\n def _wrap(load_fn: InputLoadFn) -> InputManagerDefinition:\n return _InputManagerDecoratorCallable(\n config_schema=cast(CoercableToConfigSchema, config_schema),\n description=description,\n version=version,\n input_config_schema=input_config_schema,\n required_resource_keys=required_resource_keys,\n )(load_fn)\n\n return _wrap
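One common use of the decorator is to supply an otherwise-unconnected op input from an input manager, as in this minimal sketch (all names are hypothetical):

.. code-block:: python

    from dagster import In, input_manager, job, op

    @input_manager
    def hardcoded_numbers(_context):
        return [1, 2, 3]

    @op(ins={"numbers": In(input_manager_key="numbers_loader")})
    def total(numbers):
        return sum(numbers)

    @job(resource_defs={"numbers_loader": hardcoded_numbers})
    def totals_job():
        total()

    result = totals_job.execute_in_process()
    assert result.output_for_node("total") == 6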
\n\n\ndef _is_input_load_fn(obj: Union[InputLoadFn, CoercableToConfigSchema]) -> TypeGuard[InputLoadFn]:\n return callable(obj) and not is_callable_valid_config_arg(obj)\n\n\nclass InputManagerWrapper(InputManager):\n def __init__(self, load_fn: InputLoadFn):\n self._load_fn = load_fn\n\n def load_input(self, context: "InputContext") -> object:\n # the @input_manager decorated function (self._load_fn) may return a direct value that\n # should be used or an instance of an InputManager. So we call self._load_fn and see if the\n # result is an InputManager. If so we call it's load_input method\n intermediate = (\n # type-ignore because function being used as attribute\n self._load_fn(context)\n if has_at_least_one_parameter(self._load_fn)\n else self._load_fn() # type: ignore # (strict type guard)\n )\n\n if isinstance(intermediate, InputManager):\n return intermediate.load_input(context)\n return intermediate\n\n\nclass _InputManagerDecoratorCallable:\n def __init__(\n self,\n config_schema: CoercableToConfigSchema = None,\n description: Optional[str] = None,\n version: Optional[str] = None,\n input_config_schema: CoercableToConfigSchema = None,\n required_resource_keys: Optional[AbstractSet[str]] = None,\n ):\n self.config_schema = config_schema\n self.description = check.opt_str_param(description, "description")\n self.version = check.opt_str_param(version, "version")\n self.input_config_schema = input_config_schema\n self.required_resource_keys = required_resource_keys\n\n def __call__(self, load_fn: InputLoadFn) -> InputManagerDefinition:\n check.callable_param(load_fn, "load_fn")\n\n def _resource_fn(_):\n return InputManagerWrapper(load_fn)\n\n input_manager_def = InputManagerDefinition(\n resource_fn=_resource_fn,\n config_schema=self.config_schema,\n description=self.description,\n version=self.version,\n input_config_schema=self.input_config_schema,\n required_resource_keys=self.required_resource_keys,\n )\n\n # `update_wrapper` typing cannot currently handle a Union of Callables correctly\n update_wrapper(input_manager_def, wrapped=load_fn) # type: ignore\n\n return input_manager_def\n
", "current_page_name": "_modules/dagster/_core/storage/input_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.input_manager"}, "io_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.io_manager

\nfrom abc import abstractmethod\nfrom functools import update_wrapper\nfrom typing import TYPE_CHECKING, AbstractSet, Any, Callable, Optional, Set, Union, cast, overload\n\nfrom typing_extensions import TypeAlias, TypeGuard\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._config import UserConfigSchema\nfrom dagster._core.definitions.config import is_callable_valid_config_arg\nfrom dagster._core.definitions.definition_config_schema import (\n    CoercableToConfigSchema,\n    IDefinitionConfigSchema,\n    convert_user_facing_definition_config_schema,\n)\nfrom dagster._core.definitions.resource_definition import ResourceDefinition\nfrom dagster._core.storage.input_manager import IInputManagerDefinition, InputManager\nfrom dagster._core.storage.output_manager import IOutputManagerDefinition, OutputManager\n\nfrom ..decorator_utils import get_function_params\n\nif TYPE_CHECKING:\n    from dagster._core.execution.context.init import InitResourceContext\n    from dagster._core.execution.context.input import InputContext\n    from dagster._core.execution.context.output import OutputContext\n\nIOManagerFunctionWithContext = Callable[["InitResourceContext"], "IOManager"]\nIOManagerFunction: TypeAlias = Union[\n    IOManagerFunctionWithContext,\n    Callable[[], "IOManager"],\n]\n\n\ndef is_io_manager_context_provided(\n    fn: IOManagerFunction,\n) -> TypeGuard[IOManagerFunctionWithContext]:\n    return len(get_function_params(fn)) >= 1\n\n\n
[docs]class IOManagerDefinition(ResourceDefinition, IInputManagerDefinition, IOutputManagerDefinition):\n """Definition of an IO manager resource.\n\n IOManagers are used to store op outputs and load them as inputs to downstream ops.\n\n An IOManagerDefinition is a :py:class:`ResourceDefinition` whose `resource_fn` returns an\n :py:class:`IOManager`.\n\n The easiest way to create an IOManagerDefinition is with the :py:func:`@io_manager <io_manager>`\n decorator.\n """\n\n def __init__(\n self,\n resource_fn: IOManagerFunction,\n config_schema: CoercableToConfigSchema = None,\n description: Optional[str] = None,\n required_resource_keys: Optional[AbstractSet[str]] = None,\n version: Optional[str] = None,\n input_config_schema: CoercableToConfigSchema = None,\n output_config_schema: CoercableToConfigSchema = None,\n ):\n self._input_config_schema = convert_user_facing_definition_config_schema(\n input_config_schema\n )\n # Unlike other configurable objects, whose config schemas default to Any,\n # output_config_schema defaults to None. This is because IOManager input / output config\n # shares config namespace with dagster type loaders.\n self._output_config_schema = (\n convert_user_facing_definition_config_schema(output_config_schema)\n if output_config_schema is not None\n else None\n )\n super(IOManagerDefinition, self).__init__(\n resource_fn=resource_fn,\n config_schema=config_schema,\n description=description,\n required_resource_keys=required_resource_keys,\n version=version,\n )\n\n @property\n def input_config_schema(self) -> IDefinitionConfigSchema:\n return self._input_config_schema\n\n @property\n def output_config_schema(self) -> Optional[IDefinitionConfigSchema]:\n return self._output_config_schema\n\n def copy_for_configured(\n self,\n description: Optional[str],\n config_schema: CoercableToConfigSchema,\n ) -> "IOManagerDefinition":\n io_def = IOManagerDefinition(\n config_schema=config_schema,\n description=description or self.description,\n resource_fn=self.resource_fn,\n required_resource_keys=self.required_resource_keys,\n input_config_schema=self.input_config_schema,\n output_config_schema=self.output_config_schema,\n )\n\n io_def._dagster_maintained = self._is_dagster_maintained() # noqa: SLF001\n\n return io_def\n\n
[docs] @public\n @staticmethod\n def hardcoded_io_manager(\n value: "IOManager", description: Optional[str] = None\n ) -> "IOManagerDefinition":\n """A helper function that creates an ``IOManagerDefinition`` with a hardcoded IOManager.\n\n Args:\n value (IOManager): A hardcoded IO Manager which helps mock the definition.\n description ([Optional[str]]): The description of the IO Manager. Defaults to None.\n\n Returns:\n [IOManagerDefinition]: A hardcoded resource.\n """\n check.inst_param(value, "value", IOManager)\n return IOManagerDefinition(resource_fn=lambda _init_context: value, description=description)
\n\n\n
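``hardcoded_io_manager`` is mainly useful for wiring a pre-built ``IOManager`` instance into tests. A sketch under the assumption that ``InMemoryIOManager`` is importable from the top-level ``dagster`` package (op and job names are hypothetical):

.. code-block:: python

    from dagster import InMemoryIOManager, IOManagerDefinition, job, op

    @op
    def produce():
        return 42

    @op
    def consume(value):
        return value + 1

    # Swap in a hardcoded in-memory IO manager so the test never touches disk.
    @job(
        resource_defs={
            "io_manager": IOManagerDefinition.hardcoded_io_manager(InMemoryIOManager())
        }
    )
    def arithmetic_job():
        consume(produce())

    assert arithmetic_job.execute_in_process().output_for_node("consume") == 43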
[docs]class IOManager(InputManager, OutputManager):\n """Base class for user-provided IO managers.\n\n IOManagers are used to store op outputs and load them as inputs to downstream ops.\n\n Extend this class to handle how objects are loaded and stored. Users should implement\n ``handle_output`` to store an object and ``load_input`` to retrieve an object.\n """\n\n
[docs] @public\n @abstractmethod\n def load_input(self, context: "InputContext") -> Any:\n """User-defined method that loads an input to an op.\n\n Args:\n context (InputContext): The input context, which describes the input that's being loaded\n and the upstream output that's being loaded from.\n\n Returns:\n Any: The data object.\n """
\n\n
[docs] @public\n @abstractmethod\n def handle_output(self, context: "OutputContext", obj: Any) -> None:\n """User-defined method that stores an output of an op.\n\n Args:\n context (OutputContext): The context of the step output that produces this object.\n obj (Any): The object, returned by the op, to be stored.\n """
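Implementing these two methods is all a custom IO manager needs. The following is an illustrative sketch (class name, path scheme, and JSON serialization are assumptions, not part of the source) that keys files off ``context.get_identifier()``:

.. code-block:: python

    import json
    import os

    from dagster import InputContext, IOManager, OutputContext

    class JsonFileIOManager(IOManager):
        """Illustrative IO manager that persists each output as a JSON file."""

        def __init__(self, base_dir: str):
            self._base_dir = base_dir

        def _path_for(self, context) -> str:
            # get_identifier() yields a sequence such as (run_id, step_key, output_name),
            # which is joined here into a single filename.
            return os.path.join(self._base_dir, "_".join(context.get_identifier()) + ".json")

        def handle_output(self, context: OutputContext, obj) -> None:
            os.makedirs(self._base_dir, exist_ok=True)
            with open(self._path_for(context), "w") as f:
                json.dump(obj, f)

        def load_input(self, context: InputContext):
            with open(self._path_for(context)) as f:
                return json.load(f)

Only JSON-serializable values are handled here; the built-in managers in this module fall back to pickling for arbitrary Python objects.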
\n\n\n@overload\ndef io_manager(config_schema: IOManagerFunction) -> IOManagerDefinition: ...\n\n\n@overload\ndef io_manager(\n config_schema: CoercableToConfigSchema = None,\n description: Optional[str] = None,\n output_config_schema: CoercableToConfigSchema = None,\n input_config_schema: CoercableToConfigSchema = None,\n required_resource_keys: Optional[Set[str]] = None,\n version: Optional[str] = None,\n) -> Callable[[IOManagerFunction], IOManagerDefinition]: ...\n\n\n
[docs]def io_manager(\n config_schema: Union[IOManagerFunction, CoercableToConfigSchema] = None,\n description: Optional[str] = None,\n output_config_schema: CoercableToConfigSchema = None,\n input_config_schema: CoercableToConfigSchema = None,\n required_resource_keys: Optional[Set[str]] = None,\n version: Optional[str] = None,\n) -> Union[IOManagerDefinition, Callable[[IOManagerFunction], IOManagerDefinition],]:\n """Define an IO manager.\n\n IOManagers are used to store op outputs and load them as inputs to downstream ops.\n\n The decorated function should accept an :py:class:`InitResourceContext` and return an\n :py:class:`IOManager`.\n\n Args:\n config_schema (Optional[ConfigSchema]): The schema for the resource config. Configuration\n data available in `init_context.resource_config`. If not set, Dagster will accept any\n config provided.\n description(Optional[str]): A human-readable description of the resource.\n output_config_schema (Optional[ConfigSchema]): The schema for per-output config. If not set,\n no per-output configuration will be allowed.\n input_config_schema (Optional[ConfigSchema]): The schema for per-input config. If not set,\n Dagster will accept any config provided.\n required_resource_keys (Optional[Set[str]]): Keys for the resources required by the object\n manager.\n version (Optional[str]): (Experimental) The version of a resource function. Two wrapped\n resource functions should only have the same version if they produce the same resource\n definition when provided with the same inputs.\n\n **Examples:**\n\n .. code-block:: python\n\n class MyIOManager(IOManager):\n def handle_output(self, context, obj):\n write_csv("some/path")\n\n def load_input(self, context):\n return read_csv("some/path")\n\n @io_manager\n def my_io_manager(init_context):\n return MyIOManager()\n\n @op(out=Out(io_manager_key="my_io_manager_key"))\n def my_op(_):\n return do_stuff()\n\n @job(resource_defs={"my_io_manager_key": my_io_manager})\n def my_job():\n my_op()\n\n """\n if callable(config_schema) and not is_callable_valid_config_arg(config_schema):\n config_schema = cast(IOManagerFunction, config_schema)\n return _IOManagerDecoratorCallable()(config_schema)\n\n def _wrap(resource_fn: IOManagerFunction) -> IOManagerDefinition:\n return _IOManagerDecoratorCallable(\n config_schema=cast(Optional[UserConfigSchema], config_schema),\n description=description,\n required_resource_keys=required_resource_keys,\n version=version,\n output_config_schema=output_config_schema,\n input_config_schema=input_config_schema,\n )(resource_fn)\n\n return _wrap
\n\n\ndef dagster_maintained_io_manager(io_manager_def: IOManagerDefinition) -> IOManagerDefinition:\n io_manager_def._dagster_maintained = True # noqa: SLF001\n return io_manager_def\n\n\nclass _IOManagerDecoratorCallable:\n def __init__(\n self,\n config_schema: CoercableToConfigSchema = None,\n description: Optional[str] = None,\n output_config_schema: CoercableToConfigSchema = None,\n input_config_schema: CoercableToConfigSchema = None,\n required_resource_keys: Optional[Set[str]] = None,\n version: Optional[str] = None,\n ):\n # type validation happens in IOManagerDefinition\n self.config_schema = config_schema\n self.description = description\n self.required_resource_keys = required_resource_keys\n self.version = version\n self.output_config_schema = output_config_schema\n self.input_config_schema = input_config_schema\n\n def __call__(self, fn: IOManagerFunction) -> IOManagerDefinition:\n check.callable_param(fn, "fn")\n\n io_manager_def = IOManagerDefinition(\n resource_fn=fn,\n config_schema=self.config_schema,\n description=self.description,\n required_resource_keys=self.required_resource_keys,\n version=self.version,\n output_config_schema=self.output_config_schema,\n input_config_schema=self.input_config_schema,\n )\n\n # `update_wrapper` typing cannot currently handle a Union of Callables correctly\n update_wrapper(io_manager_def, wrapped=fn) # type: ignore\n\n return io_manager_def\n
", "current_page_name": "_modules/dagster/_core/storage/io_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.io_manager"}, "local_compute_log_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.local_compute_log_manager

\nimport hashlib\nimport os\nimport shutil\nimport sys\nfrom collections import defaultdict\nfrom contextlib import contextmanager\nfrom typing import IO, TYPE_CHECKING, Generator, Iterator, Mapping, Optional, Sequence, Tuple\n\nfrom typing_extensions import Final\nfrom watchdog.events import PatternMatchingEventHandler\nfrom watchdog.observers.polling import PollingObserver\n\nfrom dagster import (\n    Field,\n    Float,\n    StringSource,\n    _check as check,\n)\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._core.execution.compute_logs import mirror_stream_to_file\nfrom dagster._core.storage.dagster_run import DagsterRun\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData\nfrom dagster._seven import json\nfrom dagster._utils import ensure_dir, ensure_file, touch_file\n\nfrom .captured_log_manager import (\n    CapturedLogContext,\n    CapturedLogData,\n    CapturedLogManager,\n    CapturedLogMetadata,\n    CapturedLogSubscription,\n)\nfrom .compute_log_manager import (\n    MAX_BYTES_FILE_READ,\n    ComputeIOType,\n    ComputeLogFileData,\n    ComputeLogManager,\n    ComputeLogSubscription,\n)\n\nif TYPE_CHECKING:\n    from dagster._core.storage.cloud_storage_compute_log_manager import LogSubscription\n\nDEFAULT_WATCHDOG_POLLING_TIMEOUT: Final = 2.5\n\nIO_TYPE_EXTENSION: Final[Mapping[ComputeIOType, str]] = {\n    ComputeIOType.STDOUT: "out",\n    ComputeIOType.STDERR: "err",\n}\n\nMAX_FILENAME_LENGTH: Final = 255\n\n\n
[docs]class LocalComputeLogManager(CapturedLogManager, ComputeLogManager, ConfigurableClass):\n """Stores copies of stdout & stderr for each compute step locally on disk."""\n\n def __init__(\n self,\n base_dir: str,\n polling_timeout: Optional[float] = None,\n inst_data: Optional[ConfigurableClassData] = None,\n ):\n self._base_dir = base_dir\n self._polling_timeout = check.opt_float_param(\n polling_timeout, "polling_timeout", DEFAULT_WATCHDOG_POLLING_TIMEOUT\n )\n self._subscription_manager = LocalComputeLogSubscriptionManager(self)\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n @property\n def polling_timeout(self) -> float:\n return self._polling_timeout\n\n @classmethod\n def config_type(cls) -> UserConfigSchema:\n return {\n "base_dir": StringSource,\n "polling_timeout": Field(Float, is_required=False),\n }\n\n @classmethod\n def from_config_value(\n cls, inst_data: Optional[ConfigurableClassData], config_value\n ) -> "LocalComputeLogManager":\n return LocalComputeLogManager(inst_data=inst_data, **config_value)\n\n @contextmanager\n def capture_logs(self, log_key: Sequence[str]) -> Generator[CapturedLogContext, None, None]:\n outpath = self.get_captured_local_path(log_key, IO_TYPE_EXTENSION[ComputeIOType.STDOUT])\n errpath = self.get_captured_local_path(log_key, IO_TYPE_EXTENSION[ComputeIOType.STDERR])\n with mirror_stream_to_file(sys.stdout, outpath), mirror_stream_to_file(sys.stderr, errpath):\n yield CapturedLogContext(log_key)\n\n # leave artifact on filesystem so that we know the capture is completed\n touch_file(self.complete_artifact_path(log_key))\n\n @contextmanager\n def open_log_stream(\n self, log_key: Sequence[str], io_type: ComputeIOType\n ) -> Iterator[Optional[IO]]:\n path = self.get_captured_local_path(log_key, IO_TYPE_EXTENSION[io_type])\n ensure_file(path)\n with open(path, "+a", encoding="utf-8") as f:\n yield f\n\n def is_capture_complete(self, log_key: Sequence[str]) -> bool:\n return os.path.exists(self.complete_artifact_path(log_key))\n\n def get_log_data(\n self, log_key: Sequence[str], cursor: Optional[str] = None, max_bytes: Optional[int] = None\n ) -> CapturedLogData:\n stdout_cursor, stderr_cursor = self.parse_cursor(cursor)\n stdout, stdout_offset = self._read_bytes(\n log_key, ComputeIOType.STDOUT, offset=stdout_cursor, max_bytes=max_bytes\n )\n stderr, stderr_offset = self._read_bytes(\n log_key, ComputeIOType.STDERR, offset=stderr_cursor, max_bytes=max_bytes\n )\n return CapturedLogData(\n log_key=log_key,\n stdout=stdout,\n stderr=stderr,\n cursor=self.build_cursor(stdout_offset, stderr_offset),\n )\n\n def get_log_metadata(self, log_key: Sequence[str]) -> CapturedLogMetadata:\n return CapturedLogMetadata(\n stdout_location=self.get_captured_local_path(\n log_key, IO_TYPE_EXTENSION[ComputeIOType.STDOUT]\n ),\n stderr_location=self.get_captured_local_path(\n log_key, IO_TYPE_EXTENSION[ComputeIOType.STDERR]\n ),\n stdout_download_url=self.get_captured_log_download_url(log_key, ComputeIOType.STDOUT),\n stderr_download_url=self.get_captured_log_download_url(log_key, ComputeIOType.STDERR),\n )\n\n def delete_logs(\n self, log_key: Optional[Sequence[str]] = None, prefix: Optional[Sequence[str]] = None\n ):\n if log_key:\n paths = [\n self.get_captured_local_path(log_key, IO_TYPE_EXTENSION[ComputeIOType.STDOUT]),\n self.get_captured_local_path(log_key, IO_TYPE_EXTENSION[ComputeIOType.STDERR]),\n 
self.get_captured_local_path(\n log_key, IO_TYPE_EXTENSION[ComputeIOType.STDOUT], partial=True\n ),\n self.get_captured_local_path(\n log_key, IO_TYPE_EXTENSION[ComputeIOType.STDERR], partial=True\n ),\n self.get_captured_local_path(log_key, "complete"),\n ]\n for path in paths:\n if os.path.exists(path) and os.path.isfile(path):\n os.remove(path)\n elif prefix:\n dir_to_delete = os.path.join(self._base_dir, *prefix)\n if os.path.exists(dir_to_delete) and os.path.isdir(dir_to_delete):\n # recursively delete all files in dir\n shutil.rmtree(dir_to_delete)\n else:\n check.failed("Must pass in either `log_key` or `prefix` argument to delete_logs")\n\n def _read_bytes(\n self,\n log_key: Sequence[str],\n io_type: ComputeIOType,\n offset: Optional[int] = 0,\n max_bytes: Optional[int] = None,\n ):\n path = self.get_captured_local_path(log_key, IO_TYPE_EXTENSION[io_type])\n return self.read_path(path, offset or 0, max_bytes)\n\n def parse_cursor(self, cursor: Optional[str] = None) -> Tuple[int, int]:\n # Translates a string cursor into a set of byte offsets for stdout, stderr\n if not cursor:\n return 0, 0\n\n parts = cursor.split(":")\n if not parts or len(parts) != 2:\n return 0, 0\n\n stdout, stderr = [int(_) for _ in parts]\n return stdout, stderr\n\n def build_cursor(self, stdout_offset: int, stderr_offset: int) -> str:\n return f"{stdout_offset}:{stderr_offset}"\n\n def complete_artifact_path(self, log_key):\n return self.get_captured_local_path(log_key, "complete")\n\n def read_path(\n self,\n path: str,\n offset: int = 0,\n max_bytes: Optional[int] = None,\n ):\n if not os.path.exists(path) or not os.path.isfile(path):\n return None, offset\n\n with open(path, "rb") as f:\n f.seek(offset, os.SEEK_SET)\n if max_bytes is None:\n data = f.read()\n else:\n data = f.read(max_bytes)\n new_offset = f.tell()\n return data, new_offset\n\n def get_captured_log_download_url(self, log_key, io_type):\n check.inst_param(io_type, "io_type", ComputeIOType)\n url = "/logs"\n for part in log_key:\n url = f"{url}/{part}"\n\n return f"{url}/{IO_TYPE_EXTENSION[io_type]}"\n\n def get_captured_local_path(self, log_key: Sequence[str], extension: str, partial=False):\n [*namespace, filebase] = log_key\n filename = f"{filebase}.{extension}"\n if partial:\n filename = f"{filename}.partial"\n if len(filename) > MAX_FILENAME_LENGTH:\n filename = "{}.{}".format(hashlib.md5(filebase.encode("utf-8")).hexdigest(), extension)\n return os.path.join(self._base_dir, *namespace, filename)\n\n def subscribe(\n self, log_key: Sequence[str], cursor: Optional[str] = None\n ) -> CapturedLogSubscription:\n subscription = CapturedLogSubscription(self, log_key, cursor)\n self.on_subscribe(subscription)\n return subscription\n\n def unsubscribe(self, subscription):\n self.on_unsubscribe(subscription)\n\n ###############################################\n #\n # Methods for the ComputeLogManager interface\n #\n ###############################################\n @contextmanager\n def _watch_logs(\n self, dagster_run: DagsterRun, step_key: Optional[str] = None\n ) -> Iterator[None]:\n check.inst_param(dagster_run, "dagster_run", DagsterRun)\n check.opt_str_param(step_key, "step_key")\n\n log_key = self.build_log_key_for_run(dagster_run.run_id, step_key or dagster_run.job_name)\n with self.capture_logs(log_key):\n yield\n\n def get_local_path(self, run_id: str, key: str, io_type: ComputeIOType) -> str:\n """Legacy adapter from compute log manager to more generic captured log manager API."""\n check.inst_param(io_type, "io_type", 
ComputeIOType)\n log_key = self.build_log_key_for_run(run_id, key)\n return self.get_captured_local_path(log_key, IO_TYPE_EXTENSION[io_type])\n\n def read_logs_file(\n self,\n run_id: str,\n key: str,\n io_type: ComputeIOType,\n cursor: int = 0,\n max_bytes: int = MAX_BYTES_FILE_READ,\n ) -> ComputeLogFileData:\n path = self.get_local_path(run_id, key, io_type)\n\n if not os.path.exists(path) or not os.path.isfile(path):\n return ComputeLogFileData(path=path, data=None, cursor=0, size=0, download_url=None)\n\n # See: https://docs.python.org/2/library/stdtypes.html#file.tell for Windows behavior\n with open(path, "rb") as f:\n f.seek(cursor, os.SEEK_SET)\n data = f.read(max_bytes)\n cursor = f.tell()\n stats = os.fstat(f.fileno())\n\n # local download path\n download_url = self.download_url(run_id, key, io_type)\n return ComputeLogFileData(\n path=path,\n data=data.decode("utf-8"),\n cursor=cursor,\n size=stats.st_size,\n download_url=download_url,\n )\n\n def get_key(self, dagster_run: DagsterRun, step_key: Optional[str]):\n check.inst_param(dagster_run, "dagster_run", DagsterRun)\n check.opt_str_param(step_key, "step_key")\n return step_key or dagster_run.job_name\n\n def is_watch_completed(self, run_id: str, key: str) -> bool:\n log_key = self.build_log_key_for_run(run_id, key)\n return self.is_capture_complete(log_key)\n\n def on_watch_start(self, dagster_run: DagsterRun, step_key: Optional[str]):\n pass\n\n def on_watch_finish(self, dagster_run: DagsterRun, step_key: Optional[str] = None):\n check.inst_param(dagster_run, "dagster_run", DagsterRun)\n check.opt_str_param(step_key, "step_key")\n log_key = self.build_log_key_for_run(dagster_run.run_id, step_key or dagster_run.job_name)\n touchpath = self.complete_artifact_path(log_key)\n touch_file(touchpath)\n\n def download_url(self, run_id: str, key: str, io_type: ComputeIOType):\n check.inst_param(io_type, "io_type", ComputeIOType)\n return f"/download/{run_id}/{key}/{io_type.value}"\n\n def on_subscribe(self, subscription: "LogSubscription") -> None:\n self._subscription_manager.add_subscription(subscription)\n\n def on_unsubscribe(self, subscription: "LogSubscription") -> None:\n self._subscription_manager.remove_subscription(subscription)\n\n def dispose(self) -> None:\n self._subscription_manager.dispose()
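The cursor returned by ``get_log_data`` is simply the stdout and stderr byte offsets joined by a colon, as ``parse_cursor`` and ``build_cursor`` above show. A small sketch of the round trip (the base directory is a placeholder, and the import comes from an internal module, so treat the path as an assumption):

.. code-block:: python

    from dagster._core.storage.local_compute_log_manager import LocalComputeLogManager

    manager = LocalComputeLogManager(base_dir="/tmp/dagster_compute_logs")

    cursor = manager.build_cursor(stdout_offset=1024, stderr_offset=256)
    print(cursor)                          # "1024:256"
    print(manager.parse_cursor(cursor))    # (1024, 256)
    print(manager.parse_cursor(None))      # (0, 0) -- no cursor reads from the beginning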
\n\n\nclass LocalComputeLogSubscriptionManager:\n def __init__(self, manager):\n self._manager = manager\n self._subscriptions = defaultdict(list)\n self._watchers = {}\n self._observer = None\n\n def add_subscription(self, subscription: "LogSubscription") -> None:\n check.inst_param(\n subscription, "subscription", (ComputeLogSubscription, CapturedLogSubscription)\n )\n\n if self.is_complete(subscription):\n subscription.fetch()\n subscription.complete()\n else:\n log_key = self._log_key(subscription)\n watch_key = self._watch_key(log_key)\n self._subscriptions[watch_key].append(subscription)\n self.watch(subscription)\n\n def is_complete(self, subscription: "LogSubscription") -> bool:\n check.inst_param(\n subscription, "subscription", (ComputeLogSubscription, CapturedLogSubscription)\n )\n\n if isinstance(subscription, ComputeLogSubscription):\n return self._manager.is_watch_completed(subscription.run_id, subscription.key)\n return self._manager.is_capture_complete(subscription.log_key)\n\n def remove_subscription(self, subscription: "LogSubscription") -> None:\n check.inst_param(\n subscription, "subscription", (ComputeLogSubscription, CapturedLogSubscription)\n )\n log_key = self._log_key(subscription)\n watch_key = self._watch_key(log_key)\n if subscription in self._subscriptions[watch_key]:\n self._subscriptions[watch_key].remove(subscription)\n subscription.complete()\n\n def _log_key(self, subscription: "LogSubscription") -> Sequence[str]:\n check.inst_param(\n subscription, "subscription", (ComputeLogSubscription, CapturedLogSubscription)\n )\n\n if isinstance(subscription, ComputeLogSubscription):\n return self._manager.build_log_key_for_run(subscription.run_id, subscription.key)\n return subscription.log_key\n\n def _watch_key(self, log_key: Sequence[str]) -> str:\n return json.dumps(log_key)\n\n def remove_all_subscriptions(self, log_key: Sequence[str]) -> None:\n watch_key = self._watch_key(log_key)\n for subscription in self._subscriptions.pop(watch_key, []):\n subscription.complete()\n\n def watch(self, subscription: "LogSubscription") -> None:\n log_key = self._log_key(subscription)\n watch_key = self._watch_key(log_key)\n if watch_key in self._watchers:\n return\n\n update_paths = [\n self._manager.get_captured_local_path(log_key, IO_TYPE_EXTENSION[ComputeIOType.STDOUT]),\n self._manager.get_captured_local_path(log_key, IO_TYPE_EXTENSION[ComputeIOType.STDERR]),\n self._manager.get_captured_local_path(\n log_key, IO_TYPE_EXTENSION[ComputeIOType.STDOUT], partial=True\n ),\n self._manager.get_captured_local_path(\n log_key, IO_TYPE_EXTENSION[ComputeIOType.STDERR], partial=True\n ),\n ]\n complete_paths = [self._manager.complete_artifact_path(log_key)]\n directory = os.path.dirname(\n self._manager.get_captured_local_path(log_key, ComputeIOType.STDERR),\n )\n\n if not self._observer:\n self._observer = PollingObserver(self._manager.polling_timeout)\n self._observer.start()\n\n ensure_dir(directory)\n\n self._watchers[watch_key] = self._observer.schedule(\n LocalComputeLogFilesystemEventHandler(self, log_key, update_paths, complete_paths),\n str(directory),\n )\n\n def notify_subscriptions(self, log_key: Sequence[str]) -> None:\n watch_key = self._watch_key(log_key)\n for subscription in self._subscriptions[watch_key]:\n subscription.fetch()\n\n def unwatch(self, log_key: Sequence[str], handler) -> None:\n watch_key = self._watch_key(log_key)\n if watch_key in self._watchers:\n self._observer.remove_handler_for_watch(handler, self._watchers[watch_key]) # type: ignore\n del 
self._watchers[watch_key]\n\n def dispose(self) -> None:\n if self._observer:\n self._observer.stop()\n self._observer.join(15)\n\n\nclass LocalComputeLogFilesystemEventHandler(PatternMatchingEventHandler):\n def __init__(self, manager, log_key, update_paths, complete_paths):\n self.manager = manager\n self.log_key = log_key\n self.update_paths = update_paths\n self.complete_paths = complete_paths\n patterns = update_paths + complete_paths\n super(LocalComputeLogFilesystemEventHandler, self).__init__(patterns=patterns)\n\n def on_created(self, event):\n if event.src_path in self.complete_paths:\n self.manager.remove_all_subscriptions(self.log_key)\n self.manager.unwatch(self.log_key, self)\n\n def on_modified(self, event):\n if event.src_path in self.update_paths:\n self.manager.notify_subscriptions(self.log_key)\n
", "current_page_name": "_modules/dagster/_core/storage/local_compute_log_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.local_compute_log_manager"}, "mem_io_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.mem_io_manager

\nfrom typing import Dict, Tuple\n\nfrom dagster._core.execution.context.input import InputContext\nfrom dagster._core.execution.context.output import OutputContext\nfrom dagster._core.storage.io_manager import IOManager, dagster_maintained_io_manager, io_manager\n\n\n
[docs]class InMemoryIOManager(IOManager):\n """I/O manager that stores and retrieves values in memory. After execution is complete, the values will\n be garbage-collected. Note that this means that each run will not have access to values from previous runs.\n """\n\n def __init__(self):\n self.values: Dict[Tuple[object, ...], object] = {}\n\n def handle_output(self, context: OutputContext, obj: object):\n keys = tuple(context.get_identifier())\n self.values[keys] = obj\n\n def load_input(self, context: InputContext) -> object:\n keys = tuple(context.get_identifier())\n return self.values[keys]
\n\n\n
[docs]@dagster_maintained_io_manager\n@io_manager(description="Built-in IO manager that stores and retrieves values in memory.")\ndef mem_io_manager(_) -> InMemoryIOManager:\n """Built-in IO manager that stores and retrieves values in memory."""\n return InMemoryIOManager()
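Because values only live in the dictionary shown above for the duration of a run, ``mem_io_manager`` is a natural fit for in-process tests. A minimal sketch with hypothetical op and job names:

.. code-block:: python

    from dagster import job, mem_io_manager, op

    @op
    def make_list():
        return [1, 2, 3]

    @op
    def double(xs):
        return [x * 2 for x in xs]

    @job(resource_defs={"io_manager": mem_io_manager})
    def doubling_job():
        double(make_list())

    result = doubling_job.execute_in_process()
    assert result.output_for_node("double") == [2, 4, 6]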
\n
", "current_page_name": "_modules/dagster/_core/storage/mem_io_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.mem_io_manager"}, "memoizable_io_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.memoizable_io_manager

\nimport os\nimport pickle\nfrom abc import abstractmethod\nfrom typing import Union\n\nimport dagster._check as check\nfrom dagster._annotations import experimental, public\nfrom dagster._config import Field, StringSource\nfrom dagster._core.errors import DagsterInvariantViolationError\nfrom dagster._core.execution.context.input import InputContext\nfrom dagster._core.execution.context.output import OutputContext\nfrom dagster._core.storage.io_manager import IOManager, dagster_maintained_io_manager, io_manager\nfrom dagster._utils import PICKLE_PROTOCOL, mkdir_p\n\n\n
[docs]class MemoizableIOManager(IOManager):\n """Base class for IO managers that work with memoized execution. Users should implement\n the ``load_input`` and ``handle_output`` methods described in the ``IOManager`` API, and the\n ``has_output`` method, which returns a boolean representing whether a data object can be found.\n """\n\n
[docs] @public\n @abstractmethod\n def has_output(self, context: OutputContext) -> bool:\n """The user-defined method that returns whether data exists given the metadata.\n\n Args:\n context (OutputContext): The context of the step performing this check.\n\n Returns:\n bool: True if there is data present that matches the provided context. False otherwise.\n """
\n\n\nclass VersionedPickledObjectFilesystemIOManager(MemoizableIOManager):\n def __init__(self, base_dir=None):\n self.base_dir = check.opt_str_param(base_dir, "base_dir")\n self.write_mode = "wb"\n self.read_mode = "rb"\n\n def _get_path(self, context: Union[InputContext, OutputContext]) -> str:\n output_context: OutputContext\n\n if isinstance(context, OutputContext):\n output_context = context\n else:\n if context.upstream_output is None:\n raise DagsterInvariantViolationError(\n "Missing value of InputContext.upstream_output. Cannot compute the input path."\n )\n\n output_context = context.upstream_output\n\n # automatically construct filepath\n step_key = check.str_param(output_context.step_key, "context.step_key")\n output_name = check.str_param(output_context.name, "context.name")\n version = check.str_param(output_context.version, "context.version")\n\n return os.path.join(self.base_dir, step_key, output_name, version)\n\n def handle_output(self, context, obj):\n """Pickle the data with the associated version, and store the object to a file.\n\n This method omits the AssetMaterialization event so assets generated by it won't be tracked\n by the Asset Catalog.\n """\n filepath = self._get_path(context)\n\n context.log.debug(f"Writing file at: {filepath}")\n\n # Ensure path exists\n mkdir_p(os.path.dirname(filepath))\n\n with open(filepath, self.write_mode) as write_obj:\n pickle.dump(obj, write_obj, PICKLE_PROTOCOL)\n\n def load_input(self, context):\n """Unpickle the file and Load it to a data object."""\n filepath = self._get_path(context)\n\n context.log.debug(f"Loading file from: {filepath}")\n\n with open(filepath, self.read_mode) as read_obj:\n return pickle.load(read_obj)\n\n def has_output(self, context):\n """Returns true if data object exists with the associated version, False otherwise."""\n filepath = self._get_path(context)\n\n context.log.debug(f"Checking for file at: {filepath}")\n\n return os.path.exists(filepath) and not os.path.isdir(filepath)\n\n\n@dagster_maintained_io_manager\n@io_manager(config_schema={"base_dir": Field(StringSource, is_required=False)})\n@experimental\ndef versioned_filesystem_io_manager(init_context):\n """Filesystem IO manager that utilizes versioning of stored objects.\n\n It requires users to specify a base directory where all the step outputs will be stored in. It\n serializes and deserializes output values (assets) using pickling and automatically constructs\n the filepaths for the assets using the provided directory, and the version for a provided step\n output.\n """\n return VersionedPickledObjectFilesystemIOManager(\n base_dir=init_context.resource_config.get(\n "base_dir", os.path.join(init_context.instance.storage_directory(), "versioned_outputs")\n )\n )\n
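A custom memoizable IO manager only has to add ``has_output`` on top of the usual pair of methods; returning ``True`` signals that a versioned value already exists and the step can be skipped. An illustrative sketch (class name and path scheme are assumptions, and the import comes from an internal module):

.. code-block:: python

    import os
    import pickle

    from dagster._core.storage.memoizable_io_manager import MemoizableIOManager

    class PickleCacheIOManager(MemoizableIOManager):
        """Illustrative memoizable IO manager backed by versioned pickle files."""

        def __init__(self, base_dir: str):
            self._base_dir = base_dir

        def _path(self, context) -> str:
            # Version-aware path, in the spirit of the versioned manager above.
            return os.path.join(self._base_dir, context.step_key, context.name, str(context.version))

        def handle_output(self, context, obj) -> None:
            path = self._path(context)
            os.makedirs(os.path.dirname(path), exist_ok=True)
            with open(path, "wb") as f:
                pickle.dump(obj, f)

        def load_input(self, context):
            with open(self._path(context.upstream_output), "rb") as f:
                return pickle.load(f)

        def has_output(self, context) -> bool:
            return os.path.exists(self._path(context))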
", "current_page_name": "_modules/dagster/_core/storage/memoizable_io_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.memoizable_io_manager"}, "noop_compute_log_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.noop_compute_log_manager

\nfrom contextlib import contextmanager\nfrom typing import IO, Any, Generator, Mapping, Optional, Sequence\n\nfrom typing_extensions import Self\n\nimport dagster._check as check\nfrom dagster._core.storage.captured_log_manager import (\n    CapturedLogContext,\n    CapturedLogData,\n    CapturedLogManager,\n    CapturedLogMetadata,\n    CapturedLogSubscription,\n)\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData\n\nfrom .compute_log_manager import (\n    MAX_BYTES_FILE_READ,\n    ComputeIOType,\n    ComputeLogFileData,\n    ComputeLogManager,\n)\n\n\n
[docs]class NoOpComputeLogManager(CapturedLogManager, ComputeLogManager, ConfigurableClass):\n """When enabled for a Dagster instance, stdout and stderr will not be available for any step."""\n\n def __init__(self, inst_data: Optional[ConfigurableClassData] = None):\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n\n @property\n def inst_data(self):\n return self._inst_data\n\n @classmethod\n def config_type(cls):\n return {}\n\n @classmethod\n def from_config_value(\n cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any]\n ) -> Self:\n return NoOpComputeLogManager(inst_data=inst_data, **config_value)\n\n def enabled(self, _dagster_run, _step_key):\n return False\n\n def _watch_logs(self, dagster_run, step_key=None):\n pass\n\n def get_local_path(self, run_id: str, key: str, io_type: ComputeIOType) -> str:\n raise NotImplementedError()\n\n def is_watch_completed(self, run_id, key):\n return True\n\n def on_watch_start(self, dagster_run, step_key):\n pass\n\n def on_watch_finish(self, dagster_run, step_key):\n pass\n\n def download_url(self, run_id, key, io_type):\n return None\n\n def read_logs_file(self, run_id, key, io_type, cursor=0, max_bytes=MAX_BYTES_FILE_READ):\n return ComputeLogFileData(\n path=f"{key}.{io_type}", data=None, cursor=0, size=0, download_url=None\n )\n\n def on_subscribe(self, subscription):\n pass\n\n def on_unsubscribe(self, subscription):\n pass\n\n @contextmanager\n def capture_logs(self, log_key: Sequence[str]) -> Generator[CapturedLogContext, None, None]:\n yield CapturedLogContext(log_key=log_key)\n\n def is_capture_complete(self, log_key: Sequence[str]):\n return True\n\n @contextmanager\n def open_log_stream(\n self, log_key: Sequence[str], io_type: ComputeIOType\n ) -> Generator[Optional[IO], None, None]:\n yield None\n\n def get_log_data(\n self,\n log_key: Sequence[str],\n cursor: Optional[str] = None,\n max_bytes: Optional[int] = None,\n ) -> CapturedLogData:\n return CapturedLogData(log_key=log_key)\n\n def get_log_metadata(self, log_key: Sequence[str]) -> CapturedLogMetadata:\n return CapturedLogMetadata()\n\n def delete_logs(\n self, log_key: Optional[Sequence[str]] = None, prefix: Optional[Sequence[str]] = None\n ):\n pass\n\n def subscribe(\n self, log_key: Sequence[str], cursor: Optional[str] = None\n ) -> CapturedLogSubscription:\n return CapturedLogSubscription(self, log_key, cursor)\n\n def unsubscribe(self, subscription: CapturedLogSubscription):\n pass
\n
", "current_page_name": "_modules/dagster/_core/storage/noop_compute_log_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.noop_compute_log_manager"}, "root": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.root

\nimport os\nfrom tempfile import TemporaryDirectory\nfrom typing import Optional\n\nfrom typing_extensions import TypedDict\n\nfrom dagster import (\n    StringSource,\n    _check as check,\n)\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData\n\n\nclass LocalArtifactStorageConfig(TypedDict):\n    base_dir: str\n\n\n
[docs]class LocalArtifactStorage(ConfigurableClass):\n def __init__(self, base_dir: str, inst_data: Optional[ConfigurableClassData] = None):\n self._base_dir = base_dir\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n @property\n def base_dir(self) -> str:\n return self._base_dir\n\n def file_manager_dir(self, run_id: str) -> str:\n check.str_param(run_id, "run_id")\n return os.path.join(self.base_dir, "storage", run_id, "files")\n\n @property\n def storage_dir(self) -> str:\n return os.path.join(self.base_dir, "storage")\n\n @property\n def schedules_dir(self) -> str:\n return os.path.join(self.base_dir, "schedules")\n\n @classmethod\n def from_config_value(\n cls, inst_data: Optional[ConfigurableClassData], config_value: LocalArtifactStorageConfig\n ) -> "LocalArtifactStorage":\n return LocalArtifactStorage(inst_data=inst_data, **config_value)\n\n @classmethod\n def config_type(cls) -> UserConfigSchema:\n return {"base_dir": StringSource}\n\n def dispose(self):\n pass
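The directory layout that ``LocalArtifactStorage`` derives from its base directory can be inspected directly; the base directory below is a placeholder standing in for ``$DAGSTER_HOME``, and the import comes from an internal module:

.. code-block:: python

    from dagster._core.storage.root import LocalArtifactStorage

    storage = LocalArtifactStorage(base_dir="/tmp/dagster_home")

    print(storage.storage_dir)                      # /tmp/dagster_home/storage
    print(storage.schedules_dir)                    # /tmp/dagster_home/schedules
    print(storage.file_manager_dir("some_run_id"))  # /tmp/dagster_home/storage/some_run_id/files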
\n\n\nclass TemporaryLocalArtifactStorage(LocalArtifactStorage):\n """Used by ephemeral DagsterInstances; defers directory creation until\n access, since many uses of an ephemeral instance do not require an artifact directory.\n """\n\n def __init__(self):\n self._temp_dir = None\n\n @property\n def base_dir(self):\n if self._temp_dir is None:\n self._temp_dir = TemporaryDirectory()\n return self._temp_dir.name\n\n def dispose(self):\n if self._temp_dir:\n self._temp_dir.cleanup()\n
", "current_page_name": "_modules/dagster/_core/storage/root", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.root"}, "runs": {"base": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.runs.base

\nfrom abc import ABC, abstractmethod\nfrom typing import TYPE_CHECKING, Mapping, Optional, Sequence, Set, Tuple, Union\n\nfrom typing_extensions import TypedDict\n\nfrom dagster._core.events import DagsterEvent\nfrom dagster._core.execution.backfill import BulkActionStatus, PartitionBackfill\nfrom dagster._core.instance import MayHaveInstanceWeakref, T_DagsterInstance\nfrom dagster._core.snap import ExecutionPlanSnapshot, JobSnapshot\nfrom dagster._core.storage.dagster_run import (\n    DagsterRun,\n    JobBucket,\n    RunPartitionData,\n    RunRecord,\n    RunsFilter,\n    TagBucket,\n)\nfrom dagster._core.storage.sql import AlembicVersion\nfrom dagster._daemon.types import DaemonHeartbeat\nfrom dagster._utils import PrintFn\n\nfrom ..daemon_cursor import DaemonCursorStorage\n\nif TYPE_CHECKING:\n    from dagster._core.host_representation.origin import ExternalJobOrigin\n\n\nclass RunGroupInfo(TypedDict):\n    count: int\n    runs: Sequence[DagsterRun]\n\n\n
[docs]class RunStorage(ABC, MayHaveInstanceWeakref[T_DagsterInstance], DaemonCursorStorage):\n """Abstract base class for storing pipeline run history.\n\n Note that run storages using SQL databases as backing stores should implement\n :py:class:`~dagster._core.storage.runs.SqlRunStorage`.\n\n Users should not directly instantiate concrete subclasses of this class; they are instantiated\n by internal machinery when ``dagster-webserver`` and ``dagster-graphql`` load, based on the values in the\n ``dagster.yaml`` file in ``$DAGSTER_HOME``. Configuration of concrete subclasses of this class\n should be done by setting values in that file.\n """\n\n @abstractmethod\n def add_run(self, dagster_run: DagsterRun) -> DagsterRun:\n """Add a run to storage.\n\n If a run already exists with the same ID, raise DagsterRunAlreadyExists\n If the run's snapshot ID does not exist raise DagsterSnapshotDoesNotExist\n\n Args:\n dagster_run (DagsterRun): The run to add.\n """\n\n @abstractmethod\n def handle_run_event(self, run_id: str, event: DagsterEvent) -> None:\n """Update run storage in accordance to a pipeline run related DagsterEvent.\n\n Args:\n run_id (str)\n event (DagsterEvent)\n """\n\n @abstractmethod\n def get_runs(\n self,\n filters: Optional[RunsFilter] = None,\n cursor: Optional[str] = None,\n limit: Optional[int] = None,\n bucket_by: Optional[Union[JobBucket, TagBucket]] = None,\n ) -> Sequence[DagsterRun]:\n """Return all the runs present in the storage that match the given filters.\n\n Args:\n filters (Optional[RunsFilter]) -- The\n :py:class:`~dagster._core.storage.pipeline_run.RunsFilter` by which to filter\n runs\n cursor (Optional[str]): Starting cursor (run_id) of range of runs\n limit (Optional[int]): Number of results to get. Defaults to infinite.\n\n Returns:\n List[PipelineRun]\n """\n\n @abstractmethod\n def get_run_ids(\n self,\n filters: Optional[RunsFilter] = None,\n cursor: Optional[str] = None,\n limit: Optional[int] = None,\n ) -> Sequence[str]:\n """Return all the run IDs for runs present in the storage that match the given filters.\n\n Args:\n filters (Optional[RunsFilter]) -- The\n :py:class:`~dagster._core.storage.pipeline_run.RunsFilter` by which to filter\n runs\n cursor (Optional[str]): Starting cursor (run_id) of range of runs\n limit (Optional[int]): Number of results to get. Defaults to infinite.\n\n Returns:\n Sequence[str]\n """\n\n @abstractmethod\n def get_runs_count(self, filters: Optional[RunsFilter] = None) -> int:\n """Return the number of runs present in the storage that match the given filters.\n\n Args:\n filters (Optional[RunsFilter]) -- The\n :py:class:`~dagster._core.storage.pipeline_run.PipelineRunFilter` by which to filter\n runs\n\n Returns:\n int: The number of runs that match the given filters.\n """\n\n @abstractmethod\n def get_run_group(self, run_id: str) -> Optional[Tuple[str, Sequence[DagsterRun]]]:\n """Get the run group to which a given run belongs.\n\n Args:\n run_id (str): If the corresponding run is the descendant of some root run (i.e., there\n is a root_run_id on the :py:class:`PipelineRun`), that root run and all of its\n descendants are returned; otherwise, the group will consist only of the given run\n (a run that does not descend from any root is its own root).\n\n Returns:\n Optional[Tuple[string, List[PipelineRun]]]: If there is a corresponding run group, tuple\n whose first element is the root_run_id and whose second element is a list of all the\n descendent runs. 
Otherwise `None`.\n """\n\n @abstractmethod\n def get_run_records(\n self,\n filters: Optional[RunsFilter] = None,\n limit: Optional[int] = None,\n order_by: Optional[str] = None,\n ascending: bool = False,\n cursor: Optional[str] = None,\n bucket_by: Optional[Union[JobBucket, TagBucket]] = None,\n ) -> Sequence[RunRecord]:\n """Return a list of run records stored in the run storage, sorted by the given column in given order.\n\n Args:\n filters (Optional[RunsFilter]): the filter by which to filter runs.\n limit (Optional[int]): Number of results to get. Defaults to infinite.\n order_by (Optional[str]): Name of the column to sort by. Defaults to id.\n ascending (Optional[bool]): Sort the result in ascending order if True, descending\n otherwise. Defaults to descending.\n\n Returns:\n List[RunRecord]: List of run records stored in the run storage.\n """\n\n @abstractmethod\n def get_run_tags(\n self,\n tag_keys: Optional[Sequence[str]] = None,\n value_prefix: Optional[str] = None,\n limit: Optional[int] = None,\n ) -> Sequence[Tuple[str, Set[str]]]:\n """Get a list of tag keys and the values that have been associated with them.\n\n Args:\n tag_keys (Optional[Sequence[str]]): tag keys to filter by.\n\n Returns:\n List[Tuple[str, Set[str]]]\n """\n\n @abstractmethod\n def get_run_tag_keys(self) -> Sequence[str]:\n """Get a list of tag keys.\n\n Returns:\n List[str]\n """\n\n @abstractmethod\n def add_run_tags(self, run_id: str, new_tags: Mapping[str, str]) -> None:\n """Add additional tags for a pipeline run.\n\n Args:\n run_id (str)\n new_tags (Dict[string, string])\n """\n\n @abstractmethod\n def has_run(self, run_id: str) -> bool:\n """Check if the storage contains a run.\n\n Args:\n run_id (str): The id of the run\n\n Returns:\n bool\n """\n\n def add_snapshot(\n self,\n snapshot: Union[JobSnapshot, ExecutionPlanSnapshot],\n snapshot_id: Optional[str] = None,\n ) -> None:\n """Add a snapshot to the storage.\n\n Args:\n snapshot (Union[PipelineSnapshot, ExecutionPlanSnapshot])\n snapshot_id (Optional[str]): [Internal] The id of the snapshot. If not provided, the\n snapshot id will be generated from a hash of the snapshot. This should only be used\n in debugging, where we might want to import a historical run whose snapshots were\n calculated using a different hash function than the current code.\n """\n if isinstance(snapshot, JobSnapshot):\n self.add_job_snapshot(snapshot, snapshot_id)\n else:\n self.add_execution_plan_snapshot(snapshot, snapshot_id)\n\n def has_snapshot(self, snapshot_id: str):\n return self.has_job_snapshot(snapshot_id) or self.has_execution_plan_snapshot(snapshot_id)\n\n @abstractmethod\n def has_job_snapshot(self, job_snapshot_id: str) -> bool:\n """Check to see if storage contains a pipeline snapshot.\n\n Args:\n pipeline_snapshot_id (str): The id of the run.\n\n Returns:\n bool\n """\n\n @abstractmethod\n def add_job_snapshot(self, job_snapshot: JobSnapshot, snapshot_id: Optional[str] = None) -> str:\n """Add a pipeline snapshot to the run store.\n\n Pipeline snapshots are content-addressable, meaning\n that the ID for a snapshot is a hash based on the\n body of the snapshot. This function returns\n that snapshot ID.\n\n Args:\n job_snapshot (PipelineSnapshot)\n snapshot_id (Optional[str]): [Internal] The id of the snapshot. If not provided, the\n snapshot id will be generated from a hash of the snapshot. 
This should only be used\n in debugging, where we might want to import a historical run whose snapshots were\n calculated using a different hash function than the current code.\n\n Return:\n str: The job_snapshot_id\n """\n\n @abstractmethod\n def get_job_snapshot(self, job_snapshot_id: str) -> JobSnapshot:\n """Fetch a snapshot by ID.\n\n Args:\n job_snapshot_id (str)\n\n Returns:\n PipelineSnapshot\n """\n\n @abstractmethod\n def has_execution_plan_snapshot(self, execution_plan_snapshot_id: str) -> bool:\n """Check to see if storage contains an execution plan snapshot.\n\n Args:\n execution_plan_snapshot_id (str): The id of the execution plan.\n\n Returns:\n bool\n """\n\n @abstractmethod\n def add_execution_plan_snapshot(\n self, execution_plan_snapshot: ExecutionPlanSnapshot, snapshot_id: Optional[str] = None\n ) -> str:\n """Add an execution plan snapshot to the run store.\n\n Execution plan snapshots are content-addressable, meaning\n that the ID for a snapshot is a hash based on the\n body of the snapshot. This function returns\n that snapshot ID.\n\n Args:\n execution_plan_snapshot (ExecutionPlanSnapshot)\n snapshot_id (Optional[str]): [Internal] The id of the snapshot. If not provided, the\n snapshot id will be generated from a hash of the snapshot. This should only be used\n in debugging, where we might want to import a historical run whose snapshots were\n calculated using a different hash function than the current code.\n\n Return:\n str: The execution_plan_snapshot_id\n """\n\n @abstractmethod\n def get_execution_plan_snapshot(self, execution_plan_snapshot_id: str) -> ExecutionPlanSnapshot:\n """Fetch a snapshot by ID.\n\n Args:\n execution_plan_snapshot_id (str)\n\n Returns:\n ExecutionPlanSnapshot\n """\n\n @abstractmethod\n def wipe(self) -> None:\n """Clears the run storage."""\n\n @abstractmethod\n def delete_run(self, run_id: str) -> None:\n """Remove a run from storage."""\n\n @property\n def supports_bucket_queries(self) -> bool:\n return False\n\n @abstractmethod\n def get_run_partition_data(self, runs_filter: RunsFilter) -> Sequence[RunPartitionData]:\n """Get run partition data for a given partitioned job."""\n\n def migrate(self, print_fn: Optional[PrintFn] = None, force_rebuild_all: bool = False) -> None:\n """Call this method to run any required data migrations."""\n\n def optimize(self, print_fn: Optional[PrintFn] = None, force_rebuild_all: bool = False) -> None:\n """Call this method to run any optional data migrations for optimized reads."""\n\n def dispose(self) -> None:\n """Explicit lifecycle management."""\n\n def optimize_for_webserver(self, statement_timeout: int, pool_recycle: int) -> None:\n """Allows for optimizing database connection / use in the context of a long lived webserver process."""\n\n # Daemon Heartbeat Storage\n #\n # Holds heartbeats from the Dagster Daemon so that other system components can alert when it's not\n # alive.\n # This is temporarily placed along with run storage to avoid adding a new instance concept. 
It\n # should be split out once all metadata storages are configured together.\n\n @abstractmethod\n def add_daemon_heartbeat(self, daemon_heartbeat: DaemonHeartbeat) -> None:\n """Called on a regular interval by the daemon."""\n\n @abstractmethod\n def get_daemon_heartbeats(self) -> Mapping[str, DaemonHeartbeat]:\n """Latest heartbeats of all daemon types."""\n\n @abstractmethod\n def wipe_daemon_heartbeats(self) -> None:\n """Wipe all daemon heartbeats."""\n\n # Backfill storage\n @abstractmethod\n def get_backfills(\n self,\n status: Optional[BulkActionStatus] = None,\n cursor: Optional[str] = None,\n limit: Optional[int] = None,\n ) -> Sequence[PartitionBackfill]:\n """Get a list of partition backfills."""\n\n @abstractmethod\n def get_backfill(self, backfill_id: str) -> Optional[PartitionBackfill]:\n """Get the partition backfill of the given backfill id."""\n\n @abstractmethod\n def add_backfill(self, partition_backfill: PartitionBackfill):\n """Add partition backfill to run storage."""\n\n @abstractmethod\n def update_backfill(self, partition_backfill: PartitionBackfill):\n """Update a partition backfill in run storage."""\n\n def alembic_version(self) -> Optional[AlembicVersion]:\n return None\n\n @abstractmethod\n def replace_job_origin(self, run: "DagsterRun", job_origin: "ExternalJobOrigin") -> None: ...
\n
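The `RunStorage` interface above is normally reached indirectly through a `DagsterInstance`, which instantiates the concrete storage from `dagster.yaml`. A minimal sketch (illustrative, not part of this patch) of querying run storage that way, assuming a configured `$DAGSTER_HOME`:

```python
from dagster import DagsterInstance, DagsterRunStatus, RunsFilter

# Load the instance; run storage is resolved from dagster.yaml in $DAGSTER_HOME.
instance = DagsterInstance.get()

# Count and list recent failed runs; both calls are served by the configured RunStorage.
failed_filter = RunsFilter(statuses=[DagsterRunStatus.FAILURE])
print(instance.get_runs_count(filters=failed_filter))
for run in instance.get_runs(filters=failed_filter, limit=5):
    print(run.run_id, run.job_name, run.status)
```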
", "current_page_name": "_modules/dagster/_core/storage/runs/base", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.runs.base"}, "sql_run_storage": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.runs.sql_run_storage

\nimport logging\nimport uuid\nimport zlib\nfrom abc import abstractmethod\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom enum import Enum\nfrom typing import (\n    Any,\n    Callable,\n    ContextManager,\n    Dict,\n    Iterable,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n    Union,\n    cast,\n)\n\nimport pendulum\nimport sqlalchemy as db\nimport sqlalchemy.exc as db_exc\nfrom sqlalchemy.engine import Connection\n\nimport dagster._check as check\nfrom dagster._core.errors import (\n    DagsterInvariantViolationError,\n    DagsterRunAlreadyExists,\n    DagsterRunNotFoundError,\n    DagsterSnapshotDoesNotExist,\n)\nfrom dagster._core.events import EVENT_TYPE_TO_PIPELINE_RUN_STATUS, DagsterEvent, DagsterEventType\nfrom dagster._core.execution.backfill import BulkActionStatus, PartitionBackfill\nfrom dagster._core.host_representation.origin import ExternalJobOrigin\nfrom dagster._core.snap import (\n    ExecutionPlanSnapshot,\n    JobSnapshot,\n    create_execution_plan_snapshot_id,\n    create_job_snapshot_id,\n)\nfrom dagster._core.storage.sql import SqlAlchemyQuery\nfrom dagster._core.storage.sqlalchemy_compat import (\n    db_fetch_mappings,\n    db_scalar_subquery,\n    db_select,\n    db_subquery,\n)\nfrom dagster._core.storage.tags import (\n    PARTITION_NAME_TAG,\n    PARTITION_SET_TAG,\n    REPOSITORY_LABEL_TAG,\n    ROOT_RUN_ID_TAG,\n)\nfrom dagster._daemon.types import DaemonHeartbeat\nfrom dagster._serdes import (\n    deserialize_value,\n    serialize_value,\n)\nfrom dagster._seven import JSONDecodeError\nfrom dagster._utils import PrintFn, utc_datetime_from_timestamp\nfrom dagster._utils.merger import merge_dicts\n\nfrom ..dagster_run import (\n    DagsterRun,\n    DagsterRunStatus,\n    JobBucket,\n    RunPartitionData,\n    RunRecord,\n    RunsFilter,\n    TagBucket,\n)\nfrom .base import RunStorage\nfrom .migration import (\n    OPTIONAL_DATA_MIGRATIONS,\n    REQUIRED_DATA_MIGRATIONS,\n    RUN_PARTITIONS,\n    MigrationFn,\n)\nfrom .schema import (\n    BulkActionsTable,\n    DaemonHeartbeatsTable,\n    InstanceInfo,\n    KeyValueStoreTable,\n    RunsTable,\n    RunTagsTable,\n    SecondaryIndexMigrationTable,\n    SnapshotsTable,\n)\n\n\nclass SnapshotType(Enum):\n    PIPELINE = "PIPELINE"\n    EXECUTION_PLAN = "EXECUTION_PLAN"\n\n\n
[docs]class SqlRunStorage(RunStorage):\n """Base class for SQL based run storages."""\n\n @abstractmethod\n def connect(self) -> ContextManager[Connection]:\n """Context manager yielding a sqlalchemy.engine.Connection."""\n\n @abstractmethod\n def upgrade(self) -> None:\n """This method should perform any schema or data migrations necessary to bring an\n out-of-date instance of the storage up to date.\n """\n\n def fetchall(self, query: SqlAlchemyQuery) -> Sequence[Any]:\n with self.connect() as conn:\n return db_fetch_mappings(conn, query)\n\n def fetchone(self, query: SqlAlchemyQuery) -> Optional[Any]:\n with self.connect() as conn:\n if db.__version__.startswith("2."):\n return conn.execute(query).mappings().first()\n else:\n return conn.execute(query).fetchone()\n\n def add_run(self, dagster_run: DagsterRun) -> DagsterRun:\n check.inst_param(dagster_run, "dagster_run", DagsterRun)\n\n if dagster_run.job_snapshot_id and not self.has_job_snapshot(dagster_run.job_snapshot_id):\n raise DagsterSnapshotDoesNotExist(\n f"Snapshot {dagster_run.job_snapshot_id} does not exist in run storage"\n )\n\n has_tags = dagster_run.tags and len(dagster_run.tags) > 0\n partition = dagster_run.tags.get(PARTITION_NAME_TAG) if has_tags else None\n partition_set = dagster_run.tags.get(PARTITION_SET_TAG) if has_tags else None\n\n runs_insert = RunsTable.insert().values(\n run_id=dagster_run.run_id,\n pipeline_name=dagster_run.job_name,\n status=dagster_run.status.value,\n run_body=serialize_value(dagster_run),\n snapshot_id=dagster_run.job_snapshot_id,\n partition=partition,\n partition_set=partition_set,\n )\n with self.connect() as conn:\n try:\n conn.execute(runs_insert)\n except db_exc.IntegrityError as exc:\n raise DagsterRunAlreadyExists from exc\n\n tags_to_insert = dagster_run.tags_for_storage()\n if tags_to_insert:\n conn.execute(\n RunTagsTable.insert(),\n [\n dict(run_id=dagster_run.run_id, key=k, value=v)\n for k, v in tags_to_insert.items()\n ],\n )\n\n return dagster_run\n\n def handle_run_event(self, run_id: str, event: DagsterEvent) -> None:\n check.str_param(run_id, "run_id")\n check.inst_param(event, "event", DagsterEvent)\n\n if event.event_type not in EVENT_TYPE_TO_PIPELINE_RUN_STATUS:\n return\n\n run = self._get_run_by_id(run_id)\n if not run:\n # TODO log?\n return\n\n new_job_status = EVENT_TYPE_TO_PIPELINE_RUN_STATUS[event.event_type]\n\n run_stats_cols_in_index = self.has_run_stats_index_cols()\n\n kwargs = {}\n\n # consider changing the `handle_run_event` signature to get timestamp off of the\n # EventLogEntry instead of the DagsterEvent, for consistency\n now = pendulum.now("UTC")\n\n if run_stats_cols_in_index and event.event_type == DagsterEventType.PIPELINE_START:\n kwargs["start_time"] = now.timestamp()\n\n if run_stats_cols_in_index and event.event_type in {\n DagsterEventType.PIPELINE_CANCELED,\n DagsterEventType.PIPELINE_FAILURE,\n DagsterEventType.PIPELINE_SUCCESS,\n }:\n kwargs["end_time"] = now.timestamp()\n\n with self.connect() as conn:\n conn.execute(\n RunsTable.update()\n .where(RunsTable.c.run_id == run_id)\n .values(\n run_body=serialize_value(run.with_status(new_job_status)),\n status=new_job_status.value,\n update_timestamp=now,\n **kwargs,\n )\n )\n\n def _row_to_run(self, row: Dict) -> DagsterRun:\n run = deserialize_value(row["run_body"], DagsterRun)\n status = DagsterRunStatus(row["status"])\n # NOTE: the status column is more trustworthy than the status in the run body, since concurrent\n # writes (e.g. 
handle_run_event and add_tags) can cause the status in the body to be out of\n # overriden with an old value.\n return run.with_status(status)\n\n def _rows_to_runs(self, rows: Iterable[Dict]) -> Sequence[DagsterRun]:\n return list(map(self._row_to_run, rows))\n\n def _add_cursor_limit_to_query(\n self,\n query: SqlAlchemyQuery,\n cursor: Optional[str],\n limit: Optional[int],\n order_by: Optional[str],\n ascending: Optional[bool],\n ) -> SqlAlchemyQuery:\n """Helper function to deal with cursor/limit pagination args."""\n if cursor:\n cursor_query = db_select([RunsTable.c.id]).where(RunsTable.c.run_id == cursor)\n query = query.where(RunsTable.c.id < db_scalar_subquery(cursor_query))\n\n if limit:\n query = query.limit(limit)\n\n sorting_column = getattr(RunsTable.c, order_by) if order_by else RunsTable.c.id\n direction = db.asc if ascending else db.desc\n query = query.order_by(direction(sorting_column))\n\n return query\n\n @property\n def supports_intersect(self) -> bool:\n return True\n\n def _add_filters_to_query(self, query: SqlAlchemyQuery, filters: RunsFilter) -> SqlAlchemyQuery:\n check.inst_param(filters, "filters", RunsFilter)\n\n if filters.run_ids:\n query = query.where(RunsTable.c.run_id.in_(filters.run_ids))\n\n if filters.job_name:\n query = query.where(RunsTable.c.pipeline_name == filters.job_name)\n\n if filters.statuses:\n query = query.where(\n RunsTable.c.status.in_([status.value for status in filters.statuses])\n )\n\n if filters.snapshot_id:\n query = query.where(RunsTable.c.snapshot_id == filters.snapshot_id)\n\n if filters.updated_after:\n query = query.where(RunsTable.c.update_timestamp > filters.updated_after)\n\n if filters.updated_before:\n query = query.where(RunsTable.c.update_timestamp < filters.updated_before)\n\n if filters.created_after:\n query = query.where(RunsTable.c.create_timestamp > filters.created_after)\n\n if filters.created_before:\n query = query.where(RunsTable.c.create_timestamp < filters.created_before)\n\n return query\n\n def _runs_query(\n self,\n filters: Optional[RunsFilter] = None,\n cursor: Optional[str] = None,\n limit: Optional[int] = None,\n columns: Optional[Sequence[str]] = None,\n order_by: Optional[str] = None,\n ascending: bool = False,\n bucket_by: Optional[Union[JobBucket, TagBucket]] = None,\n ) -> SqlAlchemyQuery:\n filters = check.opt_inst_param(filters, "filters", RunsFilter, default=RunsFilter())\n check.opt_str_param(cursor, "cursor")\n check.opt_int_param(limit, "limit")\n check.opt_sequence_param(columns, "columns")\n check.opt_str_param(order_by, "order_by")\n check.opt_bool_param(ascending, "ascending")\n\n if columns is None:\n columns = ["run_body", "status"]\n\n if filters.tags:\n table = self._apply_tags_table_joins(RunsTable, filters.tags)\n else:\n table = RunsTable\n\n base_query = db_select([getattr(RunsTable.c, column) for column in columns]).select_from(\n table\n )\n base_query = self._add_filters_to_query(base_query, filters)\n return self._add_cursor_limit_to_query(base_query, cursor, limit, order_by, ascending)\n\n def _apply_tags_table_joins(\n self,\n table: db.Table,\n tags: Mapping[str, Union[str, Sequence[str]]],\n ) -> db.Table:\n multi_join = len(tags) > 1\n i = 0\n for key, value in tags.items():\n i += 1\n tags_table = (\n db_subquery(db_select([RunTagsTable]), f"run_tags_subquery_{i}")\n if multi_join\n else RunTagsTable\n )\n table = table.join(\n tags_table,\n db.and_(\n RunsTable.c.run_id == tags_table.c.run_id,\n tags_table.c.key == key,\n (\n tags_table.c.value == value\n if 
isinstance(value, str)\n else tags_table.c.value.in_(value)\n ),\n ),\n )\n return table\n\n def get_runs(\n self,\n filters: Optional[RunsFilter] = None,\n cursor: Optional[str] = None,\n limit: Optional[int] = None,\n bucket_by: Optional[Union[JobBucket, TagBucket]] = None,\n ) -> Sequence[DagsterRun]:\n query = self._runs_query(filters, cursor, limit, bucket_by=bucket_by)\n rows = self.fetchall(query)\n return self._rows_to_runs(rows)\n\n def get_run_ids(\n self,\n filters: Optional[RunsFilter] = None,\n cursor: Optional[str] = None,\n limit: Optional[int] = None,\n ) -> Sequence[str]:\n query = self._runs_query(filters=filters, cursor=cursor, limit=limit, columns=["run_id"])\n rows = self.fetchall(query)\n return [row["run_id"] for row in rows]\n\n def get_runs_count(self, filters: Optional[RunsFilter] = None) -> int:\n subquery = db_subquery(self._runs_query(filters=filters))\n query = db_select([db.func.count().label("count")]).select_from(subquery)\n row = self.fetchone(query)\n count = row["count"] if row else 0\n return count\n\n def _get_run_by_id(self, run_id: str) -> Optional[DagsterRun]:\n check.str_param(run_id, "run_id")\n\n query = db_select([RunsTable.c.run_body, RunsTable.c.status]).where(\n RunsTable.c.run_id == run_id\n )\n rows = self.fetchall(query)\n return self._row_to_run(rows[0]) if rows else None\n\n def get_run_records(\n self,\n filters: Optional[RunsFilter] = None,\n limit: Optional[int] = None,\n order_by: Optional[str] = None,\n ascending: bool = False,\n cursor: Optional[str] = None,\n bucket_by: Optional[Union[JobBucket, TagBucket]] = None,\n ) -> Sequence[RunRecord]:\n filters = check.opt_inst_param(filters, "filters", RunsFilter, default=RunsFilter())\n check.opt_int_param(limit, "limit")\n\n columns = ["id", "run_body", "status", "create_timestamp", "update_timestamp"]\n\n if self.has_run_stats_index_cols():\n columns += ["start_time", "end_time"]\n # only fetch columns we use to build RunRecord\n query = self._runs_query(\n filters=filters,\n limit=limit,\n columns=columns,\n order_by=order_by,\n ascending=ascending,\n cursor=cursor,\n bucket_by=bucket_by,\n )\n\n rows = self.fetchall(query)\n return [\n RunRecord(\n storage_id=check.int_param(row["id"], "id"),\n dagster_run=self._row_to_run(row),\n create_timestamp=check.inst(row["create_timestamp"], datetime),\n update_timestamp=check.inst(row["update_timestamp"], datetime),\n start_time=(\n check.opt_inst(row["start_time"], float) if "start_time" in row else None\n ),\n end_time=check.opt_inst(row["end_time"], float) if "end_time" in row else None,\n )\n for row in rows\n ]\n\n def get_run_tags(\n self,\n tag_keys: Optional[Sequence[str]] = None,\n value_prefix: Optional[str] = None,\n limit: Optional[int] = None,\n ) -> Sequence[Tuple[str, Set[str]]]:\n result = defaultdict(set)\n query = (\n db_select([RunTagsTable.c.key, RunTagsTable.c.value])\n .distinct()\n .order_by(RunTagsTable.c.key, RunTagsTable.c.value)\n )\n if tag_keys:\n query = query.where(RunTagsTable.c.key.in_(tag_keys))\n if value_prefix:\n query = query.where(RunTagsTable.c.value.startswith(value_prefix))\n if limit:\n query = query.limit(limit)\n rows = self.fetchall(query)\n for r in rows:\n result[r["key"]].add(r["value"])\n return sorted(list([(k, v) for k, v in result.items()]), key=lambda x: x[0])\n\n def get_run_tag_keys(self) -> Sequence[str]:\n query = db_select([RunTagsTable.c.key]).distinct().order_by(RunTagsTable.c.key)\n rows = self.fetchall(query)\n return sorted([r["key"] for r in rows])\n\n def add_run_tags(self, 
run_id: str, new_tags: Mapping[str, str]) -> None:\n check.str_param(run_id, "run_id")\n check.mapping_param(new_tags, "new_tags", key_type=str, value_type=str)\n\n run = self._get_run_by_id(run_id)\n if not run:\n raise DagsterRunNotFoundError(\n f"Run {run_id} was not found in instance.", invalid_run_id=run_id\n )\n current_tags = run.tags if run.tags else {}\n\n all_tags = merge_dicts(current_tags, new_tags)\n partition = all_tags.get(PARTITION_NAME_TAG)\n partition_set = all_tags.get(PARTITION_SET_TAG)\n\n with self.connect() as conn:\n conn.execute(\n RunsTable.update()\n .where(RunsTable.c.run_id == run_id)\n .values(\n run_body=serialize_value(run.with_tags(merge_dicts(current_tags, new_tags))),\n partition=partition,\n partition_set=partition_set,\n update_timestamp=pendulum.now("UTC"),\n )\n )\n\n current_tags_set = set(current_tags.keys())\n new_tags_set = set(new_tags.keys())\n\n existing_tags = current_tags_set & new_tags_set\n added_tags = new_tags_set.difference(existing_tags)\n\n for tag in existing_tags:\n conn.execute(\n RunTagsTable.update()\n .where(db.and_(RunTagsTable.c.run_id == run_id, RunTagsTable.c.key == tag))\n .values(value=new_tags[tag])\n )\n\n if added_tags:\n conn.execute(\n RunTagsTable.insert(),\n [dict(run_id=run_id, key=tag, value=new_tags[tag]) for tag in added_tags],\n )\n\n def get_run_group(self, run_id: str) -> Tuple[str, Sequence[DagsterRun]]:\n check.str_param(run_id, "run_id")\n dagster_run = self._get_run_by_id(run_id)\n if not dagster_run:\n raise DagsterRunNotFoundError(\n f"Run {run_id} was not found in instance.", invalid_run_id=run_id\n )\n\n # find root_run\n root_run_id = dagster_run.root_run_id if dagster_run.root_run_id else dagster_run.run_id\n root_run = self._get_run_by_id(root_run_id)\n if not root_run:\n raise DagsterRunNotFoundError(\n f"Run id {root_run_id} set as root run id for run {run_id} was not found in"\n " instance.",\n invalid_run_id=root_run_id,\n )\n\n # root_run_id to run_id 1:1 mapping\n # https://github.com/dagster-io/dagster/issues/2495\n # Note: we currently use tags to persist the run group info\n root_to_run = db_subquery(\n db_select(\n [RunTagsTable.c.value.label("root_run_id"), RunTagsTable.c.run_id.label("run_id")]\n ).where(\n db.and_(RunTagsTable.c.key == ROOT_RUN_ID_TAG, RunTagsTable.c.value == root_run_id)\n ),\n "root_to_run",\n )\n # get run group\n run_group_query = db_select([RunsTable.c.run_body, RunsTable.c.status]).select_from(\n root_to_run.join(\n RunsTable,\n root_to_run.c.run_id == RunsTable.c.run_id,\n isouter=True,\n )\n )\n\n res = self.fetchall(run_group_query)\n run_group = self._rows_to_runs(res)\n\n return (root_run_id, [root_run, *run_group])\n\n def has_run(self, run_id: str) -> bool:\n check.str_param(run_id, "run_id")\n return bool(self._get_run_by_id(run_id))\n\n def delete_run(self, run_id: str) -> None:\n check.str_param(run_id, "run_id")\n query = db.delete(RunsTable).where(RunsTable.c.run_id == run_id)\n with self.connect() as conn:\n conn.execute(query)\n\n def has_job_snapshot(self, job_snapshot_id: str) -> bool:\n check.str_param(job_snapshot_id, "job_snapshot_id")\n return self._has_snapshot_id(job_snapshot_id)\n\n def add_job_snapshot(self, job_snapshot: JobSnapshot, snapshot_id: Optional[str] = None) -> str:\n check.inst_param(job_snapshot, "job_snapshot", JobSnapshot)\n check.opt_str_param(snapshot_id, "snapshot_id")\n\n if not snapshot_id:\n snapshot_id = create_job_snapshot_id(job_snapshot)\n\n return self._add_snapshot(\n snapshot_id=snapshot_id,\n 
snapshot_obj=job_snapshot,\n snapshot_type=SnapshotType.PIPELINE,\n )\n\n def get_job_snapshot(self, job_snapshot_id: str) -> JobSnapshot:\n check.str_param(job_snapshot_id, "job_snapshot_id")\n return self._get_snapshot(job_snapshot_id) # type: ignore # (allowed to return None?)\n\n def has_execution_plan_snapshot(self, execution_plan_snapshot_id: str) -> bool:\n check.str_param(execution_plan_snapshot_id, "execution_plan_snapshot_id")\n return bool(self.get_execution_plan_snapshot(execution_plan_snapshot_id))\n\n def add_execution_plan_snapshot(\n self, execution_plan_snapshot: ExecutionPlanSnapshot, snapshot_id: Optional[str] = None\n ) -> str:\n check.inst_param(execution_plan_snapshot, "execution_plan_snapshot", ExecutionPlanSnapshot)\n check.opt_str_param(snapshot_id, "snapshot_id")\n\n if not snapshot_id:\n snapshot_id = create_execution_plan_snapshot_id(execution_plan_snapshot)\n\n return self._add_snapshot(\n snapshot_id=snapshot_id,\n snapshot_obj=execution_plan_snapshot,\n snapshot_type=SnapshotType.EXECUTION_PLAN,\n )\n\n def get_execution_plan_snapshot(self, execution_plan_snapshot_id: str) -> ExecutionPlanSnapshot:\n check.str_param(execution_plan_snapshot_id, "execution_plan_snapshot_id")\n return self._get_snapshot(execution_plan_snapshot_id) # type: ignore # (allowed to return None?)\n\n def _add_snapshot(self, snapshot_id: str, snapshot_obj, snapshot_type: SnapshotType) -> str:\n check.str_param(snapshot_id, "snapshot_id")\n check.not_none_param(snapshot_obj, "snapshot_obj")\n check.inst_param(snapshot_type, "snapshot_type", SnapshotType)\n\n with self.connect() as conn:\n snapshot_insert = SnapshotsTable.insert().values(\n snapshot_id=snapshot_id,\n snapshot_body=zlib.compress(serialize_value(snapshot_obj).encode("utf-8")),\n snapshot_type=snapshot_type.value,\n )\n try:\n conn.execute(snapshot_insert)\n except db_exc.IntegrityError:\n # on_conflict_do_nothing equivalent\n pass\n\n return snapshot_id\n\n def get_run_storage_id(self) -> str:\n query = db_select([InstanceInfo.c.run_storage_id])\n row = self.fetchone(query)\n if not row:\n run_storage_id = str(uuid.uuid4())\n with self.connect() as conn:\n conn.execute(InstanceInfo.insert().values(run_storage_id=run_storage_id))\n return run_storage_id\n else:\n return row["run_storage_id"]\n\n def _has_snapshot_id(self, snapshot_id: str) -> bool:\n query = db_select([SnapshotsTable.c.snapshot_id]).where(\n SnapshotsTable.c.snapshot_id == snapshot_id\n )\n\n row = self.fetchone(query)\n\n return bool(row)\n\n def _get_snapshot(self, snapshot_id: str) -> Optional[JobSnapshot]:\n query = db_select([SnapshotsTable.c.snapshot_body]).where(\n SnapshotsTable.c.snapshot_id == snapshot_id\n )\n\n row = self.fetchone(query)\n\n return defensively_unpack_execution_plan_snapshot_query(logging, [row["snapshot_body"]]) if row else None # type: ignore\n\n def get_run_partition_data(self, runs_filter: RunsFilter) -> Sequence[RunPartitionData]:\n if self.has_built_index(RUN_PARTITIONS) and self.has_run_stats_index_cols():\n query = self._runs_query(\n filters=runs_filter,\n columns=["run_id", "status", "start_time", "end_time", "partition"],\n )\n rows = self.fetchall(query)\n\n # dedup by partition\n _partition_data_by_partition = {}\n for row in rows:\n if not row["partition"] or row["partition"] in _partition_data_by_partition:\n continue\n\n _partition_data_by_partition[row["partition"]] = RunPartitionData(\n run_id=row["run_id"],\n partition=row["partition"],\n status=DagsterRunStatus[row["status"]],\n start_time=row["start_time"],\n 
end_time=row["end_time"],\n )\n\n return list(_partition_data_by_partition.values())\n else:\n query = self._runs_query(filters=runs_filter)\n rows = self.fetchall(query)\n _partition_data_by_partition = {}\n for row in rows:\n run = self._row_to_run(row)\n partition = run.tags.get(PARTITION_NAME_TAG)\n if not partition or partition in _partition_data_by_partition:\n continue\n\n _partition_data_by_partition[partition] = RunPartitionData(\n run_id=run.run_id,\n partition=partition,\n status=run.status,\n start_time=None,\n end_time=None,\n )\n\n return list(_partition_data_by_partition.values())\n\n def _get_partition_runs(\n self, partition_set_name: str, partition_name: str\n ) -> Sequence[DagsterRun]:\n # utility method to help test reads off of the partition column\n if not self.has_built_index(RUN_PARTITIONS):\n # query by tags\n return self.get_runs(\n filters=RunsFilter(\n tags={\n PARTITION_SET_TAG: partition_set_name,\n PARTITION_NAME_TAG: partition_name,\n }\n )\n )\n else:\n query = (\n self._runs_query()\n .where(RunsTable.c.partition == partition_name)\n .where(RunsTable.c.partition_set == partition_set_name)\n )\n rows = self.fetchall(query)\n return self._rows_to_runs(rows)\n\n # Tracking data migrations over secondary indexes\n\n def _execute_data_migrations(\n self,\n migrations: Mapping[str, Callable[[], MigrationFn]],\n print_fn: Optional[PrintFn] = None,\n force_rebuild_all: bool = False,\n ) -> None:\n for migration_name, migration_fn in migrations.items():\n if self.has_built_index(migration_name):\n if not force_rebuild_all:\n if print_fn:\n print_fn(f"Skipping already applied data migration: {migration_name}")\n continue\n if print_fn:\n print_fn(f"Starting data migration: {migration_name}")\n migration_fn()(self, print_fn)\n self.mark_index_built(migration_name)\n if print_fn:\n print_fn(f"Finished data migration: {migration_name}")\n\n def migrate(self, print_fn: Optional[PrintFn] = None, force_rebuild_all: bool = False) -> None:\n self._execute_data_migrations(REQUIRED_DATA_MIGRATIONS, print_fn, force_rebuild_all)\n\n def optimize(self, print_fn: Optional[PrintFn] = None, force_rebuild_all: bool = False) -> None:\n self._execute_data_migrations(OPTIONAL_DATA_MIGRATIONS, print_fn, force_rebuild_all)\n\n def has_built_index(self, migration_name: str) -> bool:\n query = (\n db_select([1])\n .where(SecondaryIndexMigrationTable.c.name == migration_name)\n .where(SecondaryIndexMigrationTable.c.migration_completed != None) # noqa: E711\n .limit(1)\n )\n results = self.fetchall(query)\n\n return len(results) > 0\n\n def mark_index_built(self, migration_name: str) -> None:\n query = SecondaryIndexMigrationTable.insert().values(\n name=migration_name,\n migration_completed=datetime.now(),\n )\n with self.connect() as conn:\n try:\n conn.execute(query)\n except db_exc.IntegrityError:\n conn.execute(\n SecondaryIndexMigrationTable.update()\n .where(SecondaryIndexMigrationTable.c.name == migration_name)\n .values(migration_completed=datetime.now())\n )\n\n # Checking for migrations\n\n def has_run_stats_index_cols(self) -> bool:\n with self.connect() as conn:\n column_names = [x.get("name") for x in db.inspect(conn).get_columns(RunsTable.name)]\n return "start_time" in column_names and "end_time" in column_names\n\n def has_bulk_actions_selector_cols(self) -> bool:\n with self.connect() as conn:\n column_names = [\n x.get("name") for x in db.inspect(conn).get_columns(BulkActionsTable.name)\n ]\n return "selector_id" in column_names\n\n # Daemon heartbeats\n\n def 
add_daemon_heartbeat(self, daemon_heartbeat: DaemonHeartbeat) -> None:\n with self.connect() as conn:\n # insert, or update if already present\n try:\n conn.execute(\n DaemonHeartbeatsTable.insert().values(\n timestamp=utc_datetime_from_timestamp(daemon_heartbeat.timestamp),\n daemon_type=daemon_heartbeat.daemon_type,\n daemon_id=daemon_heartbeat.daemon_id,\n body=serialize_value(daemon_heartbeat),\n )\n )\n except db_exc.IntegrityError:\n conn.execute(\n DaemonHeartbeatsTable.update()\n .where(DaemonHeartbeatsTable.c.daemon_type == daemon_heartbeat.daemon_type)\n .values(\n timestamp=utc_datetime_from_timestamp(daemon_heartbeat.timestamp),\n daemon_id=daemon_heartbeat.daemon_id,\n body=serialize_value(daemon_heartbeat),\n )\n )\n\n def get_daemon_heartbeats(self) -> Mapping[str, DaemonHeartbeat]:\n rows = self.fetchall(db_select([DaemonHeartbeatsTable.c.body]))\n heartbeats = []\n for row in rows:\n heartbeats.append(deserialize_value(row["body"], DaemonHeartbeat))\n return {heartbeat.daemon_type: heartbeat for heartbeat in heartbeats}\n\n def wipe(self) -> None:\n """Clears the run storage."""\n with self.connect() as conn:\n # https://stackoverflow.com/a/54386260/324449\n conn.execute(RunsTable.delete())\n conn.execute(RunTagsTable.delete())\n conn.execute(SnapshotsTable.delete())\n conn.execute(DaemonHeartbeatsTable.delete())\n conn.execute(BulkActionsTable.delete())\n\n def wipe_daemon_heartbeats(self) -> None:\n with self.connect() as conn:\n # https://stackoverflow.com/a/54386260/324449\n conn.execute(DaemonHeartbeatsTable.delete())\n\n def get_backfills(\n self,\n status: Optional[BulkActionStatus] = None,\n cursor: Optional[str] = None,\n limit: Optional[int] = None,\n ) -> Sequence[PartitionBackfill]:\n check.opt_inst_param(status, "status", BulkActionStatus)\n query = db_select([BulkActionsTable.c.body])\n if status:\n query = query.where(BulkActionsTable.c.status == status.value)\n if cursor:\n cursor_query = db_select([BulkActionsTable.c.id]).where(\n BulkActionsTable.c.key == cursor\n )\n query = query.where(BulkActionsTable.c.id < cursor_query)\n if limit:\n query = query.limit(limit)\n query = query.order_by(BulkActionsTable.c.id.desc())\n rows = self.fetchall(query)\n return [deserialize_value(row["body"], PartitionBackfill) for row in rows]\n\n def get_backfill(self, backfill_id: str) -> Optional[PartitionBackfill]:\n check.str_param(backfill_id, "backfill_id")\n query = db_select([BulkActionsTable.c.body]).where(BulkActionsTable.c.key == backfill_id)\n row = self.fetchone(query)\n return deserialize_value(row["body"], PartitionBackfill) if row else None\n\n def add_backfill(self, partition_backfill: PartitionBackfill) -> None:\n check.inst_param(partition_backfill, "partition_backfill", PartitionBackfill)\n values: Dict[str, Any] = dict(\n key=partition_backfill.backfill_id,\n status=partition_backfill.status.value,\n timestamp=utc_datetime_from_timestamp(partition_backfill.backfill_timestamp),\n body=serialize_value(cast(NamedTuple, partition_backfill)),\n )\n\n if self.has_bulk_actions_selector_cols():\n values["selector_id"] = partition_backfill.selector_id\n values["action_type"] = partition_backfill.bulk_action_type.value\n\n with self.connect() as conn:\n conn.execute(BulkActionsTable.insert().values(**values))\n\n def update_backfill(self, partition_backfill: PartitionBackfill) -> None:\n check.inst_param(partition_backfill, "partition_backfill", PartitionBackfill)\n backfill_id = partition_backfill.backfill_id\n if not self.get_backfill(backfill_id):\n raise 
DagsterInvariantViolationError(\n f"Backfill {backfill_id} is not present in storage"\n )\n with self.connect() as conn:\n conn.execute(\n BulkActionsTable.update()\n .where(BulkActionsTable.c.key == backfill_id)\n .values(\n status=partition_backfill.status.value,\n body=serialize_value(partition_backfill),\n )\n )\n\n def get_cursor_values(self, keys: Set[str]) -> Mapping[str, str]:\n check.set_param(keys, "keys", of_type=str)\n\n rows = self.fetchall(\n db_select([KeyValueStoreTable.c.key, KeyValueStoreTable.c.value]).where(\n KeyValueStoreTable.c.key.in_(keys)\n ),\n )\n return {row["key"]: row["value"] for row in rows}\n\n def set_cursor_values(self, pairs: Mapping[str, str]) -> None:\n check.mapping_param(pairs, "pairs", key_type=str, value_type=str)\n db_values = [{"key": k, "value": v} for k, v in pairs.items()]\n\n with self.connect() as conn:\n try:\n conn.execute(KeyValueStoreTable.insert().values(db_values))\n except db_exc.IntegrityError:\n conn.execute(\n KeyValueStoreTable.update()\n .where(KeyValueStoreTable.c.key.in_(pairs.keys()))\n .values(value=db.sql.case(pairs, value=KeyValueStoreTable.c.key))\n )\n\n # Migrating run history\n def replace_job_origin(self, run: DagsterRun, job_origin: ExternalJobOrigin) -> None:\n new_label = job_origin.external_repository_origin.get_label()\n with self.connect() as conn:\n conn.execute(\n RunsTable.update()\n .where(RunsTable.c.run_id == run.run_id)\n .values(\n run_body=serialize_value(run.with_job_origin(job_origin)),\n )\n )\n conn.execute(\n RunTagsTable.update()\n .where(RunTagsTable.c.run_id == run.run_id)\n .where(RunTagsTable.c.key == REPOSITORY_LABEL_TAG)\n .values(value=new_label)\n )
\n\n\nGET_PIPELINE_SNAPSHOT_QUERY_ID = "get-pipeline-snapshot"\n\n\ndef defensively_unpack_execution_plan_snapshot_query(\n logger: logging.Logger, row: Sequence[Any]\n) -> Optional[Union[ExecutionPlanSnapshot, JobSnapshot]]:\n # minimal checking here because sqlalchemy returns a different type based on what version of\n # SqlAlchemy you are using\n\n def _warn(msg: str) -> None:\n logger.warning(f"get-pipeline-snapshot: {msg}")\n\n if not isinstance(row[0], bytes):\n _warn("First entry in row is not a binary type.")\n return None\n\n try:\n uncompressed_bytes = zlib.decompress(row[0])\n except zlib.error:\n _warn("Could not decompress bytes stored in snapshot table.")\n return None\n\n try:\n decoded_str = uncompressed_bytes.decode("utf-8")\n except UnicodeDecodeError:\n _warn("Could not unicode decode decompressed bytes stored in snapshot table.")\n return None\n\n try:\n return deserialize_value(decoded_str, (ExecutionPlanSnapshot, JobSnapshot))\n except JSONDecodeError:\n _warn("Could not parse json in snapshot table.")\n return None\n
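The cursor handling in `_add_cursor_limit_to_query` treats the cursor as a `run_id` and, by default, returns runs newest-first, so a caller can page through history by feeding the last `run_id` of each page back in. A hedged sketch of that loop using the public `DagsterInstance.get_runs` wrapper (the `process` hook is a placeholder):

```python
from dagster import DagsterInstance


def process(run):
    # Placeholder for real per-run logic.
    print(run.run_id, run.status)


instance = DagsterInstance.get()  # assumes run storage configured via dagster.yaml

cursor = None
while True:
    page = instance.get_runs(cursor=cursor, limit=100)  # newest-first
    if not page:
        break
    for run in page:
        process(run)
    cursor = page[-1].run_id  # next page returns runs strictly older than this one
```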
", "current_page_name": "_modules/dagster/_core/storage/runs/sql_run_storage", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.runs.sql_run_storage"}, "sqlite": {"sqlite_run_storage": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.runs.sqlite.sqlite_run_storage

\nimport os\nfrom contextlib import contextmanager\nfrom typing import TYPE_CHECKING, Iterator, Optional\nfrom urllib.parse import urljoin, urlparse\n\nimport sqlalchemy as db\nfrom sqlalchemy.engine import Connection\nfrom sqlalchemy.pool import NullPool\nfrom typing_extensions import Self\n\nfrom dagster import (\n    StringSource,\n    _check as check,\n)\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._core.storage.sql import (\n    AlembicVersion,\n    check_alembic_revision,\n    create_engine,\n    get_alembic_config,\n    run_alembic_downgrade,\n    run_alembic_upgrade,\n    stamp_alembic_rev,\n)\nfrom dagster._core.storage.sqlite import create_db_conn_string\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData\nfrom dagster._utils import mkdir_p\n\nfrom ..schema import InstanceInfo, RunsTable, RunStorageSqlMetadata, RunTagsTable\nfrom ..sql_run_storage import SqlRunStorage\n\nif TYPE_CHECKING:\n    from dagster._core.storage.sqlite_storage import SqliteStorageConfig\nMINIMUM_SQLITE_BUCKET_VERSION = [3, 25, 0]\n\n\n
[docs]class SqliteRunStorage(SqlRunStorage, ConfigurableClass):\n """SQLite-backed run storage.\n\n Users should not directly instantiate this class; it is instantiated by internal machinery when\n ``dagster-webserver`` and ``dagster-graphql`` load, based on the values in the ``dagster.yaml`` file in\n ``$DAGSTER_HOME``. Configuration of this class should be done by setting values in that file.\n\n This is the default run storage when none is specified in the ``dagster.yaml``.\n\n To explicitly specify SQLite for run storage, you can add a block such as the following to your\n ``dagster.yaml``:\n\n .. code-block:: YAML\n\n run_storage:\n module: dagster._core.storage.runs\n class: SqliteRunStorage\n config:\n base_dir: /path/to/dir\n\n The ``base_dir`` param tells the run storage where on disk to store the database.\n """\n\n def __init__(self, conn_string: str, inst_data: Optional[ConfigurableClassData] = None):\n check.str_param(conn_string, "conn_string")\n self._conn_string = conn_string\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n super().__init__()\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n @classmethod\n def config_type(cls) -> UserConfigSchema:\n return {"base_dir": StringSource}\n\n @classmethod\n def from_config_value(\n cls, inst_data: Optional[ConfigurableClassData], config_value: "SqliteStorageConfig"\n ) -> "SqliteRunStorage":\n return SqliteRunStorage.from_local(inst_data=inst_data, **config_value)\n\n @classmethod\n def from_local(cls, base_dir: str, inst_data: Optional[ConfigurableClassData] = None) -> Self:\n check.str_param(base_dir, "base_dir")\n mkdir_p(base_dir)\n conn_string = create_db_conn_string(base_dir, "runs")\n engine = create_engine(conn_string, poolclass=NullPool)\n alembic_config = get_alembic_config(__file__)\n\n should_mark_indexes = False\n with engine.connect() as connection:\n db_revision, head_revision = check_alembic_revision(alembic_config, connection)\n if not (db_revision and head_revision):\n RunStorageSqlMetadata.create_all(engine)\n connection.execute(db.text("PRAGMA journal_mode=WAL;"))\n stamp_alembic_rev(alembic_config, connection)\n should_mark_indexes = True\n\n table_names = db.inspect(engine).get_table_names()\n if "instance_info" not in table_names:\n InstanceInfo.create(engine)\n\n run_storage = cls(conn_string, inst_data)\n\n if should_mark_indexes:\n run_storage.migrate()\n run_storage.optimize()\n\n return run_storage\n\n @contextmanager\n def connect(self) -> Iterator[Connection]:\n engine = create_engine(self._conn_string, poolclass=NullPool)\n with engine.connect() as conn:\n with conn.begin():\n yield conn\n\n def _alembic_upgrade(self, rev: str = "head") -> None:\n alembic_config = get_alembic_config(__file__)\n with self.connect() as conn:\n run_alembic_upgrade(alembic_config, conn, rev=rev)\n\n def _alembic_downgrade(self, rev: str = "head") -> None:\n alembic_config = get_alembic_config(__file__)\n with self.connect() as conn:\n run_alembic_downgrade(alembic_config, conn, rev=rev)\n\n def upgrade(self) -> None:\n self._check_for_version_066_migration_and_perform()\n self._alembic_upgrade()\n\n # In version 0.6.6, we changed the layout of the of the sqllite dbs on disk\n # to move from the root of DAGSTER_HOME/runs.db to DAGSTER_HOME/history/runs.bd\n # This function checks for that condition and does the move\n def _check_for_version_066_migration_and_perform(self) -> None:\n old_conn_string = "sqlite://" + 
urljoin(urlparse(self._conn_string).path, "../runs.db")\n path_to_old_db = urlparse(old_conn_string).path\n # sqlite URLs look like `sqlite:///foo/bar/baz on Unix/Mac` but on Windows they look like\n # `sqlite:///D:/foo/bar/baz` (or `sqlite:///D:\\foo\\bar\\baz`)\n if os.name == "nt":\n path_to_old_db = path_to_old_db.lstrip("/")\n if os.path.exists(path_to_old_db):\n old_storage = SqliteRunStorage(old_conn_string)\n old_runs = old_storage.get_runs()\n for run in old_runs:\n self.add_run(run)\n os.unlink(path_to_old_db)\n\n def delete_run(self, run_id: str) -> None:\n """Override the default sql delete run implementation until we can get full\n support on cascading deletes.\n """\n check.str_param(run_id, "run_id")\n remove_tags = db.delete(RunTagsTable).where(RunTagsTable.c.run_id == run_id)\n remove_run = db.delete(RunsTable).where(RunsTable.c.run_id == run_id)\n with self.connect() as conn:\n conn.execute(remove_tags)\n conn.execute(remove_run)\n\n def alembic_version(self) -> AlembicVersion:\n alembic_config = get_alembic_config(__file__)\n with self.connect() as conn:\n return check_alembic_revision(alembic_config, conn)
\n
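For local experiments or tests, the `from_local` constructor shown above can stand up an isolated SQLite-backed run storage in a scratch directory. A small sketch (illustrative only; the class lives at the `dagster._core.storage.runs` module path referenced in the `dagster.yaml` example above):

```python
import tempfile

from dagster._core.storage.runs import SqliteRunStorage

# Create a throwaway SQLite run storage; the database files live under base_dir.
with tempfile.TemporaryDirectory() as base_dir:
    storage = SqliteRunStorage.from_local(base_dir)
    assert storage.get_runs_count() == 0  # fresh database, no runs yet
    storage.dispose()
```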
", "current_page_name": "_modules/dagster/_core/storage/runs/sqlite/sqlite_run_storage", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.runs.sqlite.sqlite_run_storage"}}}, "schedules": {"base": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.schedules.base

\nimport abc\nfrom typing import Mapping, Optional, Sequence, Set\n\nfrom dagster import AssetKey\nfrom dagster._core.definitions.auto_materialize_rule import AutoMaterializeAssetEvaluation\nfrom dagster._core.definitions.run_request import InstigatorType\nfrom dagster._core.instance import MayHaveInstanceWeakref, T_DagsterInstance\nfrom dagster._core.scheduler.instigation import (\n    AutoMaterializeAssetEvaluationRecord,\n    InstigatorState,\n    InstigatorStatus,\n    InstigatorTick,\n    TickData,\n    TickStatus,\n)\nfrom dagster._core.storage.sql import AlembicVersion\nfrom dagster._utils import PrintFn\n\n\n
[docs]class ScheduleStorage(abc.ABC, MayHaveInstanceWeakref[T_DagsterInstance]):\n """Abstract class for managing persistance of scheduler artifacts."""\n\n @abc.abstractmethod\n def wipe(self) -> None:\n """Delete all schedules from storage."""\n\n @abc.abstractmethod\n def all_instigator_state(\n self,\n repository_origin_id: Optional[str] = None,\n repository_selector_id: Optional[str] = None,\n instigator_type: Optional[InstigatorType] = None,\n instigator_statuses: Optional[Set[InstigatorStatus]] = None,\n ) -> Sequence[InstigatorState]:\n """Return all InstigationStates present in storage.\n\n Args:\n repository_origin_id (Optional[str]): The ExternalRepository target id to scope results to\n repository_selector_id (Optional[str]): The repository selector id to scope results to\n instigator_type (Optional[InstigatorType]): The InstigatorType to scope results to\n instigator_statuses (Optional[Set[InstigatorStatus]]): The InstigatorStatuses to scope results to\n """\n\n @abc.abstractmethod\n def get_instigator_state(self, origin_id: str, selector_id: str) -> Optional[InstigatorState]:\n """Return the instigator state for the given id.\n\n Args:\n origin_id (str): The unique instigator identifier\n selector_id (str): The logical instigator identifier\n """\n\n @abc.abstractmethod\n def add_instigator_state(self, state: InstigatorState) -> InstigatorState:\n """Add an instigator state to storage.\n\n Args:\n state (InstigatorState): The state to add\n """\n\n @abc.abstractmethod\n def update_instigator_state(self, state: InstigatorState) -> InstigatorState:\n """Update an instigator state in storage.\n\n Args:\n state (InstigatorState): The state to update\n """\n\n @abc.abstractmethod\n def delete_instigator_state(self, origin_id: str, selector_id: str) -> None:\n """Delete a state in storage.\n\n Args:\n origin_id (str): The id of the instigator target to delete\n selector_id (str): The logical instigator identifier\n """\n\n @property\n def supports_batch_queries(self) -> bool:\n return False\n\n def get_batch_ticks(\n self,\n selector_ids: Sequence[str],\n limit: Optional[int] = None,\n statuses: Optional[Sequence[TickStatus]] = None,\n ) -> Mapping[str, Sequence[InstigatorTick]]:\n raise NotImplementedError()\n\n @abc.abstractmethod\n def get_ticks(\n self,\n origin_id: str,\n selector_id: str,\n before: Optional[float] = None,\n after: Optional[float] = None,\n limit: Optional[int] = None,\n statuses: Optional[Sequence[TickStatus]] = None,\n ) -> Sequence[InstigatorTick]:\n """Get the ticks for a given instigator.\n\n Args:\n origin_id (str): The id of the instigator target\n selector_id (str): The logical instigator identifier\n """\n\n @abc.abstractmethod\n def create_tick(self, tick_data: TickData) -> InstigatorTick:\n """Add a tick to storage.\n\n Args:\n tick_data (TickData): The tick to add\n """\n\n @abc.abstractmethod\n def update_tick(self, tick: InstigatorTick) -> InstigatorTick:\n """Update a tick already in storage.\n\n Args:\n tick (InstigatorTick): The tick to update\n """\n\n @abc.abstractmethod\n def purge_ticks(\n self,\n origin_id: str,\n selector_id: str,\n before: float,\n tick_statuses: Optional[Sequence[TickStatus]] = None,\n ) -> None:\n """Wipe ticks for an instigator for a certain status and timestamp.\n\n Args:\n origin_id (str): The id of the instigator target to delete\n selector_id (str): The logical instigator identifier\n before (datetime): All ticks before this datetime will get purged\n tick_statuses (Optional[List[TickStatus]]): The tick statuses 
to wipe\n """\n\n @property\n def supports_auto_materialize_asset_evaluations(self) -> bool:\n return True\n\n @abc.abstractmethod\n def add_auto_materialize_asset_evaluations(\n self,\n evaluation_id: int,\n asset_evaluations: Sequence[AutoMaterializeAssetEvaluation],\n ) -> None:\n """Add asset policy evaluations to storage."""\n\n @abc.abstractmethod\n def get_auto_materialize_asset_evaluations(\n self, asset_key: AssetKey, limit: int, cursor: Optional[int] = None\n ) -> Sequence[AutoMaterializeAssetEvaluationRecord]:\n """Get the policy evaluations for a given asset.\n\n Args:\n asset_key (AssetKey): The asset key to query\n limit (Optional[int]): The maximum number of evaluations to return\n cursor (Optional[int]): The cursor to paginate from\n """\n\n @abc.abstractmethod\n def get_auto_materialize_evaluations_for_evaluation_id(\n self, evaluation_id: int\n ) -> Sequence[AutoMaterializeAssetEvaluationRecord]:\n """Get all policy evaluations for a given evaluation ID.\n\n Args:\n evaluation_id (int): The evaluation ID to query.\n """\n\n @abc.abstractmethod\n def purge_asset_evaluations(self, before: float) -> None:\n """Wipe evaluations before a certain timestamp.\n\n Args:\n before (datetime): All evaluations before this datetime will get purged\n """\n\n @abc.abstractmethod\n def upgrade(self) -> None:\n """Perform any needed migrations."""\n\n def migrate(self, print_fn: Optional[PrintFn] = None, force_rebuild_all: bool = False) -> None:\n """Call this method to run any required data migrations."""\n\n def optimize(self, print_fn: Optional[PrintFn] = None, force_rebuild_all: bool = False) -> None:\n """Call this method to run any optional data migrations for optimized reads."""\n\n def optimize_for_webserver(self, statement_timeout: int, pool_recycle: int) -> None:\n """Allows for optimizing database connection / use in the context of a long lived webserver process."""\n\n def alembic_version(self) -> Optional[AlembicVersion]:\n return None\n\n def dispose(self) -> None:\n """Explicit lifecycle management."""
\n
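As a hedged illustration of the `purge_ticks` contract above (float timestamp cutoff, optional status filter), the sketch below prunes old ticks for a single instigator. It assumes the instance exposes its configured `ScheduleStorage` as `instance.schedule_storage`; the origin and selector ids are placeholders for a real instigator's identifiers:

```python
import time

from dagster import DagsterInstance

instance = DagsterInstance.get()
schedule_storage = instance.schedule_storage  # assumed accessor for the configured ScheduleStorage

# Placeholder identifiers; substitute a real instigator's origin/selector ids.
ORIGIN_ID = "<instigator-origin-id>"
SELECTOR_ID = "<instigator-selector-id>"

cutoff = time.time() - 30 * 24 * 60 * 60  # purge ticks older than 30 days
if schedule_storage is not None:
    schedule_storage.purge_ticks(
        origin_id=ORIGIN_ID,
        selector_id=SELECTOR_ID,
        before=cutoff,  # float timestamp, per the abstract signature above
    )
```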
", "current_page_name": "_modules/dagster/_core/storage/schedules/base", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.schedules.base"}, "sql_schedule_storage": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.schedules.sql_schedule_storage

\nfrom abc import abstractmethod\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom typing import (\n    Any,\n    Callable,\n    ContextManager,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Type,\n    TypeVar,\n)\n\nimport pendulum\nimport sqlalchemy as db\nimport sqlalchemy.exc as db_exc\nfrom sqlalchemy.engine import Connection\n\nimport dagster._check as check\nfrom dagster._core.definitions.auto_materialize_rule import AutoMaterializeAssetEvaluation\nfrom dagster._core.definitions.events import AssetKey\nfrom dagster._core.definitions.run_request import InstigatorType\nfrom dagster._core.errors import DagsterInvariantViolationError\nfrom dagster._core.scheduler.instigation import (\n    AutoMaterializeAssetEvaluationRecord,\n    InstigatorState,\n    InstigatorStatus,\n    InstigatorTick,\n    TickData,\n    TickStatus,\n)\nfrom dagster._core.storage.sql import SqlAlchemyQuery, SqlAlchemyRow\nfrom dagster._core.storage.sqlalchemy_compat import db_fetch_mappings, db_select, db_subquery\nfrom dagster._serdes import serialize_value\nfrom dagster._serdes.serdes import deserialize_value\nfrom dagster._utils import PrintFn, utc_datetime_from_timestamp\n\nfrom .base import ScheduleStorage\nfrom .migration import (\n    OPTIONAL_SCHEDULE_DATA_MIGRATIONS,\n    REQUIRED_SCHEDULE_DATA_MIGRATIONS,\n    SCHEDULE_JOBS_SELECTOR_ID,\n    SCHEDULE_TICKS_SELECTOR_ID,\n)\nfrom .schema import (\n    AssetDaemonAssetEvaluationsTable,\n    InstigatorsTable,\n    JobTable,\n    JobTickTable,\n    SecondaryIndexMigrationTable,\n)\n\nT_NamedTuple = TypeVar("T_NamedTuple", bound=NamedTuple)\n\n\n
[docs]class SqlScheduleStorage(ScheduleStorage):\n """Base class for SQL backed schedule storage."""\n\n @abstractmethod\n def connect(self) -> ContextManager[Connection]:\n """Context manager yielding a sqlalchemy.engine.Connection."""\n\n def execute(self, query: SqlAlchemyQuery) -> Sequence[SqlAlchemyRow]:\n with self.connect() as conn:\n result_proxy = conn.execute(query)\n res = result_proxy.fetchall()\n result_proxy.close()\n\n return res\n\n def _deserialize_rows(\n self, rows: Sequence[SqlAlchemyRow], as_type: Type[T_NamedTuple]\n ) -> Sequence[T_NamedTuple]:\n return list(map(lambda r: deserialize_value(r[0], as_type), rows))\n\n def all_instigator_state(\n self,\n repository_origin_id: Optional[str] = None,\n repository_selector_id: Optional[str] = None,\n instigator_type: Optional[InstigatorType] = None,\n instigator_statuses: Optional[Set[InstigatorStatus]] = None,\n ) -> Sequence[InstigatorState]:\n check.opt_inst_param(instigator_type, "instigator_type", InstigatorType)\n\n if self.has_instigators_table() and self.has_built_index(SCHEDULE_JOBS_SELECTOR_ID):\n query = db_select([InstigatorsTable.c.instigator_body]).select_from(InstigatorsTable)\n if repository_selector_id:\n query = query.where(\n InstigatorsTable.c.repository_selector_id == repository_selector_id\n )\n if instigator_type:\n query = query.where(InstigatorsTable.c.instigator_type == instigator_type.value)\n if instigator_statuses:\n query = query.where(\n InstigatorsTable.c.status.in_([status.value for status in instigator_statuses])\n )\n\n else:\n query = db_select([JobTable.c.job_body]).select_from(JobTable)\n if repository_origin_id:\n query = query.where(JobTable.c.repository_origin_id == repository_origin_id)\n if instigator_type:\n query = query.where(JobTable.c.job_type == instigator_type.value)\n if instigator_statuses:\n query = query.where(\n JobTable.c.status.in_([status.value for status in instigator_statuses])\n )\n\n rows = self.execute(query)\n return self._deserialize_rows(rows, InstigatorState)\n\n def get_instigator_state(self, origin_id: str, selector_id: str) -> Optional[InstigatorState]:\n check.str_param(origin_id, "origin_id")\n check.str_param(selector_id, "selector_id")\n\n if self.has_instigators_table() and self.has_built_index(SCHEDULE_JOBS_SELECTOR_ID):\n query = (\n db_select([InstigatorsTable.c.instigator_body])\n .select_from(InstigatorsTable)\n .where(InstigatorsTable.c.selector_id == selector_id)\n )\n else:\n query = (\n db_select([JobTable.c.job_body])\n .select_from(JobTable)\n .where(JobTable.c.job_origin_id == origin_id)\n )\n\n rows = self.execute(query)\n return self._deserialize_rows(rows[:1], InstigatorState)[0] if len(rows) else None\n\n def _has_instigator_state_by_selector(self, selector_id: str) -> bool:\n check.str_param(selector_id, "selector_id")\n\n query = (\n db_select([JobTable.c.job_body])\n .select_from(JobTable)\n .where(JobTable.c.selector_id == selector_id)\n )\n\n rows = self.execute(query)\n return self._deserialize_rows(rows[:1])[0] if len(rows) else None # type: ignore\n\n def _add_or_update_instigators_table(self, conn: Connection, state: InstigatorState) -> None:\n selector_id = state.selector_id\n try:\n conn.execute(\n InstigatorsTable.insert().values(\n selector_id=selector_id,\n repository_selector_id=state.repository_selector_id,\n status=state.status.value,\n instigator_type=state.instigator_type.value,\n instigator_body=serialize_value(state),\n )\n )\n except db_exc.IntegrityError:\n conn.execute(\n InstigatorsTable.update()\n 
.where(InstigatorsTable.c.selector_id == selector_id)\n .values(\n status=state.status.value,\n instigator_type=state.instigator_type.value,\n instigator_body=serialize_value(state),\n update_timestamp=pendulum.now("UTC"),\n )\n )\n\n def add_instigator_state(self, state: InstigatorState) -> InstigatorState:\n check.inst_param(state, "state", InstigatorState)\n with self.connect() as conn:\n try:\n conn.execute(\n JobTable.insert().values(\n job_origin_id=state.instigator_origin_id,\n repository_origin_id=state.repository_origin_id,\n status=state.status.value,\n job_type=state.instigator_type.value,\n job_body=serialize_value(state),\n )\n )\n except db_exc.IntegrityError as exc:\n raise DagsterInvariantViolationError(\n f"InstigatorState {state.instigator_origin_id} is already present in storage"\n ) from exc\n\n # try writing to the instigators table\n if self._has_instigators_table(conn):\n self._add_or_update_instigators_table(conn, state)\n\n return state\n\n def update_instigator_state(self, state: InstigatorState) -> InstigatorState:\n check.inst_param(state, "state", InstigatorState)\n if not self.get_instigator_state(state.instigator_origin_id, state.selector_id):\n raise DagsterInvariantViolationError(\n f"InstigatorState {state.instigator_origin_id} is not present in storage"\n )\n\n values = {\n "status": state.status.value,\n "job_body": serialize_value(state),\n "update_timestamp": pendulum.now("UTC"),\n }\n if self.has_instigators_table():\n values["selector_id"] = state.selector_id\n\n with self.connect() as conn:\n conn.execute(\n JobTable.update()\n .where(JobTable.c.job_origin_id == state.instigator_origin_id)\n .values(**values)\n )\n if self._has_instigators_table(conn):\n self._add_or_update_instigators_table(conn, state)\n\n return state\n\n def delete_instigator_state(self, origin_id: str, selector_id: str) -> None:\n check.str_param(origin_id, "origin_id")\n check.str_param(selector_id, "selector_id")\n\n if not self.get_instigator_state(origin_id, selector_id):\n raise DagsterInvariantViolationError(\n f"InstigatorState {origin_id} is not present in storage"\n )\n\n with self.connect() as conn:\n conn.execute(JobTable.delete().where(JobTable.c.job_origin_id == origin_id))\n\n if self._has_instigators_table(conn):\n if not self._jobs_has_selector_state(conn, selector_id):\n conn.execute(\n InstigatorsTable.delete().where(\n InstigatorsTable.c.selector_id == selector_id\n )\n )\n\n def _jobs_has_selector_state(self, conn: Connection, selector_id: str) -> bool:\n query = (\n db_select([db.func.count()])\n .select_from(JobTable)\n .where(JobTable.c.selector_id == selector_id)\n )\n result = conn.execute(query)\n row = result.fetchone()\n result.close()\n return row[0] > 0 # type: ignore # (possible none)\n\n def _add_filter_limit(\n self,\n query: SqlAlchemyQuery,\n before: Optional[float] = None,\n after: Optional[float] = None,\n limit: Optional[int] = None,\n statuses=None,\n ) -> SqlAlchemyQuery:\n check.opt_float_param(before, "before")\n check.opt_float_param(after, "after")\n check.opt_int_param(limit, "limit")\n check.opt_list_param(statuses, "statuses", of_type=TickStatus)\n\n if before:\n query = query.where(JobTickTable.c.timestamp < utc_datetime_from_timestamp(before))\n if after:\n query = query.where(JobTickTable.c.timestamp > utc_datetime_from_timestamp(after))\n if limit:\n query = query.limit(limit)\n if statuses:\n query = query.where(JobTickTable.c.status.in_([status.value for status in statuses]))\n return query\n\n @property\n def 
supports_batch_queries(self) -> bool:\n return self.has_instigators_table() and self.has_built_index(SCHEDULE_TICKS_SELECTOR_ID)\n\n def has_instigators_table(self) -> bool:\n with self.connect() as conn:\n return self._has_instigators_table(conn)\n\n def _has_instigators_table(self, conn: Connection) -> bool:\n table_names = db.inspect(conn).get_table_names()\n return "instigators" in table_names\n\n def _has_asset_daemon_asset_evaluations_table(self, conn: Connection) -> bool:\n table_names = db.inspect(conn).get_table_names()\n return "asset_daemon_asset_evaluations" in table_names\n\n def get_batch_ticks(\n self,\n selector_ids: Sequence[str],\n limit: Optional[int] = None,\n statuses: Optional[Sequence[TickStatus]] = None,\n ) -> Mapping[str, Sequence[InstigatorTick]]:\n check.sequence_param(selector_ids, "selector_ids", of_type=str)\n check.opt_int_param(limit, "limit")\n check.opt_sequence_param(statuses, "statuses", of_type=TickStatus)\n\n bucket_rank_column = (\n db.func.rank()\n .over(\n order_by=db.desc(JobTickTable.c.timestamp),\n partition_by=JobTickTable.c.selector_id,\n )\n .label("rank")\n )\n subquery = db_subquery(\n db_select(\n [\n JobTickTable.c.id,\n JobTickTable.c.selector_id,\n JobTickTable.c.tick_body,\n bucket_rank_column,\n ]\n )\n .select_from(JobTickTable)\n .where(JobTickTable.c.selector_id.in_(selector_ids))\n )\n if statuses:\n subquery = subquery.where(\n JobTickTable.c.status.in_([status.value for status in statuses])\n )\n\n query = (\n db_select([subquery.c.id, subquery.c.selector_id, subquery.c.tick_body])\n .order_by(subquery.c.rank.asc())\n .where(subquery.c.rank <= limit)\n )\n\n rows = self.execute(query)\n results = defaultdict(list)\n for row in rows:\n tick_id = row[0]\n selector_id = row[1]\n tick_data = deserialize_value(row[2], TickData)\n results[selector_id].append(InstigatorTick(tick_id, tick_data))\n return results\n\n def get_ticks(\n self,\n origin_id: str,\n selector_id: str,\n before: Optional[float] = None,\n after: Optional[float] = None,\n limit: Optional[int] = None,\n statuses: Optional[Sequence[TickStatus]] = None,\n ) -> Sequence[InstigatorTick]:\n check.str_param(origin_id, "origin_id")\n check.opt_float_param(before, "before")\n check.opt_float_param(after, "after")\n check.opt_int_param(limit, "limit")\n check.opt_list_param(statuses, "statuses", of_type=TickStatus)\n\n base_query = (\n db_select([JobTickTable.c.id, JobTickTable.c.tick_body])\n .select_from(JobTickTable)\n .order_by(JobTickTable.c.timestamp.desc())\n )\n if self.has_instigators_table():\n query = base_query.where(\n db.or_(\n JobTickTable.c.selector_id == selector_id,\n db.and_(\n JobTickTable.c.selector_id.is_(None),\n JobTickTable.c.job_origin_id == origin_id,\n ),\n )\n )\n else:\n query = base_query.where(JobTickTable.c.job_origin_id == origin_id)\n\n query = self._add_filter_limit(\n query, before=before, after=after, limit=limit, statuses=statuses\n )\n\n rows = self.execute(query)\n return list(map(lambda r: InstigatorTick(r[0], deserialize_value(r[1], TickData)), rows))\n\n def create_tick(self, tick_data: TickData) -> InstigatorTick:\n check.inst_param(tick_data, "tick_data", TickData)\n\n values = {\n "job_origin_id": tick_data.instigator_origin_id,\n "status": tick_data.status.value,\n "type": tick_data.instigator_type.value,\n "timestamp": utc_datetime_from_timestamp(tick_data.timestamp),\n "tick_body": serialize_value(tick_data),\n }\n if self.has_instigators_table() and tick_data.selector_id:\n values["selector_id"] = tick_data.selector_id\n\n 
with self.connect() as conn:\n try:\n tick_insert = JobTickTable.insert().values(**values)\n result = conn.execute(tick_insert)\n tick_id = result.inserted_primary_key[0]\n return InstigatorTick(tick_id, tick_data)\n except db_exc.IntegrityError as exc:\n raise DagsterInvariantViolationError(\n f"Unable to insert InstigatorTick for job {tick_data.instigator_name} in"\n " storage"\n ) from exc\n\n def update_tick(self, tick: InstigatorTick) -> InstigatorTick:\n check.inst_param(tick, "tick", InstigatorTick)\n\n values = {\n "status": tick.status.value,\n "type": tick.instigator_type.value,\n "timestamp": utc_datetime_from_timestamp(tick.timestamp),\n "tick_body": serialize_value(tick.tick_data),\n }\n if self.has_instigators_table() and tick.selector_id:\n values["selector_id"] = tick.selector_id\n\n with self.connect() as conn:\n conn.execute(\n JobTickTable.update().where(JobTickTable.c.id == tick.tick_id).values(**values)\n )\n\n return tick\n\n def purge_ticks(\n self,\n origin_id: str,\n selector_id: str,\n before: float,\n tick_statuses: Optional[Sequence[TickStatus]] = None,\n ) -> None:\n check.str_param(origin_id, "origin_id")\n check.float_param(before, "before")\n check.opt_list_param(tick_statuses, "tick_statuses", of_type=TickStatus)\n\n utc_before = utc_datetime_from_timestamp(before)\n\n query = JobTickTable.delete().where(JobTickTable.c.timestamp < utc_before)\n if tick_statuses:\n query = query.where(\n JobTickTable.c.status.in_([tick_status.value for tick_status in tick_statuses])\n )\n\n if self.has_instigators_table():\n query = query.where(\n db.or_(\n JobTickTable.c.selector_id == selector_id,\n db.and_(\n JobTickTable.c.selector_id.is_(None),\n JobTickTable.c.job_origin_id == origin_id,\n ),\n )\n )\n else:\n query = query.where(JobTickTable.c.job_origin_id == origin_id)\n\n with self.connect() as conn:\n conn.execute(query)\n\n @property\n def supports_auto_materialize_asset_evaluations(self) -> bool:\n with self.connect() as conn:\n return self._has_asset_daemon_asset_evaluations_table(conn)\n\n def add_auto_materialize_asset_evaluations(\n self,\n evaluation_id: int,\n asset_evaluations: Sequence[AutoMaterializeAssetEvaluation],\n ):\n if not asset_evaluations:\n return\n\n with self.connect() as conn:\n bulk_insert = AssetDaemonAssetEvaluationsTable.insert().values(\n [\n {\n "evaluation_id": evaluation_id,\n "asset_key": evaluation.asset_key.to_string(),\n "asset_evaluation_body": serialize_value(evaluation),\n "num_requested": evaluation.num_requested,\n "num_skipped": evaluation.num_skipped,\n "num_discarded": evaluation.num_discarded,\n }\n for evaluation in asset_evaluations\n ]\n )\n conn.execute(bulk_insert)\n\n def get_auto_materialize_asset_evaluations(\n self, asset_key: AssetKey, limit: int, cursor: Optional[int] = None\n ) -> Sequence[AutoMaterializeAssetEvaluationRecord]:\n with self.connect() as conn:\n query = (\n db_select(\n [\n AssetDaemonAssetEvaluationsTable.c.id,\n AssetDaemonAssetEvaluationsTable.c.asset_evaluation_body,\n AssetDaemonAssetEvaluationsTable.c.evaluation_id,\n AssetDaemonAssetEvaluationsTable.c.create_timestamp,\n AssetDaemonAssetEvaluationsTable.c.asset_key,\n ]\n )\n .where(AssetDaemonAssetEvaluationsTable.c.asset_key == asset_key.to_string())\n .order_by(AssetDaemonAssetEvaluationsTable.c.evaluation_id.desc())\n ).limit(limit)\n\n if cursor:\n query = query.where(AssetDaemonAssetEvaluationsTable.c.evaluation_id < cursor)\n\n rows = db_fetch_mappings(conn, query)\n return [AutoMaterializeAssetEvaluationRecord.from_db_row(row) 
for row in rows]\n\n def get_auto_materialize_evaluations_for_evaluation_id(\n self, evaluation_id: int\n ) -> Sequence[AutoMaterializeAssetEvaluationRecord]:\n with self.connect() as conn:\n query = db_select(\n [\n AssetDaemonAssetEvaluationsTable.c.id,\n AssetDaemonAssetEvaluationsTable.c.asset_evaluation_body,\n AssetDaemonAssetEvaluationsTable.c.evaluation_id,\n AssetDaemonAssetEvaluationsTable.c.create_timestamp,\n AssetDaemonAssetEvaluationsTable.c.asset_key,\n ]\n ).where(AssetDaemonAssetEvaluationsTable.c.evaluation_id == evaluation_id)\n\n rows = db_fetch_mappings(conn, query)\n return [AutoMaterializeAssetEvaluationRecord.from_db_row(row) for row in rows]\n\n def purge_asset_evaluations(self, before: float):\n check.float_param(before, "before")\n\n utc_before = utc_datetime_from_timestamp(before)\n query = AssetDaemonAssetEvaluationsTable.delete().where(\n AssetDaemonAssetEvaluationsTable.c.create_timestamp < utc_before\n )\n\n with self.connect() as conn:\n conn.execute(query)\n\n def wipe(self) -> None:\n """Clears the schedule storage."""\n with self.connect() as conn:\n # https://stackoverflow.com/a/54386260/324449\n conn.execute(JobTable.delete())\n conn.execute(JobTickTable.delete())\n if self._has_instigators_table(conn):\n conn.execute(InstigatorsTable.delete())\n if self._has_asset_daemon_asset_evaluations_table(conn):\n conn.execute(AssetDaemonAssetEvaluationsTable.delete())\n\n # MIGRATIONS\n\n def has_secondary_index_table(self) -> bool:\n with self.connect() as conn:\n return "secondary_indexes" in db.inspect(conn).get_table_names()\n\n def has_built_index(self, migration_name: str) -> bool:\n if not self.has_secondary_index_table():\n return False\n\n query = (\n db_select([1])\n .where(SecondaryIndexMigrationTable.c.name == migration_name)\n .where(SecondaryIndexMigrationTable.c.migration_completed != None) # noqa: E711\n .limit(1)\n )\n with self.connect() as conn:\n results = conn.execute(query).fetchall()\n\n return len(results) > 0\n\n def mark_index_built(self, migration_name: str) -> None:\n query = SecondaryIndexMigrationTable.insert().values(\n name=migration_name,\n migration_completed=datetime.now(),\n )\n with self.connect() as conn:\n try:\n conn.execute(query)\n except db_exc.IntegrityError:\n conn.execute(\n SecondaryIndexMigrationTable.update()\n .where(SecondaryIndexMigrationTable.c.name == migration_name)\n .values(migration_completed=datetime.now())\n )\n\n def _execute_data_migrations(\n self,\n migrations: Mapping[str, Callable[..., Any]],\n print_fn: Optional[Callable] = None,\n force_rebuild_all: bool = False,\n ) -> None:\n for migration_name, migration_fn in migrations.items():\n if self.has_built_index(migration_name):\n if not force_rebuild_all:\n if print_fn:\n print_fn(f"Skipping already applied migration: {migration_name}")\n continue\n if print_fn:\n print_fn(f"Starting data migration: {migration_name}")\n migration_fn()(self, print_fn)\n self.mark_index_built(migration_name)\n if print_fn:\n print_fn(f"Finished data migration: {migration_name}")\n\n def migrate(self, print_fn: Optional[PrintFn] = None, force_rebuild_all: bool = False) -> None:\n self._execute_data_migrations(\n REQUIRED_SCHEDULE_DATA_MIGRATIONS, print_fn, force_rebuild_all\n )\n\n def optimize(self, print_fn: Optional[PrintFn] = None, force_rebuild_all: bool = False) -> None:\n self._execute_data_migrations(\n OPTIONAL_SCHEDULE_DATA_MIGRATIONS, print_fn, force_rebuild_all\n )
\n
", "current_page_name": "_modules/dagster/_core/storage/schedules/sql_schedule_storage", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.schedules.sql_schedule_storage"}, "sqlite": {"sqlite_schedule_storage": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.schedules.sqlite.sqlite_schedule_storage

\nfrom contextlib import contextmanager\nfrom typing import Iterator, Optional\n\nimport sqlalchemy as db\nfrom packaging.version import parse\nfrom sqlalchemy.engine import Connection\nfrom sqlalchemy.pool import NullPool\n\nfrom dagster import (\n    StringSource,\n    _check as check,\n)\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._core.storage.sql import (\n    AlembicVersion,\n    check_alembic_revision,\n    create_engine,\n    get_alembic_config,\n    run_alembic_upgrade,\n    stamp_alembic_rev,\n)\nfrom dagster._core.storage.sqlite import create_db_conn_string, get_sqlite_version\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData\nfrom dagster._utils import mkdir_p\n\nfrom ..schema import ScheduleStorageSqlMetadata\nfrom ..sql_schedule_storage import SqlScheduleStorage\n\nMINIMUM_SQLITE_BATCH_VERSION = "3.25.0"\n\n\n
[docs]class SqliteScheduleStorage(SqlScheduleStorage, ConfigurableClass):\n """Local SQLite backed schedule storage."""\n\n def __init__(self, conn_string: str, inst_data: Optional[ConfigurableClassData] = None):\n check.str_param(conn_string, "conn_string")\n self._conn_string = conn_string\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n\n super().__init__()\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n @classmethod\n def config_type(cls) -> UserConfigSchema:\n return {"base_dir": StringSource}\n\n @classmethod\n def from_config_value(\n cls, inst_data: Optional[ConfigurableClassData], config_value\n ) -> "SqliteScheduleStorage":\n return SqliteScheduleStorage.from_local(inst_data=inst_data, **config_value)\n\n @classmethod\n def from_local(\n cls, base_dir: str, inst_data: Optional[ConfigurableClassData] = None\n ) -> "SqliteScheduleStorage":\n check.str_param(base_dir, "base_dir")\n mkdir_p(base_dir)\n conn_string = create_db_conn_string(base_dir, "schedules")\n engine = create_engine(conn_string, poolclass=NullPool)\n alembic_config = get_alembic_config(__file__)\n\n should_migrate_data = False\n with engine.connect() as connection:\n db_revision, head_revision = check_alembic_revision(alembic_config, connection)\n if not (db_revision and head_revision):\n ScheduleStorageSqlMetadata.create_all(engine)\n connection.execute(db.text("PRAGMA journal_mode=WAL;"))\n stamp_alembic_rev(alembic_config, connection)\n should_migrate_data = True\n\n schedule_storage = cls(conn_string, inst_data)\n if should_migrate_data:\n schedule_storage.migrate()\n schedule_storage.optimize()\n\n return schedule_storage\n\n @contextmanager\n def connect(self) -> Iterator[Connection]:\n engine = create_engine(self._conn_string, poolclass=NullPool)\n with engine.connect() as conn:\n with conn.begin():\n yield conn\n\n @property\n def supports_batch_queries(self) -> bool:\n if not super().supports_batch_queries:\n return False\n\n return super().supports_batch_queries and parse(get_sqlite_version()) >= parse(\n MINIMUM_SQLITE_BATCH_VERSION\n )\n\n def upgrade(self) -> None:\n alembic_config = get_alembic_config(__file__)\n with self.connect() as conn:\n run_alembic_upgrade(alembic_config, conn)\n\n def alembic_version(self) -> AlembicVersion:\n alembic_config = get_alembic_config(__file__)\n with self.connect() as conn:\n return check_alembic_revision(alembic_config, conn)
\n
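# --- Illustrative sketch (not part of the module source above) ---
# A minimal example of constructing the local schedule storage defined above via
# its `from_local` classmethod. This is internal storage machinery normally
# managed by the DagsterInstance; the base_dir path below is hypothetical.
from dagster._core.storage.schedules.sqlite.sqlite_schedule_storage import (
    SqliteScheduleStorage,
)

storage = SqliteScheduleStorage.from_local(base_dir="/tmp/dagster_home/schedules")
# helpers inherited from SqlScheduleStorage are then available, e.g.:
print(len(storage.all_instigator_state()))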
", "current_page_name": "_modules/dagster/_core/storage/schedules/sqlite/sqlite_schedule_storage", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.schedules.sqlite.sqlite_schedule_storage"}}}, "upath_io_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.upath_io_manager

\nimport asyncio\nimport inspect\nfrom abc import abstractmethod\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union\n\nfrom fsspec import AbstractFileSystem\nfrom fsspec.implementations.local import LocalFileSystem\n\nfrom dagster import (\n    InputContext,\n    MetadataValue,\n    MultiPartitionKey,\n    OutputContext,\n    _check as check,\n)\nfrom dagster._core.storage.memoizable_io_manager import MemoizableIOManager\n\nif TYPE_CHECKING:\n    from upath import UPath\n\n\n
[docs]class UPathIOManager(MemoizableIOManager):\n """Abstract IOManager base class compatible with local and cloud storage via `universal-pathlib` and `fsspec`.\n\n Features:\n - handles partitioned assets\n - handles loading a single upstream partition\n - handles loading multiple upstream partitions (with respect to :py:class:`PartitionMapping`)\n - supports loading multiple partitions concurrently with async `load_from_path` method\n - the `get_metadata` method can be customized to add additional metadata to the output\n - the `allow_missing_partitions` metadata value can be set to `True` to skip missing partitions\n (the default behavior is to raise an error)\n\n """\n\n extension: Optional[str] = None # override in child class\n\n def __init__(\n self,\n base_path: Optional["UPath"] = None,\n ):\n from upath import UPath\n\n assert not self.extension or "." in self.extension\n self._base_path = base_path or UPath(".")\n\n @abstractmethod\n def dump_to_path(self, context: OutputContext, obj: Any, path: "UPath"):\n """Child classes should override this method to write the object to the filesystem."""\n\n @abstractmethod\n def load_from_path(self, context: InputContext, path: "UPath") -> Any:\n """Child classes should override this method to load the object from the filesystem."""\n\n @property\n def fs(self) -> AbstractFileSystem:\n """Utility function to get the IOManager filesystem.\n\n Returns:\n AbstractFileSystem: fsspec filesystem.\n\n """\n from upath import UPath\n\n if isinstance(self._base_path, UPath):\n return self._base_path.fs\n elif isinstance(self._base_path, Path):\n return LocalFileSystem()\n else:\n raise ValueError(f"Unsupported base_path type: {type(self._base_path)}")\n\n @property\n def storage_options(self) -> Dict[str, Any]:\n """Utility function to get the fsspec storage_options which are often consumed by various I/O functions.\n\n Returns:\n Dict[str, Any]: fsspec storage_options.\n """\n from upath import UPath\n\n if isinstance(self._base_path, UPath):\n return self._base_path._kwargs.copy() # noqa\n elif isinstance(self._base_path, Path):\n return {}\n else:\n raise ValueError(f"Unsupported base_path type: {type(self._base_path)}")\n\n def get_metadata(\n self,\n context: OutputContext,\n obj: Any,\n ) -> Dict[str, MetadataValue]:\n """Child classes should override this method to add custom metadata to the outputs."""\n return {}\n\n # Read/write operations on paths can generally be handled by methods on the\n # UPath class, but when the backend requires credentials, this isn't\n # always possible. 
Override these path_* methods to provide custom\n # implementations for targeting backends that require authentication.\n\n def unlink(self, path: "UPath") -> None:\n """Remove the file or object at the provided path."""\n path.unlink()\n\n def path_exists(self, path: "UPath") -> bool:\n """Check if a file or object exists at the provided path."""\n return path.exists()\n\n def make_directory(self, path: "UPath"):\n """Create a directory at the provided path.\n\n Override as a no-op if the target backend doesn't use directories.\n """\n path.mkdir(parents=True, exist_ok=True)\n\n def has_output(self, context: OutputContext) -> bool:\n return self.path_exists(self._get_path(context))\n\n def _with_extension(self, path: "UPath") -> "UPath":\n return path.with_suffix(path.suffix + self.extension) if self.extension else path\n\n def _get_path_without_extension(self, context: Union[InputContext, OutputContext]) -> "UPath":\n if context.has_asset_key:\n context_path = self.get_asset_relative_path(context)\n else:\n # we are dealing with an op output\n context_path = self.get_op_output_relative_path(context)\n\n return self._base_path.joinpath(context_path)\n\n def get_asset_relative_path(self, context: Union[InputContext, OutputContext]) -> "UPath":\n from upath import UPath\n\n # we are not using context.get_asset_identifier() because it already includes the partition_key\n return UPath(*context.asset_key.path)\n\n def get_op_output_relative_path(self, context: Union[InputContext, OutputContext]) -> "UPath":\n from upath import UPath\n\n return UPath(*context.get_identifier())\n\n def get_loading_input_log_message(self, path: "UPath") -> str:\n return f"Loading file from: {path} using {self.__class__.__name__}..."\n\n def get_writing_output_log_message(self, path: "UPath") -> str:\n return f"Writing file at: {path} using {self.__class__.__name__}..."\n\n def get_loading_input_partition_log_message(self, path: "UPath", partition_key: str) -> str:\n return f"Loading partition {partition_key} from {path} using {self.__class__.__name__}..."\n\n def get_missing_partition_log_message(self, partition_key: str) -> str:\n return (\n f"Couldn't load partition {partition_key} and skipped it "\n "because the input metadata includes allow_missing_partitions=True"\n )\n\n def _get_path(self, context: Union[InputContext, OutputContext]) -> "UPath":\n """Returns the I/O path for a given context.\n Should not be used with partitions (use `_get_paths_for_partitions` instead).\n """\n path = self._get_path_without_extension(context)\n return self._with_extension(path)\n\n def get_path_for_partition(\n self, context: Union[InputContext, OutputContext], path: "UPath", partition: str\n ) -> "UPath":\n """Override this method if you want to use a different partitioning scheme\n (for example, if the saving function handles partitioning instead).\n The extension will be added later.\n\n Args:\n context (Union[InputContext, OutputContext]): The context for the I/O operation.\n path (UPath): The path to the file or object.\n partition (str): Formatted partition/multipartition key\n\n Returns:\n UPath: The path to the file with the partition key appended.\n """\n return path / partition\n\n def _get_paths_for_partitions(\n self, context: Union[InputContext, OutputContext]\n ) -> Dict[str, "UPath"]:\n """Returns a dict of partition_keys into I/O paths for a given context."""\n if not context.has_asset_partitions:\n raise TypeError(\n f"Detected {context.dagster_type.typing_type} input type "\n "but the asset is not 
partitioned"\n )\n\n def _formatted_multipartitioned_path(partition_key: MultiPartitionKey) -> str:\n ordered_dimension_keys = [\n key[1]\n for key in sorted(partition_key.keys_by_dimension.items(), key=lambda x: x[0])\n ]\n return "/".join(ordered_dimension_keys)\n\n formatted_partition_keys = {\n partition_key: (\n _formatted_multipartitioned_path(partition_key)\n if isinstance(partition_key, MultiPartitionKey)\n else partition_key\n )\n for partition_key in context.asset_partition_keys\n }\n\n asset_path = self._get_path_without_extension(context)\n return {\n partition_key: self._with_extension(\n self.get_path_for_partition(context, asset_path, partition)\n )\n for partition_key, partition in formatted_partition_keys.items()\n }\n\n def _get_multipartition_backcompat_paths(\n self, context: Union[InputContext, OutputContext]\n ) -> Mapping[str, "UPath"]:\n if not context.has_asset_partitions:\n raise TypeError(\n f"Detected {context.dagster_type.typing_type} input type "\n "but the asset is not partitioned"\n )\n\n partition_keys = context.asset_partition_keys\n\n asset_path = self._get_path_without_extension(context)\n return {\n partition_key: self._with_extension(asset_path / partition_key)\n for partition_key in partition_keys\n if isinstance(partition_key, MultiPartitionKey)\n }\n\n def _load_single_input(\n self, path: "UPath", context: InputContext, backcompat_path: Optional["UPath"] = None\n ) -> Any:\n context.log.debug(self.get_loading_input_log_message(path))\n try:\n obj = self.load_from_path(context=context, path=path)\n if asyncio.iscoroutine(obj):\n obj = asyncio.run(obj)\n except FileNotFoundError as e:\n if backcompat_path is not None:\n try:\n obj = self.load_from_path(context=context, path=backcompat_path)\n if asyncio.iscoroutine(obj):\n obj = asyncio.run(obj)\n\n context.log.debug(\n f"File not found at {path}. Loaded instead from backcompat path:"\n f" {backcompat_path}"\n )\n except FileNotFoundError:\n raise e\n else:\n raise e\n\n context.add_input_metadata({"path": MetadataValue.path(str(path))})\n return obj\n\n def _load_partition_from_path(\n self,\n context: InputContext,\n partition_key: str,\n path: "UPath",\n backcompat_path: Optional["UPath"] = None,\n ) -> Any:\n """1. Try to load the partition from the normal path.\n 2. If it was not found, try to load it from the backcompat path.\n 3. If allow_missing_partitions metadata is True, skip the partition if it was not found in any of the paths.\n Otherwise, raise an error.\n\n Args:\n context (InputContext): IOManager Input context\n partition_key (str): the partition key corresponding to the partition being loaded\n path (UPath): The path to the partition.\n backcompat_path (Optional[UPath]): The path to the partition in the backcompat location.\n\n Returns:\n Any: The object loaded from the partition.\n """\n allow_missing_partitions = (\n context.metadata.get("allow_missing_partitions", False)\n if context.metadata is not None\n else False\n )\n\n try:\n context.log.debug(self.get_loading_input_partition_log_message(path, partition_key))\n obj = self.load_from_path(context=context, path=path)\n return obj\n except FileNotFoundError as e:\n if backcompat_path is not None:\n try:\n obj = self.load_from_path(context=context, path=path)\n context.log.debug(\n f"File not found at {path}. 
Loaded instead from backcompat path:"\n f" {backcompat_path}"\n )\n return obj\n except FileNotFoundError as e:\n if allow_missing_partitions:\n context.log.warning(self.get_missing_partition_log_message(partition_key))\n return None\n else:\n raise e\n if allow_missing_partitions:\n context.log.warning(self.get_missing_partition_log_message(partition_key))\n return None\n else:\n raise e\n\n def _load_multiple_inputs(self, context: InputContext) -> Dict[str, Any]:\n # load multiple partitions\n paths = self._get_paths_for_partitions(context) # paths for normal partitions\n backcompat_paths = self._get_multipartition_backcompat_paths(\n context\n ) # paths for multipartitions\n\n context.log.debug(f"Loading {len(paths)} partitions...")\n\n objs = {}\n\n if not inspect.iscoroutinefunction(self.load_from_path):\n for partition_key in context.asset_partition_keys:\n obj = self._load_partition_from_path(\n context,\n partition_key,\n paths[partition_key],\n backcompat_paths.get(partition_key),\n )\n if obj is not None: # in case some partitions were skipped\n objs[partition_key] = obj\n return objs\n else:\n # load_from_path returns a coroutine, so we need to await the results\n\n async def collect():\n loop = asyncio.get_running_loop()\n\n tasks = []\n\n for partition_key in context.asset_partition_keys:\n tasks.append(\n loop.create_task(\n self._load_partition_from_path(\n context,\n partition_key,\n paths[partition_key],\n backcompat_paths.get(partition_key),\n )\n )\n )\n\n results = await asyncio.gather(*tasks, return_exceptions=True)\n\n # need to handle missing partitions here because exceptions don't get propagated from async calls\n allow_missing_partitions = (\n context.metadata.get("allow_missing_partitions", False)\n if context.metadata is not None\n else False\n )\n\n results_without_errors = []\n found_errors = False\n for partition_key, result in zip(context.asset_partition_keys, results):\n if isinstance(result, FileNotFoundError):\n if allow_missing_partitions:\n context.log.warning(\n self.get_missing_partition_log_message(partition_key)\n )\n else:\n context.log.error(str(result))\n found_errors = True\n elif isinstance(result, Exception):\n context.log.error(str(result))\n found_errors = True\n else:\n results_without_errors.append(result)\n\n if found_errors:\n raise RuntimeError(\n f"{len(paths) - len(results_without_errors)} partitions could not be loaded"\n )\n\n return results_without_errors\n\n awaited_objects = asyncio.get_event_loop().run_until_complete(collect())\n\n return {\n partition_key: awaited_object\n for partition_key, awaited_object in zip(\n context.asset_partition_keys, awaited_objects\n )\n if awaited_object is not None\n }\n\n def load_input(self, context: InputContext) -> Union[Any, Dict[str, Any]]:\n # If no asset key, we are dealing with an op output which is always non-partitioned\n if not context.has_asset_key or not context.has_asset_partitions:\n path = self._get_path(context)\n return self._load_single_input(path, context)\n else:\n asset_partition_keys = context.asset_partition_keys\n if len(asset_partition_keys) == 0:\n return None\n elif len(asset_partition_keys) == 1:\n paths = self._get_paths_for_partitions(context)\n check.invariant(len(paths) == 1, f"Expected 1 path, but got {len(paths)}")\n path = next(iter(paths.values()))\n backcompat_paths = self._get_multipartition_backcompat_paths(context)\n backcompat_path = (\n None if not backcompat_paths else next(iter(backcompat_paths.values()))\n )\n\n return self._load_single_input(path, 
context, backcompat_path)\n else: # we are dealing with multiple partitions of an asset\n type_annotation = context.dagster_type.typing_type\n if type_annotation != Any and not is_dict_type(type_annotation):\n check.failed(\n "Loading an input that corresponds to multiple partitions, but the"\n " type annotation on the op input is not a dict, Dict, Mapping, or"\n f" Any: is '{type_annotation}'."\n )\n\n return self._load_multiple_inputs(context)\n\n def handle_output(self, context: OutputContext, obj: Any):\n if context.dagster_type.typing_type == type(None):\n check.invariant(\n obj is None,\n "Output had Nothing type or 'None' annotation, but handle_output received"\n f" value that was not None and was of type {type(obj)}.",\n )\n return None\n\n if context.has_asset_partitions:\n paths = self._get_paths_for_partitions(context)\n\n check.invariant(\n len(paths) == 1,\n f"The current IO manager {type(self)} does not support persisting an output"\n " associated with multiple partitions. This error is likely occurring because a"\n " backfill was launched using the 'single run' option. Instead, launch the"\n " backfill with the 'multiple runs' option.",\n )\n\n path = next(iter(paths.values()))\n else:\n path = self._get_path(context)\n self.make_directory(path.parent)\n context.log.debug(self.get_writing_output_log_message(path))\n self.dump_to_path(context=context, obj=obj, path=path)\n\n metadata = {"path": MetadataValue.path(str(path))}\n custom_metadata = self.get_metadata(context=context, obj=obj)\n metadata.update(custom_metadata) # type: ignore\n\n context.add_output_metadata(metadata)
\n\n\ndef is_dict_type(type_obj) -> bool:\n if type_obj == dict:\n return True\n\n if hasattr(type_obj, "__origin__") and type_obj.__origin__ in (dict, Dict, Mapping):\n return True\n\n return False\n
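# --- Illustrative sketch (not part of the module source above) ---
# A minimal concrete UPathIOManager that persists outputs as JSON by implementing
# the two abstract methods declared above. The class name, base path, and JSON
# format are hypothetical choices for illustration.
import json
from typing import Any

from upath import UPath

from dagster import InputContext, OutputContext, UPathIOManager


class JSONUPathIOManager(UPathIOManager):
    extension = ".json"  # appended to every generated path

    def dump_to_path(self, context: OutputContext, obj: Any, path: UPath) -> None:
        # write the output object as JSON at the resolved path
        with path.open("w") as f:
            json.dump(obj, f)

    def load_from_path(self, context: InputContext, path: UPath) -> Any:
        # read the upstream value back from the resolved path
        with path.open("r") as f:
            return json.load(f)


# For partitioned assets the base class resolves one path per partition key under
# base_path; wiring this into a Definitions/resources object is omitted here.
json_io_manager = JSONUPathIOManager(base_path=UPath("/tmp/dagster_storage"))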
", "current_page_name": "_modules/dagster/_core/storage/upath_io_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.upath_io_manager"}}, "types": {"config_schema": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.types.config_schema

\nimport hashlib\nfrom abc import ABC, abstractmethod\nfrom typing import TYPE_CHECKING, AbstractSet, Any, Callable, Iterator, Optional, cast\n\nfrom typing_extensions import TypeAlias\n\nimport dagster._check as check\nfrom dagster._annotations import experimental_param\nfrom dagster._config import ConfigType\nfrom dagster._core.decorator_utils import get_function_params, validate_expected_params\nfrom dagster._core.errors import DagsterInvalidDefinitionError\n\nfrom ..definitions.resource_requirement import (\n    ResourceRequirement,\n    TypeLoaderResourceRequirement,\n)\n\nif TYPE_CHECKING:\n    from dagster._core.execution.context.system import (\n        DagsterTypeLoaderContext,\n    )\n\n\n
[docs]class DagsterTypeLoader(ABC):\n """Dagster type loaders are used to load unconnected inputs of the dagster type they are attached\n to.\n\n The recommended way to define a type loader is with the\n :py:func:`@dagster_type_loader <dagster_type_loader>` decorator.\n """\n\n @property\n @abstractmethod\n def schema_type(self) -> ConfigType:\n pass\n\n @property\n def loader_version(self) -> Optional[str]:\n return None\n\n def compute_loaded_input_version(self, _config_value: object) -> Optional[str]:\n return None\n\n def construct_from_config_value(\n self, _context: "DagsterTypeLoaderContext", config_value: object\n ) -> object:\n """How to create a runtime value from config data."""\n return config_value\n\n def required_resource_keys(self) -> AbstractSet[str]:\n return frozenset()\n\n def get_resource_requirements(\n self, outer_context: Optional[object] = None\n ) -> Iterator[ResourceRequirement]:\n type_display_name = cast(str, outer_context)\n for resource_key in sorted(list(self.required_resource_keys())):\n yield TypeLoaderResourceRequirement(\n key=resource_key, type_display_name=type_display_name\n )
\n\n\n@experimental_param(param="loader_version")\n@experimental_param(param="external_version_fn")\nclass DagsterTypeLoaderFromDecorator(DagsterTypeLoader):\n def __init__(\n self,\n config_type,\n func,\n required_resource_keys,\n loader_version=None,\n external_version_fn=None,\n ):\n self._config_type = check.inst_param(config_type, "config_type", ConfigType)\n self._func = check.callable_param(func, "func")\n self._required_resource_keys = check.opt_set_param(\n required_resource_keys, "required_resource_keys", of_type=str\n )\n self._loader_version = check.opt_str_param(loader_version, "loader_version")\n self._external_version_fn = check.opt_callable_param(\n external_version_fn, "external_version_fn"\n )\n\n @property\n def schema_type(self) -> ConfigType:\n return self._config_type\n\n @property\n def loader_version(self) -> Optional[str]:\n return self._loader_version\n\n def compute_loaded_input_version(self, config_value: object) -> Optional[str]:\n """Compute the type-loaded input from a given config_value.\n\n Args:\n config_value (object): Config value to be ingested by the external version\n loading function.\n\n Returns:\n Optional[str]: Hash of concatenated loader version and external input version if both\n are provided, else None.\n """\n version = ""\n if self.loader_version:\n version += str(self.loader_version)\n if self._external_version_fn:\n ext_version = self._external_version_fn(config_value)\n version += str(ext_version)\n\n if version == "":\n return None # Sentinel value for no version provided.\n else:\n return hashlib.sha1(version.encode("utf-8")).hexdigest()\n\n def construct_from_config_value(\n self, context: "DagsterTypeLoaderContext", config_value: object\n ):\n return self._func(context, config_value)\n\n def required_resource_keys(self):\n return frozenset(self._required_resource_keys)\n\n\ndef _create_type_loader_for_decorator(\n config_type: ConfigType,\n func,\n required_resource_keys: Optional[AbstractSet[str]],\n loader_version: Optional[str] = None,\n external_version_fn: Optional[Callable[[object], str]] = None,\n):\n return DagsterTypeLoaderFromDecorator(\n config_type, func, required_resource_keys, loader_version, external_version_fn\n )\n\n\nDagsterTypeLoaderFn: TypeAlias = Callable[["DagsterTypeLoaderContext", Any], Any]\n\n\n
[docs]def dagster_type_loader(\n config_schema: object,\n required_resource_keys: Optional[AbstractSet[str]] = None,\n loader_version: Optional[str] = None,\n external_version_fn: Optional[Callable[[object], str]] = None,\n) -> Callable[[DagsterTypeLoaderFn], DagsterTypeLoaderFromDecorator]:\n """Create an dagster type loader that maps config data to a runtime value.\n\n The decorated function should take the execution context and parsed config value and return the\n appropriate runtime value.\n\n Args:\n config_schema (ConfigSchema): The schema for the config that's passed to the decorated\n function.\n loader_version (str): (Experimental) The version of the decorated compute function. Two\n loading functions should have the same version if and only if they deterministically\n produce the same outputs when provided the same inputs.\n external_version_fn (Callable): (Experimental) A function that takes in the same parameters as the loader\n function (config_value) and returns a representation of the version of the external\n asset (str). Two external assets with identical versions are treated as identical to one\n another.\n\n Examples:\n .. code-block:: python\n\n @dagster_type_loader(Permissive())\n def load_dict(_context, value):\n return value\n """\n from dagster._config import resolve_to_config_type\n\n config_type = resolve_to_config_type(config_schema)\n assert isinstance(\n config_type, ConfigType\n ), f"{config_schema} could not be resolved to config type"\n EXPECTED_POSITIONALS = ["context", "*"]\n\n def wrapper(func: DagsterTypeLoaderFn) -> DagsterTypeLoaderFromDecorator:\n params = get_function_params(func)\n missing_positional = validate_expected_params(params, EXPECTED_POSITIONALS)\n if missing_positional:\n raise DagsterInvalidDefinitionError(\n f"@dagster_type_loader '{func.__name__}' decorated function does not have required"\n f" positional parameter '{missing_positional}'. @dagster_type_loader decorated"\n " functions should only have keyword arguments that match input names and a first"\n " positional parameter named 'context'."\n )\n\n return _create_type_loader_for_decorator(\n config_type, func, required_resource_keys, loader_version, external_version_fn\n )\n\n return wrapper
\n
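# --- Illustrative sketch (not part of the module source above) ---
# A minimal example of pairing a @dagster_type_loader with a DagsterType via the
# `loader` argument, so values of that type can be shimmed from config. The
# config schema, loader, and type names here are hypothetical.
from dagster import DagsterType, dagster_type_loader


@dagster_type_loader(config_schema={"path": str})
def load_text_from_path(_context, config_value):
    # construct the runtime value from the parsed config
    with open(config_value["path"]) as f:
        return f.read()


TextBlob = DagsterType(
    name="TextBlob",
    type_check_fn=lambda _context, value: isinstance(value, str),
    loader=load_text_from_path,
)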
", "current_page_name": "_modules/dagster/_core/types/config_schema", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.types.config_schema"}, "dagster_type": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.types.dagster_type

\nimport typing as t\nfrom abc import abstractmethod\nfrom enum import Enum as PythonEnum\nfrom functools import partial\nfrom typing import (\n    AbstractSet as TypingAbstractSet,\n    AnyStr,\n    Iterator as TypingIterator,\n    Mapping,\n    Optional as TypingOptional,\n    Sequence,\n    Type as TypingType,\n    cast,\n)\n\nfrom typing_extensions import get_args, get_origin\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._builtins import BuiltinEnum\nfrom dagster._config import (\n    Array,\n    ConfigType,\n    Noneable as ConfigNoneable,\n)\nfrom dagster._core.definitions.events import DynamicOutput, Output, TypeCheck\nfrom dagster._core.definitions.metadata import (\n    MetadataValue,\n    RawMetadataValue,\n    normalize_metadata,\n)\nfrom dagster._core.errors import DagsterInvalidDefinitionError, DagsterInvariantViolationError\nfrom dagster._serdes import whitelist_for_serdes\nfrom dagster._seven import is_subclass\n\nfrom ..definitions.resource_requirement import (\n    RequiresResources,\n    ResourceRequirement,\n    TypeResourceRequirement,\n)\nfrom .builtin_config_schemas import BuiltinSchemas\nfrom .config_schema import DagsterTypeLoader\n\nif t.TYPE_CHECKING:\n    from dagster._core.definitions.node_definition import NodeDefinition\n    from dagster._core.execution.context.system import DagsterTypeLoaderContext, TypeCheckContext\n\nTypeCheckFn = t.Callable[["TypeCheckContext", AnyStr], t.Union[TypeCheck, bool]]\n\n\n@whitelist_for_serdes\nclass DagsterTypeKind(PythonEnum):\n    ANY = "ANY"\n    SCALAR = "SCALAR"\n    LIST = "LIST"\n    NOTHING = "NOTHING"\n    NULLABLE = "NULLABLE"\n    REGULAR = "REGULAR"\n\n\n
[docs]class DagsterType(RequiresResources):\n """Define a type in dagster. These can be used in the inputs and outputs of ops.\n\n Args:\n type_check_fn (Callable[[TypeCheckContext, Any], [Union[bool, TypeCheck]]]):\n The function that defines the type check. It takes the value flowing\n through the input or output of the op. If it passes, return either\n ``True`` or a :py:class:`~dagster.TypeCheck` with ``success`` set to ``True``. If it fails,\n return either ``False`` or a :py:class:`~dagster.TypeCheck` with ``success`` set to ``False``.\n The first argument must be named ``context`` (or, if unused, ``_``, ``_context``, or ``context_``).\n Use ``required_resource_keys`` for access to resources.\n key (Optional[str]): The unique key to identify types programmatically.\n The key property always has a value. If you omit key to the argument\n to the init function, it instead receives the value of ``name``. If\n neither ``key`` nor ``name`` is provided, a ``CheckError`` is thrown.\n\n In the case of a generic type such as ``List`` or ``Optional``, this is\n generated programmatically based on the type parameters.\n\n For most use cases, name should be set and the key argument should\n not be specified.\n name (Optional[str]): A unique name given by a user. If ``key`` is ``None``, ``key``\n becomes this value. Name is not given in a case where the user does\n not specify a unique name for this type, such as a generic class.\n description (Optional[str]): A markdown-formatted string, displayed in tooling.\n loader (Optional[DagsterTypeLoader]): An instance of a class that\n inherits from :py:class:`~dagster.DagsterTypeLoader` and can map config data to a value of\n this type. Specify this argument if you will need to shim values of this type using the\n config machinery. As a rule, you should use the\n :py:func:`@dagster_type_loader <dagster.dagster_type_loader>` decorator to construct\n these arguments.\n required_resource_keys (Optional[Set[str]]): Resource keys required by the ``type_check_fn``.\n is_builtin (bool): Defaults to False. This is used by tools to display or\n filter built-in types (such as :py:class:`~dagster.String`, :py:class:`~dagster.Int`) to visually distinguish\n them from user-defined types. Meant for internal use.\n kind (DagsterTypeKind): Defaults to None. This is used to determine the kind of runtime type\n for InputDefinition and OutputDefinition type checking.\n typing_type: Defaults to None. A valid python typing type (e.g. Optional[List[int]]) for the\n value contained within the DagsterType. 
Meant for internal use.\n """\n\n def __init__(\n self,\n type_check_fn: TypeCheckFn,\n key: t.Optional[str] = None,\n name: t.Optional[str] = None,\n is_builtin: bool = False,\n description: t.Optional[str] = None,\n loader: t.Optional[DagsterTypeLoader] = None,\n required_resource_keys: t.Optional[t.Set[str]] = None,\n kind: DagsterTypeKind = DagsterTypeKind.REGULAR,\n typing_type: t.Any = t.Any,\n metadata: t.Optional[t.Mapping[str, RawMetadataValue]] = None,\n ):\n check.opt_str_param(key, "key")\n check.opt_str_param(name, "name")\n\n check.invariant(not (name is None and key is None), "Must set key or name")\n if name is None:\n key = check.not_none(\n key,\n "If name is not provided, must provide key.",\n )\n self.key, self._name = key, None\n elif key is None:\n name = check.not_none(\n name,\n "If key is not provided, must provide name.",\n )\n self.key, self._name = name, name\n else:\n check.invariant(key and name)\n self.key, self._name = key, name\n\n self._description = check.opt_str_param(description, "description")\n self._loader = check.opt_inst_param(loader, "loader", DagsterTypeLoader)\n\n self._required_resource_keys = check.opt_set_param(\n required_resource_keys,\n "required_resource_keys",\n )\n\n self._type_check_fn = check.callable_param(type_check_fn, "type_check_fn")\n _validate_type_check_fn(self._type_check_fn, self._name)\n\n self.is_builtin = check.bool_param(is_builtin, "is_builtin")\n check.invariant(\n self.display_name is not None,\n f"All types must have a valid display name, got None for key {key}",\n )\n\n self.kind = check.inst_param(kind, "kind", DagsterTypeKind)\n\n self._typing_type = typing_type\n\n self._metadata = normalize_metadata(\n check.opt_mapping_param(metadata, "metadata", key_type=str),\n )\n\n
[docs] @public\n def type_check(self, context: "TypeCheckContext", value: object) -> TypeCheck:\n """Type check the value against the type.\n\n Args:\n context (TypeCheckContext): The context of the type check.\n value (Any): The value to check.\n\n Returns:\n TypeCheck: The result of the type check.\n """\n retval = self._type_check_fn(context, value)\n\n if not isinstance(retval, (bool, TypeCheck)):\n raise DagsterInvariantViolationError(\n f"You have returned {retval!r} of type {type(retval)} from the type "\n f'check function of type "{self.key}". Return value must be instance '\n "of TypeCheck or a bool."\n )\n\n return TypeCheck(success=retval) if isinstance(retval, bool) else retval
\n\n def __eq__(self, other):\n return isinstance(other, DagsterType) and self.key == other.key\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash(self.key)\n\n @staticmethod\n def from_builtin_enum(builtin_enum) -> "DagsterType":\n check.invariant(BuiltinEnum.contains(builtin_enum), "must be member of BuiltinEnum")\n return _RUNTIME_MAP[builtin_enum]\n\n @property\n def metadata(self) -> t.Mapping[str, MetadataValue]:\n return self._metadata\n\n @public\n @property\n def required_resource_keys(self) -> TypingAbstractSet[str]:\n """AbstractSet[str]: Set of resource keys required by the type check function."""\n return self._required_resource_keys\n\n @public\n @property\n def display_name(self) -> str:\n """Either the name or key (if name is `None`) of the type, overridden in many subclasses."""\n return cast(str, self._name or self.key)\n\n @public\n @property\n def unique_name(self) -> t.Optional[str]:\n """The unique name of this type. Can be None if the type is not unique, such as container types."""\n # TODO: docstring and body inconsistent-- can this be None or not?\n check.invariant(\n self._name is not None,\n f"unique_name requested but is None for type {self.display_name}",\n )\n return self._name\n\n @public\n @property\n def has_unique_name(self) -> bool:\n """bool: Whether the type has a unique name."""\n return self._name is not None\n\n @public\n @property\n def typing_type(self) -> t.Any:\n """Any: The python typing type for this type."""\n return self._typing_type\n\n @public\n @property\n def loader(self) -> t.Optional[DagsterTypeLoader]:\n """Optional[DagsterTypeLoader]: Loader for this type, if any."""\n return self._loader\n\n @public\n @property\n def description(self) -> t.Optional[str]:\n """Optional[str]: Description of the type, or None if not provided."""\n return self._description\n\n @property\n def inner_types(self) -> t.Sequence["DagsterType"]:\n return []\n\n @property\n def loader_schema_key(self) -> t.Optional[str]:\n return self.loader.schema_type.key if self.loader else None\n\n @property\n def type_param_keys(self) -> t.Sequence[str]:\n return []\n\n @property\n def is_nothing(self) -> bool:\n return self.kind == DagsterTypeKind.NOTHING\n\n @property\n def supports_fan_in(self) -> bool:\n return False\n\n def get_inner_type_for_fan_in(self) -> "DagsterType":\n check.failed(\n "DagsterType {name} does not support fan-in, should have checked supports_fan_in before"\n " calling getter.".format(name=self.display_name)\n )\n\n def get_resource_requirements(\n self, _outer_context: TypingOptional[object] = None\n ) -> TypingIterator[ResourceRequirement]:\n for resource_key in sorted(list(self.required_resource_keys)):\n yield TypeResourceRequirement(key=resource_key, type_display_name=self.display_name)\n if self.loader:\n yield from self.loader.get_resource_requirements(outer_context=self.display_name)
\n\n\ndef _validate_type_check_fn(fn: t.Callable, name: t.Optional[str]) -> bool:\n from dagster._seven import get_arg_names\n\n args = get_arg_names(fn)\n\n # py2 doesn't filter out self\n if len(args) >= 1 and args[0] == "self":\n args = args[1:]\n\n if len(args) == 2:\n possible_names = {\n "_",\n "context",\n "_context",\n "context_",\n }\n if args[0] not in possible_names:\n DagsterInvalidDefinitionError(\n f'type_check function on type "{name}" must have first '\n 'argument named "context" (or _, _context, context_).'\n )\n return True\n\n raise DagsterInvalidDefinitionError(\n f'type_check_fn argument on type "{name}" must take 2 arguments, received {len(args)}.'\n )\n\n\nclass BuiltinScalarDagsterType(DagsterType):\n def __init__(self, name: str, type_check_fn: TypeCheckFn, typing_type: t.Type, **kwargs):\n super(BuiltinScalarDagsterType, self).__init__(\n key=name,\n name=name,\n kind=DagsterTypeKind.SCALAR,\n type_check_fn=type_check_fn,\n is_builtin=True,\n typing_type=typing_type,\n **kwargs,\n )\n\n # This is passed to the constructor of subclasses as the argument `type_check_fn`-- that's why\n # it exists together with the `type_check_fn` arg.\n def type_check_fn(self, _context, value) -> TypeCheck:\n return self.type_check_scalar_value(value)\n\n @abstractmethod\n def type_check_scalar_value(self, _value) -> TypeCheck:\n raise NotImplementedError()\n\n\ndef _typemismatch_error_str(value: object, expected_type_desc: str) -> str:\n return 'Value "{value}" of python type "{python_type}" must be a {type_desc}.'.format(\n value=value, python_type=type(value).__name__, type_desc=expected_type_desc\n )\n\n\ndef _fail_if_not_of_type(\n value: object, value_type: t.Type[t.Any], value_type_desc: str\n) -> TypeCheck:\n if not isinstance(value, value_type):\n return TypeCheck(success=False, description=_typemismatch_error_str(value, value_type_desc))\n\n return TypeCheck(success=True)\n\n\nclass _Int(BuiltinScalarDagsterType):\n def __init__(self):\n super(_Int, self).__init__(\n name="Int",\n loader=BuiltinSchemas.INT_INPUT,\n type_check_fn=self.type_check_fn,\n typing_type=int,\n )\n\n def type_check_scalar_value(self, value) -> TypeCheck:\n return _fail_if_not_of_type(value, int, "int")\n\n\nclass _String(BuiltinScalarDagsterType):\n def __init__(self):\n super(_String, self).__init__(\n name="String",\n loader=BuiltinSchemas.STRING_INPUT,\n type_check_fn=self.type_check_fn,\n typing_type=str,\n )\n\n def type_check_scalar_value(self, value: object) -> TypeCheck:\n return _fail_if_not_of_type(value, str, "string")\n\n\nclass _Float(BuiltinScalarDagsterType):\n def __init__(self):\n super(_Float, self).__init__(\n name="Float",\n loader=BuiltinSchemas.FLOAT_INPUT,\n type_check_fn=self.type_check_fn,\n typing_type=float,\n )\n\n def type_check_scalar_value(self, value: object) -> TypeCheck:\n return _fail_if_not_of_type(value, float, "float")\n\n\nclass _Bool(BuiltinScalarDagsterType):\n def __init__(self):\n super(_Bool, self).__init__(\n name="Bool",\n loader=BuiltinSchemas.BOOL_INPUT,\n type_check_fn=self.type_check_fn,\n typing_type=bool,\n )\n\n def type_check_scalar_value(self, value: object) -> TypeCheck:\n return _fail_if_not_of_type(value, bool, "bool")\n\n\nclass Anyish(DagsterType):\n def __init__(\n self,\n key: t.Optional[str],\n name: t.Optional[str],\n loader: t.Optional[DagsterTypeLoader] = None,\n is_builtin: bool = False,\n description: t.Optional[str] = None,\n ):\n super(Anyish, self).__init__(\n key=key,\n name=name,\n kind=DagsterTypeKind.ANY,\n loader=loader,\n 
is_builtin=is_builtin,\n type_check_fn=self.type_check_method,\n description=description,\n typing_type=t.Any,\n )\n\n def type_check_method(self, _context: "TypeCheckContext", _value: object) -> TypeCheck:\n return TypeCheck(success=True)\n\n @property\n def supports_fan_in(self) -> bool:\n return True\n\n def get_inner_type_for_fan_in(self) -> DagsterType:\n # Anyish all the way down\n return self\n\n\nclass _Any(Anyish):\n def __init__(self):\n super(_Any, self).__init__(\n key="Any",\n name="Any",\n loader=BuiltinSchemas.ANY_INPUT,\n is_builtin=True,\n )\n\n\ndef create_any_type(\n name: str,\n loader: t.Optional[DagsterTypeLoader] = None,\n description: t.Optional[str] = None,\n) -> Anyish:\n return Anyish(\n key=name,\n name=name,\n description=description,\n loader=loader,\n )\n\n\nclass _Nothing(DagsterType):\n def __init__(self):\n super(_Nothing, self).__init__(\n key="Nothing",\n name="Nothing",\n kind=DagsterTypeKind.NOTHING,\n loader=None,\n type_check_fn=self.type_check_method,\n is_builtin=True,\n typing_type=type(None),\n )\n\n def type_check_method(self, _context: "TypeCheckContext", value: object) -> TypeCheck:\n if value is not None:\n return TypeCheck(\n success=False,\n description=f"Value must be None, got a {type(value)}",\n )\n\n return TypeCheck(success=True)\n\n @property\n def supports_fan_in(self) -> bool:\n return True\n\n def get_inner_type_for_fan_in(self) -> DagsterType:\n return self\n\n\ndef isinstance_type_check_fn(\n expected_python_type: t.Union[t.Type, t.Tuple[t.Type, ...]],\n dagster_type_name: str,\n expected_python_type_str: str,\n) -> TypeCheckFn:\n def type_check(_context: "TypeCheckContext", value: object) -> TypeCheck:\n if not isinstance(value, expected_python_type):\n return TypeCheck(\n success=False,\n description=(\n f"Value of type {type(value)} failed type check for Dagster type"\n f" {dagster_type_name}, expected value to be of Python type"\n f" {expected_python_type_str}."\n ),\n )\n\n return TypeCheck(success=True)\n\n return type_check\n\n\n
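# --- Illustrative sketch (not part of the module source above) ---
# A minimal DagsterType built directly on the constructor documented in the class
# above, using a custom type_check_fn that returns a TypeCheck. EvenInt and the
# check function are hypothetical names.
from dagster import DagsterType, TypeCheck


def _check_even_int(_context, value):
    if isinstance(value, int) and value % 2 == 0:
        return TypeCheck(success=True)
    return TypeCheck(success=False, description=f"Expected an even int, got {value!r}")


EvenInt = DagsterType(name="EvenInt", type_check_fn=_check_even_int, typing_type=int)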
[docs]class PythonObjectDagsterType(DagsterType):\n """Define a type in dagster whose typecheck is an isinstance check.\n\n Specifically, the type can either be a single python type (e.g. int),\n or a tuple of types (e.g. (int, float)) which is treated as a union.\n\n Examples:\n .. code-block:: python\n\n ntype = PythonObjectDagsterType(python_type=int)\n assert ntype.name == 'int'\n assert_success(ntype, 1)\n assert_failure(ntype, 'a')\n\n .. code-block:: python\n\n ntype = PythonObjectDagsterType(python_type=(int, float))\n assert ntype.name == 'Union[int, float]'\n assert_success(ntype, 1)\n assert_success(ntype, 1.5)\n assert_failure(ntype, 'a')\n\n\n Args:\n python_type (Union[Type, Tuple[Type, ...]): The dagster typecheck function calls instanceof on\n this type.\n name (Optional[str]): Name the type. Defaults to the name of ``python_type``.\n key (Optional[str]): Key of the type. Defaults to name.\n description (Optional[str]): A markdown-formatted string, displayed in tooling.\n loader (Optional[DagsterTypeLoader]): An instance of a class that\n inherits from :py:class:`~dagster.DagsterTypeLoader` and can map config data to a value of\n this type. Specify this argument if you will need to shim values of this type using the\n config machinery. As a rule, you should use the\n :py:func:`@dagster_type_loader <dagster.dagster_type_loader>` decorator to construct\n these arguments.\n """\n\n def __init__(\n self,\n python_type: t.Union[t.Type, t.Tuple[t.Type, ...]],\n key: t.Optional[str] = None,\n name: t.Optional[str] = None,\n **kwargs,\n ):\n if isinstance(python_type, tuple):\n self.python_type = check.tuple_param(\n python_type, "python_type", of_shape=tuple(type for item in python_type)\n )\n self.type_str = "Union[{}]".format(\n ", ".join(python_type.__name__ for python_type in python_type)\n )\n typing_type = t.Union[python_type] # type: ignore\n\n else:\n self.python_type = check.class_param(python_type, "python_type")\n self.type_str = cast(str, python_type.__name__)\n typing_type = self.python_type\n name = check.opt_str_param(name, "name", self.type_str)\n key = check.opt_str_param(key, "key", name)\n super(PythonObjectDagsterType, self).__init__(\n key=key,\n name=name,\n type_check_fn=isinstance_type_check_fn(python_type, name, self.type_str),\n typing_type=typing_type,\n **kwargs,\n )
\n\n\nclass NoneableInputSchema(DagsterTypeLoader):\n def __init__(self, inner_dagster_type: DagsterType):\n self._inner_dagster_type = check.inst_param(\n inner_dagster_type, "inner_dagster_type", DagsterType\n )\n self._inner_loader = check.not_none_param(inner_dagster_type.loader, "inner_dagster_type")\n self._schema_type = ConfigNoneable(self._inner_loader.schema_type)\n\n @property\n def schema_type(self) -> ConfigType:\n return self._schema_type\n\n def construct_from_config_value(\n self, context: "DagsterTypeLoaderContext", config_value: object\n ) -> object:\n if config_value is None:\n return None\n return self._inner_loader.construct_from_config_value(context, config_value)\n\n\ndef _create_nullable_input_schema(inner_type: DagsterType) -> t.Optional[DagsterTypeLoader]:\n if not inner_type.loader:\n return None\n\n return NoneableInputSchema(inner_type)\n\n\nclass OptionalType(DagsterType):\n def __init__(self, inner_type: DagsterType):\n inner_type = resolve_dagster_type(inner_type)\n\n if inner_type is Nothing:\n raise DagsterInvalidDefinitionError(\n "Type Nothing can not be wrapped in List or Optional"\n )\n\n key = "Optional." + cast(str, inner_type.key)\n self.inner_type = inner_type\n super(OptionalType, self).__init__(\n key=key,\n name=None,\n kind=DagsterTypeKind.NULLABLE,\n type_check_fn=self.type_check_method,\n loader=_create_nullable_input_schema(inner_type),\n # This throws a type error with Py\n typing_type=t.Optional[inner_type.typing_type],\n )\n\n @property\n def display_name(self) -> str:\n return self.inner_type.display_name + "?"\n\n def type_check_method(self, context, value):\n return (\n TypeCheck(success=True) if value is None else self.inner_type.type_check(context, value)\n )\n\n @property\n def inner_types(self):\n return [self.inner_type] + self.inner_type.inner_types\n\n @property\n def type_param_keys(self):\n return [self.inner_type.key]\n\n @property\n def supports_fan_in(self):\n return self.inner_type.supports_fan_in\n\n def get_inner_type_for_fan_in(self):\n return self.inner_type.get_inner_type_for_fan_in()\n\n\nclass ListInputSchema(DagsterTypeLoader):\n def __init__(self, inner_dagster_type):\n self._inner_dagster_type = check.inst_param(\n inner_dagster_type, "inner_dagster_type", DagsterType\n )\n check.param_invariant(inner_dagster_type.loader, "inner_dagster_type")\n self._schema_type = Array(inner_dagster_type.loader.schema_type)\n\n @property\n def schema_type(self):\n return self._schema_type\n\n def construct_from_config_value(self, context, config_value):\n convert_item = partial(self._inner_dagster_type.loader.construct_from_config_value, context)\n return list(map(convert_item, config_value))\n\n\ndef _create_list_input_schema(inner_type):\n if not inner_type.loader:\n return None\n\n return ListInputSchema(inner_type)\n\n\nclass ListType(DagsterType):\n def __init__(self, inner_type: DagsterType):\n key = "List." 
+ inner_type.key\n self.inner_type = inner_type\n super(ListType, self).__init__(\n key=key,\n name=None,\n kind=DagsterTypeKind.LIST,\n type_check_fn=self.type_check_method,\n loader=_create_list_input_schema(inner_type),\n typing_type=t.List[inner_type.typing_type],\n )\n\n @property\n def display_name(self):\n return "[" + self.inner_type.display_name + "]"\n\n def type_check_method(self, context, value):\n value_check = _fail_if_not_of_type(value, list, "list")\n if not value_check.success:\n return value_check\n\n for item in value:\n item_check = self.inner_type.type_check(context, item)\n if not item_check.success:\n return item_check\n\n return TypeCheck(success=True)\n\n @property\n def inner_types(self):\n return [self.inner_type] + self.inner_type.inner_types\n\n @property\n def type_param_keys(self):\n return [self.inner_type.key]\n\n @property\n def supports_fan_in(self):\n return True\n\n def get_inner_type_for_fan_in(self):\n return self.inner_type\n\n\nclass DagsterListApi:\n def __getitem__(self, inner_type):\n check.not_none_param(inner_type, "inner_type")\n return _List(resolve_dagster_type(inner_type))\n\n def __call__(self, inner_type):\n check.not_none_param(inner_type, "inner_type")\n return _List(inner_type)\n\n\nList: DagsterListApi = DagsterListApi()\n\n\ndef _List(inner_type):\n check.inst_param(inner_type, "inner_type", DagsterType)\n if inner_type is Nothing:\n raise DagsterInvalidDefinitionError("Type Nothing can not be wrapped in List or Optional")\n return ListType(inner_type)\n\n\nclass Stringish(DagsterType):\n def __init__(self, key: t.Optional[str] = None, name: t.Optional[str] = None, **kwargs):\n name = check.opt_str_param(name, "name", type(self).__name__)\n key = check.opt_str_param(key, "key", name)\n super(Stringish, self).__init__(\n key=key,\n name=name,\n kind=DagsterTypeKind.SCALAR,\n type_check_fn=self.type_check_method,\n loader=BuiltinSchemas.STRING_INPUT,\n typing_type=str,\n **kwargs,\n )\n\n def type_check_method(self, _context: "TypeCheckContext", value: object) -> TypeCheck:\n return _fail_if_not_of_type(value, str, "string")\n\n\ndef create_string_type(name, description=None):\n return Stringish(name=name, key=name, description=description)\n\n\nAny = _Any()\nBool = _Bool()\nFloat = _Float()\nInt = _Int()\nString = _String()\nNothing = _Nothing()\n\n_RUNTIME_MAP = {\n BuiltinEnum.ANY: Any,\n BuiltinEnum.BOOL: Bool,\n BuiltinEnum.FLOAT: Float,\n BuiltinEnum.INT: Int,\n BuiltinEnum.STRING: String,\n BuiltinEnum.NOTHING: Nothing,\n}\n\n_PYTHON_TYPE_TO_DAGSTER_TYPE_MAPPING_REGISTRY: t.Dict[type, DagsterType] = {}\n"""Python types corresponding to user-defined RunTime types created using @map_to_dagster_type or\nas_dagster_type are registered here so that we can remap the Python types to runtime types."""\n\n\n
[docs]def make_python_type_usable_as_dagster_type(\n python_type: TypingType[t.Any], dagster_type: DagsterType\n) -> None:\n """Take any existing python type and map it to a dagster type (generally created with\n :py:class:`DagsterType <dagster.DagsterType>`) This can only be called once\n on a given python type.\n """\n check.inst_param(python_type, "python_type", type)\n check.inst_param(dagster_type, "dagster_type", DagsterType)\n registered_dagster_type = _PYTHON_TYPE_TO_DAGSTER_TYPE_MAPPING_REGISTRY.get(python_type)\n\n if registered_dagster_type is None:\n _PYTHON_TYPE_TO_DAGSTER_TYPE_MAPPING_REGISTRY[python_type] = dagster_type\n elif registered_dagster_type is not dagster_type:\n # This would be just a great place to insert a short URL pointing to the type system\n # documentation into the error message\n # https://github.com/dagster-io/dagster/issues/1831\n if isinstance(registered_dagster_type, TypeHintInferredDagsterType):\n raise DagsterInvalidDefinitionError(\n "A Dagster type has already been registered for the Python type "\n f'{python_type}. The Dagster type was "auto-registered" - i.e. a solid definition '\n "used the Python type as an annotation for one of its arguments or for its return "\n "value before make_python_type_usable_as_dagster_type was called, and we "\n "generated a Dagster type to correspond to it. To override the auto-generated "\n "Dagster type, call make_python_type_usable_as_dagster_type before any solid "\n "definitions refer to the Python type."\n )\n else:\n raise DagsterInvalidDefinitionError(\n "A Dagster type has already been registered for the Python type "\n f"{python_type}. make_python_type_usable_as_dagster_type can only "\n "be called once on a python type as it is registering a 1:1 mapping "\n "between that python type and a dagster type."\n )
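\n\n# --- Illustrative usage sketch (added commentary, not part of the original module). A minimal,\n# hedged example of how make_python_type_usable_as_dagster_type is typically called; the class\n# EvenType and the EvenDagsterType name are hypothetical and exist only for illustration.\n#\n# class EvenType:\n#     def __init__(self, num):\n#         assert num % 2 == 0\n#         self.num = num\n#\n# EvenDagsterType = PythonObjectDagsterType(EvenType, name="EvenDagsterType")\n# make_python_type_usable_as_dagster_type(EvenType, EvenDagsterType)\n#\n# After this call, annotating an op argument or return value with EvenType resolves to\n# EvenDagsterType, so the isinstance-based type check runs automatically.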
\n\n\nDAGSTER_INVALID_TYPE_ERROR_MESSAGE = (\n "Invalid type: dagster_type must be an instance of DagsterType or a Python type: "\n "got {dagster_type}{additional_msg}"\n)\n\n\nclass TypeHintInferredDagsterType(DagsterType):\n def __init__(self, python_type: t.Type):\n qualified_name = f"{python_type.__module__}.{python_type.__name__}"\n self.python_type = python_type\n super(TypeHintInferredDagsterType, self).__init__(\n key=f"_TypeHintInferred[{qualified_name}]",\n description=(\n f"DagsterType created from a type hint for the Python type {qualified_name}"\n ),\n type_check_fn=isinstance_type_check_fn(\n python_type, python_type.__name__, qualified_name\n ),\n typing_type=python_type,\n )\n\n @property\n def display_name(self) -> str:\n return self.python_type.__name__\n\n\ndef resolve_dagster_type(dagster_type: object) -> DagsterType:\n # circular dep\n from dagster._utils.typing_api import is_typing_type\n\n from ..definitions.result import MaterializeResult\n from .primitive_mapping import (\n is_supported_runtime_python_builtin,\n remap_python_builtin_for_runtime,\n )\n from .python_dict import (\n Dict as DDict,\n PythonDict,\n )\n from .python_set import DagsterSetApi, PythonSet\n from .python_tuple import DagsterTupleApi, PythonTuple\n from .transform_typing import transform_typing_type\n\n check.invariant(\n not (isinstance(dagster_type, type) and is_subclass(dagster_type, ConfigType)),\n "Cannot resolve a config type to a runtime type",\n )\n\n check.invariant(\n not (isinstance(dagster_type, type) and is_subclass(dagster_type, DagsterType)),\n f"Do not pass runtime type classes. Got {dagster_type}",\n )\n\n # First, check to see if we're using Dagster's generic output type to do the type catching.\n if is_generic_output_annotation(dagster_type):\n type_args = get_args(dagster_type)\n # If no inner type was provided, forward Any type.\n dagster_type = type_args[0] if len(type_args) == 1 else Any\n elif is_dynamic_output_annotation(dagster_type):\n dynamic_out_annotation = get_args(dagster_type)[0]\n type_args = get_args(dynamic_out_annotation)\n dagster_type = type_args[0] if len(type_args) == 1 else Any\n elif dagster_type == MaterializeResult:\n # convert MaterializeResult type annotation to Nothing until returning\n # scalar values via MaterializeResult is supported\n # https://github.com/dagster-io/dagster/issues/16887\n dagster_type = Nothing\n\n # Then, check to see if it is part of python's typing library\n if is_typing_type(dagster_type):\n dagster_type = transform_typing_type(dagster_type)\n if isinstance(dagster_type, DagsterType):\n return dagster_type\n\n # Test for unhashable objects -- this is if, for instance, someone has passed us an instance of\n # a dict where they meant to pass dict or Dict, etc.\n try:\n hash(dagster_type)\n except TypeError as e:\n raise DagsterInvalidDefinitionError(\n DAGSTER_INVALID_TYPE_ERROR_MESSAGE.format(\n additional_msg=(\n ", which isn't hashable. 
Did you pass an instance of a type instead of "\n "the type?"\n ),\n dagster_type=str(dagster_type),\n )\n ) from e\n\n if BuiltinEnum.contains(dagster_type):\n return DagsterType.from_builtin_enum(dagster_type)\n\n if is_supported_runtime_python_builtin(dagster_type):\n return remap_python_builtin_for_runtime(dagster_type)\n\n if dagster_type is None:\n return Any\n\n if dagster_type is DDict:\n return PythonDict\n if isinstance(dagster_type, DagsterTupleApi):\n return PythonTuple\n if isinstance(dagster_type, DagsterSetApi):\n return PythonSet\n if isinstance(dagster_type, DagsterListApi):\n return List(Any)\n\n if isinstance(dagster_type, type):\n return resolve_python_type_to_dagster_type(dagster_type)\n\n raise DagsterInvalidDefinitionError(\n DAGSTER_INVALID_TYPE_ERROR_MESSAGE.format(\n dagster_type=str(dagster_type), additional_msg="."\n )\n )\n\n\ndef is_dynamic_output_annotation(dagster_type: object) -> bool:\n check.invariant(\n not (isinstance(dagster_type, type) and is_subclass(dagster_type, ConfigType)),\n "Cannot resolve a config type to a runtime type",\n )\n\n check.invariant(\n not (isinstance(dagster_type, type) and is_subclass(dagster_type, ConfigType)),\n f"Do not pass runtime type classes. Got {dagster_type}",\n )\n\n if dagster_type == DynamicOutput or get_origin(dagster_type) == DynamicOutput:\n raise DagsterInvariantViolationError(\n "Op annotated with return type DynamicOutput. DynamicOutputs can only be returned in"\n " the context of a List. If only one output is needed, use the Output API."\n )\n\n if get_origin(dagster_type) == list and len(get_args(dagster_type)) == 1:\n list_inner_type = get_args(dagster_type)[0]\n return list_inner_type == DynamicOutput or get_origin(list_inner_type) == DynamicOutput\n return False\n\n\ndef is_generic_output_annotation(dagster_type: object) -> bool:\n return dagster_type == Output or get_origin(dagster_type) == Output\n\n\ndef resolve_python_type_to_dagster_type(python_type: t.Type) -> DagsterType:\n """Resolves a Python type to a Dagster type."""\n check.inst_param(python_type, "python_type", type)\n\n if python_type in _PYTHON_TYPE_TO_DAGSTER_TYPE_MAPPING_REGISTRY:\n return _PYTHON_TYPE_TO_DAGSTER_TYPE_MAPPING_REGISTRY[python_type]\n else:\n dagster_type = TypeHintInferredDagsterType(python_type)\n _PYTHON_TYPE_TO_DAGSTER_TYPE_MAPPING_REGISTRY[python_type] = dagster_type\n return dagster_type\n\n\nALL_RUNTIME_BUILTINS = list(_RUNTIME_MAP.values())\n\n\ndef construct_dagster_type_dictionary(\n node_defs: Sequence["NodeDefinition"],\n) -> Mapping[str, DagsterType]:\n from dagster._core.definitions.graph_definition import GraphDefinition\n\n type_dict_by_name = {t.unique_name: t for t in ALL_RUNTIME_BUILTINS}\n type_dict_by_key = {t.key: t for t in ALL_RUNTIME_BUILTINS}\n\n def process_node_def(node_def: "NodeDefinition"):\n input_output_types = list(node_def.all_input_output_types())\n for dagster_type in input_output_types:\n # We don't do uniqueness check on key because with classes\n # like Array, Noneable, etc, those are ephemeral objects\n # and it is perfectly fine to have many of them.\n type_dict_by_key[dagster_type.key] = dagster_type\n\n if not dagster_type.has_unique_name:\n continue\n\n if dagster_type.unique_name not in type_dict_by_name:\n type_dict_by_name[dagster_type.unique_name] = dagster_type\n continue\n\n if type_dict_by_name[dagster_type.unique_name] is not dagster_type:\n raise DagsterInvalidDefinitionError(\n (\n 'You have created two dagster types with the same name "{type_name}". 
'\n "Dagster types must have unique names."\n ).format(type_name=dagster_type.display_name)\n )\n\n if isinstance(node_def, GraphDefinition):\n for child_node_def in node_def.node_defs:\n process_node_def(child_node_def)\n\n for node_def in node_defs:\n process_node_def(node_def)\n\n return type_dict_by_key\n\n\nclass DagsterOptionalApi:\n def __getitem__(self, inner_type: t.Union[t.Type, DagsterType]) -> OptionalType:\n inner_type = resolve_dagster_type(check.not_none_param(inner_type, "inner_type"))\n return OptionalType(inner_type)\n\n\nOptional: DagsterOptionalApi = DagsterOptionalApi()\n
", "current_page_name": "_modules/dagster/_core/types/dagster_type", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.types.dagster_type"}, "decorator": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.types.decorator

\nfrom typing import TYPE_CHECKING, Callable, Optional, Type, TypeVar, Union, overload\n\nimport dagster._check as check\n\nfrom .dagster_type import PythonObjectDagsterType, make_python_type_usable_as_dagster_type\n\nif TYPE_CHECKING:\n    from dagster._core.types.config_schema import DagsterTypeLoader\n\nT_Type = TypeVar("T_Type", bound=Type[object])\n\n\n@overload\ndef usable_as_dagster_type(\n    name: Optional[str] = ...,\n    description: Optional[str] = ...,\n    loader: Optional["DagsterTypeLoader"] = ...,\n) -> Callable[[T_Type], T_Type]: ...\n\n\n@overload\ndef usable_as_dagster_type(\n    name: T_Type,\n) -> T_Type: ...\n\n\n
[docs]def usable_as_dagster_type(\n name: Optional[Union[str, T_Type]] = None,\n description: Optional[str] = None,\n loader: Optional["DagsterTypeLoader"] = None,\n) -> Union[T_Type, Callable[[T_Type], T_Type]]:\n """Decorate a Python class to make it usable as a Dagster Type.\n\n This is intended to make it straightforward to annotate existing business logic classes to\n make them dagster types whose typecheck is an isinstance check against that python class.\n\n Args:\n python_type (cls): The python type to make usable as python type.\n name (Optional[str]): Name of the new Dagster type. If ``None``, the name (``__name__``) of\n the ``python_type`` will be used.\n description (Optional[str]): A user-readable description of the type.\n loader (Optional[DagsterTypeLoader]): An instance of a class that\n inherits from :py:class:`DagsterTypeLoader` and can map config data to a value of\n this type. Specify this argument if you will need to shim values of this type using the\n config machinery. As a rule, you should use the\n :py:func:`@dagster_type_loader <dagster.dagster_type_loader>` decorator to construct\n these arguments.\n\n Examples:\n .. code-block:: python\n\n # dagster_aws.s3.file_manager.S3FileHandle\n @usable_as_dagster_type\n class S3FileHandle(FileHandle):\n def __init__(self, s3_bucket, s3_key):\n self._s3_bucket = check.str_param(s3_bucket, 's3_bucket')\n self._s3_key = check.str_param(s3_key, 's3_key')\n\n @property\n def s3_bucket(self):\n return self._s3_bucket\n\n @property\n def s3_key(self):\n return self._s3_key\n\n @property\n def path_desc(self):\n return self.s3_path\n\n @property\n def s3_path(self):\n return 's3://{bucket}/{key}'.format(bucket=self.s3_bucket, key=self.s3_key)\n """\n # check for no args, no parens case\n if isinstance(name, type):\n bare_cls = name # with no parens, name is actually the decorated class\n make_python_type_usable_as_dagster_type(\n bare_cls,\n PythonObjectDagsterType(python_type=bare_cls, name=bare_cls.__name__, description=None),\n )\n return bare_cls\n\n def _with_args(bare_cls: T_Type) -> T_Type:\n check.class_param(bare_cls, "bare_cls")\n new_name = check.opt_str_param(name, "name") if name else bare_cls.__name__\n\n make_python_type_usable_as_dagster_type(\n bare_cls,\n PythonObjectDagsterType(\n name=new_name,\n description=description,\n python_type=bare_cls,\n loader=loader,\n ),\n )\n return bare_cls\n\n return _with_args
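\n\n# --- Illustrative usage sketch (added commentary, not part of the original module). Once a class\n# is decorated with @usable_as_dagster_type, the plain Python class itself can be used in op\n# signatures and the generated isinstance-based type check runs on those inputs and outputs. The\n# EvenType class and double_even op below are hypothetical and exist only for illustration.\n#\n# @usable_as_dagster_type\n# class EvenType:\n#     def __init__(self, num):\n#         assert num % 2 == 0\n#         self.num = num\n#\n# @op\n# def double_even(even_num: EvenType) -> EvenType:\n#     return EvenType(even_num.num * 2)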
\n
", "current_page_name": "_modules/dagster/_core/types/decorator", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.types.decorator"}}}, "_serdes": {"config_class": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._serdes.config_class

\nimport importlib\nfrom abc import ABC, abstractmethod\nfrom typing import (\n    TYPE_CHECKING,\n    Any,\n    Dict,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Type,\n    TypeVar,\n    Union,\n    overload,\n)\n\nfrom typing_extensions import Self\n\nimport dagster._check as check\nfrom dagster._utils import convert_dagster_submodule_name\nfrom dagster._utils.yaml_utils import load_run_config_yaml\n\nfrom .serdes import (\n    NamedTupleSerializer,\n    whitelist_for_serdes,\n)\n\nif TYPE_CHECKING:\n    from dagster._config.config_schema import UserConfigSchema\n\nT_ConfigurableClass = TypeVar("T_ConfigurableClass")\n\n\nclass ConfigurableClassDataSerializer(NamedTupleSerializer["ConfigurableClassData"]):\n    def after_pack(self, **packed: Any) -> Dict[str, Any]:\n        packed["module_name"] = convert_dagster_submodule_name(packed["module_name"], "public")\n        return packed\n\n\n
[docs]@whitelist_for_serdes(serializer=ConfigurableClassDataSerializer)\nclass ConfigurableClassData(\n NamedTuple(\n "_ConfigurableClassData",\n [\n ("module_name", str),\n ("class_name", str),\n ("config_yaml", str),\n ],\n )\n):\n """Serializable tuple describing where to find a class and the config fragment that should\n be used to instantiate it.\n\n Users should not instantiate this class directly.\n\n Classes intended to be serialized in this way should implement the\n :py:class:`dagster.serdes.ConfigurableClass` mixin.\n """\n\n def __new__(cls, module_name: str, class_name: str, config_yaml: str):\n return super(ConfigurableClassData, cls).__new__(\n cls,\n convert_dagster_submodule_name(check.str_param(module_name, "module_name"), "private"),\n check.str_param(class_name, "class_name"),\n check.str_param(config_yaml, "config_yaml"),\n )\n\n @property\n def config_dict(self) -> Mapping[str, Any]:\n return check.is_dict(load_run_config_yaml(self.config_yaml), key_type=str)\n\n def info_dict(self) -> Mapping[str, Any]:\n return {\n "module": self.module_name,\n "class": self.class_name,\n "config": self.config_dict,\n }\n\n @overload\n def rehydrate(self, as_type: Type[T_ConfigurableClass]) -> T_ConfigurableClass: ...\n\n @overload\n def rehydrate(self, as_type: None = ...) -> "ConfigurableClass": ...\n\n def rehydrate(\n self, as_type: Optional[Type[T_ConfigurableClass]] = None\n ) -> Union["ConfigurableClass", T_ConfigurableClass]:\n from dagster._config import process_config, resolve_to_config_type\n from dagster._core.errors import DagsterInvalidConfigError\n\n try:\n module = importlib.import_module(self.module_name)\n except ModuleNotFoundError:\n check.failed(\n f"Couldn't import module {self.module_name} when attempting to load the "\n f"configurable class {self.module_name}.{self.class_name}"\n )\n try:\n klass = getattr(module, self.class_name)\n except AttributeError:\n check.failed(\n f"Couldn't find class {self.class_name} in module when attempting to load the "\n f"configurable class {self.module_name}.{self.class_name}"\n )\n\n if not issubclass(klass, as_type or ConfigurableClass):\n raise check.CheckError(\n klass,\n f"class {self.class_name} in module {self.module_name}",\n ConfigurableClass,\n )\n\n config_dict = self.config_dict\n result = process_config(resolve_to_config_type(klass.config_type()), config_dict)\n if not result.success:\n raise DagsterInvalidConfigError(\n f"Errors whilst loading configuration for {klass.config_type()}.",\n result.errors,\n config_dict,\n )\n return klass.from_config_value(self, check.not_none(result.value))
\n\n\n
[docs]class ConfigurableClass(ABC):\n """Abstract mixin for classes that can be loaded from config.\n\n This supports a powerful plugin pattern which avoids both a) a lengthy, hard-to-synchronize list\n of conditional imports / optional extras_requires in dagster core and b) a magic directory or\n file in which third parties can place plugin packages. Instead, the intention is to make, e.g.,\n run storage, pluggable with a config chunk like:\n\n .. code-block:: yaml\n\n run_storage:\n module: very_cool_package.run_storage\n class: SplendidRunStorage\n config:\n magic_word: "quux"\n\n This same pattern should eventually be viable for other system components, e.g. engines.\n\n The ``ConfigurableClass`` mixin provides the necessary hooks for classes to be instantiated from\n an instance of ``ConfigurableClassData``.\n\n Pieces of the Dagster system which we wish to make pluggable in this way should consume a config\n type such as:\n\n .. code-block:: python\n\n {'module': str, 'class': str, 'config': Field(Permissive())}\n\n """\n\n @property\n @abstractmethod\n def inst_data(self) -> Optional[ConfigurableClassData]:\n """Subclass must be able to return the inst_data as a property if it has been constructed\n through the from_config_value code path.\n """\n\n @classmethod\n @abstractmethod\n def config_type(cls) -> "UserConfigSchema":\n """Get the config type against which to validate a config yaml fragment.\n\n The only place config values matching this type are used is inside `from_config_value`. This\n is an alternative constructor for a class. It is a common pattern for the config type to\n match constructor arguments, so `from_config_value`\n\n The config type against which to validate a config yaml fragment\n serialized in an instance of ``ConfigurableClassData``.\n """\n ...\n # We need to raise `NotImplementedError` here because nothing prevents abstract class\n # methods from being called.\n raise NotImplementedError(f"{cls.__name__} must implement the config_type classmethod")\n\n @classmethod\n @abstractmethod\n def from_config_value(\n cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any]\n ) -> Self:\n """Create an instance of the ConfigurableClass from a validated config value.\n\n The config value used here should be derived from the accompanying `inst_data` argument.\n `inst_data` contains the yaml-serialized config-- this must be parsed and\n validated/normalized, then passed to this method for object instantiation. This is done in\n ConfigurableClassData.rehydrate.\n\n Args:\n config_value (dict): The validated config value to use. Typically this should be the\n ``value`` attribute of a\n :py:class:`~dagster._core.types.evaluator.evaluation.EvaluateValueResult`.\n\n\n A common pattern is for the implementation to align the config_value with the signature\n of the ConfigurableClass's constructor:\n\n .. code-block:: python\n\n @classmethod\n def from_config_value(cls, inst_data, config_value):\n return MyConfigurableClass(inst_data=inst_data, **config_value)\n\n """
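\n\n# --- Illustrative sketch (added commentary, not part of the original module). A minimal, hedged\n# example of the ConfigurableClass pattern described above; MyPluggableStorage and its base_dir\n# field are hypothetical, and a real plugin would normally also subclass the relevant system base\n# class (e.g. a storage base class) in addition to ConfigurableClass.\n#\n# class MyPluggableStorage(ConfigurableClass):\n#     def __init__(self, inst_data=None, base_dir=None):\n#         self._inst_data = inst_data\n#         self._base_dir = base_dir\n#\n#     @property\n#     def inst_data(self):\n#         return self._inst_data\n#\n#     @classmethod\n#     def config_type(cls):\n#         return {"base_dir": str}\n#\n#     @classmethod\n#     def from_config_value(cls, inst_data, config_value):\n#         return cls(inst_data=inst_data, **config_value)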
\n\n\ndef class_from_code_pointer(module_name: str, class_name: str) -> Type[object]:\n try:\n module = importlib.import_module(module_name)\n except ModuleNotFoundError:\n check.failed(\n "Couldn't import module {module_name} when attempting to load the class {klass}".format(\n module_name=module_name,\n klass=module_name + "." + class_name,\n )\n )\n try:\n return getattr(module, class_name)\n except AttributeError:\n check.failed(\n "Couldn't find class {class_name} in module when attempting to load the "\n "class {klass}".format(\n class_name=class_name,\n klass=module_name + "." + class_name,\n )\n )\n
", "current_page_name": "_modules/dagster/_serdes/config_class", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._serdes.config_class"}}, "_utils": {"alabaster_version": "0.7.13", "alert": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._utils.alert

\nimport datetime\nimport smtplib\nimport ssl\nfrom typing import TYPE_CHECKING, Callable, Optional, Sequence, Union\n\nfrom dagster._annotations import deprecated_param\nfrom dagster._core.definitions.sensor_definition import DefaultSensorStatus, SensorDefinition\nfrom dagster._core.errors import DagsterInvalidDefinitionError\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.graph_definition import GraphDefinition\n    from dagster._core.definitions.job_definition import JobDefinition\n    from dagster._core.definitions.run_status_sensor_definition import RunFailureSensorContext\n    from dagster._core.definitions.selector import JobSelector, RepositorySelector\n    from dagster._core.definitions.unresolved_asset_job_definition import (\n        UnresolvedAssetJobDefinition,\n    )\n\n\ndef _default_failure_email_body(context: "RunFailureSensorContext") -> str:\n    from dagster._core.host_representation.external_data import DEFAULT_MODE_NAME\n\n    return "<br>".join(\n        [\n            f"Pipeline {context.dagster_run.job_name} failed!",\n            f"Run ID: {context.dagster_run.run_id}",\n            f"Mode: {DEFAULT_MODE_NAME}",\n            f"Error: {context.failure_event.message}",\n        ]\n    )\n\n\ndef _default_failure_email_subject(context) -> str:\n    return f"Dagster Run Failed: {context.pipeline_run.job_name}"\n\n\nEMAIL_MESSAGE = """From: {email_from}\nTo: {email_to}\nMIME-Version: 1.0\nContent-type: text/html\nSubject: {email_subject}\n\n{email_body}\n\n<!-- this ensures Gmail doesn't trim the email -->\n<span style="opacity: 0"> {randomness} </span>\n"""\n\n\ndef send_email_via_ssl(\n    email_from: str,\n    email_password: str,\n    email_to: Sequence[str],\n    message: str,\n    smtp_host: str,\n    smtp_port: int,\n):\n    context = ssl.create_default_context()\n    with smtplib.SMTP_SSL(smtp_host, smtp_port, context=context) as server:\n        server.login(email_from, email_password)\n        server.sendmail(email_from, email_to, message)\n\n\ndef send_email_via_starttls(\n    email_from: str,\n    email_password: str,\n    email_to: Sequence[str],\n    message: str,\n    smtp_host: str,\n    smtp_port: int,\n):\n    context = ssl.create_default_context()\n    with smtplib.SMTP(smtp_host, smtp_port) as server:\n        server.starttls(context=context)\n        server.login(email_from, email_password)\n        server.sendmail(email_from, email_to, message)\n\n\n
[docs]@deprecated_param(\n param="job_selection",\n breaking_version="2.0",\n additional_warn_text="Use `monitored_jobs` instead.",\n)\ndef make_email_on_run_failure_sensor(\n email_from: str,\n email_password: str,\n email_to: Sequence[str],\n email_body_fn: Callable[["RunFailureSensorContext"], str] = _default_failure_email_body,\n email_subject_fn: Callable[["RunFailureSensorContext"], str] = _default_failure_email_subject,\n smtp_host: str = "smtp.gmail.com",\n smtp_type: str = "SSL",\n smtp_port: Optional[int] = None,\n name: Optional[str] = None,\n webserver_base_url: Optional[str] = None,\n monitored_jobs: Optional[\n Sequence[\n Union[\n "JobDefinition",\n "GraphDefinition",\n "UnresolvedAssetJobDefinition",\n "RepositorySelector",\n "JobSelector",\n ]\n ]\n ] = None,\n job_selection: Optional[\n Sequence[\n Union[\n "JobDefinition",\n "GraphDefinition",\n "UnresolvedAssetJobDefinition",\n "RepositorySelector",\n "JobSelector",\n ]\n ]\n ] = None,\n monitor_all_repositories: bool = False,\n default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,\n) -> SensorDefinition:\n """Create a job failure sensor that sends email via the SMTP protocol.\n\n Args:\n email_from (str): The sender email address to send the message from.\n email_password (str): The password of the sender.\n email_to (List[str]): The receipt email addresses to send the message to.\n email_body_fn (Optional(Callable[[RunFailureSensorContext], str])): Function which\n takes in the ``RunFailureSensorContext`` outputs the email body you want to send.\n Defaults to the plain text that contains error message, job name, and run ID.\n email_subject_fn (Optional(Callable[[RunFailureSensorContext], str])): Function which\n takes in the ``RunFailureSensorContext`` outputs the email subject you want to send.\n Defaults to "Dagster Run Failed: <job_name>".\n smtp_host (str): The hostname of the SMTP server. Defaults to "smtp.gmail.com".\n smtp_type (str): The protocol; either "SSL" or "STARTTLS". Defaults to SSL.\n smtp_port (Optional[int]): The SMTP port. Defaults to 465 for SSL, 587 for STARTTLS.\n name: (Optional[str]): The name of the sensor. Defaults to "email_on_job_failure".\n webserver_base_url: (Optional[str]): The base url of your dagster-webserver instance. Specify this to allow\n messages to include deeplinks to the failed run.\n monitored_jobs (Optional[List[Union[JobDefinition, GraphDefinition, JobDefinition, RepositorySelector, JobSelector]]]):\n The jobs that will be monitored by this failure sensor. Defaults to None, which means the alert will\n be sent when any job in the repository fails. To monitor jobs in external repositories,\n use RepositorySelector and JobSelector.\n monitor_all_repositories (bool): If set to True, the sensor will monitor all runs in the\n Dagster instance. If set to True, an error will be raised if you also specify\n monitored_jobs or job_selection. Defaults to False.\n job_selection (Optional[List[Union[JobDefinition, GraphDefinition, JobDefinition, RepositorySelector, JobSelector]]]):\n (deprecated in favor of monitored_jobs) The jobs that will be monitored by this failure\n sensor. Defaults to None, which means the alert will be sent when any job in the repository fails.\n default_status (DefaultSensorStatus): Whether the sensor starts as running or not. The default\n status can be overridden from the Dagster UI or via the GraphQL API.\n\n Examples:\n .. 
code-block:: python\n\n email_on_run_failure = make_email_on_run_failure_sensor(\n email_from="no-reply@example.com",\n email_password=os.getenv("ALERT_EMAIL_PASSWORD"),\n email_to=["xxx@example.com"],\n )\n\n @repository\n def my_repo():\n return [my_job + email_on_run_failure]\n\n .. code-block:: python\n\n def my_message_fn(context: RunFailureSensorContext) -> str:\n return (\n f"Job {context.pipeline_run.job_name} failed!"\n f"Error: {context.failure_event.message}"\n )\n\n email_on_run_failure = make_email_on_run_failure_sensor(\n email_from="no-reply@example.com",\n email_password=os.getenv("ALERT_EMAIL_PASSWORD"),\n email_to=["xxx@example.com"],\n email_body_fn=my_message_fn,\n email_subject_fn=lambda _: "Dagster Alert",\n webserver_base_url="http://mycoolsite.com",\n )\n\n\n """\n from dagster._core.definitions.run_status_sensor_definition import (\n RunFailureSensorContext,\n run_failure_sensor,\n )\n\n jobs = monitored_jobs if monitored_jobs else job_selection\n\n @run_failure_sensor(\n name=name,\n monitored_jobs=jobs,\n default_status=default_status,\n monitor_all_repositories=monitor_all_repositories,\n )\n def email_on_run_failure(context: RunFailureSensorContext):\n email_body = email_body_fn(context)\n if webserver_base_url:\n email_body += (\n f'<p><a href="{webserver_base_url}/runs/{context.dagster_run.run_id}">View in'\n " the Dagster UI</a></p>"\n )\n\n message = EMAIL_MESSAGE.format(\n email_to=",".join(email_to),\n email_from=email_from,\n email_subject=email_subject_fn(context),\n email_body=email_body,\n randomness=datetime.datetime.now(),\n )\n\n if smtp_type == "SSL":\n send_email_via_ssl(\n email_from, email_password, email_to, message, smtp_host, smtp_port=smtp_port or 465\n )\n elif smtp_type == "STARTTLS":\n send_email_via_starttls(\n email_from, email_password, email_to, message, smtp_host, smtp_port=smtp_port or 587\n )\n else:\n raise DagsterInvalidDefinitionError(f'smtp_type "{smtp_type}" is not supported.')\n\n return email_on_run_failure
\n
", "current_page_name": "_modules/dagster/_utils/alert", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}, {"link": "../", "title": "dagster._utils"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._utils.alert"}, "body": "

Source code for dagster._utils

\nimport _thread as thread\nimport contextlib\nimport contextvars\nimport datetime\nimport errno\nimport functools\nimport inspect\nimport multiprocessing\nimport os\nimport re\nimport signal\nimport socket\nimport subprocess\nimport sys\nimport tempfile\nimport threading\nimport time\nfrom collections import OrderedDict\nfrom datetime import timezone\nfrom enum import Enum\nfrom signal import Signals\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Callable,\n    ContextManager,\n    Dict,\n    Generator,\n    Generic,\n    Hashable,\n    Iterator,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n    Type,\n    TypeVar,\n    Union,\n    cast,\n    overload,\n)\n\nimport packaging.version\nfrom typing_extensions import Literal, TypeAlias, TypeGuard\n\nimport dagster._check as check\nimport dagster._seven as seven\n\nfrom .internal_init import IHasInternalInit as IHasInternalInit\n\nif sys.version_info > (3,):\n    from pathlib import Path\nelse:\n    from pathlib2 import Path\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.definitions_class import Definitions\n    from dagster._core.definitions.repository_definition.repository_definition import (\n        RepositoryDefinition,\n    )\n    from dagster._core.events import DagsterEvent\n\nK = TypeVar("K")\nT = TypeVar("T")\nU = TypeVar("U")\nV = TypeVar("V")\n\nEPOCH = datetime.datetime.utcfromtimestamp(0)\n\nPICKLE_PROTOCOL = 4\n\n\nDEFAULT_WORKSPACE_YAML_FILENAME = "workspace.yaml"\n\nPrintFn: TypeAlias = Callable[[Any], None]\n\nSingleInstigatorDebugCrashFlags: TypeAlias = Mapping[str, int]\nDebugCrashFlags: TypeAlias = Mapping[str, SingleInstigatorDebugCrashFlags]\n\n\n# Use this to get the "library version" (pre-1.0 version) from the "core version" (post 1.0\n# version). 16 is from the 0.16.0 that library versions stayed on when core went to 1.0.0.\ndef library_version_from_core_version(core_version: str) -> str:\n    parsed_version = parse_package_version(core_version)\n\n    release = parsed_version.release\n    if release[0] >= 1:\n        library_version = ".".join(["0", str(16 + release[1]), str(release[2])])\n\n        if parsed_version.is_prerelease:\n            library_version = library_version + "".join(\n                [str(pre) for pre in check.not_none(parsed_version.pre)]\n            )\n\n        if parsed_version.is_postrelease:\n            library_version = library_version + "post" + str(parsed_version.post)\n\n        return library_version\n    else:\n        return core_version\n\n\ndef parse_package_version(version_str: str) -> packaging.version.Version:\n    parsed_version = packaging.version.parse(version_str)\n    assert isinstance(parsed_version, packaging.version.Version)\n    return parsed_version\n\n\ndef convert_dagster_submodule_name(name: str, mode: Literal["private", "public"]) -> str:\n    """This function was introduced when all Dagster submodules were marked private by\n    underscore-prefixing the root submodules (e.g. `dagster._core`). The function provides\n    backcompatibility by converting modules between the old and new (i.e. public and private) forms.\n    This is needed when reading older data or communicating with older versions of Dagster.\n    """\n    if mode == "private":\n        return re.sub(r"^dagster\\.([^_])", r"dagster._\\1", name)\n    elif mode == "public":\n        return re.sub(r"^dagster._", "dagster.", name)\n    else:\n        check.failed("`mode` must be 'private' or 'public'")\n\n\n
[docs]def file_relative_path(dunderfile: str, relative_path: str) -> str:\n """Get a path relative to the currently executing Python file.\n\n This function is useful when one needs to load a file that is relative to the position of\n the current file. (Such as when you encode a configuration file path in a source file and want\n it to be runnable from any current working directory.)\n\n Args:\n dunderfile (str): Should always be ``__file__``.\n relative_path (str): Path to get relative to the currently executing file.\n\n **Examples**:\n\n .. code-block:: python\n\n file_relative_path(__file__, 'path/relative/to/file')\n\n """\n check.str_param(dunderfile, "dunderfile")\n check.str_param(relative_path, "relative_path")\n\n return os.path.join(os.path.dirname(dunderfile), relative_path)
\n\n\ndef script_relative_path(file_path: str) -> str:\n """Useful for testing with local files. Use a path relative to where the\n test resides and this function will return the absolute path\n of that file. Otherwise it will be relative to script that\n ran the test.\n\n Note: this is function is very, very expensive (on the order of 1\n millisecond per invocation) so this should only be used in performance\n insensitive contexts. Prefer file_relative_path for anything with\n performance constraints.\n\n """\n # from http://bit.ly/2snyC6s\n\n check.str_param(file_path, "file_path")\n scriptdir = inspect.stack()[1][1]\n return os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(scriptdir)), file_path))\n\n\n# Adapted from https://github.com/okunishinishi/python-stringcase/blob/master/stringcase.py\ndef camelcase(string: str) -> str:\n check.str_param(string, "string")\n\n string = re.sub(r"^[\\-_\\.]", "", str(string))\n if not string:\n return string\n return str(string[0]).upper() + re.sub(\n r"[\\-_\\.\\s]([a-z])", lambda matched: str(matched.group(1)).upper(), string[1:]\n )\n\n\ndef ensure_single_item(ddict: Mapping[T, U]) -> Tuple[T, U]:\n check.mapping_param(ddict, "ddict")\n check.param_invariant(len(ddict) == 1, "ddict", "Expected dict with single item")\n return next(iter(ddict.items()))\n\n\n@contextlib.contextmanager\ndef pushd(path: str) -> Iterator[str]:\n old_cwd = os.getcwd()\n os.chdir(path)\n try:\n yield path\n finally:\n os.chdir(old_cwd)\n\n\ndef safe_isfile(path: str) -> bool:\n """Backport of Python 3.8 os.path.isfile behavior.\n\n This is intended to backport https://docs.python.org/dev/whatsnew/3.8.html#os-path. I'm not\n sure that there are other ways to provoke this behavior on Unix other than the null byte,\n but there are certainly other ways to do it on Windows. Afaict, we won't mask other\n ValueErrors, and the behavior in the status quo ante is rough because we risk throwing an\n unexpected, uncaught ValueError from very deep in our logic.\n """\n try:\n return os.path.isfile(path)\n except ValueError:\n return False\n\n\ndef mkdir_p(path: str) -> str:\n try:\n os.makedirs(path)\n return path\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n return path\n else:\n raise\n\n\ndef hash_collection(\n collection: Union[\n Mapping[Hashable, Any], Sequence[Any], AbstractSet[Any], Tuple[Any, ...], NamedTuple\n ]\n) -> int:\n """Hash a mutable collection or immutable collection containing mutable elements.\n\n This is useful for hashing Dagster-specific NamedTuples that contain mutable lists or dicts.\n The default NamedTuple __hash__ function assumes the contents of the NamedTuple are themselves\n hashable, and will throw an error if they are not. This can occur when trying to e.g. compute a\n cache key for the tuple for use with `lru_cache`.\n\n This alternative implementation will recursively process collection elements to convert basic\n lists and dicts to tuples prior to hashing. It is recommended to cache the result:\n\n Example:\n .. 
code-block:: python\n\n def __hash__(self):\n if not hasattr(self, '_hash'):\n self._hash = hash_named_tuple(self)\n return self._hash\n """\n assert isinstance(\n collection, (list, dict, set, tuple)\n ), f"Cannot hash collection of type {type(collection)}"\n return hash(make_hashable(collection))\n\n\n@overload\ndef make_hashable(value: Union[List[Any], Set[Any]]) -> Tuple[Any, ...]: ...\n\n\n@overload\ndef make_hashable(value: Dict[Any, Any]) -> Tuple[Tuple[Any, Any]]: ...\n\n\n@overload\ndef make_hashable(value: Any) -> Any: ...\n\n\ndef make_hashable(value: Any) -> Any:\n if isinstance(value, dict):\n return tuple(sorted((key, make_hashable(value)) for key, value in value.items()))\n elif isinstance(value, (list, tuple, set)):\n return tuple([make_hashable(x) for x in value])\n else:\n return value\n\n\ndef get_prop_or_key(elem, key):\n if isinstance(elem, Mapping):\n return elem.get(key)\n else:\n return getattr(elem, key)\n\n\ndef list_pull(alist, key):\n return list(map(lambda elem: get_prop_or_key(elem, key), alist))\n\n\ndef all_none(kwargs):\n for value in kwargs.values():\n if value is not None:\n return False\n return True\n\n\ndef check_script(path, return_code=0):\n try:\n subprocess.check_output([sys.executable, path])\n except subprocess.CalledProcessError as exc:\n if return_code != 0:\n if exc.returncode == return_code:\n return\n raise\n\n\ndef check_cli_execute_file_job(path, pipeline_fn_name, env_file=None):\n from dagster._core.test_utils import instance_for_test\n\n with instance_for_test():\n cli_cmd = [\n sys.executable,\n "-m",\n "dagster",\n "pipeline",\n "execute",\n "-f",\n path,\n "-a",\n pipeline_fn_name,\n ]\n\n if env_file:\n cli_cmd.append("-c")\n cli_cmd.append(env_file)\n\n try:\n subprocess.check_output(cli_cmd)\n except subprocess.CalledProcessError as cpe:\n print(cpe) # noqa: T201\n raise cpe\n\n\ndef safe_tempfile_path_unmanaged() -> str:\n # This gets a valid temporary file path in the safest possible way, although there is still no\n # guarantee that another process will not create a file at this path. 
The NamedTemporaryFile is\n # deleted when the context manager exits and the file object is closed.\n #\n # This is preferable to using NamedTemporaryFile as a context manager and passing the name\n # attribute of the file object around because NamedTemporaryFiles cannot be opened a second time\n # if already open on Windows NT or later:\n # https://docs.python.org/3.8/library/tempfile.html#tempfile.NamedTemporaryFile\n # https://github.com/dagster-io/dagster/issues/1582\n with tempfile.NamedTemporaryFile() as fd:\n path = fd.name\n return Path(path).as_posix()\n\n\n@contextlib.contextmanager\ndef safe_tempfile_path() -> Iterator[str]:\n path = None\n try:\n path = safe_tempfile_path_unmanaged()\n yield path\n finally:\n if path is not None and os.path.exists(path):\n os.unlink(path)\n\n\n@overload\ndef ensure_gen(thing_or_gen: Generator[T, Any, Any]) -> Generator[T, Any, Any]:\n pass\n\n\n@overload\ndef ensure_gen(thing_or_gen: T) -> Generator[T, Any, Any]:\n pass\n\n\ndef ensure_gen(\n thing_or_gen: Union[T, Iterator[T], Generator[T, Any, Any]]\n) -> Generator[T, Any, Any]:\n if not inspect.isgenerator(thing_or_gen):\n thing_or_gen = cast(T, thing_or_gen)\n\n def _gen_thing():\n yield thing_or_gen\n\n return _gen_thing()\n\n return thing_or_gen\n\n\ndef ensure_dir(file_path: str) -> str:\n try:\n os.makedirs(file_path)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n return file_path\n\n\ndef ensure_file(path: str) -> str:\n ensure_dir(os.path.dirname(path))\n if not os.path.exists(path):\n touch_file(path)\n return path\n\n\ndef touch_file(path):\n ensure_dir(os.path.dirname(path))\n with open(path, "a", encoding="utf8"):\n os.utime(path, None)\n\n\ndef _kill_on_event(termination_event):\n termination_event.wait()\n send_interrupt()\n\n\ndef send_interrupt():\n if seven.IS_WINDOWS:\n # This will raise a KeyboardInterrupt in python land - meaning this wont be able to\n # interrupt things like sleep()\n thread.interrupt_main()\n else:\n # If on unix send an os level signal to interrupt any situation we may be stuck in\n os.kill(os.getpid(), signal.SIGINT)\n\n\n# Function to be invoked by daemon thread in processes which seek to be cancellable.\n# The motivation for this approach is to be able to exit cleanly on Windows. 
An alternative\n# path is to change how the processes are opened and send CTRL_BREAK signals, which at\n# the time of authoring seemed a more costly approach.\n#\n# Reading for the curious:\n# * https://stackoverflow.com/questions/35772001/how-to-handle-the-signal-in-python-on-windows-machine\n# * https://stefan.sofa-rockers.org/2013/08/15/handling-sub-process-hierarchies-python-linux-os-x/\ndef start_termination_thread(termination_event):\n check.inst_param(termination_event, "termination_event", ttype=type(multiprocessing.Event()))\n\n int_thread = threading.Thread(\n target=_kill_on_event, args=(termination_event,), name="kill-on-event"\n )\n int_thread.daemon = True\n int_thread.start()\n\n\n# Executes the next() function within an instance of the supplied context manager class\n# (leaving the context before yielding each result)\ndef iterate_with_context(\n context_fn: Callable[[], ContextManager[Any]], iterator: Iterator[T]\n) -> Iterator[T]:\n while True:\n # Allow interrupts during user code so that we can terminate slow/hanging steps\n with context_fn():\n try:\n next_output = next(iterator)\n except StopIteration:\n return\n\n yield next_output\n\n\ndef datetime_as_float(dt: datetime.datetime) -> float:\n check.inst_param(dt, "dt", datetime.datetime)\n return float((dt - EPOCH).total_seconds())\n\n\nT_GeneratedContext = TypeVar("T_GeneratedContext")\n\n\nclass EventGenerationManager(Generic[T_GeneratedContext]):\n """Utility class that wraps an event generator function, that also yields a single instance of\n a typed object. All events yielded before the typed object are yielded through the method\n `generate_setup_events` and all events yielded after the typed object are yielded through the\n method `generate_teardown_events`.\n\n This is used to help replace the context managers used in pipeline initialization with\n generators so that we can begin emitting initialization events AND construct a pipeline context\n object, while managing explicit setup/teardown.\n\n This does require calling `generate_setup_events` AND `generate_teardown_events` in order to\n get the typed object.\n """\n\n def __init__(\n self,\n generator: Iterator[Union["DagsterEvent", T_GeneratedContext]],\n object_cls: Type[T_GeneratedContext],\n require_object: Optional[bool] = True,\n ):\n self.generator = check.generator(generator)\n self.object_cls: Type[T_GeneratedContext] = check.class_param(object_cls, "object_cls")\n self.require_object = check.bool_param(require_object, "require_object")\n self.object: Optional[T_GeneratedContext] = None\n self.did_setup = False\n self.did_teardown = False\n\n def generate_setup_events(self) -> Iterator["DagsterEvent"]:\n self.did_setup = True\n try:\n while self.object is None:\n obj = next(self.generator)\n if isinstance(obj, self.object_cls):\n self.object = obj\n else:\n yield obj\n except StopIteration:\n if self.require_object:\n check.inst_param(\n self.object,\n "self.object",\n self.object_cls,\n f"generator never yielded object of type {self.object_cls.__name__}",\n )\n\n def get_object(self) -> T_GeneratedContext:\n if not self.did_setup:\n check.failed("Called `get_object` before `generate_setup_events`")\n return cast(T_GeneratedContext, self.object)\n\n def generate_teardown_events(self) -> Iterator["DagsterEvent"]:\n self.did_teardown = True\n if self.object:\n yield from self.generator\n\n\ndef utc_datetime_from_timestamp(timestamp: float) -> datetime.datetime:\n tz = timezone.utc\n return datetime.datetime.fromtimestamp(timestamp, tz=tz)\n\n\ndef 
utc_datetime_from_naive(dt: datetime.datetime) -> datetime.datetime:\n tz = timezone.utc\n return dt.replace(tzinfo=tz)\n\n\ndef is_enum_value(value: object) -> bool:\n return False if value is None else issubclass(value.__class__, Enum)\n\n\ndef git_repository_root() -> str:\n return subprocess.check_output(["git", "rev-parse", "--show-toplevel"]).decode("utf-8").strip()\n\n\ndef segfault() -> None:\n """Reliable cross-Python version segfault.\n\n https://bugs.python.org/issue1215#msg143236\n """\n import ctypes\n\n ctypes.string_at(0)\n\n\ndef find_free_port() -> int:\n with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:\n s.bind(("", 0))\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n return s.getsockname()[1]\n\n\ndef is_port_in_use(host, port) -> bool:\n # Similar to the socket options that uvicorn uses to bind ports:\n # https://github.com/encode/uvicorn/blob/62f19c1c39929c84968712c371c9b7b96a041dec/uvicorn/config.py#L565-L566\n sock = socket.socket(family=socket.AF_INET)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n try:\n sock.bind((host, port))\n return False\n except socket.error as e:\n return e.errno == errno.EADDRINUSE\n finally:\n sock.close()\n\n\n@contextlib.contextmanager\ndef alter_sys_path(to_add: Sequence[str], to_remove: Sequence[str]) -> Iterator[None]:\n to_restore = [path for path in sys.path]\n\n # remove paths\n for path in to_remove:\n if path in sys.path:\n sys.path.remove(path)\n\n # add paths\n for path in to_add:\n sys.path.insert(0, path)\n\n try:\n yield\n finally:\n sys.path = to_restore\n\n\n@contextlib.contextmanager\ndef restore_sys_modules() -> Iterator[None]:\n sys_modules = {k: v for k, v in sys.modules.items()}\n try:\n yield\n finally:\n to_delete = set(sys.modules) - set(sys_modules)\n for key in to_delete:\n del sys.modules[key]\n\n\ndef process_is_alive(pid: int) -> bool:\n if seven.IS_WINDOWS:\n import psutil\n\n return psutil.pid_exists(pid=pid)\n else:\n try:\n subprocess.check_output(["ps", str(pid)])\n except subprocess.CalledProcessError as exc:\n assert exc.returncode == 1\n return False\n return True\n\n\ndef compose(*args):\n """Compose python functions args such that compose(f, g)(x) is equivalent to f(g(x)).""" # noqa: D402\n # reduce using functional composition over all the arguments, with the identity function as\n # initializer\n return functools.reduce(lambda f, g: lambda x: f(g(x)), args, lambda x: x)\n\n\ndef dict_without_keys(ddict, *keys):\n return {key: value for key, value in ddict.items() if key not in set(keys)}\n\n\nclass Counter:\n def __init__(self):\n self._lock = threading.Lock()\n self._counts = OrderedDict()\n super(Counter, self).__init__()\n\n def increment(self, key: str):\n with self._lock:\n self._counts[key] = self._counts.get(key, 0) + 1\n\n def counts(self) -> Mapping[str, int]:\n with self._lock:\n copy = {k: v for k, v in self._counts.items()}\n return copy\n\n\ntraced_counter = contextvars.ContextVar("traced_counts", default=Counter())\n\nT_Callable = TypeVar("T_Callable", bound=Callable)\n\n\ndef traced(func: T_Callable) -> T_Callable:\n """A decorator that keeps track of how many times a function is called."""\n\n @functools.wraps(func)\n def inner(*args, **kwargs):\n counter = traced_counter.get()\n if counter and isinstance(counter, Counter):\n counter.increment(func.__qualname__)\n\n return func(*args, **kwargs)\n\n return cast(T_Callable, inner)\n\n\ndef get_terminate_signal():\n if sys.platform == "win32":\n return signal.SIGTERM\n return 
signal.SIGKILL\n\n\ndef get_run_crash_explanation(prefix: str, exit_code: int):\n # As per https://docs.python.org/3/library/subprocess.html#subprocess.CompletedProcess.returncode\n # negative exit code means a posix signal\n if exit_code < 0 and -exit_code in [signal.value for signal in Signals]:\n posix_signal = -exit_code\n signal_str = Signals(posix_signal).name\n exit_clause = f"was terminated by signal {posix_signal} ({signal_str})."\n if posix_signal == get_terminate_signal():\n exit_clause = (\n exit_clause\n + " This usually indicates that the process was"\n " killed by the operating system due to running out of"\n " memory. Possible solutions include increasing the"\n " amount of memory available to the run, reducing"\n " the amount of memory used by the ops in the run, or"\n " configuring the executor to run fewer ops concurrently."\n )\n else:\n exit_clause = f"unexpectedly exited with code {exit_code}."\n\n return prefix + " " + exit_clause\n\n\ndef last_file_comp(path: str) -> str:\n return os.path.basename(os.path.normpath(path))\n\n\ndef is_named_tuple_instance(obj: object) -> TypeGuard[NamedTuple]:\n return isinstance(obj, tuple) and hasattr(obj, "_fields")\n\n\ndef is_named_tuple_subclass(klass: Type[object]) -> TypeGuard[Type[NamedTuple]]:\n return isinstance(klass, type) and issubclass(klass, tuple) and hasattr(klass, "_fields")\n\n\n@overload\ndef normalize_to_repository(\n definitions_or_repository: Optional[Union["Definitions", "RepositoryDefinition"]] = ...,\n repository: Optional["RepositoryDefinition"] = ...,\n error_on_none: Literal[True] = ...,\n) -> "RepositoryDefinition": ...\n\n\n@overload\ndef normalize_to_repository(\n definitions_or_repository: Optional[Union["Definitions", "RepositoryDefinition"]] = ...,\n repository: Optional["RepositoryDefinition"] = ...,\n error_on_none: Literal[False] = ...,\n) -> Optional["RepositoryDefinition"]: ...\n\n\ndef normalize_to_repository(\n definitions_or_repository: Optional[Union["Definitions", "RepositoryDefinition"]] = None,\n repository: Optional["RepositoryDefinition"] = None,\n error_on_none: bool = True,\n) -> Optional["RepositoryDefinition"]:\n """Normalizes the arguments that take a RepositoryDefinition or Definitions object to a\n RepositoryDefinition.\n\n This is intended to handle both the case where a single argument takes a\n `Union[RepositoryDefinition, Definitions]` or separate keyword arguments accept\n `RepositoryDefinition` or `Definitions`.\n """\n from dagster._core.definitions.definitions_class import Definitions\n\n if (definitions_or_repository and repository) or (\n error_on_none and not (definitions_or_repository or repository)\n ):\n check.failed("Exactly one of `definitions` or `repository_def` must be provided.")\n elif isinstance(definitions_or_repository, Definitions):\n return definitions_or_repository.get_repository_def()\n elif definitions_or_repository:\n return definitions_or_repository\n elif repository:\n return repository\n else:\n return None\n\n\ndef xor(a, b):\n return bool(a) != bool(b)\n\n\ndef tail_file(path_or_fd: Union[str, int], should_stop: Callable[[], bool]) -> Iterator[str]:\n with open(path_or_fd, "r") as output_stream:\n while True:\n line = output_stream.readline()\n if line:\n yield line\n elif should_stop():\n break\n else:\n time.sleep(0.01)\n
", "current_page_name": "_modules/dagster/_utils", "customsidebar": null, "dagster_type": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._utils.dagster_type

\nfrom typing import Any\n\nfrom dagster._core.definitions.events import Failure, TypeCheck\nfrom dagster._core.definitions.graph_definition import GraphDefinition\nfrom dagster._core.definitions.job_base import InMemoryJob\nfrom dagster._core.errors import DagsterInvariantViolationError\nfrom dagster._core.execution.api import create_execution_plan\nfrom dagster._core.execution.context_creation_job import scoped_job_context\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._core.types.dagster_type import resolve_dagster_type\n\nfrom .typing_api import is_typing_type\n\n\n
[docs]def check_dagster_type(dagster_type: Any, value: Any) -> TypeCheck:\n """Test a custom Dagster type.\n\n Args:\n dagster_type (Any): The Dagster type to test. Should be one of the\n :ref:`built-in types <builtin>`, a dagster type explicitly constructed with\n :py:func:`as_dagster_type`, :py:func:`@usable_as_dagster_type <dagster_type>`, or\n :py:func:`PythonObjectDagsterType`, or a Python type.\n value (Any): The runtime value to test.\n\n Returns:\n TypeCheck: The result of the type check.\n\n\n Examples:\n .. code-block:: python\n\n assert check_dagster_type(Dict[Any, Any], {'foo': 'bar'}).success\n """\n if is_typing_type(dagster_type):\n raise DagsterInvariantViolationError(\n f"Must pass in a type from dagster module. You passed {dagster_type} "\n "which is part of python's typing module."\n )\n\n dagster_type = resolve_dagster_type(dagster_type)\n\n job = InMemoryJob(GraphDefinition(node_defs=[], name="empty").to_job())\n job_def = job.get_definition()\n\n instance = DagsterInstance.ephemeral()\n execution_plan = create_execution_plan(job)\n dagster_run = instance.create_run_for_job(job_def)\n with scoped_job_context(execution_plan, job, {}, dagster_run, instance) as context:\n type_check_context = context.for_type(dagster_type)\n try:\n type_check = dagster_type.type_check(type_check_context, value)\n except Failure as failure:\n return TypeCheck(success=False, description=failure.description)\n\n if not isinstance(type_check, TypeCheck):\n raise DagsterInvariantViolationError(\n "Type checks can only return TypeCheck. Type {type_name} returned {value}.".format(\n type_name=dagster_type.display_name, value=repr(type_check)\n )\n )\n return type_check
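\n\n# --- Illustrative usage sketch (added commentary, not part of the original module). In a unit\n# test, check_dagster_type exercises a type check without running a job; the values below are\n# hypothetical.\n#\n# assert check_dagster_type(int, 1).success\n# assert not check_dagster_type(int, "not an int").success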
\n
", "current_page_name": "_modules/dagster/_utils/dagster_type", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}, {"link": "../", "title": "dagster._utils"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._utils.dagster_type"}, "favicon_url": null, "forked_pdb": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._utils.forked_pdb

\nimport pdb\nimport sys\n\n\n# From https://stackoverflow.com/questions/4716533/how-to-attach-debugger-to-a-python-subproccess\n
[docs]class ForkedPdb(pdb.Pdb):\n """A pdb subclass that may be used from a forked multiprocessing child.\n\n **Examples**:\n\n .. code-block:: python\n\n from dagster._utils.forked_pdb import ForkedPdb\n\n @solid\n def complex_solid(_):\n # some complicated stuff\n\n ForkedPdb().set_trace()\n\n # some other complicated stuff\n\n You can initiate pipeline execution via the webserver and use the pdb debugger to examine/step through\n execution at the breakpoint.\n """\n\n def interaction(self, frame, traceback):\n _stdin = sys.stdin\n try:\n sys.stdin = open("/dev/stdin", encoding="utf8")\n pdb.Pdb.interaction(self, frame, traceback)\n finally:\n sys.stdin = _stdin
\n
", "current_page_name": "_modules/dagster/_utils/forked_pdb", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}, {"link": "../", "title": "dagster._utils"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._utils.forked_pdb"}, "log": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._utils.log

\nimport copy\nimport logging\nimport sys\nimport traceback\nfrom typing import Mapping, NamedTuple, Optional\n\nimport coloredlogs\n\nimport dagster._check as check\nimport dagster._seven as seven\nfrom dagster._annotations import deprecated\nfrom dagster._config import Enum, EnumValue\nfrom dagster._core.definitions.logger_definition import logger\nfrom dagster._core.utils import PYTHON_LOGGING_LEVELS_MAPPING, coerce_valid_log_level\n\nLogLevelEnum = Enum("log_level", list(map(EnumValue, PYTHON_LOGGING_LEVELS_MAPPING.keys())))\n\n\nclass JsonFileHandler(logging.Handler):\n    def __init__(self, json_path: str):\n        super(JsonFileHandler, self).__init__()\n        self.json_path = check.str_param(json_path, "json_path")\n\n    def emit(self, record: logging.LogRecord) -> None:\n        try:\n            log_dict = copy.copy(record.__dict__)\n\n            # This horrific monstrosity is to maintain backwards compatability\n            # with the old behavior of the JsonFileHandler, which the clarify\n            # project has a dependency on. It relied on the dagster-defined\n            # properties smashing all the properties of the LogRecord object\n            # and uploads all of those properties to a redshift table for\n            # in order to do analytics on the log\n\n            if "dagster_meta" in log_dict:\n                dagster_meta_dict = log_dict["dagster_meta"]\n                del log_dict["dagster_meta"]\n            else:\n                dagster_meta_dict = {}\n\n            log_dict.update(dagster_meta_dict)\n\n            with open(self.json_path, "a", encoding="utf8") as ff:\n                text_line = seven.json.dumps(log_dict)\n                ff.write(text_line + "\\n")\n        # Need to catch Exception here, so disabling lint\n        except Exception as e:\n            logging.critical("[%s] Error during logging!", self.__class__.__name__)\n            logging.exception(str(e))\n\n\nclass StructuredLoggerMessage(\n    NamedTuple(\n        "_StructuredLoggerMessage",\n        [\n            ("name", str),\n            ("message", str),\n            ("level", int),\n            ("meta", Mapping[object, object]),\n            ("record", logging.LogRecord),\n        ],\n    )\n):\n    def __new__(\n        cls,\n        name: str,\n        message: str,\n        level: int,\n        meta: Mapping[object, object],\n        record: logging.LogRecord,\n    ):\n        return super(StructuredLoggerMessage, cls).__new__(\n            cls,\n            check.str_param(name, "name"),\n            check.str_param(message, "message"),\n            coerce_valid_log_level(level),\n            check.mapping_param(meta, "meta"),\n            check.inst_param(record, "record", logging.LogRecord),\n        )\n\n\nclass JsonEventLoggerHandler(logging.Handler):\n    def __init__(self, json_path: str, construct_event_record):\n        super(JsonEventLoggerHandler, self).__init__()\n        self.json_path = check.str_param(json_path, "json_path")\n        self.construct_event_record = construct_event_record\n\n    def emit(self, record: logging.LogRecord) -> None:\n        try:\n            event_record = self.construct_event_record(record)\n            with open(self.json_path, "a", encoding="utf8") as ff:\n                text_line = seven.json.dumps(event_record.to_dict())\n                ff.write(text_line + "\\n")\n\n        # Need to catch Exception here, so disabling lint\n        except Exception as e:\n            logging.critical("[%s] Error during logging!", 
self.__class__.__name__)\n            logging.exception(str(e))\n\n\nclass StructuredLoggerHandler(logging.Handler):\n    def __init__(self, callback):\n        super(StructuredLoggerHandler, self).__init__()\n        self.callback = check.is_callable(callback, "callback")\n\n    def emit(self, record: logging.LogRecord) -> None:\n        try:\n            self.callback(\n                StructuredLoggerMessage(\n                    name=record.name,\n                    message=record.msg,\n                    level=record.levelno,\n                    meta=record.dagster_meta,  # type: ignore\n                    record=record,\n                )\n            )\n        # Need to catch Exception here, so disabling lint\n        except Exception as e:\n            logging.critical("[%s] Error during logging!", self.__class__.__name__)\n            logging.exception(str(e))\n\n\ndef construct_single_handler_logger(name, level, handler):\n    check.str_param(name, "name")\n    check.inst_param(handler, "handler", logging.Handler)\n\n    level = coerce_valid_log_level(level)\n\n    @logger\n    def single_handler_logger(_init_context):\n        klass = logging.getLoggerClass()\n        logger_ = klass(name, level=level)\n        logger_.addHandler(handler)\n        handler.setLevel(level)\n        return logger_\n\n    return single_handler_logger\n\n\n# Base python logger whose messages will be captured as structured Dagster log messages.\nBASE_DAGSTER_LOGGER = logging.getLogger(name="dagster")\n\n\n
[docs]def get_dagster_logger(name: Optional[str] = None) -> logging.Logger:\n """Creates a python logger whose output messages will be captured and converted into Dagster log\n messages. This means they will have structured information such as the step_key, run_id, etc.\n embedded into them, and will show up in the Dagster event log.\n\n This can be used as a more convenient alternative to `context.log` in most cases. If log level\n is not set explicitly, defaults to DEBUG.\n\n Args:\n name (Optional[str]): If supplied, will create a logger with the name "dagster.builtin.{name}",\n with properties inherited from the base Dagster logger. If omitted, the returned logger\n will be named "dagster.builtin".\n\n Returns:\n :class:`logging.Logger`: A logger whose output will be captured by Dagster.\n\n Example:\n .. code-block:: python\n\n from dagster import get_dagster_logger, op\n\n @op\n def hello_op():\n log = get_dagster_logger()\n for i in range(5):\n # do something\n log.info(f"Did {i+1} things!")\n\n """\n # enforce that the parent logger will always have a DEBUG log level\n BASE_DAGSTER_LOGGER.setLevel(logging.DEBUG)\n base_builtin = BASE_DAGSTER_LOGGER.getChild("builtin")\n if name:\n return base_builtin.getChild(name)\n return base_builtin
\n\n\ndef define_structured_logger(name, callback, level):\n check.str_param(name, "name")\n check.callable_param(callback, "callback")\n level = coerce_valid_log_level(level)\n\n return construct_single_handler_logger(name, level, StructuredLoggerHandler(callback))\n\n\ndef define_json_file_logger(name, json_path, level):\n check.str_param(name, "name")\n check.str_param(json_path, "json_path")\n level = coerce_valid_log_level(level)\n\n stream_handler = JsonFileHandler(json_path)\n stream_handler.setFormatter(define_default_formatter())\n return construct_single_handler_logger(name, level, stream_handler)\n\n\ndef get_stack_trace_array(exception):\n check.inst_param(exception, "exception", Exception)\n if hasattr(exception, "__traceback__"):\n tb = exception.__traceback__\n else:\n _exc_type, _exc_value, tb = sys.exc_info()\n return traceback.format_tb(tb)\n\n\ndef default_format_string():\n return "%(asctime)s - %(name)s - %(levelname)s - %(message)s"\n\n\ndef default_date_format_string():\n return "%Y-%m-%d %H:%M:%S %z"\n\n\ndef define_default_formatter():\n return logging.Formatter(default_format_string(), default_date_format_string())\n\n\n@deprecated(\n breaking_version="2.0",\n subject="loggers.dagit",\n emit_runtime_warning=False,\n)\ndef configure_loggers(handler="default", log_level="INFO"):\n LOGGING_CONFIG = {\n "version": 1,\n "disable_existing_loggers": False,\n "formatters": {\n "colored": {\n "()": coloredlogs.ColoredFormatter,\n "fmt": default_format_string(),\n "datefmt": default_date_format_string(),\n "field_styles": {"levelname": {"color": "blue"}, "asctime": {"color": "green"}},\n "level_styles": {"debug": {}, "error": {"color": "red"}},\n },\n },\n "handlers": {\n "default": {\n "formatter": "colored",\n "class": "logging.StreamHandler",\n "stream": sys.stdout,\n "level": log_level,\n },\n "null": {\n "class": "logging.NullHandler",\n },\n },\n "loggers": {\n "dagster": {\n "handlers": [handler],\n "level": log_level,\n },\n # Only one of dagster or dagster-webserver will be used at a time. We configure them\n # both here to avoid a dependency on the dagster-webserver package.\n "dagit": {\n "handlers": [handler],\n "level": log_level,\n },\n "dagster-webserver": {\n "handlers": [handler],\n "level": log_level,\n },\n },\n }\n\n logging.config.dictConfig(LOGGING_CONFIG)\n\n\ndef create_console_logger(name, level):\n klass = logging.getLoggerClass()\n handler = klass(name, level=level)\n coloredlogs.install(\n logger=handler,\n level=level,\n fmt=default_format_string(),\n datefmt=default_date_format_string(),\n field_styles={"levelname": {"color": "blue"}, "asctime": {"color": "green"}},\n level_styles={"debug": {}, "error": {"color": "red"}},\n )\n return handler\n
", "current_page_name": "_modules/dagster/_utils/log", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}, {"link": "../", "title": "dagster._utils"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._utils.log"}, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._utils", "warnings": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._utils.warnings

\nimport warnings\nfrom contextlib import contextmanager\nfrom typing import Callable, Iterator, Optional, TypeVar\n\nimport dagster._check as check\nfrom dagster._core.decorator_utils import (\n    Decoratable,\n    apply_context_manager_decorator,\n)\n\nT = TypeVar("T")\n\n# ########################\n# ##### DEPRECATED\n# ########################\n\n\ndef normalize_renamed_param(\n    new_val: T,\n    new_arg: str,\n    old_val: T,\n    old_arg: str,\n    coerce_old_to_new: Optional[Callable[[T], T]] = None,\n) -> T:\n    """Utility for managing backwards compatibility of a renamed parameter.\n\n    .. code-block::\n\n       # The name of param `old_flag` is being updated to `new_flag`, but we are temporarily\n       # accepting either param.\n       def is_new(old_flag=None, new_flag=None):\n           return canonicalize_backcompat_args(\n               new_val=new_flag,\n               new_arg='new_flag',\n               old_val=old_flag,\n               old_arg='old_flag',\n               breaking_version='0.9.0',\n               coerce_old_to_new=lambda val: not val,\n           )\n\n    In the above example, if the caller sets both new_flag and old_flag, it will fail by throwing\n    a CheckError. If the caller sets the new_flag, it's returned unaltered. If the caller sets\n    old_flag, it will return the old_flag run through the coercion function.\n    """\n    check.str_param(new_arg, "new_arg")\n    check.str_param(old_arg, "old_arg")\n    check.opt_callable_param(coerce_old_to_new, "coerce_old_to_new")\n    if new_val is not None and old_val is not None:\n        check.failed(f'Do not use deprecated "{old_arg}" now that you are using "{new_arg}".')\n    elif old_val is not None:\n        return coerce_old_to_new(old_val) if coerce_old_to_new else old_val\n    else:\n        return new_val\n\n\ndef deprecation_warning(\n    subject: str,\n    breaking_version: str,\n    additional_warn_text: Optional[str] = None,\n    stacklevel: int = 3,\n):\n    warnings.warn(\n        f"{subject} is deprecated and will be removed in {breaking_version}."\n        + ((" " + additional_warn_text) if additional_warn_text else ""),\n        category=DeprecationWarning,\n        stacklevel=stacklevel,\n    )\n\n\n# ########################\n# ##### EXPERIMENTAL\n# ########################\n\nEXPERIMENTAL_WARNING_HELP = (\n    "To mute warnings for experimental functionality, invoke"\n    ' warnings.filterwarnings("ignore", category=dagster.ExperimentalWarning) or use'\n    " one of the other methods described at"\n    " https://docs.python.org/3/library/warnings.html#describing-warning-filters."\n)\n\n\n
[docs]class ExperimentalWarning(Warning):\n pass
\n\n\ndef experimental_warning(\n subject: str, additional_warn_text: Optional[str] = None, stacklevel: int = 3\n) -> None:\n extra_text = f" {additional_warn_text}" if additional_warn_text else ""\n warnings.warn(\n f"{subject} is experimental. It may break in future versions, even between dot"\n f" releases.{extra_text} {EXPERIMENTAL_WARNING_HELP}",\n ExperimentalWarning,\n stacklevel=stacklevel,\n )\n\n\n# ########################\n# ##### DISABLE DAGSTER WARNINGS\n# ########################\n\n\n@contextmanager\ndef disable_dagster_warnings() -> Iterator[None]:\n with warnings.catch_warnings():\n warnings.simplefilter("ignore", category=DeprecationWarning)\n warnings.simplefilter("ignore", category=ExperimentalWarning)\n yield\n\n\nT_Decoratable = TypeVar("T_Decoratable", bound=Decoratable)\n\n\ndef suppress_dagster_warnings(__obj: T_Decoratable) -> T_Decoratable:\n """Mark a method/function as ignoring Dagster-generated warnings. This suppresses any\n `ExperimentalWarnings` or `DeprecationWarnings` when the function is called.\n\n Usage:\n\n .. code-block:: python\n\n @suppress_dagster_warnings\n def invokes_some_experimental_stuff(my_arg):\n my_experimental_function(my_arg)\n """\n return apply_context_manager_decorator(__obj, disable_dagster_warnings)\n
", "current_page_name": "_modules/dagster/_utils/warnings", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}, {"link": "../", "title": "dagster._utils"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._utils.warnings"}}}, "dagster_airbyte": {"asset_defs": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_airbyte.asset_defs

\nimport hashlib\nimport inspect\nimport os\nimport re\nfrom abc import abstractmethod\nfrom functools import partial\nfrom itertools import chain\nfrom typing import (\n    Any,\n    Callable,\n    Dict,\n    Iterable,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n    Union,\n    cast,\n)\n\nimport yaml\nfrom dagster import (\n    AssetKey,\n    AssetOut,\n    AutoMaterializePolicy,\n    FreshnessPolicy,\n    Nothing,\n    Output,\n    ResourceDefinition,\n    SourceAsset,\n    _check as check,\n)\nfrom dagster._core.definitions import AssetsDefinition, multi_asset\nfrom dagster._core.definitions.cacheable_assets import (\n    AssetsDefinitionCacheableData,\n    CacheableAssetsDefinition,\n)\nfrom dagster._core.definitions.events import CoercibleToAssetKey, CoercibleToAssetKeyPrefix\nfrom dagster._core.definitions.metadata import MetadataValue, TableSchemaMetadataValue\nfrom dagster._core.definitions.metadata.table import TableSchema\nfrom dagster._core.errors import DagsterInvalidDefinitionError, DagsterInvalidInvocationError\nfrom dagster._core.execution.context.init import build_init_resource_context\nfrom dagster._utils.merger import merge_dicts\n\nfrom dagster_airbyte.resources import AirbyteCloudResource, AirbyteResource, BaseAirbyteResource\nfrom dagster_airbyte.types import AirbyteTableMetadata\nfrom dagster_airbyte.utils import (\n    generate_materializations,\n    generate_table_schema,\n    is_basic_normalization_operation,\n)\n\n\ndef _table_to_output_name_fn(table: str) -> str:\n    return table.replace("-", "_")\n\n\ndef _build_airbyte_asset_defn_metadata(\n    connection_id: str,\n    destination_tables: Sequence[str],\n    table_to_asset_key_fn: Callable[[str], AssetKey],\n    asset_key_prefix: Optional[Sequence[str]] = None,\n    normalization_tables: Optional[Mapping[str, Set[str]]] = None,\n    upstream_assets: Optional[Iterable[AssetKey]] = None,\n    group_name: Optional[str] = None,\n    io_manager_key: Optional[str] = None,\n    schema_by_table_name: Optional[Mapping[str, TableSchema]] = None,\n    freshness_policy: Optional[FreshnessPolicy] = None,\n    auto_materialize_policy: Optional[AutoMaterializePolicy] = None,\n) -> AssetsDefinitionCacheableData:\n    asset_key_prefix = (\n        check.opt_sequence_param(asset_key_prefix, "asset_key_prefix", of_type=str) or []\n    )\n\n    # Generate a list of outputs, the set of destination tables plus any affiliated\n    # normalization tables\n    tables = list(\n        chain.from_iterable(\n            chain(\n                [destination_tables], normalization_tables.values() if normalization_tables else []\n            )\n        )\n    )\n\n    outputs = {\n        _table_to_output_name_fn(table): AssetKey(\n            [*asset_key_prefix, *table_to_asset_key_fn(table).path]\n        )\n        for table in tables\n    }\n\n    internal_deps: Dict[str, Set[AssetKey]] = {}\n\n    metadata_encodable_normalization_tables = (\n        {k: list(v) for k, v in normalization_tables.items()} if normalization_tables else {}\n    )\n\n    # If normalization tables are specified, we need to add a dependency from the destination table\n    # to the affilitated normalization table\n    if len(metadata_encodable_normalization_tables) > 0:\n        for base_table, derived_tables in metadata_encodable_normalization_tables.items():\n            for derived_table in derived_tables:\n                internal_deps[derived_table] = {\n                    AssetKey([*asset_key_prefix, 
*table_to_asset_key_fn(base_table).path])\n                }\n\n    # All non-normalization tables depend on any user-provided upstream assets\n    for table in destination_tables:\n        internal_deps[table] = set(upstream_assets or [])\n\n    return AssetsDefinitionCacheableData(\n        keys_by_input_name=(\n            {asset_key.path[-1]: asset_key for asset_key in upstream_assets}\n            if upstream_assets\n            else {}\n        ),\n        keys_by_output_name=outputs,\n        internal_asset_deps=internal_deps,\n        group_name=group_name,\n        key_prefix=asset_key_prefix,\n        can_subset=False,\n        metadata_by_output_name=(\n            {\n                table: {"table_schema": MetadataValue.table_schema(schema_by_table_name[table])}\n                for table in tables\n            }\n            if schema_by_table_name\n            else None\n        ),\n        freshness_policies_by_output_name=(\n            {output: freshness_policy for output in outputs} if freshness_policy else None\n        ),\n        auto_materialize_policies_by_output_name=(\n            {output: auto_materialize_policy for output in outputs}\n            if auto_materialize_policy\n            else None\n        ),\n        extra_metadata={\n            "connection_id": connection_id,\n            "group_name": group_name,\n            "destination_tables": destination_tables,\n            "normalization_tables": metadata_encodable_normalization_tables,\n            "io_manager_key": io_manager_key,\n        },\n    )\n\n\ndef _build_airbyte_assets_from_metadata(\n    assets_defn_meta: AssetsDefinitionCacheableData,\n    resource_defs: Optional[Mapping[str, ResourceDefinition]],\n) -> AssetsDefinition:\n    metadata = cast(Mapping[str, Any], assets_defn_meta.extra_metadata)\n    connection_id = cast(str, metadata["connection_id"])\n    group_name = cast(Optional[str], metadata["group_name"])\n    destination_tables = cast(List[str], metadata["destination_tables"])\n    normalization_tables = cast(Mapping[str, List[str]], metadata["normalization_tables"])\n    io_manager_key = cast(Optional[str], metadata["io_manager_key"])\n\n    @multi_asset(\n        name=f"airbyte_sync_{connection_id[:5]}",\n        deps=list((assets_defn_meta.keys_by_input_name or {}).values()),\n        outs={\n            k: AssetOut(\n                key=v,\n                metadata=(\n                    {\n                        k: cast(TableSchemaMetadataValue, v)\n                        for k, v in assets_defn_meta.metadata_by_output_name.get(k, {}).items()\n                    }\n                    if assets_defn_meta.metadata_by_output_name\n                    else None\n                ),\n                io_manager_key=io_manager_key,\n                freshness_policy=(\n                    assets_defn_meta.freshness_policies_by_output_name.get(k)\n                    if assets_defn_meta.freshness_policies_by_output_name\n                    else None\n                ),\n                dagster_type=Nothing,\n            )\n            for k, v in (assets_defn_meta.keys_by_output_name or {}).items()\n        },\n        internal_asset_deps={\n            k: set(v) for k, v in (assets_defn_meta.internal_asset_deps or {}).items()\n        },\n        compute_kind="airbyte",\n        group_name=group_name,\n        resource_defs=resource_defs,\n    )\n    def _assets(context, airbyte: AirbyteResource):\n        ab_output = airbyte.sync_and_poll(connection_id=connection_id)\n        for 
materialization in generate_materializations(\n            ab_output, assets_defn_meta.key_prefix or []\n        ):\n            table_name = materialization.asset_key.path[-1]\n            if table_name in destination_tables:\n                yield Output(\n                    value=None,\n                    output_name=_table_to_output_name_fn(table_name),\n                    metadata=materialization.metadata,\n                )\n                # Also materialize any normalization tables affiliated with this destination\n                # e.g. nested objects, lists etc\n                if normalization_tables:\n                    for dependent_table in normalization_tables.get(table_name, set()):\n                        yield Output(\n                            value=None,\n                            output_name=_table_to_output_name_fn(dependent_table),\n                        )\n            else:\n                yield materialization\n\n    return _assets\n\n\n
[docs]def build_airbyte_assets(\n connection_id: str,\n destination_tables: Sequence[str],\n asset_key_prefix: Optional[Sequence[str]] = None,\n group_name: Optional[str] = None,\n normalization_tables: Optional[Mapping[str, Set[str]]] = None,\n deps: Optional[Iterable[Union[CoercibleToAssetKey, AssetsDefinition, SourceAsset]]] = None,\n upstream_assets: Optional[Set[AssetKey]] = None,\n schema_by_table_name: Optional[Mapping[str, TableSchema]] = None,\n freshness_policy: Optional[FreshnessPolicy] = None,\n stream_to_asset_map: Optional[Mapping[str, str]] = None,\n) -> Sequence[AssetsDefinition]:\n """Builds a set of assets representing the tables created by an Airbyte sync operation.\n\n Args:\n connection_id (str): The Airbyte Connection ID that this op will sync. You can retrieve this\n value from the "Connections" tab of a given connector in the Airbyte UI.\n destination_tables (List[str]): The names of the tables that you want to be represented\n in the Dagster asset graph for this sync. This will generally map to the name of the\n stream in Airbyte, unless a stream prefix has been specified in Airbyte.\n normalization_tables (Optional[Mapping[str, List[str]]]): If you are using Airbyte's\n normalization feature, you may specify a mapping of destination table to a list of\n derived tables that will be created by the normalization process.\n asset_key_prefix (Optional[List[str]]): A prefix for the asset keys inside this asset.\n If left blank, assets will have a key of `AssetKey([table_name])`.\n deps (Optional[Sequence[Union[AssetsDefinition, SourceAsset, str, AssetKey]]]):\n A list of assets to add as sources.\n upstream_assets (Optional[Set[AssetKey]]): Deprecated, use deps instead. A list of assets to add as sources.\n freshness_policy (Optional[FreshnessPolicy]): A freshness policy to apply to the assets\n stream_to_asset_map (Optional[Mapping[str, str]]): A mapping of an Airbyte stream name to a Dagster asset.\n This allows the use of the "prefix" setting in Airbyte with special characters that aren't valid asset names.\n """\n if upstream_assets is not None and deps is not None:\n raise DagsterInvalidDefinitionError(\n "Cannot specify both deps and upstream_assets to build_airbyte_assets. 
Use only deps"\n " instead."\n )\n\n asset_key_prefix = check.opt_sequence_param(asset_key_prefix, "asset_key_prefix", of_type=str)\n\n # Generate a list of outputs, the set of destination tables plus any affiliated\n # normalization tables\n tables = chain.from_iterable(\n chain([destination_tables], normalization_tables.values() if normalization_tables else [])\n )\n outputs = {\n table: AssetOut(\n key=AssetKey([*asset_key_prefix, table]),\n metadata=(\n {"table_schema": MetadataValue.table_schema(schema_by_table_name[table])}\n if schema_by_table_name\n else None\n ),\n freshness_policy=freshness_policy,\n )\n for table in tables\n }\n\n internal_deps = {}\n\n # If normalization tables are specified, we need to add a dependency from the destination table\n # to the affilitated normalization table\n if normalization_tables:\n for base_table, derived_tables in normalization_tables.items():\n for derived_table in derived_tables:\n internal_deps[derived_table] = {AssetKey([*asset_key_prefix, base_table])}\n\n upstream_deps = deps\n if upstream_assets is not None:\n upstream_deps = list(upstream_assets)\n\n # All non-normalization tables depend on any user-provided upstream assets\n for table in destination_tables:\n internal_deps[table] = set(upstream_deps) if upstream_deps else set()\n\n @multi_asset(\n name=f"airbyte_sync_{connection_id[:5]}",\n deps=upstream_deps,\n outs=outputs,\n internal_asset_deps=internal_deps,\n compute_kind="airbyte",\n group_name=group_name,\n )\n def _assets(context, airbyte: BaseAirbyteResource):\n ab_output = airbyte.sync_and_poll(connection_id=connection_id)\n\n # No connection details (e.g. using Airbyte Cloud) means we just assume\n # that the outputs were produced\n if len(ab_output.connection_details) == 0:\n for table_name in destination_tables:\n yield Output(\n value=None,\n output_name=_table_to_output_name_fn(table_name),\n )\n if normalization_tables:\n for dependent_table in normalization_tables.get(table_name, set()):\n yield Output(\n value=None,\n output_name=_table_to_output_name_fn(dependent_table),\n )\n else:\n for materialization in generate_materializations(\n ab_output, asset_key_prefix, stream_to_asset_map\n ):\n table_name = materialization.asset_key.path[-1]\n if table_name in destination_tables:\n yield Output(\n value=None,\n output_name=_table_to_output_name_fn(table_name),\n metadata=materialization.metadata,\n )\n # Also materialize any normalization tables affiliated with this destination\n # e.g. nested objects, lists etc\n if normalization_tables:\n for dependent_table in normalization_tables.get(table_name, set()):\n yield Output(\n value=None,\n output_name=_table_to_output_name_fn(dependent_table),\n )\n else:\n yield materialization\n\n return [_assets]
\n\n\ndef _get_schema_types(schema: Mapping[str, Any]) -> Sequence[str]:\n """Given a schema definition, return a list of data types that are valid for this schema."""\n types = schema.get("types") or schema.get("type")\n if not types:\n return []\n if isinstance(types, str):\n return [types]\n return types\n\n\ndef _get_sub_schemas(schema: Mapping[str, Any]) -> Sequence[Mapping[str, Any]]:\n """Returns a list of sub-schema definitions for a given schema. This is used to handle union types."""\n return schema.get("anyOf") or schema.get("oneOf") or [schema]\n\n\ndef _get_normalization_tables_for_schema(\n key: str, schema: Mapping[str, Any], prefix: str = ""\n) -> Mapping[str, AirbyteTableMetadata]:\n """Recursively traverses a schema, returning metadata for the tables that will be created by the Airbyte\n normalization process.\n\n For example, a table `cars` with a nested object field `limited_editions` will produce the tables\n `cars` and `cars_limited_editions`.\n\n For more information on Airbyte's normalization process, see:\n https://docs.airbyte.com/understanding-airbyte/basic-normalization/#nesting\n """\n out: Dict[str, AirbyteTableMetadata] = {}\n # Object types are broken into a new table, as long as they have children\n\n sub_schemas = _get_sub_schemas(schema)\n\n for sub_schema in sub_schemas:\n schema_types = _get_schema_types(sub_schema)\n if not schema_types:\n continue\n\n if "object" in schema_types and len(sub_schema.get("properties", {})) > 0:\n out[prefix + key] = AirbyteTableMetadata(\n schema=generate_table_schema(sub_schema.get("properties", {}))\n )\n for k, v in sub_schema["properties"].items():\n out = merge_dicts(\n out, _get_normalization_tables_for_schema(k, v, f"{prefix}{key}_")\n )\n # Array types are also broken into a new table\n elif "array" in schema_types:\n out[prefix + key] = AirbyteTableMetadata(\n schema=generate_table_schema(sub_schema.get("items", {}).get("properties", {}))\n )\n if sub_schema.get("items", {}).get("properties"):\n for k, v in sub_schema["items"]["properties"].items():\n out = merge_dicts(\n out, _get_normalization_tables_for_schema(k, v, f"{prefix}{key}_")\n )\n\n return out\n\n\ndef _clean_name(name: str) -> str:\n """Cleans an input to be a valid Dagster asset name."""\n return re.sub(r"[^a-z0-9]+", "_", name.lower())\n\n\nclass AirbyteConnectionMetadata(\n NamedTuple(\n "_AirbyteConnectionMetadata",\n [\n ("name", str),\n ("stream_prefix", str),\n ("has_basic_normalization", bool),\n ("stream_data", List[Mapping[str, Any]]),\n ],\n )\n):\n """Contains information about an Airbyte connection.\n\n Attributes:\n name (str): The name of the connection.\n stream_prefix (str): A prefix to add to all stream names.\n has_basic_normalization (bool): Whether or not the connection has basic normalization enabled.\n stream_data (List[Mapping[str, Any]]): Unparsed list of dicts with information about each stream.\n """\n\n @classmethod\n def from_api_json(\n cls, contents: Mapping[str, Any], operations: Mapping[str, Any]\n ) -> "AirbyteConnectionMetadata":\n return cls(\n name=contents["name"],\n stream_prefix=contents.get("prefix", ""),\n has_basic_normalization=any(\n is_basic_normalization_operation(op.get("operatorConfiguration", {}))\n for op in operations.get("operations", [])\n ),\n stream_data=contents.get("syncCatalog", {}).get("streams", []),\n )\n\n @classmethod\n def from_config(cls, contents: Mapping[str, Any]) -> "AirbyteConnectionMetadata":\n config_contents = cast(Mapping[str, Any], contents.get("configuration"))\n 
check.invariant(\n config_contents is not None, "Airbyte connection config is missing 'configuration' key"\n )\n\n return cls(\n name=contents["resource_name"],\n stream_prefix=config_contents.get("prefix", ""),\n has_basic_normalization=any(\n is_basic_normalization_operation(op.get("operator_configuration", {}))\n for op in config_contents.get("operations", [])\n ),\n stream_data=config_contents.get("sync_catalog", {}).get("streams", []),\n )\n\n def parse_stream_tables(\n self, return_normalization_tables: bool = False\n ) -> Mapping[str, AirbyteTableMetadata]:\n """Parses the stream data and returns a mapping, with keys representing destination\n tables associated with each enabled stream and values representing any affiliated\n tables created by Airbyte's normalization process, if enabled.\n """\n tables: Dict[str, AirbyteTableMetadata] = {}\n\n enabled_streams = [\n stream for stream in self.stream_data if stream.get("config", {}).get("selected", False)\n ]\n\n for stream in enabled_streams:\n name = cast(str, stream.get("stream", {}).get("name"))\n prefixed_name = f"{self.stream_prefix}{name}"\n\n schema = (\n stream["stream"]["json_schema"]\n if "json_schema" in stream["stream"]\n else stream["stream"]["jsonSchema"]\n )\n normalization_tables: Dict[str, AirbyteTableMetadata] = {}\n schema_props = schema.get("properties", schema.get("items", {}).get("properties", {}))\n if self.has_basic_normalization and return_normalization_tables:\n for k, v in schema_props.items():\n for normalization_table_name, meta in _get_normalization_tables_for_schema(\n k, v, f"{name}_"\n ).items():\n prefixed_norm_table_name = f"{self.stream_prefix}{normalization_table_name}"\n normalization_tables[prefixed_norm_table_name] = meta\n tables[prefixed_name] = AirbyteTableMetadata(\n schema=generate_table_schema(schema_props),\n normalization_tables=normalization_tables,\n )\n\n return tables\n\n\ndef _get_schema_by_table_name(\n stream_table_metadata: Mapping[str, AirbyteTableMetadata]\n) -> Mapping[str, TableSchema]:\n schema_by_base_table_name = [(k, v.schema) for k, v in stream_table_metadata.items()]\n schema_by_normalization_table_name = list(\n chain.from_iterable(\n [\n [\n (k, v.schema)\n for k, v in cast(\n Dict[str, AirbyteTableMetadata], meta.normalization_tables\n ).items()\n ]\n for meta in stream_table_metadata.values()\n ]\n )\n )\n\n return dict(schema_by_normalization_table_name + schema_by_base_table_name)\n\n\nclass AirbyteCoreCacheableAssetsDefinition(CacheableAssetsDefinition):\n def __init__(\n self,\n key_prefix: Sequence[str],\n create_assets_for_normalization_tables: bool,\n connection_to_group_fn: Optional[Callable[[str], Optional[str]]],\n connection_to_io_manager_key_fn: Optional[Callable[[str], Optional[str]]],\n connection_filter: Optional[Callable[[AirbyteConnectionMetadata], bool]],\n connection_to_asset_key_fn: Optional[Callable[[AirbyteConnectionMetadata, str], AssetKey]],\n connection_to_freshness_policy_fn: Optional[\n Callable[[AirbyteConnectionMetadata], Optional[FreshnessPolicy]]\n ],\n connection_to_auto_materialize_policy_fn: Optional[\n Callable[[AirbyteConnectionMetadata], Optional[AutoMaterializePolicy]]\n ] = None,\n ):\n self._key_prefix = key_prefix\n self._create_assets_for_normalization_tables = create_assets_for_normalization_tables\n self._connection_to_group_fn = connection_to_group_fn\n self._connection_to_io_manager_key_fn = connection_to_io_manager_key_fn\n self._connection_filter = connection_filter\n self._connection_to_asset_key_fn: 
Callable[[AirbyteConnectionMetadata, str], AssetKey] = (\n connection_to_asset_key_fn or (lambda _, table: AssetKey(path=[table]))\n )\n self._connection_to_freshness_policy_fn = connection_to_freshness_policy_fn or (\n lambda _: None\n )\n self._connection_to_auto_materialize_policy_fn = (\n connection_to_auto_materialize_policy_fn or (lambda _: None)\n )\n\n contents = hashlib.sha1() # so that hexdigest is 40, not 64 bytes\n contents.update(",".join(key_prefix).encode("utf-8"))\n contents.update(str(create_assets_for_normalization_tables).encode("utf-8"))\n if connection_filter:\n contents.update(inspect.getsource(connection_filter).encode("utf-8"))\n\n super().__init__(unique_id=f"airbyte-{contents.hexdigest()}")\n\n @abstractmethod\n def _get_connections(self) -> Sequence[Tuple[str, AirbyteConnectionMetadata]]:\n pass\n\n def compute_cacheable_data(self) -> Sequence[AssetsDefinitionCacheableData]:\n asset_defn_data: List[AssetsDefinitionCacheableData] = []\n for connection_id, connection in self._get_connections():\n stream_table_metadata = connection.parse_stream_tables(\n self._create_assets_for_normalization_tables\n )\n schema_by_table_name = _get_schema_by_table_name(stream_table_metadata)\n\n table_to_asset_key = partial(self._connection_to_asset_key_fn, connection)\n asset_data_for_conn = _build_airbyte_asset_defn_metadata(\n connection_id=connection_id,\n destination_tables=list(stream_table_metadata.keys()),\n normalization_tables={\n table: set(metadata.normalization_tables.keys())\n for table, metadata in stream_table_metadata.items()\n },\n asset_key_prefix=self._key_prefix,\n group_name=(\n self._connection_to_group_fn(connection.name)\n if self._connection_to_group_fn\n else None\n ),\n io_manager_key=(\n self._connection_to_io_manager_key_fn(connection.name)\n if self._connection_to_io_manager_key_fn\n else None\n ),\n schema_by_table_name=schema_by_table_name,\n table_to_asset_key_fn=table_to_asset_key,\n freshness_policy=self._connection_to_freshness_policy_fn(connection),\n auto_materialize_policy=self._connection_to_auto_materialize_policy_fn(connection),\n )\n\n asset_defn_data.append(asset_data_for_conn)\n\n return asset_defn_data\n\n def _build_definitions_with_resources(\n self,\n data: Sequence[AssetsDefinitionCacheableData],\n resource_defs: Optional[Mapping[str, ResourceDefinition]] = None,\n ) -> Sequence[AssetsDefinition]:\n return [_build_airbyte_assets_from_metadata(meta, resource_defs) for meta in data]\n\n def build_definitions(\n self, data: Sequence[AssetsDefinitionCacheableData]\n ) -> Sequence[AssetsDefinition]:\n return self._build_definitions_with_resources(data)\n\n\nclass AirbyteInstanceCacheableAssetsDefinition(AirbyteCoreCacheableAssetsDefinition):\n def __init__(\n self,\n airbyte_resource_def: Union[ResourceDefinition, AirbyteResource],\n workspace_id: Optional[str],\n key_prefix: Sequence[str],\n create_assets_for_normalization_tables: bool,\n connection_to_group_fn: Optional[Callable[[str], Optional[str]]],\n connection_to_io_manager_key_fn: Optional[Callable[[str], Optional[str]]],\n connection_filter: Optional[Callable[[AirbyteConnectionMetadata], bool]],\n connection_to_asset_key_fn: Optional[Callable[[AirbyteConnectionMetadata, str], AssetKey]],\n connection_to_freshness_policy_fn: Optional[\n Callable[[AirbyteConnectionMetadata], Optional[FreshnessPolicy]]\n ],\n connection_to_auto_materialize_policy_fn: Optional[\n Callable[[AirbyteConnectionMetadata], Optional[AutoMaterializePolicy]]\n ] = None,\n ):\n super().__init__(\n 
key_prefix=key_prefix,\n create_assets_for_normalization_tables=create_assets_for_normalization_tables,\n connection_to_group_fn=connection_to_group_fn,\n connection_to_io_manager_key_fn=connection_to_io_manager_key_fn,\n connection_filter=connection_filter,\n connection_to_asset_key_fn=connection_to_asset_key_fn,\n connection_to_freshness_policy_fn=connection_to_freshness_policy_fn,\n connection_to_auto_materialize_policy_fn=connection_to_auto_materialize_policy_fn,\n )\n self._workspace_id = workspace_id\n self._airbyte_instance: AirbyteResource = (\n airbyte_resource_def.process_config_and_initialize()\n if isinstance(airbyte_resource_def, AirbyteResource)\n else airbyte_resource_def(build_init_resource_context())\n )\n\n def _get_connections(self) -> Sequence[Tuple[str, AirbyteConnectionMetadata]]:\n workspace_id = self._workspace_id\n if not workspace_id:\n workspaces = cast(\n List[Dict[str, Any]],\n check.not_none(\n self._airbyte_instance.make_request(endpoint="/workspaces/list", data={})\n ).get("workspaces", []),\n )\n\n check.invariant(len(workspaces) <= 1, "Airbyte instance has more than one workspace")\n check.invariant(len(workspaces) > 0, "Airbyte instance has no workspaces")\n\n workspace_id = workspaces[0].get("workspaceId")\n\n connections = cast(\n List[Dict[str, Any]],\n check.not_none(\n self._airbyte_instance.make_request(\n endpoint="/connections/list", data={"workspaceId": workspace_id}\n )\n ).get("connections", []),\n )\n\n output_connections: List[Tuple[str, AirbyteConnectionMetadata]] = []\n for connection_json in connections:\n connection_id = cast(str, connection_json.get("connectionId"))\n\n operations_json = cast(\n Dict[str, Any],\n check.not_none(\n self._airbyte_instance.make_request(\n endpoint="/operations/list",\n data={"connectionId": connection_id},\n )\n ),\n )\n connection = AirbyteConnectionMetadata.from_api_json(connection_json, operations_json)\n\n # Filter out connections that don't match the filter function\n if self._connection_filter and not self._connection_filter(connection):\n continue\n\n output_connections.append((connection_id, connection))\n return output_connections\n\n def build_definitions(\n self, data: Sequence[AssetsDefinitionCacheableData]\n ) -> Sequence[AssetsDefinition]:\n return super()._build_definitions_with_resources(\n data, {"airbyte": self._airbyte_instance.get_resource_definition()}\n )\n\n\nclass AirbyteYAMLCacheableAssetsDefinition(AirbyteCoreCacheableAssetsDefinition):\n def __init__(\n self,\n project_dir: str,\n workspace_id: Optional[str],\n key_prefix: Sequence[str],\n create_assets_for_normalization_tables: bool,\n connection_to_group_fn: Optional[Callable[[str], Optional[str]]],\n connection_to_io_manager_key_fn: Optional[Callable[[str], Optional[str]]],\n connection_filter: Optional[Callable[[AirbyteConnectionMetadata], bool]],\n connection_directories: Optional[Sequence[str]],\n connection_to_asset_key_fn: Optional[Callable[[AirbyteConnectionMetadata, str], AssetKey]],\n connection_to_freshness_policy_fn: Optional[\n Callable[[AirbyteConnectionMetadata], Optional[FreshnessPolicy]]\n ],\n connection_to_auto_materialize_policy_fn: Optional[\n Callable[[AirbyteConnectionMetadata], Optional[AutoMaterializePolicy]]\n ] = None,\n ):\n super().__init__(\n key_prefix=key_prefix,\n create_assets_for_normalization_tables=create_assets_for_normalization_tables,\n connection_to_group_fn=connection_to_group_fn,\n connection_to_io_manager_key_fn=connection_to_io_manager_key_fn,\n connection_filter=connection_filter,\n 
connection_to_asset_key_fn=connection_to_asset_key_fn,\n connection_to_freshness_policy_fn=connection_to_freshness_policy_fn,\n connection_to_auto_materialize_policy_fn=connection_to_auto_materialize_policy_fn,\n )\n self._workspace_id = workspace_id\n self._project_dir = project_dir\n self._connection_directories = connection_directories\n\n def _get_connections(self) -> Sequence[Tuple[str, AirbyteConnectionMetadata]]:\n connections_dir = os.path.join(self._project_dir, "connections")\n\n output_connections: List[Tuple[str, AirbyteConnectionMetadata]] = []\n\n connection_directories = self._connection_directories or os.listdir(connections_dir)\n for connection_name in connection_directories:\n connection_dir = os.path.join(connections_dir, connection_name)\n with open(os.path.join(connection_dir, "configuration.yaml"), encoding="utf-8") as f:\n connection = AirbyteConnectionMetadata.from_config(yaml.safe_load(f.read()))\n\n # Filter out connections that don't match the filter function\n if self._connection_filter and not self._connection_filter(connection):\n continue\n\n if self._workspace_id:\n state_file = f"state_{self._workspace_id}.yaml"\n check.invariant(\n state_file in os.listdir(connection_dir),\n f"Workspace state file {state_file} not found",\n )\n else:\n state_files = [\n filename\n for filename in os.listdir(connection_dir)\n if filename.startswith("state_")\n ]\n check.invariant(\n len(state_files) > 0,\n f"No state files found for connection {connection_name} in {connection_dir}",\n )\n check.invariant(\n len(state_files) <= 1,\n "More than one state file found for connection {} in {}, specify a workspace_id"\n " to disambiguate".format(connection_name, connection_dir),\n )\n state_file = state_files[0]\n\n with open(os.path.join(connection_dir, cast(str, state_file)), encoding="utf-8") as f:\n state = yaml.safe_load(f.read())\n connection_id = state.get("resource_id")\n\n output_connections.append((connection_id, connection))\n return output_connections\n\n\n
[docs]def load_assets_from_airbyte_instance(\n airbyte: Union[AirbyteResource, ResourceDefinition],\n workspace_id: Optional[str] = None,\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n create_assets_for_normalization_tables: bool = True,\n connection_to_group_fn: Optional[Callable[[str], Optional[str]]] = _clean_name,\n io_manager_key: Optional[str] = None,\n connection_to_io_manager_key_fn: Optional[Callable[[str], Optional[str]]] = None,\n connection_filter: Optional[Callable[[AirbyteConnectionMetadata], bool]] = None,\n connection_to_asset_key_fn: Optional[\n Callable[[AirbyteConnectionMetadata, str], AssetKey]\n ] = None,\n connection_to_freshness_policy_fn: Optional[\n Callable[[AirbyteConnectionMetadata], Optional[FreshnessPolicy]]\n ] = None,\n connection_to_auto_materialize_policy_fn: Optional[\n Callable[[AirbyteConnectionMetadata], Optional[AutoMaterializePolicy]]\n ] = None,\n) -> CacheableAssetsDefinition:\n """Loads Airbyte connection assets from a configured AirbyteResource instance. This fetches information\n about defined connections at initialization time, and will error on workspace load if the Airbyte\n instance is not reachable.\n\n Args:\n airbyte (ResourceDefinition): An AirbyteResource configured with the appropriate connection\n details.\n workspace_id (Optional[str]): The ID of the Airbyte workspace to load connections from. Only\n required if multiple workspaces exist in your instance.\n key_prefix (Optional[CoercibleToAssetKeyPrefix]): A prefix for the asset keys created.\n create_assets_for_normalization_tables (bool): If True, assets will be created for tables\n created by Airbyte's normalization feature. If False, only the destination tables\n will be created. Defaults to True.\n connection_to_group_fn (Optional[Callable[[str], Optional[str]]]): Function which returns an asset\n group name for a given Airbyte connection name. If None, no groups will be created. Defaults\n to a basic sanitization function.\n io_manager_key (Optional[str]): The I/O manager key to use for all assets. Defaults to "io_manager".\n Use this if all assets should be loaded from the same source, otherwise use connection_to_io_manager_key_fn.\n connection_to_io_manager_key_fn (Optional[Callable[[str], Optional[str]]]): Function which returns an\n I/O manager key for a given Airbyte connection name. When other ops are downstream of the loaded assets,\n the IOManager specified determines how the inputs to those ops are loaded. Defaults to "io_manager".\n connection_filter (Optional[Callable[[AirbyteConnectionMetadata], bool]]): Optional function which takes\n in connection metadata and returns False if the connection should be excluded from the output assets.\n connection_to_asset_key_fn (Optional[Callable[[AirbyteConnectionMetadata, str], AssetKey]]): Optional function which\n takes in connection metadata and table name and returns an asset key for the table. If None, the default asset\n key is based on the table name. Any asset key prefix will be applied to the output of this function.\n connection_to_freshness_policy_fn (Optional[Callable[[AirbyteConnectionMetadata], Optional[FreshnessPolicy]]]): Optional function\n which takes in connection metadata and returns a freshness policy for the connection's assets. 
If None, no freshness policies\n will be applied to the assets.\n connection_to_auto_materialize_policy_fn (Optional[Callable[[AirbyteConnectionMetadata], Optional[AutoMaterializePolicy]]]): Optional\n function which takes in connection metadata and returns an auto materialization policy for the connection's assets. If None, no\n auto materialization policies will be applied to the assets.\n\n **Examples:**\n\n Loading all Airbyte connections as assets:\n\n .. code-block:: python\n\n from dagster_airbyte import airbyte_resource, load_assets_from_airbyte_instance\n\n airbyte_instance = airbyte_resource.configured(\n {\n "host": "localhost",\n "port": "8000",\n }\n )\n airbyte_assets = load_assets_from_airbyte_instance(airbyte_instance)\n\n Filtering the set of loaded connections:\n\n .. code-block:: python\n\n from dagster_airbyte import airbyte_resource, load_assets_from_airbyte_instance\n\n airbyte_instance = airbyte_resource.configured(\n {\n "host": "localhost",\n "port": "8000",\n }\n )\n airbyte_assets = load_assets_from_airbyte_instance(\n airbyte_instance,\n connection_filter=lambda meta: "snowflake" in meta.name,\n )\n """\n if isinstance(airbyte, AirbyteCloudResource):\n raise DagsterInvalidInvocationError(\n "load_assets_from_airbyte_instance is not yet supported for AirbyteCloudResource"\n )\n\n if isinstance(key_prefix, str):\n key_prefix = [key_prefix]\n key_prefix = check.list_param(key_prefix or [], "key_prefix", of_type=str)\n\n check.invariant(\n not io_manager_key or not connection_to_io_manager_key_fn,\n "Cannot specify both io_manager_key and connection_to_io_manager_key_fn",\n )\n if not connection_to_io_manager_key_fn:\n connection_to_io_manager_key_fn = lambda _: io_manager_key\n\n return AirbyteInstanceCacheableAssetsDefinition(\n airbyte_resource_def=airbyte,\n workspace_id=workspace_id,\n key_prefix=key_prefix,\n create_assets_for_normalization_tables=create_assets_for_normalization_tables,\n connection_to_group_fn=connection_to_group_fn,\n connection_to_io_manager_key_fn=connection_to_io_manager_key_fn,\n connection_filter=connection_filter,\n connection_to_asset_key_fn=connection_to_asset_key_fn,\n connection_to_freshness_policy_fn=connection_to_freshness_policy_fn,\n connection_to_auto_materialize_policy_fn=connection_to_auto_materialize_policy_fn,\n )
\n\n\n
[docs]def load_assets_from_airbyte_project(\n project_dir: str,\n workspace_id: Optional[str] = None,\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n create_assets_for_normalization_tables: bool = True,\n connection_to_group_fn: Optional[Callable[[str], Optional[str]]] = _clean_name,\n io_manager_key: Optional[str] = None,\n connection_to_io_manager_key_fn: Optional[Callable[[str], Optional[str]]] = None,\n connection_filter: Optional[Callable[[AirbyteConnectionMetadata], bool]] = None,\n connection_directories: Optional[Sequence[str]] = None,\n connection_to_asset_key_fn: Optional[\n Callable[[AirbyteConnectionMetadata, str], AssetKey]\n ] = None,\n connection_to_freshness_policy_fn: Optional[\n Callable[[AirbyteConnectionMetadata], Optional[FreshnessPolicy]]\n ] = None,\n connection_to_auto_materialize_policy_fn: Optional[\n Callable[[AirbyteConnectionMetadata], Optional[AutoMaterializePolicy]]\n ] = None,\n) -> CacheableAssetsDefinition:\n """Loads an Airbyte project into a set of Dagster assets.\n\n Point to the root folder of an Airbyte project synced using the Octavia CLI. For\n more information, see https://github.com/airbytehq/airbyte/tree/master/octavia-cli#octavia-import-all.\n\n Args:\n project_dir (str): The path to the root of your Airbyte project, containing sources, destinations,\n and connections folders.\n workspace_id (Optional[str]): The ID of the Airbyte workspace to load connections from. Only\n required if multiple workspace state YAMLfiles exist in the project.\n key_prefix (Optional[CoercibleToAssetKeyPrefix]): A prefix for the asset keys created.\n create_assets_for_normalization_tables (bool): If True, assets will be created for tables\n created by Airbyte's normalization feature. If False, only the destination tables\n will be created. Defaults to True.\n connection_to_group_fn (Optional[Callable[[str], Optional[str]]]): Function which returns an asset\n group name for a given Airbyte connection name. If None, no groups will be created. Defaults\n to a basic sanitization function.\n io_manager_key (Optional[str]): The I/O manager key to use for all assets. Defaults to "io_manager".\n Use this if all assets should be loaded from the same source, otherwise use connection_to_io_manager_key_fn.\n connection_to_io_manager_key_fn (Optional[Callable[[str], Optional[str]]]): Function which returns an\n I/O manager key for a given Airbyte connection name. When other ops are downstream of the loaded assets,\n the IOManager specified determines how the inputs to those ops are loaded. Defaults to "io_manager".\n connection_filter (Optional[Callable[[AirbyteConnectionMetadata], bool]]): Optional function which\n takes in connection metadata and returns False if the connection should be excluded from the output assets.\n connection_directories (Optional[List[str]]): Optional list of connection directories to load assets from.\n If omitted, all connections in the Airbyte project are loaded. May be faster than connection_filter\n if the project has many connections or if the connection yaml files are large.\n connection_to_asset_key_fn (Optional[Callable[[AirbyteConnectionMetadata, str], AssetKey]]): Optional function which\n takes in connection metadata and table name and returns an asset key for the table. If None, the default asset\n key is based on the table name. 
Any asset key prefix will be applied to the output of this function.\n connection_to_freshness_policy_fn (Optional[Callable[[AirbyteConnectionMetadata], Optional[FreshnessPolicy]]]):\n Optional function which takes in connection metadata and returns a freshness policy for the connection's assets.\n If None, no freshness policies will be applied to the assets.\n connection_to_auto_materialize_policy_fn (Optional[Callable[[AirbyteConnectionMetadata], Optional[AutoMaterializePolicy]]]):\n Optional function which takes in connection metadata and returns an auto materialization policy for the connection's assets.\n If None, no auto materialization policies will be applied to the assets.\n\n **Examples:**\n\n Loading all Airbyte connections as assets:\n\n .. code-block:: python\n\n from dagster_airbyte import load_assets_from_airbyte_project\n\n airbyte_assets = load_assets_from_airbyte_project(\n project_dir="path/to/airbyte/project",\n )\n\n Filtering the set of loaded connections:\n\n .. code-block:: python\n\n from dagster_airbyte import load_assets_from_airbyte_project\n\n airbyte_assets = load_assets_from_airbyte_project(\n project_dir="path/to/airbyte/project",\n connection_filter=lambda meta: "snowflake" in meta.name,\n )\n """\n if isinstance(key_prefix, str):\n key_prefix = [key_prefix]\n key_prefix = check.list_param(key_prefix or [], "key_prefix", of_type=str)\n\n check.invariant(\n not io_manager_key or not connection_to_io_manager_key_fn,\n "Cannot specify both io_manager_key and connection_to_io_manager_key_fn",\n )\n if not connection_to_io_manager_key_fn:\n connection_to_io_manager_key_fn = lambda _: io_manager_key\n\n return AirbyteYAMLCacheableAssetsDefinition(\n project_dir=project_dir,\n workspace_id=workspace_id,\n key_prefix=key_prefix,\n create_assets_for_normalization_tables=create_assets_for_normalization_tables,\n connection_to_group_fn=connection_to_group_fn,\n connection_to_io_manager_key_fn=connection_to_io_manager_key_fn,\n connection_filter=connection_filter,\n connection_directories=connection_directories,\n connection_to_asset_key_fn=connection_to_asset_key_fn,\n connection_to_freshness_policy_fn=connection_to_freshness_policy_fn,\n connection_to_auto_materialize_policy_fn=connection_to_auto_materialize_policy_fn,\n )
\n
", "current_page_name": "_modules/dagster_airbyte/asset_defs", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_airbyte.asset_defs"}, "managed": {"generated": {"destinations": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_airbyte.managed.generated.destinations

\n# ruff: noqa: A001, A002\nfrom typing import Optional, Union\n\nimport dagster._check as check\nfrom dagster._annotations import public\n\nfrom dagster_airbyte.managed.types import GeneratedAirbyteDestination\n\n\n
[docs]class DynamodbDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(\n self,\n name: str,\n dynamodb_table_name_prefix: str,\n dynamodb_region: str,\n access_key_id: str,\n secret_access_key: str,\n dynamodb_endpoint: Optional[str] = None,\n ):\n """Airbyte Destination for Dynamodb.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/dynamodb\n\n Args:\n name (str): The name of the destination.\n dynamodb_endpoint (Optional[str]): This is your DynamoDB endpoint url.(if you are working with AWS DynamoDB, just leave empty).\n dynamodb_table_name_prefix (str): The prefix to use when naming DynamoDB tables.\n dynamodb_region (str): The region of the DynamoDB.\n access_key_id (str): The access key id to access the DynamoDB. Airbyte requires Read and Write permissions to the DynamoDB.\n secret_access_key (str): The corresponding secret to the access key id.\n """\n self.dynamodb_endpoint = check.opt_str_param(dynamodb_endpoint, "dynamodb_endpoint")\n self.dynamodb_table_name_prefix = check.str_param(\n dynamodb_table_name_prefix, "dynamodb_table_name_prefix"\n )\n self.dynamodb_region = check.str_param(dynamodb_region, "dynamodb_region")\n self.access_key_id = check.str_param(access_key_id, "access_key_id")\n self.secret_access_key = check.str_param(secret_access_key, "secret_access_key")\n super().__init__("Dynamodb", name)
\n\n\n
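A minimal instantiation sketch for ``DynamodbDestination``; the region, prefix, and credential strings below are placeholders, not part of the generated source:

.. code-block:: python

    from dagster_airbyte.managed.generated.destinations import DynamodbDestination

    # In practice the credentials would come from environment variables or a
    # secrets manager rather than literals.
    dynamodb_destination = DynamodbDestination(
        name="my_dynamodb_destination",
        dynamodb_table_name_prefix="airbyte_",
        dynamodb_region="us-east-1",
        access_key_id="...",
        secret_access_key="...",
        # dynamodb_endpoint may be omitted when targeting AWS-hosted DynamoDB.
    )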
[docs]class BigqueryDestination(GeneratedAirbyteDestination):\n
[docs] class StandardInserts:\n
[docs] @public\n def __init__(\n self,\n ):\n self.method = "Standard"
\n\n
[docs] class HMACKey:\n
[docs] @public\n def __init__(self, hmac_key_access_id: str, hmac_key_secret: str):\n self.credential_type = "HMAC_KEY"\n self.hmac_key_access_id = check.str_param(hmac_key_access_id, "hmac_key_access_id")\n self.hmac_key_secret = check.str_param(hmac_key_secret, "hmac_key_secret")
\n\n
[docs] class GCSStaging:\n
[docs] @public\n def __init__(\n self,\n credential: "BigqueryDestination.HMACKey",\n gcs_bucket_name: str,\n gcs_bucket_path: str,\n keep_files_in_gcs_bucket: Optional[str] = None,\n ):\n self.method = "GCS Staging"\n self.credential = check.inst_param(\n credential, "credential", BigqueryDestination.HMACKey\n )\n self.gcs_bucket_name = check.str_param(gcs_bucket_name, "gcs_bucket_name")\n self.gcs_bucket_path = check.str_param(gcs_bucket_path, "gcs_bucket_path")\n self.keep_files_in_gcs_bucket = check.opt_str_param(\n keep_files_in_gcs_bucket, "keep_files_in_gcs_bucket"\n )
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n project_id: str,\n dataset_location: str,\n dataset_id: str,\n loading_method: Union[\n "BigqueryDestination.StandardInserts", "BigqueryDestination.GCSStaging"\n ],\n credentials_json: Optional[str] = None,\n transformation_priority: Optional[str] = None,\n big_query_client_buffer_size_mb: Optional[int] = None,\n ):\n """Airbyte Destination for Bigquery.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/bigquery\n\n Args:\n name (str): The name of the destination.\n project_id (str): The GCP project ID for the project containing the target BigQuery dataset. Read more here.\n dataset_location (str): The location of the dataset. Warning: Changes made after creation will not be applied. Read more here.\n dataset_id (str): The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.\n loading_method (Union[BigqueryDestination.StandardInserts, BigqueryDestination.GCSStaging]): Loading method used to send select the way data will be uploaded to BigQuery. Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging. GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here.\n credentials_json (Optional[str]): The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.\n transformation_priority (Optional[str]): Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don`t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly.\n big_query_client_buffer_size_mb (Optional[int]): Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.\n """\n self.project_id = check.str_param(project_id, "project_id")\n self.dataset_location = check.str_param(dataset_location, "dataset_location")\n self.dataset_id = check.str_param(dataset_id, "dataset_id")\n self.loading_method = check.inst_param(\n loading_method,\n "loading_method",\n (BigqueryDestination.StandardInserts, BigqueryDestination.GCSStaging),\n )\n self.credentials_json = check.opt_str_param(credentials_json, "credentials_json")\n self.transformation_priority = check.opt_str_param(\n transformation_priority, "transformation_priority"\n )\n self.big_query_client_buffer_size_mb = check.opt_int_param(\n big_query_client_buffer_size_mb, "big_query_client_buffer_size_mb"\n )\n super().__init__("Bigquery", name)
\n\n\n
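A sketch of ``BigqueryDestination`` configured with the recommended GCS staging loading method; the project, dataset, bucket, and HMAC key values are placeholders:

.. code-block:: python

    from dagster_airbyte.managed.generated.destinations import BigqueryDestination

    bigquery_destination = BigqueryDestination(
        name="my_bigquery_destination",
        project_id="my-gcp-project",
        dataset_location="US",
        dataset_id="airbyte_sync",
        # GCS staging batches records in a bucket before loading them into
        # BigQuery; BigqueryDestination.StandardInserts() is the alternative
        # intended only for quick testing.
        loading_method=BigqueryDestination.GCSStaging(
            credential=BigqueryDestination.HMACKey(
                hmac_key_access_id="...",
                hmac_key_secret="...",
            ),
            gcs_bucket_name="my-staging-bucket",
            gcs_bucket_path="airbyte-staging",
        ),
    )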
[docs]class RabbitmqDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n routing_key: str,\n ssl: Optional[bool] = None,\n port: Optional[int] = None,\n virtual_host: Optional[str] = None,\n username: Optional[str] = None,\n password: Optional[str] = None,\n exchange: Optional[str] = None,\n ):\n """Airbyte Destination for Rabbitmq.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/rabbitmq\n\n Args:\n name (str): The name of the destination.\n ssl (Optional[bool]): SSL enabled.\n host (str): The RabbitMQ host name.\n port (Optional[int]): The RabbitMQ port.\n virtual_host (Optional[str]): The RabbitMQ virtual host name.\n username (Optional[str]): The username to connect.\n password (Optional[str]): The password to connect.\n exchange (Optional[str]): The exchange name.\n routing_key (str): The routing key.\n """\n self.ssl = check.opt_bool_param(ssl, "ssl")\n self.host = check.str_param(host, "host")\n self.port = check.opt_int_param(port, "port")\n self.virtual_host = check.opt_str_param(virtual_host, "virtual_host")\n self.username = check.opt_str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.exchange = check.opt_str_param(exchange, "exchange")\n self.routing_key = check.str_param(routing_key, "routing_key")\n super().__init__("Rabbitmq", name)
\n\n\n
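A minimal sketch for ``RabbitmqDestination``; only ``host`` and ``routing_key`` are required, and the connection details shown are placeholders:

.. code-block:: python

    from dagster_airbyte.managed.generated.destinations import RabbitmqDestination

    rabbitmq_destination = RabbitmqDestination(
        name="my_rabbitmq_destination",
        host="rabbitmq.internal.example.com",
        routing_key="airbyte",
        # The remaining connection settings are optional.
        port=5672,
        username="airbyte",
        password="...",
    )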
[docs]class KvdbDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(self, name: str, bucket_id: str, secret_key: str):\n """Airbyte Destination for Kvdb.\n\n Documentation can be found at https://kvdb.io/docs/api/\n\n Args:\n name (str): The name of the destination.\n bucket_id (str): The ID of your KVdb bucket.\n secret_key (str): Your bucket Secret Key.\n """\n self.bucket_id = check.str_param(bucket_id, "bucket_id")\n self.secret_key = check.str_param(secret_key, "secret_key")\n super().__init__("Kvdb", name)
\n\n\n
[docs]class ClickhouseDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n database: str,\n username: str,\n password: Optional[str] = None,\n jdbc_url_params: Optional[str] = None,\n ssl: Optional[bool] = None,\n ):\n """Airbyte Destination for Clickhouse.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/clickhouse\n\n Args:\n name (str): The name of the destination.\n host (str): Hostname of the database.\n port (int): HTTP port of the database.\n database (str): Name of the database.\n username (str): Username to use to access the database.\n password (Optional[str]): Password associated with the username.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).\n ssl (Optional[bool]): Encrypt data using SSL.\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.database = check.str_param(database, "database")\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n self.ssl = check.opt_bool_param(ssl, "ssl")\n super().__init__("Clickhouse", name)
\n\n\n
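A minimal sketch for ``ClickhouseDestination``; the host, database, and credentials are placeholders:

.. code-block:: python

    from dagster_airbyte.managed.generated.destinations import ClickhouseDestination

    clickhouse_destination = ClickhouseDestination(
        name="my_clickhouse_destination",
        host="clickhouse.example.com",
        port=8123,  # HTTP port of the database
        database="analytics",
        username="airbyte",
        password="...",
        ssl=True,
    )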
[docs]class AmazonSqsDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(\n self,\n name: str,\n queue_url: str,\n region: str,\n message_delay: Optional[int] = None,\n access_key: Optional[str] = None,\n secret_key: Optional[str] = None,\n message_body_key: Optional[str] = None,\n message_group_id: Optional[str] = None,\n ):\n """Airbyte Destination for Amazon Sqs.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/amazon-sqs\n\n Args:\n name (str): The name of the destination.\n queue_url (str): URL of the SQS Queue\n region (str): AWS Region of the SQS Queue\n message_delay (Optional[int]): Modify the Message Delay of the individual message from the Queue's default (seconds).\n access_key (Optional[str]): The Access Key ID of the AWS IAM Role to use for sending messages\n secret_key (Optional[str]): The Secret Key of the AWS IAM Role to use for sending messages\n message_body_key (Optional[str]): Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body.\n message_group_id (Optional[str]): The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues.\n """\n self.queue_url = check.str_param(queue_url, "queue_url")\n self.region = check.str_param(region, "region")\n self.message_delay = check.opt_int_param(message_delay, "message_delay")\n self.access_key = check.opt_str_param(access_key, "access_key")\n self.secret_key = check.opt_str_param(secret_key, "secret_key")\n self.message_body_key = check.opt_str_param(message_body_key, "message_body_key")\n self.message_group_id = check.opt_str_param(message_group_id, "message_group_id")\n super().__init__("Amazon Sqs", name)
\n\n\n
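A minimal sketch for ``AmazonSqsDestination``; ``queue_url`` and ``region`` are the only required fields, and ``message_group_id`` is shown because it is required for FIFO queues. All values are placeholders:

.. code-block:: python

    from dagster_airbyte.managed.generated.destinations import AmazonSqsDestination

    sqs_destination = AmazonSqsDestination(
        name="my_sqs_destination",
        queue_url="https://sqs.us-east-1.amazonaws.com/123456789012/my-queue.fifo",
        region="us-east-1",
        access_key="...",
        secret_key="...",
        message_group_id="airbyte",
    )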
[docs]class MariadbColumnstoreDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n database: str,\n username: str,\n password: Optional[str] = None,\n jdbc_url_params: Optional[str] = None,\n ):\n """Airbyte Destination for Mariadb Columnstore.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/mariadb-columnstore\n\n Args:\n name (str): The name of the destination.\n host (str): The Hostname of the database.\n port (int): The Port of the database.\n database (str): Name of the database.\n username (str): The Username which is used to access the database.\n password (Optional[str]): The Password associated with the username.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.database = check.str_param(database, "database")\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n super().__init__("Mariadb Columnstore", name)
\n\n\n
[docs]class KinesisDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(\n self,\n name: str,\n endpoint: str,\n region: str,\n shardCount: int,\n accessKey: str,\n privateKey: str,\n bufferSize: int,\n ):\n """Airbyte Destination for Kinesis.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/kinesis\n\n Args:\n name (str): The name of the destination.\n endpoint (str): AWS Kinesis endpoint.\n region (str): AWS region. Your account determines the Regions that are available to you.\n shardCount (int): Number of shards to which the data should be streamed.\n accessKey (str): Generate the AWS Access Key for current user.\n privateKey (str): The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase".\n bufferSize (int): Buffer size for storing kinesis records before being batch streamed.\n """\n self.endpoint = check.str_param(endpoint, "endpoint")\n self.region = check.str_param(region, "region")\n self.shardCount = check.int_param(shardCount, "shardCount")\n self.accessKey = check.str_param(accessKey, "accessKey")\n self.privateKey = check.str_param(privateKey, "privateKey")\n self.bufferSize = check.int_param(bufferSize, "bufferSize")\n super().__init__("Kinesis", name)
\n\n\n
[docs]class AzureBlobStorageDestination(GeneratedAirbyteDestination):\n
[docs] class CSVCommaSeparatedValues:\n
[docs] @public\n def __init__(self, flattening: str):\n self.format_type = "CSV"\n self.flattening = check.str_param(flattening, "flattening")
\n\n
[docs] class JSONLinesNewlineDelimitedJSON:\n
[docs] @public\n def __init__(\n self,\n ):\n self.format_type = "JSONL"
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n azure_blob_storage_account_name: str,\n azure_blob_storage_account_key: str,\n format: Union[\n "AzureBlobStorageDestination.CSVCommaSeparatedValues",\n "AzureBlobStorageDestination.JSONLinesNewlineDelimitedJSON",\n ],\n azure_blob_storage_endpoint_domain_name: Optional[str] = None,\n azure_blob_storage_container_name: Optional[str] = None,\n azure_blob_storage_output_buffer_size: Optional[int] = None,\n ):\n """Airbyte Destination for Azure Blob Storage.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/azureblobstorage\n\n Args:\n name (str): The name of the destination.\n azure_blob_storage_endpoint_domain_name (Optional[str]): This is Azure Blob Storage endpoint domain name. Leave default value (or leave it empty if run container from command line) to use Microsoft native from example.\n azure_blob_storage_container_name (Optional[str]): The name of the Azure blob storage container. If not exists - will be created automatically. May be empty, then will be created automatically airbytecontainer+timestamp\n azure_blob_storage_account_name (str): The account's name of the Azure Blob Storage.\n azure_blob_storage_account_key (str): The Azure blob storage account key.\n azure_blob_storage_output_buffer_size (Optional[int]): The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure.\n format (Union[AzureBlobStorageDestination.CSVCommaSeparatedValues, AzureBlobStorageDestination.JSONLinesNewlineDelimitedJSON]): Output data format\n """\n self.azure_blob_storage_endpoint_domain_name = check.opt_str_param(\n azure_blob_storage_endpoint_domain_name, "azure_blob_storage_endpoint_domain_name"\n )\n self.azure_blob_storage_container_name = check.opt_str_param(\n azure_blob_storage_container_name, "azure_blob_storage_container_name"\n )\n self.azure_blob_storage_account_name = check.str_param(\n azure_blob_storage_account_name, "azure_blob_storage_account_name"\n )\n self.azure_blob_storage_account_key = check.str_param(\n azure_blob_storage_account_key, "azure_blob_storage_account_key"\n )\n self.azure_blob_storage_output_buffer_size = check.opt_int_param(\n azure_blob_storage_output_buffer_size, "azure_blob_storage_output_buffer_size"\n )\n self.format = check.inst_param(\n format,\n "format",\n (\n AzureBlobStorageDestination.CSVCommaSeparatedValues,\n AzureBlobStorageDestination.JSONLinesNewlineDelimitedJSON,\n ),\n )\n super().__init__("Azure Blob Storage", name)
\n\n\n
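A minimal sketch for ``AzureBlobStorageDestination``; the output format is selected by passing one of the nested config classes, and the account name, key, and container are placeholders:

.. code-block:: python

    from dagster_airbyte.managed.generated.destinations import AzureBlobStorageDestination

    azure_blob_destination = AzureBlobStorageDestination(
        name="my_azure_blob_destination",
        azure_blob_storage_account_name="mystorageaccount",
        azure_blob_storage_account_key="...",
        azure_blob_storage_container_name="airbyte-output",
        # CSVCommaSeparatedValues(flattening=...) is the alternative format.
        format=AzureBlobStorageDestination.JSONLinesNewlineDelimitedJSON(),
    )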
[docs]class KafkaDestination(GeneratedAirbyteDestination):\n
[docs] class PLAINTEXT:\n
[docs] @public\n def __init__(self, security_protocol: str):\n self.security_protocol = check.str_param(security_protocol, "security_protocol")
\n\n
[docs] class SASLPLAINTEXT:\n
[docs] @public\n def __init__(self, security_protocol: str, sasl_mechanism: str, sasl_jaas_config: str):\n self.security_protocol = check.str_param(security_protocol, "security_protocol")\n self.sasl_mechanism = check.str_param(sasl_mechanism, "sasl_mechanism")\n self.sasl_jaas_config = check.str_param(sasl_jaas_config, "sasl_jaas_config")
\n\n
[docs] class SASLSSL:\n
[docs] @public\n def __init__(self, security_protocol: str, sasl_mechanism: str, sasl_jaas_config: str):\n self.security_protocol = check.str_param(security_protocol, "security_protocol")\n self.sasl_mechanism = check.str_param(sasl_mechanism, "sasl_mechanism")\n self.sasl_jaas_config = check.str_param(sasl_jaas_config, "sasl_jaas_config")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n bootstrap_servers: str,\n topic_pattern: str,\n protocol: Union[\n "KafkaDestination.PLAINTEXT",\n "KafkaDestination.SASLPLAINTEXT",\n "KafkaDestination.SASLSSL",\n ],\n acks: str,\n enable_idempotence: bool,\n compression_type: str,\n batch_size: int,\n linger_ms: str,\n max_in_flight_requests_per_connection: int,\n client_dns_lookup: str,\n buffer_memory: str,\n max_request_size: int,\n retries: int,\n socket_connection_setup_timeout_ms: str,\n socket_connection_setup_timeout_max_ms: str,\n max_block_ms: str,\n request_timeout_ms: int,\n delivery_timeout_ms: int,\n send_buffer_bytes: int,\n receive_buffer_bytes: int,\n test_topic: Optional[str] = None,\n sync_producer: Optional[bool] = None,\n client_id: Optional[str] = None,\n ):\n """Airbyte Destination for Kafka.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/kafka\n\n Args:\n name (str): The name of the destination.\n bootstrap_servers (str): A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping&mdash;this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).\n topic_pattern (str): Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.\n test_topic (Optional[str]): Topic to test if Airbyte can produce messages.\n sync_producer (Optional[bool]): Wait synchronously until the record has been sent to Kafka.\n protocol (Union[KafkaDestination.PLAINTEXT, KafkaDestination.SASLPLAINTEXT, KafkaDestination.SASLSSL]): Protocol used to communicate with brokers.\n client_id (Optional[str]): An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.\n acks (str): The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent.\n enable_idempotence (bool): When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream.\n compression_type (str): The compression type for all data generated by the producer.\n batch_size (int): The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition.\n linger_ms (str): The producer groups together any records that arrive in between request transmissions into a single batched request.\n max_in_flight_requests_per_connection (int): The maximum number of unacknowledged requests the client will send on a single connection before blocking. 
Can be greater than 1, and the maximum value supported with idempotency is 5.\n client_dns_lookup (str): Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.\n buffer_memory (str): The total bytes of memory the producer can use to buffer records waiting to be sent to the server.\n max_request_size (int): The maximum size of a request in bytes.\n retries (int): Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.\n socket_connection_setup_timeout_ms (str): The amount of time the client will wait for the socket connection to be established.\n socket_connection_setup_timeout_max_ms (str): The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum.\n max_block_ms (str): The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block.\n request_timeout_ms (int): The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.\n delivery_timeout_ms (int): An upper bound on the time to report success or failure after a call to 'send()' returns.\n send_buffer_bytes (int): The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.\n receive_buffer_bytes (int): The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. 
If the value is -1, the OS default will be used.\n """\n self.bootstrap_servers = check.str_param(bootstrap_servers, "bootstrap_servers")\n self.topic_pattern = check.str_param(topic_pattern, "topic_pattern")\n self.test_topic = check.opt_str_param(test_topic, "test_topic")\n self.sync_producer = check.opt_bool_param(sync_producer, "sync_producer")\n self.protocol = check.inst_param(\n protocol,\n "protocol",\n (KafkaDestination.PLAINTEXT, KafkaDestination.SASLPLAINTEXT, KafkaDestination.SASLSSL),\n )\n self.client_id = check.opt_str_param(client_id, "client_id")\n self.acks = check.str_param(acks, "acks")\n self.enable_idempotence = check.bool_param(enable_idempotence, "enable_idempotence")\n self.compression_type = check.str_param(compression_type, "compression_type")\n self.batch_size = check.int_param(batch_size, "batch_size")\n self.linger_ms = check.str_param(linger_ms, "linger_ms")\n self.max_in_flight_requests_per_connection = check.int_param(\n max_in_flight_requests_per_connection, "max_in_flight_requests_per_connection"\n )\n self.client_dns_lookup = check.str_param(client_dns_lookup, "client_dns_lookup")\n self.buffer_memory = check.str_param(buffer_memory, "buffer_memory")\n self.max_request_size = check.int_param(max_request_size, "max_request_size")\n self.retries = check.int_param(retries, "retries")\n self.socket_connection_setup_timeout_ms = check.str_param(\n socket_connection_setup_timeout_ms, "socket_connection_setup_timeout_ms"\n )\n self.socket_connection_setup_timeout_max_ms = check.str_param(\n socket_connection_setup_timeout_max_ms, "socket_connection_setup_timeout_max_ms"\n )\n self.max_block_ms = check.str_param(max_block_ms, "max_block_ms")\n self.request_timeout_ms = check.int_param(request_timeout_ms, "request_timeout_ms")\n self.delivery_timeout_ms = check.int_param(delivery_timeout_ms, "delivery_timeout_ms")\n self.send_buffer_bytes = check.int_param(send_buffer_bytes, "send_buffer_bytes")\n self.receive_buffer_bytes = check.int_param(receive_buffer_bytes, "receive_buffer_bytes")\n super().__init__("Kafka", name)
\n\n\n
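Every producer tuning knob on ``KafkaDestination`` is a required constructor argument, so a sketch is longer than for most destinations. The values below mirror common Kafka producer defaults and are assumptions for illustration only; note that several of them are passed as strings per the generated signature:

.. code-block:: python

    from dagster_airbyte.managed.generated.destinations import KafkaDestination

    kafka_destination = KafkaDestination(
        name="my_kafka_destination",
        bootstrap_servers="kafka-1:9092,kafka-2:9092",
        topic_pattern="airbyte_{namespace}_{stream}",
        # SASLPLAINTEXT / SASLSSL are the authenticated alternatives.
        protocol=KafkaDestination.PLAINTEXT(security_protocol="PLAINTEXT"),
        acks="all",
        enable_idempotence=True,
        compression_type="none",
        batch_size=16384,
        linger_ms="0",
        max_in_flight_requests_per_connection=5,
        client_dns_lookup="use_all_dns_ips",
        buffer_memory="33554432",
        max_request_size=1048576,
        retries=2147483647,
        socket_connection_setup_timeout_ms="10000",
        socket_connection_setup_timeout_max_ms="30000",
        max_block_ms="60000",
        request_timeout_ms=30000,
        delivery_timeout_ms=120000,
        send_buffer_bytes=-1,
        receive_buffer_bytes=-1,
    )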
[docs]class ElasticsearchDestination(GeneratedAirbyteDestination):\n
[docs] class None_:\n
[docs] @public\n def __init__(\n self,\n ):\n self.method = "none"
\n\n
[docs] class ApiKeySecret:\n
[docs] @public\n def __init__(self, apiKeyId: str, apiKeySecret: str):\n self.method = "secret"\n self.apiKeyId = check.str_param(apiKeyId, "apiKeyId")\n self.apiKeySecret = check.str_param(apiKeySecret, "apiKeySecret")
\n\n
[docs] class UsernamePassword:\n
[docs] @public\n def __init__(self, username: str, password: str):\n self.method = "basic"\n self.username = check.str_param(username, "username")\n self.password = check.str_param(password, "password")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n endpoint: str,\n authenticationMethod: Union[\n "ElasticsearchDestination.None_",\n "ElasticsearchDestination.ApiKeySecret",\n "ElasticsearchDestination.UsernamePassword",\n ],\n upsert: Optional[bool] = None,\n ):\n r"""Airbyte Destination for Elasticsearch.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/elasticsearch\n\n Args:\n name (str): The name of the destination.\n endpoint (str): The full url of the Elasticsearch server\n upsert (Optional[bool]): If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the elasticsearch doc id. Does not support composite primary keys.\n authenticationMethod (Union[ElasticsearchDestination.None\\\\_, ElasticsearchDestination.ApiKeySecret, ElasticsearchDestination.UsernamePassword]): The type of authentication to be used\n """\n self.endpoint = check.str_param(endpoint, "endpoint")\n self.upsert = check.opt_bool_param(upsert, "upsert")\n self.authenticationMethod = check.inst_param(\n authenticationMethod,\n "authenticationMethod",\n (\n ElasticsearchDestination.None_,\n ElasticsearchDestination.ApiKeySecret,\n ElasticsearchDestination.UsernamePassword,\n ),\n )\n super().__init__("Elasticsearch", name)
\n\n\n
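A minimal sketch for ``ElasticsearchDestination``; the authentication method is chosen by passing one of the nested config classes, and the endpoint and API key values are placeholders:

.. code-block:: python

    from dagster_airbyte.managed.generated.destinations import ElasticsearchDestination

    elasticsearch_destination = ElasticsearchDestination(
        name="my_elasticsearch_destination",
        endpoint="https://elasticsearch.example.com:9200",
        # None_ and UsernamePassword are the other authentication options.
        authenticationMethod=ElasticsearchDestination.ApiKeySecret(
            apiKeyId="...",
            apiKeySecret="...",
        ),
        upsert=True,
    )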
[docs]class MysqlDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n database: str,\n username: str,\n password: Optional[str] = None,\n ssl: Optional[bool] = None,\n jdbc_url_params: Optional[str] = None,\n ):\n """Airbyte Destination for Mysql.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/mysql\n\n Args:\n name (str): The name of the destination.\n host (str): Hostname of the database.\n port (int): Port of the database.\n database (str): Name of the database.\n username (str): Username to use to access the database.\n password (Optional[str]): Password associated with the username.\n ssl (Optional[bool]): Encrypt data using SSL.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.database = check.str_param(database, "database")\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.ssl = check.opt_bool_param(ssl, "ssl")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n super().__init__("Mysql", name)
\n\n\n
[docs]class SftpJsonDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n username: str,\n password: str,\n destination_path: str,\n port: Optional[int] = None,\n ):\n """Airbyte Destination for Sftp Json.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/sftp-json\n\n Args:\n name (str): The name of the destination.\n host (str): Hostname of the SFTP server.\n port (Optional[int]): Port of the SFTP server.\n username (str): Username to use to access the SFTP server.\n password (str): Password associated with the username.\n destination_path (str): Path to the directory where json files will be written.\n """\n self.host = check.str_param(host, "host")\n self.port = check.opt_int_param(port, "port")\n self.username = check.str_param(username, "username")\n self.password = check.str_param(password, "password")\n self.destination_path = check.str_param(destination_path, "destination_path")\n super().__init__("Sftp Json", name)
\n\n\n
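A minimal sketch for ``SftpJsonDestination``; the host, credentials, and path are placeholders, and ``port`` is optional but shown for completeness:

.. code-block:: python

    from dagster_airbyte.managed.generated.destinations import SftpJsonDestination

    sftp_json_destination = SftpJsonDestination(
        name="my_sftp_json_destination",
        host="sftp.example.com",
        username="airbyte",
        password="...",
        destination_path="/uploads/airbyte",
        port=22,
    )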
[docs]class GcsDestination(GeneratedAirbyteDestination):\n
[docs] class HMACKey:\n
[docs] @public\n def __init__(self, credential_type: str, hmac_key_access_id: str, hmac_key_secret: str):\n self.credential_type = check.str_param(credential_type, "credential_type")\n self.hmac_key_access_id = check.str_param(hmac_key_access_id, "hmac_key_access_id")\n self.hmac_key_secret = check.str_param(hmac_key_secret, "hmac_key_secret")
\n\n
[docs] class NoCompression:\n
[docs] @public\n def __init__(self, compression_type: Optional[str] = None):\n self.compression_type = check.opt_str_param(compression_type, "compression_type")
\n\n
[docs] class Deflate:\n
[docs] @public\n def __init__(self, codec: str, compression_level: Optional[int] = None):\n self.codec = check.str_param(codec, "codec")\n self.compression_level = check.opt_int_param(compression_level, "compression_level")
\n\n
[docs] class Bzip2:\n
[docs] @public\n def __init__(self, codec: str):\n self.codec = check.str_param(codec, "codec")
\n\n
[docs] class Xz:\n
[docs] @public\n def __init__(self, codec: str, compression_level: Optional[int] = None):\n self.codec = check.str_param(codec, "codec")\n self.compression_level = check.opt_int_param(compression_level, "compression_level")
\n\n
[docs] class Zstandard:\n
[docs] @public\n def __init__(\n self,\n codec: str,\n compression_level: Optional[int] = None,\n include_checksum: Optional[bool] = None,\n ):\n self.codec = check.str_param(codec, "codec")\n self.compression_level = check.opt_int_param(compression_level, "compression_level")\n self.include_checksum = check.opt_bool_param(include_checksum, "include_checksum")
\n\n
[docs] class Snappy:\n
[docs] @public\n def __init__(self, codec: str):\n self.codec = check.str_param(codec, "codec")
\n\n
[docs] class AvroApacheAvro:\n
[docs] @public\n def __init__(\n self,\n format_type: str,\n compression_codec: Union[\n "GcsDestination.NoCompression",\n "GcsDestination.Deflate",\n "GcsDestination.Bzip2",\n "GcsDestination.Xz",\n "GcsDestination.Zstandard",\n "GcsDestination.Snappy",\n ],\n ):\n self.format_type = check.str_param(format_type, "format_type")\n self.compression_codec = check.inst_param(\n compression_codec,\n "compression_codec",\n (\n GcsDestination.NoCompression,\n GcsDestination.Deflate,\n GcsDestination.Bzip2,\n GcsDestination.Xz,\n GcsDestination.Zstandard,\n GcsDestination.Snappy,\n ),\n )
\n\n
[docs] class GZIP:\n
[docs] @public\n def __init__(self, compression_type: Optional[str] = None):\n self.compression_type = check.opt_str_param(compression_type, "compression_type")
\n\n
[docs] class CSVCommaSeparatedValues:\n
[docs] @public\n def __init__(\n self,\n format_type: str,\n compression: Union["GcsDestination.NoCompression", "GcsDestination.GZIP"],\n flattening: Optional[str] = None,\n ):\n self.format_type = check.str_param(format_type, "format_type")\n self.flattening = check.opt_str_param(flattening, "flattening")\n self.compression = check.inst_param(\n compression, "compression", (GcsDestination.NoCompression, GcsDestination.GZIP)\n )
\n\n
[docs] class JSONLinesNewlineDelimitedJSON:\n
[docs] @public\n def __init__(\n self,\n format_type: str,\n compression: Union["GcsDestination.NoCompression", "GcsDestination.GZIP"],\n ):\n self.format_type = check.str_param(format_type, "format_type")\n self.compression = check.inst_param(\n compression, "compression", (GcsDestination.NoCompression, GcsDestination.GZIP)\n )
\n\n
[docs] class ParquetColumnarStorage:\n
[docs] @public\n def __init__(\n self,\n format_type: str,\n compression_codec: Optional[str] = None,\n block_size_mb: Optional[int] = None,\n max_padding_size_mb: Optional[int] = None,\n page_size_kb: Optional[int] = None,\n dictionary_page_size_kb: Optional[int] = None,\n dictionary_encoding: Optional[bool] = None,\n ):\n self.format_type = check.str_param(format_type, "format_type")\n self.compression_codec = check.opt_str_param(compression_codec, "compression_codec")\n self.block_size_mb = check.opt_int_param(block_size_mb, "block_size_mb")\n self.max_padding_size_mb = check.opt_int_param(\n max_padding_size_mb, "max_padding_size_mb"\n )\n self.page_size_kb = check.opt_int_param(page_size_kb, "page_size_kb")\n self.dictionary_page_size_kb = check.opt_int_param(\n dictionary_page_size_kb, "dictionary_page_size_kb"\n )\n self.dictionary_encoding = check.opt_bool_param(\n dictionary_encoding, "dictionary_encoding"\n )
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n gcs_bucket_name: str,\n gcs_bucket_path: str,\n credential: "GcsDestination.HMACKey",\n format: Union[\n "GcsDestination.AvroApacheAvro",\n "GcsDestination.CSVCommaSeparatedValues",\n "GcsDestination.JSONLinesNewlineDelimitedJSON",\n "GcsDestination.ParquetColumnarStorage",\n ],\n gcs_bucket_region: Optional[str] = None,\n ):\n """Airbyte Destination for Gcs.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/gcs\n\n Args:\n name (str): The name of the destination.\n gcs_bucket_name (str): You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here.\n gcs_bucket_path (str): GCS Bucket Path string Subdirectory under the above bucket to sync the data into.\n gcs_bucket_region (Optional[str]): Select a Region of the GCS Bucket. Read more here.\n credential (GcsDestination.HMACKey): An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here.\n format (Union[GcsDestination.AvroApacheAvro, GcsDestination.CSVCommaSeparatedValues, GcsDestination.JSONLinesNewlineDelimitedJSON, GcsDestination.ParquetColumnarStorage]): Output data format. One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format.\n """\n self.gcs_bucket_name = check.str_param(gcs_bucket_name, "gcs_bucket_name")\n self.gcs_bucket_path = check.str_param(gcs_bucket_path, "gcs_bucket_path")\n self.gcs_bucket_region = check.opt_str_param(gcs_bucket_region, "gcs_bucket_region")\n self.credential = check.inst_param(credential, "credential", GcsDestination.HMACKey)\n self.format = check.inst_param(\n format,\n "format",\n (\n GcsDestination.AvroApacheAvro,\n GcsDestination.CSVCommaSeparatedValues,\n GcsDestination.JSONLinesNewlineDelimitedJSON,\n GcsDestination.ParquetColumnarStorage,\n ),\n )\n super().__init__("Gcs", name)
\n\n\n
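A sketch for ``GcsDestination``; note that, unlike the BigQuery variant, ``GcsDestination.HMACKey`` takes an explicit ``credential_type``, and each output format class wraps its own compression config. The bucket, HMAC key, and format-type strings below are placeholders/assumptions:

.. code-block:: python

    from dagster_airbyte.managed.generated.destinations import GcsDestination

    gcs_destination = GcsDestination(
        name="my_gcs_destination",
        gcs_bucket_name="my-output-bucket",
        gcs_bucket_path="airbyte-output",
        gcs_bucket_region="us-east1",
        credential=GcsDestination.HMACKey(
            credential_type="HMAC_KEY",
            hmac_key_access_id="...",
            hmac_key_secret="...",
        ),
        # CSV output with gzip compression; Avro, JSONL, and Parquet are the
        # other format classes.
        format=GcsDestination.CSVCommaSeparatedValues(
            format_type="CSV",
            compression=GcsDestination.GZIP(),
        ),
    )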
[docs]class CassandraDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(\n self,\n name: str,\n keyspace: str,\n username: str,\n password: str,\n address: str,\n port: int,\n datacenter: Optional[str] = None,\n replication: Optional[int] = None,\n ):\n """Airbyte Destination for Cassandra.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/cassandra\n\n Args:\n name (str): The name of the destination.\n keyspace (str): Default Cassandra keyspace to create data in.\n username (str): Username to use to access Cassandra.\n password (str): Password associated with Cassandra.\n address (str): Address to connect to.\n port (int): Port of Cassandra.\n datacenter (Optional[str]): Datacenter of the cassandra cluster.\n replication (Optional[int]): Indicates to how many nodes the data should be replicated to.\n """\n self.keyspace = check.str_param(keyspace, "keyspace")\n self.username = check.str_param(username, "username")\n self.password = check.str_param(password, "password")\n self.address = check.str_param(address, "address")\n self.port = check.int_param(port, "port")\n self.datacenter = check.opt_str_param(datacenter, "datacenter")\n self.replication = check.opt_int_param(replication, "replication")\n super().__init__("Cassandra", name)
\n\n\n
[docs]class FireboltDestination(GeneratedAirbyteDestination):\n
[docs] class SQLInserts:\n
[docs] @public\n def __init__(\n self,\n ):\n self.method = "SQL"
\n\n
[docs] class ExternalTableViaS3:\n
[docs] @public\n def __init__(self, s3_bucket: str, s3_region: str, aws_key_id: str, aws_key_secret: str):\n self.method = "S3"\n self.s3_bucket = check.str_param(s3_bucket, "s3_bucket")\n self.s3_region = check.str_param(s3_region, "s3_region")\n self.aws_key_id = check.str_param(aws_key_id, "aws_key_id")\n self.aws_key_secret = check.str_param(aws_key_secret, "aws_key_secret")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n username: str,\n password: str,\n database: str,\n loading_method: Union[\n "FireboltDestination.SQLInserts", "FireboltDestination.ExternalTableViaS3"\n ],\n account: Optional[str] = None,\n host: Optional[str] = None,\n engine: Optional[str] = None,\n ):\n """Airbyte Destination for Firebolt.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/firebolt\n\n Args:\n name (str): The name of the destination.\n username (str): Firebolt email address you use to login.\n password (str): Firebolt password.\n account (Optional[str]): Firebolt account to login.\n host (Optional[str]): The host name of your Firebolt database.\n database (str): The database to connect to.\n engine (Optional[str]): Engine name or url to connect to.\n loading_method (Union[FireboltDestination.SQLInserts, FireboltDestination.ExternalTableViaS3]): Loading method used to select the way data will be uploaded to Firebolt\n """\n self.username = check.str_param(username, "username")\n self.password = check.str_param(password, "password")\n self.account = check.opt_str_param(account, "account")\n self.host = check.opt_str_param(host, "host")\n self.database = check.str_param(database, "database")\n self.engine = check.opt_str_param(engine, "engine")\n self.loading_method = check.inst_param(\n loading_method,\n "loading_method",\n (FireboltDestination.SQLInserts, FireboltDestination.ExternalTableViaS3),\n )\n super().__init__("Firebolt", name)
\n\n\n
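A sketch for ``FireboltDestination`` using the S3 external-table loading method; the credentials, engine name, and staging bucket are placeholders, and ``FireboltDestination.SQLInserts()`` is the simpler alternative:

.. code-block:: python

    from dagster_airbyte.managed.generated.destinations import FireboltDestination

    firebolt_destination = FireboltDestination(
        name="my_firebolt_destination",
        username="user@example.com",
        password="...",
        database="analytics",
        engine="analytics_general_purpose",
        loading_method=FireboltDestination.ExternalTableViaS3(
            s3_bucket="firebolt-staging",
            s3_region="us-east-1",
            aws_key_id="...",
            aws_key_secret="...",
        ),
    )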
[docs]class GoogleSheetsDestination(GeneratedAirbyteDestination):\n
[docs] class AuthenticationViaGoogleOAuth:\n
[docs] @public\n def __init__(self, client_id: str, client_secret: str, refresh_token: str):\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n spreadsheet_id: str,\n credentials: "GoogleSheetsDestination.AuthenticationViaGoogleOAuth",\n ):\n """Airbyte Destination for Google Sheets.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/google-sheets\n\n Args:\n name (str): The name of the destination.\n spreadsheet_id (str): The link to your spreadsheet. See this guide for more details.\n credentials (GoogleSheetsDestination.AuthenticationViaGoogleOAuth): Google API Credentials for connecting to Google Sheets and Google Drive APIs\n """\n self.spreadsheet_id = check.str_param(spreadsheet_id, "spreadsheet_id")\n self.credentials = check.inst_param(\n credentials, "credentials", GoogleSheetsDestination.AuthenticationViaGoogleOAuth\n )\n super().__init__("Google Sheets", name)
\n\n\n
[docs]class DatabricksDestination(GeneratedAirbyteDestination):\n
[docs] class AmazonS3:\n
[docs] @public\n def __init__(\n self,\n data_source_type: str,\n s3_bucket_name: str,\n s3_bucket_path: str,\n s3_bucket_region: str,\n s3_access_key_id: str,\n s3_secret_access_key: str,\n file_name_pattern: Optional[str] = None,\n ):\n self.data_source_type = check.str_param(data_source_type, "data_source_type")\n self.s3_bucket_name = check.str_param(s3_bucket_name, "s3_bucket_name")\n self.s3_bucket_path = check.str_param(s3_bucket_path, "s3_bucket_path")\n self.s3_bucket_region = check.str_param(s3_bucket_region, "s3_bucket_region")\n self.s3_access_key_id = check.str_param(s3_access_key_id, "s3_access_key_id")\n self.s3_secret_access_key = check.str_param(\n s3_secret_access_key, "s3_secret_access_key"\n )\n self.file_name_pattern = check.opt_str_param(file_name_pattern, "file_name_pattern")
\n\n
[docs] class AzureBlobStorage:\n
[docs] @public\n def __init__(\n self,\n data_source_type: str,\n azure_blob_storage_account_name: str,\n azure_blob_storage_container_name: str,\n azure_blob_storage_sas_token: str,\n azure_blob_storage_endpoint_domain_name: Optional[str] = None,\n ):\n self.data_source_type = check.str_param(data_source_type, "data_source_type")\n self.azure_blob_storage_endpoint_domain_name = check.opt_str_param(\n azure_blob_storage_endpoint_domain_name, "azure_blob_storage_endpoint_domain_name"\n )\n self.azure_blob_storage_account_name = check.str_param(\n azure_blob_storage_account_name, "azure_blob_storage_account_name"\n )\n self.azure_blob_storage_container_name = check.str_param(\n azure_blob_storage_container_name, "azure_blob_storage_container_name"\n )\n self.azure_blob_storage_sas_token = check.str_param(\n azure_blob_storage_sas_token, "azure_blob_storage_sas_token"\n )
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n accept_terms: bool,\n databricks_server_hostname: str,\n databricks_http_path: str,\n databricks_personal_access_token: str,\n data_source: Union[\n "DatabricksDestination.AmazonS3", "DatabricksDestination.AzureBlobStorage"\n ],\n databricks_port: Optional[str] = None,\n database_schema: Optional[str] = None,\n purge_staging_data: Optional[bool] = None,\n ):\n """Airbyte Destination for Databricks.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/databricks\n\n Args:\n name (str): The name of the destination.\n accept_terms (bool): You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector.\n databricks_server_hostname (str): Databricks Cluster Server Hostname.\n databricks_http_path (str): Databricks Cluster HTTP Path.\n databricks_port (Optional[str]): Databricks Cluster Port.\n databricks_personal_access_token (str): Databricks Personal Access Token for making authenticated requests.\n database_schema (Optional[str]): The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public".\n data_source (Union[DatabricksDestination.AmazonS3, DatabricksDestination.AzureBlobStorage]): Storage on which the delta lake is built.\n purge_staging_data (Optional[bool]): Default to 'true'. Switch it to 'false' for debugging purpose.\n """\n self.accept_terms = check.bool_param(accept_terms, "accept_terms")\n self.databricks_server_hostname = check.str_param(\n databricks_server_hostname, "databricks_server_hostname"\n )\n self.databricks_http_path = check.str_param(databricks_http_path, "databricks_http_path")\n self.databricks_port = check.opt_str_param(databricks_port, "databricks_port")\n self.databricks_personal_access_token = check.str_param(\n databricks_personal_access_token, "databricks_personal_access_token"\n )\n self.database_schema = check.opt_str_param(database_schema, "database_schema")\n self.data_source = check.inst_param(\n data_source,\n "data_source",\n (DatabricksDestination.AmazonS3, DatabricksDestination.AzureBlobStorage),\n )\n self.purge_staging_data = check.opt_bool_param(purge_staging_data, "purge_staging_data")\n super().__init__("Databricks", name)
\n\n\n
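A sketch for ``DatabricksDestination`` backed by S3 staging storage; the workspace hostname, HTTP path, token, and bucket values are placeholders, and the ``data_source_type`` string is an assumption about what the Airbyte spec expects rather than a documented constant:

.. code-block:: python

    from dagster_airbyte.managed.generated.destinations import DatabricksDestination

    databricks_destination = DatabricksDestination(
        name="my_databricks_destination",
        accept_terms=True,
        databricks_server_hostname="abc-12345678-wxyz.cloud.databricks.com",
        databricks_http_path="sql/protocolv1/o/1234567890123456/1234-567890-abcde123",
        databricks_personal_access_token="...",
        data_source=DatabricksDestination.AmazonS3(
            data_source_type="S3_STORAGE",  # assumed spec value for S3-backed storage
            s3_bucket_name="databricks-staging",
            s3_bucket_path="airbyte",
            s3_bucket_region="us-east-1",
            s3_access_key_id="...",
            s3_secret_access_key="...",
        ),
        database_schema="public",
    )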
[docs]class BigqueryDenormalizedDestination(GeneratedAirbyteDestination):\n
[docs] class StandardInserts:\n
[docs] @public\n def __init__(\n self,\n ):\n self.method = "Standard"
\n\n
[docs] class HMACKey:\n
[docs] @public\n def __init__(self, hmac_key_access_id: str, hmac_key_secret: str):\n self.credential_type = "HMAC_KEY"\n self.hmac_key_access_id = check.str_param(hmac_key_access_id, "hmac_key_access_id")\n self.hmac_key_secret = check.str_param(hmac_key_secret, "hmac_key_secret")
\n\n
[docs] class GCSStaging:\n
[docs] @public\n def __init__(\n self,\n credential: "BigqueryDenormalizedDestination.HMACKey",\n gcs_bucket_name: str,\n gcs_bucket_path: str,\n keep_files_in_gcs_bucket: Optional[str] = None,\n ):\n self.method = "GCS Staging"\n self.credential = check.inst_param(\n credential, "credential", BigqueryDenormalizedDestination.HMACKey\n )\n self.gcs_bucket_name = check.str_param(gcs_bucket_name, "gcs_bucket_name")\n self.gcs_bucket_path = check.str_param(gcs_bucket_path, "gcs_bucket_path")\n self.keep_files_in_gcs_bucket = check.opt_str_param(\n keep_files_in_gcs_bucket, "keep_files_in_gcs_bucket"\n )
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n project_id: str,\n dataset_id: str,\n loading_method: Union[\n "BigqueryDenormalizedDestination.StandardInserts",\n "BigqueryDenormalizedDestination.GCSStaging",\n ],\n credentials_json: Optional[str] = None,\n dataset_location: Optional[str] = None,\n big_query_client_buffer_size_mb: Optional[int] = None,\n ):\n """Airbyte Destination for Bigquery Denormalized.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/bigquery\n\n Args:\n name (str): The name of the destination.\n project_id (str): The GCP project ID for the project containing the target BigQuery dataset. Read more here.\n dataset_id (str): The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.\n loading_method (Union[BigqueryDenormalizedDestination.StandardInserts, BigqueryDenormalizedDestination.GCSStaging]): Loading method used to send select the way data will be uploaded to BigQuery. Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging. GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here.\n credentials_json (Optional[str]): The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.\n dataset_location (Optional[str]): The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here.\n big_query_client_buffer_size_mb (Optional[int]): Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.\n """\n self.project_id = check.str_param(project_id, "project_id")\n self.dataset_id = check.str_param(dataset_id, "dataset_id")\n self.loading_method = check.inst_param(\n loading_method,\n "loading_method",\n (\n BigqueryDenormalizedDestination.StandardInserts,\n BigqueryDenormalizedDestination.GCSStaging,\n ),\n )\n self.credentials_json = check.opt_str_param(credentials_json, "credentials_json")\n self.dataset_location = check.opt_str_param(dataset_location, "dataset_location")\n self.big_query_client_buffer_size_mb = check.opt_int_param(\n big_query_client_buffer_size_mb, "big_query_client_buffer_size_mb"\n )\n super().__init__("Bigquery Denormalized", name)
\n\n\n
[docs]class SqliteDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(self, name: str, destination_path: str):\n """Airbyte Destination for Sqlite.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/sqlite\n\n Args:\n name (str): The name of the destination.\n destination_path (str): Path to the sqlite.db file. The file will be placed inside that local mount. For more information check out our docs\n """\n self.destination_path = check.str_param(destination_path, "destination_path")\n super().__init__("Sqlite", name)
\n\n\n
[docs]class MongodbDestination(GeneratedAirbyteDestination):\n
[docs] class StandaloneMongoDbInstance:\n
[docs] @public\n def __init__(self, instance: str, host: str, port: int, tls: Optional[bool] = None):\n self.instance = check.str_param(instance, "instance")\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.tls = check.opt_bool_param(tls, "tls")
\n\n
[docs] class ReplicaSet:\n
[docs] @public\n def __init__(self, instance: str, server_addresses: str, replica_set: Optional[str] = None):\n self.instance = check.str_param(instance, "instance")\n self.server_addresses = check.str_param(server_addresses, "server_addresses")\n self.replica_set = check.opt_str_param(replica_set, "replica_set")
\n\n
[docs] class MongoDBAtlas:\n
[docs] @public\n def __init__(self, instance: str, cluster_url: str):\n self.instance = check.str_param(instance, "instance")\n self.cluster_url = check.str_param(cluster_url, "cluster_url")
\n\n
[docs] class None_:\n
[docs] @public\n def __init__(\n self,\n ):\n self.authorization = "none"
\n\n
[docs] class LoginPassword:\n
[docs] @public\n def __init__(self, username: str, password: str):\n self.authorization = "login/password"\n self.username = check.str_param(username, "username")\n self.password = check.str_param(password, "password")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n instance_type: Union[\n "MongodbDestination.StandaloneMongoDbInstance",\n "MongodbDestination.ReplicaSet",\n "MongodbDestination.MongoDBAtlas",\n ],\n database: str,\n auth_type: Union["MongodbDestination.None_", "MongodbDestination.LoginPassword"],\n ):\n r"""Airbyte Destination for Mongodb.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/mongodb\n\n Args:\n name (str): The name of the destination.\n instance_type (Union[MongodbDestination.StandaloneMongoDbInstance, MongodbDestination.ReplicaSet, MongodbDestination.MongoDBAtlas]): MongoDb instance to connect to. For MongoDB Atlas and Replica Set TLS connection is used by default.\n database (str): Name of the database.\n auth_type (Union[MongodbDestination.None\\\\_, MongodbDestination.LoginPassword]): Authorization type.\n """\n self.instance_type = check.inst_param(\n instance_type,\n "instance_type",\n (\n MongodbDestination.StandaloneMongoDbInstance,\n MongodbDestination.ReplicaSet,\n MongodbDestination.MongoDBAtlas,\n ),\n )\n self.database = check.str_param(database, "database")\n self.auth_type = check.inst_param(\n auth_type, "auth_type", (MongodbDestination.None_, MongodbDestination.LoginPassword)\n )\n super().__init__("Mongodb", name)
\n\n\n
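A sketch for ``MongodbDestination`` pointed at a MongoDB Atlas cluster; the cluster URL and credentials are placeholders, and the ``instance`` string is an assumption about the value the Airbyte spec expects for Atlas:

.. code-block:: python

    from dagster_airbyte.managed.generated.destinations import MongodbDestination

    mongodb_destination = MongodbDestination(
        name="my_mongodb_destination",
        # StandaloneMongoDbInstance and ReplicaSet are the other instance types.
        instance_type=MongodbDestination.MongoDBAtlas(
            instance="atlas",
            cluster_url="mongodb+srv://cluster0.example.mongodb.net",
        ),
        database="airbyte",
        auth_type=MongodbDestination.LoginPassword(
            username="airbyte",
            password="...",
        ),
    )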
[docs]class RocksetDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(self, name: str, api_key: str, workspace: str, api_server: Optional[str] = None):\n """Airbyte Destination for Rockset.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/rockset\n\n Args:\n name (str): The name of the destination.\n api_key (str): Rockset API key.\n workspace (str): The Rockset workspace in which collections will be created and written to.\n api_server (Optional[str]): Rockset API URL.\n """\n self.api_key = check.str_param(api_key, "api_key")\n self.workspace = check.str_param(workspace, "workspace")\n self.api_server = check.opt_str_param(api_server, "api_server")\n super().__init__("Rockset", name)
\n\n\n
[docs]class OracleDestination(GeneratedAirbyteDestination):\n
[docs] class Unencrypted:\n
[docs] @public\n def __init__(\n self,\n ):\n self.encryption_method = "unencrypted"
\n\n
[docs] class NativeNetworkEncryptionNNE:\n
[docs] @public\n def __init__(self, encryption_algorithm: Optional[str] = None):\n self.encryption_method = "client_nne"\n self.encryption_algorithm = check.opt_str_param(\n encryption_algorithm, "encryption_algorithm"\n )
\n\n
[docs] class TLSEncryptedVerifyCertificate:\n
[docs] @public\n def __init__(self, ssl_certificate: str):\n self.encryption_method = "encrypted_verify_certificate"\n self.ssl_certificate = check.str_param(ssl_certificate, "ssl_certificate")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n sid: str,\n username: str,\n encryption: Union[\n "OracleDestination.Unencrypted",\n "OracleDestination.NativeNetworkEncryptionNNE",\n "OracleDestination.TLSEncryptedVerifyCertificate",\n ],\n password: Optional[str] = None,\n jdbc_url_params: Optional[str] = None,\n schema: Optional[str] = None,\n ):\n """Airbyte Destination for Oracle.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/oracle\n\n Args:\n name (str): The name of the destination.\n host (str): The hostname of the database.\n port (int): The port of the database.\n sid (str): The System Identifier uniquely distinguishes the instance from any other instance on the same computer.\n username (str): The username to access the database. This user must have CREATE USER privileges in the database.\n password (Optional[str]): The password associated with the username.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).\n schema (Optional[str]): The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema.\n encryption (Union[OracleDestination.Unencrypted, OracleDestination.NativeNetworkEncryptionNNE, OracleDestination.TLSEncryptedVerifyCertificate]): The encryption method which is used when communicating with the database.\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.sid = check.str_param(sid, "sid")\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n self.schema = check.opt_str_param(schema, "schema")\n self.encryption = check.inst_param(\n encryption,\n "encryption",\n (\n OracleDestination.Unencrypted,\n OracleDestination.NativeNetworkEncryptionNNE,\n OracleDestination.TLSEncryptedVerifyCertificate,\n ),\n )\n super().__init__("Oracle", name)
\n\n\n
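A minimal sketch for ``OracleDestination``; the host, SID, and credentials are placeholders, and ``Unencrypted()`` is used only to keep the sketch small (NNE or TLS encryption would normally be preferred):

.. code-block:: python

    from dagster_airbyte.managed.generated.destinations import OracleDestination

    oracle_destination = OracleDestination(
        name="my_oracle_destination",
        host="oracle.example.com",
        port=1521,
        sid="ORCL",
        username="airbyte",
        password="...",
        schema="airbyte",
        encryption=OracleDestination.Unencrypted(),
    )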
[docs]class CsvDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(self, name: str, destination_path: str):\n """Airbyte Destination for Csv.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/local-csv\n\n Args:\n name (str): The name of the destination.\n destination_path (str): Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs\n """\n self.destination_path = check.str_param(destination_path, "destination_path")\n super().__init__("Csv", name)
\n\n\n
[docs]class S3Destination(GeneratedAirbyteDestination):\n
[docs] class NoCompression:\n
[docs] @public\n def __init__(self, compression_type: Optional[str] = None):\n self.compression_type = check.opt_str_param(compression_type, "compression_type")
\n\n
[docs] class Deflate:\n
[docs] @public\n def __init__(self, codec: str, compression_level: int):\n self.codec = check.str_param(codec, "codec")\n self.compression_level = check.int_param(compression_level, "compression_level")
\n\n
[docs] class Bzip2:\n
[docs] @public\n def __init__(self, codec: str):\n self.codec = check.str_param(codec, "codec")
\n\n
[docs] class Xz:\n
[docs] @public\n def __init__(self, codec: str, compression_level: int):\n self.codec = check.str_param(codec, "codec")\n self.compression_level = check.int_param(compression_level, "compression_level")
\n\n
[docs] class Zstandard:\n
[docs] @public\n def __init__(\n self, codec: str, compression_level: int, include_checksum: Optional[bool] = None\n ):\n self.codec = check.str_param(codec, "codec")\n self.compression_level = check.int_param(compression_level, "compression_level")\n self.include_checksum = check.opt_bool_param(include_checksum, "include_checksum")
\n\n
[docs] class Snappy:\n
[docs] @public\n def __init__(self, codec: str):\n self.codec = check.str_param(codec, "codec")
\n\n
[docs] class AvroApacheAvro:\n
[docs] @public\n def __init__(\n self,\n format_type: str,\n compression_codec: Union[\n "S3Destination.NoCompression",\n "S3Destination.Deflate",\n "S3Destination.Bzip2",\n "S3Destination.Xz",\n "S3Destination.Zstandard",\n "S3Destination.Snappy",\n ],\n ):\n self.format_type = check.str_param(format_type, "format_type")\n self.compression_codec = check.inst_param(\n compression_codec,\n "compression_codec",\n (\n S3Destination.NoCompression,\n S3Destination.Deflate,\n S3Destination.Bzip2,\n S3Destination.Xz,\n S3Destination.Zstandard,\n S3Destination.Snappy,\n ),\n )
\n\n
[docs] class GZIP:\n
[docs] @public\n def __init__(self, compression_type: Optional[str] = None):\n self.compression_type = check.opt_str_param(compression_type, "compression_type")
\n\n
[docs] class CSVCommaSeparatedValues:\n
[docs] @public\n def __init__(\n self,\n format_type: str,\n flattening: str,\n compression: Union["S3Destination.NoCompression", "S3Destination.GZIP"],\n ):\n self.format_type = check.str_param(format_type, "format_type")\n self.flattening = check.str_param(flattening, "flattening")\n self.compression = check.inst_param(\n compression, "compression", (S3Destination.NoCompression, S3Destination.GZIP)\n )
\n\n
[docs] class JSONLinesNewlineDelimitedJSON:\n
[docs] @public\n def __init__(\n self,\n format_type: str,\n compression: Union["S3Destination.NoCompression", "S3Destination.GZIP"],\n ):\n self.format_type = check.str_param(format_type, "format_type")\n self.compression = check.inst_param(\n compression, "compression", (S3Destination.NoCompression, S3Destination.GZIP)\n )
\n\n
[docs] class ParquetColumnarStorage:\n
[docs] @public\n def __init__(\n self,\n format_type: str,\n compression_codec: Optional[str] = None,\n block_size_mb: Optional[int] = None,\n max_padding_size_mb: Optional[int] = None,\n page_size_kb: Optional[int] = None,\n dictionary_page_size_kb: Optional[int] = None,\n dictionary_encoding: Optional[bool] = None,\n ):\n self.format_type = check.str_param(format_type, "format_type")\n self.compression_codec = check.opt_str_param(compression_codec, "compression_codec")\n self.block_size_mb = check.opt_int_param(block_size_mb, "block_size_mb")\n self.max_padding_size_mb = check.opt_int_param(\n max_padding_size_mb, "max_padding_size_mb"\n )\n self.page_size_kb = check.opt_int_param(page_size_kb, "page_size_kb")\n self.dictionary_page_size_kb = check.opt_int_param(\n dictionary_page_size_kb, "dictionary_page_size_kb"\n )\n self.dictionary_encoding = check.opt_bool_param(\n dictionary_encoding, "dictionary_encoding"\n )
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n s3_bucket_name: str,\n s3_bucket_path: str,\n s3_bucket_region: str,\n format: Union[\n "S3Destination.AvroApacheAvro",\n "S3Destination.CSVCommaSeparatedValues",\n "S3Destination.JSONLinesNewlineDelimitedJSON",\n "S3Destination.ParquetColumnarStorage",\n ],\n access_key_id: Optional[str] = None,\n secret_access_key: Optional[str] = None,\n s3_endpoint: Optional[str] = None,\n s3_path_format: Optional[str] = None,\n file_name_pattern: Optional[str] = None,\n ):\n """Airbyte Destination for S3.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/s3\n\n Args:\n name (str): The name of the destination.\n access_key_id (Optional[str]): The access key ID to access the S3 bucket. Airbyte requires Read and Write permissions to the given bucket. Read more here.\n secret_access_key (Optional[str]): The corresponding secret to the access key ID. Read more here\n s3_bucket_name (str): The name of the S3 bucket. Read more here.\n s3_bucket_path (str): Directory under the S3 bucket where data will be written. Read more here\n s3_bucket_region (str): The region of the S3 bucket. See here for all region codes.\n format (Union[S3Destination.AvroApacheAvro, S3Destination.CSVCommaSeparatedValues, S3Destination.JSONLinesNewlineDelimitedJSON, S3Destination.ParquetColumnarStorage]): Format of the data output. See here for more details\n s3_endpoint (Optional[str]): Your S3 endpoint url. Read more here\n s3_path_format (Optional[str]): Format string on how data will be organized inside the S3 bucket directory. Read more here\n file_name_pattern (Optional[str]): The pattern allows you to set the file-name format for the S3 staging file(s)\n """\n self.access_key_id = check.opt_str_param(access_key_id, "access_key_id")\n self.secret_access_key = check.opt_str_param(secret_access_key, "secret_access_key")\n self.s3_bucket_name = check.str_param(s3_bucket_name, "s3_bucket_name")\n self.s3_bucket_path = check.str_param(s3_bucket_path, "s3_bucket_path")\n self.s3_bucket_region = check.str_param(s3_bucket_region, "s3_bucket_region")\n self.format = check.inst_param(\n format,\n "format",\n (\n S3Destination.AvroApacheAvro,\n S3Destination.CSVCommaSeparatedValues,\n S3Destination.JSONLinesNewlineDelimitedJSON,\n S3Destination.ParquetColumnarStorage,\n ),\n )\n self.s3_endpoint = check.opt_str_param(s3_endpoint, "s3_endpoint")\n self.s3_path_format = check.opt_str_param(s3_path_format, "s3_path_format")\n self.file_name_pattern = check.opt_str_param(file_name_pattern, "file_name_pattern")\n super().__init__("S3", name)
\n\n\n
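A minimal sketch combining the nested format classes above with the top-level constructor; bucket and credential values are placeholders, and the `format_type` string is an assumption that must match the Airbyte S3 connector spec:

from dagster_airbyte.managed.generated.destinations import S3Destination

# Placeholder bucket/credential values; "JSONL" is assumed to be the spec's
# format_type label for newline-delimited JSON.
s3_destination = S3Destination(
    name="my_s3_destination",
    s3_bucket_name="my-company-raw-data",
    s3_bucket_path="airbyte/raw",
    s3_bucket_region="us-east-1",
    format=S3Destination.JSONLinesNewlineDelimitedJSON(
        format_type="JSONL",
        compression=S3Destination.GZIP(),
    ),
    access_key_id="...",
    secret_access_key="...",
)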
[docs]class AwsDatalakeDestination(GeneratedAirbyteDestination):\n
[docs] class IAMRole:\n
[docs] @public\n def __init__(self, role_arn: str):\n self.credentials_title = "IAM Role"\n self.role_arn = check.str_param(role_arn, "role_arn")
\n\n
[docs] class IAMUser:\n
[docs] @public\n def __init__(self, aws_access_key_id: str, aws_secret_access_key: str):\n self.credentials_title = "IAM User"\n self.aws_access_key_id = check.str_param(aws_access_key_id, "aws_access_key_id")\n self.aws_secret_access_key = check.str_param(\n aws_secret_access_key, "aws_secret_access_key"\n )
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n region: str,\n credentials: Union["AwsDatalakeDestination.IAMRole", "AwsDatalakeDestination.IAMUser"],\n bucket_name: str,\n bucket_prefix: str,\n aws_account_id: Optional[str] = None,\n lakeformation_database_name: Optional[str] = None,\n ):\n """Airbyte Destination for Aws Datalake.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/aws-datalake\n\n Args:\n name (str): The name of the destination.\n aws_account_id (Optional[str]): target aws account id\n region (str): Region name\n credentials (Union[AwsDatalakeDestination.IAMRole, AwsDatalakeDestination.IAMUser]): Choose How to Authenticate to AWS.\n bucket_name (str): Name of the bucket\n bucket_prefix (str): S3 prefix\n lakeformation_database_name (Optional[str]): Which database to use\n """\n self.aws_account_id = check.opt_str_param(aws_account_id, "aws_account_id")\n self.region = check.str_param(region, "region")\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (AwsDatalakeDestination.IAMRole, AwsDatalakeDestination.IAMUser),\n )\n self.bucket_name = check.str_param(bucket_name, "bucket_name")\n self.bucket_prefix = check.str_param(bucket_prefix, "bucket_prefix")\n self.lakeformation_database_name = check.opt_str_param(\n lakeformation_database_name, "lakeformation_database_name"\n )\n super().__init__("Aws Datalake", name)
\n\n\n
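A minimal sketch of the constructor above using role-based credentials; the ARN, bucket, and region are placeholders, and `IAMUser(...)` could be passed instead of `IAMRole(...)`:

from dagster_airbyte.managed.generated.destinations import AwsDatalakeDestination

# Placeholder account values; swap in IAMUser(...) for key-based authentication.
datalake_destination = AwsDatalakeDestination(
    name="my_datalake_destination",
    region="us-west-2",
    credentials=AwsDatalakeDestination.IAMRole(
        role_arn="arn:aws:iam::123456789012:role/airbyte-writer",
    ),
    bucket_name="my-datalake-bucket",
    bucket_prefix="airbyte",
)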
[docs]class MssqlDestination(GeneratedAirbyteDestination):\n
[docs] class Unencrypted:\n
[docs] @public\n def __init__(\n self,\n ):\n self.ssl_method = "unencrypted"
\n\n
[docs] class EncryptedTrustServerCertificate:\n
[docs] @public\n def __init__(\n self,\n ):\n self.ssl_method = "encrypted_trust_server_certificate"
\n\n
[docs] class EncryptedVerifyCertificate:\n
[docs] @public\n def __init__(self, hostNameInCertificate: Optional[str] = None):\n self.ssl_method = "encrypted_verify_certificate"\n self.hostNameInCertificate = check.opt_str_param(\n hostNameInCertificate, "hostNameInCertificate"\n )
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n database: str,\n schema: str,\n username: str,\n ssl_method: Union[\n "MssqlDestination.Unencrypted",\n "MssqlDestination.EncryptedTrustServerCertificate",\n "MssqlDestination.EncryptedVerifyCertificate",\n ],\n password: Optional[str] = None,\n jdbc_url_params: Optional[str] = None,\n ):\n """Airbyte Destination for Mssql.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/mssql\n\n Args:\n name (str): The name of the destination.\n host (str): The host name of the MSSQL database.\n port (int): The port of the MSSQL database.\n database (str): The name of the MSSQL database.\n schema (str): The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".\n username (str): The username which is used to access the database.\n password (Optional[str]): The password associated with this username.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).\n ssl_method (Union[MssqlDestination.Unencrypted, MssqlDestination.EncryptedTrustServerCertificate, MssqlDestination.EncryptedVerifyCertificate]): The encryption method which is used to communicate with the database.\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.database = check.str_param(database, "database")\n self.schema = check.str_param(schema, "schema")\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n self.ssl_method = check.inst_param(\n ssl_method,\n "ssl_method",\n (\n MssqlDestination.Unencrypted,\n MssqlDestination.EncryptedTrustServerCertificate,\n MssqlDestination.EncryptedVerifyCertificate,\n ),\n )\n super().__init__("Mssql", name)
\n\n\n
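A minimal sketch of the constructor above; connection values are placeholders, and `EncryptedTrustServerCertificate()` takes no arguments per the nested class shown:

from dagster_airbyte.managed.generated.destinations import MssqlDestination

# Placeholder connection values; any of the three ssl_method classes could be used.
mssql_destination = MssqlDestination(
    name="my_mssql_destination",
    host="mssql.example.internal",
    port=1433,
    database="analytics",
    schema="public",
    username="airbyte_user",
    password="***",
    ssl_method=MssqlDestination.EncryptedTrustServerCertificate(),
)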
[docs]class PubsubDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(self, name: str, project_id: str, topic_id: str, credentials_json: str):\n """Airbyte Destination for Pubsub.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/pubsub\n\n Args:\n name (str): The name of the destination.\n project_id (str): The GCP project ID for the project containing the target PubSub.\n topic_id (str): The PubSub topic ID in the given GCP project ID.\n credentials_json (str): The contents of the JSON service account key. Check out the docs if you need help generating this key.\n """\n self.project_id = check.str_param(project_id, "project_id")\n self.topic_id = check.str_param(topic_id, "topic_id")\n self.credentials_json = check.str_param(credentials_json, "credentials_json")\n super().__init__("Pubsub", name)
\n\n\n
[docs]class R2Destination(GeneratedAirbyteDestination):\n
[docs] class NoCompression:\n
[docs] @public\n def __init__(self, compression_type: Optional[str] = None):\n self.compression_type = check.opt_str_param(compression_type, "compression_type")
\n\n
[docs] class Deflate:\n
[docs] @public\n def __init__(self, codec: str, compression_level: int):\n self.codec = check.str_param(codec, "codec")\n self.compression_level = check.int_param(compression_level, "compression_level")
\n\n
[docs] class Bzip2:\n
[docs] @public\n def __init__(self, codec: str):\n self.codec = check.str_param(codec, "codec")
\n\n
[docs] class Xz:\n
[docs] @public\n def __init__(self, codec: str, compression_level: int):\n self.codec = check.str_param(codec, "codec")\n self.compression_level = check.int_param(compression_level, "compression_level")
\n\n
[docs] class Zstandard:\n
[docs] @public\n def __init__(\n self, codec: str, compression_level: int, include_checksum: Optional[bool] = None\n ):\n self.codec = check.str_param(codec, "codec")\n self.compression_level = check.int_param(compression_level, "compression_level")\n self.include_checksum = check.opt_bool_param(include_checksum, "include_checksum")
\n\n
[docs] class Snappy:\n
[docs] @public\n def __init__(self, codec: str):\n self.codec = check.str_param(codec, "codec")
\n\n
[docs] class AvroApacheAvro:\n
[docs] @public\n def __init__(\n self,\n format_type: str,\n compression_codec: Union[\n "R2Destination.NoCompression",\n "R2Destination.Deflate",\n "R2Destination.Bzip2",\n "R2Destination.Xz",\n "R2Destination.Zstandard",\n "R2Destination.Snappy",\n ],\n ):\n self.format_type = check.str_param(format_type, "format_type")\n self.compression_codec = check.inst_param(\n compression_codec,\n "compression_codec",\n (\n R2Destination.NoCompression,\n R2Destination.Deflate,\n R2Destination.Bzip2,\n R2Destination.Xz,\n R2Destination.Zstandard,\n R2Destination.Snappy,\n ),\n )
\n\n
[docs] class GZIP:\n
[docs] @public\n def __init__(self, compression_type: Optional[str] = None):\n self.compression_type = check.opt_str_param(compression_type, "compression_type")
\n\n
[docs] class CSVCommaSeparatedValues:\n
[docs] @public\n def __init__(\n self,\n format_type: str,\n flattening: str,\n compression: Union["R2Destination.NoCompression", "R2Destination.GZIP"],\n ):\n self.format_type = check.str_param(format_type, "format_type")\n self.flattening = check.str_param(flattening, "flattening")\n self.compression = check.inst_param(\n compression, "compression", (R2Destination.NoCompression, R2Destination.GZIP)\n )
\n\n
[docs] class JSONLinesNewlineDelimitedJSON:\n
[docs] @public\n def __init__(\n self,\n format_type: str,\n compression: Union["R2Destination.NoCompression", "R2Destination.GZIP"],\n ):\n self.format_type = check.str_param(format_type, "format_type")\n self.compression = check.inst_param(\n compression, "compression", (R2Destination.NoCompression, R2Destination.GZIP)\n )
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n account_id: str,\n access_key_id: str,\n secret_access_key: str,\n s3_bucket_name: str,\n s3_bucket_path: str,\n format: Union[\n "R2Destination.AvroApacheAvro",\n "R2Destination.CSVCommaSeparatedValues",\n "R2Destination.JSONLinesNewlineDelimitedJSON",\n ],\n s3_path_format: Optional[str] = None,\n file_name_pattern: Optional[str] = None,\n ):\n """Airbyte Destination for R2.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/r2\n\n Args:\n name (str): The name of the destination.\n account_id (str): Cloudflare account ID\n access_key_id (str): The access key ID to access the R2 bucket. Airbyte requires Read and Write permissions to the given bucket. Read more here.\n secret_access_key (str): The corresponding secret to the access key ID. Read more here\n s3_bucket_name (str): The name of the R2 bucket. Read more here.\n s3_bucket_path (str): Directory under the R2 bucket where data will be written.\n format (Union[R2Destination.AvroApacheAvro, R2Destination.CSVCommaSeparatedValues, R2Destination.JSONLinesNewlineDelimitedJSON]): Format of the data output. See here for more details\n s3_path_format (Optional[str]): Format string on how data will be organized inside the R2 bucket directory. Read more here\n file_name_pattern (Optional[str]): The pattern allows you to set the file-name format for the R2 staging file(s)\n """\n self.account_id = check.str_param(account_id, "account_id")\n self.access_key_id = check.str_param(access_key_id, "access_key_id")\n self.secret_access_key = check.str_param(secret_access_key, "secret_access_key")\n self.s3_bucket_name = check.str_param(s3_bucket_name, "s3_bucket_name")\n self.s3_bucket_path = check.str_param(s3_bucket_path, "s3_bucket_path")\n self.format = check.inst_param(\n format,\n "format",\n (\n R2Destination.AvroApacheAvro,\n R2Destination.CSVCommaSeparatedValues,\n R2Destination.JSONLinesNewlineDelimitedJSON,\n ),\n )\n self.s3_path_format = check.opt_str_param(s3_path_format, "s3_path_format")\n self.file_name_pattern = check.opt_str_param(file_name_pattern, "file_name_pattern")\n super().__init__("R2", name)
\n\n\n
[docs]class JdbcDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(\n self,\n name: str,\n username: str,\n jdbc_url: str,\n password: Optional[str] = None,\n schema: Optional[str] = None,\n ):\n """Airbyte Destination for Jdbc.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/postgres\n\n Args:\n name (str): The name of the destination.\n username (str): The username which is used to access the database.\n password (Optional[str]): The password associated with this username.\n jdbc_url (str): JDBC formatted url. See the standard here.\n schema (Optional[str]): If you leave the schema unspecified, JDBC defaults to a schema named "public".\n """\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.jdbc_url = check.str_param(jdbc_url, "jdbc_url")\n self.schema = check.opt_str_param(schema, "schema")\n super().__init__("Jdbc", name)
\n\n\n
[docs]class KeenDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(\n self, name: str, project_id: str, api_key: str, infer_timestamp: Optional[bool] = None\n ):\n """Airbyte Destination for Keen.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/keen\n\n Args:\n name (str): The name of the destination.\n project_id (str): To get the Keen Project ID, navigate to the Access tab from the left-hand side panel and check the Project Details section.\n api_key (str): To get the Keen Master API Key, navigate to the Access tab from the left-hand side panel and check the Project Details section.\n infer_timestamp (Optional[bool]): Allow the connector to guess the keen.timestamp value based on the streamed data.\n """\n self.project_id = check.str_param(project_id, "project_id")\n self.api_key = check.str_param(api_key, "api_key")\n self.infer_timestamp = check.opt_bool_param(infer_timestamp, "infer_timestamp")\n super().__init__("Keen", name)
\n\n\n
[docs]class TidbDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n database: str,\n username: str,\n password: Optional[str] = None,\n ssl: Optional[bool] = None,\n jdbc_url_params: Optional[str] = None,\n ):\n """Airbyte Destination for Tidb.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/tidb\n\n Args:\n name (str): The name of the destination.\n host (str): Hostname of the database.\n port (int): Port of the database.\n database (str): Name of the database.\n username (str): Username to use to access the database.\n password (Optional[str]): Password associated with the username.\n ssl (Optional[bool]): Encrypt data using SSL.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.database = check.str_param(database, "database")\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.ssl = check.opt_bool_param(ssl, "ssl")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n super().__init__("Tidb", name)
\n\n\n
[docs]class FirestoreDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(self, name: str, project_id: str, credentials_json: Optional[str] = None):\n """Airbyte Destination for Firestore.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/firestore\n\n Args:\n name (str): The name of the destination.\n project_id (str): The GCP project ID for the project containing the target Firestore database.\n credentials_json (Optional[str]): The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.\n """\n self.project_id = check.str_param(project_id, "project_id")\n self.credentials_json = check.opt_str_param(credentials_json, "credentials_json")\n super().__init__("Firestore", name)
\n\n\n
[docs]class ScyllaDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(\n self,\n name: str,\n keyspace: str,\n username: str,\n password: str,\n address: str,\n port: int,\n replication: Optional[int] = None,\n ):\n """Airbyte Destination for Scylla.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/scylla\n\n Args:\n name (str): The name of the destination.\n keyspace (str): Default Scylla keyspace to create data in.\n username (str): Username to use to access Scylla.\n password (str): Password associated with the username.\n address (str): Address to connect to.\n port (int): Port of Scylla.\n replication (Optional[int]): Indicates how many nodes the data should be replicated to.\n """\n self.keyspace = check.str_param(keyspace, "keyspace")\n self.username = check.str_param(username, "username")\n self.password = check.str_param(password, "password")\n self.address = check.str_param(address, "address")\n self.port = check.int_param(port, "port")\n self.replication = check.opt_int_param(replication, "replication")\n super().__init__("Scylla", name)
\n\n\n
[docs]class RedisDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(\n self, name: str, host: str, port: int, username: str, password: str, cache_type: str\n ):\n """Airbyte Destination for Redis.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/redis\n\n Args:\n name (str): The name of the destination.\n host (str): Redis host to connect to.\n port (int): Port of Redis.\n username (str): Username associated with Redis.\n password (str): Password associated with Redis.\n cache_type (str): Redis cache type to store data in.\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.username = check.str_param(username, "username")\n self.password = check.str_param(password, "password")\n self.cache_type = check.str_param(cache_type, "cache_type")\n super().__init__("Redis", name)
\n\n\n
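A minimal sketch of the constructor above; host and credentials are placeholders, and the `cache_type` value is an assumption that must match the Airbyte Redis connector spec:

from dagster_airbyte.managed.generated.destinations import RedisDestination

# Placeholder values; "hash" is assumed to be a valid cache_type for this connector.
redis_destination = RedisDestination(
    name="my_redis_destination",
    host="redis.example.internal",
    port=6379,
    username="default",
    password="***",
    cache_type="hash",
)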
[docs]class MqttDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(\n self,\n name: str,\n broker_host: str,\n broker_port: int,\n use_tls: bool,\n topic_pattern: str,\n publisher_sync: bool,\n connect_timeout: int,\n automatic_reconnect: bool,\n clean_session: bool,\n message_retained: bool,\n message_qos: str,\n username: Optional[str] = None,\n password: Optional[str] = None,\n topic_test: Optional[str] = None,\n client: Optional[str] = None,\n ):\n """Airbyte Destination for Mqtt.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/mqtt\n\n Args:\n name (str): The name of the destination.\n broker_host (str): Host of the broker to connect to.\n broker_port (int): Port of the broker.\n use_tls (bool): Whether to use TLS encryption on the connection.\n username (Optional[str]): User name to use for the connection.\n password (Optional[str]): Password to use for the connection.\n topic_pattern (str): Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.\n topic_test (Optional[str]): Topic to test if Airbyte can produce messages.\n client (Optional[str]): A client identifier that is unique on the server being connected to.\n publisher_sync (bool): Wait synchronously until the record has been sent to the broker.\n connect_timeout (int): Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established.\n automatic_reconnect (bool): Whether the client will automatically attempt to reconnect to the server if the connection is lost.\n clean_session (bool): Whether the client and server should remember state across restarts and reconnects.\n message_retained (bool): Whether or not the publish message should be retained by the messaging engine.\n message_qos (str): Quality of service used for each message to be delivered.\n """\n self.broker_host = check.str_param(broker_host, "broker_host")\n self.broker_port = check.int_param(broker_port, "broker_port")\n self.use_tls = check.bool_param(use_tls, "use_tls")\n self.username = check.opt_str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.topic_pattern = check.str_param(topic_pattern, "topic_pattern")\n self.topic_test = check.opt_str_param(topic_test, "topic_test")\n self.client = check.opt_str_param(client, "client")\n self.publisher_sync = check.bool_param(publisher_sync, "publisher_sync")\n self.connect_timeout = check.int_param(connect_timeout, "connect_timeout")\n self.automatic_reconnect = check.bool_param(automatic_reconnect, "automatic_reconnect")\n self.clean_session = check.bool_param(clean_session, "clean_session")\n self.message_retained = check.bool_param(message_retained, "message_retained")\n self.message_qos = check.str_param(message_qos, "message_qos")\n super().__init__("Mqtt", name)
\n\n\n
[docs]class RedshiftDestination(GeneratedAirbyteDestination):\n
[docs] class Standard:\n
[docs] @public\n def __init__(\n self,\n ):\n self.method = "Standard"
\n\n
[docs] class NoEncryption:\n
[docs] @public\n def __init__(\n self,\n ):\n self.encryption_type = "none"
\n\n
[docs] class AESCBCEnvelopeEncryption:\n
[docs] @public\n def __init__(self, key_encrypting_key: Optional[str] = None):\n self.encryption_type = "aes_cbc_envelope"\n self.key_encrypting_key = check.opt_str_param(key_encrypting_key, "key_encrypting_key")
\n\n
[docs] class S3Staging:\n
[docs] @public\n def __init__(\n self,\n s3_bucket_name: str,\n s3_bucket_region: str,\n access_key_id: str,\n secret_access_key: str,\n encryption: Union[\n "RedshiftDestination.NoEncryption", "RedshiftDestination.AESCBCEnvelopeEncryption"\n ],\n s3_bucket_path: Optional[str] = None,\n file_name_pattern: Optional[str] = None,\n purge_staging_data: Optional[bool] = None,\n ):\n self.method = "S3 Staging"\n self.s3_bucket_name = check.str_param(s3_bucket_name, "s3_bucket_name")\n self.s3_bucket_path = check.opt_str_param(s3_bucket_path, "s3_bucket_path")\n self.s3_bucket_region = check.str_param(s3_bucket_region, "s3_bucket_region")\n self.file_name_pattern = check.opt_str_param(file_name_pattern, "file_name_pattern")\n self.access_key_id = check.str_param(access_key_id, "access_key_id")\n self.secret_access_key = check.str_param(secret_access_key, "secret_access_key")\n self.purge_staging_data = check.opt_bool_param(purge_staging_data, "purge_staging_data")\n self.encryption = check.inst_param(\n encryption,\n "encryption",\n (RedshiftDestination.NoEncryption, RedshiftDestination.AESCBCEnvelopeEncryption),\n )
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n username: str,\n password: str,\n database: str,\n schema: str,\n uploading_method: Union["RedshiftDestination.Standard", "RedshiftDestination.S3Staging"],\n jdbc_url_params: Optional[str] = None,\n ):\n """Airbyte Destination for Redshift.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/redshift\n\n Args:\n name (str): The name of the destination.\n host (str): Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com)\n port (int): Port of the database.\n username (str): Username to use to access the database.\n password (str): Password associated with the username.\n database (str): Name of the database.\n schema (str): The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public".\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).\n uploading_method (Union[RedshiftDestination.Standard, RedshiftDestination.S3Staging]): The method how the data will be uploaded to the database.\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.username = check.str_param(username, "username")\n self.password = check.str_param(password, "password")\n self.database = check.str_param(database, "database")\n self.schema = check.str_param(schema, "schema")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n self.uploading_method = check.inst_param(\n uploading_method,\n "uploading_method",\n (RedshiftDestination.Standard, RedshiftDestination.S3Staging),\n )\n super().__init__("Redshift", name)
\n\n\n
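A minimal sketch of the constructor above using the no-argument `Standard` uploading method; cluster values are placeholders, and `S3Staging(...)` could be passed instead for staged loads:

from dagster_airbyte.managed.generated.destinations import RedshiftDestination

# Placeholder cluster values; Standard() inserts rows directly without S3 staging.
redshift_destination = RedshiftDestination(
    name="my_redshift_destination",
    host="my-cluster.abc123.us-east-1.redshift.amazonaws.com",
    port=5439,
    username="airbyte_user",
    password="***",
    database="analytics",
    schema="public",
    uploading_method=RedshiftDestination.Standard(),
)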
[docs]class PulsarDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(\n self,\n name: str,\n brokers: str,\n use_tls: bool,\n topic_type: str,\n topic_tenant: str,\n topic_namespace: str,\n topic_pattern: str,\n compression_type: str,\n send_timeout_ms: int,\n max_pending_messages: int,\n max_pending_messages_across_partitions: int,\n batching_enabled: bool,\n batching_max_messages: int,\n batching_max_publish_delay: int,\n block_if_queue_full: bool,\n topic_test: Optional[str] = None,\n producer_name: Optional[str] = None,\n producer_sync: Optional[bool] = None,\n ):\n """Airbyte Destination for Pulsar.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/pulsar\n\n Args:\n name (str): The name of the destination.\n brokers (str): A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster.\n use_tls (bool): Whether to use TLS encryption on the connection.\n topic_type (str): It identifies type of topic. Pulsar supports two kind of topics: persistent and non-persistent. In persistent topic, all messages are durably persisted on disk (that means on multiple disks unless the broker is standalone), whereas non-persistent topic does not persist message into storage disk.\n topic_tenant (str): The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and spread across clusters.\n topic_namespace (str): The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces.\n topic_pattern (str): Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.\n topic_test (Optional[str]): Topic to test if Airbyte can produce messages.\n producer_name (Optional[str]): Name for the producer. 
If not filled, the system will generate a globally unique name which can be accessed with.\n producer_sync (Optional[bool]): Wait synchronously until the record has been sent to Pulsar.\n compression_type (str): Compression type for the producer.\n send_timeout_ms (int): If a message is not acknowledged by a server before the send-timeout expires, an error occurs (in ms).\n max_pending_messages (int): The maximum size of a queue holding pending messages.\n max_pending_messages_across_partitions (int): The maximum number of pending messages across partitions.\n batching_enabled (bool): Control whether automatic batching of messages is enabled for the producer.\n batching_max_messages (int): Maximum number of messages permitted in a batch.\n batching_max_publish_delay (int): Time period in milliseconds within which the messages sent will be batched.\n block_if_queue_full (bool): If the send operation should block when the outgoing message queue is full.\n """\n self.brokers = check.str_param(brokers, "brokers")\n self.use_tls = check.bool_param(use_tls, "use_tls")\n self.topic_type = check.str_param(topic_type, "topic_type")\n self.topic_tenant = check.str_param(topic_tenant, "topic_tenant")\n self.topic_namespace = check.str_param(topic_namespace, "topic_namespace")\n self.topic_pattern = check.str_param(topic_pattern, "topic_pattern")\n self.topic_test = check.opt_str_param(topic_test, "topic_test")\n self.producer_name = check.opt_str_param(producer_name, "producer_name")\n self.producer_sync = check.opt_bool_param(producer_sync, "producer_sync")\n self.compression_type = check.str_param(compression_type, "compression_type")\n self.send_timeout_ms = check.int_param(send_timeout_ms, "send_timeout_ms")\n self.max_pending_messages = check.int_param(max_pending_messages, "max_pending_messages")\n self.max_pending_messages_across_partitions = check.int_param(\n max_pending_messages_across_partitions, "max_pending_messages_across_partitions"\n )\n self.batching_enabled = check.bool_param(batching_enabled, "batching_enabled")\n self.batching_max_messages = check.int_param(batching_max_messages, "batching_max_messages")\n self.batching_max_publish_delay = check.int_param(\n batching_max_publish_delay, "batching_max_publish_delay"\n )\n self.block_if_queue_full = check.bool_param(block_if_queue_full, "block_if_queue_full")\n super().__init__("Pulsar", name)
\n\n\n
[docs]class SnowflakeDestination(GeneratedAirbyteDestination):\n
[docs] class OAuth20:\n
[docs] @public\n def __init__(\n self,\n access_token: str,\n refresh_token: str,\n auth_type: Optional[str] = None,\n client_id: Optional[str] = None,\n client_secret: Optional[str] = None,\n ):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.client_id = check.opt_str_param(client_id, "client_id")\n self.client_secret = check.opt_str_param(client_secret, "client_secret")\n self.access_token = check.str_param(access_token, "access_token")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] class KeyPairAuthentication:\n
[docs] @public\n def __init__(\n self,\n private_key: str,\n auth_type: Optional[str] = None,\n private_key_password: Optional[str] = None,\n ):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.private_key = check.str_param(private_key, "private_key")\n self.private_key_password = check.opt_str_param(\n private_key_password, "private_key_password"\n )
\n\n
[docs] class UsernameAndPassword:\n
[docs] @public\n def __init__(self, password: str):\n self.password = check.str_param(password, "password")
\n\n
[docs] class SelectAnotherOption:\n
[docs] @public\n def __init__(self, method: str):\n self.method = check.str_param(method, "method")
\n\n
[docs] class RecommendedInternalStaging:\n
[docs] @public\n def __init__(self, method: str):\n self.method = check.str_param(method, "method")
\n\n
[docs] class NoEncryption:\n
[docs] @public\n def __init__(\n self,\n ):\n self.encryption_type = "none"
\n\n
[docs] class AESCBCEnvelopeEncryption:\n
[docs] @public\n def __init__(self, key_encrypting_key: Optional[str] = None):\n self.encryption_type = "aes_cbc_envelope"\n self.key_encrypting_key = check.opt_str_param(key_encrypting_key, "key_encrypting_key")
\n\n
[docs] class AWSS3Staging:\n
[docs] @public\n def __init__(\n self,\n method: str,\n s3_bucket_name: str,\n access_key_id: str,\n secret_access_key: str,\n encryption: Union[\n "SnowflakeDestination.NoEncryption", "SnowflakeDestination.AESCBCEnvelopeEncryption"\n ],\n s3_bucket_region: Optional[str] = None,\n purge_staging_data: Optional[bool] = None,\n file_name_pattern: Optional[str] = None,\n ):\n self.method = check.str_param(method, "method")\n self.s3_bucket_name = check.str_param(s3_bucket_name, "s3_bucket_name")\n self.s3_bucket_region = check.opt_str_param(s3_bucket_region, "s3_bucket_region")\n self.access_key_id = check.str_param(access_key_id, "access_key_id")\n self.secret_access_key = check.str_param(secret_access_key, "secret_access_key")\n self.purge_staging_data = check.opt_bool_param(purge_staging_data, "purge_staging_data")\n self.encryption = check.inst_param(\n encryption,\n "encryption",\n (SnowflakeDestination.NoEncryption, SnowflakeDestination.AESCBCEnvelopeEncryption),\n )\n self.file_name_pattern = check.opt_str_param(file_name_pattern, "file_name_pattern")
\n\n
[docs] class GoogleCloudStorageStaging:\n
[docs] @public\n def __init__(self, method: str, project_id: str, bucket_name: str, credentials_json: str):\n self.method = check.str_param(method, "method")\n self.project_id = check.str_param(project_id, "project_id")\n self.bucket_name = check.str_param(bucket_name, "bucket_name")\n self.credentials_json = check.str_param(credentials_json, "credentials_json")
\n\n
[docs] class AzureBlobStorageStaging:\n
[docs] @public\n def __init__(\n self,\n method: str,\n azure_blob_storage_account_name: str,\n azure_blob_storage_container_name: str,\n azure_blob_storage_sas_token: str,\n azure_blob_storage_endpoint_domain_name: Optional[str] = None,\n ):\n self.method = check.str_param(method, "method")\n self.azure_blob_storage_endpoint_domain_name = check.opt_str_param(\n azure_blob_storage_endpoint_domain_name, "azure_blob_storage_endpoint_domain_name"\n )\n self.azure_blob_storage_account_name = check.str_param(\n azure_blob_storage_account_name, "azure_blob_storage_account_name"\n )\n self.azure_blob_storage_container_name = check.str_param(\n azure_blob_storage_container_name, "azure_blob_storage_container_name"\n )\n self.azure_blob_storage_sas_token = check.str_param(\n azure_blob_storage_sas_token, "azure_blob_storage_sas_token"\n )
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n role: str,\n warehouse: str,\n database: str,\n schema: str,\n username: str,\n credentials: Union[\n "SnowflakeDestination.OAuth20",\n "SnowflakeDestination.KeyPairAuthentication",\n "SnowflakeDestination.UsernameAndPassword",\n ],\n loading_method: Union[\n "SnowflakeDestination.SelectAnotherOption",\n "SnowflakeDestination.RecommendedInternalStaging",\n "SnowflakeDestination.AWSS3Staging",\n "SnowflakeDestination.GoogleCloudStorageStaging",\n "SnowflakeDestination.AzureBlobStorageStaging",\n ],\n jdbc_url_params: Optional[str] = None,\n ):\n """Airbyte Destination for Snowflake.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/snowflake\n\n Args:\n name (str): The name of the destination.\n host (str): Enter your Snowflake account's locator (in the format ...snowflakecomputing.com)\n role (str): Enter the role that you want to use to access Snowflake\n warehouse (str): Enter the name of the warehouse that you want to sync data into\n database (str): Enter the name of the database you want to sync data into\n schema (str): Enter the name of the default schema\n username (str): Enter the name of the user you want to use to access the database\n jdbc_url_params (Optional[str]): Enter the additional properties to pass to the JDBC URL string when connecting to the database (formatted as key=value pairs separated by the symbol &). Example: key1=value1&key2=value2&key3=value3\n loading_method (Union[SnowflakeDestination.SelectAnotherOption, SnowflakeDestination.RecommendedInternalStaging, SnowflakeDestination.AWSS3Staging, SnowflakeDestination.GoogleCloudStorageStaging, SnowflakeDestination.AzureBlobStorageStaging]): Select a data staging method\n """\n self.host = check.str_param(host, "host")\n self.role = check.str_param(role, "role")\n self.warehouse = check.str_param(warehouse, "warehouse")\n self.database = check.str_param(database, "database")\n self.schema = check.str_param(schema, "schema")\n self.username = check.str_param(username, "username")\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (\n SnowflakeDestination.OAuth20,\n SnowflakeDestination.KeyPairAuthentication,\n SnowflakeDestination.UsernameAndPassword,\n ),\n )\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n self.loading_method = check.inst_param(\n loading_method,\n "loading_method",\n (\n SnowflakeDestination.SelectAnotherOption,\n SnowflakeDestination.RecommendedInternalStaging,\n SnowflakeDestination.AWSS3Staging,\n SnowflakeDestination.GoogleCloudStorageStaging,\n SnowflakeDestination.AzureBlobStorageStaging,\n ),\n )\n super().__init__("Snowflake", name)
\n\n\n
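A minimal sketch of the constructor above with username/password credentials and internal staging; account values are placeholders, and the `method` string passed to `RecommendedInternalStaging` is an assumption that must match the Airbyte Snowflake connector spec:

from dagster_airbyte.managed.generated.destinations import SnowflakeDestination

# Placeholder account values; the loading-method label is an assumed spec string.
snowflake_destination = SnowflakeDestination(
    name="my_snowflake_destination",
    host="mycompany.snowflakecomputing.com",
    role="AIRBYTE_ROLE",
    warehouse="AIRBYTE_WAREHOUSE",
    database="ANALYTICS",
    schema="PUBLIC",
    username="AIRBYTE_USER",
    credentials=SnowflakeDestination.UsernameAndPassword(password="***"),
    loading_method=SnowflakeDestination.RecommendedInternalStaging(method="Internal Staging"),
)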
[docs]class PostgresDestination(GeneratedAirbyteDestination):\n
[docs] class Disable:\n
[docs] @public\n def __init__(\n self,\n ):\n self.mode = "disable"
\n\n
[docs] class Allow:\n
[docs] @public\n def __init__(\n self,\n ):\n self.mode = "allow"
\n\n
[docs] class Prefer:\n
[docs] @public\n def __init__(\n self,\n ):\n self.mode = "prefer"
\n\n
[docs] class Require:\n
[docs] @public\n def __init__(\n self,\n ):\n self.mode = "require"
\n\n
[docs] class VerifyCa:\n
[docs] @public\n def __init__(self, ca_certificate: str, client_key_password: Optional[str] = None):\n self.mode = "verify-ca"\n self.ca_certificate = check.str_param(ca_certificate, "ca_certificate")\n self.client_key_password = check.opt_str_param(\n client_key_password, "client_key_password"\n )
\n\n
[docs] class VerifyFull:\n
[docs] @public\n def __init__(\n self,\n ca_certificate: str,\n client_certificate: str,\n client_key: str,\n client_key_password: Optional[str] = None,\n ):\n self.mode = "verify-full"\n self.ca_certificate = check.str_param(ca_certificate, "ca_certificate")\n self.client_certificate = check.str_param(client_certificate, "client_certificate")\n self.client_key = check.str_param(client_key, "client_key")\n self.client_key_password = check.opt_str_param(\n client_key_password, "client_key_password"\n )
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n database: str,\n schema: str,\n username: str,\n ssl_mode: Union[\n "PostgresDestination.Disable",\n "PostgresDestination.Allow",\n "PostgresDestination.Prefer",\n "PostgresDestination.Require",\n "PostgresDestination.VerifyCa",\n "PostgresDestination.VerifyFull",\n ],\n password: Optional[str] = None,\n ssl: Optional[bool] = None,\n jdbc_url_params: Optional[str] = None,\n ):\n """Airbyte Destination for Postgres.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/postgres\n\n Args:\n name (str): The name of the destination.\n host (str): Hostname of the database.\n port (int): Port of the database.\n database (str): Name of the database.\n schema (str): The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".\n username (str): Username to use to access the database.\n password (Optional[str]): Password associated with the username.\n ssl (Optional[bool]): Encrypt data using SSL. When activating SSL, please select one of the connection modes.\n ssl_mode (Union[PostgresDestination.Disable, PostgresDestination.Allow, PostgresDestination.Prefer, PostgresDestination.Require, PostgresDestination.VerifyCa, PostgresDestination.VerifyFull]): SSL connection modes. disable - Chose this mode to disable encryption of communication between Airbyte and destination database allow - Chose this mode to enable encryption only when required by the source database prefer - Chose this mode to allow unencrypted connection only if the source database does not support encryption require - Chose this mode to always require encryption. If the source database server does not support encryption, connection will fail verify-ca - Chose this mode to always require encryption and to verify that the source database server has a valid SSL certificate verify-full - This is the most secure mode. Chose this mode to always require encryption and to verify the identity of the source database server See more information - in the docs.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.database = check.str_param(database, "database")\n self.schema = check.str_param(schema, "schema")\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.ssl = check.opt_bool_param(ssl, "ssl")\n self.ssl_mode = check.inst_param(\n ssl_mode,\n "ssl_mode",\n (\n PostgresDestination.Disable,\n PostgresDestination.Allow,\n PostgresDestination.Prefer,\n PostgresDestination.Require,\n PostgresDestination.VerifyCa,\n PostgresDestination.VerifyFull,\n ),\n )\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n super().__init__("Postgres", name)
\n\n\n
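A minimal sketch of the constructor above; connection values are placeholders, and `Disable()` takes no arguments, with any of the other ssl_mode classes substitutable:

from dagster_airbyte.managed.generated.destinations import PostgresDestination

# Placeholder connection values; pick the ssl_mode class that matches your server setup.
postgres_destination = PostgresDestination(
    name="my_postgres_destination",
    host="postgres.example.internal",
    port=5432,
    database="analytics",
    schema="public",
    username="airbyte_user",
    password="***",
    ssl_mode=PostgresDestination.Disable(),
)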
[docs]class ScaffoldDestinationPythonDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(self, name: str, TODO: Optional[str] = None):\n """Airbyte Destination for Scaffold Destination Python.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/scaffold-destination-python\n\n Args:\n name (str): The name of the destination.\n TODO (Optional[str]): FIX ME\n """\n self.TODO = check.opt_str_param(TODO, "TODO")\n super().__init__("Scaffold Destination Python", name)
\n\n\n
[docs]class LocalJsonDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(self, name: str, destination_path: str):\n """Airbyte Destination for Local Json.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/local-json\n\n Args:\n name (str): The name of the destination.\n destination_path (str): Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs\n """\n self.destination_path = check.str_param(destination_path, "destination_path")\n super().__init__("Local Json", name)
\n\n\n
[docs]class MeilisearchDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(self, name: str, host: str, api_key: Optional[str] = None):\n """Airbyte Destination for Meilisearch.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/meilisearch\n\n Args:\n name (str): The name of the destination.\n host (str): Hostname of the MeiliSearch instance.\n api_key (Optional[str]): MeiliSearch API Key. See the docs for more information on how to obtain this key.\n """\n self.host = check.str_param(host, "host")\n self.api_key = check.opt_str_param(api_key, "api_key")\n super().__init__("Meilisearch", name)
\n
", "current_page_name": "_modules/dagster_airbyte/managed/generated/destinations", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_airbyte.managed.generated.destinations"}, "sources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_airbyte.managed.generated.sources

\n# ruff: noqa: A001, A002\nfrom typing import List, Optional, Union\n\nimport dagster._check as check\nfrom dagster._annotations import public\n\nfrom dagster_airbyte.managed.types import GeneratedAirbyteSource\n\n\n
[docs]class StravaSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n athlete_id: int,\n start_date: str,\n auth_type: Optional[str] = None,\n ):\n """Airbyte Source for Strava.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/strava\n\n Args:\n name (str): The name of the destination.\n client_id (str): The Client ID of your Strava developer application.\n client_secret (str): The Client Secret of your Strava developer application.\n refresh_token (str): The Refresh Token with the activity: read_all permissions.\n athlete_id (int): The Athlete ID of your Strava developer application.\n start_date (str): UTC date and time. Any data before this date will not be replicated.\n """\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")\n self.athlete_id = check.int_param(athlete_id, "athlete_id")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Strava", name)
\n\n\n
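A minimal sketch of the constructor above, assuming the `dagster_airbyte.managed.generated.sources` import path shown for this module; the OAuth values and athlete ID are placeholders from a hypothetical Strava developer application:

from dagster_airbyte.managed.generated.sources import StravaSource

# Placeholder credentials; start_date is a UTC timestamp per the docstring above.
strava_source = StravaSource(
    name="my_strava_source",
    client_id="12345",
    client_secret="***",
    refresh_token="***",
    athlete_id=17831421,
    start_date="2021-01-01T00:00:00Z",
)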
[docs]class AppsflyerSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n app_id: str,\n api_token: str,\n start_date: str,\n timezone: Optional[str] = None,\n ):\n """Airbyte Source for Appsflyer.\n\n Args:\n name (str): The name of the destination.\n app_id (str): App identifier as found in AppsFlyer.\n api_token (str): Pull API token for authentication. If you change the account admin, the token changes, and you must update scripts with the new token. Get the API token in the Dashboard.\n start_date (str): The default value to use if no bookmark exists for an endpoint. Raw Reports historical lookback is limited to 90 days.\n timezone (Optional[str]): Time zone in which date times are stored. The project timezone may be found in the App settings in the AppsFlyer console.\n """\n self.app_id = check.str_param(app_id, "app_id")\n self.api_token = check.str_param(api_token, "api_token")\n self.start_date = check.str_param(start_date, "start_date")\n self.timezone = check.opt_str_param(timezone, "timezone")\n super().__init__("Appsflyer", name)
\n\n\n
[docs]class GoogleWorkspaceAdminReportsSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self, name: str, credentials_json: str, email: str, lookback: Optional[int] = None\n ):\n """Airbyte Source for Google Workspace Admin Reports.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/google-workspace-admin-reports\n\n Args:\n name (str): The name of the destination.\n credentials_json (str): The contents of the JSON service account key. See the docs for more information on how to generate this key.\n email (str): The email of the user, who has permissions to access the Google Workspace Admin APIs.\n lookback (Optional[int]): Sets the range of time shown in the report. The maximum value allowed by the Google API is 180 days.\n """\n self.credentials_json = check.str_param(credentials_json, "credentials_json")\n self.email = check.str_param(email, "email")\n self.lookback = check.opt_int_param(lookback, "lookback")\n super().__init__("Google Workspace Admin Reports", name)
\n\n\n
[docs]class CartSource(GeneratedAirbyteSource):\n
[docs] class CentralAPIRouter:\n
[docs] @public\n def __init__(self, user_name: str, user_secret: str, site_id: str):\n self.auth_type = "CENTRAL_API_ROUTER"\n self.user_name = check.str_param(user_name, "user_name")\n self.user_secret = check.str_param(user_secret, "user_secret")\n self.site_id = check.str_param(site_id, "site_id")
\n\n
[docs] class SingleStoreAccessToken:\n
[docs] @public\n def __init__(self, access_token: str, store_name: str):\n self.auth_type = "SINGLE_STORE_ACCESS_TOKEN"\n self.access_token = check.str_param(access_token, "access_token")\n self.store_name = check.str_param(store_name, "store_name")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n credentials: Union["CartSource.CentralAPIRouter", "CartSource.SingleStoreAccessToken"],\n start_date: str,\n ):\n """Airbyte Source for Cart.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/cart\n\n Args:\n name (str): The name of the destination.\n start_date (str): The date from which you'd like to replicate the data\n """\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (CartSource.CentralAPIRouter, CartSource.SingleStoreAccessToken),\n )\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Cart", name)
\n\n\n
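A minimal sketch of the constructor above using the Central API Router credentials; all values are placeholders, and `SingleStoreAccessToken(...)` could be passed instead:

from dagster_airbyte.managed.generated.sources import CartSource

# Placeholder credentials for the Central API Router authentication option.
cart_source = CartSource(
    name="my_cart_source",
    credentials=CartSource.CentralAPIRouter(
        user_name="api_user",
        user_secret="***",
        site_id="my-store",
    ),
    start_date="2021-01-01T00:00:00Z",
)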
[docs]class LinkedinAdsSource(GeneratedAirbyteSource):\n
[docs] class OAuth20:\n
[docs] @public\n def __init__(\n self,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n auth_method: Optional[str] = None,\n ):\n self.auth_method = check.opt_str_param(auth_method, "auth_method")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] class AccessToken:\n
[docs] @public\n def __init__(self, access_token: str, auth_method: Optional[str] = None):\n self.auth_method = check.opt_str_param(auth_method, "auth_method")\n self.access_token = check.str_param(access_token, "access_token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n credentials: Union["LinkedinAdsSource.OAuth20", "LinkedinAdsSource.AccessToken"],\n start_date: str,\n account_ids: Optional[List[int]] = None,\n ):\n """Airbyte Source for Linkedin Ads.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/linkedin-ads\n\n Args:\n name (str): The name of the destination.\n start_date (str): UTC date in the format 2020-09-17. Any data before this date will not be replicated.\n account_ids (Optional[List[int]]): Specify the account IDs separated by a space, to pull the data from. Leave empty, if you want to pull the data from all associated accounts. See the LinkedIn Ads docs for more info.\n """\n self.credentials = check.inst_param(\n credentials, "credentials", (LinkedinAdsSource.OAuth20, LinkedinAdsSource.AccessToken)\n )\n self.start_date = check.str_param(start_date, "start_date")\n self.account_ids = check.opt_nullable_list_param(account_ids, "account_ids", int)\n super().__init__("Linkedin Ads", name)
\n\n\n
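A minimal sketch of the constructor above using a direct access token; the token and account IDs are placeholders, and `OAuth20(...)` could be used instead of `AccessToken(...)`:

from dagster_airbyte.managed.generated.sources import LinkedinAdsSource

# Placeholder token/account values; omit account_ids to pull all associated accounts.
linkedin_ads_source = LinkedinAdsSource(
    name="my_linkedin_ads_source",
    credentials=LinkedinAdsSource.AccessToken(access_token="***"),
    start_date="2021-05-17",
    account_ids=[123456789],
)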
[docs]class MongodbSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n database: str,\n user: str,\n password: str,\n auth_source: str,\n replica_set: Optional[str] = None,\n ssl: Optional[bool] = None,\n ):\n """Airbyte Source for Mongodb.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/mongodb\n\n Args:\n name (str): The name of the destination.\n host (str): Host of a Mongo database to be replicated.\n port (int): Port of a Mongo database to be replicated.\n database (str): Database to be replicated.\n user (str): User\n password (str): Password\n auth_source (str): Authentication source where user information is stored. See the Mongo docs for more info.\n replica_set (Optional[str]): The name of the set to filter servers by, when connecting to a replica set (Under this condition, the 'TLS connection' value automatically becomes 'true'). See the Mongo docs for more info.\n ssl (Optional[bool]): If this switch is enabled, TLS connections will be used to connect to MongoDB.\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.database = check.str_param(database, "database")\n self.user = check.str_param(user, "user")\n self.password = check.str_param(password, "password")\n self.auth_source = check.str_param(auth_source, "auth_source")\n self.replica_set = check.opt_str_param(replica_set, "replica_set")\n self.ssl = check.opt_bool_param(ssl, "ssl")\n super().__init__("Mongodb", name)
\n\n\n
[docs]class TimelySource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, account_id: str, start_date: str, bearer_token: str):\n """Airbyte Source for Timely.\n\n Args:\n name (str): The name of the destination.\n account_id (str): Timely account id\n start_date (str): start date\n bearer_token (str): Timely bearer token\n """\n self.account_id = check.str_param(account_id, "account_id")\n self.start_date = check.str_param(start_date, "start_date")\n self.bearer_token = check.str_param(bearer_token, "bearer_token")\n super().__init__("Timely", name)
\n\n\n
[docs]class StockTickerApiTutorialSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, stock_ticker: str, api_key: str):\n """Airbyte Source for Stock Ticker Api Tutorial.\n\n Documentation can be found at https://polygon.io/docs/stocks/get_v2_aggs_grouped_locale_us_market_stocks__date\n\n Args:\n name (str): The name of the destination.\n stock_ticker (str): The stock ticker to track\n api_key (str): The Polygon.io Stocks API key to use to hit the API.\n """\n self.stock_ticker = check.str_param(stock_ticker, "stock_ticker")\n self.api_key = check.str_param(api_key, "api_key")\n super().__init__("Stock Ticker Api Tutorial", name)
\n\n\n
[docs]class WrikeSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self, name: str, access_token: str, wrike_instance: str, start_date: Optional[str] = None\n ):\n """Airbyte Source for Wrike.\n\n Args:\n name (str): The name of the destination.\n access_token (str): Permanent access token. You can find documentation on how to acquire a permanent access token here\n wrike_instance (str): Wrike's instance such as `app-us2.wrike.com`\n start_date (Optional[str]): UTC date and time in the format 2017-01-25T00:00:00Z. Only comments after this date will be replicated.\n """\n self.access_token = check.str_param(access_token, "access_token")\n self.wrike_instance = check.str_param(wrike_instance, "wrike_instance")\n self.start_date = check.opt_str_param(start_date, "start_date")\n super().__init__("Wrike", name)
\n\n\n
[docs]class CommercetoolsSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n region: str,\n host: str,\n start_date: str,\n project_key: str,\n client_id: str,\n client_secret: str,\n ):\n """Airbyte Source for Commercetools.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/commercetools\n\n Args:\n name (str): The name of the destination.\n region (str): The region of the platform.\n host (str): The cloud provider where your shop is hosted. See: https://docs.commercetools.com/api/authorization\n start_date (str): The date you would like to replicate data. Format: YYYY-MM-DD.\n project_key (str): The project key\n client_id (str): The ID of the API Client.\n client_secret (str): The secret of the API Client.\n """\n self.region = check.str_param(region, "region")\n self.host = check.str_param(host, "host")\n self.start_date = check.str_param(start_date, "start_date")\n self.project_key = check.str_param(project_key, "project_key")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n super().__init__("Commercetools", name)
\n\n\n
[docs]class GutendexSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n author_year_start: Optional[str] = None,\n author_year_end: Optional[str] = None,\n copyright: Optional[str] = None,\n languages: Optional[str] = None,\n search: Optional[str] = None,\n sort: Optional[str] = None,\n topic: Optional[str] = None,\n ):\n """Airbyte Source for Gutendex.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/gutendex\n\n Args:\n name (str): The name of the destination.\n author_year_start (Optional[str]): (Optional) Defines the minimum birth year of the authors. Books by authors born prior to the start year will not be returned. Supports both positive (CE) or negative (BCE) integer values\n author_year_end (Optional[str]): (Optional) Defines the maximum birth year of the authors. Books by authors born after the end year will not be returned. Supports both positive (CE) or negative (BCE) integer values\n copyright (Optional[str]): (Optional) Use this to find books with a certain copyright status - true for books with existing copyrights, false for books in the public domain in the USA, or null for books with no available copyright information.\n languages (Optional[str]): (Optional) Use this to find books in any of a list of languages. They must be comma-separated, two-character language codes.\n search (Optional[str]): (Optional) Use this to search author names and book titles with given words. They must be separated by a space (i.e. %20 in URL-encoded format) and are case-insensitive.\n sort (Optional[str]): (Optional) Use this to sort books - ascending for Project Gutenberg ID numbers from lowest to highest, descending for IDs highest to lowest, or popular (the default) for most popular to least popular by number of downloads.\n topic (Optional[str]): (Optional) Use this to search for a case-insensitive key-phrase in books' bookshelves or subjects.\n """\n self.author_year_start = check.opt_str_param(author_year_start, "author_year_start")\n self.author_year_end = check.opt_str_param(author_year_end, "author_year_end")\n self.copyright = check.opt_str_param(copyright, "copyright")\n self.languages = check.opt_str_param(languages, "languages")\n self.search = check.opt_str_param(search, "search")\n self.sort = check.opt_str_param(sort, "sort")\n self.topic = check.opt_str_param(topic, "topic")\n super().__init__("Gutendex", name)
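Since every parameter other than name is optional, a minimal sketch only needs the filters you care about; the import path and values below are assumptions for illustration:

from dagster_airbyte.managed.generated.sources import GutendexSource  # import path assumed for illustration

gutendex_source = GutendexSource(
    name="my_gutendex_connection",  # hypothetical connection name
    languages="en,fr",              # placeholder comma-separated two-character language codes
    sort="popular",                 # placeholder sort order
)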
\n\n\n
[docs]class IterableSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_key: str, start_date: str):\n """Airbyte Source for Iterable.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/iterable\n\n Args:\n name (str): The name of the destination.\n api_key (str): Iterable API Key. See the docs for more information on how to obtain this key.\n start_date (str): The date from which you'd like to replicate data for Iterable, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.\n """\n self.api_key = check.str_param(api_key, "api_key")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Iterable", name)
\n\n\n
[docs]class QuickbooksSingerSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n realm_id: str,\n user_agent: str,\n start_date: str,\n sandbox: bool,\n ):\n """Airbyte Source for Quickbooks Singer.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/quickbooks\n\n Args:\n name (str): The name of the destination.\n client_id (str): Identifies which app is making the request. Obtain this value from the Keys tab on the app profile via My Apps on the developer site. There are two versions of this key: development and production.\n client_secret (str): Obtain this value from the Keys tab on the app profile via My Apps on the developer site. There are two versions of this key: development and production.\n refresh_token (str): A token used when refreshing the access token.\n realm_id (str): Labeled Company ID. The Make API Calls panel is populated with the realm id and the current access token.\n user_agent (str): Process and email for API logging purposes. Example: tap-quickbooks .\n start_date (str): The default value to use if no bookmark exists for an endpoint (rfc3339 date string). E.g, 2021-03-20T00:00:00Z. Any data before this date will not be replicated.\n sandbox (bool): Determines whether to use the sandbox or production environment.\n """\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")\n self.realm_id = check.str_param(realm_id, "realm_id")\n self.user_agent = check.str_param(user_agent, "user_agent")\n self.start_date = check.str_param(start_date, "start_date")\n self.sandbox = check.bool_param(sandbox, "sandbox")\n super().__init__("Quickbooks Singer", name)
\n\n\n
[docs]class BigcommerceSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, start_date: str, store_hash: str, access_token: str):\n """Airbyte Source for Bigcommerce.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/bigcommerce\n\n Args:\n name (str): The name of the destination.\n start_date (str): The date you would like to replicate data. Format: YYYY-MM-DD.\n store_hash (str): The hash code of the store. For https://api.bigcommerce.com/stores/HASH_CODE/v3/, The store's hash code is 'HASH_CODE'.\n access_token (str): Access Token for making authenticated requests.\n """\n self.start_date = check.str_param(start_date, "start_date")\n self.store_hash = check.str_param(store_hash, "store_hash")\n self.access_token = check.str_param(access_token, "access_token")\n super().__init__("Bigcommerce", name)
\n\n\n
[docs]class ShopifySource(GeneratedAirbyteSource):\n
[docs] class APIPassword:\n
[docs] @public\n def __init__(self, api_password: str):\n self.auth_method = "api_password"\n self.api_password = check.str_param(api_password, "api_password")
\n\n
[docs] class OAuth20:\n
[docs] @public\n def __init__(\n self,\n client_id: Optional[str] = None,\n client_secret: Optional[str] = None,\n access_token: Optional[str] = None,\n ):\n self.auth_method = "oauth2.0"\n self.client_id = check.opt_str_param(client_id, "client_id")\n self.client_secret = check.opt_str_param(client_secret, "client_secret")\n self.access_token = check.opt_str_param(access_token, "access_token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n shop: str,\n credentials: Union["ShopifySource.APIPassword", "ShopifySource.OAuth20"],\n start_date: str,\n ):\n """Airbyte Source for Shopify.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/shopify\n\n Args:\n name (str): The name of the destination.\n shop (str): The name of your Shopify store found in the URL. For example, if your URL was https://NAME.myshopify.com, then the name would be 'NAME'.\n credentials (Union[ShopifySource.APIPassword, ShopifySource.OAuth20]): The authorization method to use to retrieve data from Shopify\n start_date (str): The date you would like to replicate data from. Format: YYYY-MM-DD. Any data before this date will not be replicated.\n """\n self.shop = check.str_param(shop, "shop")\n self.credentials = check.inst_param(\n credentials, "credentials", (ShopifySource.APIPassword, ShopifySource.OAuth20)\n )\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Shopify", name)
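The credentials argument takes one of the nested credential classes shown above. A hedged sketch using the API password variant, with an assumed import path and placeholder values:

from dagster_airbyte.managed.generated.sources import ShopifySource  # import path assumed for illustration

shopify_source = ShopifySource(
    name="my_shopify_connection",  # hypothetical connection name
    shop="NAME",                   # placeholder store name from https://NAME.myshopify.com
    credentials=ShopifySource.APIPassword(
        api_password="SHOPIFY_API_PASSWORD",  # placeholder; supply the real password from a secret store
    ),
    start_date="2022-01-01",       # placeholder YYYY-MM-DD start date
)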
\n\n\n
[docs]class AppstoreSingerSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self, name: str, key_id: str, private_key: str, issuer_id: str, vendor: str, start_date: str\n ):\n """Airbyte Source for Appstore Singer.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/appstore\n\n Args:\n name (str): The name of the destination.\n key_id (str): Appstore Key ID. See the docs for more information on how to obtain this key.\n private_key (str): Appstore Private Key. See the docs for more information on how to obtain this key.\n issuer_id (str): Appstore Issuer ID. See the docs for more information on how to obtain this ID.\n vendor (str): Appstore Vendor ID. See the docs for more information on how to obtain this ID.\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.\n """\n self.key_id = check.str_param(key_id, "key_id")\n self.private_key = check.str_param(private_key, "private_key")\n self.issuer_id = check.str_param(issuer_id, "issuer_id")\n self.vendor = check.str_param(vendor, "vendor")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Appstore Singer", name)
\n\n\n
[docs]class GreenhouseSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_key: str):\n """Airbyte Source for Greenhouse.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/greenhouse\n\n Args:\n name (str): The name of the destination.\n api_key (str): Greenhouse API Key. See the docs for more information on how to generate this key.\n """\n self.api_key = check.str_param(api_key, "api_key")\n super().__init__("Greenhouse", name)
\n\n\n
[docs]class ZoomSingerSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, jwt: str):\n """Airbyte Source for Zoom Singer.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/zoom\n\n Args:\n name (str): The name of the destination.\n jwt (str): Zoom JWT Token. See the docs for more information on how to obtain this key.\n """\n self.jwt = check.str_param(jwt, "jwt")\n super().__init__("Zoom Singer", name)
\n\n\n
[docs]class TiktokMarketingSource(GeneratedAirbyteSource):\n
[docs] class OAuth20:\n
[docs] @public\n def __init__(\n self, app_id: str, secret: str, access_token: str, auth_type: Optional[str] = None\n ):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.app_id = check.str_param(app_id, "app_id")\n self.secret = check.str_param(secret, "secret")\n self.access_token = check.str_param(access_token, "access_token")
\n\n
[docs] class SandboxAccessToken:\n
[docs] @public\n def __init__(self, advertiser_id: str, access_token: str, auth_type: Optional[str] = None):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.advertiser_id = check.str_param(advertiser_id, "advertiser_id")\n self.access_token = check.str_param(access_token, "access_token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n credentials: Union[\n "TiktokMarketingSource.OAuth20", "TiktokMarketingSource.SandboxAccessToken"\n ],\n start_date: Optional[str] = None,\n end_date: Optional[str] = None,\n report_granularity: Optional[str] = None,\n ):\n """Airbyte Source for Tiktok Marketing.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/tiktok-marketing\n\n Args:\n name (str): The name of the destination.\n credentials (Union[TiktokMarketingSource.OAuth20, TiktokMarketingSource.SandboxAccessToken]): Authentication method\n start_date (Optional[str]): The Start Date in format: YYYY-MM-DD. Any data before this date will not be replicated. If this parameter is not set, all data will be replicated.\n end_date (Optional[str]): The date until which you'd like to replicate data for all incremental streams, in the format YYYY-MM-DD. All data generated between start_date and this date will be replicated. Not setting this option will result in always syncing the data till the current date.\n report_granularity (Optional[str]): The granularity used for aggregating performance data in reports. See the docs.\n """\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (TiktokMarketingSource.OAuth20, TiktokMarketingSource.SandboxAccessToken),\n )\n self.start_date = check.opt_str_param(start_date, "start_date")\n self.end_date = check.opt_str_param(end_date, "end_date")\n self.report_granularity = check.opt_str_param(report_granularity, "report_granularity")\n super().__init__("Tiktok Marketing", name)
\n\n\n
[docs]class ZendeskChatSource(GeneratedAirbyteSource):\n
[docs] class OAuth20:\n
[docs] @public\n def __init__(\n self,\n client_id: Optional[str] = None,\n client_secret: Optional[str] = None,\n access_token: Optional[str] = None,\n refresh_token: Optional[str] = None,\n ):\n self.credentials = "oauth2.0"\n self.client_id = check.opt_str_param(client_id, "client_id")\n self.client_secret = check.opt_str_param(client_secret, "client_secret")\n self.access_token = check.opt_str_param(access_token, "access_token")\n self.refresh_token = check.opt_str_param(refresh_token, "refresh_token")
\n\n
[docs] class AccessToken:\n
[docs] @public\n def __init__(self, access_token: str):\n self.credentials = "access_token"\n self.access_token = check.str_param(access_token, "access_token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n start_date: str,\n credentials: Union["ZendeskChatSource.OAuth20", "ZendeskChatSource.AccessToken"],\n subdomain: Optional[str] = None,\n ):\n """Airbyte Source for Zendesk Chat.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/zendesk-chat\n\n Args:\n name (str): The name of the destination.\n start_date (str): The date from which you'd like to replicate data for Zendesk Chat API, in the format YYYY-MM-DDT00:00:00Z.\n subdomain (Optional[str]): Required if you access Zendesk Chat from a Zendesk Support subdomain.\n """\n self.start_date = check.str_param(start_date, "start_date")\n self.subdomain = check.opt_str_param(subdomain, "subdomain")\n self.credentials = check.inst_param(\n credentials, "credentials", (ZendeskChatSource.OAuth20, ZendeskChatSource.AccessToken)\n )\n super().__init__("Zendesk Chat", name)
\n\n\n
[docs]class AwsCloudtrailSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self, name: str, aws_key_id: str, aws_secret_key: str, aws_region_name: str, start_date: str\n ):\n """Airbyte Source for Aws Cloudtrail.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/aws-cloudtrail\n\n Args:\n name (str): The name of the destination.\n aws_key_id (str): AWS CloudTrail Access Key ID. See the docs for more information on how to obtain this key.\n aws_secret_key (str): AWS CloudTrail Secret Access Key. See the docs for more information on how to obtain this key.\n aws_region_name (str): The default AWS Region to use, for example, us-west-1 or us-west-2. When specifying a Region inline during client initialization, this property is named region_name.\n start_date (str): The date you would like to replicate data. Data in AWS CloudTrail is available for the last 90 days only. Format: YYYY-MM-DD.\n """\n self.aws_key_id = check.str_param(aws_key_id, "aws_key_id")\n self.aws_secret_key = check.str_param(aws_secret_key, "aws_secret_key")\n self.aws_region_name = check.str_param(aws_region_name, "aws_region_name")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Aws Cloudtrail", name)
\n\n\n
[docs]class OktaSource(GeneratedAirbyteSource):\n
[docs] class OAuth20:\n
[docs] @public\n def __init__(self, client_id: str, client_secret: str, refresh_token: str):\n self.auth_type = "oauth2.0"\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] class APIToken:\n
[docs] @public\n def __init__(self, api_token: str):\n self.auth_type = "api_token"\n self.api_token = check.str_param(api_token, "api_token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n credentials: Union["OktaSource.OAuth20", "OktaSource.APIToken"],\n domain: Optional[str] = None,\n start_date: Optional[str] = None,\n ):\n """Airbyte Source for Okta.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/okta\n\n Args:\n name (str): The name of the destination.\n domain (Optional[str]): The Okta domain. See the docs for instructions on how to find it.\n start_date (Optional[str]): UTC date and time in the format YYYY-MM-DDTHH:MM:SSZ. Any data before this date will not be replicated.\n """\n self.domain = check.opt_str_param(domain, "domain")\n self.start_date = check.opt_str_param(start_date, "start_date")\n self.credentials = check.inst_param(\n credentials, "credentials", (OktaSource.OAuth20, OktaSource.APIToken)\n )\n super().__init__("Okta", name)
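As with the other sources that accept a credentials union, either nested class can be passed. A hedged sketch using the API token variant; the import path and values are assumptions:

from dagster_airbyte.managed.generated.sources import OktaSource  # import path assumed for illustration

okta_source = OktaSource(
    name="my_okta_connection",                                    # hypothetical connection name
    credentials=OktaSource.APIToken(api_token="OKTA_API_TOKEN"),  # placeholder token
    domain="my-okta-domain",                                      # placeholder Okta domain
    start_date="2022-01-01T00:00:00Z",                            # placeholder UTC timestamp
)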
\n\n\n
[docs]class InsightlySource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, token: Optional[str] = None, start_date: Optional[str] = None):\n """Airbyte Source for Insightly.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/insightly\n\n Args:\n name (str): The name of the destination.\n token (Optional[str]): Your Insightly API token.\n start_date (Optional[str]): The date from which you'd like to replicate data for Insightly in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. Note that it will be used only for incremental streams.\n """\n self.token = check.opt_str_param(token, "token")\n self.start_date = check.opt_str_param(start_date, "start_date")\n super().__init__("Insightly", name)
\n\n\n
[docs]class LinkedinPagesSource(GeneratedAirbyteSource):\n
[docs] class OAuth20:\n
[docs] @public\n def __init__(\n self,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n auth_method: Optional[str] = None,\n ):\n self.auth_method = check.opt_str_param(auth_method, "auth_method")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] class AccessToken:\n
[docs] @public\n def __init__(self, access_token: str, auth_method: Optional[str] = None):\n self.auth_method = check.opt_str_param(auth_method, "auth_method")\n self.access_token = check.str_param(access_token, "access_token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n org_id: int,\n credentials: Union["LinkedinPagesSource.OAuth20", "LinkedinPagesSource.AccessToken"],\n ):\n """Airbyte Source for Linkedin Pages.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/linkedin-pages/\n\n Args:\n name (str): The name of the destination.\n org_id (int): Specify the Organization ID\n """\n self.org_id = check.int_param(org_id, "org_id")\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (LinkedinPagesSource.OAuth20, LinkedinPagesSource.AccessToken),\n )\n super().__init__("Linkedin Pages", name)
\n\n\n
[docs]class PersistiqSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_key: str):\n """Airbyte Source for Persistiq.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/persistiq\n\n Args:\n name (str): The name of the destination.\n api_key (str): PersistIq API Key. See the docs for more information on where to find that key.\n """\n self.api_key = check.str_param(api_key, "api_key")\n super().__init__("Persistiq", name)
\n\n\n
[docs]class FreshcallerSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n domain: str,\n api_key: str,\n start_date: str,\n requests_per_minute: Optional[int] = None,\n sync_lag_minutes: Optional[int] = None,\n ):\n """Airbyte Source for Freshcaller.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/freshcaller\n\n Args:\n name (str): The name of the destination.\n domain (str): Used to construct Base URL for the Freshcaller APIs\n api_key (str): Freshcaller API Key. See the docs for more information on how to obtain this key.\n requests_per_minute (Optional[int]): The number of requests per minute that this source allowed to use. There is a rate limit of 50 requests per minute per app per account.\n start_date (str): UTC date and time. Any data created after this date will be replicated.\n sync_lag_minutes (Optional[int]): Lag in minutes for each sync, i.e., at time T, data for the time range [prev_sync_time, T-30] will be fetched\n """\n self.domain = check.str_param(domain, "domain")\n self.api_key = check.str_param(api_key, "api_key")\n self.requests_per_minute = check.opt_int_param(requests_per_minute, "requests_per_minute")\n self.start_date = check.str_param(start_date, "start_date")\n self.sync_lag_minutes = check.opt_int_param(sync_lag_minutes, "sync_lag_minutes")\n super().__init__("Freshcaller", name)
\n\n\n
[docs]class AppfollowSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, ext_id: str, cid: str, api_secret: str, country: str):\n """Airbyte Source for Appfollow.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/appfollow\n\n Args:\n name (str): The name of the destination.\n ext_id (str): For the App Store \u2014 this is a 9-10 digit identification number; for Google Play \u2014 this is the bundle name.\n cid (str): Client ID provided by Appfollow\n api_secret (str): API secret provided by Appfollow\n country (str): The country to get data for\n """\n self.ext_id = check.str_param(ext_id, "ext_id")\n self.cid = check.str_param(cid, "cid")\n self.api_secret = check.str_param(api_secret, "api_secret")\n self.country = check.str_param(country, "country")\n super().__init__("Appfollow", name)
\n\n\n
[docs]class FacebookPagesSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, access_token: str, page_id: str):\n """Airbyte Source for Facebook Pages.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/facebook-pages\n\n Args:\n name (str): The name of the destination.\n access_token (str): Facebook Page Access Token\n page_id (str): Page ID\n """\n self.access_token = check.str_param(access_token, "access_token")\n self.page_id = check.str_param(page_id, "page_id")\n super().__init__("Facebook Pages", name)
\n\n\n
[docs]class JiraSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n api_token: str,\n domain: str,\n email: str,\n projects: Optional[List[str]] = None,\n start_date: Optional[str] = None,\n additional_fields: Optional[List[str]] = None,\n expand_issue_changelog: Optional[bool] = None,\n render_fields: Optional[bool] = None,\n enable_experimental_streams: Optional[bool] = None,\n ):\n """Airbyte Source for Jira.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/jira\n\n Args:\n name (str): The name of the destination.\n api_token (str): Jira API Token. See the docs for more information on how to generate this key.\n domain (str): The Domain for your Jira account, e.g. airbyteio.atlassian.net\n email (str): The user email for your Jira account.\n projects (Optional[List[str]]): List of Jira project keys to replicate data for.\n start_date (Optional[str]): The date from which you'd like to replicate data for Jira in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. Note that it will be used only in the following incremental streams: issues.\n additional_fields (Optional[List[str]]): List of additional fields to include in replicating issues.\n expand_issue_changelog (Optional[bool]): Expand the changelog when replicating issues.\n render_fields (Optional[bool]): Render issue fields in HTML format in addition to Jira JSON-like format.\n enable_experimental_streams (Optional[bool]): Allow the use of experimental streams which rely on undocumented Jira API endpoints. See https://docs.airbyte.com/integrations/sources/jira#experimental-tables for more info.\n """\n self.api_token = check.str_param(api_token, "api_token")\n self.domain = check.str_param(domain, "domain")\n self.email = check.str_param(email, "email")\n self.projects = check.opt_nullable_list_param(projects, "projects", str)\n self.start_date = check.opt_str_param(start_date, "start_date")\n self.additional_fields = check.opt_nullable_list_param(\n additional_fields, "additional_fields", str\n )\n self.expand_issue_changelog = check.opt_bool_param(\n expand_issue_changelog, "expand_issue_changelog"\n )\n self.render_fields = check.opt_bool_param(render_fields, "render_fields")\n self.enable_experimental_streams = check.opt_bool_param(\n enable_experimental_streams, "enable_experimental_streams"\n )\n super().__init__("Jira", name)
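A hedged sketch of the constructor above, showing the optional list-valued parameters; the import path and values are illustrative assumptions:

from dagster_airbyte.managed.generated.sources import JiraSource  # import path assumed for illustration

jira_source = JiraSource(
    name="my_jira_connection",          # hypothetical connection name
    api_token="JIRA_API_TOKEN",         # placeholder token
    domain="mycompany.atlassian.net",   # placeholder Jira domain
    email="user@example.com",           # placeholder account email
    projects=["PROJ"],                  # placeholder list of project keys
    start_date="2022-01-01T00:00:00Z",  # placeholder start date
    expand_issue_changelog=True,
)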
\n\n\n
[docs]class GoogleSheetsSource(GeneratedAirbyteSource):\n
[docs] class AuthenticateViaGoogleOAuth:\n
[docs] @public\n def __init__(self, client_id: str, client_secret: str, refresh_token: str):\n self.auth_type = "Client"\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] class ServiceAccountKeyAuthentication:\n
[docs] @public\n def __init__(self, service_account_info: str):\n self.auth_type = "Service"\n self.service_account_info = check.str_param(\n service_account_info, "service_account_info"\n )
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n spreadsheet_id: str,\n credentials: Union[\n "GoogleSheetsSource.AuthenticateViaGoogleOAuth",\n "GoogleSheetsSource.ServiceAccountKeyAuthentication",\n ],\n row_batch_size: Optional[int] = None,\n ):\n """Airbyte Source for Google Sheets.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/google-sheets\n\n Args:\n name (str): The name of the destination.\n spreadsheet_id (str): Enter the link to the Google spreadsheet you want to sync\n row_batch_size (Optional[int]): Number of rows fetched when making a Google Sheet API call. Defaults to 200.\n credentials (Union[GoogleSheetsSource.AuthenticateViaGoogleOAuth, GoogleSheetsSource.ServiceAccountKeyAuthentication]): Credentials for connecting to the Google Sheets API\n """\n self.spreadsheet_id = check.str_param(spreadsheet_id, "spreadsheet_id")\n self.row_batch_size = check.opt_int_param(row_batch_size, "row_batch_size")\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (\n GoogleSheetsSource.AuthenticateViaGoogleOAuth,\n GoogleSheetsSource.ServiceAccountKeyAuthentication,\n ),\n )\n super().__init__("Google Sheets", name)
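A hedged sketch using the service-account credential variant documented above; the import path, spreadsheet link, and key JSON are placeholders:

from dagster_airbyte.managed.generated.sources import GoogleSheetsSource  # import path assumed for illustration

sheets_source = GoogleSheetsSource(
    name="my_google_sheets_connection",  # hypothetical connection name
    spreadsheet_id="https://docs.google.com/spreadsheets/d/PLACEHOLDER",  # placeholder spreadsheet link
    credentials=GoogleSheetsSource.ServiceAccountKeyAuthentication(
        service_account_info='{"type": "service_account"}',  # placeholder service-account JSON
    ),
    row_batch_size=200,  # matches the documented default
)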
\n\n\n
[docs]class DockerhubSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, docker_username: str):\n """Airbyte Source for Dockerhub.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/dockerhub\n\n Args:\n name (str): The name of the destination.\n docker_username (str): Username of DockerHub person or organization (for https://hub.docker.com/v2/repositories/USERNAME/ API call)\n """\n self.docker_username = check.str_param(docker_username, "docker_username")\n super().__init__("Dockerhub", name)
\n\n\n
[docs]class UsCensusSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self, name: str, query_path: str, api_key: str, query_params: Optional[str] = None\n ):\n """Airbyte Source for Us Census.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/us-census\n\n Args:\n name (str): The name of the destination.\n query_params (Optional[str]): The query parameters portion of the GET request, without the api key\n query_path (str): The path portion of the GET request\n api_key (str): Your API Key. Get your key here.\n """\n self.query_params = check.opt_str_param(query_params, "query_params")\n self.query_path = check.str_param(query_path, "query_path")\n self.api_key = check.str_param(api_key, "api_key")\n super().__init__("Us Census", name)
\n\n\n
[docs]class KustomerSingerSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_token: str, start_date: str):\n """Airbyte Source for Kustomer Singer.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/kustomer\n\n Args:\n name (str): The name of the destination.\n api_token (str): Kustomer API Token. See the docs on how to obtain this\n start_date (str): The date from which you'd like to replicate the data\n """\n self.api_token = check.str_param(api_token, "api_token")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Kustomer Singer", name)
\n\n\n
[docs]class AzureTableSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n storage_account_name: str,\n storage_access_key: str,\n storage_endpoint_suffix: Optional[str] = None,\n ):\n """Airbyte Source for Azure Table.\n\n Args:\n name (str): The name of the destination.\n storage_account_name (str): The name of your storage account.\n storage_access_key (str): Azure Table Storage Access Key. See the docs for more information on how to obtain this key.\n storage_endpoint_suffix (Optional[str]): Azure Table Storage service account URL suffix. See the docs for more information on how to obtain endpoint suffix\n """\n self.storage_account_name = check.str_param(storage_account_name, "storage_account_name")\n self.storage_access_key = check.str_param(storage_access_key, "storage_access_key")\n self.storage_endpoint_suffix = check.opt_str_param(\n storage_endpoint_suffix, "storage_endpoint_suffix"\n )\n super().__init__("Azure Table", name)
\n\n\n
[docs]class ScaffoldJavaJdbcSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n database: str,\n username: str,\n replication_method: str,\n password: Optional[str] = None,\n jdbc_url_params: Optional[str] = None,\n ):\n """Airbyte Source for Scaffold Java Jdbc.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/scaffold_java_jdbc\n\n Args:\n name (str): The name of the destination.\n host (str): Hostname of the database.\n port (int): Port of the database.\n database (str): Name of the database.\n username (str): Username to use to access the database.\n password (Optional[str]): Password associated with the username.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3)\n replication_method (str): Replication method to use for extracting data from the database. STANDARD replication requires no setup on the DB side but will not be able to represent deletions incrementally. CDC uses the Binlog to detect inserts, updates, and deletes. This needs to be configured on the source database itself.\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.database = check.str_param(database, "database")\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n self.replication_method = check.str_param(replication_method, "replication_method")\n super().__init__("Scaffold Java Jdbc", name)
\n\n\n
[docs]class TidbSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n database: str,\n username: str,\n password: Optional[str] = None,\n jdbc_url_params: Optional[str] = None,\n ssl: Optional[bool] = None,\n ):\n """Airbyte Source for Tidb.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/tidb\n\n Args:\n name (str): The name of the destination.\n host (str): Hostname of the database.\n port (int): Port of the database.\n database (str): Name of the database.\n username (str): Username to use to access the database.\n password (Optional[str]): Password associated with the username.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3)\n ssl (Optional[bool]): Encrypt data using SSL.\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.database = check.str_param(database, "database")\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n self.ssl = check.opt_bool_param(ssl, "ssl")\n super().__init__("Tidb", name)
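A hedged sketch of a database-style source configuration; the host, credentials, and import path are placeholder assumptions:

from dagster_airbyte.managed.generated.sources import TidbSource  # import path assumed for illustration

tidb_source = TidbSource(
    name="my_tidb_connection",  # hypothetical connection name
    host="tidb.example.com",    # placeholder hostname
    port=4000,                  # placeholder port
    database="analytics",       # placeholder database name
    username="reader",          # placeholder username
    password="TIDB_PASSWORD",   # placeholder; supply the real password from a secret store
    ssl=True,                   # encrypt the connection
)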
\n\n\n
[docs]class QualarooSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n token: str,\n key: str,\n start_date: str,\n survey_ids: Optional[List[str]] = None,\n ):\n """Airbyte Source for Qualaroo.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/qualaroo\n\n Args:\n name (str): The name of the destination.\n token (str): A Qualaroo token. See the docs for instructions on how to generate it.\n key (str): A Qualaroo token. See the docs for instructions on how to generate it.\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.\n survey_ids (Optional[List[str]]): IDs of the surveys from which you'd like to replicate data. If left empty, data from all surveys to which you have access will be replicated.\n """\n self.token = check.str_param(token, "token")\n self.key = check.str_param(key, "key")\n self.start_date = check.str_param(start_date, "start_date")\n self.survey_ids = check.opt_nullable_list_param(survey_ids, "survey_ids", str)\n super().__init__("Qualaroo", name)
\n\n\n
[docs]class YahooFinancePriceSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self, name: str, tickers: str, interval: Optional[str] = None, range: Optional[str] = None\n ):\n """Airbyte Source for Yahoo Finance Price.\n\n Args:\n name (str): The name of the destination.\n tickers (str): Comma-separated identifiers for the stocks to be queried. Whitespaces are allowed.\n interval (Optional[str]): The interval between prices queried.\n range (Optional[str]): The range of prices to be queried.\n """\n self.tickers = check.str_param(tickers, "tickers")\n self.interval = check.opt_str_param(interval, "interval")\n self.range = check.opt_str_param(range, "range")\n super().__init__("Yahoo Finance Price", name)
\n\n\n
[docs]class GoogleAnalyticsV4Source(GeneratedAirbyteSource):\n
[docs] class AuthenticateViaGoogleOauth:\n
[docs] @public\n def __init__(\n self,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n auth_type: Optional[str] = None,\n access_token: Optional[str] = None,\n ):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")\n self.access_token = check.opt_str_param(access_token, "access_token")
\n\n
[docs] class ServiceAccountKeyAuthentication:\n
[docs] @public\n def __init__(self, credentials_json: str, auth_type: Optional[str] = None):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.credentials_json = check.str_param(credentials_json, "credentials_json")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n credentials: Union[\n "GoogleAnalyticsV4Source.AuthenticateViaGoogleOauth",\n "GoogleAnalyticsV4Source.ServiceAccountKeyAuthentication",\n ],\n start_date: str,\n view_id: str,\n custom_reports: Optional[str] = None,\n window_in_days: Optional[int] = None,\n ):\n """Airbyte Source for Google Analytics V4.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/google-analytics-universal-analytics\n\n Args:\n name (str): The name of the destination.\n credentials (Union[GoogleAnalyticsV4Source.AuthenticateViaGoogleOauth, GoogleAnalyticsV4Source.ServiceAccountKeyAuthentication]): Credentials for the service\n start_date (str): The date in the format YYYY-MM-DD. Any data before this date will not be replicated.\n view_id (str): The ID for the Google Analytics View you want to fetch data from. This can be found from the Google Analytics Account Explorer.\n custom_reports (Optional[str]): A JSON array describing the custom reports you want to sync from Google Analytics. See the docs for more information about the exact format you can use to fill out this field.\n window_in_days (Optional[int]): The time increment used by the connector when requesting data from the Google Analytics API. More information is available in the the docs. The bigger this value is, the faster the sync will be, but the more likely that sampling will be applied to your data, potentially causing inaccuracies in the returned results. We recommend setting this to 1 unless you have a hard requirement to make the sync faster at the expense of accuracy. The minimum allowed value for this field is 1, and the maximum is 364.\n """\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (\n GoogleAnalyticsV4Source.AuthenticateViaGoogleOauth,\n GoogleAnalyticsV4Source.ServiceAccountKeyAuthentication,\n ),\n )\n self.start_date = check.str_param(start_date, "start_date")\n self.view_id = check.str_param(view_id, "view_id")\n self.custom_reports = check.opt_str_param(custom_reports, "custom_reports")\n self.window_in_days = check.opt_int_param(window_in_days, "window_in_days")\n super().__init__("Google Analytics V4", name)
\n\n\n
[docs]class JdbcSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n username: str,\n jdbc_url: str,\n password: Optional[str] = None,\n jdbc_url_params: Optional[str] = None,\n ):\n """Airbyte Source for Jdbc.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/postgres\n\n Args:\n name (str): The name of the destination.\n username (str): The username which is used to access the database.\n password (Optional[str]): The password associated with this username.\n jdbc_url (str): JDBC formatted URL. See the standard here.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).\n """\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.jdbc_url = check.str_param(jdbc_url, "jdbc_url")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n super().__init__("Jdbc", name)
\n\n\n
[docs]class FakerSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n count: int,\n seed: Optional[int] = None,\n records_per_sync: Optional[int] = None,\n records_per_slice: Optional[int] = None,\n ):\n """Airbyte Source for Faker.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/faker\n\n Args:\n name (str): The name of the destination.\n count (int): How many users should be generated in total. This setting does not apply to the purchases or products stream.\n seed (Optional[int]): Manually control the faker random seed to return the same values on subsequent runs (leave -1 for random)\n records_per_sync (Optional[int]): How many fake records will be returned for each sync, for each stream? By default, it will take 2 syncs to create the requested 1000 records.\n records_per_slice (Optional[int]): How many fake records will be in each page (stream slice), before a state message is emitted?\n """\n self.count = check.int_param(count, "count")\n self.seed = check.opt_int_param(seed, "seed")\n self.records_per_sync = check.opt_int_param(records_per_sync, "records_per_sync")\n self.records_per_slice = check.opt_int_param(records_per_slice, "records_per_slice")\n super().__init__("Faker", name)
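A hedged sketch showing the count and seed parameters described above; the import path and numbers are illustrative assumptions:

from dagster_airbyte.managed.generated.sources import FakerSource  # import path assumed for illustration

faker_source = FakerSource(
    name="my_faker_connection",  # hypothetical connection name
    count=1000,                  # total fake users to generate
    seed=42,                     # fixed seed so repeated syncs return the same values
    records_per_sync=500,        # placeholder per-sync record cap
)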
\n\n\n
[docs]class TplcentralSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n url_base: str,\n client_id: str,\n client_secret: str,\n user_login_id: Optional[int] = None,\n user_login: Optional[str] = None,\n tpl_key: Optional[str] = None,\n customer_id: Optional[int] = None,\n facility_id: Optional[int] = None,\n start_date: Optional[str] = None,\n ):\n """Airbyte Source for Tplcentral.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/tplcentral\n\n Args:\n name (str): The name of the destination.\n user_login_id (Optional[int]): User login ID and/or name is required\n user_login (Optional[str]): User login ID and/or name is required\n start_date (Optional[str]): Date and time together in RFC 3339 format, for example, 2018-11-13T20:20:39+00:00.\n """\n self.url_base = check.str_param(url_base, "url_base")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.user_login_id = check.opt_int_param(user_login_id, "user_login_id")\n self.user_login = check.opt_str_param(user_login, "user_login")\n self.tpl_key = check.opt_str_param(tpl_key, "tpl_key")\n self.customer_id = check.opt_int_param(customer_id, "customer_id")\n self.facility_id = check.opt_int_param(facility_id, "facility_id")\n self.start_date = check.opt_str_param(start_date, "start_date")\n super().__init__("Tplcentral", name)
\n\n\n
[docs]class ClickhouseSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n database: str,\n username: str,\n password: Optional[str] = None,\n jdbc_url_params: Optional[str] = None,\n ssl: Optional[bool] = None,\n ):\n """Airbyte Source for Clickhouse.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/clickhouse\n\n Args:\n name (str): The name of the destination.\n host (str): The host endpoint of the Clickhouse cluster.\n port (int): The port of the database.\n database (str): The name of the database.\n username (str): The username which is used to access the database.\n password (Optional[str]): The password associated with this username.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (Eg. key1=value1&key2=value2&key3=value3). For more information read about JDBC URL parameters.\n ssl (Optional[bool]): Encrypt data using SSL.\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.database = check.str_param(database, "database")\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n self.ssl = check.opt_bool_param(ssl, "ssl")\n super().__init__("Clickhouse", name)
\n\n\n
[docs]class FreshserviceSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, domain_name: str, api_key: str, start_date: str):\n """Airbyte Source for Freshservice.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/freshservice\n\n Args:\n name (str): The name of the destination.\n domain_name (str): The name of your Freshservice domain\n api_key (str): Freshservice API Key. See here. The key is case sensitive.\n start_date (str): UTC date and time in the format 2020-10-01T00:00:00Z. Any data before this date will not be replicated.\n """\n self.domain_name = check.str_param(domain_name, "domain_name")\n self.api_key = check.str_param(api_key, "api_key")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Freshservice", name)
\n\n\n
[docs]class ZenloopSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n api_token: str,\n date_from: Optional[str] = None,\n survey_id: Optional[str] = None,\n survey_group_id: Optional[str] = None,\n ):\n """Airbyte Source for Zenloop.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/zenloop\n\n Args:\n name (str): The name of the destination.\n api_token (str): Zenloop API Token. You can get the API token in settings page here\n date_from (Optional[str]): Zenloop date_from. Format: 2021-10-24T03:30:30Z or 2021-10-24. Leave empty if only data from current data should be synced\n survey_id (Optional[str]): Zenloop Survey ID. Can be found here. Leave empty to pull answers from all surveys\n survey_group_id (Optional[str]): Zenloop Survey Group ID. Can be found by pulling All Survey Groups via SurveyGroups stream. Leave empty to pull answers from all survey groups\n """\n self.api_token = check.str_param(api_token, "api_token")\n self.date_from = check.opt_str_param(date_from, "date_from")\n self.survey_id = check.opt_str_param(survey_id, "survey_id")\n self.survey_group_id = check.opt_str_param(survey_group_id, "survey_group_id")\n super().__init__("Zenloop", name)
\n\n\n
[docs]class OracleSource(GeneratedAirbyteSource):\n
[docs] class ServiceName:\n
[docs] @public\n def __init__(self, service_name: str, connection_type: Optional[str] = None):\n self.connection_type = check.opt_str_param(connection_type, "connection_type")\n self.service_name = check.str_param(service_name, "service_name")
\n\n
[docs] class SystemIDSID:\n
[docs] @public\n def __init__(self, sid: str, connection_type: Optional[str] = None):\n self.connection_type = check.opt_str_param(connection_type, "connection_type")\n self.sid = check.str_param(sid, "sid")
\n\n
[docs] class Unencrypted:\n
[docs] @public\n def __init__(\n self,\n ):\n self.encryption_method = "unencrypted"
\n\n
[docs] class NativeNetworkEncryptionNNE:\n
[docs] @public\n def __init__(self, encryption_algorithm: Optional[str] = None):\n self.encryption_method = "client_nne"\n self.encryption_algorithm = check.opt_str_param(\n encryption_algorithm, "encryption_algorithm"\n )
\n\n
[docs] class TLSEncryptedVerifyCertificate:\n
[docs] @public\n def __init__(self, ssl_certificate: str):\n self.encryption_method = "encrypted_verify_certificate"\n self.ssl_certificate = check.str_param(ssl_certificate, "ssl_certificate")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n connection_data: Union["OracleSource.ServiceName", "OracleSource.SystemIDSID"],\n username: str,\n encryption: Union[\n "OracleSource.Unencrypted",\n "OracleSource.NativeNetworkEncryptionNNE",\n "OracleSource.TLSEncryptedVerifyCertificate",\n ],\n password: Optional[str] = None,\n schemas: Optional[List[str]] = None,\n jdbc_url_params: Optional[str] = None,\n ):\n """Airbyte Source for Oracle.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/oracle\n\n Args:\n name (str): The name of the destination.\n host (str): Hostname of the database.\n port (int): Port of the database. Oracle Corporation recommends the following port numbers: 1521 - Default listening port for client connections to the listener. 2484 - Recommended and officially registered listening port for client connections to the listener using TCP/IP with SSL\n connection_data (Union[OracleSource.ServiceName, OracleSource.SystemIDSID]): Connect data that will be used for DB connection\n username (str): The username which is used to access the database.\n password (Optional[str]): The password associated with the username.\n schemas (Optional[List[str]]): The list of schemas to sync from. Defaults to user. Case sensitive.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).\n encryption (Union[OracleSource.Unencrypted, OracleSource.NativeNetworkEncryptionNNE, OracleSource.TLSEncryptedVerifyCertificate]): The encryption method which is used when communicating with the database.\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.connection_data = check.inst_param(\n connection_data, "connection_data", (OracleSource.ServiceName, OracleSource.SystemIDSID)\n )\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.schemas = check.opt_nullable_list_param(schemas, "schemas", str)\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n self.encryption = check.inst_param(\n encryption,\n "encryption",\n (\n OracleSource.Unencrypted,\n OracleSource.NativeNetworkEncryptionNNE,\n OracleSource.TLSEncryptedVerifyCertificate,\n ),\n )\n super().__init__("Oracle", name)
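Both connection_data and encryption take one of the nested classes defined above. A hedged sketch combining a service name with native network encryption; the import path and values are placeholder assumptions:

from dagster_airbyte.managed.generated.sources import OracleSource  # import path assumed for illustration

oracle_source = OracleSource(
    name="my_oracle_connection",  # hypothetical connection name
    host="oracle.example.com",    # placeholder hostname
    port=1521,                    # default listener port noted in the docstring
    connection_data=OracleSource.ServiceName(service_name="ORCLPDB1"),  # placeholder service name
    username="reporting_user",    # placeholder username
    password="ORACLE_PASSWORD",   # placeholder; supply the real password from a secret store
    encryption=OracleSource.NativeNetworkEncryptionNNE(),  # client-side NNE with the default algorithm
    schemas=["REPORTING"],        # placeholder schema list
)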
\n\n\n
[docs]class KlaviyoSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_key: str, start_date: str):\n """Airbyte Source for Klaviyo.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/klaviyo\n\n Args:\n name (str): The name of the destination.\n api_key (str): Klaviyo API Key. See our docs if you need help finding this key.\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.\n """\n self.api_key = check.str_param(api_key, "api_key")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Klaviyo", name)
\n\n\n
[docs]class GoogleDirectorySource(GeneratedAirbyteSource):\n
[docs] class SignInViaGoogleOAuth:\n
[docs] @public\n def __init__(\n self,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n credentials_title: Optional[str] = None,\n ):\n self.credentials_title = check.opt_str_param(credentials_title, "credentials_title")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] class ServiceAccountKey:\n
[docs] @public\n def __init__(\n self, credentials_json: str, email: str, credentials_title: Optional[str] = None\n ):\n self.credentials_title = check.opt_str_param(credentials_title, "credentials_title")\n self.credentials_json = check.str_param(credentials_json, "credentials_json")\n self.email = check.str_param(email, "email")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n credentials: Union[\n "GoogleDirectorySource.SignInViaGoogleOAuth", "GoogleDirectorySource.ServiceAccountKey"\n ],\n ):\n """Airbyte Source for Google Directory.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/google-directory\n\n Args:\n name (str): The name of the destination.\n credentials (Union[GoogleDirectorySource.SignInViaGoogleOAuth, GoogleDirectorySource.ServiceAccountKey]): Google APIs use the OAuth 2.0 protocol for authentication and authorization. The Source supports Web server application and Service accounts scenarios.\n """\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (GoogleDirectorySource.SignInViaGoogleOAuth, GoogleDirectorySource.ServiceAccountKey),\n )\n super().__init__("Google Directory", name)
\n\n\n
[docs]class InstagramSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, start_date: str, access_token: str):\n """Airbyte Source for Instagram.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/instagram\n\n Args:\n name (str): The name of the destination.\n start_date (str): The date from which you'd like to replicate data for User Insights, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.\n access_token (str): The value of the access token generated. See the docs for more information\n """\n self.start_date = check.str_param(start_date, "start_date")\n self.access_token = check.str_param(access_token, "access_token")\n super().__init__("Instagram", name)
\n\n\n
[docs]class ShortioSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, domain_id: str, secret_key: str, start_date: str):\n """Airbyte Source for Shortio.\n\n Documentation can be found at https://developers.short.io/reference\n\n Args:\n name (str): The name of the destination.\n secret_key (str): Short.io Secret Key\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.\n """\n self.domain_id = check.str_param(domain_id, "domain_id")\n self.secret_key = check.str_param(secret_key, "secret_key")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Shortio", name)
\n\n\n
[docs]class SquareSource(GeneratedAirbyteSource):\n
[docs] class OauthAuthentication:\n
[docs] @public\n def __init__(self, client_id: str, client_secret: str, refresh_token: str):\n self.auth_type = "Oauth"\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] class APIKey:\n
[docs] @public\n def __init__(self, api_key: str):\n self.auth_type = "Apikey"\n self.api_key = check.str_param(api_key, "api_key")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n is_sandbox: bool,\n credentials: Union["SquareSource.OauthAuthentication", "SquareSource.APIKey"],\n start_date: Optional[str] = None,\n include_deleted_objects: Optional[bool] = None,\n ):\n """Airbyte Source for Square.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/square\n\n Args:\n name (str): The name of the destination.\n is_sandbox (bool): Determines whether to use the sandbox or production environment.\n start_date (Optional[str]): UTC date in the format YYYY-MM-DD. Any data before this date will not be replicated. If not set, all data will be replicated.\n include_deleted_objects (Optional[bool]): In some streams there is an option to include deleted objects (Items, Categories, Discounts, Taxes)\n """\n self.is_sandbox = check.bool_param(is_sandbox, "is_sandbox")\n self.start_date = check.opt_str_param(start_date, "start_date")\n self.include_deleted_objects = check.opt_bool_param(\n include_deleted_objects, "include_deleted_objects"\n )\n self.credentials = check.inst_param(\n credentials, "credentials", (SquareSource.OauthAuthentication, SquareSource.APIKey)\n )\n super().__init__("Square", name)
\n\n\n
[docs]class DelightedSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, since: str, api_key: str):\n """Airbyte Source for Delighted.\n\n Args:\n name (str): The name of the destination.\n since (str): The date from which you'd like to replicate the data\n api_key (str): A Delighted API key.\n """\n self.since = check.str_param(since, "since")\n self.api_key = check.str_param(api_key, "api_key")\n super().__init__("Delighted", name)
\n\n\n
[docs]class AmazonSqsSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n queue_url: str,\n region: str,\n delete_messages: bool,\n max_batch_size: Optional[int] = None,\n max_wait_time: Optional[int] = None,\n attributes_to_return: Optional[str] = None,\n visibility_timeout: Optional[int] = None,\n access_key: Optional[str] = None,\n secret_key: Optional[str] = None,\n ):\n """Airbyte Source for Amazon Sqs.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/amazon-sqs\n\n Args:\n name (str): The name of the destination.\n queue_url (str): URL of the SQS Queue\n region (str): AWS Region of the SQS Queue\n delete_messages (bool): If Enabled, messages will be deleted from the SQS Queue after being read. If Disabled, messages are left in the queue and can be read more than once. WARNING: Enabling this option can result in data loss in cases of failure, use with caution, see documentation for more detail.\n max_batch_size (Optional[int]): Max amount of messages to get in one batch (10 max)\n max_wait_time (Optional[int]): Max amount of time in seconds to wait for messages in a single poll (20 max)\n attributes_to_return (Optional[str]): Comma-separated list of Message Attribute names to return\n visibility_timeout (Optional[int]): Modify the Visibility Timeout of the individual message from the Queue's default (seconds).\n access_key (Optional[str]): The Access Key ID of the AWS IAM Role to use for pulling messages\n secret_key (Optional[str]): The Secret Key of the AWS IAM Role to use for pulling messages\n """\n self.queue_url = check.str_param(queue_url, "queue_url")\n self.region = check.str_param(region, "region")\n self.delete_messages = check.bool_param(delete_messages, "delete_messages")\n self.max_batch_size = check.opt_int_param(max_batch_size, "max_batch_size")\n self.max_wait_time = check.opt_int_param(max_wait_time, "max_wait_time")\n self.attributes_to_return = check.opt_str_param(\n attributes_to_return, "attributes_to_return"\n )\n self.visibility_timeout = check.opt_int_param(visibility_timeout, "visibility_timeout")\n self.access_key = check.opt_str_param(access_key, "access_key")\n self.secret_key = check.opt_str_param(secret_key, "secret_key")\n super().__init__("Amazon Sqs", name)
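A hedged sketch of the constructor above; the queue URL, region, and credentials are placeholder assumptions, and delete_messages is left disabled so reads are non-destructive:

from dagster_airbyte.managed.generated.sources import AmazonSqsSource  # import path assumed for illustration

sqs_source = AmazonSqsSource(
    name="my_sqs_connection",  # hypothetical connection name
    queue_url="https://sqs.us-east-1.amazonaws.com/123456789012/my-queue",  # placeholder queue URL
    region="us-east-1",        # placeholder AWS region
    delete_messages=False,     # leave messages in the queue so they can be read again
    max_batch_size=10,         # documented maximum batch size
    access_key="AWS_ACCESS_KEY_ID",      # placeholder credential
    secret_key="AWS_SECRET_ACCESS_KEY",  # placeholder credential; use a secret store in practice
)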
\n\n\n
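The constructor above is a flat configuration object; a minimal usage sketch follows. The queue URL, region, and credentials are placeholder values, and the import path dagster_airbyte.managed.generated.sources is an assumption rather than something stated on this page.

# Hypothetical values throughout; the import path below is assumed, not confirmed here.
from dagster_airbyte.managed.generated.sources import AmazonSqsSource

sqs_source = AmazonSqsSource(
    name="orders_queue",
    queue_url="https://sqs.us-east-1.amazonaws.com/123456789012/orders",
    region="us-east-1",
    # Leaving messages in the queue makes reads repeatable; deleting them risks
    # data loss on failure, per the warning in the docstring above.
    delete_messages=False,
    max_batch_size=10,  # SQS allows at most 10 messages per batch
    access_key="my-access-key-id",
    secret_key="my-secret-key",
)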
[docs]class YoutubeAnalyticsSource(GeneratedAirbyteSource):\n
[docs] class AuthenticateViaOAuth20:\n
[docs] @public\n def __init__(self, client_id: str, client_secret: str, refresh_token: str):\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] @public\n def __init__(self, name: str, credentials: "YoutubeAnalyticsSource.AuthenticateViaOAuth20"):\n """Airbyte Source for Youtube Analytics.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/youtube-analytics\n\n Args:\n name (str): The name of the destination.\n credentials (YoutubeAnalyticsSource.AuthenticateViaOAuth20): The OAuth 2.0 credentials used to authenticate to YouTube Analytics.\n\n """\n self.credentials = check.inst_param(\n credentials, "credentials", YoutubeAnalyticsSource.AuthenticateViaOAuth20\n )\n super().__init__("Youtube Analytics", name)
\n\n\n
[docs]class ScaffoldSourcePythonSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, fix_me: Optional[str] = None):\n """Airbyte Source for Scaffold Source Python.\n\n Args:\n name (str): The name of the destination.\n fix_me (Optional[str]): describe me\n """\n self.fix_me = check.opt_str_param(fix_me, "fix_me")\n super().__init__("Scaffold Source Python", name)
\n\n\n
[docs]class LookerSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n domain: str,\n client_id: str,\n client_secret: str,\n run_look_ids: Optional[List[str]] = None,\n ):\n """Airbyte Source for Looker.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/looker\n\n Args:\n name (str): The name of the destination.\n domain (str): Domain for your Looker account, e.g. airbyte.cloud.looker.com,looker.[clientname].com,IP address\n client_id (str): The Client ID is first part of an API3 key that is specific to each Looker user. See the docs for more information on how to generate this key.\n client_secret (str): The Client Secret is second part of an API3 key.\n run_look_ids (Optional[List[str]]): The IDs of any Looks to run\n """\n self.domain = check.str_param(domain, "domain")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.run_look_ids = check.opt_nullable_list_param(run_look_ids, "run_look_ids", str)\n super().__init__("Looker", name)
\n\n\n
[docs]class GitlabSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n api_url: str,\n private_token: str,\n start_date: str,\n groups: Optional[str] = None,\n projects: Optional[str] = None,\n ):\n """Airbyte Source for Gitlab.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/gitlab\n\n Args:\n name (str): The name of the destination.\n api_url (str): Please enter your basic URL from GitLab instance.\n private_token (str): Log into your GitLab account and then generate a personal Access Token.\n groups (Optional[str]): Space-delimited list of groups. e.g. airbyte.io.\n projects (Optional[str]): Space-delimited list of projects. e.g. airbyte.io/documentation meltano/tap-gitlab.\n start_date (str): The date from which you'd like to replicate data for GitLab API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.\n """\n self.api_url = check.str_param(api_url, "api_url")\n self.private_token = check.str_param(private_token, "private_token")\n self.groups = check.opt_str_param(groups, "groups")\n self.projects = check.opt_str_param(projects, "projects")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Gitlab", name)
\n\n\n
[docs]class ExchangeRatesSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n start_date: str,\n access_key: str,\n base: Optional[str] = None,\n ignore_weekends: Optional[bool] = None,\n ):\n """Airbyte Source for Exchange Rates.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/exchangeratesapi\n\n Args:\n name (str): The name of the destination.\n start_date (str): Start getting data from that date.\n access_key (str): Your API Key. See here. The key is case sensitive.\n base (Optional[str]): ISO reference currency. See here. Free plan doesn't support Source Currency Switching, default base currency is EUR\n ignore_weekends (Optional[bool]): Ignore weekends? (Exchanges don't run on weekends)\n """\n self.start_date = check.str_param(start_date, "start_date")\n self.access_key = check.str_param(access_key, "access_key")\n self.base = check.opt_str_param(base, "base")\n self.ignore_weekends = check.opt_bool_param(ignore_weekends, "ignore_weekends")\n super().__init__("Exchange Rates", name)
\n\n\n
[docs]class AmazonAdsSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n auth_type: Optional[str] = None,\n region: Optional[str] = None,\n report_wait_timeout: Optional[int] = None,\n report_generation_max_retries: Optional[int] = None,\n start_date: Optional[str] = None,\n profiles: Optional[List[int]] = None,\n state_filter: Optional[List[str]] = None,\n ):\n """Airbyte Source for Amazon Ads.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/amazon-ads\n\n Args:\n name (str): The name of the destination.\n client_id (str): The client ID of your Amazon Ads developer application. See the docs for more information.\n client_secret (str): The client secret of your Amazon Ads developer application. See the docs for more information.\n refresh_token (str): Amazon Ads refresh token. See the docs for more information on how to obtain this token.\n region (Optional[str]): Region to pull data from (EU/NA/FE). See docs for more details.\n report_wait_timeout (Optional[int]): Timeout duration in minutes for Reports. Default is 60 minutes.\n report_generation_max_retries (Optional[int]): Maximum retries Airbyte will attempt for fetching report data. Default is 5.\n start_date (Optional[str]): The Start date for collecting reports, should not be more than 60 days in the past. In YYYY-MM-DD format\n profiles (Optional[List[int]]): Profile IDs you want to fetch data for. See docs for more details.\n state_filter (Optional[List[str]]): Reflects the state of the Display, Product, and Brand Campaign streams as enabled, paused, or archived. If you do not populate this field, it will be ignored completely.\n """\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")\n self.region = check.opt_str_param(region, "region")\n self.report_wait_timeout = check.opt_int_param(report_wait_timeout, "report_wait_timeout")\n self.report_generation_max_retries = check.opt_int_param(\n report_generation_max_retries, "report_generation_max_retries"\n )\n self.start_date = check.opt_str_param(start_date, "start_date")\n self.profiles = check.opt_nullable_list_param(profiles, "profiles", int)\n self.state_filter = check.opt_nullable_list_param(state_filter, "state_filter", str)\n super().__init__("Amazon Ads", name)
\n\n\n
[docs]class MixpanelSource(GeneratedAirbyteSource):\n
[docs] class ServiceAccount:\n
[docs] @public\n def __init__(self, username: str, secret: str):\n self.username = check.str_param(username, "username")\n self.secret = check.str_param(secret, "secret")
\n\n
[docs] class ProjectSecret:\n
[docs] @public\n def __init__(self, api_secret: str):\n self.api_secret = check.str_param(api_secret, "api_secret")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n credentials: Union["MixpanelSource.ServiceAccount", "MixpanelSource.ProjectSecret"],\n project_id: Optional[int] = None,\n attribution_window: Optional[int] = None,\n project_timezone: Optional[str] = None,\n select_properties_by_default: Optional[bool] = None,\n start_date: Optional[str] = None,\n end_date: Optional[str] = None,\n region: Optional[str] = None,\n date_window_size: Optional[int] = None,\n ):\n """Airbyte Source for Mixpanel.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/mixpanel\n\n Args:\n name (str): The name of the destination.\n credentials (Union[MixpanelSource.ServiceAccount, MixpanelSource.ProjectSecret]): Choose how to authenticate to Mixpanel\n project_id (Optional[int]): Your project ID number. See the docs for more information on how to obtain this.\n attribution_window (Optional[int]): A period of time for attributing results to ads and the lookback period after those actions occur during which ad results are counted. Default attribution window is 5 days.\n project_timezone (Optional[str]): Time zone in which integer date times are stored. The project timezone may be found in the project settings in the Mixpanel console.\n select_properties_by_default (Optional[bool]): Setting this config parameter to TRUE ensures that new properties on events and engage records are captured. Otherwise new properties will be ignored.\n start_date (Optional[str]): The date in the format YYYY-MM-DD. Any data before this date will not be replicated. If this option is not set, the connector will replicate data from up to one year ago by default.\n end_date (Optional[str]): The date in the format YYYY-MM-DD. Any data after this date will not be replicated. Left empty to always sync to most recent date\n region (Optional[str]): The region of mixpanel domain instance either US or EU.\n date_window_size (Optional[int]): Defines window size in days, that used to slice through data. You can reduce it, if amount of data in each window is too big for your environment.\n """\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (MixpanelSource.ServiceAccount, MixpanelSource.ProjectSecret),\n )\n self.project_id = check.opt_int_param(project_id, "project_id")\n self.attribution_window = check.opt_int_param(attribution_window, "attribution_window")\n self.project_timezone = check.opt_str_param(project_timezone, "project_timezone")\n self.select_properties_by_default = check.opt_bool_param(\n select_properties_by_default, "select_properties_by_default"\n )\n self.start_date = check.opt_str_param(start_date, "start_date")\n self.end_date = check.opt_str_param(end_date, "end_date")\n self.region = check.opt_str_param(region, "region")\n self.date_window_size = check.opt_int_param(date_window_size, "date_window_size")\n super().__init__("Mixpanel", name)
\n\n\n
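Because credentials is a Union of the two nested classes above, the caller picks one and passes an instance. A sketch with placeholder values (the import path is assumed):

from dagster_airbyte.managed.generated.sources import MixpanelSource  # assumed path

mixpanel_source = MixpanelSource(
    name="product_analytics",
    # ServiceAccount and ProjectSecret are interchangeable here; either satisfies the Union.
    credentials=MixpanelSource.ServiceAccount(
        username="my-service-account",
        secret="my-service-account-secret",
    ),
    project_id=12345,
    start_date="2023-01-01",
    region="US",
)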
[docs]class OrbitSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_token: str, workspace: str, start_date: Optional[str] = None):\n """Airbyte Source for Orbit.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/orbit\n\n Args:\n name (str): The name of the destination.\n api_token (str): Authorizes you to work with Orbit workspaces associated with the token.\n workspace (str): The unique name of the workspace that your API token is associated with.\n start_date (Optional[str]): Date in the format 2022-06-26. Only load members whose last activities are after this date.\n """\n self.api_token = check.str_param(api_token, "api_token")\n self.workspace = check.str_param(workspace, "workspace")\n self.start_date = check.opt_str_param(start_date, "start_date")\n super().__init__("Orbit", name)
\n\n\n
[docs]class AmazonSellerPartnerSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n lwa_app_id: str,\n lwa_client_secret: str,\n refresh_token: str,\n aws_access_key: str,\n aws_secret_key: str,\n role_arn: str,\n replication_start_date: str,\n aws_environment: str,\n region: str,\n app_id: Optional[str] = None,\n auth_type: Optional[str] = None,\n replication_end_date: Optional[str] = None,\n period_in_days: Optional[int] = None,\n report_options: Optional[str] = None,\n max_wait_seconds: Optional[int] = None,\n ):\n """Airbyte Source for Amazon Seller Partner.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/amazon-seller-partner\n\n Args:\n name (str): The name of the destination.\n app_id (Optional[str]): Your Amazon App ID\n lwa_app_id (str): Your Login with Amazon Client ID.\n lwa_client_secret (str): Your Login with Amazon Client Secret.\n refresh_token (str): The Refresh Token obtained via OAuth flow authorization.\n aws_access_key (str): Specifies the AWS access key used as part of the credentials to authenticate the user.\n aws_secret_key (str): Specifies the AWS secret key used as part of the credentials to authenticate the user.\n role_arn (str): Specifies the Amazon Resource Name (ARN) of an IAM role that you want to use to perform operations requested using this profile. (Needs permission to 'Assume Role' STS).\n replication_start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.\n replication_end_date (Optional[str]): UTC date and time in the format 2017-01-25T00:00:00Z. Any data after this date will not be replicated.\n period_in_days (Optional[int]): Will be used for stream slicing for initial full_refresh sync when no updated state is present for reports that support sliced incremental sync.\n report_options (Optional[str]): Additional information passed to reports. This varies by report type. Must be a valid json string.\n max_wait_seconds (Optional[int]): Sometimes report can take up to 30 minutes to generate. This will set the limit for how long to wait for a successful report.\n aws_environment (str): An enumeration.\n region (str): An enumeration.\n """\n self.app_id = check.opt_str_param(app_id, "app_id")\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.lwa_app_id = check.str_param(lwa_app_id, "lwa_app_id")\n self.lwa_client_secret = check.str_param(lwa_client_secret, "lwa_client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")\n self.aws_access_key = check.str_param(aws_access_key, "aws_access_key")\n self.aws_secret_key = check.str_param(aws_secret_key, "aws_secret_key")\n self.role_arn = check.str_param(role_arn, "role_arn")\n self.replication_start_date = check.str_param(\n replication_start_date, "replication_start_date"\n )\n self.replication_end_date = check.opt_str_param(\n replication_end_date, "replication_end_date"\n )\n self.period_in_days = check.opt_int_param(period_in_days, "period_in_days")\n self.report_options = check.opt_str_param(report_options, "report_options")\n self.max_wait_seconds = check.opt_int_param(max_wait_seconds, "max_wait_seconds")\n self.aws_environment = check.str_param(aws_environment, "aws_environment")\n self.region = check.str_param(region, "region")\n super().__init__("Amazon Seller Partner", name)
\n\n\n
[docs]class CourierSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_key: str):\n """Airbyte Source for Courier.\n\n Documentation can be found at https://docs.airbyte.io/integrations/sources/courier\n\n Args:\n name (str): The name of the destination.\n api_key (str): Courier API Key to retrieve your data.\n """\n self.api_key = check.str_param(api_key, "api_key")\n super().__init__("Courier", name)
\n\n\n
[docs]class CloseComSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_key: str, start_date: Optional[str] = None):\n r"""Airbyte Source for Close Com.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/close-com\n\n Args:\n name (str): The name of the destination.\n api_key (str): Close.com API key (usually starts with 'api\\\\_'; find yours here).\n start_date (Optional[str]): The start date to sync data. Leave blank for full sync. Format: YYYY-MM-DD.\n """\n self.api_key = check.str_param(api_key, "api_key")\n self.start_date = check.opt_str_param(start_date, "start_date")\n super().__init__("Close Com", name)
\n\n\n
[docs]class BingAdsSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n client_id: str,\n refresh_token: str,\n developer_token: str,\n reports_start_date: str,\n auth_method: Optional[str] = None,\n tenant_id: Optional[str] = None,\n client_secret: Optional[str] = None,\n ):\n """Airbyte Source for Bing Ads.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/bing-ads\n\n Args:\n name (str): The name of the destination.\n tenant_id (Optional[str]): The Tenant ID of your Microsoft Advertising developer application. Set this to "common" unless you know you need a different value.\n client_id (str): The Client ID of your Microsoft Advertising developer application.\n client_secret (Optional[str]): The Client Secret of your Microsoft Advertising developer application.\n refresh_token (str): Refresh Token to renew the expired Access Token.\n developer_token (str): Developer token associated with user. See more info in the docs.\n reports_start_date (str): The start date from which to begin replicating report data. Any data generated before this date will not be replicated in reports. This is a UTC date in YYYY-MM-DD format.\n """\n self.auth_method = check.opt_str_param(auth_method, "auth_method")\n self.tenant_id = check.opt_str_param(tenant_id, "tenant_id")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.opt_str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")\n self.developer_token = check.str_param(developer_token, "developer_token")\n self.reports_start_date = check.str_param(reports_start_date, "reports_start_date")\n super().__init__("Bing Ads", name)
\n\n\n
[docs]class PrimetricSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, client_id: str, client_secret: str):\n """Airbyte Source for Primetric.\n\n Args:\n name (str): The name of the destination.\n client_id (str): The Client ID of your Primetric developer application. The Client ID is visible here.\n client_secret (str): The Client Secret of your Primetric developer application. You can manage your client's credentials here.\n """\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n super().__init__("Primetric", name)
\n\n\n
[docs]class PivotalTrackerSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_token: str):\n """Airbyte Source for Pivotal Tracker.\n\n Args:\n name (str): The name of the destination.\n api_token (str): Pivotal Tracker API token\n """\n self.api_token = check.str_param(api_token, "api_token")\n super().__init__("Pivotal Tracker", name)
\n\n\n
[docs]class ElasticsearchSource(GeneratedAirbyteSource):\n
[docs] class None_:\n
[docs] @public\n def __init__(\n self,\n ):\n self.method = "none"
\n\n
[docs] class ApiKeySecret:\n
[docs] @public\n def __init__(self, apiKeyId: str, apiKeySecret: str):\n self.method = "secret"\n self.apiKeyId = check.str_param(apiKeyId, "apiKeyId")\n self.apiKeySecret = check.str_param(apiKeySecret, "apiKeySecret")
\n\n
[docs] class UsernamePassword:\n
[docs] @public\n def __init__(self, username: str, password: str):\n self.method = "basic"\n self.username = check.str_param(username, "username")\n self.password = check.str_param(password, "password")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n endpoint: str,\n authenticationMethod: Union[\n "ElasticsearchSource.None_",\n "ElasticsearchSource.ApiKeySecret",\n "ElasticsearchSource.UsernamePassword",\n ],\n ):\n r"""Airbyte Source for Elasticsearch.\n\n Documentation can be found at https://docs.airbyte.com/integrations/source/elasticsearch\n\n Args:\n name (str): The name of the destination.\n endpoint (str): The full url of the Elasticsearch server\n authenticationMethod (Union[ElasticsearchSource.None\\\\_, ElasticsearchSource.ApiKeySecret, ElasticsearchSource.UsernamePassword]): The type of authentication to be used\n """\n self.endpoint = check.str_param(endpoint, "endpoint")\n self.authenticationMethod = check.inst_param(\n authenticationMethod,\n "authenticationMethod",\n (\n ElasticsearchSource.None_,\n ElasticsearchSource.ApiKeySecret,\n ElasticsearchSource.UsernamePassword,\n ),\n )\n super().__init__("Elasticsearch", name)
\n\n\n
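The authenticationMethod parameter accepts any of the three nested classes above, including the None_ sentinel for unauthenticated clusters. A sketch with placeholder credentials (import path assumed):

from dagster_airbyte.managed.generated.sources import ElasticsearchSource  # assumed path

es_source = ElasticsearchSource(
    name="search_logs",
    endpoint="https://elasticsearch.internal.example.com:9200",
    # Swap in ElasticsearchSource.None_() for an unsecured cluster, or
    # ElasticsearchSource.UsernamePassword(...) for basic auth.
    authenticationMethod=ElasticsearchSource.ApiKeySecret(
        apiKeyId="my-api-key-id",
        apiKeySecret="my-api-key-secret",
    ),
)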
[docs]class BigquerySource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self, name: str, project_id: str, credentials_json: str, dataset_id: Optional[str] = None\n ):\n """Airbyte Source for Bigquery.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/bigquery\n\n Args:\n name (str): The name of the destination.\n project_id (str): The GCP project ID for the project containing the target BigQuery dataset.\n dataset_id (Optional[str]): The dataset ID to search for tables and views. If you are only loading data from one dataset, setting this option could result in much faster schema discovery.\n credentials_json (str): The contents of your Service Account Key JSON file. See the docs for more information on how to obtain this key.\n """\n self.project_id = check.str_param(project_id, "project_id")\n self.dataset_id = check.opt_str_param(dataset_id, "dataset_id")\n self.credentials_json = check.str_param(credentials_json, "credentials_json")\n super().__init__("Bigquery", name)
\n\n\n
[docs]class WoocommerceSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n shop: str,\n start_date: str,\n api_key: str,\n api_secret: str,\n conversion_window_days: Optional[int] = None,\n ):\n """Airbyte Source for Woocommerce.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/woocommerce\n\n Args:\n name (str): The name of the destination.\n shop (str): The name of the store. For https://EXAMPLE.com, the shop name is 'EXAMPLE.com'.\n start_date (str): The date you would like to replicate data. Format: YYYY-MM-DD.\n api_key (str): The CUSTOMER KEY for API in WooCommerce shop.\n api_secret (str): The CUSTOMER SECRET for API in WooCommerce shop.\n conversion_window_days (Optional[int]): A conversion window is the period of time after an ad interaction (such as an ad click or video view) during which a conversion, such as a purchase, is recorded in Google Ads.\n """\n self.shop = check.str_param(shop, "shop")\n self.start_date = check.str_param(start_date, "start_date")\n self.api_key = check.str_param(api_key, "api_key")\n self.api_secret = check.str_param(api_secret, "api_secret")\n self.conversion_window_days = check.opt_int_param(\n conversion_window_days, "conversion_window_days"\n )\n super().__init__("Woocommerce", name)
\n\n\n
[docs]class SearchMetricsSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self, name: str, api_key: str, client_secret: str, country_code: str, start_date: str\n ):\n """Airbyte Source for Search Metrics.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/seacrh-metrics\n\n Args:\n name (str): The name of the destination.\n country_code (str): The region of the S3 staging bucket to use if utilising a copy strategy.\n start_date (str): Data generated in SearchMetrics after this date will be replicated. This date must be specified in the format YYYY-MM-DDT00:00:00Z.\n """\n self.api_key = check.str_param(api_key, "api_key")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.country_code = check.str_param(country_code, "country_code")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Search Metrics", name)
\n\n\n
[docs]class TypeformSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self, name: str, start_date: str, token: str, form_ids: Optional[List[str]] = None\n ):\n """Airbyte Source for Typeform.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/typeform\n\n Args:\n name (str): The name of the destination.\n start_date (str): UTC date and time in the format: YYYY-MM-DDTHH:mm:ss[Z]. Any data before this date will not be replicated.\n token (str): The API Token for a Typeform account.\n form_ids (Optional[List[str]]): When this parameter is set, the connector will replicate data only from the input forms. Otherwise, all forms in your Typeform account will be replicated. You can find form IDs in your form URLs. For example, in the URL "https://mysite.typeform.com/to/u6nXL7" the form_id is u6nXL7. You can find form URLs on Share panel\n """\n self.start_date = check.str_param(start_date, "start_date")\n self.token = check.str_param(token, "token")\n self.form_ids = check.opt_nullable_list_param(form_ids, "form_ids", str)\n super().__init__("Typeform", name)
\n\n\n
[docs]class WebflowSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, site_id: str, api_key: str):\n """Airbyte Source for Webflow.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/webflow\n\n Args:\n name (str): The name of the destination.\n site_id (str): The id of the Webflow site you are requesting data from. See https://developers.webflow.com/#sites\n api_key (str): The API token for authenticating to Webflow. See https://university.webflow.com/lesson/intro-to-the-webflow-api\n """\n self.site_id = check.str_param(site_id, "site_id")\n self.api_key = check.str_param(api_key, "api_key")\n super().__init__("Webflow", name)
\n\n\n
[docs]class FireboltSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n username: str,\n password: str,\n database: str,\n account: Optional[str] = None,\n host: Optional[str] = None,\n engine: Optional[str] = None,\n ):\n """Airbyte Source for Firebolt.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/firebolt\n\n Args:\n name (str): The name of the destination.\n username (str): Firebolt email address you use to login.\n password (str): Firebolt password.\n account (Optional[str]): Firebolt account to login.\n host (Optional[str]): The host name of your Firebolt database.\n database (str): The database to connect to.\n engine (Optional[str]): Engine name or url to connect to.\n """\n self.username = check.str_param(username, "username")\n self.password = check.str_param(password, "password")\n self.account = check.opt_str_param(account, "account")\n self.host = check.opt_str_param(host, "host")\n self.database = check.str_param(database, "database")\n self.engine = check.opt_str_param(engine, "engine")\n super().__init__("Firebolt", name)
\n\n\n
[docs]class FaunaSource(GeneratedAirbyteSource):\n
[docs] class Disabled:\n
[docs] @public\n def __init__(\n self,\n ):\n self.deletion_mode = "ignore"
\n\n
[docs] class Enabled:\n
[docs] @public\n def __init__(self, column: str):\n self.deletion_mode = "deleted_field"\n self.column = check.str_param(column, "column")
\n\n
[docs] class Collection:\n
[docs] @public\n def __init__(\n self, page_size: int, deletions: Union["FaunaSource.Disabled", "FaunaSource.Enabled"]\n ):\n self.page_size = check.int_param(page_size, "page_size")\n self.deletions = check.inst_param(\n deletions, "deletions", (FaunaSource.Disabled, FaunaSource.Enabled)\n )
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n domain: str,\n port: int,\n scheme: str,\n secret: str,\n collection: "FaunaSource.Collection",\n ):\n """Airbyte Source for Fauna.\n\n Documentation can be found at https://github.com/fauna/airbyte/blob/source-fauna/docs/integrations/sources/fauna.md\n\n Args:\n name (str): The name of the destination.\n domain (str): Domain of Fauna to query. Defaults to db.fauna.com. See the docs.\n port (int): Endpoint port.\n scheme (str): URL scheme.\n secret (str): Fauna secret, used when authenticating with the database.\n collection (FaunaSource.Collection): Settings for the Fauna Collection.\n """\n self.domain = check.str_param(domain, "domain")\n self.port = check.int_param(port, "port")\n self.scheme = check.str_param(scheme, "scheme")\n self.secret = check.str_param(secret, "secret")\n self.collection = check.inst_param(collection, "collection", FaunaSource.Collection)\n super().__init__("Fauna", name)
\n\n\n
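Collection settings nest one level deeper than most sources on this page: the Collection class itself takes a deletions setting that is either Disabled or Enabled. A sketch with placeholder values (import path assumed):

from dagster_airbyte.managed.generated.sources import FaunaSource  # assumed path

fauna_source = FaunaSource(
    name="fauna_documents",
    domain="db.fauna.com",
    port=443,
    scheme="https",
    secret="my-fauna-secret",
    collection=FaunaSource.Collection(
        page_size=64,
        # Enabled records deletions in the named column; Disabled() ignores them.
        deletions=FaunaSource.Enabled(column="deleted_at"),
    ),
)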
[docs]class IntercomSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, start_date: str, access_token: str):\n """Airbyte Source for Intercom.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/intercom\n\n Args:\n name (str): The name of the destination.\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.\n access_token (str): Access token for making authenticated requests. See the Intercom docs for more information.\n """\n self.start_date = check.str_param(start_date, "start_date")\n self.access_token = check.str_param(access_token, "access_token")\n super().__init__("Intercom", name)
\n\n\n
[docs]class FreshsalesSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, domain_name: str, api_key: str):\n """Airbyte Source for Freshsales.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/freshsales\n\n Args:\n name (str): The name of the destination.\n domain_name (str): The Name of your Freshsales domain\n api_key (str): Freshsales API Key. See here. The key is case sensitive.\n """\n self.domain_name = check.str_param(domain_name, "domain_name")\n self.api_key = check.str_param(api_key, "api_key")\n super().__init__("Freshsales", name)
\n\n\n
[docs]class AdjustSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n api_token: str,\n dimensions: List[str],\n ingest_start: str,\n metrics: List[str],\n additional_metrics: Optional[List[str]] = None,\n until_today: Optional[bool] = None,\n ):\n """Airbyte Source for Adjust.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/adjust\n\n Args:\n name (str): The name of the destination.\n additional_metrics (Optional[List[str]]): Metrics names that are not pre-defined, such as cohort metrics or app specific metrics.\n api_token (str): Adjust API key, see https://help.adjust.com/en/article/report-service-api-authentication\n dimensions (List[str]): Dimensions allow a user to break down metrics into groups using one or several parameters. For example, the number of installs by date, country and network. See https://help.adjust.com/en/article/reports-endpoint#dimensions for more information about the dimensions.\n ingest_start (str): Data ingest start date.\n metrics (List[str]): Select at least one metric to query.\n until_today (Optional[bool]): Syncs data up until today. Useful when running daily incremental syncs, and duplicates are not desired.\n """\n self.additional_metrics = check.opt_nullable_list_param(\n additional_metrics, "additional_metrics", str\n )\n self.api_token = check.str_param(api_token, "api_token")\n self.dimensions = check.list_param(dimensions, "dimensions", str)\n self.ingest_start = check.str_param(ingest_start, "ingest_start")\n self.metrics = check.list_param(metrics, "metrics", str)\n self.until_today = check.opt_bool_param(until_today, "until_today")\n super().__init__("Adjust", name)
\n\n\n
[docs]class BambooHrSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n subdomain: str,\n api_key: str,\n custom_reports_fields: Optional[str] = None,\n custom_reports_include_default_fields: Optional[bool] = None,\n ):\n """Airbyte Source for Bamboo Hr.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/bamboo-hr\n\n Args:\n name (str): The name of the destination.\n subdomain (str): Sub Domain of bamboo hr\n api_key (str): Api key of bamboo hr\n custom_reports_fields (Optional[str]): Comma-separated list of fields to include in custom reports.\n custom_reports_include_default_fields (Optional[bool]): If true, the custom reports endpoint will include the default fields defined here: https://documentation.bamboohr.com/docs/list-of-field-names.\n """\n self.subdomain = check.str_param(subdomain, "subdomain")\n self.api_key = check.str_param(api_key, "api_key")\n self.custom_reports_fields = check.opt_str_param(\n custom_reports_fields, "custom_reports_fields"\n )\n self.custom_reports_include_default_fields = check.opt_bool_param(\n custom_reports_include_default_fields, "custom_reports_include_default_fields"\n )\n super().__init__("Bamboo Hr", name)
\n\n\n
[docs]class GoogleAdsSource(GeneratedAirbyteSource):\n
[docs] class GoogleCredentials:\n
[docs] @public\n def __init__(\n self,\n developer_token: str,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n access_token: Optional[str] = None,\n ):\n self.developer_token = check.str_param(developer_token, "developer_token")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")\n self.access_token = check.opt_str_param(access_token, "access_token")
\n\n
[docs] class CustomGAQLQueriesEntry:\n
[docs] @public\n def __init__(self, query: str, table_name: str):\n self.query = check.str_param(query, "query")\n self.table_name = check.str_param(table_name, "table_name")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n credentials: "GoogleAdsSource.GoogleCredentials",\n customer_id: str,\n start_date: str,\n end_date: Optional[str] = None,\n custom_queries: Optional[List[CustomGAQLQueriesEntry]] = None,\n login_customer_id: Optional[str] = None,\n conversion_window_days: Optional[int] = None,\n ):\n """Airbyte Source for Google Ads.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/google-ads\n\n Args:\n name (str): The name of the destination.\n customer_id (str): Comma separated list of (client) customer IDs. Each customer ID must be specified as a 10-digit number without dashes. More instruction on how to find this value in our docs. Metrics streams like AdGroupAdReport cannot be requested for a manager account.\n start_date (str): UTC date and time in the format 2017-01-25. Any data before this date will not be replicated.\n end_date (Optional[str]): UTC date and time in the format 2017-01-25. Any data after this date will not be replicated.\n login_customer_id (Optional[str]): If your access to the customer account is through a manager account, this field is required and must be set to the customer ID of the manager account (10-digit number without dashes). More information about this field you can see here\n conversion_window_days (Optional[int]): A conversion window is the period of time after an ad interaction (such as an ad click or video view) during which a conversion, such as a purchase, is recorded in Google Ads. For more information, see Google's documentation.\n """\n self.credentials = check.inst_param(\n credentials, "credentials", GoogleAdsSource.GoogleCredentials\n )\n self.customer_id = check.str_param(customer_id, "customer_id")\n self.start_date = check.str_param(start_date, "start_date")\n self.end_date = check.opt_str_param(end_date, "end_date")\n self.custom_queries = check.opt_nullable_list_param(\n custom_queries, "custom_queries", GoogleAdsSource.CustomGAQLQueriesEntry\n )\n self.login_customer_id = check.opt_str_param(login_customer_id, "login_customer_id")\n self.conversion_window_days = check.opt_int_param(\n conversion_window_days, "conversion_window_days"\n )\n super().__init__("Google Ads", name)
\n\n\n
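custom_queries is a list of CustomGAQLQueriesEntry objects, each pairing a GAQL query with the table it should land in. A sketch with placeholder IDs and tokens (import path assumed; the GAQL string is only illustrative):

from dagster_airbyte.managed.generated.sources import GoogleAdsSource  # assumed path

google_ads_source = GoogleAdsSource(
    name="ads_reporting",
    credentials=GoogleAdsSource.GoogleCredentials(
        developer_token="my-developer-token",
        client_id="my-client-id",
        client_secret="my-client-secret",
        refresh_token="my-refresh-token",
    ),
    customer_id="1234567890",  # 10-digit customer ID without dashes
    start_date="2023-01-01",
    custom_queries=[
        GoogleAdsSource.CustomGAQLQueriesEntry(
            query="SELECT campaign.id, metrics.clicks FROM campaign",  # illustrative GAQL
            table_name="campaign_clicks",
        )
    ],
)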
[docs]class HellobatonSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_key: str, company: str):\n """Airbyte Source for Hellobaton.\n\n Args:\n name (str): The name of the destination.\n api_key (str): authentication key required to access the api endpoints\n company (str): Company name that generates your base api url\n """\n self.api_key = check.str_param(api_key, "api_key")\n self.company = check.str_param(company, "company")\n super().__init__("Hellobaton", name)
\n\n\n
[docs]class SendgridSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, apikey: str, start_time: Union[int, str]):\n """Airbyte Source for Sendgrid.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/sendgrid\n\n Args:\n name (str): The name of the destination.\n apikey (str): API Key, use admin to generate this key.\n start_time (Union[int, str]): Start time in ISO8601 format. Any data before this time point will not be replicated.\n """\n self.apikey = check.str_param(apikey, "apikey")\n self.start_time = check.inst_param(start_time, "start_time", (int, str))\n super().__init__("Sendgrid", name)
\n\n\n
[docs]class MondaySource(GeneratedAirbyteSource):\n
[docs] class OAuth20:\n
[docs] @public\n def __init__(\n self,\n client_id: str,\n client_secret: str,\n access_token: str,\n subdomain: Optional[str] = None,\n ):\n self.auth_type = "oauth2.0"\n self.subdomain = check.opt_str_param(subdomain, "subdomain")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.access_token = check.str_param(access_token, "access_token")
\n\n
[docs] class APIToken:\n
[docs] @public\n def __init__(self, api_token: str):\n self.auth_type = "api_token"\n self.api_token = check.str_param(api_token, "api_token")
\n\n
[docs] @public\n def __init__(\n self, name: str, credentials: Union["MondaySource.OAuth20", "MondaySource.APIToken"]\n ):\n """Airbyte Source for Monday.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/monday\n\n Args:\n name (str): The name of the destination.\n credentials (Union[MondaySource.OAuth20, MondaySource.APIToken]): Choose how to authenticate to Monday.\n\n """\n self.credentials = check.inst_param(\n credentials, "credentials", (MondaySource.OAuth20, MondaySource.APIToken)\n )\n super().__init__("Monday", name)
\n\n\n
[docs]class DixaSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self, name: str, api_token: str, start_date: str, batch_size: Optional[int] = None\n ):\n """Airbyte Source for Dixa.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/dixa\n\n Args:\n name (str): The name of the destination.\n api_token (str): Dixa API token\n start_date (str): The connector pulls records updated from this date onwards.\n batch_size (Optional[int]): Number of days to batch into one request. Max 31.\n """\n self.api_token = check.str_param(api_token, "api_token")\n self.start_date = check.str_param(start_date, "start_date")\n self.batch_size = check.opt_int_param(batch_size, "batch_size")\n super().__init__("Dixa", name)
\n\n\n
[docs]class SalesforceSource(GeneratedAirbyteSource):\n
[docs] class FilterSalesforceObjectsEntry:\n
[docs] @public\n def __init__(self, criteria: str, value: str):\n self.criteria = check.str_param(criteria, "criteria")\n self.value = check.str_param(value, "value")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n is_sandbox: Optional[bool] = None,\n auth_type: Optional[str] = None,\n start_date: Optional[str] = None,\n streams_criteria: Optional[List[FilterSalesforceObjectsEntry]] = None,\n ):\n """Airbyte Source for Salesforce.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/salesforce\n\n Args:\n name (str): The name of the destination.\n is_sandbox (Optional[bool]): Toggle if you're using a Salesforce Sandbox\n client_id (str): Enter your Salesforce developer application's Client ID\n client_secret (str): Enter your Salesforce developer application's Client secret\n refresh_token (str): Enter your application's Salesforce Refresh Token used for Airbyte to access your Salesforce account.\n start_date (Optional[str]): Enter the date in the YYYY-MM-DD format. Airbyte will replicate the data added on and after this date. If this field is blank, Airbyte will replicate all data.\n streams_criteria (Optional[List[SalesforceSource.FilterSalesforceObjectsEntry]]): Filter streams relevant to you\n """\n self.is_sandbox = check.opt_bool_param(is_sandbox, "is_sandbox")\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")\n self.start_date = check.opt_str_param(start_date, "start_date")\n self.streams_criteria = check.opt_nullable_list_param(\n streams_criteria, "streams_criteria", SalesforceSource.FilterSalesforceObjectsEntry\n )\n super().__init__("Salesforce", name)
\n\n\n
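streams_criteria is an optional list of FilterSalesforceObjectsEntry objects used to narrow which Salesforce objects are replicated. A sketch with placeholder credentials (import path assumed; the criteria string is illustrative, not an exhaustive reference):

from dagster_airbyte.managed.generated.sources import SalesforceSource  # assumed path

salesforce_source = SalesforceSource(
    name="crm",
    client_id="my-client-id",
    client_secret="my-client-secret",
    refresh_token="my-refresh-token",
    start_date="2023-01-01",
    streams_criteria=[
        # Only replicate objects whose names start with "Account" (criteria value is illustrative).
        SalesforceSource.FilterSalesforceObjectsEntry(criteria="starts with", value="Account")
    ],
)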
[docs]class PipedriveSource(GeneratedAirbyteSource):\n
[docs] class SignInViaPipedriveOAuth:\n
[docs] @public\n def __init__(self, client_id: str, client_secret: str, refresh_token: str):\n self.auth_type = "Client"\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] class APIKeyAuthentication:\n
[docs] @public\n def __init__(self, api_token: str):\n self.auth_type = "Token"\n self.api_token = check.str_param(api_token, "api_token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n authorization: Union[\n "PipedriveSource.SignInViaPipedriveOAuth", "PipedriveSource.APIKeyAuthentication"\n ],\n replication_start_date: str,\n ):\n """Airbyte Source for Pipedrive.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/pipedrive\n\n Args:\n name (str): The name of the destination.\n authorization (Union[PipedriveSource.SignInViaPipedriveOAuth, PipedriveSource.APIKeyAuthentication]): Choose one of the possible authorization method\n replication_start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. When specified and not None, then stream will behave as incremental\n """\n self.authorization = check.inst_param(\n authorization,\n "authorization",\n (PipedriveSource.SignInViaPipedriveOAuth, PipedriveSource.APIKeyAuthentication),\n )\n self.replication_start_date = check.str_param(\n replication_start_date, "replication_start_date"\n )\n super().__init__("Pipedrive", name)
\n\n\n
[docs]class FileSource(GeneratedAirbyteSource):\n
[docs] class HTTPSPublicWeb:\n
[docs] @public\n def __init__(self, user_agent: Optional[bool] = None):\n self.storage = "HTTPS"\n self.user_agent = check.opt_bool_param(user_agent, "user_agent")
\n\n
[docs] class GCSGoogleCloudStorage:\n
[docs] @public\n def __init__(self, service_account_json: Optional[str] = None):\n self.storage = "GCS"\n self.service_account_json = check.opt_str_param(\n service_account_json, "service_account_json"\n )
\n\n
[docs] class S3AmazonWebServices:\n
[docs] @public\n def __init__(\n self,\n aws_access_key_id: Optional[str] = None,\n aws_secret_access_key: Optional[str] = None,\n ):\n self.storage = "S3"\n self.aws_access_key_id = check.opt_str_param(aws_access_key_id, "aws_access_key_id")\n self.aws_secret_access_key = check.opt_str_param(\n aws_secret_access_key, "aws_secret_access_key"\n )
\n\n
[docs] class AzBlobAzureBlobStorage:\n
[docs] @public\n def __init__(\n self,\n storage_account: str,\n sas_token: Optional[str] = None,\n shared_key: Optional[str] = None,\n ):\n self.storage = "AzBlob"\n self.storage_account = check.str_param(storage_account, "storage_account")\n self.sas_token = check.opt_str_param(sas_token, "sas_token")\n self.shared_key = check.opt_str_param(shared_key, "shared_key")
\n\n
[docs] class SSHSecureShell:\n
[docs] @public\n def __init__(\n self, user: str, host: str, password: Optional[str] = None, port: Optional[str] = None\n ):\n self.storage = "SSH"\n self.user = check.str_param(user, "user")\n self.password = check.opt_str_param(password, "password")\n self.host = check.str_param(host, "host")\n self.port = check.opt_str_param(port, "port")
\n\n
[docs] class SCPSecureCopyProtocol:\n
[docs] @public\n def __init__(\n self, user: str, host: str, password: Optional[str] = None, port: Optional[str] = None\n ):\n self.storage = "SCP"\n self.user = check.str_param(user, "user")\n self.password = check.opt_str_param(password, "password")\n self.host = check.str_param(host, "host")\n self.port = check.opt_str_param(port, "port")
\n\n
[docs] class SFTPSecureFileTransferProtocol:\n
[docs] @public\n def __init__(\n self, user: str, host: str, password: Optional[str] = None, port: Optional[str] = None\n ):\n self.storage = "SFTP"\n self.user = check.str_param(user, "user")\n self.password = check.opt_str_param(password, "password")\n self.host = check.str_param(host, "host")\n self.port = check.opt_str_param(port, "port")
\n\n
[docs] class LocalFilesystemLimited:\n
[docs] @public\n def __init__(\n self,\n ):\n self.storage = "local"
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n dataset_name: str,\n format: str,\n url: str,\n provider: Union[\n "FileSource.HTTPSPublicWeb",\n "FileSource.GCSGoogleCloudStorage",\n "FileSource.S3AmazonWebServices",\n "FileSource.AzBlobAzureBlobStorage",\n "FileSource.SSHSecureShell",\n "FileSource.SCPSecureCopyProtocol",\n "FileSource.SFTPSecureFileTransferProtocol",\n "FileSource.LocalFilesystemLimited",\n ],\n reader_options: Optional[str] = None,\n ):\n """Airbyte Source for File.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/file\n\n Args:\n name (str): The name of the destination.\n dataset_name (str): The Name of the final table to replicate this file into (should include letters, numbers dash and underscores only).\n format (str): The Format of the file which should be replicated (Warning: some formats may be experimental, please refer to the docs).\n reader_options (Optional[str]): This should be a string in JSON format. It depends on the chosen file format to provide additional options and tune its behavior.\n url (str): The URL path to access the file which should be replicated.\n provider (Union[FileSource.HTTPSPublicWeb, FileSource.GCSGoogleCloudStorage, FileSource.S3AmazonWebServices, FileSource.AzBlobAzureBlobStorage, FileSource.SSHSecureShell, FileSource.SCPSecureCopyProtocol, FileSource.SFTPSecureFileTransferProtocol, FileSource.LocalFilesystemLimited]): The storage Provider or Location of the file(s) which should be replicated.\n """\n self.dataset_name = check.str_param(dataset_name, "dataset_name")\n self.format = check.str_param(format, "format")\n self.reader_options = check.opt_str_param(reader_options, "reader_options")\n self.url = check.str_param(url, "url")\n self.provider = check.inst_param(\n provider,\n "provider",\n (\n FileSource.HTTPSPublicWeb,\n FileSource.GCSGoogleCloudStorage,\n FileSource.S3AmazonWebServices,\n FileSource.AzBlobAzureBlobStorage,\n FileSource.SSHSecureShell,\n FileSource.SCPSecureCopyProtocol,\n FileSource.SFTPSecureFileTransferProtocol,\n FileSource.LocalFilesystemLimited,\n ),\n )\n super().__init__("File", name)
\n\n\n
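provider selects one of the eight nested storage classes above, each of which sets its own storage discriminator. A sketch pulling a CSV from S3 with placeholder keys (import path assumed):

from dagster_airbyte.managed.generated.sources import FileSource  # assumed path

file_source = FileSource(
    name="exchange_rates_csv",
    dataset_name="exchange_rates",
    format="csv",
    url="s3://my-bucket/exchange_rates.csv",
    provider=FileSource.S3AmazonWebServices(
        aws_access_key_id="my-access-key-id",
        aws_secret_access_key="my-secret-access-key",
    ),
    reader_options='{"sep": ","}',  # format-specific options as a JSON string, per the docstring above
)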
[docs]class GlassfrogSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_key: str):\n """Airbyte Source for Glassfrog.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/glassfrog\n\n Args:\n name (str): The name of the destination.\n api_key (str): API key provided by Glassfrog\n """\n self.api_key = check.str_param(api_key, "api_key")\n super().__init__("Glassfrog", name)
\n\n\n
[docs]class ChartmogulSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_key: str, start_date: str, interval: str):\n """Airbyte Source for Chartmogul.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/chartmogul\n\n Args:\n name (str): The name of the destination.\n api_key (str): Chartmogul API key\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. When feasible, any data before this date will not be replicated.\n interval (str): Some APIs such as Metrics require intervals to cluster data.\n """\n self.api_key = check.str_param(api_key, "api_key")\n self.start_date = check.str_param(start_date, "start_date")\n self.interval = check.str_param(interval, "interval")\n super().__init__("Chartmogul", name)
\n\n\n
[docs]class OrbSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n api_key: str,\n start_date: Optional[str] = None,\n lookback_window_days: Optional[int] = None,\n string_event_properties_keys: Optional[List[str]] = None,\n numeric_event_properties_keys: Optional[List[str]] = None,\n ):\n """Airbyte Source for Orb.\n\n Documentation can be found at https://docs.withorb.com/\n\n Args:\n name (str): The name of the destination.\n api_key (str): Orb API Key, issued from the Orb admin console.\n start_date (Optional[str]): UTC date and time in the format 2022-03-01T00:00:00Z. Any data with created_at before this date will not be synced.\n lookback_window_days (Optional[int]): When set to N, the connector will always refresh resources created within the past N days. By default, updated objects that are not newly created are not incrementally synced.\n string_event_properties_keys (Optional[List[str]]): Property key names to extract from all events, in order to enrich ledger entries corresponding to an event deduction.\n numeric_event_properties_keys (Optional[List[str]]): Property key names to extract from all events, in order to enrich ledger entries corresponding to an event deduction.\n """\n self.api_key = check.str_param(api_key, "api_key")\n self.start_date = check.opt_str_param(start_date, "start_date")\n self.lookback_window_days = check.opt_int_param(\n lookback_window_days, "lookback_window_days"\n )\n self.string_event_properties_keys = check.opt_nullable_list_param(\n string_event_properties_keys, "string_event_properties_keys", str\n )\n self.numeric_event_properties_keys = check.opt_nullable_list_param(\n numeric_event_properties_keys, "numeric_event_properties_keys", str\n )\n super().__init__("Orb", name)
\n\n\n
[docs]class CockroachdbSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n database: str,\n username: str,\n password: Optional[str] = None,\n jdbc_url_params: Optional[str] = None,\n ssl: Optional[bool] = None,\n ):\n """Airbyte Source for Cockroachdb.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/cockroachdb\n\n Args:\n name (str): The name of the destination.\n host (str): Hostname of the database.\n port (int): Port of the database.\n database (str): Name of the database.\n username (str): Username to use to access the database.\n password (Optional[str]): Password associated with the username.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (Eg. key1=value1&key2=value2&key3=value3). For more information read about JDBC URL parameters.\n ssl (Optional[bool]): Encrypt client/server communications for increased security.\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.database = check.str_param(database, "database")\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n self.ssl = check.opt_bool_param(ssl, "ssl")\n super().__init__("Cockroachdb", name)
\n\n\n
[docs]class ConfluenceSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_token: str, domain_name: str, email: str):\n """Airbyte Source for Confluence.\n\n Args:\n name (str): The name of the destination.\n api_token (str): Please follow the Jira confluence for generating an API token: https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/\n domain_name (str): Your Confluence domain name\n email (str): Your Confluence login email\n """\n self.api_token = check.str_param(api_token, "api_token")\n self.domain_name = check.str_param(domain_name, "domain_name")\n self.email = check.str_param(email, "email")\n super().__init__("Confluence", name)
\n\n\n
[docs]class PlaidSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n access_token: str,\n api_key: str,\n client_id: str,\n plaid_env: str,\n start_date: Optional[str] = None,\n ):\n """Airbyte Source for Plaid.\n\n Documentation can be found at https://plaid.com/docs/api/\n\n Args:\n name (str): The name of the destination.\n access_token (str): The end-user's Link access token.\n api_key (str): The Plaid API key to use to hit the API.\n client_id (str): The Plaid client id\n plaid_env (str): The Plaid environment\n start_date (Optional[str]): The date from which you'd like to replicate data for Plaid in the format YYYY-MM-DD. All data generated after this date will be replicated.\n """\n self.access_token = check.str_param(access_token, "access_token")\n self.api_key = check.str_param(api_key, "api_key")\n self.client_id = check.str_param(client_id, "client_id")\n self.plaid_env = check.str_param(plaid_env, "plaid_env")\n self.start_date = check.opt_str_param(start_date, "start_date")\n super().__init__("Plaid", name)
\n\n\n
[docs]class SnapchatMarketingSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n start_date: Optional[str] = None,\n end_date: Optional[str] = None,\n ):\n """Airbyte Source for Snapchat Marketing.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/snapchat-marketing\n\n Args:\n name (str): The name of the destination.\n client_id (str): The Client ID of your Snapchat developer application.\n client_secret (str): The Client Secret of your Snapchat developer application.\n refresh_token (str): Refresh Token to renew the expired Access Token.\n start_date (Optional[str]): Date in the format 2022-01-01. Any data before this date will not be replicated.\n end_date (Optional[str]): Date in the format 2017-01-25. Any data after this date will not be replicated.\n """\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")\n self.start_date = check.opt_str_param(start_date, "start_date")\n self.end_date = check.opt_str_param(end_date, "end_date")\n super().__init__("Snapchat Marketing", name)
\n\n\n
[docs]class MicrosoftTeamsSource(GeneratedAirbyteSource):\n
[docs] class AuthenticateViaMicrosoftOAuth20:\n
[docs] @public\n def __init__(\n self,\n tenant_id: str,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n auth_type: Optional[str] = None,\n ):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.tenant_id = check.str_param(tenant_id, "tenant_id")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] class AuthenticateViaMicrosoft:\n
[docs] @public\n def __init__(\n self,\n tenant_id: str,\n client_id: str,\n client_secret: str,\n auth_type: Optional[str] = None,\n ):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.tenant_id = check.str_param(tenant_id, "tenant_id")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n period: str,\n credentials: Union[\n "MicrosoftTeamsSource.AuthenticateViaMicrosoftOAuth20",\n "MicrosoftTeamsSource.AuthenticateViaMicrosoft",\n ],\n ):\n """Airbyte Source for Microsoft Teams.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/microsoft-teams\n\n Args:\n name (str): The name of the destination.\n period (str): Specifies the length of time over which the Team Device Report stream is aggregated. The supported values are: D7, D30, D90, and D180.\n credentials (Union[MicrosoftTeamsSource.AuthenticateViaMicrosoftOAuth20, MicrosoftTeamsSource.AuthenticateViaMicrosoft]): Choose how to authenticate to Microsoft\n """\n self.period = check.str_param(period, "period")\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (\n MicrosoftTeamsSource.AuthenticateViaMicrosoftOAuth20,\n MicrosoftTeamsSource.AuthenticateViaMicrosoft,\n ),\n )\n super().__init__("Microsoft Teams", name)
\n\n\n
[docs]class LeverHiringSource(GeneratedAirbyteSource):\n
[docs] class OAuthCredentials:\n
[docs] @public\n def __init__(\n self,\n refresh_token: str,\n auth_type: Optional[str] = None,\n client_id: Optional[str] = None,\n client_secret: Optional[str] = None,\n ):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.client_id = check.opt_str_param(client_id, "client_id")\n self.client_secret = check.opt_str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n credentials: "LeverHiringSource.OAuthCredentials",\n start_date: str,\n environment: Optional[str] = None,\n ):\n """Airbyte Source for Lever Hiring.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/lever-hiring\n\n Args:\n name (str): The name of the destination.\n credentials (LeverHiringSource.OAuthCredentials): Choose how to authenticate to Lever Hiring.\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. Note that it will be used only in the following incremental streams: comments, commits, and issues.\n environment (Optional[str]): The environment in which you'd like to replicate data for Lever. This is used to determine which Lever API endpoint to use.\n """\n self.credentials = check.inst_param(\n credentials, "credentials", LeverHiringSource.OAuthCredentials\n )\n self.start_date = check.str_param(start_date, "start_date")\n self.environment = check.opt_str_param(environment, "environment")\n super().__init__("Lever Hiring", name)
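A sketch of how LeverHiringSource might be instantiated, assuming the generated classes are importable from `dagster_airbyte.managed.generated.sources`; all values are placeholders:

    from dagster_airbyte.managed.generated.sources import LeverHiringSource  # assumed module path

    lever_source = LeverHiringSource(
        name="lever_hiring",
        credentials=LeverHiringSource.OAuthCredentials(
            refresh_token="my-refresh-token",
            client_id="my-client-id",
            client_secret="my-client-secret",
        ),
        start_date="2023-01-01T00:00:00Z",
        environment="Production",  # optional; placeholder value
    )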
\n\n\n
[docs]class TwilioSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n account_sid: str,\n auth_token: str,\n start_date: str,\n lookback_window: Optional[int] = None,\n ):\n """Airbyte Source for Twilio.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/twilio\n\n Args:\n name (str): The name of the destination.\n account_sid (str): Twilio account SID\n auth_token (str): Twilio Auth Token.\n start_date (str): UTC date and time in the format 2020-10-01T00:00:00Z. Any data before this date will not be replicated.\n lookback_window (Optional[int]): How far into the past to look for records. (in minutes)\n """\n self.account_sid = check.str_param(account_sid, "account_sid")\n self.auth_token = check.str_param(auth_token, "auth_token")\n self.start_date = check.str_param(start_date, "start_date")\n self.lookback_window = check.opt_int_param(lookback_window, "lookback_window")\n super().__init__("Twilio", name)
\n\n\n
[docs]class StripeSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n account_id: str,\n client_secret: str,\n start_date: str,\n lookback_window_days: Optional[int] = None,\n slice_range: Optional[int] = None,\n ):\n r"""Airbyte Source for Stripe.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/stripe\n\n Args:\n name (str): The name of the destination.\n account_id (str): Your Stripe account ID (starts with 'acct\\\\_', find yours here).\n client_secret (str): Stripe API key (usually starts with 'sk_live\\\\_'; find yours here).\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Only data generated after this date will be replicated.\n lookback_window_days (Optional[int]): When set, the connector will always re-export data from the past N days, where N is the value set here. This is useful if your data is frequently updated after creation. More info here\n slice_range (Optional[int]): The time increment used by the connector when requesting data from the Stripe API. The bigger the value is, the less requests will be made and faster the sync will be. On the other hand, the more seldom the state is persisted.\n """\n self.account_id = check.str_param(account_id, "account_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.start_date = check.str_param(start_date, "start_date")\n self.lookback_window_days = check.opt_int_param(\n lookback_window_days, "lookback_window_days"\n )\n self.slice_range = check.opt_int_param(slice_range, "slice_range")\n super().__init__("Stripe", name)
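A minimal usage sketch for StripeSource (assumed import path; account ID and API key are placeholders):

    from dagster_airbyte.managed.generated.sources import StripeSource  # assumed module path

    stripe_source = StripeSource(
        name="stripe",
        account_id="acct_placeholder",        # placeholder Stripe account ID
        client_secret="sk_live_placeholder",  # placeholder Stripe API key
        start_date="2023-01-01T00:00:00Z",
        lookback_window_days=3,  # optional: re-export the last 3 days on each sync
        slice_range=30,          # optional: request data in 30-day increments
    )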
\n\n\n
[docs]class Db2Source(GeneratedAirbyteSource):\n
[docs] class Unencrypted:\n
[docs] @public\n def __init__(\n self,\n ):\n self.encryption_method = "unencrypted"
\n\n
[docs] class TLSEncryptedVerifyCertificate:\n
[docs] @public\n def __init__(self, ssl_certificate: str, key_store_password: Optional[str] = None):\n self.encryption_method = "encrypted_verify_certificate"\n self.ssl_certificate = check.str_param(ssl_certificate, "ssl_certificate")\n self.key_store_password = check.opt_str_param(key_store_password, "key_store_password")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n db: str,\n username: str,\n password: str,\n encryption: Union["Db2Source.Unencrypted", "Db2Source.TLSEncryptedVerifyCertificate"],\n jdbc_url_params: Optional[str] = None,\n ):\n """Airbyte Source for Db2.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/db2\n\n Args:\n name (str): The name of the destination.\n host (str): Host of the Db2.\n port (int): Port of the database.\n db (str): Name of the database.\n username (str): Username to use to access the database.\n password (str): Password associated with the username.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).\n encryption (Union[Db2Source.Unencrypted, Db2Source.TLSEncryptedVerifyCertificate]): Encryption method to use when communicating with the database\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.db = check.str_param(db, "db")\n self.username = check.str_param(username, "username")\n self.password = check.str_param(password, "password")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n self.encryption = check.inst_param(\n encryption,\n "encryption",\n (Db2Source.Unencrypted, Db2Source.TLSEncryptedVerifyCertificate),\n )\n super().__init__("Db2", name)
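A construction sketch for Db2Source showing the nested encryption options; import path and connection values are assumptions/placeholders:

    from dagster_airbyte.managed.generated.sources import Db2Source  # assumed module path

    db2_source = Db2Source(
        name="db2",
        host="db2.example.com",
        port=50000,
        db="SAMPLE",
        username="db2user",
        password="placeholder-password",
        # Or Db2Source.TLSEncryptedVerifyCertificate(ssl_certificate=...) for TLS.
        encryption=Db2Source.Unencrypted(),
    )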
\n\n\n
[docs]class SlackSource(GeneratedAirbyteSource):\n
[docs] class DefaultOAuth20Authorization:\n
[docs] @public\n def __init__(\n self,\n client_id: str,\n client_secret: str,\n access_token: str,\n refresh_token: Optional[str] = None,\n ):\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.access_token = check.str_param(access_token, "access_token")\n self.refresh_token = check.opt_str_param(refresh_token, "refresh_token")
\n\n
[docs] class APITokenCredentials:\n
[docs] @public\n def __init__(self, api_token: str):\n self.api_token = check.str_param(api_token, "api_token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n start_date: str,\n lookback_window: int,\n join_channels: bool,\n credentials: Union[\n "SlackSource.DefaultOAuth20Authorization", "SlackSource.APITokenCredentials"\n ],\n channel_filter: Optional[List[str]] = None,\n ):\n """Airbyte Source for Slack.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/slack\n\n Args:\n name (str): The name of the destination.\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.\n lookback_window (int): How far into the past to look for messages in threads.\n join_channels (bool): Whether to join all channels or to sync data only from channels the bot is already in. If false, you'll need to manually add the bot to all the channels from which you'd like to sync messages.\n channel_filter (Optional[List[str]]): A channel name list (without leading '#' char) which limit the channels from which you'd like to sync. Empty list means no filter.\n credentials (Union[SlackSource.DefaultOAuth20Authorization, SlackSource.APITokenCredentials]): Choose how to authenticate into Slack\n """\n self.start_date = check.str_param(start_date, "start_date")\n self.lookback_window = check.int_param(lookback_window, "lookback_window")\n self.join_channels = check.bool_param(join_channels, "join_channels")\n self.channel_filter = check.opt_nullable_list_param(channel_filter, "channel_filter", str)\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (SlackSource.DefaultOAuth20Authorization, SlackSource.APITokenCredentials),\n )\n super().__init__("Slack", name)
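A minimal sketch for SlackSource using the API-token credential variant (assumed import path; token and channel names are placeholders):

    from dagster_airbyte.managed.generated.sources import SlackSource  # assumed module path

    slack_source = SlackSource(
        name="slack",
        start_date="2023-01-01T00:00:00Z",
        lookback_window=7,   # how far back to look for messages in threads
        join_channels=True,  # let the bot join all public channels
        credentials=SlackSource.APITokenCredentials(api_token="xoxb-placeholder"),
        channel_filter=["general", "engineering"],  # optional; names without leading '#'
    )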
\n\n\n
[docs]class RechargeSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, start_date: str, access_token: str):\n """Airbyte Source for Recharge.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/recharge\n\n Args:\n name (str): The name of the destination.\n start_date (str): The date from which you'd like to replicate data for Recharge API, in the format YYYY-MM-DDT00:00:00Z. Any data before this date will not be replicated.\n access_token (str): The value of the Access Token generated. See the docs for more information.\n """\n self.start_date = check.str_param(start_date, "start_date")\n self.access_token = check.str_param(access_token, "access_token")\n super().__init__("Recharge", name)
\n\n\n
[docs]class OpenweatherSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n lat: str,\n lon: str,\n appid: str,\n units: Optional[str] = None,\n lang: Optional[str] = None,\n ):\n """Airbyte Source for Openweather.\n\n Args:\n name (str): The name of the destination.\n lat (str): Latitude for which you want to get weather condition from. (min -90, max 90)\n lon (str): Longitude for which you want to get weather condition from. (min -180, max 180)\n appid (str): Your OpenWeather API Key. See here. The key is case sensitive.\n units (Optional[str]): Units of measurement. standard, metric and imperial units are available. If you do not use the units parameter, standard units will be applied by default.\n lang (Optional[str]): You can use lang parameter to get the output in your language. The contents of the description field will be translated. See here for the list of supported languages.\n """\n self.lat = check.str_param(lat, "lat")\n self.lon = check.str_param(lon, "lon")\n self.appid = check.str_param(appid, "appid")\n self.units = check.opt_str_param(units, "units")\n self.lang = check.opt_str_param(lang, "lang")\n super().__init__("Openweather", name)
\n\n\n
[docs]class RetentlySource(GeneratedAirbyteSource):\n
[docs] class AuthenticateViaRetentlyOAuth:\n
[docs] @public\n def __init__(\n self,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n auth_type: Optional[str] = None,\n ):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] class AuthenticateWithAPIToken:\n
[docs] @public\n def __init__(self, api_key: str, auth_type: Optional[str] = None):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.api_key = check.str_param(api_key, "api_key")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n credentials: Union[\n "RetentlySource.AuthenticateViaRetentlyOAuth", "RetentlySource.AuthenticateWithAPIToken"\n ],\n ):\n """Airbyte Source for Retently.\n\n Args:\n name (str): The name of the destination.\n credentials (Union[RetentlySource.AuthenticateViaRetentlyOAuth, RetentlySource.AuthenticateWithAPIToken]): Choose how to authenticate to Retently\n """\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (RetentlySource.AuthenticateViaRetentlyOAuth, RetentlySource.AuthenticateWithAPIToken),\n )\n super().__init__("Retently", name)
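A minimal sketch for RetentlySource with the API-token credential variant (assumed import path; key is a placeholder):

    from dagster_airbyte.managed.generated.sources import RetentlySource  # assumed module path

    retently_source = RetentlySource(
        name="retently",
        credentials=RetentlySource.AuthenticateWithAPIToken(api_key="placeholder-api-key"),
    )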
\n\n\n
[docs]class ScaffoldSourceHttpSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, TODO: str):\n """Airbyte Source for Scaffold Source Http.\n\n Args:\n name (str): The name of the destination.\n TODO (str): describe me\n """\n self.TODO = check.str_param(TODO, "TODO")\n super().__init__("Scaffold Source Http", name)
\n\n\n
[docs]class YandexMetricaSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, auth_token: str, counter_id: str, start_date: str, end_date: str):\n """Airbyte Source for Yandex Metrica.\n\n Args:\n name (str): The name of the destination.\n auth_token (str): Your Yandex Metrica API access token\n counter_id (str): Counter ID\n start_date (str): UTC date and time in the format YYYY-MM-DD.\n end_date (str): UTC date and time in the format YYYY-MM-DD.\n """\n self.auth_token = check.str_param(auth_token, "auth_token")\n self.counter_id = check.str_param(counter_id, "counter_id")\n self.start_date = check.str_param(start_date, "start_date")\n self.end_date = check.str_param(end_date, "end_date")\n super().__init__("Yandex Metrica", name)
\n\n\n
[docs]class TalkdeskExploreSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n start_date: str,\n auth_url: str,\n api_key: str,\n timezone: Optional[str] = None,\n ):\n """Airbyte Source for Talkdesk Explore.\n\n Args:\n name (str): The name of the destination.\n start_date (str): The date from which you'd like to replicate data for Talkdesk Explore API, in the format YYYY-MM-DDT00:00:00. All data generated after this date will be replicated.\n timezone (Optional[str]): Timezone to use when generating reports. Only IANA timezones are supported (https://nodatime.org/TimeZones)\n auth_url (str): Talkdesk Auth URL. Only 'client_credentials' auth type supported at the moment.\n api_key (str): Talkdesk API key.\n """\n self.start_date = check.str_param(start_date, "start_date")\n self.timezone = check.opt_str_param(timezone, "timezone")\n self.auth_url = check.str_param(auth_url, "auth_url")\n self.api_key = check.str_param(api_key, "api_key")\n super().__init__("Talkdesk Explore", name)
\n\n\n
[docs]class ChargifySource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_key: str, domain: str):\n """Airbyte Source for Chargify.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/chargify\n\n Args:\n name (str): The name of the destination.\n api_key (str): Chargify API Key.\n domain (str): Chargify domain. Normally this domain follows the following format companyname.chargify.com\n """\n self.api_key = check.str_param(api_key, "api_key")\n self.domain = check.str_param(domain, "domain")\n super().__init__("Chargify", name)
\n\n\n
[docs]class RkiCovidSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, start_date: str):\n """Airbyte Source for Rki Covid.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/rki-covid\n\n Args:\n name (str): The name of the destination.\n start_date (str): UTC date in the format 2017-01-25. Any data before this date will not be replicated.\n """\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Rki Covid", name)
\n\n\n
[docs]class PostgresSource(GeneratedAirbyteSource):\n
[docs] class Disable:\n
[docs] @public\n def __init__(\n self,\n ):\n self.mode = "disable"
\n\n
[docs] class Allow:\n
[docs] @public\n def __init__(\n self,\n ):\n self.mode = "allow"
\n\n
[docs] class Prefer:\n
[docs] @public\n def __init__(\n self,\n ):\n self.mode = "prefer"
\n\n
[docs] class Require:\n
[docs] @public\n def __init__(\n self,\n ):\n self.mode = "require"
\n\n
[docs] class VerifyCa:\n
[docs] @public\n def __init__(\n self,\n ca_certificate: str,\n client_certificate: Optional[str] = None,\n client_key: Optional[str] = None,\n client_key_password: Optional[str] = None,\n ):\n self.mode = "verify-ca"\n self.ca_certificate = check.str_param(ca_certificate, "ca_certificate")\n self.client_certificate = check.opt_str_param(client_certificate, "client_certificate")\n self.client_key = check.opt_str_param(client_key, "client_key")\n self.client_key_password = check.opt_str_param(\n client_key_password, "client_key_password"\n )
\n\n
[docs] class VerifyFull:\n
[docs] @public\n def __init__(\n self,\n ca_certificate: str,\n client_certificate: Optional[str] = None,\n client_key: Optional[str] = None,\n client_key_password: Optional[str] = None,\n ):\n self.mode = "verify-full"\n self.ca_certificate = check.str_param(ca_certificate, "ca_certificate")\n self.client_certificate = check.opt_str_param(client_certificate, "client_certificate")\n self.client_key = check.opt_str_param(client_key, "client_key")\n self.client_key_password = check.opt_str_param(\n client_key_password, "client_key_password"\n )
\n\n
[docs] class Standard:\n
[docs] @public\n def __init__(\n self,\n ):\n self.method = "Standard"
\n\n
[docs] class LogicalReplicationCDC:\n
[docs] @public\n def __init__(\n self,\n replication_slot: str,\n publication: str,\n plugin: Optional[str] = None,\n initial_waiting_seconds: Optional[int] = None,\n ):\n self.method = "CDC"\n self.plugin = check.opt_str_param(plugin, "plugin")\n self.replication_slot = check.str_param(replication_slot, "replication_slot")\n self.publication = check.str_param(publication, "publication")\n self.initial_waiting_seconds = check.opt_int_param(\n initial_waiting_seconds, "initial_waiting_seconds"\n )
\n\n
[docs] class NoTunnel:\n
[docs] @public\n def __init__(\n self,\n ):\n self.tunnel_method = "NO_TUNNEL"
\n\n
[docs] class SSHKeyAuthentication:\n
[docs] @public\n def __init__(self, tunnel_host: str, tunnel_port: int, tunnel_user: str, ssh_key: str):\n self.tunnel_method = "SSH_KEY_AUTH"\n self.tunnel_host = check.str_param(tunnel_host, "tunnel_host")\n self.tunnel_port = check.int_param(tunnel_port, "tunnel_port")\n self.tunnel_user = check.str_param(tunnel_user, "tunnel_user")\n self.ssh_key = check.str_param(ssh_key, "ssh_key")
\n\n
[docs] class PasswordAuthentication:\n
[docs] @public\n def __init__(\n self, tunnel_host: str, tunnel_port: int, tunnel_user: str, tunnel_user_password: str\n ):\n self.tunnel_method = "SSH_PASSWORD_AUTH"\n self.tunnel_host = check.str_param(tunnel_host, "tunnel_host")\n self.tunnel_port = check.int_param(tunnel_port, "tunnel_port")\n self.tunnel_user = check.str_param(tunnel_user, "tunnel_user")\n self.tunnel_user_password = check.str_param(\n tunnel_user_password, "tunnel_user_password"\n )
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n database: str,\n username: str,\n ssl_mode: Union[\n "PostgresSource.Disable",\n "PostgresSource.Allow",\n "PostgresSource.Prefer",\n "PostgresSource.Require",\n "PostgresSource.VerifyCa",\n "PostgresSource.VerifyFull",\n ],\n replication_method: Union[\n "PostgresSource.Standard", "PostgresSource.LogicalReplicationCDC"\n ],\n tunnel_method: Union[\n "PostgresSource.NoTunnel",\n "PostgresSource.SSHKeyAuthentication",\n "PostgresSource.PasswordAuthentication",\n ],\n schemas: Optional[List[str]] = None,\n password: Optional[str] = None,\n jdbc_url_params: Optional[str] = None,\n ssl: Optional[bool] = None,\n ):\n """Airbyte Source for Postgres.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/postgres\n\n Args:\n name (str): The name of the destination.\n host (str): Hostname of the database.\n port (int): Port of the database.\n database (str): Name of the database.\n schemas (Optional[List[str]]): The list of schemas (case sensitive) to sync from. Defaults to public.\n username (str): Username to access the database.\n password (Optional[str]): Password associated with the username.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (Eg. key1=value1&key2=value2&key3=value3). For more information read about JDBC URL parameters.\n ssl (Optional[bool]): Encrypt data using SSL. When activating SSL, please select one of the connection modes.\n ssl_mode (Union[PostgresSource.Disable, PostgresSource.Allow, PostgresSource.Prefer, PostgresSource.Require, PostgresSource.VerifyCa, PostgresSource.VerifyFull]): SSL connection modes. disable - Disables encryption of communication between Airbyte and source database allow - Enables encryption only when required by the source database prefer - allows unencrypted connection only if the source database does not support encryption require - Always require encryption. If the source database server does not support encryption, connection will fail verify-ca - Always require encryption and verifies that the source database server has a valid SSL certificate verify-full - This is the most secure mode. 
Always require encryption and verifies the identity of the source database server Read more in the docs.\n replication_method (Union[PostgresSource.Standard, PostgresSource.LogicalReplicationCDC]): Replication method for extracting data from the database.\n tunnel_method (Union[PostgresSource.NoTunnel, PostgresSource.SSHKeyAuthentication, PostgresSource.PasswordAuthentication]): Whether to initiate an SSH tunnel before connecting to the database, and if so, which kind of authentication to use.\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.database = check.str_param(database, "database")\n self.schemas = check.opt_nullable_list_param(schemas, "schemas", str)\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n self.ssl = check.opt_bool_param(ssl, "ssl")\n self.ssl_mode = check.inst_param(\n ssl_mode,\n "ssl_mode",\n (\n PostgresSource.Disable,\n PostgresSource.Allow,\n PostgresSource.Prefer,\n PostgresSource.Require,\n PostgresSource.VerifyCa,\n PostgresSource.VerifyFull,\n ),\n )\n self.replication_method = check.inst_param(\n replication_method,\n "replication_method",\n (PostgresSource.Standard, PostgresSource.LogicalReplicationCDC),\n )\n self.tunnel_method = check.inst_param(\n tunnel_method,\n "tunnel_method",\n (\n PostgresSource.NoTunnel,\n PostgresSource.SSHKeyAuthentication,\n PostgresSource.PasswordAuthentication,\n ),\n )\n super().__init__("Postgres", name)
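A construction sketch for PostgresSource combining the nested SSL-mode, replication, and tunnel options; the import path and all connection values are assumptions/placeholders:

    from dagster_airbyte.managed.generated.sources import PostgresSource  # assumed module path

    postgres_source = PostgresSource(
        name="postgres",
        host="db.example.com",
        port=5432,
        database="analytics",
        username="airbyte",
        password="placeholder-password",
        schemas=["public"],
        ssl_mode=PostgresSource.Require(),                 # always encrypt
        replication_method=PostgresSource.Standard(),      # or LogicalReplicationCDC(...)
        tunnel_method=PostgresSource.NoTunnel(),           # no SSH tunnel
    )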
\n\n\n
[docs]class TrelloSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n token: str,\n key: str,\n start_date: str,\n board_ids: Optional[List[str]] = None,\n ):\n """Airbyte Source for Trello.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/trello\n\n Args:\n name (str): The name of the destination.\n token (str): Trello v API token. See the docs for instructions on how to generate it.\n key (str): Trello API key. See the docs for instructions on how to generate it.\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.\n board_ids (Optional[List[str]]): IDs of the boards to replicate data from. If left empty, data from all boards to which you have access will be replicated.\n """\n self.token = check.str_param(token, "token")\n self.key = check.str_param(key, "key")\n self.start_date = check.str_param(start_date, "start_date")\n self.board_ids = check.opt_nullable_list_param(board_ids, "board_ids", str)\n super().__init__("Trello", name)
\n\n\n
[docs]class PrestashopSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, url: str, access_key: str):\n """Airbyte Source for Prestashop.\n\n Args:\n name (str): The name of the destination.\n url (str): Shop URL without trailing slash (domain name or IP address)\n access_key (str): Your PrestaShop access key. See the docs for info on how to obtain this.\n """\n self.url = check.str_param(url, "url")\n self.access_key = check.str_param(access_key, "access_key")\n super().__init__("Prestashop", name)
\n\n\n
[docs]class PaystackSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n secret_key: str,\n start_date: str,\n lookback_window_days: Optional[int] = None,\n ):\n r"""Airbyte Source for Paystack.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/paystack\n\n Args:\n name (str): The name of the destination.\n secret_key (str): The Paystack API key (usually starts with 'sk_live\\\\_'; find yours here).\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.\n lookback_window_days (Optional[int]): When set, the connector will always reload data from the past N days, where N is the value set here. This is useful if your data is updated after creation.\n """\n self.secret_key = check.str_param(secret_key, "secret_key")\n self.start_date = check.str_param(start_date, "start_date")\n self.lookback_window_days = check.opt_int_param(\n lookback_window_days, "lookback_window_days"\n )\n super().__init__("Paystack", name)
\n\n\n
[docs]class S3Source(GeneratedAirbyteSource):\n
[docs] class CSV:\n
[docs] @public\n def __init__(\n self,\n filetype: Optional[str] = None,\n delimiter: Optional[str] = None,\n infer_datatypes: Optional[bool] = None,\n quote_char: Optional[str] = None,\n escape_char: Optional[str] = None,\n encoding: Optional[str] = None,\n double_quote: Optional[bool] = None,\n newlines_in_values: Optional[bool] = None,\n additional_reader_options: Optional[str] = None,\n advanced_options: Optional[str] = None,\n block_size: Optional[int] = None,\n ):\n self.filetype = check.opt_str_param(filetype, "filetype")\n self.delimiter = check.opt_str_param(delimiter, "delimiter")\n self.infer_datatypes = check.opt_bool_param(infer_datatypes, "infer_datatypes")\n self.quote_char = check.opt_str_param(quote_char, "quote_char")\n self.escape_char = check.opt_str_param(escape_char, "escape_char")\n self.encoding = check.opt_str_param(encoding, "encoding")\n self.double_quote = check.opt_bool_param(double_quote, "double_quote")\n self.newlines_in_values = check.opt_bool_param(newlines_in_values, "newlines_in_values")\n self.additional_reader_options = check.opt_str_param(\n additional_reader_options, "additional_reader_options"\n )\n self.advanced_options = check.opt_str_param(advanced_options, "advanced_options")\n self.block_size = check.opt_int_param(block_size, "block_size")
\n\n
[docs] class Parquet:\n
[docs] @public\n def __init__(\n self,\n filetype: Optional[str] = None,\n columns: Optional[List[str]] = None,\n batch_size: Optional[int] = None,\n buffer_size: Optional[int] = None,\n ):\n self.filetype = check.opt_str_param(filetype, "filetype")\n self.columns = check.opt_nullable_list_param(columns, "columns", str)\n self.batch_size = check.opt_int_param(batch_size, "batch_size")\n self.buffer_size = check.opt_int_param(buffer_size, "buffer_size")
\n\n
[docs] class Avro:\n
[docs] @public\n def __init__(self, filetype: Optional[str] = None):\n self.filetype = check.opt_str_param(filetype, "filetype")
\n\n
[docs] class Jsonl:\n
[docs] @public\n def __init__(\n self,\n filetype: Optional[str] = None,\n newlines_in_values: Optional[bool] = None,\n unexpected_field_behavior: Optional[str] = None,\n block_size: Optional[int] = None,\n ):\n self.filetype = check.opt_str_param(filetype, "filetype")\n self.newlines_in_values = check.opt_bool_param(newlines_in_values, "newlines_in_values")\n self.unexpected_field_behavior = check.opt_str_param(\n unexpected_field_behavior, "unexpected_field_behavior"\n )\n self.block_size = check.opt_int_param(block_size, "block_size")
\n\n
[docs] class S3AmazonWebServices:\n
[docs] @public\n def __init__(\n self,\n bucket: str,\n aws_access_key_id: Optional[str] = None,\n aws_secret_access_key: Optional[str] = None,\n path_prefix: Optional[str] = None,\n endpoint: Optional[str] = None,\n ):\n self.bucket = check.str_param(bucket, "bucket")\n self.aws_access_key_id = check.opt_str_param(aws_access_key_id, "aws_access_key_id")\n self.aws_secret_access_key = check.opt_str_param(\n aws_secret_access_key, "aws_secret_access_key"\n )\n self.path_prefix = check.opt_str_param(path_prefix, "path_prefix")\n self.endpoint = check.opt_str_param(endpoint, "endpoint")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n dataset: str,\n path_pattern: str,\n format: Union["S3Source.CSV", "S3Source.Parquet", "S3Source.Avro", "S3Source.Jsonl"],\n provider: "S3Source.S3AmazonWebServices",\n schema: Optional[str] = None,\n ):\n """Airbyte Source for S3.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/s3\n\n Args:\n name (str): The name of the destination.\n dataset (str): The name of the stream you would like this source to output. Can contain letters, numbers, or underscores.\n path_pattern (str): A regular expression which tells the connector which files to replicate. All files which match this pattern will be replicated. Use | to separate multiple patterns. See this page to understand pattern syntax (GLOBSTAR and SPLIT flags are enabled). Use pattern ** to pick up all files.\n format (Union[S3Source.CSV, S3Source.Parquet, S3Source.Avro, S3Source.Jsonl]): The format of the files you'd like to replicate\n schema (Optional[str]): Optionally provide a schema to enforce, as a valid JSON string. Ensure this is a mapping of { "column" : "type" }, where types are valid JSON Schema datatypes. Leave as {} to auto-infer the schema.\n provider (S3Source.S3AmazonWebServices): Use this to load files from S3 or S3-compatible services\n """\n self.dataset = check.str_param(dataset, "dataset")\n self.path_pattern = check.str_param(path_pattern, "path_pattern")\n self.format = check.inst_param(\n format, "format", (S3Source.CSV, S3Source.Parquet, S3Source.Avro, S3Source.Jsonl)\n )\n self.schema = check.opt_str_param(schema, "schema")\n self.provider = check.inst_param(provider, "provider", S3Source.S3AmazonWebServices)\n super().__init__("S3", name)
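A sketch of an S3Source configured for CSV files; import path, bucket, and keys are assumptions/placeholders:

    from dagster_airbyte.managed.generated.sources import S3Source  # assumed module path

    s3_source = S3Source(
        name="s3_csv",
        dataset="my_stream",          # output stream name
        path_pattern="**/*.csv",      # replicate every CSV under the prefix
        format=S3Source.CSV(delimiter=",", encoding="utf8"),
        provider=S3Source.S3AmazonWebServices(
            bucket="my-bucket",
            aws_access_key_id="placeholder-access-key-id",
            aws_secret_access_key="placeholder-secret-key",
        ),
    )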
\n\n\n
[docs]class SnowflakeSource(GeneratedAirbyteSource):\n
[docs] class OAuth20:\n
[docs] @public\n def __init__(\n self,\n client_id: str,\n client_secret: str,\n access_token: Optional[str] = None,\n refresh_token: Optional[str] = None,\n ):\n self.auth_type = "OAuth"\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.access_token = check.opt_str_param(access_token, "access_token")\n self.refresh_token = check.opt_str_param(refresh_token, "refresh_token")
\n\n
[docs] class UsernameAndPassword:\n
[docs] @public\n def __init__(self, username: str, password: str):\n self.auth_type = "username/password"\n self.username = check.str_param(username, "username")\n self.password = check.str_param(password, "password")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n credentials: Union["SnowflakeSource.OAuth20", "SnowflakeSource.UsernameAndPassword"],\n host: str,\n role: str,\n warehouse: str,\n database: str,\n schema: str,\n jdbc_url_params: Optional[str] = None,\n ):\n """Airbyte Source for Snowflake.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/snowflake\n\n Args:\n name (str): The name of the destination.\n host (str): The host domain of the snowflake instance (must include the account, region, cloud environment, and end with snowflakecomputing.com).\n role (str): The role you created for Airbyte to access Snowflake.\n warehouse (str): The warehouse you created for Airbyte to access data.\n database (str): The database you created for Airbyte to access data.\n schema (str): The source Snowflake schema tables.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).\n """\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (SnowflakeSource.OAuth20, SnowflakeSource.UsernameAndPassword),\n )\n self.host = check.str_param(host, "host")\n self.role = check.str_param(role, "role")\n self.warehouse = check.str_param(warehouse, "warehouse")\n self.database = check.str_param(database, "database")\n self.schema = check.str_param(schema, "schema")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n super().__init__("Snowflake", name)
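A minimal sketch for SnowflakeSource using username/password credentials (assumed import path; all identifiers are placeholders):

    from dagster_airbyte.managed.generated.sources import SnowflakeSource  # assumed module path

    snowflake_source = SnowflakeSource(
        name="snowflake",
        credentials=SnowflakeSource.UsernameAndPassword(
            username="AIRBYTE_USER", password="placeholder-password"
        ),
        host="myaccount.us-east-1.snowflakecomputing.com",
        role="AIRBYTE_ROLE",
        warehouse="AIRBYTE_WAREHOUSE",
        database="ANALYTICS",
        schema="PUBLIC",
    )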
\n\n\n
[docs]class AmplitudeSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_key: str, secret_key: str, start_date: str):\n """Airbyte Source for Amplitude.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/amplitude\n\n Args:\n name (str): The name of the destination.\n api_key (str): Amplitude API Key. See the setup guide for more information on how to obtain this key.\n secret_key (str): Amplitude Secret Key. See the setup guide for more information on how to obtain this key.\n start_date (str): UTC date and time in the format 2021-01-25T00:00:00Z. Any data before this date will not be replicated.\n """\n self.api_key = check.str_param(api_key, "api_key")\n self.secret_key = check.str_param(secret_key, "secret_key")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Amplitude", name)
\n\n\n
[docs]class PosthogSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, start_date: str, api_key: str, base_url: Optional[str] = None):\n """Airbyte Source for Posthog.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/posthog\n\n Args:\n name (str): The name of the destination.\n start_date (str): The date from which you'd like to replicate the data. Any data before this date will not be replicated.\n api_key (str): API Key. See the docs for information on how to generate this key.\n base_url (Optional[str]): Base PostHog url. Defaults to PostHog Cloud (https://app.posthog.com).\n """\n self.start_date = check.str_param(start_date, "start_date")\n self.api_key = check.str_param(api_key, "api_key")\n self.base_url = check.opt_str_param(base_url, "base_url")\n super().__init__("Posthog", name)
\n\n\n
[docs]class PaypalTransactionSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n start_date: str,\n is_sandbox: bool,\n client_id: Optional[str] = None,\n client_secret: Optional[str] = None,\n refresh_token: Optional[str] = None,\n ):\n """Airbyte Source for Paypal Transaction.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/paypal-transactions\n\n Args:\n name (str): The name of the destination.\n client_id (Optional[str]): The Client ID of your Paypal developer application.\n client_secret (Optional[str]): The Client Secret of your Paypal developer application.\n refresh_token (Optional[str]): The key to refresh the expired access token.\n start_date (str): Start Date for data extraction in ISO format. Date must be in range from 3 years till 12 hrs before present time.\n is_sandbox (bool): Determines whether to use the sandbox or production environment.\n """\n self.client_id = check.opt_str_param(client_id, "client_id")\n self.client_secret = check.opt_str_param(client_secret, "client_secret")\n self.refresh_token = check.opt_str_param(refresh_token, "refresh_token")\n self.start_date = check.str_param(start_date, "start_date")\n self.is_sandbox = check.bool_param(is_sandbox, "is_sandbox")\n super().__init__("Paypal Transaction", name)
\n\n\n
[docs]class MssqlSource(GeneratedAirbyteSource):\n
[docs] class Unencrypted:\n
[docs] @public\n def __init__(\n self,\n ):\n self.ssl_method = "unencrypted"
\n\n
[docs] class EncryptedTrustServerCertificate:\n
[docs] @public\n def __init__(\n self,\n ):\n self.ssl_method = "encrypted_trust_server_certificate"
\n\n
[docs] class EncryptedVerifyCertificate:\n
[docs] @public\n def __init__(self, hostNameInCertificate: Optional[str] = None):\n self.ssl_method = "encrypted_verify_certificate"\n self.hostNameInCertificate = check.opt_str_param(\n hostNameInCertificate, "hostNameInCertificate"\n )
\n\n
[docs] class Standard:\n
[docs] @public\n def __init__(\n self,\n ):\n self.method = "STANDARD"
\n\n
[docs] class LogicalReplicationCDC:\n
[docs] @public\n def __init__(\n self, data_to_sync: Optional[str] = None, snapshot_isolation: Optional[str] = None\n ):\n self.method = "CDC"\n self.data_to_sync = check.opt_str_param(data_to_sync, "data_to_sync")\n self.snapshot_isolation = check.opt_str_param(snapshot_isolation, "snapshot_isolation")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n database: str,\n username: str,\n ssl_method: Union[\n "MssqlSource.Unencrypted",\n "MssqlSource.EncryptedTrustServerCertificate",\n "MssqlSource.EncryptedVerifyCertificate",\n ],\n replication_method: Union["MssqlSource.Standard", "MssqlSource.LogicalReplicationCDC"],\n schemas: Optional[List[str]] = None,\n password: Optional[str] = None,\n jdbc_url_params: Optional[str] = None,\n ):\n """Airbyte Source for Mssql.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/mssql\n\n Args:\n name (str): The name of the destination.\n host (str): The hostname of the database.\n port (int): The port of the database.\n database (str): The name of the database.\n schemas (Optional[List[str]]): The list of schemas to sync from. Defaults to user. Case sensitive.\n username (str): The username which is used to access the database.\n password (Optional[str]): The password associated with the username.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).\n ssl_method (Union[MssqlSource.Unencrypted, MssqlSource.EncryptedTrustServerCertificate, MssqlSource.EncryptedVerifyCertificate]): The encryption method which is used when communicating with the database.\n replication_method (Union[MssqlSource.Standard, MssqlSource.LogicalReplicationCDC]): The replication method used for extracting data from the database. STANDARD replication requires no setup on the DB side but will not be able to represent deletions incrementally. CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself.\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.database = check.str_param(database, "database")\n self.schemas = check.opt_nullable_list_param(schemas, "schemas", str)\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n self.ssl_method = check.inst_param(\n ssl_method,\n "ssl_method",\n (\n MssqlSource.Unencrypted,\n MssqlSource.EncryptedTrustServerCertificate,\n MssqlSource.EncryptedVerifyCertificate,\n ),\n )\n self.replication_method = check.inst_param(\n replication_method,\n "replication_method",\n (MssqlSource.Standard, MssqlSource.LogicalReplicationCDC),\n )\n super().__init__("Mssql", name)
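A construction sketch for MssqlSource showing the nested SSL and replication choices; import path and connection values are assumptions/placeholders:

    from dagster_airbyte.managed.generated.sources import MssqlSource  # assumed module path

    mssql_source = MssqlSource(
        name="mssql",
        host="sql.example.com",
        port=1433,
        database="sales",
        username="airbyte",
        password="placeholder-password",
        ssl_method=MssqlSource.EncryptedTrustServerCertificate(),
        replication_method=MssqlSource.Standard(),  # or LogicalReplicationCDC(...) for CDC
    )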
\n\n\n
[docs]class ZohoCrmSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n dc_region: str,\n environment: str,\n edition: str,\n start_datetime: Optional[str] = None,\n ):\n """Airbyte Source for Zoho Crm.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/zoho-crm\n\n Args:\n name (str): The name of the destination.\n client_id (str): OAuth2.0 Client ID\n client_secret (str): OAuth2.0 Client Secret\n refresh_token (str): OAuth2.0 Refresh Token\n dc_region (str): Please choose the region of your Data Center location. More info by this Link\n environment (str): Please choose the environment\n start_datetime (Optional[str]): ISO 8601, for instance: `YYYY-MM-DD`, `YYYY-MM-DD HH:MM:SS+HH:MM`\n edition (str): Choose your Edition of Zoho CRM to determine API Concurrency Limits\n """\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")\n self.dc_region = check.str_param(dc_region, "dc_region")\n self.environment = check.str_param(environment, "environment")\n self.start_datetime = check.opt_str_param(start_datetime, "start_datetime")\n self.edition = check.str_param(edition, "edition")\n super().__init__("Zoho Crm", name)
\n\n\n
[docs]class RedshiftSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n database: str,\n username: str,\n password: str,\n schemas: Optional[List[str]] = None,\n jdbc_url_params: Optional[str] = None,\n ):\n """Airbyte Source for Redshift.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/redshift\n\n Args:\n name (str): The name of the destination.\n host (str): Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com).\n port (int): Port of the database.\n database (str): Name of the database.\n schemas (Optional[List[str]]): The list of schemas to sync from. Specify one or more explicitly or keep empty to process all schemas. Schema names are case sensitive.\n username (str): Username to use to access the database.\n password (str): Password associated with the username.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.database = check.str_param(database, "database")\n self.schemas = check.opt_nullable_list_param(schemas, "schemas", str)\n self.username = check.str_param(username, "username")\n self.password = check.str_param(password, "password")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n super().__init__("Redshift", name)
\n\n\n
[docs]class AsanaSource(GeneratedAirbyteSource):\n
[docs] class PATCredentials:\n
[docs] @public\n def __init__(self, personal_access_token: str):\n self.personal_access_token = check.str_param(\n personal_access_token, "personal_access_token"\n )
\n\n
[docs] class OAuthCredentials:\n
[docs] @public\n def __init__(self, client_id: str, client_secret: str, refresh_token: str):\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n credentials: Union["AsanaSource.PATCredentials", "AsanaSource.OAuthCredentials"],\n ):\n """Airbyte Source for Asana.\n\n Args:\n name (str): The name of the destination.\n credentials (Union[AsanaSource.PATCredentials, AsanaSource.OAuthCredentials]): Choose how to authenticate to Asana\n """\n self.credentials = check.inst_param(\n credentials, "credentials", (AsanaSource.PATCredentials, AsanaSource.OAuthCredentials)\n )\n super().__init__("Asana", name)
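A minimal sketch for AsanaSource using a personal access token (assumed import path; token is a placeholder):

    from dagster_airbyte.managed.generated.sources import AsanaSource  # assumed module path

    asana_source = AsanaSource(
        name="asana",
        credentials=AsanaSource.PATCredentials(personal_access_token="placeholder-token"),
    )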
\n\n\n
[docs]class SmartsheetsSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n access_token: str,\n spreadsheet_id: str,\n start_datetime: Optional[str] = None,\n ):\n """Airbyte Source for Smartsheets.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/smartsheets\n\n Args:\n name (str): The name of the destination.\n access_token (str): The access token to use for accessing your data from Smartsheets. This access token must be generated by a user with at least read access to the data you'd like to replicate. Generate an access token in the Smartsheets main menu by clicking Account > Apps & Integrations > API Access. See the setup guide for information on how to obtain this token.\n spreadsheet_id (str): The spreadsheet ID. Find it by opening the spreadsheet then navigating to File > Properties\n start_datetime (Optional[str]): Only rows modified after this date/time will be replicated. This should be an ISO 8601 string, for instance: `2000-01-01T13:00:00`\n """\n self.access_token = check.str_param(access_token, "access_token")\n self.spreadsheet_id = check.str_param(spreadsheet_id, "spreadsheet_id")\n self.start_datetime = check.opt_str_param(start_datetime, "start_datetime")\n super().__init__("Smartsheets", name)
\n\n\n
[docs]class MailchimpSource(GeneratedAirbyteSource):\n
[docs] class OAuth20:\n
[docs] @public\n def __init__(\n self,\n access_token: str,\n client_id: Optional[str] = None,\n client_secret: Optional[str] = None,\n ):\n self.auth_type = "oauth2.0"\n self.client_id = check.opt_str_param(client_id, "client_id")\n self.client_secret = check.opt_str_param(client_secret, "client_secret")\n self.access_token = check.str_param(access_token, "access_token")
\n\n
[docs] class APIKey:\n
[docs] @public\n def __init__(self, apikey: str):\n self.auth_type = "apikey"\n self.apikey = check.str_param(apikey, "apikey")
\n\n
[docs] @public\n def __init__(\n self, name: str, credentials: Union["MailchimpSource.OAuth20", "MailchimpSource.APIKey"]\n ):\n """Airbyte Source for Mailchimp.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/mailchimp\n\n Args:\n name (str): The name of the destination.\n\n """\n self.credentials = check.inst_param(\n credentials, "credentials", (MailchimpSource.OAuth20, MailchimpSource.APIKey)\n )\n super().__init__("Mailchimp", name)
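A minimal sketch for MailchimpSource with API-key authentication (assumed import path; key is a placeholder):

    from dagster_airbyte.managed.generated.sources import MailchimpSource  # assumed module path

    mailchimp_source = MailchimpSource(
        name="mailchimp",
        credentials=MailchimpSource.APIKey(apikey="placeholder-key"),
    )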
\n\n\n
[docs]class SentrySource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n auth_token: str,\n organization: str,\n project: str,\n hostname: Optional[str] = None,\n discover_fields: Optional[List[str]] = None,\n ):\n """Airbyte Source for Sentry.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/sentry\n\n Args:\n name (str): The name of the destination.\n auth_token (str): Log into Sentry and then create authentication tokens.For self-hosted, you can find or create authentication tokens by visiting "{instance_url_prefix}/settings/account/api/auth-tokens/"\n hostname (Optional[str]): Host name of Sentry API server.For self-hosted, specify your host name here. Otherwise, leave it empty.\n organization (str): The slug of the organization the groups belong to.\n project (str): The name (slug) of the Project you want to sync.\n discover_fields (Optional[List[str]]): Fields to retrieve when fetching discover events\n """\n self.auth_token = check.str_param(auth_token, "auth_token")\n self.hostname = check.opt_str_param(hostname, "hostname")\n self.organization = check.str_param(organization, "organization")\n self.project = check.str_param(project, "project")\n self.discover_fields = check.opt_nullable_list_param(\n discover_fields, "discover_fields", str\n )\n super().__init__("Sentry", name)
\n\n\n
[docs]class MailgunSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n private_key: str,\n domain_region: Optional[str] = None,\n start_date: Optional[str] = None,\n ):\n """Airbyte Source for Mailgun.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/mailgun\n\n Args:\n name (str): The name of the destination.\n private_key (str): Primary account API key to access your Mailgun data.\n domain_region (Optional[str]): Domain region code. 'EU' or 'US' are possible values. The default is 'US'.\n start_date (Optional[str]): UTC date and time in the format 2020-10-01 00:00:00. Any data before this date will not be replicated. If omitted, defaults to 3 days ago.\n """\n self.private_key = check.str_param(private_key, "private_key")\n self.domain_region = check.opt_str_param(domain_region, "domain_region")\n self.start_date = check.opt_str_param(start_date, "start_date")\n super().__init__("Mailgun", name)
\n\n\n
[docs]class OnesignalSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, user_auth_key: str, start_date: str, outcome_names: str):\n """Airbyte Source for Onesignal.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/onesignal\n\n Args:\n name (str): The name of the destination.\n user_auth_key (str): OneSignal User Auth Key, see the docs for more information on how to obtain this key.\n start_date (str): The date from which you'd like to replicate data for OneSignal API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.\n outcome_names (str): Comma-separated list of names and the value (sum/count) for the returned outcome data. See the docs for more details\n """\n self.user_auth_key = check.str_param(user_auth_key, "user_auth_key")\n self.start_date = check.str_param(start_date, "start_date")\n self.outcome_names = check.str_param(outcome_names, "outcome_names")\n super().__init__("Onesignal", name)
\n\n\n
[docs]class PythonHttpTutorialSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, start_date: str, base: str, access_key: Optional[str] = None):\n """Airbyte Source for Python Http Tutorial.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/exchangeratesapi\n\n Args:\n name (str): The name of the destination.\n access_key (Optional[str]): API access key used to retrieve data from the Exchange Rates API.\n start_date (str): UTC date and time in the format 2017-01-25. Any data before this date will not be replicated.\n base (str): ISO reference currency. See here.\n """\n self.access_key = check.opt_str_param(access_key, "access_key")\n self.start_date = check.str_param(start_date, "start_date")\n self.base = check.str_param(base, "base")\n super().__init__("Python Http Tutorial", name)
\n\n\n
[docs]class AirtableSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_key: str, base_id: str, tables: List[str]):\n """Airbyte Source for Airtable.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/airtable\n\n Args:\n name (str): The name of the destination.\n api_key (str): The API Key for the Airtable account. See the Support Guide for more information on how to obtain this key.\n base_id (str): The Base ID to integrate the data from. You can find the Base ID following the link Airtable API, log in to your account, select the base you need and find Base ID in the docs.\n tables (List[str]): The list of Tables to integrate.\n """\n self.api_key = check.str_param(api_key, "api_key")\n self.base_id = check.str_param(base_id, "base_id")\n self.tables = check.list_param(tables, "tables", str)\n super().__init__("Airtable", name)
\n\n\n
[docs]class MongodbV2Source(GeneratedAirbyteSource):\n
[docs] class StandaloneMongoDbInstance:\n
[docs] @public\n def __init__(self, instance: str, host: str, port: int, tls: Optional[bool] = None):\n self.instance = check.str_param(instance, "instance")\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.tls = check.opt_bool_param(tls, "tls")
\n\n
[docs] class ReplicaSet:\n
[docs] @public\n def __init__(self, instance: str, server_addresses: str, replica_set: Optional[str] = None):\n self.instance = check.str_param(instance, "instance")\n self.server_addresses = check.str_param(server_addresses, "server_addresses")\n self.replica_set = check.opt_str_param(replica_set, "replica_set")
\n\n
[docs] class MongoDBAtlas:\n
[docs] @public\n def __init__(self, instance: str, cluster_url: str):\n self.instance = check.str_param(instance, "instance")\n self.cluster_url = check.str_param(cluster_url, "cluster_url")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n instance_type: Union[\n "MongodbV2Source.StandaloneMongoDbInstance",\n "MongodbV2Source.ReplicaSet",\n "MongodbV2Source.MongoDBAtlas",\n ],\n database: str,\n user: Optional[str] = None,\n password: Optional[str] = None,\n auth_source: Optional[str] = None,\n ):\n """Airbyte Source for Mongodb V2.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/mongodb-v2\n\n Args:\n name (str): The name of the destination.\n instance_type (Union[MongodbV2Source.StandaloneMongoDbInstance, MongodbV2Source.ReplicaSet, MongodbV2Source.MongoDBAtlas]): The MongoDb instance to connect to. For MongoDB Atlas and Replica Set TLS connection is used by default.\n database (str): The database you want to replicate.\n user (Optional[str]): The username which is used to access the database.\n password (Optional[str]): The password associated with this username.\n auth_source (Optional[str]): The authentication source where the user information is stored.\n """\n self.instance_type = check.inst_param(\n instance_type,\n "instance_type",\n (\n MongodbV2Source.StandaloneMongoDbInstance,\n MongodbV2Source.ReplicaSet,\n MongodbV2Source.MongoDBAtlas,\n ),\n )\n self.database = check.str_param(database, "database")\n self.user = check.opt_str_param(user, "user")\n self.password = check.opt_str_param(password, "password")\n self.auth_source = check.opt_str_param(auth_source, "auth_source")\n super().__init__("Mongodb V2", name)
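A sketch of a MongodbV2Source pointed at MongoDB Atlas; the import path, the `instance` discriminator value, and all connection values are assumptions/placeholders:

    from dagster_airbyte.managed.generated.sources import MongodbV2Source  # assumed module path

    mongo_source = MongodbV2Source(
        name="mongodb",
        instance_type=MongodbV2Source.MongoDBAtlas(
            instance="atlas",  # assumed discriminator value for the Atlas variant
            cluster_url="cluster0.example.mongodb.net",
        ),
        database="app_db",
        user="airbyte",
        password="placeholder-password",
        auth_source="admin",
    )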
\n\n\n
[docs]class FileSecureSource(GeneratedAirbyteSource):\n
[docs] class HTTPSPublicWeb:\n
[docs] @public\n def __init__(self, user_agent: Optional[bool] = None):\n self.storage = "HTTPS"\n self.user_agent = check.opt_bool_param(user_agent, "user_agent")
\n\n
[docs] class GCSGoogleCloudStorage:\n
[docs] @public\n def __init__(self, service_account_json: Optional[str] = None):\n self.storage = "GCS"\n self.service_account_json = check.opt_str_param(\n service_account_json, "service_account_json"\n )
\n\n
[docs] class S3AmazonWebServices:\n
[docs] @public\n def __init__(\n self,\n aws_access_key_id: Optional[str] = None,\n aws_secret_access_key: Optional[str] = None,\n ):\n self.storage = "S3"\n self.aws_access_key_id = check.opt_str_param(aws_access_key_id, "aws_access_key_id")\n self.aws_secret_access_key = check.opt_str_param(\n aws_secret_access_key, "aws_secret_access_key"\n )
\n\n
[docs] class AzBlobAzureBlobStorage:\n
[docs] @public\n def __init__(\n self,\n storage_account: str,\n sas_token: Optional[str] = None,\n shared_key: Optional[str] = None,\n ):\n self.storage = "AzBlob"\n self.storage_account = check.str_param(storage_account, "storage_account")\n self.sas_token = check.opt_str_param(sas_token, "sas_token")\n self.shared_key = check.opt_str_param(shared_key, "shared_key")
\n\n
[docs] class SSHSecureShell:\n
[docs] @public\n def __init__(\n self, user: str, host: str, password: Optional[str] = None, port: Optional[str] = None\n ):\n self.storage = "SSH"\n self.user = check.str_param(user, "user")\n self.password = check.opt_str_param(password, "password")\n self.host = check.str_param(host, "host")\n self.port = check.opt_str_param(port, "port")
\n\n
[docs] class SCPSecureCopyProtocol:\n
[docs] @public\n def __init__(\n self, user: str, host: str, password: Optional[str] = None, port: Optional[str] = None\n ):\n self.storage = "SCP"\n self.user = check.str_param(user, "user")\n self.password = check.opt_str_param(password, "password")\n self.host = check.str_param(host, "host")\n self.port = check.opt_str_param(port, "port")
\n\n
[docs] class SFTPSecureFileTransferProtocol:\n
[docs] @public\n def __init__(\n self, user: str, host: str, password: Optional[str] = None, port: Optional[str] = None\n ):\n self.storage = "SFTP"\n self.user = check.str_param(user, "user")\n self.password = check.opt_str_param(password, "password")\n self.host = check.str_param(host, "host")\n self.port = check.opt_str_param(port, "port")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n dataset_name: str,\n format: str,\n url: str,\n provider: Union[\n "FileSecureSource.HTTPSPublicWeb",\n "FileSecureSource.GCSGoogleCloudStorage",\n "FileSecureSource.S3AmazonWebServices",\n "FileSecureSource.AzBlobAzureBlobStorage",\n "FileSecureSource.SSHSecureShell",\n "FileSecureSource.SCPSecureCopyProtocol",\n "FileSecureSource.SFTPSecureFileTransferProtocol",\n ],\n reader_options: Optional[str] = None,\n ):\n """Airbyte Source for File Secure.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/file\n\n Args:\n name (str): The name of the destination.\n dataset_name (str): The Name of the final table to replicate this file into (should include letters, numbers dash and underscores only).\n format (str): The Format of the file which should be replicated (Warning: some formats may be experimental, please refer to the docs).\n reader_options (Optional[str]): This should be a string in JSON format. It depends on the chosen file format to provide additional options and tune its behavior.\n url (str): The URL path to access the file which should be replicated.\n provider (Union[FileSecureSource.HTTPSPublicWeb, FileSecureSource.GCSGoogleCloudStorage, FileSecureSource.S3AmazonWebServices, FileSecureSource.AzBlobAzureBlobStorage, FileSecureSource.SSHSecureShell, FileSecureSource.SCPSecureCopyProtocol, FileSecureSource.SFTPSecureFileTransferProtocol]): The storage Provider or Location of the file(s) which should be replicated.\n """\n self.dataset_name = check.str_param(dataset_name, "dataset_name")\n self.format = check.str_param(format, "format")\n self.reader_options = check.opt_str_param(reader_options, "reader_options")\n self.url = check.str_param(url, "url")\n self.provider = check.inst_param(\n provider,\n "provider",\n (\n FileSecureSource.HTTPSPublicWeb,\n FileSecureSource.GCSGoogleCloudStorage,\n FileSecureSource.S3AmazonWebServices,\n FileSecureSource.AzBlobAzureBlobStorage,\n FileSecureSource.SSHSecureShell,\n FileSecureSource.SCPSecureCopyProtocol,\n FileSecureSource.SFTPSecureFileTransferProtocol,\n ),\n )\n super().__init__("File Secure", name)
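A minimal sketch for FileSecureSource replicating a public HTTPS CSV file (assumed import path; URL and options are placeholders):

    from dagster_airbyte.managed.generated.sources import FileSecureSource  # assumed module path

    file_source = FileSecureSource(
        name="rates_file",
        dataset_name="exchange_rates",
        format="csv",
        url="https://example.com/data/rates.csv",
        provider=FileSecureSource.HTTPSPublicWeb(),
        reader_options='{"sep": ","}',  # optional JSON string passed to the reader
    )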
\n\n\n
[docs]class ZendeskSupportSource(GeneratedAirbyteSource):\n
[docs] class OAuth20:\n
[docs] @public\n def __init__(self, access_token: str, credentials: Optional[str] = None):\n self.credentials = check.opt_str_param(credentials, "credentials")\n self.access_token = check.str_param(access_token, "access_token")
\n\n
[docs] class APIToken:\n
[docs] @public\n def __init__(self, email: str, api_token: str, credentials: Optional[str] = None):\n self.credentials = check.opt_str_param(credentials, "credentials")\n self.email = check.str_param(email, "email")\n self.api_token = check.str_param(api_token, "api_token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n start_date: str,\n subdomain: str,\n credentials: Union["ZendeskSupportSource.OAuth20", "ZendeskSupportSource.APIToken"],\n ):\n """Airbyte Source for Zendesk Support.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/zendesk-support\n\n Args:\n name (str): The name of the destination.\n start_date (str): The date from which you'd like to replicate data for Zendesk Support API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.\n subdomain (str): This is your Zendesk subdomain that can be found in your account URL. For example, in https://{MY_SUBDOMAIN}.zendesk.com/, where MY_SUBDOMAIN is the value of your subdomain.\n credentials (Union[ZendeskSupportSource.OAuth20, ZendeskSupportSource.APIToken]): Zendesk service provides two authentication methods. Choose between: `OAuth2.0` or `API token`.\n """\n self.start_date = check.str_param(start_date, "start_date")\n self.subdomain = check.str_param(subdomain, "subdomain")\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (ZendeskSupportSource.OAuth20, ZendeskSupportSource.APIToken),\n )\n super().__init__("Zendesk Support", name)
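# Illustrative usage sketch (not part of the generated module): constructs a
# ZendeskSupportSource authenticated with an API token; OAuth20 is the other
# accepted credentials class. All values below are placeholders.
def _example_zendesk_support_source() -> "ZendeskSupportSource":
    return ZendeskSupportSource(
        name="zendesk_support",
        start_date="2021-01-01T00:00:00Z",
        subdomain="my_subdomain",
        credentials=ZendeskSupportSource.APIToken(
            email="integrations@example.com",
            api_token="<api token>",
        ),
    )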
\n\n\n
[docs]class TempoSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_token: str):\n """Airbyte Source for Tempo.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/\n\n Args:\n name (str): The name of the destination.\n api_token (str): Tempo API Token. Go to Tempo>Settings, scroll down to Data Access and select API integration.\n """\n self.api_token = check.str_param(api_token, "api_token")\n super().__init__("Tempo", name)
\n\n\n
[docs]class BraintreeSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n merchant_id: str,\n public_key: str,\n private_key: str,\n environment: str,\n start_date: Optional[str] = None,\n ):\n """Airbyte Source for Braintree.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/braintree\n\n Args:\n name (str): The name of the destination.\n merchant_id (str): The unique identifier for your entire gateway account. See the docs for more information on how to obtain this ID.\n public_key (str): Braintree Public Key. See the docs for more information on how to obtain this key.\n private_key (str): Braintree Private Key. See the docs for more information on how to obtain this key.\n start_date (Optional[str]): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.\n environment (str): Environment specifies where the data will come from.\n """\n self.merchant_id = check.str_param(merchant_id, "merchant_id")\n self.public_key = check.str_param(public_key, "public_key")\n self.private_key = check.str_param(private_key, "private_key")\n self.start_date = check.opt_str_param(start_date, "start_date")\n self.environment = check.str_param(environment, "environment")\n super().__init__("Braintree", name)
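# Illustrative usage sketch (not part of the generated module): a BraintreeSource.
# All values below are placeholders; the accepted `environment` strings are
# defined by the Airbyte connector, not by this sketch.
def _example_braintree_source() -> "BraintreeSource":
    return BraintreeSource(
        name="braintree",
        merchant_id="<merchant id>",
        public_key="<public key>",
        private_key="<private key>",
        environment="Sandbox",
        start_date="2020-01-01T00:00:00Z",
    )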
\n\n\n
[docs]class SalesloftSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self, name: str, client_id: str, client_secret: str, refresh_token: str, start_date: str\n ):\n """Airbyte Source for Salesloft.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/salesloft\n\n Args:\n name (str): The name of the destination.\n client_id (str): The Client ID of your Salesloft developer application.\n client_secret (str): The Client Secret of your Salesloft developer application.\n refresh_token (str): The token for obtaining a new access token.\n start_date (str): The date from which you'd like to replicate data for Salesloft API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.\n """\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Salesloft", name)
\n\n\n
[docs]class LinnworksSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self, name: str, application_id: str, application_secret: str, token: str, start_date: str\n ):\n """Airbyte Source for Linnworks.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/linnworks\n\n Args:\n name (str): The name of the destination.\n application_id (str): Linnworks Application ID\n application_secret (str): Linnworks Application Secret\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.\n """\n self.application_id = check.str_param(application_id, "application_id")\n self.application_secret = check.str_param(application_secret, "application_secret")\n self.token = check.str_param(token, "token")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Linnworks", name)
\n\n\n
[docs]class ChargebeeSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self, name: str, site: str, site_api_key: str, start_date: str, product_catalog: str\n ):\n """Airbyte Source for Chargebee.\n\n Documentation can be found at https://apidocs.chargebee.com/docs/api\n\n Args:\n name (str): The name of the destination.\n site (str): The site prefix for your Chargebee instance.\n site_api_key (str): Chargebee API Key. See the docs for more information on how to obtain this key.\n start_date (str): UTC date and time in the format 2021-01-25T00:00:00Z. Any data before this date will not be replicated.\n product_catalog (str): Product Catalog version of your Chargebee site. Instructions on how to find your version can be found here under the `API Version` section.\n """\n self.site = check.str_param(site, "site")\n self.site_api_key = check.str_param(site_api_key, "site_api_key")\n self.start_date = check.str_param(start_date, "start_date")\n self.product_catalog = check.str_param(product_catalog, "product_catalog")\n super().__init__("Chargebee", name)
\n\n\n
[docs]class GoogleAnalyticsDataApiSource(GeneratedAirbyteSource):\n
[docs] class AuthenticateViaGoogleOauth:\n
[docs] @public\n def __init__(\n self,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n auth_type: Optional[str] = None,\n access_token: Optional[str] = None,\n ):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")\n self.access_token = check.opt_str_param(access_token, "access_token")
\n\n
[docs] class ServiceAccountKeyAuthentication:\n
[docs] @public\n def __init__(self, credentials_json: str, auth_type: Optional[str] = None):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.credentials_json = check.str_param(credentials_json, "credentials_json")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n property_id: str,\n credentials: Union[\n "GoogleAnalyticsDataApiSource.AuthenticateViaGoogleOauth",\n "GoogleAnalyticsDataApiSource.ServiceAccountKeyAuthentication",\n ],\n date_ranges_start_date: str,\n custom_reports: Optional[str] = None,\n window_in_days: Optional[int] = None,\n ):\n """Airbyte Source for Google Analytics Data Api.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/google-analytics-v4\n\n Args:\n name (str): The name of the destination.\n property_id (str): A Google Analytics GA4 property identifier whose events are tracked. Specified in the URL path and not the body\n credentials (Union[GoogleAnalyticsDataApiSource.AuthenticateViaGoogleOauth, GoogleAnalyticsDataApiSource.ServiceAccountKeyAuthentication]): Credentials for the service\n date_ranges_start_date (str): The start date. One of the values Ndaysago, yesterday, today or in the format YYYY-MM-DD\n custom_reports (Optional[str]): A JSON array describing the custom reports you want to sync from Google Analytics. See the docs for more information about the exact format you can use to fill out this field.\n window_in_days (Optional[int]): The time increment used by the connector when requesting data from the Google Analytics API. More information is available in the the docs. The bigger this value is, the faster the sync will be, but the more likely that sampling will be applied to your data, potentially causing inaccuracies in the returned results. We recommend setting this to 1 unless you have a hard requirement to make the sync faster at the expense of accuracy. The minimum allowed value for this field is 1, and the maximum is 364.\n """\n self.property_id = check.str_param(property_id, "property_id")\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (\n GoogleAnalyticsDataApiSource.AuthenticateViaGoogleOauth,\n GoogleAnalyticsDataApiSource.ServiceAccountKeyAuthentication,\n ),\n )\n self.date_ranges_start_date = check.str_param(\n date_ranges_start_date, "date_ranges_start_date"\n )\n self.custom_reports = check.opt_str_param(custom_reports, "custom_reports")\n self.window_in_days = check.opt_int_param(window_in_days, "window_in_days")\n super().__init__("Google Analytics Data Api", name)
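# Illustrative usage sketch (not part of the generated module): a
# GoogleAnalyticsDataApiSource using a service account key; the other option is
# AuthenticateViaGoogleOauth. Values are placeholders, and credentials_json
# would normally come from a secrets store rather than be inlined.
def _example_ga_data_api_source() -> "GoogleAnalyticsDataApiSource":
    return GoogleAnalyticsDataApiSource(
        name="google_analytics_data_api",
        property_id="123456789",
        credentials=GoogleAnalyticsDataApiSource.ServiceAccountKeyAuthentication(
            credentials_json='{"type": "service_account", "...": "..."}',
        ),
        date_ranges_start_date="2021-01-01",
        # Keep the query window small to reduce the chance of sampled results.
        window_in_days=1,
    )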
\n\n\n
[docs]class OutreachSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n redirect_uri: str,\n start_date: str,\n ):\n """Airbyte Source for Outreach.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/outreach\n\n Args:\n name (str): The name of the destination.\n client_id (str): The Client ID of your Outreach developer application.\n client_secret (str): The Client Secret of your Outreach developer application.\n refresh_token (str): The token for obtaining the new access token.\n redirect_uri (str): A Redirect URI is the location where the authorization server sends the user once the app has been successfully authorized and granted an authorization code or access token.\n start_date (str): The date from which you'd like to replicate data for Outreach API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.\n """\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")\n self.redirect_uri = check.str_param(redirect_uri, "redirect_uri")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Outreach", name)
\n\n\n
[docs]class LemlistSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_key: str):\n """Airbyte Source for Lemlist.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/lemlist\n\n Args:\n name (str): The name of the destination.\n api_key (str): Lemlist API key.\n """\n self.api_key = check.str_param(api_key, "api_key")\n super().__init__("Lemlist", name)
\n\n\n
[docs]class ApifyDatasetSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, datasetId: str, clean: Optional[bool] = None):\n """Airbyte Source for Apify Dataset.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/apify-dataset\n\n Args:\n name (str): The name of the destination.\n datasetId (str): ID of the dataset you would like to load to Airbyte.\n clean (Optional[bool]): If set to true, only clean items will be downloaded from the dataset. See description of what clean means in Apify API docs. If not sure, set clean to false.\n """\n self.datasetId = check.str_param(datasetId, "datasetId")\n self.clean = check.opt_bool_param(clean, "clean")\n super().__init__("Apify Dataset", name)
\n\n\n
[docs]class RecurlySource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n api_key: str,\n begin_time: Optional[str] = None,\n end_time: Optional[str] = None,\n ):\n """Airbyte Source for Recurly.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/recurly\n\n Args:\n name (str): The name of the destination.\n api_key (str): Recurly API Key. See the docs for more information on how to generate this key.\n begin_time (Optional[str]): ISO8601 timestamp from which the replication from Recurly API will start from.\n end_time (Optional[str]): ISO8601 timestamp to which the replication from Recurly API will stop. Records after that date won't be imported.\n """\n self.api_key = check.str_param(api_key, "api_key")\n self.begin_time = check.opt_str_param(begin_time, "begin_time")\n self.end_time = check.opt_str_param(end_time, "end_time")\n super().__init__("Recurly", name)
\n\n\n
[docs]class ZendeskTalkSource(GeneratedAirbyteSource):\n
[docs] class APIToken:\n
[docs] @public\n def __init__(self, email: str, api_token: str, auth_type: Optional[str] = None):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.email = check.str_param(email, "email")\n self.api_token = check.str_param(api_token, "api_token")
\n\n
[docs] class OAuth20:\n
[docs] @public\n def __init__(self, access_token: str, auth_type: Optional[str] = None):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.access_token = check.str_param(access_token, "access_token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n subdomain: str,\n credentials: Union["ZendeskTalkSource.APIToken", "ZendeskTalkSource.OAuth20"],\n start_date: str,\n ):\n """Airbyte Source for Zendesk Talk.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/zendesk-talk\n\n Args:\n name (str): The name of the destination.\n subdomain (str): This is your Zendesk subdomain that can be found in your account URL. For example, in https://{MY_SUBDOMAIN}.zendesk.com/, where MY_SUBDOMAIN is the value of your subdomain.\n credentials (Union[ZendeskTalkSource.APIToken, ZendeskTalkSource.OAuth20]): Zendesk service provides two authentication methods. Choose between: `OAuth2.0` or `API token`.\n start_date (str): The date from which you'd like to replicate data for Zendesk Talk API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.\n """\n self.subdomain = check.str_param(subdomain, "subdomain")\n self.credentials = check.inst_param(\n credentials, "credentials", (ZendeskTalkSource.APIToken, ZendeskTalkSource.OAuth20)\n )\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Zendesk Talk", name)
\n\n\n
[docs]class SftpSource(GeneratedAirbyteSource):\n
[docs] class PasswordAuthentication:\n
[docs] @public\n def __init__(self, auth_user_password: str):\n self.auth_method = "SSH_PASSWORD_AUTH"\n self.auth_user_password = check.str_param(auth_user_password, "auth_user_password")
\n\n
[docs] class SSHKeyAuthentication:\n
[docs] @public\n def __init__(self, auth_ssh_key: str):\n self.auth_method = "SSH_KEY_AUTH"\n self.auth_ssh_key = check.str_param(auth_ssh_key, "auth_ssh_key")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n user: str,\n host: str,\n port: int,\n credentials: Union["SftpSource.PasswordAuthentication", "SftpSource.SSHKeyAuthentication"],\n file_types: Optional[str] = None,\n folder_path: Optional[str] = None,\n file_pattern: Optional[str] = None,\n ):\n """Airbyte Source for Sftp.\n\n Documentation can be found at https://docs.airbyte.com/integrations/source/sftp\n\n Args:\n name (str): The name of the destination.\n user (str): The server user\n host (str): The server host address\n port (int): The server port\n credentials (Union[SftpSource.PasswordAuthentication, SftpSource.SSHKeyAuthentication]): The server authentication method\n file_types (Optional[str]): Coma separated file types. Currently only 'csv' and 'json' types are supported.\n folder_path (Optional[str]): The directory to search files for sync\n file_pattern (Optional[str]): The regular expression to specify files for sync in a chosen Folder Path\n """\n self.user = check.str_param(user, "user")\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (SftpSource.PasswordAuthentication, SftpSource.SSHKeyAuthentication),\n )\n self.file_types = check.opt_str_param(file_types, "file_types")\n self.folder_path = check.opt_str_param(folder_path, "folder_path")\n self.file_pattern = check.opt_str_param(file_pattern, "file_pattern")\n super().__init__("Sftp", name)
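# Illustrative usage sketch (not part of the generated module): an SftpSource
# using SSH key authentication and syncing only CSV files from one folder.
# Host, key material, and paths below are placeholders.
def _example_sftp_source() -> "SftpSource":
    return SftpSource(
        name="sftp",
        user="sync_user",
        host="sftp.example.com",
        port=22,
        credentials=SftpSource.SSHKeyAuthentication(auth_ssh_key="<private key>"),
        file_types="csv",
        folder_path="/uploads",
        file_pattern=r".*\.csv",
    )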
\n\n\n
[docs]class WhiskyHunterSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n ):\n """Airbyte Source for Whisky Hunter.\n\n Documentation can be found at https://docs.airbyte.io/integrations/sources/whisky-hunter\n\n Args:\n name (str): The name of the destination.\n\n """\n super().__init__("Whisky Hunter", name)
\n\n\n
[docs]class FreshdeskSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n domain: str,\n api_key: str,\n requests_per_minute: Optional[int] = None,\n start_date: Optional[str] = None,\n ):\n """Airbyte Source for Freshdesk.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/freshdesk\n\n Args:\n name (str): The name of the destination.\n domain (str): Freshdesk domain\n api_key (str): Freshdesk API Key. See the docs for more information on how to obtain this key.\n requests_per_minute (Optional[int]): The number of requests per minute that this source is allowed to use. There is a rate limit of 50 requests per minute per app per account.\n start_date (Optional[str]): UTC date and time. Any data created after this date will be replicated. If this parameter is not set, all data will be replicated.\n """\n self.domain = check.str_param(domain, "domain")\n self.api_key = check.str_param(api_key, "api_key")\n self.requests_per_minute = check.opt_int_param(requests_per_minute, "requests_per_minute")\n self.start_date = check.opt_str_param(start_date, "start_date")\n super().__init__("Freshdesk", name)
\n\n\n
[docs]class GocardlessSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n access_token: str,\n gocardless_environment: str,\n gocardless_version: str,\n start_date: str,\n ):\n """Airbyte Source for Gocardless.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/gocardless\n\n Args:\n name (str): The name of the destination.\n access_token (str): Gocardless API TOKEN\n gocardless_environment (str): Environment you are trying to connect to.\n gocardless_version (str): GoCardless version. This is a date. You can find the latest here: https://developer.gocardless.com/api-reference/#api-usage-making-requests\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.\n """\n self.access_token = check.str_param(access_token, "access_token")\n self.gocardless_environment = check.str_param(\n gocardless_environment, "gocardless_environment"\n )\n self.gocardless_version = check.str_param(gocardless_version, "gocardless_version")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Gocardless", name)
\n\n\n
[docs]class ZuoraSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n start_date: str,\n tenant_endpoint: str,\n data_query: str,\n client_id: str,\n client_secret: str,\n window_in_days: Optional[str] = None,\n ):\n """Airbyte Source for Zuora.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/zuora\n\n Args:\n name (str): The name of the destination.\n start_date (str): Start Date in format: YYYY-MM-DD\n window_in_days (Optional[str]): The number of days for each data chunk, beginning from start_date. The bigger the value, the faster the fetch (0.1 - a couple of hours, 1 - a day, 364 - a year).\n tenant_endpoint (str): Please choose the right endpoint where your Tenant is located. More info by this Link\n data_query (str): Choose between `Live` or `Unlimited` - the optimized, replicated database at 12 hours freshness for high volume extraction Link\n client_id (str): Your OAuth user Client ID\n client_secret (str): Your OAuth user Client Secret\n """\n self.start_date = check.str_param(start_date, "start_date")\n self.window_in_days = check.opt_str_param(window_in_days, "window_in_days")\n self.tenant_endpoint = check.str_param(tenant_endpoint, "tenant_endpoint")\n self.data_query = check.str_param(data_query, "data_query")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n super().__init__("Zuora", name)
\n\n\n
[docs]class MarketoSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self, name: str, domain_url: str, client_id: str, client_secret: str, start_date: str\n ):\n """Airbyte Source for Marketo.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/marketo\n\n Args:\n name (str): The name of the destination.\n domain_url (str): Your Marketo Base URL. See the docs for info on how to obtain this.\n client_id (str): The Client ID of your Marketo developer application. See the docs for info on how to obtain this.\n client_secret (str): The Client Secret of your Marketo developer application. See the docs for info on how to obtain this.\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.\n """\n self.domain_url = check.str_param(domain_url, "domain_url")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Marketo", name)
\n\n\n
[docs]class DriftSource(GeneratedAirbyteSource):\n
[docs] class OAuth20:\n
[docs] @public\n def __init__(\n self,\n client_id: str,\n client_secret: str,\n access_token: str,\n refresh_token: str,\n credentials: Optional[str] = None,\n ):\n self.credentials = check.opt_str_param(credentials, "credentials")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.access_token = check.str_param(access_token, "access_token")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] class AccessToken:\n
[docs] @public\n def __init__(self, access_token: str, credentials: Optional[str] = None):\n self.credentials = check.opt_str_param(credentials, "credentials")\n self.access_token = check.str_param(access_token, "access_token")
\n\n
[docs] @public\n def __init__(\n self, name: str, credentials: Union["DriftSource.OAuth20", "DriftSource.AccessToken"]\n ):\n """Airbyte Source for Drift.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/drift\n\n Args:\n name (str): The name of the destination.\n\n """\n self.credentials = check.inst_param(\n credentials, "credentials", (DriftSource.OAuth20, DriftSource.AccessToken)\n )\n super().__init__("Drift", name)
\n\n\n
[docs]class PokeapiSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, pokemon_name: str):\n """Airbyte Source for Pokeapi.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/pokeapi\n\n Args:\n name (str): The name of the destination.\n pokemon_name (str): Pokemon requested from the API.\n """\n self.pokemon_name = check.str_param(pokemon_name, "pokemon_name")\n super().__init__("Pokeapi", name)
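# Illustrative usage sketch (not part of the generated module): the simplest
# generated source, configured entirely by a single field.
def _example_pokeapi_source() -> "PokeapiSource":
    return PokeapiSource(name="pokeapi", pokemon_name="ditto")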
\n\n\n
[docs]class NetsuiteSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n realm: str,\n consumer_key: str,\n consumer_secret: str,\n token_key: str,\n token_secret: str,\n start_datetime: str,\n object_types: Optional[List[str]] = None,\n window_in_days: Optional[int] = None,\n ):\n """Airbyte Source for Netsuite.\n\n Args:\n name (str): The name of the destination.\n realm (str): Netsuite realm e.g. 2344535, as for `production` or 2344535_SB1, as for the `sandbox`\n consumer_key (str): Consumer key associated with your integration\n consumer_secret (str): Consumer secret associated with your integration\n token_key (str): Access token key\n token_secret (str): Access token secret\n object_types (Optional[List[str]]): The API names of the Netsuite objects you want to sync. Setting this speeds up the connection setup process by limiting the number of schemas that need to be retrieved from Netsuite.\n start_datetime (str): Starting point for your data replication, in format of "YYYY-MM-DDTHH:mm:ssZ"\n window_in_days (Optional[int]): The amount of days used to query the data with date chunks. Set smaller value, if you have lots of data.\n """\n self.realm = check.str_param(realm, "realm")\n self.consumer_key = check.str_param(consumer_key, "consumer_key")\n self.consumer_secret = check.str_param(consumer_secret, "consumer_secret")\n self.token_key = check.str_param(token_key, "token_key")\n self.token_secret = check.str_param(token_secret, "token_secret")\n self.object_types = check.opt_nullable_list_param(object_types, "object_types", str)\n self.start_datetime = check.str_param(start_datetime, "start_datetime")\n self.window_in_days = check.opt_int_param(window_in_days, "window_in_days")\n super().__init__("Netsuite", name)
\n\n\n
[docs]class HubplannerSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_key: str):\n """Airbyte Source for Hubplanner.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/hubplanner\n\n Args:\n name (str): The name of the destination.\n api_key (str): Hubplanner API key. See https://github.com/hubplanner/API#authentication for more details.\n """\n self.api_key = check.str_param(api_key, "api_key")\n super().__init__("Hubplanner", name)
\n\n\n
[docs]class Dv360Source(GeneratedAirbyteSource):\n
[docs] class Oauth2Credentials:\n
[docs] @public\n def __init__(\n self,\n access_token: str,\n refresh_token: str,\n token_uri: str,\n client_id: str,\n client_secret: str,\n ):\n self.access_token = check.str_param(access_token, "access_token")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")\n self.token_uri = check.str_param(token_uri, "token_uri")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n credentials: "Dv360Source.Oauth2Credentials",\n partner_id: int,\n start_date: str,\n end_date: Optional[str] = None,\n filters: Optional[List[str]] = None,\n ):\n """Airbyte Source for Dv 360.\n\n Args:\n name (str): The name of the destination.\n credentials (Dv360Source.Oauth2Credentials): Oauth2 credentials\n partner_id (int): Partner ID\n start_date (str): UTC date and time in the format 2017-01-25. Any data before this date will not be replicated.\n end_date (Optional[str]): UTC date and time in the format 2017-01-25. Any data after this date will not be replicated.\n filters (Optional[List[str]]): Filters for the dimensions. Each filter object has 2 keys: 'type' for the name of the dimension to filter on, and 'value' for the value of the filter.\n """\n self.credentials = check.inst_param(\n credentials, "credentials", Dv360Source.Oauth2Credentials\n )\n self.partner_id = check.int_param(partner_id, "partner_id")\n self.start_date = check.str_param(start_date, "start_date")\n self.end_date = check.opt_str_param(end_date, "end_date")\n self.filters = check.opt_nullable_list_param(filters, "filters", str)\n super().__init__("Dv 360", name)
\n\n\n
[docs]class NotionSource(GeneratedAirbyteSource):\n
[docs] class OAuth20:\n
[docs] @public\n def __init__(self, client_id: str, client_secret: str, access_token: str):\n self.auth_type = "OAuth2.0"\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.access_token = check.str_param(access_token, "access_token")
\n\n
[docs] class AccessToken:\n
[docs] @public\n def __init__(self, token: str):\n self.auth_type = "token"\n self.token = check.str_param(token, "token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n start_date: str,\n credentials: Union["NotionSource.OAuth20", "NotionSource.AccessToken"],\n ):\n """Airbyte Source for Notion.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/notion\n\n Args:\n name (str): The name of the destination.\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00.000Z. Any data before this date will not be replicated.\n credentials (Union[NotionSource.OAuth20, NotionSource.AccessToken]): Pick an authentication method.\n """\n self.start_date = check.str_param(start_date, "start_date")\n self.credentials = check.inst_param(\n credentials, "credentials", (NotionSource.OAuth20, NotionSource.AccessToken)\n )\n super().__init__("Notion", name)
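# Illustrative usage sketch (not part of the generated module): a NotionSource
# authenticated with an integration access token; OAuth20 is the alternative
# credentials class. The token value is a placeholder.
def _example_notion_source() -> "NotionSource":
    return NotionSource(
        name="notion",
        start_date="2021-01-01T00:00:00.000Z",
        credentials=NotionSource.AccessToken(token="<integration token>"),
    )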
\n\n\n
[docs]class ZendeskSunshineSource(GeneratedAirbyteSource):\n
[docs] class OAuth20:\n
[docs] @public\n def __init__(self, client_id: str, client_secret: str, access_token: str):\n self.auth_method = "oauth2.0"\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.access_token = check.str_param(access_token, "access_token")
\n\n
[docs] class APIToken:\n
[docs] @public\n def __init__(self, api_token: str, email: str):\n self.auth_method = "api_token"\n self.api_token = check.str_param(api_token, "api_token")\n self.email = check.str_param(email, "email")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n subdomain: str,\n start_date: str,\n credentials: Union["ZendeskSunshineSource.OAuth20", "ZendeskSunshineSource.APIToken"],\n ):\n """Airbyte Source for Zendesk Sunshine.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/zendesk_sunshine\n\n Args:\n name (str): The name of the destination.\n subdomain (str): The subdomain for your Zendesk Account.\n start_date (str): The date from which you'd like to replicate data for Zendesk Sunshine API, in the format YYYY-MM-DDT00:00:00Z.\n """\n self.subdomain = check.str_param(subdomain, "subdomain")\n self.start_date = check.str_param(start_date, "start_date")\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (ZendeskSunshineSource.OAuth20, ZendeskSunshineSource.APIToken),\n )\n super().__init__("Zendesk Sunshine", name)
\n\n\n
[docs]class PinterestSource(GeneratedAirbyteSource):\n
[docs] class OAuth20:\n
[docs] @public\n def __init__(\n self,\n refresh_token: str,\n client_id: Optional[str] = None,\n client_secret: Optional[str] = None,\n ):\n self.auth_method = "oauth2.0"\n self.client_id = check.opt_str_param(client_id, "client_id")\n self.client_secret = check.opt_str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] class AccessToken:\n
[docs] @public\n def __init__(self, access_token: str):\n self.auth_method = "access_token"\n self.access_token = check.str_param(access_token, "access_token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n start_date: str,\n credentials: Union["PinterestSource.OAuth20", "PinterestSource.AccessToken"],\n ):\n """Airbyte Source for Pinterest.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/pinterest\n\n Args:\n name (str): The name of the destination.\n start_date (str): A date in the format YYYY-MM-DD. If you have not set a date, it would be defaulted to latest allowed date by api (914 days from today).\n """\n self.start_date = check.str_param(start_date, "start_date")\n self.credentials = check.inst_param(\n credentials, "credentials", (PinterestSource.OAuth20, PinterestSource.AccessToken)\n )\n super().__init__("Pinterest", name)
\n\n\n
[docs]class MetabaseSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n instance_api_url: str,\n username: Optional[str] = None,\n password: Optional[str] = None,\n session_token: Optional[str] = None,\n ):\n r"""Airbyte Source for Metabase.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/metabase\n\n Args:\n name (str): The name of the destination.\n instance_api_url (str): URL to your metabase instance API\n session_token (Optional[str]): To generate your session token, you need to run the following command: ``` curl -X POST \\\\ -H "Content-Type: application/json" \\\\ -d '{"username": "person@metabase.com", "password": "fakepassword"}' \\\\ http://localhost:3000/api/session ``` Then copy the value of the `id` field returned by a successful call to that API. Note that by default, sessions are good for 14 days and needs to be regenerated.\n """\n self.instance_api_url = check.str_param(instance_api_url, "instance_api_url")\n self.username = check.opt_str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.session_token = check.opt_str_param(session_token, "session_token")\n super().__init__("Metabase", name)
\n\n\n
[docs]class HubspotSource(GeneratedAirbyteSource):\n
[docs] class OAuth:\n
[docs] @public\n def __init__(self, client_id: str, client_secret: str, refresh_token: str):\n self.credentials_title = "OAuth Credentials"\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] class APIKey:\n
[docs] @public\n def __init__(self, api_key: str):\n self.credentials_title = "API Key Credentials"\n self.api_key = check.str_param(api_key, "api_key")
\n\n
[docs] class PrivateAPP:\n
[docs] @public\n def __init__(self, access_token: str):\n self.credentials_title = "Private App Credentials"\n self.access_token = check.str_param(access_token, "access_token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n start_date: str,\n credentials: Union[\n "HubspotSource.OAuth", "HubspotSource.APIKey", "HubspotSource.PrivateAPP"\n ],\n ):\n """Airbyte Source for Hubspot.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/hubspot\n\n Args:\n name (str): The name of the destination.\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.\n credentials (Union[HubspotSource.OAuth, HubspotSource.APIKey, HubspotSource.PrivateAPP]): Choose how to authenticate to HubSpot.\n """\n self.start_date = check.str_param(start_date, "start_date")\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (HubspotSource.OAuth, HubspotSource.APIKey, HubspotSource.PrivateAPP),\n )\n super().__init__("Hubspot", name)
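# Illustrative usage sketch (not part of the generated module): a HubspotSource
# using private-app credentials; OAuth and APIKey are the other accepted
# credential classes. The access token is a placeholder.
def _example_hubspot_source() -> "HubspotSource":
    return HubspotSource(
        name="hubspot",
        start_date="2021-01-25T00:00:00Z",
        credentials=HubspotSource.PrivateAPP(access_token="<private app token>"),
    )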
\n\n\n
[docs]class HarvestSource(GeneratedAirbyteSource):\n
[docs] class AuthenticateViaHarvestOAuth:\n
[docs] @public\n def __init__(\n self,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n auth_type: Optional[str] = None,\n ):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] class AuthenticateWithPersonalAccessToken:\n
[docs] @public\n def __init__(self, api_token: str, auth_type: Optional[str] = None):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.api_token = check.str_param(api_token, "api_token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n account_id: str,\n replication_start_date: str,\n credentials: Union[\n "HarvestSource.AuthenticateViaHarvestOAuth",\n "HarvestSource.AuthenticateWithPersonalAccessToken",\n ],\n ):\n """Airbyte Source for Harvest.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/harvest\n\n Args:\n name (str): The name of the destination.\n account_id (str): Harvest account ID. Required for all Harvest requests in pair with Personal Access Token\n replication_start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.\n credentials (Union[HarvestSource.AuthenticateViaHarvestOAuth, HarvestSource.AuthenticateWithPersonalAccessToken]): Choose how to authenticate to Harvest.\n """\n self.account_id = check.str_param(account_id, "account_id")\n self.replication_start_date = check.str_param(\n replication_start_date, "replication_start_date"\n )\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (\n HarvestSource.AuthenticateViaHarvestOAuth,\n HarvestSource.AuthenticateWithPersonalAccessToken,\n ),\n )\n super().__init__("Harvest", name)
\n\n\n
[docs]class GithubSource(GeneratedAirbyteSource):\n
[docs] class OAuthCredentials:\n
[docs] @public\n def __init__(self, access_token: str):\n self.access_token = check.str_param(access_token, "access_token")
\n\n
[docs] class PATCredentials:\n
[docs] @public\n def __init__(self, personal_access_token: str):\n self.personal_access_token = check.str_param(\n personal_access_token, "personal_access_token"\n )
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n credentials: Union["GithubSource.OAuthCredentials", "GithubSource.PATCredentials"],\n start_date: str,\n repository: str,\n branch: Optional[str] = None,\n page_size_for_large_streams: Optional[int] = None,\n ):\n """Airbyte Source for Github.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/github\n\n Args:\n name (str): The name of the destination.\n credentials (Union[GithubSource.OAuthCredentials, GithubSource.PATCredentials]): Choose how to authenticate to GitHub\n start_date (str): The date from which you'd like to replicate data from GitHub in the format YYYY-MM-DDT00:00:00Z. For the streams which support this configuration, only data generated on or after the start date will be replicated. This field doesn't apply to all streams, see the docs for more info\n repository (str): Space-delimited list of GitHub organizations/repositories, e.g. `airbytehq/airbyte` for single repository, `airbytehq/*` for get all repositories from organization and `airbytehq/airbyte airbytehq/another-repo` for multiple repositories.\n branch (Optional[str]): Space-delimited list of GitHub repository branches to pull commits for, e.g. `airbytehq/airbyte/master`. If no branches are specified for a repository, the default branch will be pulled.\n page_size_for_large_streams (Optional[int]): The Github connector contains several streams with a large amount of data. The page size of such streams depends on the size of your repository. We recommended that you specify values between 10 and 30.\n """\n self.credentials = check.inst_param(\n credentials, "credentials", (GithubSource.OAuthCredentials, GithubSource.PATCredentials)\n )\n self.start_date = check.str_param(start_date, "start_date")\n self.repository = check.str_param(repository, "repository")\n self.branch = check.opt_str_param(branch, "branch")\n self.page_size_for_large_streams = check.opt_int_param(\n page_size_for_large_streams, "page_size_for_large_streams"\n )\n super().__init__("Github", name)
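# Illustrative usage sketch (not part of the generated module): a GithubSource
# using a personal access token and syncing a single repository. Token and
# tuning values are placeholders.
def _example_github_source() -> "GithubSource":
    return GithubSource(
        name="github",
        credentials=GithubSource.PATCredentials(personal_access_token="<token>"),
        start_date="2021-01-01T00:00:00Z",
        repository="airbytehq/airbyte",
        # With no branch specified, the repository's default branch is pulled.
        page_size_for_large_streams=20,
    )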
\n\n\n
[docs]class E2eTestSource(GeneratedAirbyteSource):\n
[docs] class SingleSchema:\n
[docs] @public\n def __init__(\n self, stream_name: str, stream_schema: str, stream_duplication: Optional[int] = None\n ):\n self.type = "SINGLE_STREAM"\n self.stream_name = check.str_param(stream_name, "stream_name")\n self.stream_schema = check.str_param(stream_schema, "stream_schema")\n self.stream_duplication = check.opt_int_param(stream_duplication, "stream_duplication")
\n\n
[docs] class MultiSchema:\n
[docs] @public\n def __init__(self, stream_schemas: str):\n self.type = "MULTI_STREAM"\n self.stream_schemas = check.str_param(stream_schemas, "stream_schemas")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n max_messages: int,\n mock_catalog: Union["E2eTestSource.SingleSchema", "E2eTestSource.MultiSchema"],\n type: Optional[str] = None,\n seed: Optional[int] = None,\n message_interval_ms: Optional[int] = None,\n ):\n """Airbyte Source for E2e Test.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/e2e-test\n\n Args:\n name (str): The name of the destination.\n max_messages (int): Number of records to emit per stream. Min 1. Max 100 billion.\n seed (Optional[int]): When the seed is unspecified, the current time millis will be used as the seed. Range: [0, 1000000].\n message_interval_ms (Optional[int]): Interval between messages in ms. Min 0 ms. Max 60000 ms (1 minute).\n """\n self.type = check.opt_str_param(type, "type")\n self.max_messages = check.int_param(max_messages, "max_messages")\n self.seed = check.opt_int_param(seed, "seed")\n self.message_interval_ms = check.opt_int_param(message_interval_ms, "message_interval_ms")\n self.mock_catalog = check.inst_param(\n mock_catalog, "mock_catalog", (E2eTestSource.SingleSchema, E2eTestSource.MultiSchema)\n )\n super().__init__("E2e Test", name)
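# Illustrative usage sketch (not part of the generated module): an E2eTestSource
# that emits mock records for a single stream. The JSON schema string below is
# a minimal placeholder.
def _example_e2e_test_source() -> "E2eTestSource":
    return E2eTestSource(
        name="e2e_test",
        max_messages=100,
        mock_catalog=E2eTestSource.SingleSchema(
            stream_name="users",
            stream_schema='{"type": "object", "properties": {"id": {"type": "integer"}}}',
        ),
        seed=42,
        message_interval_ms=100,
    )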
\n\n\n
[docs]class MysqlSource(GeneratedAirbyteSource):\n
[docs] class Preferred:\n
[docs] @public\n def __init__(\n self,\n ):\n self.mode = "preferred"
\n\n
[docs] class Required:\n
[docs] @public\n def __init__(\n self,\n ):\n self.mode = "required"
\n\n
[docs] class VerifyCA:\n
[docs] @public\n def __init__(\n self,\n ca_certificate: str,\n client_certificate: Optional[str] = None,\n client_key: Optional[str] = None,\n client_key_password: Optional[str] = None,\n ):\n self.mode = "verify_ca"\n self.ca_certificate = check.str_param(ca_certificate, "ca_certificate")\n self.client_certificate = check.opt_str_param(client_certificate, "client_certificate")\n self.client_key = check.opt_str_param(client_key, "client_key")\n self.client_key_password = check.opt_str_param(\n client_key_password, "client_key_password"\n )
\n\n
[docs] class VerifyIdentity:\n
[docs] @public\n def __init__(\n self,\n ca_certificate: str,\n client_certificate: Optional[str] = None,\n client_key: Optional[str] = None,\n client_key_password: Optional[str] = None,\n ):\n self.mode = "verify_identity"\n self.ca_certificate = check.str_param(ca_certificate, "ca_certificate")\n self.client_certificate = check.opt_str_param(client_certificate, "client_certificate")\n self.client_key = check.opt_str_param(client_key, "client_key")\n self.client_key_password = check.opt_str_param(\n client_key_password, "client_key_password"\n )
\n\n
[docs] class Standard:\n
[docs] @public\n def __init__(\n self,\n ):\n self.method = "STANDARD"
\n\n
[docs] class LogicalReplicationCDC:\n
[docs] @public\n def __init__(\n self,\n initial_waiting_seconds: Optional[int] = None,\n server_time_zone: Optional[str] = None,\n ):\n self.method = "CDC"\n self.initial_waiting_seconds = check.opt_int_param(\n initial_waiting_seconds, "initial_waiting_seconds"\n )\n self.server_time_zone = check.opt_str_param(server_time_zone, "server_time_zone")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n database: str,\n username: str,\n ssl_mode: Union[\n "MysqlSource.Preferred",\n "MysqlSource.Required",\n "MysqlSource.VerifyCA",\n "MysqlSource.VerifyIdentity",\n ],\n replication_method: Union["MysqlSource.Standard", "MysqlSource.LogicalReplicationCDC"],\n password: Optional[str] = None,\n jdbc_url_params: Optional[str] = None,\n ssl: Optional[bool] = None,\n ):\n """Airbyte Source for Mysql.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/mysql\n\n Args:\n name (str): The name of the destination.\n host (str): The host name of the database.\n port (int): The port to connect to.\n database (str): The database name.\n username (str): The username which is used to access the database.\n password (Optional[str]): The password associated with the username.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). For more information read about JDBC URL parameters.\n ssl (Optional[bool]): Encrypt data using SSL.\n ssl_mode (Union[MysqlSource.Preferred, MysqlSource.Required, MysqlSource.VerifyCA, MysqlSource.VerifyIdentity]): SSL connection modes. preferred - Automatically attempt SSL connection. If the MySQL server does not support SSL, continue with a regular connection.required - Always connect with SSL. If the MySQL server doesn`t support SSL, the connection will not be established. Certificate Authority (CA) and Hostname are not verified.verify-ca - Always connect with SSL. Verifies CA, but allows connection even if Hostname does not match.Verify Identity - Always connect with SSL. Verify both CA and Hostname.Read more in the docs.\n replication_method (Union[MysqlSource.Standard, MysqlSource.LogicalReplicationCDC]): Replication method to use for extracting data from the database.\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.database = check.str_param(database, "database")\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n self.ssl = check.opt_bool_param(ssl, "ssl")\n self.ssl_mode = check.inst_param(\n ssl_mode,\n "ssl_mode",\n (\n MysqlSource.Preferred,\n MysqlSource.Required,\n MysqlSource.VerifyCA,\n MysqlSource.VerifyIdentity,\n ),\n )\n self.replication_method = check.inst_param(\n replication_method,\n "replication_method",\n (MysqlSource.Standard, MysqlSource.LogicalReplicationCDC),\n )\n super().__init__("Mysql", name)
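# Illustrative usage sketch (not part of the generated module): a MysqlSource
# with preferred SSL and standard (non-CDC) replication. Connection values are
# placeholders.
def _example_mysql_source() -> "MysqlSource":
    return MysqlSource(
        name="mysql",
        host="mysql.example.com",
        port=3306,
        database="analytics",
        username="replicator",
        password="<password>",
        ssl_mode=MysqlSource.Preferred(),
        replication_method=MysqlSource.Standard(),
    )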
\n\n\n
[docs]class MyHoursSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n email: str,\n password: str,\n start_date: str,\n logs_batch_size: Optional[int] = None,\n ):\n """Airbyte Source for My Hours.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/my-hours\n\n Args:\n name (str): The name of the destination.\n email (str): Your My Hours username\n password (str): The password associated with the username\n start_date (str): Start date for collecting time logs\n logs_batch_size (Optional[int]): Pagination size used for retrieving logs in days\n """\n self.email = check.str_param(email, "email")\n self.password = check.str_param(password, "password")\n self.start_date = check.str_param(start_date, "start_date")\n self.logs_batch_size = check.opt_int_param(logs_batch_size, "logs_batch_size")\n super().__init__("My Hours", name)
\n\n\n
[docs]class KyribaSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n domain: str,\n username: str,\n password: str,\n start_date: str,\n end_date: Optional[str] = None,\n ):\n """Airbyte Source for Kyriba.\n\n Args:\n name (str): The name of the destination.\n domain (str): Kyriba domain\n username (str): Username to be used in basic auth\n password (str): Password to be used in basic auth\n start_date (str): The date the sync should start from.\n end_date (Optional[str]): The date the sync should end. If left empty, the sync will run to the current date.\n """\n self.domain = check.str_param(domain, "domain")\n self.username = check.str_param(username, "username")\n self.password = check.str_param(password, "password")\n self.start_date = check.str_param(start_date, "start_date")\n self.end_date = check.opt_str_param(end_date, "end_date")\n super().__init__("Kyriba", name)
\n\n\n
[docs]class GoogleSearchConsoleSource(GeneratedAirbyteSource):\n
[docs] class OAuth:\n
[docs] @public\n def __init__(\n self,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n access_token: Optional[str] = None,\n ):\n self.auth_type = "Client"\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.access_token = check.opt_str_param(access_token, "access_token")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] class ServiceAccountKeyAuthentication:\n
[docs] @public\n def __init__(self, service_account_info: str, email: str):\n self.auth_type = "Service"\n self.service_account_info = check.str_param(\n service_account_info, "service_account_info"\n )\n self.email = check.str_param(email, "email")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n site_urls: List[str],\n start_date: str,\n authorization: Union[\n "GoogleSearchConsoleSource.OAuth",\n "GoogleSearchConsoleSource.ServiceAccountKeyAuthentication",\n ],\n end_date: Optional[str] = None,\n custom_reports: Optional[str] = None,\n ):\n """Airbyte Source for Google Search Console.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/google-search-console\n\n Args:\n name (str): The name of the destination.\n site_urls (List[str]): The URLs of the website property attached to your GSC account. Read more here.\n start_date (str): UTC date in the format 2017-01-25. Any data before this date will not be replicated.\n end_date (Optional[str]): UTC date in the format 2017-01-25. Any data after this date will not be replicated. Must be greater or equal to the start date field.\n custom_reports (Optional[str]): A JSON array describing the custom reports you want to sync from Google Search Console. See the docs for more information about the exact format you can use to fill out this field.\n """\n self.site_urls = check.list_param(site_urls, "site_urls", str)\n self.start_date = check.str_param(start_date, "start_date")\n self.end_date = check.opt_str_param(end_date, "end_date")\n self.authorization = check.inst_param(\n authorization,\n "authorization",\n (\n GoogleSearchConsoleSource.OAuth,\n GoogleSearchConsoleSource.ServiceAccountKeyAuthentication,\n ),\n )\n self.custom_reports = check.opt_str_param(custom_reports, "custom_reports")\n super().__init__("Google Search Console", name)
\n\n\n
[docs]class FacebookMarketingSource(GeneratedAirbyteSource):\n
[docs] class InsightConfig:\n
[docs] @public\n def __init__(\n self,\n name: str,\n fields: Optional[List[str]] = None,\n breakdowns: Optional[List[str]] = None,\n action_breakdowns: Optional[List[str]] = None,\n time_increment: Optional[int] = None,\n start_date: Optional[str] = None,\n end_date: Optional[str] = None,\n insights_lookback_window: Optional[int] = None,\n ):\n self.name = check.str_param(name, "name")\n self.fields = check.opt_nullable_list_param(fields, "fields", str)\n self.breakdowns = check.opt_nullable_list_param(breakdowns, "breakdowns", str)\n self.action_breakdowns = check.opt_nullable_list_param(\n action_breakdowns, "action_breakdowns", str\n )\n self.time_increment = check.opt_int_param(time_increment, "time_increment")\n self.start_date = check.opt_str_param(start_date, "start_date")\n self.end_date = check.opt_str_param(end_date, "end_date")\n self.insights_lookback_window = check.opt_int_param(\n insights_lookback_window, "insights_lookback_window"\n )
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n account_id: str,\n start_date: str,\n access_token: str,\n end_date: Optional[str] = None,\n include_deleted: Optional[bool] = None,\n fetch_thumbnail_images: Optional[bool] = None,\n custom_insights: Optional[List[InsightConfig]] = None,\n page_size: Optional[int] = None,\n insights_lookback_window: Optional[int] = None,\n max_batch_size: Optional[int] = None,\n ):\n """Airbyte Source for Facebook Marketing.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/facebook-marketing\n\n Args:\n name (str): The name of the destination.\n account_id (str): The Facebook Ad account ID to use when pulling data from the Facebook Marketing API.\n start_date (str): The date from which you'd like to replicate data for all incremental streams, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.\n end_date (Optional[str]): The date until which you'd like to replicate data for all incremental streams, in the format YYYY-MM-DDT00:00:00Z. All data generated between start_date and this date will be replicated. Not setting this option will result in always syncing the latest data.\n access_token (str): The value of the access token generated. See the docs for more information\n include_deleted (Optional[bool]): Include data from deleted Campaigns, Ads, and AdSets\n fetch_thumbnail_images (Optional[bool]): In each Ad Creative, fetch the thumbnail_url and store the result in thumbnail_data_url\n custom_insights (Optional[List[FacebookMarketingSource.InsightConfig]]): A list which contains insights entries, each entry must have a name and can contains fields, breakdowns or action_breakdowns)\n page_size (Optional[int]): Page size used when sending requests to Facebook API to specify number of records per page when response has pagination. Most users do not need to set this field unless they specifically need to tune the connector to address specific issues or use cases.\n insights_lookback_window (Optional[int]): The attribution window\n max_batch_size (Optional[int]): Maximum batch size used when sending batch requests to Facebook API. Most users do not need to set this field unless they specifically need to tune the connector to address specific issues or use cases.\n """\n self.account_id = check.str_param(account_id, "account_id")\n self.start_date = check.str_param(start_date, "start_date")\n self.end_date = check.opt_str_param(end_date, "end_date")\n self.access_token = check.str_param(access_token, "access_token")\n self.include_deleted = check.opt_bool_param(include_deleted, "include_deleted")\n self.fetch_thumbnail_images = check.opt_bool_param(\n fetch_thumbnail_images, "fetch_thumbnail_images"\n )\n self.custom_insights = check.opt_nullable_list_param(\n custom_insights, "custom_insights", FacebookMarketingSource.InsightConfig\n )\n self.page_size = check.opt_int_param(page_size, "page_size")\n self.insights_lookback_window = check.opt_int_param(\n insights_lookback_window, "insights_lookback_window"\n )\n self.max_batch_size = check.opt_int_param(max_batch_size, "max_batch_size")\n super().__init__("Facebook Marketing", name)
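# Illustrative usage sketch (not part of the generated module): a
# FacebookMarketingSource with one custom insights report. The account ID,
# token, and the field/breakdown names are placeholders; valid names are
# defined by the Facebook Marketing API, not by this sketch.
def _example_facebook_marketing_source() -> "FacebookMarketingSource":
    return FacebookMarketingSource(
        name="facebook_marketing",
        account_id="1234567890",
        start_date="2021-01-01T00:00:00Z",
        access_token="<access token>",
        include_deleted=True,
        custom_insights=[
            FacebookMarketingSource.InsightConfig(
                name="spend_by_country",
                fields=["spend"],
                breakdowns=["country"],
            )
        ],
    )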
\n\n\n
[docs]class SurveymonkeySource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self, name: str, access_token: str, start_date: str, survey_ids: Optional[List[str]] = None\n ):\n """Airbyte Source for Surveymonkey.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/surveymonkey\n\n Args:\n name (str): The name of the destination.\n access_token (str): Access Token for making authenticated requests. See the docs for information on how to generate this key.\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.\n survey_ids (Optional[List[str]]): IDs of the surveys from which you'd like to replicate data. If left empty, data from all boards to which you have access will be replicated.\n """\n self.access_token = check.str_param(access_token, "access_token")\n self.start_date = check.str_param(start_date, "start_date")\n self.survey_ids = check.opt_nullable_list_param(survey_ids, "survey_ids", str)\n super().__init__("Surveymonkey", name)
\n\n\n
[docs]class PardotSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n pardot_business_unit_id: str,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n start_date: Optional[str] = None,\n is_sandbox: Optional[bool] = None,\n ):\n """Airbyte Source for Pardot.\n\n Args:\n name (str): The name of the destination.\n pardot_business_unit_id (str): Pardot Business ID, can be found at Setup > Pardot > Pardot Account Setup\n client_id (str): The Consumer Key that can be found when viewing your app in Salesforce\n client_secret (str): The Consumer Secret that can be found when viewing your app in Salesforce\n refresh_token (str): Salesforce Refresh Token used for Airbyte to access your Salesforce account. If you don't know what this is, follow this guide to retrieve it.\n start_date (Optional[str]): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. Leave blank to skip this filter.\n is_sandbox (Optional[bool]): Whether or not the app is in a Salesforce sandbox. If you do not know what this is, assume it is false.\n """\n self.pardot_business_unit_id = check.str_param(\n pardot_business_unit_id, "pardot_business_unit_id"\n )\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")\n self.start_date = check.opt_str_param(start_date, "start_date")\n self.is_sandbox = check.opt_bool_param(is_sandbox, "is_sandbox")\n super().__init__("Pardot", name)
\n\n\n
[docs]class FlexportSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_key: str, start_date: str):\n """Airbyte Source for Flexport.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/flexport\n\n Args:\n name (str): The name of the destination.\n\n """\n self.api_key = check.str_param(api_key, "api_key")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Flexport", name)
\n\n\n
[docs]class ZenefitsSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, token: str):\n """Airbyte Source for Zenefits.\n\n Args:\n name (str): The name of the source.\n token (str): Use the Sync with Zenefits button on the link given in the readme file to get the token used to access the API.\n """\n self.token = check.str_param(token, "token")\n super().__init__("Zenefits", name)
\n\n\n
[docs]class KafkaSource(GeneratedAirbyteSource):\n
[docs] class JSON:\n
[docs] @public\n def __init__(self, deserialization_type: Optional[str] = None):\n self.deserialization_type = check.opt_str_param(\n deserialization_type, "deserialization_type"\n )
\n\n
[docs] class AVRO:\n
[docs] @public\n def __init__(\n self,\n deserialization_type: Optional[str] = None,\n deserialization_strategy: Optional[str] = None,\n schema_registry_url: Optional[str] = None,\n schema_registry_username: Optional[str] = None,\n schema_registry_password: Optional[str] = None,\n ):\n self.deserialization_type = check.opt_str_param(\n deserialization_type, "deserialization_type"\n )\n self.deserialization_strategy = check.opt_str_param(\n deserialization_strategy, "deserialization_strategy"\n )\n self.schema_registry_url = check.opt_str_param(\n schema_registry_url, "schema_registry_url"\n )\n self.schema_registry_username = check.opt_str_param(\n schema_registry_username, "schema_registry_username"\n )\n self.schema_registry_password = check.opt_str_param(\n schema_registry_password, "schema_registry_password"\n )
\n\n
[docs] class ManuallyAssignAListOfPartitions:\n
[docs] @public\n def __init__(self, topic_partitions: str):\n self.subscription_type = "assign"\n self.topic_partitions = check.str_param(topic_partitions, "topic_partitions")
\n\n
[docs] class SubscribeToAllTopicsMatchingSpecifiedPattern:\n
[docs] @public\n def __init__(self, topic_pattern: str):\n self.subscription_type = "subscribe"\n self.topic_pattern = check.str_param(topic_pattern, "topic_pattern")
\n\n
[docs] class PLAINTEXT:\n
[docs] @public\n def __init__(self, security_protocol: str):\n self.security_protocol = check.str_param(security_protocol, "security_protocol")
\n\n
[docs] class SASLPLAINTEXT:\n
[docs] @public\n def __init__(self, security_protocol: str, sasl_mechanism: str, sasl_jaas_config: str):\n self.security_protocol = check.str_param(security_protocol, "security_protocol")\n self.sasl_mechanism = check.str_param(sasl_mechanism, "sasl_mechanism")\n self.sasl_jaas_config = check.str_param(sasl_jaas_config, "sasl_jaas_config")
\n\n
[docs] class SASLSSL:\n
[docs] @public\n def __init__(self, security_protocol: str, sasl_mechanism: str, sasl_jaas_config: str):\n self.security_protocol = check.str_param(security_protocol, "security_protocol")\n self.sasl_mechanism = check.str_param(sasl_mechanism, "sasl_mechanism")\n self.sasl_jaas_config = check.str_param(sasl_jaas_config, "sasl_jaas_config")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n MessageFormat: Union["KafkaSource.JSON", "KafkaSource.AVRO"],\n bootstrap_servers: str,\n subscription: Union[\n "KafkaSource.ManuallyAssignAListOfPartitions",\n "KafkaSource.SubscribeToAllTopicsMatchingSpecifiedPattern",\n ],\n protocol: Union[\n "KafkaSource.PLAINTEXT", "KafkaSource.SASLPLAINTEXT", "KafkaSource.SASLSSL"\n ],\n test_topic: Optional[str] = None,\n group_id: Optional[str] = None,\n max_poll_records: Optional[int] = None,\n polling_time: Optional[int] = None,\n client_id: Optional[str] = None,\n enable_auto_commit: Optional[bool] = None,\n auto_commit_interval_ms: Optional[int] = None,\n client_dns_lookup: Optional[str] = None,\n retry_backoff_ms: Optional[int] = None,\n request_timeout_ms: Optional[int] = None,\n receive_buffer_bytes: Optional[int] = None,\n auto_offset_reset: Optional[str] = None,\n repeated_calls: Optional[int] = None,\n max_records_process: Optional[int] = None,\n ):\n """Airbyte Source for Kafka.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/kafka\n\n Args:\n name (str): The name of the destination.\n MessageFormat (Union[KafkaSource.JSON, KafkaSource.AVRO]): The serialization used based on this\n bootstrap_servers (str): A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping&mdash;this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).\n subscription (Union[KafkaSource.ManuallyAssignAListOfPartitions, KafkaSource.SubscribeToAllTopicsMatchingSpecifiedPattern]): You can choose to manually assign a list of partitions, or subscribe to all topics matching specified pattern to get dynamically assigned partitions.\n test_topic (Optional[str]): The Topic to test in case the Airbyte can consume messages.\n group_id (Optional[str]): The Group ID is how you distinguish different consumer groups.\n max_poll_records (Optional[int]): The maximum number of records returned in a single call to poll(). Note, that max_poll_records does not impact the underlying fetching behavior. The consumer will cache the records from each fetch request and returns them incrementally from each poll.\n polling_time (Optional[int]): Amount of time Kafka connector should try to poll for messages.\n protocol (Union[KafkaSource.PLAINTEXT, KafkaSource.SASLPLAINTEXT, KafkaSource.SASLSSL]): The Protocol used to communicate with brokers.\n client_id (Optional[str]): An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.\n enable_auto_commit (Optional[bool]): If true, the consumer's offset will be periodically committed in the background.\n auto_commit_interval_ms (Optional[int]): The frequency in milliseconds that the consumer offsets are auto-committed to Kafka if enable.auto.commit is set to true.\n client_dns_lookup (Optional[str]): Controls how the client uses DNS lookups. 
If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.\n retry_backoff_ms (Optional[int]): The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios.\n request_timeout_ms (Optional[int]): The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.\n receive_buffer_bytes (Optional[int]): The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.\n auto_offset_reset (Optional[str]): What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server - earliest: automatically reset the offset to the earliest offset, latest: automatically reset the offset to the latest offset, none: throw exception to the consumer if no previous offset is found for the consumer's group, anything else: throw exception to the consumer.\n repeated_calls (Optional[int]): The number of repeated calls to poll() if no messages were received.\n max_records_process (Optional[int]): The Maximum to be processed per execution\n """\n self.MessageFormat = check.inst_param(\n MessageFormat, "MessageFormat", (KafkaSource.JSON, KafkaSource.AVRO)\n )\n self.bootstrap_servers = check.str_param(bootstrap_servers, "bootstrap_servers")\n self.subscription = check.inst_param(\n subscription,\n "subscription",\n (\n KafkaSource.ManuallyAssignAListOfPartitions,\n KafkaSource.SubscribeToAllTopicsMatchingSpecifiedPattern,\n ),\n )\n self.test_topic = check.opt_str_param(test_topic, "test_topic")\n self.group_id = check.opt_str_param(group_id, "group_id")\n self.max_poll_records = check.opt_int_param(max_poll_records, "max_poll_records")\n self.polling_time = check.opt_int_param(polling_time, "polling_time")\n self.protocol = check.inst_param(\n protocol,\n "protocol",\n (KafkaSource.PLAINTEXT, KafkaSource.SASLPLAINTEXT, KafkaSource.SASLSSL),\n )\n self.client_id = check.opt_str_param(client_id, "client_id")\n self.enable_auto_commit = check.opt_bool_param(enable_auto_commit, "enable_auto_commit")\n self.auto_commit_interval_ms = check.opt_int_param(\n auto_commit_interval_ms, "auto_commit_interval_ms"\n )\n self.client_dns_lookup = check.opt_str_param(client_dns_lookup, "client_dns_lookup")\n self.retry_backoff_ms = check.opt_int_param(retry_backoff_ms, "retry_backoff_ms")\n self.request_timeout_ms = check.opt_int_param(request_timeout_ms, "request_timeout_ms")\n self.receive_buffer_bytes = check.opt_int_param(\n receive_buffer_bytes, "receive_buffer_bytes"\n )\n self.auto_offset_reset = check.opt_str_param(auto_offset_reset, "auto_offset_reset")\n self.repeated_calls = check.opt_int_param(repeated_calls, "repeated_calls")\n self.max_records_process = 
check.opt_int_param(max_records_process, "max_records_process")\n super().__init__("Kafka", name)
\n
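Because KafkaSource is configured through several nested helper classes, a short sketch may help show how the MessageFormat, subscription, and protocol arguments fit together. The broker address and topic pattern below are placeholders.

.. code-block:: python

    from dagster_airbyte.managed.generated.sources import KafkaSource

    # Placeholder broker address and topic pattern for illustration only.
    kafka_source = KafkaSource(
        name="kafka",
        MessageFormat=KafkaSource.JSON(),
        bootstrap_servers="localhost:9092",
        subscription=KafkaSource.SubscribeToAllTopicsMatchingSpecifiedPattern(
            topic_pattern="orders.*"
        ),
        protocol=KafkaSource.PLAINTEXT(security_protocol="PLAINTEXT"),
    )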
", "current_page_name": "_modules/dagster_airbyte/managed/generated/sources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_airbyte.managed.generated.sources"}}, "reconciliation": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_airbyte.managed.reconciliation

\nfrom typing import (\n    Any,\n    Callable,\n    Dict,\n    Iterable,\n    List,\n    Mapping,\n    Optional,\n    Sequence,\n    Tuple,\n    Union,\n    cast,\n)\n\nimport dagster._check as check\nfrom dagster import AssetKey\nfrom dagster._annotations import experimental, public\nfrom dagster._core.definitions.cacheable_assets import CacheableAssetsDefinition\nfrom dagster._core.definitions.events import CoercibleToAssetKeyPrefix\nfrom dagster._core.definitions.freshness_policy import FreshnessPolicy\nfrom dagster._core.definitions.resource_definition import ResourceDefinition\nfrom dagster._core.execution.context.init import build_init_resource_context\nfrom dagster._utils.merger import deep_merge_dicts\nfrom dagster_managed_elements import (\n    ManagedElementCheckResult,\n    ManagedElementDiff,\n    ManagedElementError,\n)\nfrom dagster_managed_elements.types import (\n    SECRET_MASK_VALUE,\n    ManagedElementReconciler,\n    is_key_secret,\n)\nfrom dagster_managed_elements.utils import UNSET, diff_dicts\n\nfrom dagster_airbyte.asset_defs import (\n    AirbyteConnectionMetadata,\n    AirbyteInstanceCacheableAssetsDefinition,\n    _clean_name,\n)\nfrom dagster_airbyte.managed.types import (\n    AirbyteConnection,\n    AirbyteDestination,\n    AirbyteDestinationNamespace,\n    AirbyteSource,\n    AirbyteSyncMode,\n    InitializedAirbyteConnection,\n    InitializedAirbyteDestination,\n    InitializedAirbyteSource,\n)\nfrom dagster_airbyte.resources import AirbyteResource\nfrom dagster_airbyte.utils import is_basic_normalization_operation\n\n\ndef gen_configured_stream_json(\n    source_stream: Mapping[str, Any], user_stream_config: Mapping[str, AirbyteSyncMode]\n) -> Mapping[str, Any]:\n    """Generates an Airbyte API stream defintiion based on the succinct user-provided config and the\n    full stream definition from the source.\n    """\n    config = user_stream_config[source_stream["stream"]["name"]]\n    return deep_merge_dicts(\n        source_stream,\n        {"config": config.to_json()},\n    )\n\n\ndef _ignore_secrets_compare_fn(k: str, _cv: Any, dv: Any) -> Optional[bool]:\n    if is_key_secret(k):\n        return dv == SECRET_MASK_VALUE\n    return None\n\n\ndef _diff_configs(\n    config_dict: Mapping[str, Any], dst_dict: Mapping[str, Any], ignore_secrets: bool = True\n) -> ManagedElementDiff:\n    return diff_dicts(\n        config_dict=config_dict,\n        dst_dict=dst_dict,\n        custom_compare_fn=_ignore_secrets_compare_fn if ignore_secrets else None,\n    )\n\n\ndef diff_sources(\n    config_src: Optional[AirbyteSource],\n    curr_src: Optional[AirbyteSource],\n    ignore_secrets: bool = True,\n) -> ManagedElementCheckResult:\n    """Utility to diff two AirbyteSource objects."""\n    diff = _diff_configs(\n        config_src.source_configuration if config_src else {},\n        curr_src.source_configuration if curr_src else {},\n        ignore_secrets,\n    )\n    if not diff.is_empty():\n        name = config_src.name if config_src else curr_src.name if curr_src else "Unknown"\n        return ManagedElementDiff().with_nested(name, diff)\n\n    return ManagedElementDiff()\n\n\ndef diff_destinations(\n    config_dst: Optional[AirbyteDestination],\n    curr_dst: Optional[AirbyteDestination],\n    ignore_secrets: bool = True,\n) -> ManagedElementCheckResult:\n    """Utility to diff two AirbyteDestination objects."""\n    diff = _diff_configs(\n        config_dst.destination_configuration if config_dst else {},\n        curr_dst.destination_configuration if 
curr_dst else {},\n        ignore_secrets,\n    )\n    if not diff.is_empty():\n        name = config_dst.name if config_dst else curr_dst.name if curr_dst else "Unknown"\n        return ManagedElementDiff().with_nested(name, diff)\n\n    return ManagedElementDiff()\n\n\ndef conn_dict(conn: Optional[AirbyteConnection]) -> Mapping[str, Any]:\n    if not conn:\n        return {}\n    return {\n        "source": conn.source.name if conn.source else "Unknown",\n        "destination": conn.destination.name if conn.destination else "Unknown",\n        "normalize data": conn.normalize_data,\n        "streams": {k: v.to_json() for k, v in conn.stream_config.items()},\n        "destination namespace": (\n            conn.destination_namespace.name\n            if isinstance(conn.destination_namespace, AirbyteDestinationNamespace)\n            else conn.destination_namespace\n        ),\n        "prefix": conn.prefix,\n    }\n\n\nOPTIONAL_STREAM_SETTINGS = ("cursorField", "primaryKey")\n\n\ndef _compare_stream_values(k: str, cv: str, _dv: str):\n    """Don't register a diff for optional stream settings if the value is not set\n    in the user-provided config, this means it will default to the value in the\n    source.\n    """\n    return True if k in OPTIONAL_STREAM_SETTINGS and cv == UNSET else None\n\n\ndef diff_connections(\n    config_conn: Optional[AirbyteConnection], curr_conn: Optional[AirbyteConnection]\n) -> ManagedElementCheckResult:\n    """Utility to diff two AirbyteConnection objects."""\n    diff = diff_dicts(\n        conn_dict(config_conn),\n        conn_dict(curr_conn),\n        custom_compare_fn=_compare_stream_values,\n    )\n    if not diff.is_empty():\n        name = config_conn.name if config_conn else curr_conn.name if curr_conn else "Unknown"\n        return ManagedElementDiff().with_nested(name, diff)\n\n    return ManagedElementDiff()\n\n\ndef reconcile_sources(\n    res: AirbyteResource,\n    config_sources: Mapping[str, AirbyteSource],\n    existing_sources: Mapping[str, InitializedAirbyteSource],\n    workspace_id: str,\n    dry_run: bool,\n    should_delete: bool,\n    ignore_secrets: bool,\n) -> Tuple[Mapping[str, InitializedAirbyteSource], ManagedElementCheckResult]:\n    """Generates a diff of the configured and existing sources and reconciles them to match the\n    configured state if dry_run is False.\n    """\n    diff = ManagedElementDiff()\n\n    initialized_sources: Dict[str, InitializedAirbyteSource] = {}\n    for source_name in set(config_sources.keys()).union(existing_sources.keys()):\n        configured_source = config_sources.get(source_name)\n        existing_source = existing_sources.get(source_name)\n\n        # Ignore sources not mentioned in the user config unless the user specifies to delete\n        if not should_delete and existing_source and not configured_source:\n            initialized_sources[source_name] = existing_source\n            continue\n\n        diff = diff.join(\n            diff_sources(  # type: ignore\n                configured_source,\n                existing_source.source if existing_source else None,\n                ignore_secrets,\n            )\n        )\n\n        if existing_source and (\n            not configured_source or (configured_source.must_be_recreated(existing_source.source))\n        ):\n            initialized_sources[source_name] = existing_source\n            if not dry_run:\n                res.make_request(\n                    endpoint="/sources/delete",\n                    data={"sourceId": 
existing_source.source_id},\n                )\n            existing_source = None\n\n        if configured_source:\n            defn_id = check.not_none(\n                res.get_source_definition_by_name(configured_source.source_type)\n            )\n            base_source_defn_dict = {\n                "name": configured_source.name,\n                "connectionConfiguration": configured_source.source_configuration,\n            }\n            source_id = ""\n            if existing_source:\n                source_id = existing_source.source_id\n                if not dry_run:\n                    res.make_request(\n                        endpoint="/sources/update",\n                        data={"sourceId": source_id, **base_source_defn_dict},\n                    )\n            else:\n                if not dry_run:\n                    create_result = cast(\n                        Dict[str, str],\n                        check.not_none(\n                            res.make_request(\n                                endpoint="/sources/create",\n                                data={\n                                    "sourceDefinitionId": defn_id,\n                                    "workspaceId": workspace_id,\n                                    **base_source_defn_dict,\n                                },\n                            )\n                        ),\n                    )\n                    source_id = create_result["sourceId"]\n\n            if source_name in initialized_sources:\n                # Preserve to be able to initialize old connection object\n                initialized_sources[f"{source_name}_old"] = initialized_sources[source_name]\n            initialized_sources[source_name] = InitializedAirbyteSource(\n                source=configured_source,\n                source_id=source_id,\n                source_definition_id=defn_id,\n            )\n    return initialized_sources, diff\n\n\ndef reconcile_destinations(\n    res: AirbyteResource,\n    config_destinations: Mapping[str, AirbyteDestination],\n    existing_destinations: Mapping[str, InitializedAirbyteDestination],\n    workspace_id: str,\n    dry_run: bool,\n    should_delete: bool,\n    ignore_secrets: bool,\n) -> Tuple[Mapping[str, InitializedAirbyteDestination], ManagedElementCheckResult]:\n    """Generates a diff of the configured and existing destinations and reconciles them to match the\n    configured state if dry_run is False.\n    """\n    diff = ManagedElementDiff()\n\n    initialized_destinations: Dict[str, InitializedAirbyteDestination] = {}\n    for destination_name in set(config_destinations.keys()).union(existing_destinations.keys()):\n        configured_destination = config_destinations.get(destination_name)\n        existing_destination = existing_destinations.get(destination_name)\n\n        # Ignore destinations not mentioned in the user config unless the user specifies to delete\n        if not should_delete and existing_destination and not configured_destination:\n            initialized_destinations[destination_name] = existing_destination\n            continue\n\n        diff = diff.join(\n            diff_destinations(  # type: ignore\n                configured_destination,\n                existing_destination.destination if existing_destination else None,\n                ignore_secrets,\n            )\n        )\n\n        if existing_destination and (\n            not configured_destination\n            or 
(configured_destination.must_be_recreated(existing_destination.destination))\n        ):\n            initialized_destinations[destination_name] = existing_destination\n            if not dry_run:\n                res.make_request(\n                    endpoint="/destinations/delete",\n                    data={"destinationId": existing_destination.destination_id},\n                )\n            existing_destination = None\n\n        if configured_destination:\n            defn_id = res.get_destination_definition_by_name(\n                configured_destination.destination_type\n            )\n            base_destination_defn_dict = {\n                "name": configured_destination.name,\n                "connectionConfiguration": configured_destination.destination_configuration,\n            }\n            destination_id = ""\n            if existing_destination:\n                destination_id = existing_destination.destination_id\n                if not dry_run:\n                    res.make_request(\n                        endpoint="/destinations/update",\n                        data={"destinationId": destination_id, **base_destination_defn_dict},\n                    )\n            else:\n                if not dry_run:\n                    create_result = cast(\n                        Dict[str, str],\n                        check.not_none(\n                            res.make_request(\n                                endpoint="/destinations/create",\n                                data={\n                                    "destinationDefinitionId": defn_id,\n                                    "workspaceId": workspace_id,\n                                    **base_destination_defn_dict,\n                                },\n                            )\n                        ),\n                    )\n                    destination_id = create_result["destinationId"]\n\n            if destination_name in initialized_destinations:\n                # Preserve to be able to initialize old connection object\n                initialized_destinations[f"{destination_name}_old"] = initialized_destinations[\n                    destination_name\n                ]\n            initialized_destinations[destination_name] = InitializedAirbyteDestination(\n                destination=configured_destination,\n                destination_id=destination_id,\n                destination_definition_id=defn_id,\n            )\n    return initialized_destinations, diff\n\n\ndef reconcile_config(\n    res: AirbyteResource,\n    objects: Sequence[AirbyteConnection],\n    dry_run: bool = False,\n    should_delete: bool = False,\n    ignore_secrets: bool = True,\n) -> ManagedElementCheckResult:\n    """Main entry point for the reconciliation process. 
Takes a list of AirbyteConnection objects\n    and a pointer to an Airbyte instance and returns a diff, along with applying the diff\n    if dry_run is False.\n    """\n    with res.cache_requests():\n        config_connections = {conn.name: conn for conn in objects}\n        config_sources = {conn.source.name: conn.source for conn in objects}\n        config_dests = {conn.destination.name: conn.destination for conn in objects}\n\n        workspace_id = res.get_default_workspace()\n\n        existing_sources_raw = cast(\n            Dict[str, List[Dict[str, Any]]],\n            check.not_none(\n                res.make_request(endpoint="/sources/list", data={"workspaceId": workspace_id})\n            ),\n        )\n        existing_dests_raw = cast(\n            Dict[str, List[Dict[str, Any]]],\n            check.not_none(\n                res.make_request(endpoint="/destinations/list", data={"workspaceId": workspace_id})\n            ),\n        )\n\n        existing_sources: Dict[str, InitializedAirbyteSource] = {\n            source_json["name"]: InitializedAirbyteSource.from_api_json(source_json)\n            for source_json in existing_sources_raw.get("sources", [])\n        }\n        existing_dests: Dict[str, InitializedAirbyteDestination] = {\n            destination_json["name"]: InitializedAirbyteDestination.from_api_json(destination_json)\n            for destination_json in existing_dests_raw.get("destinations", [])\n        }\n\n        # First, remove any connections that need to be deleted, so that we can\n        # safely delete any sources/destinations that are no longer referenced\n        # or that need to be recreated.\n        connections_diff = reconcile_connections_pre(\n            res,\n            config_connections,\n            existing_sources,\n            existing_dests,\n            workspace_id,\n            dry_run,\n            should_delete,\n        )\n\n        all_sources, sources_diff = reconcile_sources(\n            res,\n            config_sources,\n            existing_sources,\n            workspace_id,\n            dry_run,\n            should_delete,\n            ignore_secrets,\n        )\n        all_dests, dests_diff = reconcile_destinations(\n            res, config_dests, existing_dests, workspace_id, dry_run, should_delete, ignore_secrets\n        )\n\n        # Now that we have updated the set of sources and destinations, we can\n        # recreate or update any connections which depend on them.\n        reconcile_connections_post(\n            res,\n            config_connections,\n            all_sources,\n            all_dests,\n            workspace_id,\n            dry_run,\n        )\n\n        return ManagedElementDiff().join(sources_diff).join(dests_diff).join(connections_diff)  # type: ignore\n\n\ndef reconcile_normalization(\n    res: AirbyteResource,\n    existing_connection_id: Optional[str],\n    destination: InitializedAirbyteDestination,\n    normalization_config: Optional[bool],\n    workspace_id: str,\n) -> Optional[str]:\n    """Reconciles the normalization configuration for a connection.\n\n    If normalization_config is None, then defaults to True on destinations that support normalization\n    and False on destinations that do not.\n    """\n    existing_basic_norm_op_id = None\n    if existing_connection_id:\n        operations = cast(\n            Dict[str, List[Dict[str, str]]],\n            check.not_none(\n                res.make_request(\n                    endpoint="/operations/list",\n                    
data={"connectionId": existing_connection_id},\n                )\n            ),\n        )\n        existing_basic_norm_op = next(\n            (\n                operation\n                for operation in operations["operations"]\n                if is_basic_normalization_operation(operation)\n            ),\n            None,\n        )\n        existing_basic_norm_op_id = (\n            existing_basic_norm_op["operationId"] if existing_basic_norm_op else None\n        )\n\n    if normalization_config is not False:\n        if destination.destination_definition_id and res.does_dest_support_normalization(\n            destination.destination_definition_id, workspace_id\n        ):\n            if existing_basic_norm_op_id:\n                return existing_basic_norm_op_id\n            else:\n                return cast(\n                    Dict[str, str],\n                    check.not_none(\n                        res.make_request(\n                            endpoint="/operations/create",\n                            data={\n                                "workspaceId": workspace_id,\n                                "name": "Normalization",\n                                "operatorConfiguration": {\n                                    "operatorType": "normalization",\n                                    "normalization": {"option": "basic"},\n                                },\n                            },\n                        )\n                    ),\n                )["operationId"]\n        elif normalization_config is True:\n            raise Exception(\n                f"Destination {destination.destination.name} does not support normalization."\n            )\n\n    return None\n\n\ndef reconcile_connections_pre(\n    res: AirbyteResource,\n    config_connections: Mapping[str, AirbyteConnection],\n    existing_sources: Mapping[str, InitializedAirbyteSource],\n    existing_destinations: Mapping[str, InitializedAirbyteDestination],\n    workspace_id: str,\n    dry_run: bool,\n    should_delete: bool,\n) -> ManagedElementCheckResult:\n    """Generates the diff for connections, and deletes any connections that are not in the config if\n    dry_run is False.\n\n    It's necessary to do this in two steps because we need to remove connections that depend on\n    sources and destinations that are being deleted or recreated before Airbyte will allow us to\n    delete or recreate them.\n    """\n    diff = ManagedElementDiff()\n\n    existing_connections_raw = cast(\n        Dict[str, List[Dict[str, Any]]],\n        check.not_none(\n            res.make_request(endpoint="/connections/list", data={"workspaceId": workspace_id})\n        ),\n    )\n    existing_connections: Dict[str, InitializedAirbyteConnection] = {\n        connection_json["name"]: InitializedAirbyteConnection.from_api_json(\n            connection_json, existing_sources, existing_destinations\n        )\n        for connection_json in existing_connections_raw.get("connections", [])\n    }\n\n    for conn_name in set(config_connections.keys()).union(existing_connections.keys()):\n        config_conn = config_connections.get(conn_name)\n        existing_conn = existing_connections.get(conn_name)\n\n        # Ignore connections not mentioned in the user config unless the user specifies to delete\n        if not should_delete and not config_conn:\n            continue\n\n        diff = diff.join(\n            diff_connections(config_conn, existing_conn.connection if existing_conn else None)  # type: ignore\n       
 )\n\n        if existing_conn and (\n            not config_conn or config_conn.must_be_recreated(existing_conn.connection)\n        ):\n            if not dry_run:\n                res.make_request(\n                    endpoint="/connections/delete",\n                    data={"connectionId": existing_conn.connection_id},\n                )\n    return diff\n\n\ndef reconcile_connections_post(\n    res: AirbyteResource,\n    config_connections: Mapping[str, AirbyteConnection],\n    init_sources: Mapping[str, InitializedAirbyteSource],\n    init_dests: Mapping[str, InitializedAirbyteDestination],\n    workspace_id: str,\n    dry_run: bool,\n) -> None:\n    """Creates new and modifies existing connections based on the config if dry_run is False."""\n    existing_connections_raw = cast(\n        Dict[str, List[Dict[str, Any]]],\n        check.not_none(\n            res.make_request(endpoint="/connections/list", data={"workspaceId": workspace_id})\n        ),\n    )\n    existing_connections = {\n        connection_json["name"]: InitializedAirbyteConnection.from_api_json(\n            connection_json, init_sources, init_dests\n        )\n        for connection_json in existing_connections_raw.get("connections", [])\n    }\n\n    for conn_name, config_conn in config_connections.items():\n        existing_conn = existing_connections.get(conn_name)\n\n        normalization_operation_id = None\n        if not dry_run:\n            destination = init_dests[config_conn.destination.name]\n\n            # Enable or disable basic normalization based on config\n            normalization_operation_id = reconcile_normalization(\n                res,\n                existing_connections.get("name", {}).get("connectionId"),\n                destination,\n                config_conn.normalize_data,\n                workspace_id,\n            )\n\n        configured_streams = []\n        if not dry_run:\n            source = init_sources[config_conn.source.name]\n            schema = res.get_source_schema(source.source_id)\n            base_streams = schema["catalog"]["streams"]\n\n            configured_streams = [\n                gen_configured_stream_json(stream, config_conn.stream_config)\n                for stream in base_streams\n                if stream["stream"]["name"] in config_conn.stream_config\n            ]\n\n        connection_base_json = {\n            "name": conn_name,\n            "namespaceDefinition": "source",\n            "namespaceFormat": "${SOURCE_NAMESPACE}",\n            "prefix": "",\n            "operationIds": [normalization_operation_id] if normalization_operation_id else [],\n            "syncCatalog": {"streams": configured_streams},\n            "scheduleType": "manual",\n            "status": "active",\n        }\n\n        if isinstance(config_conn.destination_namespace, AirbyteDestinationNamespace):\n            connection_base_json["namespaceDefinition"] = config_conn.destination_namespace.value\n        else:\n            connection_base_json["namespaceDefinition"] = "customformat"\n            connection_base_json["namespaceFormat"] = cast(str, config_conn.destination_namespace)\n\n        if config_conn.prefix:\n            connection_base_json["prefix"] = config_conn.prefix\n\n        if existing_conn:\n            if not dry_run:\n                source = init_sources[config_conn.source.name]\n                res.make_request(\n                    endpoint="/connections/update",\n                    data={\n                        **connection_base_json,\n   
                     "sourceCatalogId": res.get_source_catalog_id(source.source_id),\n                        "connectionId": existing_conn.connection_id,\n                    },\n                )\n        else:\n            if not dry_run:\n                source = init_sources[config_conn.source.name]\n                destination = init_dests[config_conn.destination.name]\n\n                res.make_request(\n                    endpoint="/connections/create",\n                    data={\n                        **connection_base_json,\n                        "sourceCatalogId": res.get_source_catalog_id(source.source_id),\n                        "sourceId": source.source_id,\n                        "destinationId": destination.destination_id,\n                    },\n                )\n\n\n
[docs]@experimental\nclass AirbyteManagedElementReconciler(ManagedElementReconciler):\n """Reconciles Python-specified Airbyte connections with an Airbyte instance.\n\n Passing the module containing an AirbyteManagedElementReconciler to the dagster-airbyte\n CLI will allow you to check the state of your Python-code-specified Airbyte connections\n against an Airbyte instance, and reconcile them if necessary.\n\n This functionality is experimental and subject to change.\n """\n\n
[docs] @public\n def __init__(\n self,\n airbyte: Union[AirbyteResource, ResourceDefinition],\n connections: Iterable[AirbyteConnection],\n delete_unmentioned_resources: bool = False,\n ):\n """Reconciles Python-specified Airbyte connections with an Airbyte instance.\n\n Args:\n airbyte (Union[AirbyteResource, ResourceDefinition]): The Airbyte resource definition to reconcile against.\n connections (Iterable[AirbyteConnection]): The Airbyte connection objects to reconcile.\n delete_unmentioned_resources (bool): Whether to delete resources that are not mentioned in\n the set of connections provided. When True, all Airbyte instance contents are effectively\n managed by the reconciler. Defaults to False.\n """\n # airbyte = check.inst_param(airbyte, "airbyte", ResourceDefinition)\n\n self._airbyte_instance: AirbyteResource = (\n airbyte\n if isinstance(airbyte, AirbyteResource)\n else airbyte(build_init_resource_context())\n )\n self._connections = list(\n check.iterable_param(connections, "connections", of_type=AirbyteConnection)\n )\n self._delete_unmentioned_resources = check.bool_param(\n delete_unmentioned_resources, "delete_unmentioned_resources"\n )\n\n super().__init__()
\n\n def check(self, **kwargs) -> ManagedElementCheckResult:\n return reconcile_config(\n self._airbyte_instance,\n self._connections,\n dry_run=True,\n should_delete=self._delete_unmentioned_resources,\n ignore_secrets=(not kwargs.get("include_all_secrets", False)),\n )\n\n def apply(self, **kwargs) -> ManagedElementCheckResult:\n return reconcile_config(\n self._airbyte_instance,\n self._connections,\n dry_run=False,\n should_delete=self._delete_unmentioned_resources,\n ignore_secrets=(not kwargs.get("include_all_secrets", False)),\n )
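For context, a reconciler is typically declared at module scope so that the containing module can be passed to the dagster-airbyte CLI. The sketch below assumes an Airbyte instance running locally and a list of AirbyteConnection objects; the empty list is a placeholder.

.. code-block:: python

    from dagster_airbyte import AirbyteManagedElementReconciler, AirbyteResource

    airbyte_instance = AirbyteResource(host="localhost", port="8000")

    # `connections` would hold AirbyteConnection objects, e.g. [cereals_connection].
    reconciler = AirbyteManagedElementReconciler(
        airbyte=airbyte_instance,
        connections=[],
        delete_unmentioned_resources=False,
    )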
\n\n\nclass AirbyteManagedElementCacheableAssetsDefinition(AirbyteInstanceCacheableAssetsDefinition):\n def __init__(\n self,\n airbyte_resource_def: AirbyteResource,\n key_prefix: Sequence[str],\n create_assets_for_normalization_tables: bool,\n connection_to_group_fn: Optional[Callable[[str], Optional[str]]],\n connections: Iterable[AirbyteConnection],\n connection_to_io_manager_key_fn: Optional[Callable[[str], Optional[str]]],\n connection_to_asset_key_fn: Optional[Callable[[AirbyteConnectionMetadata, str], AssetKey]],\n connection_to_freshness_policy_fn: Optional[\n Callable[[AirbyteConnectionMetadata], Optional[FreshnessPolicy]]\n ],\n ):\n defined_conn_names = {conn.name for conn in connections}\n super().__init__(\n airbyte_resource_def=airbyte_resource_def,\n workspace_id=None,\n key_prefix=key_prefix,\n create_assets_for_normalization_tables=create_assets_for_normalization_tables,\n connection_to_group_fn=connection_to_group_fn,\n connection_to_io_manager_key_fn=connection_to_io_manager_key_fn,\n connection_filter=lambda conn: conn.name in defined_conn_names,\n connection_to_asset_key_fn=connection_to_asset_key_fn,\n connection_to_freshness_policy_fn=connection_to_freshness_policy_fn,\n )\n self._connections: List[AirbyteConnection] = list(connections)\n\n def _get_connections(self) -> Sequence[Tuple[str, AirbyteConnectionMetadata]]:\n diff = reconcile_config(self._airbyte_instance, self._connections, dry_run=True)\n if isinstance(diff, ManagedElementDiff) and not diff.is_empty():\n raise ValueError(\n "Airbyte connections are not in sync with provided configuration, diff:\\n{}".format(\n str(diff)\n )\n )\n elif isinstance(diff, ManagedElementError):\n raise ValueError(f"Error checking Airbyte connections: {diff}")\n\n return super()._get_connections()\n\n\n
[docs]@experimental\ndef load_assets_from_connections(\n airbyte: Union[AirbyteResource, ResourceDefinition],\n connections: Iterable[AirbyteConnection],\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n create_assets_for_normalization_tables: bool = True,\n connection_to_group_fn: Optional[Callable[[str], Optional[str]]] = _clean_name,\n io_manager_key: Optional[str] = None,\n connection_to_io_manager_key_fn: Optional[Callable[[str], Optional[str]]] = None,\n connection_to_asset_key_fn: Optional[\n Callable[[AirbyteConnectionMetadata, str], AssetKey]\n ] = None,\n connection_to_freshness_policy_fn: Optional[\n Callable[[AirbyteConnectionMetadata], Optional[FreshnessPolicy]]\n ] = None,\n) -> CacheableAssetsDefinition:\n """Loads Airbyte connection assets from a configured AirbyteResource instance, checking against a list of AirbyteConnection objects.\n This method will raise an error on repo load if the passed AirbyteConnection objects are not in sync with the Airbyte instance.\n\n Args:\n airbyte (Union[AirbyteResource, ResourceDefinition]): An AirbyteResource configured with the appropriate connection\n details.\n connections (Iterable[AirbyteConnection]): A list of AirbyteConnection objects to build assets for.\n key_prefix (Optional[CoercibleToAssetKeyPrefix]): A prefix for the asset keys created.\n create_assets_for_normalization_tables (bool): If True, assets will be created for tables\n created by Airbyte's normalization feature. If False, only the destination tables\n will be created. Defaults to True.\n connection_to_group_fn (Optional[Callable[[str], Optional[str]]]): Function which returns an asset\n group name for a given Airbyte connection name. If None, no groups will be created. Defaults\n to a basic sanitization function.\n io_manager_key (Optional[str]): The IO manager key to use for all assets. Defaults to "io_manager".\n Use this if all assets should be loaded from the same source, otherwise use connection_to_io_manager_key_fn.\n connection_to_io_manager_key_fn (Optional[Callable[[str], Optional[str]]]): Function which returns an\n IO manager key for a given Airbyte connection name. When other ops are downstream of the loaded assets,\n the IOManager specified determines how the inputs to those ops are loaded. Defaults to "io_manager".\n connection_to_asset_key_fn (Optional[Callable[[AirbyteConnectionMetadata, str], AssetKey]]): Optional function which\n takes in connection metadata and table name and returns an asset key for the table. If None, the default asset\n key is based on the table name. Any asset key prefix will be applied to the output of this function.\n connection_to_freshness_policy_fn (Optional[Callable[[AirbyteConnectionMetadata], Optional[FreshnessPolicy]]]): Optional function which\n takes in connection metadata and returns a freshness policy for the connection. If None, no freshness policy will be applied.\n\n **Examples:**\n\n .. 
code-block:: python\n\n from dagster_airbyte import (\n AirbyteConnection,\n AirbyteResource,\n load_assets_from_connections,\n )\n\n airbyte_instance = AirbyteResource(\n host="localhost",\n port="8000",\n )\n airbyte_connections = [\n AirbyteConnection(...),\n AirbyteConnection(...)\n ]\n airbyte_assets = load_assets_from_connections(airbyte_instance, airbyte_connections)\n """\n if isinstance(key_prefix, str):\n key_prefix = [key_prefix]\n key_prefix = check.list_param(key_prefix or [], "key_prefix", of_type=str)\n\n check.invariant(\n not io_manager_key or not connection_to_io_manager_key_fn,\n "Cannot specify both io_manager_key and connection_to_io_manager_key_fn",\n )\n if not connection_to_io_manager_key_fn:\n connection_to_io_manager_key_fn = lambda _: io_manager_key\n\n return AirbyteManagedElementCacheableAssetsDefinition(\n airbyte_resource_def=(\n airbyte\n if isinstance(airbyte, AirbyteResource)\n else airbyte(build_init_resource_context())\n ),\n key_prefix=key_prefix,\n create_assets_for_normalization_tables=check.bool_param(\n create_assets_for_normalization_tables, "create_assets_for_normalization_tables"\n ),\n connection_to_group_fn=check.opt_callable_param(\n connection_to_group_fn, "connection_to_group_fn"\n ),\n connection_to_io_manager_key_fn=connection_to_io_manager_key_fn,\n connections=check.iterable_param(connections, "connections", of_type=AirbyteConnection),\n connection_to_asset_key_fn=connection_to_asset_key_fn,\n connection_to_freshness_policy_fn=connection_to_freshness_policy_fn,\n )
\n
", "current_page_name": "_modules/dagster_airbyte/managed/reconciliation", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_airbyte.managed.reconciliation"}, "types": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_airbyte.managed.types

\nimport json\nfrom abc import ABC\nfrom enum import Enum\nfrom typing import Any, Dict, List, Mapping, Optional, Union\n\nimport dagster._check as check\nfrom dagster._annotations import public\n\n\n
[docs]class AirbyteSyncMode(ABC):\n """Represents the sync mode for a given Airbyte stream, which governs how Airbyte reads\n from a source and writes to a destination.\n\n For more information, see https://docs.airbyte.com/understanding-airbyte/connections/.\n """\n\n def __eq__(self, other: Any) -> bool:\n return isinstance(other, AirbyteSyncMode) and self.to_json() == other.to_json()\n\n def __init__(self, json_repr: Dict[str, Any]):\n self.json_repr = json_repr\n\n def to_json(self) -> Dict[str, Any]:\n return self.json_repr\n\n @classmethod\n def from_json(cls, json_repr: Dict[str, Any]) -> "AirbyteSyncMode":\n return cls(\n {\n k: v\n for k, v in json_repr.items()\n if k in ("syncMode", "destinationSyncMode", "cursorField", "primaryKey")\n }\n )\n\n
[docs] @public\n @classmethod\n def full_refresh_append(cls) -> "AirbyteSyncMode":\n """Syncs the entire data stream from the source, appending rows to the destination.\n\n https://docs.airbyte.com/understanding-airbyte/connections/full-refresh-append/\n """\n return cls({"syncMode": "full_refresh", "destinationSyncMode": "append"})
\n\n
[docs] @public\n @classmethod\n def full_refresh_overwrite(cls) -> "AirbyteSyncMode":\n """Syncs the entire data stream from the source, replaces data in the destination by\n overwriting it.\n\n https://docs.airbyte.com/understanding-airbyte/connections/full-refresh-overwrite\n """\n return cls({"syncMode": "full_refresh", "destinationSyncMode": "overwrite"})
\n\n
[docs] @public\n @classmethod\n def incremental_append(\n cls,\n cursor_field: Optional[str] = None,\n ) -> "AirbyteSyncMode":\n """Syncs only new records from the source, appending rows to the destination.\n May optionally specify the cursor field used to determine which records\n are new.\n\n https://docs.airbyte.com/understanding-airbyte/connections/incremental-append/\n """\n cursor_field = check.opt_str_param(cursor_field, "cursor_field")\n\n return cls(\n {\n "syncMode": "incremental",\n "destinationSyncMode": "append",\n **({"cursorField": [cursor_field]} if cursor_field else {}),\n }\n )
\n\n
[docs] @public\n @classmethod\n def incremental_append_dedup(\n cls,\n cursor_field: Optional[str] = None,\n primary_key: Optional[Union[str, List[str]]] = None,\n ) -> "AirbyteSyncMode":\n """Syncs new records from the source, appending to an append-only history\n table in the destination. Also generates a deduplicated view mirroring the\n source table. May optionally specify the cursor field used to determine\n which records are new, and the primary key used to determine which records\n are duplicates.\n\n https://docs.airbyte.com/understanding-airbyte/connections/incremental-append-dedup/\n """\n cursor_field = check.opt_str_param(cursor_field, "cursor_field")\n if isinstance(primary_key, str):\n primary_key = [primary_key]\n primary_key = check.opt_list_param(primary_key, "primary_key", of_type=str)\n\n return cls(\n {\n "syncMode": "incremental",\n "destinationSyncMode": "append_dedup",\n **({"cursorField": [cursor_field]} if cursor_field else {}),\n **({"primaryKey": [[x] for x in primary_key]} if primary_key else {}),\n }\n )
\n\n\n
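To show how these factory methods are typically consumed, here is a small sketch of a stream_config mapping of the kind passed to AirbyteConnection; the stream and field names are placeholders.

.. code-block:: python

    from dagster_airbyte import AirbyteSyncMode

    # Placeholder stream names mapped to sync modes.
    stream_config = {
        "users": AirbyteSyncMode.full_refresh_overwrite(),
        "events": AirbyteSyncMode.incremental_append(cursor_field="updated_at"),
        "orders": AirbyteSyncMode.incremental_append_dedup(
            cursor_field="updated_at", primary_key="id"
        ),
    }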
[docs]class AirbyteSource:\n """Represents a user-defined Airbyte source.\n\n Args:\n name (str): The display name of the source.\n source_type (str): The type of the source, from Airbyte's list\n of sources https://airbytehq.github.io/category/sources/.\n source_configuration (Mapping[str, Any]): The configuration for the\n source, as defined by Airbyte's API.\n """\n\n
[docs] @public\n def __init__(self, name: str, source_type: str, source_configuration: Mapping[str, Any]):\n self.name = check.str_param(name, "name")\n self.source_type = check.str_param(source_type, "source_type")\n self.source_configuration = check.mapping_param(\n source_configuration, "source_configuration", key_type=str\n )
\n\n def must_be_recreated(self, other: "AirbyteSource") -> bool:\n return self.name != other.name or self.source_type != other.source_type
\n\n\nclass InitializedAirbyteSource:\n """User-defined Airbyte source bound to actual created Airbyte source."""\n\n def __init__(self, source: AirbyteSource, source_id: str, source_definition_id: Optional[str]):\n self.source = source\n self.source_id = source_id\n self.source_definition_id = source_definition_id\n\n @classmethod\n def from_api_json(cls, api_json: Mapping[str, Any]):\n return cls(\n source=AirbyteSource(\n name=api_json["name"],\n source_type=api_json["sourceName"],\n source_configuration=api_json["connectionConfiguration"],\n ),\n source_id=api_json["sourceId"],\n source_definition_id=None,\n )\n\n\n
[docs]class AirbyteDestination:\n """Represents a user-defined Airbyte destination.\n\n Args:\n name (str): The display name of the destination.\n destination_type (str): The type of the destination, from Airbyte's list\n of destinations https://airbytehq.github.io/category/destinations/.\n destination_configuration (Mapping[str, Any]): The configuration for the\n destination, as defined by Airbyte's API.\n """\n\n
[docs] @public\n def __init__(\n self, name: str, destination_type: str, destination_configuration: Mapping[str, Any]\n ):\n self.name = check.str_param(name, "name")\n self.destination_type = check.str_param(destination_type, "destination_type")\n self.destination_configuration = check.mapping_param(\n destination_configuration, "destination_configuration", key_type=str\n )
\n\n def must_be_recreated(self, other: "AirbyteDestination") -> bool:\n return self.name != other.name or self.destination_type != other.destination_type
\n\n\nclass InitializedAirbyteDestination:\n """User-defined Airbyte destination bound to actual created Airbyte destination."""\n\n def __init__(\n self,\n destination: AirbyteDestination,\n destination_id: str,\n destination_definition_id: Optional[str],\n ):\n self.destination = destination\n self.destination_id = destination_id\n self.destination_definition_id = destination_definition_id\n\n @classmethod\n def from_api_json(cls, api_json: Mapping[str, Any]):\n return cls(\n destination=AirbyteDestination(\n name=api_json["name"],\n destination_type=api_json["destinationName"],\n destination_configuration=api_json["connectionConfiguration"],\n ),\n destination_id=api_json["destinationId"],\n destination_definition_id=None,\n )\n\n\nclass AirbyteDestinationNamespace(Enum):\n """Represents the sync mode for a given Airbyte stream."""\n\n SAME_AS_SOURCE = "source"\n DESTINATION_DEFAULT = "destination"\n\n\n
[docs]class AirbyteConnection:\n """A user-defined Airbyte connection, pairing an Airbyte source and destination and configuring\n which streams to sync.\n\n Args:\n name (str): The display name of the connection.\n source (AirbyteSource): The source to sync from.\n destination (AirbyteDestination): The destination to sync to.\n stream_config (Mapping[str, AirbyteSyncMode]): A mapping from stream name to\n the sync mode for that stream, including any additional configuration\n of primary key or cursor field.\n normalize_data (Optional[bool]): Whether to normalize the data in the\n destination.\n destination_namespace (Optional[Union[AirbyteDestinationNamespace, str]]):\n The namespace to sync to in the destination. If set to\n AirbyteDestinationNamespace.SAME_AS_SOURCE, the namespace will be the\n same as the source namespace. If set to\n AirbyteDestinationNamespace.DESTINATION_DEFAULT, the namespace will be\n the default namespace for the destination. If set to a string, the\n namespace will be that string.\n prefix (Optional[str]): A prefix to add to the table names in the destination.\n\n Example:\n .. code-block:: python\n\n from dagster_airbyte.managed.generated.sources import FileSource\n from dagster_airbyte.managed.generated.destinations import LocalJsonDestination\n from dagster_airbyte import AirbyteConnection, AirbyteSyncMode\n\n cereals_csv_source = FileSource(...)\n local_json_destination = LocalJsonDestination(...)\n\n cereals_connection = AirbyteConnection(\n name="download-cereals",\n source=cereals_csv_source,\n destination=local_json_destination,\n stream_config={"cereals": AirbyteSyncMode.full_refresh_overwrite()},\n )\n """\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n source: AirbyteSource,\n destination: AirbyteDestination,\n stream_config: Mapping[str, AirbyteSyncMode],\n normalize_data: Optional[bool] = None,\n destination_namespace: Optional[\n Union[AirbyteDestinationNamespace, str]\n ] = AirbyteDestinationNamespace.SAME_AS_SOURCE,\n prefix: Optional[str] = None,\n ):\n self.name = check.str_param(name, "name")\n self.source = check.inst_param(source, "source", AirbyteSource)\n self.destination = check.inst_param(destination, "destination", AirbyteDestination)\n self.stream_config = check.mapping_param(\n stream_config, "stream_config", key_type=str, value_type=AirbyteSyncMode\n )\n self.normalize_data = check.opt_bool_param(normalize_data, "normalize_data")\n self.destination_namespace = check.opt_inst_param(\n destination_namespace, "destination_namespace", (str, AirbyteDestinationNamespace)\n )\n self.prefix = check.opt_str_param(prefix, "prefix")
\n\n def must_be_recreated(self, other: Optional["AirbyteConnection"]) -> bool:\n return (\n not other\n or self.source.must_be_recreated(other.source)\n or self.destination.must_be_recreated(other.destination)\n )
\n\n\nclass InitializedAirbyteConnection:\n """User-defined Airbyte connection bound to actual created Airbyte connection."""\n\n def __init__(\n self,\n connection: AirbyteConnection,\n connection_id: str,\n ):\n self.connection = connection\n self.connection_id = connection_id\n\n @classmethod\n def from_api_json(\n cls,\n api_dict: Mapping[str, Any],\n init_sources: Mapping[str, InitializedAirbyteSource],\n init_dests: Mapping[str, InitializedAirbyteDestination],\n ):\n source = next(\n (\n source.source\n for source in init_sources.values()\n if source.source_id == api_dict["sourceId"]\n ),\n None,\n )\n dest = next(\n (\n dest.destination\n for dest in init_dests.values()\n if dest.destination_id == api_dict["destinationId"]\n ),\n None,\n )\n\n source = check.not_none(source, f"Could not find source with id {api_dict['sourceId']}")\n dest = check.not_none(\n dest, f"Could not find destination with id {api_dict['destinationId']}"\n )\n\n streams = {\n stream["stream"]["name"]: AirbyteSyncMode.from_json(stream["config"])\n for stream in api_dict["syncCatalog"]["streams"]\n }\n return cls(\n AirbyteConnection(\n name=api_dict["name"],\n source=source,\n destination=dest,\n stream_config=streams,\n normalize_data=len(api_dict["operationIds"]) > 0,\n destination_namespace=(\n api_dict["namespaceFormat"]\n if api_dict["namespaceDefinition"] == "customformat"\n else AirbyteDestinationNamespace(api_dict["namespaceDefinition"])\n ),\n prefix=api_dict["prefix"] if api_dict.get("prefix") else None,\n ),\n api_dict["connectionId"],\n )\n\n\ndef _remove_none_values(obj: Dict[str, Any]) -> Dict[str, Any]:\n return {k: v for k, v in obj.items() if v is not None}\n\n\ndef _dump_class(obj: Any) -> Dict[str, Any]:\n return json.loads(json.dumps(obj, default=lambda o: _remove_none_values(o.__dict__)))\n\n\nclass GeneratedAirbyteSource(AirbyteSource):\n """Base class used by the codegen Airbyte sources. This class is not intended to be used directly.\n\n Converts all of its attributes into a source configuration dict which is passed down to the base\n AirbyteSource class.\n """\n\n def __init__(self, source_type: str, name: str):\n source_configuration = _dump_class(self)\n super().__init__(\n name=name, source_type=source_type, source_configuration=source_configuration\n )\n\n\nclass GeneratedAirbyteDestination(AirbyteDestination):\n """Base class used by the codegen Airbyte destinations. This class is not intended to be used directly.\n\n Converts all of its attributes into a destination configuration dict which is passed down to the\n base AirbyteDestination class.\n """\n\n def __init__(self, source_type: str, name: str):\n destination_configuration = _dump_class(self)\n super().__init__(\n name=name,\n destination_type=source_type,\n destination_configuration=destination_configuration,\n )\n
", "current_page_name": "_modules/dagster_airbyte/managed/types", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_airbyte.managed.types"}}, "ops": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_airbyte.ops

\nfrom typing import Any, Iterable, List, Optional\n\nfrom dagster import Config, In, Nothing, Out, Output, op\nfrom pydantic import Field\n\nfrom dagster_airbyte.types import AirbyteOutput\nfrom dagster_airbyte.utils import _get_attempt, generate_materializations\n\nfrom .resources import DEFAULT_POLL_INTERVAL_SECONDS, BaseAirbyteResource\n\n\nclass AirbyteSyncConfig(Config):\n    connection_id: str = Field(\n        ...,\n        description=(\n            "The Airbyte Connection ID that this op will sync. You can retrieve this value from "\n            "the Connection tab of a given connection in the Airbyte UI."\n        ),\n    )\n    poll_interval: float = Field(\n        DEFAULT_POLL_INTERVAL_SECONDS,\n        description=(\n            "The time (in seconds) that will be waited between successive polls of the "\n            "sync status."\n        ),\n    )\n    poll_timeout: Optional[float] = Field(\n        None,\n        description=(\n            "The maximum time that will be waited before this operation is timed out. By "\n            "default, this will never time out."\n        ),\n    )\n    yield_materializations: bool = Field(\n        True,\n        description=(\n            "If True, materializations corresponding to the results of the Airbyte sync will "\n            "be yielded when the op executes."\n        ),\n    )\n    asset_key_prefix: List[str] = Field(\n        ["airbyte"],\n        description=(\n            "If provided and yield_materializations is True, these components will be used to "\n            "prefix the generated asset keys."\n        ),\n    )\n\n\n
[docs]@op(\n ins={"start_after": In(Nothing)},\n out=Out(\n AirbyteOutput,\n description=(\n "Parsed json dictionary representing the details of the Airbyte connector after the"\n " sync successfully completes. See the [Airbyte API"\n " Docs](https://airbyte-public-api-docs.s3.us-east-2.amazonaws.com/rapidoc-api-docs.html#overview)"\n " to see detailed information on this response."\n ),\n ),\n tags={"kind": "airbyte"},\n)\ndef airbyte_sync_op(\n context, config: AirbyteSyncConfig, airbyte: BaseAirbyteResource\n) -> Iterable[Any]:\n """Executes a Airbyte job sync for a given ``connection_id``, and polls until that sync\n completes, raising an error if it is unsuccessful. It outputs a AirbyteOutput which contains\n the job details for a given ``connection_id``.\n\n It requires the use of the :py:class:`~dagster_airbyte.airbyte_resource`, which allows it to\n communicate with the Airbyte API.\n\n Examples:\n .. code-block:: python\n\n from dagster import job\n from dagster_airbyte import airbyte_resource, airbyte_sync_op\n\n my_airbyte_resource = airbyte_resource.configured(\n {\n "host": {"env": "AIRBYTE_HOST"},\n "port": {"env": "AIRBYTE_PORT"},\n }\n )\n\n sync_foobar = airbyte_sync_op.configured({"connection_id": "foobar"}, name="sync_foobar")\n\n @job(resource_defs={"airbyte": my_airbyte_resource})\n def my_simple_airbyte_job():\n sync_foobar()\n\n @job(resource_defs={"airbyte": my_airbyte_resource})\n def my_composed_airbyte_job():\n final_foobar_state = sync_foobar(start_after=some_op())\n other_op(final_foobar_state)\n """\n airbyte_output = airbyte.sync_and_poll(\n connection_id=config.connection_id,\n poll_interval=config.poll_interval,\n poll_timeout=config.poll_timeout,\n )\n if config.yield_materializations:\n yield from generate_materializations(\n airbyte_output, asset_key_prefix=config.asset_key_prefix\n )\n yield Output(\n airbyte_output,\n metadata={\n **_get_attempt(airbyte_output.job_details.get("attempts", [{}])[-1]).get(\n "totalStats", {}\n )\n },\n )
\n
", "current_page_name": "_modules/dagster_airbyte/ops", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_airbyte.ops"}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_airbyte.resources

\nimport hashlib\nimport json\nimport logging\nimport sys\nimport time\nfrom abc import abstractmethod\nfrom contextlib import contextmanager\nfrom typing import Any, Dict, List, Mapping, Optional, cast\n\nimport requests\nfrom dagster import (\n    ConfigurableResource,\n    Failure,\n    _check as check,\n    get_dagster_logger,\n    resource,\n)\nfrom dagster._config.pythonic_config import infer_schema_from_config_class\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster._utils.cached_method import cached_method\nfrom dagster._utils.merger import deep_merge_dicts\nfrom pydantic import Field\nfrom requests.exceptions import RequestException\n\nfrom dagster_airbyte.types import AirbyteOutput\n\nDEFAULT_POLL_INTERVAL_SECONDS = 10\n\n\nclass AirbyteState:\n    RUNNING = "running"\n    SUCCEEDED = "succeeded"\n    CANCELLED = "cancelled"\n    PENDING = "pending"\n    FAILED = "failed"\n    ERROR = "error"\n    INCOMPLETE = "incomplete"\n\n\nclass AirbyteResourceState:\n    def __init__(self) -> None:\n        self.request_cache: Dict[str, Optional[Mapping[str, object]]] = {}\n        # Int in case we nest contexts\n        self.cache_enabled = 0\n\n\nclass BaseAirbyteResource(ConfigurableResource):\n    request_max_retries: int = Field(\n        default=3,\n        description=(\n            "The maximum number of times requests to the Airbyte API should be retried "\n            "before failing."\n        ),\n    )\n    request_retry_delay: float = Field(\n        default=0.25,\n        description="Time (in seconds) to wait between each request retry.",\n    )\n    request_timeout: int = Field(\n        default=15,\n        description="Time (in seconds) after which the requests to Airbyte are declared timed out.",\n    )\n    cancel_sync_on_run_termination: bool = Field(\n        default=True,\n        description=(\n            "Whether to cancel a sync in Airbyte if the Dagster runner is terminated. 
This may"\n            " be useful to disable if using Airbyte sources that cannot be cancelled and"\n            " resumed easily, or if your Dagster deployment may experience runner interruptions"\n            " that do not impact your Airbyte deployment."\n        ),\n    )\n    poll_interval: float = Field(\n        default=DEFAULT_POLL_INTERVAL_SECONDS,\n        description="Time (in seconds) to wait between checking a sync's status.",\n    )\n\n    @classmethod\n    def _is_dagster_maintained(cls) -> bool:\n        return True\n\n    @property\n    @cached_method\n    def _log(self) -> logging.Logger:\n        return get_dagster_logger()\n\n    @property\n    @abstractmethod\n    def api_base_url(self) -> str:\n        raise NotImplementedError()\n\n    @property\n    @abstractmethod\n    def all_additional_request_params(self) -> Mapping[str, Any]:\n        raise NotImplementedError()\n\n    def make_request(\n        self, endpoint: str, data: Optional[Mapping[str, object]] = None, method: str = "POST"\n    ) -> Optional[Mapping[str, object]]:\n        """Creates and sends a request to the desired Airbyte REST API endpoint.\n\n        Args:\n            endpoint (str): The Airbyte API endpoint to send this request to.\n            data (Optional[str]): JSON-formatted data string to be included in the request.\n\n        Returns:\n            Optional[Dict[str, Any]]: Parsed json data from the response to this request\n        """\n        url = self.api_base_url + endpoint\n        headers = {"accept": "application/json"}\n\n        num_retries = 0\n        while True:\n            try:\n                request_args: Dict[str, Any] = dict(\n                    method=method,\n                    url=url,\n                    headers=headers,\n                    timeout=self.request_timeout,\n                )\n                if data:\n                    request_args["json"] = data\n\n                request_args = deep_merge_dicts(\n                    request_args,\n                    self.all_additional_request_params,\n                )\n\n                response = requests.request(\n                    **request_args,\n                )\n                response.raise_for_status()\n                if response.status_code == 204:\n                    return None\n                return response.json()\n            except RequestException as e:\n                self._log.error("Request to Airbyte API failed: %s", e)\n                if num_retries == self.request_max_retries:\n                    break\n                num_retries += 1\n                time.sleep(self.request_retry_delay)\n\n        raise Failure(f"Max retries ({self.request_max_retries}) exceeded with url: {url}.")\n\n    @abstractmethod\n    def start_sync(self, connection_id: str) -> Mapping[str, object]:\n        raise NotImplementedError()\n\n    @abstractmethod\n    def get_connection_details(self, connection_id: str) -> Mapping[str, object]:\n        raise NotImplementedError()\n\n    @abstractmethod\n    def get_job_status(self, connection_id: str, job_id: int) -> Mapping[str, object]:\n        raise NotImplementedError()\n\n    @abstractmethod\n    def cancel_job(self, job_id: int):\n        raise NotImplementedError()\n\n    @property\n    @abstractmethod\n    def _should_forward_logs(self) -> bool:\n        raise NotImplementedError()\n\n    def sync_and_poll(\n        self,\n        connection_id: str,\n        poll_interval: Optional[float] = None,\n        poll_timeout: Optional[float] = 
None,\n    ) -> AirbyteOutput:\n        """Initializes a sync operation for the given connector, and polls until it completes.\n\n        Args:\n            connection_id (str): The Airbyte Connector ID. You can retrieve this value from the\n                "Connection" tab of a given connection in the Arbyte UI.\n            poll_interval (float): The time (in seconds) that will be waited between successive polls.\n            poll_timeout (float): The maximum time that will waited before this operation is timed\n                out. By default, this will never time out.\n\n        Returns:\n            :py:class:`~AirbyteOutput`:\n                Details of the sync job.\n        """\n        connection_details = self.get_connection_details(connection_id)\n        job_details = self.start_sync(connection_id)\n        job_info = cast(Dict[str, object], job_details.get("job", {}))\n        job_id = cast(int, job_info.get("id"))\n\n        self._log.info(f"Job {job_id} initialized for connection_id={connection_id}.")\n        start = time.monotonic()\n        logged_attempts = 0\n        logged_lines = 0\n        state = None\n\n        try:\n            while True:\n                if poll_timeout and start + poll_timeout < time.monotonic():\n                    raise Failure(\n                        f"Timeout: Airbyte job {job_id} is not ready after the timeout"\n                        f" {poll_timeout} seconds"\n                    )\n                time.sleep(poll_interval or self.poll_interval)\n                job_details = self.get_job_status(connection_id, job_id)\n                attempts = cast(List, job_details.get("attempts", []))\n                cur_attempt = len(attempts)\n                # spit out the available Airbyte log info\n                if cur_attempt:\n                    if self._should_forward_logs:\n                        log_lines = attempts[logged_attempts].get("logs", {}).get("logLines", [])\n\n                        for line in log_lines[logged_lines:]:\n                            sys.stdout.write(line + "\\n")\n                            sys.stdout.flush()\n                        logged_lines = len(log_lines)\n\n                    # if there's a next attempt, this one will have no more log messages\n                    if logged_attempts < cur_attempt - 1:\n                        logged_lines = 0\n                        logged_attempts += 1\n\n                job_info = cast(Dict[str, object], job_details.get("job", {}))\n                state = job_info.get("status")\n\n                if state in (AirbyteState.RUNNING, AirbyteState.PENDING, AirbyteState.INCOMPLETE):\n                    continue\n                elif state == AirbyteState.SUCCEEDED:\n                    break\n                elif state == AirbyteState.ERROR:\n                    raise Failure(f"Job failed: {job_id}")\n                elif state == AirbyteState.CANCELLED:\n                    raise Failure(f"Job was cancelled: {job_id}")\n                else:\n                    raise Failure(f"Encountered unexpected state `{state}` for job_id {job_id}")\n        finally:\n            # if Airbyte sync has not completed, make sure to cancel it so that it doesn't outlive\n            # the python process\n            if (\n                state not in (AirbyteState.SUCCEEDED, AirbyteState.ERROR, AirbyteState.CANCELLED)\n                and self.cancel_sync_on_run_termination\n            ):\n                self.cancel_job(job_id)\n\n        return 
AirbyteOutput(job_details=job_details, connection_details=connection_details)\n\n\nclass AirbyteCloudResource(BaseAirbyteResource):\n    """This resource allows users to programatically interface with the Airbyte Cloud API to launch\n    syncs and monitor their progress.\n\n    **Examples:**\n\n    .. code-block:: python\n\n        from dagster import job, EnvVar\n        from dagster_airbyte import AirbyteResource\n\n        my_airbyte_resource = AirbyteCloudResource(\n            api_key=EnvVar("AIRBYTE_API_KEY"),\n        )\n\n        airbyte_assets = build_airbyte_assets(\n            connection_id="87b7fe85-a22c-420e-8d74-b30e7ede77df",\n            destination_tables=["releases", "tags", "teams"],\n        )\n\n        defs = Definitions(\n            assets=[airbyte_assets],\n            resources={"airbyte": my_airbyte_resource},\n        )\n    """\n\n    api_key: str = Field(..., description="The Airbyte Cloud API key.")\n\n    @property\n    def api_base_url(self) -> str:\n        return "https://api.airbyte.com/v1"\n\n    @property\n    def all_additional_request_params(self) -> Mapping[str, Any]:\n        return {"headers": {"Authorization": f"Bearer {self.api_key}", "User-Agent": "dagster"}}\n\n    def start_sync(self, connection_id: str) -> Mapping[str, object]:\n        job_sync = check.not_none(\n            self.make_request(\n                endpoint="/jobs",\n                data={\n                    "connectionId": connection_id,\n                    "jobType": "sync",\n                },\n            )\n        )\n        return {"job": {"id": job_sync["jobId"], "status": job_sync["status"]}}\n\n    def get_connection_details(self, connection_id: str) -> Mapping[str, object]:\n        return {}\n\n    def get_job_status(self, connection_id: str, job_id: int) -> Mapping[str, object]:\n        job_status = check.not_none(self.make_request(endpoint=f"/jobs/{job_id}", method="GET"))\n        return {"job": {"id": job_status["jobId"], "status": job_status["status"]}}\n\n    def cancel_job(self, job_id: int):\n        self.make_request(endpoint=f"/jobs/{job_id}", method="DELETE")\n\n    @property\n    def _should_forward_logs(self) -> bool:\n        # Airbyte Cloud does not support streaming logs yet\n        return False\n\n\n
[docs]class AirbyteResource(BaseAirbyteResource):\n """This resource allows users to programatically interface with the Airbyte REST API to launch\n syncs and monitor their progress.\n\n **Examples:**\n\n .. code-block:: python\n\n from dagster import job, EnvVar\n from dagster_airbyte import AirbyteResource\n\n my_airbyte_resource = AirbyteResource(\n host=EnvVar("AIRBYTE_HOST"),\n port=EnvVar("AIRBYTE_PORT"),\n # If using basic auth\n username=EnvVar("AIRBYTE_USERNAME"),\n password=EnvVar("AIRBYTE_PASSWORD"),\n )\n\n airbyte_assets = build_airbyte_assets(\n connection_id="87b7fe85-a22c-420e-8d74-b30e7ede77df",\n destination_tables=["releases", "tags", "teams"],\n )\n\n defs = Definitions(\n assets=[airbyte_assets],\n resources={"airbyte": my_airbyte_resource},\n )\n """\n\n host: str = Field(description="The Airbyte server address.")\n port: str = Field(description="Port used for the Airbyte server.")\n username: Optional[str] = Field(default=None, description="Username if using basic auth.")\n password: Optional[str] = Field(default=None, description="Password if using basic auth.")\n use_https: bool = Field(\n default=False, description="Whether to use HTTPS to connect to the Airbyte server."\n )\n forward_logs: bool = Field(\n default=True,\n description=(\n "Whether to forward Airbyte logs to the compute log, can be expensive for"\n " long-running syncs."\n ),\n )\n request_additional_params: Mapping[str, Any] = Field(\n default=dict(),\n description=(\n "Any additional kwargs to pass to the requests library when making requests to Airbyte."\n ),\n )\n\n @property\n @cached_method\n def _state(self) -> AirbyteResourceState:\n return AirbyteResourceState()\n\n @property\n @cached_method\n def _log(self) -> logging.Logger:\n return get_dagster_logger()\n\n @property\n def api_base_url(self) -> str:\n return (\n ("https://" if self.use_https else "http://")\n + (f"{self.host}:{self.port}" if self.port else self.host)\n + "/api/v1"\n )\n\n @property\n def _should_forward_logs(self) -> bool:\n return self.forward_logs\n\n @contextmanager\n def cache_requests(self):\n """Context manager that enables caching certain requests to the Airbyte API,\n cleared when the context is exited.\n """\n self.clear_request_cache()\n self._state.cache_enabled += 1\n try:\n yield\n finally:\n self.clear_request_cache()\n self._state.cache_enabled -= 1\n\n def clear_request_cache(self) -> None:\n self._state.request_cache = {}\n\n def make_request_cached(self, endpoint: str, data: Optional[Mapping[str, object]]):\n if not self._state.cache_enabled > 0:\n return self.make_request(endpoint, data)\n data_json = json.dumps(data, sort_keys=True)\n sha = hashlib.sha1()\n sha.update(endpoint.encode("utf-8"))\n sha.update(data_json.encode("utf-8"))\n digest = sha.hexdigest()\n\n if digest not in self._state.request_cache:\n self._state.request_cache[digest] = self.make_request(endpoint, data)\n return self._state.request_cache[digest]\n\n @property\n def all_additional_request_params(self) -> Mapping[str, Any]:\n auth_param = (\n {"auth": (self.username, self.password)} if self.username and self.password else {}\n )\n return {**auth_param, **self.request_additional_params}\n\n def make_request(\n self, endpoint: str, data: Optional[Mapping[str, object]]\n ) -> Optional[Mapping[str, object]]:\n """Creates and sends a request to the desired Airbyte REST API endpoint.\n\n Args:\n endpoint (str): The Airbyte API endpoint to send this request to.\n data (Optional[str]): JSON-formatted data string to be included in the 
request.\n\n Returns:\n Optional[Dict[str, Any]]: Parsed json data from the response to this request\n """\n url = self.api_base_url + endpoint\n headers = {"accept": "application/json"}\n\n num_retries = 0\n while True:\n try:\n response = requests.request(\n **deep_merge_dicts( # type: ignore\n dict(\n method="POST",\n url=url,\n headers=headers,\n json=data,\n timeout=self.request_timeout,\n auth=(\n (self.username, self.password)\n if self.username and self.password\n else None\n ),\n ),\n self.request_additional_params,\n ),\n )\n response.raise_for_status()\n if response.status_code == 204:\n return None\n return response.json()\n except RequestException as e:\n self._log.error("Request to Airbyte API failed: %s", e)\n if num_retries == self.request_max_retries:\n break\n num_retries += 1\n time.sleep(self.request_retry_delay)\n\n raise Failure(f"Max retries ({self.request_max_retries}) exceeded with url: {url}.")\n\n def cancel_job(self, job_id: int):\n self.make_request(endpoint="/jobs/cancel", data={"id": job_id})\n\n def get_default_workspace(self) -> str:\n workspaces = cast(\n List[Dict[str, Any]],\n check.not_none(self.make_request_cached(endpoint="/workspaces/list", data={})).get(\n "workspaces", []\n ),\n )\n return workspaces[0]["workspaceId"]\n\n def get_source_definition_by_name(self, name: str) -> Optional[str]:\n name_lower = name.lower()\n definitions = self.make_request_cached(endpoint="/source_definitions/list", data={})\n\n return next(\n (\n definition["sourceDefinitionId"]\n for definition in definitions["sourceDefinitions"]\n if definition["name"].lower() == name_lower\n ),\n None,\n )\n\n def get_destination_definition_by_name(self, name: str):\n name_lower = name.lower()\n definitions = cast(\n Dict[str, List[Dict[str, str]]],\n check.not_none(\n self.make_request_cached(endpoint="/destination_definitions/list", data={})\n ),\n )\n return next(\n (\n definition["destinationDefinitionId"]\n for definition in definitions["destinationDefinitions"]\n if definition["name"].lower() == name_lower\n ),\n None,\n )\n\n def get_source_catalog_id(self, source_id: str):\n result = cast(\n Dict[str, Any],\n check.not_none(\n self.make_request(endpoint="/sources/discover_schema", data={"sourceId": source_id})\n ),\n )\n return result["catalogId"]\n\n def get_source_schema(self, source_id: str) -> Mapping[str, Any]:\n return cast(\n Dict[str, Any],\n check.not_none(\n self.make_request(endpoint="/sources/discover_schema", data={"sourceId": source_id})\n ),\n )\n\n def does_dest_support_normalization(\n self, destination_definition_id: str, workspace_id: str\n ) -> bool:\n # Airbyte API changed source of truth for normalization in PR\n # https://github.com/airbytehq/airbyte/pull/21005\n norm_dest_def_spec: bool = cast(\n Dict[str, Any],\n check.not_none(\n self.make_request_cached(\n endpoint="/destination_definition_specifications/get",\n data={\n "destinationDefinitionId": destination_definition_id,\n "workspaceId": workspace_id,\n },\n )\n ),\n ).get("supportsNormalization", False)\n\n norm_dest_def: bool = (\n cast(\n Dict[str, Any],\n check.not_none(\n self.make_request_cached(\n endpoint="/destination_definitions/get",\n data={\n "destinationDefinitionId": destination_definition_id,\n },\n )\n ),\n )\n .get("normalizationConfig", {})\n .get("supported", False)\n )\n\n return any([norm_dest_def_spec, norm_dest_def])\n\n def get_job_status(self, connection_id: str, job_id: int) -> Mapping[str, object]:\n if self.forward_logs:\n return 
check.not_none(self.make_request(endpoint="/jobs/get", data={"id": job_id}))\n else:\n # the "list all jobs" endpoint doesn't return logs, which actually makes it much more\n # lightweight for long-running syncs with many logs\n out = check.not_none(\n self.make_request(\n endpoint="/jobs/list",\n data={\n "configTypes": ["sync"],\n "configId": connection_id,\n # sync should be the most recent, so pageSize 5 is sufficient\n "pagination": {"pageSize": 5},\n },\n )\n )\n job = next((job for job in cast(List, out["jobs"]) if job["job"]["id"] == job_id), None)\n\n return check.not_none(job)\n\n def start_sync(self, connection_id: str) -> Mapping[str, object]:\n return check.not_none(\n self.make_request(endpoint="/connections/sync", data={"connectionId": connection_id})\n )\n\n def get_connection_details(self, connection_id: str) -> Mapping[str, object]:\n return check.not_none(\n self.make_request(endpoint="/connections/get", data={"connectionId": connection_id})\n )\n\n def sync_and_poll(\n self,\n connection_id: str,\n poll_interval: Optional[float] = None,\n poll_timeout: Optional[float] = None,\n ) -> AirbyteOutput:\n """Initializes a sync operation for the given connector, and polls until it completes.\n\n Args:\n connection_id (str): The Airbyte Connector ID. You can retrieve this value from the\n "Connection" tab of a given connection in the Arbyte UI.\n poll_interval (float): The time (in seconds) that will be waited between successive polls.\n poll_timeout (float): The maximum time that will waited before this operation is timed\n out. By default, this will never time out.\n\n Returns:\n :py:class:`~AirbyteOutput`:\n Details of the sync job.\n """\n connection_details = self.get_connection_details(connection_id)\n job_details = self.start_sync(connection_id)\n job_info = cast(Dict[str, object], job_details.get("job", {}))\n job_id = cast(int, job_info.get("id"))\n\n self._log.info(f"Job {job_id} initialized for connection_id={connection_id}.")\n start = time.monotonic()\n logged_attempts = 0\n logged_lines = 0\n state = None\n\n try:\n while True:\n if poll_timeout and start + poll_timeout < time.monotonic():\n raise Failure(\n f"Timeout: Airbyte job {job_id} is not ready after the timeout"\n f" {poll_timeout} seconds"\n )\n time.sleep(poll_interval or self.poll_interval)\n job_details = self.get_job_status(connection_id, job_id)\n attempts = cast(List, job_details.get("attempts", []))\n cur_attempt = len(attempts)\n # spit out the available Airbyte log info\n if cur_attempt:\n if self.forward_logs:\n log_lines = attempts[logged_attempts].get("logs", {}).get("logLines", [])\n\n for line in log_lines[logged_lines:]:\n sys.stdout.write(line + "\\n")\n sys.stdout.flush()\n logged_lines = len(log_lines)\n\n # if there's a next attempt, this one will have no more log messages\n if logged_attempts < cur_attempt - 1:\n logged_lines = 0\n logged_attempts += 1\n\n job_info = cast(Dict[str, object], job_details.get("job", {}))\n state = job_info.get("status")\n\n if state in (AirbyteState.RUNNING, AirbyteState.PENDING, AirbyteState.INCOMPLETE):\n continue\n elif state == AirbyteState.SUCCEEDED:\n break\n elif state == AirbyteState.ERROR:\n raise Failure(f"Job failed: {job_id}")\n elif state == AirbyteState.CANCELLED:\n raise Failure(f"Job was cancelled: {job_id}")\n else:\n raise Failure(f"Encountered unexpected state `{state}` for job_id {job_id}")\n finally:\n # if Airbyte sync has not completed, make sure to cancel it so that it doesn't outlive\n # the python process\n if (\n state not in 
(AirbyteState.SUCCEEDED, AirbyteState.ERROR, AirbyteState.CANCELLED)\n and self.cancel_sync_on_run_termination\n ):\n self.cancel_job(job_id)\n\n return AirbyteOutput(job_details=job_details, connection_details=connection_details)
\n\n\n
[docs]@dagster_maintained_resource\n@resource(config_schema=AirbyteResource.to_config_schema())\ndef airbyte_resource(context) -> AirbyteResource:\n """This resource allows users to programmatically interface with the Airbyte REST API to launch\n syncs and monitor their progress. This currently implements only a subset of the functionality\n exposed by the API.\n\n For a complete set of documentation on the Airbyte REST API, including expected response JSON\n schema, see the `Airbyte API Docs <https://airbyte-public-api-docs.s3.us-east-2.amazonaws.com/rapidoc-api-docs.html#overview>`_.\n\n To configure this resource, we recommend using the `configured\n <https://docs.dagster.io/concepts/configuration/configured>`_ method.\n\n **Examples:**\n\n .. code-block:: python\n\n from dagster import job\n from dagster_airbyte import airbyte_resource\n\n my_airbyte_resource = airbyte_resource.configured(\n {\n "host": {"env": "AIRBYTE_HOST"},\n "port": {"env": "AIRBYTE_PORT"},\n # If using basic auth\n "username": {"env": "AIRBYTE_USERNAME"},\n "password": {"env": "AIRBYTE_PASSWORD"},\n }\n )\n\n @job(resource_defs={"airbyte":my_airbyte_resource})\n def my_airbyte_job():\n ...\n\n """\n return AirbyteResource.from_resource_context(context)
\n\n\n@dagster_maintained_resource\n@resource(config_schema=infer_schema_from_config_class(AirbyteCloudResource))\ndef airbyte_cloud_resource(context) -> AirbyteCloudResource:\n    """This resource allows users to programmatically interface with the Airbyte Cloud REST API to launch\n    syncs and monitor their progress. Currently, this resource may only be used with the more basic\n    `dagster-airbyte` APIs, including the ops and assets.\n\n    """\n    return AirbyteCloudResource.from_resource_context(context)\n
", "current_page_name": "_modules/dagster_airbyte/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_airbyte.resources"}}, "dagster_airflow": {"dagster_asset_factory": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_airflow.dagster_asset_factory

\nfrom typing import AbstractSet, List, Mapping, Optional, Set, Tuple\n\nfrom airflow.models.connection import Connection\nfrom airflow.models.dag import DAG\nfrom dagster import (\n    AssetKey,\n    AssetsDefinition,\n    GraphDefinition,\n    OutputMapping,\n    TimeWindowPartitionsDefinition,\n)\nfrom dagster._core.definitions.graph_definition import create_adjacency_lists\nfrom dagster._utils.schedules import is_valid_cron_schedule\n\nfrom dagster_airflow.dagster_job_factory import make_dagster_job_from_airflow_dag\nfrom dagster_airflow.utils import (\n    DagsterAirflowError,\n    normalized_name,\n)\n\n\ndef _build_asset_dependencies(\n    dag: DAG,\n    graph: GraphDefinition,\n    task_ids_by_asset_key: Mapping[AssetKey, AbstractSet[str]],\n    upstream_dependencies_by_asset_key: Mapping[AssetKey, AbstractSet[AssetKey]],\n) -> Tuple[AbstractSet[OutputMapping], Mapping[str, AssetKey], Mapping[str, Set[AssetKey]]]:\n    """Builds the asset dependency graph for a given set of airflow task mappings and a dagster graph."""\n    output_mappings = set()\n    keys_by_output_name = {}\n    internal_asset_deps: dict[str, Set[AssetKey]] = {}\n\n    visited_nodes: dict[str, bool] = {}\n    upstream_deps = set()\n\n    def find_upstream_dependency(node_name: str) -> None:\n        """Uses Depth-Firs-Search to find all upstream asset dependencies\n        as described in task_ids_by_asset_key.\n        """\n        # node has been visited\n        if visited_nodes[node_name]:\n            return\n        # mark node as visted\n        visited_nodes[node_name] = True\n        # traverse upstream nodes\n        for output_handle in graph.dependency_structure.all_upstream_outputs_from_node(node_name):\n            forward_node = output_handle.node_name\n            match = False\n            # find any assets produced by upstream nodes and add them to the internal asset deps\n            for asset_key in task_ids_by_asset_key:\n                if (\n                    forward_node.replace(f"{normalized_name(dag.dag_id)}__", "")\n                    in task_ids_by_asset_key[asset_key]\n                ):\n                    upstream_deps.add(asset_key)\n                    match = True\n            # don't traverse past nodes that have assets\n            if not match:\n                find_upstream_dependency(forward_node)\n\n    # iterate through each asset to find all upstream asset dependencies\n    for asset_key in task_ids_by_asset_key:\n        asset_upstream_deps = set()\n        for task_id in task_ids_by_asset_key[asset_key]:\n            visited_nodes = {s.name: False for s in graph.nodes}\n            upstream_deps = set()\n            find_upstream_dependency(normalized_name(dag.dag_id, task_id))\n            for dep in upstream_deps:\n                asset_upstream_deps.add(dep)\n            keys_by_output_name[f"result_{normalized_name(dag.dag_id, task_id)}"] = asset_key\n            output_mappings.add(\n                OutputMapping(\n                    graph_output_name=f"result_{normalized_name(dag.dag_id, task_id)}",\n                    mapped_node_name=normalized_name(dag.dag_id, task_id),\n                    mapped_node_output_name="airflow_task_complete",  # Default output name\n                )\n            )\n\n        # the tasks for a given asset should have the same internal deps\n        for task_id in task_ids_by_asset_key[asset_key]:\n            if f"result_{normalized_name(dag.dag_id, task_id)}" in internal_asset_deps:\n                
internal_asset_deps[f"result_{normalized_name(dag.dag_id, task_id)}"].update(\n                    asset_upstream_deps\n                )\n            else:\n                internal_asset_deps[f"result_{normalized_name(dag.dag_id, task_id)}"] = (\n                    asset_upstream_deps\n                )\n\n    # add new upstream asset dependencies to the internal deps\n    for asset_key in upstream_dependencies_by_asset_key:\n        for key in keys_by_output_name:\n            if keys_by_output_name[key] == asset_key:\n                internal_asset_deps[key].update(upstream_dependencies_by_asset_key[asset_key])\n\n    return (output_mappings, keys_by_output_name, internal_asset_deps)\n\n\n
[docs]def load_assets_from_airflow_dag(\n dag: DAG,\n task_ids_by_asset_key: Mapping[AssetKey, AbstractSet[str]] = {},\n upstream_dependencies_by_asset_key: Mapping[AssetKey, AbstractSet[AssetKey]] = {},\n connections: Optional[List[Connection]] = None,\n) -> List[AssetsDefinition]:\n """[Experimental] Construct Dagster Assets for a given Airflow DAG.\n\n Args:\n dag (DAG): The Airflow DAG to compile into a Dagster job\n task_ids_by_asset_key (Optional[Mapping[AssetKey, AbstractSet[str]]]): A mapping from asset\n keys to task ids. Used break up the Airflow Dag into multiple SDAs\n upstream_dependencies_by_asset_key (Optional[Mapping[AssetKey, AbstractSet[AssetKey]]]): A\n mapping from upstream asset keys to assets provided in task_ids_by_asset_key. Used to\n declare new upstream SDA depenencies.\n connections (List[Connection]): List of Airflow Connections to be created in the Airflow DB\n\n Returns:\n List[AssetsDefinition]\n """\n cron_schedule = dag.normalized_schedule_interval\n if cron_schedule is not None and not is_valid_cron_schedule(str(cron_schedule)):\n raise DagsterAirflowError(f"Invalid cron schedule: {cron_schedule} in DAG {dag.dag_id}")\n\n job = make_dagster_job_from_airflow_dag(dag, connections=connections)\n graph = job._graph_def # noqa: SLF001\n start_date = dag.start_date if dag.start_date else dag.default_args.get("start_date")\n if start_date is None:\n raise DagsterAirflowError(f"Invalid start_date: {start_date} in DAG {dag.dag_id}")\n\n # leaf nodes have no downstream nodes\n forward_edges, _ = create_adjacency_lists(graph.nodes, graph.dependency_structure)\n leaf_nodes = {\n node_name.replace(f"{normalized_name(dag.dag_id)}__", "")\n for node_name, downstream_nodes in forward_edges.items()\n if not downstream_nodes\n }\n\n mutated_task_ids_by_asset_key: dict[AssetKey, set[str]] = {}\n\n if task_ids_by_asset_key is None or task_ids_by_asset_key == {}:\n # if no mappings are provided the dag becomes a single SDA\n task_ids_by_asset_key = {AssetKey(dag.dag_id): leaf_nodes}\n else:\n # if mappings were provide any unmapped leaf nodes are added to a default asset\n used_nodes: set[str] = set()\n for key in task_ids_by_asset_key:\n used_nodes.update(task_ids_by_asset_key[key])\n\n mutated_task_ids_by_asset_key[AssetKey(dag.dag_id)] = leaf_nodes - used_nodes\n\n for key in task_ids_by_asset_key:\n if key not in mutated_task_ids_by_asset_key:\n mutated_task_ids_by_asset_key[key] = set(task_ids_by_asset_key[key])\n else:\n mutated_task_ids_by_asset_key[key].update(task_ids_by_asset_key[key])\n\n output_mappings, keys_by_output_name, internal_asset_deps = _build_asset_dependencies(\n dag, graph, mutated_task_ids_by_asset_key, upstream_dependencies_by_asset_key\n )\n\n new_graph = graph.copy(\n output_mappings=list(output_mappings),\n )\n\n asset_def = AssetsDefinition.from_graph(\n graph_def=new_graph,\n partitions_def=(\n TimeWindowPartitionsDefinition(\n cron_schedule=str(cron_schedule),\n timezone=dag.timezone.name,\n start=start_date.strftime("%Y-%m-%dT%H:%M:%S"),\n fmt="%Y-%m-%dT%H:%M:%S",\n )\n if cron_schedule is not None\n else None\n ),\n group_name=dag.dag_id,\n keys_by_output_name=keys_by_output_name,\n internal_asset_deps=internal_asset_deps,\n can_subset=True,\n )\n return [asset_def]
\n
", "current_page_name": "_modules/dagster_airflow/dagster_asset_factory", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_airflow.dagster_asset_factory"}, "dagster_factory": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_airflow.dagster_factory

\nimport os\nfrom typing import List, Mapping, Optional, Tuple\n\nfrom airflow.models.connection import Connection\nfrom airflow.models.dagbag import DagBag\nfrom dagster import (\n    Definitions,\n    JobDefinition,\n    ResourceDefinition,\n    ScheduleDefinition,\n    _check as check,\n)\n\nfrom dagster_airflow.dagster_job_factory import make_dagster_job_from_airflow_dag\nfrom dagster_airflow.dagster_schedule_factory import (\n    _is_dag_is_schedule,\n    make_dagster_schedule_from_airflow_dag,\n)\nfrom dagster_airflow.patch_airflow_example_dag import patch_airflow_example_dag\nfrom dagster_airflow.resources import (\n    make_ephemeral_airflow_db_resource as make_ephemeral_airflow_db_resource,\n)\nfrom dagster_airflow.resources.airflow_ephemeral_db import AirflowEphemeralDatabase\nfrom dagster_airflow.resources.airflow_persistent_db import AirflowPersistentDatabase\nfrom dagster_airflow.utils import (\n    is_airflow_2_loaded_in_environment,\n)\n\n\n
[docs]def make_dagster_definitions_from_airflow_dag_bag(\n dag_bag: DagBag,\n connections: Optional[List[Connection]] = None,\n resource_defs: Optional[Mapping[str, ResourceDefinition]] = {},\n) -> Definitions:\n """Construct a Dagster Definitions object corresponding to Airflow DAGs in DagBag.\n\n Usage:\n Create `make_dagster_definitions.py`:\n from dagster_airflow import make_dagster_definitions_from_airflow_dag_bag\n from airflow_home import my_dag_bag\n\n def make_definitions_from_dag_bag():\n return make_dagster_definitions_from_airflow_dag_bag(my_dag_bag)\n\n Use Definitions as usual, for example:\n `dagster-webserver -f path/to/make_dagster_definitions.py`\n\n Args:\n dag_bag (DagBag): Airflow DagBag Model\n connections (List[Connection]): List of Airflow Connections to be created in the Airflow DB\n\n Returns:\n Definitions\n """\n check.inst_param(dag_bag, "dag_bag", DagBag)\n connections = check.opt_list_param(connections, "connections", of_type=Connection)\n resource_defs = check.opt_mapping_param(resource_defs, "resource_defs")\n if resource_defs is None or "airflow_db" not in resource_defs:\n resource_defs = dict(resource_defs) if resource_defs else {}\n resource_defs["airflow_db"] = make_ephemeral_airflow_db_resource(connections=connections)\n\n schedules, jobs = make_schedules_and_jobs_from_airflow_dag_bag(\n dag_bag=dag_bag,\n connections=connections,\n resource_defs=resource_defs,\n )\n\n return Definitions(\n schedules=schedules,\n jobs=jobs,\n resources=resource_defs,\n )
\n\n\n
[docs]def make_dagster_definitions_from_airflow_dags_path(\n dag_path: str,\n safe_mode: bool = True,\n connections: Optional[List[Connection]] = None,\n resource_defs: Optional[Mapping[str, ResourceDefinition]] = {},\n) -> Definitions:\n """Construct a Dagster repository corresponding to Airflow DAGs in dag_path.\n\n Usage:\n Create ``make_dagster_definitions.py``:\n\n .. code-block:: python\n\n from dagster_airflow import make_dagster_definitions_from_airflow_dags_path\n\n def make_definitions_from_dir():\n return make_dagster_definitions_from_airflow_dags_path(\n '/path/to/dags/',\n )\n\n Use RepositoryDefinition as usual, for example:\n ``dagster-webserver -f path/to/make_dagster_repo.py -n make_repo_from_dir``\n\n Args:\n dag_path (str): Path to directory or file that contains Airflow Dags\n include_examples (bool): True to include Airflow's example DAGs. (default: False)\n safe_mode (bool): True to use Airflow's default heuristic to find files that contain DAGs\n (ie find files that contain both b'DAG' and b'airflow') (default: True)\n connections (List[Connection]): List of Airflow Connections to be created in the Airflow DB\n\n Returns:\n Definitions\n """\n check.str_param(dag_path, "dag_path")\n check.bool_param(safe_mode, "safe_mode")\n connections = check.opt_list_param(connections, "connections", of_type=Connection)\n resource_defs = check.opt_mapping_param(resource_defs, "resource_defs")\n if resource_defs is None or "airflow_db" not in resource_defs:\n resource_defs = dict(resource_defs) if resource_defs else {}\n resource_defs["airflow_db"] = make_ephemeral_airflow_db_resource(connections=connections)\n\n if (\n resource_defs["airflow_db"].resource_fn.__qualname__.split(".")[0]\n == "AirflowEphemeralDatabase"\n ):\n AirflowEphemeralDatabase._initialize_database(connections=connections) # noqa: SLF001\n elif (\n resource_defs["airflow_db"].resource_fn.__qualname__.split(".")[0]\n == "AirflowPersistentDatabase"\n ):\n AirflowPersistentDatabase._initialize_database( # noqa: SLF001\n uri=(\n os.getenv("AIRFLOW__DATABASE__SQL_ALCHEMY_CONN", "")\n if is_airflow_2_loaded_in_environment()\n else os.getenv("AIRFLOW__CORE__SQL_ALCHEMY_CONN", "")\n ),\n connections=connections,\n )\n\n dag_bag = DagBag(\n dag_folder=dag_path,\n include_examples=False, # Exclude Airflow example dags\n safe_mode=safe_mode,\n )\n\n return make_dagster_definitions_from_airflow_dag_bag(\n dag_bag=dag_bag,\n connections=connections,\n resource_defs=resource_defs,\n )
\n\n\ndef make_dagster_definitions_from_airflow_example_dags(\n resource_defs: Optional[Mapping[str, ResourceDefinition]] = {},\n) -> Definitions:\n """Construct a Dagster repository for Airflow's example DAGs.\n\n Usage:\n\n Create `make_dagster_definitions.py`:\n from dagster_airflow import make_dagster_definitions_from_airflow_example_dags\n\n def make_airflow_example_dags():\n return make_dagster_definitions_from_airflow_example_dags()\n\n Use Definitions as usual, for example:\n `dagster-webserver -f path/to/make_dagster_definitions.py`\n\n Args:\n resource_defs: Optional[Mapping[str, ResourceDefinition]]\n Resource definitions to be used with the definitions\n\n Returns:\n Definitions\n """\n dag_bag = DagBag(\n dag_folder="some/empty/folder/with/no/dags", # prevent defaulting to settings.DAGS_FOLDER\n include_examples=True,\n )\n\n # There is a bug in Airflow v1 where the python_callable for task\n # 'search_catalog' is missing a required position argument '_'. It is fixed in airflow v2\n patch_airflow_example_dag(dag_bag)\n\n return make_dagster_definitions_from_airflow_dag_bag(\n dag_bag=dag_bag, resource_defs=resource_defs\n )\n\n\n
[docs]def make_schedules_and_jobs_from_airflow_dag_bag(\n dag_bag: DagBag,\n connections: Optional[List[Connection]] = None,\n resource_defs: Optional[Mapping[str, ResourceDefinition]] = {},\n) -> Tuple[List[ScheduleDefinition], List[JobDefinition]]:\n """Construct Dagster Schedules and Jobs corresponding to Airflow DagBag.\n\n Args:\n dag_bag (DagBag): Airflow DagBag Model\n connections (List[Connection]): List of Airflow Connections to be created in the Airflow DB\n\n Returns:\n - List[ScheduleDefinition]: The generated Dagster Schedules\n - List[JobDefinition]: The generated Dagster Jobs\n """\n check.inst_param(dag_bag, "dag_bag", DagBag)\n connections = check.opt_list_param(connections, "connections", of_type=Connection)\n\n job_defs = []\n schedule_defs = []\n count = 0\n # To enforce predictable iteration order\n sorted_dag_ids = sorted(dag_bag.dag_ids)\n for dag_id in sorted_dag_ids:\n dag = dag_bag.dags.get(dag_id)\n if not dag:\n continue\n if _is_dag_is_schedule(dag):\n schedule_defs.append(\n make_dagster_schedule_from_airflow_dag(\n dag=dag, tags=None, connections=connections, resource_defs=resource_defs\n )\n )\n else:\n job_defs.append(\n make_dagster_job_from_airflow_dag(\n dag=dag, tags=None, connections=connections, resource_defs=resource_defs\n )\n )\n\n count += 1\n\n return schedule_defs, job_defs
\n
", "current_page_name": "_modules/dagster_airflow/dagster_factory", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_airflow.dagster_factory"}, "dagster_job_factory": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_airflow.dagster_job_factory

\nfrom typing import List, Mapping, Optional\n\nfrom airflow.models.connection import Connection\nfrom airflow.models.dag import DAG\nfrom dagster import (\n    GraphDefinition,\n    JobDefinition,\n    ResourceDefinition,\n    _check as check,\n)\nfrom dagster._core.definitions.utils import validate_tags\nfrom dagster._core.instance import IS_AIRFLOW_INGEST_PIPELINE_STR\n\nfrom dagster_airflow.airflow_dag_converter import get_graph_definition_args\nfrom dagster_airflow.resources import (\n    make_ephemeral_airflow_db_resource as make_ephemeral_airflow_db_resource,\n)\nfrom dagster_airflow.utils import (\n    normalized_name,\n)\n\n\n
[docs]def make_dagster_job_from_airflow_dag(\n dag: DAG,\n tags: Optional[Mapping[str, str]] = None,\n connections: Optional[List[Connection]] = None,\n resource_defs: Optional[Mapping[str, ResourceDefinition]] = {},\n) -> JobDefinition:\n """Construct a Dagster job corresponding to a given Airflow DAG.\n\n Tasks in the resulting job will execute the ``execute()`` method on the corresponding\n Airflow Operator. Dagster, any dependencies required by Airflow Operators, and the module\n containing your DAG definition must be available in the Python environment within which your\n Dagster solids execute.\n\n To set Airflow's ``execution_date`` for use with Airflow Operator's ``execute()`` methods,\n either:\n\n 1. (Best for ad hoc runs) Execute job directly. This will set execution_date to the\n time (in UTC) of the run.\n\n 2. Add ``{'airflow_execution_date': utc_date_string}`` to the job tags. This will override\n behavior from (1).\n\n .. code-block:: python\n\n my_dagster_job = make_dagster_job_from_airflow_dag(\n dag=dag,\n tags={'airflow_execution_date': utc_execution_date_str}\n )\n my_dagster_job.execute_in_process()\n\n 3. (Recommended) Add ``{'airflow_execution_date': utc_date_string}`` to the run tags,\n such as in the Dagster UI. This will override behavior from (1) and (2)\n\n\n We apply normalized_name() to the dag id and task ids when generating job name and op\n names to ensure that names conform to Dagster's naming conventions.\n\n Args:\n dag (DAG): The Airflow DAG to compile into a Dagster job\n tags (Dict[str, Field]): Job tags. Optionally include\n `tags={'airflow_execution_date': utc_date_string}` to specify execution_date used within\n execution of Airflow Operators.\n connections (List[Connection]): List of Airflow Connections to be created in the Ephemeral\n Airflow DB, if use_emphemeral_airflow_db is False this will be ignored.\n\n Returns:\n JobDefinition: The generated Dagster job\n\n """\n check.inst_param(dag, "dag", DAG)\n tags = check.opt_mapping_param(tags, "tags")\n connections = check.opt_list_param(connections, "connections", of_type=Connection)\n\n mutated_tags = dict(tags)\n if IS_AIRFLOW_INGEST_PIPELINE_STR not in tags:\n mutated_tags[IS_AIRFLOW_INGEST_PIPELINE_STR] = "true"\n\n mutated_tags = validate_tags(mutated_tags)\n\n node_dependencies, node_defs = get_graph_definition_args(dag=dag)\n\n graph_def = GraphDefinition(\n name=normalized_name(dag.dag_id),\n description="",\n node_defs=node_defs,\n dependencies=node_dependencies,\n tags=mutated_tags,\n )\n\n if resource_defs is None or "airflow_db" not in resource_defs:\n resource_defs = dict(resource_defs) if resource_defs else {}\n resource_defs["airflow_db"] = make_ephemeral_airflow_db_resource(connections=connections)\n\n job_def = JobDefinition(\n name=normalized_name(dag.dag_id),\n description="",\n graph_def=graph_def,\n resource_defs=resource_defs,\n tags=mutated_tags,\n metadata={},\n op_retry_policy=None,\n version_strategy=None,\n )\n return job_def
\n
", "current_page_name": "_modules/dagster_airflow/dagster_job_factory", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_airflow.dagster_job_factory"}, "operators": {"dagster_operator": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_airflow.operators.dagster_operator

\nimport json\n\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\n\nfrom dagster_airflow.hooks.dagster_hook import DagsterHook\nfrom dagster_airflow.links.dagster_link import LINK_FMT, DagsterLink\nfrom dagster_airflow.utils import is_airflow_2_loaded_in_environment\n\n\n
[docs]class DagsterOperator(BaseOperator):\n """DagsterOperator.\n\n Uses the dagster graphql api to run and monitor dagster jobs on remote dagster infrastructure\n\n Parameters:\n repository_name (str): the name of the repository to use\n repostitory_location_name (str): the name of the repostitory location to use\n job_name (str): the name of the job to run\n run_config (Optional[Dict[str, Any]]): the run config to use for the job run\n dagster_conn_id (Optional[str]): the id of the dagster connection, airflow 2.0+ only\n organization_id (Optional[str]): the id of the dagster cloud organization\n deployment_name (Optional[str]): the name of the dagster cloud deployment\n user_token (Optional[str]): the dagster cloud user token to use\n """\n\n template_fields = ["run_config"]\n template_ext = (".yaml", ".yml", ".json")\n ui_color = "#663399"\n ui_fgcolor = "#e0e3fc"\n operator_extra_links = (DagsterLink(),)\n\n @apply_defaults\n def __init__(\n self,\n dagster_conn_id="dagster_default",\n run_config=None,\n repository_name="",\n repostitory_location_name="",\n job_name="",\n # params for airflow < 2.0.0 were custom connections aren't supported\n deployment_name="prod",\n user_token=None,\n organization_id="",\n url="https://dagster.cloud/",\n *args,\n **kwargs,\n ) -> None:\n super().__init__(*args, **kwargs)\n self.run_id = None\n self.dagster_conn_id = dagster_conn_id if is_airflow_2_loaded_in_environment() else None\n self.run_config = run_config or {}\n self.repository_name = repository_name\n self.repostitory_location_name = repostitory_location_name\n self.job_name = job_name\n\n self.user_token = user_token\n self.url = url\n self.organization_id = organization_id\n self.deployment_name = deployment_name\n\n self.hook = DagsterHook(\n dagster_conn_id=self.dagster_conn_id,\n user_token=self.user_token,\n url=f"{self.url}{self.organization_id}/{self.deployment_name}/graphql",\n )\n\n def _is_json(self, blob):\n try:\n json.loads(blob)\n except ValueError:\n return False\n return True\n\n def pre_execute(self, context):\n # force re-rendering to ensure run_config renders any templated\n # content from run_config that couldn't be accessed on init\n setattr(\n self,\n "run_config",\n self.render_template(self.run_config, context),\n )\n\n def on_kill(self):\n self.log.info("Terminating Run")\n self.hook.terminate_run(\n run_id=self.run_id,\n )\n\n def execute(self, context):\n try:\n return self._execute(context)\n except Exception as e:\n raise e\n\n def _execute(self, context):\n self.run_id = self.hook.launch_run(\n repository_name=self.repository_name,\n repostitory_location_name=self.repostitory_location_name,\n job_name=self.job_name,\n run_config=self.run_config,\n )\n # save relevant info in xcom for use in links\n context["task_instance"].xcom_push(key="run_id", value=self.run_id)\n context["task_instance"].xcom_push(\n key="organization_id",\n value=self.hook.organization_id if self.dagster_conn_id else self.organization_id,\n )\n context["task_instance"].xcom_push(\n key="deployment_name",\n value=self.hook.deployment_name if self.dagster_conn_id else self.deployment_name,\n )\n\n self.log.info("Run Starting....")\n self.log.info(\n "Run tracking: %s",\n LINK_FMT.format(\n organization_id=self.hook.organization_id,\n deployment_name=self.hook.deployment_name,\n run_id=self.run_id,\n ),\n )\n self.hook.wait_for_run(\n run_id=self.run_id,\n )
\n\n\n
[docs]class DagsterCloudOperator(DagsterOperator):\n """DagsterCloudOperator.\n\n Uses the dagster cloud graphql api to run and monitor dagster jobs on dagster cloud\n\n Parameters:\n repository_name (str): the name of the repository to use\n repostitory_location_name (str): the name of the repository location to use\n job_name (str): the name of the job to run\n run_config (Optional[Dict[str, Any]]): the run config to use for the job run\n dagster_conn_id (Optional[str]): the id of the dagster connection, airflow 2.0+ only\n organization_id (Optional[str]): the id of the dagster cloud organization\n deployment_name (Optional[str]): the name of the dagster cloud deployment\n user_token (Optional[str]): the dagster cloud user token to use\n """
\n
", "current_page_name": "_modules/dagster_airflow/operators/dagster_operator", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_airflow.operators.dagster_operator"}}, "resources": {"airflow_ephemeral_db": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_airflow.resources.airflow_ephemeral_db

\nimport importlib\nimport os\nimport tempfile\nfrom typing import List, Optional\n\nimport airflow\nfrom airflow.models.connection import Connection\nfrom airflow.utils import db\nfrom dagster import (\n    Array,\n    DagsterRun,\n    Field,\n    InitResourceContext,\n    Noneable,\n    ResourceDefinition,\n    _check as check,\n)\n\nfrom dagster_airflow.resources.airflow_db import AirflowDatabase\nfrom dagster_airflow.utils import (\n    Locker,\n    create_airflow_connections,\n    is_airflow_2_loaded_in_environment,\n    serialize_connections,\n)\n\n\nclass AirflowEphemeralDatabase(AirflowDatabase):\n    """A ephemeral Airflow database Dagster resource."""\n\n    def __init__(\n        self, airflow_home_path: str, dagster_run: DagsterRun, dag_run_config: Optional[dict] = None\n    ):\n        self.airflow_home_path = airflow_home_path\n        super().__init__(dagster_run=dagster_run, dag_run_config=dag_run_config)\n\n    @staticmethod\n    def _initialize_database(\n        airflow_home_path: str = os.path.join(tempfile.gettempdir(), "dagster_airflow"),\n        connections: List[Connection] = [],\n    ):\n        os.environ["AIRFLOW_HOME"] = airflow_home_path\n        os.makedirs(airflow_home_path, exist_ok=True)\n        with Locker(airflow_home_path):\n            airflow_initialized = os.path.exists(f"{airflow_home_path}/airflow.db")\n            # because AIRFLOW_HOME has been overriden airflow needs to be reloaded\n            if is_airflow_2_loaded_in_environment():\n                importlib.reload(airflow.configuration)\n                importlib.reload(airflow.settings)\n                importlib.reload(airflow)\n            else:\n                importlib.reload(airflow)\n            if not airflow_initialized:\n                db.initdb()\n                create_airflow_connections(connections)\n\n    @staticmethod\n    def from_resource_context(context: InitResourceContext) -> "AirflowEphemeralDatabase":\n        airflow_home_path = os.path.join(tempfile.gettempdir(), f"dagster_airflow_{context.run_id}")\n        AirflowEphemeralDatabase._initialize_database(\n            airflow_home_path=airflow_home_path,\n            connections=[Connection(**c) for c in context.resource_config["connections"]],\n        )\n        return AirflowEphemeralDatabase(\n            airflow_home_path=airflow_home_path,\n            dagster_run=check.not_none(context.dagster_run, "Context must have run"),\n            dag_run_config=context.resource_config.get("dag_run_config"),\n        )\n\n\n
[docs]def make_ephemeral_airflow_db_resource(\n connections: List[Connection] = [], dag_run_config: Optional[dict] = None\n) -> ResourceDefinition:\n """Creates a Dagster resource that provides an ephemeral Airflow database.\n\n Args:\n connections (List[Connection]): List of Airflow Connections to be created in the Airflow DB\n dag_run_config (Optional[dict]): dag_run configuration to be used when creating a DagRun\n\n Returns:\n ResourceDefinition: The ephemeral Airflow DB resource\n\n """\n serialized_connections = serialize_connections(connections)\n airflow_db_resource_def = ResourceDefinition(\n resource_fn=AirflowEphemeralDatabase.from_resource_context,\n config_schema={\n "connections": Field(\n Array(inner_type=dict),\n default_value=serialized_connections,\n is_required=False,\n ),\n "dag_run_config": Field(\n Noneable(dict),\n default_value=dag_run_config,\n is_required=False,\n ),\n },\n description="Ephemeral Airflow DB to be used by dagster-airflow ",\n )\n return airflow_db_resource_def
\n
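For orientation, a minimal wiring sketch for the ephemeral variant, mirroring the usage shown in the persistent variant's docstring below. The DAGs path and the ``Connection`` values are placeholders, and both factories are assumed to be importable from ``dagster_airflow``.

.. code-block:: python

    from airflow.models.connection import Connection
    from dagster_airflow import (
        make_dagster_definitions_from_airflow_dags_path,
        make_ephemeral_airflow_db_resource,
    )

    # Ephemeral, run-scoped Airflow DB seeded with a single (placeholder) connection.
    airflow_db = make_ephemeral_airflow_db_resource(
        connections=[Connection(conn_id="my_conn", conn_type="http", host="https://example.com")]
    )

    definitions = make_dagster_definitions_from_airflow_dags_path(
        "/path/to/dags/",
        resource_defs={"airflow_db": airflow_db},
    )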
", "current_page_name": "_modules/dagster_airflow/resources/airflow_ephemeral_db", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_airflow.resources.airflow_ephemeral_db"}, "airflow_persistent_db": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_airflow.resources.airflow_persistent_db

\nimport importlib\nimport os\nfrom typing import List, Optional\n\nimport airflow\nfrom airflow.models.connection import Connection\nfrom dagster import (\n    Array,\n    DagsterRun,\n    Field,\n    InitResourceContext,\n    ResourceDefinition,\n    StringSource,\n    _check as check,\n)\n\nfrom dagster_airflow.resources.airflow_db import AirflowDatabase\nfrom dagster_airflow.utils import (\n    create_airflow_connections,\n    is_airflow_2_loaded_in_environment,\n    serialize_connections,\n)\n\n\nclass AirflowPersistentDatabase(AirflowDatabase):\n    """A persistent Airflow database Dagster resource."""\n\n    def __init__(self, dagster_run: DagsterRun, uri: str, dag_run_config: Optional[dict] = None):\n        self.uri = uri\n        super().__init__(dagster_run=dagster_run, dag_run_config=dag_run_config)\n\n    @staticmethod\n    def _initialize_database(uri: str, connections: List[Connection] = []):\n        if is_airflow_2_loaded_in_environment("2.3.0"):\n            os.environ["AIRFLOW__DATABASE__SQL_ALCHEMY_CONN"] = uri\n            importlib.reload(airflow.configuration)\n            importlib.reload(airflow.settings)\n            importlib.reload(airflow)\n        else:\n            os.environ["AIRFLOW__CORE__SQL_ALCHEMY_CONN"] = uri\n            importlib.reload(airflow)\n        create_airflow_connections(connections)\n\n    @staticmethod\n    def from_resource_context(context: InitResourceContext) -> "AirflowPersistentDatabase":\n        uri = context.resource_config["uri"]\n        AirflowPersistentDatabase._initialize_database(\n            uri=uri, connections=[Connection(**c) for c in context.resource_config["connections"]]\n        )\n        return AirflowPersistentDatabase(\n            dagster_run=check.not_none(context.dagster_run, "Context must have run"),\n            uri=uri,\n            dag_run_config=context.resource_config["dag_run_config"],\n        )\n\n\n
[docs]def make_persistent_airflow_db_resource(\n uri: str = "",\n connections: List[Connection] = [],\n dag_run_config: Optional[dict] = {},\n) -> ResourceDefinition:\n """Creates a Dagster resource that provides an persistent Airflow database.\n\n\n Usage:\n .. code-block:: python\n\n from dagster_airflow import (\n make_dagster_definitions_from_airflow_dags_path,\n make_persistent_airflow_db_resource,\n )\n postgres_airflow_db = "postgresql+psycopg2://airflow:airflow@localhost:5432/airflow"\n airflow_db = make_persistent_airflow_db_resource(uri=postgres_airflow_db)\n definitions = make_dagster_definitions_from_airflow_example_dags(\n '/path/to/dags/',\n resource_defs={"airflow_db": airflow_db}\n )\n\n\n Args:\n uri: SQLAlchemy URI of the Airflow DB to be used\n connections (List[Connection]): List of Airflow Connections to be created in the Airflow DB\n dag_run_config (Optional[dict]): dag_run configuration to be used when creating a DagRun\n\n Returns:\n ResourceDefinition: The persistent Airflow DB resource\n\n """\n if is_airflow_2_loaded_in_environment():\n os.environ["AIRFLOW__DATABASE__SQL_ALCHEMY_CONN"] = uri\n else:\n os.environ["AIRFLOW__CORE__SQL_ALCHEMY_CONN"] = uri\n\n serialized_connections = serialize_connections(connections)\n\n airflow_db_resource_def = ResourceDefinition(\n resource_fn=AirflowPersistentDatabase.from_resource_context,\n config_schema={\n "uri": Field(\n StringSource,\n default_value=uri,\n is_required=False,\n ),\n "connections": Field(\n Array(inner_type=dict),\n default_value=serialized_connections,\n is_required=False,\n ),\n "dag_run_config": Field(\n dict,\n default_value=dag_run_config,\n is_required=False,\n ),\n },\n description="Persistent Airflow DB to be used by dagster-airflow ",\n )\n return airflow_db_resource_def
\n
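Because ``uri`` is declared as a ``StringSource`` field in the config schema above, the database URI can also be supplied when the resource is initialized rather than hard-coded at definition time. A sketch under that assumption; ``AIRFLOW_DB_URI`` is an illustrative environment variable name, not part of dagster-airflow.

.. code-block:: python

    from dagster_airflow import (
        make_dagster_definitions_from_airflow_dags_path,
        make_persistent_airflow_db_resource,
    )

    # Resolve the SQLAlchemy URI from an environment variable at resource init time.
    airflow_db = make_persistent_airflow_db_resource().configured(
        {"uri": {"env": "AIRFLOW_DB_URI"}}
    )

    definitions = make_dagster_definitions_from_airflow_dags_path(
        "/path/to/dags/",
        resource_defs={"airflow_db": airflow_db},
    )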
", "current_page_name": "_modules/dagster_airflow/resources/airflow_persistent_db", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_airflow.resources.airflow_persistent_db"}}}, "dagster_aws": {"ecs": {"launcher": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_aws.ecs.launcher

\nimport json\nimport logging\nimport os\nimport uuid\nimport warnings\nfrom collections import namedtuple\nfrom typing import Any, Dict, List, Mapping, Optional, Sequence\n\nimport boto3\nfrom botocore.exceptions import ClientError\nfrom dagster import (\n    Array,\n    DagsterRunStatus,\n    Field,\n    Noneable,\n    Permissive,\n    ScalarUnion,\n    StringSource,\n    _check as check,\n)\nfrom dagster._core.events import EngineEventData\nfrom dagster._core.instance import T_DagsterInstance\nfrom dagster._core.launcher.base import (\n    CheckRunHealthResult,\n    LaunchRunContext,\n    RunLauncher,\n    WorkerStatus,\n)\nfrom dagster._core.storage.dagster_run import DagsterRun\nfrom dagster._core.storage.tags import RUN_WORKER_ID_TAG\nfrom dagster._grpc.types import ExecuteRunArgs\nfrom dagster._serdes import ConfigurableClass\nfrom dagster._serdes.config_class import ConfigurableClassData\nfrom dagster._utils.backoff import backoff\nfrom typing_extensions import Self\n\nfrom ..secretsmanager import get_secrets_from_arns\nfrom .container_context import SHARED_ECS_SCHEMA, SHARED_TASK_DEFINITION_FIELDS, EcsContainerContext\nfrom .tasks import (\n    DagsterEcsTaskDefinitionConfig,\n    get_current_ecs_task,\n    get_current_ecs_task_metadata,\n    get_task_definition_dict_from_current_task,\n    get_task_kwargs_from_current_task,\n)\nfrom .utils import get_task_definition_family, get_task_logs, task_definitions_match\n\nTags = namedtuple("Tags", ["arn", "cluster", "cpu", "memory"])\n\nRUNNING_STATUSES = [\n    "PROVISIONING",\n    "PENDING",\n    "ACTIVATING",\n    "RUNNING",\n    "DEACTIVATING",\n    "STOPPING",\n    "DEPROVISIONING",\n]\nSTOPPED_STATUSES = ["STOPPED"]\n\nDEFAULT_WINDOWS_RESOURCES = {"cpu": "1024", "memory": "2048"}\n\nDEFAULT_LINUX_RESOURCES = {"cpu": "256", "memory": "512"}\n\n\n
[docs]class EcsRunLauncher(RunLauncher[T_DagsterInstance], ConfigurableClass):\n """RunLauncher that starts a task in ECS for each Dagster job run."""\n\n def __init__(\n self,\n inst_data: Optional[ConfigurableClassData] = None,\n task_definition=None,\n container_name="run",\n secrets=None,\n secrets_tag="dagster",\n env_vars=None,\n include_sidecars=False,\n use_current_ecs_task_config: bool = True,\n run_task_kwargs: Optional[Mapping[str, Any]] = None,\n run_resources: Optional[Dict[str, Any]] = None,\n run_ecs_tags: Optional[List[Dict[str, Optional[str]]]] = None,\n ):\n self._inst_data = inst_data\n self.ecs = boto3.client("ecs")\n self.ec2 = boto3.resource("ec2")\n self.secrets_manager = boto3.client("secretsmanager")\n self.logs = boto3.client("logs")\n\n self.task_definition = None\n self.task_definition_dict = {}\n if isinstance(task_definition, str):\n self.task_definition = task_definition\n elif task_definition and "env" in task_definition:\n check.invariant(\n len(task_definition) == 1,\n "If `task_definition` is set to a dictionary with `env`, `env` must be the only"\n " key.",\n )\n env_var = task_definition["env"]\n self.task_definition = os.getenv(env_var)\n if not self.task_definition:\n raise Exception(\n f"You have attempted to fetch the environment variable {env_var} which is not"\n " set."\n )\n else:\n self.task_definition_dict = task_definition or {}\n\n self.container_name = container_name\n\n self.secrets = check.opt_list_param(secrets, "secrets")\n\n self.env_vars = check.opt_list_param(env_vars, "env_vars")\n\n if self.secrets and all(isinstance(secret, str) for secret in self.secrets):\n warnings.warn(\n "Setting secrets as a list of ARNs is deprecated. "\n "Secrets should instead follow the same structure as the ECS API: "\n "https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_Secret.html",\n DeprecationWarning,\n )\n self.secrets = [\n {"name": name, "valueFrom": value_from}\n for name, value_from in get_secrets_from_arns(\n self.secrets_manager, self.secrets\n ).items()\n ]\n\n self.secrets_tags = [secrets_tag] if secrets_tag else []\n self.include_sidecars = include_sidecars\n\n if self.task_definition:\n task_definition = self.ecs.describe_task_definition(taskDefinition=self.task_definition)\n container_names = [\n container.get("name")\n for container in task_definition["taskDefinition"]["containerDefinitions"]\n ]\n check.invariant(\n container_name in container_names,\n f"Cannot override container '{container_name}' in task definition "\n f"'{self.task_definition}' because the container is not defined.",\n )\n self.task_definition = task_definition["taskDefinition"]["taskDefinitionArn"]\n\n self.use_current_ecs_task_config = check.opt_bool_param(\n use_current_ecs_task_config, "use_current_ecs_task_config"\n )\n\n self.run_task_kwargs = check.opt_mapping_param(run_task_kwargs, "run_task_kwargs")\n if run_task_kwargs:\n check.invariant(\n "taskDefinition" not in run_task_kwargs,\n "Use the `taskDefinition` config field to pass in a task definition to run.",\n )\n check.invariant(\n "overrides" not in run_task_kwargs,\n "Task overrides are set by the run launcher and cannot be set in run_task_kwargs.",\n )\n\n expected_keys = [\n key for key in self.ecs.meta.service_model.shape_for("RunTaskRequest").members\n ]\n\n for key in run_task_kwargs:\n check.invariant(\n key in expected_keys, f"Found an unexpected key {key} in run_task_kwargs"\n )\n\n self.run_resources = check.opt_mapping_param(run_resources, "run_resources")\n\n self.run_ecs_tags = 
check.opt_sequence_param(run_ecs_tags, "run_ecs_tags")\n\n self._current_task_metadata = None\n self._current_task = None\n\n @property\n def inst_data(self):\n return self._inst_data\n\n @property\n def task_role_arn(self) -> Optional[str]:\n if not self.task_definition_dict:\n return None\n return self.task_definition_dict.get("task_role_arn")\n\n @property\n def execution_role_arn(self) -> Optional[str]:\n if not self.task_definition_dict:\n return None\n return self.task_definition_dict.get("execution_role_arn")\n\n @property\n def runtime_platform(self) -> Optional[Mapping[str, Any]]:\n if not self.task_definition_dict:\n return None\n return self.task_definition_dict.get("runtime_platform")\n\n @property\n def mount_points(self) -> Optional[Sequence[Mapping[str, Any]]]:\n if not self.task_definition_dict:\n return None\n return self.task_definition_dict.get("mount_points")\n\n @property\n def volumes(self) -> Optional[Sequence[Mapping[str, Any]]]:\n if not self.task_definition_dict:\n return None\n return self.task_definition_dict.get("volumes")\n\n @property\n def repository_credentials(self) -> Optional[str]:\n if not self.task_definition_dict:\n return None\n return self.task_definition_dict.get("repository_credentials")\n\n @property\n def run_sidecar_containers(self) -> Optional[Sequence[Mapping[str, Any]]]:\n if not self.task_definition_dict:\n return None\n return self.task_definition_dict.get("sidecar_containers")\n\n @classmethod\n def config_type(cls):\n return {\n "task_definition": Field(\n ScalarUnion(\n scalar_type=str,\n non_scalar_schema={\n "log_group": Field(StringSource, is_required=False),\n "sidecar_containers": Field(Array(Permissive({})), is_required=False),\n "requires_compatibilities": Field(Array(str), is_required=False),\n "env": Field(\n str,\n is_required=False,\n description=(\n "Backwards-compatibility for when task_definition was a"\n " StringSource.Can be used to source the task_definition scalar"\n " from an environment variable."\n ),\n ),\n **SHARED_TASK_DEFINITION_FIELDS,\n },\n ),\n is_required=False,\n description=(\n "Either the short name of an existing task definition to use when launching new"\n " tasks, or a dictionary configuration to use when creating a task definition"\n " for the run.If neither is provided, the task definition will be created based"\n " on the current task's task definition."\n ),\n ),\n "container_name": Field(\n StringSource,\n is_required=False,\n default_value="run",\n description=(\n "The container name to use when launching new tasks. Defaults to 'run'."\n ),\n ),\n "secrets": Field(\n Array(\n ScalarUnion(\n scalar_type=str,\n non_scalar_schema={"name": StringSource, "valueFrom": StringSource},\n )\n ),\n is_required=False,\n description=(\n "An array of AWS Secrets Manager secrets. These secrets will "\n "be mounted as environment variables in the container. See "\n "https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_Secret.html."\n ),\n ),\n "secrets_tag": Field(\n Noneable(StringSource),\n is_required=False,\n default_value="dagster",\n description=(\n "AWS Secrets Manager secrets with this tag will be mounted as "\n "environment variables in the container. Defaults to 'dagster'."\n ),\n ),\n "include_sidecars": Field(\n bool,\n is_required=False,\n default_value=False,\n description=(\n "Whether each run should use the same sidecars as the task that launches it. 
"\n "Defaults to False."\n ),\n ),\n "use_current_ecs_task_config": Field(\n bool,\n is_required=False,\n default_value=True,\n description=(\n "Whether to use the run launcher's current ECS task in order to determine "\n "the cluster and networking configuration for the launched task. Defaults to "\n "True. Should only be called if the run launcher is running within an ECS "\n "task."\n ),\n ),\n "run_task_kwargs": Field(\n Permissive(\n {\n "cluster": Field(\n StringSource,\n is_required=False,\n description="Name of the ECS cluster to launch ECS tasks in.",\n ),\n }\n ),\n is_required=False,\n description=(\n "Additional arguments to include while running the task. See"\n " https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.run_task"\n " for the available parameters. The overrides and taskDefinition arguments will"\n " always be set by the run launcher."\n ),\n ),\n **SHARED_ECS_SCHEMA,\n }\n\n @classmethod\n def from_config_value(\n cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any]\n ) -> Self:\n return EcsRunLauncher(inst_data=inst_data, **config_value)\n\n def _set_run_tags(self, run_id: str, cluster: str, task_arn: str):\n tags = {\n "ecs/task_arn": task_arn,\n "ecs/cluster": cluster,\n RUN_WORKER_ID_TAG: str(uuid.uuid4().hex)[0:6],\n }\n self._instance.add_run_tags(run_id, tags)\n\n def build_ecs_tags_for_run_task(self, run, container_context: EcsContainerContext):\n if any(tag["key"] == "dagster/run_id" for tag in container_context.run_ecs_tags):\n raise Exception("Cannot override system ECS tag: dagster/run_id")\n\n return [{"key": "dagster/run_id", "value": run.run_id}, *container_context.run_ecs_tags]\n\n def _get_run_tags(self, run_id):\n run = self._instance.get_run_by_id(run_id)\n tags = run.tags if run else {}\n arn = tags.get("ecs/task_arn")\n cluster = tags.get("ecs/cluster")\n cpu = tags.get("ecs/cpu")\n memory = tags.get("ecs/memory")\n\n return Tags(arn, cluster, cpu, memory)\n\n def _get_command_args(self, run_args: ExecuteRunArgs, context: LaunchRunContext):\n return run_args.get_command_args()\n\n def _get_image_for_run(self, context: LaunchRunContext) -> Optional[str]:\n job_origin = check.not_none(context.job_code_origin)\n return job_origin.repository_origin.container_image\n\n def launch_run(self, context: LaunchRunContext) -> None:\n """Launch a run in an ECS task."""\n run = context.dagster_run\n container_context = EcsContainerContext.create_for_run(run, self)\n\n job_origin = check.not_none(context.job_code_origin)\n\n # ECS limits overrides to 8192 characters including json formatting\n # https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RunTask.html\n # When container_context is serialized as part of the ExecuteRunArgs, we risk\n # going over this limit (for example, if many secrets have been set). 
This strips\n # the container context off of our job origin because we don't actually need\n # it to launch the run; we only needed it to create the task definition.\n repository_origin = job_origin.repository_origin\n\n stripped_repository_origin = repository_origin._replace(container_context={})\n stripped_job_origin = job_origin._replace(repository_origin=stripped_repository_origin)\n\n args = ExecuteRunArgs(\n job_origin=stripped_job_origin,\n run_id=run.run_id,\n instance_ref=self._instance.get_ref(),\n )\n command = self._get_command_args(args, context)\n image = self._get_image_for_run(context)\n\n run_task_kwargs = self._run_task_kwargs(run, image, container_context)\n\n # Set cpu or memory overrides\n # https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-cpu-memory-error.html\n cpu_and_memory_overrides = self.get_cpu_and_memory_overrides(container_context, run)\n\n task_overrides = self._get_task_overrides(container_context, run)\n\n container_overrides: List[Dict[str, Any]] = [\n {\n "name": self._get_container_name(container_context),\n "command": command,\n # containerOverrides expects cpu/memory as integers\n **{k: int(v) for k, v in cpu_and_memory_overrides.items()},\n }\n ]\n\n run_task_kwargs["overrides"] = {\n "containerOverrides": container_overrides,\n # taskOverrides expects cpu/memory as strings\n **cpu_and_memory_overrides,\n **task_overrides,\n }\n run_task_kwargs["tags"] = [\n *run_task_kwargs.get("tags", []),\n *self.build_ecs_tags_for_run_task(run, container_context),\n ]\n\n run_task_kwargs_from_run = self._get_run_task_kwargs_from_run(run)\n run_task_kwargs.update(run_task_kwargs_from_run)\n\n # launchType and capacityProviderStrategy are incompatible - prefer the latter if it is set\n if "launchType" in run_task_kwargs and run_task_kwargs.get("capacityProviderStrategy"):\n del run_task_kwargs["launchType"]\n\n # Run a task using the same network configuration as this processes's task.\n response = self.ecs.run_task(**run_task_kwargs)\n\n tasks = response["tasks"]\n\n if not tasks:\n failures = response["failures"]\n failure_messages = []\n for failure in failures:\n arn = failure.get("arn")\n reason = failure.get("reason")\n detail = failure.get("detail")\n\n failure_message = (\n "Task"\n + (f" {arn}" if arn else "")\n + " failed."\n + (f" Failure reason: {reason}" if reason else "")\n + (f" Failure details: {detail}" if detail else "")\n )\n failure_messages.append(failure_message)\n\n raise Exception("\\n".join(failure_messages) if failure_messages else "Task failed.")\n\n arn = tasks[0]["taskArn"]\n cluster_arn = tasks[0]["clusterArn"]\n self._set_run_tags(run.run_id, cluster=cluster_arn, task_arn=arn)\n self.report_launch_events(run, arn, cluster_arn)\n\n def report_launch_events(\n self, run: DagsterRun, arn: Optional[str] = None, cluster: Optional[str] = None\n ):\n # Extracted method to allow for subclasses to customize the launch reporting behavior\n\n metadata = {}\n if arn:\n metadata["ECS Task ARN"] = arn\n if cluster:\n metadata["ECS Cluster"] = cluster\n\n metadata["Run ID"] = run.run_id\n self._instance.report_engine_event(\n message="Launching run in ECS task",\n dagster_run=run,\n engine_event_data=EngineEventData(metadata),\n cls=self.__class__,\n )\n\n def get_cpu_and_memory_overrides(\n self, container_context: EcsContainerContext, run: DagsterRun\n ) -> Mapping[str, str]:\n overrides = {}\n\n cpu = run.tags.get("ecs/cpu", container_context.run_resources.get("cpu"))\n memory = run.tags.get("ecs/memory", 
container_context.run_resources.get("memory"))\n\n if cpu:\n overrides["cpu"] = cpu\n if memory:\n overrides["memory"] = memory\n\n return overrides\n\n def _get_task_overrides(\n self, container_context: EcsContainerContext, run: DagsterRun\n ) -> Mapping[str, Any]:\n tag_overrides = run.tags.get("ecs/task_overrides")\n\n overrides = {}\n\n if tag_overrides:\n overrides = json.loads(tag_overrides)\n\n ephemeral_storage = run.tags.get(\n "ecs/ephemeral_storage", container_context.run_resources.get("ephemeral_storage")\n )\n if ephemeral_storage:\n overrides["ephemeralStorage"] = {"sizeInGiB": int(ephemeral_storage)}\n\n return overrides\n\n def _get_run_task_kwargs_from_run(self, run: DagsterRun) -> Mapping[str, Any]:\n run_task_kwargs = run.tags.get("ecs/run_task_kwargs")\n if run_task_kwargs:\n return json.loads(run_task_kwargs)\n return {}\n\n def terminate(self, run_id):\n tags = self._get_run_tags(run_id)\n\n run = self._instance.get_run_by_id(run_id)\n if not run:\n return False\n\n self._instance.report_run_canceling(run)\n\n if not (tags.arn and tags.cluster):\n return False\n\n tasks = self.ecs.describe_tasks(tasks=[tags.arn], cluster=tags.cluster).get("tasks")\n if not tasks:\n return False\n\n status = tasks[0].get("lastStatus")\n if status == "STOPPED":\n return False\n\n self.ecs.stop_task(task=tags.arn, cluster=tags.cluster)\n return True\n\n def _get_current_task_metadata(self):\n if self._current_task_metadata is None:\n self._current_task_metadata = get_current_ecs_task_metadata()\n return self._current_task_metadata\n\n def _get_current_task(self):\n if self._current_task is None:\n current_task_metadata = self._get_current_task_metadata()\n self._current_task = get_current_ecs_task(\n self.ecs, current_task_metadata.task_arn, current_task_metadata.cluster\n )\n\n return self._current_task\n\n def _get_run_task_definition_family(self, run: DagsterRun) -> str:\n return get_task_definition_family("run", check.not_none(run.external_job_origin))\n\n def _get_container_name(self, container_context) -> str:\n return container_context.container_name or self.container_name\n\n def _run_task_kwargs(self, run, image, container_context) -> Dict[str, Any]:\n """Return a dictionary of args to launch the ECS task, registering a new task\n definition if needed.\n """\n environment = self._environment(container_context)\n environment.append({"name": "DAGSTER_RUN_JOB_NAME", "value": run.job_name})\n\n secrets = self._secrets(container_context)\n\n if container_context.task_definition_arn:\n task_definition = container_context.task_definition_arn\n else:\n family = self._get_run_task_definition_family(run)\n\n if self.task_definition_dict or not self.use_current_ecs_task_config:\n runtime_platform = container_context.runtime_platform\n is_windows = container_context.runtime_platform.get(\n "operatingSystemFamily"\n ) not in {None, "LINUX"}\n\n default_resources = (\n DEFAULT_WINDOWS_RESOURCES if is_windows else DEFAULT_LINUX_RESOURCES\n )\n task_definition_config = DagsterEcsTaskDefinitionConfig(\n family,\n image,\n self._get_container_name(container_context),\n command=None,\n log_configuration=(\n {\n "logDriver": "awslogs",\n "options": {\n "awslogs-group": self.task_definition_dict["log_group"],\n "awslogs-region": self.ecs.meta.region_name,\n "awslogs-stream-prefix": family,\n },\n }\n if self.task_definition_dict.get("log_group")\n else None\n ),\n secrets=secrets if secrets else [],\n environment=environment,\n execution_role_arn=container_context.execution_role_arn,\n 
task_role_arn=container_context.task_role_arn,\n sidecars=container_context.run_sidecar_containers,\n requires_compatibilities=self.task_definition_dict.get(\n "requires_compatibilities", []\n ),\n cpu=container_context.run_resources.get("cpu", default_resources["cpu"]),\n memory=container_context.run_resources.get(\n "memory", default_resources["memory"]\n ),\n ephemeral_storage=container_context.run_resources.get("ephemeral_storage"),\n runtime_platform=runtime_platform,\n volumes=container_context.volumes,\n mount_points=container_context.mount_points,\n repository_credentials=container_context.repository_credentials,\n )\n task_definition_dict = task_definition_config.task_definition_dict()\n else:\n task_definition_dict = get_task_definition_dict_from_current_task(\n self.ecs,\n family,\n self._get_current_task(),\n image,\n self._get_container_name(container_context),\n environment=environment,\n secrets=secrets if secrets else {},\n include_sidecars=self.include_sidecars,\n task_role_arn=container_context.task_role_arn,\n execution_role_arn=container_context.execution_role_arn,\n cpu=container_context.run_resources.get("cpu"),\n memory=container_context.run_resources.get("memory"),\n runtime_platform=container_context.runtime_platform,\n ephemeral_storage=container_context.run_resources.get("ephemeral_storage"),\n volumes=container_context.volumes,\n mount_points=container_context.mount_points,\n additional_sidecars=container_context.run_sidecar_containers,\n repository_credentials=container_context.repository_credentials,\n )\n\n task_definition_config = DagsterEcsTaskDefinitionConfig.from_task_definition_dict(\n task_definition_dict,\n self._get_container_name(container_context),\n )\n\n container_name = self._get_container_name(container_context)\n\n backoff(\n self._reuse_or_register_task_definition,\n retry_on=(Exception,),\n kwargs={\n "desired_task_definition_config": task_definition_config,\n "container_name": container_name,\n "task_definition_dict": task_definition_dict,\n },\n max_retries=5,\n )\n\n task_definition = family\n\n if self.use_current_ecs_task_config:\n current_task_metadata = get_current_ecs_task_metadata()\n current_task = get_current_ecs_task(\n self.ecs, current_task_metadata.task_arn, current_task_metadata.cluster\n )\n task_kwargs = get_task_kwargs_from_current_task(\n self.ec2,\n current_task_metadata.cluster,\n current_task,\n )\n else:\n task_kwargs = {}\n\n return {**task_kwargs, **self.run_task_kwargs, "taskDefinition": task_definition}\n\n def _reuse_task_definition(\n self, desired_task_definition_config: DagsterEcsTaskDefinitionConfig, container_name: str\n ):\n family = desired_task_definition_config.family\n\n try:\n existing_task_definition = self.ecs.describe_task_definition(taskDefinition=family)[\n "taskDefinition"\n ]\n except ClientError:\n # task definition does not exist, do not reuse\n return False\n\n return task_definitions_match(\n desired_task_definition_config,\n existing_task_definition,\n container_name=container_name,\n )\n\n def _reuse_or_register_task_definition(\n self,\n desired_task_definition_config: DagsterEcsTaskDefinitionConfig,\n container_name: str,\n task_definition_dict: dict,\n ):\n if not self._reuse_task_definition(desired_task_definition_config, container_name):\n self.ecs.register_task_definition(**task_definition_dict)\n\n def _environment(self, container_context):\n return [\n {"name": key, "value": value}\n for key, value in container_context.get_environment_dict().items()\n ]\n\n def _secrets(self, 
container_context):\n secrets = container_context.get_secrets_dict(self.secrets_manager)\n return (\n [{"name": key, "valueFrom": value} for key, value in secrets.items()] if secrets else []\n )\n\n @property\n def supports_check_run_worker_health(self):\n return True\n\n @property\n def include_cluster_info_in_failure_messages(self):\n return True\n\n def _is_transient_startup_failure(self, run, task):\n if not task.get("stoppedReason"):\n return False\n return (\n run.status == DagsterRunStatus.STARTING\n and "Timeout waiting for network interface provisioning to complete"\n in task.get("stoppedReason")\n )\n\n def check_run_worker_health(self, run: DagsterRun):\n run_worker_id = run.tags.get(RUN_WORKER_ID_TAG)\n\n tags = self._get_run_tags(run.run_id)\n container_context = EcsContainerContext.create_for_run(run, self)\n\n if not (tags.arn and tags.cluster):\n return CheckRunHealthResult(WorkerStatus.UNKNOWN, "", run_worker_id=run_worker_id)\n\n tasks = self.ecs.describe_tasks(tasks=[tags.arn], cluster=tags.cluster).get("tasks")\n if not tasks:\n return CheckRunHealthResult(WorkerStatus.UNKNOWN, "", run_worker_id=run_worker_id)\n\n t = tasks[0]\n\n if t.get("lastStatus") in RUNNING_STATUSES:\n return CheckRunHealthResult(WorkerStatus.RUNNING, run_worker_id=run_worker_id)\n elif t.get("lastStatus") in STOPPED_STATUSES:\n failed_containers = []\n for c in t.get("containers"):\n if c.get("exitCode") != 0:\n failed_containers.append(c)\n if len(failed_containers) > 0:\n if len(failed_containers) > 1:\n container_str = "Containers"\n else:\n container_str = "Container"\n\n failure_text = []\n\n cluster_failure_info = (\n f"Task {t.get('taskArn')} failed. Stop code: {t.get('stopCode')}. Stop"\n + f" reason: {t.get('stoppedReason')}."\n + f" {container_str} {[c.get('name') for c in failed_containers]} failed."\n )\n\n logging.warning(\n "Run monitoring detected run worker failure: " + cluster_failure_info\n )\n\n if self.include_cluster_info_in_failure_messages:\n failure_text.append(cluster_failure_info)\n\n logs = []\n\n try:\n logs = get_task_logs(\n self.ecs,\n logs_client=self.logs,\n cluster=tags.cluster,\n task_arn=tags.arn,\n container_name=self._get_container_name(container_context),\n )\n except:\n logging.exception(f"Error trying to get logs for failed task {tags.arn}")\n\n if logs:\n failure_text.append("Run worker logs:\\n" + "\\n".join(logs))\n\n return CheckRunHealthResult(\n WorkerStatus.FAILED,\n "\\n\\n".join(failure_text),\n transient=self._is_transient_startup_failure(run, t),\n run_worker_id=run_worker_id,\n )\n\n return CheckRunHealthResult(WorkerStatus.SUCCESS, run_worker_id=run_worker_id)\n\n return CheckRunHealthResult(\n WorkerStatus.UNKNOWN, "ECS task health status is unknown.", run_worker_id=run_worker_id\n )
\n
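Since the launcher above reads the ``ecs/cpu``, ``ecs/memory``, and ``ecs/ephemeral_storage`` run tags (see ``get_cpu_and_memory_overrides`` and ``_get_task_overrides``), per-run task sizing can be requested directly from a job definition. A minimal sketch; the job and op names are placeholders.

.. code-block:: python

    from dagster import job, op


    @op
    def my_op():
        ...


    # These "ecs/*" tags become per-run overrides on the launched ECS task.
    @job(
        tags={
            "ecs/cpu": "1024",
            "ecs/memory": "4096",
            "ecs/ephemeral_storage": "64",
        }
    )
    def my_job():
        my_op()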
", "current_page_name": "_modules/dagster_aws/ecs/launcher", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_aws.ecs.launcher"}}, "emr": {"emr": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_aws.emr.emr

\n# Portions of this file are copied from the Yelp MRJob project:\n#\n#   https://github.com/Yelp/mrjob\n#\n#\n# Copyright 2009-2013 Yelp, David Marin\n# Copyright 2015 Yelp\n# Copyright 2017 Yelp\n# Copyright 2018 Contributors\n# Copyright 2019 Yelp and Contributors\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport gzip\nimport re\nfrom io import BytesIO\nfrom urllib.parse import urlparse\n\nimport boto3\nimport dagster\nimport dagster._check as check\nfrom botocore.exceptions import WaiterError\n\nfrom dagster_aws.utils.mrjob.utils import _boto3_now, _wrap_aws_client, strip_microseconds\n\nfrom .types import EMR_CLUSTER_TERMINATED_STATES, EmrClusterState, EmrStepState\n\n# if we can't create or find our own service role, use the one\n# created by the AWS console and CLI\n_FALLBACK_SERVICE_ROLE = "EMR_DefaultRole"\n\n# if we can't create or find our own instance profile, use the one\n# created by the AWS console and CLI\n_FALLBACK_INSTANCE_PROFILE = "EMR_EC2_DefaultRole"\n\n\n
[docs]class EmrError(Exception):\n pass
\n\n\n
[docs]class EmrJobRunner:\n def __init__(\n self,\n region,\n check_cluster_every=30,\n aws_access_key_id=None,\n aws_secret_access_key=None,\n ):\n """This object encapsulates various utilities for interacting with EMR clusters and invoking\n steps (jobs) on them.\n\n See also :py:class:`~dagster_aws.emr.EmrPySparkResource`, which wraps this job runner in a\n resource for pyspark workloads.\n\n Args:\n region (str): AWS region to use\n check_cluster_every (int, optional): How frequently to poll boto3 APIs for updates.\n Defaults to 30 seconds.\n aws_access_key_id ([type], optional): AWS access key ID. Defaults to None, which will\n use the default boto3 credentials chain.\n aws_secret_access_key ([type], optional): AWS secret access key. Defaults to None, which\n will use the default boto3 credentials chain.\n """\n self.region = check.str_param(region, "region")\n\n # This is in seconds\n self.check_cluster_every = check.int_param(check_cluster_every, "check_cluster_every")\n self.aws_access_key_id = check.opt_str_param(aws_access_key_id, "aws_access_key_id")\n self.aws_secret_access_key = check.opt_str_param(\n aws_secret_access_key, "aws_secret_access_key"\n )\n\n def make_emr_client(self):\n """Creates a boto3 EMR client. Construction is wrapped in retries in case client connection\n fails transiently.\n\n Returns:\n botocore.client.EMR: An EMR client\n """\n raw_emr_client = boto3.client(\n "emr",\n aws_access_key_id=self.aws_access_key_id,\n aws_secret_access_key=self.aws_secret_access_key,\n region_name=self.region,\n )\n return _wrap_aws_client(raw_emr_client, min_backoff=self.check_cluster_every)\n\n def cluster_id_from_name(self, cluster_name):\n """Get a cluster ID in the format "j-123ABC123ABC1" given a cluster name "my cool cluster".\n\n Args:\n cluster_name (str): The name of the cluster for which to find an ID\n\n Returns:\n str: The ID of the cluster\n\n Raises:\n EmrError: No cluster with the specified name exists\n """\n check.str_param(cluster_name, "cluster_name")\n\n response = self.make_emr_client().list_clusters().get("Clusters", [])\n for cluster in response:\n if cluster["Name"] == cluster_name:\n return cluster["Id"]\n\n raise EmrError(f"cluster {cluster_name} not found in region {self.region}")\n\n @staticmethod\n def construct_step_dict_for_command(step_name, command, action_on_failure="CONTINUE"):\n """Construct an EMR step definition which uses command-runner.jar to execute a shell command\n on the EMR master.\n\n Args:\n step_name (str): The name of the EMR step (will show up in the EMR UI)\n command (str): The shell command to execute with command-runner.jar\n action_on_failure (str, optional): Configure action on failure (e.g., continue, or\n terminate the cluster). 
Defaults to 'CONTINUE'.\n\n Returns:\n dict: Step definition dict\n """\n check.str_param(step_name, "step_name")\n check.list_param(command, "command", of_type=str)\n check.str_param(action_on_failure, "action_on_failure")\n\n return {\n "Name": step_name,\n "ActionOnFailure": action_on_failure,\n "HadoopJarStep": {"Jar": "command-runner.jar", "Args": command},\n }\n\n def add_tags(self, log, tags, cluster_id):\n """Add tags in the dict tags to cluster cluster_id.\n\n Args:\n log (DagsterLogManager): Log manager, for logging\n tags (dict): Dictionary of {'key': 'value'} tags\n cluster_id (str): The ID of the cluster to tag\n """\n check.dict_param(tags, "tags")\n check.str_param(cluster_id, "cluster_id")\n\n tags_items = sorted(tags.items())\n\n self.make_emr_client().add_tags(\n ResourceId=cluster_id, Tags=[dict(Key=k, Value=v) for k, v in tags_items]\n )\n\n log.info(\n "Added EMR tags to cluster %s: %s"\n % (cluster_id, ", ".join("%s=%s" % (tag, value) for tag, value in tags_items))\n )\n\n def run_job_flow(self, log, cluster_config):\n """Create an empty cluster on EMR, and return the ID of that job flow.\n\n Args:\n log (DagsterLogManager): Log manager, for logging\n cluster_config (dict): Configuration for this EMR job flow. See:\n https://docs.aws.amazon.com/emr/latest/APIReference/API_RunJobFlow.html\n\n Returns:\n str: The cluster ID, e.g. "j-ZKIY4CKQRX72"\n """\n check.dict_param(cluster_config, "cluster_config")\n\n log.debug("Creating Elastic MapReduce cluster")\n emr_client = self.make_emr_client()\n\n log.debug(\n "Calling run_job_flow(%s)"\n % (", ".join("%s=%r" % (k, v) for k, v in sorted(cluster_config.items())))\n )\n cluster_id = emr_client.run_job_flow(**cluster_config)["JobFlowId"]\n\n log.info("Created new cluster %s" % cluster_id)\n\n # set EMR tags for the cluster\n tags_items = cluster_config.get("Tags", [])\n tags = {k: v for k, v in tags_items}\n tags["__dagster_version"] = dagster.__version__\n self.add_tags(log, tags, cluster_id)\n return cluster_id\n\n def describe_cluster(self, cluster_id):\n """Thin wrapper over boto3 describe_cluster.\n\n Args:\n cluster_id (str): Cluster to inspect\n\n Returns:\n dict: The cluster info. See:\n https://docs.aws.amazon.com/emr/latest/APIReference/API_DescribeCluster.html\n """\n check.str_param(cluster_id, "cluster_id")\n\n emr_client = self.make_emr_client()\n return emr_client.describe_cluster(ClusterId=cluster_id)\n\n def describe_step(self, cluster_id, step_id):\n """Thin wrapper over boto3 describe_step.\n\n Args:\n cluster_id (str): Cluster to inspect\n step_id (str): Step ID to describe\n\n Returns:\n dict: The step info. 
See:\n https://docs.aws.amazon.com/emr/latest/APIReference/API_DescribeStep.html\n """\n check.str_param(cluster_id, "cluster_id")\n check.str_param(step_id, "step_id")\n\n emr_client = self.make_emr_client()\n return emr_client.describe_step(ClusterId=cluster_id, StepId=step_id)\n\n def add_job_flow_steps(self, log, cluster_id, step_defs):\n """Submit the constructed job flow steps to EMR for execution.\n\n Args:\n log (DagsterLogManager): Log manager, for logging\n cluster_id (str): The ID of the cluster\n step_defs (List[dict]): List of steps; see also `construct_step_dict_for_command`\n\n Returns:\n List[str]: list of step IDs.\n """\n check.str_param(cluster_id, "cluster_id")\n check.list_param(step_defs, "step_defs", of_type=dict)\n\n emr_client = self.make_emr_client()\n\n steps_kwargs = dict(JobFlowId=cluster_id, Steps=step_defs)\n log.debug(\n "Calling add_job_flow_steps(%s)"\n % ",".join(("%s=%r" % (k, v)) for k, v in steps_kwargs.items())\n )\n return emr_client.add_job_flow_steps(**steps_kwargs)["StepIds"]\n\n def is_emr_step_complete(self, log, cluster_id, emr_step_id):\n step = self.describe_step(cluster_id, emr_step_id)["Step"]\n step_state = EmrStepState(step["Status"]["State"])\n\n if step_state == EmrStepState.Pending:\n cluster = self.describe_cluster(cluster_id)["Cluster"]\n\n reason = _get_reason(cluster)\n reason_desc = (": %s" % reason) if reason else ""\n\n log.info("PENDING (cluster is %s%s)" % (cluster["Status"]["State"], reason_desc))\n return False\n\n elif step_state == EmrStepState.Running:\n time_running_desc = ""\n\n start = step["Status"]["Timeline"].get("StartDateTime")\n if start:\n time_running_desc = " for %s" % strip_microseconds(_boto3_now() - start)\n\n log.info("RUNNING%s" % time_running_desc)\n return False\n\n # we're done, will return at the end of this\n elif step_state == EmrStepState.Completed:\n log.info("COMPLETED")\n return True\n else:\n # step has failed somehow. *reason* seems to only be set\n # when job is cancelled (e.g. 'Job terminated')\n reason = _get_reason(step)\n reason_desc = (" (%s)" % reason) if reason else ""\n\n log.info("%s%s" % (step_state.value, reason_desc))\n\n # print cluster status; this might give more context\n # why step didn't succeed\n cluster = self.describe_cluster(cluster_id)["Cluster"]\n reason = _get_reason(cluster)\n reason_desc = (": %s" % reason) if reason else ""\n log.info(\n "Cluster %s %s %s%s"\n % (\n cluster["Id"],\n "was" if "ED" in cluster["Status"]["State"] else "is",\n cluster["Status"]["State"],\n reason_desc,\n )\n )\n\n if EmrClusterState(cluster["Status"]["State"]) in EMR_CLUSTER_TERMINATED_STATES:\n # was it caused by IAM roles?\n self._check_for_missing_default_iam_roles(log, cluster)\n\n # TODO: extract logs here to surface failure reason\n # See: https://github.com/dagster-io/dagster/issues/1954\n\n if step_state == EmrStepState.Failed:\n log.error("EMR step %s failed" % emr_step_id)\n\n raise EmrError("EMR step %s failed" % emr_step_id)\n\n def _check_for_missing_default_iam_roles(self, log, cluster):\n """If cluster couldn't start due to missing IAM roles, tell user what to do."""\n check.dict_param(cluster, "cluster")\n\n reason = _get_reason(cluster)\n if any(\n reason.endswith("/%s is invalid" % role)\n for role in (_FALLBACK_INSTANCE_PROFILE, _FALLBACK_SERVICE_ROLE)\n ):\n log.warning(\n "IAM roles are missing. 
See documentation for IAM roles on EMR here: "\n "https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-iam-roles.html"\n )\n\n def log_location_for_cluster(self, cluster_id):\n """EMR clusters are typically launched with S3 logging configured. This method inspects a\n cluster using boto3 describe_cluster to retrieve the log URI.\n\n Args:\n cluster_id (str): The cluster to inspect.\n\n Raises:\n EmrError: the log URI was missing (S3 log mirroring not enabled for this cluster)\n\n Returns:\n (str, str): log bucket and key\n """\n check.str_param(cluster_id, "cluster_id")\n\n # The S3 log URI is specified per job flow (cluster)\n log_uri = self.describe_cluster(cluster_id)["Cluster"].get("LogUri", None)\n\n # ugh, seriously boto3?! This will come back as string "None"\n if log_uri == "None" or log_uri is None:\n raise EmrError("Log URI not specified, cannot retrieve step execution logs")\n\n # For some reason the API returns an s3n:// protocol log URI instead of s3://\n log_uri = re.sub("^s3n", "s3", log_uri)\n log_uri_parsed = urlparse(log_uri)\n log_bucket = log_uri_parsed.netloc\n log_key_prefix = log_uri_parsed.path.lstrip("/")\n return log_bucket, log_key_prefix\n\n def retrieve_logs_for_step_id(self, log, cluster_id, step_id):\n """Retrieves stdout and stderr logs for the given step ID.\n\n Args:\n log (DagsterLogManager): Log manager, for logging\n cluster_id (str): EMR cluster ID\n step_id (str): EMR step ID for the job that was submitted.\n\n Returns:\n (str, str): Tuple of stdout log string contents, and stderr log string contents\n """\n check.str_param(cluster_id, "cluster_id")\n check.str_param(step_id, "step_id")\n\n log_bucket, log_key_prefix = self.log_location_for_cluster(cluster_id)\n\n prefix = f"{log_key_prefix}{cluster_id}/steps/{step_id}"\n stdout_log = self.wait_for_log(log, log_bucket, f"{prefix}/stdout.gz")\n stderr_log = self.wait_for_log(log, log_bucket, f"{prefix}/stderr.gz")\n return stdout_log, stderr_log\n\n def wait_for_log(self, log, log_bucket, log_key, waiter_delay=30, waiter_max_attempts=20):\n """Wait for gzipped EMR logs to appear on S3. Note that EMR syncs logs to S3 every 5\n minutes, so this may take a long time.\n\n Args:\n log_bucket (str): S3 bucket where log is expected to appear\n log_key (str): S3 key for the log file\n waiter_delay (int): How long to wait between attempts to check S3 for the log file\n waiter_max_attempts (int): Number of attempts before giving up on waiting\n\n Raises:\n EmrError: Raised if we waited the full duration and the logs did not appear\n\n Returns:\n str: contents of the log file\n """\n check.str_param(log_bucket, "log_bucket")\n check.str_param(log_key, "log_key")\n check.int_param(waiter_delay, "waiter_delay")\n check.int_param(waiter_max_attempts, "waiter_max_attempts")\n\n log.info(f"Attempting to get log: s3://{log_bucket}/{log_key}")\n\n s3 = _wrap_aws_client(boto3.client("s3"), min_backoff=self.check_cluster_every)\n waiter = s3.get_waiter("object_exists")\n try:\n waiter.wait(\n Bucket=log_bucket,\n Key=log_key,\n WaiterConfig={"Delay": waiter_delay, "MaxAttempts": waiter_max_attempts},\n )\n except WaiterError as err:\n raise EmrError("EMR log file did not appear on S3 after waiting") from err\n\n obj = BytesIO(s3.get_object(Bucket=log_bucket, Key=log_key)["Body"].read())\n gzip_file = gzip.GzipFile(fileobj=obj)\n return gzip_file.read().decode("utf-8")
\n\n\ndef _get_reason(cluster_or_step):\n """Get state change reason message."""\n # StateChangeReason is {} before the first state change\n return cluster_or_step["Status"]["StateChangeReason"].get("Message", "")\n
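A sketch of driving ``EmrJobRunner`` directly from an op, using only the methods defined above; the region, cluster name, and shell command are placeholders.

.. code-block:: python

    import time

    from dagster import OpExecutionContext, op
    from dagster_aws.emr import EmrJobRunner


    @op
    def run_emr_shell_step(context: OpExecutionContext):
        runner = EmrJobRunner(region="us-west-2")
        cluster_id = runner.cluster_id_from_name("my-cluster")

        # Wrap a shell command in a command-runner.jar step and submit it.
        step_def = EmrJobRunner.construct_step_dict_for_command(
            "say hello", ["bash", "-c", "echo hello"]
        )
        step_id = runner.add_job_flow_steps(context.log, cluster_id, [step_def])[0]

        # Poll until the step finishes; is_emr_step_complete raises EmrError on failure.
        while not runner.is_emr_step_complete(context.log, cluster_id, step_id):
            time.sleep(runner.check_cluster_every)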
", "current_page_name": "_modules/dagster_aws/emr/emr", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_aws.emr.emr"}, "pyspark_step_launcher": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_aws.emr.pyspark_step_launcher

\nimport os\nimport pickle\nimport sys\nimport tempfile\nimport time\n\nimport boto3\nfrom botocore.exceptions import ClientError\nfrom dagster import (\n    Field,\n    StringSource,\n    _check as check,\n    resource,\n)\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster._core.definitions.step_launcher import StepLauncher\nfrom dagster._core.errors import DagsterInvariantViolationError, raise_execution_interrupts\nfrom dagster._core.execution.plan.external_step import (\n    PICKLED_EVENTS_FILE_NAME,\n    PICKLED_STEP_RUN_REF_FILE_NAME,\n    step_context_to_step_run_ref,\n)\nfrom dagster._serdes import deserialize_value\n\nfrom dagster_aws.emr import EmrError, EmrJobRunner, emr_step_main\nfrom dagster_aws.emr.configs_spark import spark_config as get_spark_config\nfrom dagster_aws.utils.mrjob.log4j import parse_hadoop_log4j_records\n\n# On EMR, Spark is installed here\nEMR_SPARK_HOME = "/usr/lib/spark/"\n\nCODE_ZIP_NAME = "code.zip"\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n {\n "spark_config": get_spark_config(),\n "cluster_id": Field(\n StringSource, description="Name of the job flow (cluster) on which to execute."\n ),\n "region_name": Field(StringSource, description="The AWS region that the cluster is in."),\n "action_on_failure": Field(\n str,\n is_required=False,\n default_value="CANCEL_AND_WAIT",\n description=(\n "The EMR action to take when the cluster step fails: "\n "https://docs.aws.amazon.com/emr/latest/APIReference/API_StepConfig.html"\n ),\n ),\n "staging_bucket": Field(\n StringSource,\n is_required=True,\n description=(\n "S3 bucket to use for passing files between the plan process and EMR process."\n ),\n ),\n "staging_prefix": Field(\n StringSource,\n is_required=False,\n default_value="emr_staging",\n description=(\n "S3 key prefix inside the staging_bucket to use for files passed the plan "\n "process and EMR process"\n ),\n ),\n "wait_for_logs": Field(\n bool,\n is_required=False,\n default_value=False,\n description=(\n "If set, the system will wait for EMR logs to appear on S3. Note that logs "\n "are copied every 5 minutes, so enabling this will add several minutes to the job "\n "runtime."\n ),\n ),\n "local_job_package_path": Field(\n StringSource,\n is_required=False,\n description=(\n "Absolute path to the package that contains the job definition(s) whose steps will"\n " execute remotely on EMR. This is a path on the local fileystem of the process"\n " executing the job. The expectation is that this package will also be available on"\n " the python path of the launched process running the Spark step on EMR, either"\n " deployed on step launch via the deploy_local_job_package option, referenced on s3"\n " via the s3_job_package_path option, or installed on the cluster via bootstrap"\n " actions."\n ),\n ),\n "local_pipeline_package_path": Field(\n StringSource,\n is_required=False,\n description=(\n "(legacy) Absolute path to the package that contains the pipeline definition(s)"\n " whose steps will execute remotely on EMR. This is a path on the local fileystem"\n " of the process executing the pipeline. The expectation is that this package will"\n " also be available on the python path of the launched process running the Spark"\n " step on EMR, either deployed on step launch via the deploy_local_pipeline_package"\n " option, referenced on s3 via the s3_pipeline_package_path option, or installed on"\n " the cluster via bootstrap actions."\n ),\n ),\n "deploy_local_job_package": Field(\n bool,\n default_value=False,\n is_required=False,\n description=(\n "If set, before every step run, the launcher will zip up all the code in"\n " local_job_package_path, upload it to s3, and pass it to spark-submit's --py-files"\n " option. This gives the remote process access to up-to-date user code. If not set,"\n " the assumption is that some other mechanism is used for distributing code to the"\n " EMR cluster. If this option is set to True, s3_job_package_path should not also"\n " be set."\n ),\n ),\n "deploy_local_pipeline_package": Field(\n bool,\n default_value=False,\n is_required=False,\n description=(\n "(legacy) If set, before every step run, the launcher will zip up all the code in"\n " local_job_package_path, upload it to s3, and pass it to spark-submit's --py-files"\n " option. This gives the remote process access to up-to-date user code. If not set,"\n " the assumption is that some other mechanism is used for distributing code to the"\n " EMR cluster. 
If this option is set to True, s3_job_package_path should not also"\n " be set."\n ),\n ),\n "s3_job_package_path": Field(\n StringSource,\n is_required=False,\n description=(\n "If set, this path will be passed to the --py-files option of spark-submit. "\n "This should usually be a path to a zip file. If this option is set, "\n "deploy_local_job_package should not be set to True."\n ),\n ),\n "s3_pipeline_package_path": Field(\n StringSource,\n is_required=False,\n description=(\n "If set, this path will be passed to the --py-files option of spark-submit. "\n "This should usually be a path to a zip file. If this option is set, "\n "deploy_local_pipeline_package should not be set to True."\n ),\n ),\n }\n)\ndef emr_pyspark_step_launcher(context):\n # Resolve legacy arguments\n if context.resource_config.get("local_job_package_path") and context.resource_config.get(\n "local_pipeline_package_path"\n ):\n raise DagsterInvariantViolationError(\n "Provided both ``local_job_package_path`` and legacy version "\n "``local_pipeline_package_path`` arguments to ``emr_pyspark_step_launcher`` "\n "resource. Please choose one or the other."\n )\n\n if not context.resource_config.get(\n "local_job_package_path"\n ) and not context.resource_config.get("local_pipeline_package_path"):\n raise DagsterInvariantViolationError(\n "For resource ``emr_pyspark_step_launcher``, no config value provided for required "\n "schema entry ``local_job_package_path``."\n )\n\n local_job_package_path = context.resource_config.get(\n "local_job_package_path"\n ) or context.resource_config.get("local_pipeline_package_path")\n\n if context.resource_config.get("deploy_local_job_package") and context.resource_config.get(\n "deploy_local_pipeline_package"\n ):\n raise DagsterInvariantViolationError(\n "Provided both ``deploy_local_job_package`` and legacy version "\n "``deploy_local_pipeline_package`` arguments to ``emr_pyspark_step_launcher`` "\n "resource. Please choose one or the other."\n )\n\n deploy_local_job_package = context.resource_config.get(\n "deploy_local_job_package"\n ) or context.resource_config.get("deploy_local_pipeline_package")\n\n if context.resource_config.get("s3_job_package_path") and context.resource_config.get(\n "s3_pipeline_package_path"\n ):\n raise DagsterInvariantViolationError(\n "Provided both ``s3_job_package_path`` and legacy version "\n "``s3_pipeline_package_path`` arguments to ``emr_pyspark_step_launcher`` "\n "resource. Please choose one or the other."\n )\n\n s3_job_package_path = context.resource_config.get(\n "s3_job_package_path"\n ) or context.resource_config.get("s3_pipeline_package_path")\n\n return EmrPySparkStepLauncher(\n region_name=context.resource_config.get("region_name"),\n staging_bucket=context.resource_config.get("staging_bucket"),\n staging_prefix=context.resource_config.get("staging_prefix"),\n wait_for_logs=context.resource_config.get("wait_for_logs"),\n action_on_failure=context.resource_config.get("action_on_failure"),\n cluster_id=context.resource_config.get("cluster_id"),\n spark_config=context.resource_config.get("spark_config"),\n local_job_package_path=local_job_package_path,\n deploy_local_job_package=deploy_local_job_package,\n s3_job_package_path=s3_job_package_path,\n )
\n\n\nemr_pyspark_step_launcher.__doc__ = "\\n".join(\n "- **" + option + "**: " + (field.description or "")\n for option, field in emr_pyspark_step_launcher.config_schema.config_type.fields.items() # type: ignore\n)\n\n\nclass EmrPySparkStepLauncher(StepLauncher):\n def __init__(\n self,\n region_name,\n staging_bucket,\n staging_prefix,\n wait_for_logs,\n action_on_failure,\n cluster_id,\n spark_config,\n local_job_package_path,\n deploy_local_job_package,\n s3_job_package_path=None,\n ):\n self.region_name = check.str_param(region_name, "region_name")\n self.staging_bucket = check.str_param(staging_bucket, "staging_bucket")\n self.staging_prefix = check.str_param(staging_prefix, "staging_prefix")\n self.wait_for_logs = check.bool_param(wait_for_logs, "wait_for_logs")\n self.action_on_failure = check.str_param(action_on_failure, "action_on_failure")\n self.cluster_id = check.str_param(cluster_id, "cluster_id")\n self.spark_config = spark_config\n\n check.invariant(\n not deploy_local_job_package or not s3_job_package_path,\n "If deploy_local_job_package is set to True, s3_job_package_path should not "\n "also be set.",\n )\n\n self.local_job_package_path = check.str_param(\n local_job_package_path, "local_job_package_path"\n )\n self.deploy_local_job_package = check.bool_param(\n deploy_local_job_package, "deploy_local_job_package"\n )\n self.s3_job_package_path = check.opt_str_param(s3_job_package_path, "s3_job_package_path")\n\n self.emr_job_runner = EmrJobRunner(region=self.region_name)\n\n def _post_artifacts(self, log, step_run_ref, run_id, step_key):\n """Synchronize the step run ref and pyspark code to an S3 staging bucket for use on EMR.\n\n For the zip file, consider the following toy example:\n\n # Folder: my_pyspark_project/\n # a.py\n def foo():\n print(1)\n\n # b.py\n def bar():\n print(2)\n\n # main.py\n from a import foo\n from b import bar\n\n foo()\n bar()\n\n This will zip up `my_pyspark_project/` as `my_pyspark_project.zip`. 
Then, when running\n `spark-submit --py-files my_pyspark_project.zip emr_step_main.py` on EMR this will\n print 1, 2.\n """\n from dagster_pyspark.utils import build_pyspark_zip\n\n with tempfile.TemporaryDirectory() as temp_dir:\n s3 = boto3.client("s3", region_name=self.region_name)\n\n # Upload step run ref\n def _upload_file_to_s3(local_path, s3_filename):\n key = self._artifact_s3_key(run_id, step_key, s3_filename)\n s3_uri = self._artifact_s3_uri(run_id, step_key, s3_filename)\n log.debug(f"Uploading file {local_path} to {s3_uri}")\n s3.upload_file(Filename=local_path, Bucket=self.staging_bucket, Key=key)\n\n # Upload main file.\n # The remote Dagster installation should also have the file, but locating it there\n # could be a pain.\n main_local_path = self._main_file_local_path()\n _upload_file_to_s3(main_local_path, self._main_file_name())\n\n if self.deploy_local_job_package:\n # Zip and upload package containing job\n zip_local_path = os.path.join(temp_dir, CODE_ZIP_NAME)\n\n build_pyspark_zip(zip_local_path, self.local_job_package_path)\n _upload_file_to_s3(zip_local_path, CODE_ZIP_NAME)\n\n # Create step run ref pickle file\n step_run_ref_local_path = os.path.join(temp_dir, PICKLED_STEP_RUN_REF_FILE_NAME)\n with open(step_run_ref_local_path, "wb") as step_pickle_file:\n pickle.dump(step_run_ref, step_pickle_file)\n\n _upload_file_to_s3(step_run_ref_local_path, PICKLED_STEP_RUN_REF_FILE_NAME)\n\n def launch_step(self, step_context):\n step_run_ref = step_context_to_step_run_ref(step_context, self.local_job_package_path)\n\n run_id = step_context.dagster_run.run_id\n log = step_context.log\n\n step_key = step_run_ref.step_key\n self._post_artifacts(log, step_run_ref, run_id, step_key)\n\n emr_step_def = self._get_emr_step_def(run_id, step_key, step_context.op.name)\n emr_step_id = self.emr_job_runner.add_job_flow_steps(log, self.cluster_id, [emr_step_def])[\n 0\n ]\n\n yield from self.wait_for_completion_and_log(run_id, step_key, emr_step_id, step_context)\n\n def wait_for_completion_and_log(self, run_id, step_key, emr_step_id, step_context):\n s3 = boto3.resource("s3", region_name=self.region_name)\n try:\n for event in self.wait_for_completion(step_context, s3, run_id, step_key, emr_step_id):\n yield event\n except EmrError as emr_error:\n if self.wait_for_logs:\n self._log_logs_from_s3(step_context.log, emr_step_id)\n raise emr_error\n\n if self.wait_for_logs:\n self._log_logs_from_s3(step_context.log, emr_step_id)\n\n def wait_for_completion(\n self, step_context, s3, run_id, step_key, emr_step_id, check_interval=15\n ):\n """We want to wait for the EMR steps to complete, and while that's happening, we want to\n yield any events that have been written to S3 for us by the remote process.\n After the the EMR steps complete, we want a final chance to fetch events before finishing\n the step.\n """\n done = False\n all_events = []\n # If this is being called within a `capture_interrupts` context, allow interrupts\n # while waiting for the pyspark execution to complete, so that we can terminate slow or\n # hanging steps\n while not done:\n with raise_execution_interrupts():\n time.sleep(check_interval) # AWS rate-limits us if we poll it too often\n done = self.emr_job_runner.is_emr_step_complete(\n step_context.log, self.cluster_id, emr_step_id\n )\n\n all_events_new = self.read_events(s3, run_id, step_key)\n\n if len(all_events_new) > len(all_events):\n for i in range(len(all_events), len(all_events_new)):\n event = all_events_new[i]\n # write each event from the EMR instance to 
the local instance\n step_context.instance.handle_new_event(event)\n if event.is_dagster_event:\n yield event.dagster_event\n all_events = all_events_new\n\n def read_events(self, s3, run_id, step_key):\n events_s3_obj = s3.Object(\n self.staging_bucket, self._artifact_s3_key(run_id, step_key, PICKLED_EVENTS_FILE_NAME)\n )\n\n try:\n events_data = events_s3_obj.get()["Body"].read()\n return deserialize_value(pickle.loads(events_data))\n except ClientError as ex:\n # The file might not be there yet, which is fine\n if ex.response["Error"]["Code"] == "NoSuchKey":\n return []\n else:\n raise ex\n\n def _log_logs_from_s3(self, log, emr_step_id):\n """Retrieves the logs from the remote PySpark process that EMR posted to S3 and logs\n them to the given log.\n """\n stdout_log, stderr_log = self.emr_job_runner.retrieve_logs_for_step_id(\n log, self.cluster_id, emr_step_id\n )\n # Since stderr is YARN / Hadoop Log4J output, parse and reformat those log lines for\n # Dagster's logging system.\n records = parse_hadoop_log4j_records(stderr_log)\n for record in records:\n if record.level:\n log.log(\n level=record.level,\n msg="".join(["Spark Driver stderr: ", record.logger, ": ", record.message]),\n )\n else:\n log.debug(f"Spark Driver stderr: {record.message}")\n\n sys.stdout.write(\n "---------- Spark Driver stdout: ----------\\n"\n + stdout_log\n + "\\n"\n + "---------- End of Spark Driver stdout ----------\\n"\n )\n\n def _get_emr_step_def(self, run_id, step_key, solid_name):\n """From the local Dagster instance, construct EMR steps that will kick off execution on a\n remote EMR cluster.\n """\n from dagster_spark.utils import flatten_dict, format_for_cli\n\n action_on_failure = self.action_on_failure\n\n # Execute Solid via spark-submit\n conf = dict(flatten_dict(self.spark_config))\n conf["spark.app.name"] = conf.get("spark.app.name", solid_name)\n\n check.invariant(\n conf.get("spark.master", "yarn") == "yarn",\n desc=(\n "spark.master is configured as %s; cannot set Spark master on EMR to anything "\n 'other than "yarn"'\n )\n % conf.get("spark.master"),\n )\n\n command = (\n [\n EMR_SPARK_HOME + "bin/spark-submit",\n "--master",\n "yarn",\n "--deploy-mode",\n conf.get("spark.submit.deployMode", "client"),\n ]\n + format_for_cli(list(flatten_dict(conf)))\n + [\n "--py-files",\n self._artifact_s3_uri(run_id, step_key, CODE_ZIP_NAME),\n self._artifact_s3_uri(run_id, step_key, self._main_file_name()),\n self.staging_bucket,\n self._artifact_s3_key(run_id, step_key, PICKLED_STEP_RUN_REF_FILE_NAME),\n ]\n )\n\n return EmrJobRunner.construct_step_dict_for_command(\n "Execute Solid/Op %s" % solid_name, command, action_on_failure=action_on_failure\n )\n\n def _main_file_name(self):\n return os.path.basename(self._main_file_local_path())\n\n def _main_file_local_path(self):\n return emr_step_main.__file__\n\n def _sanitize_step_key(self, step_key: str) -> str:\n # step_keys of dynamic steps contain brackets, which are invalid characters\n return step_key.replace("[", "__").replace("]", "__")\n\n def _artifact_s3_uri(self, run_id, step_key, filename):\n key = self._artifact_s3_key(run_id, self._sanitize_step_key(step_key), filename)\n return f"s3://{self.staging_bucket}/{key}"\n\n def _artifact_s3_key(self, run_id, step_key, filename):\n return "/".join(\n [\n self.staging_prefix,\n run_id,\n self._sanitize_step_key(step_key),\n os.path.basename(filename),\n ]\n )\n
", "current_page_name": "_modules/dagster_aws/emr/pyspark_step_launcher", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_aws.emr.pyspark_step_launcher"}, "types": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_aws.emr.types

\nfrom enum import Enum as PyEnum\n\nfrom dagster import Enum, EnumValue\n\nEbsVolumeType = Enum(\n    name="EbsVolumeType", enum_values=[EnumValue("gp2"), EnumValue("io1"), EnumValue("standard")]\n)\n\n\n
[docs]class EmrClusterState(PyEnum):\n """Cluster state for EMR."""\n\n Starting = "STARTING"\n Bootstrapping = "BOOTSTRAPPING"\n Running = "RUNNING"\n Waiting = "WAITING"\n Terminating = "TERMINATING"\n Terminated = "TERMINATED"\n TerminatedWithErrors = "TERMINATED_WITH_ERRORS"
\n\n\nEMR_CLUSTER_TERMINATED_STATES = [\n EmrClusterState.Terminating,\n EmrClusterState.Terminated,\n EmrClusterState.TerminatedWithErrors,\n]\n\nEMR_CLUSTER_DONE_STATES = EMR_CLUSTER_TERMINATED_STATES + [EmrClusterState.Waiting]\n\n\n
[docs]class EmrStepState(PyEnum):\n """Step state for EMR."""\n\n Pending = "PENDING"\n Running = "RUNNING"\n Continue = "CONTINUE"\n Completed = "COMPLETED"\n Cancelled = "CANCELLED"\n Failed = "FAILED"\n Interrupted = "INTERRUPTED"
\n\n\nEmrActionOnFailure = Enum(\n name="EmrActionOnFailure",\n enum_values=[\n EnumValue("TERMINATE_JOB_FLOW"),\n EnumValue("TERMINATE_CLUSTER"),\n EnumValue("CANCEL_AND_WAIT"),\n EnumValue("CONTINUE"),\n ],\n)\n\nEmrAdjustmentType = Enum(\n name="EmrAdjustmentType",\n enum_values=[\n EnumValue("CHANGE_IN_CAPACITY"),\n EnumValue("PERCENT_CHANGE_IN_CAPACITY"),\n EnumValue("EXACT_CAPACITY"),\n ],\n)\n\nEmrComparisonOperator = Enum(\n name="EmrComparisonOperator",\n enum_values=[\n EnumValue("GREATER_THAN_OR_EQUAL"),\n EnumValue("GREATER_THAN"),\n EnumValue("LESS_THAN"),\n EnumValue("LESS_THAN_OR_EQUAL"),\n ],\n)\n\nEmrInstanceRole = Enum(\n name="EmrInstanceRole", enum_values=[EnumValue("MASTER"), EnumValue("CORE"), EnumValue("TASK")]\n)\n\nEmrMarket = Enum(name="EmrMarket", enum_values=[EnumValue("ON_DEMAND"), EnumValue("SPOT")])\n\nEmrRepoUpgradeOnBoot = Enum(\n name="EmrRepoUpgradeOnBoot", enum_values=[EnumValue("SECURITY"), EnumValue("NONE")]\n)\n\nEmrScaleDownBehavior = Enum(\n name="EmrScaleDownBehavior",\n enum_values=[\n EnumValue("TERMINATE_AT_INSTANCE_HOUR"),\n EnumValue("TERMINATE_AT_TASK_COMPLETION"),\n ],\n)\n\nEmrStatistic = Enum(\n name="EmrStatistic",\n enum_values=[\n EnumValue("SAMPLE_COUNT"),\n EnumValue("AVERAGE"),\n EnumValue("SUM"),\n EnumValue("MINIMUM"),\n EnumValue("MAXIMUM"),\n ],\n)\n\nEmrSupportedProducts = Enum(\n name="EmrSupportedProducts", enum_values=[EnumValue("mapr-m3"), EnumValue("mapr-m5")]\n)\n\nEmrTimeoutAction = Enum(\n name="EmrTimeoutAction",\n enum_values=[EnumValue("SWITCH_TO_ON_DEMAND"), EnumValue("TERMINATE_CLUSTER")],\n)\n\nEmrUnit = Enum(\n name="EmrUnit",\n enum_values=[\n EnumValue("NONE"),\n EnumValue("SECONDS"),\n EnumValue("MICRO_SECONDS"),\n EnumValue("MILLI_SECONDS"),\n EnumValue("BYTES"),\n EnumValue("KILO_BYTES"),\n EnumValue("MEGA_BYTES"),\n EnumValue("GIGA_BYTES"),\n EnumValue("TERA_BYTES"),\n EnumValue("BITS"),\n EnumValue("KILO_BITS"),\n EnumValue("MEGA_BITS"),\n EnumValue("GIGA_BITS"),\n EnumValue("TERA_BITS"),\n EnumValue("PERCENT"),\n EnumValue("COUNT"),\n EnumValue("BYTES_PER_SECOND"),\n EnumValue("KILO_BYTES_PER_SECOND"),\n EnumValue("MEGA_BYTES_PER_SECOND"),\n EnumValue("GIGA_BYTES_PER_SECOND"),\n EnumValue("TERA_BYTES_PER_SECOND"),\n EnumValue("BITS_PER_SECOND"),\n EnumValue("KILO_BITS_PER_SECOND"),\n EnumValue("MEGA_BITS_PER_SECOND"),\n EnumValue("GIGA_BITS_PER_SECOND"),\n EnumValue("TERA_BITS_PER_SECOND"),\n EnumValue("COUNT_PER_SECOND"),\n ],\n)\n
", "current_page_name": "_modules/dagster_aws/emr/types", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_aws.emr.types"}}, "redshift": {"resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_aws.redshift.resources

\nimport abc\nfrom contextlib import contextmanager\nfrom logging import Logger\nfrom typing import Any, Dict, Optional, cast\n\nimport psycopg2\nimport psycopg2.extensions\nfrom dagster import (\n    ConfigurableResource,\n    _check as check,\n    get_dagster_logger,\n    resource,\n)\nfrom dagster._annotations import deprecated\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom pydantic import Field\n\n\nclass RedshiftError(Exception):\n    pass\n\n\nclass BaseRedshiftClient(abc.ABC):\n    @abc.abstractmethod\n    def execute_query(self, query, fetch_results=False, cursor_factory=None, error_callback=None):\n        pass\n\n    @abc.abstractmethod\n    def execute_queries(\n        self, queries, fetch_results=False, cursor_factory=None, error_callback=None\n    ):\n        pass\n\n\nclass RedshiftClient(BaseRedshiftClient):\n    def __init__(self, conn_args: Dict[str, Any], autocommit: Optional[bool], log: Logger):\n        # Extract parameters from resource config\n        self.conn_args = conn_args\n\n        self.autocommit = autocommit\n        self.log = log\n\n    def execute_query(self, query, fetch_results=False, cursor_factory=None, error_callback=None):\n        """Synchronously execute a single query against Redshift. Will return a list of rows, where\n        each row is a tuple of values, e.g. SELECT 1 will return [(1,)].\n\n        Args:\n            query (str): The query to execute.\n            fetch_results (Optional[bool]): Whether to return the results of executing the query.\n                Defaults to False, in which case the query will be executed without retrieving the\n                results.\n            cursor_factory (Optional[:py:class:`psycopg2.extensions.cursor`]): An alternative\n                cursor_factory; defaults to None. Will be used when constructing the cursor.\n            error_callback (Optional[Callable[[Exception, Cursor, DagsterLogManager], None]]): A\n                callback function, invoked when an exception is encountered during query execution;\n                this is intended to support executing additional queries to provide diagnostic\n                information, e.g. by querying ``stl_load_errors`` using ``pg_last_copy_id()``. If no\n                function is provided, exceptions during query execution will be raised directly.\n\n        Returns:\n            Optional[List[Tuple[Any, ...]]]: Results of the query, as a list of tuples, when\n                fetch_results is set. 
Otherwise return None.\n        """\n        check.str_param(query, "query")\n        check.bool_param(fetch_results, "fetch_results")\n        check.opt_class_param(\n            cursor_factory, "cursor_factory", superclass=psycopg2.extensions.cursor\n        )\n        check.opt_callable_param(error_callback, "error_callback")\n\n        with self._get_conn() as conn:\n            with self._get_cursor(conn, cursor_factory=cursor_factory) as cursor:\n                try:\n                    self.log.info(f"Executing query '{query}'")\n                    cursor.execute(query)\n\n                    if fetch_results and cursor.rowcount > 0:\n                        return cursor.fetchall()\n                    else:\n                        self.log.info("Empty result from query")\n\n                except Exception as e:\n                    # If autocommit is disabled or not set (it is disabled by default), Redshift\n                    # will be in the middle of a transaction at exception time, and because of\n                    # the failure the current transaction will not accept any further queries.\n                    #\n                    # This conn.commit() call closes the open transaction before handing off\n                    # control to the error callback, so that the user can issue additional\n                    # queries. Notably, for e.g. pg_last_copy_id() to work, it requires you to\n                    # use the same conn/cursor, so you have to do this conn.commit() to ensure\n                    # things are in a usable state in the error callback.\n                    if not self.autocommit:\n                        conn.commit()\n\n                    if error_callback is not None:\n                        error_callback(e, cursor, self.log)\n                    else:\n                        raise\n\n    def execute_queries(\n        self, queries, fetch_results=False, cursor_factory=None, error_callback=None\n    ):\n        """Synchronously execute a list of queries against Redshift. Will return a list of list of\n        rows, where each row is a tuple of values, e.g. ['SELECT 1', 'SELECT 1'] will return\n        [[(1,)], [(1,)]].\n\n        Args:\n            queries (List[str]): The queries to execute.\n            fetch_results (Optional[bool]): Whether to return the results of executing the query.\n                Defaults to False, in which case the query will be executed without retrieving the\n                results.\n            cursor_factory (Optional[:py:class:`psycopg2.extensions.cursor`]): An alternative\n            cursor_factory; defaults to None. Will be used when constructing the cursor.\n            error_callback (Optional[Callable[[Exception, Cursor, DagsterLogManager], None]]): A\n                callback function, invoked when an exception is encountered during query execution;\n                this is intended to support executing additional queries to provide diagnostic\n                information, e.g. by querying ``stl_load_errors`` using ``pg_last_copy_id()``. If no\n                function is provided, exceptions during query execution will be raised directly.\n\n        Returns:\n            Optional[List[List[Tuple[Any, ...]]]]: Results of the query, as a list of list of\n                tuples, when fetch_results is set. 
Otherwise return None.\n        """\n        check.list_param(queries, "queries", of_type=str)\n        check.bool_param(fetch_results, "fetch_results")\n        check.opt_class_param(\n            cursor_factory, "cursor_factory", superclass=psycopg2.extensions.cursor\n        )\n        check.opt_callable_param(error_callback, "error_callback")\n\n        results = []\n        with self._get_conn() as conn:\n            with self._get_cursor(conn, cursor_factory=cursor_factory) as cursor:\n                for query in queries:\n                    try:\n                        self.log.info(f"Executing query '{query}'")\n                        cursor.execute(query)\n\n                        if fetch_results and cursor.rowcount > 0:\n                            results.append(cursor.fetchall())\n                        else:\n                            results.append([])\n                            self.log.info("Empty result from query")\n\n                    except Exception as e:\n                        # If autocommit is disabled or not set (it is disabled by default), Redshift\n                        # will be in the middle of a transaction at exception time, and because of\n                        # the failure the current transaction will not accept any further queries.\n                        #\n                        # This conn.commit() call closes the open transaction before handing off\n                        # control to the error callback, so that the user can issue additional\n                        # queries. Notably, for e.g. pg_last_copy_id() to work, it requires you to\n                        # use the same conn/cursor, so you have to do this conn.commit() to ensure\n                        # things are in a usable state in the error callback.\n                        if not self.autocommit:\n                            conn.commit()\n\n                        if error_callback is not None:\n                            error_callback(e, cursor, self.log)\n                        else:\n                            raise\n\n        if fetch_results:\n            return results\n\n    @contextmanager\n    def _get_conn(self):\n        conn = None\n        try:\n            conn = psycopg2.connect(**self.conn_args)\n            yield conn\n        finally:\n            if conn:\n                conn.close()\n\n    @contextmanager\n    def _get_cursor(self, conn, cursor_factory=None):\n        check.opt_class_param(\n            cursor_factory, "cursor_factory", superclass=psycopg2.extensions.cursor\n        )\n\n        # Could be none, in which case we should respect the connection default. Otherwise\n        # explicitly set to true/false.\n        if self.autocommit is not None:\n            conn.autocommit = self.autocommit\n\n        with conn:\n            with conn.cursor(cursor_factory=cursor_factory) as cursor:\n                yield cursor\n\n            # If autocommit is set, we'll commit after each and every query execution. 
Otherwise, we\n            # want to do a final commit after we're wrapped up executing the full set of one or more\n            # queries.\n            if not self.autocommit:\n                conn.commit()\n\n\n@deprecated(breaking_version="2.0", additional_warn_text="Use RedshiftClientResource instead.")\nclass RedshiftResource(RedshiftClient):\n    """This class was used by the function-style Redshift resource."""\n\n\nclass FakeRedshiftClient(BaseRedshiftClient):\n    QUERY_RESULT = [(1,)]\n\n    def __init__(self, log: Logger):\n        # Extract parameters from resource config\n\n        self.log = log\n\n    def execute_query(self, query, fetch_results=False, cursor_factory=None, error_callback=None):\n        """Fake for execute_query; returns [self.QUERY_RESULT].\n\n        Args:\n            query (str): The query to execute.\n            fetch_results (Optional[bool]): Whether to return the results of executing the query.\n                Defaults to False, in which case the query will be executed without retrieving the\n                results.\n            cursor_factory (Optional[:py:class:`psycopg2.extensions.cursor`]): An alternative\n                cursor_factory; defaults to None. Will be used when constructing the cursor.\n            error_callback (Optional[Callable[[Exception, Cursor, DagsterLogManager], None]]): A\n                callback function, invoked when an exception is encountered during query execution;\n                this is intended to support executing additional queries to provide diagnostic\n                information, e.g. by querying ``stl_load_errors`` using ``pg_last_copy_id()``. If no\n                function is provided, exceptions during query execution will be raised directly.\n\n        Returns:\n            Optional[List[Tuple[Any, ...]]]: Results of the query, as a list of tuples, when\n                fetch_results is set. Otherwise return None.\n        """\n        check.str_param(query, "query")\n        check.bool_param(fetch_results, "fetch_results")\n        check.opt_class_param(\n            cursor_factory, "cursor_factory", superclass=psycopg2.extensions.cursor\n        )\n        check.opt_callable_param(error_callback, "error_callback")\n\n        self.log.info(f"Executing query '{query}'")\n        if fetch_results:\n            return self.QUERY_RESULT\n\n    def execute_queries(\n        self, queries, fetch_results=False, cursor_factory=None, error_callback=None\n    ):\n        """Fake for execute_queries; returns [self.QUERY_RESULT] * 3.\n\n        Args:\n            queries (List[str]): The queries to execute.\n            fetch_results (Optional[bool]): Whether to return the results of executing the query.\n                Defaults to False, in which case the query will be executed without retrieving the\n                results.\n            cursor_factory (Optional[:py:class:`psycopg2.extensions.cursor`]): An alternative\n                cursor_factory; defaults to None. Will be used when constructing the cursor.\n            error_callback (Optional[Callable[[Exception, Cursor, DagsterLogManager], None]]): A\n                callback function, invoked when an exception is encountered during query execution;\n                this is intended to support executing additional queries to provide diagnostic\n                information, e.g. by querying ``stl_load_errors`` using ``pg_last_copy_id()``. 
If no\n                function is provided, exceptions during query execution will be raised directly.\n\n        Returns:\n            Optional[List[List[Tuple[Any, ...]]]]: Results of the query, as a list of list of\n                tuples, when fetch_results is set. Otherwise return None.\n        """\n        check.list_param(queries, "queries", of_type=str)\n        check.bool_param(fetch_results, "fetch_results")\n        check.opt_class_param(\n            cursor_factory, "cursor_factory", superclass=psycopg2.extensions.cursor\n        )\n        check.opt_callable_param(error_callback, "error_callback")\n\n        for query in queries:\n            self.log.info(f"Executing query '{query}'")\n        if fetch_results:\n            return [self.QUERY_RESULT] * 3\n\n\n@deprecated(breaking_version="2.0", additional_warn_text="Use FakeRedshiftClientResource instead.")\nclass FakeRedshiftResource(FakeRedshiftClient):\n    """This class was used by the function-style fake Redshift resource."""\n\n\n
[docs]class RedshiftClientResource(ConfigurableResource):\n """This resource enables connecting to a Redshift cluster and issuing queries against that\n cluster.\n\n Example:\n .. code-block:: python\n\n from dagster import Definitions, asset, EnvVar\n from dagster_aws.redshift import RedshiftClientResource\n\n @asset\n def example_redshift_asset(context, redshift: RedshiftClientResource):\n redshift.get_client().execute_query('SELECT 1', fetch_results=True)\n\n redshift_configured = RedshiftClientResource(\n host='my-redshift-cluster.us-east-1.redshift.amazonaws.com',\n port=5439,\n user='dagster',\n password=EnvVar("DAGSTER_REDSHIFT_PASSWORD"),\n database='dev',\n )\n\n defs = Definitions(\n assets=[example_redshift_asset],\n resources={'redshift': redshift_configured},\n )\n\n """\n\n host: str = Field(description="Redshift host")\n port: int = Field(default=5439, description="Redshift port")\n user: Optional[str] = Field(default=None, description="Username for Redshift connection")\n password: Optional[str] = Field(default=None, description="Password for Redshift connection")\n database: Optional[str] = Field(\n default=None,\n description=(\n "Name of the default database to use. After login, you can use USE DATABASE to change"\n " the database."\n ),\n )\n autocommit: Optional[bool] = Field(default=None, description="Whether to autocommit queries")\n connect_timeout: int = Field(\n default=5, description="Timeout for connection to Redshift cluster. Defaults to 5 seconds."\n )\n sslmode: str = Field(\n default="require",\n description=(\n "SSL mode to use. See the Redshift documentation for reference:"\n " https://docs.aws.amazon.com/redshift/latest/mgmt/connecting-ssl-support.html"\n ),\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def get_client(self) -> RedshiftClient:\n conn_args = {\n k: getattr(self, k, None)\n for k in (\n "host",\n "port",\n "user",\n "password",\n "database",\n "connect_timeout",\n "sslmode",\n )\n if getattr(self, k, None) is not None\n }\n\n return RedshiftClient(conn_args, self.autocommit, get_dagster_logger())
\n\n\n
[docs]class FakeRedshiftClientResource(RedshiftClientResource):\n """Fake version of RedshiftClientResource for use in tests; its client returns canned results instead of connecting to a Redshift cluster."""\n\n def get_client(self) -> FakeRedshiftClient:\n return FakeRedshiftClient(get_dagster_logger())

\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n config_schema=RedshiftClientResource.to_config_schema(),\n description="Resource for connecting to the Redshift data warehouse",\n)\ndef redshift_resource(context) -> RedshiftClient:\n """This resource enables connecting to a Redshift cluster and issuing queries against that\n cluster.\n\n Example:\n .. code-block:: python\n\n from dagster import build_op_context, op\n from dagster_aws.redshift import redshift_resource\n\n @op(required_resource_keys={'redshift'})\n def example_redshift_op(context):\n return context.resources.redshift.execute_query('SELECT 1', fetch_results=True)\n\n redshift_configured = redshift_resource.configured({\n 'host': 'my-redshift-cluster.us-east-1.redshift.amazonaws.com',\n 'port': 5439,\n 'user': 'dagster',\n 'password': 'dagster',\n 'database': 'dev',\n })\n context = build_op_context(resources={'redshift': redshift_configured})\n assert example_redshift_op(context) == [(1,)]\n\n """\n return RedshiftClientResource.from_resource_context(context).get_client()
\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n config_schema=FakeRedshiftClientResource.to_config_schema(),\n description=(\n "Fake resource for connecting to the Redshift data warehouse. Usage is identical "\n "to the real redshift_resource. Will always return [(1,)] for the single query case and "\n "[[(1,)], [(1,)], [(1,)]] for the multi query case."\n ),\n)\ndef fake_redshift_resource(context) -> FakeRedshiftClient:\n return cast(\n FakeRedshiftClient,\n FakeRedshiftClientResource.from_resource_context(context).get_client(),\n )
\n
", "current_page_name": "_modules/dagster_aws/redshift/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_aws.redshift.resources"}}, "s3": {"compute_log_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_aws.s3.compute_log_manager

\nimport os\nfrom contextlib import contextmanager\nfrom typing import Any, Iterator, Mapping, Optional, Sequence\n\nimport boto3\nimport dagster._seven as seven\nfrom botocore.errorfactory import ClientError\nfrom dagster import (\n    Field,\n    Permissive,\n    StringSource,\n    _check as check,\n)\nfrom dagster._config.config_type import Noneable\nfrom dagster._core.storage.captured_log_manager import CapturedLogContext\nfrom dagster._core.storage.cloud_storage_compute_log_manager import (\n    CloudStorageComputeLogManager,\n    PollingComputeLogSubscriptionManager,\n)\nfrom dagster._core.storage.compute_log_manager import ComputeIOType\nfrom dagster._core.storage.local_compute_log_manager import (\n    IO_TYPE_EXTENSION,\n    LocalComputeLogManager,\n)\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData\nfrom dagster._utils import ensure_dir, ensure_file\nfrom typing_extensions import Self\n\nPOLLING_INTERVAL = 5\n\n\n
[docs]class S3ComputeLogManager(CloudStorageComputeLogManager, ConfigurableClass):\n """Logs compute function stdout and stderr to S3.\n\n Users should not instantiate this class directly. Instead, use a YAML block in ``dagster.yaml``\n such as the following:\n\n .. code-block:: YAML\n\n compute_logs:\n module: dagster_aws.s3.compute_log_manager\n class: S3ComputeLogManager\n config:\n bucket: "mycorp-dagster-compute-logs"\n local_dir: "/tmp/cool"\n prefix: "dagster-test-"\n use_ssl: true\n verify: true\n verify_cert_path: "/path/to/cert/bundle.pem"\n endpoint_url: "http://alternate-s3-host.io"\n skip_empty_files: true\n upload_interval: 30\n upload_extra_args:\n ServerSideEncryption: "AES256"\n show_url_only: false\n region: "us-west-1"\n\n Args:\n bucket (str): The name of the s3 bucket to which to log.\n local_dir (Optional[str]): Path to the local directory in which to stage logs. Default:\n ``dagster._seven.get_system_temp_directory()``.\n prefix (Optional[str]): Prefix for the log file keys.\n use_ssl (Optional[bool]): Whether or not to use SSL. Default True.\n verify (Optional[bool]): Whether or not to verify SSL certificates. Default True.\n verify_cert_path (Optional[str]): A filename of the CA cert bundle to use. Only used if\n `verify` set to False.\n endpoint_url (Optional[str]): Override for the S3 endpoint url.\n skip_empty_files: (Optional[bool]): Skip upload of empty log files.\n upload_interval: (Optional[int]): Interval in seconds to upload partial log files to S3. By default, will only upload when the capture is complete.\n upload_extra_args: (Optional[dict]): Extra args for S3 file upload\n show_url_only: (Optional[bool]): Only show the URL of the log file in the UI, instead of fetching and displaying the full content. Default False.\n region: (Optional[str]): The region of the S3 bucket. 
If not specified, will use the default region of the AWS session.\n inst_data (Optional[ConfigurableClassData]): Serializable representation of the compute\n log manager when newed up from config.\n """\n\n def __init__(\n self,\n bucket,\n local_dir=None,\n inst_data: Optional[ConfigurableClassData] = None,\n prefix="dagster",\n use_ssl=True,\n verify=True,\n verify_cert_path=None,\n endpoint_url=None,\n skip_empty_files=False,\n upload_interval=None,\n upload_extra_args=None,\n show_url_only=False,\n region=None,\n ):\n _verify = False if not verify else verify_cert_path\n self._s3_session = boto3.resource(\n "s3", use_ssl=use_ssl, verify=_verify, endpoint_url=endpoint_url\n ).meta.client\n self._s3_bucket = check.str_param(bucket, "bucket")\n self._s3_prefix = self._clean_prefix(check.str_param(prefix, "prefix"))\n\n # proxy calls to local compute log manager (for subscriptions, etc)\n if not local_dir:\n local_dir = seven.get_system_temp_directory()\n\n self._local_manager = LocalComputeLogManager(local_dir)\n self._subscription_manager = PollingComputeLogSubscriptionManager(self)\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n self._skip_empty_files = check.bool_param(skip_empty_files, "skip_empty_files")\n self._upload_interval = check.opt_int_param(upload_interval, "upload_interval")\n check.opt_dict_param(upload_extra_args, "upload_extra_args")\n self._upload_extra_args = upload_extra_args\n self._show_url_only = show_url_only\n if region is None:\n # if unspecified, use the current session name\n self._region = self._s3_session.meta.region_name\n else:\n self._region = region\n\n @property\n def inst_data(self):\n return self._inst_data\n\n @classmethod\n def config_type(cls):\n return {\n "bucket": StringSource,\n "local_dir": Field(StringSource, is_required=False),\n "prefix": Field(StringSource, is_required=False, default_value="dagster"),\n "use_ssl": Field(bool, is_required=False, default_value=True),\n "verify": Field(bool, is_required=False, default_value=True),\n "verify_cert_path": Field(StringSource, is_required=False),\n "endpoint_url": Field(StringSource, is_required=False),\n "skip_empty_files": Field(bool, is_required=False, default_value=False),\n "upload_interval": Field(Noneable(int), is_required=False, default_value=None),\n "upload_extra_args": Field(\n Permissive(), is_required=False, description="Extra args for S3 file upload"\n ),\n "show_url_only": Field(bool, is_required=False, default_value=False),\n "region": Field(StringSource, is_required=False),\n }\n\n @classmethod\n def from_config_value(\n cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any]\n ) -> Self:\n return S3ComputeLogManager(inst_data=inst_data, **config_value)\n\n @property\n def local_manager(self) -> LocalComputeLogManager:\n return self._local_manager\n\n @property\n def upload_interval(self) -> Optional[int]:\n return self._upload_interval if self._upload_interval else None\n\n def _clean_prefix(self, prefix):\n parts = prefix.split("/")\n return "/".join([part for part in parts if part])\n\n def _s3_key(self, log_key, io_type, partial=False):\n check.inst_param(io_type, "io_type", ComputeIOType)\n extension = IO_TYPE_EXTENSION[io_type]\n [*namespace, filebase] = log_key\n filename = f"{filebase}.{extension}"\n if partial:\n filename = f"{filename}.partial"\n paths = [self._s3_prefix, "storage", *namespace, filename]\n return "/".join(paths) # s3 path delimiter\n\n @contextmanager\n def capture_logs(self, log_key: 
Sequence[str]) -> Iterator[CapturedLogContext]:\n with super().capture_logs(log_key) as local_context:\n if not self._show_url_only:\n yield local_context\n else:\n out_key = self._s3_key(log_key, ComputeIOType.STDOUT)\n err_key = self._s3_key(log_key, ComputeIOType.STDERR)\n s3_base = f"https://s3.console.aws.amazon.com/s3/object/{self._s3_bucket}?region={self._region}"\n yield CapturedLogContext(\n local_context.log_key,\n external_stdout_url=f"{s3_base}&prefix={out_key}",\n external_stderr_url=f"{s3_base}&prefix={err_key}",\n )\n\n def delete_logs(\n self, log_key: Optional[Sequence[str]] = None, prefix: Optional[Sequence[str]] = None\n ):\n self.local_manager.delete_logs(log_key=log_key, prefix=prefix)\n\n s3_keys_to_remove = None\n if log_key:\n s3_keys_to_remove = [\n self._s3_key(log_key, ComputeIOType.STDOUT),\n self._s3_key(log_key, ComputeIOType.STDERR),\n self._s3_key(log_key, ComputeIOType.STDOUT, partial=True),\n self._s3_key(log_key, ComputeIOType.STDERR, partial=True),\n ]\n elif prefix:\n # add the trailing '' to make sure that ['a'] does not match ['apple']\n s3_prefix = "/".join([self._s3_prefix, "storage", *prefix, ""])\n matching = self._s3_session.list_objects(Bucket=self._s3_bucket, Prefix=s3_prefix)\n s3_keys_to_remove = [obj["Key"] for obj in matching.get("Contents", [])]\n else:\n check.failed("Must pass in either `log_key` or `prefix` argument to delete_logs")\n\n if s3_keys_to_remove:\n to_delete = [{"Key": key} for key in s3_keys_to_remove]\n self._s3_session.delete_objects(Bucket=self._s3_bucket, Delete={"Objects": to_delete})\n\n def download_url_for_type(self, log_key: Sequence[str], io_type: ComputeIOType):\n if not self.is_capture_complete(log_key):\n return None\n\n s3_key = self._s3_key(log_key, io_type)\n return self._s3_session.generate_presigned_url(\n ClientMethod="get_object", Params={"Bucket": self._s3_bucket, "Key": s3_key}\n )\n\n def display_path_for_type(self, log_key: Sequence[str], io_type: ComputeIOType):\n if not self.is_capture_complete(log_key):\n return None\n s3_key = self._s3_key(log_key, io_type)\n return f"s3://{self._s3_bucket}/{s3_key}"\n\n def cloud_storage_has_logs(\n self, log_key: Sequence[str], io_type: ComputeIOType, partial: bool = False\n ) -> bool:\n s3_key = self._s3_key(log_key, io_type, partial=partial)\n try: # https://stackoverflow.com/a/38376288/14656695\n self._s3_session.head_object(Bucket=self._s3_bucket, Key=s3_key)\n except ClientError:\n return False\n return True\n\n def upload_to_cloud_storage(\n self, log_key: Sequence[str], io_type: ComputeIOType, partial=False\n ):\n path = self.local_manager.get_captured_local_path(log_key, IO_TYPE_EXTENSION[io_type])\n ensure_file(path)\n\n if (self._skip_empty_files or partial) and os.stat(path).st_size == 0:\n return\n\n s3_key = self._s3_key(log_key, io_type, partial=partial)\n with open(path, "rb") as data:\n extra_args = {\n "ContentType": "text/plain",\n **(self._upload_extra_args if self._upload_extra_args else {}),\n }\n self._s3_session.upload_fileobj(data, self._s3_bucket, s3_key, ExtraArgs=extra_args)\n\n def download_from_cloud_storage(\n self, log_key: Sequence[str], io_type: ComputeIOType, partial=False\n ):\n path = self._local_manager.get_captured_local_path(\n log_key, IO_TYPE_EXTENSION[io_type], partial=partial\n )\n ensure_dir(os.path.dirname(path))\n s3_key = self._s3_key(log_key, io_type, partial=partial)\n with open(path, "wb") as fileobj:\n self._s3_session.download_fileobj(self._s3_bucket, s3_key, fileobj)\n\n def on_subscribe(self, 
subscription):\n self._subscription_manager.add_subscription(subscription)\n\n def on_unsubscribe(self, subscription):\n self._subscription_manager.remove_subscription(subscription)\n\n def dispose(self):\n self._subscription_manager.dispose()\n self._local_manager.dispose()
\n
", "current_page_name": "_modules/dagster_aws/s3/compute_log_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_aws.s3.compute_log_manager"}, "file_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_aws.s3.file_manager

\nimport io\nimport uuid\nfrom contextlib import contextmanager\n\nimport dagster._check as check\nfrom dagster._core.storage.file_manager import (\n    FileHandle,\n    FileManager,\n    TempfileManager,\n    check_file_like_obj,\n)\n\n\n
[docs]class S3FileHandle(FileHandle):\n """A reference to a file on S3."""\n\n def __init__(self, s3_bucket: str, s3_key: str):\n self._s3_bucket = check.str_param(s3_bucket, "s3_bucket")\n self._s3_key = check.str_param(s3_key, "s3_key")\n\n @property\n def s3_bucket(self) -> str:\n """str: The name of the S3 bucket."""\n return self._s3_bucket\n\n @property\n def s3_key(self) -> str:\n """str: The S3 key."""\n return self._s3_key\n\n @property\n def path_desc(self) -> str:\n """str: The file's S3 URL."""\n return self.s3_path\n\n @property\n def s3_path(self) -> str:\n """str: The file's S3 URL."""\n return f"s3://{self.s3_bucket}/{self.s3_key}"
\n\n\nclass S3FileManager(FileManager):\n def __init__(self, s3_session, s3_bucket, s3_base_key):\n self._s3_session = s3_session\n self._s3_bucket = check.str_param(s3_bucket, "s3_bucket")\n self._s3_base_key = check.str_param(s3_base_key, "s3_base_key")\n self._local_handle_cache = {}\n self._temp_file_manager = TempfileManager()\n\n def copy_handle_to_local_temp(self, file_handle):\n self._download_if_not_cached(file_handle)\n return self._get_local_path(file_handle)\n\n def _download_if_not_cached(self, file_handle):\n if not self._file_handle_cached(file_handle):\n # instigate download\n temp_file_obj = self._temp_file_manager.tempfile()\n temp_name = temp_file_obj.name\n self._s3_session.download_file(\n Bucket=file_handle.s3_bucket, Key=file_handle.s3_key, Filename=temp_name\n )\n self._local_handle_cache[file_handle.s3_path] = temp_name\n\n return file_handle\n\n @contextmanager\n def read(self, file_handle, mode="rb"):\n check.inst_param(file_handle, "file_handle", S3FileHandle)\n check.str_param(mode, "mode")\n check.param_invariant(mode in {"r", "rb"}, "mode")\n\n self._download_if_not_cached(file_handle)\n\n encoding = None if mode == "rb" else "utf-8"\n with open(self._get_local_path(file_handle), mode, encoding=encoding) as file_obj:\n yield file_obj\n\n def _file_handle_cached(self, file_handle):\n return file_handle.s3_path in self._local_handle_cache\n\n def _get_local_path(self, file_handle):\n return self._local_handle_cache[file_handle.s3_path]\n\n def read_data(self, file_handle):\n with self.read(file_handle, mode="rb") as file_obj:\n return file_obj.read()\n\n def write_data(self, data, ext=None):\n check.inst_param(data, "data", bytes)\n return self.write(io.BytesIO(data), mode="wb", ext=ext)\n\n def write(self, file_obj, mode="wb", ext=None):\n check_file_like_obj(file_obj)\n s3_key = self.get_full_key(str(uuid.uuid4()) + (("." + ext) if ext is not None else ""))\n self._s3_session.put_object(Body=file_obj, Bucket=self._s3_bucket, Key=s3_key)\n return S3FileHandle(self._s3_bucket, s3_key)\n\n def get_full_key(self, file_key):\n return f"{self._s3_base_key}/{file_key}"\n\n def delete_local_temp(self):\n self._temp_file_manager.close()\n
", "current_page_name": "_modules/dagster_aws/s3/file_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_aws.s3.file_manager"}, "io_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_aws.s3.io_manager

\nimport io\nimport pickle\nfrom typing import Any, Dict, Optional, Union\n\nfrom dagster import (\n    ConfigurableIOManager,\n    InputContext,\n    MetadataValue,\n    OutputContext,\n    ResourceDependency,\n    _check as check,\n    io_manager,\n)\nfrom dagster._annotations import deprecated\nfrom dagster._core.storage.io_manager import dagster_maintained_io_manager\nfrom dagster._core.storage.upath_io_manager import UPathIOManager\nfrom dagster._utils import PICKLE_PROTOCOL\nfrom dagster._utils.cached_method import cached_method\nfrom pydantic import Field\nfrom upath import UPath\n\nfrom .resources import S3Resource\n\n\nclass PickledObjectS3IOManager(UPathIOManager):\n    def __init__(\n        self,\n        s3_bucket: str,\n        s3_session: Any,\n        s3_prefix: Optional[str] = None,\n    ):\n        self.bucket = check.str_param(s3_bucket, "s3_bucket")\n        check.opt_str_param(s3_prefix, "s3_prefix")\n        self.s3 = s3_session\n        self.s3.list_objects(Bucket=s3_bucket, Prefix=s3_prefix, MaxKeys=1)\n        base_path = UPath(s3_prefix) if s3_prefix else None\n        super().__init__(base_path=base_path)\n\n    def load_from_path(self, context: InputContext, path: UPath) -> Any:\n        try:\n            s3_obj = self.s3.get_object(Bucket=self.bucket, Key=str(path))["Body"].read()\n            return pickle.loads(s3_obj)\n        except self.s3.exceptions.NoSuchKey:\n            raise FileNotFoundError(f"Could not find file {path} in S3 bucket {self.bucket}")\n\n    def dump_to_path(self, context: OutputContext, obj: Any, path: UPath) -> None:\n        if self.path_exists(path):\n            context.log.warning(f"Removing existing S3 object: {path}")\n            self.unlink(path)\n\n        pickled_obj = pickle.dumps(obj, PICKLE_PROTOCOL)\n        pickled_obj_bytes = io.BytesIO(pickled_obj)\n        self.s3.upload_fileobj(pickled_obj_bytes, self.bucket, str(path))\n\n    def path_exists(self, path: UPath) -> bool:\n        try:\n            self.s3.get_object(Bucket=self.bucket, Key=str(path))\n        except self.s3.exceptions.NoSuchKey:\n            return False\n        return True\n\n    def get_loading_input_log_message(self, path: UPath) -> str:\n        return f"Loading S3 object from: {self._uri_for_path(path)}"\n\n    def get_writing_output_log_message(self, path: UPath) -> str:\n        return f"Writing S3 object at: {self._uri_for_path(path)}"\n\n    def unlink(self, path: UPath) -> None:\n        self.s3.delete_object(Bucket=self.bucket, Key=str(path))\n\n    def make_directory(self, path: UPath) -> None:\n        # It is not necessary to create directories in S3\n        return None\n\n    def get_metadata(self, context: OutputContext, obj: Any) -> Dict[str, MetadataValue]:\n        path = self._get_path(context)\n        return {"uri": MetadataValue.path(self._uri_for_path(path))}\n\n    def get_op_output_relative_path(self, context: Union[InputContext, OutputContext]) -> UPath:\n        return UPath("storage", super().get_op_output_relative_path(context))\n\n    def _uri_for_path(self, path: UPath) -> str:\n        return f"s3://{self.bucket}/{path}"\n\n\n
[docs]class S3PickleIOManager(ConfigurableIOManager):\n """Persistent IO manager using S3 for storage.\n\n Serializes objects via pickling. Suitable for objects storage for distributed executors, so long\n as each execution node has network connectivity and credentials for S3 and the backing bucket.\n\n Assigns each op output to a unique filepath containing run ID, step key, and output name.\n Assigns each asset to a single filesystem path, at "<base_dir>/<asset_key>". If the asset key\n has multiple components, the final component is used as the name of the file, and the preceding\n components as parent directories under the base_dir.\n\n Subsequent materializations of an asset will overwrite previous materializations of that asset.\n With a base directory of "/my/base/path", an asset with key\n `AssetKey(["one", "two", "three"])` would be stored in a file called "three" in a directory\n with path "/my/base/path/one/two/".\n\n Example usage:\n\n .. code-block:: python\n\n from dagster import asset, Definitions\n from dagster_aws.s3 import S3PickleIOManager, S3Resource\n\n\n @asset\n def asset1():\n # create df ...\n return df\n\n @asset\n def asset2(asset1):\n return asset1[:5]\n\n defs = Definitions(\n assets=[asset1, asset2],\n resources={\n "io_manager": S3PickleIOManager(\n s3_resource=S3Resource(),\n s3_bucket="my-cool-bucket",\n s3_prefix="my-cool-prefix",\n )\n }\n )\n\n """\n\n s3_resource: ResourceDependency[S3Resource]\n s3_bucket: str = Field(description="S3 bucket to use for the file manager.")\n s3_prefix: str = Field(\n default="dagster", description="Prefix to use for the S3 bucket for this file manager."\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @cached_method\n def inner_io_manager(self) -> PickledObjectS3IOManager:\n return PickledObjectS3IOManager(\n s3_bucket=self.s3_bucket,\n s3_session=self.s3_resource.get_client(),\n s3_prefix=self.s3_prefix,\n )\n\n def load_input(self, context: InputContext) -> Any:\n return self.inner_io_manager().load_input(context)\n\n def handle_output(self, context: OutputContext, obj: Any) -> None:\n return self.inner_io_manager().handle_output(context, obj)
\n\n\n
[docs]@deprecated(\n breaking_version="2.0",\n additional_warn_text="Please use S3PickleIOManager instead.",\n)\nclass ConfigurablePickledObjectS3IOManager(S3PickleIOManager):\n """Renamed to S3PickleIOManager. See S3PickleIOManager for documentation."""\n\n pass
\n\n\n
[docs]@dagster_maintained_io_manager\n@io_manager(\n config_schema=S3PickleIOManager.to_config_schema(),\n required_resource_keys={"s3"},\n)\ndef s3_pickle_io_manager(init_context):\n """Persistent IO manager using S3 for storage.\n\n Serializes objects via pickling. Suitable for objects storage for distributed executors, so long\n as each execution node has network connectivity and credentials for S3 and the backing bucket.\n\n Assigns each op output to a unique filepath containing run ID, step key, and output name.\n Assigns each asset to a single filesystem path, at "<base_dir>/<asset_key>". If the asset key\n has multiple components, the final component is used as the name of the file, and the preceding\n components as parent directories under the base_dir.\n\n Subsequent materializations of an asset will overwrite previous materializations of that asset.\n With a base directory of "/my/base/path", an asset with key\n `AssetKey(["one", "two", "three"])` would be stored in a file called "three" in a directory\n with path "/my/base/path/one/two/".\n\n Example usage:\n\n 1. Attach this IO manager to a set of assets.\n\n .. code-block:: python\n\n from dagster import Definitions, asset\n from dagster_aws.s3 import s3_pickle_io_manager, s3_resource\n\n\n @asset\n def asset1():\n # create df ...\n return df\n\n @asset\n def asset2(asset1):\n return asset1[:5]\n\n defs = Definitions(\n assets=[asset1, asset2],\n resources={\n "io_manager": s3_pickle_io_manager.configured(\n {"s3_bucket": "my-cool-bucket", "s3_prefix": "my-cool-prefix"}\n ),\n "s3": s3_resource,\n },\n )\n\n\n 2. Attach this IO manager to your job to make it available to your ops.\n\n .. code-block:: python\n\n from dagster import job\n from dagster_aws.s3 import s3_pickle_io_manager, s3_resource\n\n @job(\n resource_defs={\n "io_manager": s3_pickle_io_manager.configured(\n {"s3_bucket": "my-cool-bucket", "s3_prefix": "my-cool-prefix"}\n ),\n "s3": s3_resource,\n },\n )\n def my_job():\n ...\n """\n s3_session = init_context.resources.s3\n s3_bucket = init_context.resource_config["s3_bucket"]\n s3_prefix = init_context.resource_config.get("s3_prefix") # s3_prefix is optional\n pickled_io_manager = PickledObjectS3IOManager(s3_bucket, s3_session, s3_prefix=s3_prefix)\n return pickled_io_manager
\n
", "current_page_name": "_modules/dagster_aws/s3/io_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_aws.s3.io_manager"}, "ops": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_aws.s3.ops

\nfrom typing import Any, Generator, Mapping\n\nfrom dagster import (\n    AssetMaterialization,\n    Field,\n    FileHandle,\n    In,\n    MetadataValue,\n    Out,\n    Output,\n    StringSource,\n    _check as check,\n    dagster_type_loader,\n    op,\n)\nfrom dagster._core.types.dagster_type import PythonObjectDagsterType\n\nfrom .file_manager import S3FileHandle\n\n\ndef dict_with_fields(name: str, fields: Mapping[str, object]):\n    check.str_param(name, "name")\n    check.mapping_param(fields, "fields", key_type=str)\n    field_names = set(fields.keys())\n\n    @dagster_type_loader(fields)\n    def _input_schema(_context, value):\n        check.dict_param(value, "value")\n        check.param_invariant(set(value.keys()) == field_names, "value")\n        return value\n\n    class _DictWithSchema(PythonObjectDagsterType):\n        def __init__(self):\n            super(_DictWithSchema, self).__init__(python_type=dict, name=name, loader=_input_schema)\n\n    return _DictWithSchema()\n\n\nS3Coordinate = dict_with_fields(\n    "S3Coordinate",\n    fields={\n        "bucket": Field(StringSource, description="S3 bucket name"),\n        "key": Field(StringSource, description="S3 key name"),\n    },\n)\n\n\ndef last_key(key: str) -> str:\n    if "/" not in key:\n        return key\n    comps = key.split("/")\n    return comps[-1]\n\n\n@op(\n    config_schema={\n        "Bucket": Field(\n            StringSource, description="The name of the bucket to upload to.", is_required=True\n        ),\n        "Key": Field(\n            StringSource, description="The name of the key to upload to.", is_required=True\n        ),\n    },\n    ins={"file_handle": In(FileHandle, description="The file to upload.")},\n    out={"s3_file_handle": Out(S3FileHandle)},\n    description="""Take a file handle and upload it to s3. Returns an S3FileHandle.""",\n    required_resource_keys={"s3", "file_manager"},\n)\ndef file_handle_to_s3(context, file_handle) -> Generator[Any, None, None]:\n    bucket = context.op_config["Bucket"]\n    key = context.op_config["Key"]\n\n    file_manager = context.resources.file_manager\n    s3 = context.resources.s3\n\n    with file_manager.read(file_handle, "rb") as fileobj:\n        s3.upload_fileobj(fileobj, bucket, key)\n        s3_file_handle = S3FileHandle(bucket, key)\n\n        yield AssetMaterialization(\n            asset_key=s3_file_handle.s3_path,\n            metadata={last_key(key): MetadataValue.path(s3_file_handle.s3_path)},\n        )\n\n        yield Output(value=s3_file_handle, output_name="s3_file_handle")\n
", "current_page_name": "_modules/dagster_aws/s3/ops", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_aws.s3.ops"}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_aws.s3.resources

\nfrom typing import Any, Optional, TypeVar\n\nfrom dagster import ConfigurableResource, IAttachDifferentObjectToOpContext, resource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom pydantic import Field\n\nfrom .file_manager import S3FileManager\nfrom .utils import construct_s3_client\n\nT = TypeVar("T")\n\n\nclass ResourceWithS3Configuration(ConfigurableResource):\n    use_unsigned_session: bool = Field(\n        default=False, description="Specifies whether to use an unsigned S3 session."\n    )\n    region_name: Optional[str] = Field(\n        default=None, description="Specifies a custom region for the S3 session."\n    )\n    endpoint_url: Optional[str] = Field(\n        default=None, description="Specifies a custom endpoint for the S3 session."\n    )\n    max_attempts: int = Field(\n        default=5,\n        description=(\n            "This provides Boto3's retry handler with a value of maximum retry attempts, where the"\n            " initial call counts toward the max_attempts value that you provide."\n        ),\n    )\n    profile_name: Optional[str] = Field(\n        default=None, description="Specifies a profile to connect that session."\n    )\n    use_ssl: bool = Field(\n        default=True, description="Whether or not to use SSL. By default, SSL is used."\n    )\n    verify: Optional[str] = Field(\n        default=None,\n        description=(\n            "Whether or not to verify SSL certificates. By default SSL certificates are verified."\n            " You can also specify this argument if you want to use a different CA cert bundle than"\n            " the one used by botocore."\n        ),\n    )\n    aws_access_key_id: Optional[str] = Field(\n        default=None, description="AWS access key ID to use when creating the boto3 session."\n    )\n    aws_secret_access_key: Optional[str] = Field(\n        default=None, description="AWS secret access key to use when creating the boto3 session."\n    )\n    aws_session_token: str = Field(\n        default=None, description="AWS session token to use when creating the boto3 session."\n    )\n\n\n
[docs]class S3Resource(ResourceWithS3Configuration, IAttachDifferentObjectToOpContext):\n """Resource that gives access to S3.\n\n The underlying S3 session is created by calling\n :py:func:`boto3.session.Session(profile_name) <boto3:boto3.session>`.\n The returned resource object is an S3 client, an instance of `botocore.client.S3`.\n\n Example:\n .. code-block:: python\n\n from dagster import job, op, Definitions\n from dagster_aws.s3 import S3Resource\n\n @op\n def example_s3_op(s3: S3Resource):\n return s3.get_client().list_objects_v2(\n Bucket='my-bucket',\n Prefix='some-key'\n )\n\n @job\n def example_job():\n example_s3_op()\n\n defs = Definitions(\n jobs=[example_job],\n resources={'s3': S3Resource(region_name='us-west-1')}\n )\n\n """\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def get_client(self) -> Any:\n return construct_s3_client(\n max_attempts=self.max_attempts,\n region_name=self.region_name,\n endpoint_url=self.endpoint_url,\n use_unsigned_session=self.use_unsigned_session,\n profile_name=self.profile_name,\n use_ssl=self.use_ssl,\n verify=self.verify,\n aws_access_key_id=self.aws_access_key_id,\n aws_secret_access_key=self.aws_secret_access_key,\n aws_session_token=self.aws_session_token,\n )\n\n def get_object_to_set_on_execution_context(self) -> Any:\n return self.get_client()
\n\n\n
[docs]@dagster_maintained_resource\n@resource(config_schema=S3Resource.to_config_schema())\ndef s3_resource(context) -> Any:\n """Resource that gives access to S3.\n\n The underlying S3 session is created by calling\n :py:func:`boto3.session.Session(profile_name) <boto3:boto3.session>`.\n The returned resource object is an S3 client, an instance of `botocore.client.S3`.\n\n Example:\n .. code-block:: python\n\n from dagster import build_op_context, job, op\n from dagster_aws.s3 import s3_resource\n\n @op(required_resource_keys={'s3'})\n def example_s3_op(context):\n return context.resources.s3.list_objects_v2(\n Bucket='my-bucket',\n Prefix='some-key'\n )\n\n @job(resource_defs={'s3': s3_resource})\n def example_job():\n example_s3_op()\n\n example_job.execute_in_process(\n run_config={\n 'resources': {\n 's3': {\n 'config': {\n 'region_name': 'us-west-1',\n }\n }\n }\n }\n )\n\n Note that your ops must also declare that they require this resource with\n `required_resource_keys`, or it will not be initialized for the execution of their compute\n functions.\n\n You may configure this resource as follows:\n\n .. code-block:: YAML\n\n resources:\n s3:\n config:\n region_name: "us-west-1"\n # Optional[str]: Specifies a custom region for the S3 session. Default is chosen\n # through the ordinary boto credential chain.\n use_unsigned_session: false\n # Optional[bool]: Specifies whether to use an unsigned S3 session. Default: True\n endpoint_url: "http://localhost"\n # Optional[str]: Specifies a custom endpoint for the S3 session. Default is None.\n profile_name: "dev"\n # Optional[str]: Specifies a custom profile for S3 session. Default is default\n # profile as specified in ~/.aws/credentials file\n use_ssl: true\n # Optional[bool]: Whether or not to use SSL. By default, SSL is used.\n verify: None\n # Optional[str]: Whether or not to verify SSL certificates. By default SSL certificates are verified.\n # You can also specify this argument if you want to use a different CA cert bundle than the one used by botocore."\n aws_access_key_id: None\n # Optional[str]: The access key to use when creating the client.\n aws_secret_access_key: None\n # Optional[str]: The secret key to use when creating the client.\n aws_session_token: None\n # Optional[str]: The session token to use when creating the client.\n """\n return S3Resource.from_resource_context(context).get_client()
\n\n\n
[docs]class S3FileManagerResource(ResourceWithS3Configuration, IAttachDifferentObjectToOpContext):\n s3_bucket: str = Field(description="S3 bucket to use for the file manager.")\n s3_prefix: str = Field(\n default="dagster", description="Prefix to use for the S3 bucket for this file manager."\n )\n\n def get_client(self) -> S3FileManager:\n return S3FileManager(\n s3_session=construct_s3_client(\n max_attempts=self.max_attempts,\n region_name=self.region_name,\n endpoint_url=self.endpoint_url,\n use_unsigned_session=self.use_unsigned_session,\n profile_name=self.profile_name,\n use_ssl=self.use_ssl,\n verify=self.verify,\n aws_access_key_id=self.aws_access_key_id,\n aws_secret_access_key=self.aws_secret_access_key,\n aws_session_token=self.aws_session_token,\n ),\n s3_bucket=self.s3_bucket,\n s3_base_key=self.s3_prefix,\n )\n\n def get_object_to_set_on_execution_context(self) -> Any:\n return self.get_client()
\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n config_schema=S3FileManagerResource.to_config_schema(),\n)\ndef s3_file_manager(context) -> S3FileManager:\n """FileManager that provides abstract access to S3.\n\n Implements the :py:class:`~dagster._core.storage.file_manager.FileManager` API.\n """\n return S3FileManagerResource.from_resource_context(context).get_client()
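\n\n# Hedged usage sketch (editor addition, not part of the original module source): one possible\n# way to attach the legacy s3_file_manager resource defined above. The bucket, prefix, op, and\n# job names are placeholders.\ndef _example_s3_file_manager_job():\n from dagster import job, op\n\n @op(required_resource_keys={"file_manager"})\n def persist_bytes(context):\n return context.resources.file_manager.write_data(b"hello")\n\n @job(\n resource_defs={\n "file_manager": s3_file_manager.configured(\n {"s3_bucket": "my-bucket", "s3_prefix": "reports"}\n )\n }\n )\n def persist_job():\n persist_bytes()\n\n return persist_job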
\n
", "current_page_name": "_modules/dagster_aws/s3/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_aws.s3.resources"}}, "secretsmanager": {"resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_aws.secretsmanager.resources

\nfrom contextlib import contextmanager\nfrom typing import TYPE_CHECKING, Dict, Generator, List, Optional, cast\n\nfrom dagster import (\n    Field as LegacyDagsterField,\n    resource,\n)\nfrom dagster._config.field_utils import Shape\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster._core.test_utils import environ\nfrom dagster._utils.merger import merge_dicts\nfrom pydantic import Field\n\nfrom dagster_aws.utils import ResourceWithBoto3Configuration\n\nfrom .secrets import construct_secretsmanager_client, get_secrets_from_arns, get_tagged_secrets\n\nif TYPE_CHECKING:\n    import botocore\n\n\n
[docs]class SecretsManagerResource(ResourceWithBoto3Configuration):\n """Resource that gives access to AWS SecretsManager.\n\n The underlying SecretsManager session is created by calling\n :py:func:`boto3.session.Session(profile_name) <boto3:boto3.session>`.\n The returned resource object is a SecretsManager client, an instance of `botocore.client.SecretsManager`.\n\n Example:\n .. code-block:: python\n\n from dagster import Definitions, job, op\n from dagster_aws.secretsmanager import SecretsManagerResource\n\n @op\n def example_secretsmanager_op(secretsmanager: SecretsManagerResource):\n return secretsmanager.get_client().get_secret_value(\n SecretId='arn:aws:secretsmanager:region:aws_account_id:secret:appauthexample-AbCdEf'\n )\n\n @job\n def example_job():\n example_secretsmanager_op()\n\n defs = Definitions(\n jobs=[example_job],\n resources={\n 'secretsmanager': SecretsManagerResource(\n region_name='us-west-1'\n )\n }\n )\n """\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def get_client(self) -> "botocore.client.SecretsManager":\n return construct_secretsmanager_client(\n max_attempts=self.max_attempts,\n region_name=self.region_name,\n profile_name=self.profile_name,\n )
\n\n\n
[docs]@dagster_maintained_resource\n@resource(SecretsManagerResource.to_config_schema())\ndef secretsmanager_resource(context) -> "botocore.client.SecretsManager":\n """Resource that gives access to AWS SecretsManager.\n\n The underlying SecretsManager session is created by calling\n :py:func:`boto3.session.Session(profile_name) <boto3:boto3.session>`.\n The returned resource object is a SecretsManager client, an instance of `botocore.client.SecretsManager`.\n\n Example:\n .. code-block:: python\n\n from dagster import build_op_context, job, op\n from dagster_aws.secretsmanager import secretsmanager_resource\n\n @op(required_resource_keys={'secretsmanager'})\n def example_secretsmanager_op(context):\n return context.resources.secretsmanager.get_secret_value(\n SecretId='arn:aws:secretsmanager:region:aws_account_id:secret:appauthexample-AbCdEf'\n )\n\n @job(resource_defs={'secretsmanager': secretsmanager_resource})\n def example_job():\n example_secretsmanager_op()\n\n example_job.execute_in_process(\n run_config={\n 'resources': {\n 'secretsmanager': {\n 'config': {\n 'region_name': 'us-west-1',\n }\n }\n }\n }\n )\n\n You may configure this resource as follows:\n\n .. code-block:: YAML\n\n resources:\n secretsmanager:\n config:\n region_name: "us-west-1"\n # Optional[str]: Specifies a custom region for the SecretsManager session. Default is chosen\n # through the ordinary boto credential chain.\n profile_name: "dev"\n # Optional[str]: Specifies a custom profile for SecretsManager session. Default is default\n # profile as specified in ~/.aws/credentials file\n\n """\n return SecretsManagerResource.from_resource_context(context).get_client()
\n\n\n
[docs]class SecretsManagerSecretsResource(ResourceWithBoto3Configuration):\n """Resource that provides a dict which maps selected SecretsManager secrets to\n their string values. Also optionally sets chosen secrets as environment variables.\n\n Example:\n .. code-block:: python\n\n import os\n from dagster import Definitions, job, op\n from dagster_aws.secretsmanager import SecretsManagerSecretsResource\n\n @op\n def example_secretsmanager_secrets_op(secrets: SecretsManagerSecretsResource):\n return secrets.fetch_secrets().get("my-secret-name")\n\n @op\n def example_secretsmanager_secrets_op_2(secrets: SecretsManagerSecretsResource):\n with secrets.secrets_in_environment():\n return os.getenv("my-other-secret-name")\n\n @job\n def example_job():\n example_secretsmanager_secrets_op()\n example_secretsmanager_secrets_op_2()\n\n defs = Definitions(\n jobs=[example_job],\n resources={\n 'secrets': SecretsManagerSecretsResource(\n region_name='us-west-1',\n secrets_tag="dagster",\n add_to_environment=True,\n )\n }\n )\n\n Note that your ops must also declare that they require this resource, or it will not be initialized\n for the execution of their compute functions.\n """\n\n secrets: List[str] = Field(\n default=[], description="An array of AWS Secrets Manager secrets arns to fetch."\n )\n secrets_tag: Optional[str] = Field(\n default=None,\n description="AWS Secrets Manager secrets with this tag will be fetched and made available.",\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @contextmanager\n def secrets_in_environment(\n self,\n secrets: Optional[List[str]] = None,\n secrets_tag: Optional[str] = None,\n ) -> Generator[Dict[str, str], None, None]:\n """Yields a dict which maps selected SecretsManager secrets to their string values. Also\n sets chosen secrets as environment variables.\n\n Args:\n secrets (Optional[List[str]]): An array of AWS Secrets Manager secrets arns to fetch.\n Note that this will override the secrets specified in the resource config.\n secrets_tag (Optional[str]): AWS Secrets Manager secrets with this tag will be fetched\n and made available. Note that this will override the secrets_tag specified in the\n resource config.\n """\n secrets_manager = construct_secretsmanager_client(\n max_attempts=self.max_attempts,\n region_name=self.region_name,\n profile_name=self.profile_name,\n )\n\n secrets_tag_to_fetch = secrets_tag if secrets_tag is not None else self.secrets_tag\n secrets_to_fetch = secrets if secrets is not None else self.secrets\n\n secret_arns = merge_dicts(\n (\n get_tagged_secrets(secrets_manager, [secrets_tag_to_fetch])\n if secrets_tag_to_fetch\n else {}\n ),\n get_secrets_from_arns(secrets_manager, secrets_to_fetch),\n )\n\n secrets_map = {\n name: secrets_manager.get_secret_value(SecretId=arn).get("SecretString")\n for name, arn in secret_arns.items()\n }\n with environ(secrets_map):\n yield secrets_map\n\n def fetch_secrets(\n self,\n secrets: Optional[List[str]] = None,\n secrets_tag: Optional[str] = None,\n ) -> Dict[str, str]:\n """Fetches secrets from AWS Secrets Manager and returns them as a dict.\n\n Args:\n secrets (Optional[List[str]]): An array of AWS Secrets Manager secrets arns to fetch.\n Note that this will override the secrets specified in the resource config.\n secrets_tag (Optional[str]): AWS Secrets Manager secrets with this tag will be fetched\n and made available. Note that this will override the secrets_tag specified in the\n resource config.\n """\n with self.secrets_in_environment(secrets=secrets, secrets_tag=secrets_tag) as secret_values:\n return secret_values
\n\n\nLEGACY_SECRETSMANAGER_SECRETS_SCHEMA = {\n **cast(Shape, SecretsManagerSecretsResource.to_config_schema().as_field().config_type).fields,\n "add_to_environment": LegacyDagsterField(\n bool,\n default_value=False,\n description="Whether to add the secrets to the environment. Defaults to False.",\n ),\n}\n\n\n
[docs]@dagster_maintained_resource\n@resource(config_schema=LEGACY_SECRETSMANAGER_SECRETS_SCHEMA)\n@contextmanager\ndef secretsmanager_secrets_resource(context):\n """Resource that provides a dict which maps selected SecretsManager secrets to\n their string values. Also optionally sets chosen secrets as environment variables.\n\n Example:\n .. code-block:: python\n\n import os\n from dagster import build_op_context, job, op\n from dagster_aws.secretsmanager import secretsmanager_secrets_resource\n\n @op(required_resource_keys={'secrets'})\n def example_secretsmanager_secrets_op(context):\n return context.resources.secrets.get("my-secret-name")\n\n @op(required_resource_keys={'secrets'})\n def example_secretsmanager_secrets_op_2(context):\n return os.getenv("my-other-secret-name")\n\n @job(resource_defs={'secrets': secretsmanager_secrets_resource})\n def example_job():\n example_secretsmanager_secrets_op()\n example_secretsmanager_secrets_op_2()\n\n example_job.execute_in_process(\n run_config={\n 'resources': {\n 'secrets': {\n 'config': {\n 'region_name': 'us-west-1',\n 'secrets_tag': 'dagster',\n 'add_to_environment': True,\n }\n }\n }\n }\n )\n\n Note that your ops must also declare that they require this resource with\n `required_resource_keys`, or it will not be initialized for the execution of their compute\n functions.\n\n You may configure this resource as follows:\n\n .. code-block:: YAML\n\n resources:\n secretsmanager:\n config:\n region_name: "us-west-1"\n # Optional[str]: Specifies a custom region for the SecretsManager session. Default is chosen\n # through the ordinary boto credential chain.\n profile_name: "dev"\n # Optional[str]: Specifies a custom profile for SecretsManager session. Default is default\n # profile as specified in ~/.aws/credentials file\n secrets: ["arn:aws:secretsmanager:region:aws_account_id:secret:appauthexample-AbCdEf"]\n # Optional[List[str]]: Specifies a list of secret ARNs to pull from SecretsManager.\n secrets_tag: "dagster"\n # Optional[str]: Specifies a tag, all secrets which have the tag set will be pulled\n # from SecretsManager.\n add_to_environment: true\n # Optional[bool]: Whether to set the selected secrets as environment variables. Defaults\n # to false.\n\n """\n add_to_environment = context.resource_config.get("add_to_environment", False)\n if add_to_environment:\n with SecretsManagerSecretsResource.from_resource_context(\n context\n ).secrets_in_environment() as secrets:\n yield secrets\n else:\n yield SecretsManagerSecretsResource.from_resource_context(context).fetch_secrets()
\n
", "current_page_name": "_modules/dagster_aws/secretsmanager/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_aws.secretsmanager.resources"}}}, "dagster_azure": {"adls2": {"fake_adls2_resource": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_azure.adls2.fake_adls2_resource

\nimport io\nimport random\nfrom typing import Any, Dict, Optional\nfrom unittest import mock\n\nfrom dagster import resource\nfrom dagster._config.pythonic_config import ConfigurableResource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster._utils.cached_method import cached_method\n\nfrom dagster_azure.blob import FakeBlobServiceClient\n\nfrom .utils import ResourceNotFoundError\n\n\n@dagster_maintained_resource\n@resource({"account_name": str})\ndef fake_adls2_resource(context):\n    return FakeADLS2Resource(account_name=context.resource_config["account_name"])\n\n\n
[docs]class FakeADLS2Resource(ConfigurableResource):\n """Stateful mock of an ADLS2Resource for testing.\n\n Wraps a ``mock.MagicMock``. Containers are implemented using an in-memory dict.\n """\n\n account_name: str\n storage_account: Optional[str] = None\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @property\n @cached_method\n def adls2_client(self) -> "FakeADLS2ServiceClient":\n return FakeADLS2ServiceClient(self.account_name)\n\n @property\n @cached_method\n def blob_client(self) -> FakeBlobServiceClient:\n return FakeBlobServiceClient(self.account_name)\n\n @property\n def lease_client_constructor(self) -> Any:\n return FakeLeaseClient
\n\n\nclass FakeLeaseClient:\n def __init__(self, client):\n self.client = client\n self.id = None\n\n # client needs a ref to self to check if a given lease is valid\n self.client._lease = self # noqa: SLF001\n\n def acquire(self, lease_duration=-1):\n if self.id is None:\n self.id = random.randint(0, 2**9)\n else:\n raise Exception("Lease already held")\n\n def release(self):\n self.id = None\n\n def is_valid(self, lease):\n if self.id is None:\n # no lease is held so any operation is valid\n return True\n return lease == self.id\n\n\nclass FakeADLS2ServiceClient:\n """Stateful mock of an ADLS2 service client for testing.\n\n Wraps a ``mock.MagicMock``. Containers are implemented using an in-memory dict.\n """\n\n def __init__(self, account_name, credential="fake-creds"):\n self._account_name = account_name\n self._credential = mock.MagicMock()\n self._credential.account_key = credential\n self._file_systems = {}\n\n @property\n def account_name(self):\n return self._account_name\n\n @property\n def credential(self):\n return self._credential\n\n @property\n def file_systems(self):\n return self._file_systems\n\n def get_file_system_client(self, file_system):\n return self._file_systems.setdefault(\n file_system, FakeADLS2FilesystemClient(self.account_name, file_system)\n )\n\n def get_file_client(self, file_system, file_path):\n return self.get_file_system_client(file_system).get_file_client(file_path)\n\n\nclass FakeADLS2FilesystemClient:\n """Stateful mock of an ADLS2 filesystem client for testing."""\n\n def __init__(self, account_name, file_system_name):\n self._file_system: Dict[str, FakeADLS2FileClient] = {}\n self._account_name = account_name\n self._file_system_name = file_system_name\n\n @property\n def account_name(self):\n return self._account_name\n\n @property\n def file_system_name(self):\n return self._file_system_name\n\n def keys(self):\n return self._file_system.keys()\n\n def get_file_system_properties(self):\n return {"account_name": self.account_name, "file_system_name": self.file_system_name}\n\n def has_file(self, path):\n return bool(self._file_system.get(path))\n\n def get_file_client(self, file_path):\n # pass fileclient a ref to self and its name so the file can delete itself\n self._file_system.setdefault(file_path, FakeADLS2FileClient(self, file_path))\n return self._file_system[file_path]\n\n def create_file(self, file):\n # pass fileclient a ref to self and the file's name so the file can delete itself by\n # accessing the self._file_system dict\n self._file_system.setdefault(file, FakeADLS2FileClient(fs_client=self, name=file))\n return self._file_system[file]\n\n def delete_file(self, file):\n for k in list(self._file_system.keys()):\n if k.startswith(file):\n del self._file_system[k]\n\n\nclass FakeADLS2FileClient:\n """Stateful mock of an ADLS2 file client for testing."""\n\n def __init__(self, name, fs_client):\n self.name = name\n self.contents = None\n self._lease = None\n self.fs_client = fs_client\n\n @property\n def lease(self):\n return self._lease if self._lease is None else self._lease.id\n\n def get_file_properties(self):\n if self.contents is None:\n raise ResourceNotFoundError("File does not exist!")\n lease_id = None if self._lease is None else self._lease.id\n return {"lease": lease_id}\n\n def upload_data(self, contents, overwrite=False, lease=None):\n if self._lease is not None:\n if not self._lease.is_valid(lease):\n raise Exception("Invalid lease!")\n if self.contents is not None or overwrite is True:\n if isinstance(contents, str):\n 
self.contents = contents.encode("utf8")\n elif isinstance(contents, io.BytesIO):\n self.contents = contents.read()\n elif isinstance(contents, io.StringIO):\n self.contents = contents.read().encode("utf8")\n elif isinstance(contents, bytes):\n self.contents = contents\n else:\n self.contents = contents\n\n def download_file(self):\n if self.contents is None:\n raise ResourceNotFoundError("File does not exist!")\n return FakeADLS2FileDownloader(contents=self.contents)\n\n def delete_file(self, lease=None):\n if self._lease is not None:\n if not self._lease.is_valid(lease):\n raise Exception("Invalid lease!")\n self.fs_client.delete_file(self.name)\n\n\nclass FakeADLS2FileDownloader:\n """Mock of an ADLS2 file downloader for testing."""\n\n def __init__(self, contents):\n self.contents = contents\n\n def readall(self):\n return self.contents\n\n def readinto(self, fileobj):\n fileobj.write(self.contents)\n
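\n\n# Hedged sketch (editor addition, not part of the original module source): how FakeADLS2Resource\n# might be exercised in a test. The account, file system, and path names are placeholders.\ndef _example_fake_adls2_roundtrip():\n fake = FakeADLS2Resource(account_name="my-account")\n file_client = fake.adls2_client.get_file_client("my-fs", "some/path.txt")\n file_client.upload_data(b"payload", overwrite=True)\n assert file_client.download_file().readall() == b"payload"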
", "current_page_name": "_modules/dagster_azure/adls2/fake_adls2_resource", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_azure.adls2.fake_adls2_resource"}, "file_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_azure.adls2.file_manager

\nimport io\nimport uuid\nfrom contextlib import contextmanager\n\nimport dagster._check as check\nfrom dagster._core.storage.file_manager import (\n    FileHandle,\n    FileManager,\n    TempfileManager,\n    check_file_like_obj,\n)\n\n\n
[docs]class ADLS2FileHandle(FileHandle):\n """A reference to a file on ADLS2."""\n\n def __init__(self, account: str, file_system: str, key: str):\n self._account = check.str_param(account, "account")\n self._file_system = check.str_param(file_system, "file_system")\n self._key = check.str_param(key, "key")\n\n @property\n def account(self):\n """str: The name of the ADLS2 account."""\n return self._account\n\n @property\n def file_system(self):\n """str: The name of the ADLS2 file system."""\n return self._file_system\n\n @property\n def key(self):\n """str: The ADLS2 key."""\n return self._key\n\n @property\n def path_desc(self):\n """str: The file's ADLS2 URL."""\n return self.adls2_path\n\n @property\n def adls2_path(self):\n """str: The file's ADLS2 URL."""\n return f"abfss://{self.file_system}@{self.account}.dfs.core.windows.net/{self.key}"
\n\n\nclass ADLS2FileManager(FileManager):\n def __init__(self, adls2_client, file_system, prefix):\n self._client = adls2_client\n self._file_system = check.str_param(file_system, "file_system")\n self._prefix = check.str_param(prefix, "prefix")\n self._local_handle_cache = {}\n self._temp_file_manager = TempfileManager()\n\n def copy_handle_to_local_temp(self, file_handle):\n self._download_if_not_cached(file_handle)\n return self._get_local_path(file_handle)\n\n def _download_if_not_cached(self, file_handle):\n if not self._file_handle_cached(file_handle):\n # instigate download\n temp_file_obj = self._temp_file_manager.tempfile()\n temp_name = temp_file_obj.name\n file = self._client.get_file_client(\n file_system=file_handle.file_system,\n file_path=file_handle.key,\n )\n download = file.download_file()\n with open(temp_name, "wb") as file_obj:\n download.readinto(file_obj)\n self._local_handle_cache[file_handle.adls2_path] = temp_name\n\n return file_handle\n\n @contextmanager\n def read(self, file_handle, mode="rb"):\n check.inst_param(file_handle, "file_handle", ADLS2FileHandle)\n check.str_param(mode, "mode")\n check.param_invariant(mode in {"r", "rb"}, "mode")\n\n self._download_if_not_cached(file_handle)\n\n encoding = None if "b" in mode else "utf-8"\n with open(self._get_local_path(file_handle), mode, encoding=encoding) as file_obj:\n yield file_obj\n\n def _file_handle_cached(self, file_handle):\n return file_handle.adls2_path in self._local_handle_cache\n\n def _get_local_path(self, file_handle):\n return self._local_handle_cache[file_handle.adls2_path]\n\n def read_data(self, file_handle):\n with self.read(file_handle, mode="rb") as file_obj:\n return file_obj.read()\n\n def write_data(self, data, ext=None):\n check.inst_param(data, "data", bytes)\n return self.write(io.BytesIO(data), mode="wb", ext=ext)\n\n def write(self, file_obj, mode="wb", ext=None):\n check_file_like_obj(file_obj)\n adls2_key = self.get_full_key(str(uuid.uuid4()) + (("." + ext) if ext is not None else ""))\n adls2_file = self._client.get_file_client(\n file_system=self._file_system, file_path=adls2_key\n )\n adls2_file.upload_data(file_obj, overwrite=True)\n return ADLS2FileHandle(self._client.account_name, self._file_system, adls2_key)\n\n def get_full_key(self, file_key):\n return f"{self._prefix}/{file_key}"\n\n def delete_local_temp(self):\n self._temp_file_manager.close()\n
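\n\n# Hedged sketch (editor addition, not part of the original module source): constructing an\n# ADLS2FileManager directly from the clients on an ADLS2Resource. The storage account, key, and\n# file system names are placeholders.\ndef _example_build_adls2_file_manager():\n from dagster_azure.adls2.resources import ADLS2Key, ADLS2Resource\n\n adls2 = ADLS2Resource(\n storage_account="my_storage_account",\n credential=ADLS2Key(key="my-shared-key"),\n )\n return ADLS2FileManager(\n adls2_client=adls2.adls2_client,\n file_system="my-file-system",\n prefix="dagster",\n )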
", "current_page_name": "_modules/dagster_azure/adls2/file_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_azure.adls2.file_manager"}, "io_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_azure.adls2.io_manager

\nimport pickle\nfrom contextlib import contextmanager\nfrom typing import Any, Iterator, Union\n\nfrom dagster import (\n    InputContext,\n    OutputContext,\n    ResourceDependency,\n    _check as check,\n    io_manager,\n)\nfrom dagster._annotations import deprecated\nfrom dagster._config.pythonic_config import ConfigurableIOManager\nfrom dagster._core.storage.io_manager import dagster_maintained_io_manager\nfrom dagster._core.storage.upath_io_manager import UPathIOManager\nfrom dagster._utils import PICKLE_PROTOCOL\nfrom dagster._utils.cached_method import cached_method\nfrom pydantic import Field\nfrom upath import UPath\n\nfrom dagster_azure.adls2.resources import ADLS2Resource\nfrom dagster_azure.adls2.utils import ResourceNotFoundError\n\n_LEASE_DURATION = 60  # One minute\n\n\nclass PickledObjectADLS2IOManager(UPathIOManager):\n    def __init__(\n        self,\n        file_system: Any,\n        adls2_client: Any,\n        blob_client: Any,\n        lease_client_constructor: Any,\n        prefix: str = "dagster",\n    ):\n        self.adls2_client = adls2_client\n        self.file_system_client = self.adls2_client.get_file_system_client(file_system)\n        # We also need a blob client to handle copying as ADLS doesn't have a copy API yet\n        self.blob_client = blob_client\n        self.blob_container_client = self.blob_client.get_container_client(file_system)\n        self.prefix = check.str_param(prefix, "prefix")\n\n        self.lease_client_constructor = lease_client_constructor\n        self.lease_duration = _LEASE_DURATION\n        self.file_system_client.get_file_system_properties()\n        super().__init__(base_path=UPath(self.prefix))\n\n    def get_op_output_relative_path(self, context: Union[InputContext, OutputContext]) -> UPath:\n        parts = context.get_identifier()\n        run_id = parts[0]\n        output_parts = parts[1:]\n        return UPath("storage", run_id, "files", *output_parts)\n\n    def get_loading_input_log_message(self, path: UPath) -> str:\n        return f"Loading ADLS2 object from: {self._uri_for_path(path)}"\n\n    def get_writing_output_log_message(self, path: UPath) -> str:\n        return f"Writing ADLS2 object at: {self._uri_for_path(path)}"\n\n    def unlink(self, path: UPath) -> None:\n        file_client = self.file_system_client.get_file_client(str(path))\n        with self._acquire_lease(file_client, is_rm=True) as lease:\n            file_client.delete_file(lease=lease, recursive=True)\n\n    def make_directory(self, path: UPath) -> None:\n        # It is not necessary to create directories in ADLS2\n        return None\n\n    def path_exists(self, path: UPath) -> bool:\n        try:\n            self.file_system_client.get_file_client(str(path)).get_file_properties()\n        except ResourceNotFoundError:\n            return False\n        return True\n\n    def _uri_for_path(self, path: UPath, protocol: str = "abfss://") -> str:\n        return "{protocol}{filesystem}@{account}.dfs.core.windows.net/{key}".format(\n            protocol=protocol,\n            filesystem=self.file_system_client.file_system_name,\n            account=self.file_system_client.account_name,\n            key=path,\n        )\n\n    @contextmanager\n    def _acquire_lease(self, client: Any, is_rm: bool = False) -> Iterator[str]:\n        lease_client = self.lease_client_constructor(client=client)\n        try:\n            lease_client.acquire(lease_duration=self.lease_duration)\n            yield lease_client.id\n        finally:\n            # 
cannot release a lease on a file that no longer exists, so need to check\n            if not is_rm:\n                lease_client.release()\n\n    def load_from_path(self, context: InputContext, path: UPath) -> Any:\n        if context.dagster_type.typing_type == type(None):\n            return None\n        file = self.file_system_client.get_file_client(str(path))\n        stream = file.download_file()\n        return pickle.loads(stream.readall())\n\n    def dump_to_path(self, context: OutputContext, obj: Any, path: UPath) -> None:\n        if self.path_exists(path):\n            context.log.warning(f"Removing existing ADLS2 key: {path}")\n            self.unlink(path)\n\n        pickled_obj = pickle.dumps(obj, PICKLE_PROTOCOL)\n        file = self.file_system_client.create_file(str(path))\n        with self._acquire_lease(file) as lease:\n            file.upload_data(pickled_obj, lease=lease, overwrite=True)\n\n\n
[docs]class ADLS2PickleIOManager(ConfigurableIOManager):\n """Persistent IO manager using Azure Data Lake Storage Gen2 for storage.\n\n Serializes objects via pickling. Suitable for objects storage for distributed executors, so long\n as each execution node has network connectivity and credentials for ADLS and the backing\n container.\n\n Assigns each op output to a unique filepath containing run ID, step key, and output name.\n Assigns each asset to a single filesystem path, at "<base_dir>/<asset_key>". If the asset key\n has multiple components, the final component is used as the name of the file, and the preceding\n components as parent directories under the base_dir.\n\n Subsequent materializations of an asset will overwrite previous materializations of that asset.\n With a base directory of "/my/base/path", an asset with key\n `AssetKey(["one", "two", "three"])` would be stored in a file called "three" in a directory\n with path "/my/base/path/one/two/".\n\n Example usage:\n\n 1. Attach this IO manager to a set of assets.\n\n .. code-block:: python\n\n from dagster import Definitions, asset\n from dagster_azure.adls2 import ADLS2PickleIOManager, adls2_resource\n\n @asset\n def asset1():\n # create df ...\n return df\n\n @asset\n def asset2(asset1):\n return df[:5]\n\n defs = Definitions(\n assets=[asset1, asset2],\n resources={\n "io_manager": ADLS2PickleIOManager(\n adls2_file_system="my-cool-fs",\n adls2_prefix="my-cool-prefix"\n ),\n "adls2": adls2_resource,\n },\n )\n\n\n 2. Attach this IO manager to your job to make it available to your ops.\n\n .. code-block:: python\n\n from dagster import job\n from dagster_azure.adls2 import ADLS2PickleIOManager, adls2_resource\n\n @job(\n resource_defs={\n "io_manager": ADLS2PickleIOManager(\n adls2_file_system="my-cool-fs",\n adls2_prefix="my-cool-prefix"\n ),\n "adls2": adls2_resource,\n },\n )\n def my_job():\n ...\n """\n\n adls2: ResourceDependency[ADLS2Resource]\n adls2_file_system: str = Field(description="ADLS Gen2 file system name.")\n adls2_prefix: str = Field(\n default="dagster", description="ADLS Gen2 file system prefix to write to."\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @property\n @cached_method\n def _internal_io_manager(self) -> PickledObjectADLS2IOManager:\n return PickledObjectADLS2IOManager(\n self.adls2_file_system,\n self.adls2.adls2_client,\n self.adls2.blob_client,\n self.adls2.lease_client_constructor,\n self.adls2_prefix,\n )\n\n def load_input(self, context: "InputContext") -> Any:\n return self._internal_io_manager.load_input(context)\n\n def handle_output(self, context: "OutputContext", obj: Any) -> None:\n self._internal_io_manager.handle_output(context, obj)
\n\n\n
[docs]@deprecated(\n breaking_version="2.0",\n additional_warn_text="Please use ADLS2PickleIOManager instead.",\n)\nclass ConfigurablePickledObjectADLS2IOManager(ADLS2PickleIOManager):\n """Renamed to ADLS2PickleIOManager. See ADLS2PickleIOManager for documentation."""\n\n pass
\n\n\n
[docs]@dagster_maintained_io_manager\n@io_manager(\n config_schema=ADLS2PickleIOManager.to_config_schema(),\n required_resource_keys={"adls2"},\n)\ndef adls2_pickle_io_manager(init_context):\n """Persistent IO manager using Azure Data Lake Storage Gen2 for storage.\n\n Serializes objects via pickling. Suitable for objects storage for distributed executors, so long\n as each execution node has network connectivity and credentials for ADLS and the backing\n container.\n\n Assigns each op output to a unique filepath containing run ID, step key, and output name.\n Assigns each asset to a single filesystem path, at "<base_dir>/<asset_key>". If the asset key\n has multiple components, the final component is used as the name of the file, and the preceding\n components as parent directories under the base_dir.\n\n Subsequent materializations of an asset will overwrite previous materializations of that asset.\n With a base directory of "/my/base/path", an asset with key\n `AssetKey(["one", "two", "three"])` would be stored in a file called "three" in a directory\n with path "/my/base/path/one/two/".\n\n Example usage:\n\n 1. Attach this IO manager to a set of assets.\n\n .. code-block:: python\n\n from dagster import Definitions, asset\n from dagster_azure.adls2 import adls2_pickle_io_manager, adls2_resource\n\n @asset\n def asset1():\n # create df ...\n return df\n\n @asset\n def asset2(asset1):\n return df[:5]\n\n defs = Definitions(\n assets=[asset1, asset2],\n resources={\n "io_manager": adls2_pickle_io_manager.configured(\n {"adls2_file_system": "my-cool-fs", "adls2_prefix": "my-cool-prefix"}\n ),\n "adls2": adls2_resource,\n },\n )\n\n\n 2. Attach this IO manager to your job to make it available to your ops.\n\n .. code-block:: python\n\n from dagster import job\n from dagster_azure.adls2 import adls2_pickle_io_manager, adls2_resource\n\n @job(\n resource_defs={\n "io_manager": adls2_pickle_io_manager.configured(\n {"adls2_file_system": "my-cool-fs", "adls2_prefix": "my-cool-prefix"}\n ),\n "adls2": adls2_resource,\n },\n )\n def my_job():\n ...\n """\n adls_resource = init_context.resources.adls2\n adls2_client = adls_resource.adls2_client\n blob_client = adls_resource.blob_client\n lease_client = adls_resource.lease_client_constructor\n pickled_io_manager = PickledObjectADLS2IOManager(\n init_context.resource_config["adls2_file_system"],\n adls2_client,\n blob_client,\n lease_client,\n init_context.resource_config.get("adls2_prefix"),\n )\n return pickled_io_manager
\n
", "current_page_name": "_modules/dagster_azure/adls2/io_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_azure.adls2.io_manager"}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_azure.adls2.resources

\nfrom typing import Any, Dict, Union\n\nfrom azure.identity import DefaultAzureCredential\nfrom azure.storage.filedatalake import DataLakeLeaseClient\nfrom dagster import (\n    Config,\n    ConfigurableResource,\n    Field as DagsterField,\n    Permissive,\n    Selector,\n    StringSource,\n    resource,\n)\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster._utils.cached_method import cached_method\nfrom dagster._utils.merger import merge_dicts\nfrom pydantic import Field\nfrom typing_extensions import Literal\n\nfrom dagster_azure.blob.utils import BlobServiceClient, create_blob_client\n\nfrom .file_manager import ADLS2FileManager\nfrom .utils import DataLakeServiceClient, create_adls2_client\n\n\nclass ADLS2SASToken(Config):\n    credential_type: Literal["sas"] = "sas"\n    token: str\n\n\nclass ADLS2Key(Config):\n    credential_type: Literal["key"] = "key"\n    key: str\n\n\nclass ADLS2DefaultAzureCredential(Config):\n    credential_type: Literal["default_azure_credential"] = "default_azure_credential"\n    kwargs: Dict[str, Any]\n\n\nclass ADLS2BaseResource(ConfigurableResource):\n    storage_account: str = Field(description="The storage account name.")\n    credential: Union[ADLS2SASToken, ADLS2Key, ADLS2DefaultAzureCredential] = Field(\n        discriminator="credential_type", description="The credentials with which to authenticate."\n    )\n\n\nDEFAULT_AZURE_CREDENTIAL_CONFIG = DagsterField(\n    Permissive(\n        description="Uses DefaultAzureCredential to authenticate and passed as keyword arguments",\n    )\n)\n\nADLS2_CLIENT_CONFIG = {\n    "storage_account": DagsterField(StringSource, description="The storage account name."),\n    "credential": DagsterField(\n        Selector(\n            {\n                "sas": DagsterField(StringSource, description="SAS token for the account."),\n                "key": DagsterField(StringSource, description="Shared Access Key for the account."),\n                "DefaultAzureCredential": DEFAULT_AZURE_CREDENTIAL_CONFIG,\n            }\n        ),\n        description="The credentials with which to authenticate.",\n    ),\n}\n\n\n
[docs]class ADLS2Resource(ADLS2BaseResource):\n """Resource containing clients to access Azure Data Lake Storage Gen2.\n\n Contains a client for both the Data Lake and Blob APIs, to work around the limitations\n of each.\n """\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @property\n @cached_method\n def _raw_credential(self) -> Any:\n if isinstance(self.credential, ADLS2Key):\n return self.credential.key\n elif isinstance(self.credential, ADLS2SASToken):\n return self.credential.token\n else:\n return DefaultAzureCredential(**self.credential.kwargs)\n\n @property\n @cached_method\n def adls2_client(self) -> DataLakeServiceClient:\n return create_adls2_client(self.storage_account, self._raw_credential)\n\n @property\n @cached_method\n def blob_client(self) -> BlobServiceClient:\n return create_blob_client(self.storage_account, self._raw_credential)\n\n @property\n def lease_client_constructor(self) -> Any:\n return DataLakeLeaseClient
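\n\n# Hedged usage sketch (editor addition, not part of the original module source): attaching the\n# Pythonic ADLS2Resource above with a shared key. The asset name, storage account, and key are\n# placeholders.\ndef _example_adls2_definitions():\n from dagster import Definitions, asset\n\n @asset\n def file_systems(adls2: ADLS2Resource):\n return [fs.name for fs in adls2.adls2_client.list_file_systems()]\n\n return Definitions(\n assets=[file_systems],\n resources={\n "adls2": ADLS2Resource(\n storage_account="my_storage_account",\n credential=ADLS2Key(key="my-shared-key"),\n )\n },\n )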
\n\n\n# Due to a limitation of the discriminated union type, we can't directly mirror these old\n# config fields in the new resource config. Instead, we'll just use the old config fields\n# to construct the new config and then use that to construct the resource.\n
[docs]@dagster_maintained_resource\n@resource(ADLS2_CLIENT_CONFIG)\ndef adls2_resource(context):\n """Resource that gives ops access to Azure Data Lake Storage Gen2.\n\n The underlying client is a :py:class:`~azure.storage.filedatalake.DataLakeServiceClient`.\n\n Attach this resource definition to a :py:class:`~dagster.JobDefinition` in order to make it\n available to your ops.\n\n Example:\n .. code-block:: python\n\n from dagster import job, op\n from dagster_azure.adls2 import adls2_resource\n\n @op(required_resource_keys={'adls2'})\n def example_adls2_op(context):\n return list(context.resources.adls2.adls2_client.list_file_systems())\n\n @job(resource_defs={"adls2": adls2_resource})\n def my_job():\n example_adls2_op()\n\n Note that your ops must also declare that they require this resource with\n `required_resource_keys`, or it will not be initialized for the execution of their compute\n functions.\n\n You may pass credentials to this resource using either a SAS token, a key or by passing the\n `DefaultAzureCredential` object.\n\n .. code-block:: YAML\n\n resources:\n adls2:\n config:\n storage_account: my_storage_account\n # str: The storage account name.\n credential:\n sas: my_sas_token\n # str: the SAS token for the account.\n key:\n env: AZURE_DATA_LAKE_STORAGE_KEY\n # str: The shared access key for the account.\n DefaultAzureCredential: {}\n # dict: The keyword arguments used for DefaultAzureCredential\n # or leave the object empty for no arguments\n DefaultAzureCredential:\n exclude_environment_credential: true\n\n """\n return _adls2_resource_from_config(context.resource_config)
\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n merge_dicts(\n ADLS2_CLIENT_CONFIG,\n {\n "adls2_file_system": DagsterField(\n StringSource, description="ADLS Gen2 file system name"\n ),\n "adls2_prefix": DagsterField(StringSource, is_required=False, default_value="dagster"),\n },\n )\n)\ndef adls2_file_manager(context):\n """FileManager that provides abstract access to ADLS2.\n\n Implements the :py:class:`~dagster._core.storage.file_manager.FileManager` API.\n """\n adls2_client = _adls2_resource_from_config(context.resource_config).adls2_client\n\n return ADLS2FileManager(\n adls2_client=adls2_client,\n file_system=context.resource_config["adls2_file_system"],\n prefix=context.resource_config["adls2_prefix"],\n )
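\n\n# Hedged usage sketch (editor addition, not part of the original module source): configuring the\n# adls2_file_manager resource defined above with a shared key. The account, key, file system, op,\n# and job names are placeholders.\ndef _example_adls2_file_manager_job():\n from dagster import job, op\n\n @op(required_resource_keys={"file_manager"})\n def stash_bytes(context):\n return context.resources.file_manager.write_data(b"hello")\n\n @job(\n resource_defs={\n "file_manager": adls2_file_manager.configured(\n {\n "storage_account": "my_storage_account",\n "credential": {"key": "my-shared-key"},\n "adls2_file_system": "my-file-system",\n }\n )\n }\n )\n def stash_job():\n stash_bytes()\n\n return stash_job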
\n\n\ndef _adls2_resource_from_config(config) -> ADLS2Resource:\n """Args:\n config: A configuration containing the fields in ADLS2_CLIENT_CONFIG.\n\n Returns: An adls2 client.\n """\n storage_account = config["storage_account"]\n if "DefaultAzureCredential" in config["credential"]:\n credential = ADLS2DefaultAzureCredential(\n kwargs=config["credential"]["DefaultAzureCredential"]\n )\n elif "sas" in config["credential"]:\n credential = ADLS2SASToken(token=config["credential"]["sas"])\n else:\n credential = ADLS2Key(key=config["credential"]["key"])\n\n return ADLS2Resource(storage_account=storage_account, credential=credential)\n
", "current_page_name": "_modules/dagster_azure/adls2/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_azure.adls2.resources"}}, "blob": {"compute_log_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_azure.blob.compute_log_manager

\nimport os\nfrom contextlib import contextmanager\nfrom typing import Any, Mapping, Optional, Sequence\n\nimport dagster._seven as seven\nfrom azure.identity import DefaultAzureCredential\nfrom dagster import (\n    Field,\n    Noneable,\n    Permissive,\n    StringSource,\n    _check as check,\n)\nfrom dagster._core.storage.cloud_storage_compute_log_manager import (\n    CloudStorageComputeLogManager,\n    PollingComputeLogSubscriptionManager,\n)\nfrom dagster._core.storage.compute_log_manager import ComputeIOType\nfrom dagster._core.storage.local_compute_log_manager import (\n    IO_TYPE_EXTENSION,\n    LocalComputeLogManager,\n)\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData\nfrom dagster._utils import ensure_dir, ensure_file\nfrom typing_extensions import Self\n\nfrom .utils import create_blob_client, generate_blob_sas\n\n\n
[docs]class AzureBlobComputeLogManager(CloudStorageComputeLogManager, ConfigurableClass):\n """Logs op compute function stdout and stderr to Azure Blob Storage.\n\n This is also compatible with Azure Data Lake Storage.\n\n Users should not instantiate this class directly. Instead, use a YAML block in ``dagster.yaml``\n such as the following:\n\n .. code-block:: YAML\n\n compute_logs:\n module: dagster_azure.blob.compute_log_manager\n class: AzureBlobComputeLogManager\n config:\n storage_account: my-storage-account\n container: my-container\n credential: sas-token-or-secret-key\n default_azure_credential:\n exclude_environment_credential: true\n prefix: "dagster-test-"\n local_dir: "/tmp/cool"\n upload_interval: 30\n\n Args:\n storage_account (str): The storage account name to which to log.\n container (str): The container (or ADLS2 filesystem) to which to log.\n secret_key (Optional[str]): Secret key for the storage account. SAS tokens are not\n supported because we need a secret key to generate a SAS token for a download URL.\n default_azure_credential (Optional[dict]): Use and configure DefaultAzureCredential.\n Cannot be used with sas token or secret key config.\n local_dir (Optional[str]): Path to the local directory in which to stage logs. Default:\n ``dagster._seven.get_system_temp_directory()``.\n prefix (Optional[str]): Prefix for the log file keys.\n upload_interval: (Optional[int]): Interval in seconds to upload partial log files blob storage. By default, will only upload when the capture is complete.\n inst_data (Optional[ConfigurableClassData]): Serializable representation of the compute\n log manager when newed up from config.\n """\n\n def __init__(\n self,\n storage_account,\n container,\n secret_key=None,\n local_dir=None,\n inst_data: Optional[ConfigurableClassData] = None,\n prefix="dagster",\n upload_interval=None,\n default_azure_credential=None,\n ):\n self._storage_account = check.str_param(storage_account, "storage_account")\n self._container = check.str_param(container, "container")\n self._blob_prefix = self._clean_prefix(check.str_param(prefix, "prefix"))\n self._default_azure_credential = check.opt_dict_param(\n default_azure_credential, "default_azure_credential"\n )\n check.opt_str_param(secret_key, "secret_key")\n check.invariant(\n secret_key is not None or default_azure_credential is not None,\n "Missing config: need to provide one of secret_key or default_azure_credential",\n )\n\n if default_azure_credential is None:\n self._blob_client = create_blob_client(storage_account, secret_key)\n else:\n credential = DefaultAzureCredential(**self._default_azure_credential)\n self._blob_client = create_blob_client(storage_account, credential)\n\n self._container_client = self._blob_client.get_container_client(container)\n self._download_urls = {}\n\n # proxy calls to local compute log manager (for subscriptions, etc)\n if not local_dir:\n local_dir = seven.get_system_temp_directory()\n\n self._local_manager = LocalComputeLogManager(local_dir)\n self._subscription_manager = PollingComputeLogSubscriptionManager(self)\n self._upload_interval = check.opt_int_param(upload_interval, "upload_interval")\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n\n @contextmanager\n def _watch_logs(self, dagster_run, step_key=None):\n # proxy watching to the local compute log manager, interacting with the filesystem\n with self.local_manager._watch_logs(dagster_run, step_key): # noqa: SLF001\n yield\n\n @property\n def inst_data(self):\n return 
self._inst_data\n\n @classmethod\n def config_type(cls):\n return {\n "storage_account": StringSource,\n "container": StringSource,\n "secret_key": Field(StringSource, is_required=False),\n "default_azure_credential": Field(\n Noneable(Permissive(description="keyword arguments for DefaultAzureCredential")),\n is_required=False,\n default_value=None,\n ),\n "local_dir": Field(StringSource, is_required=False),\n "prefix": Field(StringSource, is_required=False, default_value="dagster"),\n "upload_interval": Field(Noneable(int), is_required=False, default_value=None),\n }\n\n @classmethod\n def from_config_value(\n cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any]\n ) -> Self:\n return AzureBlobComputeLogManager(inst_data=inst_data, **config_value)\n\n @property\n def local_manager(self) -> LocalComputeLogManager:\n return self._local_manager\n\n @property\n def upload_interval(self) -> Optional[int]:\n return self._upload_interval if self._upload_interval else None\n\n def _clean_prefix(self, prefix):\n parts = prefix.split("/")\n return "/".join([part for part in parts if part])\n\n def _blob_key(self, log_key, io_type, partial=False):\n check.inst_param(io_type, "io_type", ComputeIOType)\n extension = IO_TYPE_EXTENSION[io_type]\n [*namespace, filebase] = log_key\n filename = f"{filebase}.{extension}"\n if partial:\n filename = f"{filename}.partial"\n paths = [self._blob_prefix, "storage", *namespace, filename]\n return "/".join(paths) # blob path delimiter\n\n def delete_logs(\n self, log_key: Optional[Sequence[str]] = None, prefix: Optional[Sequence[str]] = None\n ):\n self.local_manager.delete_logs(log_key=log_key, prefix=prefix)\n if log_key:\n prefix_path = "/".join([self._blob_prefix, "storage", *log_key])\n elif prefix:\n # add the trailing '/' to make sure that ['a'] does not match ['apple']\n prefix_path = "/".join([self._blob_prefix, "storage", *prefix, ""])\n else:\n prefix_path = None\n\n blob_list = {\n b.name for b in list(self._container_client.list_blobs(name_starts_with=prefix_path))\n }\n\n to_remove = None\n if log_key:\n # filter to the known set of keys\n known_keys = [\n self._blob_key(log_key, ComputeIOType.STDOUT),\n self._blob_key(log_key, ComputeIOType.STDERR),\n self._blob_key(log_key, ComputeIOType.STDOUT, partial=True),\n self._blob_key(log_key, ComputeIOType.STDERR, partial=True),\n ]\n to_remove = [key for key in known_keys if key in blob_list]\n elif prefix:\n to_remove = list(blob_list)\n else:\n check.failed("Must pass in either `log_key` or `prefix` argument to delete_logs")\n\n if to_remove:\n self._container_client.delete_blobs(*to_remove)\n\n def download_url_for_type(self, log_key: Sequence[str], io_type: ComputeIOType):\n if not self.is_capture_complete(log_key):\n return None\n\n blob_key = self._blob_key(log_key, io_type)\n if blob_key in self._download_urls:\n return self._download_urls[blob_key]\n blob = self._container_client.get_blob_client(blob_key)\n sas = generate_blob_sas(\n self._storage_account,\n self._container,\n blob_key,\n account_key=self._blob_client.credential.account_key,\n )\n url = blob.url + sas\n self._download_urls[blob_key] = url\n return url\n\n def display_path_for_type(self, log_key: Sequence[str], io_type: ComputeIOType):\n if not self.is_capture_complete(log_key):\n return self.local_manager.get_captured_local_path(log_key, IO_TYPE_EXTENSION[io_type])\n\n blob_key = self._blob_key(log_key, io_type)\n return f"https://{self._storage_account}.blob.core.windows.net/{self._container}/{blob_key}"\n\n def 
cloud_storage_has_logs(\n self, log_key: Sequence[str], io_type: ComputeIOType, partial: bool = False\n ) -> bool:\n blob_key = self._blob_key(log_key, io_type, partial=partial)\n blob_objects = self._container_client.list_blobs(blob_key)\n exact_matches = [blob for blob in blob_objects if blob.name == blob_key]\n return len(exact_matches) > 0\n\n def upload_to_cloud_storage(\n self, log_key: Sequence[str], io_type: ComputeIOType, partial=False\n ):\n path = self.local_manager.get_captured_local_path(log_key, IO_TYPE_EXTENSION[io_type])\n ensure_file(path)\n blob_key = self._blob_key(log_key, io_type, partial=partial)\n with open(path, "rb") as data:\n blob = self._container_client.get_blob_client(blob_key)\n blob.upload_blob(data)\n\n def download_from_cloud_storage(\n self, log_key: Sequence[str], io_type: ComputeIOType, partial=False\n ):\n path = self.local_manager.get_captured_local_path(\n log_key, IO_TYPE_EXTENSION[io_type], partial=partial\n )\n ensure_dir(os.path.dirname(path))\n blob_key = self._blob_key(log_key, io_type, partial=partial)\n with open(path, "wb") as fileobj:\n blob = self._container_client.get_blob_client(blob_key)\n blob.download_blob().readinto(fileobj)\n\n def on_subscribe(self, subscription):\n self._subscription_manager.add_subscription(subscription)\n\n def on_unsubscribe(self, subscription):\n self._subscription_manager.remove_subscription(subscription)
\n
", "current_page_name": "_modules/dagster_azure/blob/compute_log_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_azure.blob.compute_log_manager"}}}, "dagster_celery": {"executor": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_celery.executor

\nfrom dagster import (\n    Executor,\n    Field,\n    Noneable,\n    Permissive,\n    StringSource,\n    _check as check,\n    executor,\n    multiple_process_executor_requirements,\n)\nfrom dagster._core.execution.retries import RetryMode, get_retries_config\nfrom dagster._grpc.types import ExecuteStepArgs\nfrom dagster._serdes import pack_value\n\nfrom .config import DEFAULT_CONFIG, dict_wrapper\nfrom .defaults import broker_url, result_backend\n\nCELERY_CONFIG = {\n    "broker": Field(\n        Noneable(StringSource),\n        is_required=False,\n        description=(\n            "The URL of the Celery broker. Default: "\n            "'pyamqp://guest@{os.getenv('DAGSTER_CELERY_BROKER_HOST',"\n            "'localhost')}//'."\n        ),\n    ),\n    "backend": Field(\n        Noneable(StringSource),\n        is_required=False,\n        default_value="rpc://",\n        description="The URL of the Celery results backend. Default: 'rpc://'.",\n    ),\n    "include": Field(\n        [str], is_required=False, description="List of modules every worker should import"\n    ),\n    "config_source": Field(\n        Noneable(Permissive()),\n        is_required=False,\n        description="Additional settings for the Celery app.",\n    ),\n    "retries": get_retries_config(),\n}\n\n\n
[docs]@executor(\n name="celery",\n config_schema=CELERY_CONFIG,\n requirements=multiple_process_executor_requirements(),\n)\ndef celery_executor(init_context):\n """Celery-based executor.\n\n The Celery executor exposes config settings for the underlying Celery app under\n the ``config_source`` key. This config corresponds to the "new lowercase settings" introduced\n in Celery version 4.0 and the object constructed from config will be passed to the\n :py:class:`celery.Celery` constructor as its ``config_source`` argument.\n (See https://docs.celeryq.dev/en/stable/userguide/configuration.html for details.)\n\n The executor also exposes the ``broker``, `backend`, and ``include`` arguments to the\n :py:class:`celery.Celery` constructor.\n\n In the most common case, you may want to modify the ``broker`` and ``backend`` (e.g., to use\n Redis instead of RabbitMQ). We expect that ``config_source`` will be less frequently\n modified, but that when solid executions are especially fast or slow, or when there are\n different requirements around idempotence or retry, it may make sense to execute jobs\n with variations on these settings.\n\n To use the `celery_executor`, set it as the `executor_def` when defining a job:\n\n .. code-block:: python\n\n from dagster import job\n from dagster_celery import celery_executor\n\n @job(executor_def=celery_executor)\n def celery_enabled_job():\n pass\n\n Then you can configure the executor as follows:\n\n .. code-block:: YAML\n\n execution:\n config:\n broker: 'pyamqp://guest@localhost//' # Optional[str]: The URL of the Celery broker\n backend: 'rpc://' # Optional[str]: The URL of the Celery results backend\n include: ['my_module'] # Optional[List[str]]: Modules every worker should import\n config_source: # Dict[str, Any]: Any additional parameters to pass to the\n #... # Celery workers. This dict will be passed as the `config_source`\n #... # argument of celery.Celery().\n\n Note that the YAML you provide here must align with the configuration with which the Celery\n workers on which you hope to run were started. If, for example, you point the executor at a\n different broker than the one your workers are listening to, the workers will never be able to\n pick up tasks for execution.\n """\n return CeleryExecutor(\n broker=init_context.executor_config.get("broker"),\n backend=init_context.executor_config.get("backend"),\n config_source=init_context.executor_config.get("config_source"),\n include=init_context.executor_config.get("include"),\n retries=RetryMode.from_config(init_context.executor_config["retries"]),\n )
\n\n\ndef _submit_task(app, plan_context, step, queue, priority, known_state):\n from .tasks import create_task\n\n execute_step_args = ExecuteStepArgs(\n job_origin=plan_context.reconstructable_job.get_python_origin(),\n run_id=plan_context.dagster_run.run_id,\n step_keys_to_execute=[step.key],\n instance_ref=plan_context.instance.get_ref(),\n retry_mode=plan_context.executor.retries.for_inner_plan(),\n known_state=known_state,\n print_serialized_events=True, # Not actually checked by the celery task\n )\n\n task = create_task(app)\n task_signature = task.si(\n execute_step_args_packed=pack_value(execute_step_args),\n executable_dict=plan_context.reconstructable_job.to_dict(),\n )\n return task_signature.apply_async(\n priority=priority,\n queue=queue,\n routing_key=f"{queue}.execute_plan",\n )\n\n\nclass CeleryExecutor(Executor):\n def __init__(\n self,\n retries,\n broker=None,\n backend=None,\n include=None,\n config_source=None,\n ):\n self.broker = check.opt_str_param(broker, "broker", default=broker_url)\n self.backend = check.opt_str_param(backend, "backend", default=result_backend)\n self.include = check.opt_list_param(include, "include", of_type=str)\n self.config_source = dict_wrapper(\n dict(DEFAULT_CONFIG, **check.opt_dict_param(config_source, "config_source"))\n )\n self._retries = check.inst_param(retries, "retries", RetryMode)\n\n @property\n def retries(self):\n return self._retries\n\n def execute(self, plan_context, execution_plan):\n from .core_execution_loop import core_celery_execution_loop\n\n return core_celery_execution_loop(\n plan_context, execution_plan, step_execution_fn=_submit_task\n )\n\n @staticmethod\n def for_cli(broker=None, backend=None, include=None, config_source=None):\n return CeleryExecutor(\n retries=RetryMode(RetryMode.DISABLED),\n broker=broker,\n backend=backend,\n include=include,\n config_source=config_source,\n )\n\n def app_args(self):\n return {\n "broker": self.broker,\n "backend": self.backend,\n "include": self.include,\n "config_source": self.config_source,\n "retries": self.retries,\n }\n
", "current_page_name": "_modules/dagster_celery/executor", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_celery.executor"}}, "dagster_celery_docker": {"executor": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_celery_docker.executor

\nimport os\n\nimport docker.client\nfrom dagster import (\n    DagsterInstance,\n    Executor,\n    Field,\n    Permissive,\n    StringSource,\n    _check as check,\n    executor,\n    multiple_process_executor_requirements,\n)\nfrom dagster._cli.api import ExecuteStepArgs\nfrom dagster._core.events import EngineEventData\nfrom dagster._core.events.utils import filter_dagster_events_from_cli_logs\nfrom dagster._core.execution.retries import RetryMode\nfrom dagster._core.storage.dagster_run import DagsterRun\nfrom dagster._serdes import pack_value, serialize_value, unpack_value\nfrom dagster._utils.merger import merge_dicts\nfrom dagster_celery.config import DEFAULT_CONFIG, dict_wrapper\nfrom dagster_celery.core_execution_loop import DELEGATE_MARKER, core_celery_execution_loop\nfrom dagster_celery.defaults import broker_url, result_backend\nfrom dagster_celery.executor import CELERY_CONFIG\n\nCELERY_DOCKER_CONFIG_KEY = "celery-docker"\n\n\ndef celery_docker_config():\n    additional_config = {\n        "docker": Field(\n            {\n                "image": Field(\n                    StringSource,\n                    is_required=False,\n                    description="The docker image to be used for step execution.",\n                ),\n                "registry": Field(\n                    {\n                        "url": Field(StringSource),\n                        "username": Field(StringSource),\n                        "password": Field(StringSource),\n                    },\n                    is_required=False,\n                    description="Information for using a non local/public docker registry",\n                ),\n                "env_vars": Field(\n                    [str],\n                    is_required=False,\n                    description=(\n                        "The list of environment variables names to forward from the celery worker"\n                        " in to the docker container"\n                    ),\n                ),\n                "network": Field(\n                    str,\n                    is_required=False,\n                    description=(\n                        "Name of the network this container will be connected to at creation time"\n                    ),\n                ),\n                "container_kwargs": Field(\n                    Permissive(),\n                    is_required=False,\n                    description="Additional keyword args for the docker container",\n                ),\n            },\n            is_required=True,\n            description="The configuration for interacting with docker in the celery worker.",\n        ),\n    }\n\n    cfg = merge_dicts(CELERY_CONFIG, additional_config)\n    return cfg\n\n\n
[docs]@executor(\n name=CELERY_DOCKER_CONFIG_KEY,\n config_schema=celery_docker_config(),\n requirements=multiple_process_executor_requirements(),\n)\ndef celery_docker_executor(init_context):\n """Celery-based executor which launches tasks in docker containers.\n\n The Celery executor exposes config settings for the underlying Celery app under\n the ``config_source`` key. This config corresponds to the "new lowercase settings" introduced\n in Celery version 4.0 and the object constructed from config will be passed to the\n :py:class:`celery.Celery` constructor as its ``config_source`` argument.\n (See https://docs.celeryq.dev/en/stable/userguide/configuration.html for details.)\n\n The executor also exposes the ``broker``, `backend`, and ``include`` arguments to the\n :py:class:`celery.Celery` constructor.\n\n In the most common case, you may want to modify the ``broker`` and ``backend`` (e.g., to use\n Redis instead of RabbitMQ). We expect that ``config_source`` will be less frequently\n modified, but that when op executions are especially fast or slow, or when there are\n different requirements around idempotence or retry, it may make sense to execute jobs\n with variations on these settings.\n\n To use the `celery_docker_executor`, set it as the `executor_def` when defining a job:\n\n .. code-block:: python\n\n from dagster import job\n from dagster_celery_docker.executor import celery_docker_executor\n\n @job(executor_def=celery_docker_executor)\n def celery_enabled_job():\n pass\n\n Then you can configure the executor as follows:\n\n .. code-block:: YAML\n\n execution:\n config:\n docker:\n image: 'my_repo.com/image_name:latest'\n registry:\n url: 'my_repo.com'\n username: 'my_user'\n password: {env: 'DOCKER_PASSWORD'}\n env_vars: ["DAGSTER_HOME"] # environment vars to pass from celery worker to docker\n container_kwargs: # keyword args to be passed to the container. example:\n volumes: ['/home/user1/:/mnt/vol2','/var/www:/mnt/vol1']\n\n broker: 'pyamqp://guest@localhost//' # Optional[str]: The URL of the Celery broker\n backend: 'rpc://' # Optional[str]: The URL of the Celery results backend\n include: ['my_module'] # Optional[List[str]]: Modules every worker should import\n config_source: # Dict[str, Any]: Any additional parameters to pass to the\n #... # Celery workers. This dict will be passed as the `config_source`\n #... # argument of celery.Celery().\n\n Note that the YAML you provide here must align with the configuration with which the Celery\n workers on which you hope to run were started. If, for example, you point the executor at a\n different broker than the one your workers are listening to, the workers will never be able to\n pick up tasks for execution.\n\n In deployments where the celery_docker_job_executor is used all appropriate celery and dagster_celery\n commands must be invoked with the `-A dagster_celery_docker.app` argument.\n """\n exc_cfg = init_context.executor_config\n\n return CeleryDockerExecutor(\n broker=exc_cfg.get("broker"),\n backend=exc_cfg.get("backend"),\n config_source=exc_cfg.get("config_source"),\n include=exc_cfg.get("include"),\n retries=RetryMode.from_config(exc_cfg.get("retries")),\n docker_config=exc_cfg.get("docker"),\n )
\n\n\nclass CeleryDockerExecutor(Executor):\n def __init__(\n self,\n retries,\n docker_config,\n broker=None,\n backend=None,\n include=None,\n config_source=None,\n ):\n self._retries = check.inst_param(retries, "retries", RetryMode)\n self.broker = check.opt_str_param(broker, "broker", default=broker_url)\n self.backend = check.opt_str_param(backend, "backend", default=result_backend)\n self.include = check.opt_list_param(include, "include", of_type=str)\n self.config_source = dict_wrapper(\n dict(DEFAULT_CONFIG, **check.opt_dict_param(config_source, "config_source"))\n )\n self.docker_config = check.dict_param(docker_config, "docker_config")\n\n @property\n def retries(self):\n return self._retries\n\n def execute(self, plan_context, execution_plan):\n return core_celery_execution_loop(\n plan_context, execution_plan, step_execution_fn=_submit_task_docker\n )\n\n def app_args(self):\n return {\n "broker": self.broker,\n "backend": self.backend,\n "include": self.include,\n "config_source": self.config_source,\n "retries": self.retries,\n }\n\n\ndef _submit_task_docker(app, plan_context, step, queue, priority, known_state):\n execute_step_args = ExecuteStepArgs(\n job_origin=plan_context.reconstructable_job.get_python_origin(),\n run_id=plan_context.dagster_run.run_id,\n step_keys_to_execute=[step.key],\n instance_ref=plan_context.instance.get_ref(),\n retry_mode=plan_context.executor.retries.for_inner_plan(),\n known_state=known_state,\n print_serialized_events=True,\n )\n\n task = create_docker_task(app)\n task_signature = task.si(\n execute_step_args_packed=pack_value(execute_step_args),\n docker_config=plan_context.executor.docker_config,\n )\n return task_signature.apply_async(\n priority=priority,\n queue=queue,\n routing_key=f"{queue}.execute_step_docker",\n )\n\n\ndef create_docker_task(celery_app, **task_kwargs):\n @celery_app.task(bind=True, name="execute_step_docker", **task_kwargs)\n def _execute_step_docker(\n self,\n execute_step_args_packed,\n docker_config,\n ):\n """Run step execution in a Docker container."""\n execute_step_args = unpack_value(\n check.dict_param(\n execute_step_args_packed,\n "execute_step_args_packed",\n ),\n as_type=ExecuteStepArgs,\n )\n\n check.dict_param(docker_config, "docker_config")\n\n instance = DagsterInstance.from_ref(execute_step_args.instance_ref)\n dagster_run = instance.get_run_by_id(execute_step_args.run_id)\n check.inst(\n dagster_run,\n DagsterRun,\n f"Could not load run {execute_step_args.run_id}",\n )\n step_keys_str = ", ".join(execute_step_args.step_keys_to_execute)\n\n docker_image = (\n docker_config["image"]\n if docker_config.get("image")\n else dagster_run.job_code_origin.repository_origin.container_image\n )\n\n if not docker_image:\n raise Exception("No docker image specified by either the job or the repository")\n\n client = docker.client.from_env()\n\n if docker_config.get("registry"):\n client.login(\n registry=docker_config["registry"]["url"],\n username=docker_config["registry"]["username"],\n password=docker_config["registry"]["password"],\n )\n\n # Post event for starting execution\n engine_event = instance.report_engine_event(\n f"Executing steps {step_keys_str} in Docker container {docker_image}",\n dagster_run,\n EngineEventData(\n {\n "Step keys": step_keys_str,\n "Image": docker_image,\n "Celery worker": self.request.hostname,\n },\n marker_end=DELEGATE_MARKER,\n ),\n CeleryDockerExecutor,\n step_key=execute_step_args.step_keys_to_execute[0],\n )\n\n serialized_events = [serialize_value(engine_event)]\n\n 
docker_env = {}\n if docker_config.get("env_vars"):\n docker_env = {env_name: os.getenv(env_name) for env_name in docker_config["env_vars"]}\n\n container_kwargs = check.opt_dict_param(\n docker_config.get("container_kwargs"), "container_kwargs", key_type=str\n )\n\n # set defaults for detach and auto_remove\n container_kwargs["detach"] = container_kwargs.get("detach", False)\n container_kwargs["auto_remove"] = container_kwargs.get("auto_remove", True)\n\n # if environment variables are provided via container_kwargs, merge with env_vars\n if container_kwargs.get("environment") is not None:\n e_vars = container_kwargs.get("environment")\n if isinstance(e_vars, dict):\n docker_env.update(e_vars)\n else:\n for v in e_vars:\n key, val = v.split("=")\n docker_env[key] = val\n del container_kwargs["environment"]\n\n try:\n docker_response = client.containers.run(\n docker_image,\n command=execute_step_args.get_command_args(),\n # pass through this worker's environment for things like AWS creds etc.\n environment=docker_env,\n network=docker_config.get("network", None),\n **container_kwargs,\n )\n\n res = docker_response.decode("utf-8")\n except docker.errors.ContainerError as err:\n metadata = {"Job image": docker_image}\n if err.stderr is not None:\n metadata["Docker stderr"] = err.stderr\n\n instance.report_engine_event(\n f"Failed to run steps {step_keys_str} in Docker container {docker_image}",\n dagster_run,\n EngineEventData(metadata),\n CeleryDockerExecutor,\n step_key=execute_step_args.step_keys_to_execute[0],\n )\n raise\n else:\n if res is None:\n raise Exception("No response from execute_step in CeleryDockerExecutor")\n\n events = filter_dagster_events_from_cli_logs(res.split("\\n"))\n serialized_events += [serialize_value(event) for event in events]\n\n return serialized_events\n\n return _execute_step_docker\n
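The ``_execute_step_docker`` task above forwards named environment variables from the Celery worker and then merges anything supplied under ``container_kwargs["environment"]`` on top of them. A standalone sketch of that merge behavior, using hypothetical variable names and nothing beyond the standard library:

.. code-block:: python

    import os

    # Hypothetical executor config mirroring the "docker" section shown above.
    docker_config = {
        "env_vars": ["DAGSTER_HOME", "AWS_PROFILE"],
        "container_kwargs": {
            "environment": ["EXTRA_FLAG=1", "AWS_PROFILE=override"],
            "auto_remove": True,
        },
    }

    # Forward named variables from the worker's environment, as the task does.
    docker_env = {name: os.getenv(name) for name in docker_config.get("env_vars", [])}

    # Merge container_kwargs["environment"]: a dict is applied directly, a list of
    # "KEY=VAL" strings is split. Values merged here override the forwarded ones.
    e_vars = docker_config["container_kwargs"].pop("environment", None)
    if isinstance(e_vars, dict):
        docker_env.update(e_vars)
    elif e_vars is not None:
        for item in e_vars:
            key, val = item.split("=")
            docker_env[key] = val

    print(docker_env)  # AWS_PROFILE ends up as "override", EXTRA_FLAG as "1"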
", "current_page_name": "_modules/dagster_celery_docker/executor", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_celery_docker.executor"}}, "dagster_celery_k8s": {"executor": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_celery_k8s.executor

\nimport logging\nimport os\nimport sys\nimport time\n\nimport kubernetes\nfrom dagster import (\n    DagsterEvent,\n    DagsterEventType,\n    DagsterInstance,\n    Executor,\n    _check as check,\n    executor,\n    multiple_process_executor_requirements,\n)\nfrom dagster._cli.api import ExecuteStepArgs\nfrom dagster._core.errors import DagsterUnmetExecutorRequirementsError\nfrom dagster._core.events import EngineEventData\nfrom dagster._core.events.log import EventLogEntry\nfrom dagster._core.events.utils import filter_dagster_events_from_cli_logs\nfrom dagster._core.execution.plan.objects import StepFailureData, UserFailureData\nfrom dagster._core.execution.retries import RetryMode\nfrom dagster._core.storage.dagster_run import DagsterRun, DagsterRunStatus\nfrom dagster._serdes import pack_value, serialize_value, unpack_value\nfrom dagster._utils.error import serializable_error_info_from_exc_info\nfrom dagster_celery.config import DEFAULT_CONFIG, dict_wrapper\nfrom dagster_celery.core_execution_loop import DELEGATE_MARKER\nfrom dagster_celery.defaults import broker_url, result_backend\nfrom dagster_k8s import DagsterK8sJobConfig, construct_dagster_k8s_job\nfrom dagster_k8s.client import (\n    DagsterK8sAPIRetryLimitExceeded,\n    DagsterK8sError,\n    DagsterK8sJobStatusException,\n    DagsterK8sTimeoutError,\n    DagsterK8sUnrecoverableAPIError,\n    DagsterKubernetesClient,\n)\nfrom dagster_k8s.job import (\n    UserDefinedDagsterK8sConfig,\n    get_k8s_job_name,\n    get_user_defined_k8s_config,\n)\n\nfrom .config import CELERY_K8S_CONFIG_KEY, celery_k8s_executor_config\nfrom .launcher import CeleryK8sRunLauncher\n\n\n
[docs]@executor(\n name=CELERY_K8S_CONFIG_KEY,\n config_schema=celery_k8s_executor_config(),\n requirements=multiple_process_executor_requirements(),\n)\ndef celery_k8s_job_executor(init_context):\n """Celery-based executor which launches tasks as Kubernetes Jobs.\n\n The Celery executor exposes config settings for the underlying Celery app under\n the ``config_source`` key. This config corresponds to the "new lowercase settings" introduced\n in Celery version 4.0 and the object constructed from config will be passed to the\n :py:class:`celery.Celery` constructor as its ``config_source`` argument.\n (See https://docs.celeryq.dev/en/stable/userguide/configuration.html for details.)\n\n The executor also exposes the ``broker``, `backend`, and ``include`` arguments to the\n :py:class:`celery.Celery` constructor.\n\n In the most common case, you may want to modify the ``broker`` and ``backend`` (e.g., to use\n Redis instead of RabbitMQ). We expect that ``config_source`` will be less frequently\n modified, but that when op executions are especially fast or slow, or when there are\n different requirements around idempotence or retry, it may make sense to execute dagster jobs\n with variations on these settings.\n\n To use the `celery_k8s_job_executor`, set it as the `executor_def` when defining a job:\n\n .. literalinclude:: ../../../../../../python_modules/libraries/dagster-celery-k8s/dagster_celery_k8s_tests/example_celery_mode_def.py\n :language: python\n\n Then you can configure the executor as follows:\n\n .. code-block:: YAML\n\n execution:\n config:\n job_image: 'my_repo.com/image_name:latest'\n job_namespace: 'some-namespace'\n broker: 'pyamqp://guest@localhost//' # Optional[str]: The URL of the Celery broker\n backend: 'rpc://' # Optional[str]: The URL of the Celery results backend\n include: ['my_module'] # Optional[List[str]]: Modules every worker should import\n config_source: # Dict[str, Any]: Any additional parameters to pass to the\n #... # Celery workers. This dict will be passed as the `config_source`\n #... # argument of celery.Celery().\n\n Note that the YAML you provide here must align with the configuration with which the Celery\n workers on which you hope to run were started. 
If, for example, you point the executor at a\n different broker than the one your workers are listening to, the workers will never be able to\n pick up tasks for execution.\n\n In deployments where the celery_k8s_job_executor is used all appropriate celery and dagster_celery\n commands must be invoked with the `-A dagster_celery_k8s.app` argument.\n """\n run_launcher = init_context.instance.run_launcher\n exc_cfg = init_context.executor_config\n\n if not isinstance(run_launcher, CeleryK8sRunLauncher):\n raise DagsterUnmetExecutorRequirementsError(\n "This engine is only compatible with a CeleryK8sRunLauncher; configure the "\n "CeleryK8sRunLauncher on your instance to use it.",\n )\n\n job_config = run_launcher.get_k8s_job_config(\n job_image=exc_cfg.get("job_image") or os.getenv("DAGSTER_CURRENT_IMAGE"), exc_config=exc_cfg\n )\n\n # Set on the instance but overrideable here\n broker = run_launcher.broker or exc_cfg.get("broker")\n backend = run_launcher.backend or exc_cfg.get("backend")\n config_source = run_launcher.config_source or exc_cfg.get("config_source")\n include = run_launcher.include or exc_cfg.get("include")\n retries = run_launcher.retries or RetryMode.from_config(exc_cfg.get("retries"))\n\n return CeleryK8sJobExecutor(\n broker=broker,\n backend=backend,\n config_source=config_source,\n include=include,\n retries=retries,\n job_config=job_config,\n job_namespace=exc_cfg.get("job_namespace", run_launcher.job_namespace),\n load_incluster_config=exc_cfg.get("load_incluster_config"),\n kubeconfig_file=exc_cfg.get("kubeconfig_file"),\n repo_location_name=exc_cfg.get("repo_location_name"),\n job_wait_timeout=exc_cfg.get("job_wait_timeout"),\n )
\n\n\nclass CeleryK8sJobExecutor(Executor):\n def __init__(\n self,\n retries,\n broker=None,\n backend=None,\n include=None,\n config_source=None,\n job_config=None,\n job_namespace=None,\n load_incluster_config=False,\n kubeconfig_file=None,\n repo_location_name=None,\n job_wait_timeout=None,\n ):\n if load_incluster_config:\n check.invariant(\n kubeconfig_file is None,\n "`kubeconfig_file` is set but `load_incluster_config` is True.",\n )\n else:\n check.opt_str_param(kubeconfig_file, "kubeconfig_file")\n\n self._retries = check.inst_param(retries, "retries", RetryMode)\n self.broker = check.opt_str_param(broker, "broker", default=broker_url)\n self.backend = check.opt_str_param(backend, "backend", default=result_backend)\n self.include = check.opt_list_param(include, "include", of_type=str)\n self.config_source = dict_wrapper(\n dict(DEFAULT_CONFIG, **check.opt_dict_param(config_source, "config_source"))\n )\n self.job_config = check.inst_param(job_config, "job_config", DagsterK8sJobConfig)\n self.job_namespace = check.opt_str_param(job_namespace, "job_namespace")\n\n self.load_incluster_config = check.bool_param(\n load_incluster_config, "load_incluster_config"\n )\n\n self.kubeconfig_file = check.opt_str_param(kubeconfig_file, "kubeconfig_file")\n self.repo_location_name = check.opt_str_param(repo_location_name, "repo_location_name")\n self.job_wait_timeout = check.float_param(job_wait_timeout, "job_wait_timeout")\n\n @property\n def retries(self):\n return self._retries\n\n def execute(self, plan_context, execution_plan):\n from dagster_celery.core_execution_loop import core_celery_execution_loop\n\n return core_celery_execution_loop(\n plan_context, execution_plan, step_execution_fn=_submit_task_k8s_job\n )\n\n def app_args(self):\n return {\n "broker": self.broker,\n "backend": self.backend,\n "include": self.include,\n "config_source": self.config_source,\n "retries": self.retries,\n }\n\n\ndef _submit_task_k8s_job(app, plan_context, step, queue, priority, known_state):\n user_defined_k8s_config = get_user_defined_k8s_config(step.tags)\n\n job_origin = plan_context.reconstructable_job.get_python_origin()\n\n execute_step_args = ExecuteStepArgs(\n job_origin=job_origin,\n run_id=plan_context.dagster_run.run_id,\n step_keys_to_execute=[step.key],\n instance_ref=plan_context.instance.get_ref(),\n retry_mode=plan_context.executor.retries.for_inner_plan(),\n known_state=known_state,\n should_verify_step=True,\n print_serialized_events=True,\n )\n\n job_config = plan_context.executor.job_config\n if not job_config.job_image:\n job_config = job_config.with_image(job_origin.repository_origin.container_image)\n\n if not job_config.job_image:\n raise Exception("No image included in either executor config or the dagster job")\n\n task = create_k8s_job_task(app)\n task_signature = task.si(\n execute_step_args_packed=pack_value(execute_step_args),\n job_config_dict=job_config.to_dict(),\n job_namespace=plan_context.executor.job_namespace,\n user_defined_k8s_config_dict=user_defined_k8s_config.to_dict(),\n load_incluster_config=plan_context.executor.load_incluster_config,\n job_wait_timeout=plan_context.executor.job_wait_timeout,\n kubeconfig_file=plan_context.executor.kubeconfig_file,\n )\n\n return task_signature.apply_async(\n priority=priority,\n queue=queue,\n routing_key=f"{queue}.execute_step_k8s_job",\n )\n\n\ndef construct_step_failure_event_and_handle(dagster_run, step_key, err, instance):\n step_failure_event = DagsterEvent(\n event_type_value=DagsterEventType.STEP_FAILURE.value,\n 
job_name=dagster_run.job_name,\n step_key=step_key,\n event_specific_data=StepFailureData(\n error=serializable_error_info_from_exc_info(sys.exc_info()),\n user_failure_data=UserFailureData(label="K8sError"),\n ),\n )\n event_record = EventLogEntry(\n user_message=str(err),\n level=logging.ERROR,\n job_name=dagster_run.job_name,\n run_id=dagster_run.run_id,\n error_info=None,\n step_key=step_key,\n timestamp=time.time(),\n dagster_event=step_failure_event,\n )\n instance.handle_new_event(event_record)\n return step_failure_event\n\n\ndef create_k8s_job_task(celery_app, **task_kwargs):\n @celery_app.task(bind=True, name="execute_step_k8s_job", **task_kwargs)\n def _execute_step_k8s_job(\n self,\n execute_step_args_packed,\n job_config_dict,\n job_namespace,\n load_incluster_config,\n job_wait_timeout,\n user_defined_k8s_config_dict=None,\n kubeconfig_file=None,\n ):\n """Run step execution in a K8s job pod."""\n execute_step_args = unpack_value(\n check.dict_param(\n execute_step_args_packed,\n "execute_step_args_packed",\n )\n )\n check.inst_param(execute_step_args, "execute_step_args", ExecuteStepArgs)\n check.invariant(\n len(execute_step_args.step_keys_to_execute) == 1,\n "Celery K8s task executor can only execute 1 step at a time",\n )\n\n # Celery will serialize this as a list\n job_config = DagsterK8sJobConfig.from_dict(job_config_dict)\n check.inst_param(job_config, "job_config", DagsterK8sJobConfig)\n check.str_param(job_namespace, "job_namespace")\n\n check.bool_param(load_incluster_config, "load_incluster_config")\n\n user_defined_k8s_config = UserDefinedDagsterK8sConfig.from_dict(\n user_defined_k8s_config_dict\n )\n check.opt_inst_param(\n user_defined_k8s_config,\n "user_defined_k8s_config",\n UserDefinedDagsterK8sConfig,\n )\n check.opt_str_param(kubeconfig_file, "kubeconfig_file")\n\n # For when launched via DinD or running the cluster\n if load_incluster_config:\n kubernetes.config.load_incluster_config()\n else:\n kubernetes.config.load_kube_config(kubeconfig_file)\n\n api_client = DagsterKubernetesClient.production_client()\n instance = DagsterInstance.from_ref(execute_step_args.instance_ref)\n dagster_run = instance.get_run_by_id(execute_step_args.run_id)\n\n check.inst(\n dagster_run,\n DagsterRun,\n f"Could not load run {execute_step_args.run_id}",\n )\n step_key = execute_step_args.step_keys_to_execute[0]\n\n celery_worker_name = self.request.hostname\n celery_pod_name = os.environ.get("HOSTNAME")\n instance.report_engine_event(\n f"Task for step {step_key} picked up by Celery",\n dagster_run,\n EngineEventData(\n {\n "Celery worker name": celery_worker_name,\n "Celery worker Kubernetes Pod name": celery_pod_name,\n }\n ),\n CeleryK8sJobExecutor,\n step_key=step_key,\n )\n\n if dagster_run.status != DagsterRunStatus.STARTED:\n instance.report_engine_event(\n "Not scheduling step because dagster run status is not STARTED",\n dagster_run,\n EngineEventData(\n {\n "Step key": step_key,\n }\n ),\n CeleryK8sJobExecutor,\n step_key=step_key,\n )\n return []\n\n # Ensure we stay below k8s name length limits\n k8s_name_key = get_k8s_job_name(execute_step_args.run_id, step_key)\n\n retry_state = execute_step_args.known_state.get_retry_state()\n\n if retry_state.get_attempt_count(step_key):\n attempt_number = retry_state.get_attempt_count(step_key)\n job_name = "dagster-step-%s-%d" % (k8s_name_key, attempt_number)\n pod_name = "dagster-step-%s-%d" % (k8s_name_key, attempt_number)\n else:\n job_name = "dagster-step-%s" % (k8s_name_key)\n pod_name = "dagster-step-%s" % 
(k8s_name_key)\n\n args = execute_step_args.get_command_args()\n\n labels = {\n "dagster/job": dagster_run.job_name,\n "dagster/op": step_key,\n "dagster/run-id": execute_step_args.run_id,\n }\n if dagster_run.external_job_origin:\n labels["dagster/code-location"] = (\n dagster_run.external_job_origin.external_repository_origin.code_location_origin.location_name\n )\n job = construct_dagster_k8s_job(\n job_config,\n args,\n job_name,\n user_defined_k8s_config,\n pod_name,\n component="step_worker",\n labels=labels,\n env_vars=[\n {\n "name": "DAGSTER_RUN_JOB_NAME",\n "value": dagster_run.job_name,\n },\n {"name": "DAGSTER_RUN_STEP_KEY", "value": step_key},\n ],\n )\n\n # Running list of events generated from this task execution\n events = []\n\n # Post event for starting execution\n job_name = job.metadata.name\n engine_event = instance.report_engine_event(\n f'Executing step "{step_key}" in Kubernetes job {job_name}.',\n dagster_run,\n EngineEventData(\n {\n "Step key": step_key,\n "Kubernetes Job name": job_name,\n "Job image": job_config.job_image,\n "Image pull policy": job_config.image_pull_policy,\n "Image pull secrets": str(job_config.image_pull_secrets),\n "Service account name": str(job_config.service_account_name),\n },\n marker_end=DELEGATE_MARKER,\n ),\n CeleryK8sJobExecutor,\n # validated above that step_keys is length 1, and it is not possible to use ETH or\n # execution plan in this function (Celery K8s workers should not access to user code)\n step_key=step_key,\n )\n events.append(engine_event)\n try:\n api_client.batch_api.create_namespaced_job(body=job, namespace=job_namespace)\n except kubernetes.client.rest.ApiException as e:\n if e.reason == "Conflict":\n # There is an existing job with the same name so proceed and see if the existing job succeeded\n instance.report_engine_event(\n "Did not create Kubernetes job {} for step {} since job name already "\n "exists, proceeding with existing job.".format(job_name, step_key),\n dagster_run,\n EngineEventData(\n {\n "Step key": step_key,\n "Kubernetes Job name": job_name,\n },\n marker_end=DELEGATE_MARKER,\n ),\n CeleryK8sJobExecutor,\n step_key=step_key,\n )\n else:\n instance.report_engine_event(\n "Encountered unexpected error while creating Kubernetes job {} for step {}, "\n "exiting.".format(job_name, step_key),\n dagster_run,\n EngineEventData(\n {\n "Step key": step_key,\n },\n error=serializable_error_info_from_exc_info(sys.exc_info()),\n ),\n CeleryK8sJobExecutor,\n step_key=step_key,\n )\n return []\n\n try:\n api_client.wait_for_job_success(\n job_name=job_name,\n namespace=job_namespace,\n instance=instance,\n run_id=execute_step_args.run_id,\n wait_timeout=job_wait_timeout,\n )\n except (DagsterK8sError, DagsterK8sTimeoutError) as err:\n step_failure_event = construct_step_failure_event_and_handle(\n dagster_run, step_key, err, instance=instance\n )\n events.append(step_failure_event)\n except DagsterK8sJobStatusException:\n instance.report_engine_event(\n "Terminating Kubernetes Job because dagster run status is not STARTED",\n dagster_run,\n EngineEventData(\n {\n "Step key": step_key,\n "Kubernetes Job name": job_name,\n "Kubernetes Job namespace": job_namespace,\n }\n ),\n CeleryK8sJobExecutor,\n step_key=step_key,\n )\n api_client.delete_job(job_name=job_name, namespace=job_namespace)\n return []\n except (\n DagsterK8sUnrecoverableAPIError,\n DagsterK8sAPIRetryLimitExceeded,\n # We shouldn't see unwrapped APIExceptions anymore, as they should all be wrapped in\n # a retry boundary. 
We still catch it here just in case we missed one so that we can\n # report it to the event log\n kubernetes.client.rest.ApiException,\n ):\n instance.report_engine_event(\n "Encountered unexpected error while waiting on Kubernetes job {} for step {}, "\n "exiting.".format(job_name, step_key),\n dagster_run,\n EngineEventData(\n {\n "Step key": step_key,\n },\n error=serializable_error_info_from_exc_info(sys.exc_info()),\n ),\n CeleryK8sJobExecutor,\n step_key=step_key,\n )\n return []\n\n try:\n pod_names = api_client.get_pod_names_in_job(job_name, namespace=job_namespace)\n except kubernetes.client.rest.ApiException:\n instance.report_engine_event(\n "Encountered unexpected error retreiving Pods for Kubernetes job {} for step {}, "\n "exiting.".format(job_name, step_key),\n dagster_run,\n EngineEventData(\n {\n "Step key": step_key,\n },\n error=serializable_error_info_from_exc_info(sys.exc_info()),\n ),\n CeleryK8sJobExecutor,\n step_key=step_key,\n )\n return []\n\n # Post engine event for log retrieval\n engine_event = instance.report_engine_event(\n "Retrieving logs from Kubernetes Job pods",\n dagster_run,\n EngineEventData({"Pod names": "\\n".join(pod_names)}),\n CeleryK8sJobExecutor,\n step_key=step_key,\n )\n events.append(engine_event)\n\n logs = []\n for pod_name in pod_names:\n try:\n raw_logs = api_client.retrieve_pod_logs(pod_name, namespace=job_namespace)\n logs += raw_logs.split("\\n")\n except kubernetes.client.exceptions.ApiException:\n instance.report_engine_event(\n "Encountered unexpected error while fetching pod logs for Kubernetes job {}, "\n "Pod name {} for step {}. Will attempt to continue with other pods.".format(\n job_name, pod_name, step_key\n ),\n dagster_run,\n EngineEventData(\n {\n "Step key": step_key,\n },\n error=serializable_error_info_from_exc_info(sys.exc_info()),\n ),\n CeleryK8sJobExecutor,\n step_key=step_key,\n )\n\n events += filter_dagster_events_from_cli_logs(logs)\n serialized_events = [serialize_value(event) for event in events]\n return serialized_events\n\n return _execute_step_k8s_job\n
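The ``celery_k8s_job_executor`` docstring above points at a ``literalinclude`` that is not rendered in this page body. A minimal usage sketch, analogous to the ``celery_docker_executor`` example earlier, assuming ``dagster-celery-k8s`` is installed:

.. code-block:: python

    from dagster import job
    from dagster_celery_k8s import celery_k8s_job_executor


    @job(executor_def=celery_k8s_job_executor)
    def celery_k8s_enabled_job():
        pass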
", "current_page_name": "_modules/dagster_celery_k8s/executor", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_celery_k8s.executor"}, "launcher": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_celery_k8s.launcher

\nimport sys\nfrom typing import Optional, cast\n\nimport kubernetes\nfrom dagster import (\n    DagsterInvariantViolationError,\n    _check as check,\n)\nfrom dagster._config import process_config, resolve_to_config_type\nfrom dagster._core.events import EngineEventData\nfrom dagster._core.execution.retries import RetryMode\nfrom dagster._core.launcher import LaunchRunContext, RunLauncher\nfrom dagster._core.launcher.base import CheckRunHealthResult, WorkerStatus\nfrom dagster._core.origin import JobPythonOrigin\nfrom dagster._core.storage.dagster_run import DagsterRun\nfrom dagster._core.storage.tags import DOCKER_IMAGE_TAG\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData\nfrom dagster._utils.error import serializable_error_info_from_exc_info\nfrom dagster._utils.merger import merge_dicts\nfrom dagster_k8s.client import DagsterKubernetesClient\nfrom dagster_k8s.job import (\n    DagsterK8sJobConfig,\n    construct_dagster_k8s_job,\n    get_job_name_from_run_id,\n    get_user_defined_k8s_config,\n)\n\nfrom .config import CELERY_K8S_CONFIG_KEY, celery_k8s_executor_config\n\n\n
[docs]class CeleryK8sRunLauncher(RunLauncher, ConfigurableClass):\n """In contrast to the :py:class:`K8sRunLauncher`, which launches dagster runs as single K8s\n Jobs, this run launcher is intended for use in concert with\n :py:func:`dagster_celery_k8s.celery_k8s_job_executor`.\n\n With this run launcher, execution is delegated to:\n\n 1. A run worker Kubernetes Job, which traverses the dagster run execution plan and\n submits steps to Celery queues for execution;\n 2. The step executions which are submitted to Celery queues are picked up by Celery workers,\n and each step execution spawns a step execution Kubernetes Job. See the implementation\n defined in :py:func:`dagster_celery_k8.executor.create_k8s_job_task`.\n\n You can configure a Dagster instance to use this RunLauncher by adding a section to your\n ``dagster.yaml`` like the following:\n\n .. code-block:: yaml\n\n run_launcher:\n module: dagster_k8s.launcher\n class: CeleryK8sRunLauncher\n config:\n instance_config_map: "dagster-k8s-instance-config-map"\n dagster_home: "/some/path"\n postgres_password_secret: "dagster-k8s-pg-password"\n broker: "some_celery_broker_url"\n backend: "some_celery_backend_url"\n\n """\n\n def __init__(\n self,\n instance_config_map,\n dagster_home,\n postgres_password_secret,\n load_incluster_config=True,\n kubeconfig_file=None,\n broker=None,\n backend=None,\n include=None,\n config_source=None,\n retries=None,\n inst_data: Optional[ConfigurableClassData] = None,\n k8s_client_batch_api=None,\n env_config_maps=None,\n env_secrets=None,\n volume_mounts=None,\n volumes=None,\n service_account_name=None,\n image_pull_policy=None,\n image_pull_secrets=None,\n labels=None,\n fail_pod_on_run_failure=None,\n job_namespace=None,\n ):\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n\n if load_incluster_config:\n check.invariant(\n kubeconfig_file is None,\n "`kubeconfig_file` is set but `load_incluster_config` is True.",\n )\n kubernetes.config.load_incluster_config()\n else:\n check.opt_str_param(kubeconfig_file, "kubeconfig_file")\n kubernetes.config.load_kube_config(kubeconfig_file)\n\n self._api_client = DagsterKubernetesClient.production_client(\n batch_api_override=k8s_client_batch_api\n )\n\n self.instance_config_map = check.str_param(instance_config_map, "instance_config_map")\n self.dagster_home = check.str_param(dagster_home, "dagster_home")\n self.postgres_password_secret = check.str_param(\n postgres_password_secret, "postgres_password_secret"\n )\n self.broker = check.opt_str_param(broker, "broker")\n self.backend = check.opt_str_param(backend, "backend")\n self.include = check.opt_list_param(include, "include")\n self.config_source = check.opt_dict_param(config_source, "config_source")\n\n retries = check.opt_dict_param(retries, "retries") or {"enabled": {}}\n self.retries = RetryMode.from_config(retries)\n\n self._env_config_maps = check.opt_list_param(\n env_config_maps, "env_config_maps", of_type=str\n )\n self._env_secrets = check.opt_list_param(env_secrets, "env_secrets", of_type=str)\n\n self._volume_mounts = check.opt_list_param(volume_mounts, "volume_mounts")\n self._volumes = check.opt_list_param(volumes, "volumes")\n\n self._service_account_name = check.opt_str_param(\n service_account_name, "service_account_name"\n )\n self._image_pull_policy = check.opt_str_param(\n image_pull_policy, "image_pull_policy", "IfNotPresent"\n )\n self._image_pull_secrets = check.opt_list_param(\n image_pull_secrets, "image_pull_secrets", of_type=dict\n )\n 
self._labels = check.opt_dict_param(labels, "labels", key_type=str, value_type=str)\n self._fail_pod_on_run_failure = check.opt_bool_param(\n fail_pod_on_run_failure, "fail_pod_on_run_failure"\n )\n self.job_namespace = check.opt_str_param(job_namespace, "job_namespace", default="default")\n\n super().__init__()\n\n @classmethod\n def config_type(cls):\n from dagster_celery.executor import CELERY_CONFIG\n\n return merge_dicts(DagsterK8sJobConfig.config_type_run_launcher(), CELERY_CONFIG)\n\n @classmethod\n def from_config_value(cls, inst_data, config_value):\n return cls(inst_data=inst_data, **config_value)\n\n @property\n def inst_data(self):\n return self._inst_data\n\n def launch_run(self, context: LaunchRunContext) -> None:\n run = context.dagster_run\n\n job_name = get_job_name_from_run_id(run.run_id)\n pod_name = job_name\n exc_config = _get_validated_celery_k8s_executor_config(run.run_config)\n\n job_image_from_executor_config = exc_config.get("job_image")\n\n job_origin = cast(JobPythonOrigin, context.job_code_origin)\n repository_origin = job_origin.repository_origin\n\n job_image = repository_origin.container_image\n\n if job_image:\n if job_image_from_executor_config:\n job_image = job_image_from_executor_config\n self._instance.report_engine_event(\n f"You have specified a job_image {job_image_from_executor_config} in your"\n f" executor configuration, but also {job_image} in your user-code"\n f" deployment. Using the job image {job_image_from_executor_config} from"\n " executor configuration as it takes precedence.",\n run,\n cls=self.__class__,\n )\n else:\n if not job_image_from_executor_config:\n raise DagsterInvariantViolationError(\n "You have not specified a job_image in your executor configuration. To resolve"\n " this error, specify the job_image configuration in the executor config"\n " section in your run config. \\nNote: You may also be seeing this error because"\n " you are using the configured API. 
Using configured with the celery-k8s"\n " executor is not supported at this time, and the job_image must be configured"\n " at the top-level executor config without using configured."\n )\n\n job_image = job_image_from_executor_config\n\n job_config = self.get_k8s_job_config(job_image, exc_config)\n\n self._instance.add_run_tags(\n run.run_id,\n {DOCKER_IMAGE_TAG: job_config.job_image},\n )\n\n user_defined_k8s_config = get_user_defined_k8s_config(run.tags)\n\n from dagster._cli.api import ExecuteRunArgs\n\n run_args = ExecuteRunArgs(\n job_origin=job_origin,\n run_id=run.run_id,\n instance_ref=self._instance.get_ref(),\n set_exit_code_on_failure=self._fail_pod_on_run_failure,\n ).get_command_args()\n\n labels = {\n "dagster/job": job_origin.job_name,\n "dagster/run-id": run.run_id,\n }\n if run.external_job_origin:\n labels["dagster/code-location"] = (\n run.external_job_origin.external_repository_origin.code_location_origin.location_name\n )\n\n job = construct_dagster_k8s_job(\n job_config,\n args=run_args,\n job_name=job_name,\n pod_name=pod_name,\n component="run_worker",\n user_defined_k8s_config=user_defined_k8s_config,\n labels=labels,\n env_vars=[{"name": "DAGSTER_RUN_JOB_NAME", "value": job_origin.job_name}],\n )\n\n job_namespace = exc_config.get("job_namespace", self.job_namespace)\n\n self._instance.report_engine_event(\n "Creating Kubernetes run worker job",\n run,\n EngineEventData(\n {\n "Kubernetes Job name": job_name,\n "Kubernetes Namespace": job_namespace,\n "Run ID": run.run_id,\n }\n ),\n cls=self.__class__,\n )\n\n self._api_client.batch_api.create_namespaced_job(body=job, namespace=job_namespace)\n self._instance.report_engine_event(\n "Kubernetes run worker job created",\n run,\n EngineEventData(\n {\n "Kubernetes Job name": job_name,\n "Kubernetes Namespace": job_namespace,\n "Run ID": run.run_id,\n }\n ),\n cls=self.__class__,\n )\n\n def get_k8s_job_config(self, job_image, exc_config):\n return DagsterK8sJobConfig(\n dagster_home=self.dagster_home,\n instance_config_map=self.instance_config_map,\n postgres_password_secret=self.postgres_password_secret,\n job_image=check.opt_str_param(job_image, "job_image"),\n image_pull_policy=exc_config.get("image_pull_policy", self._image_pull_policy),\n image_pull_secrets=exc_config.get("image_pull_secrets", []) + self._image_pull_secrets,\n service_account_name=exc_config.get("service_account_name", self._service_account_name),\n env_config_maps=exc_config.get("env_config_maps", []) + self._env_config_maps,\n env_secrets=exc_config.get("env_secrets", []) + self._env_secrets,\n volume_mounts=exc_config.get("volume_mounts", []) + self._volume_mounts,\n volumes=exc_config.get("volumes", []) + self._volumes,\n labels=merge_dicts(self._labels, exc_config.get("labels", {})),\n )\n\n def terminate(self, run_id):\n check.str_param(run_id, "run_id")\n\n run = self._instance.get_run_by_id(run_id)\n if not run:\n return False\n\n self._instance.report_run_canceling(run)\n\n job_name = get_job_name_from_run_id(run_id)\n\n job_namespace = self.get_namespace_from_run_config(run_id)\n\n try:\n termination_result = self._api_client.delete_job(\n job_name=job_name, namespace=job_namespace\n )\n if termination_result:\n self._instance.report_engine_event(\n message="Dagster Job was terminated successfully.",\n dagster_run=run,\n cls=self.__class__,\n )\n else:\n self._instance.report_engine_event(\n message=(\n "Dagster Job was not terminated successfully; delete_job returned {}"\n .format(termination_result)\n ),\n dagster_run=run,\n 
cls=self.__class__,\n )\n return termination_result\n except Exception:\n self._instance.report_engine_event(\n message=(\n "Dagster Job was not terminated successfully; encountered error in delete_job"\n ),\n dagster_run=run,\n engine_event_data=EngineEventData.engine_error(\n serializable_error_info_from_exc_info(sys.exc_info())\n ),\n cls=self.__class__,\n )\n\n def get_namespace_from_run_config(self, run_id):\n check.str_param(run_id, "run_id")\n\n dagster_run = self._instance.get_run_by_id(run_id)\n run_config = dagster_run.run_config\n executor_config = _get_validated_celery_k8s_executor_config(run_config)\n return executor_config.get("job_namespace", self.job_namespace)\n\n @property\n def supports_check_run_worker_health(self):\n return True\n\n def check_run_worker_health(self, run: DagsterRun):\n job_namespace = _get_validated_celery_k8s_executor_config(run.run_config).get(\n "job_namespace", self.job_namespace\n )\n job_name = get_job_name_from_run_id(run.run_id)\n try:\n status = self._api_client.get_job_status(namespace=job_namespace, job_name=job_name)\n except Exception:\n return CheckRunHealthResult(\n WorkerStatus.UNKNOWN, str(serializable_error_info_from_exc_info(sys.exc_info()))\n )\n if status.failed:\n return CheckRunHealthResult(WorkerStatus.FAILED, "K8s job failed")\n return CheckRunHealthResult(WorkerStatus.RUNNING)
\n\n\ndef _get_validated_celery_k8s_executor_config(run_config):\n check.dict_param(run_config, "run_config")\n\n executor_config = run_config.get("execution", {})\n execution_config_schema = resolve_to_config_type(celery_k8s_executor_config())\n\n # In run config on jobs, we don't have an executor key\n if CELERY_K8S_CONFIG_KEY not in executor_config:\n execution_run_config = executor_config.get("config", {})\n else:\n execution_run_config = (run_config["execution"][CELERY_K8S_CONFIG_KEY] or {}).get(\n "config", {}\n )\n\n res = process_config(execution_config_schema, execution_run_config)\n\n check.invariant(\n res.success,\n "Incorrect execution schema provided. Note: You may also be seeing this error "\n "because you are using the configured API. "\n "Using configured with the {config_key} executor is not supported at this time, "\n "and all executor config must be directly in the run config without using configured."\n .format(\n config_key=CELERY_K8S_CONFIG_KEY,\n ),\n )\n\n return res.value\n
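``_get_validated_celery_k8s_executor_config`` above accepts the executor config either directly under ``execution.config`` (job-style run config) or nested under the ``celery-k8s`` key. A sketch of the two run-config shapes it recognizes, with placeholder values:

.. code-block:: python

    # Job-style run config: no executor key, config sits directly under "execution".
    run_config_job_style = {
        "execution": {
            "config": {
                "job_image": "my_repo.com/image_name:latest",
                "job_namespace": "some-namespace",
            }
        }
    }

    # Keyed style: the same config nested under the "celery-k8s" executor key.
    run_config_keyed = {
        "execution": {
            "celery-k8s": {
                "config": {
                    "job_image": "my_repo.com/image_name:latest",
                    "job_namespace": "some-namespace",
                }
            }
        }
    }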
", "current_page_name": "_modules/dagster_celery_k8s/launcher", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_celery_k8s.launcher"}}, "dagster_census": {"ops": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_census.ops

\nfrom dagster import Array, Bool, Field, In, Noneable, Nothing, Out, Output, op\n\nfrom .resources import DEFAULT_POLL_INTERVAL\nfrom .types import CensusOutput\nfrom .utils import generate_materialization\n\n\n
[docs]@op(\n required_resource_keys={"census"},\n ins={"start_after": In(Nothing)},\n out=Out(\n CensusOutput,\n description=(\n "Parsed json dictionary representing the details of the Census sync after "\n "the sync successfully completes."\n ),\n ),\n config_schema={\n "sync_id": Field(\n int,\n is_required=True,\n description="Id of the parent sync.",\n ),\n "force_full_sync": Field(\n config=Bool,\n default_value=False,\n description=(\n "If this trigger request should be a Full Sync. "\n "Note that some sync configurations such as Append do not support full syncs."\n ),\n ),\n "poll_interval": Field(\n float,\n default_value=DEFAULT_POLL_INTERVAL,\n description="The time (in seconds) to wait between successive polls.",\n ),\n "poll_timeout": Field(\n Noneable(float),\n default_value=None,\n description=(\n "The maximum time to wait before this operation is timed out. By "\n "default, this will never time out."\n ),\n ),\n "yield_materializations": Field(\n config=Bool,\n default_value=True,\n description=(\n "If True, materializations corresponding to the results of the Census sync will "\n "be yielded when the op executes."\n ),\n ),\n "asset_key_prefix": Field(\n config=Array(str),\n default_value=["census"],\n description=(\n "If provided and yield_materializations is True, these components will be used to "\n "prefix the generated asset keys."\n ),\n ),\n },\n tags={"kind": "census"},\n)\ndef census_trigger_sync_op(context):\n """Executes a Census sync for a given ``sync_id`` and polls until that sync completes, raising\n an error if it is unsuccessful.\n\n It outputs a :py:class:`~dagster_census.CensusOutput` which contains the details of the Census\n sync after it successfully completes.\n\n It requires the use of the :py:class:`~dagster_census.census_resource`, which allows it to\n communicate with the Census API.\n\n **Examples:**\n\n .. code-block:: python\n\n from dagster import job\n from dagster_census import census_resource, census_sync_op\n\n my_census_resource = census_resource.configured(\n {\n "api_key": {"env": "CENSUS_API_KEY"},\n }\n )\n\n sync_foobar = census_sync_op.configured({"sync_id": "foobar"}, name="sync_foobar")\n\n @job(resource_defs={"census": my_census_resource})\n def my_simple_census_job():\n sync_foobar()\n\n """\n census_output = context.resources.census.trigger_sync_and_poll(\n sync_id=context.op_config["sync_id"],\n force_full_sync=context.op_config["force_full_sync"],\n poll_interval=context.op_config["poll_interval"],\n poll_timeout=context.op_config["poll_timeout"],\n )\n if context.op_config["yield_materializations"]:\n yield generate_materialization(\n census_output, asset_key_prefix=context.op_config["asset_key_prefix"]\n )\n yield Output(census_output)
\n
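The docstring example above configures only ``sync_id``; a sketch exercising the other fields defined in the config schema (poll settings and asset key prefix), using a hypothetical sync id and providing the ``census`` resource the same way as in that example:

.. code-block:: python

    from dagster import job
    from dagster_census import census_resource
    from dagster_census.ops import census_trigger_sync_op

    sync_foobar = census_trigger_sync_op.configured(
        {
            "sync_id": 1234,  # hypothetical sync id
            "force_full_sync": False,
            "poll_interval": 30.0,  # seconds between polls
            "poll_timeout": 3600.0,  # give up after an hour
            "asset_key_prefix": ["census", "marketing"],
        },
        name="sync_foobar",
    )

    my_census_resource = census_resource.configured({"api_key": {"env": "CENSUS_API_KEY"}})


    @job(resource_defs={"census": my_census_resource})
    def my_census_sync_job():
        sync_foobar()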
", "current_page_name": "_modules/dagster_census/ops", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_census.ops"}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_census.resources

\nimport datetime\nimport json\nimport logging\nimport time\nfrom typing import Any, Mapping, Optional\n\nimport requests\nfrom dagster import Failure, Field, StringSource, __version__, get_dagster_logger, resource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom requests.auth import HTTPBasicAuth\nfrom requests.exceptions import RequestException\n\nfrom .types import CensusOutput\n\nCENSUS_API_BASE = "app.getcensus.com/api"\nCENSUS_VERSION = "v1"\n\nDEFAULT_POLL_INTERVAL = 10\n\nSYNC_RUN_STATUSES = {"completed", "failed", "queued", "skipped", "working"}\n\n\n
[docs]class CensusResource:\n """This class exposes methods on top of the Census REST API."""\n\n def __init__(\n self,\n api_key: str,\n request_max_retries: int = 3,\n request_retry_delay: float = 0.25,\n log: logging.Logger = get_dagster_logger(),\n ):\n self.api_key = api_key\n\n self._request_max_retries = request_max_retries\n self._request_retry_delay = request_retry_delay\n\n self._log = log\n\n @property\n def _api_key(self):\n if self.api_key.startswith("secret-token:"):\n return self.api_key\n return "secret-token:" + self.api_key\n\n @property\n def api_base_url(self) -> str:\n return f"https://{CENSUS_API_BASE}/{CENSUS_VERSION}"\n\n def make_request(\n self, method: str, endpoint: str, data: Optional[str] = None\n ) -> Mapping[str, Any]:\n """Creates and sends a request to the desired Census API endpoint.\n\n Args:\n method (str): The http method to use for this request (e.g. "POST", "GET", "PATCH").\n endpoint (str): The Census API endpoint to send this request to.\n data (Optional[str]): JSON-formatted data string to be included in the request.\n\n Returns:\n Dict[str, Any]: JSON data from the response to this request\n """\n url = f"{self.api_base_url}/{endpoint}"\n headers = {\n "User-Agent": f"dagster-census/{__version__}",\n "Content-Type": "application/json;version=2",\n }\n\n num_retries = 0\n while True:\n try:\n response = requests.request(\n method=method,\n url=url,\n headers=headers,\n auth=HTTPBasicAuth("bearer", self._api_key),\n data=data,\n )\n response.raise_for_status()\n return response.json()\n except RequestException as e:\n self._log.error("Request to Census API failed: %s", e)\n if num_retries == self._request_max_retries:\n break\n num_retries += 1\n time.sleep(self._request_retry_delay)\n\n raise Failure(f"Max retries ({self._request_max_retries}) exceeded with url: {url}.")\n\n def get_sync(self, sync_id: int) -> Mapping[str, Any]:\n """Gets details about a given sync from the Census API.\n\n Args:\n sync_id (int): The Census Sync ID.\n\n Returns:\n Dict[str, Any]: JSON data from the response to this request\n """\n return self.make_request(method="GET", endpoint=f"syncs/{sync_id}")\n\n def get_source(self, source_id: int) -> Mapping[str, Any]:\n """Gets details about a given source from the Census API.\n\n Args:\n source_id (int): The Census Source ID.\n\n Returns:\n Dict[str, Any]: JSON data from the response to this request\n """\n return self.make_request(method="GET", endpoint=f"sources/{source_id}")\n\n def get_destination(self, destination_id: int) -> Mapping[str, Any]:\n """Gets details about a given destination from the Census API.\n\n Args:\n destination_id (int): The Census Destination ID.\n\n Returns:\n Dict[str, Any]: JSON data from the response to this request\n """\n return self.make_request(method="GET", endpoint=f"destinations/{destination_id}")\n\n def get_sync_run(self, sync_run_id: int) -> Mapping[str, Any]:\n """Gets details about a specific sync run from the Census API.\n\n Args:\n sync_run_id (int): The Census Sync Run ID.\n\n Returns:\n Dict[str, Any]: JSON data from the response to this request\n """\n return self.make_request(method="GET", endpoint=f"sync_runs/{sync_run_id}")\n\n def poll_sync_run(\n self,\n sync_run_id: int,\n poll_interval: float = DEFAULT_POLL_INTERVAL,\n poll_timeout: Optional[float] = None,\n ) -> Mapping[str, Any]:\n """Given a Census sync run, poll until the run is complete.\n\n Args:\n sync_id (int): The Census Sync Run ID.\n poll_interval (float): The time (in seconds) that will be waited between 
successive polls.\n poll_timeout (float): The maximum time that will waited before this operation is timed\n out. By default, this will never time out.\n\n Returns:\n Dict[str, Any]: JSON data from the response to this request\n """\n log_url = f"https://app.getcensus.com/syncs_runs/{sync_run_id}"\n poll_start = datetime.datetime.now()\n\n while True:\n time.sleep(poll_interval)\n response_dict = self.get_sync_run(sync_run_id)\n if "data" not in response_dict.keys():\n raise ValueError(\n f"Getting status of sync failed, please visit Census Logs at {log_url} to see"\n " more."\n )\n\n sync_status = response_dict["data"]["status"]\n sync_id = response_dict["data"]["sync_id"]\n\n if sync_status not in SYNC_RUN_STATUSES:\n raise ValueError(\n f"Unexpected response status '{sync_status}'; "\n f"must be one of {','.join(sorted(SYNC_RUN_STATUSES))}. "\n "See Management API docs for more information: "\n "https://docs.getcensus.com/basics/developers/api/sync-runs"\n )\n\n if sync_status in {"queued", "working"}:\n self._log.debug(\n f"Sync {sync_id} still running after {datetime.datetime.now() - poll_start}."\n )\n continue\n\n if poll_timeout and datetime.datetime.now() > poll_start + datetime.timedelta(\n seconds=poll_timeout\n ):\n raise Failure(\n f"Sync for sync '{sync_id}' timed out after"\n f" {datetime.datetime.now() - poll_start}."\n )\n\n break\n\n self._log.debug(\n f"Sync {sync_id} has finished running after {datetime.datetime.now() - poll_start}."\n )\n self._log.info(f"View sync details here: {log_url}.")\n\n return response_dict\n\n def trigger_sync(self, sync_id: int, force_full_sync: bool = False) -> Mapping[str, Any]:\n """Trigger an asynchronous run for a specific sync.\n\n Args:\n sync_id (int): The Census Sync Run ID.\n force_full_sync (bool): If the Sync should perform a full sync\n\n Returns:\n Dict[str, Any]: JSON data from the response to this request\n """\n data = {"force_full_sync": force_full_sync}\n return self.make_request(\n method="POST", endpoint=f"syncs/{sync_id}/trigger", data=json.dumps(data)\n )\n\n def trigger_sync_and_poll(\n self,\n sync_id: int,\n force_full_sync: bool = False,\n poll_interval: float = DEFAULT_POLL_INTERVAL,\n poll_timeout: Optional[float] = None,\n ) -> CensusOutput:\n """Trigger a run for a specific sync and poll until it has completed.\n\n Args:\n sync_id (int): The Census Sync Run ID.\n force_full_sync (bool): If the Sync should perform a full sync\n poll_interval (float): The time (in seconds) that will be waited between successive polls.\n poll_timeout (float): The maximum time that will waited before this operation is timed\n out. By default, this will never time out.\n\n Returns:\n :py:class:`~CensusOutput`:\n Object containing details about the sync run and the sync details\n """\n sync_details = self.get_sync(sync_id=sync_id)\n source_details = self.get_source(\n source_id=sync_details["data"]["source_attributes"]["connection_id"]\n )["data"]\n destination_details = self.get_destination(\n destination_id=sync_details["data"]["destination_attributes"]["connection_id"]\n )["data"]\n\n trigger_sync_resp = self.trigger_sync(sync_id=sync_id, force_full_sync=force_full_sync)\n sync_run_details = self.poll_sync_run(\n sync_run_id=trigger_sync_resp["data"]["sync_run_id"],\n poll_interval=poll_interval,\n poll_timeout=poll_timeout,\n )["data"]\n return CensusOutput(\n sync_run=sync_run_details,\n source=source_details,\n destination=destination_details,\n )
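A sketch of driving the ``CensusResource`` client above directly, outside of an op, assuming an API key in the environment and a hypothetical sync id; ``trigger_sync_and_poll`` blocks until the sync run reaches a terminal status:

.. code-block:: python

    import os

    from dagster_census.resources import CensusResource

    client = CensusResource(api_key=os.environ["CENSUS_API_KEY"])

    # Kick off sync 1234 (hypothetical id), polling every 30s for up to 10 minutes.
    census_output = client.trigger_sync_and_poll(
        sync_id=1234,
        poll_interval=30.0,
        poll_timeout=600.0,
    )
    print(census_output.sync_run["status"])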
\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n config_schema={\n "api_key": Field(\n StringSource,\n is_required=True,\n description="Census API Key.",\n ),\n "request_max_retries": Field(\n int,\n default_value=3,\n description=(\n "The maximum number of times requests to the Census API should be retried "\n "before failing."\n ),\n ),\n "request_retry_delay": Field(\n float,\n default_value=0.25,\n description="Time (in seconds) to wait between each request retry.",\n ),\n },\n description="This resource helps manage Census connectors",\n)\ndef census_resource(context) -> CensusResource:\n """This resource allows users to programatically interface with the Census REST API to launch\n syncs and monitor their progress. This currently implements only a subset of the functionality\n exposed by the API.\n\n **Examples:**\n\n .. code-block:: python\n\n from dagster import job\n from dagster_census import census_resource\n\n my_census_resource = census_resource.configured(\n {\n "api_key": {"env": "CENSUS_API_KEY"},\n }\n )\n\n @job(resource_defs={"census":my_census_resource})\n def my_census_job():\n ...\n\n """\n return CensusResource(\n api_key=context.resource_config["api_key"],\n request_max_retries=context.resource_config["request_max_retries"],\n request_retry_delay=context.resource_config["request_retry_delay"],\n log=context.log,\n )
\n
", "current_page_name": "_modules/dagster_census/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_census.resources"}, "types": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_census.types

\nfrom typing import Any, Mapping, NamedTuple\n\n\n
[docs]class CensusOutput(\n NamedTuple(\n "_CensusOutput",\n [\n ("sync_run", Mapping[str, Any]),\n ("source", Mapping[str, Any]),\n ("destination", Mapping[str, Any]),\n ],\n )\n):\n """Contains recorded information about the state of a Census sync after a sync completes.\n\n Attributes:\n sync_run (Dict[str, Any]):\n The details of the specific sync run.\n source (Dict[str, Any]):\n Information about the source for the Census sync.\n destination (Dict[str, Any]):\n Information about the destination for the Census sync.\n """
\n
", "current_page_name": "_modules/dagster_census/types", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_census.types"}}, "dagster_dask": {"executor": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dask.executor

\nfrom typing import Any, Mapping, Optional, Sequence\n\nimport dask\nimport dask.distributed\nfrom dagster import (\n    Executor,\n    Field,\n    Permissive,\n    Selector,\n    StringSource,\n    _check as check,\n    _seven,\n    multiple_process_executor_requirements,\n)\nfrom dagster._core.definitions.executor_definition import executor\nfrom dagster._core.definitions.reconstruct import ReconstructableJob\nfrom dagster._core.errors import raise_execution_interrupts\nfrom dagster._core.events import DagsterEvent\nfrom dagster._core.execution.api import create_execution_plan, execute_plan\nfrom dagster._core.execution.context.system import PlanOrchestrationContext\nfrom dagster._core.execution.plan.plan import ExecutionPlan\nfrom dagster._core.execution.plan.state import KnownExecutionState\nfrom dagster._core.execution.retries import RetryMode\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._core.instance.ref import InstanceRef\nfrom dagster._core.storage.dagster_run import DagsterRun\nfrom dagster._utils import iterate_with_context\n\n# Dask resource requirements are specified under this key\nDASK_RESOURCE_REQUIREMENTS_KEY = "dagster-dask/resource_requirements"\n\n\n
[docs]@executor(\n name="dask",\n requirements=multiple_process_executor_requirements(),\n config_schema={\n "cluster": Field(\n Selector(\n {\n "existing": Field(\n {"address": StringSource},\n description="Connect to an existing scheduler.",\n ),\n "local": Field(\n Permissive(), is_required=False, description="Local cluster configuration."\n ),\n "yarn": Field(\n Permissive(), is_required=False, description="YARN cluster configuration."\n ),\n "ssh": Field(\n Permissive(), is_required=False, description="SSH cluster configuration."\n ),\n "pbs": Field(\n Permissive(), is_required=False, description="PBS cluster configuration."\n ),\n "moab": Field(\n Permissive(), is_required=False, description="Moab cluster configuration."\n ),\n "sge": Field(\n Permissive(), is_required=False, description="SGE cluster configuration."\n ),\n "lsf": Field(\n Permissive(), is_required=False, description="LSF cluster configuration."\n ),\n "slurm": Field(\n Permissive(), is_required=False, description="SLURM cluster configuration."\n ),\n "oar": Field(\n Permissive(), is_required=False, description="OAR cluster configuration."\n ),\n "kube": Field(\n Permissive(),\n is_required=False,\n description="Kubernetes cluster configuration.",\n ),\n }\n )\n )\n },\n)\ndef dask_executor(init_context):\n """Dask-based executor.\n\n The 'cluster' can be one of the following:\n ('existing', 'local', 'yarn', 'ssh', 'pbs', 'moab', 'sge', 'lsf', 'slurm', 'oar', 'kube').\n\n If the Dask executor is used without providing executor-specific config, a local Dask cluster\n will be created (as when calling :py:class:`dask.distributed.Client() <dask:distributed.Client>`\n with :py:class:`dask.distributed.LocalCluster() <dask:distributed.LocalCluster>`).\n\n The Dask executor optionally takes the following config:\n\n .. code-block:: none\n\n cluster:\n {\n local?: # takes distributed.LocalCluster parameters\n {\n timeout?: 5, # Timeout duration for initial connection to the scheduler\n n_workers?: 4 # Number of workers to start\n threads_per_worker?: 1 # Number of threads per each worker\n }\n }\n\n To use the `dask_executor`, set it as the `executor_def` when defining a job:\n\n .. code-block:: python\n\n from dagster import job\n from dagster_dask import dask_executor\n\n @job(executor_def=dask_executor)\n def dask_enabled_job():\n pass\n\n """\n ((cluster_type, cluster_configuration),) = init_context.executor_config["cluster"].items()\n return DaskExecutor(cluster_type, cluster_configuration)
\n\n\ndef query_on_dask_worker(\n dependencies: Any,\n recon_job: ReconstructableJob,\n dagster_run: DagsterRun,\n run_config: Optional[Mapping[str, object]],\n step_keys: Optional[Sequence[str]],\n instance_ref: InstanceRef,\n known_state: Optional[KnownExecutionState],\n) -> Sequence[DagsterEvent]:\n """Note that we need to pass "dependencies" to ensure Dask sequences futures during task\n scheduling, even though we do not use this argument within the function.\n """\n with DagsterInstance.from_ref(instance_ref) as instance:\n subset_job = recon_job.get_subset(op_selection=dagster_run.resolved_op_selection)\n\n execution_plan = create_execution_plan(\n subset_job,\n run_config=run_config,\n step_keys_to_execute=step_keys,\n known_state=known_state,\n )\n\n return execute_plan(\n execution_plan, subset_job, instance, dagster_run, run_config=run_config\n )\n\n\ndef get_dask_resource_requirements(tags: Mapping[str, str]):\n check.mapping_param(tags, "tags", key_type=str, value_type=str)\n req_str = tags.get(DASK_RESOURCE_REQUIREMENTS_KEY)\n if req_str is not None:\n return _seven.json.loads(req_str)\n\n return {}\n\n\nclass DaskExecutor(Executor):\n def __init__(self, cluster_type, cluster_configuration):\n self.cluster_type = check.opt_str_param(cluster_type, "cluster_type", default="local")\n self.cluster_configuration = check.opt_dict_param(\n cluster_configuration, "cluster_configuration"\n )\n\n @property\n def retries(self):\n return RetryMode.DISABLED\n\n def execute(self, plan_context: PlanOrchestrationContext, execution_plan: ExecutionPlan):\n check.inst_param(plan_context, "plan_context", PlanOrchestrationContext)\n check.inst_param(execution_plan, "execution_plan", ExecutionPlan)\n check.param_invariant(\n isinstance(plan_context.executor, DaskExecutor),\n "plan_context",\n f"Expected executor to be DaskExecutor got {plan_context.executor}",\n )\n\n check.invariant(\n plan_context.instance.is_persistent,\n "Dask execution requires a persistent DagsterInstance",\n )\n\n step_levels = execution_plan.get_steps_to_execute_by_level()\n\n job_name = plan_context.job_name\n\n instance = plan_context.instance\n\n cluster_type = self.cluster_type\n if cluster_type == "existing":\n # address passed directly to Client() below to connect to existing Scheduler\n cluster = self.cluster_configuration["address"]\n elif cluster_type == "local":\n from dask.distributed import LocalCluster\n\n cluster = LocalCluster(**self.build_dict(job_name))\n elif cluster_type == "yarn":\n from dask_yarn import YarnCluster\n\n cluster = YarnCluster(**self.build_dict(job_name))\n elif cluster_type == "ssh":\n from dask.distributed import SSHCluster\n\n cluster = SSHCluster(**self.build_dict(job_name))\n elif cluster_type == "pbs":\n from dask_jobqueue import PBSCluster\n\n cluster = PBSCluster(**self.build_dict(job_name))\n elif cluster_type == "moab":\n from dask_jobqueue import MoabCluster\n\n cluster = MoabCluster(**self.build_dict(job_name))\n elif cluster_type == "sge":\n from dask_jobqueue import SGECluster\n\n cluster = SGECluster(**self.build_dict(job_name))\n elif cluster_type == "lsf":\n from dask_jobqueue import LSFCluster\n\n cluster = LSFCluster(**self.build_dict(job_name))\n elif cluster_type == "slurm":\n from dask_jobqueue import SLURMCluster\n\n cluster = SLURMCluster(**self.build_dict(job_name))\n elif cluster_type == "oar":\n from dask_jobqueue import OARCluster\n\n cluster = OARCluster(**self.build_dict(job_name))\n elif cluster_type == "kube":\n from dask_kubernetes import KubeCluster\n\n 
cluster = KubeCluster(**self.build_dict(job_name))\n else:\n raise ValueError(\n "Must be providing one of the following ('existing', 'local', 'yarn', 'ssh',"\n f" 'pbs', 'moab', 'sge', 'lsf', 'slurm', 'oar', 'kube') not {cluster_type}"\n )\n\n with dask.distributed.Client(cluster) as client:\n execution_futures = []\n execution_futures_dict = {}\n\n for step_level in step_levels:\n for step in step_level:\n # We ensure correctness in sequencing by letting Dask schedule futures and\n # awaiting dependencies within each step.\n dependencies = []\n for step_input in step.step_inputs:\n for key in step_input.dependency_keys:\n dependencies.append(execution_futures_dict[key])\n\n run_config = plan_context.run_config\n\n dask_task_name = "%s.%s" % (job_name, step.key)\n\n recon_job = plan_context.reconstructable_job\n\n future = client.submit(\n query_on_dask_worker,\n dependencies,\n recon_job,\n plan_context.dagster_run,\n run_config,\n [step.key],\n instance.get_ref(),\n execution_plan.known_state,\n key=dask_task_name,\n resources=get_dask_resource_requirements(step.tags),\n )\n\n execution_futures.append(future)\n execution_futures_dict[step.key] = future\n\n # This tells Dask to awaits the step executions and retrieve their results to the\n # master\n futures = dask.distributed.as_completed(execution_futures, with_results=True)\n\n # Allow interrupts while waiting for the results from Dask\n for future, result in iterate_with_context(raise_execution_interrupts, futures):\n for step_event in result:\n check.inst(step_event, DagsterEvent)\n yield step_event\n\n def build_dict(self, job_name):\n """Returns a dict we can use for kwargs passed to dask client instantiation.\n\n Intended to be used like:\n\n with dask.distributed.Client(**cfg.build_dict()) as client:\n << use client here >>\n\n """\n if self.cluster_type in ["yarn", "pbs", "moab", "sge", "lsf", "slurm", "oar", "kube"]:\n dask_cfg = {"name": job_name}\n else:\n dask_cfg = {}\n\n if self.cluster_configuration:\n for k, v in self.cluster_configuration.items():\n dask_cfg[k] = v\n\n # if address is set, don't add LocalCluster args\n # context: https://github.com/dask/distributed/issues/3313\n if (self.cluster_type == "local") and ("address" not in dask_cfg):\n # We set threads_per_worker because Dagster is not thread-safe. Even though\n # environments=True by default, there is a clever piece of machinery\n # (dask.distributed.deploy.local.nprocesses_nthreads) that automagically makes execution\n # multithreaded by default when the number of available cores is greater than 4.\n # See: https://github.com/dagster-io/dagster/issues/2181\n # We may want to try to figure out a way to enforce this on remote Dask clusters against\n # which users run Dagster workloads.\n dask_cfg["threads_per_worker"] = 1\n\n return dask_cfg\n
", "current_page_name": "_modules/dagster_dask/executor", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dask.executor"}}, "dagster_databricks": {"databricks": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_databricks.databricks

\nimport base64\nimport logging\nimport time\nfrom typing import IO, Any, Mapping, Optional, Tuple, Union, cast\n\nimport dagster\nimport dagster._check as check\nimport dagster_pyspark\nimport databricks_api\nimport databricks_cli.sdk\nimport requests.exceptions\nfrom dagster._annotations import deprecated, public\nfrom databricks.sdk import WorkspaceClient\nfrom databricks.sdk.service import compute, jobs\nfrom typing_extensions import Final\n\nimport dagster_databricks\n\nfrom .types import (\n    DatabricksRunState,\n)\nfrom .version import __version__\n\n# wait at most 24 hours by default for run execution\nDEFAULT_RUN_MAX_WAIT_TIME_SEC: Final = 24 * 60 * 60\n\n\n
[docs]class DatabricksError(Exception):\n pass
\n\n\n
[docs]class DatabricksClient:\n """A thin wrapper over the Databricks REST API."""\n\n def __init__(\n self,\n host: str,\n token: Optional[str] = None,\n oauth_client_id: Optional[str] = None,\n oauth_client_secret: Optional[str] = None,\n workspace_id: Optional[str] = None,\n ):\n self.host = host\n self.workspace_id = workspace_id\n\n self._workspace_client = WorkspaceClient(\n host=host,\n token=token,\n client_id=oauth_client_id,\n client_secret=oauth_client_secret,\n product="dagster-databricks",\n product_version=__version__,\n )\n\n # TODO: This is the old shim client that we were previously using. Arguably this is\n # confusing for users to use since this is an unofficial wrapper around the documented\n # Databricks REST API. We should consider removing this in the next minor release.\n if token:\n self._client = databricks_api.DatabricksAPI(host=host, token=token)\n self.__setup_user_agent(self._client.client)\n # TODO: This is the old `databricks_cli` client that was previously recommended by Databricks.\n # It is no longer supported and should be removed in favour of `databricks-sdk` in the next\n # minor release.\n self._api_client = databricks_cli.sdk.ApiClient(host=host, token=token)\n self.__setup_user_agent(self._api_client)\n else:\n self._client = None\n self._api_client = None\n\n def __setup_user_agent(\n self,\n client: Union[WorkspaceClient, databricks_api.DatabricksAPI, databricks_cli.sdk.ApiClient],\n ) -> None:\n """Overrides the user agent for the Databricks API client."""\n client.default_headers["user-agent"] = f"dagster-databricks/{__version__}"\n\n @deprecated(\n breaking_version="0.21.0", additional_warn_text="Use `workspace_client` property instead."\n )\n @public\n @property\n def client(self) -> databricks_api.DatabricksAPI:\n """Retrieve the legacy Databricks API client. Note: accessing this property will throw an exception if oauth\n credentials are used to initialize the DatabricksClient, because oauth credentials are not supported by the\n legacy Databricks API client.\n """\n if self._client is None:\n raise ValueError(\n "Legacy Databricks API client from `databricks-api` was not initialized because"\n " oauth credentials were used instead of an access token. This legacy Databricks"\n " API client is not supported when using oauth credentials. Use the"\n " `workspace_client` property instead."\n )\n return self._client\n\n @client.setter\n def client(self, value: Optional[databricks_api.DatabricksAPI]) -> None:\n self._client = value\n\n @deprecated(\n breaking_version="0.21.0", additional_warn_text="Use `workspace_client` property instead."\n )\n @public\n @property\n def api_client(self) -> databricks_cli.sdk.ApiClient:\n """Retrieve a reference to the underlying Databricks API client. For more information,\n see the `Databricks Python API <https://docs.databricks.com/dev-tools/python-api.html>`_.\n Noe: accessing this property will throw an exception if oauth credentials are used to initialize the\n DatabricksClient, because oauth credentials are not supported by the legacy Databricks API client.\n **Examples:**.\n\n .. 
code-block:: python\n\n from dagster import op\n from databricks_cli.jobs.api import JobsApi\n from databricks_cli.runs.api import RunsApi\n from databricks.sdk import WorkspaceClient\n\n @op(required_resource_keys={"databricks_client"})\n def op1(context):\n # Initialize the Databricks Jobs API\n jobs_client = JobsApi(context.resources.databricks_client.api_client)\n runs_client = RunsApi(context.resources.databricks_client.api_client)\n client = context.resources.databricks_client.api_client\n\n # Example 1: Run a Databricks job with some parameters.\n jobs_client.run_now(...)\n client.jobs.run_now(...)\n\n # Example 2: Trigger a one-time run of a Databricks workload.\n runs_client.submit_run(...)\n client.jobs.submit(...)\n\n # Example 3: Get an existing run.\n runs_client.get_run(...)\n client.jobs.get_run(...)\n\n # Example 4: Cancel a run.\n runs_client.cancel_run(...)\n client.jobs.cancel_run(...)\n\n Returns:\n ApiClient: The authenticated Databricks API client.\n """\n if self._api_client is None:\n raise ValueError(\n "Legacy Databricks API client from `databricks-cli` was not initialized because"\n " oauth credentials were used instead of an access token. This legacy Databricks"\n " API client is not supported when using oauth credentials. Use the"\n " `workspace_client` property instead."\n )\n return self._api_client\n\n @public\n @property\n def workspace_client(self) -> WorkspaceClient:\n """Retrieve a reference to the underlying Databricks Workspace client. For more information,\n see the `Databricks SDK for Python <https://docs.databricks.com/dev-tools/sdk-python.html>`_.\n\n **Examples:**\n\n .. code-block:: python\n\n from dagster import op\n from databricks.sdk import WorkspaceClient\n\n @op(required_resource_keys={"databricks_client"})\n def op1(context):\n # Initialize the Databricks Jobs API\n client = context.resources.databricks_client.api_client\n\n # Example 1: Run a Databricks job with some parameters.\n client.jobs.run_now(...)\n\n # Example 2: Trigger a one-time run of a Databricks workload.\n client.jobs.submit(...)\n\n # Example 3: Get an existing run.\n client.jobs.get_run(...)\n\n # Example 4: Cancel a run.\n client.jobs.cancel_run(...)\n\n Returns:\n WorkspaceClient: The authenticated Databricks SDK Workspace Client.\n """\n return self._workspace_client\n\n def read_file(self, dbfs_path: str, block_size: int = 1024**2) -> bytes:\n """Read a file from DBFS to a **byte string**."""\n if dbfs_path.startswith("dbfs://"):\n dbfs_path = dbfs_path[7:]\n\n data = b""\n bytes_read = 0\n dbfs_service = self.workspace_client.dbfs\n\n jdoc = dbfs_service.read(path=dbfs_path, length=block_size)\n data += base64.b64decode(jdoc.data)\n while jdoc.bytes_read == block_size:\n bytes_read += jdoc.bytes_read\n jdoc = dbfs_service.read(path=dbfs_path, offset=bytes_read, length=block_size)\n data += base64.b64decode(jdoc.data)\n\n return data\n\n def put_file(\n self, file_obj: IO, dbfs_path: str, overwrite: bool = False, block_size: int = 1024**2\n ) -> None:\n """Upload an arbitrary large file to DBFS.\n\n This doesn't use the DBFS `Put` API because that endpoint is limited to 1MB.\n """\n if dbfs_path.startswith("dbfs://"):\n dbfs_path = dbfs_path[7:]\n\n dbfs_service = self.workspace_client.dbfs\n\n create_response = dbfs_service.create(path=dbfs_path, overwrite=overwrite)\n handle = create_response.handle\n\n block = file_obj.read(block_size)\n while block:\n data = base64.b64encode(block).decode("utf-8")\n dbfs_service.add_block(data=data, handle=handle)\n block = 
file_obj.read(block_size)\n\n dbfs_service.close(handle=handle)\n\n def get_run_state(self, databricks_run_id: int) -> "DatabricksRunState":\n """Get the state of a run by Databricks run ID.\n\n Return a `DatabricksRunState` object. Note that the `result_state`\n attribute may be `None` if the run hasn't yet terminated.\n """\n run = self.workspace_client.jobs.get_run(databricks_run_id)\n return DatabricksRunState.from_databricks(run.state)\n\n def poll_run_state(\n self,\n logger: logging.Logger,\n start_poll_time: float,\n databricks_run_id: int,\n max_wait_time_sec: float,\n verbose_logs: bool = True,\n ) -> bool:\n run_state = self.get_run_state(databricks_run_id)\n\n if run_state.has_terminated():\n if run_state.is_successful():\n logger.info(f"Run `{databricks_run_id}` completed successfully.")\n return True\n if run_state.is_skipped():\n logger.info(f"Run `{databricks_run_id}` was skipped.")\n return True\n else:\n error_message = (\n f"Run `{databricks_run_id}` failed with result state:"\n f" `{run_state.result_state}`. Message: {run_state.state_message}."\n )\n logger.error(error_message)\n raise DatabricksError(error_message)\n else:\n if verbose_logs:\n logger.debug(f"Run `{databricks_run_id}` in state {run_state}.")\n if time.time() - start_poll_time > max_wait_time_sec:\n raise DatabricksError(\n f"Run `{databricks_run_id}` took more than {max_wait_time_sec}s to complete."\n " Failing the run."\n )\n return False\n\n def wait_for_run_to_complete(\n self,\n logger: logging.Logger,\n databricks_run_id: int,\n poll_interval_sec: float,\n max_wait_time_sec: int,\n verbose_logs: bool = True,\n ) -> None:\n logger.info(f"Waiting for Databricks run `{databricks_run_id}` to complete...")\n\n start_poll_time = time.time()\n while True:\n if self.poll_run_state(\n logger=logger,\n start_poll_time=start_poll_time,\n databricks_run_id=databricks_run_id,\n max_wait_time_sec=max_wait_time_sec,\n verbose_logs=verbose_logs,\n ):\n return\n\n time.sleep(poll_interval_sec)
\n\n\nclass DatabricksJobRunner:\n """Submits jobs created using Dagster config to Databricks, and monitors their progress.\n\n Attributes:\n host (str): Databricks host, e.g. https://uksouth.azuredatabricks.net.\n token (str): Databricks authentication token.\n poll_interval_sec (float): How often to poll Databricks for run status.\n max_wait_time_sec (int): How long to wait for a run to complete before failing.\n """\n\n def __init__(\n self,\n host: str,\n token: Optional[str] = None,\n oauth_client_id: Optional[str] = None,\n oauth_client_secret: Optional[str] = None,\n poll_interval_sec: float = 5,\n max_wait_time_sec: int = DEFAULT_RUN_MAX_WAIT_TIME_SEC,\n ):\n self.host = check.str_param(host, "host")\n check.invariant(\n token is None or (oauth_client_id is None and oauth_client_secret is None),\n "Must provide either databricks_token or oauth_credentials, but cannot provide both",\n )\n self.token = check.opt_str_param(token, "token")\n self.oauth_client_id = check.opt_str_param(oauth_client_id, "oauth_client_id")\n self.oauth_client_secret = check.opt_str_param(oauth_client_secret, "oauth_client_secret")\n self.poll_interval_sec = check.numeric_param(poll_interval_sec, "poll_interval_sec")\n self.max_wait_time_sec = check.int_param(max_wait_time_sec, "max_wait_time_sec")\n\n self._client: DatabricksClient = DatabricksClient(\n host=self.host,\n token=self.token,\n oauth_client_id=oauth_client_id,\n oauth_client_secret=oauth_client_secret,\n )\n\n @property\n def client(self) -> DatabricksClient:\n """Return the underlying `DatabricksClient` object."""\n return self._client\n\n def submit_run(self, run_config: Mapping[str, Any], task: Mapping[str, Any]) -> int:\n """Submit a new run using the 'Runs submit' API."""\n existing_cluster_id = run_config["cluster"].get("existing")\n\n new_cluster = run_config["cluster"].get("new")\n\n # The Databricks API needs different keys to be present in API calls depending\n # on new/existing cluster, so we need to process the new_cluster\n # config first.\n if new_cluster:\n new_cluster = new_cluster.copy()\n\n nodes = new_cluster.pop("nodes")\n if "instance_pool_id" in nodes:\n new_cluster["instance_pool_id"] = nodes["instance_pool_id"]\n else:\n node_types = nodes["node_types"]\n new_cluster["node_type_id"] = node_types["node_type_id"]\n if "driver_node_type_id" in node_types:\n new_cluster["driver_node_type_id"] = node_types["driver_node_type_id"]\n\n cluster_size = new_cluster.pop("size")\n if "num_workers" in cluster_size:\n new_cluster["num_workers"] = cluster_size["num_workers"]\n else:\n new_cluster["autoscale"] = cluster_size["autoscale"]\n\n tags = new_cluster.get("custom_tags", {})\n if isinstance(tags, list):\n tags = {x["key"]: x["value"] for x in tags}\n tags["__dagster_version"] = dagster.__version__\n new_cluster["custom_tags"] = tags\n\n check.invariant(\n existing_cluster_id is not None or new_cluster is not None,\n "Invalid value for run_config.cluster",\n )\n\n # We'll always need some libraries, namely dagster/dagster_databricks/dagster_pyspark,\n # since they're imported by our scripts.\n # Add them if they're not already added by users in config.\n libraries = list(run_config.get("libraries", []))\n install_default_libraries = run_config.get("install_default_libraries", True)\n if install_default_libraries:\n python_libraries = {\n x["pypi"]["package"].split("==")[0].replace("_", "-")\n for x in libraries\n if "pypi" in x\n }\n\n for library_name, library in [\n ("dagster", dagster),\n ("dagster-databricks", 
dagster_databricks),\n ("dagster-pyspark", dagster_pyspark),\n ]:\n if library_name not in python_libraries:\n libraries.append(\n {"pypi": {"package": f"{library_name}=={library.__version__}"}}\n )\n\n # Only one task should be able to be chosen really; make sure of that here.\n check.invariant(\n sum(\n task.get(key) is not None\n for key in [\n "notebook_task",\n "spark_python_task",\n "spark_jar_task",\n "spark_submit_task",\n ]\n )\n == 1,\n "Multiple tasks specified in Databricks run",\n )\n\n return self.client.workspace_client.jobs.submit(\n run_name=run_config.get("run_name"),\n tasks=[\n jobs.SubmitTask.from_dict(\n {\n "new_cluster": new_cluster,\n "existing_cluster_id": existing_cluster_id,\n # "libraries": [compute.Library.from_dict(lib) for lib in libraries],\n "libraries": libraries,\n **task,\n "task_key": "dagster-task",\n },\n )\n ],\n ).bind()["run_id"]\n\n def retrieve_logs_for_run_id(\n self, log: logging.Logger, databricks_run_id: int\n ) -> Optional[Tuple[Optional[str], Optional[str]]]:\n """Retrieve the stdout and stderr logs for a run."""\n run = self.client.workspace_client.jobs.get_run(databricks_run_id)\n\n # Run.cluster_instance can be None. In that case, fall back to cluster instance on first\n # task. Currently pyspark step launcher runs jobs with singleton tasks.\n cluster_instance = run.cluster_instance or run.tasks[0].cluster_instance\n cluster_id = check.inst(\n cluster_instance.cluster_id,\n str,\n "cluster_id should be string like `1234-123456-abcdefgh` got:"\n f" `{cluster_instance.cluster_id}`",\n )\n cluster = self.client.workspace_client.clusters.get(cluster_id)\n log_config = cluster.cluster_log_conf\n if log_config is None:\n log.warn(\n f"Logs not configured for cluster {cluster_id} used for run {databricks_run_id}"\n )\n return None\n if cast(Optional[compute.S3StorageInfo], log_config.s3) is not None:\n logs_prefix = log_config.s3.destination\n log.warn("Retrieving S3 logs not yet implemented")\n return None\n elif cast(Optional[compute.DbfsStorageInfo], log_config.dbfs) is not None:\n logs_prefix = log_config.dbfs.destination\n stdout = self.wait_for_dbfs_logs(log, logs_prefix, cluster_id, "stdout")\n stderr = self.wait_for_dbfs_logs(log, logs_prefix, cluster_id, "stderr")\n return stdout, stderr\n\n def wait_for_dbfs_logs(\n self,\n log: logging.Logger,\n prefix: str,\n cluster_id: str,\n filename: str,\n waiter_delay: int = 10,\n waiter_max_attempts: int = 10,\n ) -> Optional[str]:\n """Attempt up to `waiter_max_attempts` attempts to get logs from DBFS."""\n path = "/".join([prefix, cluster_id, "driver", filename])\n log.info(f"Retrieving logs from {path}")\n num_attempts = 0\n while num_attempts <= waiter_max_attempts:\n try:\n logs = self.client.read_file(path)\n return logs.decode("utf-8")\n except requests.exceptions.HTTPError:\n num_attempts += 1\n time.sleep(waiter_delay)\n log.warn("Could not retrieve cluster logs!")\n
", "current_page_name": "_modules/dagster_databricks/databricks", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_databricks.databricks"}, "databricks_pyspark_step_launcher": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_databricks.databricks_pyspark_step_launcher

\nimport gzip\nimport io\nimport os.path\nimport pickle\nimport sys\nimport tempfile\nimport time\nimport zlib\nfrom typing import Any, Dict, Iterator, Mapping, Optional, Sequence, cast\n\nfrom dagster import (\n    Bool,\n    Field,\n    IntSource,\n    Noneable,\n    StringSource,\n    _check as check,\n    resource,\n)\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster._core.definitions.step_launcher import StepLauncher, StepRunRef\nfrom dagster._core.errors import raise_execution_interrupts\nfrom dagster._core.events import DagsterEvent\nfrom dagster._core.events.log import EventLogEntry\nfrom dagster._core.execution.context.init import InitResourceContext\nfrom dagster._core.execution.context.system import StepExecutionContext\nfrom dagster._core.execution.plan.external_step import (\n    PICKLED_EVENTS_FILE_NAME,\n    PICKLED_STEP_RUN_REF_FILE_NAME,\n    step_context_to_step_run_ref,\n)\nfrom dagster._core.log_manager import DagsterLogManager\nfrom dagster._serdes import deserialize_value\nfrom dagster._utils.backoff import backoff\nfrom dagster_pyspark.utils import build_pyspark_zip\nfrom databricks.sdk.core import DatabricksError\nfrom databricks.sdk.service import jobs\n\nfrom dagster_databricks import databricks_step_main\nfrom dagster_databricks.databricks import (\n    DEFAULT_RUN_MAX_WAIT_TIME_SEC,\n    DatabricksJobRunner,\n)\n\nfrom .configs import (\n    define_databricks_env_variables,\n    define_databricks_permissions,\n    define_databricks_secrets_config,\n    define_databricks_storage_config,\n    define_databricks_submit_run_config,\n    define_oauth_credentials,\n)\n\nCODE_ZIP_NAME = "code.zip"\nPICKLED_CONFIG_FILE_NAME = "config.pkl"\nDAGSTER_SYSTEM_ENV_VARS = {\n    "DAGSTER_CLOUD_DEPLOYMENT_NAME",\n    "DAGSTER_CLOUD_IS_BRANCH_DEPLOYMENT",\n    "DAGSTER_CLOUD_GIT_SHA",\n    "DAGSTER_CLOUD_GIT_TIMESTAMP",\n    "DAGSTER_CLOUD_GIT_AUTHOR_EMAIL",\n    "DAGSTER_CLOUD_GIT_AUTHOR_NAME",\n    "DAGSTER_CLOUD_GIT_MESSAGE",\n    "DAGSTER_CLOUD_GIT_BRANCH",\n    "DAGSTER_CLOUD_GIT_REPO",\n    "DAGSTER_CLOUD_PULL_REQUEST_ID",\n    "DAGSTER_CLOUD_PULL_REQUEST_STATUS",\n}\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n {\n "run_config": define_databricks_submit_run_config(),\n "permissions": define_databricks_permissions(),\n "databricks_host": Field(\n StringSource,\n is_required=True,\n description="Databricks host, e.g. uksouth.azuredatabricks.com",\n ),\n "databricks_token": Field(\n Noneable(StringSource),\n default_value=None,\n description="Databricks access token",\n ),\n "oauth_credentials": define_oauth_credentials(),\n "env_variables": define_databricks_env_variables(),\n "secrets_to_env_variables": define_databricks_secrets_config(),\n "storage": define_databricks_storage_config(),\n "local_pipeline_package_path": Field(\n StringSource,\n is_required=False,\n description=(\n "Absolute path to root python package containing your Dagster code. If you set this"\n " value to a directory lower than the root package, and have user relative imports"\n " in your code (e.g. `from .foo import bar`), it's likely you'll encounter an"\n " import error on the remote step. Before every step run, the launcher will zip up"\n " the code in this local path, upload it to DBFS, and unzip it into the Python path"\n " of the remote Spark process. This gives the remote process access to up-to-date"\n " user code."\n ),\n ),\n "local_dagster_job_package_path": Field(\n StringSource,\n is_required=False,\n description=(\n "Absolute path to root python package containing your Dagster code. If you set this"\n " value to a directory lower than the root package, and have user relative imports"\n " in your code (e.g. `from .foo import bar`), it's likely you'll encounter an"\n " import error on the remote step. Before every step run, the launcher will zip up"\n " the code in this local path, upload it to DBFS, and unzip it into the Python path"\n " of the remote Spark process. This gives the remote process access to up-to-date"\n " user code."\n ),\n ),\n "staging_prefix": Field(\n StringSource,\n is_required=False,\n default_value="/dagster_staging",\n description="Directory in DBFS to use for uploaded job code. Must be absolute.",\n ),\n "wait_for_logs": Field(\n Bool,\n is_required=False,\n default_value=False,\n description=(\n "If set, and if the specified cluster is configured to export logs, the system will"\n " wait after job completion for the logs to appear in the configured location. Note"\n " that logs are copied every 5 minutes, so enabling this will add several minutes"\n " to the job runtime. NOTE: this integration will export stdout/stderrfrom the"\n " remote Databricks process automatically, so this option is not generally"\n " necessary."\n ),\n ),\n "max_completion_wait_time_seconds": Field(\n IntSource,\n is_required=False,\n default_value=DEFAULT_RUN_MAX_WAIT_TIME_SEC,\n description=(\n "If the Databricks job run takes more than this many seconds, then "\n "consider it failed and terminate the step."\n ),\n ),\n "poll_interval_sec": Field(\n float,\n is_required=False,\n default_value=5.0,\n description=(\n "How frequently Dagster will poll Databricks to determine the state of the job."\n ),\n ),\n "verbose_logs": Field(\n bool,\n default_value=True,\n description=(\n "Determines whether to display debug logs emitted while job is being polled. 
It can"\n " be helpful for Dagster UI performance to set to False when running long-running"\n " or fan-out Databricks jobs, to avoid forcing the UI to fetch large amounts of"\n " debug logs."\n ),\n ),\n "add_dagster_env_variables": Field(\n bool,\n default_value=True,\n description=(\n "Automatically add Dagster system environment variables. This option is only"\n " applicable when the code being executed is deployed on Dagster Cloud. It will be"\n " ignored when the environment variables provided by Dagster Cloud are not present."\n ),\n ),\n }\n)\ndef databricks_pyspark_step_launcher(\n context: InitResourceContext,\n) -> "DatabricksPySparkStepLauncher":\n """Resource for running ops as a Databricks Job.\n\n When this resource is used, the op will be executed in Databricks using the 'Run Submit'\n API. Pipeline code will be zipped up and copied to a directory in DBFS along with the op's\n execution context.\n\n Use the 'run_config' configuration to specify the details of the Databricks cluster used, and\n the 'storage' key to configure persistent storage on that cluster. Storage is accessed by\n setting the credentials in the Spark context, as documented `here for S3`_ and `here for ADLS`_.\n\n .. _`here for S3`: https://docs.databricks.com/data/data-sources/aws/amazon-s3.html#alternative-1-set-aws-keys-in-the-spark-context\n .. _`here for ADLS`: https://docs.microsoft.com/en-gb/azure/databricks/data/data-sources/azure/azure-datalake-gen2#--access-directly-using-the-storage-account-access-key\n """\n return DatabricksPySparkStepLauncher(**context.resource_config)
\n\n\nclass DatabricksPySparkStepLauncher(StepLauncher):\n def __init__(\n self,\n run_config: Mapping[str, Any],\n permissions: Mapping[str, Any],\n databricks_host: str,\n secrets_to_env_variables: Sequence[Mapping[str, Any]],\n staging_prefix: str,\n wait_for_logs: bool,\n max_completion_wait_time_seconds: int,\n databricks_token: Optional[str] = None,\n oauth_credentials: Optional[Mapping[str, str]] = None,\n env_variables: Optional[Mapping[str, str]] = None,\n storage: Optional[Mapping[str, Any]] = None,\n poll_interval_sec: int = 5,\n local_pipeline_package_path: Optional[str] = None,\n local_dagster_job_package_path: Optional[str] = None,\n verbose_logs: bool = True,\n add_dagster_env_variables: bool = True,\n ):\n self.run_config = check.mapping_param(run_config, "run_config")\n self.permissions = check.mapping_param(permissions, "permissions")\n self.databricks_host = check.str_param(databricks_host, "databricks_host")\n\n check.invariant(\n databricks_token is not None or oauth_credentials is not None,\n "Must provide either databricks_token or oauth_credentials",\n )\n check.invariant(\n databricks_token is None or oauth_credentials is None,\n "Must provide either databricks_token or oauth_credentials, but cannot provide both",\n )\n self.databricks_token = check.opt_str_param(databricks_token, "databricks_token")\n oauth_credentials = check.opt_mapping_param(\n oauth_credentials,\n "oauth_credentials",\n key_type=str,\n value_type=str,\n )\n\n self.secrets = check.sequence_param(\n secrets_to_env_variables, "secrets_to_env_variables", dict\n )\n self.env_variables = check.opt_mapping_param(env_variables, "env_variables")\n self.storage = check.opt_mapping_param(storage, "storage")\n check.invariant(\n local_dagster_job_package_path is not None or local_pipeline_package_path is not None,\n "Missing config: need to provide either 'local_dagster_job_package_path' or"\n " 'local_pipeline_package_path' config entry",\n )\n check.invariant(\n local_dagster_job_package_path is None or local_pipeline_package_path is None,\n "Error in config: Provided both 'local_dagster_job_package_path' and"\n " 'local_pipeline_package_path' entries. 
Need to specify one or the other.",\n )\n self.local_dagster_job_package_path = check.str_param(\n local_pipeline_package_path or local_dagster_job_package_path,\n "local_dagster_job_package_path",\n )\n self.staging_prefix = check.str_param(staging_prefix, "staging_prefix")\n check.invariant(staging_prefix.startswith("/"), "staging_prefix must be an absolute path")\n self.wait_for_logs = check.bool_param(wait_for_logs, "wait_for_logs")\n\n self.databricks_runner = DatabricksJobRunner(\n host=databricks_host,\n token=databricks_token,\n oauth_client_id=oauth_credentials.get("client_id"),\n oauth_client_secret=oauth_credentials.get("client_secret"),\n poll_interval_sec=poll_interval_sec,\n max_wait_time_sec=max_completion_wait_time_seconds,\n )\n self.verbose_logs = check.bool_param(verbose_logs, "verbose_logs")\n self.add_dagster_env_variables = check.bool_param(\n add_dagster_env_variables, "add_dagster_env_variables"\n )\n\n def launch_step(self, step_context: StepExecutionContext) -> Iterator[DagsterEvent]:\n step_run_ref = step_context_to_step_run_ref(\n step_context, self.local_dagster_job_package_path\n )\n run_id = step_context.dagster_run.run_id\n log = step_context.log\n\n step_key = step_run_ref.step_key\n self._upload_artifacts(log, step_run_ref, run_id, step_key)\n\n task = self._get_databricks_task(run_id, step_key)\n databricks_run_id = self.databricks_runner.submit_run(self.run_config, task)\n\n if self.permissions:\n self._grant_permissions(log, databricks_run_id)\n\n try:\n # If this is being called within a `capture_interrupts` context, allow interrupts while\n # waiting for the execution to complete, so that we can terminate slow or hanging steps\n with raise_execution_interrupts():\n yield from self.step_events_iterator(step_context, step_key, databricks_run_id)\n except:\n # if executon is interrupted before the step is completed, cancel the run\n self.databricks_runner.client.workspace_client.jobs.cancel_run(databricks_run_id)\n raise\n finally:\n self.log_compute_logs(log, run_id, step_key)\n # this is somewhat obsolete\n if self.wait_for_logs:\n self._log_logs_from_cluster(log, databricks_run_id)\n\n def log_compute_logs(self, log: DagsterLogManager, run_id: str, step_key: str) -> None:\n try:\n stdout = self.databricks_runner.client.read_file(\n self._dbfs_path(run_id, step_key, "stdout")\n ).decode()\n log.info(f"Captured stdout for step {step_key}:")\n log.info(stdout)\n sys.stdout.write(stdout)\n except Exception as e:\n log.error(\n f"Encountered exception {e} when attempting to load stdout logs for step"\n f" {step_key}. Check the databricks console for more info."\n )\n try:\n stderr = self.databricks_runner.client.read_file(\n self._dbfs_path(run_id, step_key, "stderr")\n ).decode()\n log.info(f"Captured stderr for step {step_key}:")\n log.info(stderr)\n sys.stderr.write(stderr)\n except Exception as e:\n log.error(\n f"Encountered exception {e} when attempting to load stderr logs for step"\n f" {step_key}. Check the databricks console for more info."\n )\n\n def step_events_iterator(\n self, step_context: StepExecutionContext, step_key: str, databricks_run_id: int\n ) -> Iterator[DagsterEvent]:\n """The launched Databricks job writes all event records to a specific dbfs file. This iterator\n regularly reads the contents of the file, adds any events that have not yet been seen to\n the instance, and yields any DagsterEvents.\n\n By doing this, we simulate having the remote Databricks process able to directly write to\n the local DagsterInstance. 
Importantly, this means that timestamps (and all other record\n properties) will be sourced from the Databricks process, rather than recording when this\n process happens to log them.\n """\n check.int_param(databricks_run_id, "databricks_run_id")\n processed_events = 0\n start_poll_time = time.time()\n done = False\n step_context.log.info("Waiting for Databricks run %s to complete..." % databricks_run_id)\n while not done:\n with raise_execution_interrupts():\n if self.verbose_logs:\n step_context.log.debug(\n "Waiting %.1f seconds...", self.databricks_runner.poll_interval_sec\n )\n time.sleep(self.databricks_runner.poll_interval_sec)\n try:\n done = self.databricks_runner.client.poll_run_state(\n logger=step_context.log,\n start_poll_time=start_poll_time,\n databricks_run_id=databricks_run_id,\n max_wait_time_sec=self.databricks_runner.max_wait_time_sec,\n verbose_logs=self.verbose_logs,\n )\n finally:\n all_events = self.get_step_events(\n step_context.run_id, step_key, step_context.previous_attempt_count\n )\n # we get all available records on each poll, but we only want to process the\n # ones we haven't seen before\n for event in all_events[processed_events:]:\n # write each event from the DataBricks instance to the local instance\n step_context.instance.handle_new_event(event)\n if event.is_dagster_event:\n yield event.get_dagster_event()\n processed_events = len(all_events)\n\n step_context.log.info(f"Databricks run {databricks_run_id} completed.")\n\n def get_step_events(\n self, run_id: str, step_key: str, retry_number: int\n ) -> Sequence[EventLogEntry]:\n path = self._dbfs_path(run_id, step_key, f"{retry_number}_{PICKLED_EVENTS_FILE_NAME}")\n\n def _get_step_records() -> Sequence[EventLogEntry]:\n serialized_records = self.databricks_runner.client.read_file(path)\n if not serialized_records:\n return []\n return cast(\n Sequence[EventLogEntry],\n deserialize_value(pickle.loads(gzip.decompress(serialized_records))),\n )\n\n try:\n # reading from dbfs while it writes can be flaky\n # allow for retry if we get malformed data\n return backoff(\n fn=_get_step_records,\n retry_on=(pickle.UnpicklingError, OSError, zlib.error, EOFError),\n max_retries=4,\n )\n # if you poll before the Databricks process has had a chance to create the file,\n # we expect to get this error\n except DatabricksError as e:\n if e.error_code == "RESOURCE_DOES_NOT_EXIST":\n return []\n raise\n\n def _grant_permissions(\n self, log: DagsterLogManager, databricks_run_id: int, request_retries: int = 3\n ) -> None:\n client = self.databricks_runner.client.workspace_client\n # Retrieve run info\n cluster_id = None\n for i in range(1, request_retries + 1):\n run_info = client.jobs.get_run(databricks_run_id)\n # if a new job cluster is created, the cluster_instance key may not be immediately present in the run response\n try:\n cluster_id = run_info.cluster_instance.cluster_id\n break\n except:\n log.warning(\n f"Failed to retrieve cluster info for databricks_run_id {databricks_run_id}. "\n f"Retrying {i} of {request_retries} times."\n )\n time.sleep(5)\n if not cluster_id:\n log.warning(\n f"Failed to retrieve cluster info for databricks_run_id {databricks_run_id} "\n f"{request_retries} times. 
Skipping permission updates..."\n )\n return\n\n # Update job permissions\n if "job_permissions" in self.permissions:\n job_permissions = self._format_permissions(self.permissions["job_permissions"])\n job_id = run_info.job_id # type: ignore # (??)\n log.debug(f"Updating job permissions with following json: {job_permissions}")\n client.permissions.update("jobs", job_id, access_control_list=job_permissions)\n log.info("Successfully updated cluster permissions")\n\n # Update cluster permissions\n if "cluster_permissions" in self.permissions:\n if "existing" in self.run_config["cluster"]:\n raise ValueError(\n "Attempting to update permissions of an existing cluster. "\n "This is dangerous and thus unsupported."\n )\n cluster_permissions = self._format_permissions(self.permissions["cluster_permissions"])\n log.debug(f"Updating cluster permissions with following json: {cluster_permissions}")\n client.permissions.update(\n "clusters", cluster_id, access_control_list=cluster_permissions\n )\n log.info("Successfully updated cluster permissions")\n\n def _format_permissions(\n self, input_permissions: Mapping[str, Sequence[Mapping[str, str]]]\n ) -> Sequence[Mapping[str, str]]:\n access_control_list = []\n for permission, accessors in input_permissions.items():\n access_control_list.extend(\n [\n jobs.JobAccessControlRequest.from_dict(\n {"permission_level": permission, **accessor}\n )\n for accessor in accessors\n ]\n )\n return access_control_list\n\n def _get_databricks_task(self, run_id: str, step_key: str) -> Mapping[str, Any]:\n """Construct the 'task' parameter to be submitted to the Databricks API.\n\n This will create a 'spark_python_task' dict where `python_file` is a path on DBFS\n pointing to the 'databricks_step_main.py' file, and `parameters` is an array with a single\n element, a path on DBFS pointing to the picked `step_run_ref` data.\n\n See https://docs.databricks.com/dev-tools/api/latest/jobs.html#jobssparkpythontask.\n """\n python_file = self._dbfs_path(run_id, step_key, self._main_file_name())\n parameters = [\n self._internal_dbfs_path(run_id, step_key, PICKLED_STEP_RUN_REF_FILE_NAME),\n self._internal_dbfs_path(run_id, step_key, PICKLED_CONFIG_FILE_NAME),\n self._internal_dbfs_path(run_id, step_key, CODE_ZIP_NAME),\n ]\n return {"spark_python_task": {"python_file": python_file, "parameters": parameters}}\n\n def _upload_artifacts(\n self, log: DagsterLogManager, step_run_ref: StepRunRef, run_id: str, step_key: str\n ) -> None:\n """Upload the step run ref and pyspark code to DBFS to run as a job."""\n log.info("Uploading main file to DBFS")\n main_local_path = self._main_file_local_path()\n with open(main_local_path, "rb") as infile:\n self.databricks_runner.client.put_file(\n infile, self._dbfs_path(run_id, step_key, self._main_file_name()), overwrite=True\n )\n\n log.info("Uploading dagster job to DBFS")\n with tempfile.TemporaryDirectory() as temp_dir:\n # Zip and upload package containing dagster job\n zip_local_path = os.path.join(temp_dir, CODE_ZIP_NAME)\n build_pyspark_zip(zip_local_path, self.local_dagster_job_package_path)\n with open(zip_local_path, "rb") as infile:\n self.databricks_runner.client.put_file(\n infile, self._dbfs_path(run_id, step_key, CODE_ZIP_NAME), overwrite=True\n )\n\n log.info("Uploading step run ref file to DBFS")\n step_pickle_file = io.BytesIO()\n\n pickle.dump(step_run_ref, step_pickle_file)\n step_pickle_file.seek(0)\n self.databricks_runner.client.put_file(\n step_pickle_file,\n self._dbfs_path(run_id, step_key, 
PICKLED_STEP_RUN_REF_FILE_NAME),\n overwrite=True,\n )\n\n databricks_config = self.create_remote_config()\n log.info("Uploading Databricks configuration to DBFS")\n databricks_config_file = io.BytesIO()\n pickle.dump(databricks_config, databricks_config_file)\n databricks_config_file.seek(0)\n self.databricks_runner.client.put_file(\n databricks_config_file,\n self._dbfs_path(run_id, step_key, PICKLED_CONFIG_FILE_NAME),\n overwrite=True,\n )\n\n def get_dagster_env_variables(self) -> Dict[str, str]:\n out = {}\n if self.add_dagster_env_variables:\n for var in DAGSTER_SYSTEM_ENV_VARS:\n if os.getenv(var):\n out.update({var: os.getenv(var)})\n return out\n\n def create_remote_config(self) -> "DatabricksConfig":\n env_variables = self.get_dagster_env_variables()\n env_variables.update(self.env_variables)\n databricks_config = DatabricksConfig(\n env_variables=env_variables,\n storage=self.storage,\n secrets=self.secrets,\n )\n return databricks_config\n\n def _log_logs_from_cluster(self, log: DagsterLogManager, run_id: int) -> None:\n logs = self.databricks_runner.retrieve_logs_for_run_id(log, run_id)\n if logs is None:\n return\n stdout, stderr = logs\n if stderr:\n log.info(stderr)\n if stdout:\n log.info(stdout)\n\n def _main_file_name(self) -> str:\n return os.path.basename(self._main_file_local_path())\n\n def _main_file_local_path(self) -> str:\n return databricks_step_main.__file__\n\n def _sanitize_step_key(self, step_key: str) -> str:\n # step_keys of dynamic steps contain brackets, which are invalid characters\n return step_key.replace("[", "__").replace("]", "__")\n\n def _dbfs_path(self, run_id: str, step_key: str, filename: str) -> str:\n path = "/".join(\n [\n self.staging_prefix,\n run_id,\n self._sanitize_step_key(step_key),\n os.path.basename(filename),\n ]\n )\n return f"dbfs://{path}"\n\n def _internal_dbfs_path(self, run_id: str, step_key: str, filename: str) -> str:\n """Scripts running on Databricks should access DBFS at /dbfs/."""\n path = "/".join(\n [\n self.staging_prefix,\n run_id,\n self._sanitize_step_key(step_key),\n os.path.basename(filename),\n ]\n )\n return f"/dbfs/{path}"\n\n\nclass DatabricksConfig:\n """Represents configuration required by Databricks to run jobs.\n\n Instances of this class will be created when a Databricks step is launched and will contain\n all configuration and secrets required to set up storage and environment variables within\n the Databricks environment. The instance will be serialized and uploaded to Databricks\n by the step launcher, then deserialized as part of the 'main' script when the job is running\n in Databricks.\n\n The `setup` method handles the actual setup prior to op execution on the Databricks side.\n\n This config is separated out from the regular Dagster run config system because the setup\n is done by the 'main' script before entering a Dagster context (i.e. 
using `run_step_from_ref`).\n We use a separate class to avoid coupling the setup to the format of the `step_run_ref` object.\n """\n\n def __init__(\n self,\n env_variables: Mapping[str, str],\n storage: Mapping[str, Any],\n secrets: Sequence[Mapping[str, Any]],\n ):\n """Create a new DatabricksConfig object.\n\n `storage` and `secrets` should be of the same shape as the `storage` and\n `secrets_to_env_variables` config passed to `databricks_pyspark_step_launcher`.\n """\n self.env_variables = env_variables\n self.storage = storage\n self.secrets = secrets\n\n def setup(self, dbutils: Any, sc: Any) -> None:\n """Set up storage and environment variables on Databricks.\n\n The `dbutils` and `sc` arguments must be passed in by the 'main' script, as they\n aren't accessible by any other modules.\n """\n self.setup_storage(dbutils, sc)\n self.setup_environment(dbutils)\n\n def setup_storage(self, dbutils: Any, sc: Any) -> None:\n """Set up storage using either S3 or ADLS2."""\n if "s3" in self.storage:\n self.setup_s3_storage(self.storage["s3"], dbutils, sc)\n elif "adls2" in self.storage:\n self.setup_adls2_storage(self.storage["adls2"], dbutils, sc)\n\n def setup_s3_storage(self, s3_storage: Mapping[str, Any], dbutils: Any, sc: Any) -> None:\n """Obtain AWS credentials from Databricks secrets and export so both Spark and boto can use them."""\n scope = s3_storage["secret_scope"]\n\n access_key = dbutils.secrets.get(scope=scope, key=s3_storage["access_key_key"])\n secret_key = dbutils.secrets.get(scope=scope, key=s3_storage["secret_key_key"])\n\n # Spark APIs will use this.\n # See https://docs.databricks.com/data/data-sources/aws/amazon-s3.html#alternative-1-set-aws-keys-in-the-spark-context.\n sc._jsc.hadoopConfiguration().set("fs.s3n.awsAccessKeyId", access_key) # noqa: SLF001\n sc._jsc.hadoopConfiguration().set("fs.s3n.awsSecretAccessKey", secret_key) # noqa: SLF001\n\n # Boto will use these.\n os.environ["AWS_ACCESS_KEY_ID"] = access_key\n os.environ["AWS_SECRET_ACCESS_KEY"] = secret_key\n\n def setup_adls2_storage(self, adls2_storage: Mapping[str, Any], dbutils: Any, sc: Any) -> None:\n """Obtain an Azure Storage Account key from Databricks secrets and export so Spark can use it."""\n storage_account_key = dbutils.secrets.get(\n scope=adls2_storage["secret_scope"], key=adls2_storage["storage_account_key_key"]\n )\n # Spark APIs will use this.\n # See https://docs.microsoft.com/en-gb/azure/databricks/data/data-sources/azure/azure-datalake-gen2#--access-directly-using-the-storage-account-access-key\n # sc is globally defined in the Databricks runtime and points to the Spark context\n sc._jsc.hadoopConfiguration().set( # noqa: SLF001\n "fs.azure.account.key.{}.dfs.core.windows.net".format(\n adls2_storage["storage_account_name"]\n ),\n storage_account_key,\n )\n\n def setup_environment(self, dbutils: Any) -> None:\n """Setup any environment variables required by the run.\n\n Extract any secrets in the run config and export them as environment variables.\n\n This is important for any `StringSource` config since the environment variables\n won't ordinarily be available in the Databricks execution environment.\n """\n for env_k, env_v in self.env_variables.items():\n os.environ[env_k] = env_v\n\n for secret in self.secrets:\n name = secret["name"]\n key = secret["key"]\n scope = secret["scope"]\n print(f"Exporting {name} from Databricks secret {key}, scope {scope}") # noqa: T201\n val = dbutils.secrets.get(scope=scope, key=key)\n os.environ[name] = val\n
", "current_page_name": "_modules/dagster_databricks/databricks_pyspark_step_launcher", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_databricks.databricks_pyspark_step_launcher"}, "ops": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_databricks.ops

\nfrom typing import TYPE_CHECKING, Optional\n\nfrom dagster import (\n    In,\n    Nothing,\n    OpExecutionContext,\n    _check as check,\n    op,\n)\nfrom dagster._core.definitions.op_definition import OpDefinition\nfrom databricks.sdk.service import jobs\nfrom pydantic import Field\n\nDEFAULT_POLL_INTERVAL_SECONDS = 10\n# wait at most 24 hours by default for run execution\nDEFAULT_MAX_WAIT_TIME_SECONDS = 24 * 60 * 60\nfrom dagster import Config\n\nif TYPE_CHECKING:\n    from .databricks import DatabricksClient\n\n\n
[docs]def create_databricks_run_now_op(\n databricks_job_id: int,\n databricks_job_configuration: Optional[dict] = None,\n poll_interval_seconds: float = DEFAULT_POLL_INTERVAL_SECONDS,\n max_wait_time_seconds: float = DEFAULT_MAX_WAIT_TIME_SECONDS,\n name: Optional[str] = None,\n databricks_resource_key: str = "databricks",\n) -> OpDefinition:\n """Creates an op that launches an existing databricks job.\n\n As config, the op accepts a blob of the form described in Databricks' Job API:\n https://docs.databricks.com/api-explorer/workspace/jobs/runnow. The only required field is\n ``job_id``, which is the ID of the job to be executed. Additional fields can be used to specify\n override parameters for the Databricks Job.\n\n Arguments:\n databricks_job_id (int): The ID of the Databricks Job to be executed.\n databricks_job_configuration (dict): Configuration for triggering a new job run of a\n Databricks Job. See https://docs.databricks.com/api-explorer/workspace/jobs/runnow\n for the full configuration.\n poll_interval_seconds (float): How often to poll the Databricks API to check whether the\n Databricks job has finished running.\n max_wait_time_seconds (float): How long to wait for the Databricks job to finish running\n before raising an error.\n name (Optional[str]): The name of the op. If not provided, the name will be\n _databricks_run_now_op.\n databricks_resource_key (str): The name of the resource key used by this op. If not\n provided, the resource key will be "databricks".\n\n Returns:\n OpDefinition: An op definition to run the Databricks Job.\n\n Example:\n .. code-block:: python\n\n from dagster import job\n from dagster_databricks import create_databricks_run_now_op, DatabricksClientResource\n\n DATABRICKS_JOB_ID = 1234\n\n\n run_now_op = create_databricks_run_now_op(\n databricks_job_id=DATABRICKS_JOB_ID,\n databricks_job_configuration={\n "python_params": [\n "--input",\n "schema.db.input_table",\n "--output",\n "schema.db.output_table",\n ],\n },\n )\n\n @job(\n resource_defs={\n "databricks": DatabricksClientResource(\n host=EnvVar("DATABRICKS_HOST"),\n token=EnvVar("DATABRICKS_TOKEN")\n )\n }\n )\n def do_stuff():\n run_now_op()\n """\n _poll_interval_seconds = poll_interval_seconds\n _max_wait_time_seconds = max_wait_time_seconds\n\n class DatabricksRunNowOpConfig(Config):\n poll_interval_seconds: float = Field(\n default=_poll_interval_seconds,\n description="Check whether the Databricks Job is done at this interval, in seconds.",\n )\n max_wait_time_seconds: int = Field(\n default=_max_wait_time_seconds,\n description=(\n "If the Databricks Job is not complete after this length of time, in seconds,"\n " raise an error."\n ),\n )\n\n @op(\n ins={"start_after": In(Nothing)},\n required_resource_keys={databricks_resource_key},\n tags={"kind": "databricks"},\n name=name,\n )\n def _databricks_run_now_op(context: OpExecutionContext, config: DatabricksRunNowOpConfig):\n databricks: DatabricksClient = getattr(context.resources, databricks_resource_key)\n jobs_service = databricks.workspace_client.jobs\n\n run = jobs_service.run_now(\n job_id=databricks_job_id,\n **(databricks_job_configuration or {}),\n )\n run_id = run.bind()["run_id"]\n\n get_run_response = jobs_service.get_run(run_id=run_id)\n\n context.log.info(\n f"Launched databricks job run for '{get_run_response.run_name}' (`{run_id}`). URL:"\n f" {get_run_response.run_page_url}. 
Waiting to run to complete."\n )\n\n databricks.wait_for_run_to_complete(\n logger=context.log,\n databricks_run_id=run_id,\n poll_interval_sec=config.poll_interval_seconds,\n max_wait_time_sec=config.max_wait_time_seconds,\n )\n\n return _databricks_run_now_op
\n\n\n
[docs]def create_databricks_submit_run_op(\n databricks_job_configuration: dict,\n poll_interval_seconds: float = DEFAULT_POLL_INTERVAL_SECONDS,\n max_wait_time_seconds: float = DEFAULT_MAX_WAIT_TIME_SECONDS,\n name: Optional[str] = None,\n databricks_resource_key: str = "databricks",\n) -> OpDefinition:\n """Creates an op that submits a one-time run of a set of tasks on Databricks.\n\n As config, the op accepts a blob of the form described in Databricks' Job API:\n https://docs.databricks.com/api-explorer/workspace/jobs/submit.\n\n Arguments:\n databricks_job_configuration (dict): Configuration for submitting a one-time run of a set\n of tasks on Databricks. See https://docs.databricks.com/api-explorer/workspace/jobs/submit\n for the full configuration.\n poll_interval_seconds (float): How often to poll the Databricks API to check whether the\n Databricks job has finished running.\n max_wait_time_seconds (float): How long to wait for the Databricks job to finish running\n before raising an error.\n name (Optional[str]): The name of the op. If not provided, the name will be\n _databricks_submit_run_op.\n databricks_resource_key (str): The name of the resource key used by this op. If not\n provided, the resource key will be "databricks".\n\n Returns:\n OpDefinition: An op definition to submit a one-time run of a set of tasks on Databricks.\n\n Example:\n .. code-block:: python\n\n from dagster import job\n from dagster_databricks import create_databricks_submit_run_op, DatabricksClientResource\n\n\n submit_run_op = create_databricks_submit_run_op(\n databricks_job_configuration={\n "new_cluster": {\n "spark_version": '2.1.0-db3-scala2.11',\n "num_workers": 2\n },\n "notebook_task": {\n "notebook_path": "/Users/dagster@example.com/PrepareData",\n },\n }\n )\n\n @job(\n resource_defs={\n "databricks": DatabricksClientResource(\n host=EnvVar("DATABRICKS_HOST"),\n token=EnvVar("DATABRICKS_TOKEN")\n )\n }\n )\n def do_stuff():\n submit_run_op()\n """\n check.invariant(\n bool(databricks_job_configuration),\n "Configuration for the one-time Databricks Job is required.",\n )\n\n _poll_interval_seconds = poll_interval_seconds\n _max_wait_time_seconds = max_wait_time_seconds\n\n class DatabricksSubmitRunOpConfig(Config):\n poll_interval_seconds: float = Field(\n default=_poll_interval_seconds,\n description="Check whether the Databricks Job is done at this interval, in seconds.",\n )\n max_wait_time_seconds: int = Field(\n default=_max_wait_time_seconds,\n description=(\n "If the Databricks Job is not complete after this length of time, in seconds,"\n " raise an error."\n ),\n )\n\n @op(\n ins={"start_after": In(Nothing)},\n required_resource_keys={databricks_resource_key},\n tags={"kind": "databricks"},\n name=name,\n )\n def _databricks_submit_run_op(\n context: OpExecutionContext, config: DatabricksSubmitRunOpConfig\n ) -> None:\n databricks: DatabricksClient = getattr(context.resources, databricks_resource_key)\n jobs_service = databricks.workspace_client.jobs\n\n run = jobs_service.submit(\n tasks=[jobs.SubmitTask.from_dict(databricks_job_configuration)],\n )\n run_id: int = run.bind()["run_id"]\n\n get_run_response = jobs_service.get_run(run_id=run_id)\n\n context.log.info(\n f"Launched databricks job run for '{get_run_response.run_name}' (`{run_id}`). URL:"\n f" {get_run_response.run_page_url}. 
Waiting for run to complete."\n )\n\n databricks.wait_for_run_to_complete(\n logger=context.log,\n databricks_run_id=run_id,\n poll_interval_sec=config.poll_interval_seconds,\n max_wait_time_sec=config.max_wait_time_seconds,\n )\n\n return _databricks_submit_run_op
\n
", "current_page_name": "_modules/dagster_databricks/ops", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_databricks.ops"}, "pipes": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_databricks.pipes

\nimport base64\nimport json\nimport os\nimport random\nimport string\nimport sys\nimport time\nfrom contextlib import ExitStack, contextmanager\nfrom typing import Iterator, Literal, Mapping, Optional, TextIO\n\nimport dagster._check as check\nfrom dagster._annotations import experimental\nfrom dagster._core.definitions.resource_annotation import ResourceParam\nfrom dagster._core.errors import DagsterPipesExecutionError\nfrom dagster._core.execution.context.compute import OpExecutionContext\nfrom dagster._core.pipes.client import (\n    PipesClient,\n    PipesClientCompletedInvocation,\n    PipesContextInjector,\n    PipesMessageReader,\n)\nfrom dagster._core.pipes.utils import (\n    PipesBlobStoreMessageReader,\n    PipesBlobStoreStdioReader,\n    PipesChunkedStdioReader,\n    open_pipes_session,\n)\nfrom dagster_pipes import (\n    DAGSTER_PIPES_MESSAGES_ENV_VAR,\n    PipesContextData,\n    PipesExtras,\n    PipesParams,\n)\nfrom databricks.sdk import WorkspaceClient\nfrom databricks.sdk.service import files, jobs\nfrom pydantic import Field\n\n# Number of seconds between status checks on Databricks jobs launched by the\n# `PipesDatabricksClient`.\n_RUN_POLL_INTERVAL = 5\n\n\n@experimental\nclass _PipesDatabricksClient(PipesClient):\n    """Pipes client for databricks.\n\n    Args:\n        client (WorkspaceClient): A databricks `WorkspaceClient` object.\n        env (Optional[Mapping[str,str]]): An optional dict of environment variables to pass to the\n            databricks job.\n        context_injector (Optional[PipesContextInjector]): A context injector to use to inject\n            context into the databricks job. Defaults to :py:class:`PipesDbfsContextInjector`.\n        message_reader (Optional[PipesMessageReader]): A message reader to use to read messages\n            from the databricks job. 
Defaults to :py:class:`PipesDbfsMessageReader`.\n    """\n\n    env: Optional[Mapping[str, str]] = Field(\n        default=None,\n        description="An optional dict of environment variables to pass to the subprocess.",\n    )\n\n    def __init__(\n        self,\n        client: WorkspaceClient,\n        env: Optional[Mapping[str, str]] = None,\n        context_injector: Optional[PipesContextInjector] = None,\n        message_reader: Optional[PipesMessageReader] = None,\n    ):\n        self.client = client\n        self.env = env\n        self.context_injector = check.opt_inst_param(\n            context_injector,\n            "context_injector",\n            PipesContextInjector,\n        ) or PipesDbfsContextInjector(client=self.client)\n        self.message_reader = check.opt_inst_param(\n            message_reader,\n            "message_reader",\n            PipesMessageReader,\n        ) or PipesDbfsMessageReader(\n            client=self.client,\n            stdout_reader=PipesDbfsStdioReader(\n                client=self.client, remote_log_name="stdout", target_stream=sys.stdout\n            ),\n            stderr_reader=PipesDbfsStdioReader(\n                client=self.client, remote_log_name="stderr", target_stream=sys.stderr\n            ),\n        )\n\n    @classmethod\n    def _is_dagster_maintained(cls) -> bool:\n        return True\n\n    def run(\n        self,\n        *,\n        context: OpExecutionContext,\n        extras: Optional[PipesExtras] = None,\n        task: jobs.SubmitTask,\n        submit_args: Optional[Mapping[str, str]] = None,\n    ) -> PipesClientCompletedInvocation:\n        """Synchronously execute a Databricks job with the pipes protocol.\n\n        Args:\n            task (databricks.sdk.service.jobs.SubmitTask): Specification of the databricks\n                task to run. Environment variables used by dagster-pipes will be set under the\n                `spark_env_vars` key of the `new_cluster` field (if there is an existing dictionary\n                here, the EXT environment variables will be merged in). 
Everything else will be\n                passed unaltered under the `tasks` arg to `WorkspaceClient.jobs.submit`.\n            context (OpExecutionContext): The context from the executing op or asset.\n            extras (Optional[PipesExtras]): An optional dict of extra parameters to pass to the\n                subprocess.\n            submit_args (Optional[Mapping[str, str]]): Additional keyword arguments that will be\n                forwarded as-is to `WorkspaceClient.jobs.submit`.\n\n        Returns:\n            PipesClientCompletedInvocation: Wrapper containing results reported by the external\n                process.\n        """\n        with open_pipes_session(\n            context=context,\n            extras=extras,\n            context_injector=self.context_injector,\n            message_reader=self.message_reader,\n        ) as pipes_session:\n            submit_task_dict = task.as_dict()\n            submit_task_dict["new_cluster"]["spark_env_vars"] = {\n                **submit_task_dict["new_cluster"].get("spark_env_vars", {}),\n                **(self.env or {}),\n                **pipes_session.get_bootstrap_env_vars(),\n            }\n            cluster_log_root = pipes_session.get_bootstrap_params()[\n                DAGSTER_PIPES_MESSAGES_ENV_VAR\n            ].get("cluster_log_root")\n            if cluster_log_root is not None:\n                submit_task_dict["new_cluster"]["cluster_log_conf"] = {\n                    "dbfs": {"destination": f"dbfs:{cluster_log_root}"}\n                }\n            task = jobs.SubmitTask.from_dict(submit_task_dict)\n            run_id = self.client.jobs.submit(\n                tasks=[task],\n                **(submit_args or {}),\n            ).bind()["run_id"]\n\n            while True:\n                run = self.client.jobs.get_run(run_id)\n                context.log.info(\n                    f"Databricks run {run_id} current state: {run.state.life_cycle_state}"\n                )\n                if run.state.life_cycle_state in (\n                    jobs.RunLifeCycleState.TERMINATED,\n                    jobs.RunLifeCycleState.SKIPPED,\n                ):\n                    if run.state.result_state == jobs.RunResultState.SUCCESS:\n                        break\n                    else:\n                        raise DagsterPipesExecutionError(\n                            f"Error running Databricks job: {run.state.state_message}"\n                        )\n                elif run.state.life_cycle_state == jobs.RunLifeCycleState.INTERNAL_ERROR:\n                    raise DagsterPipesExecutionError(\n                        f"Error running Databricks job: {run.state.state_message}"\n                    )\n                time.sleep(_RUN_POLL_INTERVAL)\n        return PipesClientCompletedInvocation(tuple(pipes_session.get_results()))\n\n\nPipesDatabricksClient = ResourceParam[_PipesDatabricksClient]\n\n_CONTEXT_FILENAME = "context.json"\n\n\n@contextmanager\ndef dbfs_tempdir(dbfs_client: files.DbfsAPI) -> Iterator[str]:\n    dirname = "".join(random.choices(string.ascii_letters, k=30))\n    tempdir = f"/tmp/{dirname}"\n    dbfs_client.mkdirs(tempdir)\n    try:\n        yield tempdir\n    finally:\n        dbfs_client.delete(tempdir, recursive=True)\n\n\n
[docs]@experimental\nclass PipesDbfsContextInjector(PipesContextInjector):\n """A context injector that injects context into a Databricks job by writing a JSON file to DBFS.\n\n Args:\n client (WorkspaceClient): A databricks `WorkspaceClient` object.\n """\n\n def __init__(self, *, client: WorkspaceClient):\n super().__init__()\n self.dbfs_client = files.DbfsAPI(client.api_client)\n\n @contextmanager\n def inject_context(self, context: "PipesContextData") -> Iterator[PipesParams]:\n """Inject context to external environment by writing it to an automatically-generated\n DBFS temporary file as JSON and exposing the path to the file.\n\n Args:\n context_data (PipesContextData): The context data to inject.\n\n Yields:\n PipesParams: A dict of parameters that can be used by the external process to locate and\n load the injected context data.\n """\n with dbfs_tempdir(self.dbfs_client) as tempdir:\n path = os.path.join(tempdir, _CONTEXT_FILENAME)\n contents = base64.b64encode(json.dumps(context).encode("utf-8")).decode("utf-8")\n self.dbfs_client.put(path, contents=contents, overwrite=True)\n yield {"path": path}\n\n def no_messages_debug_text(self) -> str:\n return (\n "Attempted to inject context via a temporary file in dbfs. Expected"\n " PipesDbfsContextLoader to be explicitly passed to open_dagster_pipes in the external"\n " process."\n )
\n\n\n
[docs]@experimental\nclass PipesDbfsMessageReader(PipesBlobStoreMessageReader):\n """Message reader that reads messages by periodically reading message chunks from an\n automatically-generated temporary directory on DBFS.\n\n If `stdout_reader` or `stderr_reader` are passed, this reader will also start them when\n `read_messages` is called. If they are not passed, then the reader performs no stdout/stderr\n forwarding.\n\n Args:\n interval (float): interval in seconds between attempts to download a chunk\n client (WorkspaceClient): A databricks `WorkspaceClient` object.\n cluster_log_root (Optional[str]): The root path on DBFS where the cluster logs are written.\n If set, this will be used to read stderr/stdout logs.\n stdout_reader (Optional[PipesBlobStoreStdioReader]): A reader for reading stdout logs.\n stderr_reader (Optional[PipesBlobStoreStdioReader]): A reader for reading stderr logs.\n """\n\n def __init__(\n self,\n *,\n interval: float = 10,\n client: WorkspaceClient,\n stdout_reader: Optional[PipesBlobStoreStdioReader] = None,\n stderr_reader: Optional[PipesBlobStoreStdioReader] = None,\n ):\n super().__init__(\n interval=interval, stdout_reader=stdout_reader, stderr_reader=stderr_reader\n )\n self.dbfs_client = files.DbfsAPI(client.api_client)\n\n @contextmanager\n def get_params(self) -> Iterator[PipesParams]:\n with ExitStack() as stack:\n params: PipesParams = {}\n params["path"] = stack.enter_context(dbfs_tempdir(self.dbfs_client))\n if self.stdout_reader or self.stderr_reader:\n params["cluster_log_root"] = stack.enter_context(dbfs_tempdir(self.dbfs_client))\n yield params\n\n def download_messages_chunk(self, index: int, params: PipesParams) -> Optional[str]:\n message_path = os.path.join(params["path"], f"{index}.json")\n try:\n raw_message = self.dbfs_client.read(message_path)\n # Files written to dbfs using the Python IO interface used in PipesDbfsMessageWriter are\n # base64-encoded.\n return base64.b64decode(raw_message.data).decode("utf-8")\n # An error here is an expected result, since an IOError will be thrown if the next message\n # chunk doesn't yet exist. Swallowing the error here is equivalent to doing a no-op on a\n # status check showing a non-existent file.\n except IOError:\n return None\n\n def no_messages_debug_text(self) -> str:\n return (\n "Attempted to read messages from a temporary file in dbfs. Expected"\n " PipesDbfsMessageWriter to be explicitly passed to open_dagster_pipes in the external"\n " process."\n )
\n\n\n@experimental\nclass PipesDbfsStdioReader(PipesChunkedStdioReader):\n """Reader that reads stdout/stderr logs from DBFS.\n\n Args:\n interval (float): interval in seconds between attempts to download a log chunk\n remote_log_name (Literal["stdout", "stderr"]): The name of the log file to read.\n target_stream (TextIO): The stream to which to forward log chunk that have been read.\n client (WorkspaceClient): A databricks `WorkspaceClient` object.\n """\n\n def __init__(\n self,\n *,\n interval: float = 10,\n remote_log_name: Literal["stdout", "stderr"],\n target_stream: TextIO,\n client: WorkspaceClient,\n ):\n super().__init__(interval=interval, target_stream=target_stream)\n self.dbfs_client = files.DbfsAPI(client.api_client)\n self.remote_log_name = remote_log_name\n self.log_position = 0\n self.log_path = None\n\n def download_log_chunk(self, params: PipesParams) -> Optional[str]:\n log_path = self._get_log_path(params)\n if log_path is None:\n return None\n else:\n try:\n read_response = self.dbfs_client.read(log_path)\n assert read_response.data\n content = base64.b64decode(read_response.data).decode("utf-8")\n chunk = content[self.log_position :]\n self.log_position = len(content)\n return chunk\n except IOError:\n return None\n\n def is_ready(self, params: PipesParams) -> bool:\n return self._get_log_path(params) is not None\n\n # The directory containing logs will not exist until either 5 minutes have elapsed or the\n # job has finished.\n def _get_log_path(self, params: PipesParams) -> Optional[str]:\n if self.log_path is None:\n log_root_path = os.path.join(params["cluster_log_root"])\n child_dirs = list(self.dbfs_client.list(log_root_path))\n if len(child_dirs) > 0:\n self.log_path = f"dbfs:{child_dirs[0].path}/driver/{self.remote_log_name}"\n return self.log_path\n
", "current_page_name": "_modules/dagster_databricks/pipes", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_databricks.pipes"}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_databricks.resources

\nfrom typing import Any, Optional\n\nfrom dagster import (\n    Config,\n    ConfigurableResource,\n    IAttachDifferentObjectToOpContext,\n    resource,\n)\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom pydantic import Field, root_validator\n\nfrom .databricks import DatabricksClient\n\n\nclass OauthCredentials(Config):\n    """OAuth credentials for Databricks.\n\n    See https://docs.databricks.com/dev-tools/api/latest/authentication.html#oauth-2-0.\n    """\n\n    client_id: str = Field(description="OAuth client ID")\n    client_secret: str = Field(description="OAuth client secret")\n\n\n
[docs]class DatabricksClientResource(ConfigurableResource, IAttachDifferentObjectToOpContext):\n """Resource which provides a Python client for interacting with Databricks within an\n op or asset.\n """\n\n host: str = Field(description="Databricks host, e.g. https://uksouth.azuredatabricks.com")\n token: Optional[str] = Field(default=None, description="Databricks access token")\n oauth_credentials: Optional[OauthCredentials] = Field(\n default=None,\n description=(\n "Databricks OAuth credentials for using a service principal. See"\n " https://docs.databricks.com/en/dev-tools/auth.html#oauth-2-0"\n ),\n )\n workspace_id: Optional[str] = Field(\n default=None,\n description=(\n "DEPRECATED: The Databricks workspace ID, as described in"\n " https://docs.databricks.com/workspace/workspace-details.html#workspace-instance-names-urls-and-ids."\n " This is no longer used and will be removed in a 0.21."\n ),\n )\n\n @root_validator()\n def has_token_or_oauth_credentials(cls, values):\n token = values.get("token")\n oauth_credentials = values.get("oauth_credentials")\n if not token and not oauth_credentials:\n raise ValueError("Must provide either token or oauth_credentials")\n if token and oauth_credentials:\n raise ValueError("Must provide either token or oauth_credentials, not both")\n return values\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def get_client(self) -> DatabricksClient:\n if self.oauth_credentials:\n client_id = self.oauth_credentials.client_id\n client_secret = self.oauth_credentials.client_secret\n else:\n client_id = None\n client_secret = None\n\n return DatabricksClient(\n host=self.host,\n token=self.token,\n oauth_client_id=client_id,\n oauth_client_secret=client_secret,\n workspace_id=self.workspace_id,\n )\n\n def get_object_to_set_on_execution_context(self) -> Any:\n return self.get_client()
\n\n\n
[docs]@dagster_maintained_resource\n@resource(config_schema=DatabricksClientResource.to_config_schema())\ndef databricks_client(init_context) -> DatabricksClient:\n return DatabricksClientResource.from_resource_context(init_context).get_client()
\n
", "current_page_name": "_modules/dagster_databricks/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_databricks.resources"}}, "dagster_datadog": {"resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_datadog.resources

\nfrom dagster import ConfigurableResource, resource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom datadog import DogStatsd, initialize, statsd\nfrom pydantic import Field\n\n\nclass DatadogClient:\n    # Mirroring levels from the dogstatsd library\n    OK, WARNING, CRITICAL, UNKNOWN = (\n        DogStatsd.OK,\n        DogStatsd.WARNING,\n        DogStatsd.CRITICAL,\n        DogStatsd.UNKNOWN,\n    )\n\n    def __init__(self, api_key: str, app_key: str):\n        self.api_key = api_key\n        self.app_key = app_key\n        initialize(api_key=api_key, app_key=app_key)\n\n        # Pull in methods from the dogstatsd library\n        for method in [\n            "event",\n            "gauge",\n            "increment",\n            "decrement",\n            "histogram",\n            "distribution",\n            "set",\n            "service_check",\n            "timed",\n            "timing",\n        ]:\n            setattr(self, method, getattr(statsd, method))\n\n\n
[docs]class DatadogResource(ConfigurableResource):\n """This resource is a thin wrapper over the\n `dogstatsd library <https://datadogpy.readthedocs.io/en/latest/>`_.\n\n As such, we directly mirror the public API methods of DogStatsd here; you can refer to the\n `DataDog documentation <https://docs.datadoghq.com/developers/dogstatsd/>`_ for how to use this\n resource.\n\n Examples:\n .. code-block:: python\n\n @op\n def datadog_op(datadog_client: ResourceParam[DatadogClient]):\n datadog_client.event('Man down!', 'This server needs assistance.')\n datadog_client.gauge('users.online', 1001, tags=["protocol:http"])\n datadog_client.increment('page.views')\n datadog_client.decrement('page.views')\n datadog_client.histogram('album.photo.count', 26, tags=["gender:female"])\n datadog_client.distribution('album.photo.count', 26, tags=["color:blue"])\n datadog_client.set('visitors.uniques', 999, tags=["browser:ie"])\n datadog_client.service_check('svc.check_name', datadog_client.WARNING)\n datadog_client.timing("query.response.time", 1234)\n\n # Use timed decorator\n @datadog_client.timed('run_fn')\n def run_fn():\n pass\n\n run_fn()\n\n @job\n def job_for_datadog_op() -> None:\n datadog_op()\n\n job_for_datadog_op.execute_in_process(\n resources={"datadog_client": DatadogResource(api_key="FOO", app_key="BAR")}\n )\n\n """\n\n api_key: str = Field(\n description=(\n "Datadog API key. See https://docs.datadoghq.com/account_management/api-app-keys/"\n )\n )\n app_key: str = Field(\n description=(\n "Datadog application key. See"\n " https://docs.datadoghq.com/account_management/api-app-keys/."\n )\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def get_client(self) -> DatadogClient:\n return DatadogClient(self.api_key, self.app_key)
\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n config_schema=DatadogResource.to_config_schema(),\n description="This resource is for publishing to DataDog",\n)\ndef datadog_resource(context) -> DatadogClient:\n """This legacy resource is a thin wrapper over the\n `dogstatsd library <https://datadogpy.readthedocs.io/en/latest/>`_.\n\n Prefer using :py:class:`DatadogResource`.\n\n As such, we directly mirror the public API methods of DogStatsd here; you can refer to the\n `DataDog documentation <https://docs.datadoghq.com/developers/dogstatsd/>`_ for how to use this\n resource.\n\n Examples:\n .. code-block:: python\n\n @op(required_resource_keys={'datadog'})\n def datadog_op(context):\n dd = context.resources.datadog\n\n dd.event('Man down!', 'This server needs assistance.')\n dd.gauge('users.online', 1001, tags=["protocol:http"])\n dd.increment('page.views')\n dd.decrement('page.views')\n dd.histogram('album.photo.count', 26, tags=["gender:female"])\n dd.distribution('album.photo.count', 26, tags=["color:blue"])\n dd.set('visitors.uniques', 999, tags=["browser:ie"])\n dd.service_check('svc.check_name', dd.WARNING)\n dd.timing("query.response.time", 1234)\n\n # Use timed decorator\n @dd.timed('run_fn')\n def run_fn():\n pass\n\n run_fn()\n\n @job(resource_defs={'datadog': datadog_resource})\n def dd_job():\n datadog_op()\n\n result = dd_job.execute_in_process(\n run_config={'resources': {'datadog': {'config': {'api_key': 'YOUR_KEY', 'app_key': 'YOUR_KEY'}}}}\n )\n\n """\n return DatadogResource.from_resource_context(context).get_client()
\n
", "current_page_name": "_modules/dagster_datadog/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_datadog.resources"}}, "dagster_datahub": {"resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_datahub.resources

\nfrom typing import Any, Dict, List, Optional\n\nfrom dagster import InitResourceContext, resource\nfrom dagster._config.pythonic_config import Config, ConfigurableResource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom datahub.emitter.kafka_emitter import (\n    DEFAULT_MCE_KAFKA_TOPIC,\n    DEFAULT_MCP_KAFKA_TOPIC,\n    MCE_KEY,\n    MCP_KEY,\n    DatahubKafkaEmitter,\n    KafkaEmitterConfig,\n)\nfrom datahub.emitter.rest_emitter import DatahubRestEmitter\nfrom pydantic import Field\n\n\n
[docs]class DatahubRESTEmitterResource(ConfigurableResource):\n connection: str = Field(description="Datahub GMS Server")\n token: Optional[str] = Field(default=None, description="Personal Access Token")\n connect_timeout_sec: Optional[float] = None\n read_timeout_sec: Optional[float] = None\n retry_status_codes: Optional[List[int]] = None\n retry_methods: Optional[List[str]] = None\n retry_max_times: Optional[int] = None\n extra_headers: Optional[Dict[str, str]] = None\n ca_certificate_path: Optional[str] = None\n server_telemetry_id: Optional[str] = None # No-op - no longer accepted in DatahubRestEmitter\n disable_ssl_verification: bool = False\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def get_emitter(self) -> DatahubRestEmitter:\n return DatahubRestEmitter(\n gms_server=self.connection,\n token=self.token,\n connect_timeout_sec=self.connect_timeout_sec,\n read_timeout_sec=self.read_timeout_sec,\n retry_status_codes=self.retry_status_codes,\n retry_methods=self.retry_methods,\n retry_max_times=self.retry_max_times,\n extra_headers=self.extra_headers,\n ca_certificate_path=self.ca_certificate_path,\n disable_ssl_verification=self.disable_ssl_verification,\n )
\n\n\n
[docs]@dagster_maintained_resource\n@resource(config_schema=DatahubRESTEmitterResource.to_config_schema())\ndef datahub_rest_emitter(init_context: InitResourceContext) -> DatahubRestEmitter:\n emitter = DatahubRestEmitter(\n gms_server=init_context.resource_config.get("connection"),\n token=init_context.resource_config.get("token"),\n connect_timeout_sec=init_context.resource_config.get("connect_timeout_sec"),\n read_timeout_sec=init_context.resource_config.get("read_timeout_sec"),\n retry_status_codes=init_context.resource_config.get("retry_status_codes"),\n retry_methods=init_context.resource_config.get("retry_methods"),\n retry_max_times=init_context.resource_config.get("retry_max_times"),\n extra_headers=init_context.resource_config.get("extra_headers"),\n ca_certificate_path=init_context.resource_config.get("ca_certificate_path"),\n disable_ssl_verification=init_context.resource_config.get("disable_ssl_verification"),\n )\n # Attempt to hit the server to ensure the resource is properly configured\n emitter.test_connection()\n return emitter
\n\n\nclass DatahubConnection(Config):\n bootstrap: str = Field(description="Kafka Bootstrap Servers. Comma delimited")\n schema_registry_url: str = Field(description="Schema Registry Location.")\n schema_registry_config: Dict[str, Any] = Field(\n default={}, description="Extra Schema Registry Config."\n )\n\n\n
[docs]class DatahubKafkaEmitterResource(ConfigurableResource):\n connection: DatahubConnection\n topic: Optional[str] = None\n topic_routes: Dict[str, str] = Field(\n default={\n MCE_KEY: DEFAULT_MCE_KAFKA_TOPIC,\n MCP_KEY: DEFAULT_MCP_KAFKA_TOPIC,\n }\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def get_emitter(self) -> DatahubKafkaEmitter:\n return DatahubKafkaEmitter(\n KafkaEmitterConfig.parse_obj(\n {k: v for k, v in self._convert_to_config_dictionary().items() if v is not None}\n )\n )
\n\n\n
[docs]@dagster_maintained_resource\n@resource(config_schema=DatahubKafkaEmitterResource.to_config_schema())\ndef datahub_kafka_emitter(init_context: InitResourceContext) -> DatahubKafkaEmitter:\n return DatahubKafkaEmitter(KafkaEmitterConfig.parse_obj(init_context.resource_config))
\n
", "current_page_name": "_modules/dagster_datahub/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_datahub.resources"}}, "dagster_dbt": {"asset_decorator": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dbt.asset_decorator

\nfrom typing import (\n    Any,\n    Callable,\n    Dict,\n    FrozenSet,\n    Mapping,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n)\n\nimport dagster._check as check\nfrom dagster import (\n    AssetCheckSpec,\n    AssetKey,\n    AssetOut,\n    AssetsDefinition,\n    BackfillPolicy,\n    DagsterInvalidDefinitionError,\n    Nothing,\n    PartitionsDefinition,\n    multi_asset,\n)\n\nfrom .asset_utils import (\n    DAGSTER_DBT_TRANSLATOR_METADATA_KEY,\n    MANIFEST_METADATA_KEY,\n    default_asset_check_fn,\n    default_code_version_fn,\n    get_deps,\n)\nfrom .dagster_dbt_translator import DagsterDbtTranslator, DbtManifestWrapper\nfrom .dbt_manifest import DbtManifestParam, validate_manifest\nfrom .utils import (\n    ASSET_RESOURCE_TYPES,\n    get_dbt_resource_props_by_dbt_unique_id_from_manifest,\n    output_name_fn,\n    select_unique_ids_from_manifest,\n)\n\n\n
[docs]def dbt_assets(\n *,\n manifest: DbtManifestParam,\n select: str = "fqn:*",\n exclude: Optional[str] = None,\n io_manager_key: Optional[str] = None,\n partitions_def: Optional[PartitionsDefinition] = None,\n dagster_dbt_translator: DagsterDbtTranslator = DagsterDbtTranslator(),\n backfill_policy: Optional[BackfillPolicy] = None,\n op_tags: Optional[Mapping[str, Any]] = None,\n) -> Callable[..., AssetsDefinition]:\n """Create a definition for how to compute a set of dbt resources, described by a manifest.json.\n When invoking dbt commands using :py:class:`~dagster_dbt.DbtCliResource`'s\n :py:meth:`~dagster_dbt.DbtCliResource.cli` method, Dagster events are emitted by calling\n ``yield from`` on the event stream returned by :py:meth:`~dagster_dbt.DbtCliInvocation.stream`.\n\n Args:\n manifest (Union[Mapping[str, Any], str, Path]): The contents of a manifest.json file\n or the path to a manifest.json file. A manifest.json contains a representation of a\n dbt project (models, tests, macros, etc). We use this representation to create\n corresponding Dagster assets.\n select (str): A dbt selection string for the models in a project that you want\n to include. Defaults to ``fqn:*``.\n exclude (Optional[str]): A dbt selection string for the models in a project that you want\n to exclude. Defaults to "".\n io_manager_key (Optional[str]): The IO manager key that will be set on each of the returned\n assets. When other ops are downstream of the loaded assets, the IOManager specified\n here determines how the inputs to those ops are loaded. Defaults to "io_manager".\n partitions_def (Optional[PartitionsDefinition]): Defines the set of partition keys that\n compose the dbt assets.\n dagster_dbt_translator (Optional[DagsterDbtTranslator]): Allows customizing how to map\n dbt models, seeds, etc. to asset keys and asset metadata.\n backfill_policy (Optional[BackfillPolicy]): If a partitions_def is defined, this determines\n how to execute backfills that target multiple partitions.\n op_tags (Optional[Dict[str, Any]]): A dictionary of tags for the op that computes the assets.\n Frameworks may expect and require certain metadata to be attached to a op. Values that\n are not strings will be json encoded and must meet the criteria that\n `json.loads(json.dumps(value)) == value`.\n\n Examples:\n Running ``dbt build`` for a dbt project:\n\n .. code-block:: python\n\n from pathlib import Path\n\n from dagster import AssetExecutionContext\n from dagster_dbt import DbtCliResource, dbt_assets\n\n\n @dbt_assets(manifest=Path("target", "manifest.json"))\n def my_dbt_assets(context: AssetExecutionContext, dbt: DbtCliResource):\n yield from dbt.cli(["build"], context=context).stream()\n\n Running dbt commands with flags:\n\n .. code-block:: python\n\n from pathlib import Path\n\n from dagster import AssetExecutionContext\n from dagster_dbt import DbtCliResource, dbt_assets\n\n\n @dbt_assets(manifest=Path("target", "manifest.json"))\n def my_dbt_assets(context: AssetExecutionContext, dbt: DbtCliResource):\n yield from dbt.cli(["build", "--full-refresh"], context=context).stream()\n\n Running dbt commands with ``--vars``:\n\n .. 
code-block:: python\n\n import json\n from pathlib import Path\n\n from dagster import AssetExecutionContext\n from dagster_dbt import DbtCliResource, dbt_assets\n\n\n @dbt_assets(manifest=Path("target", "manifest.json"))\n def my_dbt_assets(context: AssetExecutionContext, dbt: DbtCliResource):\n dbt_vars = {"key": "value"}\n\n yield from dbt.cli(["build", "--vars", json.dumps(dbt_vars)], context=context).stream()\n\n Retrieving dbt artifacts after running a dbt command:\n\n .. code-block:: python\n\n from pathlib import Path\n\n from dagster import AssetExecutionContext\n from dagster_dbt import DbtCliResource, dbt_assets\n\n\n @dbt_assets(manifest=Path("target", "manifest.json"))\n def my_dbt_assets(context: AssetExecutionContext, dbt: DbtCliResource):\n dbt_build_invocation = dbt.cli(["build"], context=context)\n\n yield from dbt_build_invocation.stream()\n\n run_results_json = dbt_build_invocation.get_artifact("run_results.json")\n\n Running multiple dbt commands for a dbt project:\n\n .. code-block:: python\n\n from pathlib import Path\n\n from dagster import AssetExecutionContext\n from dagster_dbt import DbtCliResource, dbt_assets\n\n\n @dbt_assets(manifest=Path("target", "manifest.json"))\n def my_dbt_assets(context: AssetExecutionContext, dbt: DbtCliResource):\n yield from dbt.cli(["run"], context=context).stream()\n yield from dbt.cli(["test"], context=context).stream()\n\n Customizing the Dagster asset metadata inferred from a dbt project using :py:class:`~dagster_dbt.DagsterDbtTranslator`:\n\n .. code-block:: python\n\n from pathlib import Path\n\n from dagster import AssetExecutionContext\n from dagster_dbt import DagsterDbtTranslator, DbtCliResource, dbt_assets\n\n\n class CustomDagsterDbtTranslator(DagsterDbtTranslator):\n ...\n\n\n @dbt_assets(\n manifest=Path("target", "manifest.json"),\n dagster_dbt_translator=CustomDagsterDbtTranslator(),\n )\n def my_dbt_assets(context: AssetExecutionContext, dbt: DbtCliResource):\n yield from dbt.cli(["build"], context=context).stream()\n\n Invoking another Dagster :py:class:`~dagster.ResourceDefinition` alongside dbt:\n\n .. code-block:: python\n\n from pathlib import Path\n\n from dagster import AssetExecutionContext\n from dagster_dbt import DagsterDbtTranslator, DbtCliResource, dbt_assets\n from dagster_slack import SlackResource\n\n\n @dbt_assets(manifest=Path("target", "manifest.json"))\n def my_dbt_assets(context: AssetExecutionContext, dbt: DbtCliResource, slack: SlackResource):\n yield from dbt.cli(["build"], context=context).stream()\n\n slack_client = slack.get_client()\n slack_client.chat_postMessage(channel="#my-channel", text="dbt build succeeded!")\n\n Defining and accessing Dagster :py:class:`~dagster.Config` alongside dbt:\n\n .. code-block:: python\n\n from pathlib import Path\n\n from dagster import AssetExecutionContext, Config\n from dagster_dbt import DagsterDbtTranslator, DbtCliResource, dbt_assets\n\n\n class MyDbtConfig(Config):\n full_refresh: bool\n\n\n @dbt_assets(manifest=Path("target", "manifest.json"))\n def my_dbt_assets(context: AssetExecutionContext, dbt: DbtCliResource, config: MyDbtConfig):\n dbt_build_args = ["build"]\n if config.full_refresh:\n dbt_build_args += ["--full-refresh"]\n\n yield from dbt.cli(dbt_build_args, context=context).stream()\n\n Defining Dagster :py:class:`~dagster.PartitionDefinition` alongside dbt:\n\n\n .. 
code-block:: python\n\n import json\n from pathlib import Path\n\n from dagster import AssetExecutionContext, DailyPartitionDefinition\n from dagster_dbt import DbtCliResource, dbt_assets\n\n\n @dbt_assets(\n manifest=Path("target", "manifest.json"),\n partitions_def=DailyPartitionsDefinition(start_date="2023-01-01")\n )\n def partitionshop_dbt_assets(context: AssetExecutionContext, dbt: DbtCliResource):\n time_window = context.asset_partitions_time_window_for_output(\n list(context.selected_output_names)[0]\n )\n\n dbt_vars = {\n "min_date": time_window.start.isoformat(),\n "max_date": time_window.end.isoformat()\n }\n dbt_build_args = ["build", "--vars", json.dumps(dbt_vars)]\n\n yield from dbt.cli(dbt_build_args, context=context).stream()\n\n """\n check.inst_param(\n dagster_dbt_translator,\n "dagster_dbt_translator",\n DagsterDbtTranslator,\n additional_message=(\n "Ensure that the argument is an instantiated class that subclasses"\n " DagsterDbtTranslator."\n ),\n )\n manifest = validate_manifest(manifest)\n\n unique_ids = select_unique_ids_from_manifest(\n select=select, exclude=exclude or "", manifest_json=manifest\n )\n node_info_by_dbt_unique_id = get_dbt_resource_props_by_dbt_unique_id_from_manifest(manifest)\n deps = get_deps(\n dbt_nodes=node_info_by_dbt_unique_id,\n selected_unique_ids=unique_ids,\n asset_resource_types=ASSET_RESOURCE_TYPES,\n )\n (\n non_argument_deps,\n outs,\n internal_asset_deps,\n check_specs,\n ) = get_dbt_multi_asset_args(\n dbt_nodes=node_info_by_dbt_unique_id,\n deps=deps,\n io_manager_key=io_manager_key,\n manifest=manifest,\n dagster_dbt_translator=dagster_dbt_translator,\n )\n\n if op_tags and "dagster-dbt/select" in op_tags:\n raise DagsterInvalidDefinitionError(\n "To specify a dbt selection, use the 'select' argument, not 'dagster-dbt/select'"\n " with op_tags"\n )\n\n if op_tags and "dagster-dbt/exclude" in op_tags:\n raise DagsterInvalidDefinitionError(\n "To specify a dbt exclusion, use the 'exclude' argument, not 'dagster-dbt/exclude'"\n " with op_tags"\n )\n\n resolved_op_tags = {\n **({"dagster-dbt/select": select} if select else {}),\n **({"dagster-dbt/exclude": exclude} if exclude else {}),\n **(op_tags if op_tags else {}),\n }\n\n def inner(fn) -> AssetsDefinition:\n asset_definition = multi_asset(\n outs=outs,\n internal_asset_deps=internal_asset_deps,\n deps=non_argument_deps,\n compute_kind="dbt",\n partitions_def=partitions_def,\n can_subset=True,\n op_tags=resolved_op_tags,\n check_specs=check_specs,\n backfill_policy=backfill_policy,\n )(fn)\n\n return asset_definition\n\n return inner
\n\n\ndef get_dbt_multi_asset_args(\n dbt_nodes: Mapping[str, Any],\n deps: Mapping[str, FrozenSet[str]],\n io_manager_key: Optional[str],\n manifest: Mapping[str, Any],\n dagster_dbt_translator: DagsterDbtTranslator,\n) -> Tuple[\n Sequence[AssetKey],\n Dict[str, AssetOut],\n Dict[str, Set[AssetKey]],\n Sequence[AssetCheckSpec],\n]:\n non_argument_deps: Set[AssetKey] = set()\n outs: Dict[str, AssetOut] = {}\n internal_asset_deps: Dict[str, Set[AssetKey]] = {}\n check_specs: Sequence[AssetCheckSpec] = []\n\n for unique_id, parent_unique_ids in deps.items():\n dbt_resource_props = dbt_nodes[unique_id]\n\n output_name = output_name_fn(dbt_resource_props)\n asset_key = dagster_dbt_translator.get_asset_key(dbt_resource_props)\n\n outs[output_name] = AssetOut(\n key=asset_key,\n dagster_type=Nothing,\n io_manager_key=io_manager_key,\n description=dagster_dbt_translator.get_description(dbt_resource_props),\n is_required=False,\n metadata={ # type: ignore\n **dagster_dbt_translator.get_metadata(dbt_resource_props),\n MANIFEST_METADATA_KEY: DbtManifestWrapper(manifest=manifest),\n DAGSTER_DBT_TRANSLATOR_METADATA_KEY: dagster_dbt_translator,\n },\n group_name=dagster_dbt_translator.get_group_name(dbt_resource_props),\n code_version=default_code_version_fn(dbt_resource_props),\n freshness_policy=dagster_dbt_translator.get_freshness_policy(dbt_resource_props),\n auto_materialize_policy=dagster_dbt_translator.get_auto_materialize_policy(\n dbt_resource_props\n ),\n )\n\n test_unique_ids = [\n child_unique_id\n for child_unique_id in manifest["child_map"][unique_id]\n if child_unique_id.startswith("test")\n ]\n for test_unique_id in test_unique_ids:\n test_resource_props = manifest["nodes"][test_unique_id]\n check_spec = default_asset_check_fn(\n asset_key, unique_id, dagster_dbt_translator.settings, test_resource_props\n )\n\n if check_spec:\n check_specs.append(check_spec)\n\n # Translate parent unique ids to internal asset deps and non argument dep\n output_internal_deps = internal_asset_deps.setdefault(output_name, set())\n for parent_unique_id in parent_unique_ids:\n parent_resource_props = dbt_nodes[parent_unique_id]\n parent_asset_key = dagster_dbt_translator.get_asset_key(parent_resource_props)\n\n # Add this parent as an internal dependency\n output_internal_deps.add(parent_asset_key)\n\n # Mark this parent as an input if it has no dependencies\n if parent_unique_id not in deps:\n non_argument_deps.add(parent_asset_key)\n\n return list(non_argument_deps), outs, internal_asset_deps, check_specs\n
", "current_page_name": "_modules/dagster_dbt/asset_decorator", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dbt.asset_decorator"}, "asset_defs": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dbt.asset_defs

\nimport hashlib\nimport json\nimport os\nfrom pathlib import Path\nfrom typing import (\n    AbstractSet,\n    Any,\n    Callable,\n    Dict,\n    Iterator,\n    List,\n    Mapping,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n    Union,\n    cast,\n)\n\nimport dateutil\nfrom dagster import (\n    AssetCheckResult,\n    AssetKey,\n    AssetsDefinition,\n    AutoMaterializePolicy,\n    FreshnessPolicy,\n    In,\n    OpExecutionContext,\n    Out,\n    PartitionsDefinition,\n    PermissiveConfig,\n    _check as check,\n    get_dagster_logger,\n    op,\n)\nfrom dagster._annotations import deprecated_param\nfrom dagster._core.definitions.events import (\n    AssetMaterialization,\n    AssetObservation,\n    CoercibleToAssetKeyPrefix,\n    Output,\n)\nfrom dagster._core.definitions.metadata import MetadataUserInput, RawMetadataValue\nfrom dagster._core.errors import DagsterInvalidSubsetError\nfrom dagster._utils.merger import deep_merge_dicts\nfrom dagster._utils.warnings import (\n    deprecation_warning,\n    normalize_renamed_param,\n)\n\nfrom dagster_dbt.asset_utils import (\n    default_asset_key_fn,\n    default_auto_materialize_policy_fn,\n    default_description_fn,\n    default_freshness_policy_fn,\n    default_group_from_dbt_resource_props,\n    default_metadata_from_dbt_resource_props,\n    get_asset_deps,\n    get_deps,\n)\nfrom dagster_dbt.core.resources import DbtCliClient\nfrom dagster_dbt.core.resources_v2 import DbtCliResource\nfrom dagster_dbt.core.types import DbtCliOutput\nfrom dagster_dbt.core.utils import build_command_args_from_flags, execute_cli\nfrom dagster_dbt.dagster_dbt_translator import DagsterDbtTranslator\nfrom dagster_dbt.errors import DagsterDbtError\nfrom dagster_dbt.types import DbtOutput\nfrom dagster_dbt.utils import (\n    ASSET_RESOURCE_TYPES,\n    output_name_fn,\n    result_to_events,\n    select_unique_ids_from_manifest,\n)\n\n\ndef _load_manifest_for_project(\n    project_dir: str,\n    profiles_dir: str,\n    target_dir: str,\n    select: str,\n    exclude: str,\n) -> Tuple[Mapping[str, Any], DbtCliOutput]:\n    # running "dbt ls" regenerates the manifest.json, which includes a superset of the actual\n    # "dbt ls" output\n    cli_output = execute_cli(\n        executable="dbt",\n        command="ls",\n        log=get_dagster_logger(),\n        flags_dict={\n            "project-dir": project_dir,\n            "profiles-dir": profiles_dir,\n            "select": select,\n            "exclude": exclude,\n            "output": "json",\n        },\n        warn_error=False,\n        ignore_handled_error=False,\n        target_path=target_dir,\n        json_log_format=True,\n        capture_logs=True,\n    )\n    manifest_path = os.path.join(target_dir, "manifest.json")\n    with open(manifest_path, "r", encoding="utf8") as f:\n        return json.load(f), cli_output\n\n\ndef _can_stream_events(dbt_resource: Union[DbtCliClient, DbtCliResource]) -> bool:\n    """Check if the installed dbt version supports streaming events."""\n    import dbt.version\n    from packaging import version\n\n    if version.parse(dbt.version.__version__) >= version.parse("1.4.0"):\n        # The json log format is required for streaming events. 
DbtCliResource always uses this format, but\n        # DbtCliClient has an option to disable it.\n        if isinstance(dbt_resource, DbtCliResource):\n            return True\n        else:\n            return dbt_resource._json_log_format  # noqa: SLF001\n    else:\n        return False\n\n\ndef _batch_event_iterator(\n    context: OpExecutionContext,\n    dbt_resource: DbtCliClient,\n    use_build_command: bool,\n    node_info_to_asset_key: Callable[[Mapping[str, Any]], AssetKey],\n    runtime_metadata_fn: Optional[\n        Callable[[OpExecutionContext, Mapping[str, Any]], Mapping[str, RawMetadataValue]]\n    ],\n    kwargs: Dict[str, Any],\n) -> Iterator[Union[AssetObservation, AssetMaterialization, Output]]:\n    """Yields events for a dbt cli invocation. Waits until the entire command has completed before\n    emitting outputs.\n    """\n    # clean up any run results from the last run\n    dbt_resource.remove_run_results_json()\n\n    dbt_output: Optional[DbtOutput] = None\n    try:\n        if use_build_command:\n            dbt_output = dbt_resource.build(**kwargs)\n        else:\n            dbt_output = dbt_resource.run(**kwargs)\n    finally:\n        # in the case that the project only partially runs successfully, still attempt to generate\n        # events for the parts that were successful\n        if dbt_output is None:\n            dbt_output = DbtOutput(result=check.not_none(dbt_resource.get_run_results_json()))\n\n        manifest_json = check.not_none(dbt_resource.get_manifest_json())\n\n        dbt_output = check.not_none(dbt_output)\n        for result in dbt_output.result["results"]:\n            extra_metadata: Optional[Mapping[str, RawMetadataValue]] = None\n            if runtime_metadata_fn:\n                node_info = manifest_json["nodes"][result["unique_id"]]\n                extra_metadata = runtime_metadata_fn(context, node_info)\n            yield from result_to_events(\n                result=result,\n                docs_url=dbt_output.docs_url,\n                node_info_to_asset_key=node_info_to_asset_key,\n                manifest_json=manifest_json,\n                extra_metadata=extra_metadata,\n                generate_asset_outputs=True,\n            )\n\n\ndef _events_for_structured_json_line(\n    json_line: Mapping[str, Any],\n    context: OpExecutionContext,\n    node_info_to_asset_key: Callable[[Mapping[str, Any]], AssetKey],\n    runtime_metadata_fn: Optional[\n        Callable[[OpExecutionContext, Mapping[str, Any]], Mapping[str, RawMetadataValue]]\n    ],\n    manifest_json: Mapping[str, Any],\n) -> Iterator[Union[AssetObservation, Output]]:\n    """Parses a json line into a Dagster event. 
Attempts to replicate the behavior of result_to_events\n    as closely as possible.\n    """\n    runtime_node_info = json_line.get("data", {}).get("node_info", {})\n    if not runtime_node_info:\n        return\n\n    node_resource_type = runtime_node_info.get("resource_type")\n    node_status = runtime_node_info.get("node_status")\n    unique_id = runtime_node_info.get("unique_id")\n\n    if not node_resource_type or not unique_id:\n        return\n\n    compiled_node_info = manifest_json["nodes"][unique_id]\n\n    if node_resource_type in ASSET_RESOURCE_TYPES and node_status == "success":\n        metadata = dict(\n            runtime_metadata_fn(context, compiled_node_info) if runtime_metadata_fn else {}\n        )\n        started_at_str = runtime_node_info.get("node_started_at")\n        finished_at_str = runtime_node_info.get("node_finished_at")\n        if started_at_str is None or finished_at_str is None:\n            return\n\n        started_at = dateutil.parser.isoparse(started_at_str)  # type: ignore\n        completed_at = dateutil.parser.isoparse(finished_at_str)  # type: ignore\n        duration = completed_at - started_at\n        metadata.update(\n            {\n                "Execution Started At": started_at.isoformat(timespec="seconds"),\n                "Execution Completed At": completed_at.isoformat(timespec="seconds"),\n                "Execution Duration": duration.total_seconds(),\n            }\n        )\n        yield Output(\n            value=None,\n            output_name=output_name_fn(compiled_node_info),\n            metadata=metadata,\n        )\n    elif node_resource_type == "test" and runtime_node_info.get("node_finished_at"):\n        upstream_unique_ids = (\n            manifest_json["nodes"][unique_id].get("depends_on", {}).get("nodes", [])\n        )\n        # tests can apply to multiple asset keys\n        for upstream_id in upstream_unique_ids:\n            # the upstream id can reference a node or a source\n            upstream_node_info = manifest_json["nodes"].get(upstream_id) or manifest_json[\n                "sources"\n            ].get(upstream_id)\n            if upstream_node_info is None:\n                continue\n            upstream_asset_key = node_info_to_asset_key(upstream_node_info)\n            yield AssetObservation(\n                asset_key=upstream_asset_key,\n                metadata={\n                    "Test ID": unique_id,\n                    "Test Status": node_status,\n                },\n            )\n\n\ndef _stream_event_iterator(\n    context: OpExecutionContext,\n    dbt_resource: Union[DbtCliResource, DbtCliClient],\n    use_build_command: bool,\n    node_info_to_asset_key: Callable[[Mapping[str, Any]], AssetKey],\n    runtime_metadata_fn: Optional[\n        Callable[[OpExecutionContext, Mapping[str, Any]], Mapping[str, RawMetadataValue]]\n    ],\n    kwargs: Dict[str, Any],\n    manifest_json: Mapping[str, Any],\n) -> Iterator[Union[AssetObservation, Output, AssetCheckResult]]:\n    """Yields events for a dbt cli invocation. 
Emits outputs as soon as the relevant dbt logs are\n    emitted.\n    """\n    if isinstance(dbt_resource, DbtCliClient):\n        for parsed_json_line in dbt_resource.cli_stream_json(\n            command="build" if use_build_command else "run",\n            **kwargs,\n        ):\n            yield from _events_for_structured_json_line(\n                parsed_json_line,\n                context,\n                node_info_to_asset_key,\n                runtime_metadata_fn,\n                manifest_json,\n            )\n    else:\n        if runtime_metadata_fn is not None:\n            raise DagsterDbtError(\n                "The runtime_metadata_fn argument on the load_assets_from_dbt_manifest and"\n                " load_assets_from_dbt_project functions is not supported when using the"\n                " DbtCliResource resource. Use the @dbt_assets decorator instead if you want"\n                " control over what metadata is yielded at runtime."\n            )\n\n        class CustomDagsterDbtTranslator(DagsterDbtTranslator):\n            @classmethod\n            def get_asset_key(cls, dbt_resource_props: Mapping[str, Any]) -> AssetKey:\n                return node_info_to_asset_key(dbt_resource_props)\n\n        cli_output = dbt_resource.cli(\n            args=["build" if use_build_command else "run", *build_command_args_from_flags(kwargs)],\n            manifest=manifest_json,\n            dagster_dbt_translator=CustomDagsterDbtTranslator(),\n        )\n        yield from cli_output.stream()\n\n\nclass DbtOpConfig(PermissiveConfig):\n    """Keyword arguments to pass to the underlying dbt command. Additional arguments not listed in the schema will\n    be passed through as well, e.g. {'bool_flag': True, 'string_flag': 'hi'} will result in the flags\n    '--bool_flag --string_flag hi' being passed to the dbt command.\n    """\n\n    select: Optional[str] = None\n    exclude: Optional[str] = None\n    vars: Optional[Dict[str, Any]] = None\n    full_refresh: Optional[bool] = None\n\n\ndef _get_dbt_op(\n    op_name: str,\n    ins: Mapping[str, In],\n    outs: Mapping[str, Out],\n    select: str,\n    exclude: str,\n    use_build_command: bool,\n    fqns_by_output_name: Mapping[str, List[str]],\n    dbt_resource_key: str,\n    node_info_to_asset_key: Callable[[Mapping[str, Any]], AssetKey],\n    partition_key_to_vars_fn: Optional[Callable[[str], Mapping[str, Any]]],\n    runtime_metadata_fn: Optional[\n        Callable[[OpExecutionContext, Mapping[str, Any]], Mapping[str, RawMetadataValue]]\n    ],\n    manifest_json: Mapping[str, Any],\n):\n    @op(\n        name=op_name,\n        tags={"kind": "dbt"},\n        ins=ins,\n        out=outs,\n        required_resource_keys={dbt_resource_key},\n    )\n    def _dbt_op(context, config: DbtOpConfig):\n        dbt_resource: Union[DbtCliResource, DbtCliClient] = getattr(\n            context.resources, dbt_resource_key\n        )\n        check.inst(\n            dbt_resource,\n            (DbtCliResource, DbtCliClient),\n            "Resource with key 'dbt_resource_key' must be a DbtCliResource or DbtCliClient"\n            f" object, but is a {type(dbt_resource)}",\n        )\n\n        kwargs: Dict[str, Any] = {}\n        # in the case that we're running everything, opt for the cleaner selection string\n        if len(context.selected_output_names) == len(outs):\n            kwargs["select"] = select\n            kwargs["exclude"] = exclude\n        else:\n            # for each output that we want to emit, translate to a dbt select string 
by converting\n            # the out to its corresponding fqn\n            kwargs["select"] = [\n                ".".join(fqns_by_output_name[output_name])\n                for output_name in context.selected_output_names\n            ]\n        # variables to pass into the command\n        if partition_key_to_vars_fn:\n            kwargs["vars"] = partition_key_to_vars_fn(context.partition_key)\n        # merge in any additional kwargs from the config\n        kwargs = deep_merge_dicts(kwargs, context.op_config)\n\n        if _can_stream_events(dbt_resource):\n            yield from _stream_event_iterator(\n                context,\n                dbt_resource,\n                use_build_command,\n                node_info_to_asset_key,\n                runtime_metadata_fn,\n                kwargs,\n                manifest_json=manifest_json,\n            )\n        else:\n            if not isinstance(dbt_resource, DbtCliClient):\n                check.failed(\n                    "Chose batch event iterator, but it only works with DbtCliClient, and"\n                    f" resource has type {type(dbt_resource)}"\n                )\n            yield from _batch_event_iterator(\n                context,\n                dbt_resource,\n                use_build_command,\n                node_info_to_asset_key,\n                runtime_metadata_fn,\n                kwargs,\n            )\n\n    return _dbt_op\n\n\ndef _dbt_nodes_to_assets(\n    dbt_nodes: Mapping[str, Any],\n    select: str,\n    exclude: str,\n    selected_unique_ids: AbstractSet[str],\n    project_id: str,\n    dbt_resource_key: str,\n    manifest_json: Mapping[str, Any],\n    op_name: Optional[str],\n    runtime_metadata_fn: Optional[\n        Callable[[OpExecutionContext, Mapping[str, Any]], Mapping[str, RawMetadataValue]]\n    ],\n    io_manager_key: Optional[str],\n    use_build_command: bool,\n    partitions_def: Optional[PartitionsDefinition],\n    partition_key_to_vars_fn: Optional[Callable[[str], Mapping[str, Any]]],\n    dagster_dbt_translator: DagsterDbtTranslator,\n) -> AssetsDefinition:\n    if use_build_command:\n        deps = get_deps(\n            dbt_nodes,\n            selected_unique_ids,\n            asset_resource_types=["model", "seed", "snapshot"],\n        )\n    else:\n        deps = get_deps(dbt_nodes, selected_unique_ids, asset_resource_types=["model"])\n\n    (\n        asset_deps,\n        asset_ins,\n        asset_outs,\n        group_names_by_key,\n        freshness_policies_by_key,\n        auto_materialize_policies_by_key,\n        check_specs_by_output_name,\n        fqns_by_output_name,\n        _,\n    ) = get_asset_deps(\n        dbt_nodes=dbt_nodes,\n        deps=deps,\n        io_manager_key=io_manager_key,\n        manifest=manifest_json,\n        dagster_dbt_translator=dagster_dbt_translator,\n    )\n\n    # prevent op name collisions between multiple dbt multi-assets\n    if not op_name:\n        op_name = f"run_dbt_{project_id}"\n        if select != "fqn:*" or exclude:\n            op_name += "_" + hashlib.md5(select.encode() + exclude.encode()).hexdigest()[-5:]\n\n    check_outs_by_output_name: Mapping[str, Out] = {}\n    if check_specs_by_output_name:\n        check_outs_by_output_name = {\n            output_name: Out(dagster_type=None, is_required=False)\n            for output_name in check_specs_by_output_name.keys()\n        }\n\n    dbt_op = _get_dbt_op(\n        op_name=op_name,\n        ins=dict(asset_ins.values()),\n        outs={\n            
**dict(asset_outs.values()),\n            **check_outs_by_output_name,\n        },\n        select=select,\n        exclude=exclude,\n        use_build_command=use_build_command,\n        fqns_by_output_name=fqns_by_output_name,\n        dbt_resource_key=dbt_resource_key,\n        node_info_to_asset_key=dagster_dbt_translator.get_asset_key,\n        partition_key_to_vars_fn=partition_key_to_vars_fn,\n        runtime_metadata_fn=runtime_metadata_fn,\n        manifest_json=manifest_json,\n    )\n\n    return AssetsDefinition(\n        keys_by_input_name={\n            input_name: asset_key for asset_key, (input_name, _) in asset_ins.items()\n        },\n        keys_by_output_name={\n            output_name: asset_key for asset_key, (output_name, _) in asset_outs.items()\n        },\n        node_def=dbt_op,\n        can_subset=True,\n        asset_deps=asset_deps,\n        group_names_by_key=group_names_by_key,\n        freshness_policies_by_key=freshness_policies_by_key,\n        auto_materialize_policies_by_key=auto_materialize_policies_by_key,\n        check_specs_by_output_name=check_specs_by_output_name,\n        partitions_def=partitions_def,\n    )\n\n\n
[docs]def load_assets_from_dbt_project(\n project_dir: str,\n profiles_dir: Optional[str] = None,\n *,\n select: Optional[str] = None,\n exclude: Optional[str] = None,\n dagster_dbt_translator: Optional[DagsterDbtTranslator] = None,\n io_manager_key: Optional[str] = None,\n target_dir: Optional[str] = None,\n # All arguments below are deprecated\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n source_key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n op_name: Optional[str] = None,\n runtime_metadata_fn: Optional[\n Callable[[OpExecutionContext, Mapping[str, Any]], Mapping[str, Any]]\n ] = None,\n node_info_to_asset_key: Callable[[Mapping[str, Any]], AssetKey] = default_asset_key_fn,\n use_build_command: bool = True,\n partitions_def: Optional[PartitionsDefinition] = None,\n partition_key_to_vars_fn: Optional[Callable[[str], Mapping[str, Any]]] = None,\n node_info_to_group_fn: Callable[\n [Mapping[str, Any]], Optional[str]\n ] = default_group_from_dbt_resource_props,\n node_info_to_freshness_policy_fn: Callable[\n [Mapping[str, Any]], Optional[FreshnessPolicy]\n ] = default_freshness_policy_fn,\n node_info_to_auto_materialize_policy_fn: Callable[\n [Mapping[str, Any]], Optional[AutoMaterializePolicy]\n ] = default_auto_materialize_policy_fn,\n node_info_to_definition_metadata_fn: Callable[\n [Mapping[str, Any]], Mapping[str, MetadataUserInput]\n ] = default_metadata_from_dbt_resource_props,\n display_raw_sql: Optional[bool] = None,\n dbt_resource_key: str = "dbt",\n) -> Sequence[AssetsDefinition]:\n """Loads a set of dbt models from a dbt project into Dagster assets.\n\n Creates one Dagster asset for each dbt model. All assets will be re-materialized using a single\n `dbt run` or `dbt build` command.\n\n When searching for more flexibility in defining the computations that materialize your\n dbt assets, we recommend that you use :py:class:`~dagster_dbt.dbt_assets`.\n\n Args:\n project_dir (Optional[str]): The directory containing the dbt project to load.\n profiles_dir (Optional[str]): The profiles directory to use for loading the DBT project.\n Defaults to a directory called "config" inside the project_dir.\n target_dir (Optional[str]): The target directory where dbt will place compiled artifacts.\n Defaults to "target" underneath the project_dir.\n select (Optional[str]): A dbt selection string for the models in a project that you want\n to include. Defaults to `"fqn:*"`.\n exclude (Optional[str]): A dbt selection string for the models in a project that you want\n to exclude. Defaults to "".\n dagster_dbt_translator (Optional[DagsterDbtTranslator]): Allows customizing how to map\n dbt models, seeds, etc. to asset keys and asset metadata.\n key_prefix (Optional[Union[str, List[str]]]): [Deprecated] A key prefix to apply to all assets loaded\n from the dbt project. Does not apply to input assets. Deprecated: use\n dagster_dbt_translator=KeyPrefixDagsterDbtTranslator(key_prefix=...) instead.\n source_key_prefix (Optional[Union[str, List[str]]]): [Deprecated] A key prefix to apply to all input\n assets for the set of assets loaded from the dbt project. Deprecated: use\n dagster_dbt_translator=KeyPrefixDagsterDbtTranslator(source_key_prefix=...) instead.\n op_name (Optional[str]): [Deprecated] Sets the name of the underlying Op that will generate the dbt assets.\n Deprecated: use the `@dbt_assets` decorator if you need to customize the op name.\n dbt_resource_key (Optional[str]): [Deprecated] The resource key that the dbt resource will be specified at.\n Defaults to "dbt". 
Deprecated: use the `@dbt_assets` decorator if you need to customize\n the resource key.\n runtime_metadata_fn (Optional[Callable[[OpExecutionContext, Mapping[str, Any]], Mapping[str, Any]]]): [Deprecated]\n A function that will be run after any of the assets are materialized and returns\n metadata entries for the asset, to be displayed in the asset catalog for that run.\n Deprecated: use the @dbt_assets decorator if you need to customize runtime metadata.\n manifest_json (Optional[Mapping[str, Any]]): [Deprecated] Use the manifest argument instead.\n selected_unique_ids (Optional[Set[str]]): [Deprecated] The set of dbt unique_ids that you want to load\n as assets. Deprecated: use the select argument instead.\n node_info_to_asset_key (Mapping[str, Any] -> AssetKey): [Deprecated] A function that takes a dictionary\n of dbt node info and returns the AssetKey that you want to represent that node. By\n default, the asset key will simply be the name of the dbt model. Deprecated: instead,\n provide a custom DagsterDbtTranslator that overrides node_info_to_asset_key.\n use_build_command (bool): Flag indicating if you want to use `dbt build` as the core computation\n for this asset. Defaults to True. If set to False, then `dbt run` will be used, and\n seeds and snapshots won't be loaded as assets.\n partitions_def (Optional[PartitionsDefinition]): [Deprecated] Defines the set of partition keys that\n compose the dbt assets. Deprecated: use the @dbt_assets decorator to define partitioned\n dbt assets.\n partition_key_to_vars_fn (Optional[str -> Dict[str, Any]]): [Deprecated] A function to translate a given\n partition key (e.g. '2022-01-01') to a dictionary of vars to be passed into the dbt\n invocation (e.g. {"run_date": "2022-01-01"}). Deprecated: use the @dbt_assets decorator\n to define partitioned dbt assets.\n node_info_to_group_fn (Dict[str, Any] -> Optional[str]): [Deprecated] A function that takes a\n dictionary of dbt node info and returns the group that this node should be assigned to.\n Deprecated: instead, configure dagster groups on a dbt resource's meta field or assign\n dbt groups.\n node_info_to_freshness_policy_fn (Dict[str, Any] -> Optional[FreshnessPolicy]): [Deprecated] A function\n that takes a dictionary of dbt node info and optionally returns a FreshnessPolicy that\n should be applied to this node. By default, freshness policies will be created from\n config applied to dbt models, i.e.:\n `dagster_freshness_policy={"maximum_lag_minutes": 60, "cron_schedule": "0 9 * * *"}`\n will result in that model being assigned\n `FreshnessPolicy(maximum_lag_minutes=60, cron_schedule="0 9 * * *")`. Deprecated:\n instead, configure auto-materialize policies on a dbt resource's meta field.\n node_info_to_auto_materialize_policy_fn (Dict[str, Any] -> Optional[AutoMaterializePolicy]): [Deprecated]\n A function that takes a dictionary of dbt node info and optionally returns a AutoMaterializePolicy\n that should be applied to this node. By default, AutoMaterializePolicies will be created from\n config applied to dbt models, i.e.:\n `dagster_auto_materialize_policy={"type": "lazy"}` will result in that model being assigned\n `AutoMaterializePolicy.lazy()`. 
Deprecated: instead, configure auto-materialize\n policies on a dbt resource's meta field.\n node_info_to_definition_metadata_fn (Dict[str, Any] -> Optional[Dict[str, MetadataUserInput]]): [Deprecated]\n A function that takes a dictionary of dbt node info and optionally returns a dictionary\n of metadata to be attached to the corresponding definition. This is added to the default\n metadata assigned to the node, which consists of the node's schema (if present).\n Deprecated: instead, provide a custom DagsterDbtTranslator that overrides\n node_info_to_metadata.\n display_raw_sql (Optional[bool]): [Deprecated] A flag to indicate if the raw sql associated\n with each model should be included in the asset description. For large projects, setting\n this flag to False is advised to reduce the size of the resulting snapshot. Deprecated:\n instead, provide a custom DagsterDbtTranslator that overrides node_info_to_description.\n """\n project_dir = check.str_param(project_dir, "project_dir")\n profiles_dir = check.opt_str_param(\n profiles_dir, "profiles_dir", os.path.join(project_dir, "config")\n )\n target_dir = check.opt_str_param(target_dir, "target_dir", os.path.join(project_dir, "target"))\n select = check.opt_str_param(select, "select", "fqn:*")\n exclude = check.opt_str_param(exclude, "exclude", "")\n\n _raise_warnings_for_deprecated_args(\n "load_assets_from_dbt_manifest",\n selected_unique_ids=None,\n dbt_resource_key=dbt_resource_key,\n use_build_command=use_build_command,\n partitions_def=partitions_def,\n partition_key_to_vars_fn=partition_key_to_vars_fn,\n runtime_metadata_fn=runtime_metadata_fn,\n node_info_to_asset_key=node_info_to_asset_key,\n node_info_to_group_fn=node_info_to_group_fn,\n node_info_to_freshness_policy_fn=node_info_to_freshness_policy_fn,\n node_info_to_auto_materialize_policy_fn=node_info_to_auto_materialize_policy_fn,\n node_info_to_definition_metadata_fn=node_info_to_definition_metadata_fn,\n )\n\n manifest, cli_output = _load_manifest_for_project(\n project_dir, profiles_dir, target_dir, select, exclude\n )\n selected_unique_ids: Set[str] = set(\n filter(None, (line.get("unique_id") for line in cli_output.logs))\n )\n return _load_assets_from_dbt_manifest(\n manifest=manifest,\n select=select,\n exclude=exclude,\n key_prefix=key_prefix,\n source_key_prefix=source_key_prefix,\n dagster_dbt_translator=dagster_dbt_translator,\n op_name=op_name,\n runtime_metadata_fn=runtime_metadata_fn,\n io_manager_key=io_manager_key,\n selected_unique_ids=selected_unique_ids,\n node_info_to_asset_key=node_info_to_asset_key,\n use_build_command=use_build_command,\n partitions_def=partitions_def,\n partition_key_to_vars_fn=partition_key_to_vars_fn,\n node_info_to_auto_materialize_policy_fn=node_info_to_auto_materialize_policy_fn,\n node_info_to_group_fn=node_info_to_group_fn,\n node_info_to_freshness_policy_fn=node_info_to_freshness_policy_fn,\n node_info_to_definition_metadata_fn=node_info_to_definition_metadata_fn,\n display_raw_sql=display_raw_sql,\n dbt_resource_key=dbt_resource_key,\n )
\n\n\n
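The docstring above covers the deprecated arguments at length but never shows a basic invocation. A minimal usage sketch, assuming a hypothetical dbt project checked out at my_dbt_project/ (with profiles.yml under my_dbt_project/config/, matching the documented default) and assuming the package's DbtCliResource can serve as the "dbt" resource the generated assets expect:

from dagster import Definitions
from dagster_dbt import DbtCliResource, load_assets_from_dbt_project

# Hypothetical project location; profiles_dir and target_dir default to
# "config" and "target" inside project_dir, as documented above.
DBT_PROJECT_DIR = "my_dbt_project"

dbt_assets = load_assets_from_dbt_project(
    project_dir=DBT_PROJECT_DIR,
    select="fqn:*",  # the default selection: every selectable resource in the project
)

defs = Definitions(
    assets=dbt_assets,
    resources={"dbt": DbtCliResource(project_dir=DBT_PROJECT_DIR)},
)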
[docs]@deprecated_param(\n param="manifest_json", breaking_version="0.21", additional_warn_text="Use manifest instead"\n)\n@deprecated_param(\n param="selected_unique_ids",\n breaking_version="0.21",\n additional_warn_text="Use the select parameter instead.",\n)\n@deprecated_param(\n param="dbt_resource_key",\n breaking_version="0.21",\n additional_warn_text=(\n "Use the `@dbt_assets` decorator if you need to customize your resource key."\n ),\n)\n@deprecated_param(\n param="use_build_command",\n breaking_version="0.21",\n additional_warn_text=(\n "Use the `@dbt_assets` decorator if you need to customize the underlying dbt commands."\n ),\n)\n@deprecated_param(\n param="partitions_def",\n breaking_version="0.21",\n additional_warn_text="Use the `@dbt_assets` decorator to define partitioned dbt assets.",\n)\n@deprecated_param(\n param="partition_key_to_vars_fn",\n breaking_version="0.21",\n additional_warn_text="Use the `@dbt_assets` decorator to define partitioned dbt assets.",\n)\n@deprecated_param(\n param="runtime_metadata_fn",\n breaking_version="0.21",\n additional_warn_text=(\n "Use the `@dbt_assets` decorator if you need to customize runtime metadata."\n ),\n)\ndef load_assets_from_dbt_manifest(\n manifest: Optional[Union[Path, Mapping[str, Any]]] = None,\n *,\n select: Optional[str] = None,\n exclude: Optional[str] = None,\n io_manager_key: Optional[str] = None,\n dagster_dbt_translator: Optional[DagsterDbtTranslator] = None,\n # All arguments below are deprecated\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n source_key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n selected_unique_ids: Optional[AbstractSet[str]] = None,\n display_raw_sql: Optional[bool] = None,\n dbt_resource_key: str = "dbt",\n op_name: Optional[str] = None,\n manifest_json: Optional[Mapping[str, Any]] = None,\n use_build_command: bool = True,\n partitions_def: Optional[PartitionsDefinition] = None,\n partition_key_to_vars_fn: Optional[Callable[[str], Mapping[str, Any]]] = None,\n runtime_metadata_fn: Optional[\n Callable[[OpExecutionContext, Mapping[str, Any]], Mapping[str, Any]]\n ] = None,\n node_info_to_asset_key: Callable[[Mapping[str, Any]], AssetKey] = default_asset_key_fn,\n node_info_to_group_fn: Callable[\n [Mapping[str, Any]], Optional[str]\n ] = default_group_from_dbt_resource_props,\n node_info_to_freshness_policy_fn: Callable[\n [Mapping[str, Any]], Optional[FreshnessPolicy]\n ] = default_freshness_policy_fn,\n node_info_to_auto_materialize_policy_fn: Callable[\n [Mapping[str, Any]], Optional[AutoMaterializePolicy]\n ] = default_auto_materialize_policy_fn,\n node_info_to_definition_metadata_fn: Callable[\n [Mapping[str, Any]], Mapping[str, MetadataUserInput]\n ] = default_metadata_from_dbt_resource_props,\n) -> Sequence[AssetsDefinition]:\n """Loads a set of dbt models, described in a manifest.json, into Dagster assets.\n\n Creates one Dagster asset for each dbt model. All assets will be re-materialized using a single\n `dbt run` command.\n\n When searching for more flexibility in defining the computations that materialize your\n dbt assets, we recommend that you use :py:class:`~dagster_dbt.dbt_assets`.\n\n Args:\n manifest (Optional[Mapping[str, Any]]): The contents of a DBT manifest.json, which contains\n a set of models to load into assets.\n select (Optional[str]): A dbt selection string for the models in a project that you want\n to include. Defaults to `"fqn:*"`.\n exclude (Optional[str]): A dbt selection string for the models in a project that you want\n to exclude. 
Defaults to "".\n io_manager_key (Optional[str]): The IO manager key that will be set on each of the returned\n assets. When other ops are downstream of the loaded assets, the IOManager specified\n here determines how the inputs to those ops are loaded. Defaults to "io_manager".\n dagster_dbt_translator (Optional[DagsterDbtTranslator]): Allows customizing how to map\n dbt models, seeds, etc. to asset keys and asset metadata.\n key_prefix (Optional[Union[str, List[str]]]): [Deprecated] A key prefix to apply to all assets loaded\n from the dbt project. Does not apply to input assets. Deprecated: use\n dagster_dbt_translator=KeyPrefixDagsterDbtTranslator(key_prefix=...) instead.\n source_key_prefix (Optional[Union[str, List[str]]]): [Deprecated] A key prefix to apply to all input\n assets for the set of assets loaded from the dbt project. Deprecated: use\n dagster_dbt_translator=KeyPrefixDagsterDbtTranslator(source_key_prefix=...) instead.\n op_name (Optional[str]): [Deprecated] Sets the name of the underlying Op that will generate the dbt assets.\n Deprecated: use the `@dbt_assets` decorator if you need to customize the op name.\n dbt_resource_key (Optional[str]): [Deprecated] The resource key that the dbt resource will be specified at.\n Defaults to "dbt". Deprecated: use the `@dbt_assets` decorator if you need to customize\n the resource key.\n runtime_metadata_fn (Optional[Callable[[OpExecutionContext, Mapping[str, Any]], Mapping[str, Any]]]): [Deprecated]\n A function that will be run after any of the assets are materialized and returns\n metadata entries for the asset, to be displayed in the asset catalog for that run.\n Deprecated: use the @dbt_assets decorator if you need to customize runtime metadata.\n selected_unique_ids (Optional[Set[str]]): [Deprecated] The set of dbt unique_ids that you want to load\n as assets. Deprecated: use the select argument instead.\n node_info_to_asset_key (Mapping[str, Any] -> AssetKey): [Deprecated] A function that takes a dictionary\n of dbt node info and returns the AssetKey that you want to represent that node. By\n default, the asset key will simply be the name of the dbt model.\n use_build_command (bool): Flag indicating if you want to use `dbt build` as the core computation\n for this asset. Defaults to True. If set to False, then `dbt run` will be used, and\n seeds and snapshots won't be loaded as assets.\n partitions_def (Optional[PartitionsDefinition]): [Deprecated] Defines the set of partition keys that\n compose the dbt assets. Deprecated: use the @dbt_assets decorator to define partitioned\n dbt assets.\n partition_key_to_vars_fn (Optional[str -> Dict[str, Any]]): [Deprecated] A function to translate a given\n partition key (e.g. '2022-01-01') to a dictionary of vars to be passed into the dbt\n invocation (e.g. {"run_date": "2022-01-01"}). Deprecated: use the @dbt_assets decorator\n to define partitioned dbt assets.\n node_info_to_group_fn (Dict[str, Any] -> Optional[str]): [Deprecated] A function that takes a\n dictionary of dbt node info and returns the group that this node should be assigned to.\n Deprecated: instead, configure dagster groups on a dbt resource's meta field or assign\n dbt groups.\n node_info_to_freshness_policy_fn (Dict[str, Any] -> Optional[FreshnessPolicy]): [Deprecated] A function\n that takes a dictionary of dbt node info and optionally returns a FreshnessPolicy that\n should be applied to this node. 
By default, freshness policies will be created from\n config applied to dbt models, i.e.:\n `dagster_freshness_policy={"maximum_lag_minutes": 60, "cron_schedule": "0 9 * * *"}`\n will result in that model being assigned\n `FreshnessPolicy(maximum_lag_minutes=60, cron_schedule="0 9 * * *")`. Deprecated:\n instead, configure auto-materialize policies on a dbt resource's meta field.\n node_info_to_auto_materialize_policy_fn (Dict[str, Any] -> Optional[AutoMaterializePolicy]): [Deprecated]\n A function that takes a dictionary of dbt node info and optionally returns a AutoMaterializePolicy\n that should be applied to this node. By default, AutoMaterializePolicies will be created from\n config applied to dbt models, i.e.:\n `dagster_auto_materialize_policy={"type": "lazy"}` will result in that model being assigned\n `AutoMaterializePolicy.lazy()`. Deprecated: instead, configure auto-materialize\n policies on a dbt resource's meta field.\n node_info_to_definition_metadata_fn (Dict[str, Any] -> Optional[Dict[str, MetadataUserInput]]): [Deprecated]\n A function that takes a dictionary of dbt node info and optionally returns a dictionary\n of metadata to be attached to the corresponding definition. This is added to the default\n metadata assigned to the node, which consists of the node's schema (if present).\n Deprecated: instead, provide a custom DagsterDbtTranslator that overrides\n node_info_to_metadata.\n display_raw_sql (Optional[bool]): [Deprecated] A flag to indicate if the raw sql associated\n with each model should be included in the asset description. For large projects, setting\n this flag to False is advised to reduce the size of the resulting snapshot. Deprecated:\n instead, provide a custom DagsterDbtTranslator that overrides node_info_to_description.\n """\n manifest = normalize_renamed_param(\n manifest,\n "manifest",\n manifest_json,\n "manifest_json",\n )\n manifest = cast(\n Union[Mapping[str, Any], Path], check.inst_param(manifest, "manifest", (Path, dict))\n )\n if isinstance(manifest, Path):\n manifest = cast(Mapping[str, Any], json.loads(manifest.read_bytes()))\n\n _raise_warnings_for_deprecated_args(\n "load_assets_from_dbt_manifest",\n selected_unique_ids=selected_unique_ids,\n dbt_resource_key=dbt_resource_key,\n use_build_command=use_build_command,\n partitions_def=partitions_def,\n partition_key_to_vars_fn=partition_key_to_vars_fn,\n runtime_metadata_fn=runtime_metadata_fn,\n node_info_to_asset_key=node_info_to_asset_key,\n node_info_to_group_fn=node_info_to_group_fn,\n node_info_to_freshness_policy_fn=node_info_to_freshness_policy_fn,\n node_info_to_auto_materialize_policy_fn=node_info_to_auto_materialize_policy_fn,\n node_info_to_definition_metadata_fn=node_info_to_definition_metadata_fn,\n )\n\n return _load_assets_from_dbt_manifest(\n manifest=manifest,\n select=select,\n exclude=exclude,\n io_manager_key=io_manager_key,\n dagster_dbt_translator=dagster_dbt_translator,\n key_prefix=key_prefix,\n source_key_prefix=source_key_prefix,\n selected_unique_ids=selected_unique_ids,\n display_raw_sql=display_raw_sql,\n dbt_resource_key=dbt_resource_key,\n op_name=op_name,\n use_build_command=use_build_command,\n partitions_def=partitions_def,\n partition_key_to_vars_fn=partition_key_to_vars_fn,\n runtime_metadata_fn=runtime_metadata_fn,\n node_info_to_asset_key=node_info_to_asset_key,\n node_info_to_group_fn=node_info_to_group_fn,\n node_info_to_freshness_policy_fn=node_info_to_freshness_policy_fn,\n 
node_info_to_auto_materialize_policy_fn=node_info_to_auto_materialize_policy_fn,\n node_info_to_definition_metadata_fn=node_info_to_definition_metadata_fn,\n )
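A comparable minimal sketch for the manifest-based loader above, assuming a hypothetical manifest at my_dbt_project/target/manifest.json produced by `dbt compile` or `dbt parse`; per the signature, `manifest` accepts either a Path or an already-parsed dict:

from pathlib import Path

from dagster_dbt import load_assets_from_dbt_manifest

# Hypothetical manifest path and selection strings (ordinary dbt selectors).
dbt_assets = load_assets_from_dbt_manifest(
    manifest=Path("my_dbt_project/target/manifest.json"),
    select="tag:daily",        # include only models tagged "daily"
    exclude="tag:deprecated",  # and skip anything tagged "deprecated"
)

As with the project-based loader, the resulting assets are materialized through the resource bound to the default "dbt" key.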
\n\n\ndef _load_assets_from_dbt_manifest(\n manifest: Mapping[str, Any],\n select: Optional[str],\n exclude: Optional[str],\n io_manager_key: Optional[str],\n dagster_dbt_translator: Optional[DagsterDbtTranslator],\n key_prefix: Optional[CoercibleToAssetKeyPrefix],\n source_key_prefix: Optional[CoercibleToAssetKeyPrefix],\n selected_unique_ids: Optional[AbstractSet[str]],\n display_raw_sql: Optional[bool],\n dbt_resource_key: str,\n op_name: Optional[str],\n use_build_command: bool,\n partitions_def: Optional[PartitionsDefinition],\n partition_key_to_vars_fn: Optional[Callable[[str], Mapping[str, Any]]],\n runtime_metadata_fn: Optional[\n Callable[[OpExecutionContext, Mapping[str, Any]], Mapping[str, Any]]\n ],\n node_info_to_asset_key: Callable[[Mapping[str, Any]], AssetKey],\n node_info_to_group_fn: Callable[[Mapping[str, Any]], Optional[str]],\n node_info_to_freshness_policy_fn: Callable[[Mapping[str, Any]], Optional[FreshnessPolicy]],\n node_info_to_auto_materialize_policy_fn: Callable[\n [Mapping[str, Any]], Optional[AutoMaterializePolicy]\n ],\n node_info_to_definition_metadata_fn: Callable[\n [Mapping[str, Any]], Mapping[str, MetadataUserInput]\n ],\n) -> Sequence[AssetsDefinition]:\n if partition_key_to_vars_fn:\n check.invariant(\n partitions_def is not None,\n "Cannot supply a `partition_key_to_vars_fn` without a `partitions_def`.",\n )\n\n dbt_resource_key = check.str_param(dbt_resource_key, "dbt_resource_key")\n\n dbt_nodes = {\n **manifest["nodes"],\n **manifest["sources"],\n **manifest["metrics"],\n **manifest["exposures"],\n }\n\n if selected_unique_ids:\n select = (\n " ".join(".".join(dbt_nodes[uid]["fqn"]) for uid in selected_unique_ids)\n if select is None\n else select\n )\n exclude = "" if exclude is None else exclude\n else:\n select = select if select is not None else "fqn:*"\n exclude = exclude if exclude is not None else ""\n\n selected_unique_ids = select_unique_ids_from_manifest(\n select=select, exclude=exclude, manifest_json=manifest\n )\n if len(selected_unique_ids) == 0:\n raise DagsterInvalidSubsetError(f"No dbt models match the selection string '{select}'.")\n\n if dagster_dbt_translator is not None:\n check.invariant(\n node_info_to_asset_key == default_asset_key_fn,\n "Can't specify both dagster_dbt_translator and node_info_to_asset_key",\n )\n check.invariant(\n key_prefix is None,\n "Can't specify both dagster_dbt_translator and key_prefix",\n )\n check.invariant(\n source_key_prefix is None,\n "Can't specify both dagster_dbt_translator and source_key_prefix",\n )\n check.invariant(\n node_info_to_group_fn == default_group_from_dbt_resource_props,\n "Can't specify both dagster_dbt_translator and node_info_to_group_fn",\n )\n check.invariant(\n display_raw_sql is None,\n "Can't specify both dagster_dbt_translator and display_raw_sql",\n )\n check.invariant(\n node_info_to_definition_metadata_fn is default_metadata_from_dbt_resource_props,\n "Can't specify both dagster_dbt_translator and node_info_to_definition_metadata_fn",\n )\n else:\n\n class CustomDagsterDbtTranslator(DagsterDbtTranslator):\n @classmethod\n def get_asset_key(cls, dbt_resource_props):\n base_key = node_info_to_asset_key(dbt_resource_props)\n if dbt_resource_props["resource_type"] == "source":\n return base_key.with_prefix(source_key_prefix or [])\n else:\n return base_key.with_prefix(key_prefix or [])\n\n @classmethod\n def get_metadata(cls, dbt_resource_props):\n return node_info_to_definition_metadata_fn(dbt_resource_props)\n\n @classmethod\n def get_description(cls, 
dbt_resource_props):\n return default_description_fn(\n dbt_resource_props,\n display_raw_sql=display_raw_sql if display_raw_sql is not None else True,\n )\n\n @classmethod\n def get_group_name(cls, dbt_resource_props):\n return node_info_to_group_fn(dbt_resource_props)\n\n @classmethod\n def get_freshness_policy(\n cls, dbt_resource_props: Mapping[str, Any]\n ) -> Optional[FreshnessPolicy]:\n return node_info_to_freshness_policy_fn(dbt_resource_props)\n\n @classmethod\n def get_auto_materialize_policy(\n cls, dbt_resource_props: Mapping[str, Any]\n ) -> Optional[AutoMaterializePolicy]:\n return node_info_to_auto_materialize_policy_fn(dbt_resource_props)\n\n dagster_dbt_translator = CustomDagsterDbtTranslator()\n\n dbt_assets_def = _dbt_nodes_to_assets(\n dbt_nodes,\n runtime_metadata_fn=runtime_metadata_fn,\n io_manager_key=io_manager_key,\n select=select,\n exclude=exclude,\n selected_unique_ids=selected_unique_ids,\n dbt_resource_key=dbt_resource_key,\n op_name=op_name,\n project_id=manifest["metadata"]["project_id"][:5],\n use_build_command=use_build_command,\n partitions_def=partitions_def,\n partition_key_to_vars_fn=partition_key_to_vars_fn,\n dagster_dbt_translator=dagster_dbt_translator,\n manifest_json=manifest,\n )\n\n return [dbt_assets_def]\n\n\ndef _raise_warnings_for_deprecated_args(\n public_fn_name: str,\n selected_unique_ids: Optional[AbstractSet[str]],\n dbt_resource_key: Optional[str],\n use_build_command: Optional[bool],\n partitions_def: Optional[PartitionsDefinition],\n partition_key_to_vars_fn: Optional[Callable[[str], Mapping[str, Any]]],\n runtime_metadata_fn: Optional[\n Callable[[OpExecutionContext, Mapping[str, Any]], Mapping[str, Any]]\n ],\n node_info_to_asset_key: Callable[[Mapping[str, Any]], AssetKey],\n node_info_to_group_fn: Callable[[Mapping[str, Any]], Optional[str]],\n node_info_to_freshness_policy_fn: Callable[[Mapping[str, Any]], Optional[FreshnessPolicy]],\n node_info_to_auto_materialize_policy_fn: Callable[\n [Mapping[str, Any]], Optional[AutoMaterializePolicy]\n ],\n node_info_to_definition_metadata_fn: Callable[\n [Mapping[str, Any]], Mapping[str, MetadataUserInput]\n ],\n):\n if node_info_to_asset_key != default_asset_key_fn:\n deprecation_warning(\n f"The node_info_to_asset_key_fn arg of {public_fn_name}",\n "0.21",\n "Instead, provide a custom DagsterDbtTranslator that overrides get_asset_key.",\n stacklevel=4,\n )\n\n if node_info_to_group_fn != default_group_from_dbt_resource_props:\n deprecation_warning(\n f"The node_info_to_group_fn arg of {public_fn_name}",\n "0.21",\n "Instead, configure dagster groups on a dbt resource's meta field or assign dbt"\n " groups or provide a custom DagsterDbtTranslator that overrides get_group_name.",\n stacklevel=4,\n )\n\n if node_info_to_auto_materialize_policy_fn != default_auto_materialize_policy_fn:\n deprecation_warning(\n f"The node_info_to_auto_materialize_policy_fn arg of {public_fn_name}",\n "0.21",\n "Instead, configure Dagster auto-materialize policies on a dbt resource's meta field.",\n stacklevel=4,\n )\n\n if node_info_to_freshness_policy_fn != default_freshness_policy_fn:\n deprecation_warning(\n f"The node_info_to_freshness_policy_fn arg of {public_fn_name}",\n "0.21",\n "Instead, configure Dagster freshness policies on a dbt resource's meta field.",\n stacklevel=4,\n )\n\n if node_info_to_definition_metadata_fn != default_metadata_from_dbt_resource_props:\n deprecation_warning(\n f"The node_info_to_definition_metadata_fn arg of {public_fn_name}",\n "0.21",\n "Instead, provide a custom 
DagsterDbtTranslator that overrides get_metadata.",\n stacklevel=4,\n )\n
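The deprecation warnings above all point to DagsterDbtTranslator overrides as the replacement for the node_info_to_* callables. A minimal sketch of that pattern, following the classmethod style of the translator built internally above; the path, key prefix, and grouping scheme are hypothetical:

from pathlib import Path

from dagster_dbt import DagsterDbtTranslator, load_assets_from_dbt_manifest


class MyDbtTranslator(DagsterDbtTranslator):
    @classmethod
    def get_asset_key(cls, dbt_resource_props):
        # Prefix every dbt asset key with "warehouse" (hypothetical convention).
        return super().get_asset_key(dbt_resource_props).with_prefix("warehouse")

    @classmethod
    def get_group_name(cls, dbt_resource_props):
        # Group each resource by its dbt package (the first fqn component).
        return dbt_resource_props["fqn"][0]


dbt_assets = load_assets_from_dbt_manifest(
    manifest=Path("my_dbt_project/target/manifest.json"),  # hypothetical path
    dagster_dbt_translator=MyDbtTranslator(),
)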
", "current_page_name": "_modules/dagster_dbt/asset_defs", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dbt.asset_defs"}, "asset_utils": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dbt.asset_utils

\nimport hashlib\nimport textwrap\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Dict,\n    FrozenSet,\n    List,\n    Mapping,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n    cast,\n)\n\nfrom dagster import (\n    AssetCheckSpec,\n    AssetKey,\n    AssetsDefinition,\n    AssetSelection,\n    AutoMaterializePolicy,\n    DagsterInvariantViolationError,\n    FreshnessPolicy,\n    In,\n    MetadataValue,\n    Nothing,\n    Out,\n    RunConfig,\n    ScheduleDefinition,\n    TableColumn,\n    TableSchema,\n    _check as check,\n    define_asset_job,\n)\nfrom dagster._core.definitions.decorators.asset_decorator import (\n    _validate_and_assign_output_names_to_check_specs,\n)\nfrom dagster._utils.merger import merge_dicts\nfrom dagster._utils.warnings import deprecation_warning\n\nfrom .utils import input_name_fn, output_name_fn\n\nif TYPE_CHECKING:\n    from .dagster_dbt_translator import (\n        DagsterDbtTranslator,\n        DagsterDbtTranslatorSettings,\n        DbtManifestWrapper,\n    )\n\nMANIFEST_METADATA_KEY = "dagster_dbt/manifest"\nDAGSTER_DBT_TRANSLATOR_METADATA_KEY = "dagster_dbt/dagster_dbt_translator"\n\n\n
[docs]def get_asset_key_for_model(dbt_assets: Sequence[AssetsDefinition], model_name: str) -> AssetKey:\n """Return the corresponding Dagster asset key for a dbt model.\n\n Args:\n dbt_assets (AssetsDefinition): An AssetsDefinition object produced by\n load_assets_from_dbt_project, load_assets_from_dbt_manifest, or @dbt_assets.\n model_name (str): The name of the dbt model.\n\n Returns:\n AssetKey: The corresponding Dagster asset key.\n\n Examples:\n .. code-block:: python\n\n from dagster import asset\n from dagster_dbt import dbt_assets, get_asset_key_for_model\n\n @dbt_assets(manifest=...)\n def all_dbt_assets():\n ...\n\n\n @asset(deps={get_asset_key_for_model([all_dbt_assets], "customers")})\n def cleaned_customers():\n ...\n """\n check.sequence_param(dbt_assets, "dbt_assets", of_type=AssetsDefinition)\n check.str_param(model_name, "model_name")\n\n manifest, dagster_dbt_translator = get_manifest_and_translator_from_dbt_assets(dbt_assets)\n\n matching_models = [\n value\n for value in manifest["nodes"].values()\n if value["name"] == model_name and value["resource_type"] == "model"\n ]\n\n if len(matching_models) == 0:\n raise KeyError(f"Could not find a dbt model with name: {model_name}")\n\n return dagster_dbt_translator.get_asset_key(next(iter(matching_models)))
\n\n\n
[docs]def get_asset_keys_by_output_name_for_source(\n dbt_assets: Sequence[AssetsDefinition], source_name: str\n) -> Mapping[str, AssetKey]:\n """Returns the corresponding Dagster asset keys for all tables in a dbt source.\n\n This is a convenience method that makes it easy to define a multi-asset that generates\n all the tables for a given dbt source.\n\n Args:\n source_name (str): The name of the dbt source.\n\n Returns:\n Mapping[str, AssetKey]: A mapping of the table name to corresponding Dagster asset key\n for all tables in the given dbt source.\n\n Examples:\n .. code-block:: python\n\n from dagster import AssetOut, multi_asset\n from dagster_dbt import dbt_assets, get_asset_keys_by_output_name_for_source\n\n @dbt_assets(manifest=...)\n def all_dbt_assets():\n ...\n\n @multi_asset(\n outs={\n name: AssetOut(key=asset_key)\n for name, asset_key in get_asset_keys_by_output_name_for_source(\n [all_dbt_assets], "raw_data"\n ).items()\n },\n )\n def upstream_python_asset():\n ...\n\n """\n check.sequence_param(dbt_assets, "dbt_assets", of_type=AssetsDefinition)\n check.str_param(source_name, "source_name")\n\n manifest, dagster_dbt_translator = get_manifest_and_translator_from_dbt_assets(dbt_assets)\n\n matching_nodes = [\n value for value in manifest["sources"].values() if value["source_name"] == source_name\n ]\n\n if len(matching_nodes) == 0:\n raise KeyError(f"Could not find a dbt source with name: {source_name}")\n\n return {\n output_name_fn(value): dagster_dbt_translator.get_asset_key(value)\n for value in matching_nodes\n }
\n\n\n
[docs]def get_asset_key_for_source(dbt_assets: Sequence[AssetsDefinition], source_name: str) -> AssetKey:\n    """Returns the corresponding Dagster asset key for a dbt source with a single table.\n\n    Args:\n        source_name (str): The name of the dbt source.\n\n    Raises:\n        KeyError: If the source has more than one table.\n\n    Returns:\n        AssetKey: The corresponding Dagster asset key.\n\n    Examples:\n        .. code-block:: python\n\n            from dagster import asset\n            from dagster_dbt import dbt_assets, get_asset_key_for_source\n\n            @dbt_assets(manifest=...)\n            def all_dbt_assets():\n                ...\n\n            @asset(key=get_asset_key_for_source([all_dbt_assets], "my_source"))\n            def upstream_python_asset():\n                ...\n    """\n    asset_keys_by_output_name = get_asset_keys_by_output_name_for_source(dbt_assets, source_name)\n\n    if len(asset_keys_by_output_name) > 1:\n        raise KeyError(\n            f"Source {source_name} has more than one table:"\n            f" {asset_keys_by_output_name.values()}. Use"\n            " `get_asset_keys_by_output_name_for_source` instead to get all tables for a"\n            " source."\n        )\n\n    return next(iter(asset_keys_by_output_name.values()))
\n\n\n
[docs]def build_dbt_asset_selection(\n dbt_assets: Sequence[AssetsDefinition],\n dbt_select: str = "fqn:*",\n dbt_exclude: Optional[str] = None,\n) -> AssetSelection:\n """Build an asset selection for a dbt selection string.\n\n See https://docs.getdbt.com/reference/node-selection/syntax#how-does-selection-work for\n more information.\n\n Args:\n dbt_select (str): A dbt selection string to specify a set of dbt resources.\n dbt_exclude (Optional[str]): A dbt selection string to exclude a set of dbt resources.\n\n Returns:\n AssetSelection: An asset selection for the selected dbt nodes.\n\n Examples:\n .. code-block:: python\n\n from dagster_dbt import dbt_assets, build_dbt_asset_selection\n\n @dbt_assets(manifest=...)\n def all_dbt_assets():\n ...\n\n # Select the dbt assets that have the tag "foo".\n foo_selection = build_dbt_asset_selection([dbt_assets], dbt_select="tag:foo")\n\n # Select the dbt assets that have the tag "foo" and all Dagster assets downstream\n # of them (dbt-related or otherwise)\n foo_and_downstream_selection = foo_selection.downstream()\n\n """\n manifest, dagster_dbt_translator = get_manifest_and_translator_from_dbt_assets(dbt_assets)\n from .dbt_manifest_asset_selection import DbtManifestAssetSelection\n\n return DbtManifestAssetSelection(\n manifest=manifest,\n dagster_dbt_translator=dagster_dbt_translator,\n select=dbt_select,\n exclude=dbt_exclude,\n )
\n\n\n
[docs]def build_schedule_from_dbt_selection(\n dbt_assets: Sequence[AssetsDefinition],\n job_name: str,\n cron_schedule: str,\n dbt_select: str = "fqn:*",\n dbt_exclude: Optional[str] = None,\n tags: Optional[Mapping[str, str]] = None,\n config: Optional[RunConfig] = None,\n execution_timezone: Optional[str] = None,\n) -> ScheduleDefinition:\n """Build a schedule to materialize a specified set of dbt resources from a dbt selection string.\n\n See https://docs.getdbt.com/reference/node-selection/syntax#how-does-selection-work for\n more information.\n\n Args:\n job_name (str): The name of the job to materialize the dbt resources.\n cron_schedule (str): The cron schedule to define the schedule.\n dbt_select (str): A dbt selection string to specify a set of dbt resources.\n dbt_exclude (Optional[str]): A dbt selection string to exclude a set of dbt resources.\n tags (Optional[Mapping[str, str]]): A dictionary of tags (string key-value pairs) to attach\n to the scheduled runs.\n config (Optional[RunConfig]): The config that parameterizes the execution of this schedule.\n execution_timezone (Optional[str]): Timezone in which the schedule should run.\n Supported strings for timezones are the ones provided by the\n `IANA time zone database <https://www.iana.org/time-zones>` - e.g. "America/Los_Angeles".\n\n Returns:\n ScheduleDefinition: A definition to materialize the selected dbt resources on a cron schedule.\n\n Examples:\n .. code-block:: python\n\n from dagster_dbt import dbt_assets, build_schedule_from_dbt_selection\n\n @dbt_assets(manifest=...)\n def all_dbt_assets():\n ...\n\n daily_dbt_assets_schedule = build_schedule_from_dbt_selection(\n [all_dbt_assets],\n job_name="all_dbt_assets",\n cron_schedule="0 0 * * *",\n dbt_select="fqn:*",\n )\n """\n return ScheduleDefinition(\n cron_schedule=cron_schedule,\n job=define_asset_job(\n name=job_name,\n selection=build_dbt_asset_selection(\n dbt_assets,\n dbt_select=dbt_select,\n dbt_exclude=dbt_exclude,\n ),\n config=config,\n tags=tags,\n ),\n execution_timezone=execution_timezone,\n )
\n\n\ndef get_manifest_and_translator_from_dbt_assets(\n dbt_assets: Sequence[AssetsDefinition],\n) -> Tuple[Mapping[str, Any], "DagsterDbtTranslator"]:\n check.invariant(len(dbt_assets) == 1, "Exactly one dbt AssetsDefinition is required")\n dbt_assets_def = dbt_assets[0]\n metadata_by_key = dbt_assets_def.metadata_by_key or {}\n first_asset_key = next(iter(dbt_assets_def.metadata_by_key.keys()))\n first_metadata = metadata_by_key.get(first_asset_key, {})\n manifest_wrapper: Optional["DbtManifestWrapper"] = first_metadata.get(MANIFEST_METADATA_KEY)\n if manifest_wrapper is None:\n raise DagsterInvariantViolationError(\n f"Expected to find dbt manifest metadata on asset {first_asset_key.to_user_string()},"\n " but did not. Did you pass in assets that weren't generated by"\n " load_assets_from_dbt_project, load_assets_from_dbt_manifest, or @dbt_assets?"\n )\n\n dagster_dbt_translator = first_metadata.get(DAGSTER_DBT_TRANSLATOR_METADATA_KEY)\n if dagster_dbt_translator is None:\n raise DagsterInvariantViolationError(\n f"Expected to find dbt translator metadata on asset {first_asset_key.to_user_string()},"\n " but did not. Did you pass in assets that weren't generated by"\n " load_assets_from_dbt_project, load_assets_from_dbt_manifest, or @dbt_assets?"\n )\n\n return manifest_wrapper.manifest, dagster_dbt_translator\n\n\n###################\n# DEFAULT FUNCTIONS\n###################\n\n\ndef default_asset_key_fn(dbt_resource_props: Mapping[str, Any]) -> AssetKey:\n """Get the asset key for a dbt node.\n\n By default, if the dbt node has a Dagster asset key configured in its metadata, then that is\n parsed and used.\n\n Otherwise:\n dbt sources: a dbt source's key is the union of its source name and its table name\n dbt models: a dbt model's key is the union of its model name and any schema configured on\n the model itself.\n """\n dagster_metadata = dbt_resource_props.get("meta", {}).get("dagster", {})\n asset_key_config = dagster_metadata.get("asset_key", [])\n if asset_key_config:\n return AssetKey(asset_key_config)\n\n if dbt_resource_props["resource_type"] == "source":\n components = [dbt_resource_props["source_name"], dbt_resource_props["name"]]\n else:\n configured_schema = dbt_resource_props["config"].get("schema")\n if configured_schema is not None:\n components = [configured_schema, dbt_resource_props["name"]]\n else:\n components = [dbt_resource_props["name"]]\n\n return AssetKey(components)\n\n\n
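A small sketch of how default_asset_key_fn resolves the three cases described in its docstring (explicit meta override, source, and model), using hypothetical dictionaries shaped like manifest nodes; note this helper lives in dagster_dbt.asset_utils rather than the package's top-level namespace:

from dagster import AssetKey
from dagster_dbt.asset_utils import default_asset_key_fn

# An explicit meta.dagster.asset_key wins over everything else.
assert default_asset_key_fn(
    {"meta": {"dagster": {"asset_key": ["marts", "orders"]}},
     "resource_type": "model", "name": "orders", "config": {}}
) == AssetKey(["marts", "orders"])

# A source key is [source_name, table_name].
assert default_asset_key_fn(
    {"meta": {}, "resource_type": "source", "source_name": "raw", "name": "events"}
) == AssetKey(["raw", "events"])

# A model key is [schema, name] when a schema is configured, else just [name].
assert default_asset_key_fn(
    {"meta": {}, "resource_type": "model", "name": "stg_events",
     "config": {"schema": "staging"}}
) == AssetKey(["staging", "stg_events"])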
[docs]def default_metadata_from_dbt_resource_props(\n dbt_resource_props: Mapping[str, Any]\n) -> Mapping[str, Any]:\n metadata: Dict[str, Any] = {}\n columns = dbt_resource_props.get("columns", {})\n if len(columns) > 0:\n metadata["table_schema"] = MetadataValue.table_schema(\n TableSchema(\n columns=[\n TableColumn(\n name=column_name,\n type=column_info.get("data_type") or "?",\n description=column_info.get("description"),\n )\n for column_name, column_info in columns.items()\n ]\n )\n )\n return metadata
\n\n\n
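A brief sketch of the table-schema metadata built above, using a hypothetical columns mapping of the shape dbt records in its manifest:

from dagster_dbt.asset_utils import default_metadata_from_dbt_resource_props

# Hypothetical dbt resource props; only the "columns" mapping is consulted.
metadata = default_metadata_from_dbt_resource_props(
    {
        "columns": {
            "id": {"data_type": "integer", "description": "primary key"},
            "amount": {"data_type": None, "description": None},  # type falls back to "?"
        }
    }
)

# metadata["table_schema"] is a MetadataValue.table_schema(...) wrapping a
# TableSchema with one TableColumn per dbt column, as constructed above.
print(metadata["table_schema"])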
[docs]def default_group_from_dbt_resource_props(dbt_resource_props: Mapping[str, Any]) -> Optional[str]:\n """Get the group name for a dbt node.\n\n If a Dagster group is configured in the metadata for the node, use that.\n\n Otherwise, if a dbt group is configured for the node, use that.\n """\n dagster_metadata = dbt_resource_props.get("meta", {}).get("dagster", {})\n\n dagster_group = dagster_metadata.get("group")\n if dagster_group:\n return dagster_group\n\n dbt_group = dbt_resource_props.get("config", {}).get("group")\n if dbt_group:\n return dbt_group\n\n return None
\n\n\n
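A small sketch of the precedence described above (the Dagster meta group first, then the dbt group config, else no group), again with hypothetical resource dictionaries:

from dagster_dbt.asset_utils import default_group_from_dbt_resource_props

# meta.dagster.group takes precedence over the dbt-level group config.
assert default_group_from_dbt_resource_props(
    {"meta": {"dagster": {"group": "finance"}}, "config": {"group": "marts"}}
) == "finance"

# Otherwise the dbt group config is used.
assert default_group_from_dbt_resource_props(
    {"meta": {}, "config": {"group": "marts"}}
) == "marts"

# With neither configured, no group is assigned.
assert default_group_from_dbt_resource_props({"meta": {}, "config": {}}) is None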
[docs]def group_from_dbt_resource_props_fallback_to_directory(\n    dbt_resource_props: Mapping[str, Any]\n) -> Optional[str]:\n    """Get the group name for a dbt node.\n\n    Has the same behavior as default_group_from_dbt_resource_props, except that, if no group can be determined\n    from config or metadata, falls back to using the subdirectory of the models directory that the\n    source file is in.\n\n    Args:\n        dbt_resource_props (Mapping[str, Any]): A dictionary representing the dbt resource.\n\n    Examples:\n        .. code-block:: python\n\n            from dagster_dbt import group_from_dbt_resource_props_fallback_to_directory\n\n            dbt_assets = load_assets_from_dbt_manifest(\n                manifest=manifest,\n                node_info_to_group_fn=group_from_dbt_resource_props_fallback_to_directory,\n            )\n    """\n    group_name = default_group_from_dbt_resource_props(dbt_resource_props)\n    if group_name is not None:\n        return group_name\n\n    fqn = dbt_resource_props.get("fqn", [])\n    # the first component is the package name, and the last component is the model name\n    if len(fqn) < 3:\n        return None\n    return fqn[1]
\n\n\ndef default_freshness_policy_fn(dbt_resource_props: Mapping[str, Any]) -> Optional[FreshnessPolicy]:\n dagster_metadata = dbt_resource_props.get("meta", {}).get("dagster", {})\n freshness_policy_config = dagster_metadata.get("freshness_policy", {})\n\n freshness_policy = _legacy_freshness_policy_fn(freshness_policy_config)\n if freshness_policy:\n return freshness_policy\n\n legacy_freshness_policy_config = dbt_resource_props["config"].get(\n "dagster_freshness_policy", {}\n )\n legacy_freshness_policy = _legacy_freshness_policy_fn(legacy_freshness_policy_config)\n\n if legacy_freshness_policy:\n deprecation_warning(\n "dagster_freshness_policy",\n "0.21.0",\n "Instead, configure a Dagster freshness policy on a dbt model using"\n " +meta.dagster.freshness_policy.",\n )\n\n return legacy_freshness_policy\n\n\ndef _legacy_freshness_policy_fn(\n freshness_policy_config: Mapping[str, Any]\n) -> Optional[FreshnessPolicy]:\n if freshness_policy_config:\n return FreshnessPolicy(\n maximum_lag_minutes=float(freshness_policy_config["maximum_lag_minutes"]),\n cron_schedule=freshness_policy_config.get("cron_schedule"),\n cron_schedule_timezone=freshness_policy_config.get("cron_schedule_timezone"),\n )\n return None\n\n\ndef default_auto_materialize_policy_fn(\n dbt_resource_props: Mapping[str, Any]\n) -> Optional[AutoMaterializePolicy]:\n dagster_metadata = dbt_resource_props.get("meta", {}).get("dagster", {})\n auto_materialize_policy_config = dagster_metadata.get("auto_materialize_policy", {})\n\n auto_materialize_policy = _auto_materialize_policy_fn(auto_materialize_policy_config)\n if auto_materialize_policy:\n return auto_materialize_policy\n\n legacy_auto_materialize_policy_config = dbt_resource_props["config"].get(\n "dagster_auto_materialize_policy", {}\n )\n legacy_auto_materialize_policy = _auto_materialize_policy_fn(\n legacy_auto_materialize_policy_config\n )\n\n if legacy_auto_materialize_policy:\n deprecation_warning(\n "dagster_auto_materialize_policy",\n "0.21.0",\n "Instead, configure a Dagster auto-materialize policy on a dbt model using"\n " +meta.dagster.auto_materialize_policy.",\n )\n\n return legacy_auto_materialize_policy\n\n\ndef _auto_materialize_policy_fn(\n auto_materialize_policy_config: Mapping[str, Any]\n) -> Optional[AutoMaterializePolicy]:\n if auto_materialize_policy_config.get("type") == "eager":\n return AutoMaterializePolicy.eager()\n elif auto_materialize_policy_config.get("type") == "lazy":\n return AutoMaterializePolicy.lazy()\n return None\n\n\ndef default_description_fn(dbt_resource_props: Mapping[str, Any], display_raw_sql: bool = True):\n code_block = textwrap.indent(\n dbt_resource_props.get("raw_sql") or dbt_resource_props.get("raw_code", ""), " "\n )\n description_sections = [\n dbt_resource_props["description"]\n or f"dbt {dbt_resource_props['resource_type']} {dbt_resource_props['name']}",\n ]\n if display_raw_sql:\n description_sections.append(f"#### Raw SQL:\\n```\\n{code_block}\\n```")\n return "\\n\\n".join(filter(None, description_sections))\n\n\ndef is_generic_test_on_attached_node_from_dbt_resource_props(\n unique_id: str, dbt_resource_props: Mapping[str, Any]\n) -> bool:\n attached_node_unique_id = dbt_resource_props.get("attached_node")\n is_generic_test = bool(attached_node_unique_id)\n\n return is_generic_test and attached_node_unique_id == unique_id\n\n\ndef default_asset_check_fn(\n asset_key: AssetKey,\n unique_id: str,\n dagster_dbt_translator_settings: "DagsterDbtTranslatorSettings",\n dbt_resource_props: Mapping[str, Any],\n) -> 
Optional[AssetCheckSpec]:\n is_generic_test_on_attached_node = is_generic_test_on_attached_node_from_dbt_resource_props(\n unique_id, dbt_resource_props\n )\n\n if not all(\n [\n dagster_dbt_translator_settings.enable_asset_checks,\n is_generic_test_on_attached_node,\n ]\n ):\n return None\n\n return AssetCheckSpec(\n name=dbt_resource_props["name"],\n asset=asset_key,\n description=dbt_resource_props["description"],\n )\n\n\ndef default_code_version_fn(dbt_resource_props: Mapping[str, Any]) -> str:\n return hashlib.sha1(\n (dbt_resource_props.get("raw_sql") or dbt_resource_props.get("raw_code", "")).encode(\n "utf-8"\n )\n ).hexdigest()\n\n\n###################\n# DEPENDENCIES\n###################\n\n\ndef is_non_asset_node(dbt_resource_props: Mapping[str, Any]):\n # some nodes exist inside the dbt graph but are not assets\n resource_type = dbt_resource_props["resource_type"]\n if resource_type == "metric":\n return True\n if (\n resource_type == "model"\n and dbt_resource_props.get("config", {}).get("materialized") == "ephemeral"\n ):\n return True\n return False\n\n\ndef get_deps(\n dbt_nodes: Mapping[str, Any],\n selected_unique_ids: AbstractSet[str],\n asset_resource_types: List[str],\n) -> Mapping[str, FrozenSet[str]]:\n def _valid_parent_node(dbt_resource_props):\n # sources are valid parents, but not assets\n return dbt_resource_props["resource_type"] in asset_resource_types + ["source"]\n\n asset_deps: Dict[str, Set[str]] = {}\n for unique_id in selected_unique_ids:\n dbt_resource_props = dbt_nodes[unique_id]\n node_resource_type = dbt_resource_props["resource_type"]\n\n # skip non-assets, such as metrics, tests, and ephemeral models\n if is_non_asset_node(dbt_resource_props) or node_resource_type not in asset_resource_types:\n continue\n\n asset_deps[unique_id] = set()\n for parent_unique_id in dbt_resource_props.get("depends_on", {}).get("nodes", []):\n parent_node_info = dbt_nodes[parent_unique_id]\n # for metrics or ephemeral dbt models, BFS to find valid parents\n if is_non_asset_node(parent_node_info):\n visited = set()\n replaced_parent_ids = set()\n # make a copy to avoid mutating the actual dictionary\n queue = list(parent_node_info.get("depends_on", {}).get("nodes", []))\n while queue:\n candidate_parent_id = queue.pop()\n if candidate_parent_id in visited:\n continue\n visited.add(candidate_parent_id)\n\n candidate_parent_info = dbt_nodes[candidate_parent_id]\n if is_non_asset_node(candidate_parent_info):\n queue.extend(candidate_parent_info.get("depends_on", {}).get("nodes", []))\n elif _valid_parent_node(candidate_parent_info):\n replaced_parent_ids.add(candidate_parent_id)\n\n asset_deps[unique_id] |= replaced_parent_ids\n # ignore nodes which are not assets / sources\n elif _valid_parent_node(parent_node_info):\n asset_deps[unique_id].add(parent_unique_id)\n\n frozen_asset_deps = {\n unique_id: frozenset(parent_ids) for unique_id, parent_ids in asset_deps.items()\n }\n\n return frozen_asset_deps\n\n\ndef get_asset_deps(\n dbt_nodes,\n deps,\n io_manager_key,\n manifest: Optional[Mapping[str, Any]],\n dagster_dbt_translator: "DagsterDbtTranslator",\n) -> Tuple[\n Dict[AssetKey, Set[AssetKey]],\n Dict[AssetKey, Tuple[str, In]],\n Dict[AssetKey, Tuple[str, Out]],\n Dict[AssetKey, str],\n Dict[AssetKey, FreshnessPolicy],\n Dict[AssetKey, AutoMaterializePolicy],\n Dict[str, AssetCheckSpec],\n Dict[str, List[str]],\n Dict[str, Dict[str, Any]],\n]:\n from .dagster_dbt_translator import DbtManifestWrapper\n\n asset_deps: Dict[AssetKey, Set[AssetKey]] = {}\n asset_ins: 
Dict[AssetKey, Tuple[str, In]] = {}\n asset_outs: Dict[AssetKey, Tuple[str, Out]] = {}\n\n # These dicts could be refactored as a single dict, mapping from output name to arbitrary\n # metadata that we need to store for reference.\n group_names_by_key: Dict[AssetKey, str] = {}\n freshness_policies_by_key: Dict[AssetKey, FreshnessPolicy] = {}\n auto_materialize_policies_by_key: Dict[AssetKey, AutoMaterializePolicy] = {}\n check_specs: List[AssetCheckSpec] = []\n fqns_by_output_name: Dict[str, List[str]] = {}\n metadata_by_output_name: Dict[str, Dict[str, Any]] = {}\n\n for unique_id, parent_unique_ids in deps.items():\n dbt_resource_props = dbt_nodes[unique_id]\n\n output_name = output_name_fn(dbt_resource_props)\n fqns_by_output_name[output_name] = dbt_resource_props["fqn"]\n\n metadata_by_output_name[output_name] = {\n key: dbt_resource_props[key] for key in ["unique_id", "resource_type"]\n }\n\n asset_key = dagster_dbt_translator.get_asset_key(dbt_resource_props)\n\n asset_deps[asset_key] = set()\n\n metadata = merge_dicts(\n dagster_dbt_translator.get_metadata(dbt_resource_props),\n {\n MANIFEST_METADATA_KEY: DbtManifestWrapper(manifest=manifest) if manifest else None,\n DAGSTER_DBT_TRANSLATOR_METADATA_KEY: dagster_dbt_translator,\n },\n )\n asset_outs[asset_key] = (\n output_name,\n Out(\n io_manager_key=io_manager_key,\n description=dagster_dbt_translator.get_description(dbt_resource_props),\n metadata=metadata,\n is_required=False,\n dagster_type=Nothing,\n code_version=default_code_version_fn(dbt_resource_props),\n ),\n )\n\n group_name = dagster_dbt_translator.get_group_name(dbt_resource_props)\n if group_name is not None:\n group_names_by_key[asset_key] = group_name\n\n freshness_policy = dagster_dbt_translator.get_freshness_policy(dbt_resource_props)\n if freshness_policy is not None:\n freshness_policies_by_key[asset_key] = freshness_policy\n\n auto_materialize_policy = dagster_dbt_translator.get_auto_materialize_policy(\n dbt_resource_props\n )\n if auto_materialize_policy is not None:\n auto_materialize_policies_by_key[asset_key] = auto_materialize_policy\n\n test_unique_ids = []\n if manifest:\n test_unique_ids = [\n child_unique_id\n for child_unique_id in manifest["child_map"][unique_id]\n if child_unique_id.startswith("test")\n ]\n\n for test_unique_id in test_unique_ids:\n test_resource_props = manifest["nodes"][test_unique_id]\n check_spec = default_asset_check_fn(\n asset_key, unique_id, dagster_dbt_translator.settings, test_resource_props\n )\n\n if check_spec:\n check_specs.append(check_spec)\n\n for parent_unique_id in parent_unique_ids:\n parent_node_info = dbt_nodes[parent_unique_id]\n parent_asset_key = dagster_dbt_translator.get_asset_key(parent_node_info)\n\n asset_deps[asset_key].add(parent_asset_key)\n\n # if this parent is not one of the selected nodes, it's an input\n if parent_unique_id not in deps:\n input_name = input_name_fn(parent_node_info)\n asset_ins[parent_asset_key] = (input_name, In(Nothing))\n\n check_specs_by_output_name = cast(\n Dict[str, AssetCheckSpec],\n _validate_and_assign_output_names_to_check_specs(check_specs, list(asset_outs.keys())),\n )\n\n return (\n asset_deps,\n asset_ins,\n asset_outs,\n group_names_by_key,\n freshness_policies_by_key,\n auto_materialize_policies_by_key,\n check_specs_by_output_name,\n fqns_by_output_name,\n metadata_by_output_name,\n )\n
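The BFS in get_deps above collapses non-asset parents (metrics and ephemeral models) into their nearest asset or source ancestors. A tiny sketch with hypothetical node ids, providing only the manifest fields the helper actually reads:

from dagster_dbt.asset_utils import get_deps  # internal helper shown above

# Hypothetical mini-manifest: a table model built on an ephemeral staging
# model, which in turn reads from a dbt source.
dbt_nodes = {
    "model.proj.final": {
        "resource_type": "model",
        "config": {"materialized": "table"},
        "depends_on": {"nodes": ["model.proj.stg_events"]},
    },
    "model.proj.stg_events": {
        "resource_type": "model",
        "config": {"materialized": "ephemeral"},  # not an asset itself
        "depends_on": {"nodes": ["source.proj.raw.events"]},
    },
    "source.proj.raw.events": {"resource_type": "source"},
}

deps = get_deps(
    dbt_nodes,
    selected_unique_ids={"model.proj.final"},
    asset_resource_types=["model"],
)

# The ephemeral model is skipped; its source parent is wired in directly.
assert deps == {"model.proj.final": frozenset({"source.proj.raw.events"})}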
", "current_page_name": "_modules/dagster_dbt/asset_utils", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dbt.asset_utils"}, "cloud": {"asset_defs": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dbt.cloud.asset_defs

\nimport json\nimport shlex\nfrom argparse import Namespace\nfrom contextlib import suppress\nfrom typing import (\n    Any,\n    Callable,\n    Dict,\n    FrozenSet,\n    List,\n    Mapping,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n    Union,\n    cast,\n)\n\nimport dagster._check as check\nfrom dagster import (\n    AssetExecutionContext,\n    AssetKey,\n    AssetOut,\n    AssetsDefinition,\n    AutoMaterializePolicy,\n    FreshnessPolicy,\n    MetadataValue,\n    PartitionsDefinition,\n    ResourceDefinition,\n    multi_asset,\n    with_resources,\n)\nfrom dagster._annotations import experimental, experimental_param\nfrom dagster._core.definitions.cacheable_assets import (\n    AssetsDefinitionCacheableData,\n    CacheableAssetsDefinition,\n)\nfrom dagster._core.definitions.metadata import MetadataUserInput\nfrom dagster._core.execution.context.init import build_init_resource_context\n\nfrom dagster_dbt.asset_utils import (\n    default_asset_key_fn,\n    default_auto_materialize_policy_fn,\n    default_description_fn,\n    default_freshness_policy_fn,\n    default_group_from_dbt_resource_props,\n    get_asset_deps,\n    get_deps,\n)\nfrom dagster_dbt.dagster_dbt_translator import DagsterDbtTranslator\n\nfrom ..errors import DagsterDbtCloudJobInvariantViolationError\nfrom ..utils import ASSET_RESOURCE_TYPES, result_to_events\nfrom .resources import DbtCloudClient, DbtCloudClientResource, DbtCloudRunStatus\n\nDAGSTER_DBT_COMPILE_RUN_ID_ENV_VAR = "DBT_DAGSTER_COMPILE_RUN_ID"\n\n\nclass DbtCloudCacheableAssetsDefinition(CacheableAssetsDefinition):\n    def __init__(\n        self,\n        dbt_cloud_resource_def: Union[DbtCloudClientResource, ResourceDefinition],\n        job_id: int,\n        node_info_to_asset_key: Callable[[Mapping[str, Any]], AssetKey],\n        node_info_to_group_fn: Callable[[Mapping[str, Any]], Optional[str]],\n        node_info_to_freshness_policy_fn: Callable[[Mapping[str, Any]], Optional[FreshnessPolicy]],\n        node_info_to_auto_materialize_policy_fn: Callable[\n            [Mapping[str, Any]], Optional[AutoMaterializePolicy]\n        ],\n        partitions_def: Optional[PartitionsDefinition] = None,\n        partition_key_to_vars_fn: Optional[Callable[[str], Mapping[str, Any]]] = None,\n    ):\n        self._dbt_cloud_resource_def: ResourceDefinition = (\n            dbt_cloud_resource_def.get_resource_definition()\n            if isinstance(dbt_cloud_resource_def, DbtCloudClientResource)\n            else dbt_cloud_resource_def\n        )\n\n        self._dbt_cloud: DbtCloudClient = (\n            dbt_cloud_resource_def.process_config_and_initialize().get_dbt_client()\n            if isinstance(dbt_cloud_resource_def, DbtCloudClientResource)\n            else dbt_cloud_resource_def(build_init_resource_context())\n        )\n        self._job_id = job_id\n        self._project_id: int\n        self._has_generate_docs: bool\n        self._job_commands: List[str]\n        self._job_materialization_command_step: int\n        self._node_info_to_asset_key = node_info_to_asset_key\n        self._node_info_to_group_fn = node_info_to_group_fn\n        self._node_info_to_freshness_policy_fn = node_info_to_freshness_policy_fn\n        self._node_info_to_auto_materialize_policy_fn = node_info_to_auto_materialize_policy_fn\n        self._partitions_def = partitions_def\n        self._partition_key_to_vars_fn = partition_key_to_vars_fn\n\n        super().__init__(unique_id=f"dbt-cloud-{job_id}")\n\n    def compute_cacheable_data(self) -> 
Sequence[AssetsDefinitionCacheableData]:\n        dbt_nodes, dbt_dependencies = self._get_dbt_nodes_and_dependencies()\n        return [self._build_dbt_cloud_assets_cacheable_data(dbt_nodes, dbt_dependencies)]\n\n    def build_definitions(\n        self, data: Sequence[AssetsDefinitionCacheableData]\n    ) -> Sequence[AssetsDefinition]:\n        return with_resources(\n            [\n                self._build_dbt_cloud_assets_from_cacheable_data(assets_definition_metadata)\n                for assets_definition_metadata in data\n            ],\n            {"dbt_cloud": self._dbt_cloud_resource_def},\n        )\n\n    @staticmethod\n    def parse_dbt_command(dbt_command: str) -> Namespace:\n        args = shlex.split(dbt_command)[1:]\n        try:\n            from dbt.cli.flags import (\n                Flags,\n                args_to_context,\n            )\n\n            # nasty hack to get dbt to parse the args\n            # dbt >= 1.5.0 requires that profiles-dir is set to an existing directory\n            return Namespace(**vars(Flags(args_to_context(args + ["--profiles-dir", "."]))))\n        except ImportError:\n            # dbt < 1.5.0 compat\n            from dbt.main import parse_args  # type: ignore\n\n            return parse_args(args=args)\n\n    @staticmethod\n    def get_job_materialization_command_step(execute_steps: List[str]) -> int:\n        materialization_command_filter = [\n            DbtCloudCacheableAssetsDefinition.parse_dbt_command(command).which in ["run", "build"]\n            for command in execute_steps\n        ]\n\n        if sum(materialization_command_filter) != 1:\n            raise DagsterDbtCloudJobInvariantViolationError(\n                "The dbt Cloud job must have a single `dbt run` or `dbt build` in its commands. "\n                f"Received commands: {execute_steps}."\n            )\n\n        return materialization_command_filter.index(True)\n\n    @staticmethod\n    def get_compile_filters(parsed_args: Namespace) -> List[str]:\n        dbt_compile_options: List[str] = []\n\n        selected_models = parsed_args.select or []\n        if selected_models:\n            dbt_compile_options.append(f"--select {' '.join(selected_models)}")\n\n        excluded_models = parsed_args.exclude or []\n        if excluded_models:\n            dbt_compile_options.append(f"--exclude {' '.join(excluded_models)}")\n\n        selector = getattr(parsed_args, "selector_name", None) or getattr(\n            parsed_args, "selector", None\n        )\n        if selector:\n            dbt_compile_options.append(f"--selector {selector}")\n\n        return dbt_compile_options\n\n    def _get_cached_compile_dbt_cloud_job_run(self, compile_run_id: int) -> Tuple[int, int]:\n        # If the compile run is ongoing, allow it a grace period of 10 minutes to finish.\n        with suppress(Exception):\n            self._dbt_cloud.poll_run(run_id=compile_run_id, poll_timeout=600)\n\n        compile_run = self._dbt_cloud.get_run(\n            run_id=compile_run_id, include_related=["trigger", "run_steps"]\n        )\n\n        compile_run_status: str = compile_run["status_humanized"]\n        if compile_run_status != DbtCloudRunStatus.SUCCESS:\n            raise DagsterDbtCloudJobInvariantViolationError(\n                f"The cached dbt Cloud job run `{compile_run_id}` must have a status of"\n                f" `{DbtCloudRunStatus.SUCCESS}`. Received status: `{compile_run_status}. 
You can"\n                f" view the full status of your dbt Cloud run at {compile_run['href']}. Once it has"\n                " successfully completed, reload your Dagster definitions. If your run has failed,"\n                " you must manually refresh the cache using the `dagster-dbt"\n                " cache-compile-references` CLI."\n            )\n\n        compile_run_has_generate_docs = compile_run["trigger"]["generate_docs_override"]\n\n        compile_job_materialization_command_step = len(compile_run["run_steps"])\n        if compile_run_has_generate_docs:\n            compile_job_materialization_command_step -= 1\n\n        return compile_run_id, compile_job_materialization_command_step\n\n    def _compile_dbt_cloud_job(self, dbt_cloud_job: Mapping[str, Any]) -> Tuple[int, int]:\n        # Retrieve the filters options from the dbt Cloud job's materialization command.\n        #\n        # There are three filters: `--select`, `--exclude`, and `--selector`.\n        materialization_command = self._job_commands[self._job_materialization_command_step]\n        parsed_args = DbtCloudCacheableAssetsDefinition.parse_dbt_command(materialization_command)\n        dbt_compile_options = DbtCloudCacheableAssetsDefinition.get_compile_filters(\n            parsed_args=parsed_args\n        )\n\n        # Add the partition variable as a variable to the dbt Cloud job command.\n        #\n        # If existing variables passed through the dbt Cloud job's command, an error will be\n        # raised. Since these are static variables anyways, they can be moved to the\n        # `dbt_project.yml` without loss of functionality.\n        #\n        # Since we're only doing this to generate the dependency structure, just use an arbitrary\n        # partition key (e.g. the last one) to retrieve the partition variable.\n        if parsed_args.vars and parsed_args.vars != "{}":\n            raise DagsterDbtCloudJobInvariantViolationError(\n                f"The dbt Cloud job '{dbt_cloud_job['name']}' ({dbt_cloud_job['id']}) must not have"\n                " variables defined from `--vars` in its `dbt run` or `dbt build` command."\n                " Instead, declare the variables in the `dbt_project.yml` file. 
Received commands:"\n                f" {self._job_commands}."\n            )\n\n        if self._partitions_def and self._partition_key_to_vars_fn:\n            last_partition_key = self._partitions_def.get_last_partition_key()\n            if last_partition_key is None:\n                check.failed("PartitionsDefinition has no partitions")\n            partition_var = self._partition_key_to_vars_fn(last_partition_key)\n\n            dbt_compile_options.append(f"--vars '{json.dumps(partition_var)}'")\n\n        # We need to retrieve the dependency structure for the assets in the dbt Cloud project.\n        # However, we can't just use the dependency structure from the latest run, because\n        # this historical structure may not be up-to-date with the current state of the project.\n        #\n        # By always doing a compile step, we can always get the latest dependency structure.\n        # This incurs some latency, but at least it doesn't run through the entire materialization\n        # process.\n        dbt_compile_command = f"dbt compile {' '.join(dbt_compile_options)}"\n        compile_run_dbt_output = self._dbt_cloud.run_job_and_poll(\n            job_id=self._job_id,\n            cause="Generating software-defined assets for Dagster.",\n            steps_override=[dbt_compile_command],\n        )\n\n        # Target the compile execution step when retrieving run artifacts, rather than assuming\n        # that the last step is the correct target.\n        #\n        # Here, we ignore the `dbt docs generate` step.\n        compile_job_materialization_command_step = len(\n            compile_run_dbt_output.run_details.get("run_steps", [])\n        )\n        if self._has_generate_docs:\n            compile_job_materialization_command_step -= 1\n\n        return compile_run_dbt_output.run_id, compile_job_materialization_command_step\n\n    def _get_dbt_nodes_and_dependencies(\n        self,\n    ) -> Tuple[Mapping[str, Any], Mapping[str, FrozenSet[str]]]:\n        """For a given dbt Cloud job, fetch the latest run's dependency structure of executed nodes."""\n        # Fetch information about the job.\n        job = self._dbt_cloud.get_job(job_id=self._job_id)\n        self._project_id = job["project_id"]\n        self._has_generate_docs = job["generate_docs"]\n\n        # We constraint the kinds of dbt Cloud jobs that we support running.\n        #\n        # A simple constraint is that we only support jobs that run multiple steps,\n        # but it must contain one of either `dbt run` or `dbt build`.\n        #\n        # As a reminder, `dbt deps` is automatically run before the job's configured commands.\n        # And if the settings are enabled, `dbt docs generate` and `dbt source freshness` can\n        # automatically run after the job's configured commands.\n        #\n        # These commands that execute before and after the job's configured commands do not count\n        # towards the single command constraint.\n        self._job_commands = job["execute_steps"]\n        self._job_materialization_command_step = (\n            DbtCloudCacheableAssetsDefinition.get_job_materialization_command_step(\n                execute_steps=self._job_commands\n            )\n        )\n\n        # Determine whether to use a cached compile run. 
This should only be set up if the user is\n        # using a GitHub action along with their dbt project.\n        dbt_cloud_job_env_vars = self._dbt_cloud.get_job_environment_variables(\n            project_id=self._project_id, job_id=self._job_id\n        )\n        compile_run_id = (\n            dbt_cloud_job_env_vars.get(DAGSTER_DBT_COMPILE_RUN_ID_ENV_VAR, {})\n            .get("job", {})\n            .get("value")\n        )\n\n        compile_run_id, compile_job_materialization_command_step = (\n            # If a compile run is cached, then use it.\n            self._get_cached_compile_dbt_cloud_job_run(compile_run_id=int(compile_run_id))\n            if compile_run_id\n            # Otherwise, compile the dbt Cloud project in an ad-hoc manner.\n            else self._compile_dbt_cloud_job(dbt_cloud_job=job)\n        )\n\n        manifest_json = self._dbt_cloud.get_manifest(\n            run_id=compile_run_id, step=compile_job_materialization_command_step\n        )\n        run_results_json = self._dbt_cloud.get_run_results(\n            run_id=compile_run_id, step=compile_job_materialization_command_step\n        )\n\n        # Filter the manifest to only include the nodes that were executed.\n        dbt_nodes: Dict[str, Any] = {\n            **manifest_json.get("nodes", {}),\n            **manifest_json.get("sources", {}),\n            **manifest_json.get("metrics", {}),\n        }\n        executed_node_ids: Set[str] = set(\n            result["unique_id"] for result in run_results_json["results"]\n        )\n\n        # If there are no executed nodes, then there are no assets to generate.\n        # Inform the user to inspect their dbt Cloud job's command.\n        if not executed_node_ids:\n            raise DagsterDbtCloudJobInvariantViolationError(\n                f"The dbt Cloud job '{job['name']}' ({job['id']}) does not generate any "\n                "software-defined assets. Ensure that your dbt project has nodes to execute, "\n                "and that your dbt Cloud job's materialization command has the proper filter "\n                f"options applied. Received commands: {self._job_commands}."\n            )\n\n        # Generate the dependency structure for the executed nodes.\n        dbt_dependencies = get_deps(\n            dbt_nodes=dbt_nodes,\n            selected_unique_ids=executed_node_ids,\n            asset_resource_types=ASSET_RESOURCE_TYPES,\n        )\n\n        return dbt_nodes, dbt_dependencies\n\n    def _build_dbt_cloud_assets_cacheable_data(\n        self, dbt_nodes: Mapping[str, Any], dbt_dependencies: Mapping[str, FrozenSet[str]]\n    ) -> AssetsDefinitionCacheableData:\n        """Given all of the nodes and dependencies for a dbt Cloud job, build the cacheable\n        representation that generate the asset definition for the job.\n        """\n\n        class CustomDagsterDbtTranslator(DagsterDbtTranslator):\n            @classmethod\n            def get_asset_key(cls, dbt_resource_props):\n                return self._node_info_to_asset_key(dbt_resource_props)\n\n            @classmethod\n            def get_description(cls, dbt_resource_props):\n                # We shouldn't display the raw sql. 
Instead, inspect if dbt docs were generated,\n                # and attach metadata to link to the docs.\n                return default_description_fn(dbt_resource_props, display_raw_sql=False)\n\n            @classmethod\n            def get_group_name(cls, dbt_resource_props):\n                return self._node_info_to_group_fn(dbt_resource_props)\n\n            @classmethod\n            def get_freshness_policy(cls, dbt_resource_props):\n                return self._node_info_to_freshness_policy_fn(dbt_resource_props)\n\n            @classmethod\n            def get_auto_materialize_policy(cls, dbt_resource_props):\n                return self._node_info_to_auto_materialize_policy_fn(dbt_resource_props)\n\n        (\n            asset_deps,\n            asset_ins,\n            asset_outs,\n            group_names_by_key,\n            freshness_policies_by_key,\n            auto_materialize_policies_by_key,\n            _,\n            fqns_by_output_name,\n            metadata_by_output_name,\n        ) = get_asset_deps(\n            dbt_nodes=dbt_nodes,\n            deps=dbt_dependencies,\n            # TODO: In the future, allow the IO manager to be specified.\n            io_manager_key=None,\n            dagster_dbt_translator=CustomDagsterDbtTranslator(),\n            manifest=None,\n        )\n\n        return AssetsDefinitionCacheableData(\n            # TODO: In the future, we should allow additional upstream assets to be specified.\n            keys_by_input_name={\n                input_name: asset_key for asset_key, (input_name, _) in asset_ins.items()\n            },\n            keys_by_output_name={\n                output_name: asset_key for asset_key, (output_name, _) in asset_outs.items()\n            },\n            internal_asset_deps={\n                asset_outs[asset_key][0]: asset_deps for asset_key, asset_deps in asset_deps.items()\n            },\n            # We don't rely on a static group name. 
Instead, we map over the dbt metadata to\n            # determine the group name for each asset.\n            group_name=None,\n            metadata_by_output_name={\n                output_name: self._build_dbt_cloud_assets_metadata(dbt_metadata)\n                for output_name, dbt_metadata in metadata_by_output_name.items()\n            },\n            # TODO: In the future, we should allow the key prefix to be specified.\n            key_prefix=None,\n            can_subset=True,\n            extra_metadata={\n                "job_id": self._job_id,\n                "job_commands": self._job_commands,\n                "job_materialization_command_step": self._job_materialization_command_step,\n                "group_names_by_output_name": {\n                    asset_outs[asset_key][0]: group_name\n                    for asset_key, group_name in group_names_by_key.items()\n                },\n                "fqns_by_output_name": fqns_by_output_name,\n            },\n            freshness_policies_by_output_name={\n                asset_outs[asset_key][0]: freshness_policy\n                for asset_key, freshness_policy in freshness_policies_by_key.items()\n            },\n            auto_materialize_policies_by_output_name={\n                asset_outs[asset_key][0]: auto_materialize_policy\n                for asset_key, auto_materialize_policy in auto_materialize_policies_by_key.items()\n            },\n        )\n\n    def _build_dbt_cloud_assets_metadata(self, dbt_metadata: Dict[str, Any]) -> MetadataUserInput:\n        metadata = {\n            "dbt Cloud Job": MetadataValue.url(\n                self._dbt_cloud.build_url_for_job(\n                    project_id=self._project_id,\n                    job_id=self._job_id,\n                )\n            ),\n        }\n\n        if self._has_generate_docs:\n            metadata["dbt Cloud Documentation"] = MetadataValue.url(\n                self._dbt_cloud.build_url_for_cloud_docs(\n                    job_id=self._job_id,\n                    resource_type=dbt_metadata["resource_type"],\n                    unique_id=dbt_metadata["unique_id"],\n                )\n            )\n\n        return metadata\n\n    def _build_dbt_cloud_assets_from_cacheable_data(\n        self, assets_definition_cacheable_data: AssetsDefinitionCacheableData\n    ) -> AssetsDefinition:\n        metadata = cast(Mapping[str, Any], assets_definition_cacheable_data.extra_metadata)\n        job_id = cast(int, metadata["job_id"])\n        job_commands = cast(List[str], list(metadata["job_commands"]))\n        job_materialization_command_step = cast(int, metadata["job_materialization_command_step"])\n        group_names_by_output_name = cast(Mapping[str, str], metadata["group_names_by_output_name"])\n        fqns_by_output_name = cast(Mapping[str, List[str]], metadata["fqns_by_output_name"])\n\n        @multi_asset(\n            name=f"dbt_cloud_job_{job_id}",\n            deps=list((assets_definition_cacheable_data.keys_by_input_name or {}).values()),\n            outs={\n                output_name: AssetOut(\n                    key=asset_key,\n                    group_name=group_names_by_output_name.get(output_name),\n                    freshness_policy=(\n                        assets_definition_cacheable_data.freshness_policies_by_output_name or {}\n                    ).get(\n                        output_name,\n                    ),\n                    auto_materialize_policy=(\n                        
assets_definition_cacheable_data.auto_materialize_policies_by_output_name\n                        or {}\n                    ).get(\n                        output_name,\n                    ),\n                    metadata=(assets_definition_cacheable_data.metadata_by_output_name or {}).get(\n                        output_name\n                    ),\n                    is_required=False,\n                )\n                for output_name, asset_key in (\n                    assets_definition_cacheable_data.keys_by_output_name or {}\n                ).items()\n            },\n            internal_asset_deps={\n                output_name: set(asset_deps)\n                for output_name, asset_deps in (\n                    assets_definition_cacheable_data.internal_asset_deps or {}\n                ).items()\n            },\n            partitions_def=self._partitions_def,\n            can_subset=assets_definition_cacheable_data.can_subset,\n            required_resource_keys={"dbt_cloud"},\n            compute_kind="dbt",\n        )\n        def _assets(context: AssetExecutionContext):\n            dbt_cloud = cast(DbtCloudClient, context.resources.dbt_cloud)\n\n            # Add the partition variable as a variable to the dbt Cloud job command.\n            dbt_options: List[str] = []\n            if context.has_partition_key and self._partition_key_to_vars_fn:\n                partition_var = self._partition_key_to_vars_fn(context.partition_key)\n\n                dbt_options.append(f"--vars '{json.dumps(partition_var)}'")\n\n            # Prepare the materialization step to be overriden with the selection filter\n            materialization_command = job_commands[job_materialization_command_step]\n\n            # Map the selected outputs to dbt models that should be materialized.\n            #\n            # HACK: This selection filter works even if an existing `--select` is specified in the\n            # dbt Cloud job. 
We take advantage of the fact that the last `--select` will be used.\n            #\n            # This is not ideal, as the triggered run for the dbt Cloud job will still have both\n            # `--select` options when displayed in the UI, but parsing the command line argument\n            # to remove the initial select using argparse.\n            if len(context.selected_output_names) != len(\n                assets_definition_cacheable_data.keys_by_output_name or {}\n            ):\n                selected_models = [\n                    ".".join(fqns_by_output_name[output_name])\n                    for output_name in context.selected_output_names\n                ]\n\n                dbt_options.append(f"--select {' '.join(sorted(selected_models))}")\n\n                # If the `--selector` option is used, we need to remove it from the command, since\n                # it disables other selection options from being functional.\n                #\n                # See https://docs.getdbt.com/reference/node-selection/syntax for details.\n                split_materialization_command = shlex.split(materialization_command)\n                if "--selector" in split_materialization_command:\n                    idx = split_materialization_command.index("--selector")\n\n                    materialization_command = " ".join(\n                        split_materialization_command[:idx]\n                        + split_materialization_command[idx + 2 :]\n                    )\n\n            job_commands[job_materialization_command_step] = (\n                f"{materialization_command} {' '.join(dbt_options)}".strip()\n            )\n\n            # Run the dbt Cloud job to rematerialize the assets.\n            dbt_cloud_output = dbt_cloud.run_job_and_poll(\n                job_id=job_id,\n                cause=f"Materializing software-defined assets in Dagster run {context.run_id[:8]}",\n                steps_override=job_commands,\n            )\n\n            # Target the materialization step when retrieving run artifacts, rather than assuming\n            # that the last step is the correct target.\n            #\n            # We ignore the commands in front of the materialization command. 
And again, we ignore\n            # the `dbt docs generate` step.\n            materialization_command_step = len(dbt_cloud_output.run_details.get("run_steps", []))\n            materialization_command_step -= len(job_commands) - job_materialization_command_step - 1\n            if dbt_cloud_output.run_details.get("job", {}).get("generate_docs"):\n                materialization_command_step -= 1\n\n            # TODO: Assume the run completely fails or completely succeeds.\n            # In the future, we can relax this assumption.\n            manifest_json = dbt_cloud.get_manifest(\n                run_id=dbt_cloud_output.run_id, step=materialization_command_step\n            )\n            run_results_json = self._dbt_cloud.get_run_results(\n                run_id=dbt_cloud_output.run_id, step=materialization_command_step\n            )\n\n            for result in run_results_json.get("results", []):\n                yield from result_to_events(\n                    result=result,\n                    docs_url=dbt_cloud_output.docs_url,\n                    node_info_to_asset_key=self._node_info_to_asset_key,\n                    manifest_json=manifest_json,\n                    # TODO: In the future, allow arbitrary mappings to Dagster output metadata from\n                    # the dbt metadata.\n                    extra_metadata=None,\n                    generate_asset_outputs=True,\n                )\n\n        return _assets\n\n\n
[docs]@experimental\n@experimental_param(param="partitions_def")\n@experimental_param(param="partition_key_to_vars_fn")\ndef load_assets_from_dbt_cloud_job(\n dbt_cloud: ResourceDefinition,\n job_id: int,\n node_info_to_asset_key: Callable[[Mapping[str, Any]], AssetKey] = default_asset_key_fn,\n node_info_to_group_fn: Callable[\n [Mapping[str, Any]], Optional[str]\n ] = default_group_from_dbt_resource_props,\n node_info_to_freshness_policy_fn: Callable[\n [Mapping[str, Any]], Optional[FreshnessPolicy]\n ] = default_freshness_policy_fn,\n node_info_to_auto_materialize_policy_fn: Callable[\n [Mapping[str, Any]], Optional[AutoMaterializePolicy]\n ] = default_auto_materialize_policy_fn,\n partitions_def: Optional[PartitionsDefinition] = None,\n partition_key_to_vars_fn: Optional[Callable[[str], Mapping[str, Any]]] = None,\n) -> CacheableAssetsDefinition:\n """Loads a set of dbt models, managed by a dbt Cloud job, into Dagster assets. In order to\n determine the set of dbt models, the project is compiled to generate the necessary artifacts\n that define the dbt models and their dependencies.\n\n One Dagster asset is created for each dbt model.\n\n Args:\n dbt_cloud (ResourceDefinition): The dbt Cloud resource to use to connect to the dbt Cloud API.\n job_id (int): The ID of the dbt Cloud job to load assets from.\n node_info_to_asset_key: (Mapping[str, Any] -> AssetKey): A function that takes a dictionary\n of dbt metadata and returns the AssetKey that you want to represent a given model or\n source. By default: dbt model -> AssetKey([model_name]) and\n dbt source -> AssetKey([source_name, table_name])\n node_info_to_group_fn (Dict[str, Any] -> Optional[str]): A function that takes a\n dictionary of dbt node info and returns the group that this node should be assigned to.\n node_info_to_freshness_policy_fn (Dict[str, Any] -> Optional[FreshnessPolicy]): A function\n that takes a dictionary of dbt node info and optionally returns a FreshnessPolicy that\n should be applied to this node. By default, freshness policies will be created from\n config applied to dbt models, i.e.:\n `dagster_freshness_policy={"maximum_lag_minutes": 60, "cron_schedule": "0 9 * * *"}`\n will result in that model being assigned\n `FreshnessPolicy(maximum_lag_minutes=60, cron_schedule="0 9 * * *")`\n node_info_to_auto_materialize_policy_fn (Dict[str, Any] -> Optional[AutoMaterializePolicy]):\n A function that takes a dictionary of dbt node info and optionally returns a AutoMaterializePolicy\n that should be applied to this node. By default, AutoMaterializePolicies will be created from\n config applied to dbt models, i.e.:\n `dagster_auto_materialize_policy={"type": "lazy"}` will result in that model being assigned\n `AutoMaterializePolicy.lazy()`\n node_info_to_definition_metadata_fn (Dict[str, Any] -> Optional[Dict[str, MetadataUserInput]]):\n A function that takes a dictionary of dbt node info and optionally returns a dictionary\n of metadata to be attached to the corresponding definition. This is added to the default\n metadata assigned to the node, which consists of the node's schema (if present).\n partitions_def (Optional[PartitionsDefinition]): Defines the set of partition keys that\n compose the dbt assets.\n partition_key_to_vars_fn (Optional[str -> Dict[str, Any]]): A function to translate a given\n partition key (e.g. '2022-01-01') to a dictionary of vars to be passed into the dbt\n invocation (e.g. 
{"run_date": "2022-01-01"})\n\n Returns:\n CacheableAssetsDefinition: A definition for the loaded assets.\n\n Examples:\n .. code-block:: python\n\n from dagster import repository\n from dagster_dbt import dbt_cloud_resource, load_assets_from_dbt_cloud_job\n\n DBT_CLOUD_JOB_ID = 1234\n\n dbt_cloud = dbt_cloud_resource.configured(\n {\n "auth_token": {"env": "DBT_CLOUD_API_TOKEN"},\n "account_id": {"env": "DBT_CLOUD_ACCOUNT_ID"},\n }\n )\n\n dbt_cloud_assets = load_assets_from_dbt_cloud_job(\n dbt_cloud=dbt_cloud, job_id=DBT_CLOUD_JOB_ID\n )\n\n\n @repository\n def dbt_cloud_sandbox():\n return [dbt_cloud_assets]\n """\n if partition_key_to_vars_fn:\n check.invariant(\n partitions_def is not None,\n "Cannot supply a `partition_key_to_vars_fn` without a `partitions_def`.",\n )\n\n return DbtCloudCacheableAssetsDefinition(\n dbt_cloud_resource_def=dbt_cloud,\n job_id=job_id,\n node_info_to_asset_key=node_info_to_asset_key,\n node_info_to_group_fn=node_info_to_group_fn,\n node_info_to_freshness_policy_fn=node_info_to_freshness_policy_fn,\n node_info_to_auto_materialize_policy_fn=node_info_to_auto_materialize_policy_fn,\n partitions_def=partitions_def,\n partition_key_to_vars_fn=partition_key_to_vars_fn,\n )
\n
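A minimal sketch of the `partitions_def` / `partition_key_to_vars_fn` parameters described in the docstring above (the job id, start date, and environment variable names are placeholders, not values from this source):

.. code-block:: python

    from dagster import DailyPartitionsDefinition
    from dagster_dbt import dbt_cloud_resource, load_assets_from_dbt_cloud_job

    dbt_cloud = dbt_cloud_resource.configured(
        {
            "auth_token": {"env": "DBT_CLOUD_API_TOKEN"},
            "account_id": {"env": "DBT_CLOUD_ACCOUNT_ID"},
        }
    )

    dbt_cloud_assets = load_assets_from_dbt_cloud_job(
        dbt_cloud=dbt_cloud,
        job_id=1234,  # placeholder dbt Cloud job id
        partitions_def=DailyPartitionsDefinition(start_date="2023-01-01"),
        # Each partition key (e.g. "2023-01-01") is translated into a dbt var for that run.
        partition_key_to_vars_fn=lambda partition_key: {"run_date": partition_key},
    )

Per the invariant enforced in the function body, `partition_key_to_vars_fn` may only be supplied together with `partitions_def`.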
", "current_page_name": "_modules/dagster_dbt/cloud/asset_defs", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dbt.cloud.asset_defs"}, "ops": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dbt.cloud.ops

\nfrom typing import List, Optional\n\nfrom dagster import Config, In, Nothing, Out, Output, op\nfrom pydantic import Field\n\nfrom ..utils import generate_materializations\nfrom .resources import DEFAULT_POLL_INTERVAL\nfrom .types import DbtCloudOutput\n\n\nclass DbtCloudRunOpConfig(Config):\n    job_id: int = Field(\n        description=(\n            "The integer ID of the relevant dbt Cloud job. You can find this value by going to the"\n            " details page of your job in the dbt Cloud UI. It will be the final number in the url,"\n            " e.g.:    "\n            " https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/jobs/{job_id}/"\n        )\n    )\n    poll_interval: float = Field(\n        default=DEFAULT_POLL_INTERVAL,\n        description="The time (in seconds) that will be waited between successive polls.",\n    )\n    poll_timeout: Optional[float] = Field(\n        default=None,\n        description=(\n            "The maximum time that will waited before this operation is timed out. By "\n            "default, this will never time out."\n        ),\n    )\n    yield_materializations: bool = Field(\n        default=True,\n        description=(\n            "If True, materializations corresponding to the results of the dbt operation will "\n            "be yielded when the op executes."\n        ),\n    )\n\n    asset_key_prefix: List[str] = Field(\n        default=["dbt"],\n        description=(\n            "If provided and yield_materializations is True, these components will be used to "\n            "prefix the generated asset keys."\n        ),\n    )\n\n\n
[docs]@op(\n required_resource_keys={"dbt_cloud"},\n ins={"start_after": In(Nothing)},\n out=Out(DbtCloudOutput, description="Parsed output from running the dbt Cloud job."),\n tags={"kind": "dbt_cloud"},\n)\ndef dbt_cloud_run_op(context, config: DbtCloudRunOpConfig):\n """Initiates a run for a dbt Cloud job, then polls until the run completes. If the job\n fails or is otherwised stopped before succeeding, a `dagster.Failure` exception will be raised,\n and this op will fail.\n\n It requires the use of a 'dbt_cloud' resource, which is used to connect to the dbt Cloud API.\n\n **Config Options:**\n\n job_id (int)\n The integer ID of the relevant dbt Cloud job. You can find this value by going to the details\n page of your job in the dbt Cloud UI. It will be the final number in the url, e.g.:\n ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/jobs/{job_id}/``\n poll_interval (float)\n The time (in seconds) that will be waited between successive polls. Defaults to ``10``.\n poll_timeout (float)\n The maximum time (in seconds) that will waited before this operation is timed out. By\n default, this will never time out.\n yield_materializations (bool)\n If True, materializations corresponding to the results of the dbt operation will be\n yielded when the solid executes. Defaults to ``True``.\n rasset_key_prefix (float)\n If provided and yield_materializations is True, these components will be used to "\n prefix the generated asset keys. Defaults to ["dbt"].\n\n **Examples:**\n\n .. code-block:: python\n\n from dagster import job\n from dagster_dbt import dbt_cloud_resource, dbt_cloud_run_op\n\n my_dbt_cloud_resource = dbt_cloud_resource.configured(\n {"auth_token": {"env": "DBT_CLOUD_AUTH_TOKEN"}, "account_id": 77777}\n )\n run_dbt_nightly_sync = dbt_cloud_run_op.configured(\n {"job_id": 54321}, name="run_dbt_nightly_sync"\n )\n\n @job(resource_defs={"dbt_cloud": my_dbt_cloud_resource})\n def dbt_cloud():\n run_dbt_nightly_sync()\n\n\n """\n dbt_output = context.resources.dbt_cloud.run_job_and_poll(\n config.job_id, poll_interval=config.poll_interval, poll_timeout=config.poll_timeout\n )\n if config.yield_materializations and "results" in dbt_output.result:\n yield from generate_materializations(dbt_output, asset_key_prefix=config.asset_key_prefix)\n yield Output(\n dbt_output,\n metadata={\n "created_at": dbt_output.run_details["created_at"],\n "started_at": dbt_output.run_details["started_at"],\n "finished_at": dbt_output.run_details["finished_at"],\n "total_duration": dbt_output.run_details["duration"],\n "run_duration": dbt_output.run_details["run_duration"],\n },\n )
\n
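A minimal sketch of supplying `dbt_cloud_run_op`'s config at launch time instead of baking it in with `.configured` (the job name and job id are placeholders; the account id and env var name follow the docstring example above, and executing this triggers a real dbt Cloud run):

.. code-block:: python

    from dagster import job
    from dagster_dbt import dbt_cloud_resource, dbt_cloud_run_op

    my_dbt_cloud_resource = dbt_cloud_resource.configured(
        {"auth_token": {"env": "DBT_CLOUD_AUTH_TOKEN"}, "account_id": 77777}
    )

    @job(resource_defs={"dbt_cloud": my_dbt_cloud_resource})
    def run_dbt_cloud_job():
        dbt_cloud_run_op()

    # Provide the op config (job_id is required; the other fields have defaults)
    # when the run is launched rather than at definition time.
    result = run_dbt_cloud_job.execute_in_process(
        run_config={"ops": {"dbt_cloud_run_op": {"config": {"job_id": 54321}}}}
    )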
", "current_page_name": "_modules/dagster_dbt/cloud/ops", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dbt.cloud.ops"}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dbt.cloud.resources

\nimport datetime\nimport json\nimport logging\nimport time\nfrom enum import Enum\nfrom typing import Any, Mapping, Optional, Sequence, cast\nfrom urllib.parse import urlencode, urljoin\n\nimport requests\nfrom dagster import (\n    ConfigurableResource,\n    Failure,\n    IAttachDifferentObjectToOpContext,\n    MetadataValue,\n    __version__,\n    _check as check,\n    get_dagster_logger,\n    resource,\n)\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster._utils.merger import deep_merge_dicts\nfrom pydantic import Field\nfrom requests.exceptions import RequestException\n\nfrom .types import DbtCloudOutput\n\nDBT_DEFAULT_HOST = "https://cloud.getdbt.com/"\nDBT_API_V2_PATH = "api/v2/accounts/"\nDBT_API_V3_PATH = "api/v3/accounts/"\n\n# default polling interval (in seconds)\nDEFAULT_POLL_INTERVAL = 10\n\n\nclass DbtCloudRunStatus(str, Enum):\n    QUEUED = "Queued"\n    STARTING = "Starting"\n    RUNNING = "Running"\n    SUCCESS = "Success"\n    ERROR = "Error"\n    CANCELLED = "Cancelled"\n\n\n# TODO: This resource should be a wrapper over an existing client for a accessing dbt Cloud,\n# rather than using requests to the API directly.\nclass DbtCloudClient:\n    """This class exposes methods on top of the dbt Cloud REST API v2.\n\n    For a complete set of documentation on the dbt Cloud Administrative REST API, including expected\n    response JSON schemae, see the `dbt Cloud API Docs <https://docs.getdbt.com/dbt-cloud/api-v2>`_.\n    """\n\n    def __init__(\n        self,\n        auth_token: str,\n        account_id: int,\n        disable_schedule_on_trigger: bool = True,\n        request_max_retries: int = 3,\n        request_retry_delay: float = 0.25,\n        dbt_cloud_host: str = DBT_DEFAULT_HOST,\n        log: logging.Logger = get_dagster_logger(),\n        log_requests: bool = False,\n    ):\n        self._auth_token = auth_token\n        self._account_id = account_id\n        self._disable_schedule_on_trigger = disable_schedule_on_trigger\n\n        self._request_max_retries = request_max_retries\n        self._request_retry_delay = request_retry_delay\n\n        self._dbt_cloud_host = dbt_cloud_host\n        self._log = log\n        self._log_requests = log_requests\n\n    @property\n    def api_v2_base_url(self) -> str:\n        return urljoin(self._dbt_cloud_host, DBT_API_V2_PATH)\n\n    @property\n    def api_v3_base_url(self) -> str:\n        return urljoin(self._dbt_cloud_host, DBT_API_V3_PATH)\n\n    def build_url_for_job(self, project_id: int, job_id: int) -> str:\n        return urljoin(\n            self._dbt_cloud_host,\n            f"next/deploy/{self._account_id}/projects/{project_id}/jobs/{job_id}/",\n        )\n\n    def build_url_for_cloud_docs(self, job_id: int, resource_type: str, unique_id: str) -> str:\n        return urljoin(\n            self._dbt_cloud_host,\n            f"/accounts/{self._account_id}/jobs/{job_id}/docs/#!/{resource_type}/{unique_id}",\n        )\n\n    def make_request(\n        self,\n        method: str,\n        endpoint: str,\n        data: Optional[Mapping[str, Any]] = None,\n        params: Optional[Mapping[str, Any]] = None,\n        return_text: bool = False,\n        base_url: Optional[str] = None,\n    ) -> Any:\n        """Creates and sends a request to the desired dbt Cloud API endpoint.\n\n        Args:\n            method (str): The http method to use for this request (e.g. 
"POST", "GET", "PATCH").\n            endpoint (str): The dbt Cloud API endpoint to send this request to.\n            data (Optional[Mapping[str, Any]]): JSON-formatable data string to be included in the request.\n            params (Optional[Mapping[str, Any]]): Payload to add to query string of the request.\n            return_text (bool): Override default behavior and return unparsed {"text": response.text}\n                blob instead of json.\n\n        Returns:\n            Dict[str, Any]: Parsed json data from the response to this request\n        """\n        headers = {\n            "User-Agent": f"dagster-dbt/{__version__}",\n            "Content-Type": "application/json",\n            "Authorization": f"Bearer {self._auth_token}",\n        }\n        base_url = base_url or self.api_v2_base_url\n        url = urljoin(base_url, endpoint)\n\n        if self._log_requests:\n            self._log.debug(f"Making Request: method={method} url={url} data={data}")\n\n        num_retries = 0\n        while True:\n            try:\n                response = requests.request(\n                    method=method,\n                    url=url,\n                    headers=headers,\n                    data=json.dumps(data),\n                    params=params,\n                )\n                response.raise_for_status()\n                return {"text": response.text} if return_text else response.json()["data"]\n            except RequestException as e:\n                self._log.error("Request to dbt Cloud API failed: %s", e)\n                if num_retries == self._request_max_retries:\n                    break\n                num_retries += 1\n                time.sleep(self._request_retry_delay)\n\n        raise Failure(f"Max retries ({self._request_max_retries}) exceeded with url: {url}.")\n\n    def list_jobs(\n        self, project_id: int, order_by: Optional[str] = "-id"\n    ) -> Sequence[Mapping[str, Any]]:\n        """List all dbt jobs in a dbt Cloud project.\n\n        Args:\n            project_id (int): The ID of the relevant dbt Cloud project. You can find this value by\n                going to your account settings in the dbt Cloud UI. It will be the final\n                number in the url, e.g.: ``https://cloud.getdbt.com/next/settings/accounts/{account_id}/projects/{project_id}/``\n            order_by (Optional[str]): An identifier designated by dbt Cloud in which to sort the\n                results before returning them. Useful when combined with offset and limit to load\n                runs for a job. Defaults to "-id" where "-" designates reverse order and "id" is\n                the key to filter on.\n\n        Returns:\n            List[Dict[str, Any]]: Parsed json data from the response to this request\n        """\n        return self.make_request(\n            "GET",\n            f"{self._account_id}/jobs",\n            params={"project_id": project_id, "order_by": order_by},\n        )\n\n    def get_job(self, job_id: int) -> Mapping[str, Any]:\n        """Gets details about a given dbt job from the dbt Cloud API.\n\n        Args:\n            job_id (int): The ID of the relevant dbt Cloud job. You can find this value by going to\n                the details page of your job in the dbt Cloud UI. 
It will be the final number in the\n                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/jobs/{job_id}/``\n        Returns:\n            Dict[str, Any]: Parsed json data from the response to this request\n        """\n        return self.make_request("GET", f"{self._account_id}/jobs/{job_id}/")\n\n    def update_job(self, job_id: int, **kwargs) -> Mapping[str, Any]:\n        """Updates specific properties of a dbt job.\n\n        Documentation on the full set of potential parameters can be found here:\n        https://docs.getdbt.com/dbt-cloud/api-v2#operation/updateJobById.\n\n        Args:\n            job_id (int): The ID of the relevant dbt Cloud job. You can find this value by going to\n                the details page of your job in the dbt Cloud UI. It will be the final number in the\n                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/jobs/{job_id}/``\n            kwargs: Passed in as the properties to be changed.\n\n        Returns:\n            Dict[str, Any]: Parsed json data from the response to this request\n\n        Examples:\n        .. code-block:: python\n\n            # disable schedule for job with id=12345\n            my_dbt_cloud_resource.update_job(12345, triggers={"schedule": False})\n        """\n        # API requires you to supply a bunch of values, so we can just use the current state\n        # as the defaults\n        job_data = self.get_job(job_id)\n        return self.make_request(\n            "POST", f"{self._account_id}/jobs/{job_id}/", data=deep_merge_dicts(job_data, kwargs)\n        )\n\n    def run_job(self, job_id: int, **kwargs) -> Mapping[str, Any]:\n        """Initializes a run for a job.\n\n        Overrides for specific properties can be set by passing in values to the kwargs. A full list\n        of overridable properties can be found here:\n        https://docs.getdbt.com/dbt-cloud/api-v2#operation/triggerRun.\n\n        Args:\n            job_id (int): The ID of the relevant dbt Cloud job. You can find this value by going to\n                the details page of your job in the dbt Cloud UI. It will be the final number in the\n                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/jobs/{job_id}/``\n            kwargs: Passed in as the properties to be overridden.\n\n        Returns:\n            Dict[str, Any]: Parsed json data from the response to this request\n        """\n        self._log.info(f"Initializing run for job with job_id={job_id}")\n        if "cause" not in kwargs:\n            kwargs["cause"] = "Triggered via Dagster"\n        resp = self.make_request("POST", f"{self._account_id}/jobs/{job_id}/run/", data=kwargs)\n\n        has_schedule: bool = resp.get("job", {}).get("triggers", {}).get("schedule", False)\n        if has_schedule and self._disable_schedule_on_trigger:\n            self._log.info("Disabling dbt Cloud job schedule.")\n            self.update_job(job_id, triggers={"schedule": False})\n\n        self._log.info(\n            f"Run initialized with run_id={resp['id']}. 
View this run in "\n            f"the dbt Cloud UI: {resp['href']}"\n        )\n        return resp\n\n    def get_runs(\n        self,\n        include_related: Optional[Sequence[str]] = None,\n        job_id: Optional[int] = None,\n        order_by: Optional[str] = "-id",\n        offset: int = 0,\n        limit: int = 100,\n    ) -> Sequence[Mapping[str, object]]:\n        """Returns a list of runs from dbt Cloud. This can be optionally filtered to a specific job\n        using the job_definition_id. It supports pagination using offset and limit as well and\n        can be configured to load a variety of related information about the runs.\n\n        Args:\n            include_related (Optional[List[str]]): A list of resources to include in the response\n                from dbt Cloud. This is technically a required field according to the API, but it\n                can be passed with an empty list where it will only load the default run\n                information. Valid values are "trigger", "job", "repository", and "environment".\n            job_definition_id (Optional[int]): This method can be optionally filtered to only\n                load runs for a specific job id if it is included here. If omitted it will pull\n                runs for every job.\n            order_by (Optional[str]): An identifier designated by dbt Cloud in which to sort the\n                results before returning them. Useful when combined with offset and limit to load\n                runs for a job. Defaults to "-id" where "-" designates reverse order and "id" is\n                the key to filter on.\n            offset (int): An offset to apply when listing runs. Can be used to paginate results\n                when combined with order_by and limit. Defaults to 0.\n            limit (int): Limits the amount of rows returned by the API. Defaults to 100.\n\n        Returns:\n            List[Dict[str, Any]]: A list of dictionaries containing the runs and any included\n                related information.\n        """\n        query_dict = {\n            "include_related": include_related or [],\n            "order_by": order_by,\n            "offset": offset,\n            "limit": limit,\n        }\n        if job_id:\n            query_dict["job_definition_id"] = job_id\n        return self.make_request("GET", f"{self._account_id}/runs/?{urlencode(query_dict)}")\n\n    def get_run(\n        self, run_id: int, include_related: Optional[Sequence[str]] = None\n    ) -> Mapping[str, Any]:\n        """Gets details about a specific job run.\n\n        Args:\n            run_id (int): The ID of the relevant dbt Cloud run. You can find this value by going to\n                the details page of your run in the dbt Cloud UI. It will be the final number in the\n                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/runs/{run_id}/``\n            include_related (List[str]): List of related fields to pull with the run. 
Valid values\n                are "trigger", "job", and "debug_logs".\n\n        Returns:\n            Dict[str, Any]: A dictionary containing the parsed contents of the dbt Cloud run details.\n                See: https://docs.getdbt.com/dbt-cloud/api-v2#operation/getRunById for schema.\n        """\n        query_params = f"?include_related={','.join(include_related)}" if include_related else ""\n        return self.make_request(\n            "GET",\n            f"{self._account_id}/runs/{run_id}/{query_params}",\n        )\n\n    def get_run_steps(self, run_id: int) -> Sequence[str]:\n        """Gets the steps of an initialized dbt Cloud run.\n\n        Args:\n            run_id (int): The ID of the relevant dbt Cloud run. You can find this value by going to\n                the details page of your run in the dbt Cloud UI. It will be the final number in the\n                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/runs/{run_id}/``\n\n        Returns:\n            List[str, Any]: List of commands for each step of the run.\n        """\n        run_details = self.get_run(run_id, include_related=["trigger", "job"])\n        steps = run_details["job"]["execute_steps"]\n        steps_override = run_details["trigger"]["steps_override"]\n        return steps_override or steps\n\n    def cancel_run(self, run_id: int) -> Mapping[str, Any]:\n        """Cancels a dbt Cloud run.\n\n        Args:\n            run_id (int): The ID of the relevant dbt Cloud run. You can find this value by going to\n                the details page of your run in the dbt Cloud UI. It will be the final number in the\n                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/runs/{run_id}/``\n\n        Returns:\n            Dict[str, Any]: A dictionary containing the parsed contents of the dbt Cloud run details.\n                See: https://docs.getdbt.com/dbt-cloud/api-v2#operation/getRunById for schema.\n        """\n        self._log.info(f"Cancelling run with id '{run_id}'")\n        return self.make_request("POST", f"{self._account_id}/runs/{run_id}/cancel/")\n\n    def list_run_artifacts(self, run_id: int, step: Optional[int] = None) -> Sequence[str]:\n        """Lists the paths of the available run artifacts from a completed dbt Cloud run.\n\n        Args:\n            run_id (int): The ID of the relevant dbt Cloud run. You can find this value by going to\n                the details page of your run in the dbt Cloud UI. It will be the final number in the\n                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/runs/{run_id}/``\n            step (int): The index of the step in the run to query for artifacts. The first step in\n                the run has the index 1. 
If the step parameter is omitted, then this endpoint will\n                return the artifacts compiled for the last step in the run\n\n        Returns:\n            List[str]: List of the paths of the available run artifacts\n        """\n        query_params = f"?step={step}" if step else ""\n        return cast(\n            list,\n            self.make_request(\n                "GET",\n                f"{self._account_id}/runs/{run_id}/artifacts/{query_params}",\n                data={"step": step} if step else None,\n            ),\n        )\n\n    def get_run_artifact(self, run_id: int, path: str, step: Optional[int] = None) -> str:\n        """The string contents of a run artifact from a dbt Cloud run.\n\n        Args:\n            run_id (int): The ID of the relevant dbt Cloud run. You can find this value by going to\n                the details page of your run in the dbt Cloud UI. It will be the final number in the\n                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/runs/{run_id}/``\n            path (str): The path to this run artifact (e.g. 'run/my_new_project/models/example/my_first_dbt_model.sql')\n            step (int): The index of the step in the run to query for artifacts. The first step in\n                the run has the index 1. If the step parameter is omitted, then this endpoint will\n                return the artifacts compiled for the last step in the run.\n\n        Returns:\n            List[str]: List of the names of the available run artifacts\n        """\n        query_params = f"?step={step}" if step else ""\n        return self.make_request(\n            "GET",\n            f"{self._account_id}/runs/{run_id}/artifacts/{path}{query_params}",\n            data={"step": step} if step else None,\n            return_text=True,\n        )["text"]\n\n    def get_manifest(self, run_id: int, step: Optional[int] = None) -> Mapping[str, Any]:\n        """The parsed contents of a manifest.json file created by a completed run.\n\n        Args:\n            run_id (int): The ID of the relevant dbt Cloud run. You can find this value by going to\n                the details page of your run in the dbt Cloud UI. It will be the final number in the\n                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/runs/{run_id}/``\n            step (int): The index of the step in the run to query for artifacts. The first step in\n                the run has the index 1. If the step parameter is omitted, then this endpoint will\n                return the artifacts compiled for the last step in the run.\n\n        Returns:\n            Dict[str, Any]: Parsed contents of the manifest.json file\n        """\n        return json.loads(self.get_run_artifact(run_id, "manifest.json", step=step))\n\n    def get_run_results(self, run_id: int, step: Optional[int] = None) -> Mapping[str, Any]:\n        """The parsed contents of a run_results.json file created by a completed run.\n\n        Args:\n            run_id (int): The ID of the relevant dbt Cloud run. You can find this value by going to\n                the details page of your run in the dbt Cloud UI. It will be the final number in the\n                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/runs/{run_id}/``\n            step (int): The index of the step in the run to query for artifacts. The first step in\n                the run has the index 1. 
If the step parameter is omitted, then this endpoint will\n                return the artifacts compiled for the last step in the run.\n\n        Returns:\n            Dict[str, Any]: Parsed contents of the run_results.json file\n        """\n        return json.loads(self.get_run_artifact(run_id, "run_results.json", step=step))\n\n    def poll_run(\n        self,\n        run_id: int,\n        poll_interval: float = DEFAULT_POLL_INTERVAL,\n        poll_timeout: Optional[float] = None,\n        href: Optional[str] = None,\n    ) -> Mapping[str, Any]:\n        """Polls a dbt Cloud job run until it completes. Will raise a `dagster.Failure` exception if the\n        run does not complete successfully.\n\n        Args:\n            run_id (int): The ID of the relevant dbt Cloud run. You can find this value by going to\n                the details page of your run in the dbt Cloud UI. It will be the final number in the\n                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/runs/{run_id}/``\n            poll_interval (float): The time (in seconds) that should be waited between successive\n                polls of the dbt Cloud API.\n            poll_timeout (float): The maximum time (in seconds) that should be waited for this run\n                to complete. If this threshold is exceeded, the run will be cancelled and an\n                exception will be thrown. By default, this will poll forver.\n            href (str): For internal use, generally should not be set manually.\n\n        Returns:\n            Dict[str, Any]: A dictionary containing the parsed contents of the dbt Cloud run details.\n                See: https://docs.getdbt.com/dbt-cloud/api-v2#operation/getRunById for schema.\n        """\n        status: Optional[str] = None\n\n        if href is None:\n            href = self.get_run(run_id).get("href")\n        assert isinstance(href, str), "Run must have an href"\n\n        poll_start = datetime.datetime.now()\n        try:\n            while True:\n                run_details = self.get_run(run_id)\n                status = run_details["status_humanized"]\n                self._log.info(f"Polled run {run_id}. Status: [{status}]")\n\n                # completed successfully\n                if status == DbtCloudRunStatus.SUCCESS:\n                    return self.get_run(run_id, include_related=["job", "trigger", "run_steps"])\n                elif status in [DbtCloudRunStatus.ERROR, DbtCloudRunStatus.CANCELLED]:\n                    break\n                elif status not in [\n                    DbtCloudRunStatus.QUEUED,\n                    DbtCloudRunStatus.STARTING,\n                    DbtCloudRunStatus.RUNNING,\n                ]:\n                    check.failed(f"Received unexpected status '{status}'. This should never happen")\n\n                if poll_timeout and datetime.datetime.now() > poll_start + datetime.timedelta(\n                    seconds=poll_timeout\n                ):\n                    self.cancel_run(run_id)\n                    raise Failure(\n                        f"Run {run_id} timed out after "\n                        f"{datetime.datetime.now() - poll_start}. 
Attempted to cancel.",\n                        metadata={"run_page_url": MetadataValue.url(href)},\n                    )\n\n                # Sleep for the configured time interval before polling again.\n                time.sleep(poll_interval)\n        finally:\n            if status not in (\n                DbtCloudRunStatus.SUCCESS,\n                DbtCloudRunStatus.ERROR,\n                DbtCloudRunStatus.CANCELLED,\n            ):\n                self.cancel_run(run_id)\n\n        run_details = self.get_run(run_id, include_related=["trigger"])\n        raise Failure(\n            f"Run {run_id} failed. Status Message: {run_details['status_message']}",\n            metadata={\n                "run_details": MetadataValue.json(run_details),\n                "run_page_url": MetadataValue.url(href),\n            },\n        )\n\n    def run_job_and_poll(\n        self,\n        job_id: int,\n        poll_interval: float = DEFAULT_POLL_INTERVAL,\n        poll_timeout: Optional[float] = None,\n        **kwargs,\n    ) -> DbtCloudOutput:\n        """Runs a dbt Cloud job and polls until it completes. Will raise a `dagster.Failure` exception\n        if the run does not complete successfully.\n\n        Args:\n            job_id (int): The ID of the relevant dbt Cloud job. You can find this value by going to\n                the details page of your job in the dbt Cloud UI. It will be the final number in the\n                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/jobs/{job_id}/``\n            poll_interval (float): The time (in seconds) that should be waited between successive\n                polls of the dbt Cloud API.\n            poll_timeout (float): The maximum time (in seconds) that should be waited for this run\n                to complete. If this threshold is exceeded, the run will be cancelled and an\n                exception will be thrown. By default, this will poll forver.\n\n        Returns:\n            :py:class:`~DbtCloudOutput`: Class containing details about the specific job run and the\n                parsed run results.\n        """\n        run_details = self.run_job(job_id, **kwargs)\n        run_id = run_details["id"]\n        href = run_details["href"]\n        final_run_details = self.poll_run(\n            run_id, poll_interval=poll_interval, poll_timeout=poll_timeout, href=href\n        )\n        try:\n            run_results = self.get_run_results(run_id)\n        # if you fail to get run_results for this job, just leave it empty\n        except Failure:\n            self._log.info(\n                "run_results.json not available for this run. Defaulting to empty value."\n            )\n            run_results = {}\n        output = DbtCloudOutput(run_details=final_run_details, result=run_results)\n        if output.docs_url:\n            self._log.info(f"Docs for this run can be viewed here: {output.docs_url}")\n        return output\n\n    def get_job_environment_variables(self, project_id: int, job_id: int) -> Mapping[str, Any]:\n        """Get the dbt Cloud environment variables for a specific job.\n\n        Args:\n            project_id (int): The ID of the relevant dbt Cloud project. You can find this value by\n                going to your account settings in the dbt Cloud UI. It will be the final\n                number in the url, e.g.: ``https://cloud.getdbt.com/next/settings/accounts/{account_id}/projects/{project_id}/``\n            job_id (int): The ID of the relevant dbt Cloud job. 
You can find this value by going to\n                the details page of your job in the dbt Cloud UI. It will be the final number in the\n                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/jobs/{job_id}/``\n        """\n        return self.make_request(\n            "GET",\n            f"{self._account_id}/projects/{project_id}/environment-variables/job",\n            params={"job_definition_id": job_id},\n            base_url=self.api_v3_base_url,\n        )\n\n    def set_job_environment_variable(\n        self, project_id: int, job_id: int, environment_variable_id: int, name: str, value: str\n    ) -> Mapping[str, Any]:\n        """Set the dbt Cloud environment variables for a specific job.\n\n        Args:\n            project_id (int): The ID of the relevant dbt Cloud project. You can find this value by\n                going to your account settings in the dbt Cloud UI. It will be the final\n                number in the url, e.g.: ``https://cloud.getdbt.com/next/settings/accounts/{account_id}/projects/{project_id}/``\n            job_id (int): The ID of the relevant dbt Cloud job. You can find this value by going to\n                the details page of your job in the dbt Cloud UI. It will be the final number in the\n                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/jobs/{job_id}/``\n            name (str): The name of the environment variable to set.\n            value (str): The raw value of the environment variable.\n        """\n        return self.make_request(\n            "POST",\n            f"{self._account_id}/projects/{project_id}/environment-variables/{environment_variable_id}",\n            data={\n                "id": environment_variable_id,\n                "account_id": self._account_id,\n                "project_id": project_id,\n                "job_definition_id": job_id,\n                "type": "job",\n                "name": name,\n                "raw_value": value,\n            },\n            base_url=self.api_v3_base_url,\n        )\n\n\nclass DbtCloudResource(DbtCloudClient):\n    pass\n\n\n
[docs]class DbtCloudClientResource(ConfigurableResource, IAttachDifferentObjectToOpContext):\n """This resource helps interact with dbt Cloud connectors."""\n\n auth_token: str = Field(\n description=(\n "dbt Cloud API Token. User tokens can be found in the [dbt Cloud"\n " UI](https://cloud.getdbt.com/#/profile/api/), or see the [dbt Cloud"\n " Docs](https://docs.getdbt.com/docs/dbt-cloud/dbt-cloud-api/service-tokens) for"\n " instructions on creating a Service Account token."\n ),\n )\n account_id: int = Field(\n description=(\n "dbt Cloud Account ID. This value can be found in the url of a variety of views in"\n " the dbt Cloud UI, e.g."\n " https://cloud.getdbt.com/#/accounts/{account_id}/settings/."\n ),\n )\n disable_schedule_on_trigger: bool = Field(\n default=True,\n description=(\n "Specifies if you would like any job that is triggered using this "\n "resource to automatically disable its schedule."\n ),\n )\n request_max_retries: int = Field(\n default=3,\n description=(\n "The maximum number of times requests to the dbt Cloud API should be retried "\n "before failing."\n ),\n )\n request_retry_delay: float = Field(\n default=0.25,\n description="Time (in seconds) to wait between each request retry.",\n )\n dbt_cloud_host: str = Field(\n default=DBT_DEFAULT_HOST,\n description=(\n "The hostname where dbt cloud is being hosted (e.g. https://my_org.cloud.getdbt.com/)."\n ),\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def get_dbt_client(self) -> DbtCloudClient:\n context = self.get_resource_context()\n assert context.log\n\n return DbtCloudClient(\n auth_token=self.auth_token,\n account_id=self.account_id,\n disable_schedule_on_trigger=self.disable_schedule_on_trigger,\n request_max_retries=self.request_max_retries,\n request_retry_delay=self.request_retry_delay,\n log=context.log,\n dbt_cloud_host=self.dbt_cloud_host,\n )\n\n def get_object_to_set_on_execution_context(self) -> Any:\n return self.get_dbt_client()
\n\n\n
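A minimal sketch of wiring the Pythonic resource above into a ``Definitions`` object; the environment variable name and account id are placeholders.

from dagster import Definitions, EnvVar, job, op
from dagster_dbt import DbtCloudClientResource


@op(required_resource_keys={"dbt_cloud"})
def trigger_dbt_cloud_work(context):
    # Because of IAttachDifferentObjectToOpContext, context.resources.dbt_cloud
    # is the underlying DbtCloudClient rather than the resource object itself.
    client = context.resources.dbt_cloud
    ...


@job
def dbt_cloud_job():
    trigger_dbt_cloud_work()


defs = Definitions(
    jobs=[dbt_cloud_job],
    resources={
        "dbt_cloud": DbtCloudClientResource(
            auth_token=EnvVar("DBT_CLOUD_API_TOKEN"),
            account_id=11111,  # placeholder account id
        )
    },
)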
[docs]@dagster_maintained_resource\n@resource(\n    config_schema=DbtCloudClientResource.to_config_schema(),\n    description="This resource helps interact with dbt Cloud connectors",\n)\ndef dbt_cloud_resource(context) -> DbtCloudResource:\n    """This resource allows users to programmatically interface with the dbt Cloud Administrative REST\n    API (v2) to launch jobs and monitor their progress. This currently implements only a subset of\n    the functionality exposed by the API.\n\n    For a complete set of documentation on the dbt Cloud Administrative REST API, including expected\n    response JSON schemas, see the `dbt Cloud API Docs <https://docs.getdbt.com/dbt-cloud/api-v2>`_.\n\n    To configure this resource, we recommend using the `configured\n    <https://docs.dagster.io/concepts/configuration/configured>`_ method.\n\n    **Examples:**\n\n    .. code-block:: python\n\n        from dagster import job\n        from dagster_dbt import dbt_cloud_resource\n\n        my_dbt_cloud_resource = dbt_cloud_resource.configured(\n            {\n                "auth_token": {"env": "DBT_CLOUD_AUTH_TOKEN"},\n                "account_id": {"env": "DBT_CLOUD_ACCOUNT_ID"},\n            }\n        )\n\n        @job(resource_defs={"dbt_cloud": my_dbt_cloud_resource})\n        def my_dbt_cloud_job():\n            ...\n    """\n    return DbtCloudResource(\n        auth_token=context.resource_config["auth_token"],\n        account_id=context.resource_config["account_id"],\n        disable_schedule_on_trigger=context.resource_config["disable_schedule_on_trigger"],\n        request_max_retries=context.resource_config["request_max_retries"],\n        request_retry_delay=context.resource_config["request_retry_delay"],\n        log=context.log,\n        dbt_cloud_host=context.resource_config["dbt_cloud_host"],\n    )
\n
", "current_page_name": "_modules/dagster_dbt/cloud/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dbt.cloud.resources"}}, "core": {"resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dbt.core.resources

\nfrom typing import Any, Iterator, Mapping, Optional, Sequence, Set\n\nimport dagster._check as check\nfrom dagster import resource\nfrom dagster._annotations import deprecated, public\nfrom dagster._config.pythonic_config import ConfigurableResource, IAttachDifferentObjectToOpContext\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster._utils.merger import merge_dicts\nfrom pydantic import Field\n\nfrom ..dbt_resource import DbtClient\nfrom .types import DbtCliOutput\nfrom .utils import (\n    DEFAULT_DBT_TARGET_PATH,\n    execute_cli,\n    execute_cli_stream,\n    parse_manifest,\n    parse_run_results,\n    remove_run_results,\n)\n\nDEFAULT_DBT_EXECUTABLE = "dbt"\n\n# The set of dbt cli commands that result in the creation of a run_results.json output file\n# https://docs.getdbt.com/reference/artifacts/run-results-json\nDBT_RUN_RESULTS_COMMANDS = ["run", "test", "seed", "snapshot", "docs generate", "build"]\n\n# The following config fields correspond to flags that apply to all dbt CLI commands. For details\n# on dbt CLI flags, see\n# https://github.com/fishtown-analytics/dbt/blob/1f8e29276e910c697588c43f08bc881379fff178/core/dbt/main.py#L260-L329\n\nCOMMON_OPTION_KEYS = {\n    "warn_error",\n    "dbt_executable",\n    "ignore_handled_error",\n    "target_path",\n    "docs_url",\n    "json_log_format",\n    "capture_logs",\n    "debug",\n}\n\n\nclass ConfigurableResourceWithCliFlags(ConfigurableResource):\n    project_dir: str = Field(\n        default=".",\n        description=(\n            "Which directory to look in for the dbt_project.yml file. Default is the current "\n            "working directory and its parents."\n        ),\n    )\n    profiles_dir: Optional[str] = Field(\n        default=None,\n        description=(\n            "Which directory to look in for the profiles.yml file. Default = $DBT_PROFILES_DIR or "\n            "$HOME/.dbt"\n        ),\n    )\n    profile: Optional[str] = Field(\n        default=None, description="Which profile to load. Overrides setting in dbt_project.yml."\n    )\n    target: Optional[str] = Field(\n        default=None, description="Which target to load for the given profile."\n    )\n    vars: Optional[Mapping[str, Any]] = Field(\n        default=None,\n        description=(\n            "Supply variables to the project. This argument overrides variables defined in your "\n            "dbt_project.yml file. This argument should be a dictionary, eg. "\n            "{'my_variable': 'my_value'}"\n        ),\n    )\n    bypass_cache: bool = Field(\n        default=False, description="If set, bypass the adapter-level cache of database state"\n    )\n    warn_error: bool = Field(\n        default=False,\n        description=(\n            "If dbt would normally warn, instead raise an exception. Examples include --models "\n            "that selects nothing, deprecations, configurations with no associated models, "\n            "invalid test configurations, and missing sources/refs in tests."\n        ),\n    )\n    dbt_executable: str = Field(\n        default=DEFAULT_DBT_EXECUTABLE,\n        description=f"Path to the dbt executable. Default is {DEFAULT_DBT_EXECUTABLE}",\n    )\n    ignore_handled_error: bool = Field(\n        default=False,\n        description=(\n            "When True, will not raise an exception when the dbt CLI returns error code 1. 
"\n            "Default is False."\n        ),\n    )\n    target_path: str = Field(\n        default=DEFAULT_DBT_TARGET_PATH,\n        description=(\n            "The directory path for target if different from the default `target-path` in "\n            "your dbt project configuration file."\n        ),\n    )\n    docs_url: Optional[str] = Field(\n        default=None, description="The url for where dbt docs are being served for this project."\n    )\n    json_log_format: bool = Field(\n        default=True,\n        description=(\n            "When True, dbt will invoked with the `--log-format json` flag, allowing "\n            "Dagster to parse the log messages and emit simpler log messages to the event log."\n        ),\n    )\n    capture_logs: bool = Field(\n        default=True,\n        description=(\n            "When True, dbt will invoked with the `--capture-output` flag, allowing "\n            "Dagster to capture the logs and emit them to the event log."\n        ),\n    )\n    debug: bool = Field(\n        default=False,\n        description=(\n            "When True, dbt will invoked with the `--debug` flag, which will print "\n            "additional debug information to the console."\n        ),\n    )\n\n\nclass DbtCliClient(DbtClient):\n    """A resource that allows you to execute dbt cli commands.\n\n    For the most up-to-date documentation on the specific parameters available to you for each\n    command, check out the dbt docs:\n\n    https://docs.getdbt.com/reference/commands/run\n\n    To use this as a dagster resource, we recommend using\n    :func:`dbt_cli_resource <dagster_dbt.dbt_cli_resource>`.\n    """\n\n    def __init__(\n        self,\n        executable: str,\n        default_flags: Mapping[str, Any],\n        warn_error: bool,\n        ignore_handled_error: bool,\n        target_path: str,\n        logger: Optional[Any] = None,\n        docs_url: Optional[str] = None,\n        json_log_format: bool = True,\n        capture_logs: bool = True,\n        debug: bool = False,\n    ):\n        self._default_flags = default_flags\n        self._executable = executable\n        self._warn_error = warn_error\n        self._ignore_handled_error = ignore_handled_error\n        self._target_path = target_path\n        self._docs_url = docs_url\n        self._json_log_format = json_log_format\n        self._capture_logs = capture_logs\n        self._debug = debug\n        super().__init__(logger)\n\n    @property\n    def default_flags(self) -> Mapping[str, Any]:\n        """A set of params populated from resource config that are passed as flags to each dbt CLI command."""\n        return self._format_params(self._default_flags, replace_underscores=True)\n\n    @property\n    def strict_flags(self) -> Set[str]:\n        """A set of flags that should not be auto-populated from the default flags unless they are\n        arguments to the associated function.\n        """\n        return {"models", "exclude", "select"}\n\n    def _get_flags_dict(self, kwargs) -> Mapping[str, Any]:\n        extra_flags = {} if kwargs is None else kwargs\n\n        # remove default flags that are declared as "strict" and not explicitly passed in\n        default_flags = {\n            k: v\n            for k, v in self.default_flags.items()\n            if not (k in self.strict_flags and k not in extra_flags)\n        }\n\n        return merge_dicts(\n            default_flags, self._format_params(extra_flags, replace_underscores=True)\n        )\n\n    @public\n    def cli(self, 
command: str, **kwargs) -> DbtCliOutput:\n        """Executes a dbt CLI command. Params passed in as keyword arguments will be merged with the\n            default flags that were configured on resource initialization (if any) overriding the\n            default values if necessary.\n\n        Args:\n            command (str): The command you wish to run (e.g. 'run', 'test', 'docs generate', etc.)\n\n        Returns:\n            DbtCliOutput: An instance of :class:`DbtCliOutput<dagster_dbt.DbtCliOutput>` containing\n                parsed log output as well as the contents of run_results.json (if applicable).\n        """\n        command = check.str_param(command, "command")\n        return execute_cli(\n            executable=self._executable,\n            command=command,\n            flags_dict=self._get_flags_dict(kwargs),\n            log=self.logger,\n            warn_error=self._warn_error,\n            ignore_handled_error=self._ignore_handled_error,\n            target_path=self._target_path,\n            docs_url=self._docs_url,\n            json_log_format=self._json_log_format,\n            capture_logs=self._capture_logs,\n            debug=self._debug,\n        )\n\n    def cli_stream_json(self, command: str, **kwargs) -> Iterator[Mapping[str, Any]]:\n        """Executes a dbt CLI command. Params passed in as keyword arguments will be merged with the\n            default flags that were configured on resource initialization (if any) overriding the\n            default values if necessary.\n\n        Args:\n            command (str): The command you wish to run (e.g. 'run', 'test', 'docs generate', etc.)\n        """\n        check.invariant(self._json_log_format, "Cannot stream JSON if json_log_format is False.")\n        for event in execute_cli_stream(\n            executable=self._executable,\n            command=command,\n            flags_dict=self._get_flags_dict(kwargs),\n            log=self.logger,\n            warn_error=self._warn_error,\n            ignore_handled_error=self._ignore_handled_error,\n            json_log_format=self._json_log_format,\n            capture_logs=self._capture_logs,\n            debug=self._debug,\n        ):\n            if event.parsed_json_line is not None:\n                yield event.parsed_json_line\n\n    @public\n    def compile(\n        self,\n        models: Optional[Sequence[str]] = None,\n        exclude: Optional[Sequence[str]] = None,\n        select: Optional[Sequence[str]] = None,\n        **kwargs,\n    ) -> DbtCliOutput:\n        """Run the ``compile`` command on a dbt project. kwargs are passed in as additional parameters.\n\n        Args:\n            models (List[str], optional): the models to include in compilation.\n            exclude (List[str]), optional): the models to exclude from compilation.\n            select (List[str], optional): the models to include in compilation.\n\n        Returns:\n            DbtCliOutput: An instance of :class:`DbtCliOutput<dagster_dbt.DbtCliOutput>` containing\n                parsed log output as well as the contents of run_results.json (if applicable).\n        """\n        return self.cli("compile", models=models, exclude=exclude, select=select, **kwargs)\n\n    @public\n    def run(\n        self,\n        models: Optional[Sequence[str]] = None,\n        exclude: Optional[Sequence[str]] = None,\n        select: Optional[Sequence[str]] = None,\n        **kwargs,\n    ) -> DbtCliOutput:\n        """Run the ``run`` command on a dbt project. 
kwargs are passed in as additional parameters.\n\n        Args:\n            models (List[str], optional): the models to include in the run.\n            exclude (List[str]), optional): the models to exclude from the run.\n            select (List[str], optional): the models to include in the run.\n\n        Returns:\n            DbtCliOutput: An instance of :class:`DbtCliOutput<dagster_dbt.DbtCliOutput>` containing\n                parsed log output as well as the contents of run_results.json (if applicable).\n        """\n        return self.cli("run", models=models, exclude=exclude, select=select, **kwargs)\n\n    @public\n    def snapshot(\n        self,\n        select: Optional[Sequence[str]] = None,\n        exclude: Optional[Sequence[str]] = None,\n        **kwargs,\n    ) -> DbtCliOutput:\n        """Run the ``snapshot`` command on a dbt project. kwargs are passed in as additional parameters.\n\n        Args:\n            select (List[str], optional): the snapshots to include in the run.\n            exclude (List[str], optional): the snapshots to exclude from the run.\n\n        Returns:\n            DbtCliOutput: An instance of :class:`DbtCliOutput<dagster_dbt.DbtCliOutput>` containing\n                parsed log output as well as the contents of run_results.json (if applicable).\n        """\n        return self.cli("snapshot", select=select, exclude=exclude, **kwargs)\n\n    @public\n    def test(\n        self,\n        models: Optional[Sequence[str]] = None,\n        exclude: Optional[Sequence[str]] = None,\n        data: bool = True,\n        schema: bool = True,\n        select: Optional[Sequence[str]] = None,\n        **kwargs,\n    ) -> DbtCliOutput:\n        """Run the ``test`` command on a dbt project. kwargs are passed in as additional parameters.\n\n        Args:\n            models (List[str], optional): the models to include in testing.\n            exclude (List[str], optional): the models to exclude from testing.\n            data (bool, optional): If ``True`` (default), then run data tests.\n            schema (bool, optional): If ``True`` (default), then run schema tests.\n            select (List[str], optional): the models to include in testing.\n\n        Returns:\n            DbtCliOutput: An instance of :class:`DbtCliOutput<dagster_dbt.DbtCliOutput>` containing\n                parsed log output as well as the contents of run_results.json (if applicable).\n        """\n        if data and schema:\n            # do not include these arguments if both are True, as these are deprecated in later\n            # versions of dbt, and for older versions the functionality is the same regardless of\n            # if both are set or neither are set.\n            return self.cli("test", models=models, exclude=exclude, select=select, **kwargs)\n        return self.cli(\n            "test",\n            models=models,\n            exclude=exclude,\n            data=data,\n            schema=schema,\n            select=select,\n            **kwargs,\n        )\n\n    @public\n    def seed(\n        self,\n        show: bool = False,\n        select: Optional[Sequence[str]] = None,\n        exclude: Optional[Sequence[str]] = None,\n        **kwargs,\n    ) -> DbtCliOutput:\n        """Run the ``seed`` command on a dbt project. kwargs are passed in as additional parameters.\n\n        Args:\n            show (bool, optional): If ``True``, then show a sample of the seeded data in the\n                response. 
Defaults to ``False``.\n            select (List[str], optional): the snapshots to include in the run.\n            exclude (List[str], optional): the snapshots to exclude from the run.\n\n        Returns:\n            DbtCliOutput: An instance of :class:`DbtCliOutput<dagster_dbt.DbtCliOutput>` containing\n                parsed log output as well as the contents of run_results.json (if applicable).\n        """\n        return self.cli("seed", show=show, select=select, exclude=exclude, **kwargs)\n\n    @public\n    def ls(\n        self,\n        select: Optional[Sequence[str]] = None,\n        models: Optional[Sequence[str]] = None,\n        exclude: Optional[Sequence[str]] = None,\n        **kwargs,\n    ) -> DbtCliOutput:\n        """Run the ``ls`` command on a dbt project. kwargs are passed in as additional parameters.\n\n        Args:\n            select (List[str], optional): the resources to include in the output.\n            models (List[str], optional): the models to include in the output.\n            exclude (List[str], optional): the resources to exclude from the output.\n\n        Returns:\n            DbtCliOutput: An instance of :class:`DbtCliOutput<dagster_dbt.DbtCliOutput>` containing\n                parsed log output as well as the contents of run_results.json (if applicable).\n        """\n        return self.cli("ls", select=select, models=models, exclude=exclude, **kwargs)\n\n    @public\n    def build(self, select: Optional[Sequence[str]] = None, **kwargs) -> DbtCliOutput:\n        """Run the ``build`` command on a dbt project. kwargs are passed in as additional parameters.\n\n        Args:\n            select (List[str], optional): the models/resources to include in the run.\n\n        Returns:\n            DbtCliOutput: An instance of :class:`DbtCliOutput<dagster_dbt.DbtCliOutput>` containing\n                parsed log output as well as the contents of run_results.json (if applicable).\n        """\n        return self.cli("build", select=select, **kwargs)\n\n    @public\n    def freshness(self, select: Optional[Sequence[str]] = None, **kwargs) -> DbtCliOutput:\n        """Run the ``source snapshot-freshness`` command on a dbt project. kwargs are passed in as additional parameters.\n\n        Args:\n            select (List[str], optional): the sources to include in the run.\n\n        Returns:\n            DbtCliOutput: An instance of :class:`DbtCliOutput<dagster_dbt.DbtCliOutput>` containing\n                parsed log output as well as the contents of run_results.json (if applicable).\n        """\n        return self.cli("source snapshot-freshness", select=select, **kwargs)\n\n    @public\n    def generate_docs(self, compile_project: bool = False, **kwargs) -> DbtCliOutput:\n        """Run the ``docs generate`` command on a dbt project. kwargs are passed in as additional parameters.\n\n        Args:\n            compile_project (bool, optional): If true, compile the project before generating a catalog.\n\n        Returns:\n            DbtCliOutput: An instance of :class:`DbtCliOutput<dagster_dbt.DbtCliOutput>` containing\n                parsed log output as well as the contents of run_results.json (if applicable).\n        """\n        return self.cli("docs generate", compile=compile_project, **kwargs)\n\n    @public\n    def run_operation(\n        self, macro: str, args: Optional[Mapping[str, Any]] = None, **kwargs\n    ) -> DbtCliOutput:\n        """Run the ``run-operation`` command on a dbt project. 
kwargs are passed in as additional parameters.\n\n        Args:\n            macro (str): the dbt macro to invoke.\n            args (Dict[str, Any], optional): the keyword arguments to be supplied to the macro.\n\n        Returns:\n            DbtCliOutput: An instance of :class:`DbtCliOutput<dagster_dbt.DbtCliOutput>` containing\n                parsed log output as well as the contents of run_results.json (if applicable).\n        """\n        return self.cli(f"run-operation {macro}", args=args, **kwargs)\n\n    @public\n    def get_run_results_json(self, **kwargs) -> Optional[Mapping[str, Any]]:\n        """Get a parsed version of the run_results.json file for the relevant dbt project.\n\n        Returns:\n            Dict[str, Any]: dictionary containing the parsed contents of the manifest json file\n                for this dbt project.\n        """\n        project_dir = kwargs.get("project_dir", self.default_flags["project-dir"])\n        target_path = kwargs.get("target_path", self._target_path)\n        return parse_run_results(project_dir, target_path)\n\n    @public\n    def remove_run_results_json(self, **kwargs):\n        """Remove the run_results.json file from previous runs (if it exists)."""\n        project_dir = kwargs.get("project_dir", self.default_flags["project-dir"])\n        target_path = kwargs.get("target_path", self._target_path)\n        remove_run_results(project_dir, target_path)\n\n    @public\n    def get_manifest_json(self, **kwargs) -> Optional[Mapping[str, Any]]:\n        """Get a parsed version of the manifest.json file for the relevant dbt project.\n\n        Returns:\n            Dict[str, Any]: dictionary containing the parsed contents of the manifest json file\n                for this dbt project.\n        """\n        project_dir = kwargs.get("project_dir", self.default_flags["project-dir"])\n        target_path = kwargs.get("target_path", self._target_path)\n        return parse_manifest(project_dir, target_path)\n\n\nclass DbtCliClientResource(ConfigurableResourceWithCliFlags, IAttachDifferentObjectToOpContext):\n    """Resource which issues dbt CLI commands against a configured dbt project."""\n\n    class Config:\n        extra = "allow"\n\n    @classmethod\n    def _is_dagster_maintained(cls) -> bool:\n        return True\n\n    def get_dbt_client(self) -> DbtCliClient:\n        context = self.get_resource_context()\n        default_flags = {\n            k: v\n            for k, v in self._get_non_none_public_field_values().items()\n            if k not in COMMON_OPTION_KEYS\n        }\n\n        return DbtCliClient(\n            executable=self.dbt_executable,\n            default_flags=default_flags,\n            warn_error=self.warn_error,\n            ignore_handled_error=self.ignore_handled_error,\n            target_path=self.target_path,\n            docs_url=self.docs_url,\n            logger=context.log,\n            json_log_format=self.json_log_format,\n            capture_logs=self.capture_logs,\n            debug=self.debug,\n        )\n\n    def get_object_to_set_on_execution_context(self) -> Any:\n        return self.get_dbt_client()\n\n\n
[docs]@deprecated(breaking_version="0.21", additional_warn_text="Use DbtCliResource instead.")\n@dagster_maintained_resource\n@resource(config_schema=DbtCliClientResource.to_config_schema())\ndef dbt_cli_resource(context) -> DbtCliClient:\n """This resource issues dbt CLI commands against a configured dbt project. It is deprecated\n in favor of :py:class:`~dagster_dbt.DbtCliResource`.\n """\n # all config options that are intended to be used as flags for dbt commands\n\n default_flags = {\n k: v for k, v in context.resource_config.items() if k not in COMMON_OPTION_KEYS\n }\n return DbtCliClient(\n executable=context.resource_config["dbt_executable"],\n default_flags=default_flags,\n warn_error=context.resource_config["warn_error"],\n ignore_handled_error=context.resource_config["ignore_handled_error"],\n target_path=context.resource_config["target_path"],\n logger=context.log,\n docs_url=context.resource_config.get("docs_url"),\n capture_logs=context.resource_config["capture_logs"],\n json_log_format=context.resource_config["json_log_format"],\n debug=context.resource_config["debug"],\n )
\n
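A minimal sketch of the ``configured`` pattern for the legacy resource above, with an op that calls ``DbtCliClient`` methods; the project path and model selector are placeholders.

from dagster import job, op
from dagster_dbt import dbt_cli_resource

# Placeholder project path, for illustration only.
my_dbt = dbt_cli_resource.configured({"project_dir": "path/to/dbt/project"})


@op(required_resource_keys={"dbt"})
def run_and_test_models(context):
    # DbtCliClient methods mirror dbt CLI commands.
    context.resources.dbt.run(models=["my_model+"])
    context.resources.dbt.test()


@job(resource_defs={"dbt": my_dbt})
def dbt_cli_job():
    run_and_test_models()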
", "current_page_name": "_modules/dagster_dbt/core/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dbt.core.resources"}, "resources_v2": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dbt.core.resources_v2

\nimport atexit\nimport contextlib\nimport os\nimport shutil\nimport subprocess\nimport sys\nimport uuid\nfrom contextlib import suppress\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import (\n    Any,\n    Dict,\n    Iterator,\n    List,\n    Mapping,\n    Optional,\n    Union,\n)\n\nimport dateutil.parser\nimport orjson\nfrom dagster import (\n    AssetCheckResult,\n    AssetCheckSeverity,\n    AssetObservation,\n    AssetsDefinition,\n    ConfigurableResource,\n    Output,\n    get_dagster_logger,\n)\nfrom dagster._annotations import public\nfrom dagster._core.errors import DagsterInvalidPropertyError\nfrom dagster._core.execution.context.compute import OpExecutionContext\nfrom dbt.contracts.results import NodeStatus, TestStatus\nfrom dbt.node_types import NodeType\nfrom dbt.version import __version__ as dbt_version\nfrom packaging import version\nfrom pydantic import Field, root_validator, validator\nfrom typing_extensions import Literal\n\nfrom ..asset_utils import (\n    get_manifest_and_translator_from_dbt_assets,\n    output_name_fn,\n)\nfrom ..dagster_dbt_translator import DagsterDbtTranslator\nfrom ..dbt_manifest import DbtManifestParam, validate_manifest\nfrom ..errors import DagsterDbtCliRuntimeError\nfrom ..utils import ASSET_RESOURCE_TYPES, get_dbt_resource_props_by_dbt_unique_id_from_manifest\n\nlogger = get_dagster_logger()\n\n\nDBT_PROJECT_YML_NAME = "dbt_project.yml"\nDBT_PROFILES_YML_NAME = "profiles.yml"\nPARTIAL_PARSE_FILE_NAME = "partial_parse.msgpack"\n\n\ndef _get_dbt_target_path() -> Path:\n    return Path(os.getenv("DBT_TARGET_PATH", "target"))\n\n\n
[docs]@dataclass\nclass DbtCliEventMessage:\n """The representation of a dbt CLI event.\n\n Args:\n raw_event (Dict[str, Any]): The raw event dictionary.\n See https://docs.getdbt.com/reference/events-logging#structured-logging for more\n information.\n """\n\n raw_event: Dict[str, Any]\n\n @classmethod\n def from_log(cls, log: str) -> "DbtCliEventMessage":\n """Parse an event according to https://docs.getdbt.com/reference/events-logging#structured-logging.\n\n We assume that the log format is json.\n """\n raw_event: Dict[str, Any] = orjson.loads(log)\n\n return cls(raw_event=raw_event)\n\n def __str__(self) -> str:\n return self.raw_event["info"]["msg"]\n\n
[docs] @public\n def to_default_asset_events(\n self,\n manifest: DbtManifestParam,\n dagster_dbt_translator: DagsterDbtTranslator = DagsterDbtTranslator(),\n ) -> Iterator[Union[Output, AssetObservation, AssetCheckResult]]:\n """Convert a dbt CLI event to a set of corresponding Dagster events.\n\n Args:\n manifest (Union[Mapping[str, Any], str, Path]): The dbt manifest blob.\n dagster_dbt_translator (DagsterDbtTranslator): Optionally, a custom translator for\n linking dbt nodes to Dagster assets.\n\n Returns:\n Iterator[Union[Output, AssetObservation, AssetCheckResult]]: A set of corresponding Dagster events.\n - Output for refables (e.g. models, seeds, snapshots.)\n - AssetObservation for dbt test results that are not enabled as asset checks.\n - AssetCheckResult for dbt test results that are enabled as asset checks.\n """\n if self.raw_event["info"]["level"] == "debug":\n return\n\n event_node_info: Dict[str, Any] = self.raw_event["data"].get("node_info")\n if not event_node_info:\n return\n\n manifest = validate_manifest(manifest)\n\n if not manifest:\n logger.info(\n "No dbt manifest was provided. Dagster events for dbt tests will not be created."\n )\n\n invocation_id: str = self.raw_event["info"]["invocation_id"]\n unique_id: str = event_node_info["unique_id"]\n node_resource_type: str = event_node_info["resource_type"]\n node_status: str = event_node_info["node_status"]\n\n is_node_successful = node_status == NodeStatus.Success\n is_node_finished = bool(event_node_info.get("node_finished_at"))\n if node_resource_type in NodeType.refable() and is_node_successful:\n started_at = dateutil.parser.isoparse(event_node_info["node_started_at"])\n finished_at = dateutil.parser.isoparse(event_node_info["node_finished_at"])\n duration_seconds = (finished_at - started_at).total_seconds()\n\n yield Output(\n value=None,\n output_name=output_name_fn(event_node_info),\n metadata={\n "unique_id": unique_id,\n "invocation_id": invocation_id,\n "Execution Duration": duration_seconds,\n },\n )\n elif manifest and node_resource_type == NodeType.Test and is_node_finished:\n upstream_unique_ids: List[str] = manifest["parent_map"][unique_id]\n test_resource_props = manifest["nodes"][unique_id]\n metadata = {\n "unique_id": unique_id,\n "invocation_id": invocation_id,\n "status": node_status,\n }\n\n is_asset_check = dagster_dbt_translator.settings.enable_asset_checks\n attached_node_unique_id = test_resource_props.get("attached_node")\n is_generic_test = bool(attached_node_unique_id)\n\n if is_asset_check and is_generic_test:\n is_test_successful = node_status == TestStatus.Pass\n severity = AssetCheckSeverity(test_resource_props["config"]["severity"].upper())\n\n attached_node_resource_props: Dict[str, Any] = manifest["nodes"].get(\n attached_node_unique_id\n ) or manifest["sources"].get(attached_node_unique_id)\n attached_node_asset_key = dagster_dbt_translator.get_asset_key(\n attached_node_resource_props\n )\n\n yield AssetCheckResult(\n passed=is_test_successful,\n asset_key=attached_node_asset_key,\n check_name=event_node_info["node_name"],\n metadata=metadata,\n severity=severity,\n )\n else:\n for upstream_unique_id in upstream_unique_ids:\n upstream_resource_props: Dict[str, Any] = manifest["nodes"].get(\n upstream_unique_id\n ) or manifest["sources"].get(upstream_unique_id)\n upstream_asset_key = dagster_dbt_translator.get_asset_key(\n upstream_resource_props\n )\n\n yield AssetObservation(\n asset_key=upstream_asset_key,\n metadata=metadata,\n )
\n\n\n
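A rough sketch of converting a single structured dbt log line into Dagster events with the class above; the log line and manifest path are placeholder values.

from pathlib import Path

from dagster_dbt import DbtCliEventMessage

# A structured (JSON-formatted) dbt log line; placeholder content for illustration.
raw_log_line = '{"info": {"level": "info", "msg": "...", "invocation_id": "abc"}, "data": {}}'

event = DbtCliEventMessage.from_log(raw_log_line)

# Convert the dbt event into the default Dagster events (Output, AssetObservation,
# AssetCheckResult). A log line without node_info produces no events.
for dagster_event in event.to_default_asset_events(manifest=Path("target", "manifest.json")):
    print(dagster_event)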
[docs]@dataclass\nclass DbtCliInvocation:\n """The representation of an invoked dbt command.\n\n Args:\n process (subprocess.Popen): The process running the dbt command.\n manifest (Mapping[str, Any]): The dbt manifest blob.\n project_dir (Path): The path to the dbt project.\n target_path (Path): The path to the dbt target folder.\n raise_on_error (bool): Whether to raise an exception if the dbt command fails.\n """\n\n process: subprocess.Popen\n manifest: Mapping[str, Any]\n dagster_dbt_translator: DagsterDbtTranslator\n project_dir: Path\n target_path: Path\n raise_on_error: bool\n\n @classmethod\n def run(\n cls,\n args: List[str],\n env: Dict[str, str],\n manifest: Mapping[str, Any],\n dagster_dbt_translator: DagsterDbtTranslator,\n project_dir: Path,\n target_path: Path,\n raise_on_error: bool,\n ) -> "DbtCliInvocation":\n # Attempt to take advantage of partial parsing. If there is a `partial_parse.msgpack` in\n # in the target folder, then copy it to the dynamic target path.\n #\n # This effectively allows us to skip the parsing of the manifest, which can be expensive.\n # See https://docs.getdbt.com/reference/programmatic-invocations#reusing-objects for more\n # details.\n current_target_path = _get_dbt_target_path()\n partial_parse_file_path = (\n current_target_path.joinpath(PARTIAL_PARSE_FILE_NAME)\n if current_target_path.is_absolute()\n else project_dir.joinpath(current_target_path, PARTIAL_PARSE_FILE_NAME)\n )\n partial_parse_destination_target_path = target_path.joinpath(PARTIAL_PARSE_FILE_NAME)\n\n if partial_parse_file_path.exists():\n logger.info(\n f"Copying `{partial_parse_file_path}` to `{partial_parse_destination_target_path}`"\n " to take advantage of partial parsing."\n )\n\n partial_parse_destination_target_path.parent.mkdir(parents=True, exist_ok=True)\n shutil.copy(partial_parse_file_path, partial_parse_destination_target_path)\n\n # Create a subprocess that runs the dbt CLI command.\n logger.info(f"Running dbt command: `{' '.join(args)}`.")\n process = subprocess.Popen(\n args=args,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n env=env,\n cwd=project_dir,\n )\n\n # Add handler to terminate child process if running.\n # See https://stackoverflow.com/a/18258391 for more details.\n def cleanup_dbt_subprocess(process: subprocess.Popen) -> None:\n if process.returncode is None:\n logger.info(\n "The main process is being terminated, but the dbt command has not yet"\n " completed. Terminating the execution of dbt command."\n )\n process.terminate()\n process.wait()\n\n atexit.register(cleanup_dbt_subprocess, process)\n\n return cls(\n process=process,\n manifest=manifest,\n dagster_dbt_translator=dagster_dbt_translator,\n project_dir=project_dir,\n target_path=target_path,\n raise_on_error=raise_on_error,\n )\n\n
[docs] @public\n def wait(self) -> "DbtCliInvocation":\n """Wait for the dbt CLI process to complete.\n\n Returns:\n DbtCliInvocation: The current representation of the dbt CLI invocation.\n\n Examples:\n .. code-block:: python\n\n from dagster_dbt import DbtCliResource\n\n dbt = DbtCliResource(project_dir="/path/to/dbt/project")\n\n dbt_cli_invocation = dbt.cli(["run"]).wait()\n """\n list(self.stream_raw_events())\n\n return self
\n\n
[docs] @public\n def is_successful(self) -> bool:\n """Return whether the dbt CLI process completed successfully.\n\n Returns:\n bool: True, if the dbt CLI process returns with a zero exit code, and False otherwise.\n\n Examples:\n .. code-block:: python\n\n from dagster_dbt import DbtCliResource\n\n dbt = DbtCliResource(project_dir="/path/to/dbt/project")\n\n dbt_cli_invocation = dbt.cli(["run"], raise_on_error=False)\n\n if dbt_cli_invocation.is_successful():\n ...\n """\n return self.process.wait() == 0
\n\n
[docs] @public\n def stream(self) -> Iterator[Union[Output, AssetObservation, AssetCheckResult]]:\n """Stream the events from the dbt CLI process and convert them to Dagster events.\n\n Returns:\n Iterator[Union[Output, AssetObservation, AssetCheckResult]]: A set of corresponding Dagster events.\n - Output for refables (e.g. models, seeds, snapshots.)\n - AssetObservation for dbt test results that are not enabled as asset checks.\n - AssetCheckResult for dbt test results that are enabled as asset checks.\n\n Examples:\n .. code-block:: python\n\n from pathlib import Path\n from dagster_dbt import DbtCliResource, dbt_assets\n\n @dbt_assets(manifest=Path("target", "manifest.json"))\n def my_dbt_assets(context, dbt: DbtCliResource):\n yield from dbt.cli(["run"], context=context).stream()\n """\n for event in self.stream_raw_events():\n yield from event.to_default_asset_events(\n manifest=self.manifest, dagster_dbt_translator=self.dagster_dbt_translator\n )
\n\n
[docs] @public\n def stream_raw_events(self) -> Iterator[DbtCliEventMessage]:\n """Stream the events from the dbt CLI process.\n\n Returns:\n Iterator[DbtCliEventMessage]: An iterator of events from the dbt CLI process.\n """\n with self.process.stdout or contextlib.nullcontext():\n for raw_line in self.process.stdout or []:\n log: str = raw_line.decode().strip()\n try:\n event = DbtCliEventMessage.from_log(log=log)\n\n # Re-emit the logs from dbt CLI process into stdout.\n sys.stdout.write(str(event) + "\\n")\n sys.stdout.flush()\n\n yield event\n except:\n # If we can't parse the log, then just emit it as a raw log.\n sys.stdout.write(log + "\\n")\n sys.stdout.flush()\n\n # Ensure that the dbt CLI process has completed.\n self._raise_on_error()
\n\n
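Unlike ``stream()``, the method above yields raw ``DbtCliEventMessage`` objects. A minimal sketch of consuming them inside ``@dbt_assets`` while still emitting the default Dagster events; the manifest path is a placeholder.

from pathlib import Path

from dagster import AssetExecutionContext
from dagster_dbt import DbtCliResource, dbt_assets


@dbt_assets(manifest=Path("target", "manifest.json"))
def my_dbt_assets(context: AssetExecutionContext, dbt: DbtCliResource):
    dbt_cli_invocation = dbt.cli(["build"], context=context)

    for dbt_event in dbt_cli_invocation.stream_raw_events():
        # The raw structured log payload is available on each event.
        context.log.debug(dbt_event.raw_event["info"]["msg"])

        # Re-emit the default Dagster events derived from this dbt event.
        yield from dbt_event.to_default_asset_events(
            manifest=dbt_cli_invocation.manifest,
            dagster_dbt_translator=dbt_cli_invocation.dagster_dbt_translator,
        )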
[docs] @public\n def get_artifact(\n self,\n artifact: Union[\n Literal["manifest.json"],\n Literal["catalog.json"],\n Literal["run_results.json"],\n Literal["sources.json"],\n ],\n ) -> Dict[str, Any]:\n """Retrieve a dbt artifact from the target path.\n\n See https://docs.getdbt.com/reference/artifacts/dbt-artifacts for more information.\n\n Args:\n artifact (Union[Literal["manifest.json"], Literal["catalog.json"], Literal["run_results.json"], Literal["sources.json"]]): The name of the artifact to retrieve.\n\n Returns:\n Dict[str, Any]: The artifact as a dictionary.\n\n Examples:\n .. code-block:: python\n\n from dagster_dbt import DbtCliResource\n\n dbt = DbtCliResource(project_dir="/path/to/dbt/project")\n\n dbt_cli_invocation = dbt.cli(["run"]).wait()\n\n # Retrieve the run_results.json artifact.\n run_results = dbt_cli_invocation.get_artifact("run_results.json")\n """\n artifact_path = self.target_path.joinpath(artifact)\n\n return orjson.loads(artifact_path.read_bytes())
\n\n def _raise_on_error(self) -> None:\n """Ensure that the dbt CLI process has completed. If the process has not successfully\n completed, then optionally raise an error.\n """\n if not self.is_successful() and self.raise_on_error:\n raise DagsterDbtCliRuntimeError(\n description=(\n f"The dbt CLI process failed with exit code {self.process.returncode}. Check"\n " the Dagster compute logs for the full information about the error, or view"\n f" the dbt debug log file: {self.target_path.joinpath('dbt.log')}."\n )\n )
\n\n\n
[docs]class DbtCliResource(ConfigurableResource):\n """A resource used to execute dbt CLI commands.\n\n Attributes:\n project_dir (str): The path to the dbt project directory. This directory should contain a\n `dbt_project.yml`. See https://docs.getdbt.com/reference/dbt_project.yml for more\n information.\n global_config_flags (List[str]): A list of global flags configuration to pass to the dbt CLI\n invocation. See https://docs.getdbt.com/reference/global-configs for a full list of\n configuration.\n profiles_dir (Optional[str]): The path to the directory containing your dbt `profiles.yml`.\n By default, the current working directory is used, which is the dbt project directory.\n See https://docs.getdbt.com/docs/core/connect-data-platform/connection-profiles for more\n information.\n profile (Optional[str]): The profile from your dbt `profiles.yml` to use for execution. See\n https://docs.getdbt.com/docs/core/connect-data-platform/connection-profiles for more\n information.\n target (Optional[str]): The target from your dbt `profiles.yml` to use for execution. See\n https://docs.getdbt.com/docs/core/connect-data-platform/connection-profiles for more\n information.\n\n Examples:\n Creating a dbt resource with only a reference to ``project_dir``:\n\n .. code-block:: python\n\n from dagster_dbt import DbtCliResource\n\n dbt = DbtCliResource(project_dir="/path/to/dbt/project")\n\n Creating a dbt resource with a custom ``profiles_dir``:\n\n .. code-block:: python\n\n from dagster_dbt import DbtCliResource\n\n dbt = DbtCliResource(\n project_dir="/path/to/dbt/project",\n profiles_dir="/path/to/dbt/project/profiles",\n )\n\n Creating a dbt resource with a custom ``profile`` and ``target``:\n\n .. code-block:: python\n\n from dagster_dbt import DbtCliResource\n\n dbt = DbtCliResource(\n project_dir="/path/to/dbt/project",\n profiles_dir="/path/to/dbt/project/profiles",\n profile="jaffle_shop",\n target="dev",\n )\n\n Creating a dbt resource with global configs, e.g. disabling colored logs with ``--no-use-color``:\n\n .. code-block:: python\n\n from dagster_dbt import DbtCliResource\n\n dbt = DbtCliResource(\n project_dir="/path/to/dbt/project",\n global_config_flags=["--no-use-color"],\n )\n """\n\n project_dir: str = Field(\n ...,\n description=(\n "The path to your dbt project directory. This directory should contain a"\n " `dbt_project.yml`. See https://docs.getdbt.com/reference/dbt_project.yml for more"\n " information."\n ),\n )\n global_config_flags: List[str] = Field(\n default=[],\n description=(\n "A list of global flags configuration to pass to the dbt CLI invocation. See"\n " https://docs.getdbt.com/reference/global-configs for a full list of configuration."\n ),\n )\n profiles_dir: Optional[str] = Field(\n default=None,\n description=(\n "The path to the directory containing your dbt `profiles.yml`. By default, the current"\n " working directory is used, which is the dbt project directory."\n " See https://docs.getdbt.com/docs/core/connect-data-platform/connection-profiles for "\n " more information."\n ),\n )\n profile: Optional[str] = Field(\n default=None,\n description=(\n "The profile from your dbt `profiles.yml` to use for execution. See"\n " https://docs.getdbt.com/docs/core/connect-data-platform/connection-profiles for more"\n " information."\n ),\n )\n target: Optional[str] = Field(\n default=None,\n description=(\n "The target from your dbt `profiles.yml` to use for execution. 
See"\n " https://docs.getdbt.com/docs/core/connect-data-platform/connection-profiles for more"\n " information."\n ),\n )\n\n @classmethod\n def _validate_absolute_path_exists(cls, path: Union[str, Path]) -> Path:\n absolute_path = Path(path).absolute()\n try:\n resolved_path = absolute_path.resolve(strict=True)\n except FileNotFoundError:\n raise ValueError(f"The absolute path of '{path}' ('{absolute_path}') does not exist")\n\n return resolved_path\n\n @classmethod\n def _validate_path_contains_file(cls, path: Path, file_name: str, error_message: str):\n if not path.joinpath(file_name).exists():\n raise ValueError(error_message)\n\n @validator("project_dir", "profiles_dir", pre=True)\n def convert_path_to_str(cls, v: Any) -> Any:\n """Validate that the path is converted to a string."""\n if isinstance(v, Path):\n resolved_path = cls._validate_absolute_path_exists(v)\n\n absolute_path = Path(v).absolute()\n try:\n resolved_path = absolute_path.resolve(strict=True)\n except FileNotFoundError:\n raise ValueError(f"The absolute path of '{v}' ('{absolute_path}') does not exist")\n return os.fspath(resolved_path)\n\n return v\n\n @validator("project_dir")\n def validate_project_dir(cls, project_dir: str) -> str:\n resolved_project_dir = cls._validate_absolute_path_exists(project_dir)\n\n cls._validate_path_contains_file(\n path=resolved_project_dir,\n file_name=DBT_PROJECT_YML_NAME,\n error_message=(\n f"{resolved_project_dir} does not contain a {DBT_PROJECT_YML_NAME} file. Please"\n " specify a valid path to a dbt project."\n ),\n )\n\n return os.fspath(resolved_project_dir)\n\n @validator("profiles_dir")\n def validate_profiles_dir(cls, profiles_dir: str) -> str:\n resolved_project_dir = cls._validate_absolute_path_exists(profiles_dir)\n\n cls._validate_path_contains_file(\n path=resolved_project_dir,\n file_name=DBT_PROFILES_YML_NAME,\n error_message=(\n f"{resolved_project_dir} does not contain a {DBT_PROFILES_YML_NAME} file. Please"\n " specify a valid path to a dbt profile directory."\n ),\n )\n\n return os.fspath(resolved_project_dir)\n\n @root_validator(pre=True)\n def validate_dbt_version(cls, values: Dict[str, Any]) -> Dict[str, Any]:\n """Validate that the dbt version is supported."""\n if version.parse(dbt_version) < version.parse("1.4.0"):\n raise ValueError(\n "To use `dagster_dbt.DbtCliResource`, you must use `dbt-core>=1.4.0`. Currently,"\n f" you are using `dbt-core=={dbt_version}`. Please install a compatible dbt-core"\n " version."\n )\n\n return values\n\n def _get_unique_target_path(self, *, context: Optional[OpExecutionContext]) -> Path:\n """Get a unique target path for the dbt CLI invocation.\n\n Args:\n context (Optional[OpExecutionContext]): The execution context.\n\n Returns:\n str: A unique target path for the dbt CLI invocation.\n """\n unique_id = str(uuid.uuid4())[:7]\n path = unique_id\n if context:\n path = f"{context.op.name}-{context.run_id[:7]}-{unique_id}"\n\n current_target_path = _get_dbt_target_path()\n\n return current_target_path.joinpath(path)\n\n
[docs] @public\n def cli(\n self,\n args: List[str],\n *,\n raise_on_error: bool = True,\n manifest: Optional[DbtManifestParam] = None,\n dagster_dbt_translator: Optional[DagsterDbtTranslator] = None,\n context: Optional[OpExecutionContext] = None,\n target_path: Optional[Path] = None,\n ) -> DbtCliInvocation:\n """Create a subprocess to execute a dbt CLI command.\n\n Args:\n args (List[str]): The dbt CLI command to execute.\n raise_on_error (bool): Whether to raise an exception if the dbt CLI command fails.\n manifest (Optional[Union[Mapping[str, Any], str, Path]]): The dbt manifest blob. If an\n execution context from within `@dbt_assets` is provided to the context argument,\n then the manifest provided to `@dbt_assets` will be used.\n dagster_dbt_translator (Optional[DagsterDbtTranslator]): The translator to link dbt\n nodes to Dagster assets. If an execution context from within `@dbt_assets` is\n provided to the context argument, then the dagster_dbt_translator provided to\n `@dbt_assets` will be used.\n context (Optional[OpExecutionContext]): The execution context from within `@dbt_assets`.\n target_path (Optional[Path]): An explicit path to a target folder to use to store and\n retrieve dbt artifacts when running a dbt CLI command. If not provided, a unique\n target path will be generated.\n\n Returns:\n DbtCliInvocation: A invocation instance that can be used to retrieve the output of the\n dbt CLI command.\n\n Examples:\n Streaming Dagster events for dbt asset materializations and observations:\n\n .. code-block:: python\n\n from pathlib import Path\n\n from dagster import AssetExecutionContext\n from dagster_dbt import DbtCliResource, dbt_assets\n\n\n @dbt_assets(manifest=Path("target", "manifest.json"))\n def my_dbt_assets(context: AssetExecutionContext, dbt: DbtCliResource):\n yield from dbt.cli(["run"], context=context).stream()\n\n Retrieving a dbt artifact after streaming the Dagster events:\n\n .. code-block:: python\n\n from pathlib import Path\n\n from dagster import AssetExecutionContext\n from dagster_dbt import DbtCliResource, dbt_assets\n\n\n @dbt_assets(manifest=Path("target", "manifest.json"))\n def my_dbt_assets(context: AssetExecutionContext, dbt: DbtCliResource):\n dbt_run_invocation = dbt.cli(["run"], context=context)\n\n yield from dbt_run_invocation.stream()\n\n # Retrieve the `run_results.json` dbt artifact as a dictionary:\n run_results_json = dbt_run_invocation.get_artifact("run_results.json")\n\n # Retrieve the `run_results.json` dbt artifact as a file path:\n run_results_path = dbt_run_invocation.target_path.joinpath("run_results.json")\n\n Customizing the asset materialization metadata when streaming the Dagster events:\n\n .. code-block:: python\n\n from pathlib import Path\n\n from dagster import AssetExecutionContext\n from dagster_dbt import DbtCliResource, dbt_assets\n\n\n @dbt_assets(manifest=Path("target", "manifest.json"))\n def my_dbt_assets(context: AssetExecutionContext, dbt: DbtCliResource):\n dbt_cli_invocation = dbt.cli(["run"], context=context)\n\n for dbt_event in dbt_cli_invocation.stream_raw_events():\n for dagster_event in dbt_event.to_default_asset_events(manifest=dbt_cli_invocation.manifest):\n if isinstance(dagster_event, Output):\n context.add_output_metadata(\n metadata={\n "my_custom_metadata": "my_custom_metadata_value",\n },\n output_name=dagster_event.output_name,\n )\n\n yield dagster_event\n\n Suppressing exceptions from a dbt CLI command when a non-zero exit code is returned:\n\n .. 
code-block:: python\n\n from pathlib import Path\n\n from dagster import AssetExecutionContext\n from dagster_dbt import DbtCliResource, dbt_assets\n\n\n @dbt_assets(manifest=Path("target", "manifest.json"))\n def my_dbt_assets(context: AssetExecutionContext, dbt: DbtCliResource):\n dbt_run_invocation = dbt.cli(["run"], context=context, raise_on_error=False)\n\n if dbt_run_invocation.is_successful():\n yield from dbt_run_invocation.stream()\n else:\n ...\n\n Invoking a dbt CLI command in a custom asset or op:\n\n .. code-block:: python\n\n import json\n\n from dagster import asset, op\n from dagster_dbt import DbtCliResource\n\n\n @asset\n def my_dbt_asset(dbt: DbtCliResource):\n dbt_macro_args = {"key": "value"}\n dbt.cli(["run-operation", "my-macro", json.dumps(dbt_macro_args)]).wait()\n\n\n @op\n def my_dbt_op(dbt: DbtCliResource):\n dbt_macro_args = {"key": "value"}\n dbt.cli(["run-operation", "my-macro", json.dumps(dbt_macro_args)]).wait()\n """\n target_path = target_path or self._get_unique_target_path(context=context)\n env = {\n **os.environ.copy(),\n # Run dbt with unbuffered output.\n "PYTHONUNBUFFERED": "1",\n # Disable anonymous usage statistics for performance.\n "DBT_SEND_ANONYMOUS_USAGE_STATS": "false",\n # The DBT_LOG_FORMAT environment variable must be set to `json`. We use this\n # environment variable to ensure that the dbt CLI outputs structured logs.\n "DBT_LOG_FORMAT": "json",\n # The DBT_TARGET_PATH environment variable is set to a unique value for each dbt\n # invocation so that artifact paths are separated.\n # See https://discourse.getdbt.com/t/multiple-run-results-json-and-manifest-json-files/7555\n # for more information.\n "DBT_TARGET_PATH": os.fspath(target_path),\n # The DBT_LOG_PATH environment variable is set to the same value as DBT_TARGET_PATH\n # so that logs for each dbt invocation has separate log files.\n "DBT_LOG_PATH": os.fspath(target_path),\n # The DBT_PROFILES_DIR environment variable is set to the path containing the dbt\n # profiles.yml file.\n # See https://docs.getdbt.com/docs/core/connect-data-platform/connection-profiles#advanced-customizing-a-profile-directory\n # for more information.\n **({"DBT_PROFILES_DIR": self.profiles_dir} if self.profiles_dir else {}),\n }\n\n assets_def: Optional[AssetsDefinition] = None\n with suppress(DagsterInvalidPropertyError):\n assets_def = context.assets_def if context else None\n\n selection_args: List[str] = []\n dagster_dbt_translator = dagster_dbt_translator or DagsterDbtTranslator()\n if context and assets_def is not None:\n manifest, dagster_dbt_translator = get_manifest_and_translator_from_dbt_assets(\n [assets_def]\n )\n\n # When dbt is enabled with asset checks, we turn off any indirection with dbt selection.\n # This way, the Dagster context completely determines what is executed in a dbt\n # invocation with a subsetted selection.\n if (\n version.parse(dbt_version) >= version.parse("1.5.0")\n and dagster_dbt_translator.settings.enable_asset_checks\n ):\n env["DBT_INDIRECT_SELECTION"] = "empty"\n\n selection_args = get_subset_selection_for_context(\n context=context,\n manifest=manifest,\n select=context.op.tags.get("dagster-dbt/select"),\n exclude=context.op.tags.get("dagster-dbt/exclude"),\n )\n else:\n manifest = validate_manifest(manifest) if manifest else {}\n\n # TODO: verify that args does not have any selection flags if the context and manifest\n # are passed to this function.\n profile_args: List[str] = []\n if self.profile:\n profile_args = ["--profile", self.profile]\n\n if 
self.target:\n profile_args += ["--target", self.target]\n\n args = ["dbt"] + self.global_config_flags + args + profile_args + selection_args\n project_dir = Path(self.project_dir)\n\n if not target_path.is_absolute():\n target_path = project_dir.joinpath(target_path)\n\n return DbtCliInvocation.run(\n args=args,\n env=env,\n manifest=manifest,\n dagster_dbt_translator=dagster_dbt_translator,\n project_dir=project_dir,\n target_path=target_path,\n raise_on_error=raise_on_error,\n )
\n\n\ndef get_subset_selection_for_context(\n context: OpExecutionContext,\n manifest: Mapping[str, Any],\n select: Optional[str],\n exclude: Optional[str],\n) -> List[str]:\n """Generate a dbt selection string to materialize the selected resources in a subsetted execution context.\n\n See https://docs.getdbt.com/reference/node-selection/syntax#how-does-selection-work.\n\n Args:\n context (OpExecutionContext): The execution context for the current execution step.\n select (Optional[str]): A dbt selection string to select resources to materialize.\n exclude (Optional[str]): A dbt selection string to exclude resources from materializing.\n\n Returns:\n List[str]: dbt CLI arguments to materialize the selected resources in a\n subsetted execution context.\n\n If the current execution context is not performing a subsetted execution,\n return CLI arguments composed of the inputed selection and exclusion arguments.\n """\n default_dbt_selection = []\n if select:\n default_dbt_selection += ["--select", select]\n if exclude:\n default_dbt_selection += ["--exclude", exclude]\n\n dbt_resource_props_by_output_name = get_dbt_resource_props_by_output_name(manifest)\n dbt_resource_props_by_test_name = get_dbt_resource_props_by_test_name(manifest)\n\n # TODO: this should be a property on the context if this is a permanent indicator for\n # determining whether the current execution context is performing a subsetted execution.\n is_subsetted_execution = len(context.selected_output_names) != len(\n context.assets_def.node_keys_by_output_name\n )\n if not is_subsetted_execution:\n logger.info(\n "A dbt subsetted execution is not being performed. Using the default dbt selection"\n f" arguments `{default_dbt_selection}`."\n )\n return default_dbt_selection\n\n selected_dbt_resources = []\n for output_name in context.selected_output_names:\n dbt_resource_props = dbt_resource_props_by_output_name[output_name]\n\n # Explicitly select a dbt resource by its fully qualified name (FQN).\n # https://docs.getdbt.com/reference/node-selection/methods#the-file-or-fqn-method\n fqn_selector = f"fqn:{'.'.join(dbt_resource_props['fqn'])}"\n\n selected_dbt_resources.append(fqn_selector)\n\n for _, check_name in context.selected_asset_check_keys:\n test_resource_props = dbt_resource_props_by_test_name[check_name]\n\n # Explicitly select a dbt resource by its fully qualified name (FQN).\n # https://docs.getdbt.com/reference/node-selection/methods#the-file-or-fqn-method\n fqn_selector = f"fqn:{'.'.join(test_resource_props['fqn'])}"\n\n selected_dbt_resources.append(fqn_selector)\n\n # Take the union of all the selected resources.\n # https://docs.getdbt.com/reference/node-selection/set-operators#unions\n union_selected_dbt_resources = ["--select"] + [" ".join(selected_dbt_resources)]\n\n logger.info(\n "A dbt subsetted execution is being performed. 
Overriding default dbt selection"\n f" arguments `{default_dbt_selection}` with arguments: `{union_selected_dbt_resources}`"\n )\n\n return union_selected_dbt_resources\n\n\ndef get_dbt_resource_props_by_output_name(\n manifest: Mapping[str, Any]\n) -> Mapping[str, Mapping[str, Any]]:\n node_info_by_dbt_unique_id = get_dbt_resource_props_by_dbt_unique_id_from_manifest(manifest)\n\n return {\n output_name_fn(node): node\n for node in node_info_by_dbt_unique_id.values()\n if node["resource_type"] in ASSET_RESOURCE_TYPES\n }\n\n\ndef get_dbt_resource_props_by_test_name(\n manifest: Mapping[str, Any]\n) -> Mapping[str, Mapping[str, Any]]:\n return {\n dbt_resource_props["name"]: dbt_resource_props\n for unique_id, dbt_resource_props in manifest["nodes"].items()\n if unique_id.startswith("test")\n }\n
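The helper above unions fully qualified names into a single ``--select`` argument. A small sketch of the resulting CLI arguments, with made-up model names:

# Two selected dbt resources, identified by their fully qualified names (fqn).
selected = ["fqn:jaffle_shop.staging.stg_orders", "fqn:jaffle_shop.orders"]

# dbt treats space-separated selectors within one --select value as a union.
dbt_args = ["--select", " ".join(selected)]

assert dbt_args == ["--select", "fqn:jaffle_shop.staging.stg_orders fqn:jaffle_shop.orders"]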
", "current_page_name": "_modules/dagster_dbt/core/resources_v2", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dbt.core.resources_v2"}, "types": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dbt.core.types

\nfrom typing import Any, Mapping, Optional, Sequence\n\nimport dagster._check as check\n\nfrom ..types import DbtOutput\n\n\n
[docs]class DbtCliOutput(DbtOutput):\n """The results of executing a dbt command, along with additional metadata about the dbt CLI\n process that was run.\n\n This class is deprecated, because it's only produced by methods of the DbtCliClientResource class,\n which is deprecated in favor of DbtCliResource.\n\n Note that users should not construct instances of this class directly. This class is intended\n to be constructed from the JSON output of dbt commands.\n\n Attributes:\n command (str): The full shell command that was executed.\n return_code (int): The return code of the dbt CLI process.\n raw_output (str): The raw output (``stdout``) of the dbt CLI process.\n logs (List[Dict[str, Any]]): List of parsed JSON logs produced by the dbt command.\n result (Optional[Dict[str, Any]]): Dictionary containing dbt-reported result information\n contained in run_results.json. Some dbt commands do not produce results, and will\n therefore have result = None.\n docs_url (Optional[str]): Hostname where dbt docs are being served for this project.\n """\n\n def __init__(\n self,\n command: str,\n return_code: int,\n raw_output: str,\n logs: Sequence[Mapping[str, Any]],\n result: Mapping[str, Any],\n docs_url: Optional[str] = None,\n ):\n self._command = check.str_param(command, "command")\n self._return_code = check.int_param(return_code, "return_code")\n self._raw_output = check.str_param(raw_output, "raw_output")\n self._logs = check.sequence_param(logs, "logs", of_type=dict)\n self._docs_url = check.opt_str_param(docs_url, "docs_url")\n super().__init__(result)\n\n @property\n def command(self) -> str:\n return self._command\n\n @property\n def return_code(self) -> int:\n return self._return_code\n\n @property\n def raw_output(self) -> str:\n return self._raw_output\n\n @property\n def logs(self) -> Sequence[Mapping[str, Any]]:\n return self._logs\n\n @property\n def docs_url(self) -> Optional[str]:\n return self._docs_url
\n
", "current_page_name": "_modules/dagster_dbt/core/types", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dbt.core.types"}}, "dagster_dbt_translator": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dbt.dagster_dbt_translator

\nfrom dataclasses import dataclass\nfrom typing import Any, Mapping, Optional\n\nfrom dagster import AssetKey, AutoMaterializePolicy, FreshnessPolicy\nfrom dagster._annotations import public\nfrom dagster._core.definitions.events import (\n    CoercibleToAssetKeyPrefix,\n    check_opt_coercible_to_asset_key_prefix_param,\n)\n\nfrom .asset_utils import (\n    default_asset_key_fn,\n    default_auto_materialize_policy_fn,\n    default_description_fn,\n    default_freshness_policy_fn,\n    default_group_from_dbt_resource_props,\n    default_metadata_from_dbt_resource_props,\n)\n\n\n
[docs]@dataclass(frozen=True)\nclass DagsterDbtTranslatorSettings:\n """Settings to enable Dagster features for your dbt project.\n\n Args:\n enable_asset_checks (bool): Whether to load dbt tests as Dagster asset checks.\n Defaults to False.\n """\n\n enable_asset_checks: bool = False
\n\n\n
[docs]class DagsterDbtTranslator:\n """Holds a set of methods that derive Dagster asset definition metadata given a representation\n of a dbt resource (models, tests, sources, etc).\n\n This class is exposed so that methods can be overriden to customize how Dagster asset metadata\n is derived.\n """\n\n def __init__(self, settings: Optional[DagsterDbtTranslatorSettings] = None):\n """Initialize the translator.\n\n Args:\n settings (Optional[DagsterDbtTranslatorSettings]): Settings for the translator.\n """\n self._settings = settings or DagsterDbtTranslatorSettings()\n\n @property\n def settings(self) -> DagsterDbtTranslatorSettings:\n if not hasattr(self, "_settings"):\n self._settings = DagsterDbtTranslatorSettings()\n\n return self._settings\n\n
[docs] @classmethod\n @public\n def get_asset_key(cls, dbt_resource_props: Mapping[str, Any]) -> AssetKey:\n """A function that takes a dictionary representing properties of a dbt resource, and\n returns the Dagster asset key that represents that resource.\n\n Note that a dbt resource is unrelated to Dagster's resource concept, and simply represents\n a model, seed, snapshot or source in a given dbt project. You can learn more about dbt\n resources and the properties available in this dictionary here:\n https://docs.getdbt.com/reference/artifacts/manifest-json#resource-details\n\n This method can be overridden to provide a custom asset key for a dbt resource.\n\n Args:\n dbt_resource_props (Mapping[str, Any]): A dictionary representing the dbt resource.\n\n Returns:\n AssetKey: The Dagster asset key for the dbt resource.\n\n Examples:\n Adding a prefix to the default asset key generated for each dbt resource:\n\n .. code-block:: python\n\n from typing import Any, Mapping\n\n from dagster import AssetKey\n from dagster_dbt import DagsterDbtTranslator\n\n\n class CustomDagsterDbtTranslator(DagsterDbtTranslator):\n @classmethod\n def get_asset_key(cls, dbt_resource_props: Mapping[str, Any]) -> AssetKey:\n return super().get_asset_key(dbt_resource_props).with_prefix("prefix")\n\n Adding a prefix to the default asset key generated for each dbt resource, but only for dbt sources:\n\n .. code-block:: python\n\n from typing import Any, Mapping\n\n from dagster import AssetKey\n from dagster_dbt import DagsterDbtTranslator\n\n\n class CustomDagsterDbtTranslator(DagsterDbtTranslator):\n @classmethod\n def get_asset_key(cls, dbt_resource_props: Mapping[str, Any]) -> AssetKey:\n asset_key = super().get_asset_key(dbt_resource_props)\n\n if dbt_resource_props["resource_type"] == "source":\n asset_key = asset_key.with_prefix("my_prefix")\n\n return asset_key\n """\n return default_asset_key_fn(dbt_resource_props)
\n\n
[docs] @classmethod\n @public\n def get_description(cls, dbt_resource_props: Mapping[str, Any]) -> str:\n """A function that takes a dictionary representing properties of a dbt resource, and\n returns the Dagster description for that resource.\n\n Note that a dbt resource is unrelated to Dagster's resource concept, and simply represents\n a model, seed, snapshot or source in a given dbt project. You can learn more about dbt\n resources and the properties available in this dictionary here:\n https://docs.getdbt.com/reference/artifacts/manifest-json#resource-details\n\n This method can be overridden to provide a custom description for a dbt resource.\n\n Args:\n dbt_resource_props (Mapping[str, Any]): A dictionary representing the dbt resource.\n\n Returns:\n str: The description for the dbt resource.\n\n Examples:\n .. code-block:: python\n\n from typing import Any, Mapping\n\n from dagster_dbt import DagsterDbtTranslator\n\n\n class CustomDagsterDbtTranslator(DagsterDbtTranslator):\n @classmethod\n def get_description(cls, dbt_resource_props: Mapping[str, Any]) -> str:\n return "custom description"\n """\n return default_description_fn(dbt_resource_props)
\n\n
[docs] @classmethod\n @public\n def get_metadata(cls, dbt_resource_props: Mapping[str, Any]) -> Mapping[str, Any]:\n """A function that takes a dictionary representing properties of a dbt resource, and\n returns the Dagster metadata for that resource.\n\n Note that a dbt resource is unrelated to Dagster's resource concept, and simply represents\n a model, seed, snapshot or source in a given dbt project. You can learn more about dbt\n resources and the properties available in this dictionary here:\n https://docs.getdbt.com/reference/artifacts/manifest-json#resource-details\n\n This method can be overridden to provide a custom metadata for a dbt resource.\n\n Args:\n dbt_resource_props (Mapping[str, Any]): A dictionary representing the dbt resource.\n\n Returns:\n Mapping[str, Any]: A dictionary representing the Dagster metadata for the dbt resource.\n\n Examples:\n .. code-block:: python\n\n from typing import Any, Mapping\n\n from dagster_dbt import DagsterDbtTranslator\n\n\n class CustomDagsterDbtTranslator(DagsterDbtTranslator):\n @classmethod\n def get_metadata(cls, dbt_resource_props: Mapping[str, Any]) -> Mapping[str, Any]:\n return {"custom": "metadata"}\n """\n return default_metadata_from_dbt_resource_props(dbt_resource_props)
\n\n
[docs] @classmethod\n @public\n def get_group_name(cls, dbt_resource_props: Mapping[str, Any]) -> Optional[str]:\n """A function that takes a dictionary representing properties of a dbt resource, and\n returns the Dagster group name for that resource.\n\n Note that a dbt resource is unrelated to Dagster's resource concept, and simply represents\n a model, seed, snapshot or source in a given dbt project. You can learn more about dbt\n resources and the properties available in this dictionary here:\n https://docs.getdbt.com/reference/artifacts/manifest-json#resource-details\n\n This method can be overridden to provide a custom group name for a dbt resource.\n\n Args:\n dbt_resource_props (Mapping[str, Any]): A dictionary representing the dbt resource.\n\n Returns:\n Optional[str]: A Dagster group name.\n\n Examples:\n .. code-block:: python\n\n from typing import Any, Mapping\n\n from dagster_dbt import DagsterDbtTranslator\n\n\n class CustomDagsterDbtTranslator(DagsterDbtTranslator):\n @classmethod\n def get_group_name(cls, dbt_resource_props: Mapping[str, Any]) -> Optional[str]:\n return "custom_group_prefix" + dbt_resource_props.get("config", {}).get("group")\n """\n return default_group_from_dbt_resource_props(dbt_resource_props)
\n\n
[docs] @classmethod\n @public\n def get_freshness_policy(\n cls, dbt_resource_props: Mapping[str, Any]\n ) -> Optional[FreshnessPolicy]:\n """A function that takes a dictionary representing properties of a dbt resource, and\n returns the Dagster :py:class:`dagster.FreshnessPolicy` for that resource.\n\n Note that a dbt resource is unrelated to Dagster's resource concept, and simply represents\n a model, seed, snapshot or source in a given dbt project. You can learn more about dbt\n resources and the properties available in this dictionary here:\n https://docs.getdbt.com/reference/artifacts/manifest-json#resource-details\n\n This method can be overridden to provide a custom freshness policy for a dbt resource.\n\n Args:\n dbt_resource_props (Mapping[str, Any]): A dictionary representing the dbt resource.\n\n Returns:\n Optional[FreshnessPolicy]: A Dagster freshness policy.\n\n Examples:\n Set a custom freshness policy for all dbt resources:\n\n .. code-block:: python\n\n from typing import Any, Mapping\n\n from dagster_dbt import DagsterDbtTranslator\n\n\n class CustomDagsterDbtTranslator(DagsterDbtTranslator):\n @classmethod\n def get_freshness_policy(cls, dbt_resource_props: Mapping[str, Any]) -> Optional[FreshnessPolicy]:\n return FreshnessPolicy(maximum_lag_minutes=60)\n\n Set a custom freshness policy for dbt resources with a specific tag:\n\n .. code-block:: python\n\n from typing import Any, Mapping\n\n from dagster_dbt import DagsterDbtTranslator\n\n\n class CustomDagsterDbtTranslator(DagsterDbtTranslator):\n @classmethod\n def get_freshness_policy(cls, dbt_resource_props: Mapping[str, Any]) -> Optional[FreshnessPolicy]:\n freshness_policy = None\n if "my_custom_tag" in dbt_resource_props.get("tags", []):\n freshness_policy = FreshnessPolicy(maximum_lag_minutes=60)\n\n return freshness_policy\n """\n return default_freshness_policy_fn(dbt_resource_props)
\n\n
[docs] @classmethod\n @public\n def get_auto_materialize_policy(\n cls, dbt_resource_props: Mapping[str, Any]\n ) -> Optional[AutoMaterializePolicy]:\n """A function that takes a dictionary representing properties of a dbt resource, and\n returns the Dagster :py:class:`dagster.AutoMaterializePolicy` for that resource.\n\n Note that a dbt resource is unrelated to Dagster's resource concept, and simply represents\n a model, seed, snapshot or source in a given dbt project. You can learn more about dbt\n resources and the properties available in this dictionary here:\n https://docs.getdbt.com/reference/artifacts/manifest-json#resource-details\n\n This method can be overridden to provide a custom auto-materialize policy for a dbt resource.\n\n Args:\n dbt_resource_props (Mapping[str, Any]): A dictionary representing the dbt resource.\n\n Returns:\n Optional[AutoMaterializePolicy]: A Dagster auto-materialize policy.\n\n Examples:\n Set a custom auto-materialize policy for all dbt resources:\n\n .. code-block:: python\n\n from typing import Any, Mapping\n\n from dagster_dbt import DagsterDbtTranslator\n\n\n class CustomDagsterDbtTranslator(DagsterDbtTranslator):\n @classmethod\n def get_auto_materialize_policy(cls, dbt_resource_props: Mapping[str, Any]) -> Optional[AutoMaterializePolicy]:\n return AutoMaterializePolicy.eager()\n\n Set a custom auto-materialize policy for dbt resources with a specific tag:\n\n .. code-block:: python\n\n from typing import Any, Mapping\n\n from dagster_dbt import DagsterDbtTranslator\n\n\n class CustomDagsterDbtTranslator(DagsterDbtTranslator):\n @classmethod\n def get_auto_materialize_policy(cls, dbt_resource_props: Mapping[str, Any]) -> Optional[AutoMaterializePolicy]:\n auto_materialize_policy = None\n if "my_custom_tag" in dbt_resource_props.get("tags", []):\n auto_materialize_policy = AutoMaterializePolicy.eager()\n\n return auto_materialize_policy\n\n """\n return default_auto_materialize_policy_fn(dbt_resource_props)
\n\n\nclass KeyPrefixDagsterDbtTranslator(DagsterDbtTranslator):\n """A DagsterDbtTranslator that applies prefixes to the asset keys generated from dbt resources.\n\n Attributes:\n asset_key_prefix (Optional[Union[str, Sequence[str]]]): A prefix to apply to all dbt models,\n seeds, snapshots, etc. This will *not* apply to dbt sources.\n source_asset_key_prefix (Optional[Union[str, Sequence[str]]]): A prefix to apply to all dbt\n sources.\n """\n\n def __init__(\n self,\n asset_key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n source_asset_key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n *args,\n **kwargs,\n ):\n self._asset_key_prefix = (\n check_opt_coercible_to_asset_key_prefix_param(asset_key_prefix, "asset_key_prefix")\n or []\n )\n self._source_asset_key_prefix = (\n check_opt_coercible_to_asset_key_prefix_param(\n source_asset_key_prefix, "source_asset_key_prefix"\n )\n or []\n )\n\n super().__init__(*args, **kwargs)\n\n @public\n def get_asset_key(self, dbt_resource_props: Mapping[str, Any]) -> AssetKey:\n base_key = default_asset_key_fn(dbt_resource_props)\n if dbt_resource_props["resource_type"] == "source":\n return base_key.with_prefix(self._source_asset_key_prefix)\n else:\n return base_key.with_prefix(self._asset_key_prefix)\n\n\n@dataclass\nclass DbtManifestWrapper:\n manifest: Mapping[str, Any]\n
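# --- Editor's sketch (not part of this module) ----------------------------------
# A hedged usage example showing how a custom DagsterDbtTranslator, together with
# DagsterDbtTranslatorSettings, is typically handed to the @dbt_assets decorator.
# The manifest path and the "analytics" prefix are hypothetical placeholders, and
# the top-level exports are assumed to match this release of dagster-dbt.
from pathlib import Path
from typing import Any, Mapping

from dagster import AssetKey, OpExecutionContext
from dagster_dbt import (
    DagsterDbtTranslator,
    DagsterDbtTranslatorSettings,
    DbtCliResource,
    dbt_assets,
)


class PrefixedDagsterDbtTranslator(DagsterDbtTranslator):
    @classmethod
    def get_asset_key(cls, dbt_resource_props: Mapping[str, Any]) -> AssetKey:
        # prepend a prefix to the default asset key derived from the manifest
        return super().get_asset_key(dbt_resource_props).with_prefix("analytics")


@dbt_assets(
    manifest=Path("target", "manifest.json"),  # hypothetical manifest location
    dagster_dbt_translator=PrefixedDagsterDbtTranslator(
        settings=DagsterDbtTranslatorSettings(enable_asset_checks=True)
    ),
)
def my_dbt_assets(context: OpExecutionContext, dbt: DbtCliResource):
    yield from dbt.cli(["build"], context=context).stream()
# --------------------------------------------------------------------------------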
", "current_page_name": "_modules/dagster_dbt/dagster_dbt_translator", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dbt.dagster_dbt_translator"}, "dbt_manifest_asset_selection": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dbt.dbt_manifest_asset_selection

\nfrom typing import AbstractSet, Optional\n\nfrom dagster import (\n    AssetKey,\n    AssetSelection,\n    _check as check,\n)\nfrom dagster._core.definitions.asset_graph import AssetGraph\n\nfrom .asset_utils import is_non_asset_node\nfrom .dagster_dbt_translator import DagsterDbtTranslator\nfrom .dbt_manifest import DbtManifestParam, validate_manifest\nfrom .utils import (\n    ASSET_RESOURCE_TYPES,\n    get_dbt_resource_props_by_dbt_unique_id_from_manifest,\n    select_unique_ids_from_manifest,\n)\n\n\n
[docs]class DbtManifestAssetSelection(AssetSelection):\n """Defines a selection of assets from a dbt manifest wrapper and a dbt selection string.\n\n Args:\n manifest (Mapping[str, Any]): The dbt manifest blob.\n select (str): A dbt selection string to specify a set of dbt resources.\n exclude (Optional[str]): A dbt selection string to exclude a set of dbt resources.\n\n Examples:\n .. code-block:: python\n\n import json\n from pathlib import Path\n\n from dagster_dbt import DbtManifestAssetSelection\n\n manifest = json.loads(Path("path/to/manifest.json").read_text())\n\n # select the dbt assets that have the tag "foo".\n my_selection = DbtManifestAssetSelection(manifest=manifest, select="tag:foo")\n """\n\n def __init__(\n self,\n manifest: DbtManifestParam,\n select: str = "fqn:*",\n *,\n dagster_dbt_translator: Optional[DagsterDbtTranslator] = None,\n exclude: Optional[str] = None,\n ) -> None:\n self.manifest = validate_manifest(manifest)\n self.select = check.str_param(select, "select")\n self.exclude = check.opt_str_param(exclude, "exclude", default="")\n self.dagster_dbt_translator = check.opt_inst_param(\n dagster_dbt_translator,\n "dagster_dbt_translator",\n DagsterDbtTranslator,\n DagsterDbtTranslator(),\n )\n\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n dbt_nodes = get_dbt_resource_props_by_dbt_unique_id_from_manifest(self.manifest)\n\n keys = set()\n for unique_id in select_unique_ids_from_manifest(\n select=self.select,\n exclude=self.exclude,\n manifest_json=self.manifest,\n ):\n dbt_resource_props = dbt_nodes[unique_id]\n is_dbt_asset = dbt_resource_props["resource_type"] in ASSET_RESOURCE_TYPES\n if is_dbt_asset and not is_non_asset_node(dbt_resource_props):\n asset_key = self.dagster_dbt_translator.get_asset_key(dbt_resource_props)\n keys.add(asset_key)\n\n return keys
\n
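# --- Editor's sketch (not part of this module) ----------------------------------
# A hedged example of one way the selection above can be used: driving an asset
# job that targets only dbt models carrying a given tag. The manifest path, job
# name, and tag are hypothetical placeholders.
import json
from pathlib import Path

from dagster import define_asset_job
from dagster_dbt import DbtManifestAssetSelection

manifest = json.loads(Path("target", "manifest.json").read_text())

daily_dbt_job = define_asset_job(
    name="daily_dbt_job",
    selection=DbtManifestAssetSelection(manifest=manifest, select="tag:daily"),
)
# --------------------------------------------------------------------------------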
", "current_page_name": "_modules/dagster_dbt/dbt_manifest_asset_selection", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dbt.dbt_manifest_asset_selection"}, "dbt_resource": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dbt.dbt_resource

\nimport logging\nfrom abc import abstractmethod\nfrom typing import Any, Mapping, Optional, Sequence\n\nfrom dagster import get_dagster_logger\n\nfrom .types import DbtOutput\n\n\nclass DbtClient:\n    """Base class for a client allowing users to interface with dbt."""\n\n    def __init__(\n        self,\n        logger: Optional[logging.Logger] = None,\n    ):\n        """Constructor.\n\n        Args:\n            logger (Optional[Any]): A property for injecting a logger dependency.\n                Default is ``None``.\n        """\n        self._logger = logger or get_dagster_logger()\n\n    def _format_params(\n        self, flags: Mapping[str, Any], replace_underscores: bool = False\n    ) -> Mapping[str, Any]:\n        """Reformats arguments that are easier to express as a list into the format that dbt expects,\n        and deletes and keys with no value.\n        """\n        # remove any keys with a value of None\n        if replace_underscores:\n            flags = {k.replace("_", "-"): v for k, v in flags.items() if v is not None}\n        else:\n            flags = {k: v for k, v in flags.items() if v is not None}\n\n        for param in ["select", "exclude", "models"]:\n            if param in flags:\n                if isinstance(flags[param], list):\n                    # if it's a list, format as space-separated\n                    flags[param] = " ".join(set(flags[param]))\n\n        return flags\n\n    @property\n    def logger(self) -> logging.Logger:\n        """logging.Logger: A property for injecting a logger dependency."""\n        return self._logger\n\n    @abstractmethod\n    def compile(\n        self,\n        models: Optional[Sequence[str]] = None,\n        exclude: Optional[Sequence[str]] = None,\n        **kwargs,\n    ) -> DbtOutput:\n        """Run the ``compile`` command on a dbt project. kwargs are passed in as additional parameters.\n\n        Args:\n            models (List[str], optional): the models to include in compilation.\n            exclude (List[str]), optional): the models to exclude from compilation.\n\n        Returns:\n            DbtOutput: object containing parsed output from dbt\n        """\n\n    @abstractmethod\n    def run(\n        self,\n        models: Optional[Sequence[str]] = None,\n        exclude: Optional[Sequence[str]] = None,\n        **kwargs,\n    ) -> DbtOutput:\n        """Run the ``run`` command on a dbt project. kwargs are passed in as additional parameters.\n\n        Args:\n            models (List[str], optional): the models to include in the run.\n            exclude (List[str]), optional): the models to exclude from the run.\n\n        Returns:\n            DbtOutput: object containing parsed output from dbt\n        """\n\n    @abstractmethod\n    def snapshot(\n        self,\n        select: Optional[Sequence[str]] = None,\n        exclude: Optional[Sequence[str]] = None,\n        **kwargs,\n    ) -> DbtOutput:\n        """Run the ``snapshot`` command on a dbt project. 
kwargs are passed in as additional parameters.\n\n        Args:\n            select (List[str], optional): the snapshots to include in the run.\n            exclude (List[str], optional): the snapshots to exclude from the run.\n\n        Returns:\n            DbtOutput: object containing parsed output from dbt\n        """\n\n    @abstractmethod\n    def test(\n        self,\n        models: Optional[Sequence[str]] = None,\n        exclude: Optional[Sequence[str]] = None,\n        data: bool = True,\n        schema: bool = True,\n        **kwargs,\n    ) -> DbtOutput:\n        """Run the ``test`` command on a dbt project. kwargs are passed in as additional parameters.\n\n        Args:\n            models (List[str], optional): the models to include in testing.\n            exclude (List[str], optional): the models to exclude from testing.\n            data (bool, optional): If ``True`` (default), then run data tests.\n            schema (bool, optional): If ``True`` (default), then run schema tests.\n\n        Returns:\n            DbtOutput: object containing parsed output from dbt\n        """\n\n    @abstractmethod\n    def seed(\n        self,\n        show: bool = False,\n        select: Optional[Sequence[str]] = None,\n        exclude: Optional[Sequence[str]] = None,\n        **kwargs,\n    ) -> DbtOutput:\n        """Run the ``seed`` command on a dbt project. kwargs are passed in as additional parameters.\n\n        Args:\n            show (bool, optional): If ``True``, then show a sample of the seeded data in the\n                response. Defaults to ``False``.\n            select (List[str], optional): the snapshots to include in the run.\n            exclude (List[str], optional): the snapshots to exclude from the run.\n\n\n        Returns:\n            DbtOutput: object containing parsed output from dbt\n        """\n\n    @abstractmethod\n    def ls(\n        self,\n        select: Optional[Sequence[str]] = None,\n        models: Optional[Sequence[str]] = None,\n        exclude: Optional[Sequence[str]] = None,\n        **kwargs,\n    ) -> DbtOutput:\n        """Run the ``ls`` command on a dbt project. kwargs are passed in as additional parameters.\n\n        Args:\n            select (List[str], optional): the resources to include in the output.\n            models (List[str], optional): the models to include in the output.\n            exclude (List[str], optional): the resources to exclude from the output.\n\n\n        Returns:\n            DbtOutput: object containing parsed output from dbt\n        """\n\n    @abstractmethod\n    def build(self, select: Optional[Sequence[str]] = None, **kwargs) -> DbtOutput:\n        """Run the ``build`` command on a dbt project. kwargs are passed in as additional parameters.\n\n        Args:\n            select (List[str], optional): the models/resources to include in the run.\n\n        Returns:\n            DbtOutput: object containing parsed output from dbt\n        """\n        raise NotImplementedError()\n\n    @abstractmethod\n    def generate_docs(self, compile_project: bool = False, **kwargs) -> DbtOutput:\n        """Run the ``docs generate`` command on a dbt project. 
kwargs are passed in as additional parameters.\n\n        Args:\n            compile_project (bool, optional): If true, compile the project before generating a catalog.\n\n        Returns:\n            DbtOutput: object containing parsed output from dbt\n        """\n\n    @abstractmethod\n    def run_operation(\n        self, macro: str, args: Optional[Mapping[str, Any]] = None, **kwargs\n    ) -> DbtOutput:\n        """Run the ``run-operation`` command on a dbt project. kwargs are passed in as additional parameters.\n\n        Args:\n            macro (str): the dbt macro to invoke.\n            args (Dict[str, Any], optional): the keyword arguments to be supplied to the macro.\n\n        Returns:\n            DbtOutput: object containing parsed output from dbt\n        """\n\n    @abstractmethod\n    def get_run_results_json(self, **kwargs) -> Optional[Mapping[str, Any]]:\n        """Get a parsed version of the run_results.json file for the relevant dbt project.\n\n        Returns:\n            Dict[str, Any]: dictionary containing the parsed contents of the run_results json file\n                for this dbt project.\n        """\n\n    @abstractmethod\n    def get_manifest_json(self, **kwargs) -> Optional[Mapping[str, Any]]:\n        """Get a parsed version of the manifest.json file for the relevant dbt project.\n\n        Returns:\n            Dict[str, Any]: dictionary containing the parsed contents of the manifest json file\n                for this dbt project.\n        """\n\n\n
[docs]class DbtResource(DbtClient):\n pass
\n
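# --- Editor's sketch (not the library code itself) ------------------------------
# A small standalone illustration of the flag normalization DbtClient._format_params
# performs: None-valued flags are dropped, underscores can be swapped for hyphens,
# and list-valued selection flags are collapsed into the space-separated form dbt
# expects (the library version also dedupes via a set, so its ordering may differ).
# The flag values below are hypothetical.
from typing import Any, Mapping


def normalize_dbt_flags(flags: Mapping[str, Any], replace_underscores: bool = False) -> Mapping[str, Any]:
    out = {
        (k.replace("_", "-") if replace_underscores else k): v
        for k, v in flags.items()
        if v is not None
    }
    for param in ("select", "exclude", "models"):
        if isinstance(out.get(param), list):
            out[param] = " ".join(out[param])
    return out


assert normalize_dbt_flags(
    {"select": ["model_a", "model_b"], "full_refresh": True, "vars": None},
    replace_underscores=True,
) == {"select": "model_a model_b", "full-refresh": True}
# --------------------------------------------------------------------------------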
", "current_page_name": "_modules/dagster_dbt/dbt_resource", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dbt.dbt_resource"}, "errors": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dbt.errors

\nimport warnings\nfrom abc import ABC\nfrom typing import Any, Mapping, Optional, Sequence\n\nfrom dagster import (\n    DagsterInvariantViolationError,\n    Failure,\n    MetadataValue,\n    _check as check,\n)\n\n\n
[docs]class DagsterDbtError(Failure, ABC):\n """The base exception of the ``dagster-dbt`` library."""
\n\n\n
[docs]class DagsterDbtCliUnexpectedOutputError(DagsterDbtError):\n """Represents an error when parsing the output of a dbt CLI command."""\n\n invalid_line_nos: Sequence[int]\n\n def __init__(self, invalid_line_nos: Sequence[int]):\n check.sequence_param(invalid_line_nos, "invalid_line_nos", int)\n line_nos_str = ", ".join(map(str, invalid_line_nos))\n description = f"dbt CLI emitted unexpected output on lines {line_nos_str}"\n metadata = {\n "Invalid CLI Output Line Numbers": MetadataValue.json({"line_nos": invalid_line_nos})\n }\n super().__init__(description, metadata=metadata)\n self.invalid_line_nos = invalid_line_nos
\n\n\n
[docs]class DagsterDbtCliRuntimeError(DagsterDbtError, ABC):\n """Represents an error while executing a dbt CLI command."""\n\n def __init__(\n self,\n description: str,\n logs: Optional[Sequence[Mapping[str, Any]]] = None,\n raw_output: Optional[str] = None,\n messages: Optional[Sequence[str]] = None,\n ):\n if logs is not None:\n warnings.warn(\n "`logs` is a deprecated argument to DagsterDbtCliRuntimeError and will be discarded"\n )\n if raw_output is not None:\n warnings.warn(\n "`raw_output` is a deprecated argument to DagsterDbtCliRuntimeError and will be"\n " discarded"\n )\n metadata = {"Parsed CLI Messages": "\\n".join(messages or [])}\n super().__init__(description, metadata=metadata)
\n\n\n
[docs]class DagsterDbtCliHandledRuntimeError(DagsterDbtCliRuntimeError):\n """Represents a model error reported by the dbt CLI at runtime (return code 1)."""\n\n def __init__(\n self,\n logs: Optional[Sequence[Mapping[str, Any]]] = None,\n raw_output: Optional[str] = None,\n messages: Optional[Sequence[str]] = None,\n ):\n super().__init__("Handled error in the dbt CLI (return code 1)", logs, raw_output, messages)
\n\n\n
[docs]class DagsterDbtCliFatalRuntimeError(DagsterDbtCliRuntimeError):\n """Represents a fatal error in the dbt CLI (return code 2)."""\n\n def __init__(\n self,\n logs: Optional[Sequence[Mapping[str, Any]]] = None,\n raw_output: Optional[str] = None,\n messages: Optional[Sequence[str]] = None,\n ):\n super().__init__(\n "Fatal error in the dbt CLI (return code 2): " + " ".join(messages or []),\n logs,\n raw_output,\n messages,\n )
\n\n\n
[docs]class DagsterDbtCliOutputsNotFoundError(DagsterDbtError):\n """Represents a problem in finding the ``target/run_results.json`` artifact when executing a dbt\n CLI command.\n\n For more details on ``target/run_results.json``, see\n https://docs.getdbt.com/reference/dbt-artifacts#run_resultsjson.\n """\n\n def __init__(self, path: str):\n super().__init__(f"Expected to find file at path {path}")
\n\n\nclass DagsterDbtCloudJobInvariantViolationError(DagsterDbtError, DagsterInvariantViolationError):\n """Represents an error when a dbt Cloud job is not supported by the ``dagster-dbt`` library."""\n
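# --- Editor's sketch (illustrative only) -----------------------------------------
# A hedged example of how the CLI-output error above surfaces: constructing the
# exception directly, as the CLI parsing code would, and inspecting the line
# numbers it records. The line numbers are hypothetical.
from dagster_dbt.errors import DagsterDbtCliUnexpectedOutputError

try:
    raise DagsterDbtCliUnexpectedOutputError(invalid_line_nos=[3, 7])
except DagsterDbtCliUnexpectedOutputError as err:
    assert err.invalid_line_nos == [3, 7]
    assert "unexpected output on lines 3, 7" in err.description
# --------------------------------------------------------------------------------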
", "current_page_name": "_modules/dagster_dbt/errors", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dbt.errors"}, "ops": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dbt.ops

\nfrom typing import Any, Dict, List, Optional\n\nfrom dagster import Config, In, Nothing, Out, Output, op\nfrom pydantic import Field\n\nfrom .types import DbtOutput\nfrom .utils import generate_events, generate_materializations\n\n_DEFAULT_OP_PROPS: Dict[str, Any] = dict(\n    required_resource_keys={"dbt"},\n    ins={"start_after": In(Nothing)},\n    out=Out(DbtOutput, description="Parsed output from running the dbt command."),\n    tags={"kind": "dbt"},\n)\n\n\ndef _get_doc(op_name: str, dbt_command: str) -> str:\n    return f"""\nThis op executes a ``dbt {dbt_command}`` command. It requires the use of a dbt resource, which can be\nset to execute this command through the CLI (using the :py:class:`~dagster_dbt.dbt_cli_resource`).\n\nExamples:\n\n.. code-block:: python\n\n    from dagster import job\n    from dagster_dbt import {op_name}, dbt_cli_resource\n\n    @job(resource_defs={{"dbt":dbt_cli_resource}})\n    def my_dbt_cli_job():\n        {op_name}()\n    """\n\n\n# NOTE: mypy fails to properly track the type of `_DEFAULT_OP_PROPS` items when they are\n# double-splatted, so we type-ignore the below op declarations.\n\n\nclass DbtBuildOpConfig(Config):\n    yield_asset_events: bool = Field(\n        default=True,\n        description=(\n            "If True, materializations and asset observations corresponding to the results of "\n            "the dbt operation will be yielded when the op executes. Default: True"\n        ),\n    )\n    asset_key_prefix: List[str] = Field(\n        default=["dbt"],\n        description=(\n            "If provided and yield_materializations is True, these components will be used to "\n            "prefix the generated asset keys."\n        ),\n    )\n\n\n@op(**_DEFAULT_OP_PROPS)\ndef dbt_build_op(context, config: DbtBuildOpConfig) -> Any:\n    dbt_output = context.resources.dbt.build()\n    if config.yield_asset_events and "results" in dbt_output.result:\n        yield from generate_events(\n            dbt_output,\n            node_info_to_asset_key=lambda info: config.asset_key_prefix\n            + info["unique_id"].split("."),\n            manifest_json=context.resources.dbt.get_manifest_json(),\n        )\n    yield Output(dbt_output)\n\n\nclass DbtRunOpConfig(Config):\n    yield_materializations: bool = Field(\n        default=True,\n        description=(\n            "If True, materializations corresponding to the results of the dbt operation will "\n            "be yielded when the op executes. Default: True"\n        ),\n    )\n    asset_key_prefix: Optional[List[str]] = Field(\n        default=["dbt"],\n        description=(\n            "If provided and yield_materializations is True, these components will be used to "\n            "prefix the generated asset keys."\n        ),\n    )\n\n\n
[docs]@op(**_DEFAULT_OP_PROPS)\ndef dbt_run_op(context, config: DbtRunOpConfig):\n dbt_output = context.resources.dbt.run()\n if config.yield_materializations and "results" in dbt_output.result:\n yield from generate_materializations(dbt_output, asset_key_prefix=config.asset_key_prefix)\n yield Output(dbt_output)
\n\n\n
[docs]@op(**_DEFAULT_OP_PROPS)\ndef dbt_compile_op(context):\n return context.resources.dbt.compile()
\n\n\n
[docs]@op(**_DEFAULT_OP_PROPS)\ndef dbt_ls_op(context):\n return context.resources.dbt.ls()
\n\n\n
[docs]@op(**_DEFAULT_OP_PROPS)\ndef dbt_test_op(context):\n return context.resources.dbt.test()
\n\n\n
[docs]@op(**_DEFAULT_OP_PROPS)\ndef dbt_snapshot_op(context):\n return context.resources.dbt.snapshot()
\n\n\n
[docs]@op(**_DEFAULT_OP_PROPS)\ndef dbt_seed_op(context):\n return context.resources.dbt.seed()
\n\n\n
[docs]@op(**_DEFAULT_OP_PROPS)\ndef dbt_docs_generate_op(context):\n return context.resources.dbt.generate_docs()
\n\n\nfor dbt_op, cmd in [\n (dbt_build_op, "build"),\n (dbt_run_op, "run"),\n (dbt_compile_op, "compile"),\n (dbt_ls_op, "ls"),\n (dbt_test_op, "test"),\n (dbt_snapshot_op, "snapshot"),\n (dbt_seed_op, "seed"),\n (dbt_docs_generate_op, "docs generate"),\n]:\n dbt_op.__doc__ = _get_doc(dbt_op.name, cmd)\n
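# --- Editor's sketch (not part of this module) ----------------------------------
# A hedged usage example for the ops defined above, in the spirit of the generated
# docstrings: wiring dbt_run_op and dbt_test_op into a job with dbt_cli_resource,
# using the Nothing "start_after" input to sequence them. The project_dir path is
# a hypothetical placeholder.
from dagster import job
from dagster_dbt import dbt_cli_resource, dbt_run_op, dbt_test_op


@job(
    resource_defs={
        "dbt": dbt_cli_resource.configured({"project_dir": "path/to/dbt_project"})
    }
)
def dbt_run_then_test_job():
    # run the models first, then run the tests once the run op has finished
    dbt_test_op(start_after=dbt_run_op())
# --------------------------------------------------------------------------------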
", "current_page_name": "_modules/dagster_dbt/ops", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dbt.ops"}, "types": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dbt.types

\nfrom typing import Any, Mapping, Optional\n\nimport dagster._check as check\n\n\n
[docs]class DbtOutput:\n """Base class for both DbtCliOutput and DbtRPCOutput. Contains a single field, `result`, which\n represents the dbt-formatted result of the command that was run (if any).\n\n Used internally, should not be instantiated directly by the user.\n """\n\n def __init__(self, result: Mapping[str, Any]):\n self._result = check.mapping_param(result, "result", key_type=str)\n\n @property\n def result(self) -> Mapping[str, Any]:\n return self._result\n\n @property\n def docs_url(self) -> Optional[str]:\n return None
\n
", "current_page_name": "_modules/dagster_dbt/types", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dbt.types"}, "utils": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dbt.utils

\nfrom pathlib import Path\nfrom typing import (\n    AbstractSet,\n    Any,\n    Callable,\n    Dict,\n    Iterator,\n    Mapping,\n    Optional,\n    Sequence,\n    Union,\n    cast,\n)\n\nimport dateutil\nfrom dagster import (\n    AssetKey,\n    AssetMaterialization,\n    AssetObservation,\n    MetadataValue,\n    Output,\n    _check as check,\n)\nfrom dagster._core.definitions.metadata import RawMetadataValue\n\nfrom .types import DbtOutput\n\n# dbt resource types that may be considered assets\nASSET_RESOURCE_TYPES = ["model", "seed", "snapshot"]\n\n\ndef default_node_info_to_asset_key(node_info: Mapping[str, Any]) -> AssetKey:\n    return AssetKey(node_info["unique_id"].split("."))\n\n\ndef _resource_type(unique_id: str) -> str:\n    # returns the type of the node (e.g. model, test, snapshot)\n    return unique_id.split(".")[0]\n\n\ndef input_name_fn(dbt_resource_props: Mapping[str, Any]) -> str:\n    # * can be present when sources are sharded tables\n    return dbt_resource_props["unique_id"].replace(".", "_").replace("*", "_star")\n\n\ndef output_name_fn(dbt_resource_props: Mapping[str, Any]) -> str:\n    # hyphens are valid in dbt model names, but not in output names\n    return dbt_resource_props["unique_id"].split(".")[-1].replace("-", "_")\n\n\ndef _node_result_to_metadata(node_result: Mapping[str, Any]) -> Mapping[str, RawMetadataValue]:\n    return {\n        "Materialization Strategy": node_result["config"]["materialized"],\n        "Database": node_result["database"],\n        "Schema": node_result["schema"],\n        "Alias": node_result["alias"],\n        "Description": node_result["description"],\n    }\n\n\ndef _timing_to_metadata(timings: Sequence[Mapping[str, Any]]) -> Mapping[str, RawMetadataValue]:\n    metadata: Dict[str, RawMetadataValue] = {}\n    for timing in timings:\n        if timing["name"] == "execute":\n            desc = "Execution"\n        elif timing["name"] == "compile":\n            desc = "Compilation"\n        else:\n            continue\n\n        # dateutil does not properly expose its modules to static checkers\n        started_at = dateutil.parser.isoparse(timing["started_at"])  # type: ignore\n        completed_at = dateutil.parser.isoparse(timing["completed_at"])  # type: ignore\n        duration = completed_at - started_at\n        metadata.update(\n            {\n                f"{desc} Started At": started_at.isoformat(timespec="seconds"),\n                f"{desc} Completed At": started_at.isoformat(timespec="seconds"),\n                f"{desc} Duration": duration.total_seconds(),\n            }\n        )\n    return metadata\n\n\ndef result_to_events(\n    result: Mapping[str, Any],\n    docs_url: Optional[str] = None,\n    node_info_to_asset_key: Optional[Callable[[Mapping[str, Any]], AssetKey]] = None,\n    manifest_json: Optional[Mapping[str, Any]] = None,\n    extra_metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n    generate_asset_outputs: bool = False,\n) -> Iterator[Union[AssetMaterialization, AssetObservation, Output]]:\n    """This is a hacky solution that attempts to consolidate parsing many of the potential formats\n    that dbt can provide its results in. 
This is known to work for CLI Outputs for dbt versions 0.18+,\n    as well as RPC responses for a similar time period, but as the RPC response schema is not documented\n    nor enforced, this can become out of date easily.\n    """\n    node_info_to_asset_key = check.opt_callable_param(\n        node_info_to_asset_key, "node_info_to_asset_key", default=default_node_info_to_asset_key\n    )\n\n    # status comes from set of fields rather than "status"\n    if "fail" in result:\n        status = (\n            "fail"\n            if result.get("fail")\n            else "skip" if result.get("skip") else "error" if result.get("error") else "success"\n        )\n    else:\n        status = result["status"]\n\n    # all versions represent timing the same way\n    metadata = {"Status": status, "Execution Time (seconds)": result["execution_time"]}\n    metadata.update(_timing_to_metadata(result["timing"]))\n\n    # working with a response that contains the node block (RPC and CLI 0.18.x)\n    if "node" in result:\n        unique_id = result["node"]["unique_id"]\n        metadata.update(_node_result_to_metadata(result["node"]))\n    else:\n        unique_id = result["unique_id"]\n\n    if docs_url:\n        metadata["docs_url"] = MetadataValue.url(f"{docs_url}#!/model/{unique_id}")\n\n    if extra_metadata:\n        metadata.update(extra_metadata)\n\n    # if you have a manifest available, get the full node info, otherwise just populate unique_id\n    dbt_resource_props = (\n        manifest_json["nodes"][unique_id] if manifest_json else {"unique_id": unique_id}\n    )\n\n    node_resource_type = _resource_type(unique_id)\n\n    if node_resource_type in ASSET_RESOURCE_TYPES and status == "success":\n        if generate_asset_outputs:\n            yield Output(\n                value=None,\n                output_name=output_name_fn(dbt_resource_props),\n                metadata=metadata,\n            )\n        else:\n            yield AssetMaterialization(\n                asset_key=node_info_to_asset_key(dbt_resource_props),\n                description=f"dbt node: {unique_id}",\n                metadata=metadata,\n            )\n    # can only associate tests with assets if we have manifest_json available\n    elif node_resource_type == "test" and manifest_json and status != "skipped":\n        upstream_unique_ids = manifest_json["nodes"][unique_id]["depends_on"]["nodes"]\n        # tests can apply to multiple asset keys\n        for upstream_id in upstream_unique_ids:\n            # the upstream id can reference a node or a source\n            dbt_resource_props = manifest_json["nodes"].get(upstream_id) or manifest_json[\n                "sources"\n            ].get(upstream_id)\n            if dbt_resource_props is None:\n                continue\n            upstream_asset_key = node_info_to_asset_key(dbt_resource_props)\n            yield AssetObservation(\n                asset_key=upstream_asset_key,\n                metadata={\n                    "Test ID": result["unique_id"],\n                    "Test Status": status,\n                    "Test Message": result.get("message") or "",\n                },\n            )\n\n\ndef generate_events(\n    dbt_output: DbtOutput,\n    node_info_to_asset_key: Optional[Callable[[Mapping[str, Any]], AssetKey]] = None,\n    manifest_json: Optional[Mapping[str, Any]] = None,\n) -> Iterator[Union[AssetMaterialization, AssetObservation]]:\n    """This function yields :py:class:`dagster.AssetMaterialization` events for each model updated by\n    a dbt 
command, and :py:class:`dagster.AssetObservation` events for each test run.\n\n    Information parsed from a :py:class:`~DbtOutput` object.\n    """\n    for result in dbt_output.result["results"]:\n        for event in result_to_events(\n            result,\n            docs_url=dbt_output.docs_url,\n            node_info_to_asset_key=node_info_to_asset_key,\n            manifest_json=manifest_json,\n        ):\n            yield check.inst(\n                cast(Union[AssetMaterialization, AssetObservation], event),\n                (AssetMaterialization, AssetObservation),\n            )\n\n\n
[docs]def generate_materializations(\n dbt_output: DbtOutput,\n asset_key_prefix: Optional[Sequence[str]] = None,\n) -> Iterator[AssetMaterialization]:\n """This function yields :py:class:`dagster.AssetMaterialization` events for each model updated by\n a dbt command.\n\n Information parsed from a :py:class:`~DbtOutput` object.\n\n Examples:\n .. code-block:: python\n\n from dagster import op, Output\n from dagster_dbt.utils import generate_materializations\n from dagster_dbt import dbt_cli_resource\n\n @op(required_resource_keys={"dbt"})\n def my_custom_dbt_run(context):\n dbt_output = context.resources.dbt.run()\n for materialization in generate_materializations(dbt_output):\n # you can modify the materialization object to add extra metadata, if desired\n yield materialization\n yield Output(my_dbt_output)\n\n @job(resource_defs={{"dbt":dbt_cli_resource}})\n def my_dbt_cli_job():\n my_custom_dbt_run()\n """\n asset_key_prefix = check.opt_sequence_param(asset_key_prefix, "asset_key_prefix", of_type=str)\n\n for event in generate_events(\n dbt_output,\n node_info_to_asset_key=lambda info: AssetKey(\n asset_key_prefix + info["unique_id"].split(".")\n ),\n ):\n yield check.inst(cast(AssetMaterialization, event), AssetMaterialization)
\n\n\ndef select_unique_ids_from_manifest(\n select: str,\n exclude: str,\n state_path: Optional[str] = None,\n manifest_json_path: Optional[str] = None,\n manifest_json: Optional[Mapping[str, Any]] = None,\n manifest_parsed: Optional[Any] = None,\n) -> AbstractSet[str]:\n """Method to apply a selection string to an existing manifest.json file."""\n import dbt.graph.cli as graph_cli\n import dbt.graph.selector as graph_selector\n from dbt.contracts.graph.manifest import Manifest, WritableManifest\n from dbt.contracts.state import PreviousState\n from dbt.graph.selector_spec import IndirectSelection, SelectionSpec\n from networkx import DiGraph\n\n if state_path is not None:\n previous_state = PreviousState(\n path=Path(state_path), # type: ignore # (unused path, slated for deletion)\n current_path=( # type: ignore # (unused path, slated for deletion)\n Path("/tmp/null") if manifest_json_path is None else Path(manifest_json_path)\n ),\n )\n else:\n previous_state = None\n\n if manifest_json_path is not None:\n manifest = WritableManifest.read_and_check_versions(manifest_json_path)\n child_map = manifest.child_map\n elif manifest_json is not None:\n\n class _DictShim(dict):\n """Shim to enable hydrating a dictionary into a dot-accessible object."""\n\n def __getattr__(self, item):\n ret = super().get(item)\n # allow recursive access e.g. foo.bar.baz\n return _DictShim(ret) if isinstance(ret, dict) else ret\n\n manifest = Manifest(\n # dbt expects dataclasses that can be accessed with dot notation, not bare dictionaries\n nodes={\n unique_id: _DictShim(info) for unique_id, info in manifest_json["nodes"].items() # type: ignore\n },\n sources={\n unique_id: _DictShim(info) for unique_id, info in manifest_json["sources"].items() # type: ignore\n },\n metrics={\n unique_id: _DictShim(info) for unique_id, info in manifest_json["metrics"].items() # type: ignore\n },\n exposures={\n unique_id: _DictShim(info) for unique_id, info in manifest_json["exposures"].items() # type: ignore\n },\n )\n child_map = manifest_json["child_map"]\n elif manifest_parsed is not None:\n manifest = manifest_parsed\n child_map = manifest.child_map\n else:\n check.failed("Must provide either a manifest_json_path, manifest_json, or manifest_parsed.")\n graph = graph_selector.Graph(DiGraph(incoming_graph_data=child_map))\n\n # create a parsed selection from the select string\n try:\n from dbt.flags import GLOBAL_FLAGS\n except ImportError:\n # dbt < 1.5.0 compat\n import dbt.flags as GLOBAL_FLAGS\n setattr(GLOBAL_FLAGS, "INDIRECT_SELECTION", IndirectSelection.Eager)\n setattr(GLOBAL_FLAGS, "WARN_ERROR", True)\n parsed_spec: SelectionSpec = graph_cli.parse_union([select], True)\n\n if exclude:\n parsed_spec = graph_cli.SelectionDifference(\n components=[parsed_spec, graph_cli.parse_union([exclude], True)]\n )\n\n # execute this selection against the graph\n selector = graph_selector.NodeSelector(graph, manifest, previous_state=previous_state)\n selected, _ = selector.select_nodes(parsed_spec)\n return selected\n\n\ndef get_dbt_resource_props_by_dbt_unique_id_from_manifest(\n manifest: Mapping[str, Any]\n) -> Mapping[str, Mapping[str, Any]]:\n """A mapping of a dbt node's unique id to the node's dictionary representation in the manifest."""\n return {\n **manifest["nodes"],\n **manifest["sources"],\n **manifest["exposures"],\n **manifest["metrics"],\n }\n
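# --- Editor's sketch (not part of this module) ----------------------------------
# A self-contained, hedged usage example for generate_materializations (defined
# above): a custom op runs dbt via the CLI resource and re-yields the generated
# AssetMaterialization events alongside the op output. The project_dir path and
# asset key prefix are hypothetical placeholders.
from dagster import Output, job, op
from dagster_dbt import dbt_cli_resource
from dagster_dbt.utils import generate_materializations


@op(required_resource_keys={"dbt"})
def my_custom_dbt_run(context):
    dbt_output = context.resources.dbt.run()
    for materialization in generate_materializations(dbt_output, asset_key_prefix=["dbt"]):
        # extra metadata could be attached to each materialization here, if desired
        yield materialization
    yield Output(dbt_output)


@job(resource_defs={"dbt": dbt_cli_resource.configured({"project_dir": "path/to/dbt_project"})})
def my_dbt_cli_job():
    my_custom_dbt_run()
# --------------------------------------------------------------------------------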
", "current_page_name": "_modules/dagster_dbt/utils", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dbt.utils"}}, "dagster_docker": {"docker_executor": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_docker.docker_executor

\nfrom typing import Iterator, Optional, cast\n\nimport dagster._check as check\nimport docker\nimport docker.errors\nfrom dagster import Field, IntSource, executor\nfrom dagster._annotations import experimental\nfrom dagster._core.definitions.executor_definition import multiple_process_executor_requirements\nfrom dagster._core.events import DagsterEvent, EngineEventData\nfrom dagster._core.execution.retries import RetryMode, get_retries_config\nfrom dagster._core.execution.tags import get_tag_concurrency_limits_config\nfrom dagster._core.executor.base import Executor\nfrom dagster._core.executor.init import InitExecutorContext\nfrom dagster._core.executor.step_delegating import StepDelegatingExecutor\nfrom dagster._core.executor.step_delegating.step_handler.base import (\n    CheckStepHealthResult,\n    StepHandler,\n    StepHandlerContext,\n)\nfrom dagster._core.origin import JobPythonOrigin\nfrom dagster._core.utils import parse_env_var\nfrom dagster._grpc.types import ExecuteStepArgs\nfrom dagster._serdes.utils import hash_str\nfrom dagster._utils.merger import merge_dicts\n\nfrom dagster_docker.utils import DOCKER_CONFIG_SCHEMA, validate_docker_config, validate_docker_image\n\nfrom .container_context import DockerContainerContext\n\n\n
[docs]@executor(\n name="docker",\n config_schema=merge_dicts(\n DOCKER_CONFIG_SCHEMA,\n {\n "retries": get_retries_config(),\n "max_concurrent": Field(\n IntSource,\n is_required=False,\n description=(\n "Limit on the number of containers that will run concurrently within the scope "\n "of a Dagster run. Note that this limit is per run, not global."\n ),\n ),\n "tag_concurrency_limits": get_tag_concurrency_limits_config(),\n },\n ),\n requirements=multiple_process_executor_requirements(),\n)\n@experimental\ndef docker_executor(init_context: InitExecutorContext) -> Executor:\n """Executor which launches steps as Docker containers.\n\n To use the `docker_executor`, set it as the `executor_def` when defining a job:\n\n .. literalinclude:: ../../../../../../python_modules/libraries/dagster-docker/dagster_docker_tests/test_example_executor.py\n :start-after: start_marker\n :end-before: end_marker\n :language: python\n\n Then you can configure the executor with run config as follows:\n\n .. code-block:: YAML\n\n execution:\n config:\n registry: ...\n network: ...\n networks: ...\n container_kwargs: ...\n\n If you're using the DockerRunLauncher, configuration set on the containers created by the run\n launcher will also be set on the containers that are created for each step.\n """\n config = init_context.executor_config\n image = check.opt_str_elem(config, "image")\n registry = check.opt_dict_elem(config, "registry", key_type=str)\n env_vars = check.opt_list_elem(config, "env_vars", of_type=str)\n network = check.opt_str_elem(config, "network")\n networks = check.opt_list_elem(config, "networks", of_type=str)\n container_kwargs = check.opt_dict_elem(config, "container_kwargs", key_type=str)\n retries = check.dict_elem(config, "retries", key_type=str)\n max_concurrent = check.opt_int_elem(config, "max_concurrent")\n tag_concurrency_limits = check.opt_list_elem(config, "tag_concurrency_limits")\n\n validate_docker_config(network, networks, container_kwargs)\n\n if network and not networks:\n networks = [network]\n\n container_context = DockerContainerContext(\n registry=registry,\n env_vars=env_vars or [],\n networks=networks or [],\n container_kwargs=container_kwargs,\n )\n\n return StepDelegatingExecutor(\n DockerStepHandler(image, container_context),\n retries=check.not_none(RetryMode.from_config(retries)),\n max_concurrent=max_concurrent,\n tag_concurrency_limits=tag_concurrency_limits,\n )
\n\n\nclass DockerStepHandler(StepHandler):\n def __init__(\n self,\n image: Optional[str],\n container_context: DockerContainerContext,\n ):\n super().__init__()\n\n self._image = check.opt_str_param(image, "image")\n self._container_context = check.inst_param(\n container_context, "container_context", DockerContainerContext\n )\n\n def _get_image(self, step_handler_context: StepHandlerContext):\n from . import DockerRunLauncher\n\n image = cast(\n JobPythonOrigin, step_handler_context.dagster_run.job_code_origin\n ).repository_origin.container_image\n if not image:\n image = self._image\n\n run_launcher = step_handler_context.instance.run_launcher\n\n if not image and isinstance(run_launcher, DockerRunLauncher):\n image = run_launcher.image\n\n if not image:\n raise Exception("No docker image specified by the executor config or repository")\n\n return image\n\n def _get_docker_container_context(self, step_handler_context: StepHandlerContext):\n # This doesn't vary per step: would be good to have a hook where it can be set once\n # for the whole StepHandler but we need access to the DagsterRun for that\n\n from .docker_run_launcher import DockerRunLauncher\n\n run_launcher = step_handler_context.instance.run_launcher\n run_target = DockerContainerContext.create_for_run(\n step_handler_context.dagster_run,\n run_launcher if isinstance(run_launcher, DockerRunLauncher) else None,\n )\n\n merged_container_context = run_target.merge(self._container_context)\n\n validate_docker_config(\n network=None,\n networks=merged_container_context.networks,\n container_kwargs=merged_container_context.container_kwargs,\n )\n\n return merged_container_context\n\n @property\n def name(self) -> str:\n return "DockerStepHandler"\n\n def _get_client(self, docker_container_context: DockerContainerContext):\n client = docker.client.from_env()\n if docker_container_context.registry:\n client.login(\n registry=docker_container_context.registry["url"],\n username=docker_container_context.registry["username"],\n password=docker_container_context.registry["password"],\n )\n return client\n\n def _get_container_name(self, execute_step_args: ExecuteStepArgs):\n run_id = execute_step_args.run_id\n step_keys_to_execute = check.not_none(execute_step_args.step_keys_to_execute)\n assert len(step_keys_to_execute) == 1, "Launching multiple steps is not currently supported"\n step_key = step_keys_to_execute[0]\n\n step_name = f"dagster-step-{hash_str(run_id + step_key)}"\n\n if execute_step_args.known_state:\n retry_state = execute_step_args.known_state.get_retry_state()\n retry_number = retry_state.get_attempt_count(step_key)\n if retry_number:\n step_name = f"{step_name}-{retry_number}"\n\n return step_name\n\n def _create_step_container(\n self,\n client,\n container_context,\n step_image,\n step_handler_context: StepHandlerContext,\n ):\n execute_step_args = step_handler_context.execute_step_args\n step_keys_to_execute = check.not_none(execute_step_args.step_keys_to_execute)\n assert len(step_keys_to_execute) == 1, "Launching multiple steps is not currently supported"\n step_key = step_keys_to_execute[0]\n\n env_vars = dict([parse_env_var(env_var) for env_var in container_context.env_vars])\n env_vars["DAGSTER_RUN_JOB_NAME"] = step_handler_context.dagster_run.job_name\n env_vars["DAGSTER_RUN_STEP_KEY"] = step_key\n return client.containers.create(\n step_image,\n name=self._get_container_name(execute_step_args),\n detach=True,\n network=container_context.networks[0] if len(container_context.networks) else None,\n 
command=execute_step_args.get_command_args(),\n environment=env_vars,\n **container_context.container_kwargs,\n )\n\n def launch_step(self, step_handler_context: StepHandlerContext) -> Iterator[DagsterEvent]:\n container_context = self._get_docker_container_context(step_handler_context)\n\n client = self._get_client(container_context)\n\n step_image = self._get_image(step_handler_context)\n validate_docker_image(step_image)\n\n try:\n step_container = self._create_step_container(\n client, container_context, step_image, step_handler_context\n )\n except docker.errors.ImageNotFound:\n client.images.pull(step_image)\n step_container = self._create_step_container(\n client, container_context, step_image, step_handler_context\n )\n\n if len(container_context.networks) > 1:\n for network_name in container_context.networks[1:]:\n network = client.networks.get(network_name)\n network.connect(step_container)\n\n step_keys_to_execute = check.not_none(\n step_handler_context.execute_step_args.step_keys_to_execute\n )\n assert len(step_keys_to_execute) == 1, "Launching multiple steps is not currently supported"\n step_key = step_keys_to_execute[0]\n\n yield DagsterEvent.step_worker_starting(\n step_handler_context.get_step_context(step_key),\n message="Launching step in Docker container.",\n metadata={\n "Docker container id": step_container.id,\n },\n )\n step_container.start()\n\n def check_step_health(self, step_handler_context: StepHandlerContext) -> CheckStepHealthResult:\n container_context = self._get_docker_container_context(step_handler_context)\n\n client = self._get_client(container_context)\n\n container_name = self._get_container_name(step_handler_context.execute_step_args)\n\n container = client.containers.get(container_name)\n\n if container.status == "running":\n return CheckStepHealthResult.healthy()\n\n try:\n container_info = container.wait(timeout=0.1)\n except Exception as e:\n raise Exception(\n f"Container status is {container.status}. Raised exception attempting to get its"\n " return code."\n ) from e\n\n ret_code = container_info.get("StatusCode")\n if ret_code == 0:\n return CheckStepHealthResult.healthy()\n\n return CheckStepHealthResult.unhealthy(\n reason=f"Container status is {container.status}. Return code is {ret_code}."\n )\n\n def terminate_step(self, step_handler_context: StepHandlerContext) -> Iterator[DagsterEvent]:\n container_context = self._get_docker_container_context(step_handler_context)\n\n step_keys_to_execute = check.not_none(\n step_handler_context.execute_step_args.step_keys_to_execute\n )\n assert (\n len(step_keys_to_execute) == 1\n ), "Terminating multiple steps is not currently supported"\n step_key = step_keys_to_execute[0]\n\n container_name = self._get_container_name(step_handler_context.execute_step_args)\n\n yield DagsterEvent.engine_event(\n step_handler_context.get_step_context(step_key),\n message=f"Stopping Docker container {container_name} for step.",\n event_specific_data=EngineEventData(),\n )\n\n client = self._get_client(container_context)\n\n container = client.containers.get(container_name)\n\n container.stop()\n
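# --- Editor's sketch (not part of this module) ----------------------------------
# A hedged example of attaching the executor above to a job; the op is a
# hypothetical placeholder, and per-run settings (network, registry, container
# kwargs, etc.) would be supplied through the run config shown in the docstring.
from dagster import job, op
from dagster_docker import docker_executor


@op
def ping():
    return "pong"


@job(executor_def=docker_executor)
def dockerized_job():
    # each step of this job is launched in its own Docker container
    ping()
# --------------------------------------------------------------------------------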
", "current_page_name": "_modules/dagster_docker/docker_executor", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_docker.docker_executor"}, "docker_run_launcher": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_docker.docker_run_launcher

\nfrom typing import Any, Mapping, Optional\n\nimport dagster._check as check\nimport docker\nfrom dagster._core.launcher.base import (\n    CheckRunHealthResult,\n    LaunchRunContext,\n    ResumeRunContext,\n    RunLauncher,\n    WorkerStatus,\n)\nfrom dagster._core.storage.dagster_run import DagsterRun\nfrom dagster._core.storage.tags import DOCKER_IMAGE_TAG\nfrom dagster._core.utils import parse_env_var\nfrom dagster._grpc.types import ExecuteRunArgs, ResumeRunArgs\nfrom dagster._serdes import ConfigurableClass\nfrom dagster._serdes.config_class import ConfigurableClassData\nfrom typing_extensions import Self\n\nfrom dagster_docker.utils import DOCKER_CONFIG_SCHEMA, validate_docker_config, validate_docker_image\n\nfrom .container_context import DockerContainerContext\n\nDOCKER_CONTAINER_ID_TAG = "docker/container_id"\n\n\n
[docs]class DockerRunLauncher(RunLauncher, ConfigurableClass):\n """Launches runs in a Docker container."""\n\n def __init__(\n self,\n inst_data: Optional[ConfigurableClassData] = None,\n image=None,\n registry=None,\n env_vars=None,\n network=None,\n networks=None,\n container_kwargs=None,\n ):\n self._inst_data = inst_data\n self.image = image\n self.registry = registry\n self.env_vars = env_vars\n\n validate_docker_config(network, networks, container_kwargs)\n\n if network:\n self.networks = [network]\n elif networks:\n self.networks = networks\n else:\n self.networks = []\n\n self.container_kwargs = check.opt_dict_param(\n container_kwargs, "container_kwargs", key_type=str\n )\n\n super().__init__()\n\n @property\n def inst_data(self):\n return self._inst_data\n\n @classmethod\n def config_type(cls):\n return DOCKER_CONFIG_SCHEMA\n\n @classmethod\n def from_config_value(\n cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any]\n ) -> Self:\n return DockerRunLauncher(inst_data=inst_data, **config_value)\n\n def get_container_context(self, dagster_run: DagsterRun) -> DockerContainerContext:\n return DockerContainerContext.create_for_run(dagster_run, self)\n\n def _get_client(self, container_context: DockerContainerContext):\n client = docker.client.from_env()\n if container_context.registry:\n client.login(\n registry=container_context.registry["url"],\n username=container_context.registry["username"],\n password=container_context.registry["password"],\n )\n return client\n\n def _get_docker_image(self, job_code_origin):\n docker_image = job_code_origin.repository_origin.container_image\n\n if not docker_image:\n docker_image = self.image\n\n if not docker_image:\n raise Exception("No docker image specified by the instance config or repository")\n\n validate_docker_image(docker_image)\n return docker_image\n\n def _launch_container_with_command(self, run, docker_image, command):\n container_context = self.get_container_context(run)\n docker_env = dict([parse_env_var(env_var) for env_var in container_context.env_vars])\n docker_env["DAGSTER_RUN_JOB_NAME"] = run.job_name\n\n client = self._get_client(container_context)\n\n try:\n container = client.containers.create(\n image=docker_image,\n command=command,\n detach=True,\n environment=docker_env,\n network=container_context.networks[0] if len(container_context.networks) else None,\n **container_context.container_kwargs,\n )\n\n except docker.errors.ImageNotFound:\n client.images.pull(docker_image)\n container = client.containers.create(\n image=docker_image,\n command=command,\n detach=True,\n environment=docker_env,\n network=container_context.networks[0] if len(container_context.networks) else None,\n **container_context.container_kwargs,\n )\n\n if len(container_context.networks) > 1:\n for network_name in container_context.networks[1:]:\n network = client.networks.get(network_name)\n network.connect(container)\n\n self._instance.report_engine_event(\n message=f"Launching run in a new container {container.id} with image {docker_image}",\n dagster_run=run,\n cls=self.__class__,\n )\n\n self._instance.add_run_tags(\n run.run_id,\n {DOCKER_CONTAINER_ID_TAG: container.id, DOCKER_IMAGE_TAG: docker_image},\n )\n\n container.start()\n\n def launch_run(self, context: LaunchRunContext) -> None:\n run = context.dagster_run\n job_code_origin = check.not_none(context.job_code_origin)\n docker_image = self._get_docker_image(job_code_origin)\n\n command = ExecuteRunArgs(\n job_origin=job_code_origin,\n run_id=run.run_id,\n 
instance_ref=self._instance.get_ref(),\n ).get_command_args()\n\n self._launch_container_with_command(run, docker_image, command)\n\n @property\n def supports_resume_run(self):\n return True\n\n def resume_run(self, context: ResumeRunContext) -> None:\n run = context.dagster_run\n job_code_origin = check.not_none(context.job_code_origin)\n docker_image = self._get_docker_image(job_code_origin)\n\n command = ResumeRunArgs(\n job_origin=job_code_origin,\n run_id=run.run_id,\n instance_ref=self._instance.get_ref(),\n ).get_command_args()\n\n self._launch_container_with_command(run, docker_image, command)\n\n def _get_container(self, run):\n if not run or run.is_finished:\n return None\n\n container_id = run.tags.get(DOCKER_CONTAINER_ID_TAG)\n\n if not container_id:\n return None\n\n container_context = self.get_container_context(run)\n\n try:\n return self._get_client(container_context).containers.get(container_id)\n except Exception:\n return None\n\n def terminate(self, run_id):\n run = self._instance.get_run_by_id(run_id)\n\n if not run:\n return False\n\n self._instance.report_run_canceling(run)\n\n container = self._get_container(run)\n\n if not container:\n self._instance.report_engine_event(\n message="Unable to get docker container to send termination request to.",\n dagster_run=run,\n cls=self.__class__,\n )\n return False\n\n container.stop()\n\n return True\n\n @property\n def supports_check_run_worker_health(self):\n return True\n\n def check_run_worker_health(self, run: DagsterRun):\n container = self._get_container(run)\n if container is None:\n return CheckRunHealthResult(WorkerStatus.NOT_FOUND)\n if container.status == "running":\n return CheckRunHealthResult(WorkerStatus.RUNNING)\n return CheckRunHealthResult(\n WorkerStatus.FAILED, msg=f"Container status is {container.status}"\n )
\n
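`DockerRunLauncher` is normally configured on the Dagster instance (for example via the `run_launcher:` block of `dagster.yaml`) rather than constructed in code. The sketch below instantiates it directly, with made-up values, purely to illustrate the fields accepted by the constructor above.

.. code-block:: python

    from dagster_docker import DockerRunLauncher

    # Illustrative values only; on a deployment these normally come from instance config.
    launcher = DockerRunLauncher(
        image="my-org/dagster-user-code:latest",  # fallback image when the job's code origin has none
        env_vars=["DATABASE_URL", "STAGE=prod"],  # KEY or KEY=VALUE, parsed by parse_env_var above
        networks=["dagster_network"],             # first network attached at create time, the rest after
        container_kwargs={                        # forwarded verbatim to docker-py's containers.create
            "volumes": {"/opt/data": {"bind": "/data", "mode": "rw"}},
        },
    )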
", "current_page_name": "_modules/dagster_docker/docker_run_launcher", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_docker.docker_run_launcher"}, "ops": {"docker_container_op": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_docker.ops.docker_container_op

\nfrom typing import Any, Mapping, Optional, Sequence\n\nimport docker\nfrom dagster import Field, In, Nothing, OpExecutionContext, StringSource, op\nfrom dagster._annotations import experimental\nfrom dagster._core.utils import parse_env_var\nfrom dagster._serdes.utils import hash_str\n\nfrom ..container_context import DockerContainerContext\nfrom ..docker_run_launcher import DockerRunLauncher\nfrom ..utils import DOCKER_CONFIG_SCHEMA, validate_docker_image\n\nDOCKER_CONTAINER_OP_CONFIG = {\n    **DOCKER_CONFIG_SCHEMA,\n    "image": Field(\n        StringSource,\n        is_required=True,\n        description="The image in which to run the Docker container.",\n    ),\n    "entrypoint": Field(\n        [str],\n        is_required=False,\n        description="The ENTRYPOINT for the Docker container",\n    ),\n    "command": Field(\n        [str],\n        is_required=False,\n        description="The command to run in the container within the launched Docker container.",\n    ),\n}\n\n\ndef _get_client(docker_container_context: DockerContainerContext):\n    client = docker.client.from_env()\n    if docker_container_context.registry:\n        client.login(\n            registry=docker_container_context.registry["url"],\n            username=docker_container_context.registry["username"],\n            password=docker_container_context.registry["password"],\n        )\n    return client\n\n\ndef _get_container_name(run_id, op_name, retry_number):\n    container_name = hash_str(run_id + op_name)\n\n    if retry_number > 0:\n        container_name = f"{container_name}-{retry_number}"\n\n    return container_name\n\n\ndef _create_container(\n    op_context: OpExecutionContext,\n    client,\n    container_context: DockerContainerContext,\n    image: str,\n    entrypoint: Optional[Sequence[str]],\n    command: Optional[Sequence[str]],\n):\n    env_vars = dict([parse_env_var(env_var) for env_var in container_context.env_vars])\n    return client.containers.create(\n        image,\n        name=_get_container_name(op_context.run_id, op_context.op.name, op_context.retry_number),\n        detach=True,\n        network=container_context.networks[0] if len(container_context.networks) else None,\n        entrypoint=entrypoint,\n        command=command,\n        environment=env_vars,\n        **container_context.container_kwargs,\n    )\n\n\n
[docs]@experimental\ndef execute_docker_container(\n context: OpExecutionContext,\n image: str,\n entrypoint: Optional[Sequence[str]] = None,\n command: Optional[Sequence[str]] = None,\n networks: Optional[Sequence[str]] = None,\n registry: Optional[Mapping[str, str]] = None,\n env_vars: Optional[Sequence[str]] = None,\n container_kwargs: Optional[Mapping[str, Any]] = None,\n):\n """This function is a utility for executing a Docker container from within a Dagster op.\n\n Args:\n image (str): The image to use for the launched Docker container.\n entrypoint (Optional[Sequence[str]]): The ENTRYPOINT to run in the launched Docker\n container. Default: None.\n command (Optional[Sequence[str]]): The CMD to run in the launched Docker container.\n Default: None.\n networks (Optional[Sequence[str]]): Names of the Docker networks to which to connect the\n launched container. Default: None.\n registry (Optional[Mapping[str, str]]): Information for using a non-local/public Docker\n registry. Can have "url", "username", or "password" keys.\n env_vars (Optional[Sequence[str]]): List of environment variables to include in the launched\n container. Each can be of the form KEY=VALUE or just KEY (in which case the value will be\n pulled from the calling environment).\n container_kwargs (Optional[Dict[str, Any]]): key-value pairs that can be passed into\n containers.create in the Docker Python API. See\n https://docker-py.readthedocs.io/en/stable/containers.html for the full list\n of available options.\n """\n run_container_context = DockerContainerContext.create_for_run(\n context.dagster_run,\n (\n context.instance.run_launcher\n if isinstance(context.instance.run_launcher, DockerRunLauncher)\n else None\n ),\n )\n\n validate_docker_image(image)\n\n op_container_context = DockerContainerContext(\n registry=registry, env_vars=env_vars, networks=networks, container_kwargs=container_kwargs\n )\n\n container_context = run_container_context.merge(op_container_context)\n\n client = _get_client(container_context)\n\n try:\n container = _create_container(\n context, client, container_context, image, entrypoint, command\n )\n except docker.errors.ImageNotFound:\n client.images.pull(image)\n container = _create_container(\n context, client, container_context, image, entrypoint, command\n )\n\n if len(container_context.networks) > 1:\n for network_name in container_context.networks[1:]:\n network = client.networks.get(network_name)\n network.connect(container)\n\n container.start()\n\n for line in container.logs(stdout=True, stderr=True, stream=True, follow=True):\n print(line) # noqa: T201\n\n exit_status = container.wait()["StatusCode"]\n\n if exit_status != 0:\n raise Exception(f"Docker container returned exit code {exit_status}")
\n\n\n
[docs]@op(ins={"start_after": In(Nothing)}, config_schema=DOCKER_CONTAINER_OP_CONFIG)\n@experimental\ndef docker_container_op(context):\n """An op that runs a Docker container using the docker Python API.\n\n Contrast with the `docker_executor`, which runs each Dagster op in a Dagster job in its\n own Docker container.\n\n This op may be useful when:\n - You need to orchestrate a command that isn't a Dagster op (or isn't written in Python)\n - You want to run the rest of a Dagster job using a specific executor, and only a single\n op in docker.\n\n For example:\n\n .. literalinclude:: ../../../../../../python_modules/libraries/dagster-docker/dagster_docker_tests/test_example_docker_container_op.py\n :start-after: start_marker\n :end-before: end_marker\n :language: python\n\n You can create your own op with the same implementation by calling the `execute_docker_container` function\n inside your own op.\n """\n execute_docker_container(context, **context.op_config)
\n
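As the docstring above notes, `execute_docker_container` can also be called from a hand-written op when the fixed config schema of `docker_container_op` is not needed. A short sketch, with hypothetical image, command, and environment variable names:

.. code-block:: python

    from dagster import OpExecutionContext, job, op
    from dagster_docker import execute_docker_container


    @op
    def run_etl_container(context: OpExecutionContext):
        # Streams the container's logs and raises if it exits non-zero (see the source above).
        execute_docker_container(
            context,
            image="my-org/etl-scripts:latest",
            command=["python", "sync.py"],
            env_vars=["WAREHOUSE_URL"],
        )


    @job
    def etl_job():
        run_etl_container()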
", "current_page_name": "_modules/dagster_docker/ops/docker_container_op", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_docker.ops.docker_container_op"}}, "pipes": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_docker.pipes

\nfrom contextlib import contextmanager\nfrom typing import Any, Iterator, Mapping, Optional, Sequence, Union\n\nimport docker\nfrom dagster import (\n    OpExecutionContext,\n    ResourceParam,\n    _check as check,\n)\nfrom dagster._annotations import experimental\nfrom dagster._core.pipes.client import (\n    PipesClient,\n    PipesClientCompletedInvocation,\n    PipesContextInjector,\n    PipesMessageReader,\n)\nfrom dagster._core.pipes.context import (\n    PipesMessageHandler,\n)\nfrom dagster._core.pipes.utils import (\n    PipesEnvContextInjector,\n    extract_message_or_forward_to_stdout,\n    open_pipes_session,\n)\nfrom dagster_pipes import (\n    DagsterPipesError,\n    PipesDefaultMessageWriter,\n    PipesExtras,\n    PipesParams,\n)\n\n\n
[docs]@experimental\nclass PipesDockerLogsMessageReader(PipesMessageReader):\n @contextmanager\n def read_messages(\n self,\n handler: PipesMessageHandler,\n ) -> Iterator[PipesParams]:\n self._handler = handler\n try:\n yield {PipesDefaultMessageWriter.STDIO_KEY: PipesDefaultMessageWriter.STDERR}\n finally:\n self._handler = None\n\n def consume_docker_logs(self, container) -> None:\n handler = check.not_none(\n self._handler, "Can only consume logs within context manager scope."\n )\n for log_line in container.logs(stdout=True, stderr=True, stream=True, follow=True):\n if isinstance(log_line, bytes):\n log_entry = log_line.decode("utf-8")\n elif isinstance(log_line, str):\n log_entry = log_line\n else:\n continue\n\n extract_message_or_forward_to_stdout(handler, log_entry)\n\n def no_messages_debug_text(self) -> str:\n return "Attempted to read messages by extracting them from docker logs directly."
\n\n\n@experimental\nclass _PipesDockerClient(PipesClient):\n """A pipes client that runs external processes in docker containers.\n\n By default context is injected via environment variables and messages are parsed out of the\n log stream, with other logs forwarded to stdout of the orchestration process.\n\n Args:\n env (Optional[Mapping[str, str]]): An optional dict of environment variables to pass to the\n container.\n register (Optional[Mapping[str, str]]): An optional dict of registry credentials to login to\n the docker client.\n context_injector (Optional[PipesContextInjector]): A context injector to use to inject\n context into the docker container process. Defaults to :py:class:`PipesEnvContextInjector`.\n message_reader (Optional[PipesContextInjector]): A message reader to use to read messages\n from the docker container process. Defaults to :py:class:`DockerLogsMessageReader`.\n """\n\n def __init__(\n self,\n env: Optional[Mapping[str, str]] = None,\n registry: Optional[Mapping[str, str]] = None,\n context_injector: Optional[PipesContextInjector] = None,\n message_reader: Optional[PipesMessageReader] = None,\n ):\n self.env = check.opt_mapping_param(env, "env", key_type=str, value_type=str)\n self.registry = check.opt_mapping_param(registry, "registry", key_type=str, value_type=str)\n self.context_injector = (\n check.opt_inst_param(\n context_injector,\n "context_injector",\n PipesContextInjector,\n )\n or PipesEnvContextInjector()\n )\n\n self.message_reader = (\n check.opt_inst_param(message_reader, "message_reader", PipesMessageReader)\n or PipesDockerLogsMessageReader()\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def run(\n self,\n *,\n context: OpExecutionContext,\n image: str,\n extras: Optional[PipesExtras] = None,\n command: Optional[Union[str, Sequence[str]]] = None,\n env: Optional[Mapping[str, str]] = None,\n registry: Optional[Mapping[str, str]] = None,\n container_kwargs: Optional[Mapping[str, Any]] = None,\n ) -> PipesClientCompletedInvocation:\n """Create a docker container and run it to completion, enriched with the pipes protocol.\n\n Args:\n image (str):\n The image for the container to use.\n command (Optional[Union[str, Sequence[str]]]):\n The command for the container use.\n env (Optional[Mapping[str,str]]):\n A mapping of environment variable names to values to set on the first\n container in the pod spec, on top of those configured on resource.\n registry (Optional[Mapping[str, str]]:\n A mapping containing url, username, and password to be used\n with docker client login.\n container_kwargs (Optional[Mapping[str, Any]]:\n Arguments to be forwarded to docker client containers.create.\n extras (Optional[PipesExtras]):\n Extra values to pass along as part of the ext protocol.\n context_injector (Optional[PipesContextInjector]):\n Override the default ext protocol context injection.\n message_reader (Optional[PipesMessageReader]):\n Override the default ext protocol message reader.\n\n Returns:\n PipesClientCompletedInvocation: Wrapper containing results reported by the external\n process.\n """\n with open_pipes_session(\n context=context,\n context_injector=self.context_injector,\n message_reader=self.message_reader,\n extras=extras,\n ) as pipes_session:\n client = docker.client.from_env()\n registry = registry or self.registry\n if registry:\n client.login(\n registry=registry["url"],\n username=registry["username"],\n password=registry["password"],\n )\n\n try:\n container = self._create_container(\n 
client=client,\n image=image,\n command=command,\n env=env,\n open_pipes_session_env=pipes_session.get_bootstrap_env_vars(),\n container_kwargs=container_kwargs,\n )\n except docker.errors.ImageNotFound:\n client.images.pull(image)\n container = self._create_container(\n client=client,\n image=image,\n command=command,\n env=env,\n open_pipes_session_env=pipes_session.get_bootstrap_env_vars(),\n container_kwargs=container_kwargs,\n )\n\n result = container.start()\n try:\n if isinstance(self.message_reader, PipesDockerLogsMessageReader):\n self.message_reader.consume_docker_logs(container)\n\n result = container.wait()\n if result["StatusCode"] != 0:\n raise DagsterPipesError(f"Container exited with non-zero status code: {result}")\n finally:\n container.stop()\n return PipesClientCompletedInvocation(tuple(pipes_session.get_results()))\n\n def _create_container(\n self,\n client,\n image: str,\n command: Optional[Union[str, Sequence[str]]],\n env: Optional[Mapping[str, str]],\n container_kwargs: Optional[Mapping[str, Any]],\n open_pipes_session_env: Mapping[str, str],\n ):\n kwargs = dict(container_kwargs or {})\n kwargs_env = kwargs.pop("environment", {})\n return client.containers.create(\n image=image,\n command=command,\n detach=True,\n environment={\n **open_pipes_session_env,\n **(self.env or {}),\n **(env or {}),\n **kwargs_env,\n },\n **kwargs,\n )\n\n\nPipesDockerClient = ResourceParam[_PipesDockerClient]\n
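A sketch of using the Pipes client above from an asset. In this snapshot the concrete resource class is the underscore-prefixed `_PipesDockerClient`, with `PipesDockerClient` serving as its `ResourceParam` alias for annotations. The image and command are hypothetical, the container is expected to report back via `dagster-pipes` over the log stream read by `PipesDockerLogsMessageReader`, and the results are assumed to be returned through a `get_results()` accessor on `PipesClientCompletedInvocation`.

.. code-block:: python

    from dagster import AssetExecutionContext, Definitions, asset
    from dagster_docker.pipes import PipesDockerClient, _PipesDockerClient


    @asset
    def nightly_report(context: AssetExecutionContext, docker_pipes_client: PipesDockerClient):
        # run() blocks until the container exits, then returns whatever the external
        # process reported through the pipes protocol.
        return docker_pipes_client.run(
            context=context,
            image="my-org/report-builder:latest",
            command=["python", "build_report.py"],
        ).get_results()


    defs = Definitions(
        assets=[nightly_report],
        resources={"docker_pipes_client": _PipesDockerClient()},
    )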
", "current_page_name": "_modules/dagster_docker/pipes", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_docker.pipes"}}, "dagster_duckdb": {"io_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_duckdb.io_manager

\nfrom abc import abstractmethod\nfrom contextlib import contextmanager\nfrom typing import Any, Dict, Optional, Sequence, Type, cast\n\nimport duckdb\nfrom dagster import IOManagerDefinition, OutputContext, io_manager\nfrom dagster._config.pythonic_config import ConfigurableIOManagerFactory\nfrom dagster._core.definitions.time_window_partitions import TimeWindow\nfrom dagster._core.storage.db_io_manager import (\n    DbClient,\n    DbIOManager,\n    DbTypeHandler,\n    TablePartitionDimension,\n    TableSlice,\n)\nfrom dagster._core.storage.io_manager import dagster_maintained_io_manager\nfrom dagster._utils.backoff import backoff\nfrom pydantic import Field\n\nDUCKDB_DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"\n\n\n
[docs]def build_duckdb_io_manager(\n type_handlers: Sequence[DbTypeHandler], default_load_type: Optional[Type] = None\n) -> IOManagerDefinition:\n """Builds an IO manager definition that reads inputs from and writes outputs to DuckDB.\n\n Args:\n type_handlers (Sequence[DbTypeHandler]): Each handler defines how to translate between\n DuckDB tables and an in-memory type - e.g. a Pandas DataFrame. If only\n one DbTypeHandler is provided, it will be used as teh default_load_type.\n default_load_type (Type): When an input has no type annotation, load it as this type.\n\n Returns:\n IOManagerDefinition\n\n Examples:\n .. code-block:: python\n\n from dagster_duckdb import build_duckdb_io_manager\n from dagster_duckdb_pandas import DuckDBPandasTypeHandler\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in duckdb\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n duckdb_io_manager = build_duckdb_io_manager([DuckDBPandasTypeHandler()])\n\n @repository\n def my_repo():\n return with_resources(\n [my_table],\n {"io_manager": duckdb_io_manager.configured({"database": "my_db.duckdb"})}\n )\n\n If you do not provide a schema, Dagster will determine a schema based on the assets and ops using\n the IO Manager. For assets, the schema will be determined from the asset key. For ops, the schema can be\n specified by including a "schema" entry in output metadata. If none of these is provided, the schema will\n default to "public".\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_schema"})}\n )\n def make_my_table() -> pd.DataFrame:\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pd.DataFrame):\n # my_table will just contain the data from column "a"\n ...\n\n """\n\n @dagster_maintained_io_manager\n @io_manager(config_schema=DuckDBIOManager.to_config_schema())\n def duckdb_io_manager(init_context):\n """IO Manager for storing outputs in a DuckDB database.\n\n Assets will be stored in the schema and table name specified by their AssetKey.\n Subsequent materializations of an asset will overwrite previous materializations of that asset.\n Op outputs will be stored in the schema specified by output metadata (defaults to public) in a\n table of the name of the output.\n """\n return DbIOManager(\n type_handlers=type_handlers,\n db_client=DuckDbClient(),\n io_manager_name="DuckDBIOManager",\n database=init_context.resource_config["database"],\n schema=init_context.resource_config.get("schema"),\n default_load_type=default_load_type,\n )\n\n return duckdb_io_manager
\n\n\n
[docs]class DuckDBIOManager(ConfigurableIOManagerFactory):\n """Base class for an IO manager definition that reads inputs from and writes outputs to DuckDB.\n\n Examples:\n .. code-block:: python\n\n from dagster_duckdb import DuckDBIOManager\n from dagster_duckdb_pandas import DuckDBPandasTypeHandler\n\n class MyDuckDBIOManager(DuckDBIOManager):\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [DuckDBPandasTypeHandler()]\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in duckdb\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={"io_manager": MyDuckDBIOManager(database="my_db.duckdb")}\n )\n\n If you do not provide a schema, Dagster will determine a schema based on the assets and ops using\n the IO Manager. For assets, the schema will be determined from the asset key, as in the above example.\n For ops, the schema can be specified by including a "schema" entry in output metadata. If none\n of these is provided, the schema will default to "public".\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_schema"})}\n )\n def make_my_table() -> pd.DataFrame:\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pd.DataFrame):\n # my_table will just contain the data from column "a"\n ...\n\n Set DuckDB configuration options using the config field. See\n https://duckdb.org/docs/sql/configuration.html for all available settings.\n\n .. code-block:: python\n\n defs = Definitions(\n assets=[my_table],\n resources={"io_manager": MyDuckDBIOManager(database="my_db.duckdb",\n config={"arrow_large_buffer_size": True})}\n )\n\n """\n\n database: str = Field(description="Path to the DuckDB database.")\n config: Dict[str, Any] = Field(description="DuckDB configuration options.", default={})\n schema_: Optional[str] = Field(\n default=None, alias="schema", description="Name of the schema to use."\n ) # schema is a reserved word for pydantic\n\n @staticmethod\n @abstractmethod\n def type_handlers() -> Sequence[DbTypeHandler]: ...\n\n @staticmethod\n def default_load_type() -> Optional[Type]:\n return None\n\n def create_io_manager(self, context) -> DbIOManager:\n return DbIOManager(\n db_client=DuckDbClient(),\n database=self.database,\n schema=self.schema_,\n type_handlers=self.type_handlers(),\n default_load_type=self.default_load_type(),\n io_manager_name="DuckDBIOManager",\n )
\n\n\nclass DuckDbClient(DbClient):\n @staticmethod\n def delete_table_slice(context: OutputContext, table_slice: TableSlice, connection) -> None:\n try:\n connection.execute(_get_cleanup_statement(table_slice))\n except duckdb.CatalogException:\n # table doesn't exist yet, so ignore the error\n pass\n\n @staticmethod\n def ensure_schema_exists(context: OutputContext, table_slice: TableSlice, connection) -> None:\n connection.execute(f"create schema if not exists {table_slice.schema};")\n\n @staticmethod\n def get_select_statement(table_slice: TableSlice) -> str:\n col_str = ", ".join(table_slice.columns) if table_slice.columns else "*"\n\n if table_slice.partition_dimensions and len(table_slice.partition_dimensions) > 0:\n query = f"SELECT {col_str} FROM {table_slice.schema}.{table_slice.table} WHERE\\n"\n return query + _partition_where_clause(table_slice.partition_dimensions)\n else:\n return f"""SELECT {col_str} FROM {table_slice.schema}.{table_slice.table}"""\n\n @staticmethod\n @contextmanager\n def connect(context, _):\n conn = backoff(\n fn=duckdb.connect,\n retry_on=(RuntimeError, duckdb.IOException),\n kwargs={\n "database": context.resource_config["database"],\n "read_only": False,\n "config": context.resource_config["config"],\n },\n max_retries=10,\n )\n\n yield conn\n\n conn.close()\n\n\ndef _get_cleanup_statement(table_slice: TableSlice) -> str:\n """Returns a SQL statement that deletes data in the given table to make way for the output data\n being written.\n """\n if table_slice.partition_dimensions and len(table_slice.partition_dimensions) > 0:\n query = f"DELETE FROM {table_slice.schema}.{table_slice.table} WHERE\\n"\n return query + _partition_where_clause(table_slice.partition_dimensions)\n else:\n return f"DELETE FROM {table_slice.schema}.{table_slice.table}"\n\n\ndef _partition_where_clause(partition_dimensions: Sequence[TablePartitionDimension]) -> str:\n return " AND\\n".join(\n (\n _time_window_where_clause(partition_dimension)\n if isinstance(partition_dimension.partitions, TimeWindow)\n else _static_where_clause(partition_dimension)\n )\n for partition_dimension in partition_dimensions\n )\n\n\ndef _time_window_where_clause(table_partition: TablePartitionDimension) -> str:\n partition = cast(TimeWindow, table_partition.partitions)\n start_dt, end_dt = partition\n start_dt_str = start_dt.strftime(DUCKDB_DATETIME_FORMAT)\n end_dt_str = end_dt.strftime(DUCKDB_DATETIME_FORMAT)\n return f"""{table_partition.partition_expr} >= '{start_dt_str}' AND {table_partition.partition_expr} < '{end_dt_str}'"""\n\n\ndef _static_where_clause(table_partition: TablePartitionDimension) -> str:\n partitions = ", ".join(f"'{partition}'" for partition in table_partition.partitions)\n return f"""{table_partition.partition_expr} in ({partitions})"""\n
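The docstring above wires the I/O manager with `@repository`/`with_resources`; an equivalent `Definitions`-based sketch, using the pandas type handler documented further down, might look like this:

.. code-block:: python

    import pandas as pd
    from dagster import Definitions, asset
    from dagster_duckdb import build_duckdb_io_manager
    from dagster_duckdb_pandas import DuckDBPandasTypeHandler


    @asset(key_prefix=["my_schema"])  # "my_schema" becomes the DuckDB schema
    def my_table() -> pd.DataFrame:  # the asset name becomes the table name
        return pd.DataFrame({"a": [1, 2, 3]})


    duckdb_io_manager = build_duckdb_io_manager(
        [DuckDBPandasTypeHandler()], default_load_type=pd.DataFrame
    )

    defs = Definitions(
        assets=[my_table],
        resources={"io_manager": duckdb_io_manager.configured({"database": "my_db.duckdb"})},
    )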
", "current_page_name": "_modules/dagster_duckdb/io_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_duckdb.io_manager"}, "resource": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_duckdb.resource

\nfrom contextlib import contextmanager\nfrom typing import Any, Dict\n\nimport duckdb\nfrom dagster import ConfigurableResource\nfrom dagster._utils.backoff import backoff\nfrom pydantic import Field\n\n\n
[docs]class DuckDBResource(ConfigurableResource):\n """Resource for interacting with a DuckDB database.\n\n Examples:\n .. code-block:: python\n\n from dagster import Definitions, asset\n from dagster_duckdb import DuckDBResource\n\n @asset\n def my_table(duckdb: DuckDBResource):\n with duckdb.get_connection() as conn:\n conn.execute("SELECT * from MY_SCHEMA.MY_TABLE")\n\n defs = Definitions(\n assets=[my_table],\n resources={"duckdb": DuckDBResource(database="path/to/db.duckdb")}\n )\n\n """\n\n database: str = Field(\n description=(\n "Path to the DuckDB database. Setting database=':memory:' will use an in-memory"\n " database "\n )\n )\n config: Dict[str, Any] = Field(description="DuckDB configuration options.", default={})\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @contextmanager\n def get_connection(self):\n conn = backoff(\n fn=duckdb.connect,\n retry_on=(RuntimeError, duckdb.IOException),\n kwargs={"database": self.database, "read_only": False, "config": self.config},\n max_retries=10,\n )\n\n yield conn\n\n conn.close()
\n
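The docstring above covers `get_connection`; the `config` field, which is passed straight through to `duckdb.connect`, is not shown there. A small sketch reusing the `arrow_large_buffer_size` option mentioned in the DuckDBIOManager docs (any valid DuckDB setting works the same way):

.. code-block:: python

    from dagster import Definitions, asset
    from dagster_duckdb import DuckDBResource


    @asset
    def seeded_table(duckdb: DuckDBResource) -> None:
        with duckdb.get_connection() as conn:  # retried connect, closed when the block exits
            conn.execute("CREATE TABLE IF NOT EXISTS seeded_table AS SELECT 1 AS id")


    defs = Definitions(
        assets=[seeded_table],
        resources={
            # database=":memory:" would use an in-memory database instead of a file.
            "duckdb": DuckDBResource(
                database="my_db.duckdb",
                config={"arrow_large_buffer_size": True},
            ),
        },
    )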
", "current_page_name": "_modules/dagster_duckdb/resource", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_duckdb.resource"}}, "dagster_duckdb_pandas": {"duckdb_pandas_type_handler": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_duckdb_pandas.duckdb_pandas_type_handler

\nfrom typing import Optional, Sequence, Type\n\nimport pandas as pd\nfrom dagster import InputContext, MetadataValue, OutputContext, TableColumn, TableSchema\nfrom dagster._core.storage.db_io_manager import DbTypeHandler, TableSlice\nfrom dagster_duckdb.io_manager import (\n    DuckDbClient,\n    DuckDBIOManager,\n    build_duckdb_io_manager,\n)\n\n\n
[docs]class DuckDBPandasTypeHandler(DbTypeHandler[pd.DataFrame]):\n """Stores and loads Pandas DataFrames in DuckDB.\n\n To use this type handler, return it from the ``type_handlers` method of an I/O manager that inherits from ``DuckDBIOManager``.\n\n Example:\n .. code-block:: python\n\n from dagster_duckdb import DuckDBIOManager\n from dagster_duckdb_pandas import DuckDBPandasTypeHandler\n\n class MyDuckDBIOManager(DuckDBIOManager):\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [DuckDBPandasTypeHandler()]\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in duckdb\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={"io_manager": MyDuckDBIOManager(database="my_db.duckdb")}\n )\n\n """\n\n def handle_output(\n self, context: OutputContext, table_slice: TableSlice, obj: pd.DataFrame, connection\n ):\n """Stores the pandas DataFrame in duckdb."""\n connection.execute(\n f"create table if not exists {table_slice.schema}.{table_slice.table} as select * from"\n " obj;"\n )\n if not connection.fetchall():\n # table was not created, therefore already exists. Insert the data\n connection.execute(\n f"insert into {table_slice.schema}.{table_slice.table} select * from obj"\n )\n\n context.add_output_metadata(\n {\n "row_count": obj.shape[0],\n "dataframe_columns": MetadataValue.table_schema(\n TableSchema(\n columns=[\n TableColumn(name=name, type=str(dtype)) # type: ignore # (bad stubs)\n for name, dtype in obj.dtypes.items()\n ]\n )\n ),\n }\n )\n\n def load_input(\n self, context: InputContext, table_slice: TableSlice, connection\n ) -> pd.DataFrame:\n """Loads the input as a Pandas DataFrame."""\n if table_slice.partition_dimensions and len(context.asset_partition_keys) == 0:\n return pd.DataFrame()\n return connection.execute(DuckDbClient.get_select_statement(table_slice)).fetchdf()\n\n @property\n def supported_types(self):\n return [pd.DataFrame]
\n\n\nduckdb_pandas_io_manager = build_duckdb_io_manager(\n [DuckDBPandasTypeHandler()], default_load_type=pd.DataFrame\n)\nduckdb_pandas_io_manager.__doc__ = """\nAn I/O manager definition that reads inputs from and writes Pandas DataFrames to DuckDB. When\nusing the duckdb_pandas_io_manager, any inputs and outputs without type annotations will be loaded\nas Pandas DataFrames.\n\nReturns:\n IOManagerDefinition\n\nExamples:\n\n .. code-block:: python\n\n from dagster_duckdb_pandas import duckdb_pandas_io_manager\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in DuckDB\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n @repository\n def my_repo():\n return with_resources(\n [my_table],\n {"io_manager": duckdb_pandas_io_manager.configured({"database": "my_db.duckdb"})}\n )\n\n If you do not provide a schema, Dagster will determine a schema based on the assets and ops using\n the I/O Manager. For assets, the schema will be determined from the asset key.\n For ops, the schema can be specified by including a "schema" entry in output metadata. If "schema" is not provided\n via config or on the asset/op, "public" will be used for the schema.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_schema"})}\n )\n def make_my_table() -> pd.DataFrame:\n # the returned value will be stored at my_schema.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pd.DataFrame) -> pd.DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n"""\n\n\n
[docs]class DuckDBPandasIOManager(DuckDBIOManager):\n """An I/O manager definition that reads inputs from and writes Pandas DataFrames to DuckDB. When\n using the DuckDBPandasIOManager, any inputs and outputs without type annotations will be loaded\n as Pandas DataFrames.\n\n Returns:\n IOManagerDefinition\n\n Examples:\n .. code-block:: python\n\n from dagster_duckdb_pandas import DuckDBPandasIOManager\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in DuckDB\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={"io_manager": DuckDBPandasIOManager(database="my_db.duckdb")}\n )\n\n If you do not provide a schema, Dagster will determine a schema based on the assets and ops using\n the I/O Manager. For assets, the schema will be determined from the asset key, as in the above example.\n For ops, the schema can be specified by including a "schema" entry in output metadata. If "schema" is not provided\n via config or on the asset/op, "public" will be used for the schema.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_schema"})}\n )\n def make_my_table() -> pd.DataFrame:\n # the returned value will be stored at my_schema.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pd.DataFrame) -> pd.DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n """\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [DuckDBPandasTypeHandler()]\n\n @staticmethod\n def default_load_type() -> Optional[Type]:\n return pd.DataFrame
\n
", "current_page_name": "_modules/dagster_duckdb_pandas/duckdb_pandas_type_handler", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_duckdb_pandas.duckdb_pandas_type_handler"}}, "dagster_duckdb_polars": {"duckdb_polars_type_handler": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_duckdb_polars.duckdb_polars_type_handler

\nfrom typing import Optional, Sequence, Type\n\nimport polars as pl\nfrom dagster import InputContext, MetadataValue, OutputContext, TableColumn, TableSchema\nfrom dagster._core.storage.db_io_manager import DbTypeHandler, TableSlice\nfrom dagster_duckdb.io_manager import DuckDbClient, DuckDBIOManager, build_duckdb_io_manager\n\n\n
[docs]class DuckDBPolarsTypeHandler(DbTypeHandler[pl.DataFrame]):\n """Stores and loads Polars DataFrames in DuckDB.\n\n To use this type handler, return it from the ``type_handlers` method of an I/O manager that inherits from ``DuckDBIOManager``.\n\n Example:\n .. code-block:: python\n\n from dagster_duckdb import DuckDBIOManager\n from dagster_duckdb_polars import DuckDBPolarsTypeHandler\n\n class MyDuckDBIOManager(DuckDBIOManager):\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [DuckDBPolarsTypeHandler()]\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in duckdb\n )\n def my_table() -> pl.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={"io_manager": MyDuckDBIOManager(database="my_db.duckdb")}\n )\n\n """\n\n def handle_output(\n self, context: OutputContext, table_slice: TableSlice, obj: pl.DataFrame, connection\n ):\n """Stores the polars DataFrame in duckdb."""\n obj_arrow = obj.to_arrow() # noqa: F841 # need obj_arrow symbol to exist for duckdb query\n connection.execute(f"create schema if not exists {table_slice.schema};")\n connection.execute(\n f"create table if not exists {table_slice.schema}.{table_slice.table} as select * from"\n " obj_arrow;"\n )\n if not connection.fetchall():\n # table was not created, therefore already exists. Insert the data\n connection.execute(\n f"insert into {table_slice.schema}.{table_slice.table} select * from obj_arrow"\n )\n\n context.add_output_metadata(\n {\n "row_count": obj.shape[0],\n "dataframe_columns": MetadataValue.table_schema(\n TableSchema(\n columns=[\n TableColumn(name=name, type=str(dtype))\n for name, dtype in zip(obj.columns, obj.dtypes)\n ]\n )\n ),\n }\n )\n\n def load_input(\n self, context: InputContext, table_slice: TableSlice, connection\n ) -> pl.DataFrame:\n """Loads the input as a Polars DataFrame."""\n if table_slice.partition_dimensions and len(context.asset_partition_keys) == 0:\n return pl.DataFrame()\n select_statement = connection.execute(\n DuckDbClient.get_select_statement(table_slice=table_slice)\n )\n duckdb_to_arrow = select_statement.arrow()\n return pl.DataFrame(duckdb_to_arrow)\n\n @property\n def supported_types(self):\n return [pl.DataFrame]
\n\n\nduckdb_polars_io_manager = build_duckdb_io_manager(\n [DuckDBPolarsTypeHandler()], default_load_type=pl.DataFrame\n)\nduckdb_polars_io_manager.__doc__ = """\nAn I/O manager definition that reads inputs from and writes polars dataframes to DuckDB. When\nusing the duckdb_polars_io_manager, any inputs and outputs without type annotations will be loaded\nas Polars DataFrames.\n\nReturns:\n IOManagerDefinition\n\nExamples:\n\n .. code-block:: python\n\n from dagster_duckdb_polars import duckdb_polars_io_manager\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in DuckDB\n )\n def my_table() -> pl.DataFrame: # the name of the asset will be the table name\n ...\n\n @repository\n def my_repo():\n return with_resources(\n [my_table],\n {"io_manager": duckdb_polars_io_manager.configured({"database": "my_db.duckdb"})}\n )\n\n If you do not provide a schema, Dagster will determine a schema based on the assets and ops using\n the I/O Manager. For assets, the schema will be determined from the asset key.\n For ops, the schema can be specified by including a "schema" entry in output metadata. If "schema" is not provided\n via config or on the asset/op, "public" will be used for the schema.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_schema"})}\n )\n def make_my_table() -> pl.DataFrame:\n # the returned value will be stored at my_schema.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pl.DataFrame) -> pl.DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n"""\n\n\n
[docs]class DuckDBPolarsIOManager(DuckDBIOManager):\n """An I/O manager definition that reads inputs from and writes Polars DataFrames to DuckDB. When\n using the DuckDBPolarsIOManager, any inputs and outputs without type annotations will be loaded\n as Polars DataFrames.\n\n Returns:\n IOManagerDefinition\n\n Examples:\n .. code-block:: python\n\n from dagster_duckdb_polars import DuckDBPolarsIOManager\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in DuckDB\n )\n def my_table() -> pl.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={"io_manager": DuckDBPolarsIOManager(database="my_db.duckdb")}\n )\n\n If you do not provide a schema, Dagster will determine a schema based on the assets and ops using\n the I/O Manager. For assets, the schema will be determined from the asset key, as in the above example.\n For ops, the schema can be specified by including a "schema" entry in output metadata. If "schema" is not provided\n via config or on the asset/op, "public" will be used for the schema.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_schema"})}\n )\n def make_my_table() -> pl.DataFrame:\n # the returned value will be stored at my_schema.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pl.DataFrame) -> pl.DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n """\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [DuckDBPolarsTypeHandler()]\n\n @staticmethod\n def default_load_type() -> Optional[Type]:\n return pl.DataFrame
\n
", "current_page_name": "_modules/dagster_duckdb_polars/duckdb_polars_type_handler", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_duckdb_polars.duckdb_polars_type_handler"}}, "dagster_duckdb_pyspark": {"duckdb_pyspark_type_handler": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_duckdb_pyspark.duckdb_pyspark_type_handler

\nfrom typing import Optional, Sequence, Type\n\nimport pyarrow as pa\nimport pyspark\nimport pyspark.sql\nfrom dagster import InputContext, MetadataValue, OutputContext, TableColumn, TableSchema\nfrom dagster._core.storage.db_io_manager import DbTypeHandler, TableSlice\nfrom dagster_duckdb.io_manager import (\n    DuckDbClient,\n    DuckDBIOManager,\n    build_duckdb_io_manager,\n)\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import StructType\n\n\ndef pyspark_df_to_arrow_table(df: pyspark.sql.DataFrame) -> pa.Table:\n    """Converts a PySpark DataFrame to a PyArrow Table."""\n    # `_collect_as_arrow` API call sourced from:\n    #   https://stackoverflow.com/questions/73203318/how-to-transform-spark-dataframe-to-polars-dataframe\n    return pa.Table.from_batches(df._collect_as_arrow())  # noqa: SLF001\n\n\n
[docs]class DuckDBPySparkTypeHandler(DbTypeHandler[pyspark.sql.DataFrame]):\n """Stores PySpark DataFrames in DuckDB.\n\n To use this type handler, return it from the ``type_handlers` method of an I/O manager that inherits from ``DuckDBIOManager``.\n\n Example:\n .. code-block:: python\n\n from dagster_duckdb import DuckDBIOManager\n from dagster_duckdb_pyspark import DuckDBPySparkTypeHandler\n\n class MyDuckDBIOManager(DuckDBIOManager):\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [DuckDBPySparkTypeHandler()]\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in duckdb\n )\n def my_table() -> pyspark.sql.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={"io_manager": MyDuckDBIOManager(database="my_db.duckdb")}\n )\n """\n\n def handle_output(\n self,\n context: OutputContext,\n table_slice: TableSlice,\n obj: pyspark.sql.DataFrame,\n connection,\n ):\n """Stores the given object at the provided filepath."""\n pa_df = pyspark_df_to_arrow_table(obj) # noqa: F841\n connection.execute(\n f"create table if not exists {table_slice.schema}.{table_slice.table} as select * from"\n " pa_df;"\n )\n if not connection.fetchall():\n # table was not created, therefore already exists. Insert the data\n connection.execute(\n f"insert into {table_slice.schema}.{table_slice.table} select * from pa_df;"\n )\n\n context.add_output_metadata(\n {\n "row_count": obj.count(),\n "dataframe_columns": MetadataValue.table_schema(\n TableSchema(\n columns=[\n TableColumn(name=name, type=str(dtype)) for name, dtype in obj.dtypes\n ]\n )\n ),\n }\n )\n\n def load_input(\n self, context: InputContext, table_slice: TableSlice, connection\n ) -> pyspark.sql.DataFrame:\n """Loads the return of the query as the correct type."""\n spark = SparkSession.builder.getOrCreate() # type: ignore\n if table_slice.partition_dimensions and len(context.asset_partition_keys) == 0:\n return spark.createDataFrame([], StructType([]))\n\n pd_df = connection.execute(DuckDbClient.get_select_statement(table_slice)).fetchdf()\n return spark.createDataFrame(pd_df)\n\n @property\n def supported_types(self):\n return [pyspark.sql.DataFrame]
\n\n\nduckdb_pyspark_io_manager = build_duckdb_io_manager(\n [DuckDBPySparkTypeHandler()], default_load_type=pyspark.sql.DataFrame\n)\nduckdb_pyspark_io_manager.__doc__ = """\nAn I/O manager definition that reads inputs from and writes PySpark DataFrames to DuckDB. When\nusing the duckdb_pyspark_io_manager, any inputs and outputs without type annotations will be loaded\nas PySpark DataFrames.\n\nReturns:\n IOManagerDefinition\n\nExamples:\n\n .. code-block:: python\n\n from dagster_duckdb_pyspark import duckdb_pyspark_io_manager\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in DuckDB\n )\n def my_table() -> pyspark.sql.DataFrame: # the name of the asset will be the table name\n ...\n\n @repository\n def my_repo():\n return with_resources(\n [my_table],\n {"io_manager": duckdb_pyspark_io_manager.configured({"database": "my_db.duckdb"})}\n )\n\n If you do not provide a schema, Dagster will determine a schema based on the assets and ops using\n the I/O Manager. For assets, the schema will be determined from the asset key.\n For ops, the schema can be specified by including a "schema" entry in output metadata. If "schema" is not provided\n via config or on the asset/op, "public" will be used for the schema.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_schema"})}\n )\n def make_my_table() -> pyspark.sql.DataFrame:\n # the returned value will be stored at my_schema.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pyspark.sql.DataFrame) -> pyspark.sql.DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n"""\n\n\n
[docs]class DuckDBPySparkIOManager(DuckDBIOManager):\n """An I/O manager definition that reads inputs from and writes PySpark DataFrames to DuckDB. When\n using the DuckDBPySparkIOManager, any inputs and outputs without type annotations will be loaded\n as PySpark DataFrames.\n\n Returns:\n IOManagerDefinition\n\n Examples:\n .. code-block:: python\n\n from dagster_duckdb_pyspark import DuckDBPySparkIOManager\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in DuckDB\n )\n def my_table() -> pyspark.sql.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={"io_manager": DuckDBPySparkIOManager(database="my_db.duckdb")}\n )\n\n If you do not provide a schema, Dagster will determine a schema based on the assets and ops using\n the I/O Manager. For assets, the schema will be determined from the asset key, as in the above example.\n For ops, the schema can be specified by including a "schema" entry in output metadata. If "schema" is not provided\n via config or on the asset/op, "public" will be used for the schema.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_schema"})}\n )\n def make_my_table() -> pyspark.sql.DataFrame:\n # the returned value will be stored at my_schema.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pyspark.sql.DataFrame) -> pyspark.sql.DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n """\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [DuckDBPySparkTypeHandler()]\n\n @staticmethod\n def default_load_type() -> Optional[Type]:\n return pyspark.sql.DataFrame
\n
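The module-level helper `pyspark_df_to_arrow_table` above has no usage example; a rough sketch of calling it directly, assuming it is imported from the module path shown and that PyArrow is installed:

.. code-block:: python

    from dagster_duckdb_pyspark.duckdb_pyspark_type_handler import pyspark_df_to_arrow_table
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.getOrCreate()
    df = spark.createDataFrame([(1, "a"), (2, "b")], ["id", "label"])

    # The type handler above exposes the resulting Arrow table to DuckDB as `pa_df`;
    # here we only materialize it locally to inspect the schema.
    arrow_table = pyspark_df_to_arrow_table(df)
    print(arrow_table.schema)  # noqa: T201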
", "current_page_name": "_modules/dagster_duckdb_pyspark/duckdb_pyspark_type_handler", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_duckdb_pyspark.duckdb_pyspark_type_handler"}}, "dagster_embedded_elt": {"sling": {"asset_defs": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_embedded_elt.sling.asset_defs

\nimport re\nfrom typing import Any, Dict, List, Optional, Union\n\nfrom dagster import (\n    AssetExecutionContext,\n    AssetsDefinition,\n    AssetSpec,\n    MaterializeResult,\n    multi_asset,\n)\nfrom dagster._annotations import experimental\n\nfrom dagster_embedded_elt.sling.resources import SlingMode, SlingResource\n\n\n
[docs]@experimental\ndef build_sling_asset(\n asset_spec: AssetSpec,\n source_stream: str,\n target_object: str,\n mode: SlingMode = SlingMode.FULL_REFRESH,\n primary_key: Optional[Union[str, List[str]]] = None,\n update_key: Optional[Union[str, List[str]]] = None,\n source_options: Optional[Dict[str, Any]] = None,\n target_options: Optional[Dict[str, Any]] = None,\n sling_resource_key: str = "sling",\n) -> AssetsDefinition:\n """Asset Factory for using Sling to sync data from a source stream to a target object.\n\n Args:\n asset_spec (AssetSpec): The AssetSpec to use to materialize this asset.\n source_stream (str): The source stream to sync from. This can be a table, a query, or a path.\n target_object (str): The target object to sync to. This can be a table, or a path.\n mode (SlingMode, optional): The sync mode to use when syncing. Defaults to SlingMode.FULL_REFRESH.\n primary_key (Optional[Union[str, List[str]]], optional): The optional primary key to use when syncing.\n update_key (Optional[Union[str, List[str]]], optional): The optional update key to use when syncing.\n source_options (Optional[Dict[str, Any]], optional): Any optional Sling source options to use when syncing.\n target_options (Optional[Dict[str, Any]], optional): Any optional target options to use when syncing.\n sling_resource_key (str, optional): The resource key for the SlingResource. Defaults to "sling".\n\n Examples:\n Creating a Sling asset that syncs from a file to a table:\n\n .. code-block:: python\n\n asset_spec = AssetSpec(key=["main", "dest_tbl"])\n asset_def = build_sling_asset(\n asset_spec=asset_spec,\n source_stream="file:///tmp/test.csv",\n target_object="main.dest_table",\n mode=SlingMode.INCREMENTAL,\n primary_key="id"\n )\n\n Creating a Sling asset that syncs from a table to a file with a full refresh:\n\n .. code-block:: python\n\n asset_spec = AssetSpec(key="test.csv")\n asset_def = build_sling_asset(\n asset_spec=asset_spec,\n source_stream="main.dest_table",\n target_object="file:///tmp/test.csv",\n mode=SlingMode.FULL_REFRESH\n )\n\n\n """\n if primary_key is not None and not isinstance(primary_key, list):\n primary_key = [primary_key]\n\n if update_key is not None and not isinstance(update_key, list):\n update_key = [update_key]\n\n @multi_asset(\n compute_kind="sling", specs=[asset_spec], required_resource_keys={sling_resource_key}\n )\n def sync(context: AssetExecutionContext) -> MaterializeResult:\n sling: SlingResource = getattr(context.resources, sling_resource_key)\n last_row_count_observed = None\n for stdout_line in sling.sync(\n source_stream=source_stream,\n target_object=target_object,\n mode=mode,\n primary_key=primary_key,\n update_key=update_key,\n source_options=source_options,\n target_options=target_options,\n ):\n match = re.search(r"(\\d+) rows", stdout_line)\n if match:\n last_row_count_observed = int(match.group(1))\n context.log.info(stdout_line)\n\n return MaterializeResult(\n metadata=(\n {} if last_row_count_observed is None else {"row_count": last_row_count_observed}\n )\n )\n\n return sync
\n
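A sketch of wiring the asset produced by `build_sling_asset` into a `Definitions` object together with the `SlingResource` documented below, under the factory's default "sling" resource key. Connection details are hypothetical, and the names are assumed to be re-exported from `dagster_embedded_elt.sling`.

.. code-block:: python

    from dagster import AssetSpec, Definitions, EnvVar
    from dagster_embedded_elt.sling import (
        SlingMode,
        SlingResource,
        SlingSourceConnection,
        SlingTargetConnection,
        build_sling_asset,
    )

    sling_resource = SlingResource(
        source_connection=SlingSourceConnection(type="file"),
        target_connection=SlingTargetConnection(
            type="postgres", connection_string=EnvVar("POSTGRES_CONNECTION_STRING")
        ),
    )

    sling_asset = build_sling_asset(
        asset_spec=AssetSpec(key=["main", "dest_tbl"]),
        source_stream="file:///tmp/test.csv",
        target_object="main.dest_tbl",
        mode=SlingMode.INCREMENTAL,
        primary_key="id",
    )

    defs = Definitions(assets=[sling_asset], resources={"sling": sling_resource})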
", "current_page_name": "_modules/dagster_embedded_elt/sling/asset_defs", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_embedded_elt.sling.asset_defs"}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_embedded_elt.sling.resources

\nimport contextlib\nimport json\nimport re\nfrom enum import Enum\nfrom subprocess import PIPE, STDOUT, Popen\nfrom typing import Any, Dict, Generator, List, Optional\n\nfrom dagster import ConfigurableResource, PermissiveConfig, get_dagster_logger\nfrom dagster._annotations import experimental\nfrom dagster._utils.env import environ\nfrom pydantic import Field\nfrom sling import Sling\n\nlogger = get_dagster_logger()\n\n\nclass SlingMode(str, Enum):\n    """The mode to use when syncing.\n\n    See the Sling docs for more information: https://docs.slingdata.io/sling-cli/running-tasks#modes.\n    """\n\n    INCREMENTAL = "incremental"\n    TRUNCATE = "truncate"\n    FULL_REFRESH = "full-refresh"\n    SNAPSHOT = "snapshot"\n\n\n
[docs]class SlingSourceConnection(PermissiveConfig):\n """A Sling Source Connection defines the source connection used by :py:class:`~dagster_embedded_elt.sling.SlingResource`.\n\n Examples:\n Creating a Sling Source for a file, such as CSV or JSON:\n\n .. code-block:: python\n\n source = SlingSourceConnection(type="file")\n\n Create a Sling Source for a Postgres database, using a connection string:\n\n .. code-block:: python\n\n source = SlingSourceConnection(type="postgres", connection_string=EnvVar("POSTGRES_CONNECTION_STRING"))\n source = SlingSourceConnection(type="postgres", connection_string="postgresql://user:password@host:port/schema")\n\n Create a Sling Source for a Postgres database, using keyword arguments, as described here:\n https://docs.slingdata.io/connections/database-connections/postgres\n\n .. code-block:: python\n\n source = SlingSourceConnection(type="postgres", host="host", user="hunter42", password=EnvVar("POSTGRES_PASSWORD"))\n\n """\n\n type: str = Field(description="Type of the source connection. Use 'file' for local storage.")\n connection_string: Optional[str] = Field(\n description="The connection string for the source database."\n )
\n\n\n
[docs]class SlingTargetConnection(PermissiveConfig):\n """A Sling Target Connection defines the target connection used by :py:class:`~dagster_embedded_elt.sling.SlingResource`.\n\n Examples:\n Creating a Sling Target for a file, such as CSV or JSON:\n\n .. code-block:: python\n\n target = SlingTargetConnection(type="file")\n\n Creating a Sling Target for a Postgres database, using a connection string:\n\n .. code-block:: python\n\n target = SlingTargetConnection(type="postgres", connection_string="postgresql://user:password@host:port/schema")\n target = SlingTargetConnection(type="postgres", connection_string=EnvVar("POSTGRES_CONNECTION_STRING"))\n\n Creating a Sling Target for a Postgres database, using keyword arguments, as described here:\n https://docs.slingdata.io/connections/database-connections/postgres\n\n .. code-block:: python\n\n target = SlingTargetConnection(type="postgres", host="host", user="hunter42", password=EnvVar("POSTGRES_PASSWORD"))\n\n\n """\n\n type: str = Field(\n description="Type of the destination connection. Use 'file' for local storage."\n )\n connection_string: Optional[str] = Field(\n description="The connection string for the target database."\n )
\n\n\n
[docs]@experimental\nclass SlingResource(ConfigurableResource):\n """Resource for interacting with the Sling package.\n\n Examples:\n .. code-block:: python\n\n from dagster_etl.sling import SlingResource\n sling_resource = SlingResource(\n source_connection=SlingSourceConnection(\n type="postgres", connection_string=EnvVar("POSTGRES_CONNECTION_STRING")\n ),\n target_connection=SlingTargetConnection(\n type="snowflake",\n host="host",\n user="user",\n database="database",\n password="password",\n role="role",\n ),\n )\n\n """\n\n source_connection: SlingSourceConnection\n target_connection: SlingTargetConnection\n\n @contextlib.contextmanager\n def _setup_config(self) -> Generator[None, None, None]:\n """Uses environment variables to set the Sling source and target connections."""\n sling_source = self.source_connection.dict()\n sling_target = self.target_connection.dict()\n if self.source_connection.connection_string:\n sling_source["url"] = self.source_connection.connection_string\n if self.target_connection.connection_string:\n sling_target["url"] = self.target_connection.connection_string\n with environ(\n {\n "SLING_SOURCE": json.dumps(sling_source),\n "SLING_TARGET": json.dumps(sling_target),\n }\n ):\n yield\n\n @staticmethod\n def _exec_sling_cmd(cmd, stdin=None, stdout=PIPE, stderr=STDOUT) -> Generator[str, None, None]:\n ansi_escape = re.compile(r"\\x1B(?:[@-Z\\\\-_]|\\[[0-?]*[ -/]*[@-~])")\n with Popen(cmd, shell=True, stdin=stdin, stdout=stdout, stderr=stderr) as proc:\n assert proc.stdout\n\n for line in proc.stdout:\n fmt_line = str(line, "utf-8")\n clean_line = ansi_escape.sub("", fmt_line).replace("INF", "")\n yield clean_line\n\n proc.wait()\n if proc.returncode != 0:\n raise Exception("Sling command failed with error code %s", proc.returncode)\n\n def _sync(\n self,\n source_stream: str,\n target_object: str,\n mode: SlingMode = SlingMode.FULL_REFRESH,\n primary_key: Optional[List[str]] = None,\n update_key: Optional[List[str]] = None,\n source_options: Optional[Dict[str, Any]] = None,\n target_options: Optional[Dict[str, Any]] = None,\n ) -> Generator[str, None, None]:\n """Runs a Sling sync from the given source table to the given destination table. 
Generates\n output lines from the Sling CLI.\n """\n if self.source_connection.type == "file" and not source_stream.startswith("file://"):\n source_stream = "file://" + source_stream\n\n if self.target_connection.type == "file" and not target_object.startswith("file://"):\n target_object = "file://" + target_object\n\n with self._setup_config():\n config = {\n "source": {\n "conn": "SLING_SOURCE",\n "stream": source_stream,\n "primary_key": primary_key,\n "update_key": update_key,\n "options": source_options,\n },\n "target": {\n "conn": "SLING_TARGET",\n "object": target_object,\n "options": target_options,\n },\n }\n config["source"] = {k: v for k, v in config["source"].items() if v is not None}\n config["target"] = {k: v for k, v in config["target"].items() if v is not None}\n\n sling_cli = Sling(**config)\n logger.info("Starting Sling sync with mode: %s", mode)\n cmd = sling_cli._prep_cmd() # noqa: SLF001\n\n yield from self._exec_sling_cmd(cmd)\n\n def sync(\n self,\n source_stream: str,\n target_object: str,\n mode: SlingMode,\n primary_key: Optional[List[str]] = None,\n update_key: Optional[List[str]] = None,\n source_options: Optional[Dict[str, Any]] = None,\n target_options: Optional[Dict[str, Any]] = None,\n ) -> Generator[str, None, None]:\n """Initiate a Sling Sync between a source stream and a target object.\n\n Args:\n source_stream (str): The source stream to read from. For database sources, the source stream can be either\n a table name, a SQL statement or a path to a SQL file e.g. `TABLE1` or `SCHEMA1.TABLE2` or\n `SELECT * FROM TABLE`. For file sources, the source stream is a path or an url to a file.\n For file targets, the target object is a path or a url to a file, e.g. file:///tmp/file.csv or\n s3://my_bucket/my_folder/file.csv\n target_object (str): The target object to write into. For database targets, the target object is a table\n name, e.g. TABLE1, SCHEMA1.TABLE2. For file targets, the target object is a path or an url to a file.\n mode (SlingMode): The Sling mode to use when syncing, i.e. incremental, full-refresh\n See the Sling docs for more information: https://docs.slingdata.io/sling-cli/running-tasks#modes.\n primary_key (str): For incremental syncs, a primary key is used during merge statements to update\n existing rows.\n update_key (str): For incremental syncs, an update key is used to stream records after max(update_key)\n source_options (Dict[str, Any]): Other source options to pass to Sling,\n see https://docs.slingdata.io/sling-cli/running-tasks#source-options-src-options-flag-source.options-key\n for details\n target_options (Dict[str, Any[): Other target options to pass to Sling,\n see https://docs.slingdata.io/sling-cli/running-tasks#target-options-tgt-options-flag-target.options-key\n for details\n\n Examples:\n Sync from a source file to a sqlite database:\n\n .. 
code-block:: python\n\n sqllite_path = "/path/to/sqlite.db"\n csv_path = "/path/to/file.csv"\n\n @asset\n def run_sync(context, sling: SlingResource):\n res = sling.sync(\n source_stream=csv_path,\n target_object="events",\n mode=SlingMode.FULL_REFRESH,\n )\n for stdout in res:\n context.log.debug(stdout)\n counts = sqlite3.connect(sqllitepath).execute("SELECT count(1) FROM events").fetchone()\n assert counts[0] == 3\n\n source = SlingSourceConnection(\n type="file",\n )\n target = SlingTargetConnection(type="sqlite", instance=sqllitepath)\n\n materialize(\n [run_sync],\n resources={\n "sling": SlingResource(\n source_connection=source,\n target_connection=target,\n mode=SlingMode.TRUNCATE,\n )\n },\n )\n\n """\n yield from self._sync(\n source_stream=source_stream,\n target_object=target_object,\n mode=mode,\n primary_key=primary_key,\n update_key=update_key,\n source_options=source_options,\n target_options=target_options,\n )
\n
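For readers who want to call the resource directly rather than through the asset factory, here is a self-contained sketch of the ``sync`` method, mirroring the connection examples in the docstrings above; the local paths and the ``events`` table name are placeholders.

.. code-block:: python

    import sqlite3

    from dagster import asset, materialize
    from dagster_embedded_elt.sling import (
        SlingMode,
        SlingResource,
        SlingSourceConnection,
        SlingTargetConnection,
    )

    sqlite_path = "/path/to/sqlite.db"
    csv_path = "/path/to/file.csv"

    @asset
    def run_sync(context, sling: SlingResource):
        # Stream the Sling CLI output line by line into the Dagster log.
        for stdout_line in sling.sync(
            source_stream=csv_path,
            target_object="events",
            mode=SlingMode.FULL_REFRESH,
        ):
            context.log.debug(stdout_line)
        # Sanity-check that rows landed in the target table.
        count = sqlite3.connect(sqlite_path).execute("SELECT count(1) FROM events").fetchone()
        context.log.info(f"events row count: {count[0]}")

    materialize(
        [run_sync],
        resources={
            "sling": SlingResource(
                source_connection=SlingSourceConnection(type="file"),
                target_connection=SlingTargetConnection(type="sqlite", instance=sqlite_path),
            )
        },
    )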
", "current_page_name": "_modules/dagster_embedded_elt/sling/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_embedded_elt.sling.resources"}}}, "dagster_fivetran": {"asset_defs": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_fivetran.asset_defs

\nimport hashlib\nimport inspect\nimport re\nfrom functools import partial\nfrom typing import (\n    Any,\n    Callable,\n    Dict,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Union,\n    cast,\n)\n\nfrom dagster import (\n    AssetKey,\n    AssetOut,\n    AssetsDefinition,\n    Nothing,\n    OpExecutionContext,\n    Output,\n    _check as check,\n    multi_asset,\n)\nfrom dagster._core.definitions.cacheable_assets import (\n    AssetsDefinitionCacheableData,\n    CacheableAssetsDefinition,\n)\nfrom dagster._core.definitions.events import CoercibleToAssetKeyPrefix\nfrom dagster._core.definitions.metadata import MetadataUserInput\nfrom dagster._core.definitions.resource_definition import ResourceDefinition\nfrom dagster._core.errors import DagsterStepOutputNotFoundError\nfrom dagster._core.execution.context.init import build_init_resource_context\n\nfrom dagster_fivetran.resources import DEFAULT_POLL_INTERVAL, FivetranResource\nfrom dagster_fivetran.utils import (\n    generate_materializations,\n    get_fivetran_connector_url,\n    metadata_for_table,\n)\n\n\ndef _build_fivetran_assets(\n    connector_id: str,\n    destination_tables: Sequence[str],\n    poll_interval: float = DEFAULT_POLL_INTERVAL,\n    poll_timeout: Optional[float] = None,\n    io_manager_key: Optional[str] = None,\n    asset_key_prefix: Optional[Sequence[str]] = None,\n    metadata_by_table_name: Optional[Mapping[str, MetadataUserInput]] = None,\n    table_to_asset_key_map: Optional[Mapping[str, AssetKey]] = None,\n    resource_defs: Optional[Mapping[str, ResourceDefinition]] = None,\n    group_name: Optional[str] = None,\n    infer_missing_tables: bool = False,\n    op_tags: Optional[Mapping[str, Any]] = None,\n) -> Sequence[AssetsDefinition]:\n    asset_key_prefix = check.opt_sequence_param(asset_key_prefix, "asset_key_prefix", of_type=str)\n\n    tracked_asset_keys = {\n        table: AssetKey([*asset_key_prefix, *table.split(".")]) for table in destination_tables\n    }\n    user_facing_asset_keys = table_to_asset_key_map or tracked_asset_keys\n\n    _metadata_by_table_name = check.opt_mapping_param(\n        metadata_by_table_name, "metadata_by_table_name", key_type=str\n    )\n\n    @multi_asset(\n        name=f"fivetran_sync_{connector_id}",\n        outs={\n            "_".join(key.path): AssetOut(\n                io_manager_key=io_manager_key,\n                key=user_facing_asset_keys[table],\n                metadata=_metadata_by_table_name.get(table),\n                dagster_type=Nothing,\n            )\n            for table, key in tracked_asset_keys.items()\n        },\n        compute_kind="fivetran",\n        resource_defs=resource_defs,\n        group_name=group_name,\n        op_tags=op_tags,\n    )\n    def _assets(context: OpExecutionContext, fivetran: FivetranResource) -> Any:\n        fivetran_output = fivetran.sync_and_poll(\n            connector_id=connector_id,\n            poll_interval=poll_interval,\n            poll_timeout=poll_timeout,\n        )\n\n        materialized_asset_keys = set()\n        for materialization in generate_materializations(\n            fivetran_output, asset_key_prefix=asset_key_prefix\n        ):\n            # scan through all tables actually created, if it was expected then emit an Output.\n            # otherwise, emit a runtime AssetMaterialization\n            if materialization.asset_key in tracked_asset_keys.values():\n                yield Output(\n                    value=None,\n                    
output_name="_".join(materialization.asset_key.path),\n                    metadata=materialization.metadata,\n                )\n                materialized_asset_keys.add(materialization.asset_key)\n\n            else:\n                yield materialization\n\n        unmaterialized_asset_keys = set(tracked_asset_keys.values()) - materialized_asset_keys\n        if infer_missing_tables:\n            for asset_key in unmaterialized_asset_keys:\n                yield Output(\n                    value=None,\n                    output_name="_".join(asset_key.path),\n                )\n\n        else:\n            if unmaterialized_asset_keys:\n                asset_key = next(iter(unmaterialized_asset_keys))\n                output_name = "_".join(asset_key.path)\n                raise DagsterStepOutputNotFoundError(\n                    f"Core compute for {context.op_def.name} did not return an output for"\n                    f' non-optional output "{output_name}".',\n                    step_key=context.get_step_execution_context().step.key,\n                    output_name=output_name,\n                )\n\n    return [_assets]\n\n\n
[docs]def build_fivetran_assets(\n connector_id: str,\n destination_tables: Sequence[str],\n poll_interval: float = DEFAULT_POLL_INTERVAL,\n poll_timeout: Optional[float] = None,\n io_manager_key: Optional[str] = None,\n asset_key_prefix: Optional[Sequence[str]] = None,\n metadata_by_table_name: Optional[Mapping[str, MetadataUserInput]] = None,\n group_name: Optional[str] = None,\n infer_missing_tables: bool = False,\n op_tags: Optional[Mapping[str, Any]] = None,\n) -> Sequence[AssetsDefinition]:\n """Build a set of assets for a given Fivetran connector.\n\n Returns an AssetsDefinition which connects the specified ``asset_keys`` to the computation that\n will update them. Internally, executes a Fivetran sync for a given ``connector_id``, and\n polls until that sync completes, raising an error if it is unsuccessful. Requires the use of the\n :py:class:`~dagster_fivetran.fivetran_resource`, which allows it to communicate with the\n Fivetran API.\n\n Args:\n connector_id (str): The Fivetran Connector ID that this op will sync. You can retrieve this\n value from the "Setup" tab of a given connector in the Fivetran UI.\n destination_tables (List[str]): `schema_name.table_name` for each table that you want to be\n represented in the Dagster asset graph for this connection.\n poll_interval (float): The time (in seconds) that will be waited between successive polls.\n poll_timeout (Optional[float]): The maximum time that will waited before this operation is\n timed out. By default, this will never time out.\n io_manager_key (Optional[str]): The io_manager to be used to handle each of these assets.\n asset_key_prefix (Optional[List[str]]): A prefix for the asset keys inside this asset.\n If left blank, assets will have a key of `AssetKey([schema_name, table_name])`.\n metadata_by_table_name (Optional[Mapping[str, MetadataUserInput]]): A mapping from destination\n table name to user-supplied metadata that should be associated with the asset for that table.\n group_name (Optional[str]): A string name used to organize multiple assets into groups. This\n group name will be applied to all assets produced by this multi_asset.\n infer_missing_tables (bool): If True, will create asset materializations for tables specified\n in destination_tables even if they are not present in the Fivetran sync output. This is useful\n in cases where Fivetran does not sync any data for a table and therefore does not include it\n in the sync output API response.\n op_tags (Optional[Dict[str, Any]]):\n A dictionary of tags for the op that computes the asset. Frameworks may expect and\n require certain metadata to be attached to a op. Values that are not strings will be\n json encoded and must meet the criteria that json.loads(json.dumps(value)) == value.\n\n **Examples:**\n\n Basic example:\n\n .. code-block:: python\n\n from dagster import AssetKey, repository, with_resources\n\n from dagster_fivetran import fivetran_resource\n from dagster_fivetran.assets import build_fivetran_assets\n\n my_fivetran_resource = fivetran_resource.configured(\n {\n "api_key": {"env": "FIVETRAN_API_KEY"},\n "api_secret": {"env": "FIVETRAN_API_SECRET"},\n }\n )\n\n Attaching metadata:\n\n .. 
code-block:: python\n\n fivetran_assets = build_fivetran_assets(\n connector_id="foobar",\n destination_tables=["schema1.table1", "schema2.table2"],\n metadata_by_table_name={\n "schema1.table1": {\n "description": "This is a table that contains foo and bar",\n },\n "schema2.table2": {\n "description": "This is a table that contains baz and quux",\n },\n },\n )\n """\n return _build_fivetran_assets(\n connector_id=connector_id,\n destination_tables=destination_tables,\n poll_interval=poll_interval,\n poll_timeout=poll_timeout,\n io_manager_key=io_manager_key,\n asset_key_prefix=asset_key_prefix,\n metadata_by_table_name=metadata_by_table_name,\n group_name=group_name,\n infer_missing_tables=infer_missing_tables,\n op_tags=op_tags,\n )
\n\n\nclass FivetranConnectionMetadata(\n NamedTuple(\n "_FivetranConnectionMetadata",\n [\n ("name", str),\n ("connector_id", str),\n ("connector_url", str),\n ("schemas", Mapping[str, Any]),\n ],\n )\n):\n def build_asset_defn_metadata(\n self,\n key_prefix: Sequence[str],\n group_name: Optional[str],\n table_to_asset_key_fn: Callable[[str], AssetKey],\n io_manager_key: Optional[str] = None,\n ) -> AssetsDefinitionCacheableData:\n schema_table_meta: Dict[str, MetadataUserInput] = {}\n if "schemas" in self.schemas:\n schemas_inner = cast(Dict[str, Any], self.schemas["schemas"])\n for schema in schemas_inner.values():\n if schema["enabled"]:\n schema_name = schema["name_in_destination"]\n schema_tables = cast(Dict[str, Dict[str, Any]], schema["tables"])\n for table in schema_tables.values():\n if table["enabled"]:\n table_name = table["name_in_destination"]\n schema_table_meta[f"{schema_name}.{table_name}"] = metadata_for_table(\n table, self.connector_url\n )\n else:\n schema_table_meta[self.name] = {}\n\n outputs = {\n table: AssetKey([*key_prefix, *list(table_to_asset_key_fn(table).path)])\n for table in schema_table_meta.keys()\n }\n\n internal_deps: Dict[str, Set[AssetKey]] = {}\n\n return AssetsDefinitionCacheableData(\n keys_by_input_name={},\n keys_by_output_name=outputs,\n internal_asset_deps=internal_deps,\n group_name=group_name,\n key_prefix=key_prefix,\n can_subset=False,\n metadata_by_output_name=schema_table_meta,\n extra_metadata={\n "connector_id": self.connector_id,\n "io_manager_key": io_manager_key,\n },\n )\n\n\ndef _build_fivetran_assets_from_metadata(\n assets_defn_meta: AssetsDefinitionCacheableData,\n resource_defs: Mapping[str, ResourceDefinition],\n poll_interval: float,\n poll_timeout: Optional[float] = None,\n) -> AssetsDefinition:\n metadata = cast(Mapping[str, Any], assets_defn_meta.extra_metadata)\n connector_id = cast(str, metadata["connector_id"])\n io_manager_key = cast(Optional[str], metadata["io_manager_key"])\n\n return _build_fivetran_assets(\n connector_id=connector_id,\n destination_tables=list(\n assets_defn_meta.keys_by_output_name.keys()\n if assets_defn_meta.keys_by_output_name\n else []\n ),\n asset_key_prefix=list(assets_defn_meta.key_prefix or []),\n metadata_by_table_name=cast(\n Dict[str, MetadataUserInput], assets_defn_meta.metadata_by_output_name\n ),\n io_manager_key=io_manager_key,\n table_to_asset_key_map=assets_defn_meta.keys_by_output_name,\n resource_defs=resource_defs,\n group_name=assets_defn_meta.group_name,\n poll_interval=poll_interval,\n poll_timeout=poll_timeout,\n )[0]\n\n\nclass FivetranInstanceCacheableAssetsDefinition(CacheableAssetsDefinition):\n def __init__(\n self,\n fivetran_resource_def: Union[FivetranResource, ResourceDefinition],\n key_prefix: Sequence[str],\n connector_to_group_fn: Optional[Callable[[str], Optional[str]]],\n connector_filter: Optional[Callable[[FivetranConnectionMetadata], bool]],\n connector_to_io_manager_key_fn: Optional[Callable[[str], Optional[str]]],\n connector_to_asset_key_fn: Optional[Callable[[FivetranConnectionMetadata, str], AssetKey]],\n poll_interval: float,\n poll_timeout: Optional[float],\n ):\n self._fivetran_resource_def = fivetran_resource_def\n self._fivetran_instance: FivetranResource = (\n fivetran_resource_def.process_config_and_initialize()\n if isinstance(fivetran_resource_def, FivetranResource)\n else fivetran_resource_def(build_init_resource_context())\n )\n\n self._key_prefix = key_prefix\n self._connector_to_group_fn = connector_to_group_fn\n self._connection_filter = 
connector_filter\n self._connector_to_io_manager_key_fn = connector_to_io_manager_key_fn\n self._connector_to_asset_key_fn: Callable[[FivetranConnectionMetadata, str], AssetKey] = (\n connector_to_asset_key_fn or (lambda _, table: AssetKey(path=table.split(".")))\n )\n self._poll_interval = poll_interval\n self._poll_timeout = poll_timeout\n\n contents = hashlib.sha1()\n contents.update(",".join(key_prefix).encode("utf-8"))\n if connector_filter:\n contents.update(inspect.getsource(connector_filter).encode("utf-8"))\n\n super().__init__(unique_id=f"fivetran-{contents.hexdigest()}")\n\n def _get_connectors(self) -> Sequence[FivetranConnectionMetadata]:\n output_connectors: List[FivetranConnectionMetadata] = []\n\n groups = self._fivetran_instance.make_request("GET", "groups")["items"]\n\n for group in groups:\n group_id = group["id"]\n\n connectors = self._fivetran_instance.make_request(\n "GET", f"groups/{group_id}/connectors"\n )["items"]\n for connector in connectors:\n connector_id = connector["id"]\n\n connector_name = connector["schema"]\n\n setup_state = connector.get("status", {}).get("setup_state")\n if setup_state and setup_state in ("incomplete", "broken"):\n continue\n\n connector_url = get_fivetran_connector_url(connector)\n\n schemas = self._fivetran_instance.make_request(\n "GET", f"connectors/{connector_id}/schemas"\n )\n\n output_connectors.append(\n FivetranConnectionMetadata(\n name=connector_name,\n connector_id=connector_id,\n connector_url=connector_url,\n schemas=schemas,\n )\n )\n\n return output_connectors\n\n def compute_cacheable_data(self) -> Sequence[AssetsDefinitionCacheableData]:\n asset_defn_data: List[AssetsDefinitionCacheableData] = []\n for connector in self._get_connectors():\n if not self._connection_filter or self._connection_filter(connector):\n table_to_asset_key = partial(self._connector_to_asset_key_fn, connector)\n asset_defn_data.append(\n connector.build_asset_defn_metadata(\n key_prefix=self._key_prefix,\n group_name=(\n self._connector_to_group_fn(connector.name)\n if self._connector_to_group_fn\n else None\n ),\n io_manager_key=(\n self._connector_to_io_manager_key_fn(connector.name)\n if self._connector_to_io_manager_key_fn\n else None\n ),\n table_to_asset_key_fn=table_to_asset_key,\n )\n )\n\n return asset_defn_data\n\n def build_definitions(\n self, data: Sequence[AssetsDefinitionCacheableData]\n ) -> Sequence[AssetsDefinition]:\n return [\n _build_fivetran_assets_from_metadata(\n meta,\n {"fivetran": self._fivetran_instance.get_resource_definition()},\n poll_interval=self._poll_interval,\n poll_timeout=self._poll_timeout,\n )\n for meta in data\n ]\n\n\ndef _clean_name(name: str) -> str:\n """Cleans an input to be a valid Dagster asset name."""\n return re.sub(r"[^a-z0-9]+", "_", name.lower())\n\n\n
[docs]def load_assets_from_fivetran_instance(\n fivetran: Union[FivetranResource, ResourceDefinition],\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n connector_to_group_fn: Optional[Callable[[str], Optional[str]]] = _clean_name,\n io_manager_key: Optional[str] = None,\n connector_to_io_manager_key_fn: Optional[Callable[[str], Optional[str]]] = None,\n connector_filter: Optional[Callable[[FivetranConnectionMetadata], bool]] = None,\n connector_to_asset_key_fn: Optional[\n Callable[[FivetranConnectionMetadata, str], AssetKey]\n ] = None,\n poll_interval: float = DEFAULT_POLL_INTERVAL,\n poll_timeout: Optional[float] = None,\n) -> CacheableAssetsDefinition:\n """Loads Fivetran connector assets from a configured FivetranResource instance. This fetches information\n about defined connectors at initialization time, and will error on workspace load if the Fivetran\n instance is not reachable.\n\n Args:\n fivetran (ResourceDefinition): A FivetranResource configured with the appropriate connection\n details.\n key_prefix (Optional[CoercibleToAssetKeyPrefix]): A prefix for the asset keys created.\n connector_to_group_fn (Optional[Callable[[str], Optional[str]]]): Function which returns an asset\n group name for a given Fivetran connector name. If None, no groups will be created. Defaults\n to a basic sanitization function.\n io_manager_key (Optional[str]): The IO manager key to use for all assets. Defaults to "io_manager".\n Use this if all assets should be loaded from the same source, otherwise use connector_to_io_manager_key_fn.\n connector_to_io_manager_key_fn (Optional[Callable[[str], Optional[str]]]): Function which returns an\n IO manager key for a given Fivetran connector name. When other ops are downstream of the loaded assets,\n the IOManager specified determines how the inputs to those ops are loaded. Defaults to "io_manager".\n connector_filter (Optional[Callable[[FivetranConnectorMetadata], bool]]): Optional function which takes\n in connector metadata and returns False if the connector should be excluded from the output assets.\n connector_to_asset_key_fn (Optional[Callable[[FivetranConnectorMetadata, str], AssetKey]]): Optional function\n which takes in connector metadata and a table name and returns an AssetKey for that table. Defaults to\n a function that generates an AssetKey matching the table name, split by ".".\n poll_interval (float): The time (in seconds) that will be waited between successive polls.\n poll_timeout (Optional[float]): The maximum time that will waited before this operation is\n timed out. By default, this will never time out.\n\n **Examples:**\n\n Loading all Fivetran connectors as assets:\n\n .. code-block:: python\n\n from dagster_fivetran import fivetran_resource, load_assets_from_fivetran_instance\n\n fivetran_instance = fivetran_resource.configured(\n {\n "api_key": "some_key",\n "api_secret": "some_secret",\n }\n )\n fivetran_assets = load_assets_from_fivetran_instance(fivetran_instance)\n\n Filtering the set of loaded connectors:\n\n .. 
code-block:: python\n\n from dagster_fivetran import fivetran_resource, load_assets_from_fivetran_instance\n\n fivetran_instance = fivetran_resource.configured(\n {\n "api_key": "some_key",\n "api_secret": "some_secret",\n }\n )\n fivetran_assets = load_assets_from_fivetran_instance(\n fivetran_instance,\n connector_filter=lambda meta: "snowflake" in meta.name,\n )\n """\n if isinstance(key_prefix, str):\n key_prefix = [key_prefix]\n key_prefix = check.list_param(key_prefix or [], "key_prefix", of_type=str)\n\n check.invariant(\n not io_manager_key or not connector_to_io_manager_key_fn,\n "Cannot specify both io_manager_key and connector_to_io_manager_key_fn",\n )\n if not connector_to_io_manager_key_fn:\n connector_to_io_manager_key_fn = lambda _: io_manager_key\n\n return FivetranInstanceCacheableAssetsDefinition(\n fivetran_resource_def=fivetran,\n key_prefix=key_prefix,\n connector_to_group_fn=connector_to_group_fn,\n connector_to_io_manager_key_fn=connector_to_io_manager_key_fn,\n connector_filter=connector_filter,\n connector_to_asset_key_fn=connector_to_asset_key_fn,\n poll_interval=poll_interval,\n poll_timeout=poll_timeout,\n )
\n
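The examples above use the legacy ``fivetran_resource``; a minimal sketch of the same workflow with the Pythonic ``FivetranResource`` and ``Definitions`` follows. The env var names, key prefix, and ``"snowflake"`` filter are placeholders.

.. code-block:: python

    from dagster import Definitions, EnvVar
    from dagster_fivetran import FivetranResource, load_assets_from_fivetran_instance

    fivetran_instance = FivetranResource(
        api_key=EnvVar("FIVETRAN_API_KEY"),
        api_secret=EnvVar("FIVETRAN_API_SECRET"),
    )

    # Load connectors from the Fivetran account as Dagster assets, prefixing each
    # asset key and keeping only connectors whose name mentions "snowflake".
    fivetran_assets = load_assets_from_fivetran_instance(
        fivetran_instance,
        key_prefix=["fivetran"],
        connector_filter=lambda meta: "snowflake" in meta.name,
    )

    defs = Definitions(assets=[fivetran_assets])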
", "current_page_name": "_modules/dagster_fivetran/asset_defs", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_fivetran.asset_defs"}, "ops": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_fivetran.ops

\nfrom typing import Any, Dict, List, Optional\n\nfrom dagster import (\n    AssetKey,\n    Config,\n    In,\n    Nothing,\n    Out,\n    Output,\n    op,\n)\nfrom pydantic import Field\n\nfrom dagster_fivetran.resources import DEFAULT_POLL_INTERVAL, FivetranResource\nfrom dagster_fivetran.types import FivetranOutput\nfrom dagster_fivetran.utils import generate_materializations\n\n\nclass SyncConfig(Config):\n    connector_id: str = Field(\n        description=(\n            "The Fivetran Connector ID that this op will sync. You can retrieve this "\n            'value from the "Setup" tab of a given connector in the Fivetran UI.'\n        ),\n    )\n    poll_interval: float = Field(\n        default=DEFAULT_POLL_INTERVAL,\n        description="The time (in seconds) that will be waited between successive polls.",\n    )\n    poll_timeout: Optional[float] = Field(\n        default=None,\n        description=(\n            "The maximum time that will waited before this operation is timed out. By "\n            "default, this will never time out."\n        ),\n    )\n    yield_materializations: bool = Field(\n        default=True,\n        description=(\n            "If True, materializations corresponding to the results of the Fivetran sync will "\n            "be yielded when the op executes."\n        ),\n    )\n    asset_key_prefix: List[str] = Field(\n        default=["fivetran"],\n        description=(\n            "If provided and yield_materializations is True, these components will be used to "\n            "prefix the generated asset keys."\n        ),\n    )\n\n\n
[docs]@op(\n ins={"start_after": In(Nothing)},\n out=Out(\n FivetranOutput,\n description=(\n "Parsed json dictionary representing the details of the Fivetran connector after the"\n " sync successfully completes. See the [Fivetran API"\n " Docs](https://fivetran.com/docs/rest-api/connectors#retrieveconnectordetails) to see"\n " detailed information on this response."\n ),\n ),\n tags={"kind": "fivetran"},\n)\ndef fivetran_sync_op(config: SyncConfig, fivetran: FivetranResource) -> Any:\n """Executes a Fivetran sync for a given ``connector_id``, and polls until that sync\n completes, raising an error if it is unsuccessful. It outputs a FivetranOutput which contains\n the details of the Fivetran connector after the sync successfully completes, as well as details\n about which tables the sync updates.\n\n It requires the use of the :py:class:`~dagster_fivetran.fivetran_resource`, which allows it to\n communicate with the Fivetran API.\n\n Examples:\n .. code-block:: python\n\n from dagster import job\n from dagster_fivetran import fivetran_resource, fivetran_sync_op\n\n my_fivetran_resource = fivetran_resource.configured(\n {\n "api_key": {"env": "FIVETRAN_API_KEY"},\n "api_secret": {"env": "FIVETRAN_API_SECRET"},\n }\n )\n\n sync_foobar = fivetran_sync_op.configured({"connector_id": "foobar"}, name="sync_foobar")\n\n @job(resource_defs={"fivetran": my_fivetran_resource})\n def my_simple_fivetran_job():\n sync_foobar()\n\n @job(resource_defs={"fivetran": my_fivetran_resource})\n def my_composed_fivetran_job():\n final_foobar_state = sync_foobar(start_after=some_op())\n other_op(final_foobar_state)\n """\n fivetran_output = fivetran.sync_and_poll(\n connector_id=config.connector_id,\n poll_interval=config.poll_interval,\n poll_timeout=config.poll_timeout,\n )\n if config.yield_materializations:\n yield from generate_materializations(\n fivetran_output, asset_key_prefix=config.asset_key_prefix\n )\n yield Output(fivetran_output)
\n\n\nclass FivetranResyncConfig(SyncConfig):\n resync_parameters: Optional[Dict[str, Any]] = Field(\n None,\n description=(\n "Optional resync parameters to send in the payload to the Fivetran API. You can"\n " find an example resync payload here:"\n " https://fivetran.com/docs/rest-api/connectors#request_7"\n ),\n )\n\n\n@op(\n ins={"start_after": In(Nothing)},\n out=Out(\n FivetranOutput,\n description=(\n "Parsed json dictionary representing the details of the Fivetran connector after the"\n " resync successfully completes. See the [Fivetran API"\n " Docs](https://fivetran.com/docs/rest-api/connectors#retrieveconnectordetails) to see"\n " detailed information on this response."\n ),\n ),\n tags={"kind": "fivetran"},\n)\ndef fivetran_resync_op(\n config: FivetranResyncConfig,\n fivetran: FivetranResource,\n) -> Any:\n """Executes a Fivetran historical resync for a given ``connector_id``, and polls until that resync\n completes, raising an error if it is unsuccessful. It outputs a FivetranOutput which contains\n the details of the Fivetran connector after the resync successfully completes, as well as details\n about which tables the resync updates.\n\n It requires the use of the :py:class:`~dagster_fivetran.fivetran_resource`, which allows it to\n communicate with the Fivetran API.\n\n Examples:\n .. code-block:: python\n\n from dagster import job\n from dagster_fivetran import fivetran_resource, fivetran_resync_op\n\n my_fivetran_resource = fivetran_resource.configured(\n {\n "api_key": {"env": "FIVETRAN_API_KEY"},\n "api_secret": {"env": "FIVETRAN_API_SECRET"},\n }\n )\n\n sync_foobar = fivetran_resync_op.configured(\n {\n "connector_id": "foobar",\n "resync_parameters": {\n "schema_a": ["table_a", "table_b"],\n "schema_b": ["table_c"]\n }\n },\n name="sync_foobar"\n )\n\n @job(resource_defs={"fivetran": my_fivetran_resource})\n def my_simple_fivetran_job():\n sync_foobar()\n\n @job(resource_defs={"fivetran": my_fivetran_resource})\n def my_composed_fivetran_job():\n final_foobar_state = sync_foobar(start_after=some_op())\n other_op(final_foobar_state)\n """\n fivetran_output = fivetran.resync_and_poll(\n connector_id=config.connector_id,\n resync_parameters=config.resync_parameters,\n poll_interval=config.poll_interval,\n poll_timeout=config.poll_timeout,\n )\n if config.yield_materializations:\n asset_key_filter = (\n [\n AssetKey(config.asset_key_prefix + [schema, table])\n for schema, tables in config.resync_parameters.items()\n for table in tables\n ]\n if config.resync_parameters is not None\n else None\n )\n for mat in generate_materializations(\n fivetran_output, asset_key_prefix=config.asset_key_prefix\n ):\n if asset_key_filter is None or mat.asset_key in asset_key_filter:\n yield mat\n\n yield Output(fivetran_output)\n
", "current_page_name": "_modules/dagster_fivetran/ops", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_fivetran.ops"}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_fivetran.resources

\nimport datetime\nimport json\nimport logging\nimport time\nfrom typing import Any, Mapping, Optional, Sequence, Tuple\nfrom urllib.parse import urljoin\n\nimport requests\nfrom dagster import (\n    Failure,\n    InitResourceContext,\n    MetadataValue,\n    __version__,\n    _check as check,\n    get_dagster_logger,\n    resource,\n)\nfrom dagster._config.pythonic_config import ConfigurableResource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster._utils.cached_method import cached_method\nfrom dateutil import parser\nfrom pydantic import Field\nfrom requests.auth import HTTPBasicAuth\nfrom requests.exceptions import RequestException\n\nfrom dagster_fivetran.types import FivetranOutput\nfrom dagster_fivetran.utils import get_fivetran_connector_url, get_fivetran_logs_url\n\nFIVETRAN_API_BASE = "https://api.fivetran.com"\nFIVETRAN_API_VERSION_PATH = "v1/"\nFIVETRAN_CONNECTOR_PATH = "connectors/"\n\n# default polling interval (in seconds)\nDEFAULT_POLL_INTERVAL = 10\n\n\n
[docs]class FivetranResource(ConfigurableResource):\n """This class exposes methods on top of the Fivetran REST API."""\n\n api_key: str = Field(description="The Fivetran API key to use for this resource.")\n api_secret: str = Field(description="The Fivetran API secret to use for this resource.")\n disable_schedule_on_trigger: bool = Field(\n default=True,\n description=(\n "Specifies if you would like any connector that is sync'd using this "\n "resource to be automatically taken off its Fivetran schedule."\n ),\n )\n request_max_retries: int = Field(\n default=3,\n description=(\n "The maximum number of times requests to the Fivetran API should be retried "\n "before failing."\n ),\n )\n request_retry_delay: float = Field(\n default=0.25,\n description="Time (in seconds) to wait between each request retry.",\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @property\n def _auth(self) -> HTTPBasicAuth:\n return HTTPBasicAuth(self.api_key, self.api_secret)\n\n @property\n @cached_method\n def _log(self) -> logging.Logger:\n return get_dagster_logger()\n\n @property\n def api_base_url(self) -> str:\n return urljoin(FIVETRAN_API_BASE, FIVETRAN_API_VERSION_PATH)\n\n @property\n def api_connector_url(self) -> str:\n return urljoin(self.api_base_url, FIVETRAN_CONNECTOR_PATH)\n\n def make_connector_request(\n self, method: str, endpoint: str, data: Optional[str] = None\n ) -> Mapping[str, Any]:\n return self.make_request(method, urljoin(FIVETRAN_CONNECTOR_PATH, endpoint), data)\n\n def make_request(\n self, method: str, endpoint: str, data: Optional[str] = None\n ) -> Mapping[str, Any]:\n """Creates and sends a request to the desired Fivetran Connector API endpoint.\n\n Args:\n method (str): The http method to use for this request (e.g. "POST", "GET", "PATCH").\n endpoint (str): The Fivetran API endpoint to send this request to.\n data (Optional[str]): JSON-formatted data string to be included in the request.\n\n Returns:\n Dict[str, Any]: Parsed json data from the response to this request\n """\n url = urljoin(self.api_base_url, endpoint)\n headers = {\n "User-Agent": f"dagster-fivetran/{__version__}",\n "Content-Type": "application/json;version=2",\n }\n\n num_retries = 0\n while True:\n try:\n response = requests.request(\n method=method,\n url=url,\n headers=headers,\n auth=self._auth,\n data=data,\n )\n response.raise_for_status()\n resp_dict = response.json()\n return resp_dict["data"] if "data" in resp_dict else resp_dict\n except RequestException as e:\n self._log.error("Request to Fivetran API failed: %s", e)\n if num_retries == self.request_max_retries:\n break\n num_retries += 1\n time.sleep(self.request_retry_delay)\n\n raise Failure(f"Max retries ({self.request_max_retries}) exceeded with url: {url}.")\n\n def get_connector_details(self, connector_id: str) -> Mapping[str, Any]:\n """Gets details about a given connector from the Fivetran Connector API.\n\n Args:\n connector_id (str): The Fivetran Connector ID. You can retrieve this value from the\n "Setup" tab of a given connector in the Fivetran UI.\n\n Returns:\n Dict[str, Any]: Parsed json data from the response to this request\n """\n return self.make_connector_request(method="GET", endpoint=connector_id)\n\n def _assert_syncable_connector(self, connector_id: str):\n """Confirms that a given connector is eligible to sync. Will raise a Failure in the event that\n the connector is either paused or not fully setup.\n\n Args:\n connector_id (str): The Fivetran Connector ID. 
You can retrieve this value from the\n "Setup" tab of a given connector in the Fivetran UI.\n """\n connector_details = self.get_connector_details(connector_id)\n if connector_details["paused"]:\n raise Failure(f"Connector '{connector_id}' cannot be synced as it is currently paused.")\n if connector_details["status"]["setup_state"] != "connected":\n raise Failure(f"Connector '{connector_id}' cannot be synced as it has not been setup")\n\n def get_connector_sync_status(self, connector_id: str) -> Tuple[datetime.datetime, bool, str]:\n """Gets details about the status of the most recent Fivetran sync operation for a given\n connector.\n\n Args:\n connector_id (str): The Fivetran Connector ID. You can retrieve this value from the\n "Setup" tab of a given connector in the Fivetran UI.\n\n Returns:\n Tuple[datetime.datetime, bool, str]:\n Tuple representing the timestamp of the last completeded sync, if it succeeded, and\n the currently reported sync status.\n """\n connector_details = self.get_connector_details(connector_id)\n\n min_time_str = "0001-01-01 00:00:00+00"\n succeeded_at = parser.parse(connector_details["succeeded_at"] or min_time_str)\n failed_at = parser.parse(connector_details["failed_at"] or min_time_str)\n\n return (\n max(succeeded_at, failed_at),\n succeeded_at > failed_at,\n connector_details["status"]["sync_state"],\n )\n\n def update_connector(\n self, connector_id: str, properties: Optional[Mapping[str, Any]] = None\n ) -> Mapping[str, Any]:\n """Updates properties of a Fivetran Connector.\n\n Args:\n connector_id (str): The Fivetran Connector ID. You can retrieve this value from the\n "Setup" tab of a given connector in the Fivetran UI.\n properties (Dict[str, Any]): The properties to be updated. For a comprehensive list of\n properties, see the [Fivetran docs](https://fivetran.com/docs/rest-api/connectors#modifyaconnector).\n\n Returns:\n Dict[str, Any]: Parsed json data representing the API response.\n """\n return self.make_connector_request(\n method="PATCH", endpoint=connector_id, data=json.dumps(properties)\n )\n\n def update_schedule_type(\n self, connector_id: str, schedule_type: Optional[str] = None\n ) -> Mapping[str, Any]:\n """Updates the schedule type property of the connector to either "auto" or "manual".\n\n Args:\n connector_id (str): The Fivetran Connector ID. You can retrieve this value from the\n "Setup" tab of a given connector in the Fivetran UI.\n schedule_type (Optional[str]): Either "auto" (to turn the schedule on) or "manual" (to\n turn it off).\n\n Returns:\n Dict[str, Any]: Parsed json data representing the API response.\n """\n if schedule_type not in ["auto", "manual"]:\n check.failed(f"schedule_type must be either 'auto' or 'manual': got '{schedule_type}'")\n return self.update_connector(connector_id, properties={"schedule_type": schedule_type})\n\n def get_connector_schema_config(self, connector_id: str) -> Mapping[str, Any]:\n return self.make_connector_request("GET", endpoint=f"{connector_id}/schemas")\n\n def start_sync(self, connector_id: str) -> Mapping[str, Any]:\n """Initiates a sync of a Fivetran connector.\n\n Args:\n connector_id (str): The Fivetran Connector ID. 
You can retrieve this value from the\n "Setup" tab of a given connector in the Fivetran UI.\n\n Returns:\n Dict[str, Any]: Parsed json data representing the connector details API response after\n the sync is started.\n """\n if self.disable_schedule_on_trigger:\n self._log.info("Disabling Fivetran sync schedule.")\n self.update_schedule_type(connector_id, "manual")\n self._assert_syncable_connector(connector_id)\n self.make_connector_request(method="POST", endpoint=f"{connector_id}/force")\n connector_details = self.get_connector_details(connector_id)\n self._log.info(\n f"Sync initialized for connector_id={connector_id}. View this sync in the Fivetran UI: "\n + get_fivetran_connector_url(connector_details)\n )\n return connector_details\n\n def start_resync(\n self, connector_id: str, resync_parameters: Optional[Mapping[str, Sequence[str]]] = None\n ) -> Mapping[str, Any]:\n """Initiates a historical sync of all data for multiple schema tables within a Fivetran connector.\n\n Args:\n connector_id (str): The Fivetran Connector ID. You can retrieve this value from the\n "Setup" tab of a given connector in the Fivetran UI.\n resync_parameters (Optional[Dict[str, List[str]]]): Optional resync parameters to send to the Fivetran API.\n An example payload can be found here: https://fivetran.com/docs/rest-api/connectors#request_7\n\n Returns:\n Dict[str, Any]: Parsed json data representing the connector details API response after\n the resync is started.\n """\n if self.disable_schedule_on_trigger:\n self._log.info("Disabling Fivetran sync schedule.")\n self.update_schedule_type(connector_id, "manual")\n self._assert_syncable_connector(connector_id)\n self.make_connector_request(\n method="POST",\n endpoint=(\n f"{connector_id}/schemas/tables/resync"\n if resync_parameters is not None\n else f"{connector_id}/resync"\n ),\n data=json.dumps(resync_parameters) if resync_parameters is not None else None,\n )\n connector_details = self.get_connector_details(connector_id)\n self._log.info(\n f"Sync initialized for connector_id={connector_id}. View this resync in the Fivetran"\n " UI: "\n + get_fivetran_connector_url(connector_details)\n )\n return connector_details\n\n def poll_sync(\n self,\n connector_id: str,\n initial_last_sync_completion: datetime.datetime,\n poll_interval: float = DEFAULT_POLL_INTERVAL,\n poll_timeout: Optional[float] = None,\n ) -> Mapping[str, Any]:\n """Given a Fivetran connector and the timestamp at which the previous sync completed, poll\n until the next sync completes.\n\n The previous sync completion time is necessary because the only way to tell when a sync\n completes is when this value changes.\n\n Args:\n connector_id (str): The Fivetran Connector ID. You can retrieve this value from the\n "Setup" tab of a given connector in the Fivetran UI.\n initial_last_sync_completion (datetime.datetime): The timestamp of the last completed sync\n (successful or otherwise) for this connector, prior to running this method.\n poll_interval (float): The time (in seconds) that will be waited between successive polls.\n poll_timeout (float): The maximum time that will waited before this operation is timed\n out. By default, this will never time out.\n\n Returns:\n Dict[str, Any]: Parsed json data representing the API response.\n """\n poll_start = datetime.datetime.now()\n while True:\n (\n curr_last_sync_completion,\n curr_last_sync_succeeded,\n curr_sync_state,\n ) = self.get_connector_sync_status(connector_id)\n self._log.info(f"Polled '{connector_id}'. 
Status: [{curr_sync_state}]")\n\n if curr_last_sync_completion > initial_last_sync_completion:\n break\n\n if poll_timeout and datetime.datetime.now() > poll_start + datetime.timedelta(\n seconds=poll_timeout\n ):\n raise Failure(\n f"Sync for connector '{connector_id}' timed out after "\n f"{datetime.datetime.now() - poll_start}."\n )\n\n # Sleep for the configured time interval before polling again.\n time.sleep(poll_interval)\n\n connector_details = self.get_connector_details(connector_id)\n if not curr_last_sync_succeeded:\n raise Failure(\n f"Sync for connector '{connector_id}' failed!",\n metadata={\n "connector_details": MetadataValue.json(connector_details),\n "log_url": MetadataValue.url(get_fivetran_logs_url(connector_details)),\n },\n )\n return connector_details\n\n def sync_and_poll(\n self,\n connector_id: str,\n poll_interval: float = DEFAULT_POLL_INTERVAL,\n poll_timeout: Optional[float] = None,\n ) -> FivetranOutput:\n """Initializes a sync operation for the given connector, and polls until it completes.\n\n Args:\n connector_id (str): The Fivetran Connector ID. You can retrieve this value from the\n "Setup" tab of a given connector in the Fivetran UI.\n poll_interval (float): The time (in seconds) that will be waited between successive polls.\n poll_timeout (float): The maximum time that will waited before this operation is timed\n out. By default, this will never time out.\n\n Returns:\n :py:class:`~FivetranOutput`:\n Object containing details about the connector and the tables it updates\n """\n schema_config = self.get_connector_schema_config(connector_id)\n init_last_sync_timestamp, _, _ = self.get_connector_sync_status(connector_id)\n self.start_sync(connector_id)\n final_details = self.poll_sync(\n connector_id,\n init_last_sync_timestamp,\n poll_interval=poll_interval,\n poll_timeout=poll_timeout,\n )\n return FivetranOutput(connector_details=final_details, schema_config=schema_config)\n\n def resync_and_poll(\n self,\n connector_id: str,\n poll_interval: float = DEFAULT_POLL_INTERVAL,\n poll_timeout: Optional[float] = None,\n resync_parameters: Optional[Mapping[str, Sequence[str]]] = None,\n ) -> FivetranOutput:\n """Initializes a historical resync operation for the given connector, and polls until it completes.\n\n Args:\n connector_id (str): The Fivetran Connector ID. You can retrieve this value from the\n "Setup" tab of a given connector in the Fivetran UI.\n resync_parameters (Dict[str, List[str]]): The payload to send to the Fivetran API.\n This should be a dictionary with schema names as the keys and a list of tables\n to resync as the values.\n poll_interval (float): The time (in seconds) that will be waited between successive polls.\n poll_timeout (float): The maximum time that will waited before this operation is timed\n out. By default, this will never time out.\n\n Returns:\n :py:class:`~FivetranOutput`:\n Object containing details about the connector and the tables it updates\n """\n schema_config = self.get_connector_schema_config(connector_id)\n init_last_sync_timestamp, _, _ = self.get_connector_sync_status(connector_id)\n self.start_resync(connector_id, resync_parameters)\n final_details = self.poll_sync(\n connector_id,\n init_last_sync_timestamp,\n poll_interval=poll_interval,\n poll_timeout=poll_timeout,\n )\n return FivetranOutput(connector_details=final_details, schema_config=schema_config)
\n\n\n
[docs]@dagster_maintained_resource\n@resource(config_schema=FivetranResource.to_config_schema())\ndef fivetran_resource(context: InitResourceContext) -> FivetranResource:\n """This resource allows users to programmatically interface with the Fivetran REST API to launch\n syncs and monitor their progress. This currently implements only a subset of the functionality\n exposed by the API.\n\n For a complete set of documentation on the Fivetran REST API, including expected response JSON\n schemas, see the `Fivetran API Docs <https://fivetran.com/docs/rest-api/connectors>`_.\n\n To configure this resource, we recommend using the `configured\n <https://docs.dagster.io/concepts/configuration/configured>`_ method.\n\n **Examples:**\n\n .. code-block:: python\n\n from dagster import job\n from dagster_fivetran import fivetran_resource\n\n my_fivetran_resource = fivetran_resource.configured(\n {\n "api_key": {"env": "FIVETRAN_API_KEY"},\n "api_secret": {"env": "FIVETRAN_API_SECRET"},\n }\n )\n\n @job(resource_defs={"fivetran": my_fivetran_resource})\n def my_fivetran_job():\n ...\n\n """\n return FivetranResource.from_resource_context(context)
\n
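As a complement to the ``configured`` example above, a hedged sketch of calling the Pythonic ``FivetranResource`` directly from an asset via ``sync_and_poll``; the connector ID and env var names are placeholders.

.. code-block:: python

    from dagster import AssetExecutionContext, Definitions, EnvVar, asset
    from dagster_fivetran import FivetranResource

    @asset
    def fivetran_sync_result(context: AssetExecutionContext, fivetran: FivetranResource) -> None:
        # Kick off a sync for a placeholder connector and block until it finishes;
        # raises dagster.Failure if the sync does not succeed.
        output = fivetran.sync_and_poll(connector_id="my_connector_id", poll_interval=30)
        # output.connector_details and output.schema_config hold the parsed API responses.
        context.log.info(f"Synced connector: {output.connector_details['id']}")

    defs = Definitions(
        assets=[fivetran_sync_result],
        resources={
            "fivetran": FivetranResource(
                api_key=EnvVar("FIVETRAN_API_KEY"),
                api_secret=EnvVar("FIVETRAN_API_SECRET"),
            )
        },
    )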
", "current_page_name": "_modules/dagster_fivetran/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_fivetran.resources"}}, "dagster_gcp": {"bigquery": {"io_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_gcp.bigquery.io_manager

\nfrom abc import abstractmethod\nfrom contextlib import contextmanager\nfrom typing import Generator, Optional, Sequence, Type, cast\n\nfrom dagster import IOManagerDefinition, OutputContext, io_manager\nfrom dagster._annotations import experimental\nfrom dagster._config.pythonic_config import (\n    ConfigurableIOManagerFactory,\n)\nfrom dagster._core.storage.db_io_manager import (\n    DbClient,\n    DbIOManager,\n    DbTypeHandler,\n    TablePartitionDimension,\n    TableSlice,\n    TimeWindow,\n)\nfrom dagster._core.storage.io_manager import dagster_maintained_io_manager\nfrom google.api_core.exceptions import NotFound\nfrom google.cloud import bigquery\nfrom pydantic import Field\n\nfrom .utils import setup_gcp_creds\n\nBIGQUERY_DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"\n\n\n
[docs]@experimental\ndef build_bigquery_io_manager(\n type_handlers: Sequence[DbTypeHandler], default_load_type: Optional[Type] = None\n) -> IOManagerDefinition:\n """Builds an I/O manager definition that reads inputs from and writes outputs to BigQuery.\n\n Args:\n type_handlers (Sequence[DbTypeHandler]): Each handler defines how to translate between\n slices of BigQuery tables and an in-memory type - e.g. a Pandas DataFrame.\n If only one DbTypeHandler is provided, it will be used as the default_load_type.\n default_load_type (Type): When an input has no type annotation, load it as this type.\n\n Returns:\n IOManagerDefinition\n\n Examples:\n .. code-block:: python\n\n from dagster_gcp import build_bigquery_io_manager\n from dagster_bigquery_pandas import BigQueryPandasTypeHandler\n from dagster import Definitions\n\n @asset(\n key_prefix=["my_dataset"] # my_dataset will be used as the dataset in BigQuery\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n bigquery_io_manager = build_bigquery_io_manager([BigQueryPandasTypeHandler()])\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "io_manager": bigquery_io_manager.configured({\n "project" : {"env": "GCP_PROJECT"}\n })\n }\n )\n\n You can tell Dagster in which dataset to create tables by setting the ``dataset`` configuration value.\n If you do not provide a dataset as configuration to the I/O manager, Dagster will determine a dataset based\n on the assets and ops using the I/O Manager. For assets, the dataset will be determined from the asset key,\n as shown in the above example. The final prefix before the asset name will be used as the dataset. For example,\n if the asset ``my_table`` had the key prefix ``["gcp", "bigquery", "my_dataset"]``, the dataset ``my_dataset`` will be\n used. For ops, the dataset can be specified by including a `schema` entry in output metadata. If ``schema`` is\n not provided via config or on the asset/op, ``public`` will be used for the dataset.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_dataset"})}\n )\n def make_my_table() -> pd.DataFrame:\n # the returned value will be stored at my_dataset.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata ``columns`` to the\n :py:class:`~dagster.In` or :py:class:`~dagster.AssetIn`.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pd.DataFrame) -> pd.DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n If you cannot upload a file to your Dagster deployment, or otherwise cannot\n `authenticate with GCP <https://cloud.google.com/docs/authentication/provide-credentials-adc>`_\n via a standard method, you can provide a service account key as the ``gcp_credentials`` configuration.\n Dagster willstore this key in a temporary file and set ``GOOGLE_APPLICATION_CREDENTIALS`` to point to the file.\n After the run completes, the file will be deleted, and ``GOOGLE_APPLICATION_CREDENTIALS`` will be\n unset. The key must be base64 encoded to avoid issues with newlines in the keys. 
You can retrieve\n the base64 encoded with this shell command: ``cat $GOOGLE_APPLICATION_CREDENTIALS | base64``\n """\n\n @dagster_maintained_io_manager\n @io_manager(config_schema=BigQueryIOManager.to_config_schema())\n def bigquery_io_manager(init_context):\n """I/O Manager for storing outputs in a BigQuery database.\n\n Assets will be stored in the dataset and table name specified by their AssetKey.\n Subsequent materializations of an asset will overwrite previous materializations of that asset.\n Op outputs will be stored in the dataset specified by output metadata (defaults to public) in a\n table of the name of the output.\n\n Note that the BigQuery config is mapped to the DB IO manager table hierarchy as follows:\n BigQuery DB IO\n * project -> database\n * dataset -> schema\n * table -> table\n """\n mgr = DbIOManager(\n type_handlers=type_handlers,\n db_client=BigQueryClient(),\n io_manager_name="BigQueryIOManager",\n database=init_context.resource_config["project"],\n schema=init_context.resource_config.get("dataset"),\n default_load_type=default_load_type,\n )\n if init_context.resource_config.get("gcp_credentials"):\n with setup_gcp_creds(init_context.resource_config.get("gcp_credentials")):\n yield mgr\n else:\n yield mgr\n\n return bigquery_io_manager
\n\n\n
[docs]class BigQueryIOManager(ConfigurableIOManagerFactory):\n """Base class for an I/O manager definition that reads inputs from and writes outputs to BigQuery.\n\n Examples:\n .. code-block:: python\n\n from dagster_gcp import BigQueryIOManager\n from dagster_bigquery_pandas import BigQueryPandasTypeHandler\n from dagster import Definitions, EnvVar\n\n class MyBigQueryIOManager(BigQueryIOManager):\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [BigQueryPandasTypeHandler()]\n\n @asset(\n key_prefix=["my_dataset"] # my_dataset will be used as the dataset in BigQuery\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "io_manager": MyBigQueryIOManager(project=EnvVar("GCP_PROJECT"))\n }\n )\n\n You can tell Dagster in which dataset to create tables by setting the ``dataset`` configuration value.\n If you do not provide a dataset as configuration to the I/O manager, Dagster will determine a dataset based\n on the assets and ops using the I/O Manager. For assets, the dataset will be determined from the asset key,\n as shown in the above example. The final prefix before the asset name will be used as the dataset. For example,\n if the asset ``my_table`` had the key prefix ``["gcp", "bigquery", "my_dataset"]``, the dataset ``my_dataset`` will be\n used. For ops, the dataset can be specified by including a ``schema`` entry in output metadata. If ``schema`` is\n not provided via config or on the asset/op, ``public`` will be used for the dataset.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_dataset"})}\n )\n def make_my_table() -> pd.DataFrame:\n # the returned value will be stored at my_dataset.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata ``columns`` to the\n :py:class:`~dagster.In` or :py:class:`~dagster.AssetIn`.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pd.DataFrame) -> pd.DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n If you cannot upload a file to your Dagster deployment, or otherwise cannot\n `authenticate with GCP <https://cloud.google.com/docs/authentication/provide-credentials-adc>`_\n via a standard method, you can provide a service account key as the ``gcp_credentials`` configuration.\n Dagster will store this key in a temporary file and set ``GOOGLE_APPLICATION_CREDENTIALS`` to point to the file.\n After the run completes, the file will be deleted, and ``GOOGLE_APPLICATION_CREDENTIALS`` will be\n unset. The key must be base64 encoded to avoid issues with newlines in the keys. You can retrieve\n the base64 encoded with this shell command: ``cat $GOOGLE_APPLICATION_CREDENTIALS | base64``\n """\n\n project: str = Field(description="The GCP project to use.")\n dataset: Optional[str] = Field(\n default=None,\n description=(\n "Name of the BigQuery dataset to use. If not provided, the last prefix before"\n " the asset name will be used."\n ),\n )\n location: Optional[str] = Field(\n default=None,\n description=(\n "The GCP location. Note: When using PySpark DataFrames, the default"\n " location of the project will be used. A custom location can be specified in"\n " your SparkSession configuration."\n ),\n )\n gcp_credentials: Optional[str] = Field(\n default=None,\n description=(\n "GCP authentication credentials. 
If provided, a temporary file will be created"\n " with the credentials and ``GOOGLE_APPLICATION_CREDENTIALS`` will be set to the"\n " temporary file. To avoid issues with newlines in the keys, you must base64"\n " encode the key. You can retrieve the base64 encoded key with this shell"\n " command: ``cat $GOOGLE_AUTH_CREDENTIALS | base64``"\n ),\n )\n temporary_gcs_bucket: Optional[str] = Field(\n default=None,\n description=(\n "When using PySpark DataFrames, optionally specify a temporary GCS bucket to"\n " store data. If not provided, data will be directly written to BigQuery."\n ),\n )\n timeout: Optional[float] = Field(\n default=None,\n description=(\n "When using Pandas DataFrames, optionally specify a timeout for the BigQuery"\n " queries (loading and reading from tables)."\n ),\n )\n\n @staticmethod\n @abstractmethod\n def type_handlers() -> Sequence[DbTypeHandler]: ...\n\n @staticmethod\n def default_load_type() -> Optional[Type]:\n return None\n\n def create_io_manager(self, context) -> Generator:\n mgr = DbIOManager(\n db_client=BigQueryClient(),\n io_manager_name="BigQueryIOManager",\n database=self.project,\n schema=self.dataset,\n type_handlers=self.type_handlers(),\n default_load_type=self.default_load_type(),\n )\n if self.gcp_credentials:\n with setup_gcp_creds(self.gcp_credentials):\n yield mgr\n else:\n yield mgr
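A sketch of supplying the base64-encoded service account key described above through an environment variable. ``MyBigQueryIOManager`` mirrors the subclass in the docstring, and the env var names are placeholders.

.. code-block:: python

    from typing import Sequence

    import pandas as pd
    from dagster import Definitions, EnvVar, asset
    from dagster._core.storage.db_io_manager import DbTypeHandler
    from dagster_gcp import BigQueryIOManager
    from dagster_gcp_pandas import BigQueryPandasTypeHandler

    class MyBigQueryIOManager(BigQueryIOManager):
        @staticmethod
        def type_handlers() -> Sequence[DbTypeHandler]:
            return [BigQueryPandasTypeHandler()]

    @asset(key_prefix=["my_dataset"])
    def my_table() -> pd.DataFrame:
        return pd.DataFrame({"a": [1, 2, 3]})

    defs = Definitions(
        assets=[my_table],
        resources={
            "io_manager": MyBigQueryIOManager(
                project=EnvVar("GCP_PROJECT"),
                # contents of `cat $GOOGLE_APPLICATION_CREDENTIALS | base64`
                gcp_credentials=EnvVar("GCP_CREDENTIALS_BASE64"),
            )
        },
    )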
\n\n\nclass BigQueryClient(DbClient):\n @staticmethod\n def delete_table_slice(context: OutputContext, table_slice: TableSlice, connection) -> None:\n try:\n connection.query(_get_cleanup_statement(table_slice)).result()\n except NotFound:\n # table doesn't exist yet, so ignore the error\n pass\n\n @staticmethod\n def get_select_statement(table_slice: TableSlice) -> str:\n col_str = ", ".join(table_slice.columns) if table_slice.columns else "*"\n\n if table_slice.partition_dimensions and len(table_slice.partition_dimensions) > 0:\n query = (\n f"SELECT {col_str} FROM"\n f" `{table_slice.database}.{table_slice.schema}.{table_slice.table}` WHERE\\n"\n )\n return query + _partition_where_clause(table_slice.partition_dimensions)\n else:\n return f"""SELECT {col_str} FROM `{table_slice.database}.{table_slice.schema}.{table_slice.table}`"""\n\n @staticmethod\n def ensure_schema_exists(context: OutputContext, table_slice: TableSlice, connection) -> None:\n connection.query(f"CREATE SCHEMA IF NOT EXISTS {table_slice.schema}").result()\n\n @staticmethod\n @contextmanager\n def connect(context, _):\n conn = bigquery.Client(\n project=context.resource_config.get("project"),\n location=context.resource_config.get("location"),\n )\n\n yield conn\n\n\ndef _get_cleanup_statement(table_slice: TableSlice) -> str:\n """Returns a SQL statement that deletes data in the given table to make way for the output data\n being written.\n """\n if table_slice.partition_dimensions and len(table_slice.partition_dimensions) > 0:\n query = (\n f"DELETE FROM `{table_slice.database}.{table_slice.schema}.{table_slice.table}` WHERE\\n"\n )\n return query + _partition_where_clause(table_slice.partition_dimensions)\n else:\n return f"TRUNCATE TABLE `{table_slice.database}.{table_slice.schema}.{table_slice.table}`"\n\n\ndef _partition_where_clause(partition_dimensions: Sequence[TablePartitionDimension]) -> str:\n return " AND\\n".join(\n (\n _time_window_where_clause(partition_dimension)\n if isinstance(partition_dimension.partitions, TimeWindow)\n else _static_where_clause(partition_dimension)\n )\n for partition_dimension in partition_dimensions\n )\n\n\ndef _time_window_where_clause(table_partition: TablePartitionDimension) -> str:\n partition = cast(TimeWindow, table_partition.partitions)\n start_dt, end_dt = partition\n start_dt_str = start_dt.strftime(BIGQUERY_DATETIME_FORMAT)\n end_dt_str = end_dt.strftime(BIGQUERY_DATETIME_FORMAT)\n return f"""{table_partition.partition_expr} >= '{start_dt_str}' AND {table_partition.partition_expr} < '{end_dt_str}'"""\n\n\ndef _static_where_clause(table_partition: TablePartitionDimension) -> str:\n partitions = ", ".join(f"'{partition}'" for partition in table_partition.partitions)\n return f"""{table_partition.partition_expr} in ({partitions})"""\n
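To make the where-clause helpers above concrete, this is roughly the SQL text produced for a single time-window partition dimension. The column name, bounds, and datetime format are illustrative; the snippet reconstructs the string shape rather than calling the private helpers.

.. code-block:: python

    # Shape of BigQueryClient.get_select_statement output for a time-window
    # partition on "event_time" (illustrative values only).
    start, end = "2023-01-01 00:00:00", "2023-01-02 00:00:00"
    select_sql = (
        "SELECT * FROM `my-project.my_dataset.my_table` WHERE\n"
        f"event_time >= '{start}' AND event_time < '{end}'"
    )
    print(select_sql)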
", "current_page_name": "_modules/dagster_gcp/bigquery/io_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_gcp.bigquery.io_manager"}, "ops": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_gcp.bigquery.ops

\nimport hashlib\n\nfrom dagster import (\n    In,\n    List,\n    Nothing,\n    Out,\n    _check as check,\n    op,\n)\nfrom dagster_pandas import DataFrame\nfrom google.cloud.bigquery.encryption_configuration import EncryptionConfiguration\nfrom google.cloud.bigquery.job import LoadJobConfig, QueryJobConfig\nfrom google.cloud.bigquery.table import TimePartitioning\n\nfrom .configs import (\n    define_bigquery_create_dataset_config,\n    define_bigquery_delete_dataset_config,\n    define_bigquery_load_config,\n    define_bigquery_query_config,\n)\nfrom .types import BigQueryLoadSource\n\n_START = "start"\n\n\ndef _preprocess_config(cfg):\n    destination_encryption_configuration = cfg.get("destination_encryption_configuration")\n    time_partitioning = cfg.get("time_partitioning")\n\n    if destination_encryption_configuration is not None:\n        cfg["destination_encryption_configuration"] = EncryptionConfiguration(\n            kms_key_name=destination_encryption_configuration\n        )\n\n    if time_partitioning is not None:\n        cfg["time_partitioning"] = TimePartitioning(**time_partitioning)\n\n    return cfg\n\n\n
[docs]def bq_op_for_queries(sql_queries):\n """Executes BigQuery SQL queries.\n\n Expects a BQ client to be provisioned in resources as context.resources.bigquery.\n """\n sql_queries = check.list_param(sql_queries, "sql queries", of_type=str)\n m = hashlib.sha1()\n for query in sql_queries:\n m.update(query.encode("utf-8"))\n hash_str = m.hexdigest()[:10]\n name = f"bq_op_{hash_str}"\n\n @op(\n name=name,\n ins={_START: In(Nothing)},\n out=Out(List[DataFrame]),\n config_schema=define_bigquery_query_config(),\n required_resource_keys={"bigquery"},\n tags={"kind": "sql", "sql": "\\n".join(sql_queries)},\n )\n def _bq_fn(context):\n query_job_config = _preprocess_config(context.op_config.get("query_job_config", {}))\n\n # Retrieve results as pandas DataFrames\n results = []\n for sql_query in sql_queries:\n # We need to construct a new QueryJobConfig for each query.\n # See: https://bit.ly/2VjD6sl\n cfg = QueryJobConfig(**query_job_config) if query_job_config else None\n context.log.info(\n "executing query %s with config: %s"\n % (sql_query, cfg.to_api_repr() if cfg else "(no config provided)")\n )\n results.append(\n context.resources.bigquery.query(sql_query, job_config=cfg).to_dataframe()\n )\n\n return results\n\n return _bq_fn
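A minimal sketch of using the generated op in a job, assuming ``bq_op_for_queries`` and ``bigquery_resource`` are importable from ``dagster_gcp``; the project name and queries are placeholders.

.. code-block:: python

    from dagster import job
    from dagster_gcp import bigquery_resource, bq_op_for_queries

    run_queries = bq_op_for_queries(
        [
            "SELECT 1 AS one",
            "SELECT 2 AS two",
        ]
    )

    @job(resource_defs={"bigquery": bigquery_resource.configured({"project": "my-project"})})
    def bq_query_job():
        run_queries()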
\n\n\nBIGQUERY_LOAD_CONFIG = define_bigquery_load_config()\n\n\n
[docs]@op(\n ins={"paths": In(List[str])},\n out=Out(Nothing),\n config_schema=BIGQUERY_LOAD_CONFIG,\n required_resource_keys={"bigquery"},\n)\ndef import_gcs_paths_to_bq(context, paths):\n return _execute_load_in_source(context, paths, BigQueryLoadSource.GCS)
\n\n\n
[docs]@op(\n ins={"df": In(DataFrame)},\n out=Out(Nothing),\n config_schema=BIGQUERY_LOAD_CONFIG,\n required_resource_keys={"bigquery"},\n)\ndef import_df_to_bq(context, df):\n return _execute_load_in_source(context, df, BigQueryLoadSource.DataFrame)
\n\n\n
[docs]@op(\n ins={"path": In(str)},\n out=Out(Nothing),\n config_schema=BIGQUERY_LOAD_CONFIG,\n required_resource_keys={"bigquery"},\n)\ndef import_file_to_bq(context, path):\n return _execute_load_in_source(context, path, BigQueryLoadSource.File)
\n\n\ndef _execute_load_in_source(context, source, source_name):\n destination = context.op_config.get("destination")\n load_job_config = _preprocess_config(context.op_config.get("load_job_config", {}))\n cfg = LoadJobConfig(**load_job_config) if load_job_config else None\n\n context.log.info(\n "executing BQ load with config: %s for source %s"\n % (cfg.to_api_repr() if cfg else "(no config provided)", source)\n )\n\n if source_name == BigQueryLoadSource.DataFrame:\n context.resources.bigquery.load_table_from_dataframe(\n source, destination, job_config=cfg\n ).result()\n\n # Load from file. See: https://cloud.google.com/bigquery/docs/loading-data-local\n elif source_name == BigQueryLoadSource.File:\n with open(source, "rb") as file_obj:\n context.resources.bigquery.load_table_from_file(\n file_obj, destination, job_config=cfg\n ).result()\n\n # Load from GCS. See: https://cloud.google.com/bigquery/docs/loading-data-cloud-storage\n elif source_name == BigQueryLoadSource.GCS:\n context.resources.bigquery.load_table_from_uri(source, destination, job_config=cfg).result()\n\n\n
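A sketch of wiring one of these load ops into a job. The imports are assumed to come from ``dagster_gcp``, and the ``destination`` run-config field follows ``_execute_load_in_source`` above; project, dataset, and table names are placeholders.

.. code-block:: python

    import pandas as pd
    from dagster import Out, job, op
    from dagster_gcp import bigquery_resource, import_df_to_bq
    from dagster_pandas import DataFrame

    @op(out=Out(DataFrame))
    def build_df():
        return pd.DataFrame({"num": [1, 2, 3]})

    @job(resource_defs={"bigquery": bigquery_resource.configured({"project": "my-project"})})
    def load_df_job():
        import_df_to_bq(build_df())

    # Example run config:
    #
    # ops:
    #   import_df_to_bq:
    #     config:
    #       destination: my_dataset.my_table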
[docs]@op(\n ins={_START: In(Nothing)},\n config_schema=define_bigquery_create_dataset_config(),\n required_resource_keys={"bigquery"},\n)\ndef bq_create_dataset(context):\n """BigQuery Create Dataset.\n\n This op encapsulates creating a BigQuery dataset.\n\n Expects a BQ client to be provisioned in resources as context.resources.bigquery.\n """\n (dataset, exists_ok) = [context.op_config.get(k) for k in ("dataset", "exists_ok")]\n context.log.info("executing BQ create_dataset for dataset %s" % (dataset))\n context.resources.bigquery.create_dataset(dataset, exists_ok)
\n\n\n
[docs]@op(\n ins={_START: In(Nothing)},\n config_schema=define_bigquery_delete_dataset_config(),\n required_resource_keys={"bigquery"},\n)\ndef bq_delete_dataset(context):\n """BigQuery Delete Dataset.\n\n This op encapsulates deleting a BigQuery dataset.\n\n Expects a BQ client to be provisioned in resources as context.resources.bigquery.\n """\n (dataset, delete_contents, not_found_ok) = [\n context.op_config.get(k) for k in ("dataset", "delete_contents", "not_found_ok")\n ]\n\n context.log.info("executing BQ delete_dataset for dataset %s" % dataset)\n\n context.resources.bigquery.delete_dataset(\n dataset, delete_contents=delete_contents, not_found_ok=not_found_ok\n )
\n
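A sketch of running the dataset-management ops above; the project and dataset names are placeholders, and the config field names follow the op bodies.

.. code-block:: python

    from dagster import job
    from dagster_gcp import bigquery_resource, bq_create_dataset

    @job(resource_defs={"bigquery": bigquery_resource.configured({"project": "my-project"})})
    def create_dataset_job():
        bq_create_dataset()

    # Example run config:
    #
    # ops:
    #   bq_create_dataset:
    #     config:
    #       dataset: my_dataset
    #       exists_ok: true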
", "current_page_name": "_modules/dagster_gcp/bigquery/ops", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_gcp.bigquery.ops"}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_gcp.bigquery.resources

\nfrom contextlib import contextmanager\nfrom typing import Any, Iterator, Optional\n\nfrom dagster import ConfigurableResource, IAttachDifferentObjectToOpContext, resource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom google.cloud import bigquery\nfrom pydantic import Field\n\nfrom .utils import setup_gcp_creds\n\n\n
[docs]class BigQueryResource(ConfigurableResource, IAttachDifferentObjectToOpContext):\n """Resource for interacting with Google BigQuery.\n\n Examples:\n .. code-block:: python\n\n from dagster import Definitions, asset\n from dagster_gcp import BigQueryResource\n\n @asset\n def my_table(bigquery: BigQueryResource):\n with bigquery.get_client() as client:\n client.query("SELECT * FROM my_dataset.my_table")\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "bigquery": BigQueryResource(project="my-project")\n }\n )\n """\n\n project: Optional[str] = Field(\n default=None,\n description=(\n "Project ID for the project which the client acts on behalf of. Will be passed when"\n " creating a dataset / job. If not passed, falls back to the default inferred from the"\n " environment."\n ),\n )\n\n location: Optional[str] = Field(\n default=None,\n description="Default location for jobs / datasets / tables.",\n )\n\n gcp_credentials: Optional[str] = Field(\n default=None,\n description=(\n "GCP authentication credentials. If provided, a temporary file will be created"\n " with the credentials and ``GOOGLE_APPLICATION_CREDENTIALS`` will be set to the"\n " temporary file. To avoid issues with newlines in the keys, you must base64"\n " encode the key. You can retrieve the base64 encoded key with this shell"\n " command: ``cat $GOOGLE_AUTH_CREDENTIALS | base64``"\n ),\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @contextmanager\n def get_client(self) -> Iterator[bigquery.Client]:\n """Context manager to create a BigQuery Client.\n\n Examples:\n .. code-block:: python\n\n from dagster import asset\n from dagster_gcp import BigQueryResource\n\n @asset\n def my_table(bigquery: BigQueryResource):\n with bigquery.get_client() as client:\n client.query("SELECT * FROM my_dataset.my_table")\n """\n if self.gcp_credentials:\n with setup_gcp_creds(self.gcp_credentials):\n yield bigquery.Client(project=self.project, location=self.location)\n\n else:\n yield bigquery.Client(project=self.project, location=self.location)\n\n def get_object_to_set_on_execution_context(self) -> Any:\n with self.get_client() as client:\n yield client
\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n config_schema=BigQueryResource.to_config_schema(),\n description="Dagster resource for connecting to BigQuery",\n)\ndef bigquery_resource(context):\n bq_resource = BigQueryResource.from_resource_context(context)\n with bq_resource.get_client() as client:\n yield client
\n
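For completeness, a sketch of the legacy ``bigquery_resource`` in an op-based job. The project and query are placeholders; the resource yields a ``google.cloud.bigquery.Client``, so standard client methods are available on ``context.resources.bigquery``.

.. code-block:: python

    from dagster import job, op
    from dagster_gcp import bigquery_resource

    @op(required_resource_keys={"bigquery"})
    def row_count(context) -> int:
        rows = context.resources.bigquery.query(
            "SELECT COUNT(*) AS n FROM `my-project.my_dataset.my_table`"
        ).result()
        return next(iter(rows)).n

    @job(resource_defs={"bigquery": bigquery_resource.configured({"project": "my-project"})})
    def count_job():
        row_count()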
", "current_page_name": "_modules/dagster_gcp/bigquery/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_gcp.bigquery.resources"}, "types": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_gcp.bigquery.types

\nimport re\nfrom enum import Enum as PyEnum\n\nfrom dagster import Enum, EnumValue\nfrom dagster._config import ConfigScalar, ConfigScalarKind, PostProcessingError\nfrom google.cloud.bigquery.job import (\n    CreateDisposition,\n    Encoding,\n    QueryPriority,\n    SchemaUpdateOption,\n    SourceFormat,\n    WriteDisposition,\n)\n\n\nclass BigQueryLoadSource(PyEnum):\n    DataFrame = "DATA_FRAME"\n    GCS = "GCS"\n    File = "FILE"\n\n\nBQCreateDisposition = Enum(\n    name="BQCreateDisposition",\n    enum_values=[\n        EnumValue(CreateDisposition.CREATE_IF_NEEDED),\n        EnumValue(CreateDisposition.CREATE_NEVER),\n    ],\n)\n\nBQPriority = Enum(\n    name="BQPriority",\n    enum_values=[EnumValue(QueryPriority.BATCH), EnumValue(QueryPriority.INTERACTIVE)],\n)\n\nBQSchemaUpdateOption = Enum(\n    name="BQSchemaUpdateOption",\n    enum_values=[\n        EnumValue(\n            SchemaUpdateOption.ALLOW_FIELD_ADDITION,\n            description="Allow adding a nullable field to the schema.",\n        ),\n        EnumValue(\n            SchemaUpdateOption.ALLOW_FIELD_RELAXATION,\n            description="Allow relaxing a required field in the original schema to nullable.",\n        ),\n    ],\n)\n\nBQWriteDisposition = Enum(\n    name="BQWriteDisposition",\n    enum_values=[\n        EnumValue(WriteDisposition.WRITE_APPEND),\n        EnumValue(WriteDisposition.WRITE_EMPTY),\n        EnumValue(WriteDisposition.WRITE_TRUNCATE),\n    ],\n)\n\nBQEncoding = Enum(\n    name="BQEncoding", enum_values=[EnumValue(Encoding.ISO_8859_1), EnumValue(Encoding.UTF_8)]\n)\n\nBQSourceFormat = Enum(\n    name="BQSourceFormat",\n    enum_values=[\n        EnumValue(SourceFormat.AVRO),\n        EnumValue(SourceFormat.CSV),\n        EnumValue(SourceFormat.DATASTORE_BACKUP),\n        EnumValue(SourceFormat.NEWLINE_DELIMITED_JSON),\n        EnumValue(SourceFormat.ORC),\n        EnumValue(SourceFormat.PARQUET),\n    ],\n)\n\n\n# Project names are permitted to have alphanumeric, dashes and underscores, up to 1024 characters.\nRE_PROJECT = r"[\\w\\d\\-\\_]{1,1024}"\n\n# Datasets and tables are permitted to have alphanumeric or underscores, no dashes allowed, up to\n# 1024 characters\nRE_DS_TABLE = r"[\\w\\d\\_]{1,1024}"\n\n# BigQuery supports writes directly to date partitions with the syntax foo.bar$20190101\nRE_PARTITION_SUFFIX = r"(\\$\\d{8})?"\n\n\ndef _is_valid_dataset(config_value):\n    """Datasets must be of form "project.dataset" or "dataset"."""\n    return re.match(\n        # regex matches: project.dataset -- OR -- dataset\n        r"^" + RE_PROJECT + r"\\." + RE_DS_TABLE + r"$|^" + RE_DS_TABLE + r"$",\n        config_value,\n    )\n\n\ndef _is_valid_table(config_value):\n    """Tables must be of form "project.dataset.table" or "dataset.table" with optional\n    date-partition suffix.\n    """\n    return re.match(\n        r"^"\n        + RE_PROJECT  #          project\n        + r"\\."  #               .\n        + RE_DS_TABLE  #         dataset\n        + r"\\."  #               .\n        + RE_DS_TABLE  #         table\n        + RE_PARTITION_SUFFIX  # date partition suffix\n        + r"$|^"  #              -- OR --\n        + RE_DS_TABLE  #         dataset\n        + r"\\."  
#               .\n        + RE_DS_TABLE  #         table\n        + RE_PARTITION_SUFFIX  # date partition suffix\n        + r"$",\n        config_value,\n    )\n\n\nclass _Dataset(ConfigScalar):\n    def __init__(self):\n        super(_Dataset, self).__init__(\n            key=type(self).__name__,\n            given_name=type(self).__name__,\n            scalar_kind=ConfigScalarKind.STRING,\n        )\n\n    def post_process(self, value):\n        if not _is_valid_dataset(value):\n            raise PostProcessingError('Datasets must be of the form "project.dataset" or "dataset"')\n        return value\n\n\nclass _Table(ConfigScalar):\n    def __init__(self):\n        super(_Table, self).__init__(\n            key=type(self).__name__,\n            given_name=type(self).__name__,\n            scalar_kind=ConfigScalarKind.STRING,\n        )\n\n    def post_process(self, value):\n        if not _is_valid_table(value):\n            raise PostProcessingError(\n                'Tables must be of the form "project.dataset.table" or "dataset.table" '\n                "with optional date-partition suffix"\n            )\n\n        return value\n\n\n# https://github.com/dagster-io/dagster/issues/1971\nTable = _Table()\nDataset = _Dataset()\n\n\n
[docs]class BigQueryError(Exception):\n pass
\n
", "current_page_name": "_modules/dagster_gcp/bigquery/types", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_gcp.bigquery.types"}}, "dataproc": {"ops": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_gcp.dataproc.ops

\nfrom typing import Any, Dict\n\nfrom dagster import (\n    Bool,\n    Config,\n    Field as DagsterField,\n    Int,\n    op,\n)\nfrom dagster._seven import json\nfrom pydantic import Field\n\nfrom .configs import define_dataproc_submit_job_config\nfrom .resources import TWENTY_MINUTES, DataprocResource\n\n# maintain the old config schema because of the nested job_config schema\nDATAPROC_CONFIG_SCHEMA = {\n    "job_timeout_in_seconds": DagsterField(\n        Int,\n        description="""Optional. Maximum time in seconds to wait for the job being\n                    completed. Default is set to 1200 seconds (20 minutes).\n                    """,\n        is_required=False,\n        default_value=TWENTY_MINUTES,\n    ),\n    "job_config": define_dataproc_submit_job_config(),\n    "job_scoped_cluster": DagsterField(\n        Bool,\n        description="whether to create a cluster or use an existing cluster",\n        is_required=False,\n        default_value=True,\n    ),\n}\n\n\nclass DataprocOpConfig(Config):\n    job_timeout_in_seconds: int = Field(\n        default=TWENTY_MINUTES,\n        description=(\n            "Maximum time in seconds to wait for the job being completed. Default is set to 1200"\n            " seconds (20 minutes)."\n        ),\n    )\n    job_scoped_cluster: bool = Field(\n        default=True,\n        description="Whether to create a cluster or use an existing cluster. Defaults to True.",\n    )\n    project_id: str = Field(\n        description=(\n            "Required. Project ID for the project which the client acts on behalf of. Will be"\n            " passed when creating a dataset/job."\n        )\n    )\n    region: str = Field(description="The GCP region.")\n    job_config: Dict[str, Any] = Field(\n        description="Python dictionary containing configuration for the Dataproc Job."\n    )\n\n\ndef _dataproc_compute(context):\n    job_config = context.op_config["job_config"]\n    job_timeout = context.op_config["job_timeout_in_seconds"]\n\n    context.log.info(\n        "submitting job with config: %s and timeout of: %d seconds"\n        % (str(json.dumps(job_config)), job_timeout)\n    )\n\n    if context.op_config["job_scoped_cluster"]:\n        # Cluster context manager, creates and then deletes cluster\n        with context.resources.dataproc.cluster_context_manager() as cluster:\n            # Submit the job specified by this solid to the cluster defined by the associated resource\n            result = cluster.submit_job(job_config)\n\n            job_id = result["reference"]["jobId"]\n            context.log.info(f"Submitted job ID {job_id}")\n            cluster.wait_for_job(job_id, wait_timeout=job_timeout)\n\n    else:\n        # Submit to an existing cluster\n        # Submit the job specified by this solid to the cluster defined by the associated resource\n        result = context.resources.dataproc.submit_job(job_config)\n\n        job_id = result["reference"]["jobId"]\n        context.log.info(f"Submitted job ID {job_id}")\n        context.resources.dataproc.wait_for_job(job_id, wait_timeout=job_timeout)\n\n\n@op(required_resource_keys={"dataproc"}, config_schema=DATAPROC_CONFIG_SCHEMA)\ndef dataproc_solid(context):\n    return _dataproc_compute(context)\n\n\n
[docs]@op(required_resource_keys={"dataproc"}, config_schema=DATAPROC_CONFIG_SCHEMA)\ndef dataproc_op(context):\n return _dataproc_compute(context)
\n\n\n@op\ndef configurable_dataproc_op(context, dataproc: DataprocResource, config: DataprocOpConfig):\n job_config = {"projectId": config.project_id, "region": config.region, "job": config.job_config}\n job_timeout = config.job_timeout_in_seconds\n\n context.log.info(\n "submitting job with config: %s and timeout of: %d seconds"\n % (str(json.dumps(job_config)), job_timeout)\n )\n\n dataproc_client = dataproc.get_client()\n\n if config.job_scoped_cluster:\n # Cluster context manager, creates and then deletes cluster\n with dataproc_client.cluster_context_manager() as cluster:\n # Submit the job specified by this solid to the cluster defined by the associated resource\n result = cluster.submit_job(job_config)\n\n job_id = result["reference"]["jobId"]\n context.log.info(f"Submitted job ID {job_id}")\n cluster.wait_for_job(job_id, wait_timeout=job_timeout)\n\n else:\n # Submit to an existing cluster\n # Submit the job specified by this solid to the cluster defined by the associated resource\n result = dataproc_client.submit_job(job_config)\n\n job_id = result["reference"]["jobId"]\n context.log.info(f"Submitted job ID {job_id}")\n dataproc_client.wait_for_job(job_id, wait_timeout=job_timeout)\n
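A sketch of invoking ``configurable_dataproc_op``, assuming it and ``DataprocResource`` are importable from ``dagster_gcp``. Project, region, cluster, and job values are placeholders; the required run-config fields mirror ``DataprocOpConfig`` above, and the job-config field names follow the Dataproc Job REST API.

.. code-block:: python

    from dagster import job
    from dagster_gcp import DataprocResource, configurable_dataproc_op

    @job(
        resource_defs={
            "dataproc": DataprocResource(
                project_id="my-project",
                region="us-central1",
                cluster_name="my-cluster",
            )
        }
    )
    def dataproc_job():
        configurable_dataproc_op()

    # Example run config (see DataprocOpConfig for the full schema):
    #
    # ops:
    #   configurable_dataproc_op:
    #     config:
    #       project_id: my-project
    #       region: us-central1
    #       job_scoped_cluster: false   # submit to the existing cluster
    #       job_config:
    #         reference: {projectId: my-project}
    #         placement: {clusterName: my-cluster}
    #         pysparkJob: {mainPythonFileUri: gs://my-bucket/main.py}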
", "current_page_name": "_modules/dagster_gcp/dataproc/ops", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_gcp.dataproc.ops"}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_gcp.dataproc.resources

\nimport json\nimport time\nfrom contextlib import contextmanager\nfrom typing import Any, Dict, Mapping, Optional\n\nimport dagster._check as check\nimport yaml\nfrom dagster import ConfigurableResource, IAttachDifferentObjectToOpContext, resource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom googleapiclient.discovery import build\nfrom oauth2client.client import GoogleCredentials\nfrom pydantic import Field\n\nfrom .configs import define_dataproc_create_cluster_config\nfrom .types import DataprocError\n\nTWENTY_MINUTES = 20 * 60\nDEFAULT_ITER_TIME_SEC = 5\n\n\nclass DataprocClient:\n    """Builds a client to the dataproc API."""\n\n    def __init__(self, config):\n        # Use Application Default Credentials to check the\n        # GOOGLE_APPLICATION_CREDENTIALS environment variable\n        # for the location of the service account key file.\n        credentials = GoogleCredentials.get_application_default()\n\n        # See https://github.com/googleapis/google-api-python-client/issues/299 for the\n        # cache_discovery=False configuration below\n        self.dataproc = build("dataproc", "v1", credentials=credentials, cache_discovery=False)\n\n        self.config = config\n\n        (self.project_id, self.region, self.cluster_name, self.cluster_config) = (\n            self.config.get(k) for k in ("projectId", "region", "clusterName", "cluster_config")\n        )\n\n    @property\n    def dataproc_clusters(self):\n        return (\n            # Google APIs dynamically genned, so pylint pukes\n            self.dataproc.projects()\n            .regions()\n            .clusters()\n        )\n\n    @property\n    def dataproc_jobs(self):\n        return (\n            # Google APIs dynamically genned, so pylint pukes\n            self.dataproc.projects()\n            .regions()\n            .jobs()\n        )\n\n    def create_cluster(self):\n        (\n            self.dataproc_clusters.create(\n                projectId=self.project_id,\n                region=self.region,\n                body={\n                    "projectId": self.project_id,\n                    "clusterName": self.cluster_name,\n                    "config": self.cluster_config,\n                },\n            ).execute()\n        )\n\n        def iter_fn():\n            # TODO: Add logging\n            # See: https://bit.ly/2UW5JaN\n            cluster = self.get_cluster()\n            return cluster["status"]["state"] in {"RUNNING", "UPDATING"}\n\n        done = DataprocClient._iter_and_sleep_until_ready(iter_fn)\n        if not done:\n            cluster = self.get_cluster()\n            raise DataprocError(\n                "Could not provision cluster -- status: %s" % str(cluster["status"])\n            )\n\n    def get_cluster(self):\n        return self.dataproc_clusters.get(\n            projectId=self.project_id, region=self.region, clusterName=self.cluster_name\n        ).execute()\n\n    def delete_cluster(self):\n        return self.dataproc_clusters.delete(\n            projectId=self.project_id, region=self.region, clusterName=self.cluster_name\n        ).execute()\n\n    def submit_job(self, job_details):\n        return self.dataproc_jobs.submit(\n            projectId=self.project_id, region=self.region, body=job_details\n        ).execute()\n\n    def get_job(self, job_id):\n        return self.dataproc_jobs.get(\n            projectId=self.project_id, region=self.region, jobId=job_id\n        ).execute()\n\n    def wait_for_job(self, job_id, 
wait_timeout=TWENTY_MINUTES):\n        """This method polls job status every 5 seconds."""\n\n        # TODO: Add logging here print('Waiting for job ID {} to finish...'.format(job_id))\n        def iter_fn():\n            # See: https://bit.ly/2Lg2tHr\n            result = self.get_job(job_id)\n\n            # Handle exceptions\n            if result["status"]["state"] in {"CANCELLED", "ERROR"}:\n                raise DataprocError("Job error: %s" % str(result["status"]))\n\n            if result["status"]["state"] == "DONE":\n                return True\n\n            return False\n\n        done = DataprocClient._iter_and_sleep_until_ready(iter_fn, max_wait_time_sec=wait_timeout)\n        if not done:\n            job = self.get_job(job_id)\n            raise DataprocError("Job run timed out: %s" % str(job["status"]))\n\n    @staticmethod\n    def _iter_and_sleep_until_ready(\n        callable_fn, max_wait_time_sec=TWENTY_MINUTES, iter_time=DEFAULT_ITER_TIME_SEC\n    ):\n        """Iterates and sleeps until callable_fn returns true."""\n        # Wait for cluster ready state\n        ready, curr_iter = False, 0\n        max_iter = max_wait_time_sec / iter_time\n        while not ready and curr_iter < max_iter:\n            ready = callable_fn()\n            time.sleep(iter_time)\n            curr_iter += 1\n\n        # Will return false if ran up to max_iter without success\n        return ready\n\n    @contextmanager\n    def cluster_context_manager(self):\n        """Context manager allowing execution with a dataproc cluster.\n\n        Example:\n        .. code-block::\n            with context.resources.dataproc.cluster as cluster:\n                # do stuff...\n        """\n        self.create_cluster()\n        try:\n            yield self\n        finally:\n            self.delete_cluster()\n\n\n
[docs]class DataprocResource(ConfigurableResource, IAttachDifferentObjectToOpContext):\n """Resource for connecting to a Dataproc cluster.\n\n Example:\n .. code-block::\n\n @asset\n def my_asset(dataproc: DataprocResource):\n with dataproc.get_client() as client:\n # client is a dagster_gcp.DataprocClient\n ...\n """\n\n project_id: str = Field(\n description=(\n "Required. Project ID for the project which the client acts on behalf of. Will be"\n " passed when creating a dataset/job."\n )\n )\n region: str = Field(description="The GCP region.")\n cluster_name: str = Field(\n description=(\n "Required. The cluster name. Cluster names within a project must be unique. Names of"\n " deleted clusters can be reused."\n )\n )\n cluster_config_yaml_path: Optional[str] = Field(\n default=None,\n description=(\n "Full path to a YAML file containing cluster configuration. See"\n " https://cloud.google.com/dataproc/docs/reference/rest/v1/ClusterConfig for"\n " configuration options. Only one of cluster_config_yaml_path,"\n " cluster_config_json_path, or cluster_config_dict may be provided."\n ),\n )\n cluster_config_json_path: Optional[str] = Field(\n default=None,\n description=(\n "Full path to a JSON file containing cluster configuration. See"\n " https://cloud.google.com/dataproc/docs/reference/rest/v1/ClusterConfig for"\n " configuration options. Only one of cluster_config_yaml_path,"\n " cluster_config_json_path, or cluster_config_dict may be provided."\n ),\n )\n cluster_config_dict: Optional[Dict[str, Any]] = Field(\n default=None,\n description=(\n "Python dictionary containing cluster configuration. See"\n " https://cloud.google.com/dataproc/docs/reference/rest/v1/ClusterConfig for"\n " configuration options. Only one of cluster_config_yaml_path,"\n " cluster_config_json_path, or cluster_config_dict may be provided."\n ),\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def _read_yaml_config(self, path: str) -> Mapping[str, Any]:\n with open(path, "r", encoding="utf8") as f:\n return yaml.safe_load(f)\n\n def _read_json_config(self, path: str) -> Mapping[str, Any]:\n with open(path, "r", encoding="utf8") as f:\n return json.load(f)\n\n def _get_cluster_config(self) -> Optional[Mapping[str, Any]]:\n methods = 0\n methods += 1 if self.cluster_config_dict is not None else 0\n methods += 1 if self.cluster_config_json_path is not None else 0\n methods += 1 if self.cluster_config_yaml_path is not None else 0\n\n # ensure that at most 1 method is provided\n check.invariant(\n methods <= 1,\n "Dataproc Resource: Incorrect config: Cannot provide cluster config multiple ways."\n " Choose one of cluster_config_dict, cluster_config_json_path, or"\n " cluster_config_yaml_path",\n )\n\n cluster_config = None\n if self.cluster_config_json_path:\n cluster_config = self._read_json_config(self.cluster_config_json_path)\n elif self.cluster_config_yaml_path:\n cluster_config = self._read_yaml_config(self.cluster_config_yaml_path)\n elif self.cluster_config_dict:\n cluster_config = self.cluster_config_dict\n\n return cluster_config\n\n def get_client(self) -> DataprocClient:\n cluster_config = self._get_cluster_config()\n\n client_config_dict = {\n "projectId": self.project_id,\n "region": self.region,\n "clusterName": self.cluster_name,\n "cluster_config": cluster_config,\n }\n\n return DataprocClient(config=client_config_dict)\n\n def get_object_to_set_on_execution_context(self) -> Any:\n return self.get_client()
\n\n\n
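A sketch of supplying the cluster configuration inline via ``cluster_config_dict``. The field names follow the Dataproc ``ClusterConfig`` REST API and all values are placeholders.

.. code-block:: python

    from dagster import Definitions, asset
    from dagster_gcp import DataprocResource

    dataproc = DataprocResource(
        project_id="my-project",
        region="us-central1",
        cluster_name="my-cluster",
        cluster_config_dict={
            "masterConfig": {"numInstances": 1, "machineTypeUri": "n1-standard-2"},
            "workerConfig": {"numInstances": 2, "machineTypeUri": "n1-standard-2"},
        },
    )

    @asset
    def dataproc_job_output(dataproc: DataprocResource):
        client = dataproc.get_client()
        with client.cluster_context_manager() as cluster:
            # submit work against the short-lived cluster here
            ...

    defs = Definitions(assets=[dataproc_job_output], resources={"dataproc": dataproc})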
[docs]@dagster_maintained_resource\n@resource(\n config_schema=define_dataproc_create_cluster_config(),\n description="Manage a Dataproc cluster resource",\n)\ndef dataproc_resource(context):\n return DataprocClient(context.resource_config)
\n
", "current_page_name": "_modules/dagster_gcp/dataproc/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_gcp.dataproc.resources"}}, "gcs": {"compute_log_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_gcp.gcs.compute_log_manager

\nimport datetime\nimport json\nimport os\nfrom typing import Any, Mapping, Optional, Sequence\n\nimport dagster._seven as seven\nfrom dagster import (\n    Field,\n    StringSource,\n    _check as check,\n)\nfrom dagster._config.config_type import Noneable\nfrom dagster._core.storage.cloud_storage_compute_log_manager import (\n    CloudStorageComputeLogManager,\n    PollingComputeLogSubscriptionManager,\n)\nfrom dagster._core.storage.compute_log_manager import ComputeIOType\nfrom dagster._core.storage.local_compute_log_manager import (\n    IO_TYPE_EXTENSION,\n    LocalComputeLogManager,\n)\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData\nfrom dagster._utils import ensure_dir, ensure_file\nfrom google.cloud import storage\nfrom typing_extensions import Self\n\n\n
[docs]class GCSComputeLogManager(CloudStorageComputeLogManager, ConfigurableClass):\n """Logs op compute function stdout and stderr to GCS.\n\n Users should not instantiate this class directly. Instead, use a YAML block in ``dagster.yaml``\n such as the following:\n\n .. code-block:: YAML\n\n compute_logs:\n module: dagster_gcp.gcs.compute_log_manager\n class: GCSComputeLogManager\n config:\n bucket: "mycorp-dagster-compute-logs"\n local_dir: "/tmp/cool"\n prefix: "dagster-test-"\n upload_interval: 30\n\n There are more configuration examples in the instance documentation guide: https://docs.dagster.io/deployment/dagster-instance#compute-log-storage\n\n Args:\n bucket (str): The name of the GCS bucket to which to log.\n local_dir (Optional[str]): Path to the local directory in which to stage logs. Default:\n ``dagster._seven.get_system_temp_directory()``.\n prefix (Optional[str]): Prefix for the log file keys.\n json_credentials_envvar (Optional[str]): Environment variable that contains the JSON with a private key\n and other credentials information. If this is set, ``GOOGLE_APPLICATION_CREDENTIALS`` will be ignored.\n Can be used when the private key cannot be used as a file.\n upload_interval: (Optional[int]): Interval in seconds to upload partial log files to GCS. By default, will only upload when the capture is complete.\n inst_data (Optional[ConfigurableClassData]): Serializable representation of the compute\n log manager when instantiated from config.\n """\n\n def __init__(\n self,\n bucket,\n local_dir=None,\n inst_data: Optional[ConfigurableClassData] = None,\n prefix="dagster",\n json_credentials_envvar=None,\n upload_interval=None,\n ):\n self._bucket_name = check.str_param(bucket, "bucket")\n self._prefix = self._clean_prefix(check.str_param(prefix, "prefix"))\n\n if json_credentials_envvar:\n json_info_str = os.environ.get(json_credentials_envvar)\n credentials_info = json.loads(json_info_str) # type: ignore # (possible none)\n self._bucket = (\n storage.Client()\n .from_service_account_info(credentials_info)\n .bucket(self._bucket_name)\n )\n else:\n self._bucket = storage.Client().bucket(self._bucket_name)\n\n # Check if the bucket exists\n check.invariant(self._bucket.exists())\n\n # proxy calls to local compute log manager (for subscriptions, etc)\n if not local_dir:\n local_dir = seven.get_system_temp_directory()\n\n self._upload_interval = check.opt_int_param(upload_interval, "upload_interval")\n self._local_manager = LocalComputeLogManager(local_dir)\n self._subscription_manager = PollingComputeLogSubscriptionManager(self)\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n\n @property\n def inst_data(self):\n return self._inst_data\n\n @classmethod\n def config_type(cls):\n return {\n "bucket": StringSource,\n "local_dir": Field(StringSource, is_required=False),\n "prefix": Field(StringSource, is_required=False, default_value="dagster"),\n "json_credentials_envvar": Field(StringSource, is_required=False),\n "upload_interval": Field(Noneable(int), is_required=False, default_value=None),\n }\n\n @classmethod\n def from_config_value(\n cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any]\n ) -> Self:\n return GCSComputeLogManager(inst_data=inst_data, **config_value)\n\n @property\n def local_manager(self) -> LocalComputeLogManager:\n return self._local_manager\n\n @property\n def upload_interval(self) -> Optional[int]:\n return self._upload_interval if self._upload_interval else None\n\n def _clean_prefix(self, 
prefix):\n parts = prefix.split("/")\n return "/".join([part for part in parts if part])\n\n def _gcs_key(self, log_key, io_type, partial=False):\n check.inst_param(io_type, "io_type", ComputeIOType)\n extension = IO_TYPE_EXTENSION[io_type]\n [*namespace, filebase] = log_key\n filename = f"{filebase}.{extension}"\n if partial:\n filename = f"{filename}.partial"\n paths = [self._prefix, "storage", *namespace, filename]\n return "/".join(paths)\n\n def delete_logs(\n self, log_key: Optional[Sequence[str]] = None, prefix: Optional[Sequence[str]] = None\n ):\n self._local_manager.delete_logs(log_key, prefix)\n if log_key:\n gcs_keys_to_remove = [\n self._gcs_key(log_key, ComputeIOType.STDOUT),\n self._gcs_key(log_key, ComputeIOType.STDERR),\n self._gcs_key(log_key, ComputeIOType.STDOUT, partial=True),\n self._gcs_key(log_key, ComputeIOType.STDERR, partial=True),\n ]\n # if the blob doesn't exist, do nothing instead of raising a not found exception\n self._bucket.delete_blobs(gcs_keys_to_remove, on_error=lambda _: None)\n elif prefix:\n # add the trailing '/' to make sure that ['a'] does not match ['apple']\n delete_prefix = "/".join([self._prefix, "storage", *prefix, ""])\n to_delete = self._bucket.list_blobs(prefix=delete_prefix)\n self._bucket.delete_blobs(list(to_delete))\n else:\n check.failed("Must pass in either `log_key` or `prefix` argument to delete_logs")\n\n def download_url_for_type(self, log_key: Sequence[str], io_type: ComputeIOType):\n if not self.is_capture_complete(log_key):\n return None\n\n gcs_key = self._gcs_key(log_key, io_type)\n try:\n return self._bucket.blob(gcs_key).generate_signed_url(\n expiration=datetime.timedelta(minutes=60)\n )\n except:\n # fallback to the local download url if the current credentials are insufficient to create\n # signed urls\n return self.local_manager.get_captured_log_download_url(log_key, io_type)\n\n def display_path_for_type(self, log_key: Sequence[str], io_type: ComputeIOType):\n if not self.is_capture_complete(log_key):\n return self.local_manager.get_captured_local_path(log_key, IO_TYPE_EXTENSION[io_type])\n gcs_key = self._gcs_key(log_key, io_type)\n return f"gs://{self._bucket_name}/{gcs_key}"\n\n def cloud_storage_has_logs(\n self, log_key: Sequence[str], io_type: ComputeIOType, partial: bool = False\n ) -> bool:\n gcs_key = self._gcs_key(log_key, io_type, partial)\n return self._bucket.blob(gcs_key).exists()\n\n def upload_to_cloud_storage(\n self, log_key: Sequence[str], io_type: ComputeIOType, partial=False\n ):\n path = self.local_manager.get_captured_local_path(log_key, IO_TYPE_EXTENSION[io_type])\n ensure_file(path)\n\n if partial and os.stat(path).st_size == 0:\n return\n\n gcs_key = self._gcs_key(log_key, io_type, partial=partial)\n with open(path, "rb") as data:\n self._bucket.blob(gcs_key).upload_from_file(data)\n\n def download_from_cloud_storage(\n self, log_key: Sequence[str], io_type: ComputeIOType, partial=False\n ):\n path = self.local_manager.get_captured_local_path(\n log_key, IO_TYPE_EXTENSION[io_type], partial=partial\n )\n ensure_dir(os.path.dirname(path))\n\n gcs_key = self._gcs_key(log_key, io_type, partial=partial)\n with open(path, "wb") as fileobj:\n self._bucket.blob(gcs_key).download_to_file(fileobj)\n\n def on_subscribe(self, subscription):\n self._subscription_manager.add_subscription(subscription)\n\n def on_unsubscribe(self, subscription):\n self._subscription_manager.remove_subscription(subscription)\n\n def dispose(self):\n self._subscription_manager.dispose()\n self._local_manager.dispose()
\n
", "current_page_name": "_modules/dagster_gcp/gcs/compute_log_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_gcp.gcs.compute_log_manager"}, "file_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_gcp.gcs.file_manager

\nimport io\nimport uuid\nfrom contextlib import contextmanager\nfrom typing import Optional\n\nimport dagster._check as check\nfrom dagster._core.storage.file_manager import (\n    FileHandle,\n    FileManager,\n    TempfileManager,\n    check_file_like_obj,\n)\nfrom google.cloud import storage\n\n\n
[docs]class GCSFileHandle(FileHandle):\n """A reference to a file on GCS."""\n\n def __init__(self, gcs_bucket: str, gcs_key: str):\n self._gcs_bucket = check.str_param(gcs_bucket, "gcs_bucket")\n self._gcs_key = check.str_param(gcs_key, "gcs_key")\n\n @property\n def gcs_bucket(self) -> str:\n """str: The name of the GCS bucket."""\n return self._gcs_bucket\n\n @property\n def gcs_key(self) -> str:\n """str: The GCS key."""\n return self._gcs_key\n\n @property\n def path_desc(self) -> str:\n """str: The file's GCS URL."""\n return self.gcs_path\n\n @property\n def gcs_path(self) -> str:\n """str: The file's GCS URL."""\n return f"gs://{self.gcs_bucket}/{self.gcs_key}"
\n\n\nclass GCSFileManager(FileManager):\n def __init__(self, client, gcs_bucket, gcs_base_key):\n self._client = check.inst_param(client, "client", storage.client.Client)\n self._gcs_bucket = check.str_param(gcs_bucket, "gcs_bucket")\n self._gcs_base_key = check.str_param(gcs_base_key, "gcs_base_key")\n self._local_handle_cache = {}\n self._temp_file_manager = TempfileManager()\n\n def copy_handle_to_local_temp(self, file_handle):\n self._download_if_not_cached(file_handle)\n return self._get_local_path(file_handle)\n\n def _download_if_not_cached(self, file_handle):\n if not self._file_handle_cached(file_handle):\n # instigate download\n temp_file_obj = self._temp_file_manager.tempfile()\n temp_name = temp_file_obj.name\n bucket_obj = self._client.bucket(file_handle.gcs_bucket)\n bucket_obj.blob(file_handle.gcs_key).download_to_file(temp_file_obj)\n self._local_handle_cache[file_handle.gcs_path] = temp_name\n\n return file_handle\n\n @contextmanager\n def read(self, file_handle, mode="rb"):\n check.inst_param(file_handle, "file_handle", GCSFileHandle)\n check.str_param(mode, "mode")\n check.param_invariant(mode in {"r", "rb"}, "mode")\n\n self._download_if_not_cached(file_handle)\n\n encoding = None if mode == "rb" else "utf-8"\n with open(self._get_local_path(file_handle), mode, encoding=encoding) as file_obj:\n yield file_obj\n\n def _file_handle_cached(self, file_handle):\n return file_handle.gcs_path in self._local_handle_cache\n\n def _get_local_path(self, file_handle):\n return self._local_handle_cache[file_handle.gcs_path]\n\n def read_data(self, file_handle):\n with self.read(file_handle, mode="rb") as file_obj:\n return file_obj.read()\n\n def write_data(self, data, ext=None, key: Optional[str] = None):\n key = check.opt_str_param(key, "key", default=str(uuid.uuid4()))\n check.inst_param(data, "data", bytes)\n return self.write(io.BytesIO(data), mode="wb", key=key, ext=ext)\n\n def write(self, file_obj, mode="wb", ext=None, key: Optional[str] = None):\n key = check.opt_str_param(key, "key", default=str(uuid.uuid4()))\n check_file_like_obj(file_obj)\n gcs_key = self.get_full_key(key + (("." + ext) if ext is not None else ""))\n bucket_obj = self._client.bucket(self._gcs_bucket)\n bucket_obj.blob(gcs_key).upload_from_file(file_obj)\n return GCSFileHandle(self._gcs_bucket, gcs_key)\n\n def get_full_key(self, file_key):\n return f"{self._gcs_base_key}/{file_key}"\n\n def delete_local_temp(self):\n self._temp_file_manager.close()\n
", "current_page_name": "_modules/dagster_gcp/gcs/file_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_gcp.gcs.file_manager"}, "io_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_gcp.gcs.io_manager

\nimport pickle\nfrom typing import Any, Optional, Union\n\nfrom dagster import (\n    ConfigurableIOManager,\n    InputContext,\n    OutputContext,\n    ResourceDependency,\n    _check as check,\n    io_manager,\n)\nfrom dagster._annotations import deprecated\nfrom dagster._core.storage.io_manager import dagster_maintained_io_manager\nfrom dagster._core.storage.upath_io_manager import UPathIOManager\nfrom dagster._utils import PICKLE_PROTOCOL\nfrom dagster._utils.backoff import backoff\nfrom dagster._utils.cached_method import cached_method\nfrom google.api_core.exceptions import Forbidden, ServiceUnavailable, TooManyRequests\nfrom google.cloud import storage\nfrom pydantic import Field\nfrom upath import UPath\n\nfrom .resources import GCSResource\n\nDEFAULT_LEASE_DURATION = 60  # One minute\n\n\nclass PickledObjectGCSIOManager(UPathIOManager):\n    def __init__(self, bucket: str, client: Optional[Any] = None, prefix: str = "dagster"):\n        self.bucket = check.str_param(bucket, "bucket")\n        self.client = client or storage.Client()\n        self.bucket_obj = self.client.bucket(bucket)\n        check.invariant(self.bucket_obj.exists())\n        self.prefix = check.str_param(prefix, "prefix")\n        super().__init__(base_path=UPath(self.prefix))\n\n    def unlink(self, path: UPath) -> None:\n        key = str(path)\n        if self.bucket_obj.blob(key).exists():\n            self.bucket_obj.blob(key).delete()\n\n    def path_exists(self, path: UPath) -> bool:\n        key = str(path)\n        blobs = self.client.list_blobs(self.bucket, prefix=key)\n        return len(list(blobs)) > 0\n\n    def get_op_output_relative_path(self, context: Union[InputContext, OutputContext]) -> UPath:\n        parts = context.get_identifier()\n        run_id = parts[0]\n        output_parts = parts[1:]\n        return UPath("storage", run_id, "files", *output_parts)\n\n    def get_loading_input_log_message(self, path: UPath) -> str:\n        return f"Loading GCS object from: {self._uri_for_path(path)}"\n\n    def get_writing_output_log_message(self, path: UPath) -> str:\n        return f"Writing GCS object at: {self._uri_for_path(path)}"\n\n    def _uri_for_path(self, path: UPath) -> str:\n        return f"gs://{self.bucket}/{path}"\n\n    def make_directory(self, path: UPath) -> None:\n        # It is not necessary to create directories in GCP\n        return None\n\n    def load_from_path(self, context: InputContext, path: UPath) -> Any:\n        bytes_obj = self.bucket_obj.blob(str(path)).download_as_bytes()\n        return pickle.loads(bytes_obj)\n\n    def dump_to_path(self, context: OutputContext, obj: Any, path: UPath) -> None:\n        if self.path_exists(path):\n            context.log.warning(f"Removing existing GCS key: {path}")\n            self.unlink(path)\n\n        pickled_obj = pickle.dumps(obj, PICKLE_PROTOCOL)\n\n        backoff(\n            self.bucket_obj.blob(str(path)).upload_from_string,\n            args=[pickled_obj],\n            retry_on=(TooManyRequests, Forbidden, ServiceUnavailable),\n        )\n\n\n
[docs]class GCSPickleIOManager(ConfigurableIOManager):\n """Persistent IO manager using GCS for storage.\n\n Serializes objects via pickling. Suitable for objects storage for distributed executors, so long\n as each execution node has network connectivity and credentials for GCS and the backing bucket.\n\n Assigns each op output to a unique filepath containing run ID, step key, and output name.\n Assigns each asset to a single filesystem path, at ``<base_dir>/<asset_key>``. If the asset key\n has multiple components, the final component is used as the name of the file, and the preceding\n components as parent directories under the base_dir.\n\n Subsequent materializations of an asset will overwrite previous materializations of that asset.\n With a base directory of ``/my/base/path``, an asset with key\n ``AssetKey(["one", "two", "three"])`` would be stored in a file called ``three`` in a directory\n with path ``/my/base/path/one/two/``.\n\n Example usage:\n\n 1. Attach this IO manager to a set of assets.\n\n .. code-block:: python\n\n from dagster import asset, Definitions\n from dagster_gcp.gcs import GCSPickleIOManager, GCSResource\n\n @asset\n def asset1():\n # create df ...\n return df\n\n @asset\n def asset2(asset1):\n return asset1[:5]\n\n defs = Definitions(\n assets=[asset1, asset2],\n resources={\n "io_manager": GCSPickleIOManager(\n gcs_bucket="my-cool-bucket",\n gcs_prefix="my-cool-prefix"\n ),\n "gcs": GCSResource(project="my-cool-project")\n }\n )\n\n\n 2. Attach this IO manager to your job to make it available to your ops.\n\n .. code-block:: python\n\n from dagster import job\n from dagster_gcp.gcs import GCSPickleIOManager, GCSResource\n\n @job(\n resource_defs={\n "io_manager": GCSPickleIOManager(\n gcs=GCSResource(project="my-cool-project")\n gcs_bucket="my-cool-bucket",\n gcs_prefix="my-cool-prefix"\n ),\n }\n )\n def my_job():\n ...\n """\n\n gcs: ResourceDependency[GCSResource]\n gcs_bucket: str = Field(description="GCS bucket to store files")\n gcs_prefix: str = Field(default="dagster", description="Prefix to add to all file paths")\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @property\n @cached_method\n def _internal_io_manager(self) -> PickledObjectGCSIOManager:\n return PickledObjectGCSIOManager(\n bucket=self.gcs_bucket, client=self.gcs.get_client(), prefix=self.gcs_prefix\n )\n\n def load_input(self, context: InputContext) -> Any:\n return self._internal_io_manager.load_input(context)\n\n def handle_output(self, context: OutputContext, obj: Any) -> None:\n self._internal_io_manager.handle_output(context, obj)
\n\n\n
[docs]@deprecated(\n breaking_version="2.0",\n additional_warn_text="Please use GCSPickleIOManager instead.",\n)\nclass ConfigurablePickledObjectGCSIOManager(GCSPickleIOManager):\n """Renamed to GCSPickleIOManager. See GCSPickleIOManager for documentation."""\n\n pass
\n\n\n
[docs]@dagster_maintained_io_manager\n@io_manager(\n config_schema=GCSPickleIOManager.to_config_schema(),\n required_resource_keys={"gcs"},\n)\ndef gcs_pickle_io_manager(init_context):\n """Persistent IO manager using GCS for storage.\n\n Serializes objects via pickling. Suitable for objects storage for distributed executors, so long\n as each execution node has network connectivity and credentials for GCS and the backing bucket.\n\n Assigns each op output to a unique filepath containing run ID, step key, and output name.\n Assigns each asset to a single filesystem path, at ``<base_dir>/<asset_key>``. If the asset key\n has multiple components, the final component is used as the name of the file, and the preceding\n components as parent directories under the base_dir.\n\n Subsequent materializations of an asset will overwrite previous materializations of that asset.\n With a base directory of ``/my/base/path``, an asset with key\n ``AssetKey(["one", "two", "three"])`` would be stored in a file called ``three`` in a directory\n with path ``/my/base/path/one/two/``.\n\n Example usage:\n\n 1. Attach this IO manager to a set of assets.\n\n .. code-block:: python\n\n from dagster import Definitions, asset\n from dagster_gcp.gcs import gcs_pickle_io_manager, gcs_resource\n\n @asset\n def asset1():\n # create df ...\n return df\n\n @asset\n def asset2(asset1):\n return asset1[:5]\n\n defs = Definitions(\n assets=[asset1, asset2],\n resources={\n "io_manager": gcs_pickle_io_manager.configured(\n {"gcs_bucket": "my-cool-bucket", "gcs_prefix": "my-cool-prefix"}\n ),\n "gcs": gcs_resource.configured({"project": "my-cool-project"}),\n },\n )\n\n\n 2. Attach this IO manager to your job to make it available to your ops.\n\n .. code-block:: python\n\n from dagster import job\n from dagster_gcp.gcs import gcs_pickle_io_manager, gcs_resource\n\n @job(\n resource_defs={\n "io_manager": gcs_pickle_io_manager.configured(\n {"gcs_bucket": "my-cool-bucket", "gcs_prefix": "my-cool-prefix"}\n ),\n "gcs": gcs_resource.configured({"project": "my-cool-project"}),\n },\n )\n def my_job():\n ...\n """\n client = init_context.resources.gcs\n pickled_io_manager = PickledObjectGCSIOManager(\n bucket=init_context.resource_config["gcs_bucket"],\n client=client,\n prefix=init_context.resource_config["gcs_prefix"],\n )\n return pickled_io_manager
\n
", "current_page_name": "_modules/dagster_gcp/gcs/io_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_gcp.gcs.io_manager"}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_gcp.gcs.resources

\nfrom typing import Any, Optional\n\nfrom dagster import ConfigurableResource, IAttachDifferentObjectToOpContext, resource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom google.cloud import storage\nfrom pydantic import Field\n\nfrom .file_manager import GCSFileManager\n\n\n
[docs]class GCSResource(ConfigurableResource, IAttachDifferentObjectToOpContext):\n """Resource for interacting with Google Cloud Storage.\n\n Example:\n .. code-block::\n\n @asset\n def my_asset(gcs: GCSResource):\n with gcs.get_client() as client:\n # client is a google.cloud.storage.Client\n ...\n """\n\n project: Optional[str] = Field(default=None, description="Project name")\n\n def get_client(self) -> storage.Client:\n """Creates a GCS Client.\n\n Returns: google.cloud.storage.Client\n """\n return _gcs_client_from_config(project=self.project)\n\n def get_object_to_set_on_execution_context(self) -> Any:\n return self.get_client()
\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n config_schema=GCSResource.to_config_schema(),\n description="This resource provides a GCS client",\n)\ndef gcs_resource(init_context) -> storage.Client:\n return GCSResource.from_resource_context(init_context).get_client()
\n\n\n
[docs]class GCSFileManagerResource(ConfigurableResource, IAttachDifferentObjectToOpContext):\n """FileManager that provides abstract access to GCS."""\n\n project: Optional[str] = Field(default=None, description="Project name")\n gcs_bucket: str = Field(description="GCS bucket to store files")\n gcs_prefix: str = Field(default="dagster", description="Prefix to add to all file paths")\n\n def get_client(self) -> GCSFileManager:\n """Creates a :py:class:`~dagster_gcp.GCSFileManager` object that implements the\n :py:class:`~dagster._core.storage.file_manager.FileManager` API .\n\n Returns: GCSFileManager\n """\n gcs_client = _gcs_client_from_config(project=self.project)\n return GCSFileManager(\n client=gcs_client,\n gcs_bucket=self.gcs_bucket,\n gcs_base_key=self.gcs_prefix,\n )\n\n def get_object_to_set_on_execution_context(self) -> Any:\n return self.get_client()
\n\n\n
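A sketch of writing a file through the Pythonic file-manager resource above, assuming ``GCSFileManagerResource`` is exported from ``dagster_gcp.gcs`` like the other GCS resources. Bucket, prefix, and project are placeholders; ``write_data`` and ``gcs_path`` come from the ``GCSFileManager`` and ``GCSFileHandle`` classes documented earlier.

.. code-block:: python

    from dagster import Definitions, asset
    from dagster_gcp.gcs import GCSFileManagerResource

    @asset
    def uploaded_report(file_manager: GCSFileManagerResource) -> str:
        handle = file_manager.get_client().write_data(b"hello, GCS")
        return handle.gcs_path

    defs = Definitions(
        assets=[uploaded_report],
        resources={
            "file_manager": GCSFileManagerResource(
                project="my-project",
                gcs_bucket="my-bucket",
                gcs_prefix="reports",
            )
        },
    )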
[docs]@dagster_maintained_resource\n@resource(config_schema=GCSFileManagerResource.to_config_schema())\ndef gcs_file_manager(context):\n """FileManager that provides abstract access to GCS.\n\n Implements the :py:class:`~dagster._core.storage.file_manager.FileManager` API.\n """\n return GCSFileManagerResource.from_resource_context(context).get_client()
\n\n\ndef _gcs_client_from_config(project: Optional[str]) -> storage.Client:\n """Creates a GCS Client.\n\n Args:\n project: The GCP project\n\n Returns: A GCS client.\n """\n return storage.client.Client(project=project)\n
", "current_page_name": "_modules/dagster_gcp/gcs/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_gcp.gcs.resources"}}}, "dagster_gcp_pandas": {"bigquery": {"bigquery_pandas_type_handler": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_gcp_pandas.bigquery.bigquery_pandas_type_handler

\nfrom typing import Optional, Sequence, Type\n\nimport pandas as pd\nfrom dagster import InputContext, MetadataValue, OutputContext, TableColumn, TableSchema\nfrom dagster._core.storage.db_io_manager import DbTypeHandler, TableSlice\nfrom dagster_gcp.bigquery.io_manager import (\n    BigQueryClient,\n    BigQueryIOManager,\n    build_bigquery_io_manager,\n)\n\n\n
[docs]class BigQueryPandasTypeHandler(DbTypeHandler[pd.DataFrame]):\n """Plugin for the BigQuery I/O Manager that can store and load Pandas DataFrames as BigQuery tables.\n\n Examples:\n .. code-block:: python\n\n from dagster_gcp import BigQueryIOManager\n from dagster_bigquery_pandas import BigQueryPandasTypeHandler\n from dagster import Definitions, EnvVar\n\n class MyBigQueryIOManager(BigQueryIOManager):\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [BigQueryPandasTypeHandler()]\n\n @asset(\n key_prefix=["my_dataset"] # my_dataset will be used as the dataset in BigQuery\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "io_manager": MyBigQueryIOManager(project=EnvVar("GCP_PROJECT"))\n }\n )\n\n """\n\n def handle_output(\n self, context: OutputContext, table_slice: TableSlice, obj: pd.DataFrame, connection\n ):\n """Stores the pandas DataFrame in BigQuery."""\n with_uppercase_cols = obj.rename(str.upper, copy=False, axis="columns")\n\n job = connection.load_table_from_dataframe(\n dataframe=with_uppercase_cols,\n destination=f"{table_slice.schema}.{table_slice.table}",\n project=table_slice.database,\n location=context.resource_config.get("location") if context.resource_config else None,\n timeout=context.resource_config.get("timeout") if context.resource_config else None,\n )\n job.result()\n\n context.add_output_metadata(\n {\n "row_count": obj.shape[0],\n "dataframe_columns": MetadataValue.table_schema(\n TableSchema(\n columns=[\n TableColumn(name=name, type=str(dtype)) # type: ignore # (bad stubs)\n for name, dtype in obj.dtypes.items()\n ]\n )\n ),\n }\n )\n\n def load_input(\n self, context: InputContext, table_slice: TableSlice, connection\n ) -> pd.DataFrame:\n """Loads the input as a Pandas DataFrame."""\n if table_slice.partition_dimensions and len(context.asset_partition_keys) == 0:\n return pd.DataFrame()\n result = connection.query(\n query=BigQueryClient.get_select_statement(table_slice),\n project=table_slice.database,\n location=context.resource_config.get("location") if context.resource_config else None,\n timeout=context.resource_config.get("timeout") if context.resource_config else None,\n ).to_dataframe()\n\n result.columns = map(str.lower, result.columns)\n return result\n\n @property\n def supported_types(self):\n return [pd.DataFrame]
\n\n\nbigquery_pandas_io_manager = build_bigquery_io_manager(\n [BigQueryPandasTypeHandler()], default_load_type=pd.DataFrame\n)\nbigquery_pandas_io_manager.__doc__ = """\nAn I/O manager definition that reads inputs from and writes pandas DataFrames to BigQuery.\n\nReturns:\n IOManagerDefinition\n\nExamples:\n\n .. code-block:: python\n\n from dagster_gcp_pandas import bigquery_pandas_io_manager\n from dagster import Definitions\n\n @asset(\n key_prefix=["my_dataset"] # will be used as the dataset in BigQuery\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "io_manager": bigquery_pandas_io_manager.configured({\n "project" : {"env": "GCP_PROJECT"}\n })\n }\n )\n\n You can tell Dagster in which dataset to create tables by setting the "dataset" configuration value.\n If you do not provide a dataset as configuration to the I/O manager, Dagster will determine a dataset based\n on the assets and ops using the I/O Manager. For assets, the dataset will be determined from the asset key,\n as shown in the above example. The final prefix before the asset name will be used as the dataset. For example,\n if the asset "my_table" had the key prefix ["gcp", "bigquery", "my_dataset"], the dataset "my_dataset" will be\n used. For ops, the dataset can be specified by including a "schema" entry in output metadata. If "schema" is not provided\n via config or on the asset/op, "public" will be used for the dataset.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_dataset"})}\n )\n def make_my_table() -> pd.DataFrame:\n # the returned value will be stored at my_dataset.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pd.DataFrame) -> pd.DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n If you cannot upload a file to your Dagster deployment, or otherwise cannot\n `authenticate with GCP <https://cloud.google.com/docs/authentication/provide-credentials-adc>`_\n via a standard method, you can provide a service account key as the "gcp_credentials" configuration.\n Dagster will store this key in a temporary file and set GOOGLE_APPLICATION_CREDENTIALS to point to the file.\n After the run completes, the file will be deleted, and GOOGLE_APPLICATION_CREDENTIALS will be\n unset. The key must be base64 encoded to avoid issues with newlines in the keys. You can retrieve\n the base64 encoded key with this shell command: cat $GOOGLE_APPLICATION_CREDENTIALS | base64\n\n"""\n\n\n
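The dataset-resolution rule described above can be illustrated with a tiny function. This is not the I/O manager's actual resolution code, only a sketch of the documented behavior (explicit config wins, otherwise the final key-prefix component, otherwise ``public``).

.. code-block:: python

    # Illustrative only: mirrors the documented dataset-resolution rule.
    from typing import Optional
    from dagster import AssetKey

    def dataset_for_asset(key: AssetKey, configured_dataset: Optional[str] = None) -> str:
        if configured_dataset is not None:
            return configured_dataset  # explicit "dataset" config wins
        if len(key.path) > 1:
            return key.path[-2]        # final prefix before the asset name
        return "public"                # documented fallback

    assert dataset_for_asset(AssetKey(["gcp", "bigquery", "my_dataset", "my_table"])) == "my_dataset"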
[docs]class BigQueryPandasIOManager(BigQueryIOManager):\n """An I/O manager definition that reads inputs from and writes pandas DataFrames to BigQuery.\n\n Returns:\n IOManagerDefinition\n\n Examples:\n .. code-block:: python\n\n from dagster_gcp_pandas import BigQueryPandasIOManager\n from dagster import Definitions, EnvVar\n\n @asset(\n key_prefix=["my_dataset"] # will be used as the dataset in BigQuery\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "io_manager": BigQueryPandasIOManager(project=EnvVar("GCP_PROJECT"))\n }\n )\n\n You can tell Dagster in which dataset to create tables by setting the "dataset" configuration value.\n If you do not provide a dataset as configuration to the I/O manager, Dagster will determine a dataset based\n on the assets and ops using the I/O Manager. For assets, the dataset will be determined from the asset key,\n as shown in the above example. The final prefix before the asset name will be used as the dataset. For example,\n if the asset "my_table" had the key prefix ["gcp", "bigquery", "my_dataset"], the dataset "my_dataset" will be\n used. For ops, the dataset can be specified by including a "schema" entry in output metadata. If "schema" is not provided\n via config or on the asset/op, "public" will be used for the dataset.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_dataset"})}\n )\n def make_my_table() -> pd.DataFrame:\n # the returned value will be stored at my_dataset.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pd.DataFrame) -> pd.DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n If you cannot upload a file to your Dagster deployment, or otherwise cannot\n `authenticate with GCP <https://cloud.google.com/docs/authentication/provide-credentials-adc>`_\n via a standard method, you can provide a service account key as the "gcp_credentials" configuration.\n Dagster will store this key in a temporary file and set GOOGLE_APPLICATION_CREDENTIALS to point to the file.\n After the run completes, the file will be deleted, and GOOGLE_APPLICATION_CREDENTIALS will be\n unset. The key must be base64 encoded to avoid issues with newlines in the keys. You can retrieve\n the base64 encoded key with this shell command: cat $GOOGLE_APPLICATION_CREDENTIALS | base64\n\n """\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [BigQueryPandasTypeHandler()]\n\n @staticmethod\n def default_load_type() -> Optional[Type]:\n return pd.DataFrame
\n
", "current_page_name": "_modules/dagster_gcp_pandas/bigquery/bigquery_pandas_type_handler", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_gcp_pandas.bigquery.bigquery_pandas_type_handler"}}}, "dagster_gcp_pyspark": {"bigquery": {"bigquery_pyspark_type_handler": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_gcp_pyspark.bigquery.bigquery_pyspark_type_handler

\nfrom typing import Any, Mapping, Optional, Sequence, Type\n\nfrom dagster import InputContext, MetadataValue, OutputContext, TableColumn, TableSchema\nfrom dagster._core.definitions.metadata import RawMetadataValue\nfrom dagster._core.storage.db_io_manager import DbTypeHandler, TableSlice\nfrom dagster_gcp import BigQueryIOManager, build_bigquery_io_manager\nfrom dagster_gcp.bigquery.io_manager import BigQueryClient\nfrom pyspark.sql import DataFrame, SparkSession\nfrom pyspark.sql.types import StructType\n\n\ndef _get_bigquery_write_options(\n    config: Optional[Mapping[str, Any]], table_slice: TableSlice\n) -> Mapping[str, str]:\n    conf = {\n        "table": f"{table_slice.database}.{table_slice.schema}.{table_slice.table}",\n    }\n    if config and config.get("temporary_gcs_bucket") is not None:\n        conf["temporaryGcsBucket"] = config["temporary_gcs_bucket"]\n    else:\n        conf["writeMethod"] = "direct"\n    return conf\n\n\ndef _get_bigquery_read_options(table_slice: TableSlice) -> Mapping[str, str]:\n    conf = {"viewsEnabled": "true", "materializationDataset": table_slice.schema}\n    return conf\n\n\n
[docs]class BigQueryPySparkTypeHandler(DbTypeHandler[DataFrame]):\n """Plugin for the BigQuery I/O Manager that can store and load PySpark DataFrames as BigQuery tables.\n\n Examples:\n .. code-block:: python\n\n from dagster_gcp import BigQueryIOManager\n from dagster_bigquery_pandas import BigQueryPySparkTypeHandler\n from dagster import Definitions, EnvVar\n\n class MyBigQueryIOManager(BigQueryIOManager):\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [BigQueryPySparkTypeHandler()]\n\n @asset(\n key_prefix=["my_dataset"] # my_dataset will be used as the dataset in BigQuery\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "io_manager": MyBigQueryIOManager(project=EnvVar("GCP_PROJECT"))\n }\n )\n\n """\n\n def handle_output(\n self, context: OutputContext, table_slice: TableSlice, obj: DataFrame, _\n ) -> Mapping[str, RawMetadataValue]:\n options = _get_bigquery_write_options(context.resource_config, table_slice)\n\n with_uppercase_cols = obj.toDF(*[c.upper() for c in obj.columns])\n\n with_uppercase_cols.write.format("bigquery").options(**options).mode("append").save()\n\n return {\n "dataframe_columns": MetadataValue.table_schema(\n TableSchema(\n columns=[\n TableColumn(name=field.name, type=field.dataType.typeName())\n for field in obj.schema.fields\n ]\n )\n ),\n }\n\n def load_input(self, context: InputContext, table_slice: TableSlice, _) -> DataFrame:\n options = _get_bigquery_read_options(table_slice)\n spark = SparkSession.builder.getOrCreate() # type: ignore\n\n if table_slice.partition_dimensions and len(context.asset_partition_keys) == 0:\n return spark.createDataFrame([], StructType([]))\n\n df = (\n spark.read.format("bigquery")\n .options(**options)\n .load(BigQueryClient.get_select_statement(table_slice))\n )\n\n return df.toDF(*[c.lower() for c in df.columns])\n\n @property\n def supported_types(self):\n return [DataFrame]
\n\n\nbigquery_pyspark_io_manager = build_bigquery_io_manager(\n [BigQueryPySparkTypeHandler()], default_load_type=DataFrame\n)\nbigquery_pyspark_io_manager.__doc__ = """\nAn I/O manager definition that reads inputs from and writes PySpark DataFrames to BigQuery.\n\nReturns:\n IOManagerDefinition\n\nExamples:\n\n .. code-block:: python\n\n from dagster_gcp_pyspark import bigquery_pyspark_io_manager\n from dagster import Definitions\n\n @asset(\n key_prefix=["my_dataset"] # will be used as the dataset in BigQuery\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "io_manager": bigquery_pyspark_io_manager.configured({\n "project" : {"env": "GCP_PROJECT"}\n })\n }\n )\n\n You can tell Dagster in which dataset to create tables by setting the "dataset" configuration value.\n If you do not provide a dataset as configuration to the I/O manager, Dagster will determine a dataset based\n on the assets and ops using the I/O Manager. For assets, the dataset will be determined from the asset key,\n as shown in the above example. The final prefix before the asset name will be used as the dataset. For example,\n if the asset "my_table" had the key prefix ["gcp", "bigquery", "my_dataset"], the dataset "my_dataset" will be\n used. For ops, the dataset can be specified by including a "schema" entry in output metadata. If "schema" is not provided\n via config or on the asset/op, "public" will be used for the dataset.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_dataset"})}\n )\n def make_my_table() -> pd.DataFrame:\n # the returned value will be stored at my_dataset.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pd.DataFrame) -> pd.DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n If you cannot upload a file to your Dagster deployment, or otherwise cannot\n `authenticate with GCP <https://cloud.google.com/docs/authentication/provide-credentials-adc>`_\n via a standard method, you can provide a service account key as the "gcp_credentials" configuration.\n Dagster will store this key in a temporary file and set GOOGLE_APPLICATION_CREDENTIALS to point to the file.\n After the run completes, the file will be deleted, and GOOGLE_APPLICATION_CREDENTIALS will be\n unset. The key must be base64 encoded to avoid issues with newlines in the keys. You can retrieve\n the base64 encoded key with this shell command: cat $GOOGLE_APPLICATION_CREDENTIALS | base64\n\n"""\n\n\n
[docs]class BigQueryPySparkIOManager(BigQueryIOManager):\n """An I/O manager definition that reads inputs from and writes PySpark DataFrames to BigQuery.\n\n Returns:\n IOManagerDefinition\n\n Examples:\n .. code-block:: python\n\n from dagster_gcp_pyspark import BigQueryPySparkIOManager\n from dagster import Definitions, EnvVar\n\n @asset(\n key_prefix=["my_dataset"] # will be used as the dataset in BigQuery\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "io_manager": BigQueryPySparkIOManager(project=EnvVar("GCP_PROJECT"))\n }\n )\n\n You can tell Dagster in which dataset to create tables by setting the "dataset" configuration value.\n If you do not provide a dataset as configuration to the I/O manager, Dagster will determine a dataset based\n on the assets and ops using the I/O Manager. For assets, the dataset will be determined from the asset key,\n as shown in the above example. The final prefix before the asset name will be used as the dataset. For example,\n if the asset "my_table" had the key prefix ["gcp", "bigquery", "my_dataset"], the dataset "my_dataset" will be\n used. For ops, the dataset can be specified by including a "schema" entry in output metadata. If "schema" is not provided\n via config or on the asset/op, "public" will be used for the dataset.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_dataset"})}\n )\n def make_my_table() -> pd.DataFrame:\n # the returned value will be stored at my_dataset.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pd.DataFrame) -> pd.DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n If you cannot upload a file to your Dagster deployment, or otherwise cannot\n `authenticate with GCP <https://cloud.google.com/docs/authentication/provide-credentials-adc>`_\n via a standard method, you can provide a service account key as the "gcp_credentials" configuration.\n Dagster will store this key in a temporary file and set GOOGLE_APPLICATION_CREDENTIALS to point to the file.\n After the run completes, the file will be deleted, and GOOGLE_APPLICATION_CREDENTIALS will be\n unset. The key must be base64 encoded to avoid issues with newlines in the keys. You can retrieve\n the base64 encoded key with this shell command: cat $GOOGLE_APPLICATION_CREDENTIALS | base64\n\n """\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [BigQueryPySparkTypeHandler()]\n\n @staticmethod\n def default_load_type() -> Optional[Type]:\n return DataFrame
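A configuration sketch for the indirect write path through a staging bucket; ``temporary_gcs_bucket`` is assumed to be a constructor field (it is the config key read by ``_get_bigquery_write_options`` above), and the bucket name is a placeholder. When it is omitted, the connector's "direct" write method is used instead.

.. code-block:: python

    from dagster import Definitions, EnvVar
    from dagster_gcp_pyspark import BigQueryPySparkIOManager

    defs = Definitions(
        assets=[],  # attach your PySpark assets here
        resources={
            "io_manager": BigQueryPySparkIOManager(
                project=EnvVar("GCP_PROJECT"),
                temporary_gcs_bucket="my-staging-bucket",  # placeholder; omit to use direct writes
            )
        },
    )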
\n
", "current_page_name": "_modules/dagster_gcp_pyspark/bigquery/bigquery_pyspark_type_handler", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_gcp_pyspark.bigquery.bigquery_pyspark_type_handler"}}}, "dagster_ge": {"factory": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_ge.factory

\nimport datetime\nfrom typing import Any, Dict\n\nimport great_expectations as ge\nfrom dagster import (\n    ConfigurableResource,\n    ExpectationResult,\n    IAttachDifferentObjectToOpContext,\n    In,\n    MetadataValue,\n    OpExecutionContext,\n    Out,\n    Output,\n    _check as check,\n    op,\n    resource,\n)\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster_pandas import DataFrame\nfrom great_expectations.render.renderer import ValidationResultsPageRenderer\nfrom great_expectations.render.view import DefaultMarkdownPageView\nfrom pydantic import Field\n\ntry:\n    # ge < v0.13.0\n    from great_expectations.core import convert_to_json_serializable\nexcept ImportError:\n    # ge >= v0.13.0\n    from great_expectations.core.util import convert_to_json_serializable\n\n\nclass GEContextResource(ConfigurableResource, IAttachDifferentObjectToOpContext):\n    ge_root_dir: str = Field(\n        default=None,\n        description="The root directory for your Great Expectations project.",\n    )\n\n    def get_data_context(self):\n        if self.ge_root_dir is None:\n            return ge.data_context.DataContext()\n        return ge.data_context.DataContext(context_root_dir=self.ge_root_dir)\n\n    def get_object_to_set_on_execution_context(self):\n        return self.get_data_context()\n\n\n@dagster_maintained_resource\n@resource(config_schema=GEContextResource.to_config_schema())\ndef ge_data_context(context):\n    return GEContextResource.from_resource_context(context).get_data_context()\n\n\n
[docs]def ge_validation_op_factory(\n name,\n datasource_name,\n suite_name,\n validation_operator_name=None,\n input_dagster_type=DataFrame,\n batch_kwargs=None,\n):\n """Generates ops for interacting with GE.\n\n Args:\n name (str): the name of the op\n datasource_name (str): the name of your DataSource, see your great_expectations.yml\n suite_name (str): the name of your expectation suite, see your great_expectations.yml\n validation_operator_name (Optional[str]): what validation operator to run -- defaults to\n None, which generates an ephemeral validator. If you want to save data docs, use\n 'action_list_operator'.\n See https://legacy.docs.greatexpectations.io/en/0.12.1/reference/core_concepts/validation_operators_and_actions.html#\n input_dagster_type (DagsterType): the Dagster type used to type check the input to the op.\n Defaults to `dagster_pandas.DataFrame`.\n batch_kwargs (Optional[dict]): overrides the `batch_kwargs` parameter when calling the\n `ge_data_context`'s `get_batch` method. Defaults to `{"dataset": dataset}`, where\n `dataset` is the input to the generated op.\n\n Returns:\n An op that takes in a set of data and yields both an expectation with relevant metadata\n and an output with all the metadata (for user processing)\n """\n check.str_param(datasource_name, "datasource_name")\n check.str_param(suite_name, "suite_name")\n check.opt_str_param(validation_operator_name, "validation_operator_name")\n batch_kwargs = check.opt_dict_param(batch_kwargs, "batch_kwargs")\n\n @op(\n name=name,\n ins={"dataset": In(input_dagster_type)},\n out=Out(\n dict,\n description="""\n This op yields an expectationResult with a structured dict of metadata from\n the GE suite, as well as the full result in case a user wants to process it differently.\n The structured dict contains both summary stats from the suite as well as expectation by\n expectation results/details.\n """,\n ),\n required_resource_keys={"ge_data_context"},\n tags={"kind": "ge"},\n )\n def _ge_validation_fn(context: OpExecutionContext, dataset):\n data_context = context.resources.ge_data_context\n\n if validation_operator_name is not None:\n validation_operator = validation_operator_name\n else:\n data_context.add_validation_operator(\n "ephemeral_validation",\n {"class_name": "ActionListValidationOperator", "action_list": []},\n )\n validation_operator = "ephemeral_validation"\n suite = data_context.get_expectation_suite(suite_name)\n final_batch_kwargs = batch_kwargs or {"dataset": dataset}\n if "datasource" in final_batch_kwargs:\n context.log.warning(\n "`datasource` field of `batch_kwargs` will be ignored; use the `datasource_name` "\n "parameter of the op factory instead."\n )\n final_batch_kwargs["datasource"] = datasource_name\n batch = data_context.get_batch(final_batch_kwargs, suite)\n run_id = {\n "run_name": datasource_name + " run",\n "run_time": datetime.datetime.utcnow(),\n }\n results = data_context.run_validation_operator(\n validation_operator, assets_to_validate=[batch], run_id=run_id\n )\n res = convert_to_json_serializable(results.list_validation_results())[0]\n validation_results_page_renderer = ValidationResultsPageRenderer(run_info_at_end=True)\n rendered_document_content_list = (\n validation_results_page_renderer.render_validation_operator_result(results)\n )\n md_str = " ".join(DefaultMarkdownPageView().render(rendered_document_content_list))\n\n yield ExpectationResult(\n success=res["success"],\n metadata={"Expectation Results": MetadataValue.md(md_str)},\n )\n yield Output(res)\n\n 
return _ge_validation_fn
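A usage sketch for the factory above; the datasource name, suite name, and Great Expectations root directory are placeholders for your own project.

.. code-block:: python

    import pandas as pd
    from dagster import job, op
    from dagster_ge.factory import ge_data_context, ge_validation_op_factory

    payroll_expectations = ge_validation_op_factory(
        name="ge_validation_op",
        datasource_name="getest",    # placeholder datasource
        suite_name="basic.warning",  # placeholder expectation suite
    )

    @op
    def read_in_datafile() -> pd.DataFrame:
        # placeholder loader; read from your real source here
        return pd.DataFrame({"a": [1, 2, 3]})

    @job(
        resource_defs={
            "ge_data_context": ge_data_context.configured(
                {"ge_root_dir": "./great_expectations"}  # placeholder path
            )
        }
    )
    def payroll_data_job():
        payroll_expectations(read_in_datafile())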
\n\n\ndef ge_validation_op_factory_v3(\n name,\n datasource_name,\n data_connector_name,\n data_asset_name,\n suite_name,\n batch_identifiers: dict,\n input_dagster_type=DataFrame,\n runtime_method_type="batch_data",\n extra_kwargs=None,\n):\n """Generates ops for interacting with GE (v3 API).\n\n Args:\n name (str): the name of the op\n datasource_name (str): the name of your DataSource, see your great_expectations.yml\n data_connector_name (str): the name of the data connector for this datasource. This should\n point to a RuntimeDataConnector. For information on how to set this up, see:\n https://docs.greatexpectations.io/docs/guides/connecting_to_your_data/how_to_create_a_batch_of_data_from_an_in_memory_spark_or_pandas_dataframe\n data_asset_name (str): the name of the data asset that this op will be validating.\n suite_name (str): the name of your expectation suite, see your great_expectations.yml\n batch_identifier_fn (dict): A dicitonary of batch identifiers to uniquely identify this\n batch of data. To learn more about batch identifiers, see:\n https://docs.greatexpectations.io/docs/reference/datasources#batches.\n input_dagster_type (DagsterType): the Dagster type used to type check the input to the op.\n Defaults to `dagster_pandas.DataFrame`.\n runtime_method_type (str): how GE should interperet the op input. One of ("batch_data",\n "path", "query"). Defaults to "batch_data", which will interperet the input as an\n in-memory object.\n extra_kwargs (Optional[dict]): adds extra kwargs to the invocation of `ge_data_context`'s\n `get_validator` method. If not set, input will be:\n {\n "datasource_name": datasource_name,\n "data_connector_name": data_connector_name,\n "data_asset_name": data_asset_name,\n "runtime_parameters": {\n "<runtime_method_type>": <op input>\n },\n "batch_identifiers": batch_identifiers,\n "expectation_suite_name": suite_name,\n }\n\n Returns:\n An op that takes in a set of data and yields both an expectation with relevant metadata and\n an output with all the metadata (for user processing)\n\n """\n check.str_param(datasource_name, "datasource_name")\n check.str_param(data_connector_name, "data_connector_name")\n check.str_param(suite_name, "suite_name")\n\n _extra_kwargs: Dict[Any, Any] = check.opt_dict_param(extra_kwargs, "extra_kwargs")\n\n @op(\n name=name,\n ins={"dataset": In(input_dagster_type)},\n out=Out(\n dict,\n description="""\n This op yields an ExpectationResult with a structured dict of metadata from\n the GE suite, as well as the full result in case a user wants to process it differently.\n The structured dict contains both summary stats from the suite as well as expectation by\n expectation results/details.\n """,\n ),\n required_resource_keys={"ge_data_context"},\n tags={"kind": "ge"},\n )\n def _ge_validation_fn(context: OpExecutionContext, dataset):\n data_context = context.resources.ge_data_context\n\n validator_kwargs = {\n "datasource_name": datasource_name,\n "data_connector_name": data_connector_name,\n "data_asset_name": datasource_name or data_asset_name,\n "runtime_parameters": {runtime_method_type: dataset},\n "batch_identifiers": batch_identifiers,\n "expectation_suite_name": suite_name,\n **_extra_kwargs,\n }\n validator = data_context.get_validator(**validator_kwargs)\n\n run_id = {\n "run_name": datasource_name + " run",\n "run_time": datetime.datetime.utcnow(),\n }\n results = validator.validate(run_id=run_id)\n\n validation_results_page_renderer = ValidationResultsPageRenderer(run_info_at_end=True)\n 
rendered_document_content_list = validation_results_page_renderer.render(\n validation_results=results\n )\n md_str = "".join(DefaultMarkdownPageView().render(rendered_document_content_list))\n\n yield ExpectationResult(\n success=bool(results["success"]),\n metadata={"Expectation Results": MetadataValue.md(md_str)},\n )\n yield Output(results.to_json_dict())\n\n return _ge_validation_fn\n
", "current_page_name": "_modules/dagster_ge/factory", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_ge.factory"}}, "dagster_github": {"resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_github.resources

\nimport time\nfrom datetime import datetime\nfrom typing import Optional\n\nimport jwt\nimport requests\nfrom dagster import ConfigurableResource, resource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom pydantic import Field\n\n\ndef to_seconds(dt):\n    return (dt - datetime(1970, 1, 1)).total_seconds()\n\n\nclass GithubClient:\n    def __init__(\n        self, client, app_id, app_private_rsa_key, default_installation_id, hostname=None\n    ) -> None:\n        self.client = client\n        self.app_private_rsa_key = app_private_rsa_key\n        self.app_id = app_id\n        self.default_installation_id = default_installation_id\n        self.installation_tokens = {}\n        self.app_token = {}\n        self.hostname = hostname\n\n    def __set_app_token(self):\n        # from https://developer.github.com/apps/building-github-apps/authenticating-with-github-apps/\n        # needing to self-sign a JWT\n        now = int(time.time())\n        # JWT expiration time (10 minute maximum)\n        expires = now + (10 * 60)\n        encoded_token = jwt.encode(\n            {\n                # issued at time\n                "iat": now,\n                # JWT expiration time\n                "exp": expires,\n                # GitHub App's identifier\n                "iss": self.app_id,\n            },\n            self.app_private_rsa_key,\n            algorithm="RS256",\n        )\n        self.app_token = {\n            "value": encoded_token,\n            "expires": expires,\n        }\n\n    def __check_app_token(self):\n        if ("expires" not in self.app_token) or (\n            self.app_token["expires"] < (int(time.time()) + 60)\n        ):\n            self.__set_app_token()\n\n    def get_installations(self, headers=None):\n        if headers is None:\n            headers = {}\n        self.__check_app_token()\n        headers["Authorization"] = "Bearer {}".format(self.app_token["value"])\n        headers["Accept"] = "application/vnd.github.machine-man-preview+json"\n        request = self.client.get(\n            (\n                "https://api.github.com/app/installations"\n                if self.hostname is None\n                else f"https://{self.hostname}/api/v3/app/installations"\n            ),\n            headers=headers,\n        )\n        request.raise_for_status()\n        return request.json()\n\n    def __set_installation_token(self, installation_id, headers=None):\n        if headers is None:\n            headers = {}\n        self.__check_app_token()\n        headers["Authorization"] = "Bearer {}".format(self.app_token["value"])\n        headers["Accept"] = "application/vnd.github.machine-man-preview+json"\n        request = requests.post(\n            (\n                f"https://api.github.com/app/installations/{installation_id}/access_tokens"\n                if self.hostname is None\n                else "https://{}/api/v3/app/installations/{}/access_tokens".format(\n                    self.hostname, installation_id\n                )\n            ),\n            headers=headers,\n        )\n        request.raise_for_status()\n        auth = request.json()\n        self.installation_tokens[installation_id] = {\n            "value": auth["token"],\n            "expires": to_seconds(datetime.strptime(auth["expires_at"], "%Y-%m-%dT%H:%M:%SZ")),\n        }\n\n    def __check_installation_tokens(self, installation_id):\n        if (installation_id not in self.installation_tokens) or (\n            
self.installation_tokens[installation_id]["expires"] < (int(time.time()) + 60)\n        ):\n            self.__set_installation_token(installation_id)\n\n    def execute(self, query, variables, headers=None, installation_id=None):\n        if headers is None:\n            headers = {}\n        if installation_id is None:\n            installation_id = self.default_installation_id\n        self.__check_installation_tokens(installation_id)\n        headers["Authorization"] = "token {}".format(\n            self.installation_tokens[installation_id]["value"]\n        )\n        request = requests.post(\n            (\n                "https://api.github.com/graphql"\n                if self.hostname is None\n                else f"https://{self.hostname}/api/graphql"\n            ),\n            json={"query": query, "variables": variables},\n            headers=headers,\n        )\n        request.raise_for_status()\n        return request.json()\n\n    def create_issue(self, repo_name, repo_owner, title, body, installation_id=None):\n        if installation_id is None:\n            installation_id = self.default_installation_id\n        res = self.execute(\n            query="""\n            query get_repo_id($repo_name: String!, $repo_owner: String!) {\n                repository(name: $repo_name, owner: $repo_owner) {\n                    id\n                }\n            }\n            """,\n            variables={"repo_name": repo_name, "repo_owner": repo_owner},\n            installation_id=installation_id,\n        )\n\n        return self.execute(\n            query="""\n                mutation CreateIssue($id: ID!, $title: String!, $body: String!) {\n                createIssue(input: {\n                    repositoryId: $id,\n                    title: $title,\n                    body: $body\n                }) {\n                    clientMutationId,\n                    issue {\n                        body\n                        title\n                        url\n                    }\n                }\n                }\n            """,\n            variables={\n                "id": res["data"]["repository"]["id"],\n                "title": title,\n                "body": body,\n            },\n            installation_id=installation_id,\n        )\n\n\n
[docs]class GithubResource(ConfigurableResource):\n github_app_id: int = Field(\n description="Github Application ID, for more info see https://developer.github.com/apps/",\n )\n github_app_private_rsa_key: str = Field(\n description=(\n "Github Application Private RSA key text, for more info see"\n " https://developer.github.com/apps/"\n ),\n )\n github_installation_id: Optional[int] = Field(\n default=None,\n description=(\n "Github Application Installation ID, for more info see"\n " https://developer.github.com/apps/"\n ),\n )\n github_hostname: Optional[str] = Field(\n default=None,\n description=(\n "Github hostname. Defaults to `api.github.com`, for more info see"\n " https://developer.github.com/apps/"\n ),\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def get_client(self) -> GithubClient:\n return GithubClient(\n client=requests.Session(),\n app_id=self.github_app_id,\n app_private_rsa_key=self.github_app_private_rsa_key,\n default_installation_id=self.github_installation_id,\n hostname=self.github_hostname,\n )
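A hedged sketch that files an issue from an op using the client above; the repository, owner, and environment-variable names are placeholders.

.. code-block:: python

    import os
    from dagster import Definitions, job, op
    from dagster_github import GithubResource  # assumed top-level export

    @op
    def file_tracking_issue(github: GithubResource):
        client = github.get_client()
        client.create_issue(
            repo_name="my-repo",  # placeholder
            repo_owner="my-org",  # placeholder
            title="Nightly pipeline finished",
            body="Filed automatically from a Dagster op.",
        )

    @job
    def issue_job():
        file_tracking_issue()

    defs = Definitions(
        jobs=[issue_job],
        resources={
            "github": GithubResource(
                github_app_id=int(os.environ["GITHUB_APP_ID"]),
                github_app_private_rsa_key=os.environ["GITHUB_PRIVATE_KEY"],
                github_installation_id=int(os.environ["GITHUB_INSTALLATION_ID"]),
            )
        },
    )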
\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n config_schema=GithubResource.to_config_schema(),\n description="This resource is for connecting to Github",\n)\ndef github_resource(context) -> GithubClient:\n return GithubResource(**context.resource_config).get_client()
\n
", "current_page_name": "_modules/dagster_github/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_github.resources"}}, "dagster_graphql": {"client": {"client": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_graphql.client.client

\nfrom itertools import chain\nfrom typing import Any, Dict, Iterable, List, Mapping, Optional, Sequence, Union\n\nimport dagster._check as check\nimport requests.exceptions\nfrom dagster import DagsterRunStatus\nfrom dagster._annotations import deprecated, public\nfrom dagster._core.definitions.run_config import RunConfig, convert_config_input\nfrom dagster._core.definitions.utils import validate_tags\nfrom gql import Client, gql\nfrom gql.transport import Transport\nfrom gql.transport.requests import RequestsHTTPTransport\n\nfrom .client_queries import (\n    CLIENT_GET_REPO_LOCATIONS_NAMES_AND_PIPELINES_QUERY,\n    CLIENT_SUBMIT_PIPELINE_RUN_MUTATION,\n    GET_PIPELINE_RUN_STATUS_QUERY,\n    RELOAD_REPOSITORY_LOCATION_MUTATION,\n    SHUTDOWN_REPOSITORY_LOCATION_MUTATION,\n    TERMINATE_RUN_JOB_MUTATION,\n)\nfrom .utils import (\n    DagsterGraphQLClientError,\n    InvalidOutputErrorInfo,\n    JobInfo,\n    ReloadRepositoryLocationInfo,\n    ReloadRepositoryLocationStatus,\n    ShutdownRepositoryLocationInfo,\n    ShutdownRepositoryLocationStatus,\n)\n\n\n
[docs]class DagsterGraphQLClient:\n """Official Dagster Python Client for GraphQL.\n\n Utilizes the gql library to dispatch queries over HTTP to a remote Dagster GraphQL Server\n\n As of now, all operations on this client are synchronous.\n\n Intended usage:\n\n .. code-block:: python\n\n client = DagsterGraphQLClient("localhost", port_number=3000)\n status = client.get_run_status(**SOME_RUN_ID**)\n\n Args:\n hostname (str): Hostname for the Dagster GraphQL API, like `localhost` or\n `dagster.YOUR_ORG_HERE`.\n port_number (Optional[int]): Port number to connect to on the host.\n Defaults to None.\n transport (Optional[Transport], optional): A custom transport to use to connect to the\n GraphQL API with (e.g. for custom auth). Defaults to None.\n use_https (bool, optional): Whether to use https in the URL connection string for the\n GraphQL API. Defaults to False.\n timeout (int): Number of seconds before requests should time out. Defaults to 60.\n headers (Optional[Dict[str, str]]): Additional headers to include in the request. To use\n this client in Dagster Cloud, set the "Dagster-Cloud-Api-Token" header to a user token\n generated in the Dagster Cloud UI.\n\n Raises:\n :py:class:`~requests.exceptions.ConnectionError`: if the client cannot connect to the host.\n """\n\n def __init__(\n self,\n hostname: str,\n port_number: Optional[int] = None,\n transport: Optional[Transport] = None,\n use_https: bool = False,\n timeout: int = 300,\n headers: Optional[Dict[str, str]] = None,\n ):\n self._hostname = check.str_param(hostname, "hostname")\n self._port_number = check.opt_int_param(port_number, "port_number")\n self._use_https = check.bool_param(use_https, "use_https")\n\n self._url = (\n ("https://" if self._use_https else "http://")\n + (f"{self._hostname}:{self._port_number}" if self._port_number else self._hostname)\n + "/graphql"\n )\n\n self._transport = check.opt_inst_param(\n transport,\n "transport",\n Transport,\n default=RequestsHTTPTransport(\n url=self._url, use_json=True, timeout=timeout, headers=headers\n ),\n )\n try:\n self._client = Client(transport=self._transport, fetch_schema_from_transport=True)\n except requests.exceptions.ConnectionError as exc:\n raise DagsterGraphQLClientError(\n f"Error when connecting to url {self._url}. 
"\n + f"Did you specify hostname: {self._hostname} "\n + (f"and port_number: {self._port_number} " if self._port_number else "")\n + "correctly?"\n ) from exc\n\n def _execute(self, query: str, variables: Optional[Dict[str, Any]] = None):\n try:\n return self._client.execute(gql(query), variable_values=variables)\n except Exception as exc: # catch generic Exception from the gql client\n raise DagsterGraphQLClientError(\n f"Exception occured during execution of query \\n{query}\\n with variables"\n f" \\n{variables}\\n"\n ) from exc\n\n def _get_repo_locations_and_names_with_pipeline(self, job_name: str) -> List[JobInfo]:\n res_data = self._execute(CLIENT_GET_REPO_LOCATIONS_NAMES_AND_PIPELINES_QUERY)\n query_res = res_data["repositoriesOrError"]\n repo_connection_status = query_res["__typename"]\n if repo_connection_status == "RepositoryConnection":\n valid_nodes: Iterable[JobInfo] = chain(*map(JobInfo.from_node, query_res["nodes"]))\n return [info for info in valid_nodes if info.job_name == job_name]\n else:\n raise DagsterGraphQLClientError(repo_connection_status, query_res["message"])\n\n def _core_submit_execution(\n self,\n pipeline_name: str,\n repository_location_name: Optional[str] = None,\n repository_name: Optional[str] = None,\n run_config: Optional[Union[RunConfig, Mapping[str, Any]]] = None,\n mode: str = "default",\n preset: Optional[str] = None,\n tags: Optional[Mapping[str, str]] = None,\n op_selection: Optional[Sequence[str]] = None,\n is_using_job_op_graph_apis: Optional[bool] = False,\n ):\n check.opt_str_param(repository_location_name, "repository_location_name")\n check.opt_str_param(repository_name, "repository_name")\n check.str_param(pipeline_name, "pipeline_name")\n check.opt_str_param(mode, "mode")\n check.opt_str_param(preset, "preset")\n run_config = check.opt_mapping_param(convert_config_input(run_config), "run_config")\n\n # The following invariant will never fail when a job is executed\n check.invariant(\n (mode is not None and run_config is not None) or preset is not None,\n "Either a mode and run_config or a preset must be specified in order to "\n f"submit the pipeline {pipeline_name} for execution",\n )\n tags = validate_tags(tags)\n\n pipeline_or_job = "Job" if is_using_job_op_graph_apis else "Pipeline"\n\n if not repository_location_name or not repository_name:\n job_info_lst = self._get_repo_locations_and_names_with_pipeline(pipeline_name)\n if len(job_info_lst) == 0:\n raise DagsterGraphQLClientError(\n f"{pipeline_or_job}NotFoundError",\n f"No {'jobs' if is_using_job_op_graph_apis else 'pipelines'} with the name"\n f" `{pipeline_name}` exist",\n )\n elif len(job_info_lst) == 1:\n job_info = job_info_lst[0]\n repository_location_name = job_info.repository_location_name\n repository_name = job_info.repository_name\n else:\n raise DagsterGraphQLClientError(\n "Must specify repository_location_name and repository_name since there are"\n f" multiple {'jobs' if is_using_job_op_graph_apis else 'pipelines'} with the"\n f" name {pipeline_name}.\\n\\tchoose one of: {job_info_lst}"\n )\n\n variables: Dict[str, Any] = {\n "executionParams": {\n "selector": {\n "repositoryLocationName": repository_location_name,\n "repositoryName": repository_name,\n "pipelineName": pipeline_name,\n "solidSelection": op_selection,\n }\n }\n }\n if preset is not None:\n variables["executionParams"]["preset"] = preset\n if mode is not None and run_config is not None:\n variables["executionParams"] = {\n **variables["executionParams"],\n "runConfigData": run_config,\n "mode": mode,\n 
"executionMetadata": (\n {"tags": [{"key": k, "value": v} for k, v in tags.items()]} if tags else {}\n ),\n }\n\n res_data: Dict[str, Any] = self._execute(CLIENT_SUBMIT_PIPELINE_RUN_MUTATION, variables)\n query_result = res_data["launchPipelineExecution"]\n query_result_type = query_result["__typename"]\n if (\n query_result_type == "LaunchRunSuccess"\n or query_result_type == "LaunchPipelineRunSuccess"\n ):\n return query_result["run"]["runId"]\n elif query_result_type == "InvalidStepError":\n raise DagsterGraphQLClientError(query_result_type, query_result["invalidStepKey"])\n elif query_result_type == "InvalidOutputError":\n error_info = InvalidOutputErrorInfo(\n step_key=query_result["stepKey"],\n invalid_output_name=query_result["invalidOutputName"],\n )\n raise DagsterGraphQLClientError(query_result_type, body=error_info)\n elif (\n query_result_type == "RunConfigValidationInvalid"\n or query_result_type == "PipelineConfigValidationInvalid"\n ):\n raise DagsterGraphQLClientError(query_result_type, query_result["errors"])\n else:\n # query_result_type is a ConflictingExecutionParamsError, a PresetNotFoundError\n # a PipelineNotFoundError, a RunConflict, or a PythonError\n raise DagsterGraphQLClientError(query_result_type, query_result["message"])\n\n
[docs] @public\n def submit_job_execution(\n self,\n job_name: str,\n repository_location_name: Optional[str] = None,\n repository_name: Optional[str] = None,\n run_config: Optional[Dict[str, Any]] = None,\n tags: Optional[Dict[str, Any]] = None,\n op_selection: Optional[Sequence[str]] = None,\n ) -> str:\n """Submits a job with attached configuration for execution.\n\n Args:\n job_name (str): The job's name\n repository_location_name (Optional[str]): The name of the repository location where\n the job is located. If omitted, the client will try to infer the repository location\n from the available options on the Dagster deployment. Defaults to None.\n repository_name (Optional[str]): The name of the repository where the job is located.\n If omitted, the client will try to infer the repository from the available options\n on the Dagster deployment. Defaults to None.\n run_config (Optional[Dict[str, Any]]): This is the run config to execute the job with.\n Note that runConfigData is any-typed in the GraphQL type system. This type is used when passing in\n an arbitrary object for run config. However, it must conform to the constraints of the config\n schema for this job. If it does not, the client will throw a DagsterGraphQLClientError with a message of\n JobConfigValidationInvalid. Defaults to None.\n tags (Optional[Dict[str, Any]]): A set of tags to add to the job execution.\n\n Raises:\n DagsterGraphQLClientError("InvalidStepError", invalid_step_key): the job has an invalid step\n DagsterGraphQLClientError("InvalidOutputError", body=error_object): some solid has an invalid output within the job.\n The error_object is of type dagster_graphql.InvalidOutputErrorInfo.\n DagsterGraphQLClientError("RunConflict", message): a `DagsterRunConflict` occured during execution.\n This indicates that a conflicting job run already exists in run storage.\n DagsterGraphQLClientError("PipelineConfigurationInvalid", invalid_step_key): the run_config is not in the expected format\n for the job\n DagsterGraphQLClientError("JobNotFoundError", message): the requested job does not exist\n DagsterGraphQLClientError("PythonError", message): an internal framework error occurred\n\n Returns:\n str: run id of the submitted pipeline run\n """\n return self._core_submit_execution(\n pipeline_name=job_name,\n repository_location_name=repository_location_name,\n repository_name=repository_name,\n run_config=run_config,\n mode="default",\n preset=None,\n tags=tags,\n op_selection=op_selection,\n is_using_job_op_graph_apis=True,\n )
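A hedged end-to-end sketch of ``submit_job_execution``; the host, job, repository, and location names are placeholders.

.. code-block:: python

    from dagster_graphql import DagsterGraphQLClient, DagsterGraphQLClientError

    client = DagsterGraphQLClient("localhost", port_number=3000)  # placeholder host/port
    try:
        run_id = client.submit_job_execution(
            "my_job",                                # placeholder job name
            repository_location_name="my_location",  # optional; inferred when omitted
            repository_name="my_repository",
            run_config={},
        )
        # e.g. DagsterRunStatus.QUEUED or STARTED while the run is in flight
        print(run_id, client.get_run_status(run_id))
    except DagsterGraphQLClientError as exc:
        print(f"Submission failed: {exc}")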
\n\n
[docs] @public\n def get_run_status(self, run_id: str) -> DagsterRunStatus:\n """Get the status of a given Pipeline Run.\n\n Args:\n run_id (str): run id of the requested pipeline run.\n\n Raises:\n DagsterGraphQLClientError("PipelineNotFoundError", message): if the requested run id is not found\n DagsterGraphQLClientError("PythonError", message): on internal framework errors\n\n Returns:\n DagsterRunStatus: returns a status Enum describing the state of the requested pipeline run\n """\n check.str_param(run_id, "run_id")\n\n res_data: Dict[str, Dict[str, Any]] = self._execute(\n GET_PIPELINE_RUN_STATUS_QUERY, {"runId": run_id}\n )\n query_result: Dict[str, Any] = res_data["pipelineRunOrError"]\n query_result_type: str = query_result["__typename"]\n if query_result_type == "PipelineRun" or query_result_type == "Run":\n return DagsterRunStatus(query_result["status"])\n else:\n raise DagsterGraphQLClientError(query_result_type, query_result["message"])
\n\n
[docs] @public\n def reload_repository_location(\n self, repository_location_name: str\n ) -> ReloadRepositoryLocationInfo:\n """Reloads a Dagster Repository Location, which reloads all repositories in that repository location.\n\n This is useful in a variety of contexts, including refreshing the Dagster UI without restarting\n the server.\n\n Args:\n repository_location_name (str): The name of the repository location\n\n Returns:\n ReloadRepositoryLocationInfo: Object with information about the result of the reload request\n """\n check.str_param(repository_location_name, "repository_location_name")\n\n res_data: Dict[str, Dict[str, Any]] = self._execute(\n RELOAD_REPOSITORY_LOCATION_MUTATION,\n {"repositoryLocationName": repository_location_name},\n )\n\n query_result: Dict[str, Any] = res_data["reloadRepositoryLocation"]\n query_result_type: str = query_result["__typename"]\n if query_result_type == "WorkspaceLocationEntry":\n location_or_error_type = query_result["locationOrLoadError"]["__typename"]\n if location_or_error_type == "RepositoryLocation":\n return ReloadRepositoryLocationInfo(status=ReloadRepositoryLocationStatus.SUCCESS)\n else:\n return ReloadRepositoryLocationInfo(\n status=ReloadRepositoryLocationStatus.FAILURE,\n failure_type="PythonError",\n message=query_result["locationOrLoadError"]["message"],\n )\n else:\n # query_result_type is either ReloadNotSupported or RepositoryLocationNotFound\n return ReloadRepositoryLocationInfo(\n status=ReloadRepositoryLocationStatus.FAILURE,\n failure_type=query_result_type,\n message=query_result["message"],\n )
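A short sketch of consuming the returned ``ReloadRepositoryLocationInfo``; the location name is a placeholder.

.. code-block:: python

    from dagster_graphql import DagsterGraphQLClient, ReloadRepositoryLocationStatus

    client = DagsterGraphQLClient("localhost", port_number=3000)
    info = client.reload_repository_location("my_location")  # placeholder location name
    if info.status == ReloadRepositoryLocationStatus.SUCCESS:
        print("reload succeeded")
    else:
        print(f"reload failed ({info.failure_type}): {info.message}")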
\n\n
[docs] @deprecated(breaking_version="2.0")\n @public\n def shutdown_repository_location(\n self, repository_location_name: str\n ) -> ShutdownRepositoryLocationInfo:\n """Shuts down the server that is serving metadata for the provided repository location.\n\n This is primarily useful when you want the server to be restarted by the compute environment\n in which it is running (for example, in Kubernetes, the pod in which the server is running\n will automatically restart when the server is shut down, and the repository metadata will\n be reloaded)\n\n Args:\n repository_location_name (str): The name of the repository location\n\n Returns:\n ShutdownRepositoryLocationInfo: Object with information about the result of the reload request\n """\n check.str_param(repository_location_name, "repository_location_name")\n\n res_data: Dict[str, Dict[str, Any]] = self._execute(\n SHUTDOWN_REPOSITORY_LOCATION_MUTATION,\n {"repositoryLocationName": repository_location_name},\n )\n\n query_result: Dict[str, Any] = res_data["shutdownRepositoryLocation"]\n query_result_type: str = query_result["__typename"]\n if query_result_type == "ShutdownRepositoryLocationSuccess":\n return ShutdownRepositoryLocationInfo(status=ShutdownRepositoryLocationStatus.SUCCESS)\n elif (\n query_result_type == "RepositoryLocationNotFound" or query_result_type == "PythonError"\n ):\n return ShutdownRepositoryLocationInfo(\n status=ShutdownRepositoryLocationStatus.FAILURE,\n message=query_result["message"],\n )\n else:\n raise Exception(f"Unexpected query result type {query_result_type}")
\n\n def terminate_run(self, run_id: str):\n """Terminates a pipeline run. This method is useful when you would like to stop a pipeline run\n based on an external event.\n\n Args:\n run_id (str): The run id of the pipeline run to terminate\n """\n check.str_param(run_id, "run_id")\n\n res_data: Dict[str, Dict[str, Any]] = self._execute(\n TERMINATE_RUN_JOB_MUTATION, {"runId": run_id}\n )\n\n query_result: Dict[str, Any] = res_data["terminateRun"]\n query_result_type: str = query_result["__typename"]\n if query_result_type == "TerminateRunSuccess":\n return\n\n elif query_result_type == "RunNotFoundError":\n raise DagsterGraphQLClientError("RunNotFoundError", f"Run Id {run_id} not found")\n else:\n raise DagsterGraphQLClientError(query_result_type, query_result["message"])
\n
", "current_page_name": "_modules/dagster_graphql/client/client", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_graphql.client.client"}, "utils": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_graphql.client.utils

\nfrom enum import Enum\nfrom typing import Any, Dict, List, NamedTuple, Optional\n\n\n
[docs]class DagsterGraphQLClientError(Exception):\n def __init__(self, *args, body=None):\n super().__init__(*args)\n self.body = body
\n\n\n
[docs]class ReloadRepositoryLocationStatus(Enum):\n """This enum describes the status of a GraphQL mutation to reload a Dagster repository location.\n\n Args:\n Enum (str): can be either `ReloadRepositoryLocationStatus.SUCCESS`\n or `ReloadRepositoryLocationStatus.FAILURE`.\n """\n\n SUCCESS = "SUCCESS"\n FAILURE = "FAILURE"
\n\n\nclass ShutdownRepositoryLocationStatus(Enum):\n SUCCESS = "SUCCESS"\n FAILURE = "FAILURE"\n\n\n
[docs]class ReloadRepositoryLocationInfo(NamedTuple):\n """This class gives information about the result of reloading\n a Dagster repository location with a GraphQL mutation.\n\n Args:\n status (ReloadRepositoryLocationStatus): The status of the reload repository location mutation\n failure_type (Optional[str], optional): the failure type if `status == ReloadRepositoryLocationStatus.FAILURE`.\n Can be one of `ReloadNotSupported`, `RepositoryLocationNotFound`, or `RepositoryLocationLoadFailure`. Defaults to None.\n message (Optional[str], optional): the failure message/reason if\n `status == ReloadRepositoryLocationStatus.FAILURE`. Defaults to None.\n """\n\n status: ReloadRepositoryLocationStatus\n failure_type: Optional[str] = None\n message: Optional[str] = None
\n\n\nclass ShutdownRepositoryLocationInfo(NamedTuple):\n """This class gives information about the result of shutting down the server for\n a Dagster repository location using a GraphQL mutation.\n\n Args:\n status (ShutdownRepositoryLocationStatus): Whether the shutdown succeeded or failed.\n message (Optional[str], optional): the failure message/reason if\n `status == ShutdownRepositoryLocationStatus.FAILURE`. Defaults to None.\n """\n\n status: ShutdownRepositoryLocationStatus\n message: Optional[str] = None\n\n\nclass JobInfo(NamedTuple):\n repository_location_name: str\n repository_name: str\n job_name: str\n\n @staticmethod\n def from_node(node: Dict[str, Any]) -> List["JobInfo"]:\n repo_name = node["name"]\n repo_location_name = node["location"]["name"]\n return [\n JobInfo(\n repository_location_name=repo_location_name,\n repository_name=repo_name,\n job_name=job["name"],\n )\n for job in node["pipelines"]\n ]\n\n\n
[docs]class InvalidOutputErrorInfo(NamedTuple):\n """This class gives information about an InvalidOutputError from submitting a pipeline for execution\n from GraphQL.\n\n Args:\n step_key (str): key of the step that failed\n invalid_output_name (str): the name of the invalid output from the given step\n """\n\n step_key: str\n invalid_output_name: str
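A small sketch of unpacking ``InvalidOutputErrorInfo`` from a ``DagsterGraphQLClientError``, following the error shape raised by ``submit_job_execution``.

.. code-block:: python

    from dagster_graphql import DagsterGraphQLClientError, InvalidOutputErrorInfo

    def describe_client_error(err: DagsterGraphQLClientError) -> str:
        if err.args and err.args[0] == "InvalidOutputError" and isinstance(err.body, InvalidOutputErrorInfo):
            return f"step {err.body.step_key} produced invalid output {err.body.invalid_output_name}"
        return str(err)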
\n
", "current_page_name": "_modules/dagster_graphql/client/utils", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_graphql.client.utils"}}}, "dagster_k8s": {"executor": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_k8s.executor

\nfrom typing import Iterator, List, Optional, cast\n\nimport kubernetes.config\nfrom dagster import (\n    Field,\n    IntSource,\n    Noneable,\n    StringSource,\n    _check as check,\n    executor,\n)\nfrom dagster._core.definitions.executor_definition import multiple_process_executor_requirements\nfrom dagster._core.definitions.metadata import MetadataValue\nfrom dagster._core.events import DagsterEvent, EngineEventData\nfrom dagster._core.execution.retries import RetryMode, get_retries_config\nfrom dagster._core.execution.tags import get_tag_concurrency_limits_config\nfrom dagster._core.executor.base import Executor\nfrom dagster._core.executor.init import InitExecutorContext\nfrom dagster._core.executor.step_delegating import (\n    CheckStepHealthResult,\n    StepDelegatingExecutor,\n    StepHandler,\n    StepHandlerContext,\n)\nfrom dagster._utils.merger import merge_dicts\n\nfrom dagster_k8s.launcher import K8sRunLauncher\n\nfrom .client import DagsterKubernetesClient\nfrom .container_context import K8sContainerContext\nfrom .job import (\n    USER_DEFINED_K8S_CONFIG_SCHEMA,\n    DagsterK8sJobConfig,\n    UserDefinedDagsterK8sConfig,\n    construct_dagster_k8s_job,\n    get_k8s_job_name,\n    get_user_defined_k8s_config,\n)\n\n_K8S_EXECUTOR_CONFIG_SCHEMA = merge_dicts(\n    DagsterK8sJobConfig.config_type_job(),\n    {\n        "load_incluster_config": Field(\n            bool,\n            is_required=False,\n            description="""Whether or not the executor is running within a k8s cluster already. If\n            the job is using the `K8sRunLauncher`, the default value of this parameter will be\n            the same as the corresponding value on the run launcher.\n            If ``True``, we assume the executor is running within the target cluster and load config\n            using ``kubernetes.config.load_incluster_config``. Otherwise, we will use the k8s config\n            specified in ``kubeconfig_file`` (using ``kubernetes.config.load_kube_config``) or fall\n            back to the default kubeconfig.""",\n        ),\n        "kubeconfig_file": Field(\n            Noneable(str),\n            is_required=False,\n            description="""Path to a kubeconfig file to use, if not using default kubeconfig. If\n            the job is using the `K8sRunLauncher`, the default value of this parameter will be\n            the same as the corresponding value on the run launcher.""",\n        ),\n        "job_namespace": Field(StringSource, is_required=False),\n        "retries": get_retries_config(),\n        "max_concurrent": Field(\n            IntSource,\n            is_required=False,\n            description=(\n                "Limit on the number of pods that will run concurrently within the scope "\n                "of a Dagster run. Note that this limit is per run, not global."\n            ),\n        ),\n        "tag_concurrency_limits": get_tag_concurrency_limits_config(),\n        "step_k8s_config": Field(\n            USER_DEFINED_K8S_CONFIG_SCHEMA,\n            is_required=False,\n            description="Raw Kubernetes configuration for each step launched by the executor.",\n        ),\n    },\n)\n\n\n
[docs]@executor(\n name="k8s",\n config_schema=_K8S_EXECUTOR_CONFIG_SCHEMA,\n requirements=multiple_process_executor_requirements(),\n)\ndef k8s_job_executor(init_context: InitExecutorContext) -> Executor:\n """Executor which launches steps as Kubernetes Jobs.\n\n To use the `k8s_job_executor`, set it as the `executor_def` when defining a job:\n\n .. literalinclude:: ../../../../../../python_modules/libraries/dagster-k8s/dagster_k8s_tests/unit_tests/test_example_executor_mode_def.py\n :start-after: start_marker\n :end-before: end_marker\n :language: python\n\n Then you can configure the executor with run config as follows:\n\n .. code-block:: YAML\n\n execution:\n config:\n job_namespace: 'some-namespace'\n image_pull_policy: ...\n image_pull_secrets: ...\n service_account_name: ...\n env_config_maps: ...\n env_secrets: ...\n env_vars: ...\n job_image: ... # leave out if using userDeployments\n max_concurrent: ...\n\n `max_concurrent` limits the number of pods that will execute concurrently for one run. By default\n there is no limit- it will maximally parallel as allowed by the DAG. Note that this is not a\n global limit.\n\n Configuration set on the Kubernetes Jobs and Pods created by the `K8sRunLauncher` will also be\n set on Kubernetes Jobs and Pods created by the `k8s_job_executor`.\n\n Configuration set using `tags` on a `@job` will only apply to the `run` level. For configuration\n to apply at each `step` it must be set using `tags` for each `@op`.\n """\n run_launcher = (\n init_context.instance.run_launcher\n if isinstance(init_context.instance.run_launcher, K8sRunLauncher)\n else None\n )\n\n exc_cfg = init_context.executor_config\n\n k8s_container_context = K8sContainerContext(\n image_pull_policy=exc_cfg.get("image_pull_policy"), # type: ignore\n image_pull_secrets=exc_cfg.get("image_pull_secrets"), # type: ignore\n service_account_name=exc_cfg.get("service_account_name"), # type: ignore\n env_config_maps=exc_cfg.get("env_config_maps"), # type: ignore\n env_secrets=exc_cfg.get("env_secrets"), # type: ignore\n env_vars=exc_cfg.get("env_vars"), # type: ignore\n volume_mounts=exc_cfg.get("volume_mounts"), # type: ignore\n volumes=exc_cfg.get("volumes"), # type: ignore\n labels=exc_cfg.get("labels"), # type: ignore\n namespace=exc_cfg.get("job_namespace"), # type: ignore\n resources=exc_cfg.get("resources"), # type: ignore\n scheduler_name=exc_cfg.get("scheduler_name"), # type: ignore\n # step_k8s_config feeds into the run_k8s_config field because it is merged\n # with any configuration for the run that was set on the run launcher or code location\n run_k8s_config=UserDefinedDagsterK8sConfig.from_dict(exc_cfg.get("step_k8s_config", {})),\n )\n\n if "load_incluster_config" in exc_cfg:\n load_incluster_config = cast(bool, exc_cfg["load_incluster_config"])\n else:\n load_incluster_config = run_launcher.load_incluster_config if run_launcher else True\n\n if "kubeconfig_file" in exc_cfg:\n kubeconfig_file = cast(Optional[str], exc_cfg["kubeconfig_file"])\n else:\n kubeconfig_file = run_launcher.kubeconfig_file if run_launcher else None\n\n return StepDelegatingExecutor(\n K8sStepHandler(\n image=exc_cfg.get("job_image"), # type: ignore\n container_context=k8s_container_context,\n load_incluster_config=load_incluster_config,\n kubeconfig_file=kubeconfig_file,\n ),\n retries=RetryMode.from_config(exc_cfg["retries"]), # type: ignore\n max_concurrent=check.opt_int_elem(exc_cfg, "max_concurrent"),\n tag_concurrency_limits=check.opt_list_elem(exc_cfg, "tag_concurrency_limits"),\n 
should_verify_step=True,\n )
\n\n\nclass K8sStepHandler(StepHandler):\n @property\n def name(self):\n return "K8sStepHandler"\n\n def __init__(\n self,\n image: Optional[str],\n container_context: K8sContainerContext,\n load_incluster_config: bool,\n kubeconfig_file: Optional[str],\n k8s_client_batch_api=None,\n ):\n super().__init__()\n\n self._executor_image = check.opt_str_param(image, "image")\n self._executor_container_context = check.inst_param(\n container_context, "container_context", K8sContainerContext\n )\n\n if load_incluster_config:\n check.invariant(\n kubeconfig_file is None,\n "`kubeconfig_file` is set but `load_incluster_config` is True.",\n )\n kubernetes.config.load_incluster_config()\n else:\n check.opt_str_param(kubeconfig_file, "kubeconfig_file")\n kubernetes.config.load_kube_config(kubeconfig_file)\n\n self._api_client = DagsterKubernetesClient.production_client(\n batch_api_override=k8s_client_batch_api\n )\n\n def _get_step_key(self, step_handler_context: StepHandlerContext) -> str:\n step_keys_to_execute = cast(\n List[str], step_handler_context.execute_step_args.step_keys_to_execute\n )\n assert len(step_keys_to_execute) == 1, "Launching multiple steps is not currently supported"\n return step_keys_to_execute[0]\n\n def _get_container_context(\n self, step_handler_context: StepHandlerContext\n ) -> K8sContainerContext:\n step_key = self._get_step_key(step_handler_context)\n\n context = K8sContainerContext.create_for_run(\n step_handler_context.dagster_run,\n cast(K8sRunLauncher, step_handler_context.instance.run_launcher),\n include_run_tags=False, # For now don't include job-level dagster-k8s/config tags in step pods\n )\n context = context.merge(self._executor_container_context)\n\n user_defined_k8s_config = get_user_defined_k8s_config(\n step_handler_context.step_tags[step_key]\n )\n return context.merge(K8sContainerContext(run_k8s_config=user_defined_k8s_config))\n\n def _get_k8s_step_job_name(self, step_handler_context: StepHandlerContext):\n step_key = self._get_step_key(step_handler_context)\n\n name_key = get_k8s_job_name(\n step_handler_context.execute_step_args.run_id,\n step_key,\n )\n\n if step_handler_context.execute_step_args.known_state:\n retry_state = step_handler_context.execute_step_args.known_state.get_retry_state()\n if retry_state.get_attempt_count(step_key):\n return "dagster-step-%s-%d" % (name_key, retry_state.get_attempt_count(step_key))\n\n return "dagster-step-%s" % (name_key)\n\n def launch_step(self, step_handler_context: StepHandlerContext) -> Iterator[DagsterEvent]:\n step_key = self._get_step_key(step_handler_context)\n\n job_name = self._get_k8s_step_job_name(step_handler_context)\n pod_name = job_name\n\n container_context = self._get_container_context(step_handler_context)\n\n job_config = container_context.get_k8s_job_config(\n self._executor_image, step_handler_context.instance.run_launcher\n )\n\n args = step_handler_context.execute_step_args.get_command_args(\n skip_serialized_namedtuple=True\n )\n\n if not job_config.job_image:\n job_config = job_config.with_image(\n step_handler_context.execute_step_args.job_origin.repository_origin.container_image\n )\n\n if not job_config.job_image:\n raise Exception("No image included in either executor config or the job")\n\n run = step_handler_context.dagster_run\n labels = {\n "dagster/job": run.job_name,\n "dagster/op": step_key,\n "dagster/run-id": step_handler_context.execute_step_args.run_id,\n }\n if run.external_job_origin:\n labels["dagster/code-location"] = (\n 
run.external_job_origin.external_repository_origin.code_location_origin.location_name\n )\n job = construct_dagster_k8s_job(\n job_config=job_config,\n args=args,\n job_name=job_name,\n pod_name=pod_name,\n component="step_worker",\n user_defined_k8s_config=container_context.run_k8s_config,\n labels=labels,\n env_vars=[\n *step_handler_context.execute_step_args.get_command_env(),\n {\n "name": "DAGSTER_RUN_JOB_NAME",\n "value": run.job_name,\n },\n {"name": "DAGSTER_RUN_STEP_KEY", "value": step_key},\n *container_context.env,\n ],\n )\n\n yield DagsterEvent.step_worker_starting(\n step_handler_context.get_step_context(step_key),\n message=f'Executing step "{step_key}" in Kubernetes job {job_name}.',\n metadata={\n "Kubernetes Job name": MetadataValue.text(job_name),\n },\n )\n\n namespace = check.not_none(container_context.namespace)\n self._api_client.create_namespaced_job_with_retries(body=job, namespace=namespace)\n\n def check_step_health(self, step_handler_context: StepHandlerContext) -> CheckStepHealthResult:\n step_key = self._get_step_key(step_handler_context)\n\n job_name = self._get_k8s_step_job_name(step_handler_context)\n\n container_context = self._get_container_context(step_handler_context)\n\n status = self._api_client.get_job_status(\n namespace=container_context.namespace,\n job_name=job_name,\n )\n if status.failed:\n return CheckStepHealthResult.unhealthy(\n reason=f"Discovered failed Kubernetes job {job_name} for step {step_key}.",\n )\n\n return CheckStepHealthResult.healthy()\n\n def terminate_step(self, step_handler_context: StepHandlerContext) -> Iterator[DagsterEvent]:\n step_key = self._get_step_key(step_handler_context)\n\n job_name = self._get_k8s_step_job_name(step_handler_context)\n container_context = self._get_container_context(step_handler_context)\n\n yield DagsterEvent.engine_event(\n step_handler_context.get_step_context(step_key),\n message=f"Deleting Kubernetes job {job_name} for step",\n event_specific_data=EngineEventData(),\n )\n\n self._api_client.delete_job(job_name=job_name, namespace=container_context.namespace)\n
", "current_page_name": "_modules/dagster_k8s/executor", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_k8s.executor"}, "launcher": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_k8s.launcher

\nimport logging\nimport sys\nfrom typing import Any, Mapping, Optional, Sequence\n\nimport kubernetes\nfrom dagster import (\n    _check as check,\n)\nfrom dagster._cli.api import ExecuteRunArgs\nfrom dagster._core.events import EngineEventData\nfrom dagster._core.launcher import LaunchRunContext, ResumeRunContext, RunLauncher\nfrom dagster._core.launcher.base import CheckRunHealthResult, WorkerStatus\nfrom dagster._core.storage.dagster_run import DagsterRun, DagsterRunStatus\nfrom dagster._core.storage.tags import DOCKER_IMAGE_TAG\nfrom dagster._grpc.types import ResumeRunArgs\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData\nfrom dagster._utils.error import serializable_error_info_from_exc_info\n\nfrom .client import DagsterKubernetesClient\nfrom .container_context import K8sContainerContext\nfrom .job import DagsterK8sJobConfig, construct_dagster_k8s_job, get_job_name_from_run_id\n\n\n
[docs]class K8sRunLauncher(RunLauncher, ConfigurableClass):\n """RunLauncher that starts a Kubernetes Job for each Dagster job run.\n\n Encapsulates each run in a separate, isolated invocation of ``dagster-graphql``.\n\n You can configure a Dagster instance to use this RunLauncher by adding a section to your\n ``dagster.yaml`` like the following:\n\n .. code-block:: yaml\n\n run_launcher:\n module: dagster_k8s.launcher\n class: K8sRunLauncher\n config:\n service_account_name: your_service_account\n job_image: my_project/dagster_image:latest\n instance_config_map: dagster-instance\n postgres_password_secret: dagster-postgresql-secret\n\n """\n\n def __init__(\n self,\n service_account_name,\n instance_config_map,\n postgres_password_secret=None,\n dagster_home=None,\n job_image=None,\n image_pull_policy=None,\n image_pull_secrets=None,\n load_incluster_config=True,\n kubeconfig_file=None,\n inst_data: Optional[ConfigurableClassData] = None,\n job_namespace="default",\n env_config_maps=None,\n env_secrets=None,\n env_vars=None,\n k8s_client_batch_api=None,\n volume_mounts=None,\n volumes=None,\n labels=None,\n fail_pod_on_run_failure=None,\n resources=None,\n scheduler_name=None,\n security_context=None,\n run_k8s_config=None,\n ):\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n self.job_namespace = check.str_param(job_namespace, "job_namespace")\n\n self.load_incluster_config = load_incluster_config\n self.kubeconfig_file = kubeconfig_file\n if load_incluster_config:\n check.invariant(\n kubeconfig_file is None,\n "`kubeconfig_file` is set but `load_incluster_config` is True.",\n )\n kubernetes.config.load_incluster_config()\n else:\n check.opt_str_param(kubeconfig_file, "kubeconfig_file")\n kubernetes.config.load_kube_config(kubeconfig_file)\n\n self._api_client = DagsterKubernetesClient.production_client(\n batch_api_override=k8s_client_batch_api\n )\n\n self._job_config = None\n self._job_image = check.opt_str_param(job_image, "job_image")\n self.dagster_home = check.str_param(dagster_home, "dagster_home")\n self._image_pull_policy = check.opt_str_param(\n image_pull_policy, "image_pull_policy", "IfNotPresent"\n )\n self._image_pull_secrets = check.opt_list_param(\n image_pull_secrets, "image_pull_secrets", of_type=dict\n )\n self._service_account_name = check.str_param(service_account_name, "service_account_name")\n self.instance_config_map = check.str_param(instance_config_map, "instance_config_map")\n self.postgres_password_secret = check.opt_str_param(\n postgres_password_secret, "postgres_password_secret"\n )\n self._env_config_maps = check.opt_list_param(\n env_config_maps, "env_config_maps", of_type=str\n )\n self._env_secrets = check.opt_list_param(env_secrets, "env_secrets", of_type=str)\n self._env_vars = check.opt_list_param(env_vars, "env_vars", of_type=str)\n self._volume_mounts = check.opt_list_param(volume_mounts, "volume_mounts")\n self._volumes = check.opt_list_param(volumes, "volumes")\n self._labels: Mapping[str, str] = check.opt_mapping_param(\n labels, "labels", key_type=str, value_type=str\n )\n self._fail_pod_on_run_failure = check.opt_bool_param(\n fail_pod_on_run_failure, "fail_pod_on_run_failure"\n )\n self._resources: Mapping[str, Any] = check.opt_mapping_param(resources, "resources")\n self._scheduler_name = check.opt_str_param(scheduler_name, "scheduler_name")\n self._security_context = check.opt_dict_param(security_context, "security_context")\n self._run_k8s_config = check.opt_dict_param(run_k8s_config, 
"run_k8s_config")\n super().__init__()\n\n @property\n def job_image(self):\n return self._job_image\n\n @property\n def image_pull_policy(self) -> str:\n return self._image_pull_policy\n\n @property\n def image_pull_secrets(self) -> Sequence[Mapping]:\n return self._image_pull_secrets\n\n @property\n def service_account_name(self) -> str:\n return self._service_account_name\n\n @property\n def env_config_maps(self) -> Sequence[str]:\n return self._env_config_maps\n\n @property\n def env_secrets(self) -> Sequence[str]:\n return self._env_secrets\n\n @property\n def volume_mounts(self) -> Sequence:\n return self._volume_mounts\n\n @property\n def volumes(self) -> Sequence:\n return self._volumes\n\n @property\n def resources(self) -> Mapping:\n return self._resources\n\n @property\n def scheduler_name(self) -> Optional[str]:\n return self._scheduler_name\n\n @property\n def security_context(self) -> Mapping[str, Any]:\n return self._security_context\n\n @property\n def env_vars(self) -> Sequence[str]:\n return self._env_vars\n\n @property\n def labels(self) -> Mapping[str, str]:\n return self._labels\n\n @property\n def run_k8s_config(self) -> Mapping[str, str]:\n return self._run_k8s_config\n\n @property\n def fail_pod_on_run_failure(self) -> Optional[bool]:\n return self._fail_pod_on_run_failure\n\n @classmethod\n def config_type(cls):\n """Include all arguments required for DagsterK8sJobConfig along with additional arguments\n needed for the RunLauncher itself.\n """\n return DagsterK8sJobConfig.config_type_run_launcher()\n\n @classmethod\n def from_config_value(cls, inst_data, config_value):\n return cls(inst_data=inst_data, **config_value)\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n def get_container_context_for_run(self, dagster_run: DagsterRun) -> K8sContainerContext:\n return K8sContainerContext.create_for_run(dagster_run, self, include_run_tags=True)\n\n def _launch_k8s_job_with_args(\n self, job_name: str, args: Optional[Sequence[str]], run: DagsterRun\n ) -> None:\n container_context = self.get_container_context_for_run(run)\n\n pod_name = job_name\n\n job_origin = check.not_none(run.job_code_origin)\n user_defined_k8s_config = container_context.run_k8s_config\n repository_origin = job_origin.repository_origin\n\n job_config = container_context.get_k8s_job_config(\n job_image=repository_origin.container_image, run_launcher=self\n )\n job_image = job_config.job_image\n if job_image: # expected to be set\n self._instance.add_run_tags(\n run.run_id,\n {DOCKER_IMAGE_TAG: job_image},\n )\n\n labels = {\n "dagster/job": job_origin.job_name,\n "dagster/run-id": run.run_id,\n }\n if run.external_job_origin:\n labels["dagster/code-location"] = (\n run.external_job_origin.external_repository_origin.code_location_origin.location_name\n )\n\n job = construct_dagster_k8s_job(\n job_config=job_config,\n args=args,\n job_name=job_name,\n pod_name=pod_name,\n component="run_worker",\n user_defined_k8s_config=user_defined_k8s_config,\n labels=labels,\n env_vars=[\n {\n "name": "DAGSTER_RUN_JOB_NAME",\n "value": job_origin.job_name,\n },\n *container_context.env,\n ],\n )\n\n namespace = check.not_none(container_context.namespace)\n\n self._instance.report_engine_event(\n "Creating Kubernetes run worker job",\n run,\n EngineEventData(\n {\n "Kubernetes Job name": job_name,\n "Kubernetes Namespace": namespace,\n "Run ID": run.run_id,\n }\n ),\n cls=self.__class__,\n )\n\n self._api_client.create_namespaced_job_with_retries(body=job, 
namespace=namespace)\n self._instance.report_engine_event(\n "Kubernetes run worker job created",\n run,\n cls=self.__class__,\n )\n\n def launch_run(self, context: LaunchRunContext) -> None:\n run = context.dagster_run\n job_name = get_job_name_from_run_id(run.run_id)\n job_origin = check.not_none(run.job_code_origin)\n\n args = ExecuteRunArgs(\n job_origin=job_origin,\n run_id=run.run_id,\n instance_ref=self._instance.get_ref(),\n set_exit_code_on_failure=self._fail_pod_on_run_failure,\n ).get_command_args()\n\n self._launch_k8s_job_with_args(job_name, args, run)\n\n @property\n def supports_resume_run(self):\n return True\n\n def resume_run(self, context: ResumeRunContext) -> None:\n run = context.dagster_run\n job_name = get_job_name_from_run_id(\n run.run_id, resume_attempt_number=context.resume_attempt_number\n )\n job_origin = check.not_none(run.job_code_origin)\n\n args = ResumeRunArgs(\n job_origin=job_origin,\n run_id=run.run_id,\n instance_ref=self._instance.get_ref(),\n set_exit_code_on_failure=self._fail_pod_on_run_failure,\n ).get_command_args()\n\n self._launch_k8s_job_with_args(job_name, args, run)\n\n def terminate(self, run_id):\n check.str_param(run_id, "run_id")\n run = self._instance.get_run_by_id(run_id)\n\n if not run:\n return False\n\n self._instance.report_run_canceling(run)\n\n container_context = self.get_container_context_for_run(run)\n\n job_name = get_job_name_from_run_id(\n run_id, resume_attempt_number=self._instance.count_resume_run_attempts(run.run_id)\n )\n\n try:\n termination_result = self._api_client.delete_job(\n job_name=job_name, namespace=container_context.namespace\n )\n if termination_result:\n self._instance.report_engine_event(\n message="Run was terminated successfully.",\n dagster_run=run,\n cls=self.__class__,\n )\n else:\n self._instance.report_engine_event(\n message="Run was not terminated successfully; delete_job returned {}".format(\n termination_result\n ),\n dagster_run=run,\n cls=self.__class__,\n )\n return termination_result\n except Exception:\n self._instance.report_engine_event(\n message="Run was not terminated successfully; encountered error in delete_job",\n dagster_run=run,\n engine_event_data=EngineEventData.engine_error(\n serializable_error_info_from_exc_info(sys.exc_info())\n ),\n cls=self.__class__,\n )\n\n @property\n def supports_check_run_worker_health(self):\n return True\n\n @property\n def supports_run_worker_crash_recovery(self):\n return True\n\n def get_run_worker_debug_info(self, run: DagsterRun) -> Optional[str]:\n container_context = self.get_container_context_for_run(run)\n if self.supports_run_worker_crash_recovery:\n resume_attempt_number = self._instance.count_resume_run_attempts(run.run_id)\n else:\n resume_attempt_number = None\n\n job_name = get_job_name_from_run_id(run.run_id, resume_attempt_number=resume_attempt_number)\n namespace = container_context.namespace\n user_defined_k8s_config = container_context.run_k8s_config\n container_name = user_defined_k8s_config.container_config.get("name", "dagster")\n pod_names = self._api_client.get_pod_names_in_job(job_name, namespace=namespace)\n full_msg = ""\n try:\n pod_debug_info = [\n self._api_client.get_pod_debug_info(\n pod_name, namespace, container_name=container_name\n )\n for pod_name in pod_names\n ]\n full_msg = "\\n".join(pod_debug_info)\n except Exception:\n logging.exception(\n f"Error trying to get debug information for failed k8s job {job_name}"\n )\n if pod_names:\n full_msg = (\n full_msg\n + "\\nFor more information about the failure, 
try running `kubectl describe pod"\n f" {pod_names[0]}`, `kubectl logs {pod_names[0]}`, or `kubectl describe job"\n f" {job_name}` in your cluster."\n )\n\n else:\n full_msg = (\n full_msg\n + "\\nFor more information about the failure, try running `kubectl describe job"\n f" {job_name}` in your cluster."\n )\n\n return full_msg\n\n def check_run_worker_health(self, run: DagsterRun):\n container_context = self.get_container_context_for_run(run)\n\n if self.supports_run_worker_crash_recovery:\n resume_attempt_number = self._instance.count_resume_run_attempts(run.run_id)\n else:\n resume_attempt_number = None\n\n job_name = get_job_name_from_run_id(run.run_id, resume_attempt_number=resume_attempt_number)\n try:\n status = self._api_client.get_job_status(\n namespace=container_context.namespace,\n job_name=job_name,\n )\n except Exception:\n return CheckRunHealthResult(\n WorkerStatus.UNKNOWN, str(serializable_error_info_from_exc_info(sys.exc_info()))\n )\n\n inactive_job_with_finished_pods = bool(\n (not status.active) and (status.failed or status.succeeded)\n )\n\n # If the run is in a non-terminal (and non-STARTING) state but the k8s job is not active,\n # something went wrong\n if (\n run.status in (DagsterRunStatus.STARTED, DagsterRunStatus.CANCELING)\n and inactive_job_with_finished_pods\n ):\n return CheckRunHealthResult(\n WorkerStatus.FAILED, "Run has not completed but K8s job has no active pods"\n )\n\n if status.failed:\n return CheckRunHealthResult(WorkerStatus.FAILED, "K8s job failed")\n if status.succeeded:\n return CheckRunHealthResult(WorkerStatus.SUCCESS)\n return CheckRunHealthResult(WorkerStatus.RUNNING)
\n
", "current_page_name": "_modules/dagster_k8s/launcher", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_k8s.launcher"}, "ops": {"k8s_job_op": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_k8s.ops.k8s_job_op

\nimport time\nfrom typing import Any, Dict, List, Optional\n\nimport kubernetes.config\nimport kubernetes.watch\nfrom dagster import (\n    Enum as DagsterEnum,\n    Field,\n    In,\n    Noneable,\n    Nothing,\n    OpExecutionContext,\n    Permissive,\n    StringSource,\n    op,\n)\nfrom dagster._annotations import experimental\nfrom dagster._utils.merger import merge_dicts\n\nfrom ..client import DEFAULT_JOB_POD_COUNT, DagsterKubernetesClient\nfrom ..container_context import K8sContainerContext\nfrom ..job import (\n    DagsterK8sJobConfig,\n    K8sConfigMergeBehavior,\n    UserDefinedDagsterK8sConfig,\n    construct_dagster_k8s_job,\n    get_k8s_job_name,\n)\nfrom ..launcher import K8sRunLauncher\n\nK8S_JOB_OP_CONFIG = merge_dicts(\n    DagsterK8sJobConfig.config_type_container(),\n    {\n        "image": Field(\n            StringSource,\n            is_required=True,\n            description="The image in which to launch the k8s job.",\n        ),\n        "command": Field(\n            [str],\n            is_required=False,\n            description="The command to run in the container within the launched k8s job.",\n        ),\n        "args": Field(\n            [str],\n            is_required=False,\n            description="The args for the command for the container.",\n        ),\n        "namespace": Field(StringSource, is_required=False),\n        "load_incluster_config": Field(\n            bool,\n            is_required=False,\n            default_value=True,\n            description="""Set this value if you are running the launcher\n            within a k8s cluster. If ``True``, we assume the launcher is running within the target\n            cluster and load config using ``kubernetes.config.load_incluster_config``. Otherwise,\n            we will use the k8s config specified in ``kubeconfig_file`` (using\n            ``kubernetes.config.load_kube_config``) or fall back to the default kubeconfig.""",\n        ),\n        "kubeconfig_file": Field(\n            Noneable(str),\n            is_required=False,\n            default_value=None,\n            description=(\n                "The kubeconfig file from which to load config. 
Defaults to using the default"\n                " kubeconfig."\n            ),\n        ),\n        "timeout": Field(\n            int,\n            is_required=False,\n            description="How long to wait for the job to succeed before raising an exception",\n        ),\n        "container_config": Field(\n            Permissive(),\n            is_required=False,\n            description=(\n                "Raw k8s config for the k8s pod's main container"\n                " (https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#container-v1-core)."\n                " Keys can either snake_case or camelCase."\n            ),\n        ),\n        "pod_template_spec_metadata": Field(\n            Permissive(),\n            is_required=False,\n            description=(\n                "Raw k8s config for the k8s pod's metadata"\n                " (https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/object-meta/#ObjectMeta)."\n                " Keys can either snake_case or camelCase."\n            ),\n        ),\n        "pod_spec_config": Field(\n            Permissive(),\n            is_required=False,\n            description=(\n                "Raw k8s config for the k8s pod's pod spec"\n                " (https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec)."\n                " Keys can either snake_case or camelCase."\n            ),\n        ),\n        "job_metadata": Field(\n            Permissive(),\n            is_required=False,\n            description=(\n                "Raw k8s config for the k8s job's metadata"\n                " (https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/object-meta/#ObjectMeta)."\n                " Keys can either snake_case or camelCase."\n            ),\n        ),\n        "job_spec_config": Field(\n            Permissive(),\n            is_required=False,\n            description=(\n                "Raw k8s config for the k8s job's job spec"\n                " (https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#jobspec-v1-batch)."\n                " Keys can either snake_case or camelCase."\n            ),\n        ),\n        "merge_behavior": Field(\n            DagsterEnum.from_python_enum(K8sConfigMergeBehavior),\n            is_required=False,\n            default_value=K8sConfigMergeBehavior.SHALLOW.value,\n            description=(\n                "How raw k8s config set on this op should be merged with any raw k8s config set on"\n                " the code location that launched the op. By default, the value is SHALLOW, meaning"\n                " that the two dictionaries are shallowly merged - any shared values in the "\n                " dictionaries will be replaced by the values set on this op. Setting it to DEEP"\n                " will recursively merge the two dictionaries, appending list fields together and"\n                " merging dictionary fields."\n            ),\n        ),\n    },\n)\n\n\n
[docs]@experimental\ndef execute_k8s_job(\n context: OpExecutionContext,\n image: str,\n command: Optional[List[str]] = None,\n args: Optional[List[str]] = None,\n namespace: Optional[str] = None,\n image_pull_policy: Optional[str] = None,\n image_pull_secrets: Optional[List[Dict[str, str]]] = None,\n service_account_name: Optional[str] = None,\n env_config_maps: Optional[List[str]] = None,\n env_secrets: Optional[List[str]] = None,\n env_vars: Optional[List[str]] = None,\n volume_mounts: Optional[List[Dict[str, Any]]] = None,\n volumes: Optional[List[Dict[str, Any]]] = None,\n labels: Optional[Dict[str, str]] = None,\n resources: Optional[Dict[str, Any]] = None,\n scheduler_name: Optional[str] = None,\n load_incluster_config: bool = True,\n kubeconfig_file: Optional[str] = None,\n timeout: Optional[int] = None,\n container_config: Optional[Dict[str, Any]] = None,\n pod_template_spec_metadata: Optional[Dict[str, Any]] = None,\n pod_spec_config: Optional[Dict[str, Any]] = None,\n job_metadata: Optional[Dict[str, Any]] = None,\n job_spec_config: Optional[Dict[str, Any]] = None,\n k8s_job_name: Optional[str] = None,\n merge_behavior: K8sConfigMergeBehavior = K8sConfigMergeBehavior.SHALLOW,\n):\n """This function is a utility for executing a Kubernetes job from within a Dagster op.\n\n Args:\n image (str): The image in which to launch the k8s job.\n command (Optional[List[str]]): The command to run in the container within the launched\n k8s job. Default: None.\n args (Optional[List[str]]): The args for the command for the container. Default: None.\n namespace (Optional[str]): Override the kubernetes namespace in which to run the k8s job.\n Default: None.\n image_pull_policy (Optional[str]): Allows the image pull policy to be overridden, e.g. to\n facilitate local testing with `kind <https://kind.sigs.k8s.io/>`_. Default:\n ``"Always"``. See:\n https://kubernetes.io/docs/concepts/containers/images/#updating-images.\n image_pull_secrets (Optional[List[Dict[str, str]]]): Optionally, a list of dicts, each of\n which corresponds to a Kubernetes ``LocalObjectReference`` (e.g.,\n ``{'name': 'myRegistryName'}``). This allows you to specify the ``imagePullSecrets`` on\n a pod basis. Typically, these will be provided through the service account, when needed,\n and you will not need to pass this argument. See:\n https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod\n and https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#podspec-v1-core\n service_account_name (Optional[str]): The name of the Kubernetes service account under which\n to run the Job. Defaults to "default".\n env_config_maps (Optional[List[str]]): A list of custom ConfigMapEnvSource names from which to\n draw environment variables (using ``envFrom``) for the Job. Default: ``[]``. See:\n https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/#define-an-environment-variable-for-a-container\n env_secrets (Optional[List[str]]): A list of custom Secret names from which to\n draw environment variables (using ``envFrom``) for the Job. Default: ``[]``. See:\n https://kubernetes.io/docs/tasks/inject-data-application/distribute-credentials-secure/#configure-all-key-value-pairs-in-a-secret-as-container-environment-variables\n env_vars (Optional[List[str]]): A list of environment variables to inject into the Job.\n Default: ``[]``. 
See: https://kubernetes.io/docs/tasks/inject-data-application/distribute-credentials-secure/#configure-all-key-value-pairs-in-a-secret-as-container-environment-variables\n volume_mounts (Optional[List[Permissive]]): A list of volume mounts to include in the job's\n container. Default: ``[]``. See:\n https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#volumemount-v1-core\n volumes (Optional[List[Permissive]]): A list of volumes to include in the Job's Pod. Default: ``[]``. See:\n https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#volume-v1-core\n labels (Optional[Dict[str, str]]): Additional labels that should be included in the Job's Pod. See:\n https://kubernetes.io/docs/concepts/overview/working-with-objects/labels\n resources (Optional[Dict[str, Any]]): Compute resource requirements for the container. See:\n https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/\n scheduler_name (Optional[str]): Use a custom Kubernetes scheduler for launched Pods. See:\n https://kubernetes.io/docs/tasks/extend-kubernetes/configure-multiple-schedulers/\n load_incluster_config (bool): Whether the op is running within a k8s cluster. If ``True``,\n we assume the launcher is running within the target cluster and load config using\n ``kubernetes.config.load_incluster_config``. Otherwise, we will use the k8s config\n specified in ``kubeconfig_file`` (using ``kubernetes.config.load_kube_config``) or fall\n back to the default kubeconfig. Default: True.\n kubeconfig_file (Optional[str]): The kubeconfig file from which to load config. Defaults to\n using the default kubeconfig. Default: None.\n timeout (Optional[int]): Raise an exception if the op takes longer than this timeout in\n seconds to execute. Default: None.\n container_config (Optional[Dict[str, Any]]): Raw k8s config for the k8s pod's main container\n (https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#container-v1-core).\n Keys can be either snake_case or camelCase. Default: None.\n pod_template_spec_metadata (Optional[Dict[str, Any]]): Raw k8s config for the k8s pod's\n metadata (https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/object-meta/#ObjectMeta).\n Keys can be either snake_case or camelCase. Default: None.\n pod_spec_config (Optional[Dict[str, Any]]): Raw k8s config for the k8s pod's pod spec\n (https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec).\n Keys can be either snake_case or camelCase. Default: None.\n job_metadata (Optional[Dict[str, Any]]): Raw k8s config for the k8s job's metadata\n (https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/object-meta/#ObjectMeta).\n Keys can be either snake_case or camelCase. Default: None.\n job_spec_config (Optional[Dict[str, Any]]): Raw k8s config for the k8s job's job spec\n (https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#jobspec-v1-batch).\n Keys can be either snake_case or camelCase. Default: None.\n k8s_job_name (Optional[str]): Overrides the name of the k8s job. If not set, will be set\n to a unique name based on the current run ID and the name of the calling op. If set,\n make sure that the passed in name is a valid Kubernetes job name that does not\n already exist in the cluster.\n merge_behavior (Optional[K8sConfigMergeBehavior]): How raw k8s config set on this op should\n be merged with any raw k8s config set on the code location that launched the op. 
By\n default, the value is K8sConfigMergeBehavior.SHALLOW, meaning that the two dictionaries\n are shallowly merged - any shared values in the dictionaries will be replaced by the\n values set on this op. Setting it to DEEP will recursively merge the two dictionaries,\n appending list fields together andmerging dictionary fields.\n """\n run_container_context = K8sContainerContext.create_for_run(\n context.dagster_run,\n (\n context.instance.run_launcher\n if isinstance(context.instance.run_launcher, K8sRunLauncher)\n else None\n ),\n include_run_tags=False,\n )\n\n container_config = container_config.copy() if container_config else {}\n if command:\n container_config["command"] = command\n\n op_container_context = K8sContainerContext(\n image_pull_policy=image_pull_policy,\n image_pull_secrets=image_pull_secrets,\n service_account_name=service_account_name,\n env_config_maps=env_config_maps,\n env_secrets=env_secrets,\n env_vars=env_vars,\n volume_mounts=volume_mounts,\n volumes=volumes,\n labels=labels,\n namespace=namespace,\n resources=resources,\n scheduler_name=scheduler_name,\n run_k8s_config=UserDefinedDagsterK8sConfig.from_dict(\n {\n "container_config": container_config,\n "pod_template_spec_metadata": pod_template_spec_metadata,\n "pod_spec_config": pod_spec_config,\n "job_metadata": job_metadata,\n "job_spec_config": job_spec_config,\n "merge_behavior": merge_behavior.value,\n }\n ),\n )\n\n container_context = run_container_context.merge(op_container_context)\n\n namespace = container_context.namespace\n\n user_defined_k8s_config = container_context.run_k8s_config\n\n k8s_job_config = DagsterK8sJobConfig(\n job_image=image,\n dagster_home=None,\n image_pull_policy=container_context.image_pull_policy,\n image_pull_secrets=container_context.image_pull_secrets,\n service_account_name=container_context.service_account_name,\n instance_config_map=None,\n postgres_password_secret=None,\n env_config_maps=container_context.env_config_maps,\n env_secrets=container_context.env_secrets,\n env_vars=container_context.env_vars,\n volume_mounts=container_context.volume_mounts,\n volumes=container_context.volumes,\n labels=container_context.labels,\n resources=container_context.resources,\n )\n\n job_name = k8s_job_name or get_k8s_job_name(\n context.run_id, context.get_step_execution_context().step.key\n )\n\n retry_number = context.retry_number\n if retry_number > 0:\n job_name = f"{job_name}-{retry_number}"\n\n labels = {\n "dagster/job": context.dagster_run.job_name,\n "dagster/op": context.op.name,\n "dagster/run-id": context.dagster_run.run_id,\n }\n if context.dagster_run.external_job_origin:\n labels["dagster/code-location"] = (\n context.dagster_run.external_job_origin.external_repository_origin.code_location_origin.location_name\n )\n\n job = construct_dagster_k8s_job(\n job_config=k8s_job_config,\n args=args,\n job_name=job_name,\n pod_name=job_name,\n component="k8s_job_op",\n user_defined_k8s_config=user_defined_k8s_config,\n labels=labels,\n )\n\n if load_incluster_config:\n kubernetes.config.load_incluster_config()\n else:\n kubernetes.config.load_kube_config(kubeconfig_file)\n\n # changing this to be able to be passed in will allow for unit testing\n api_client = DagsterKubernetesClient.production_client()\n\n context.log.info(f"Creating Kubernetes job {job_name} in namespace {namespace}...")\n\n start_time = time.time()\n\n api_client.batch_api.create_namespaced_job(namespace, job)\n\n context.log.info("Waiting for Kubernetes job to finish...")\n\n timeout = timeout or 0\n\n 
api_client.wait_for_job(\n job_name=job_name,\n namespace=namespace,\n wait_timeout=timeout,\n start_time=start_time,\n )\n\n restart_policy = user_defined_k8s_config.pod_spec_config.get("restart_policy", "Never")\n\n if restart_policy == "Never":\n container_name = container_config.get("name", "dagster")\n\n pods = api_client.wait_for_job_to_have_pods(\n job_name,\n namespace,\n wait_timeout=timeout,\n start_time=start_time,\n )\n\n pod_names = [p.metadata.name for p in pods]\n\n if not pod_names:\n raise Exception("No pod names in job after it started")\n\n pod_to_watch = pod_names[0]\n watch = kubernetes.watch.Watch() # consider moving in to api_client\n\n api_client.wait_for_pod(\n pod_to_watch, namespace, wait_timeout=timeout, start_time=start_time\n )\n\n log_stream = watch.stream(\n api_client.core_api.read_namespaced_pod_log,\n name=pod_to_watch,\n namespace=namespace,\n container=container_name,\n )\n\n while True:\n if timeout and time.time() - start_time > timeout:\n watch.stop()\n raise Exception("Timed out waiting for pod to finish")\n\n try:\n log_entry = next(log_stream)\n print(log_entry) # noqa: T201\n except StopIteration:\n break\n else:\n context.log.info("Pod logs are disabled, because restart_policy is not Never")\n\n if job_spec_config and job_spec_config.get("parallelism"):\n num_pods_to_wait_for = job_spec_config["parallelism"]\n else:\n num_pods_to_wait_for = DEFAULT_JOB_POD_COUNT\n api_client.wait_for_running_job_to_succeed(\n job_name=job_name,\n namespace=namespace,\n wait_timeout=timeout,\n start_time=start_time,\n num_pods_to_wait_for=num_pods_to_wait_for,\n )
\n\n\n
[docs]@op(ins={"start_after": In(Nothing)}, config_schema=K8S_JOB_OP_CONFIG)\n@experimental\ndef k8s_job_op(context):\n """An op that runs a Kubernetes job using the k8s API.\n\n Contrast with the `k8s_job_executor`, which runs each Dagster op in a Dagster job in its\n own k8s job.\n\n This op may be useful when:\n - You need to orchestrate a command that isn't a Dagster op (or isn't written in Python)\n - You want to run the rest of a Dagster job using a specific executor, and only a single\n op in k8s.\n\n For example:\n\n .. literalinclude:: ../../../../../../python_modules/libraries/dagster-k8s/dagster_k8s_tests/unit_tests/test_example_k8s_job_op.py\n :start-after: start_marker\n :end-before: end_marker\n :language: python\n\n You can create your own op with the same implementation by calling the `execute_k8s_job` function\n inside your own op.\n\n The service account that is used to run this job should have the following RBAC permissions:\n\n .. literalinclude:: ../../../../../../examples/docs_snippets/docs_snippets/deploying/kubernetes/k8s_job_op_rbac.yaml\n :language: YAML\n """\n if "merge_behavior" in context.op_config:\n merge_behavior = K8sConfigMergeBehavior(context.op_config.pop("merge_behavior"))\n else:\n merge_behavior = K8sConfigMergeBehavior.SHALLOW\n\n execute_k8s_job(context, merge_behavior=merge_behavior, **context.op_config)
\n
", "current_page_name": "_modules/dagster_k8s/ops/k8s_job_op", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_k8s.ops.k8s_job_op"}}, "pipes": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_k8s.pipes

\nimport random\nimport string\nfrom contextlib import contextmanager\nfrom typing import Any, Iterator, Mapping, Optional, Sequence, Union\n\nimport kubernetes\nfrom dagster import (\n    OpExecutionContext,\n    _check as check,\n)\nfrom dagster._annotations import experimental\nfrom dagster._core.definitions.resource_annotation import ResourceParam\nfrom dagster._core.errors import DagsterInvariantViolationError\nfrom dagster._core.pipes.client import (\n    PipesClient,\n    PipesClientCompletedInvocation,\n    PipesContextInjector,\n    PipesMessageReader,\n    PipesParams,\n)\nfrom dagster._core.pipes.context import (\n    PipesMessageHandler,\n)\nfrom dagster._core.pipes.utils import (\n    PipesEnvContextInjector,\n    extract_message_or_forward_to_stdout,\n    open_pipes_session,\n)\nfrom dagster_pipes import (\n    PipesDefaultMessageWriter,\n    PipesExtras,\n)\n\nfrom dagster_k8s.utils import get_common_labels\n\nfrom .client import DagsterKubernetesClient, WaitForPodState\nfrom .models import k8s_model_from_dict, k8s_snake_case_dict\n\n\ndef get_pod_name(run_id: str, op_name: str):\n    clean_op_name = op_name.replace("_", "-")\n    suffix = "".join(random.choice(string.digits) for i in range(10))\n    return f"dagster-{run_id[:18]}-{clean_op_name[:20]}-{suffix}"\n\n\nDEFAULT_CONTAINER_NAME = "dagster-pipes-execution"\n\n\n
[docs]@experimental\nclass PipesK8sPodLogsMessageReader(PipesMessageReader):\n """Message reader that reads messages from kubernetes pod logs."""\n\n @contextmanager\n def read_messages(\n self,\n handler: PipesMessageHandler,\n ) -> Iterator[PipesParams]:\n self._handler = handler\n try:\n yield {PipesDefaultMessageWriter.STDIO_KEY: PipesDefaultMessageWriter.STDERR}\n finally:\n self._handler = None\n\n def consume_pod_logs(\n self,\n core_api: kubernetes.client.CoreV1Api,\n pod_name: str,\n namespace: str,\n ):\n handler = check.not_none(\n self._handler, "can only consume logs within scope of context manager"\n )\n for line in core_api.read_namespaced_pod_log(\n pod_name,\n namespace,\n follow=True,\n _preload_content=False, # avoid JSON processing\n ).stream():\n log_chunk = line.decode("utf-8")\n for log_line in log_chunk.split("\\n"):\n extract_message_or_forward_to_stdout(handler, log_line)\n\n def no_messages_debug_text(self) -> str:\n return "Attempted to read messages by extracting them from kubernetes pod logs directly."
\n\n\n@experimental\nclass _PipesK8sClient(PipesClient):\n """A pipes client for launching kubernetes pods.\n\n By default context is injected via environment variables and messages are parsed out of\n the pod logs, with other logs forwarded to stdout of the orchestration process.\n\n The first container within the containers list of the pod spec is expected (or set) to be\n the container prepared for pipes protocol communication.\n\n Args:\n env (Optional[Mapping[str, str]]): An optional dict of environment variables to pass to the\n subprocess.\n context_injector (Optional[PipesContextInjector]): A context injector to use to inject\n context into the k8s container process. Defaults to :py:class:`PipesEnvContextInjector`.\n message_reader (Optional[PipesMessageReader]): A message reader to use to read messages\n from the k8s container process. Defaults to :py:class:`PipesK8sPodLogsMessageReader`.\n """\n\n def __init__(\n self,\n env: Optional[Mapping[str, str]] = None,\n context_injector: Optional[PipesContextInjector] = None,\n message_reader: Optional[PipesMessageReader] = None,\n ):\n self.env = check.opt_mapping_param(env, "env", key_type=str, value_type=str)\n self.context_injector = (\n check.opt_inst_param(\n context_injector,\n "context_injector",\n PipesContextInjector,\n )\n or PipesEnvContextInjector()\n )\n\n self.message_reader = (\n check.opt_inst_param(message_reader, "message_reader", PipesMessageReader)\n or PipesK8sPodLogsMessageReader()\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def run(\n self,\n *,\n context: OpExecutionContext,\n extras: Optional[PipesExtras] = None,\n image: Optional[str] = None,\n command: Optional[Union[str, Sequence[str]]] = None,\n namespace: Optional[str] = None,\n env: Optional[Mapping[str, str]] = None,\n base_pod_meta: Optional[Mapping[str, Any]] = None,\n base_pod_spec: Optional[Mapping[str, Any]] = None,\n ) -> PipesClientCompletedInvocation:\n """Publish a kubernetes pod and wait for it to complete, enriched with the pipes protocol.\n\n Args:\n image (Optional[str]):\n The image to set the first container in the pod spec to use.\n command (Optional[Union[str, Sequence[str]]]):\n The command to set the first container in the pod spec to use.\n namespace (Optional[str]):\n Which kubernetes namespace to use, defaults to "default"\n env (Optional[Mapping[str,str]]):\n A mapping of environment variable names to values to set on the first\n container in the pod spec, on top of those configured on resource.\n base_pod_meta (Optional[Mapping[str, Any]]:\n Raw k8s config for the k8s pod's metadata\n (https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/object-meta/#ObjectMeta)\n Keys can either snake_case or camelCase. 
The name value will be overridden.\n base_pod_spec (Optional[Mapping[str, Any]]:\n Raw k8s config for the k8s pod's pod spec\n (https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec).\n Keys can either snake_case or camelCase.\n extras (Optional[PipesExtras]):\n Extra values to pass along as part of the ext protocol.\n context_injector (Optional[PipesContextInjector]):\n Override the default ext protocol context injection.\n message_reader (Optional[PipesMessageReader]):\n Override the default ext protocol message reader.\n\n Returns:\n PipesClientCompletedInvocation: Wrapper containing results reported by the external\n process.\n """\n client = DagsterKubernetesClient.production_client()\n\n with open_pipes_session(\n context=context,\n extras=extras,\n context_injector=self.context_injector,\n message_reader=self.message_reader,\n ) as pipes_session:\n namespace = namespace or "default"\n pod_name = get_pod_name(context.run_id, context.op.name)\n pod_body = build_pod_body(\n pod_name=pod_name,\n image=image,\n command=command,\n env_vars={\n **pipes_session.get_bootstrap_env_vars(),\n **(self.env or {}),\n **(env or {}),\n },\n base_pod_meta=base_pod_meta,\n base_pod_spec=base_pod_spec,\n )\n client.core_api.create_namespaced_pod(namespace, pod_body)\n try:\n # if were doing direct pod reading, wait for pod to start and then stream logs out\n if isinstance(self.message_reader, PipesK8sPodLogsMessageReader):\n client.wait_for_pod(\n pod_name,\n namespace,\n wait_for_state=WaitForPodState.Ready,\n )\n self.message_reader.consume_pod_logs(\n core_api=client.core_api,\n pod_name=pod_name,\n namespace=namespace,\n )\n else:\n # if were not doing direct log reading, just wait for pod to finish\n client.wait_for_pod(\n pod_name,\n namespace,\n wait_for_state=WaitForPodState.Terminated,\n )\n finally:\n client.core_api.delete_namespaced_pod(pod_name, namespace)\n return PipesClientCompletedInvocation(tuple(pipes_session.get_results()))\n\n\ndef build_pod_body(\n pod_name: str,\n image: Optional[str],\n command: Optional[Union[str, Sequence[str]]],\n env_vars: Mapping[str, str],\n base_pod_meta: Optional[Mapping[str, Any]],\n base_pod_spec: Optional[Mapping[str, Any]],\n):\n meta = {\n **(k8s_snake_case_dict(kubernetes.client.V1ObjectMeta, base_pod_meta or {})),\n "name": pod_name,\n }\n if "labels" in meta:\n meta["labels"] = {**get_common_labels(), **meta["labels"]}\n else:\n meta["labels"] = get_common_labels()\n\n spec = {**k8s_snake_case_dict(kubernetes.client.V1PodSpec, base_pod_spec or {})}\n if "containers" not in spec:\n spec["containers"] = [{}]\n\n if "restart_policy" not in spec:\n spec["restart_policy"] = "Never"\n elif spec["restart_policy"] == "Always":\n raise DagsterInvariantViolationError(\n "A restart policy of Always is not allowed, computations are expected to complete."\n )\n\n if "image" not in spec["containers"][0] and not image:\n raise DagsterInvariantViolationError(\n "Must specify image property or provide base_pod_spec with one set."\n )\n\n if "name" not in spec["containers"][0]:\n spec["containers"][0]["name"] = DEFAULT_CONTAINER_NAME\n\n if image:\n spec["containers"][0]["image"] = image\n\n if command:\n spec["containers"][0]["command"] = command\n\n if "env" not in spec["containers"][0]:\n spec["containers"][0]["env"] = []\n\n spec["containers"][0]["env"].extend({"name": k, "value": v} for k, v in env_vars.items())\n\n return k8s_model_from_dict(\n kubernetes.client.V1Pod,\n {\n "metadata": meta,\n "spec": spec,\n },\n 
)\n\n\nPipesK8sClient = ResourceParam[_PipesK8sClient]\n
", "current_page_name": "_modules/dagster_k8s/pipes", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_k8s.pipes"}}, "dagster_mlflow": {"hooks": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_mlflow.hooks

\nfrom dagster._core.definitions.decorators.hook_decorator import event_list_hook\nfrom dagster._core.definitions.events import HookExecutionResult\nfrom mlflow.entities.run_status import RunStatus\n\n\ndef _create_mlflow_run_hook(name):\n    @event_list_hook(name=name, required_resource_keys={"mlflow"})\n    def _hook(context, event_list):\n        for event in event_list:\n            if event.is_step_success:\n                _cleanup_on_success(context)\n            elif event.is_step_failure:\n                mlf = context.resources.mlflow\n                mlf.end_run(status=RunStatus.to_string(RunStatus.FAILED))\n\n        return HookExecutionResult(hook_name=name, is_skipped=False)\n\n    return _hook\n\n\ndef _cleanup_on_success(context):\n    """Checks if the current solid in the context is the last solid in the job\n    and ends the mlflow run with a successful status when this is the case.\n    """\n    last_solid_name = context._step_execution_context.job_def.nodes_in_topological_order[  # noqa: SLF001  # fmt: skip\n        -1\n    ].name\n\n    if context.op.name == last_solid_name:\n        context.resources.mlflow.end_run()\n\n\nend_mlflow_on_run_finished = _create_mlflow_run_hook("end_mlflow_on_run_finished")\n
", "current_page_name": "_modules/dagster_mlflow/hooks", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_mlflow.hooks"}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_mlflow.resources

\n"""This module contains the mlflow resource provided by the MlFlow\nclass. This resource provides an easy way to configure mlflow for logging various\nthings from dagster runs.\n"""\nimport atexit\nimport sys\nfrom itertools import islice\nfrom os import environ\nfrom typing import Any, Optional\n\nimport mlflow\nfrom dagster import Field, Noneable, Permissive, StringSource, resource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom mlflow.entities.run_status import RunStatus\n\nCONFIG_SCHEMA = {\n    "experiment_name": Field(StringSource, is_required=True, description="MlFlow experiment name."),\n    "mlflow_tracking_uri": Field(\n        Noneable(StringSource),\n        default_value=None,\n        is_required=False,\n        description="MlFlow tracking server uri.",\n    ),\n    "parent_run_id": Field(\n        Noneable(str),\n        default_value=None,\n        is_required=False,\n        description="Mlflow run ID of parent run if this is a nested run.",\n    ),\n    "env": Field(Permissive(), description="Environment variables for mlflow setup."),\n    "env_to_tag": Field(\n        Noneable(list),\n        default_value=None,\n        is_required=False,\n        description="List of environment variables to log as tags in mlflow.",\n    ),\n    "extra_tags": Field(Permissive(), description="Any extra key-value tags to log to mlflow."),\n}\n\n\nclass MlflowMeta(type):\n    """Mlflow Metaclass to create methods that "inherit" all of Mlflow's\n    methods. If the class has a method defined it is excluded from the\n    attribute setting from mlflow.\n    """\n\n    def __new__(cls, name, bases, attrs):\n        class_cls = super(MlflowMeta, cls).__new__(cls, name, bases, attrs)\n        for attr in (attr for attr in dir(mlflow) if attr not in dir(class_cls)):\n            mlflow_attribute = getattr(mlflow, attr)\n            if callable(mlflow_attribute):\n                setattr(class_cls, attr, staticmethod(mlflow_attribute))\n            else:\n                setattr(class_cls, attr, mlflow_attribute)\n        return class_cls\n\n\nclass MlFlow(metaclass=MlflowMeta):\n    """Class for setting up an mlflow resource for dagster runs.\n    This takes care of all the configuration required to use mlflow tracking and the complexities of\n    mlflow tracking dagster parallel runs.\n    """\n\n    def __init__(self, context):\n        # Context associated attributes\n        self.log = context.log\n        self.run_name = context.dagster_run.job_name\n        self.dagster_run_id = context.run_id\n\n        # resource config attributes\n        resource_config = context.resource_config\n        self.tracking_uri = resource_config.get("mlflow_tracking_uri")\n        if self.tracking_uri:\n            mlflow.set_tracking_uri(self.tracking_uri)\n        self.parent_run_id = resource_config.get("parent_run_id")\n        self.experiment_name = resource_config["experiment_name"]\n        self.env_tags_to_log = resource_config.get("env_to_tag") or []\n        self.extra_tags = resource_config.get("extra_tags")\n\n        # Update env variables if any are given\n        self.env_vars = resource_config.get("env", {})\n        if self.env_vars:\n            environ.update(self.env_vars)\n\n        # If the experiment exists then the set won't do anything\n        mlflow.set_experiment(self.experiment_name)\n        self.experiment = mlflow.get_experiment_by_name(self.experiment_name)\n\n        # Get the client object\n        self.tracking_client = 
mlflow.tracking.MlflowClient()\n\n        # Set up the active run and tags\n        self._setup()\n\n    def _setup(self):\n        """Sets the active run and tags. If an Mlflow run_id exists then the\n        active run is set to it. This way a single Dagster run outputs data\n        to the same Mlflow run, even when multiprocess executors are used.\n        """\n        # Get the run id\n        run_id = self._get_current_run_id()\n        self._set_active_run(run_id=run_id)\n        self._set_all_tags()\n\n        # hack needed to stop mlflow from marking run as finished when\n        # a process exits in parallel runs\n        atexit.unregister(mlflow.end_run)\n\n    def _get_current_run_id(\n        self, experiment: Optional[Any] = None, dagster_run_id: Optional[str] = None\n    ):\n        """Gets the run id of a specific dagster run and experiment id.\n        If it doesn't exist then it returns a None.\n\n        Args:\n            experiment (optional): Mlflow experiment.\n            When none is passed it fetches the experiment object set in\n            the constructor.  Defaults to None.\n            dagster_run_id (optional): The Dagster run id.\n            When none is passed it fetches the dagster_run_id object set in\n            the constructor.  Defaults to None.\n\n        Returns:\n            run_id (str or None): run_id if it is found else None\n        """\n        experiment = experiment or self.experiment\n        dagster_run_id = dagster_run_id or self.dagster_run_id\n        if experiment:\n            # Check if a run with this dagster run id has already been started\n            # in mlflow, will get an empty dataframe if not\n            current_run_df = mlflow.search_runs(\n                experiment_ids=[experiment.experiment_id],\n                filter_string=f"tags.dagster_run_id='{dagster_run_id}'",\n            )\n            if not current_run_df.empty:\n                return current_run_df.run_id.values[0]\n\n    def _set_active_run(self, run_id=None):\n        """This method sets the active run to be that of the specified\n        run_id. If None is passed then a new run is started. The new run also\n        takes care of nested runs.\n\n        Args:\n            run_id (str, optional): Mlflow run_id. 
Defaults to None.\n        """\n        nested_run = False\n        if self.parent_run_id is not None:\n            self._start_run(run_id=self.parent_run_id, run_name=self.run_name)\n            nested_run = True\n        self._start_run(run_id=run_id, run_name=self.run_name, nested=nested_run)\n\n    def _start_run(self, **kwargs):\n        """Catches the Mlflow exception if a run is already active."""\n        try:\n            run = mlflow.start_run(**kwargs)\n            self.log.info(\n                f"Starting a new mlflow run with id {run.info.run_id} "\n                f"in experiment {self.experiment_name}"\n            )\n        except Exception as ex:\n            run = mlflow.active_run()\n            if "is already active" not in str(ex):\n                raise (ex)\n            self.log.info(f"Run with id {run.info.run_id} is already active.")\n\n    def _set_all_tags(self):\n        """Method collects dagster_run_id plus all env variables/tags that have been\n            specified by the user in the config_schema and logs them as tags in mlflow.\n\n        Returns:\n            tags [dict]: Dictionary of all the tags\n        """\n        tags = {tag: environ.get(tag) for tag in self.env_tags_to_log}\n        tags["dagster_run_id"] = self.dagster_run_id\n        if self.extra_tags:\n            tags.update(self.extra_tags)\n\n        mlflow.set_tags(tags)\n\n    def cleanup_on_error(self):\n        """Method ends mlflow run with correct exit status for failed runs. Note that\n        this method does not work when a job running in the webserver fails, it seems\n        that in this case a different process runs the job and when it fails\n        the stack trace is therefore not available. For this case we can use the\n        cleanup_on_failure hook defined below.\n        """\n        any_error = sys.exc_info()\n\n        if any_error[1]:\n            if isinstance(any_error[1], KeyboardInterrupt):\n                mlflow.end_run(status=RunStatus.to_string(RunStatus.KILLED))\n            else:\n                mlflow.end_run(status=RunStatus.to_string(RunStatus.FAILED))\n\n    @staticmethod\n    def log_params(params: dict):\n        """Overload of the mlflow.log_params. If len(params) >100 then\n        params is sent to mlflow in chunks.\n\n        Args:\n            params (dict): Parameters to be logged\n        """\n        for param_chunk in MlFlow.chunks(params, 100):\n            mlflow.log_params(param_chunk)\n\n    @staticmethod\n    def chunks(params: dict, size: int = 100):\n        """Method that chunks a dictionary into batches of size.\n\n        Args:\n            params (dict): Dictionary set to be batched\n            size (int, optional): Number of batches. Defaults to 100.\n\n        Yields:\n            (dict): Batch of dictionary\n        """\n        it = iter(params)\n        for _ in range(0, len(params), size):\n            yield {k: params[k] for k in islice(it, size)}\n\n\n
[docs]@dagster_maintained_resource\n@resource(config_schema=CONFIG_SCHEMA)\ndef mlflow_tracking(context):\n """This resource initializes an MLflow run that's used for all steps within a Dagster run.\n\n This resource provides access to all of mlflow's methods as well as the mlflow tracking client's\n methods.\n\n Usage:\n\n 1. Add the mlflow resource to any ops in which you want to invoke mlflow tracking APIs.\n 2. Add the `end_mlflow_on_run_finished` hook to your job to end the MLflow run\n when the Dagster run is finished.\n\n Examples:\n .. code-block:: python\n\n from dagster_mlflow import end_mlflow_on_run_finished, mlflow_tracking\n\n @op(required_resource_keys={"mlflow"})\n def mlflow_op(context):\n mlflow.log_params(some_params)\n mlflow.tracking.MlflowClient().create_registered_model(some_model_name)\n\n @end_mlflow_on_run_finished\n @job(resource_defs={"mlflow": mlflow_tracking})\n def mlf_example():\n mlflow_op()\n\n # example using an mlflow instance with s3 storage\n mlf_example.execute_in_process(run_config={\n "resources": {\n "mlflow": {\n "config": {\n "experiment_name": my_experiment,\n "mlflow_tracking_uri": "http://localhost:5000",\n\n # if want to run a nested run, provide parent_run_id\n "parent_run_id": an_existing_mlflow_run_id,\n\n # env variables to pass to mlflow\n "env": {\n "MLFLOW_S3_ENDPOINT_URL": my_s3_endpoint,\n "AWS_ACCESS_KEY_ID": my_aws_key_id,\n "AWS_SECRET_ACCESS_KEY": my_secret,\n },\n\n # env variables you want to log as mlflow tags\n "env_to_tag": ["DOCKER_IMAGE_TAG"],\n\n # key-value tags to add to your experiment\n "extra_tags": {"super": "experiment"},\n }\n }\n }\n })\n """\n mlf = MlFlow(context)\n yield mlf\n mlf.cleanup_on_error()
\n
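As a supplement to the docstring above, here is a minimal sketch (the op and job names, the experiment name, and the parameter count are all illustrative) showing ``mlflow_tracking`` paired with ``end_mlflow_on_run_finished``, and that ``MlFlow.log_params`` accepts dictionaries of any size because it splits them into chunks of 100 before forwarding them to mlflow:

.. code-block:: python

    from dagster import job, op
    from dagster_mlflow import end_mlflow_on_run_finished, mlflow_tracking


    @op(required_resource_keys={"mlflow"})
    def log_many_params(context):
        # log_params chunks the dictionary into batches of 100 under the hood,
        # so large parameter sets are handled transparently
        params = {f"param_{i}": i for i in range(250)}
        context.resources.mlflow.log_params(params)


    @end_mlflow_on_run_finished
    @job(resource_defs={"mlflow": mlflow_tracking})
    def mlflow_params_job():
        log_many_params()


    if __name__ == "__main__":
        mlflow_params_job.execute_in_process(
            run_config={
                "resources": {
                    "mlflow": {"config": {"experiment_name": "params-demo"}}
                }
            }
        )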
", "current_page_name": "_modules/dagster_mlflow/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_mlflow.resources"}}, "dagster_msteams": {"hooks": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_msteams.hooks

\nfrom typing import Callable, Optional\n\nfrom dagster._annotations import deprecated_param\nfrom dagster._core.definitions import failure_hook, success_hook\nfrom dagster._core.execution.context.hook import HookContext\nfrom dagster._utils.warnings import normalize_renamed_param\n\nfrom dagster_msteams.card import Card\n\n\ndef _default_status_message(context: HookContext, status: str) -> str:\n    return f"Op {context.op.name} on job {context.job_name} {status}!\\nRun ID: {context.run_id}"\n\n\ndef _default_failure_message(context: HookContext) -> str:\n    return _default_status_message(context, status="failed")\n\n\ndef _default_success_message(context: HookContext) -> str:\n    return _default_status_message(context, status="succeeded")\n\n\n
[docs]@deprecated_param(\n param="dagit_base_url",\n breaking_version="2.0",\n additional_warn_text="Use `webserver_base_url` instead.",\n)\ndef teams_on_failure(\n message_fn: Callable[[HookContext], str] = _default_failure_message,\n dagit_base_url: Optional[str] = None,\n webserver_base_url: Optional[str] = None,\n):\n """Create a hook on step failure events that will message the given MS Teams webhook URL.\n\n Args:\n message_fn (Optional(Callable[[HookContext], str])): Function which takes in the\n HookContext outputs the message you want to send.\n dagit_base_url: (Optional[str]): The base url of your webserver instance. Specify this\n to allow messages to include deeplinks to the specific run that triggered\n the hook.\n webserver_base_url: (Optional[str]): The base url of your webserver instance. Specify this\n to allow messages to include deeplinks to the specific run that triggered\n the hook.\n\n Examples:\n .. code-block:: python\n\n @teams_on_failure(webserver_base_url="http://localhost:3000")\n @job(...)\n def my_job():\n pass\n\n .. code-block:: python\n\n def my_message_fn(context: HookContext) -> str:\n return f"Op {context.op.name} failed!"\n\n @op\n def a_op(context):\n pass\n\n @job(...)\n def my_job():\n a_op.with_hooks(hook_defs={teams_on_failure("#foo", my_message_fn)})\n\n """\n webserver_base_url = normalize_renamed_param(\n webserver_base_url, "webserver_base_url", dagit_base_url, "dagit_base_url"\n )\n\n @failure_hook(required_resource_keys={"msteams"})\n def _hook(context: HookContext):\n text = message_fn(context)\n if webserver_base_url:\n text += f"<a href='{webserver_base_url}/runs/{context.run_id}'>View in Dagster UI</a>"\n card = Card()\n card.add_attachment(text_message=text)\n context.resources.msteams.post_message(payload=card.payload)\n\n return _hook
\n\n\n
[docs]@deprecated_param(\n param="dagit_base_url",\n breaking_version="2.0",\n additional_warn_text="Use `webserver_base_url` instead.",\n)\ndef teams_on_success(\n message_fn: Callable[[HookContext], str] = _default_success_message,\n dagit_base_url: Optional[str] = None,\n webserver_base_url: Optional[str] = None,\n):\n """Create a hook on step success events that will message the given MS Teams webhook URL.\n\n Args:\n message_fn (Optional(Callable[[HookContext], str])): Function which takes in the\n HookContext outputs the message you want to send.\n dagit_base_url: (Optional[str]): The base url of your webserver instance. Specify this\n to allow messages to include deeplinks to the specific run that triggered\n the hook.\n\n Examples:\n .. code-block:: python\n\n @teams_on_success(webserver_base_url="http://localhost:3000")\n @job(...)\n def my_job():\n pass\n\n .. code-block:: python\n\n def my_message_fn(context: HookContext) -> str:\n return f"Op {context.op.name} failed!"\n\n @op\n def a_op(context):\n pass\n\n @job(...)\n def my_job():\n a_op.with_hooks(hook_defs={teams_on_success("#foo", my_message_fn)})\n\n """\n webserver_base_url = normalize_renamed_param(\n webserver_base_url, "webserver_base_url", dagit_base_url, "dagit_base_url"\n )\n\n @success_hook(required_resource_keys={"msteams"})\n def _hook(context: HookContext):\n text = message_fn(context)\n if webserver_base_url:\n text += f"<a href='{webserver_base_url}/runs/{context.run_id}'>View in webserver</a>"\n card = Card()\n card.add_attachment(text_message=text)\n context.resources.msteams.post_message(payload=card.payload)\n\n return _hook
\n
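For quick reference, a minimal sketch (the op and job names are illustrative; ``TEAMS_WEBHOOK_URL`` follows the convention used in the examples above) that attaches both hooks to one job so the webhook is notified on success and on failure, with deep links back to the webserver:

.. code-block:: python

    import os

    from dagster import job, op
    from dagster_msteams import msteams_resource, teams_on_failure, teams_on_success


    @op
    def sometimes_flaky_op():
        return 1


    @teams_on_failure(webserver_base_url="http://localhost:3000")
    @teams_on_success(webserver_base_url="http://localhost:3000")
    @job(
        resource_defs={
            "msteams": msteams_resource.configured(
                {"hook_url": os.getenv("TEAMS_WEBHOOK_URL")}
            )
        }
    )
    def notified_job():
        sometimes_flaky_op()

The two hook decorators compose, since each simply attaches an additional hook to the job.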
", "current_page_name": "_modules/dagster_msteams/hooks", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_msteams.hooks"}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_msteams.resources

\nfrom dagster import ConfigurableResource, resource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom pydantic import Field\n\nfrom dagster_msteams.client import TeamsClient\n\n\n
[docs]class MSTeamsResource(ConfigurableResource):\n """This resource is for connecting to Microsoft Teams.\n\n Provides a `dagster_msteams.TeamsClient` which can be used to\n interface with the MS Teams API.\n\n By configuring this resource, you can post messages to MS Teams from any Dagster op,\n asset, schedule, or sensor:\n\n Examples:\n .. code-block:: python\n\n import os\n\n from dagster import op, job, Definitions, EnvVar\n from dagster_msteams import Card, MSTeamsResource\n\n\n @op\n def teams_op(msteams: MSTeamsResource):\n card = Card()\n card.add_attachment(text_message="Hello There !!")\n msteams.get_client().post_message(payload=card.payload)\n\n\n @job\n def teams_job():\n teams_op()\n\n defs = Definitions(\n jobs=[teams_job],\n resources={\n "msteams": MSTeamsResource(\n hook_url=EnvVar("TEAMS_WEBHOOK_URL")\n )\n }\n )\n """\n\n hook_url: str = Field(\n default=None,\n description=(\n "To send messages to MS Teams channel, an incoming webhook has to be created. The"\n " incoming webhook url must be given as a part of the resource config to the"\n " MSTeamsResource in Dagster. For more information on how to create an incoming"\n " webhook, see"\n " https://docs.microsoft.com/en-us/microsoftteams/platform/webhooks-and-connectors/how-to/add-incoming-webhook"\n ),\n )\n http_proxy: str = Field(default=None, description="HTTP proxy URL")\n https_proxy: str = Field(default=None, description="HTTPS proxy URL")\n timeout: float = Field(default=60, description="Timeout for requests to MS Teams")\n verify: bool = Field(\n default=True, description="Whether to verify SSL certificates, defaults to True"\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def get_client(self) -> TeamsClient:\n return TeamsClient(\n hook_url=self.hook_url,\n http_proxy=self.http_proxy,\n https_proxy=self.https_proxy,\n timeout=self.timeout,\n verify=self.verify,\n )
\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n config_schema=MSTeamsResource.to_config_schema(),\n description="This resource is for connecting to MS Teams",\n)\ndef msteams_resource(context) -> TeamsClient:\n """This resource is for connecting to Microsoft Teams.\n\n The resource object is a `dagster_msteams.TeamsClient`.\n\n By configuring this resource, you can post messages to MS Teams from any Dagster solid:\n\n Examples:\n .. code-block:: python\n\n import os\n\n from dagster import op, job\n from dagster_msteams import Card, msteams_resource\n\n\n @op(required_resource_keys={"msteams"})\n def teams_op(context):\n card = Card()\n card.add_attachment(text_message="Hello There !!")\n context.resources.msteams.post_message(payload=card.payload)\n\n\n @job(resource_defs={"msteams": msteams_resource})\n def teams_job():\n teams_op()\n\n\n teams_job.execute_in_process(\n {"resources": {"msteams": {"config": {"hook_url": os.getenv("TEAMS_WEBHOOK_URL")}}}}\n )\n """\n return MSTeamsResource.from_resource_context(context).get_client()
\n
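The docstring above notes that the resource can also be used from assets; a minimal sketch under that assumption (the asset name is illustrative, and ``TEAMS_WEBHOOK_URL`` mirrors the environment variable used in the examples above):

.. code-block:: python

    from dagster import Definitions, EnvVar, asset
    from dagster_msteams import Card, MSTeamsResource


    @asset
    def notifying_asset(msteams: MSTeamsResource) -> int:
        # post a message to the configured incoming webhook when the asset materializes
        card = Card()
        card.add_attachment(text_message="notifying_asset was materialized")
        msteams.get_client().post_message(payload=card.payload)
        return 1


    defs = Definitions(
        assets=[notifying_asset],
        resources={"msteams": MSTeamsResource(hook_url=EnvVar("TEAMS_WEBHOOK_URL"))},
    )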
", "current_page_name": "_modules/dagster_msteams/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_msteams.resources"}, "sensors": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_msteams.sensors

\nfrom typing import TYPE_CHECKING, Callable, Optional, Sequence, Union\n\nfrom dagster import DefaultSensorStatus\nfrom dagster._annotations import deprecated_param\nfrom dagster._core.definitions import GraphDefinition, JobDefinition\nfrom dagster._core.definitions.run_status_sensor_definition import (\n    RunFailureSensorContext,\n    run_failure_sensor,\n)\nfrom dagster._core.definitions.unresolved_asset_job_definition import UnresolvedAssetJobDefinition\nfrom dagster._utils.warnings import normalize_renamed_param\n\nfrom dagster_msteams.card import Card\nfrom dagster_msteams.client import TeamsClient\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.selector import JobSelector, RepositorySelector\n\n\ndef _default_failure_message(context: RunFailureSensorContext) -> str:\n    return "\\n".join(\n        [\n            f"Job {context.dagster_run.job_name} failed!",\n            f"Run ID: {context.dagster_run.run_id}",\n            f"Error: {context.failure_event.message}",\n        ]\n    )\n\n\n
[docs]@deprecated_param(\n param="dagit_base_url",\n breaking_version="2.0",\n additional_warn_text="Use `webserver_base_url` instead.",\n)\ndef make_teams_on_run_failure_sensor(\n hook_url: str,\n message_fn: Callable[[RunFailureSensorContext], str] = _default_failure_message,\n http_proxy: Optional[str] = None,\n https_proxy: Optional[str] = None,\n timeout: Optional[float] = 60,\n verify: Optional[bool] = None,\n name: Optional[str] = None,\n dagit_base_url: Optional[str] = None,\n default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,\n monitored_jobs: Optional[\n Sequence[\n Union[\n JobDefinition,\n GraphDefinition,\n UnresolvedAssetJobDefinition,\n "RepositorySelector",\n "JobSelector",\n ]\n ]\n ] = None,\n monitor_all_repositories: bool = False,\n webserver_base_url: Optional[str] = None,\n):\n """Create a sensor on run failures that will message the given MS Teams webhook URL.\n\n Args:\n hook_url (str): MS Teams incoming webhook URL.\n message_fn (Optional(Callable[[RunFailureSensorContext], str])): Function which\n takes in the ``RunFailureSensorContext`` and outputs the message you want to send.\n Defaults to a text message that contains error message, job name, and run ID.\n http_proxy : (Optional[str]): Proxy for requests using http protocol.\n https_proxy : (Optional[str]): Proxy for requests using https protocol.\n timeout: (Optional[float]): Connection timeout in seconds. Defaults to 60.\n verify: (Optional[bool]): Whether to verify the servers TLS certificate.\n name: (Optional[str]): The name of the sensor. Defaults to "teams_on_run_failure".\n dagit_base_url: (Optional[str]): The base url of your webserver instance. Specify this to allow\n messages to include deeplinks to the failed run.\n default_status (DefaultSensorStatus): Whether the sensor starts as running or not. The default\n status can be overridden from Dagit or via the GraphQL API.\n monitored_jobs (Optional[List[Union[JobDefinition, GraphDefinition, UnresolvedAssetJobDefinition, RepositorySelector, JobSelector]]]):\n Jobs in the current repository that will be monitored by this sensor. Defaults to None,\n which means the alert will be sent when any job in the repository matches the requested\n run_status. To monitor jobs in external repositories, use RepositorySelector and JobSelector.\n monitor_all_repositories (bool): If set to True, the sensor will monitor all runs in the\n Dagster instance. If set to True, an error will be raised if you also specify\n monitored_jobs or job_selection. Defaults to False.\n webserver_base_url: (Optional[str]): The base url of your webserver instance. Specify this to allow\n messages to include deeplinks to the failed run.\n\n Examples:\n .. code-block:: python\n\n teams_on_run_failure = make_teams_on_run_failure_sensor(\n hook_url=os.getenv("TEAMS_WEBHOOK_URL")\n )\n\n @repository\n def my_repo():\n return [my_job + teams_on_run_failure]\n\n .. code-block:: python\n\n def my_message_fn(context: RunFailureSensorContext) -> str:\n return "Job {job_name} failed! 
Error: {error}".format(\n job_name=context.dagster_run.job_name,\n error=context.failure_event.message,\n )\n\n teams_on_run_failure = make_teams_on_run_failure_sensor(\n hook_url=os.getenv("TEAMS_WEBHOOK_URL"),\n message_fn=my_message_fn,\n webserver_base_url="http://localhost:3000",\n )\n\n\n """\n webserver_base_url = normalize_renamed_param(\n webserver_base_url, "webserver_base_url", dagit_base_url, "dagit_base_url"\n )\n\n teams_client = TeamsClient(\n hook_url=hook_url,\n http_proxy=http_proxy,\n https_proxy=https_proxy,\n timeout=timeout,\n verify=verify,\n )\n\n @run_failure_sensor(\n name=name,\n default_status=default_status,\n monitored_jobs=monitored_jobs,\n monitor_all_repositories=monitor_all_repositories,\n )\n def teams_on_run_failure(context: RunFailureSensorContext):\n text = message_fn(context)\n if webserver_base_url:\n text += "<a href='{base_url}/runs/{run_id}'>View in Dagit</a>".format(\n base_url=webserver_base_url,\n run_id=context.dagster_run.run_id,\n )\n card = Card()\n card.add_attachment(text_message=text)\n teams_client.post_message(payload=card.payload)\n\n return teams_on_run_failure
\n
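To tie the pieces above together, a minimal sketch (the op and job names are illustrative) that scopes the failure sensor to a single job and registers both with a ``Definitions`` object:

.. code-block:: python

    import os

    from dagster import Definitions, job, op
    from dagster_msteams import make_teams_on_run_failure_sensor


    @op
    def always_fails():
        raise Exception("boom")


    @job
    def fragile_job():
        always_fails()


    teams_on_run_failure = make_teams_on_run_failure_sensor(
        hook_url=os.getenv("TEAMS_WEBHOOK_URL"),
        monitored_jobs=[fragile_job],
        webserver_base_url="http://localhost:3000",
    )

    defs = Definitions(jobs=[fragile_job], sensors=[teams_on_run_failure])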
", "current_page_name": "_modules/dagster_msteams/sensors", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_msteams.sensors"}}, "dagster_mysql": {"event_log": {"event_log": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_mysql.event_log.event_log

\nfrom typing import ContextManager, Optional, cast\n\nimport dagster._check as check\nimport sqlalchemy as db\nimport sqlalchemy.dialects as db_dialects\nimport sqlalchemy.exc as db_exc\nimport sqlalchemy.pool as db_pool\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._core.event_api import EventHandlerFn\nfrom dagster._core.events.log import EventLogEntry\nfrom dagster._core.storage.config import MySqlStorageConfig, mysql_config\nfrom dagster._core.storage.event_log import (\n    AssetKeyTable,\n    SqlEventLogStorage,\n    SqlEventLogStorageMetadata,\n    SqlPollingEventWatcher,\n)\nfrom dagster._core.storage.event_log.base import EventLogCursor\nfrom dagster._core.storage.event_log.migration import ASSET_KEY_INDEX_COLS\nfrom dagster._core.storage.sql import (\n    AlembicVersion,\n    check_alembic_revision,\n    create_engine,\n    run_alembic_upgrade,\n    stamp_alembic_rev,\n)\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData\nfrom sqlalchemy.engine import Connection\n\nfrom ..utils import (\n    create_mysql_connection,\n    mysql_alembic_config,\n    mysql_isolation_level,\n    mysql_url_from_config,\n    parse_mysql_version,\n    retry_mysql_connection_fn,\n    retry_mysql_creation_fn,\n)\n\nMINIMUM_MYSQL_INTERSECT_VERSION = "8.0.31"\n\n\n
[docs]class MySQLEventLogStorage(SqlEventLogStorage, ConfigurableClass):\n """MySQL-backed event log storage.\n\n Users should not directly instantiate this class; it is instantiated by internal machinery when\n ``dagster-webserver`` and ``dagster-graphql`` load, based on the values in the ``dagster.yaml`` file in\n ``$DAGSTER_HOME``. Configuration of this class should be done by setting values in that file.\n\n .. literalinclude:: ../../../../../../examples/docs_snippets/docs_snippets/deploying/dagster-mysql-legacy.yaml\n :caption: dagster.yaml\n :start-after: start_marker_event_log\n :end-before: end_marker_event_log\n :language: YAML\n\n Note that the fields in this config are :py:class:`~dagster.StringSource` and\n :py:class:`~dagster.IntSource` and can be configured from environment variables.\n\n """\n\n def __init__(self, mysql_url: str, inst_data: Optional[ConfigurableClassData] = None):\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n self.mysql_url = check.str_param(mysql_url, "mysql_url")\n self._disposed = False\n\n self._event_watcher = SqlPollingEventWatcher(self)\n\n # Default to not holding any connections open to prevent accumulating connections per DagsterInstance\n self._engine = create_engine(\n self.mysql_url,\n isolation_level=mysql_isolation_level(),\n poolclass=db_pool.NullPool,\n )\n self._secondary_index_cache = {}\n\n table_names = retry_mysql_connection_fn(db.inspect(self._engine).get_table_names)\n\n # Stamp and create tables if the main table does not exist (we can't check alembic\n # revision because alembic config may be shared with other storage classes)\n if "event_logs" not in table_names:\n retry_mysql_creation_fn(self._init_db)\n # mark all secondary indexes to be used\n self.reindex_events()\n self.reindex_assets()\n\n self._mysql_version = self.get_server_version()\n super().__init__()\n\n def _init_db(self) -> None:\n with self._connect() as conn:\n SqlEventLogStorageMetadata.create_all(conn)\n stamp_alembic_rev(mysql_alembic_config(__file__), conn)\n\n def optimize_for_webserver(self, statement_timeout: int, pool_recycle: int) -> None:\n # When running in dagster-webserver, hold an open connection\n # https://github.com/dagster-io/dagster/issues/3719\n self._engine = create_engine(\n self.mysql_url,\n isolation_level=mysql_isolation_level(),\n pool_size=1,\n pool_recycle=pool_recycle,\n )\n\n def upgrade(self) -> None:\n alembic_config = mysql_alembic_config(__file__)\n with self._connect() as conn:\n run_alembic_upgrade(alembic_config, conn)\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n @classmethod\n def config_type(cls) -> UserConfigSchema:\n return mysql_config()\n\n @classmethod\n def from_config_value(\n cls, inst_data: Optional[ConfigurableClassData], config_value: MySqlStorageConfig\n ) -> "MySQLEventLogStorage":\n return MySQLEventLogStorage(\n inst_data=inst_data, mysql_url=mysql_url_from_config(config_value)\n )\n\n @staticmethod\n def wipe_storage(mysql_url: str) -> None:\n engine = create_engine(\n mysql_url, isolation_level=mysql_isolation_level(), poolclass=db_pool.NullPool\n )\n try:\n SqlEventLogStorageMetadata.drop_all(engine)\n finally:\n engine.dispose()\n\n @staticmethod\n def create_clean_storage(conn_string: str) -> "MySQLEventLogStorage":\n MySQLEventLogStorage.wipe_storage(conn_string)\n return MySQLEventLogStorage(conn_string)\n\n def get_server_version(self) -> Optional[str]:\n with self.index_connection() as conn:\n row = 
conn.execute(db.text("select version()")).fetchone()\n\n if not row:\n return None\n\n return cast(str, row[0])\n\n def store_asset_event(self, event: EventLogEntry, event_id: int) -> None:\n # last_materialization_timestamp is updated upon observation, materialization, materialization_planned\n # See SqlEventLogStorage.store_asset_event method for more details\n\n values = self._get_asset_entry_values(\n event, event_id, self.has_secondary_index(ASSET_KEY_INDEX_COLS)\n )\n with self.index_connection() as conn:\n if values:\n conn.execute(\n db_dialects.mysql.insert(AssetKeyTable)\n .values(\n asset_key=event.dagster_event.asset_key.to_string(), # type: ignore # (possible none)\n **values,\n )\n .on_duplicate_key_update(\n **values,\n )\n )\n else:\n try:\n conn.execute(\n db_dialects.mysql.insert(AssetKeyTable).values(\n asset_key=event.dagster_event.asset_key.to_string(), # type: ignore # (possible none)\n )\n )\n except db_exc.IntegrityError:\n pass\n\n def _connect(self) -> ContextManager[Connection]:\n return create_mysql_connection(self._engine, __file__, "event log")\n\n def run_connection(self, run_id: Optional[str] = None) -> ContextManager[Connection]:\n return self._connect()\n\n def index_connection(self) -> ContextManager[Connection]:\n return self._connect()\n\n def has_table(self, table_name: str) -> bool:\n with self._connect() as conn:\n return table_name in db.inspect(conn).get_table_names()\n\n def has_secondary_index(self, name: str) -> bool:\n if name not in self._secondary_index_cache:\n self._secondary_index_cache[name] = super(\n MySQLEventLogStorage, self\n ).has_secondary_index(name)\n return self._secondary_index_cache[name]\n\n def enable_secondary_index(self, name: str) -> None:\n super(MySQLEventLogStorage, self).enable_secondary_index(name)\n if name in self._secondary_index_cache:\n del self._secondary_index_cache[name]\n\n def watch(self, run_id: str, cursor: Optional[str], callback: EventHandlerFn) -> None:\n if cursor and EventLogCursor.parse(cursor).is_offset_cursor():\n check.failed("Cannot call `watch` with an offset cursor")\n self._event_watcher.watch_run(run_id, cursor, callback)\n\n def end_watch(self, run_id: str, handler: EventHandlerFn) -> None:\n self._event_watcher.unwatch_run(run_id, handler)\n\n @property\n def supports_intersect(self) -> bool:\n return parse_mysql_version(self._mysql_version) >= parse_mysql_version( # type: ignore # (possible none)\n MINIMUM_MYSQL_INTERSECT_VERSION\n )\n\n @property\n def event_watcher(self) -> SqlPollingEventWatcher:\n return self._event_watcher\n\n def __del__(self) -> None:\n self.dispose()\n\n def dispose(self) -> None:\n if not self._disposed:\n self._disposed = True\n self._event_watcher.close()\n\n def alembic_version(self) -> AlembicVersion:\n alembic_config = mysql_alembic_config(__file__)\n with self._connect() as conn:\n return check_alembic_revision(alembic_config, conn)
\n
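Although this class is normally configured through ``dagster.yaml`` rather than constructed directly, the static helpers above make it easy to spin up a throwaway instance against a test database. A hedged sketch follows (the connection URL is illustrative, and ``create_clean_storage`` wipes any existing Dagster tables in that database):

.. code-block:: python

    from dagster_mysql import MySQLEventLogStorage

    test_mysql_url = "mysql+mysqlconnector://user:password@localhost:3306/dagster_test"

    # wipe_storage followed by fresh table creation, per create_clean_storage above
    storage = MySQLEventLogStorage.create_clean_storage(test_mysql_url)

    print(storage.get_server_version())  # e.g. "8.0.34"
    print(storage.supports_intersect)    # True only for MySQL >= 8.0.31
    print(storage.alembic_version())

    storage.dispose()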
", "current_page_name": "_modules/dagster_mysql/event_log/event_log", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_mysql.event_log.event_log"}}, "run_storage": {"run_storage": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_mysql.run_storage.run_storage

\nfrom typing import ContextManager, Mapping, Optional, cast\n\nimport dagster._check as check\nimport sqlalchemy as db\nimport sqlalchemy.dialects as db_dialects\nimport sqlalchemy.pool as db_pool\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._core.storage.config import MySqlStorageConfig, mysql_config\nfrom dagster._core.storage.runs import (\n    DaemonHeartbeatsTable,\n    InstanceInfo,\n    RunStorageSqlMetadata,\n    SqlRunStorage,\n)\nfrom dagster._core.storage.runs.schema import KeyValueStoreTable\nfrom dagster._core.storage.sql import (\n    AlembicVersion,\n    check_alembic_revision,\n    create_engine,\n    run_alembic_upgrade,\n    stamp_alembic_rev,\n)\nfrom dagster._daemon.types import DaemonHeartbeat\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData, serialize_value\nfrom dagster._utils import utc_datetime_from_timestamp\nfrom sqlalchemy.engine import Connection\n\nfrom ..utils import (\n    create_mysql_connection,\n    mysql_alembic_config,\n    mysql_isolation_level,\n    mysql_url_from_config,\n    parse_mysql_version,\n    retry_mysql_connection_fn,\n    retry_mysql_creation_fn,\n)\n\nMINIMUM_MYSQL_BUCKET_VERSION = "8.0.0"\nMINIMUM_MYSQL_INTERSECT_VERSION = "8.0.31"\n\n\n
[docs]class MySQLRunStorage(SqlRunStorage, ConfigurableClass):\n """MySQL-backed run storage.\n\n Users should not directly instantiate this class; it is instantiated by internal machinery when\n ``dagster-webserver`` and ``dagster-graphql`` load, based on the values in the ``dagster.yaml`` file in\n ``$DAGSTER_HOME``. Configuration of this class should be done by setting values in that file.\n\n\n .. literalinclude:: ../../../../../../examples/docs_snippets/docs_snippets/deploying/dagster-mysql-legacy.yaml\n :caption: dagster.yaml\n :start-after: start_marker_runs\n :end-before: end_marker_runs\n :language: YAML\n\n Note that the fields in this config are :py:class:`~dagster.StringSource` and\n :py:class:`~dagster.IntSource` and can be configured from environment variables.\n """\n\n def __init__(self, mysql_url: str, inst_data: Optional[ConfigurableClassData] = None):\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n self.mysql_url = mysql_url\n\n # Default to not holding any connections open to prevent accumulating connections per DagsterInstance\n self._engine = create_engine(\n self.mysql_url,\n isolation_level=mysql_isolation_level(),\n poolclass=db_pool.NullPool,\n )\n\n self._index_migration_cache = {}\n table_names = retry_mysql_connection_fn(db.inspect(self._engine).get_table_names)\n\n # Stamp and create tables if the main table does not exist (we can't check alembic\n # revision because alembic config may be shared with other storage classes)\n if "runs" not in table_names:\n retry_mysql_creation_fn(self._init_db)\n self.migrate()\n self.optimize()\n\n elif "instance_info" not in table_names:\n InstanceInfo.create(self._engine)\n\n self._mysql_version = self.get_server_version()\n\n super().__init__()\n\n def _init_db(self) -> None:\n with self.connect() as conn:\n RunStorageSqlMetadata.create_all(conn)\n stamp_alembic_rev(mysql_alembic_config(__file__), conn)\n\n def optimize_for_webserver(self, statement_timeout: int, pool_recycle: int) -> None:\n # When running in dagster-webserver, hold 1 open connection\n # https://github.com/dagster-io/dagster/issues/3719\n self._engine = create_engine(\n self.mysql_url,\n isolation_level=mysql_isolation_level(),\n pool_size=1,\n pool_recycle=pool_recycle,\n )\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n @classmethod\n def config_type(cls) -> UserConfigSchema:\n return mysql_config()\n\n def get_server_version(self) -> Optional[str]:\n with self.connect() as conn:\n row = conn.execute(db.text("select version()")).fetchone()\n\n if not row:\n return None\n\n return cast(str, row[0])\n\n @classmethod\n def from_config_value(\n cls, inst_data: Optional[ConfigurableClassData], config_value: MySqlStorageConfig\n ) -> "MySQLRunStorage":\n return MySQLRunStorage(inst_data=inst_data, mysql_url=mysql_url_from_config(config_value))\n\n @staticmethod\n def wipe_storage(mysql_url: str) -> None:\n engine = create_engine(\n mysql_url, isolation_level=mysql_isolation_level(), poolclass=db_pool.NullPool\n )\n try:\n RunStorageSqlMetadata.drop_all(engine)\n finally:\n engine.dispose()\n\n @staticmethod\n def create_clean_storage(mysql_url: str) -> "MySQLRunStorage":\n MySQLRunStorage.wipe_storage(mysql_url)\n return MySQLRunStorage(mysql_url)\n\n def connect(self, run_id: Optional[str] = None) -> ContextManager[Connection]:\n return create_mysql_connection(self._engine, __file__, "run")\n\n def upgrade(self) -> None:\n alembic_config = 
mysql_alembic_config(__file__)\n with self.connect() as conn:\n run_alembic_upgrade(alembic_config, conn)\n\n def has_built_index(self, migration_name: str) -> None:\n if migration_name not in self._index_migration_cache:\n self._index_migration_cache[migration_name] = super(\n MySQLRunStorage, self\n ).has_built_index(migration_name)\n return self._index_migration_cache[migration_name]\n\n def mark_index_built(self, migration_name: str) -> None:\n super(MySQLRunStorage, self).mark_index_built(migration_name)\n if migration_name in self._index_migration_cache:\n del self._index_migration_cache[migration_name]\n\n @property\n def supports_intersect(self) -> bool:\n return parse_mysql_version(self._mysql_version) >= parse_mysql_version( # type: ignore\n MINIMUM_MYSQL_INTERSECT_VERSION\n )\n\n def add_daemon_heartbeat(self, daemon_heartbeat: DaemonHeartbeat) -> None:\n with self.connect() as conn:\n conn.execute(\n db_dialects.mysql.insert(DaemonHeartbeatsTable)\n .values(\n timestamp=utc_datetime_from_timestamp(daemon_heartbeat.timestamp),\n daemon_type=daemon_heartbeat.daemon_type,\n daemon_id=daemon_heartbeat.daemon_id,\n body=serialize_value(daemon_heartbeat),\n )\n .on_duplicate_key_update(\n timestamp=utc_datetime_from_timestamp(daemon_heartbeat.timestamp),\n daemon_id=daemon_heartbeat.daemon_id,\n body=serialize_value(daemon_heartbeat),\n )\n )\n\n def set_cursor_values(self, pairs: Mapping[str, str]) -> None:\n check.mapping_param(pairs, "pairs", key_type=str, value_type=str)\n db_values = [{"key": k, "value": v} for k, v in pairs.items()]\n\n with self.connect() as conn:\n insert_stmt = db_dialects.mysql.insert(KeyValueStoreTable).values(db_values)\n conn.execute(\n insert_stmt.on_duplicate_key_update(\n value=insert_stmt.inserted.value,\n )\n )\n\n def alembic_version(self) -> AlembicVersion:\n alembic_config = mysql_alembic_config(__file__)\n with self.connect() as conn:\n return check_alembic_revision(alembic_config, conn)
\n
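In the same spirit, a brief sketch (illustrative connection URL and cursor key) exercising the cursor key-value helper above; ``set_cursor_values`` relies on MySQL's ``ON DUPLICATE KEY UPDATE``, so re-setting a key overwrites its previous value:

.. code-block:: python

    from dagster_mysql import MySQLRunStorage

    test_mysql_url = "mysql+mysqlconnector://user:password@localhost:3306/dagster_test"

    storage = MySQLRunStorage.create_clean_storage(test_mysql_url)

    # upsert semantics: the second call overwrites the first
    storage.set_cursor_values({"my_cursor": "1"})
    storage.set_cursor_values({"my_cursor": "2"})

    print(storage.get_server_version())
    print(storage.alembic_version())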
", "current_page_name": "_modules/dagster_mysql/run_storage/run_storage", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_mysql.run_storage.run_storage"}}, "schedule_storage": {"schedule_storage": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_mysql.schedule_storage.schedule_storage

\nfrom typing import ContextManager, Optional, cast\n\nimport dagster._check as check\nimport pendulum\nimport sqlalchemy as db\nimport sqlalchemy.dialects as db_dialects\nimport sqlalchemy.pool as db_pool\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._core.storage.config import MySqlStorageConfig, mysql_config\nfrom dagster._core.storage.schedules import ScheduleStorageSqlMetadata, SqlScheduleStorage\nfrom dagster._core.storage.schedules.schema import InstigatorsTable\nfrom dagster._core.storage.sql import (\n    AlembicVersion,\n    check_alembic_revision,\n    create_engine,\n    run_alembic_upgrade,\n    stamp_alembic_rev,\n)\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData, serialize_value\nfrom sqlalchemy.engine import Connection\n\nfrom ..utils import (\n    create_mysql_connection,\n    mysql_alembic_config,\n    mysql_isolation_level,\n    mysql_url_from_config,\n    parse_mysql_version,\n    retry_mysql_connection_fn,\n    retry_mysql_creation_fn,\n)\n\nMINIMUM_MYSQL_BATCH_VERSION = "8.0.0"\n\n\n
[docs]class MySQLScheduleStorage(SqlScheduleStorage, ConfigurableClass):\n """MySQL-backed run storage.\n\n Users should not directly instantiate this class; it is instantiated by internal machinery when\n ``dagster-webserver`` and ``dagster-graphql`` load, based on the values in the ``dagster.yaml`` file in\n ``$DAGSTER_HOME``. Configuration of this class should be done by setting values in that file.\n\n .. literalinclude:: ../../../../../../examples/docs_snippets/docs_snippets/deploying/dagster-mysql-legacy.yaml\n :caption: dagster.yaml\n :start-after: start_marker_schedules\n :end-before: end_marker_schedules\n :language: YAML\n\n Note that the fields in this config are :py:class:`~dagster.StringSource` and\n :py:class:`~dagster.IntSource` and can be configured from environment variables.\n """\n\n def __init__(self, mysql_url: str, inst_data: Optional[ConfigurableClassData] = None):\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n self.mysql_url = mysql_url\n\n # Default to not holding any connections open to prevent accumulating connections per DagsterInstance\n self._engine = create_engine(\n self.mysql_url,\n isolation_level=mysql_isolation_level(),\n poolclass=db_pool.NullPool,\n )\n\n # Stamp and create tables if the main table does not exist (we can't check alembic\n # revision because alembic config may be shared with other storage classes)\n table_names = retry_mysql_connection_fn(db.inspect(self._engine).get_table_names)\n if "jobs" not in table_names:\n retry_mysql_creation_fn(self._init_db)\n\n self._mysql_version = self.get_server_version()\n\n super().__init__()\n\n def _init_db(self) -> None:\n with self.connect() as conn:\n ScheduleStorageSqlMetadata.create_all(conn)\n stamp_alembic_rev(mysql_alembic_config(__file__), conn)\n\n # mark all the data migrations as applied\n self.migrate()\n self.optimize()\n\n def optimize_for_webserver(self, statement_timeout: int, pool_recycle: int) -> None:\n # When running in dagster-webserver, hold an open connection\n # https://github.com/dagster-io/dagster/issues/3719\n self._engine = create_engine(\n self.mysql_url,\n isolation_level=mysql_isolation_level(),\n pool_size=1,\n pool_recycle=pool_recycle,\n )\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n @classmethod\n def config_type(cls) -> UserConfigSchema:\n return mysql_config()\n\n @classmethod\n def from_config_value(\n cls, inst_data: Optional[ConfigurableClassData], config_value: MySqlStorageConfig\n ) -> "MySQLScheduleStorage":\n return MySQLScheduleStorage(\n inst_data=inst_data, mysql_url=mysql_url_from_config(config_value)\n )\n\n @staticmethod\n def wipe_storage(mysql_url: str) -> None:\n engine = create_engine(\n mysql_url, isolation_level=mysql_isolation_level(), poolclass=db_pool.NullPool\n )\n try:\n ScheduleStorageSqlMetadata.drop_all(engine)\n finally:\n engine.dispose()\n\n @staticmethod\n def create_clean_storage(mysql_url: str) -> "MySQLScheduleStorage":\n MySQLScheduleStorage.wipe_storage(mysql_url)\n return MySQLScheduleStorage(mysql_url)\n\n def connect(self) -> ContextManager[Connection]:\n return create_mysql_connection(self._engine, __file__, "schedule")\n\n @property\n def supports_batch_queries(self) -> bool:\n if not self._mysql_version:\n return False\n\n return parse_mysql_version(self._mysql_version) >= parse_mysql_version(\n MINIMUM_MYSQL_BATCH_VERSION\n )\n\n def get_server_version(self) -> Optional[str]:\n with self.connect() as conn:\n row = 
conn.execute(db.text("select version()")).fetchone()\n\n if not row:\n return None\n\n return cast(str, row[0])\n\n def upgrade(self) -> None:\n with self.connect() as conn:\n alembic_config = mysql_alembic_config(__file__)\n run_alembic_upgrade(alembic_config, conn)\n\n def _add_or_update_instigators_table(self, conn: Connection, state) -> None:\n selector_id = state.selector_id\n conn.execute(\n db_dialects.mysql.insert(InstigatorsTable)\n .values(\n selector_id=selector_id,\n repository_selector_id=state.repository_selector_id,\n status=state.status.value,\n instigator_type=state.instigator_type.value,\n instigator_body=serialize_value(state),\n )\n .on_duplicate_key_update(\n status=state.status.value,\n instigator_type=state.instigator_type.value,\n instigator_body=serialize_value(state),\n update_timestamp=pendulum.now("UTC"),\n )\n )\n\n def alembic_version(self) -> AlembicVersion:\n alembic_config = mysql_alembic_config(__file__)\n with self.connect() as conn:\n return check_alembic_revision(alembic_config, conn)
\n
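And a final storage sketch (illustrative URL; direct construction is shown only for local experimentation) covering the version-gated capability check and the in-place migration entry point defined above:

.. code-block:: python

    from dagster_mysql import MySQLScheduleStorage

    test_mysql_url = "mysql+mysqlconnector://user:password@localhost:3306/dagster_test"

    storage = MySQLScheduleStorage(test_mysql_url)

    # batched instigator queries require MySQL >= 8.0.0
    print(storage.supports_batch_queries)

    # applies any pending alembic migrations for the schedule tables
    storage.upgrade()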
", "current_page_name": "_modules/dagster_mysql/schedule_storage/schedule_storage", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_mysql.schedule_storage.schedule_storage"}}}, "dagster_pagerduty": {"resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_pagerduty.resources

\nfrom typing import Dict, Optional, cast\n\nimport pypd\nfrom dagster import ConfigurableResource, resource\nfrom dagster._config.pythonic_config import infer_schema_from_config_class\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster._utils.warnings import suppress_dagster_warnings\nfrom pydantic import Field as PyField\n\n\n
[docs]class PagerDutyService(ConfigurableResource):\n """This resource is for posting events to PagerDuty."""\n\n """Integrates with PagerDuty via the pypd library.\n\n See:\n https://v2.developer.pagerduty.com/docs/events-api-v2\n https://v2.developer.pagerduty.com/docs/send-an-event-events-api-v2\n https://support.pagerduty.com/docs/services-and-integrations#section-events-api-v2\n https://github.com/PagerDuty/pagerduty-api-python-client\n\n for documentation and more information.\n """\n\n routing_key: str = PyField(\n ...,\n description=(\n "The routing key provisions access to your PagerDuty service. You"\n "will need to include the integration key for your new integration, as a"\n "routing_key in the event payload."\n ),\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def EventV2_create(\n self,\n summary: str,\n source: str,\n severity: str,\n event_action: str = "trigger",\n dedup_key: Optional[str] = None,\n timestamp: Optional[str] = None,\n component: Optional[str] = None,\n group: Optional[str] = None,\n event_class: Optional[str] = None,\n custom_details: Optional[object] = None,\n ) -> object:\n """Events API v2 enables you to add PagerDuty's advanced event and incident management\n functionality to any system that can make an outbound HTTP connection.\n\n Args:\n summary (str):\n A high-level, text summary message of the event. Will be used to construct an\n alert's description. Example:\n\n "PING OK - Packet loss = 0%, RTA = 1.41 ms" "Host\n 'acme-andromeda-sv1-c40 :: 179.21.24.50' is DOWN"\n\n source (str):\n Specific human-readable unique identifier, such as a hostname, for the system having\n the problem. Examples:\n\n "prod05.theseus.acme-widgets.com"\n "171.26.23.22"\n "aws:elasticache:us-east-1:852511987:cluster/api-stats-prod-003"\n "9c09acd49a25"\n\n severity (str):\n How impacted the affected system is. Displayed to users in lists and influences the\n priority of any created incidents. Must be one of {info, warning, error, critical}\n\n Keyword Args:\n event_action (str):\n There are three types of events that PagerDuty recognizes, and are used to represent\n different types of activity in your monitored systems. (default: 'trigger')\n\n * trigger: When PagerDuty receives a trigger event, it will either open a new alert,\n or add a new trigger log entry to an existing alert, depending on the\n provided dedup_key. Your monitoring tools should send PagerDuty a trigger\n when a new problem has been detected. You may send additional triggers\n when a previously detected problem has occurred again.\n\n * acknowledge: acknowledge events cause the referenced incident to enter the\n acknowledged state. While an incident is acknowledged, it won't\n generate any additional notifications, even if it receives new\n trigger events. Your monitoring tools should send PagerDuty an\n acknowledge event when they know someone is presently working on the\n problem.\n\n * resolve: resolve events cause the referenced incident to enter the resolved state.\n Once an incident is resolved, it won't generate any additional\n notifications. New trigger events with the same dedup_key as a resolved\n incident won't re-open the incident. Instead, a new incident will be\n created. Your monitoring tools should send PagerDuty a resolve event when\n the problem that caused the initial trigger event has been fixed.\n\n dedup_key (str):\n Deduplication key for correlating triggers and resolves. 
The maximum permitted\n length of this property is 255 characters.\n\n timestamp (str):\n Timestamp (ISO 8601). When the upstream system detected / created the event. This is\n useful if a system batches or holds events before sending them to PagerDuty. This\n will be auto-generated by PagerDuty if not provided. Example:\n\n 2015-07-17T08:42:58.315+0000\n\n component (str):\n The part or component of the affected system that is broken. Examples:\n\n "keepalive"\n "webping"\n "mysql"\n "wqueue"\n\n group (str):\n A cluster or grouping of sources. For example, sources "prod-datapipe-02" and\n "prod-datapipe-03" might both be part of "prod-datapipe". Examples:\n\n "prod-datapipe"\n "www"\n "web_stack"\n\n event_class (str):\n The class/type of the event. Examples:\n\n "High CPU"\n "Latency"\n "500 Error"\n\n custom_details (Dict[str, str]):\n Additional details about the event and affected system. Example:\n\n {"ping time": "1500ms", "load avg": 0.75 }\n """\n data = {\n "routing_key": self.routing_key,\n "event_action": event_action,\n "payload": {"summary": summary, "source": source, "severity": severity},\n }\n\n if dedup_key is not None:\n data["dedup_key"] = dedup_key\n\n payload: Dict[str, object] = cast(Dict[str, object], data["payload"])\n\n if timestamp is not None:\n payload["timestamp"] = timestamp\n\n if component is not None:\n payload["component"] = component\n\n if group is not None:\n payload["group"] = group\n\n if event_class is not None:\n payload["class"] = event_class\n\n if custom_details is not None:\n payload["custom_details"] = custom_details\n\n return pypd.EventV2.create(data=data)
\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n config_schema=infer_schema_from_config_class(PagerDutyService),\n description="""This resource is for posting events to PagerDuty.""",\n)\n@suppress_dagster_warnings\ndef pagerduty_resource(context) -> PagerDutyService:\n """A resource for posting events (alerts) to PagerDuty.\n\n Example:\n .. code-block:: python\n\n @op\n def pagerduty_op(pagerduty: PagerDutyService):\n pagerduty.EventV2_create(\n summary='alert from dagster'\n source='localhost',\n severity='error',\n event_action='trigger',\n )\n\n @job(resource_defs={ 'pagerduty': pagerduty_resource })\n def pagerduty_test():\n pagerduty_op()\n\n pagerduty_test.execute_in_process(\n run_config={\n "resources": {\n 'pagerduty': {'config': {'routing_key': '0123456789abcdef0123456789abcdef'}}\n }\n }\n )\n """\n return PagerDutyService(**context.resource_config)
\n
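As a complement to the examples above, a minimal sketch using the Pythonic ``PagerDutyService`` resource directly (the op and job names and the ``PAGERDUTY_ROUTING_KEY`` environment variable are illustrative):

.. code-block:: python

    from dagster import Definitions, EnvVar, job, op
    from dagster_pagerduty import PagerDutyService


    @op
    def trigger_alert(pagerduty: PagerDutyService):
        # severity must be one of: info, warning, error, critical
        pagerduty.EventV2_create(
            summary="alert from dagster",
            source="localhost",
            severity="error",
            dedup_key="dagster-demo-alert",
        )


    @job
    def pagerduty_job():
        trigger_alert()


    defs = Definitions(
        jobs=[pagerduty_job],
        resources={
            "pagerduty": PagerDutyService(routing_key=EnvVar("PAGERDUTY_ROUTING_KEY"))
        },
    )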
", "current_page_name": "_modules/dagster_pagerduty/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_pagerduty.resources"}}, "dagster_pandas": {"constraints": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_pandas.constraints

\nimport sys\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom functools import wraps\n\nimport pandas as pd\nfrom dagster import (\n    DagsterType,\n    TypeCheck,\n    _check as check,\n)\nfrom dagster._annotations import experimental\nfrom pandas import DataFrame\nfrom typing_extensions import Final\n\nCONSTRAINT_METADATA_KEY: Final = "constraint_metadata"\n\n\nclass ConstraintViolationException(Exception):\n    """Indicates that a constraint has been violated."""\n\n\nclass ConstraintWithMetadataException(Exception):\n    """This class defines the response generated when a pandas DF fails validation -- it can be used to generate either a\n    failed typecheck or an exception.\n\n    Args:\n        constraint_name (str):  the name of the violated constraint\n        constraint_description (Optional[str]): the description of the violated constraint\n        expectation (Optional[Union[dict,list, str, set]]): what result was expected -- typically a jsonlike, though it can be a string\n        offending (Optional[Union[dict,list, str, set]]):  which pieces of the dataframe violated the expectation, typically list or string\n        actual (Optional[Union[dict,list, str, set]]): what those pieces of the dataframe actually were -- typically a jsonlike\n    """\n\n    def __init__(\n        self,\n        constraint_name,\n        constraint_description="",\n        expectation=None,\n        offending=None,\n        actual=None,\n    ):\n        self.constraint_name = constraint_name\n        self.constraint_description = constraint_description\n        self.expectation = check.opt_inst_param(expectation, "expectation", (dict, list, str, set))\n        self.offending = check.opt_inst_param(offending, "offending", (dict, list, str, set))\n        self.actual = check.opt_inst_param(actual, "actual", (dict, list, str, set))\n        super(ConstraintWithMetadataException, self).__init__(\n            "Violated {} - {}, {} was/were expected, but we received {} which was/were {}".format(\n                constraint_name,\n                constraint_description,\n                expectation,\n                offending,\n                actual,\n            )\n        )\n\n    def normalize_metadata_json_value(self, val):\n        if isinstance(val, set):\n            return list(val)\n        else:\n            return val\n\n    def convert_to_metadata(self):\n        return {\n            CONSTRAINT_METADATA_KEY: {\n                "constraint_name": self.constraint_name,\n                "constraint_description": self.constraint_description,\n                "expected": self.normalize_metadata_json_value(self.expectation),\n                "offending": self.normalize_metadata_json_value(self.offending),\n                "actual": self.normalize_metadata_json_value(self.actual),\n            },\n        }\n\n    def return_as_typecheck(self):\n        return TypeCheck(\n            success=False, description=self.args[0], metadata=self.convert_to_metadata()\n        )\n\n\nclass DataFrameConstraintViolationException(ConstraintViolationException):\n    """Indicates a dataframe level constraint has been violated."""\n\n    def __init__(self, constraint_name, constraint_description):\n        super(DataFrameConstraintViolationException, self).__init__(\n            f"Violated {constraint_name} - {constraint_description}"\n        )\n\n\nclass DataFrameWithMetadataException(ConstraintWithMetadataException):\n    def __init__(self, constraint_name, constraint_description, 
expectation, actual):\n        super(DataFrameWithMetadataException, self).__init__(\n            constraint_name, constraint_description, expectation, "a malformed dataframe", actual\n        )\n\n\nclass ColumnConstraintViolationException(ConstraintViolationException):\n    """Indicates that a column constraint has been violated."""\n\n    def __init__(self, constraint_name, constraint_description, column_name, offending_rows=None):\n        self.constraint_name = constraint_name\n        self.constraint_description = constraint_description\n        self.column_name = column_name\n        self.offending_rows = offending_rows\n        super(ColumnConstraintViolationException, self).__init__(self.construct_message())\n\n    def construct_message(self):\n        base_message = (\n            'Violated "{constraint_name}" for column "{column_name}" - {constraint_description}'\n            .format(\n                constraint_name=self.constraint_name,\n                constraint_description=self.constraint_description,\n                column_name=self.column_name,\n            )\n        )\n        if self.offending_rows is not None:\n            base_message += "The offending (index, row values) are the following: {}".format(\n                self.offending_rows\n            )\n        return base_message\n\n\nclass ColumnWithMetadataException(ConstraintWithMetadataException):\n    def __init__(self, constraint_name, constraint_description, expectation, offending, actual):\n        super(ColumnWithMetadataException, self).__init__(\n            "the column constraint " + constraint_name,\n            constraint_description,\n            expectation,\n            offending,\n            actual,\n        )\n\n\nclass Constraint:\n    """Base constraint object that all constraints inherit from.\n\n    Args:\n        error_description (Optional[str]): The plain string description that is output in the terminal if the constraint fails.\n        markdown_description (Optional[str]): A markdown supported description that is shown in the Dagster UI if the constraint fails.\n    """\n\n    def __init__(self, error_description=None, markdown_description=None):\n        self.name = self.__class__.__name__\n        self.markdown_description = check.str_param(markdown_description, "markdown_description")\n        self.error_description = check.str_param(error_description, "error_description")\n\n\n@experimental\nclass ConstraintWithMetadata:\n    """This class defines a base constraint over pandas DFs with organized metadata.\n\n    Args:\n        description (str): description of the constraint\n        validation_fn (Callable[[DataFrame], Tuple[bool, dict[str, Union[dict,list, str, set]]]]:\n                    the validation function to run over inputted data\n                    This function should return a tuple of a boolean for success or failure, and a dict containing\n                    metadata about the test -- this metadata will be passed to the resulting exception if validation\n                    fails.\n        resulting_exception (ConstraintWithMetadataException):  what response a failed typecheck should induce\n        raise_or_typecheck (Optional[bool]): whether to raise an exception (if set to True) or emit a failed typecheck event\n                    (if set to False) when validation fails\n        name (Optional[str]): what to call the constraint, defaults to the class name.\n    """\n\n    # TODO:  validation_fn returning metadata is sorta broken.  
maybe have it yield typecheck events and grab metadata?\n\n    def __init__(\n        self, description, validation_fn, resulting_exception, raise_or_typecheck=True, name=None\n    ):\n        if name is None:\n            self.name = self.__class__.__name__\n        else:\n            self.name = name\n        self.description = description\n        # should return a tuple of (bool, and either an empty dict or a dict of extra params)\n        self.validation_fn = validation_fn\n        self.resulting_exception = resulting_exception\n        self.raise_or_typecheck = raise_or_typecheck\n\n    def validate(self, data, *args, **kwargs):\n        res = self.validation_fn(data, *args, **kwargs)\n        if not res[0]:\n            exc = self.resulting_exception(\n                constraint_name=self.name, constraint_description=self.description, **res[1]\n            )\n\n            if self.raise_or_typecheck:\n                raise exc\n            else:\n                return exc.return_as_typecheck()\n\n        else:\n            if res[0]:\n                return TypeCheck(success=True)\n\n    # TODO:  composition of validations\n    def as_dagster_type(self, *args, **kwargs):\n        if self.raise_or_typecheck:\n            raise Exception(\n                "Dagster types can only be constructed from constraints that return typechecks"\n            )\n        return DagsterType(\n            name=self.name,\n            description=f"A Pandas DataFrame with the following validation: {self.description}",\n            type_check_fn=lambda x: self.validate(x, *args),\n            **kwargs,\n        )\n\n\nclass MultiConstraintWithMetadata(ConstraintWithMetadata):\n    """Use this class if you have multiple constraints to check over the entire dataframe.\n\n    Args:\n        description (str): description of the constraint\n        validation_fn_arr(List[Callable[[DataFrame], Tuple[bool, dict[str, Union[dict,list, str, set]]]]]):\n                    a list of the validation functions to run over inputted data\n                    Each function should return a tuple of a boolean for success or failure, and a dict containing\n                    metadata about the test -- this metadata will be passed to the resulting exception if validation\n                    fails.\n        resulting_exception (ConstraintWithMetadataException):  what response a failed typecheck should induce\n        raise_or_typecheck (Optional[bool]): whether to raise an exception (if set to True) or emit a failed typecheck event\n                    (if set to False) when validation fails\n        name (Optional[str]): what to call the constraint, defaults to the class name.\n    """\n\n    def __init__(\n        self,\n        description,\n        validation_fn_arr,\n        resulting_exception,\n        raise_or_typecheck=True,\n        name=None,\n    ):\n        validation_fn_arr = check.list_param(validation_fn_arr, "validation_fn_arr")\n\n        def validation_fn(data, *args, **kwargs):\n            results = [f(data, *args, **kwargs) for f in validation_fn_arr]\n            truthparam = all(item[0] for item in results)\n            metadict = defaultdict(dict)\n            for i, dicta in enumerate(item[1] for item in results):\n                if len(dicta.keys()) > 0:\n                    for key in dicta:\n                        metadict[key][validation_fn_arr[i].__name__] = dicta[key]\n            return (truthparam, metadict)\n\n        super(MultiConstraintWithMetadata, self).__init__(\n            
description,\n            validation_fn,\n            resulting_exception,\n            raise_or_typecheck=raise_or_typecheck,\n            name=name,\n        )\n\n\nclass StrictColumnsWithMetadata(ConstraintWithMetadata):\n    def __init__(self, column_list, enforce_ordering=False, raise_or_typecheck=True, name=None):\n        self.enforce_ordering = check.bool_param(enforce_ordering, "enforce_ordering")\n        self.column_list = check.list_param(column_list, "strict_column_list", of_type=str)\n\n        def validation_fcn(inframe):\n            if list(inframe.columns) == column_list:\n                return (True, {})\n            else:\n                if self.enforce_ordering:\n                    resdict = {"expectation": self.column_list, "actual": list(inframe.columns)}\n                    return (False, resdict)\n                else:\n                    if set(inframe.columns) == set(column_list):\n                        return (True, {})\n                    else:\n                        extra = [x for x in inframe.columns if x not in set(column_list)]\n                        missing = [x for x in set(column_list) if x not in inframe.columns]\n                        resdict = {\n                            "expectation": self.column_list,\n                            "actual": {"extra_columns": extra, "missing_columns": missing},\n                        }\n                        return (False, resdict)\n\n        basestr = f"ensuring that the right columns, {self.column_list} were present"\n        if enforce_ordering:\n            basestr += " in the right order"\n        super(StrictColumnsWithMetadata, self).__init__(\n            basestr,\n            validation_fcn,\n            DataFrameWithMetadataException,\n            raise_or_typecheck=raise_or_typecheck,\n            name=name,\n        )\n\n\nclass DataFrameConstraint(Constraint):\n    """Base constraint object that represent Dataframe shape constraints.\n\n    Args:\n        error_description (Optional[str]): The plain string description that is output in the terminal if the constraint fails.\n        markdown_description (Optional[str]): A markdown supported description that is shown in the Dagster UI if the constraint fails.\n    """\n\n    def __init__(self, error_description=None, markdown_description=None):\n        super(DataFrameConstraint, self).__init__(\n            error_description=error_description, markdown_description=markdown_description\n        )\n\n    def validate(self, dataframe):\n        raise NotImplementedError()\n\n\n
[docs]class StrictColumnsConstraint(DataFrameConstraint):\n """A dataframe constraint that validates column existence and ordering.\n\n Args:\n strict_column_list (List[str]): The exact list of columns that your dataframe must have.\n enforce_ordering (Optional[bool]): If true, will enforce that the ordering of column names must match.\n Default is False.\n """\n\n def __init__(self, strict_column_list, enforce_ordering=False):\n self.enforce_ordering = check.bool_param(enforce_ordering, "enforce_ordering")\n self.strict_column_list = check.list_param(\n strict_column_list, "strict_column_list", of_type=str\n )\n description = f"No columns outside of {self.strict_column_list} allowed. "\n if enforce_ordering:\n description += "Columns must be in that order."\n super(StrictColumnsConstraint, self).__init__(\n error_description=description, markdown_description=description\n )\n\n def validate(self, dataframe):\n check.inst_param(dataframe, "dataframe", DataFrame)\n columns_received = list(dataframe.columns)\n if self.enforce_ordering:\n if self.strict_column_list != columns_received:\n raise DataFrameConstraintViolationException(\n constraint_name=self.name,\n constraint_description=(\n "Expected the following ordering of columns {expected}. Received:"\n " {received}".format(\n expected=self.strict_column_list, received=columns_received\n )\n ),\n )\n for column in columns_received:\n if column not in self.strict_column_list:\n raise DataFrameConstraintViolationException(\n constraint_name=self.name,\n constraint_description="Expected {}. Recevied {}.".format(\n self.strict_column_list, columns_received\n ),\n )
\n\n\n
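A minimal sketch (not from the source) of how the StrictColumnsConstraint above might be used directly; the dataframe and column names are hypothetical:

import pandas as pd

from dagster_pandas.constraints import StrictColumnsConstraint

# Hypothetical dataframe for illustration.
df = pd.DataFrame({"foo": [1, 2, 3], "bar": ["a", "b", "c"]})

# Passes: exactly these columns are present (ordering is not enforced by default).
StrictColumnsConstraint(["bar", "foo"]).validate(df)

# With enforce_ordering=True the same call would raise
# DataFrameConstraintViolationException, since the received order is ["foo", "bar"].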
[docs]class RowCountConstraint(DataFrameConstraint):\n """A dataframe constraint that validates the expected count of rows.\n\n Args:\n num_allowed_rows (int): The number of allowed rows in your dataframe.\n error_tolerance (Optional[int]): The acceptable threshold if you are not completely certain. Defaults to 0.\n """\n\n def __init__(self, num_allowed_rows, error_tolerance=0):\n self.num_allowed_rows = check.int_param(num_allowed_rows, "num_allowed_rows")\n self.error_tolerance = abs(check.int_param(error_tolerance, "error_tolerance"))\n if self.error_tolerance > self.num_allowed_rows:\n raise ValueError("Tolerance can't be greater than the number of rows you expect.")\n description = f"Dataframe must have {self.num_allowed_rows} +- {self.error_tolerance} rows."\n super(RowCountConstraint, self).__init__(\n error_description=description, markdown_description=description\n )\n\n def validate(self, dataframe):\n check.inst_param(dataframe, "dataframe", DataFrame)\n\n if not (\n self.num_allowed_rows - self.error_tolerance\n <= len(dataframe)\n <= self.num_allowed_rows + self.error_tolerance\n ):\n raise DataFrameConstraintViolationException(\n constraint_name=self.name,\n constraint_description=(\n "Expected {expected} +- {tolerance} rows. Got {received}".format(\n expected=self.num_allowed_rows,\n tolerance=self.error_tolerance,\n received=len(dataframe),\n )\n ),\n )
\n\n\ndef apply_ignore_missing_data_to_mask(mask, column):\n return mask & ~column.isnull()\n\n\nclass ColumnAggregateConstraintWithMetadata(ConstraintWithMetadata):\n """Similar to the base class, but now your validation functions should take in columns (pd.Series) not Dataframes.\n\n Args:\n description (str): description of the constraint\n validation_fn (Callable[[pd.Series], Tuple[bool, dict[str, Union[dict,list, str, set]]]]:\n the validation function to run over inputted data\n This function should return a tuple of a boolean for success or failure, and a dict containing\n metadata about the test -- this metadata will be passed to the resulting exception if validation\n fails.\n resulting_exception (ConstraintWithMetadataException): what response a failed typecheck should induce\n raise_or_typecheck (Optional[bool]): whether to raise an exception (if set to True) or emit a failed typecheck event\n (if set to False) when validation fails\n name (Optional[str]): what to call the constraint, defaults to the class name.\n """\n\n def validate(self, data, *columns, **kwargs):\n if len(columns) == 0:\n columns = data.columns\n columns = [column for column in columns if column in data.columns]\n relevant_data = data[list(columns)]\n\n offending_columns = set()\n offending_values = {}\n for column in columns:\n # TODO: grab extra metadata\n res = self.validation_fn(relevant_data[column])\n if not res[0]:\n offending_columns.add(column)\n if res[1].get("actual") is not None:\n offending_values[column] = [x.item() for x in res[1].get("actual").to_numpy()]\n else:\n offending_values[column] = [x.item() for x in relevant_data[column].to_numpy()]\n if len(offending_columns) == 0 and not self.raise_or_typecheck:\n return TypeCheck(success=True)\n elif len(offending_columns) > 0:\n metadict = {\n "expectation": self.description.replace("Confirms", ""),\n "actual": offending_values,\n "offending": offending_columns,\n }\n exc = self.resulting_exception(\n constraint_name=self.name, constraint_description=self.description, **metadict\n )\n\n if self.raise_or_typecheck:\n raise exc\n else:\n return exc.return_as_typecheck()\n\n\nclass ColumnConstraintWithMetadata(ConstraintWithMetadata):\n """This class is useful for constructing single constraints that you want to apply to multiple\n columns of your dataframe.\n\n The main difference from the base class in terms of construction is that now, your validation_fns should operate on\n individual values.\n\n Args:\n description (str): description of the constraint\n validation_fn (Callable[[Any], Tuple[bool, dict[str, Union[dict,list, str, set]]]]:\n the validation function to run over inputted data\n This function should return a tuple of a boolean for success or failure, and a dict containing\n metadata about the test -- this metadata will be passed to the resulting exception if validation\n fails.\n resulting_exception (ConstraintWithMetadataException): what response a failed typecheck should induce\n raise_or_typecheck (Optional[bool]): whether to raise an exception (if set to True) or emit a failed typecheck event\n (if set to False) when validation fails\n name (Optional[str]): what to call the constraint, defaults to the class name.\n """\n\n def validate(self, data, *columns, **kwargs):\n if len(columns) == 0:\n columns = data.columns\n\n columns = [column for column in columns if column in data.columns]\n relevant_data = data[list(columns)]\n offending = {}\n offending_values = {}\n # TODO: grab metadata from here\n inverse_validation = lambda x: 
not self.validation_fn(x)[0]\n for column in columns:\n results = relevant_data[relevant_data[column].apply(inverse_validation)]\n if len(results.index.tolist()) > 0:\n offending[column] = ["row " + str(i) for i in (results.index.tolist())]\n offending_values[column] = results[column].tolist()\n if len(offending) == 0:\n if not self.raise_or_typecheck:\n return TypeCheck(success=True)\n else:\n metadict = {\n "expectation": self.validation_fn.__doc__,\n "actual": offending_values,\n "offending": offending,\n }\n exc = self.resulting_exception(\n constraint_name=self.name, constraint_description=self.description, **metadict\n )\n\n if self.raise_or_typecheck:\n raise exc\n else:\n return exc.return_as_typecheck()\n\n\nclass MultiColumnConstraintWithMetadata(ColumnConstraintWithMetadata):\n """This class is useful for constructing more complicated relationships between columns\n and expectations -- i.e. you want some validations on column A, others on column B, etc.\n This lets you package up the metadata neatly, and also allows for cases like 'fail if any one of\n these constraints fails but still run all of them'.\n\n Args:\n description (str): description of the overall set of validations\n fn_and_columns_dict (Dict[str, List[Callable[[Any], Tuple[bool, dict[str, Union[dict,list, str, set]]]]]):\n while this is a relatively complex type,\n what it amounts to is 'a dict mapping columns to the functions to\n run on them'\n resulting_exception (type): the response to generate if validation fails. Subclass of\n ConstraintWithMetadataException\n raise_or_typecheck (Optional[bool]): whether to raise an exception (true) or a failed typecheck (false)\n type_for_internal (Optional[type]): what type to use for internal validators. Subclass of\n ConstraintWithMetadata\n name (Optional[str]): what to call the constraint, defaults to the class name.\n """\n\n def __init__(\n self,\n description,\n fn_and_columns_dict,\n resulting_exception,\n raise_or_typecheck=True,\n type_for_internal=ColumnConstraintWithMetadata,\n name=None,\n ):\n # TODO: support multiple descriptions\n self.column_to_fn_dict = check.dict_param(\n fn_and_columns_dict, "fn_and_columns_dict", key_type=str\n )\n\n def validation_fn(data, *args, **kwargs):\n metadict = defaultdict(dict)\n truthparam = True\n for column, fn_arr in self.column_to_fn_dict.items():\n if column not in data.columns:\n continue\n for fn in fn_arr:\n # TODO: do this more effectively\n new_validator = type_for_internal(\n fn.__doc__, fn, ColumnWithMetadataException, raise_or_typecheck=False\n )\n result = new_validator.validate(\n DataFrame(data[column]), column, *args, **kwargs\n )\n result_val = result.success\n if result_val:\n continue\n result_dict = result.metadata[CONSTRAINT_METADATA_KEY].data\n truthparam = truthparam and result_val\n for key in result_dict.keys():\n if "constraint" not in key:\n if key == "expected":\n new_key = "expectation"\n result_dict[key] = result_dict[key].replace("returns", "").strip()\n if column not in metadict[new_key] or new_key not in metadict:\n metadict[new_key][column] = dict()\n metadict[new_key][column][fn.__name__] = result_dict[key]\n else:\n if column not in metadict[key] or key not in metadict:\n metadict[key][column] = dict()\n if isinstance(result_dict[key], dict):\n metadict[key][column][fn.__name__] = result_dict[key][column]\n else:\n metadict[key][column][fn.__name__] = "a violation"\n return truthparam, metadict\n\n super(MultiColumnConstraintWithMetadata, self).__init__(\n description,\n validation_fn,\n 
resulting_exception,\n raise_or_typecheck=raise_or_typecheck,\n name=name,\n )\n\n def validate(self, data, *args, **kwargs):\n return ConstraintWithMetadata.validate(self, data, *args, **kwargs)\n\n\nclass MultiAggregateConstraintWithMetadata(MultiColumnConstraintWithMetadata):\n """This class is similar to multicolumn, but takes in functions that operate on the whole column at once\n rather than ones that operate on each value --\n consider this similar to the difference between apply-map and apply aggregate.\n\n Args:\n description (str): description of the overall set of validations (TODO: support multiple descriptions)\n fn_and_columns_dict (Dict[str, List[Callable[[pd.Series], Tuple[bool, dict[str, Union[dict,list, str, set]]]]]):\n while this is a relatively complex type,\n what it amounts to is a dict mapping columns to the functions to\n run on them'\n resulting_exception (type): the response to generate if validation fails. Subclass of\n ConstraintWithMetadataException\n raise_or_typecheck (Optional[bool]): whether to raise an exception (true) or a failed typecheck (false)\n type_for_internal (Optional[type]): what type to use for internal validators. Subclass of\n ConstraintWithMetadata\n name (Optional[str]): what to call the constraint, defaults to the class name.\n """\n\n def __init__(\n self,\n description,\n fn_and_columns_dict,\n resulting_exception,\n raise_or_typecheck=True,\n name=None,\n ):\n super(MultiAggregateConstraintWithMetadata, self).__init__(\n description,\n fn_and_columns_dict,\n resulting_exception,\n raise_or_typecheck=raise_or_typecheck,\n type_for_internal=ColumnAggregateConstraintWithMetadata,\n name=name,\n )\n\n\ndef non_null_validation(x):\n """Validates that a particular value in a column is not null.\n\n Usage:\n pass this as a column validator to\n :py:class:'~dagster_pandas.constraints.ColumnConstraintWithMetadata'\n or :py:class:'~dagster_pandas.constraints.MultiColumnConstraintWithMetadata'\n Generally, you should prefer to use nonnull as a decorator/wrapper rather than using this\n directly.\n """\n return not pd.isnull(x), {}\n\n\ndef all_unique_validator(column, ignore_missing_vals=False):\n """Validates that all values in an iterable are unique.\n\n Returns duplicated values as metadata.\n\n Usage:\n As a validation function for a\n :py:class:'~dagster_pandas.constraints.ColumnAggregateConstraintWithMetadata'\n or :py:class:'~dagster_pandas.constraints.MultiAggregateConstraintWithMetadata'\n Example:\n .. 
code-block:: python\n aggregate_validator = MultiAggregateConstraintWithMetadata(\n "confirms all values are unique",\n {'bar': [all_unique_validator]},\n ConstraintWithMetadataException,\n raise_or_typecheck=False,\n )\n ntype = create_structured_dataframe_type(\n "NumericType",\n columns_aggregate_validator=aggregate_validator\n )\n @op(out={'basic_dataframe': Out(dagster_type=ntype)})\n def create_dataframe(_):\n yield Output(\n DataFrame({'foo': [1, 2, 3], 'bar': [9, 10, 10]}),\n output_name='basic_dataframe',\n )\n #will fail with\n metadata['offending'] == {'bar': {'all_unique_validator': 'a violation'}}\n metadata['actual'] == {'bar': {'all_unique_validator': [10.0]}}\n """\n column = pd.Series(column)\n duplicated = column.duplicated()\n if ignore_missing_vals:\n duplicated = apply_ignore_missing_data_to_mask(duplicated, column)\n return not duplicated.any(), {"actual": column[duplicated]}\n\n\ndef nonnull(func):\n """Decorator for column validation functions to make them error on nulls.\n\n Usage:\n pass decorated functions as column validators to\n :py:class:'~dagster_pandas.constraints.ColumnConstraintWithMetadata'\n or :py:class:'~dagster_pandas.constraints.MultiColumnConstraintWithMetadata'\n Args:\n func (Callable[[Any], Tuple[bool, dict[str, Union[dict,list, str, set]]]]]):\n the column validator you want to error on nulls.\n """\n\n @wraps(func)\n def nvalidator(val):\n origval = func(val)\n nval = non_null_validation(val)\n return origval[0] and nval[0], {}\n\n nvalidator.__doc__ += " and ensures no values are null"\n\n return nvalidator\n\n\ndef column_range_validation_factory(minim=None, maxim=None, ignore_missing_vals=False):\n """Factory for validators testing if column values are within a range.\n\n Args:\n minim(Optional[Comparable]): the low end of the range\n maxim(Optional[Comparable]): the high end of the range\n ignore_missing_vals(Optional[bool]): whether to ignore nulls.\n\n Returns: a validation function for this constraint\n Usage:\n pass returned functions as column validators to\n :py:class:'~dagster_pandas.constraints.ColumnConstraintWithMetadata'\n or :py:class:'~dagster_pandas.constraints.MultiColumnConstraintWithMetadata'\n Examples:\n .. 
code-block:: python\n in_range_validator = column_range_validation_factory(1, 3, ignore_missing_vals=True)\n column_validator = MultiColumnConstraintWithMetadata(\n "confirms values are numbers in a range",\n {'foo': [in_range_validator]},\n ColumnWithMetadataException,\n raise_or_typecheck=False,\n )\n ntype = create_structured_dataframe_type(\n "NumericType",\n columns_validator=column_validator\n )\n @op(out={'basic_dataframe': Out(dagster_type=ntype)})\n def create_dataframe(_):\n yield Output(\n DataFrame({'foo': [1, 2, 7], 'bar': [9, 10, 10]}),\n output_name='basic_dataframe',\n )\n #will fail with\n metadata['offending'] == {'foo': {'in_range_validation_fn': ['row 2']}}\n metadata['actual'] == {'foo': {'in_range_validation_fn': [7]}}\n\n """\n if minim is None:\n if isinstance(maxim, datetime):\n minim = datetime.min\n else:\n minim = -1 * (sys.maxsize - 1)\n if maxim is None:\n if isinstance(minim, datetime):\n maxim = datetime.max\n else:\n maxim = sys.maxsize\n\n def in_range_validation_fn(x):\n if ignore_missing_vals and pd.isnull(x):\n return True, {}\n return (isinstance(x, (type(minim), type(maxim)))) and (x <= maxim) and (x >= minim), {}\n\n in_range_validation_fn.__doc__ = f"checks whether values are between {minim} and {maxim}"\n if ignore_missing_vals:\n in_range_validation_fn.__doc__ += ", ignoring nulls"\n\n return in_range_validation_fn\n\n\ndef categorical_column_validator_factory(categories, ignore_missing_vals=False):\n """Factory for validators testing if all values are in some set.\n\n Args:\n categories(Union[Sequence, set]): the set of allowed values\n ignore_missing_vals(Optional[bool]): whether to ignore nulls.\n\n Returns: a validation function for this constraint\n\n Usage:\n pass returned functions as column validators to\n :py:class:'~dagster_pandas.constraints.ColumnConstraintWithMetadata'\n or :py:class:'~dagster_pandas.constraints.MultiColumnConstraintWithMetadata'\n\n Example:\n .. 
code-block:: python\n categorical_validation_fn = categorical_column_validator_factory([1, 2])\n column_validator = MultiColumnConstraintWithMetadata(\n "confirms values are numbers in a range",\n {'foo': [categorical_validation_fn]},\n ColumnWithMetadataException,\n raise_or_typecheck=False,\n )\n ntype = create_structured_dataframe_type(\n "NumericType",\n columns_validator=column_validator\n )\n @op(out={'basic_dataframe': Out(dagster_type=ntype)})\n def create_dataframe(_):\n yield Output(\n DataFrame({'foo': [1, 2, 7], 'bar': [9, 10, 10]}),\n output_name='basic_dataframe',\n )\n #will fail with\n metadata['offending'] == {'foo': {'categorical_validation_fn': ['row 2']}}\n metadata['actual'] == {'foo': {'categorical_validation_fn': [7]}}\n\n """\n categories = set(categories)\n\n def categorical_validation_fn(x):\n if ignore_missing_vals and pd.isnull(x):\n return True, {}\n return (x in categories), {}\n\n categorical_validation_fn.__doc__ = (\n f"checks whether values are within this set of values: {categories}"\n )\n if ignore_missing_vals:\n categorical_validation_fn.__doc__ += ", ignoring nulls"\n\n return categorical_validation_fn\n\n\ndef dtype_in_set_validation_factory(datatypes, ignore_missing_vals=False):\n """Factory for testing if the dtype of a val falls within some allowed set.\n\n Args:\n datatypes(Union[set[type], type]): which datatype/datatypes are allowed\n ignore_missing_vals(Optional[bool]): whether to ignore nulls\n\n Returns: a validation function for this constraint\n\n Usage:\n pass returned functions as column validators to\n :py:class:'~dagster_pandas.constraints.ColumnConstraintWithMetadata'\n or :py:class:'~dagster_pandas.constraints.MultiColumnConstraintWithMetadata'\n\n Examples:\n .. code-block:: python\n dtype_is_num_validator = dtype_in_set_validation_factory((int, float, int64, float64))\n column_validator = MultiColumnConstraintWithMetadata(\n "confirms values are numbers in a range",\n {'foo': [dtype_is_num_validator]},\n ColumnWithMetadataException,\n raise_or_typecheck=False,\n )\n ntype = create_structured_dataframe_type(\n "NumericType",\n columns_validator=column_validator\n )\n @op(out={'basic_dataframe': Out(dagster_type=ntype)})\n def create_dataframe(_):\n yield Output(\n DataFrame({'foo': [1, 'a', 7], 'bar': [9, 10, 10]}),\n output_name='basic_dataframe',\n )\n #will fail with\n metadata['offending'] == {'foo': {'categorical_validation_fn': ['row 1']}}\n metadata['actual'] == {'foo': {'categorical_validation_fn': ['a']}}\n\n """\n\n def dtype_in_set_validation_fn(x):\n if ignore_missing_vals and pd.isnull(x):\n return True, {}\n return isinstance(x, datatypes), {}\n\n dtype_in_set_validation_fn.__doc__ = f"checks whether values are this type/types: {datatypes}"\n if ignore_missing_vals:\n dtype_in_set_validation_fn.__doc__ += ", ignoring nulls"\n\n return dtype_in_set_validation_fn\n\n\nclass ColumnRangeConstraintWithMetadata(ColumnConstraintWithMetadata):\n def __init__(self, minim=None, maxim=None, columns=None, raise_or_typecheck=True):\n self.name = self.__class__.__name__\n\n description = f"Confirms values are between {minim} and {maxim}"\n super(ColumnRangeConstraintWithMetadata, self).__init__(\n description=description,\n validation_fn=column_range_validation_factory(minim=minim, maxim=maxim),\n resulting_exception=ColumnWithMetadataException,\n raise_or_typecheck=raise_or_typecheck,\n )\n self.columns = columns\n\n def validate(self, data, *args, **kwargs):\n if self.columns is None:\n self.columns = list(data.columns)\n 
self.columns.extend(args)\n return super(ColumnRangeConstraintWithMetadata, self).validate(\n data, *self.columns, **kwargs\n )\n\n\nclass ColumnConstraint(Constraint):\n """Base constraint object that represent dataframe column shape constraints.\n\n Args:\n error_description (Optional[str]): The plain string description that is output in the terminal if the constraint fails.\n markdown_description (Optional[str]): A markdown supported description that is shown in the Dagster UI if the constraint fails.\n """\n\n def __init__(self, error_description=None, markdown_description=None):\n super(ColumnConstraint, self).__init__(\n error_description=error_description, markdown_description=markdown_description\n )\n\n def validate(self, dataframe, column_name):\n pass\n\n @staticmethod\n def get_offending_row_pairs(dataframe, column_name):\n return zip(dataframe.index.tolist(), dataframe[column_name].tolist())\n\n\nclass ColumnDTypeFnConstraint(ColumnConstraint):\n """A column constraint that applies a pandas dtype validation function to a columns dtype.\n\n Args:\n type_fn (Callable[[Set[str]], bool]): This is a function that takes the pandas columns dtypes and\n returns if those dtypes match the types it expects. See pandas.core.dtypes.common for examples.\n """\n\n def __init__(self, type_fn):\n self.type_fn = check.callable_param(type_fn, "type_fn")\n description = f'Dtype must satisfy "{self.type_fn.__name__}"'\n super(ColumnDTypeFnConstraint, self).__init__(\n error_description=description, markdown_description=description\n )\n\n def validate(self, dataframe, column_name):\n column_dtype = dataframe[column_name].dtype\n if not self.type_fn(column_dtype):\n raise ColumnConstraintViolationException(\n constraint_name=self.name,\n constraint_description=f'{self.error_description}, but was "{column_dtype}"',\n column_name=column_name,\n )\n\n\nclass ColumnDTypeInSetConstraint(ColumnConstraint):\n """A column constraint that validates the pandas column dtypes based on the expected set of dtypes.\n\n Args:\n expected_dtype_set (Set[str]): The set of pandas dtypes that the pandas column dtypes must match.\n """\n\n def __init__(self, expected_dtype_set):\n self.expected_dtype_set = check.set_param(expected_dtype_set, "expected_dtype_set")\n description = f"Column dtype must be in the following set {self.expected_dtype_set}."\n super(ColumnDTypeInSetConstraint, self).__init__(\n error_description=description, markdown_description=description\n )\n\n def validate(self, dataframe, column_name):\n received_dtypes = dataframe[column_name].dtype\n if str(received_dtypes) not in self.expected_dtype_set:\n raise ColumnConstraintViolationException(\n constraint_name=self.name,\n constraint_description=(\n f"{self.error_description}. 
DTypes received: {received_dtypes}"\n ),\n column_name=column_name,\n )\n\n\nclass NonNullableColumnConstraint(ColumnConstraint):\n """A column constraint that ensures all values in a pandas column are not null."""\n\n def __init__(self):\n description = "No Null values allowed."\n super(NonNullableColumnConstraint, self).__init__(\n error_description=description, markdown_description=description\n )\n\n def validate(self, dataframe, column_name):\n rows_with_null_columns = dataframe[dataframe[column_name].isna()]\n if not rows_with_null_columns.empty:\n raise ColumnConstraintViolationException(\n constraint_name=self.name,\n constraint_description=self.error_description,\n column_name=column_name,\n offending_rows=self.get_offending_row_pairs(rows_with_null_columns, column_name),\n )\n\n\nclass UniqueColumnConstraint(ColumnConstraint):\n """A column constraint that ensures all values in a pandas column are unique.\n\n Args:\n ignore_missing_vals (bool): If true, this constraint will enforce the constraint on non missing values.\n """\n\n def __init__(self, ignore_missing_vals):\n description = "Column must be unique."\n self.ignore_missing_vals = check.bool_param(ignore_missing_vals, "ignore_missing_vals")\n super(UniqueColumnConstraint, self).__init__(\n error_description=description, markdown_description=description\n )\n\n def validate(self, dataframe, column_name):\n invalid = dataframe[column_name].duplicated()\n if self.ignore_missing_vals:\n invalid = apply_ignore_missing_data_to_mask(invalid, dataframe[column_name])\n rows_with_duplicated_values = dataframe[invalid]\n if not rows_with_duplicated_values.empty:\n raise ColumnConstraintViolationException(\n constraint_name=self.name,\n constraint_description=self.error_description,\n column_name=column_name,\n offending_rows=rows_with_duplicated_values,\n )\n\n\nclass CategoricalColumnConstraint(ColumnConstraint):\n """A column constraint that ensures all values in a pandas column are a valid category.\n\n Args:\n categories (Set[str]): Set of categories that values in your pandas column must match.\n ignore_missing_vals (bool): If true, this constraint will enforce the constraint on non missing values.\n """\n\n def __init__(self, categories, ignore_missing_vals):\n self.categories = list(check.set_param(categories, "categories", of_type=str))\n self.ignore_missing_vals = check.bool_param(ignore_missing_vals, "ignore_missing_vals")\n super(CategoricalColumnConstraint, self).__init__(\n error_description=f"Expected Categories are {self.categories}",\n markdown_description=f"Category examples are {self.categories[:5]}...",\n )\n\n def validate(self, dataframe, column_name):\n invalid = ~dataframe[column_name].isin(self.categories)\n if self.ignore_missing_vals:\n invalid = apply_ignore_missing_data_to_mask(invalid, dataframe[column_name])\n rows_with_unexpected_buckets = dataframe[invalid]\n if not rows_with_unexpected_buckets.empty:\n raise ColumnConstraintViolationException(\n constraint_name=self.name,\n constraint_description=self.error_description,\n column_name=column_name,\n offending_rows=rows_with_unexpected_buckets,\n )\n\n\nclass MinValueColumnConstraint(ColumnConstraint):\n """A column constraint that ensures all values in a pandas column are greater than the provided\n lower bound [inclusive].\n\n Args:\n min_value (Union[int, float, datetime.datetime]): The lower bound.\n ignore_missing_vals (bool): If true, this constraint will enforce the constraint on non missing values.\n """\n\n def __init__(self, min_value, 
ignore_missing_vals):\n self.min_value = check.inst_param(min_value, "min_value", (int, float, datetime))\n self.ignore_missing_vals = check.bool_param(ignore_missing_vals, "ignore_missing_vals")\n super(MinValueColumnConstraint, self).__init__(\n markdown_description=f"values > {self.min_value}",\n error_description=f"Column must have values > {self.min_value}",\n )\n\n def validate(self, dataframe, column_name):\n invalid = dataframe[column_name] < self.min_value\n if self.ignore_missing_vals:\n invalid = apply_ignore_missing_data_to_mask(invalid, dataframe[column_name])\n out_of_bounds_rows = dataframe[invalid]\n if not out_of_bounds_rows.empty:\n raise ColumnConstraintViolationException(\n constraint_name=self.name,\n constraint_description=self.error_description,\n column_name=column_name,\n offending_rows=out_of_bounds_rows,\n )\n\n\nclass MaxValueColumnConstraint(ColumnConstraint):\n """A column constraint that ensures all values in a pandas column are less than the provided\n upper bound [inclusive].\n\n Args:\n max_value (Union[int, float, datetime.datetime]): The upper bound.\n ignore_missing_vals (bool): If true, this constraint will enforce the constraint on non missing values.\n """\n\n def __init__(self, max_value, ignore_missing_vals):\n self.max_value = check.inst_param(max_value, "max_value", (int, float, datetime))\n self.ignore_missing_vals = check.bool_param(ignore_missing_vals, "ignore_missing_vals")\n super(MaxValueColumnConstraint, self).__init__(\n markdown_description=f"values < {self.max_value}",\n error_description=f"Column must have values < {self.max_value}",\n )\n\n def validate(self, dataframe, column_name):\n invalid = dataframe[column_name] > self.max_value\n if self.ignore_missing_vals:\n invalid = apply_ignore_missing_data_to_mask(invalid, dataframe[column_name])\n out_of_bounds_rows = dataframe[invalid]\n if not out_of_bounds_rows.empty:\n raise ColumnConstraintViolationException(\n constraint_name=self.name,\n constraint_description=self.error_description,\n column_name=column_name,\n offending_rows=out_of_bounds_rows,\n )\n\n\nclass InRangeColumnConstraint(ColumnConstraint):\n """A column constraint that ensures all values in a pandas column are between the lower and upper\n bound [inclusive].\n\n Args:\n min_value (Union[int, float, datetime.datetime]): The lower bound.\n max_value (Union[int, float, datetime.datetime]): The upper bound.\n ignore_missing_vals (bool): If true, this constraint will enforce the constraint on non\n missing values.\n """\n\n def __init__(self, min_value, max_value, ignore_missing_vals):\n self.min_value = check.inst_param(min_value, "min_value", (int, float, datetime))\n self.max_value = check.inst_param(max_value, "max_value", (int, float, datetime))\n self.ignore_missing_vals = check.bool_param(ignore_missing_vals, "ignore_missing_vals")\n super(InRangeColumnConstraint, self).__init__(\n markdown_description=f"{self.min_value} < values < {self.max_value}",\n error_description="Column must have values between {} and {} inclusive.".format(\n self.min_value, self.max_value\n ),\n )\n\n def validate(self, dataframe, column_name):\n invalid = ~dataframe[column_name].between(self.min_value, self.max_value)\n if self.ignore_missing_vals:\n invalid = apply_ignore_missing_data_to_mask(invalid, dataframe[column_name])\n out_of_bounds_rows = dataframe[invalid]\n if not out_of_bounds_rows.empty:\n raise ColumnConstraintViolationException(\n constraint_name=self.name,\n constraint_description=self.error_description,\n 
column_name=column_name,\n offending_rows=out_of_bounds_rows,\n )\n
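A minimal sketch (not from the source) of the column-level constraints above applied directly; in practice they are usually constructed for you via PandasColumn (see dagster_pandas.validation), and the dataframe here is hypothetical:

import pandas as pd

from dagster_pandas.constraints import InRangeColumnConstraint, NonNullableColumnConstraint

df = pd.DataFrame({"amount": [5, 10, 42]})

# Passes: every value in "amount" is between 0 and 100, inclusive.
InRangeColumnConstraint(0, 100, ignore_missing_vals=False).validate(df, "amount")

# Passes here as well; a null value in the column would raise
# ColumnConstraintViolationException.
NonNullableColumnConstraint().validate(df, "amount")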
", "current_page_name": "_modules/dagster_pandas/constraints", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_pandas.constraints"}, "data_frame": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_pandas.data_frame

\nimport pandas as pd\nfrom dagster import (\n    DagsterInvariantViolationError,\n    DagsterType,\n    Field,\n    MetadataValue,\n    StringSource,\n    TableColumn,\n    TableSchema,\n    TableSchemaMetadataValue,\n    TypeCheck,\n    _check as check,\n    dagster_type_loader,\n)\nfrom dagster._annotations import experimental\nfrom dagster._config import Selector\nfrom dagster._core.definitions.metadata import normalize_metadata\nfrom dagster._utils import dict_without_keys\n\nfrom dagster_pandas.constraints import (\n    CONSTRAINT_METADATA_KEY,\n    ColumnDTypeFnConstraint,\n    ColumnDTypeInSetConstraint,\n    ConstraintViolationException,\n)\nfrom dagster_pandas.validation import PandasColumn, validate_constraints\n\nCONSTRAINT_BLACKLIST = {ColumnDTypeFnConstraint, ColumnDTypeInSetConstraint}\n\n\n@dagster_type_loader(\n    Selector(\n        {\n            "csv": {\n                "path": StringSource,\n                "sep": Field(StringSource, is_required=False, default_value=","),\n            },\n            "parquet": {"path": StringSource},\n            "table": {"path": StringSource},\n            "pickle": {"path": StringSource},\n        },\n    )\n)\ndef dataframe_loader(_context, config):\n    file_type, file_options = next(iter(config.items()))\n\n    if file_type == "csv":\n        path = file_options["path"]\n        return pd.read_csv(path, **dict_without_keys(file_options, "path"))\n    elif file_type == "parquet":\n        return pd.read_parquet(file_options["path"])\n    elif file_type == "table":\n        return pd.read_csv(file_options["path"], sep="\\t")\n    elif file_type == "pickle":\n        return pd.read_pickle(file_options["path"])\n    else:\n        raise DagsterInvariantViolationError(f"Unsupported file_type {file_type}")\n\n\ndef df_type_check(_, value):\n    if not isinstance(value, pd.DataFrame):\n        return TypeCheck(success=False)\n    return TypeCheck(\n        success=True,\n        metadata={\n            "row_count": str(len(value)),\n            # string cast columns since they may be things like datetime\n            "metadata": {"columns": list(map(str, value.columns))},\n        },\n    )\n\n\nDataFrame = DagsterType(\n    name="PandasDataFrame",\n    description="""Two-dimensional size-mutable, potentially heterogeneous\n    tabular data structure with labeled axes (rows and columns).\n    See http://pandas.pydata.org/""",\n    loader=dataframe_loader,\n    type_check_fn=df_type_check,\n    typing_type=pd.DataFrame,\n)\n\n\ndef _construct_constraint_list(constraints):\n    def add_bullet(constraint_list, constraint_description):\n        return constraint_list + f"+ {constraint_description}\\n"\n\n    constraint_list = ""\n    for constraint in constraints:\n        if constraint.__class__ not in CONSTRAINT_BLACKLIST:\n            constraint_list = add_bullet(constraint_list, constraint.markdown_description)\n    return constraint_list\n\n\ndef _build_column_header(column_name, constraints):\n    header = f"**{column_name}**"\n    for constraint in constraints:\n        if isinstance(constraint, ColumnDTypeInSetConstraint):\n            dtypes_tuple = tuple(constraint.expected_dtype_set)\n            return header + f": `{dtypes_tuple if len(dtypes_tuple) > 1 else dtypes_tuple[0]}`"\n        elif isinstance(constraint, ColumnDTypeFnConstraint):\n            return header + f": Validator `{constraint.type_fn.__name__}`"\n    return header\n\n\ndef create_dagster_pandas_dataframe_description(description, columns):\n    title = 
"\\n".join([description, "### Columns", ""])\n    buildme = title\n    for column in columns:\n        buildme += "{}\\n{}\\n".format(\n            _build_column_header(column.name, column.constraints),\n            _construct_constraint_list(column.constraints),\n        )\n    return buildme\n\n\ndef create_table_schema_metadata_from_dataframe(\n    pandas_df: pd.DataFrame,\n) -> TableSchemaMetadataValue:\n    """This function takes a pandas DataFrame and returns its metadata as a Dagster TableSchema.\n\n    Args:\n        pandas_df (pandas.DataFrame): A pandas DataFrame for which to create metadata.\n\n    Returns:\n        TableSchemaMetadataValue: returns an object with the TableSchema for the DataFrame.\n    """\n    check.inst(pandas_df, pd.DataFrame, "Input must be a pandas DataFrame object")\n    return MetadataValue.table_schema(\n        TableSchema(\n            columns=[\n                TableColumn(name=str(name), type=str(dtype))\n                for name, dtype in pandas_df.dtypes.items()\n            ]\n        )\n    )\n\n\n
[docs]def create_dagster_pandas_dataframe_type(\n name,\n description=None,\n columns=None,\n metadata_fn=None,\n dataframe_constraints=None,\n loader=None,\n):\n """Constructs a custom pandas dataframe dagster type.\n\n Args:\n name (str): Name of the dagster pandas type.\n description (Optional[str]): A markdown-formatted string, displayed in tooling.\n columns (Optional[List[PandasColumn]]): A list of :py:class:`~dagster.PandasColumn` objects\n which express dataframe column schemas and constraints.\n metadata_fn (Optional[Callable[[], Union[Dict[str, Union[str, float, int, Dict, MetadataValue]])\n A callable which takes your dataframe and returns a dict with string label keys and\n MetadataValue values.\n dataframe_constraints (Optional[List[DataFrameConstraint]]): A list of objects that inherit from\n :py:class:`~dagster.DataFrameConstraint`. This allows you to express dataframe-level constraints.\n loader (Optional[DagsterTypeLoader]): An instance of a class that\n inherits from :py:class:`~dagster.DagsterTypeLoader`. If None, we will default\n to using `dataframe_loader`.\n """\n # We allow for the plugging in of a dagster_type_loader so that users can load their custom\n # dataframes via configuration their own way if the default configs don't suffice. This is\n # purely optional.\n check.str_param(name, "name")\n metadata_fn = check.opt_callable_param(metadata_fn, "metadata_fn")\n description = create_dagster_pandas_dataframe_description(\n check.opt_str_param(description, "description", default=""),\n check.opt_list_param(columns, "columns", of_type=PandasColumn),\n )\n\n def _dagster_type_check(_, value):\n if not isinstance(value, pd.DataFrame):\n return TypeCheck(\n success=False,\n description=(\n f"Must be a pandas.DataFrame. Got value of type. {type(value).__name__}"\n ),\n )\n\n try:\n validate_constraints(\n value,\n pandas_columns=columns,\n dataframe_constraints=dataframe_constraints,\n )\n except ConstraintViolationException as e:\n return TypeCheck(success=False, description=str(e))\n\n return TypeCheck(\n success=True,\n metadata=_execute_summary_stats(name, value, metadata_fn) if metadata_fn else None,\n )\n\n return DagsterType(\n name=name,\n type_check_fn=_dagster_type_check,\n loader=loader if loader else dataframe_loader,\n description=description,\n typing_type=pd.DataFrame,\n )
\n\n\n@experimental\ndef create_structured_dataframe_type(\n name,\n description=None,\n columns_validator=None,\n columns_aggregate_validator=None,\n dataframe_validator=None,\n loader=None,\n):\n """Args:\n name (str): the name of the new type\n description (Optional[str]): the description of the new type\n columns_validator (Optional[Union[ColumnConstraintWithMetadata, MultiColumnConstraintWithMetadata]]):\n what column-level row by row validation you want to have applied.\n Leave empty for no column-level row by row validation.\n columns_aggregate_validator (Optional[Union[ColumnAggregateConstraintWithMetadata,\n MultiAggregateConstraintWithMetadata]]):\n what column-level aggregate validation you want to have applied,\n Leave empty for no column-level aggregate validation.\n dataframe_validator (Optional[Union[ConstraintWithMetadata, MultiConstraintWithMetadata]]):\n what dataframe-wide validation you want to have applied.\n Leave empty for no dataframe-wide validation.\n loader (Optional[DagsterTypeLoader]): An instance of a class that\n inherits from :py:class:`~dagster.DagsterTypeLoader`. If None, we will default\n to using `dataframe_loader`.\n\n Returns:\n a DagsterType with the corresponding name and packaged validation.\n\n """\n\n def _dagster_type_check(_, value):\n if not isinstance(value, pd.DataFrame):\n return TypeCheck(\n success=False,\n description=(\n f"Must be a pandas.DataFrame. Got value of type. {type(value).__name__}"\n ),\n )\n individual_result_dict = {}\n\n if dataframe_validator is not None:\n individual_result_dict["dataframe"] = dataframe_validator.validate(value)\n if columns_validator is not None:\n individual_result_dict["columns"] = columns_validator.validate(value)\n\n if columns_aggregate_validator is not None:\n individual_result_dict["column-aggregates"] = columns_aggregate_validator.validate(\n value\n )\n\n typechecks_succeeded = True\n metadata = {}\n overall_description = "Failed Constraints: {}"\n constraint_clauses = []\n for key, result in individual_result_dict.items():\n result_val = result.success\n if result_val:\n continue\n typechecks_succeeded = typechecks_succeeded and result_val\n result_dict = result.metadata[CONSTRAINT_METADATA_KEY].data\n metadata[f"{key}-constraint-metadata"] = MetadataValue.json(result_dict)\n constraint_clauses.append(f"{key} failing constraints, {result.description}")\n # returns aggregates, then column, then dataframe\n return TypeCheck(\n success=typechecks_succeeded,\n description=overall_description.format(constraint_clauses),\n metadata=metadata,\n )\n\n description = check.opt_str_param(description, "description", default="")\n return DagsterType(\n name=name,\n type_check_fn=_dagster_type_check,\n loader=loader if loader else dataframe_loader,\n description=description,\n )\n\n\ndef _execute_summary_stats(type_name, value, metadata_fn):\n if not metadata_fn:\n return []\n\n user_metadata = metadata_fn(value)\n try:\n return normalize_metadata(user_metadata)\n except:\n raise DagsterInvariantViolationError(\n "The return value of the user-defined summary_statistics function for pandas "\n f"data frame type {type_name} returned {value}. This function must return "\n "Dict[str, RawMetadataValue]."\n )\n
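A minimal sketch of the create_dagster_pandas_dataframe_type API documented above; the column names and op are hypothetical, chosen only for illustration:

import pandas as pd

from dagster import Out, op
from dagster_pandas.constraints import RowCountConstraint
from dagster_pandas.data_frame import create_dagster_pandas_dataframe_type
from dagster_pandas.validation import PandasColumn

# A custom dataframe type with column-level and dataframe-level constraints.
TripDataFrame = create_dagster_pandas_dataframe_type(
    name="TripDataFrame",
    columns=[
        PandasColumn.integer_column("bike_id", min_value=0),
        PandasColumn.string_column("color"),
    ],
    dataframe_constraints=[RowCountConstraint(num_allowed_rows=100, error_tolerance=100)],
)

# The type check runs whenever an op emits a value typed with TripDataFrame.
@op(out=Out(dagster_type=TripDataFrame))
def load_trips():
    return pd.DataFrame({"bike_id": [1, 2], "color": ["red", "blue"]})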
", "current_page_name": "_modules/dagster_pandas/data_frame", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_pandas.data_frame"}, "validation": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_pandas.validation

\nfrom dagster import (\n    DagsterInvariantViolationError,\n    _check as check,\n)\nfrom pandas import DataFrame, Timestamp\nfrom pandas.core.dtypes.common import (\n    is_bool_dtype,\n    is_float_dtype,\n    is_integer_dtype,\n    is_numeric_dtype,\n    is_string_dtype,\n)\n\nfrom dagster_pandas.constraints import (\n    CategoricalColumnConstraint,\n    ColumnDTypeFnConstraint,\n    ColumnDTypeInSetConstraint,\n    Constraint,\n    ConstraintViolationException,\n    DataFrameConstraint,\n    InRangeColumnConstraint,\n    NonNullableColumnConstraint,\n    UniqueColumnConstraint,\n)\n\nPANDAS_NUMERIC_TYPES = {"int64", "float"}\n\n\ndef _construct_keyword_constraints(non_nullable, unique, ignore_missing_vals):\n    non_nullable = check.bool_param(non_nullable, "exists")\n    unique = check.bool_param(unique, "unique")\n    ignore_missing_vals = check.bool_param(ignore_missing_vals, "ignore_missing_vals")\n    if non_nullable and ignore_missing_vals:\n        raise DagsterInvariantViolationError(\n            "PandasColumn cannot have a non-null constraint while also ignore missing values"\n        )\n    constraints = []\n    if non_nullable:\n        constraints.append(NonNullableColumnConstraint())\n    if unique:\n        constraints.append(UniqueColumnConstraint(ignore_missing_vals=ignore_missing_vals))\n    return constraints\n\n\n
[docs]class PandasColumn:\n """The main API for expressing column level schemas and constraints for your custom dataframe\n types.\n\n Args:\n name (str): Name of the column. This must match up with the column name in the dataframe you\n expect to receive.\n is_required (Optional[bool]): Flag indicating the optional/required presence of the column.\n If th column exists, the validate function will validate the column. Defaults to True.\n constraints (Optional[List[Constraint]]): List of constraint objects that indicate the\n validation rules for the pandas column.\n """\n\n def __init__(self, name, constraints=None, is_required=None):\n self.name = check.str_param(name, "name")\n self.is_required = check.opt_bool_param(is_required, "is_required", default=True)\n self.constraints = check.opt_list_param(constraints, "constraints", of_type=Constraint)\n\n def validate(self, dataframe):\n if self.name not in dataframe.columns:\n # Ignore validation if column is missing from dataframe and is not required\n if self.is_required:\n raise ConstraintViolationException(\n f"Required column {self.name} not in dataframe with columns {dataframe.columns}"\n )\n else:\n for constraint in self.constraints:\n constraint.validate(dataframe, self.name)\n\n @staticmethod\n def exists(name, non_nullable=False, unique=False, ignore_missing_vals=False, is_required=None):\n """Simple constructor for PandasColumns that expresses existence constraints.\n\n Args:\n name (str): Name of the column. This must match up with the column name in the dataframe you\n expect to receive.\n non_nullable (Optional[bool]): If true, this column will enforce a constraint that all values in the column\n ought to be non null values.\n unique (Optional[bool]): If true, this column will enforce a uniqueness constraint on the column values.\n ignore_missing_vals (Optional[bool]): A flag that is passed into most constraints. If true, the constraint will\n only evaluate non-null data. Ignore_missing_vals and non_nullable cannot both be True.\n is_required (Optional[bool]): Flag indicating the optional/required presence of the column.\n If the column exists the validate function will validate the column. Default to True.\n """\n return PandasColumn(\n name=check.str_param(name, "name"),\n constraints=_construct_keyword_constraints(\n non_nullable=non_nullable, unique=unique, ignore_missing_vals=ignore_missing_vals\n ),\n is_required=is_required,\n )\n\n @staticmethod\n def boolean_column(\n name, non_nullable=False, unique=False, ignore_missing_vals=False, is_required=None\n ):\n """Simple constructor for PandasColumns that expresses boolean constraints on boolean dtypes.\n\n Args:\n name (str): Name of the column. This must match up with the column name in the dataframe you\n expect to receive.\n non_nullable (Optional[bool]): If true, this column will enforce a constraint that all values in the column\n ought to be non null values.\n unique (Optional[bool]): If true, this column will enforce a uniqueness constraint on the column values.\n ignore_missing_vals (Optional[bool]): A flag that is passed into most constraints. If true, the constraint will\n only evaluate non-null data. Ignore_missing_vals and non_nullable cannot both be True.\n is_required (Optional[bool]): Flag indicating the optional/required presence of the column.\n If the column exists the validate function will validate the column. 
Default to True.\n """\n return PandasColumn(\n name=check.str_param(name, "name"),\n constraints=[ColumnDTypeFnConstraint(is_bool_dtype)]\n + _construct_keyword_constraints(\n non_nullable=non_nullable, unique=unique, ignore_missing_vals=ignore_missing_vals\n ),\n is_required=is_required,\n )\n\n @staticmethod\n def numeric_column(\n name,\n min_value=-float("inf"),\n max_value=float("inf"),\n non_nullable=False,\n unique=False,\n ignore_missing_vals=False,\n is_required=None,\n ):\n """Simple constructor for PandasColumns that expresses numeric constraints numeric dtypes.\n\n Args:\n name (str): Name of the column. This must match up with the column name in the dataframe you\n expect to receive.\n min_value (Optional[Union[int,float]]): The lower bound for values you expect in this column. Defaults to -float('inf')\n max_value (Optional[Union[int,float]]): The upper bound for values you expect in this column. Defaults to float('inf')\n non_nullable (Optional[bool]): If true, this column will enforce a constraint that all values in the column\n ought to be non null values.\n unique (Optional[bool]): If true, this column will enforce a uniqueness constraint on the column values.\n ignore_missing_vals (Optional[bool]): A flag that is passed into most constraints. If true, the constraint will\n only evaluate non-null data. Ignore_missing_vals and non_nullable cannot both be True.\n is_required (Optional[bool]): Flag indicating the optional/required presence of the column.\n If the column exists the validate function will validate the column. Default to True.\n """\n return PandasColumn(\n name=check.str_param(name, "name"),\n constraints=[\n ColumnDTypeFnConstraint(is_numeric_dtype),\n InRangeColumnConstraint(\n check.numeric_param(min_value, "min_value"),\n check.numeric_param(max_value, "max_value"),\n ignore_missing_vals=ignore_missing_vals,\n ),\n ]\n + _construct_keyword_constraints(\n non_nullable=non_nullable, unique=unique, ignore_missing_vals=ignore_missing_vals\n ),\n is_required=is_required,\n )\n\n @staticmethod\n def integer_column(\n name,\n min_value=-float("inf"),\n max_value=float("inf"),\n non_nullable=False,\n unique=False,\n ignore_missing_vals=False,\n is_required=None,\n ):\n """Simple constructor for PandasColumns that expresses numeric constraints on integer dtypes.\n\n Args:\n name (str): Name of the column. This must match up with the column name in the dataframe you\n expect to receive.\n min_value (Optional[Union[int,float]]): The lower bound for values you expect in this column. Defaults to -float('inf')\n max_value (Optional[Union[int,float]]): The upper bound for values you expect in this column. Defaults to float('inf')\n non_nullable (Optional[bool]): If true, this column will enforce a constraint that all values in the column\n ought to be non null values.\n unique (Optional[bool]): If true, this column will enforce a uniqueness constraint on the column values.\n ignore_missing_vals (Optional[bool]): A flag that is passed into most constraints. If true, the constraint will\n only evaluate non-null data. Ignore_missing_vals and non_nullable cannot both be True.\n is_required (Optional[bool]): Flag indicating the optional/required presence of the column.\n If the column exists the validate function will validate the column. 
Default to True.\n """\n return PandasColumn(\n name=check.str_param(name, "name"),\n constraints=[\n ColumnDTypeFnConstraint(is_integer_dtype),\n InRangeColumnConstraint(\n check.numeric_param(min_value, "min_value"),\n check.numeric_param(max_value, "max_value"),\n ignore_missing_vals=ignore_missing_vals,\n ),\n ]\n + _construct_keyword_constraints(\n non_nullable=non_nullable, unique=unique, ignore_missing_vals=ignore_missing_vals\n ),\n is_required=is_required,\n )\n\n @staticmethod\n def float_column(\n name,\n min_value=-float("inf"),\n max_value=float("inf"),\n non_nullable=False,\n unique=False,\n ignore_missing_vals=False,\n is_required=None,\n ):\n """Simple constructor for PandasColumns that expresses numeric constraints on float dtypes.\n\n Args:\n name (str): Name of the column. This must match up with the column name in the dataframe you\n expect to receive.\n min_value (Optional[Union[int,float]]): The lower bound for values you expect in this column. Defaults to -float('inf')\n max_value (Optional[Union[int,float]]): The upper bound for values you expect in this column. Defaults to float('inf')\n non_nullable (Optional[bool]): If true, this column will enforce a constraint that all values in the column\n ought to be non null values.\n unique (Optional[bool]): If true, this column will enforce a uniqueness constraint on the column values.\n ignore_missing_vals (Optional[bool]): A flag that is passed into most constraints. If true, the constraint will\n only evaluate non-null data. Ignore_missing_vals and non_nullable cannot both be True.\n is_required (Optional[bool]): Flag indicating the optional/required presence of the column.\n If the column exists the validate function will validate the column. Default to True.\n """\n return PandasColumn(\n name=check.str_param(name, "name"),\n constraints=[\n ColumnDTypeFnConstraint(is_float_dtype),\n InRangeColumnConstraint(\n check.numeric_param(min_value, "min_value"),\n check.numeric_param(max_value, "max_value"),\n ignore_missing_vals=ignore_missing_vals,\n ),\n ]\n + _construct_keyword_constraints(\n non_nullable=non_nullable, unique=unique, ignore_missing_vals=ignore_missing_vals\n ),\n is_required=is_required,\n )\n\n @staticmethod\n def datetime_column(\n name,\n min_datetime=Timestamp.min,\n max_datetime=Timestamp.max,\n non_nullable=False,\n unique=False,\n ignore_missing_vals=False,\n is_required=None,\n tz=None,\n ):\n """Simple constructor for PandasColumns that expresses datetime constraints on 'datetime64[ns]' dtypes.\n\n Args:\n name (str): Name of the column. This must match up with the column name in the dataframe you\n expect to receive.\n min_datetime (Optional[Union[int,float]]): The lower bound for values you expect in this column.\n Defaults to pandas.Timestamp.min.\n max_datetime (Optional[Union[int,float]]): The upper bound for values you expect in this column.\n Defaults to pandas.Timestamp.max.\n non_nullable (Optional[bool]): If true, this column will enforce a constraint that all values in the column\n ought to be non null values.\n unique (Optional[bool]): If true, this column will enforce a uniqueness constraint on the column values.\n ignore_missing_vals (Optional[bool]): A flag that is passed into most constraints. If true, the constraint will\n only evaluate non-null data. Ignore_missing_vals and non_nullable cannot both be True.\n is_required (Optional[bool]): Flag indicating the optional/required presence of the column.\n If the column exists the validate function will validate the column. 
Default to True.\n tz (Optional[str]): Required timezone for values eg: tz='UTC', tz='Europe/Dublin', tz='US/Eastern'.\n Defaults to None, meaning naive datetime values.\n """\n if tz is None:\n datetime_constraint = ColumnDTypeInSetConstraint({"datetime64[ns]"})\n else:\n datetime_constraint = ColumnDTypeInSetConstraint({f"datetime64[ns, {tz}]"})\n # One day more/less than absolute min/max to prevent OutOfBoundsDatetime errors when converting min/max to be tz aware\n if min_datetime.tz_localize(None) == Timestamp.min:\n min_datetime = Timestamp("1677-09-22 00:12:43.145225Z")\n if max_datetime.tz_localize(None) == Timestamp.max:\n max_datetime = Timestamp("2262-04-10 23:47:16.854775807Z")\n # Convert bounds to same tz\n if Timestamp(min_datetime).tz is None:\n min_datetime = Timestamp(min_datetime).tz_localize(tz)\n if Timestamp(max_datetime).tz is None:\n max_datetime = Timestamp(max_datetime).tz_localize(tz)\n\n return PandasColumn(\n name=check.str_param(name, "name"),\n constraints=[\n datetime_constraint,\n InRangeColumnConstraint(\n min_datetime, max_datetime, ignore_missing_vals=ignore_missing_vals\n ),\n ]\n + _construct_keyword_constraints(\n non_nullable=non_nullable, unique=unique, ignore_missing_vals=ignore_missing_vals\n ),\n is_required=is_required,\n )\n\n @staticmethod\n def string_column(\n name, non_nullable=False, unique=False, ignore_missing_vals=False, is_required=None\n ):\n """Simple constructor for PandasColumns that expresses constraints on string dtypes.\n\n Args:\n name (str): Name of the column. This must match up with the column name in the dataframe you\n expect to receive.\n non_nullable (Optional[bool]): If true, this column will enforce a constraint that all values in the column\n ought to be non null values.\n unique (Optional[bool]): If true, this column will enforce a uniqueness constraint on the column values.\n ignore_missing_vals (Optional[bool]): A flag that is passed into most constraints. If true, the constraint will\n only evaluate non-null data. Ignore_missing_vals and non_nullable cannot both be True.\n is_required (Optional[bool]): Flag indicating the optional/required presence of the column.\n If the column exists the validate function will validate the column. Default to True.\n """\n return PandasColumn(\n name=check.str_param(name, "name"),\n constraints=[ColumnDTypeFnConstraint(is_string_dtype)]\n + _construct_keyword_constraints(\n non_nullable=non_nullable, unique=unique, ignore_missing_vals=ignore_missing_vals\n ),\n is_required=is_required,\n )\n\n @staticmethod\n def categorical_column(\n name,\n categories,\n of_types=frozenset({"category", "object"}),\n non_nullable=False,\n unique=False,\n ignore_missing_vals=False,\n is_required=None,\n ):\n """Simple constructor for PandasColumns that expresses categorical constraints on specified dtypes.\n\n Args:\n name (str): Name of the column. This must match up with the column name in the dataframe you\n expect to receive.\n categories (List[Any]): The valid set of buckets that all values in the column must match.\n of_types (Optional[Union[str, Set[str]]]): The expected dtype[s] that your categories and values must\n abide by.\n non_nullable (Optional[bool]): If true, this column will enforce a constraint that all values in\n the column ought to be non null values.\n unique (Optional[bool]): If true, this column will enforce a uniqueness constraint on the column values.\n ignore_missing_vals (Optional[bool]): A flag that is passed into most constraints. 
If true, the\n constraint will only evaluate non-null data. Ignore_missing_vals and non_nullable cannot both be True.\n is_required (Optional[bool]): Flag indicating the optional/required presence of the column.\n If the column exists the validate function will validate the column. Default to True.\n """\n of_types = {of_types} if isinstance(of_types, str) else of_types\n return PandasColumn(\n name=check.str_param(name, "name"),\n constraints=[\n ColumnDTypeInSetConstraint(of_types),\n CategoricalColumnConstraint(categories, ignore_missing_vals=ignore_missing_vals),\n ]\n + _construct_keyword_constraints(\n non_nullable=non_nullable, unique=unique, ignore_missing_vals=ignore_missing_vals\n ),\n is_required=is_required,\n )
\n\n\ndef validate_constraints(dataframe, pandas_columns=None, dataframe_constraints=None):\n dataframe = check.inst_param(dataframe, "dataframe", DataFrame)\n pandas_columns = check.opt_list_param(\n pandas_columns, "pandas_columns", of_type=PandasColumn\n )\n dataframe_constraints = check.opt_list_param(\n dataframe_constraints, "dataframe_constraints", of_type=DataFrameConstraint\n )\n\n if pandas_columns:\n for column in pandas_columns:\n column.validate(dataframe)\n\n if dataframe_constraints:\n for dataframe_constraint in dataframe_constraints:\n dataframe_constraint.validate(dataframe)\n
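The column constructors above are the building blocks for ``create_dagster_pandas_dataframe_type``. A minimal sketch of a hypothetical schema follows; the column names, bounds, and categories are made up for illustration:

.. code-block:: python

    from pandas import Timestamp

    from dagster_pandas import PandasColumn, create_dagster_pandas_dataframe_type

    # Hypothetical schema: each entry maps one of the constructors above to a column constraint.
    TripDataFrame = create_dagster_pandas_dataframe_type(
        name="TripDataFrame",
        columns=[
            PandasColumn.integer_column("bike_id", min_value=0),
            PandasColumn.datetime_column(
                "start_time", min_datetime=Timestamp(year=2020, month=1, day=1)
            ),
            PandasColumn.string_column("station", non_nullable=True),
            PandasColumn.categorical_column("color", categories={"red", "green", "blue"}),
        ],
    )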
", "current_page_name": "_modules/dagster_pandas/validation", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_pandas.validation"}}, "dagster_postgres": {"event_log": {"event_log": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_postgres.event_log.event_log

\nfrom typing import Any, ContextManager, Mapping, Optional, Sequence\n\nimport dagster._check as check\nimport sqlalchemy as db\nimport sqlalchemy.dialects as db_dialects\nimport sqlalchemy.pool as db_pool\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._core.errors import DagsterInvariantViolationError\nfrom dagster._core.event_api import EventHandlerFn\nfrom dagster._core.events import ASSET_CHECK_EVENTS, ASSET_EVENTS\nfrom dagster._core.events.log import EventLogEntry\nfrom dagster._core.storage.config import pg_config\nfrom dagster._core.storage.event_log import (\n    AssetKeyTable,\n    DynamicPartitionsTable,\n    SqlEventLogStorage,\n    SqlEventLogStorageMetadata,\n    SqlEventLogStorageTable,\n)\nfrom dagster._core.storage.event_log.base import EventLogCursor\nfrom dagster._core.storage.event_log.migration import ASSET_KEY_INDEX_COLS\nfrom dagster._core.storage.event_log.polling_event_watcher import SqlPollingEventWatcher\nfrom dagster._core.storage.sql import (\n    AlembicVersion,\n    check_alembic_revision,\n    create_engine,\n    run_alembic_upgrade,\n    stamp_alembic_rev,\n)\nfrom dagster._core.storage.sqlalchemy_compat import db_select\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData, deserialize_value\nfrom sqlalchemy.engine import Connection\n\nfrom ..utils import (\n    create_pg_connection,\n    pg_alembic_config,\n    pg_statement_timeout,\n    pg_url_from_config,\n    retry_pg_connection_fn,\n    retry_pg_creation_fn,\n)\n\nCHANNEL_NAME = "run_events"\n\n\n
[docs]class PostgresEventLogStorage(SqlEventLogStorage, ConfigurableClass):\n """Postgres-backed event log storage.\n\n Users should not directly instantiate this class; it is instantiated by internal machinery when\n ``dagster-webserver`` and ``dagster-graphql`` load, based on the values in the ``dagster.yaml`` file in\n ``$DAGSTER_HOME``. Configuration of this class should be done by setting values in that file.\n\n To use Postgres for all of the components of your instance storage, you can add the following\n block to your ``dagster.yaml``:\n\n .. literalinclude:: ../../../../../../examples/docs_snippets/docs_snippets/deploying/dagster-pg.yaml\n :caption: dagster.yaml\n :lines: 1-8\n :language: YAML\n\n If you are configuring the different storage components separately and are specifically\n configuring your event log storage to use Postgres, you can add a block such as the following\n to your ``dagster.yaml``:\n\n .. literalinclude:: ../../../../../../examples/docs_snippets/docs_snippets/deploying/dagster-pg-legacy.yaml\n :caption: dagster.yaml\n :lines: 12-21\n :language: YAML\n\n Note that the fields in this config are :py:class:`~dagster.StringSource` and\n :py:class:`~dagster.IntSource` and can be configured from environment variables.\n\n """\n\n def __init__(\n self,\n postgres_url: str,\n should_autocreate_tables: bool = True,\n inst_data: Optional[ConfigurableClassData] = None,\n ):\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n self.postgres_url = check.str_param(postgres_url, "postgres_url")\n self.should_autocreate_tables = check.bool_param(\n should_autocreate_tables, "should_autocreate_tables"\n )\n\n self._disposed = False\n\n # Default to not holding any connections open to prevent accumulating connections per DagsterInstance\n self._engine = create_engine(\n self.postgres_url, isolation_level="AUTOCOMMIT", poolclass=db_pool.NullPool\n )\n\n self._event_watcher = SqlPollingEventWatcher(self)\n\n self._secondary_index_cache = {}\n\n # Stamp and create tables if the main table does not exist (we can't check alembic\n # revision because alembic config may be shared with other storage classes)\n if self.should_autocreate_tables:\n table_names = retry_pg_connection_fn(lambda: db.inspect(self._engine).get_table_names())\n if "event_logs" not in table_names:\n retry_pg_creation_fn(self._init_db)\n self.reindex_events()\n self.reindex_assets()\n\n super().__init__()\n\n def _init_db(self) -> None:\n with self._connect() as conn:\n with conn.begin():\n SqlEventLogStorageMetadata.create_all(conn)\n stamp_alembic_rev(pg_alembic_config(__file__), conn)\n\n def optimize_for_webserver(self, statement_timeout: int, pool_recycle: int) -> None:\n # When running in dagster-webserver, hold an open connection and set statement_timeout\n existing_options = self._engine.url.query.get("options")\n timeout_option = pg_statement_timeout(statement_timeout)\n if existing_options:\n options = f"{timeout_option} {existing_options}"\n else:\n options = timeout_option\n self._engine = create_engine(\n self.postgres_url,\n isolation_level="AUTOCOMMIT",\n pool_size=1,\n connect_args={"options": options},\n pool_recycle=pool_recycle,\n )\n\n def upgrade(self) -> None:\n alembic_config = pg_alembic_config(__file__)\n with self._connect() as conn:\n run_alembic_upgrade(alembic_config, conn)\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n @classmethod\n def config_type(cls) -> UserConfigSchema:\n return 
pg_config()\n\n @classmethod\n def from_config_value(\n cls, inst_data: Optional[ConfigurableClassData], config_value: Mapping[str, Any]\n ) -> "PostgresEventLogStorage":\n return PostgresEventLogStorage(\n inst_data=inst_data,\n postgres_url=pg_url_from_config(config_value),\n should_autocreate_tables=config_value.get("should_autocreate_tables", True),\n )\n\n @staticmethod\n def create_clean_storage(\n conn_string: str, should_autocreate_tables: bool = True\n ) -> "PostgresEventLogStorage":\n engine = create_engine(\n conn_string, isolation_level="AUTOCOMMIT", poolclass=db_pool.NullPool\n )\n try:\n SqlEventLogStorageMetadata.drop_all(engine)\n finally:\n engine.dispose()\n\n return PostgresEventLogStorage(conn_string, should_autocreate_tables)\n\n def store_event(self, event: EventLogEntry) -> None:\n """Store an event corresponding to a run.\n\n Args:\n event (EventLogEntry): The event to store.\n """\n check.inst_param(event, "event", EventLogEntry)\n insert_event_statement = self.prepare_insert_event(event) # from SqlEventLogStorage.py\n with self._connect() as conn:\n result = conn.execute(\n insert_event_statement.returning(\n SqlEventLogStorageTable.c.run_id, SqlEventLogStorageTable.c.id\n )\n )\n res = result.fetchone()\n result.close()\n\n # LISTEN/NOTIFY no longer used for pg event watch - preserved here to support version skew\n conn.execute(\n db.text(f"""NOTIFY {CHANNEL_NAME}, :notify_id; """),\n {"notify_id": res[0] + "_" + str(res[1])}, # type: ignore\n )\n event_id = int(res[1]) # type: ignore\n\n if (\n event.is_dagster_event\n and event.dagster_event_type in ASSET_EVENTS\n and event.dagster_event.asset_key # type: ignore\n ):\n self.store_asset_event(event, event_id)\n\n if event_id is None:\n raise DagsterInvariantViolationError(\n "Cannot store asset event tags for null event id."\n )\n\n self.store_asset_event_tags(event, event_id)\n\n if event.is_dagster_event and event.dagster_event_type in ASSET_CHECK_EVENTS:\n self.store_asset_check_event(event, event_id)\n\n def store_asset_event(self, event: EventLogEntry, event_id: int) -> None:\n check.inst_param(event, "event", EventLogEntry)\n if not (event.dagster_event and event.dagster_event.asset_key):\n return\n\n # We switched to storing the entire event record of the last materialization instead of just\n # the AssetMaterialization object, so that we have access to metadata like timestamp,\n # job, run_id, etc.\n #\n # This should make certain asset queries way more performant, without having to do extra\n # queries against the event log.\n #\n # This should be accompanied by a schema change in 0.12.0, renaming `last_materialization`\n # to `last_materialization_event`, for clarity. 
For now, we should do some back-compat.\n #\n # https://github.com/dagster-io/dagster/issues/3945\n\n # The AssetKeyTable contains a `last_materialization_timestamp` column that is exclusively\n # used to determine if an asset exists (last materialization timestamp > wipe timestamp).\n # This column is used nowhere else, and as of AssetObservation/AssetMaterializationPlanned\n # event creation, we want to extend this functionality to ensure that assets with any event\n # (observation, materialization, or materialization planned) yielded with timestamp\n # > wipe timestamp display in the Dagster UI.\n\n # As of the following PRs, we update last_materialization_timestamp to store the timestamp\n # of the latest asset observation, materialization, or materialization_planned that has occurred.\n # https://github.com/dagster-io/dagster/pull/6885\n # https://github.com/dagster-io/dagster/pull/7319\n\n # The AssetKeyTable also contains a `last_run_id` column that is updated upon asset\n # materialization. This column was not being used until the below PR. This new change\n # writes to the column upon `ASSET_MATERIALIZATION_PLANNED` events to fetch the last\n # run id for a set of assets in one roundtrip call to event log storage.\n # https://github.com/dagster-io/dagster/pull/7319\n\n values = self._get_asset_entry_values(\n event, event_id, self.has_secondary_index(ASSET_KEY_INDEX_COLS)\n )\n with self.index_connection() as conn:\n query = db_dialects.postgresql.insert(AssetKeyTable).values(\n asset_key=event.dagster_event.asset_key.to_string(),\n **values,\n )\n if values:\n query = query.on_conflict_do_update(\n index_elements=[AssetKeyTable.c.asset_key],\n set_=dict(**values),\n )\n else:\n query = query.on_conflict_do_nothing()\n conn.execute(query)\n\n def add_dynamic_partitions(\n self, partitions_def_name: str, partition_keys: Sequence[str]\n ) -> None:\n if not partition_keys:\n return\n\n # Overload base implementation to push upsert logic down into the db layer\n self._check_partitions_table()\n with self.index_connection() as conn:\n conn.execute(\n db_dialects.postgresql.insert(DynamicPartitionsTable)\n .values(\n [\n dict(partitions_def_name=partitions_def_name, partition=partition_key)\n for partition_key in partition_keys\n ]\n )\n .on_conflict_do_nothing(),\n )\n\n def _connect(self) -> ContextManager[Connection]:\n return create_pg_connection(self._engine)\n\n def run_connection(self, run_id: Optional[str] = None) -> ContextManager[Connection]:\n return self._connect()\n\n def index_connection(self) -> ContextManager[Connection]:\n return self._connect()\n\n def has_table(self, table_name: str) -> bool:\n return bool(self._engine.dialect.has_table(self._engine.connect(), table_name))\n\n def has_secondary_index(self, name: str) -> bool:\n if name not in self._secondary_index_cache:\n self._secondary_index_cache[name] = super(\n PostgresEventLogStorage, self\n ).has_secondary_index(name)\n return self._secondary_index_cache[name]\n\n def enable_secondary_index(self, name: str) -> None:\n super(PostgresEventLogStorage, self).enable_secondary_index(name)\n if name in self._secondary_index_cache:\n del self._secondary_index_cache[name]\n\n def watch(\n self,\n run_id: str,\n cursor: Optional[str],\n callback: EventHandlerFn,\n ) -> None:\n if cursor and EventLogCursor.parse(cursor).is_offset_cursor():\n check.failed("Cannot call `watch` with an offset cursor")\n\n self._event_watcher.watch_run(run_id, cursor, callback)\n\n def _gen_event_log_entry_from_cursor(self, cursor) -> 
EventLogEntry:\n with self._engine.connect() as conn:\n cursor_res = conn.execute(\n db_select([SqlEventLogStorageTable.c.event]).where(\n SqlEventLogStorageTable.c.id == cursor\n ),\n )\n return deserialize_value(cursor_res.scalar(), EventLogEntry) # type: ignore\n\n def end_watch(self, run_id: str, handler: EventHandlerFn) -> None:\n self._event_watcher.unwatch_run(run_id, handler)\n\n def __del__(self) -> None:\n # Keep the inherent limitations of __del__ in Python in mind!\n self.dispose()\n\n def dispose(self) -> None:\n if not self._disposed:\n self._disposed = True\n self._event_watcher.close()\n\n def alembic_version(self) -> AlembicVersion:\n alembic_config = pg_alembic_config(__file__)\n with self._connect() as conn:\n return check_alembic_revision(alembic_config, conn)
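As the docstring notes, this storage is normally configured through ``dagster.yaml`` rather than constructed directly. For test setups, the ``create_clean_storage`` helper shown above can be used; the connection string below is only a placeholder:

.. code-block:: python

    from dagster_postgres import PostgresEventLogStorage

    # Placeholder connection string; drops any existing event log tables and
    # recreates them via the autocreate path shown in the constructor above.
    storage = PostgresEventLogStorage.create_clean_storage(
        "postgresql://dagster:dagster@localhost:5432/dagster_test"
    )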
\n
", "current_page_name": "_modules/dagster_postgres/event_log/event_log", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_postgres.event_log.event_log"}}, "run_storage": {"run_storage": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_postgres.run_storage.run_storage

\nimport zlib\nfrom typing import ContextManager, Mapping, Optional\n\nimport dagster._check as check\nimport sqlalchemy as db\nimport sqlalchemy.dialects as db_dialects\nimport sqlalchemy.pool as db_pool\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._core.storage.config import PostgresStorageConfig, pg_config\nfrom dagster._core.storage.runs import (\n    DaemonHeartbeatsTable,\n    InstanceInfo,\n    RunStorageSqlMetadata,\n    SqlRunStorage,\n)\nfrom dagster._core.storage.runs.schema import KeyValueStoreTable, SnapshotsTable\nfrom dagster._core.storage.runs.sql_run_storage import SnapshotType\nfrom dagster._core.storage.sql import (\n    AlembicVersion,\n    check_alembic_revision,\n    create_engine,\n    run_alembic_upgrade,\n    stamp_alembic_rev,\n)\nfrom dagster._daemon.types import DaemonHeartbeat\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData, serialize_value\nfrom dagster._utils import utc_datetime_from_timestamp\nfrom sqlalchemy.engine import Connection\n\nfrom ..utils import (\n    create_pg_connection,\n    pg_alembic_config,\n    pg_statement_timeout,\n    pg_url_from_config,\n    retry_pg_connection_fn,\n    retry_pg_creation_fn,\n)\n\n\n
[docs]class PostgresRunStorage(SqlRunStorage, ConfigurableClass):\n """Postgres-backed run storage.\n\n Users should not directly instantiate this class; it is instantiated by internal machinery when\n ``dagster-webserver`` and ``dagster-graphql`` load, based on the values in the ``dagster.yaml`` file in\n ``$DAGSTER_HOME``. Configuration of this class should be done by setting values in that file.\n\n To use Postgres for all of the components of your instance storage, you can add the following\n block to your ``dagster.yaml``:\n\n .. literalinclude:: ../../../../../../examples/docs_snippets/docs_snippets/deploying/dagster-pg.yaml\n :caption: dagster.yaml\n :lines: 1-8\n :language: YAML\n\n If you are configuring the different storage components separately and are specifically\n configuring your run storage to use Postgres, you can add a block such as the following\n to your ``dagster.yaml``:\n\n .. literalinclude:: ../../../../../../examples/docs_snippets/docs_snippets/deploying/dagster-pg-legacy.yaml\n :caption: dagster.yaml\n :lines: 1-10\n :language: YAML\n\n Note that the fields in this config are :py:class:`~dagster.StringSource` and\n :py:class:`~dagster.IntSource` and can be configured from environment variables.\n """\n\n def __init__(\n self,\n postgres_url: str,\n should_autocreate_tables: bool = True,\n inst_data: Optional[ConfigurableClassData] = None,\n ):\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n self.postgres_url = postgres_url\n self.should_autocreate_tables = check.bool_param(\n should_autocreate_tables, "should_autocreate_tables"\n )\n\n # Default to not holding any connections open to prevent accumulating connections per DagsterInstance\n self._engine = create_engine(\n self.postgres_url,\n isolation_level="AUTOCOMMIT",\n poolclass=db_pool.NullPool,\n )\n\n self._index_migration_cache = {}\n\n # Stamp and create tables if the main table does not exist (we can't check alembic\n # revision because alembic config may be shared with other storage classes)\n if self.should_autocreate_tables:\n table_names = retry_pg_connection_fn(lambda: db.inspect(self._engine).get_table_names())\n if "runs" not in table_names:\n retry_pg_creation_fn(self._init_db)\n self.migrate()\n self.optimize()\n elif "instance_info" not in table_names:\n InstanceInfo.create(self._engine)\n\n super().__init__()\n\n def _init_db(self) -> None:\n with self.connect() as conn:\n with conn.begin():\n RunStorageSqlMetadata.create_all(conn)\n # This revision may be shared by any other dagster storage classes using the same DB\n stamp_alembic_rev(pg_alembic_config(__file__), conn)\n\n def optimize_for_webserver(self, statement_timeout: int, pool_recycle: int) -> None:\n # When running in dagster-webserver, hold 1 open connection and set statement_timeout\n existing_options = self._engine.url.query.get("options")\n timeout_option = pg_statement_timeout(statement_timeout)\n if existing_options:\n options = f"{timeout_option} {existing_options}"\n else:\n options = timeout_option\n self._engine = create_engine(\n self.postgres_url,\n isolation_level="AUTOCOMMIT",\n pool_size=1,\n connect_args={"options": options},\n pool_recycle=pool_recycle,\n )\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n @classmethod\n def config_type(cls) -> UserConfigSchema:\n return pg_config()\n\n @classmethod\n def from_config_value(\n cls, inst_data: Optional[ConfigurableClassData], config_value: PostgresStorageConfig\n ):\n return 
PostgresRunStorage(\n inst_data=inst_data,\n postgres_url=pg_url_from_config(config_value),\n should_autocreate_tables=config_value.get("should_autocreate_tables", True),\n )\n\n @staticmethod\n def create_clean_storage(\n postgres_url: str, should_autocreate_tables: bool = True\n ) -> "PostgresRunStorage":\n engine = create_engine(\n postgres_url, isolation_level="AUTOCOMMIT", poolclass=db_pool.NullPool\n )\n try:\n RunStorageSqlMetadata.drop_all(engine)\n finally:\n engine.dispose()\n return PostgresRunStorage(postgres_url, should_autocreate_tables)\n\n def connect(self) -> ContextManager[Connection]:\n return create_pg_connection(self._engine)\n\n def upgrade(self) -> None:\n with self.connect() as conn:\n run_alembic_upgrade(pg_alembic_config(__file__), conn)\n\n def has_built_index(self, migration_name: str) -> bool:\n if migration_name not in self._index_migration_cache:\n self._index_migration_cache[migration_name] = super(\n PostgresRunStorage, self\n ).has_built_index(migration_name)\n return self._index_migration_cache[migration_name]\n\n def mark_index_built(self, migration_name: str) -> None:\n super(PostgresRunStorage, self).mark_index_built(migration_name)\n if migration_name in self._index_migration_cache:\n del self._index_migration_cache[migration_name]\n\n def add_daemon_heartbeat(self, daemon_heartbeat: DaemonHeartbeat) -> None:\n with self.connect() as conn:\n # insert or update if already present, using postgres specific on_conflict\n conn.execute(\n db_dialects.postgresql.insert(DaemonHeartbeatsTable)\n .values(\n timestamp=utc_datetime_from_timestamp(daemon_heartbeat.timestamp),\n daemon_type=daemon_heartbeat.daemon_type,\n daemon_id=daemon_heartbeat.daemon_id,\n body=serialize_value(daemon_heartbeat),\n )\n .on_conflict_do_update(\n index_elements=[DaemonHeartbeatsTable.c.daemon_type],\n set_={\n "timestamp": utc_datetime_from_timestamp(daemon_heartbeat.timestamp),\n "daemon_id": daemon_heartbeat.daemon_id,\n "body": serialize_value(daemon_heartbeat),\n },\n )\n .returning(\n # required because sqlalchemy might by default return the declared primary key,\n # which might not exist\n DaemonHeartbeatsTable.c.daemon_type,\n )\n )\n\n def set_cursor_values(self, pairs: Mapping[str, str]) -> None:\n check.mapping_param(pairs, "pairs", key_type=str, value_type=str)\n\n # pg speciic on_conflict_do_update\n insert_stmt = db_dialects.postgresql.insert(KeyValueStoreTable).values(\n [{"key": k, "value": v} for k, v in pairs.items()]\n )\n upsert_stmt = insert_stmt.on_conflict_do_update(\n index_elements=[\n KeyValueStoreTable.c.key,\n ],\n set_={"value": insert_stmt.excluded.value},\n ).returning(\n # required because sqlalchemy might by default return the declared primary key,\n # which might not exist\n KeyValueStoreTable.c.key\n )\n\n with self.connect() as conn:\n conn.execute(upsert_stmt)\n\n def _add_snapshot(self, snapshot_id: str, snapshot_obj, snapshot_type: SnapshotType) -> str:\n with self.connect() as conn:\n snapshot_insert = (\n db_dialects.postgresql.insert(SnapshotsTable)\n .values(\n snapshot_id=snapshot_id,\n snapshot_body=zlib.compress(serialize_value(snapshot_obj).encode("utf-8")),\n snapshot_type=snapshot_type.value,\n )\n .on_conflict_do_nothing()\n )\n conn.execute(snapshot_insert)\n return snapshot_id\n\n def alembic_version(self) -> AlembicVersion:\n alembic_config = pg_alembic_config(__file__)\n with self.connect() as conn:\n return check_alembic_revision(alembic_config, conn)
\n
", "current_page_name": "_modules/dagster_postgres/run_storage/run_storage", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_postgres.run_storage.run_storage"}}, "schedule_storage": {"schedule_storage": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_postgres.schedule_storage.schedule_storage

\nfrom typing import ContextManager, Optional\n\nimport dagster._check as check\nimport pendulum\nimport sqlalchemy as db\nimport sqlalchemy.dialects as db_dialects\nimport sqlalchemy.pool as db_pool\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._core.scheduler.instigation import InstigatorState\nfrom dagster._core.storage.config import PostgresStorageConfig, pg_config\nfrom dagster._core.storage.schedules import ScheduleStorageSqlMetadata, SqlScheduleStorage\nfrom dagster._core.storage.schedules.schema import InstigatorsTable\nfrom dagster._core.storage.sql import (\n    AlembicVersion,\n    check_alembic_revision,\n    create_engine,\n    run_alembic_upgrade,\n    stamp_alembic_rev,\n)\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData, serialize_value\nfrom sqlalchemy.engine import Connection\n\nfrom ..utils import (\n    create_pg_connection,\n    pg_alembic_config,\n    pg_statement_timeout,\n    pg_url_from_config,\n    retry_pg_connection_fn,\n    retry_pg_creation_fn,\n)\n\n\n
[docs]class PostgresScheduleStorage(SqlScheduleStorage, ConfigurableClass):\n """Postgres-backed run storage.\n\n Users should not directly instantiate this class; it is instantiated by internal machinery when\n ``dagster-webserver`` and ``dagster-graphql`` load, based on the values in the ``dagster.yaml`` file in\n ``$DAGSTER_HOME``. Configuration of this class should be done by setting values in that file.\n\n To use Postgres for all of the components of your instance storage, you can add the following\n block to your ``dagster.yaml``:\n\n .. literalinclude:: ../../../../../../examples/docs_snippets/docs_snippets/deploying/dagster-pg.yaml\n :caption: dagster.yaml\n :lines: 1-8\n :language: YAML\n\n If you are configuring the different storage components separately and are specifically\n configuring your schedule storage to use Postgres, you can add a block such as the following\n to your ``dagster.yaml``:\n\n .. literalinclude:: ../../../../../../examples/docs_snippets/docs_snippets/deploying/dagster-pg-legacy.yaml\n :caption: dagster.yaml\n :lines: 23-32\n :language: YAML\n\n Note that the fields in this config are :py:class:`~dagster.StringSource` and\n :py:class:`~dagster.IntSource` and can be configured from environment variables.\n """\n\n def __init__(\n self,\n postgres_url: str,\n should_autocreate_tables: bool = True,\n inst_data: Optional[ConfigurableClassData] = None,\n ):\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n self.postgres_url = postgres_url\n self.should_autocreate_tables = check.bool_param(\n should_autocreate_tables, "should_autocreate_tables"\n )\n\n # Default to not holding any connections open to prevent accumulating connections per DagsterInstance\n self._engine = create_engine(\n self.postgres_url, isolation_level="AUTOCOMMIT", poolclass=db_pool.NullPool\n )\n\n # Stamp and create tables if the main table does not exist (we can't check alembic\n # revision because alembic config may be shared with other storage classes)\n if self.should_autocreate_tables:\n table_names = retry_pg_connection_fn(lambda: db.inspect(self._engine).get_table_names())\n missing_main_table = "schedules" not in table_names and "jobs" not in table_names\n if missing_main_table:\n retry_pg_creation_fn(self._init_db)\n\n super().__init__()\n\n def _init_db(self) -> None:\n with self.connect() as conn:\n with conn.begin():\n ScheduleStorageSqlMetadata.create_all(conn)\n stamp_alembic_rev(pg_alembic_config(__file__), conn)\n\n # mark all the data migrations as applied\n self.migrate()\n self.optimize()\n\n def optimize_for_webserver(self, statement_timeout: int, pool_recycle: int) -> None:\n # When running in dagster-webserver, hold an open connection and set statement_timeout\n existing_options = self._engine.url.query.get("options")\n timeout_option = pg_statement_timeout(statement_timeout)\n if existing_options:\n options = f"{timeout_option} {existing_options}"\n else:\n options = timeout_option\n self._engine = create_engine(\n self.postgres_url,\n isolation_level="AUTOCOMMIT",\n pool_size=1,\n connect_args={"options": options},\n pool_recycle=pool_recycle,\n )\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n @classmethod\n def config_type(cls) -> UserConfigSchema:\n return pg_config()\n\n @classmethod\n def from_config_value(\n cls, inst_data: Optional[ConfigurableClassData], config_value: PostgresStorageConfig\n ) -> "PostgresScheduleStorage":\n return PostgresScheduleStorage(\n 
inst_data=inst_data,\n postgres_url=pg_url_from_config(config_value),\n should_autocreate_tables=config_value.get("should_autocreate_tables", True),\n )\n\n @staticmethod\n def create_clean_storage(\n postgres_url: str, should_autocreate_tables: bool = True\n ) -> "PostgresScheduleStorage":\n engine = create_engine(\n postgres_url, isolation_level="AUTOCOMMIT", poolclass=db_pool.NullPool\n )\n try:\n ScheduleStorageSqlMetadata.drop_all(engine)\n finally:\n engine.dispose()\n return PostgresScheduleStorage(postgres_url, should_autocreate_tables)\n\n def connect(self, run_id: Optional[str] = None) -> ContextManager[Connection]:\n return create_pg_connection(self._engine)\n\n def upgrade(self) -> None:\n alembic_config = pg_alembic_config(__file__)\n with self.connect() as conn:\n run_alembic_upgrade(alembic_config, conn)\n\n def _add_or_update_instigators_table(self, conn: Connection, state: InstigatorState) -> None:\n selector_id = state.selector_id\n conn.execute(\n db_dialects.postgresql.insert(InstigatorsTable)\n .values(\n selector_id=selector_id,\n repository_selector_id=state.repository_selector_id,\n status=state.status.value,\n instigator_type=state.instigator_type.value,\n instigator_body=serialize_value(state),\n )\n .on_conflict_do_update(\n index_elements=[InstigatorsTable.c.selector_id],\n set_={\n "status": state.status.value,\n "instigator_type": state.instigator_type.value,\n "instigator_body": serialize_value(state),\n "update_timestamp": pendulum.now("UTC"),\n },\n )\n )\n\n def alembic_version(self) -> AlembicVersion:\n alembic_config = pg_alembic_config(__file__)\n with self.connect() as conn:\n return check_alembic_revision(alembic_config, conn)
\n
", "current_page_name": "_modules/dagster_postgres/schedule_storage/schedule_storage", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_postgres.schedule_storage.schedule_storage"}}}, "dagster_prometheus": {"resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_prometheus.resources

\nimport prometheus_client\nfrom dagster import (\n    ConfigurableResource,\n    resource,\n)\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster._core.execution.context.init import InitResourceContext\nfrom prometheus_client.exposition import default_handler\nfrom pydantic import Field, PrivateAttr\n\n\n
[docs]class PrometheusClient:\n """Integrates with Prometheus via the prometheus_client library."""
\n\n\n
[docs]class PrometheusResource(ConfigurableResource):\n """This resource is used to send metrics to a Prometheus Pushgateway.\n\n **Example:**\n\n .. code-block:: python\n\n from dagster_prometheus import PrometheusResource\n from dagster import Definitions, job, op\n\n @op\n def example_prometheus_op(prometheus: PrometheusResource):\n prometheus.push_to_gateway(job="my_job")\n\n @job\n def my_job():\n example_prometheus_op()\n\n defs = Definitions(\n jobs=[my_job],\n resources={"prometheus": PrometheusResource(gateway="http://pushgateway.local")},\n )\n\n """\n\n gateway: str = Field(\n description=(\n "The url for your push gateway. Either of the"\n " form 'http://pushgateway.local', or 'pushgateway.local'."\n " Scheme defaults to 'http' if none is provided"\n )\n )\n timeout: int = Field(\n default=30,\n description="is how long delete will attempt to connect before giving up. Defaults to 30s.",\n )\n _registry: prometheus_client.CollectorRegistry = PrivateAttr(default=None)\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def setup_for_execution(self, context: InitResourceContext) -> None:\n self._registry = prometheus_client.CollectorRegistry()\n\n @property\n def registry(self) -> prometheus_client.CollectorRegistry:\n return self._registry\n\n def push_to_gateway(self, job, grouping_key=None, handler=default_handler) -> None:\n """Push metrics to the given pushgateway.\n `job` is the job label to be attached to all pushed metrics\n `grouping_key` please see the pushgateway documentation for details.\n Defaults to None\n `handler` is an optional function which can be provided to perform\n requests to the 'gateway'.\n Defaults to None, in which case an http or https request\n will be carried out by a default handler.\n If not None, the argument must be a function which accepts\n the following arguments:\n url, method, timeout, headers, and content\n May be used to implement additional functionality not\n supported by the built-in default handler (such as SSL\n client certicates, and HTTP authentication mechanisms).\n 'url' is the URL for the request, the 'gateway' argument\n described earlier will form the basis of this URL.\n 'method' is the HTTP method which should be used when\n carrying out the request.\n 'timeout' requests not successfully completed after this\n many seconds should be aborted. If timeout is None, then\n the handler should not set a timeout.\n 'headers' is a list of ("header-name","header-value") tuples\n which must be passed to the pushgateway in the form of HTTP\n request headers.\n The function should raise an exception (e.g. 
IOError) on\n failure.\n 'content' is the data which should be used to form the HTTP\n Message Body.\n This overwrites all metrics with the same job and grouping_key.\n This uses the PUT HTTP method.\n """\n prometheus_client.push_to_gateway(\n gateway=self.gateway,\n job=job,\n registry=self._registry,\n grouping_key=grouping_key,\n timeout=self.timeout,\n handler=handler,\n )\n\n def pushadd_to_gateway(self, job, grouping_key=None, handler=default_handler) -> None:\n """PushAdd metrics to the given pushgateway.\n `job` is the job label to be attached to all pushed metrics\n `registry` is an instance of CollectorRegistry\n `grouping_key` please see the pushgateway documentation for details.\n Defaults to None\n `handler` is an optional function which can be provided to perform\n requests to the 'gateway'.\n Defaults to None, in which case an http or https request\n will be carried out by a default handler.\n See the 'prometheus_client.push_to_gateway' documentation\n for implementation requirements.\n This replaces metrics with the same name, job and grouping_key.\n This uses the POST HTTP method.\n """\n prometheus_client.pushadd_to_gateway(\n gateway=self.gateway,\n job=job,\n registry=self._registry,\n grouping_key=grouping_key,\n timeout=self.timeout,\n handler=handler,\n )\n\n def delete_from_gateway(self, job, grouping_key=None, handler=default_handler) -> None:\n """Delete metrics from the given pushgateway.\n `job` is the job label to be attached to all pushed metrics\n `grouping_key` please see the pushgateway documentation for details.\n Defaults to None\n `handler` is an optional function which can be provided to perform\n requests to the 'gateway'.\n Defaults to None, in which case an http or https request\n will be carried out by a default handler.\n See the 'prometheus_client.push_to_gateway' documentation\n for implementation requirements.\n This deletes metrics with the given job and grouping_key.\n This uses the DELETE HTTP method.\n """\n prometheus_client.delete_from_gateway(\n gateway=self.gateway,\n job=job,\n grouping_key=grouping_key,\n timeout=self.timeout,\n handler=handler,\n )
\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n config_schema=PrometheusResource.to_config_schema(),\n description="""This resource is for sending metrics to a Prometheus Pushgateway.""",\n)\ndef prometheus_resource(context):\n return PrometheusResource(\n gateway=context.resource_config["gateway"], timeout=context.resource_config["timeout"]\n )
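A related sketch that uses the resource's ``registry`` property together with ``pushadd_to_gateway``; the metric name and gateway URL are hypothetical:

.. code-block:: python

    import prometheus_client
    from dagster import Definitions, job, op
    from dagster_prometheus import PrometheusResource

    @op
    def record_run(prometheus: PrometheusResource):
        # Register a counter on the resource's CollectorRegistry, then push it
        # to the configured Pushgateway under the given job label.
        counter = prometheus_client.Counter(
            "my_job_runs_total",
            "Number of times my_job has run",
            registry=prometheus.registry,
        )
        counter.inc()
        prometheus.pushadd_to_gateway(job="my_job")

    @job
    def my_job():
        record_run()

    defs = Definitions(
        jobs=[my_job],
        resources={"prometheus": PrometheusResource(gateway="http://pushgateway.local")},
    )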
\n
", "current_page_name": "_modules/dagster_prometheus/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_prometheus.resources"}}, "dagster_pyspark": {"resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_pyspark.resources

\nfrom typing import Any, Dict\n\nimport dagster._check as check\nfrom dagster import ConfigurableResource, resource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster._core.execution.context.init import InitResourceContext\nfrom dagster_spark.configs_spark import spark_config\nfrom dagster_spark.utils import flatten_dict\nfrom pydantic import PrivateAttr\nfrom pyspark.sql import SparkSession\n\n\ndef spark_session_from_config(spark_conf=None):\n    spark_conf = check.opt_dict_param(spark_conf, "spark_conf")\n    builder = SparkSession.builder\n    flat = flatten_dict(spark_conf)\n    for key, value in flat:\n        builder = builder.config(key, value)\n\n    return builder.getOrCreate()\n\n\n
[docs]class PySparkResource(ConfigurableResource):\n """This resource provides access to a PySpark Session for executing PySpark code within Dagster.\n\n Example:\n .. code-block:: python\n\n @op\n def my_op(pyspark: PySparkResource)\n spark_session = pyspark.spark_session\n dataframe = spark_session.read.json("examples/src/main/resources/people.json")\n\n\n @job(\n resource_defs={\n "pyspark": PySparkResource(\n spark_config={\n "spark.executor.memory": "2g"\n }\n )\n }\n )\n def my_spark_job():\n my_op()\n """\n\n spark_config: Dict[str, Any]\n _spark_session = PrivateAttr(default=None)\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def setup_for_execution(self, context: InitResourceContext) -> None:\n self._spark_session = spark_session_from_config(self.spark_config)\n\n @property\n def spark_session(self) -> Any:\n return self._spark_session\n\n @property\n def spark_context(self) -> Any:\n return self.spark_session.sparkContext
\n\n\n
[docs]@dagster_maintained_resource\n@resource({"spark_conf": spark_config()})\ndef pyspark_resource(init_context) -> PySparkResource:\n """This resource provides access to a PySpark SparkSession for executing PySpark code within Dagster.\n\n Example:\n .. code-block:: python\n\n @op(required_resource_keys={"pyspark"})\n def my_op(context):\n spark_session = context.resources.pyspark.spark_session\n dataframe = spark_session.read.json("examples/src/main/resources/people.json")\n\n my_pyspark_resource = pyspark_resource.configured(\n {"spark_conf": {"spark.executor.memory": "2g"}}\n )\n\n @job(resource_defs={"pyspark": my_pyspark_resource})\n def my_spark_job():\n my_op()\n """\n context_updated_config = init_context.replace_config(\n {"spark_config": init_context.resource_config["spark_conf"]}\n )\n return PySparkResource.from_resource_context(context_updated_config)
\n\n\nclass LazyPySparkResource(ConfigurableResource):\n """This resource provides access to a lazily-created PySpark SparkSession for executing PySpark\n code within Dagster, avoiding the creation of a SparkSession object until the .spark_session attribute\n of the resource is accessed. This is helpful for avoiding the creation (and startup penalty) of a SparkSession\n until it is actually needed / accessed by an op or IOManager.\n\n Example:\n .. code-block:: python\n\n @op\n def my_op(lazy_pyspark: LazyPySparkResource)\n spark_session = lazy_pyspark.spark_session\n dataframe = spark_session.read.json("examples/src/main/resources/people.json")\n\n @job(\n resource_defs={\n "lazy_pyspark": LazyPySparkResource(\n spark_config={\n "spark.executor.memory": "2g"\n }\n )\n }\n )\n def my_spark_job():\n my_op()\n """\n\n spark_config: Dict[str, Any]\n _spark_session = PrivateAttr(default=None)\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def _init_session(self) -> None:\n if self._spark_session is None:\n self._spark_session = spark_session_from_config(self.spark_config)\n\n @property\n def spark_session(self) -> Any:\n self._init_session()\n return self._spark_session\n\n @property\n def spark_context(self) -> Any:\n self._init_session()\n return self._spark_session.sparkContext\n\n\n@dagster_maintained_resource\n@resource({"spark_conf": spark_config()})\ndef lazy_pyspark_resource(init_context: InitResourceContext) -> LazyPySparkResource:\n """This resource provides access to a lazily-created PySpark SparkSession for executing PySpark\n code within Dagster, avoiding the creation of a SparkSession object until the .spark_session attribute\n of the resource is accessed. This is helpful for avoiding the creation (and startup penalty) of a SparkSession\n until it is actually needed / accessed by an op or IOManager.\n\n Example:\n .. code-block:: python\n\n @op(required_resource_keys={"lazy_pyspark"})\n def my_op(context):\n spark_session = context.resources.lazy_pyspark.spark_session\n dataframe = spark_session.read.json("examples/src/main/resources/people.json")\n\n my_pyspark_resource = lazy_pyspark_resource.configured(\n {"spark_conf": {"spark.executor.memory": "2g"}}\n )\n\n @job(resource_defs={"lazy_pyspark": my_pyspark_resource})\n def my_spark_job():\n my_op()\n """\n context_updated_config = init_context.replace_config(\n {"spark_config": init_context.resource_config["spark_conf"]}\n )\n return LazyPySparkResource.from_resource_context(context_updated_config)\n
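The same resource also works with software-defined assets; a minimal sketch, with a placeholder JSON path:

.. code-block:: python

    from dagster import Definitions, asset
    from dagster_pyspark import PySparkResource

    @asset
    def people_count(pyspark: PySparkResource) -> int:
        # Placeholder path; counts rows using the shared SparkSession.
        df = pyspark.spark_session.read.json("examples/src/main/resources/people.json")
        return df.count()

    defs = Definitions(
        assets=[people_count],
        resources={"pyspark": PySparkResource(spark_config={"spark.executor.memory": "2g"})},
    )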
", "current_page_name": "_modules/dagster_pyspark/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_pyspark.resources"}}, "dagster_shell": {"ops": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_shell.ops

\nimport os\nfrom enum import Enum\nfrom typing import AbstractSet, Any, Dict, Mapping, Optional\n\nfrom dagster import (\n    Config,\n    Failure,\n    In,\n    Nothing,\n    OpExecutionContext,\n    Out,\n    _check as check,\n    op,\n)\nfrom dagster._core.definitions.op_definition import OpDefinition\nfrom pydantic import Field\n\nfrom .utils import execute, execute_script_file\n\n\nclass OutputType(Enum):\n    STREAM = "STREAM"\n    """Stream script stdout/stderr."""\n\n    BUFFER = "BUFFER"\n    """Buffer shell script stdout/stderr, then log upon completion."""\n\n    NONE = "NONE"\n    """No logging."""\n\n\nclass ShellOpConfig(Config):\n    env: Optional[Dict[str, str]] = Field(\n        default=None,\n        description="An optional dict of environment variables to pass to the subprocess.",\n    )\n    output_logging: OutputType = Field(\n        OutputType.BUFFER.value,\n    )\n    cwd: Optional[str] = Field(\n        default=None, description="Working directory in which to execute shell script"\n    )\n\n    def to_execute_params(self) -> Dict[str, Any]:\n        return {\n            "env": {**os.environ, **(self.env or {})},\n            "output_logging": self.output_logging.value,\n            "cwd": self.cwd,\n        }\n\n\n
[docs]@op(\n name="shell_op",\n description=(\n "This op executes a shell command it receives as input.\\n\\n"\n "This op is suitable for uses where the command to execute is generated dynamically by "\n "upstream ops. If you know the command to execute at job construction time, "\n "consider `shell_command_op` instead."\n ),\n ins={"shell_command": In(str)},\n out=Out(str),\n)\ndef shell_op(context: OpExecutionContext, shell_command: str, config: ShellOpConfig) -> str:\n """This op executes a shell command it receives as input.\n This op is suitable for uses where the command to execute is generated dynamically by\n upstream ops. If you know the command to execute at job construction time,\n consider ``shell_command_op`` instead.\n\n Args:\n shell_command: The shell command to be executed\n config (ShellOpConfig): A ShellOpConfig object specifying configuration options\n\n Examples:\n .. code-block:: python\n\n @op\n def create_shell_command():\n return "echo hello world!"\n\n @graph\n def echo_graph():\n shell_op(create_shell_command())\n """\n output, return_code = execute(\n shell_command=shell_command, log=context.log, **config.to_execute_params()\n )\n\n if return_code:\n raise Failure(description=f"Shell command execution failed with output: {output}")\n\n return output
\n\n\n
[docs]def create_shell_command_op(\n shell_command: str,\n name: str,\n description: Optional[str] = None,\n required_resource_keys: Optional[AbstractSet[str]] = None,\n tags: Optional[Mapping[str, str]] = None,\n) -> OpDefinition:\n """This function is a factory that constructs ops to execute a shell command.\n\n Note that you can only use ``shell_command_op`` if you know the command you'd like to execute\n at job construction time. If you'd like to construct shell commands dynamically during\n job execution and pass them between ops, you should use ``shell_op`` instead.\n\n The resulting op can take a single ``start`` argument that is a\n `Nothing dependency <https://docs.dagster.io/concepts/ops-jobs-graphs/graphs#defining-nothing-dependencies>`__\n to allow you to run ops before the shell op.\n\n Examples:\n .. literalinclude:: ../../../../../../python_modules/libraries/dagster-shell/dagster_shell_tests/example_shell_command_op.py\n :language: python\n\n .. code-block:: python\n\n @op\n def run_before_shell_op():\n do_some_work()\n\n @graph\n def my_graph():\n my_echo_op = create_shell_command_op("echo hello world!", name="echo_op")\n my_echo_op(start=run_before_shell_op())\n\n\n Args:\n shell_command (str): The shell command that the constructed op will execute.\n name (str): The name of the constructed op.\n description (Optional[str]): Human-readable description of this op.\n required_resource_keys (Optional[Set[str]]): Set of resource handles required by this op.\n Setting this ensures that resource spin up for the required resources will occur before\n the shell command is executed.\n tags (Optional[Dict[str, Any]]): Arbitrary metadata for the op. Frameworks may\n expect and require certain metadata to be attached to a op. Users should generally\n not set metadata directly. Values that are not strings will be json encoded and must meet\n the criteria that `json.loads(json.dumps(value)) == value`.\n\n Raises:\n Failure: Raised when the shell command returns a non-zero exit code.\n\n Returns:\n OpDefinition: Returns the constructed op definition.\n """\n\n @op(\n name=name,\n description=description,\n ins={"start": In(Nothing)},\n out=Out(str),\n required_resource_keys=required_resource_keys,\n tags=tags,\n )\n def _shell_fn(context, config: ShellOpConfig):\n output, return_code = execute(\n shell_command=shell_command, log=context.log, **config.to_execute_params()\n )\n\n if return_code:\n raise Failure(description=f"Shell command execution failed with output: {output}")\n\n return output\n\n return _shell_fn
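A short end-to-end sketch of the factory above, executed in process with the default (``BUFFER``) output logging; the echoed string is arbitrary:

.. code-block:: python

    from dagster import job
    from dagster_shell import create_shell_command_op

    # The command is fixed at job construction time, so the factory applies here.
    echo_op = create_shell_command_op("echo hello from dagster", name="echo_op")

    @job
    def echo_job():
        echo_op()

    if __name__ == "__main__":
        # Uses the default config; output_logging can be switched to "STREAM"
        # via run config to stream lines as they are emitted.
        result = echo_job.execute_in_process()
        assert result.success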
\n\n\n
[docs]def create_shell_script_op(\n shell_script_path,\n name="create_shell_script_op",\n ins: Optional[Mapping[str, In]] = None,\n **kwargs: Any,\n) -> OpDefinition:\n """This function is a factory which constructs an op that will execute a shell command read\n from a script file.\n\n Any kwargs passed to this function will be passed along to the underlying :func:`@op\n <dagster.op>` decorator. However, note that overriding ``config`` or ``output_defs`` is not\n supported.\n\n You might consider using :func:`@graph <dagster.graph>` to wrap this op\n in the cases where you'd like to configure the shell op with different config fields.\n\n If no ``ins`` are passed then the resulting op can take a single ``start`` argument that is a\n `Nothing dependency <https://docs.dagster.io/concepts/ops-jobs-graphs/graphs#defining-nothing-dependencies>`__\n to allow you to run ops before the shell op.\n\n\n Examples:\n .. literalinclude:: ../../../../../../python_modules/libraries/dagster-shell/dagster_shell_tests/example_shell_script_op.py\n :language: python\n\n .. code-block:: python\n\n @op\n def run_before_shell_op():\n do_some_work()\n\n @graph\n def my_graph():\n my_echo_op = create_shell_script_op(file_relative_path(__file__, "hello_world.sh"), name="echo_op")\n my_echo_op(start=run_before_shell_op())\n\n\n Args:\n shell_script_path (str): The script file to execute.\n name (Optional[str]): The name of this op. Defaults to "create_shell_script_op".\n ins (Optional[Mapping[str, In]]): Ins for the op. Defaults to\n a single Nothing input.\n\n Raises:\n Failure: Raised when the shell command returns a non-zero exit code.\n\n Returns:\n OpDefinition: Returns the constructed op definition.\n """\n check.str_param(shell_script_path, "shell_script_path")\n name = check.str_param(name, "name")\n check.opt_mapping_param(ins, "ins", value_type=In)\n\n if "config" in kwargs:\n raise TypeError("Overriding config for shell op is not supported.")\n\n @op(\n name=name,\n description=kwargs.pop("description", "An op to invoke a shell command."),\n ins=ins or {"start": In(Nothing)},\n out=Out(str),\n **kwargs,\n )\n def _shell_script_fn(context, config: ShellOpConfig):\n output, return_code = execute_script_file(\n shell_script_path=shell_script_path, log=context.log, **config.to_execute_params()\n )\n\n if return_code:\n raise Failure(description=f"Shell command execution failed with output: {output}")\n\n return output\n\n return _shell_script_fn
\n
", "current_page_name": "_modules/dagster_shell/ops", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_shell.ops"}, "utils": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_shell.utils

\n#\n# NOTE: This file is based on the bash operator from Apache Airflow, which can be found here:\n# https://github.com/apache/airflow/blob/master/airflow/operators/bash.py\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport os\nimport signal\nfrom logging import Logger\nfrom subprocess import PIPE, STDOUT, Popen\nfrom typing import Mapping, Optional, Tuple\n\nimport dagster._check as check\nfrom dagster._utils import safe_tempfile_path\nfrom typing_extensions import Final\n\nOUTPUT_LOGGING_OPTIONS: Final = ["STREAM", "BUFFER", "NONE"]\n\n\ndef execute_script_file(\n    shell_script_path: str,\n    output_logging: str,\n    log: Logger,\n    cwd: Optional[str] = None,\n    env: Optional[Mapping[str, str]] = None,\n) -> Tuple[str, int]:\n    """Execute a shell script file specified by the argument ``shell_script_path``. The script will be\n    invoked via ``subprocess.Popen(['bash', shell_script_path], ...)``.\n\n    In the Popen invocation, ``stdout=PIPE, stderr=STDOUT`` is used, and the combined stdout/stderr\n    output is retrieved.\n\n    Examples:\n        .. literalinclude:: ../../../../../../python_modules/libraries/dagster-shell/dagster_shell_tests/example_shell_script_utility.py\n           :language: python\n\n    Args:\n        shell_script_path (str): The shell script to execute.\n        output_logging (str): The logging mode to use. Supports STREAM, BUFFER, and NONE.\n        log (Union[logging.Logger, DagsterLogManager]): Any logger which responds to .info()\n        cwd (str, optional): Working directory for the shell command to use. Defaults to the\n            temporary path where we store the shell command in a script file.\n        env (Dict[str, str], optional): Environment dictionary to pass to ``subprocess.Popen``.\n            Unused by default.\n\n    Raises:\n        Exception: When an invalid output_logging is selected. 
Unreachable from op-based\n            invocation since the config system will check output_logging against the config\n            enum.\n\n    Returns:\n        Tuple[str, int]: A tuple where the first element is the combined stdout/stderr output of running the shell\n        command and the second element is the return code.\n    """\n    check.str_param(shell_script_path, "shell_script_path")\n    check.str_param(output_logging, "output_logging")\n    check.opt_str_param(cwd, "cwd", default=os.path.dirname(shell_script_path))\n    env = check.opt_nullable_dict_param(env, "env", key_type=str, value_type=str)\n\n    if output_logging not in OUTPUT_LOGGING_OPTIONS:\n        raise Exception("Unrecognized output_logging %s" % output_logging)\n\n    def pre_exec():\n        # Restore default signal disposition and invoke setsid\n        for sig in ("SIGPIPE", "SIGXFZ", "SIGXFSZ"):\n            if hasattr(signal, sig):\n                signal.signal(getattr(signal, sig), signal.SIG_DFL)\n        os.setsid()\n\n    with open(shell_script_path, "rb") as f:\n        shell_command = f.read().decode("utf-8")\n\n    log.info(f"Running command:\\n{shell_command}")\n\n    sub_process = None\n    try:\n        stdout_pipe = PIPE\n        stderr_pipe = STDOUT\n        if output_logging == "NONE":\n            stdout_pipe = stderr_pipe = None\n\n        sub_process = Popen(\n            ["bash", shell_script_path],\n            stdout=stdout_pipe,\n            stderr=stderr_pipe,\n            cwd=cwd,\n            env=env,\n            preexec_fn=pre_exec,  # noqa: PLW1509\n            encoding="UTF-8",\n        )\n\n        log.info(f"Command pid: {sub_process.pid}")\n\n        output = ""\n        if output_logging == "STREAM":\n            assert sub_process.stdout is not None, "Setting stdout=PIPE should always set stdout."\n            # Stream back logs as they are emitted\n            lines = []\n            for line in sub_process.stdout:\n                log.info(line.rstrip())\n                lines.append(line)\n            output = "".join(lines)\n        elif output_logging == "BUFFER":\n            # Collect and buffer all logs, then emit\n            output, _ = sub_process.communicate()\n            log.info(output)\n\n        sub_process.wait()\n        log.info(f"Command exited with return code {sub_process.returncode}")\n\n        return output, sub_process.returncode\n    finally:\n        # Always terminate subprocess, including in cases where the run is terminated\n        if sub_process:\n            sub_process.terminate()\n\n\ndef execute(\n    shell_command: str,\n    output_logging: str,\n    log: Logger,\n    cwd: Optional[str] = None,\n    env: Optional[Mapping[str, str]] = None,\n) -> Tuple[str, int]:\n    """This function is a utility for executing shell commands from within a Dagster op (or from Python in general).\n    It can be used to execute shell commands on either op input data, or any data generated within a generic python op.\n\n    Internally, it executes a shell script specified by the argument ``shell_command``. The script will be written\n    to a temporary file first and invoked via ``subprocess.Popen(['bash', shell_script_path], ...)``.\n\n    In the Popen invocation, ``stdout=PIPE, stderr=STDOUT`` is used, and the combined stdout/stderr\n    output is retrieved.\n\n    Examples:\n        .. 
literalinclude:: ../../../../../../python_modules/libraries/dagster-shell/dagster_shell_tests/example_shell_command_utility.py\n           :language: python\n\n    Args:\n        shell_command (str): The shell command to execute\n        output_logging (str): The logging mode to use. Supports STREAM, BUFFER, and NONE.\n        log (Union[logging.Logger, DagsterLogManager]): Any logger which responds to .info()\n        cwd (str, optional): Working directory for the shell command to use. Defaults to the\n            temporary path where we store the shell command in a script file.\n        env (Dict[str, str], optional): Environment dictionary to pass to ``subprocess.Popen``.\n            Unused by default.\n\n    Returns:\n        Tuple[str, int]: A tuple where the first element is the combined stdout/stderr output of running the shell\n        command and the second element is the return code.\n    """\n    check.str_param(shell_command, "shell_command")\n    # other args checked in execute_file\n\n    with safe_tempfile_path() as tmp_file_path:\n        tmp_path = os.path.dirname(tmp_file_path)\n        log.info("Using temporary directory: %s" % tmp_path)\n\n        with open(tmp_file_path, "wb") as tmp_file:\n            tmp_file.write(shell_command.encode("utf-8"))\n            tmp_file.flush()\n            script_location = os.path.abspath(tmp_file.name)\n            log.info(f"Temporary script location: {script_location}")\n            return execute_script_file(\n                shell_script_path=tmp_file.name,\n                output_logging=output_logging,\n                log=log,\n                cwd=(cwd or tmp_path),\n                env=env,\n            )\n
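For use outside of an op, a minimal sketch of calling ``execute`` directly with a standard-library logger:

.. code-block:: python

    import logging

    from dagster_shell.utils import execute

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    # Buffers combined stdout/stderr and logs it once the command completes.
    output, return_code = execute(
        shell_command="echo hello",
        output_logging="BUFFER",
        log=logger,
    )
    if return_code != 0:
        raise RuntimeError(f"Shell command failed with output: {output}")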
", "current_page_name": "_modules/dagster_shell/utils", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_shell.utils"}}, "dagster_slack": {"hooks": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_slack.hooks

\nfrom typing import Callable, Optional\n\nfrom dagster._annotations import deprecated_param\nfrom dagster._core.definitions import failure_hook, success_hook\nfrom dagster._core.execution.context.hook import HookContext\nfrom dagster._utils.warnings import normalize_renamed_param\n\n\ndef _default_status_message(context: HookContext, status: str) -> str:\n    return f"Op {context.op.name} on job {context.job_name} {status}!\\nRun ID: {context.run_id}"\n\n\ndef _default_failure_message(context: HookContext) -> str:\n    return _default_status_message(context, status="failed")\n\n\ndef _default_success_message(context: HookContext) -> str:\n    return _default_status_message(context, status="succeeded")\n\n\n
[docs]@deprecated_param(\n param="dagit_base_url",\n breaking_version="2.0",\n additional_warn_text="Use `webserver_base_url` instead.",\n)\ndef slack_on_failure(\n channel: str,\n message_fn: Callable[[HookContext], str] = _default_failure_message,\n dagit_base_url: Optional[str] = None,\n webserver_base_url: Optional[str] = None,\n):\n """Create a hook on step failure events that will message the given Slack channel.\n\n Args:\n channel (str): The channel to send the message to (e.g. "#my_channel")\n message_fn (Optional(Callable[[HookContext], str])): Function which takes in the HookContext\n outputs the message you want to send.\n dagit_base_url: (Optional[str]): The base url of your webserver instance. Specify this to allow\n messages to include deeplinks to the specific run that triggered the hook.\n webserver_base_url: (Optional[str]): The base url of your webserver instance. Specify this to allow\n messages to include deeplinks to the specific run that triggered the hook.\n\n Examples:\n .. code-block:: python\n\n @slack_on_failure("#foo", webserver_base_url="http://localhost:3000")\n @job(...)\n def my_job():\n pass\n\n .. code-block:: python\n\n def my_message_fn(context: HookContext) -> str:\n return f"Op {context.op} failed!"\n\n @op\n def an_op(context):\n pass\n\n @job(...)\n def my_job():\n an_op.with_hooks(hook_defs={slack_on_failure("#foo", my_message_fn)})\n\n """\n webserver_base_url = normalize_renamed_param(\n webserver_base_url, "webserver_base_url", dagit_base_url, "dagit_base_url"\n )\n\n @failure_hook(required_resource_keys={"slack"})\n def _hook(context: HookContext):\n text = message_fn(context)\n if webserver_base_url:\n text += f"\\n<{webserver_base_url}/runs/{context.run_id}|View in Dagster UI>"\n\n context.resources.slack.chat_postMessage(channel=channel, text=text)\n\n return _hook
\n\n\n
[docs]@deprecated_param(\n param="dagit_base_url",\n breaking_version="2.0",\n additional_warn_text="Use `webserver_base_url` instead.",\n)\ndef slack_on_success(\n channel: str,\n message_fn: Callable[[HookContext], str] = _default_success_message,\n dagit_base_url: Optional[str] = None,\n webserver_base_url: Optional[str] = None,\n):\n """Create a hook on step success events that will message the given Slack channel.\n\n Args:\n channel (str): The channel to send the message to (e.g. "#my_channel")\n message_fn (Optional(Callable[[HookContext], str])): Function which takes in the HookContext\n outputs the message you want to send.\n dagit_base_url: (Optional[str]): The base url of your webserver instance. Specify this to allow\n messages to include deeplinks to the specific run that triggered the hook.\n webserver_base_url: (Optional[str]): The base url of your webserver instance. Specify this to allow\n messages to include deeplinks to the specific run that triggered the hook.\n\n Examples:\n .. code-block:: python\n\n @slack_on_success("#foo", webserver_base_url="http://localhost:3000")\n @job(...)\n def my_job():\n pass\n\n .. code-block:: python\n\n def my_message_fn(context: HookContext) -> str:\n return f"Op {context.op} worked!"\n\n @op\n def an_op(context):\n pass\n\n @job(...)\n def my_job():\n an_op.with_hooks(hook_defs={slack_on_success("#foo", my_message_fn)})\n\n """\n webserver_base_url = normalize_renamed_param(\n webserver_base_url, "webserver_base_url", dagit_base_url, "dagit_base_url"\n )\n\n @success_hook(required_resource_keys={"slack"})\n def _hook(context: HookContext):\n text = message_fn(context)\n if webserver_base_url:\n text += f"\\n<{webserver_base_url}/runs/{context.run_id}|View in Dagster UI>"\n\n context.resources.slack.chat_postMessage(channel=channel, text=text)\n\n return _hook
\n
", "current_page_name": "_modules/dagster_slack/hooks", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_slack.hooks"}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_slack.resources

\nfrom dagster import ConfigurableResource, resource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom pydantic import Field\nfrom slack_sdk.web.client import WebClient\n\n\n
[docs]class SlackResource(ConfigurableResource):\n """This resource is for connecting to Slack.\n\n By configuring this Slack resource, you can post messages to Slack from any Dagster op, asset, schedule or sensor.\n\n Examples:\n .. code-block:: python\n\n import os\n\n from dagster import Definitions, EnvVar, job, op\n from dagster_slack import SlackResource\n\n\n @op\n def slack_op(slack: SlackResource):\n slack.get_client().chat_postMessage(channel='#noise', text=':wave: hey there!')\n\n @job\n def slack_job():\n slack_op()\n\n defs = Definitions(\n jobs=[slack_job],\n resources={\n "slack": SlackResource(token=EnvVar("MY_SLACK_TOKEN")),\n },\n )\n """\n\n token: str = Field(\n description=(\n "To configure access to the Slack API, you'll need an access"\n " token provisioned with access to your Slack workspace."\n " Tokens are typically either user tokens or bot tokens. For programmatic posting"\n " to Slack from this resource, you probably want to provision and use a bot token."\n " More in the Slack API documentation here: https://api.slack.com/docs/token-types"\n ),\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def get_client(self) -> WebClient:\n """Returns a ``slack_sdk.WebClient`` for interacting with the Slack API."""\n return WebClient(self.token)
\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n config_schema=SlackResource.to_config_schema(),\n)\ndef slack_resource(context) -> WebClient:\n """This resource is for connecting to Slack.\n\n The resource object is a `slack_sdk.WebClient`.\n\n By configuring this Slack resource, you can post messages to Slack from any Dagster op, asset, schedule or sensor.\n\n Examples:\n .. code-block:: python\n\n import os\n\n from dagster import job, op\n from dagster_slack import slack_resource\n\n\n @op(required_resource_keys={'slack'})\n def slack_op(context):\n context.resources.slack.chat_postMessage(channel='#noise', text=':wave: hey there!')\n\n @job(resource_defs={'slack': slack_resource})\n def slack_job():\n slack_op()\n\n slack_job.execute_in_process(\n run_config={'resources': {'slack': {'config': {'token': os.getenv('SLACK_TOKEN')}}}}\n )\n """\n return SlackResource.from_resource_context(context).get_client()
\n
", "current_page_name": "_modules/dagster_slack/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_slack.resources"}, "sensors": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_slack.sensors

\nfrom typing import (\n    TYPE_CHECKING,\n    Any,\n    Callable,\n    Dict,\n    List,\n    Optional,\n    Sequence,\n    Tuple,\n    TypeVar,\n    Union,\n)\n\nfrom dagster import (\n    AssetSelection,\n    DefaultSensorStatus,\n    FreshnessPolicySensorContext,\n    freshness_policy_sensor,\n)\nfrom dagster._annotations import deprecated_param, experimental\nfrom dagster._core.definitions import GraphDefinition, JobDefinition\nfrom dagster._core.definitions.run_status_sensor_definition import (\n    RunFailureSensorContext,\n    run_failure_sensor,\n)\nfrom dagster._core.definitions.unresolved_asset_job_definition import UnresolvedAssetJobDefinition\nfrom dagster._utils.warnings import normalize_renamed_param\nfrom slack_sdk.web.client import WebClient\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.selector import (\n        CodeLocationSelector,\n        JobSelector,\n        RepositorySelector,\n    )\n\nT = TypeVar("T", RunFailureSensorContext, FreshnessPolicySensorContext)\n\n\ndef _build_slack_blocks_and_text(\n    context: T,\n    text_fn: Callable[[T], str],\n    blocks_fn: Optional[Callable[[T], List[Dict[Any, Any]]]],\n    webserver_base_url: Optional[str],\n) -> Tuple[List[Dict[str, Any]], str]:\n    main_body_text = text_fn(context)\n    blocks: List[Dict[Any, Any]] = []\n    if blocks_fn:\n        blocks.extend(blocks_fn(context))\n    else:\n        if isinstance(context, RunFailureSensorContext):\n            text = (\n                f'*Job "{context.dagster_run.job_name}" failed.'\n                f' `{context.dagster_run.run_id.split("-")[0]}`*'\n            )\n        else:\n            text = (\n                f'*Asset "{context.asset_key.to_user_string()}" is now'\n                f' {"on time" if context.minutes_overdue == 0 else f"{context.minutes_overdue:.2f} minutes late.*"}'\n            )\n\n        blocks.extend(\n            [\n                {\n                    "type": "section",\n                    "text": {\n                        "type": "mrkdwn",\n                        "text": text,\n                    },\n                },\n                {\n                    "type": "section",\n                    "text": {"type": "mrkdwn", "text": main_body_text},\n                },\n            ]\n        )\n\n    if webserver_base_url:\n        if isinstance(context, RunFailureSensorContext):\n            url = f"{webserver_base_url}/runs/{context.dagster_run.run_id}"\n        else:\n            url = f"{webserver_base_url}/assets/{'/'.join(context.asset_key.path)}"\n        blocks.append(\n            {\n                "type": "actions",\n                "elements": [\n                    {\n                        "type": "button",\n                        "text": {"type": "plain_text", "text": "View in Dagster UI"},\n                        "url": url,\n                    }\n                ],\n            }\n        )\n    return blocks, main_body_text\n\n\ndef _default_failure_message_text_fn(context: RunFailureSensorContext) -> str:\n    return f"Error: ```{context.failure_event.message}```"\n\n\n
[docs]@deprecated_param(\n param="dagit_base_url",\n breaking_version="2.0",\n additional_warn_text="Use `webserver_base_url` instead.",\n)\n@deprecated_param(\n param="job_selection",\n breaking_version="2.0",\n additional_warn_text="Use `monitored_jobs` instead.",\n)\ndef make_slack_on_run_failure_sensor(\n channel: str,\n slack_token: str,\n text_fn: Callable[[RunFailureSensorContext], str] = _default_failure_message_text_fn,\n blocks_fn: Optional[Callable[[RunFailureSensorContext], List[Dict[Any, Any]]]] = None,\n name: Optional[str] = None,\n dagit_base_url: Optional[str] = None,\n minimum_interval_seconds: Optional[int] = None,\n monitored_jobs: Optional[\n Sequence[\n Union[\n JobDefinition,\n GraphDefinition,\n UnresolvedAssetJobDefinition,\n "RepositorySelector",\n "JobSelector",\n "CodeLocationSelector",\n ]\n ]\n ] = None,\n job_selection: Optional[\n Sequence[\n Union[\n JobDefinition,\n GraphDefinition,\n UnresolvedAssetJobDefinition,\n "RepositorySelector",\n "JobSelector",\n "CodeLocationSelector",\n ]\n ]\n ] = None,\n monitor_all_repositories: bool = False,\n default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,\n webserver_base_url: Optional[str] = None,\n):\n """Create a sensor on job failures that will message the given Slack channel.\n\n Args:\n channel (str): The channel to send the message to (e.g. "#my_channel")\n slack_token (str): The slack token.\n Tokens are typically either user tokens or bot tokens. More in the Slack API\n documentation here: https://api.slack.com/docs/token-types\n text_fn (Optional(Callable[[RunFailureSensorContext], str])): Function which\n takes in the ``RunFailureSensorContext`` and outputs the message you want to send.\n Defaults to a text message that contains error message, job name, and run ID.\n The usage of the `text_fn` changes depending on whether you're using `blocks_fn`. If you\n are using `blocks_fn`, this is used as a fallback string to display in notifications. If\n you aren't, this is the main body text of the message. It can be formatted as plain text,\n or with markdown.\n See more details in https://api.slack.com/methods/chat.postMessage#text_usage\n blocks_fn (Callable[[RunFailureSensorContext], List[Dict]]): Function which takes in\n the ``RunFailureSensorContext`` and outputs the message blocks you want to send.\n See information about Blocks in https://api.slack.com/reference/block-kit/blocks\n name: (Optional[str]): The name of the sensor. Defaults to "slack_on_run_failure".\n dagit_base_url: (Optional[str]): The base url of your Dagit instance. Specify this to allow\n messages to include deeplinks to the failed job run.\n minimum_interval_seconds: (Optional[int]): The minimum number of seconds that will elapse\n between sensor evaluations.\n monitored_jobs (Optional[List[Union[JobDefinition, GraphDefinition, RepositorySelector, JobSelector, CodeLocationSensor]]]): The jobs in the\n current repository that will be monitored by this failure sensor. Defaults to None, which\n means the alert will be sent when any job in the repository fails. To monitor jobs in external repositories, use RepositorySelector and JobSelector\n job_selection (Optional[List[Union[JobDefinition, GraphDefinition, RepositorySelector, JobSelector, CodeLocationSensor]]]): (deprecated in favor of monitored_jobs)\n The jobs in the current repository that will be monitored by this failure sensor. 
Defaults to None, which means the alert will\n be sent when any job in the repository fails.\n monitor_all_repositories (bool): If set to True, the sensor will monitor all runs in the\n Dagster instance. If set to True, an error will be raised if you also specify\n monitored_jobs or job_selection. Defaults to False.\n default_status (DefaultSensorStatus): Whether the sensor starts as running or not. The default\n status can be overridden from Dagit or via the GraphQL API.\n webserver_base_url: (Optional[str]): The base url of your webserver instance. Specify this to allow\n messages to include deeplinks to the failed job run.\n\n Examples:\n .. code-block:: python\n\n slack_on_run_failure = make_slack_on_run_failure_sensor(\n "#my_channel",\n os.getenv("MY_SLACK_TOKEN")\n )\n\n @repository\n def my_repo():\n return [my_job + slack_on_run_failure]\n\n .. code-block:: python\n\n def my_message_fn(context: RunFailureSensorContext) -> str:\n return (\n f"Job {context.dagster_run.job_name} failed!"\n f"Error: {context.failure_event.message}"\n )\n\n slack_on_run_failure = make_slack_on_run_failure_sensor(\n channel="#my_channel",\n slack_token=os.getenv("MY_SLACK_TOKEN"),\n text_fn=my_message_fn,\n webserver_base_url="http://mycoolsite.com",\n )\n\n\n """\n webserver_base_url = normalize_renamed_param(\n webserver_base_url, "webserver_base_url", dagit_base_url, "dagit_base_url"\n )\n slack_client = WebClient(token=slack_token)\n jobs = monitored_jobs if monitored_jobs else job_selection\n\n @run_failure_sensor(\n name=name,\n minimum_interval_seconds=minimum_interval_seconds,\n monitored_jobs=jobs,\n monitor_all_repositories=monitor_all_repositories,\n default_status=default_status,\n )\n def slack_on_run_failure(context: RunFailureSensorContext):\n blocks, main_body_text = _build_slack_blocks_and_text(\n context=context,\n text_fn=text_fn,\n blocks_fn=blocks_fn,\n webserver_base_url=webserver_base_url,\n )\n\n slack_client.chat_postMessage(channel=channel, blocks=blocks, text=main_body_text)\n\n return slack_on_run_failure
\n\n\ndef _default_freshness_message_text_fn(context: FreshnessPolicySensorContext) -> str:\n return (\n f"Asset `{context.asset_key.to_user_string()}` is now {context.minutes_overdue:.2f} minutes"\n " late."\n )\n\n\n
[docs]@deprecated_param(\n param="dagit_base_url",\n breaking_version="2.0",\n additional_warn_text="Use `webserver_base_url` instead.",\n)\n@experimental\ndef make_slack_on_freshness_policy_status_change_sensor(\n channel: str,\n slack_token: str,\n asset_selection: AssetSelection,\n warn_after_minutes_overdue: float = 0,\n notify_when_back_on_time: bool = False,\n text_fn: Callable[[FreshnessPolicySensorContext], str] = _default_freshness_message_text_fn,\n blocks_fn: Optional[Callable[[FreshnessPolicySensorContext], List[Dict[Any, Any]]]] = None,\n name: Optional[str] = None,\n dagit_base_url: Optional[str] = None,\n default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,\n webserver_base_url: Optional[str] = None,\n):\n """Create a sensor that will message the given Slack channel whenever an asset in the provided\n AssetSelection becomes out of date. Messages are only fired when the state changes, meaning\n only a single slack message will be sent (when the asset begins to be out of date). If\n `notify_when_back_on_time` is set to `True`, a second slack message will be sent once the asset\n is on time again.\n\n Args:\n channel (str): The channel to send the message to (e.g. "#my_channel")\n slack_token (str): The slack token.\n Tokens are typically either user tokens or bot tokens. More in the Slack API\n documentation here: https://api.slack.com/docs/token-types\n asset_selection (AssetSelection): The selection of assets which this sensor will monitor.\n Alerts will only be fired for assets that have a FreshnessPolicy defined.\n warn_after_minutes_overdue (float): How many minutes past the specified FreshnessPolicy this\n sensor will wait before firing an alert (by default, an alert will be fired as soon as\n the policy is violated).\n notify_when_back_on_time (bool): If a success message should be sent when the asset becomes on\n time again.\n text_fn (Optional(Callable[[RunFailureSensorContext], str])): Function which\n takes in the ``FreshnessPolicySensorContext`` and outputs the message you want to send.\n Defaults to a text message that contains the relevant asset key, and the number of\n minutes past its defined freshness policy it currently is.\n The usage of the `text_fn` changes depending on whether you're using `blocks_fn`. If you\n are using `blocks_fn`, this is used as a fallback string to display in notifications. If\n you aren't, this is the main body text of the message. It can be formatted as plain text,\n or with markdown.\n See more details in https://api.slack.com/methods/chat.postMessage#text_usage\n blocks_fn (Callable[[FreshnessPolicySensorContext], List[Dict]]): Function which takes in\n the ``FreshnessPolicySensorContext`` and outputs the message blocks you want to send.\n See information about Blocks in https://api.slack.com/reference/block-kit/blocks\n name: (Optional[str]): The name of the sensor. Defaults to "slack_on_freshness_policy".\n dagit_base_url: (Optional[str]): The base url of your Dagit instance. Specify this to allow\n messages to include deeplinks to the relevant asset page.\n default_status (DefaultSensorStatus): Whether the sensor starts as running or not. The default\n status can be overridden from Dagit or via the GraphQL API.\n webserver_base_url: (Optional[str]): The base url of your Dagit instance. Specify this to allow\n messages to include deeplinks to the relevant asset page.\n\n Examples:\n .. 
code-block:: python\n\n slack_on_freshness_policy = make_slack_on_freshness_policy_status_change_sensor(\n "#my_channel",\n os.getenv("MY_SLACK_TOKEN"),\n )\n\n .. code-block:: python\n\n def my_message_fn(context: FreshnessPolicySensorContext) -> str:\n if context.minutes_overdue == 0:\n return f"Asset {context.asset_key} is currently on time :)"\n return (\n f"Asset {context.asset_key} is currently {context.minutes_overdue} minutes late!!"\n )\n\n slack_on_run_failure = make_slack_on_run_failure_sensor(\n channel="#my_channel",\n slack_token=os.getenv("MY_SLACK_TOKEN"),\n text_fn=my_message_fn,\n webserver_base_url="http://mycoolsite.com",\n )\n\n\n """\n webserver_base_url = normalize_renamed_param(\n webserver_base_url, "webserver_base_url", dagit_base_url, "dagit_base_url"\n )\n slack_client = WebClient(token=slack_token)\n\n @freshness_policy_sensor(\n name=name, asset_selection=asset_selection, default_status=default_status\n )\n def slack_on_freshness_policy(context: FreshnessPolicySensorContext):\n if context.minutes_overdue is None or context.previous_minutes_overdue is None:\n return\n\n if (\n context.minutes_overdue > warn_after_minutes_overdue\n and context.previous_minutes_overdue <= warn_after_minutes_overdue\n ) or (\n notify_when_back_on_time\n and context.minutes_overdue == 0\n and context.previous_minutes_overdue != 0\n ):\n blocks, main_body_text = _build_slack_blocks_and_text(\n context=context,\n text_fn=text_fn,\n blocks_fn=blocks_fn,\n webserver_base_url=webserver_base_url,\n )\n\n slack_client.chat_postMessage(channel=channel, blocks=blocks, text=main_body_text)\n\n return slack_on_freshness_policy
\n
", "current_page_name": "_modules/dagster_slack/sensors", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_slack.sensors"}}, "dagster_snowflake": {"ops": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_snowflake.ops

\nfrom dagster import (\n    Nothing,\n    _check as check,\n    op,\n)\nfrom dagster._core.definitions.input import In\n\n\ndef _core_create_snowflake_command(dagster_decorator, decorator_name, sql, parameters=None):\n    check.str_param(sql, "sql")\n    check.opt_dict_param(parameters, "parameters")\n\n    @dagster_decorator(\n        name=f"snowflake_{decorator_name}",\n        ins={"start": In(Nothing)},\n        required_resource_keys={"snowflake"},\n        tags={"kind": "sql", "sql": sql},\n    )\n    def snowflake_fn(context):\n        context.resources.snowflake.execute_query(sql=sql, parameters=parameters)\n\n    return snowflake_fn\n\n\ndef snowflake_solid_for_query(sql, parameters=None):\n    """This function is a solid factory that constructs solids to execute a snowflake query.\n\n    Note that you can only use `snowflake_solid_for_query` if you know the query you'd like to\n    execute at job construction time. If you'd like to execute queries dynamically during\n    job execution, you should manually execute those queries in your custom solid using the\n    snowflake resource.\n\n    Args:\n        sql (str): The sql query that will execute against the provided snowflake resource.\n        parameters (dict): The parameters for the sql query.\n\n    Returns:\n        SolidDefinition: Returns the constructed solid definition.\n    """\n    return _core_create_snowflake_command(op, "solid", sql, parameters)\n\n\n
[docs]def snowflake_op_for_query(sql, parameters=None):\n """This function is an op factory that constructs an op to execute a snowflake query.\n\n Note that you can only use `snowflake_op_for_query` if you know the query you'd like to\n execute at graph construction time. If you'd like to execute queries dynamically during\n job execution, you should manually execute those queries in your custom op using the\n snowflake resource.\n\n Args:\n sql (str): The sql query that will execute against the provided snowflake resource.\n parameters (dict): The parameters for the sql query.\n\n Returns:\n OpDefinition: Returns the constructed op definition.\n """\n return _core_create_snowflake_command(op, "op", sql, parameters)
\n
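Because the factory above has no inline example, here is a hedged usage sketch; the query, job name, and resource configuration are hypothetical, and the import path follows the module shown here (``dagster_snowflake.ops``).

from dagster import job
from dagster_snowflake import snowflake_resource
from dagster_snowflake.ops import snowflake_op_for_query  # module shown above

# Build an op that runs a fixed query against the "snowflake" resource.
count_rows = snowflake_op_for_query("SELECT COUNT(*) FROM MY_SCHEMA.MY_TABLE")


@job(resource_defs={"snowflake": snowflake_resource})
def snowflake_count_job():
    # Snowflake credentials are supplied via the resource's run config at execution time.
    count_rows()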
", "current_page_name": "_modules/dagster_snowflake/ops", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_snowflake.ops"}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_snowflake.resources

\nimport base64\nimport sys\nimport warnings\nfrom contextlib import closing, contextmanager\nfrom typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Union\n\nimport dagster._check as check\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import serialization\nfrom dagster import (\n    ConfigurableResource,\n    IAttachDifferentObjectToOpContext,\n    get_dagster_logger,\n    resource,\n)\nfrom dagster._annotations import public\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster._core.storage.event_log.sql_event_log import SqlDbConnection\nfrom dagster._utils.cached_method import cached_method\nfrom pydantic import Field, root_validator, validator\n\ntry:\n    import snowflake.connector\nexcept ImportError:\n    msg = (\n        "Could not import snowflake.connector. This could mean you have an incompatible version "\n        "of azure-storage-blob installed. dagster-snowflake requires azure-storage-blob<12.0.0; "\n        "this conflicts with dagster-azure which requires azure-storage-blob~=12.0.0 and is "\n        "incompatible with dagster-snowflake. Please uninstall dagster-azure and reinstall "\n        "dagster-snowflake to fix this error."\n    )\n    warnings.warn(msg)\n    raise\n\n\n
[docs]class SnowflakeResource(ConfigurableResource, IAttachDifferentObjectToOpContext):\n """A resource for connecting to the Snowflake data warehouse.\n\n If connector configuration is not set, SnowflakeResource.get_connection() will return a\n `snowflake.connector.Connection <https://docs.snowflake.com/en/developer-guide/python-connector/python-connector-api#object-connection>`__\n object. If connector="sqlalchemy" configuration is set, then SnowflakeResource.get_connection() will\n return a `SQLAlchemy Connection <https://docs.sqlalchemy.org/en/20/core/connections.html#sqlalchemy.engine.Connection>`__\n or a `SQLAlchemy raw connection <https://docs.sqlalchemy.org/en/20/core/connections.html#sqlalchemy.engine.Engine.raw_connection>`__.\n\n A simple example of loading data into Snowflake and subsequently querying that data is shown below:\n\n Examples:\n .. code-block:: python\n\n from dagster import job, op\n from dagster_snowflake import SnowflakeResource\n\n @op\n def get_one(snowflake_resource: SnowflakeResource):\n with snowflake_resource.get_connection() as conn:\n # conn is a snowflake.connector.Connection object\n conn.cursor().execute("SELECT 1")\n\n @job\n def my_snowflake_job():\n get_one()\n\n my_snowflake_job.execute_in_process(\n resources={\n 'snowflake_resource': SnowflakeResource(\n account=EnvVar("SNOWFLAKE_ACCOUNT"),\n user=EnvVar("SNOWFLAKE_USER"),\n password=EnvVar("SNOWFLAKE_PASSWORD")\n database="MY_DATABASE",\n schema="MY_SCHEMA",\n warehouse="MY_WAREHOUSE"\n )\n }\n )\n """\n\n account: Optional[str] = Field(\n default=None,\n description=(\n "Your Snowflake account name. For more details, see the `Snowflake documentation."\n " <https://docs.snowflake.com/developer-guide/python-connector/python-connector-api>`__"\n ),\n )\n\n user: str = Field(description="User login name.")\n\n password: Optional[str] = Field(default=None, description="User password.")\n\n database: Optional[str] = Field(\n default=None,\n description=(\n "Name of the default database to use. After login, you can use ``USE DATABASE`` "\n " to change the database."\n ),\n )\n\n schema_: Optional[str] = Field(\n default=None,\n description=(\n "Name of the default schema to use. After login, you can use ``USE SCHEMA`` to "\n "change the schema."\n ),\n alias="schema",\n ) # schema is a reserved word for pydantic\n\n role: Optional[str] = Field(\n default=None,\n description=(\n "Name of the default role to use. After login, you can use ``USE ROLE`` to change "\n " the role."\n ),\n )\n\n warehouse: Optional[str] = Field(\n default=None,\n description=(\n "Name of the default warehouse to use. After login, you can use ``USE WAREHOUSE`` "\n "to change the role."\n ),\n )\n\n private_key: Optional[str] = Field(\n default=None,\n description=(\n "Raw private key to use. See the `Snowflake documentation"\n " <https://docs.snowflake.com/en/user-guide/key-pair-auth.html>`__ for details."\n " Alternately, set private_key_path and private_key_password. To avoid issues with"\n " newlines in the keys, you can base64 encode the key. You can retrieve the base64"\n " encoded key with this shell command: ``cat rsa_key.p8 | base64``"\n ),\n )\n\n private_key_password: Optional[str] = Field(\n default=None,\n description=(\n "Raw private key password to use. See the `Snowflake documentation"\n " <https://docs.snowflake.com/en/user-guide/key-pair-auth.html>`__ for details."\n " Required for both ``private_key`` and ``private_key_path`` if the private key is"\n " encrypted. 
For unencrypted keys, this config can be omitted or set to None."\n ),\n )\n\n private_key_path: Optional[str] = Field(\n default=None,\n description=(\n "Raw private key path to use. See the `Snowflake documentation"\n " <https://docs.snowflake.com/en/user-guide/key-pair-auth.html>`__ for details."\n " Alternately, set the raw private key as ``private_key``."\n ),\n )\n\n autocommit: Optional[bool] = Field(\n default=None,\n description=(\n "None by default, which honors the Snowflake parameter AUTOCOMMIT. Set to True "\n "or False to enable or disable autocommit mode in the session, respectively."\n ),\n )\n\n client_prefetch_threads: Optional[int] = Field(\n default=None,\n description=(\n "Number of threads used to download the results sets (4 by default). "\n "Increasing the value improves fetch performance but requires more memory."\n ),\n )\n\n client_session_keep_alive: Optional[bool] = Field(\n default=None,\n description=(\n "False by default. Set this to True to keep the session active indefinitely, "\n "even if there is no activity from the user. Make certain to call the close method to "\n "terminate the thread properly or the process may hang."\n ),\n )\n\n login_timeout: Optional[int] = Field(\n default=None,\n description=(\n "Timeout in seconds for login. By default, 60 seconds. The login request gives "\n 'up after the timeout length if the HTTP response is "success".'\n ),\n )\n\n network_timeout: Optional[int] = Field(\n default=None,\n description=(\n "Timeout in seconds for all other operations. By default, none/infinite. A general"\n " request gives up after the timeout length if the HTTP response is not 'success'."\n ),\n )\n\n ocsp_response_cache_filename: Optional[str] = Field(\n default=None,\n description=(\n "URI for the OCSP response cache file. By default, the OCSP response cache "\n "file is created in the cache directory."\n ),\n )\n\n validate_default_parameters: Optional[bool] = Field(\n default=None,\n description=(\n "If True, raise an exception if the warehouse, database, or schema doesn't exist."\n " Defaults to False."\n ),\n )\n\n paramstyle: Optional[str] = Field(\n default=None,\n description=(\n "pyformat by default for client side binding. Specify qmark or numeric to "\n "change bind variable formats for server side binding."\n ),\n )\n\n timezone: Optional[str] = Field(\n default=None,\n description=(\n "None by default, which honors the Snowflake parameter TIMEZONE. Set to a "\n "valid time zone (e.g. America/Los_Angeles) to set the session time zone."\n ),\n )\n\n connector: Optional[str] = Field(\n default=None,\n description=(\n "Indicate alternative database connection engine. Permissible option is "\n "'sqlalchemy' otherwise defaults to use the Snowflake Connector for Python."\n ),\n is_required=False,\n )\n\n cache_column_metadata: Optional[str] = Field(\n default=None,\n description=(\n "Optional parameter when connector is set to sqlalchemy. Snowflake SQLAlchemy takes a"\n " flag ``cache_column_metadata=True`` such that all of column metadata for all tables"\n ' are "cached"'\n ),\n )\n\n numpy: Optional[bool] = Field(\n default=None,\n description=(\n "Optional parameter when connector is set to sqlalchemy. 
To enable fetching "\n "NumPy data types, add numpy=True to the connection parameters."\n ),\n )\n\n authenticator: Optional[str] = Field(\n default=None,\n description="Optional parameter to specify the authentication mechanism to use.",\n )\n\n @validator("paramstyle")\n def validate_paramstyle(cls, v: Optional[str]) -> Optional[str]:\n valid_config = ["pyformat", "qmark", "numeric"]\n if v is not None and v not in valid_config:\n raise ValueError(\n "Snowflake Resource: 'paramstyle' configuration value must be one of:"\n f" {','.join(valid_config)}."\n )\n return v\n\n @validator("connector")\n def validate_connector(cls, v: Optional[str]) -> Optional[str]:\n if v is not None and v != "sqlalchemy":\n raise ValueError(\n "Snowflake Resource: 'connector' configuration value must be None or sqlalchemy."\n )\n return v\n\n @root_validator\n def validate_authentication(cls, values):\n auths_set = 0\n auths_set += 1 if values.get("password") is not None else 0\n auths_set += 1 if values.get("private_key") is not None else 0\n auths_set += 1 if values.get("private_key_path") is not None else 0\n\n # if authenticator is set, there can be 0 or 1 additional auth method;\n # otherwise, ensure at least 1 method is provided\n check.invariant(\n auths_set > 0 or values.get("authenticator") is not None,\n "Missing config: Password, private key, or authenticator authentication required"\n " for Snowflake resource.",\n )\n\n # ensure that only 1 non-authenticator method is provided\n check.invariant(\n auths_set <= 1,\n "Incorrect config: Cannot provide both password and private key authentication to"\n " Snowflake Resource.",\n )\n\n return values\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @property\n @cached_method\n def _connection_args(self) -> Mapping[str, Any]:\n conn_args = {\n k: self._resolved_config_dict.get(k)\n for k in (\n "account",\n "user",\n "password",\n "database",\n "schema",\n "role",\n "warehouse",\n "autocommit",\n "client_prefetch_threads",\n "client_session_keep_alive",\n "login_timeout",\n "network_timeout",\n "ocsp_response_cache_filename",\n "validate_default_parameters",\n "paramstyle",\n "timezone",\n "authenticator",\n )\n if self._resolved_config_dict.get(k) is not None\n }\n if (\n self._resolved_config_dict.get("private_key", None) is not None\n or self._resolved_config_dict.get("private_key_path", None) is not None\n ):\n conn_args["private_key"] = self._snowflake_private_key(self._resolved_config_dict)\n\n return conn_args\n\n @property\n @cached_method\n def _sqlalchemy_connection_args(self) -> Mapping[str, Any]:\n conn_args: Dict[str, Any] = {\n k: self._resolved_config_dict.get(k)\n for k in (\n "account",\n "user",\n "password",\n "database",\n "schema",\n "role",\n "warehouse",\n "cache_column_metadata",\n "numpy",\n )\n if self._resolved_config_dict.get(k) is not None\n }\n\n return conn_args\n\n @property\n @cached_method\n def _sqlalchemy_engine_args(self) -> Mapping[str, Any]:\n config = self._resolved_config_dict\n sqlalchemy_engine_args = {}\n if (\n config.get("private_key", None) is not None\n or config.get("private_key_path", None) is not None\n ):\n # sqlalchemy passes private key args separately, so store them in a new dict\n sqlalchemy_engine_args["private_key"] = self._snowflake_private_key(config)\n if config.get("authenticator", None) is not None:\n sqlalchemy_engine_args["authenticator"] = config["authenticator"]\n\n return sqlalchemy_engine_args\n\n def _snowflake_private_key(self, config) -> bytes:\n # If the 
user has defined a path to a private key, we will use that.\n if config.get("private_key_path", None) is not None:\n # read the file from the path.\n with open(config.get("private_key_path"), "rb") as key:\n private_key = key.read()\n else:\n private_key = config.get("private_key", None)\n\n kwargs = {}\n if config.get("private_key_password", None) is not None:\n kwargs["password"] = config["private_key_password"].encode()\n else:\n kwargs["password"] = None\n\n try:\n p_key = serialization.load_pem_private_key(\n private_key, backend=default_backend(), **kwargs\n )\n except TypeError:\n try:\n private_key = base64.b64decode(private_key)\n p_key = serialization.load_pem_private_key(\n private_key, backend=default_backend(), **kwargs\n )\n except ValueError:\n raise ValueError(\n "Unable to load private key. You may need to base64 encode your private key."\n " You can retrieve the base64 encoded key with this shell command: cat"\n " rsa_key.p8 | base64"\n )\n\n pkb = p_key.private_bytes(\n encoding=serialization.Encoding.DER,\n format=serialization.PrivateFormat.PKCS8,\n encryption_algorithm=serialization.NoEncryption(),\n )\n\n return pkb\n\n @public\n @contextmanager\n def get_connection(\n self, raw_conn: bool = True\n ) -> Iterator[Union[SqlDbConnection, snowflake.connector.SnowflakeConnection]]:\n """Gets a connection to Snowflake as a context manager.\n\n If connector configuration is not set, SnowflakeResource.get_connection() will return a\n `snowflake.connector.Connection <https://docs.snowflake.com/en/developer-guide/python-connector/python-connector-api#object-connection>`__\n If connector="sqlalchemy" configuration is set, then SnowflakeResource.get_connection() will\n return a `SQLAlchemy Connection <https://docs.sqlalchemy.org/en/20/core/connections.html#sqlalchemy.engine.Connection>`__\n or a `SQLAlchemy raw connection <https://docs.sqlalchemy.org/en/20/core/connections.html#sqlalchemy.engine.Engine.raw_connection>`__\n if raw_conn=True.\n\n\n Args:\n raw_conn (bool): If using the sqlalchemy connector, you can set raw_conn to True to create a raw\n connection. Defaults to True.\n\n Examples:\n .. code-block:: python\n\n @op\n def get_query_status(snowflake: SnowflakeResource, query_id):\n with snowflake.get_connection() as conn:\n # conn is a Snowflake Connection object or a SQLAlchemy Connection if\n # sqlalchemy is specified as the connector in the Snowflake Resource config\n\n return conn.get_query_status(query_id)\n\n """\n if self.connector == "sqlalchemy":\n from snowflake.sqlalchemy import URL\n from sqlalchemy import create_engine\n\n engine = create_engine(\n URL(**self._sqlalchemy_connection_args), connect_args=self._sqlalchemy_engine_args\n )\n conn = engine.raw_connection() if raw_conn else engine.connect()\n\n yield conn\n conn.close()\n engine.dispose()\n else:\n conn = snowflake.connector.connect(**self._connection_args)\n\n yield conn\n if not self.autocommit:\n conn.commit()\n conn.close()\n\n def get_object_to_set_on_execution_context(self) -> Any:\n # Directly create a SnowflakeConnection here for backcompat since the SnowflakeConnection\n # has methods this resource does not have\n return SnowflakeConnection(\n config=self._resolved_config_dict,\n log=get_dagster_logger(),\n snowflake_connection_resource=self,\n )
\n\n\n
[docs]class SnowflakeConnection:\n """A connection to Snowflake that can execute queries. In general this class should not be\n directly instantiated, but rather used as a resource in an op or asset via the\n :py:func:`snowflake_resource`.\n\n Note that the SnowflakeConnection is only used by the snowflake_resource. The Pythonic SnowflakeResource does\n not use this SnowflakeConnection class.\n """\n\n def __init__(\n self, config: Mapping[str, str], log, snowflake_connection_resource: SnowflakeResource\n ):\n self.snowflake_connection_resource = snowflake_connection_resource\n self.log = log\n\n
[docs] @public\n @contextmanager\n def get_connection(\n self, raw_conn: bool = True\n ) -> Iterator[Union[SqlDbConnection, snowflake.connector.SnowflakeConnection]]:\n """Gets a connection to Snowflake as a context manager.\n\n If using the execute_query, execute_queries, or load_table_from_local_parquet methods,\n you do not need to create a connection using this context manager.\n\n Args:\n raw_conn (bool): If using the sqlalchemy connector, you can set raw_conn to True to create a raw\n connection. Defaults to True.\n\n Examples:\n .. code-block:: python\n\n @op(\n required_resource_keys={"snowflake"}\n )\n def get_query_status(context, query_id):\n with context.resources.snowflake.get_connection() as conn:\n # conn is a Snowflake Connection object or a SQLAlchemy Connection if\n # sqlalchemy is specified as the connector in the Snowflake Resource config\n\n return conn.get_query_status(query_id)\n\n """\n with self.snowflake_connection_resource.get_connection(raw_conn=raw_conn) as conn:\n yield conn
\n\n
[docs] @public\n def execute_query(\n self,\n sql: str,\n parameters: Optional[Union[Sequence[Any], Mapping[Any, Any]]] = None,\n fetch_results: bool = False,\n use_pandas_result: bool = False,\n ):\n """Execute a query in Snowflake.\n\n Args:\n sql (str): the query to be executed\n parameters (Optional[Union[Sequence[Any], Mapping[Any, Any]]]): Parameters to be passed to the query. See the\n `Snowflake documentation <https://docs.snowflake.com/en/user-guide/python-connector-example.html#binding-data>`__\n for more information.\n fetch_results (bool): If True, will return the result of the query. Defaults to False. If True\n and use_pandas_result is also True, results will be returned as a Pandas DataFrame.\n use_pandas_result (bool): If True, will return the result of the query as a Pandas DataFrame.\n Defaults to False. If fetch_results is False and use_pandas_result is True, an error will be\n raised.\n\n Returns:\n The result of the query if fetch_results or use_pandas_result is True, otherwise returns None\n\n Examples:\n .. code-block:: python\n\n @op\n def drop_database(snowflake: SnowflakeResource):\n snowflake.execute_query(\n "DROP DATABASE IF EXISTS MY_DATABASE"\n )\n """\n check.str_param(sql, "sql")\n check.opt_inst_param(parameters, "parameters", (list, dict))\n check.bool_param(fetch_results, "fetch_results")\n if not fetch_results and use_pandas_result:\n check.failed("If use_pandas_result is True, fetch_results must also be True.")\n\n with self.get_connection() as conn:\n with closing(conn.cursor()) as cursor:\n if sys.version_info[0] < 3:\n sql = sql.encode("utf-8")\n\n self.log.info("Executing query: " + sql)\n parameters = dict(parameters) if isinstance(parameters, Mapping) else parameters\n cursor.execute(sql, parameters)\n if use_pandas_result:\n return cursor.fetch_pandas_all()\n if fetch_results:\n return cursor.fetchall()
\n\n
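As a complement to the example above, a short hedged sketch of retrieving query results via ``execute_query`` with ``fetch_results=True``, using the legacy ``snowflake_resource`` that exposes this ``SnowflakeConnection``; the table and job names are hypothetical.

from dagster import job, op
from dagster_snowflake import snowflake_resource


@op(required_resource_keys={"snowflake"})
def count_orders(context):
    # fetch_results=True returns cursor.fetchall(); without it the result set is discarded.
    rows = context.resources.snowflake.execute_query(
        "SELECT COUNT(*) FROM MY_SCHEMA.ORDERS",
        fetch_results=True,
    )
    context.log.info(f"Row count: {rows[0][0]}")


@job(resource_defs={"snowflake": snowflake_resource})
def orders_job():
    count_orders()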
[docs] @public\n def execute_queries(\n self,\n sql_queries: Sequence[str],\n parameters: Optional[Union[Sequence[Any], Mapping[Any, Any]]] = None,\n fetch_results: bool = False,\n use_pandas_result: bool = False,\n ) -> Optional[Sequence[Any]]:\n """Execute multiple queries in Snowflake.\n\n Args:\n sql_queries (str): List of queries to be executed in series\n parameters (Optional[Union[Sequence[Any], Mapping[Any, Any]]]): Parameters to be passed to every query. See the\n `Snowflake documentation <https://docs.snowflake.com/en/user-guide/python-connector-example.html#binding-data>`__\n for more information.\n fetch_results (bool): If True, will return the results of the queries as a list. Defaults to False. If True\n and use_pandas_result is also True, results will be returned as Pandas DataFrames.\n use_pandas_result (bool): If True, will return the results of the queries as a list of a Pandas DataFrames.\n Defaults to False. If fetch_results is False and use_pandas_result is True, an error will be\n raised.\n\n Returns:\n The results of the queries as a list if fetch_results or use_pandas_result is True,\n otherwise returns None\n\n Examples:\n .. code-block:: python\n\n @op\n def create_fresh_database(snowflake: SnowflakeResource):\n queries = ["DROP DATABASE IF EXISTS MY_DATABASE", "CREATE DATABASE MY_DATABASE"]\n snowflake.execute_queries(\n sql_queries=queries\n )\n\n """\n check.sequence_param(sql_queries, "sql_queries", of_type=str)\n check.opt_inst_param(parameters, "parameters", (list, dict))\n check.bool_param(fetch_results, "fetch_results")\n if not fetch_results and use_pandas_result:\n check.failed("If use_pandas_result is True, fetch_results must also be True.")\n\n results: List[Any] = []\n with self.get_connection() as conn:\n with closing(conn.cursor()) as cursor:\n for raw_sql in sql_queries:\n sql = raw_sql.encode("utf-8") if sys.version_info[0] < 3 else raw_sql\n self.log.info("Executing query: " + sql)\n parameters = dict(parameters) if isinstance(parameters, Mapping) else parameters\n cursor.execute(sql, parameters)\n if use_pandas_result:\n results = results.append(cursor.fetch_pandas_all()) # type: ignore\n elif fetch_results:\n results.append(cursor.fetchall())\n\n return results if len(results) > 0 else None
\n\n
[docs] @public\n def load_table_from_local_parquet(self, src: str, table: str):\n """Stores the content of a parquet file to a Snowflake table.\n\n Args:\n src (str): the name of the file to store in Snowflake\n table (str): the name of the table to store the data. If the table does not exist, it will\n be created. Otherwise the contents of the table will be replaced with the data in src\n\n Examples:\n .. code-block:: python\n\n import pandas as pd\n import pyarrow as pa\n import pyarrow.parquet as pq\n\n @op\n def write_parquet_file(snowflake: SnowflakeResource):\n df = pd.DataFrame({"one": [1, 2, 3], "ten": [11, 12, 13]})\n table = pa.Table.from_pandas(df)\n pq.write_table(table, "example.parquet")\n snowflake.load_table_from_local_parquet(\n src="example.parquet",\n table="MY_TABLE"\n )\n\n """\n check.str_param(src, "src")\n check.str_param(table, "table")\n\n sql_queries = [\n f"CREATE OR REPLACE TABLE {table} ( data VARIANT DEFAULT NULL);",\n "CREATE OR REPLACE FILE FORMAT parquet_format TYPE = 'parquet';",\n f"PUT {src} @%{table};",\n f"COPY INTO {table} FROM @%{table} FILE_FORMAT = (FORMAT_NAME = 'parquet_format');",\n ]\n\n self.execute_queries(sql_queries)
\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n config_schema=SnowflakeResource.to_config_schema(),\n description="This resource is for connecting to the Snowflake data warehouse",\n)\ndef snowflake_resource(context) -> SnowflakeConnection:\n """A resource for connecting to the Snowflake data warehouse. The returned resource object is an\n instance of :py:class:`SnowflakeConnection`.\n\n A simple example of loading data into Snowflake and subsequently querying that data is shown below:\n\n Examples:\n .. code-block:: python\n\n from dagster import job, op\n from dagster_snowflake import snowflake_resource\n\n @op(required_resource_keys={'snowflake'})\n def get_one(context):\n context.resources.snowflake.execute_query('SELECT 1')\n\n @job(resource_defs={'snowflake': snowflake_resource})\n def my_snowflake_job():\n get_one()\n\n my_snowflake_job.execute_in_process(\n run_config={\n 'resources': {\n 'snowflake': {\n 'config': {\n 'account': {'env': 'SNOWFLAKE_ACCOUNT'},\n 'user': {'env': 'SNOWFLAKE_USER'},\n 'password': {'env': 'SNOWFLAKE_PASSWORD'},\n 'database': {'env': 'SNOWFLAKE_DATABASE'},\n 'schema': {'env': 'SNOWFLAKE_SCHEMA'},\n 'warehouse': {'env': 'SNOWFLAKE_WAREHOUSE'},\n }\n }\n }\n }\n )\n """\n snowflake_resource = SnowflakeResource.from_resource_context(context)\n return SnowflakeConnection(\n config=context, log=context.log, snowflake_connection_resource=snowflake_resource\n )
\n
", "current_page_name": "_modules/dagster_snowflake/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_snowflake.resources"}, "snowflake_io_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_snowflake.snowflake_io_manager

\nfrom abc import abstractmethod\nfrom contextlib import contextmanager\nfrom typing import Optional, Sequence, Type, cast\n\nfrom dagster import IOManagerDefinition, OutputContext, io_manager\nfrom dagster._config.pythonic_config import (\n    ConfigurableIOManagerFactory,\n)\nfrom dagster._core.definitions.time_window_partitions import TimeWindow\nfrom dagster._core.storage.db_io_manager import (\n    DbClient,\n    DbIOManager,\n    DbTypeHandler,\n    TablePartitionDimension,\n    TableSlice,\n)\nfrom dagster._core.storage.io_manager import dagster_maintained_io_manager\nfrom pydantic import Field\nfrom sqlalchemy.exc import ProgrammingError\n\nfrom .resources import SnowflakeResource\n\nSNOWFLAKE_DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"\n\n\n
[docs]def build_snowflake_io_manager(\n type_handlers: Sequence[DbTypeHandler], default_load_type: Optional[Type] = None\n) -> IOManagerDefinition:\n """Builds an IO manager definition that reads inputs from and writes outputs to Snowflake.\n\n Args:\n type_handlers (Sequence[DbTypeHandler]): Each handler defines how to translate between\n slices of Snowflake tables and an in-memory type - e.g. a Pandas DataFrame. If only\n one DbTypeHandler is provided, it will be used as teh default_load_type.\n default_load_type (Type): When an input has no type annotation, load it as this type.\n\n Returns:\n IOManagerDefinition\n\n Examples:\n .. code-block:: python\n\n from dagster_snowflake import build_snowflake_io_manager\n from dagster_snowflake_pandas import SnowflakePandasTypeHandler\n from dagster_snowflake_pyspark import SnowflakePySparkTypeHandler\n from dagster import Definitions\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in snowflake\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n snowflake_io_manager = build_snowflake_io_manager([SnowflakePandasTypeHandler(), SnowflakePySparkTypeHandler()])\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "io_manager": snowflake_io_manager.configured({\n "database": "my_database",\n "account" : {"env": "SNOWFLAKE_ACCOUNT"}\n ...\n })\n }\n )\n\n If you do not provide a schema, Dagster will determine a schema based on the assets and ops using\n the IO Manager. For assets, the schema will be determined from the asset key,\n as shown in the above example. The final prefix before the asset name will be used as the schema. For example,\n if the asset ``my_table`` had the key prefix ``["snowflake", "my_schema"]``, the schema ``my_schema`` will be\n used. For ops, the schema can be specified by including a ``schema`` entry in output metadata. If ``schema`` is not provided\n via config or on the asset/op, ``public`` will be used for the schema.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_schema"})}\n )\n def make_my_table() -> pd.DataFrame:\n # the returned value will be stored at my_schema.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata ``columns`` to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pd.DataFrame) -> pd.DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n """\n\n @dagster_maintained_io_manager\n @io_manager(config_schema=SnowflakeIOManager.to_config_schema())\n def snowflake_io_manager(init_context):\n return DbIOManager(\n type_handlers=type_handlers,\n db_client=SnowflakeDbClient(),\n io_manager_name="SnowflakeIOManager",\n database=init_context.resource_config["database"],\n schema=init_context.resource_config.get("schema"),\n default_load_type=default_load_type,\n )\n\n return snowflake_io_manager
\n\n\n
[docs]class SnowflakeIOManager(ConfigurableIOManagerFactory):\n """Base class for an IO manager definition that reads inputs from and writes outputs to Snowflake.\n\n Examples:\n .. code-block:: python\n\n from dagster_snowflake import SnowflakeIOManager\n from dagster_snowflake_pandas import SnowflakePandasTypeHandler\n from dagster_snowflake_pyspark import SnowflakePySparkTypeHandler\n from dagster import Definitions, EnvVar\n\n class MySnowflakeIOManager(SnowflakeIOManager):\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [SnowflakePandasTypeHandler(), SnowflakePySparkTypeHandler()]\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in snowflake\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "io_manager": MySnowflakeIOManager(database="MY_DATABASE", account=EnvVar("SNOWFLAKE_ACCOUNT"), ...)\n }\n )\n\n If you do not provide a schema, Dagster will determine a schema based on the assets and ops using\n the IO Manager. For assets, the schema will be determined from the asset key,\n as shown in the above example. The final prefix before the asset name will be used as the schema. For example,\n if the asset ``my_table`` had the key prefix ``["snowflake", "my_schema"]``, the schema ``my_schema`` will be\n used. For ops, the schema can be specified by including a ``schema`` entry in output metadata. If ``schema`` is not provided\n via config or on the asset/op, ``public`` will be used for the schema.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_schema"})}\n )\n def make_my_table() -> pd.DataFrame:\n # the returned value will be stored at my_schema.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata ``columns`` to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pd.DataFrame) -> pd.DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n """\n\n database: str = Field(description="Name of the database to use.")\n account: str = Field(\n description=(\n "Your Snowflake account name. For more details, see the `Snowflake documentation."\n " <https://docs.snowflake.com/developer-guide/python-connector/python-connector-api>`__"\n ),\n )\n user: str = Field(description="User login name.")\n schema_: Optional[str] = Field(\n default=None, alias="schema", description="Name of the schema to use."\n ) # schema is a reserved word for pydantic\n password: Optional[str] = Field(default=None, description="User password.")\n warehouse: Optional[str] = Field(default=None, description="Name of the warehouse to use.")\n role: Optional[str] = Field(default=None, description="Name of the role to use.")\n private_key: Optional[str] = Field(\n default=None,\n description=(\n "Raw private key to use. See the `Snowflake documentation"\n " <https://docs.snowflake.com/en/user-guide/key-pair-auth.html>`__ for details. To"\n " avoid issues with newlines in the keys, you can base64 encode the key. You can"\n " retrieve the base64 encoded key with this shell command: cat rsa_key.p8 | base64"\n ),\n )\n private_key_path: Optional[str] = Field(\n default=None,\n description=(\n "Path to the private key. 
See the `Snowflake documentation"\n " <https://docs.snowflake.com/en/user-guide/key-pair-auth.html>`__ for details."\n ),\n )\n private_key_password: Optional[str] = Field(\n default=None,\n description=(\n "The password of the private key. See the `Snowflake documentation"\n " <https://docs.snowflake.com/en/user-guide/key-pair-auth.html>`__ for details."\n " Required for both private_key and private_key_path if the private key is encrypted."\n " For unencrypted keys, this config can be omitted or set to None."\n ),\n )\n store_timestamps_as_strings: bool = Field(\n default=False,\n description=(\n "If using Pandas DataFrames, whether to convert time data to strings. If True, time"\n " data will be converted to strings when storing the DataFrame and converted back to"\n " time data when loading the DataFrame. If False, time data without a timezone will be"\n " set to UTC timezone to avoid a Snowflake bug. Defaults to False."\n ),\n )\n authenticator: Optional[str] = Field(\n default=None,\n description="Optional parameter to specify the authentication mechanism to use.",\n )\n\n @staticmethod\n @abstractmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n """type_handlers should return a list of the TypeHandlers that the I/O manager can use.\n\n .. code-block:: python\n\n from dagster_snowflake import SnowflakeIOManager\n from dagster_snowflake_pandas import SnowflakePandasTypeHandler\n from dagster_snowflake_pyspark import SnowflakePySparkTypeHandler\n from dagster import Definitions, EnvVar\n\n class MySnowflakeIOManager(SnowflakeIOManager):\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [SnowflakePandasTypeHandler(), SnowflakePySparkTypeHandler()]\n """\n ...\n\n @staticmethod\n def default_load_type() -> Optional[Type]:\n """If an asset or op is not annotated with an return type, default_load_type will be used to\n determine which TypeHandler to use to store and load the output.\n\n If left unimplemented, default_load_type will return None. In that case, if there is only\n one TypeHandler, the I/O manager will default to loading unannotated outputs with that\n TypeHandler.\n\n .. code-block:: python\n\n from dagster_snowflake import SnowflakeIOManager\n from dagster_snowflake_pandas import SnowflakePandasTypeHandler\n from dagster_snowflake_pyspark import SnowflakePySparkTypeHandler\n from dagster import Definitions, EnvVar\n import pandas as pd\n\n class MySnowflakeIOManager(SnowflakeIOManager):\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [SnowflakePandasTypeHandler(), SnowflakePySparkTypeHandler()]\n\n @staticmethod\n def default_load_type() -> Optional[Type]:\n return pd.DataFrame\n """\n return None\n\n def create_io_manager(self, context) -> DbIOManager:\n return DbIOManager(\n db_client=SnowflakeDbClient(),\n io_manager_name="SnowflakeIOManager",\n database=self.database,\n schema=self.schema_,\n type_handlers=self.type_handlers(),\n default_load_type=self.default_load_type(),\n )
\n\n\nclass SnowflakeDbClient(DbClient):\n @staticmethod\n @contextmanager\n def connect(context, table_slice):\n no_schema_config = (\n {k: v for k, v in context.resource_config.items() if k != "schema"}\n if context.resource_config\n else {}\n )\n with SnowflakeResource(\n schema=table_slice.schema, connector="sqlalchemy", **no_schema_config\n ).get_connection(raw_conn=False) as conn:\n yield conn\n\n @staticmethod\n def ensure_schema_exists(context: OutputContext, table_slice: TableSlice, connection) -> None:\n schemas = connection.execute(\n f"show schemas like '{table_slice.schema}' in database {table_slice.database}"\n ).fetchall()\n if len(schemas) == 0:\n connection.execute(f"create schema {table_slice.schema};")\n\n @staticmethod\n def delete_table_slice(context: OutputContext, table_slice: TableSlice, connection) -> None:\n try:\n connection.execute(_get_cleanup_statement(table_slice))\n except ProgrammingError:\n # table doesn't exist yet, so ignore the error\n pass\n\n @staticmethod\n def get_select_statement(table_slice: TableSlice) -> str:\n col_str = ", ".join(table_slice.columns) if table_slice.columns else "*"\n if table_slice.partition_dimensions and len(table_slice.partition_dimensions) > 0:\n query = (\n f"SELECT {col_str} FROM"\n f" {table_slice.database}.{table_slice.schema}.{table_slice.table} WHERE\\n"\n )\n return query + _partition_where_clause(table_slice.partition_dimensions)\n else:\n return f"""SELECT {col_str} FROM {table_slice.database}.{table_slice.schema}.{table_slice.table}"""\n\n\ndef _get_cleanup_statement(table_slice: TableSlice) -> str:\n """Returns a SQL statement that deletes data in the given table to make way for the output data\n being written.\n """\n if table_slice.partition_dimensions and len(table_slice.partition_dimensions) > 0:\n query = (\n f"DELETE FROM {table_slice.database}.{table_slice.schema}.{table_slice.table} WHERE\\n"\n )\n return query + _partition_where_clause(table_slice.partition_dimensions)\n else:\n return f"DELETE FROM {table_slice.database}.{table_slice.schema}.{table_slice.table}"\n\n\ndef _partition_where_clause(partition_dimensions: Sequence[TablePartitionDimension]) -> str:\n return " AND\\n".join(\n (\n _time_window_where_clause(partition_dimension)\n if isinstance(partition_dimension.partitions, TimeWindow)\n else _static_where_clause(partition_dimension)\n )\n for partition_dimension in partition_dimensions\n )\n\n\ndef _time_window_where_clause(table_partition: TablePartitionDimension) -> str:\n partition = cast(TimeWindow, table_partition.partitions)\n start_dt, end_dt = partition\n start_dt_str = start_dt.strftime(SNOWFLAKE_DATETIME_FORMAT)\n end_dt_str = end_dt.strftime(SNOWFLAKE_DATETIME_FORMAT)\n # Snowflake BETWEEN is inclusive; start <= partition expr <= end. We don't want to remove the next partition so we instead\n # write this as start <= partition expr < end.\n return f"""{table_partition.partition_expr} >= '{start_dt_str}' AND {table_partition.partition_expr} < '{end_dt_str}'"""\n\n\ndef _static_where_clause(table_partition: TablePartitionDimension) -> str:\n partitions = ", ".join(f"'{partition}'" for partition in table_partition.partitions)\n return f"""{table_partition.partition_expr} in ({partitions})"""\n
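To make the partition handling above concrete, here is a standalone sketch (not a call into the private helpers, whose inputs come from Dagster's ``TableSlice``) of the WHERE clauses that ``_time_window_where_clause`` and ``_static_where_clause`` render; the datetime format string is assumed for illustration only:

.. code-block:: python

    from datetime import datetime

    # Assumed for illustration; the real value is SNOWFLAKE_DATETIME_FORMAT imported above.
    DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"

    # Time-window partition: inclusive start, exclusive end, so rows belonging to
    # the next partition are never selected or deleted by mistake.
    start, end = datetime(2023, 10, 1), datetime(2023, 10, 2)
    time_clause = (
        f"event_time >= '{start.strftime(DATETIME_FORMAT)}'"
        f" AND event_time < '{end.strftime(DATETIME_FORMAT)}'"
    )

    # Static partition: a plain IN list over the partition keys.
    static_clause = "region in ({})".format(", ".join(f"'{key}'" for key in ["us", "eu"]))

    print(time_clause)    # event_time >= '2023-10-01 00:00:00' AND event_time < '2023-10-02 00:00:00'
    print(static_clause)  # region in ('us', 'eu')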
", "current_page_name": "_modules/dagster_snowflake/snowflake_io_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_snowflake.snowflake_io_manager"}}, "dagster_snowflake_pandas": {"snowflake_pandas_type_handler": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_snowflake_pandas.snowflake_pandas_type_handler

\nfrom typing import Mapping, Optional, Sequence, Type\n\nimport pandas as pd\nimport pandas.core.dtypes.common as pd_core_dtypes_common\nfrom dagster import InputContext, MetadataValue, OutputContext, TableColumn, TableSchema\nfrom dagster._core.definitions.metadata import RawMetadataValue\nfrom dagster._core.errors import DagsterInvariantViolationError\nfrom dagster._core.storage.db_io_manager import DbTypeHandler, TableSlice\nfrom dagster_snowflake import build_snowflake_io_manager\nfrom dagster_snowflake.snowflake_io_manager import SnowflakeDbClient, SnowflakeIOManager\nfrom snowflake.connector.pandas_tools import pd_writer\n\n\ndef _table_exists(table_slice: TableSlice, connection):\n    tables = connection.execute(\n        f"SHOW TABLES LIKE '{table_slice.table}' IN SCHEMA"\n        f" {table_slice.database}.{table_slice.schema}"\n    ).fetchall()\n    return len(tables) > 0\n\n\ndef _get_table_column_types(table_slice: TableSlice, connection) -> Optional[Mapping[str, str]]:\n    if _table_exists(table_slice, connection):\n        schema_list = connection.execute(f"DESCRIBE TABLE {table_slice.table}").fetchall()\n        return {item[0]: item[1] for item in schema_list}\n\n\ndef _convert_timestamp_to_string(\n    s: pd.Series, column_types: Optional[Mapping[str, str]], table_name: str\n) -> pd.Series:\n    """Converts columns of data of type pd.Timestamp to string so that it can be stored in\n    snowflake.\n    """\n    column_name = str(s.name)\n    if pd_core_dtypes_common.is_datetime_or_timedelta_dtype(s):  # type: ignore  # (bad stubs)\n        if column_types:\n            if "VARCHAR" not in column_types[column_name]:\n                raise DagsterInvariantViolationError(\n                    "Snowflake I/O manager: Snowflake I/O manager configured to convert time data"\n                    f" in DataFrame column {column_name} to strings, but the corresponding"\n                    f" {column_name.upper()} column in table {table_name} is not of type VARCHAR,"\n                    f" it is of type {column_types[column_name]}. 
Please set"\n                    " store_timestamps_as_strings=False in the Snowflake I/O manager configuration"\n                    " to store time data as TIMESTAMP types."\n                )\n        return s.dt.strftime("%Y-%m-%d %H:%M:%S.%f %z")\n    else:\n        return s\n\n\ndef _convert_string_to_timestamp(s: pd.Series) -> pd.Series:\n    """Converts columns of strings in Timestamp format to pd.Timestamp to undo the conversion in\n    _convert_timestamp_to_string.\n\n    This will not convert non-timestamp strings into timestamps (pd.to_datetime will raise an\n    exception if the string cannot be converted)\n    """\n    if isinstance(s[0], str):\n        try:\n            return pd.to_datetime(s.values)  # type: ignore  # (bad stubs)\n        except ValueError:\n            return s\n    else:\n        return s\n\n\ndef _add_missing_timezone(\n    s: pd.Series, column_types: Optional[Mapping[str, str]], table_name: str\n) -> pd.Series:\n    column_name = str(s.name)\n    if pd_core_dtypes_common.is_datetime_or_timedelta_dtype(s):  # type: ignore  # (bad stubs)\n        if column_types:\n            if "VARCHAR" in column_types[column_name]:\n                raise DagsterInvariantViolationError(\n                    f"Snowflake I/O manager: The Snowflake column {column_name.upper()} in table"\n                    f" {table_name} is of type {column_types[column_name]} and should be of type"\n                    f" TIMESTAMP to store the time data in dataframe column {column_name}. Please"\n                    " migrate this column to be of time TIMESTAMP_NTZ(9) to store time data."\n                )\n        return s.dt.tz_localize("UTC")\n    return s\n\n\n
[docs]class SnowflakePandasTypeHandler(DbTypeHandler[pd.DataFrame]):\n """Plugin for the Snowflake I/O Manager that can store and load Pandas DataFrames as Snowflake tables.\n\n Examples:\n .. code-block:: python\n\n from dagster_snowflake import SnowflakeIOManager\n from dagster_snowflake_pandas import SnowflakePandasTypeHandler\n from dagster_snowflake_pyspark import SnowflakePySparkTypeHandler\n from dagster import Definitions, EnvVar\n\n class MySnowflakeIOManager(SnowflakeIOManager):\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [SnowflakePandasTypeHandler(), SnowflakePySparkTypeHandler()]\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in snowflake\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "io_manager": MySnowflakeIOManager(database="MY_DATABASE", account=EnvVar("SNOWFLAKE_ACCOUNT"), ...)\n }\n )\n """\n\n def handle_output(\n self, context: OutputContext, table_slice: TableSlice, obj: pd.DataFrame, connection\n ) -> Mapping[str, RawMetadataValue]:\n from snowflake import connector\n\n connector.paramstyle = "pyformat"\n with_uppercase_cols = obj.rename(str.upper, copy=False, axis="columns")\n column_types = _get_table_column_types(table_slice, connection)\n if context.resource_config and context.resource_config.get(\n "store_timestamps_as_strings", False\n ):\n with_uppercase_cols = with_uppercase_cols.apply(\n lambda x: _convert_timestamp_to_string(x, column_types, table_slice.table),\n axis="index",\n )\n else:\n with_uppercase_cols = with_uppercase_cols.apply(\n lambda x: _add_missing_timezone(x, column_types, table_slice.table), axis="index"\n )\n with_uppercase_cols.to_sql(\n table_slice.table,\n con=connection.engine,\n if_exists="append",\n index=False,\n method=pd_writer,\n )\n\n return {\n "row_count": obj.shape[0],\n "dataframe_columns": MetadataValue.table_schema(\n TableSchema(\n columns=[\n TableColumn(name=str(name), type=str(dtype))\n for name, dtype in obj.dtypes.items()\n ]\n )\n ),\n }\n\n def load_input(\n self, context: InputContext, table_slice: TableSlice, connection\n ) -> pd.DataFrame:\n if table_slice.partition_dimensions and len(context.asset_partition_keys) == 0:\n return pd.DataFrame()\n result = pd.read_sql(\n sql=SnowflakeDbClient.get_select_statement(table_slice), con=connection\n )\n if context.resource_config and context.resource_config.get(\n "store_timestamps_as_strings", False\n ):\n result = result.apply(_convert_string_to_timestamp, axis="index")\n result.columns = map(str.lower, result.columns) # type: ignore # (bad stubs)\n return result\n\n @property\n def supported_types(self):\n return [pd.DataFrame]
\n\n\nsnowflake_pandas_io_manager = build_snowflake_io_manager(\n [SnowflakePandasTypeHandler()], default_load_type=pd.DataFrame\n)\nsnowflake_pandas_io_manager.__doc__ = """\nAn I/O manager definition that reads inputs from and writes Pandas DataFrames to Snowflake. When\nusing the snowflake_pandas_io_manager, any inputs and outputs without type annotations will be loaded\nas Pandas DataFrames.\n\n\nReturns:\n IOManagerDefinition\n\nExamples:\n\n .. code-block:: python\n\n from dagster_snowflake_pandas import snowflake_pandas_io_manager\n from dagster import asset, Definitions\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in snowflake\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "io_manager": snowflake_pandas_io_manager.configured({\n "database": "my_database",\n "account" : {"env": "SNOWFLAKE_ACCOUNT"}\n ...\n })\n }\n )\n\n If you do not provide a schema, Dagster will determine a schema based on the assets and ops using\n the I/O Manager. For assets, the schema will be determined from the asset key.\n For ops, the schema can be specified by including a "schema" entry in output metadata. If "schema" is not provided\n via config or on the asset/op, "public" will be used for the schema.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_schema"})}\n )\n def make_my_table() -> pd.DataFrame:\n # the returned value will be stored at my_schema.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pd.DataFrame) -> pd.DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n"""\n\n\n
[docs]class SnowflakePandasIOManager(SnowflakeIOManager):\n """An I/O manager definition that reads inputs from and writes Pandas DataFrames to Snowflake. When\n using the SnowflakePandasIOManager, any inputs and outputs without type annotations will be loaded\n as Pandas DataFrames.\n\n\n Returns:\n IOManagerDefinition\n\n Examples:\n .. code-block:: python\n\n from dagster_snowflake_pandas import SnowflakePandasIOManager\n from dagster import asset, Definitions, EnvVar\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in snowflake\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "io_manager": SnowflakePandasIOManager(database="MY_DATABASE", account=EnvVar("SNOWFLAKE_ACCOUNT"), ...)\n }\n )\n\n If you do not provide a schema, Dagster will determine a schema based on the assets and ops using\n the I/O Manager. For assets, the schema will be determined from the asset key, as in the above example.\n For ops, the schema can be specified by including a "schema" entry in output metadata. If "schema" is not provided\n via config or on the asset/op, "public" will be used for the schema.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_schema"})}\n )\n def make_my_table() -> pd.DataFrame:\n # the returned value will be stored at my_schema.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pd.DataFrame) -> pd.DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n """\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [SnowflakePandasTypeHandler()]\n\n @staticmethod\n def default_load_type() -> Optional[Type]:\n return pd.DataFrame
\n
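The string/timestamp helpers above only run when ``store_timestamps_as_strings`` is enabled on the I/O manager. A minimal sketch of wiring that flag through ``SnowflakePandasIOManager`` (the asset, database, and credential names are placeholders; the field itself is declared on ``SnowflakeIOManager``):

.. code-block:: python

    import pandas as pd
    from dagster import Definitions, EnvVar, asset
    from dagster_snowflake_pandas import SnowflakePandasIOManager

    @asset(key_prefix=["my_schema"])  # "my_schema" becomes the Snowflake schema
    def events() -> pd.DataFrame:
        # With store_timestamps_as_strings=True, this timestamp column is written
        # as a string (VARCHAR) and converted back to pd.Timestamp when loaded downstream.
        return pd.DataFrame({"ts": [pd.Timestamp("2023-10-12 01:58:32")], "n": [1]})

    defs = Definitions(
        assets=[events],
        resources={
            "io_manager": SnowflakePandasIOManager(
                database="MY_DATABASE",
                account=EnvVar("SNOWFLAKE_ACCOUNT"),
                user=EnvVar("SNOWFLAKE_USER"),
                password=EnvVar("SNOWFLAKE_PASSWORD"),
                store_timestamps_as_strings=True,
            )
        },
    )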
", "current_page_name": "_modules/dagster_snowflake_pandas/snowflake_pandas_type_handler", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_snowflake_pandas.snowflake_pandas_type_handler"}}, "dagster_snowflake_pyspark": {"snowflake_pyspark_type_handler": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_snowflake_pyspark.snowflake_pyspark_type_handler

\nfrom typing import Mapping, Optional, Sequence, Type\n\nimport dagster._check as check\nfrom dagster import InputContext, MetadataValue, OutputContext, TableColumn, TableSchema\nfrom dagster._core.definitions.metadata import RawMetadataValue\nfrom dagster._core.storage.db_io_manager import DbTypeHandler, TableSlice\nfrom dagster_snowflake import SnowflakeIOManager, build_snowflake_io_manager\nfrom dagster_snowflake.snowflake_io_manager import SnowflakeDbClient\nfrom pyspark.sql import DataFrame, SparkSession\nfrom pyspark.sql.types import StructType\n\nSNOWFLAKE_CONNECTOR = "net.snowflake.spark.snowflake"\n\n\ndef _get_snowflake_options(config, table_slice: TableSlice) -> Mapping[str, str]:\n    check.invariant(\n        config.get("warehouse", None) is not None,\n        "Missing config: Warehouse is required when using PySpark with the Snowflake I/O manager.",\n    )\n\n    conf = {\n        "sfURL": f"{config['account']}.snowflakecomputing.com",\n        "sfUser": config["user"],\n        "sfPassword": config["password"],\n        "sfDatabase": config["database"],\n        "sfSchema": table_slice.schema,\n        "sfWarehouse": config["warehouse"],\n    }\n\n    return conf\n\n\n
[docs]class SnowflakePySparkTypeHandler(DbTypeHandler[DataFrame]):\n """Plugin for the Snowflake I/O Manager that can store and load PySpark DataFrames as Snowflake tables.\n\n Examples:\n .. code-block:: python\n\n from dagster_snowflake import SnowflakeIOManager\n from dagster_snowflake_pandas import SnowflakePandasTypeHandler\n from dagster_snowflake_pyspark import SnowflakePySparkTypeHandler\n from dagster import Definitions, EnvVar\n\n class MySnowflakeIOManager(SnowflakeIOManager):\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [SnowflakePandasTypeHandler(), SnowflakePySparkTypeHandler()]\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in snowflake\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "io_manager": MySnowflakeIOManager(database="MY_DATABASE", account=EnvVar("SNOWFLAKE_ACCOUNT"), warehouse="my_warehouse", ...)\n }\n )\n\n """\n\n def handle_output(\n self, context: OutputContext, table_slice: TableSlice, obj: DataFrame, _\n ) -> Mapping[str, RawMetadataValue]:\n options = _get_snowflake_options(context.resource_config, table_slice)\n\n with_uppercase_cols = obj.toDF(*[c.upper() for c in obj.columns])\n\n with_uppercase_cols.write.format(SNOWFLAKE_CONNECTOR).options(**options).option(\n "dbtable", table_slice.table\n ).mode("append").save()\n\n return {\n "dataframe_columns": MetadataValue.table_schema(\n TableSchema(\n columns=[\n TableColumn(name=field.name, type=field.dataType.typeName())\n for field in obj.schema.fields\n ]\n )\n ),\n }\n\n def load_input(self, context: InputContext, table_slice: TableSlice, _) -> DataFrame:\n options = _get_snowflake_options(context.resource_config, table_slice)\n\n spark = SparkSession.builder.getOrCreate() # type: ignore\n if table_slice.partition_dimensions and len(context.asset_partition_keys) == 0:\n return spark.createDataFrame([], StructType([]))\n\n df = (\n spark.read.format(SNOWFLAKE_CONNECTOR)\n .options(**options)\n .option("query", SnowflakeDbClient.get_select_statement(table_slice))\n .load()\n )\n return df.toDF(*[c.lower() for c in df.columns])\n\n @property\n def supported_types(self):\n return [DataFrame]
\n\n\nsnowflake_pyspark_io_manager = build_snowflake_io_manager(\n [SnowflakePySparkTypeHandler()], default_load_type=DataFrame\n)\nsnowflake_pyspark_io_manager.__doc__ = """\nAn I/O manager definition that reads inputs from and writes PySpark DataFrames to Snowflake. When\nusing the snowflake_pyspark_io_manager, any inputs and outputs without type annotations will be loaded\nas PySpark DataFrames.\n\nReturns:\n IOManagerDefinition\n\nExamples:\n\n .. code-block:: python\n\n from dagster_snowflake_pyspark import snowflake_pyspark_io_manager\n from pyspark.sql import DataFrame\n from dagster import Definitions\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in snowflake\n )\n def my_table() -> DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "io_manager": snowflake_pyspark_io_manager.configured({\n "database": "my_database",\n "warehouse": "my_warehouse", # required for snowflake_pyspark_io_manager\n "account" : {"env": "SNOWFLAKE_ACCOUNT"},\n "password": {"env": "SNOWFLAKE_PASSWORD"},\n ...\n })\n }\n )\n\n Note that the warehouse configuration value is required when using the snowflake_pyspark_io_manager\n\n If you do not provide a schema, Dagster will determine a schema based on the assets and ops using\n the I/O Manager. For assets, the schema will be determined from the asset key.\n For ops, the schema can be specified by including a "schema" entry in output metadata. If "schema" is not provided\n via config or on the asset/op, "public" will be used for the schema.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_schema"})}\n )\n def make_my_table() -> DataFrame:\n # the returned value will be stored at my_schema.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: DataFrame) -> DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n"""\n\n\n
[docs]class SnowflakePySparkIOManager(SnowflakeIOManager):\n """An I/O manager definition that reads inputs from and writes PySpark DataFrames to Snowflake. When\n using the SnowflakePySparkIOManager, any inputs and outputs without type annotations will be loaded\n as PySpark DataFrames.\n\n Returns:\n IOManagerDefinition\n\n Examples:\n .. code-block:: python\n\n from dagster_snowflake_pyspark import SnowflakePySparkIOManager\n from pyspark.sql import DataFrame\n from dagster import Definitions, EnvVar\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in snowflake\n )\n def my_table() -> DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "io_manager": SnowflakePySparkIOManager(\n database="my_database",\n warehouse="my_warehouse", # required for SnowflakePySparkIOManager\n account=EnvVar("SNOWFLAKE_ACCOUNT"),\n password=EnvVar("SNOWFLAKE_PASSWORD"),\n ...\n )\n }\n )\n\n Note that the warehouse configuration value is required when using the SnowflakePySparkIOManager\n\n If you do not provide a schema, Dagster will determine a schema based on the assets and ops using\n the I/O Manager. For assets, the schema will be determined from the asset key, as in the above example.\n For ops, the schema can be specified by including a "schema" entry in output metadata. If "schema" is not provided\n via config or on the asset/op, "public" will be used for the schema.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_schema"})}\n )\n def make_my_table() -> DataFrame:\n # the returned value will be stored at my_schema.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: DataFrame) -> DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n """\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [SnowflakePySparkTypeHandler()]\n\n @staticmethod\n def default_load_type() -> Optional[Type]:\n return DataFrame
\n
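For reference, ``_get_snowflake_options`` above reduces the resource config and target table to the options dict handed to the Spark Snowflake connector. A sketch of the resulting shape, with placeholder values:

.. code-block:: python

    # Options passed to spark.read/write.format("net.snowflake.spark.snowflake"),
    # as assembled by _get_snowflake_options; all values here are placeholders.
    options = {
        "sfURL": "my_account.snowflakecomputing.com",  # f"{config['account']}.snowflakecomputing.com"
        "sfUser": "my_user",
        "sfPassword": "my_password",
        "sfDatabase": "MY_DATABASE",
        "sfSchema": "my_schema",        # comes from the table slice, not the config
        "sfWarehouse": "my_warehouse",  # required; a missing warehouse raises a check error
    }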
", "current_page_name": "_modules/dagster_snowflake_pyspark/snowflake_pyspark_type_handler", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_snowflake_pyspark.snowflake_pyspark_type_handler"}}, "dagster_spark": {"configs": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_spark.configs

\n"""Spark Configuration.\n\nIn this file we define the key configuration parameters for submitting Spark jobs. Spark can be run\nin a variety of deployment contexts. See the Spark documentation at\nhttps://spark.apache.org/docs/latest/submitting-applications.html for a more in-depth summary of\nSpark deployment contexts and configuration.\n"""\nfrom dagster import Field, StringSource\n\nfrom .configs_spark import spark_config\nfrom .types import SparkDeployMode\n\n\n
[docs]def define_spark_config():\n """Spark configuration.\n\n See the Spark documentation for reference:\n https://spark.apache.org/docs/latest/submitting-applications.html\n """\n master_url = Field(\n StringSource,\n description="The master URL for the cluster (e.g. spark://23.195.26.187:7077)",\n is_required=True,\n )\n\n deploy_mode = Field(\n SparkDeployMode,\n description="""Whether to deploy your driver on the worker nodes (cluster) or locally as an\n external client (client) (default: client). A common deployment strategy is to submit your\n application from a gateway machine that is physically co-located with your worker machines\n (e.g. Master node in a standalone EC2 cluster). In this setup, client mode is appropriate.\n In client mode, the driver is launched directly within the spark-submit process which acts\n as a client to the cluster. The input and output of the application is attached to the\n console. Thus, this mode is especially suitable for applications that involve the REPL (e.g.\n Spark shell).""",\n is_required=False,\n )\n\n application_jar = Field(\n StringSource,\n description="""Path to a bundled jar including your application and all\n dependencies. The URL must be globally visible inside of your cluster, for\n instance, an hdfs:// path or a file:// path that is present on all nodes.\n """,\n is_required=True,\n )\n\n application_arguments = Field(\n StringSource,\n description="Arguments passed to the main method of your main class, if any",\n is_required=False,\n )\n\n spark_home = Field(\n StringSource,\n description=(\n "The path to your spark installation. Defaults to $SPARK_HOME at runtime if not"\n " provided."\n ),\n is_required=False,\n )\n\n return {\n "master_url": master_url,\n "deploy_mode": deploy_mode,\n "application_jar": application_jar,\n "spark_conf": spark_config(),\n "spark_home": spark_home,\n "application_arguments": application_arguments,\n }
\n
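As a hedged sketch, run config for an op built on this schema might look like the following (the op name is made up; ``spark_conf`` takes the nested Spark properties that ``parse_spark_config`` flattens into ``--conf`` flags):

.. code-block:: python

    # Example run config for an op whose config_schema is define_spark_config().
    run_config = {
        "ops": {
            "my_spark_op": {
                "config": {
                    "master_url": "spark://23.195.26.187:7077",
                    "deploy_mode": "client",
                    "application_jar": "/path/to/app.jar",
                    "application_arguments": "--date 2023-10-12",
                    "spark_home": "/opt/spark",  # optional; falls back to $SPARK_HOME at runtime
                    "spark_conf": {"spark": {"executor": {"memory": "4g"}}},
                }
            }
        }
    }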
", "current_page_name": "_modules/dagster_spark/configs", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_spark.configs"}, "ops": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_spark.ops

\nfrom dagster import (\n    In,\n    Nothing,\n    Out,\n    _check as check,\n    op,\n)\n\nfrom .configs import define_spark_config\n\n\n
[docs]def create_spark_op(\n name, main_class, description=None, required_resource_keys=frozenset(["spark"])\n):\n check.str_param(name, "name")\n check.str_param(main_class, "main_class")\n check.opt_str_param(description, "description", "A parameterized Spark job.")\n check.set_param(required_resource_keys, "required_resource_keys")\n\n @op(\n name=name,\n description=description,\n config_schema=define_spark_config(),\n ins={"start": In(Nothing)},\n out=Out(Nothing),\n tags={"kind": "spark", "main_class": main_class},\n required_resource_keys=required_resource_keys,\n )\n def spark_op(context):\n context.resources.spark.run_spark_job(context.op_config, main_class)\n\n return spark_op
\n
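A minimal sketch of using this factory (names are illustrative, and it assumes the package's top-level exports for ``create_spark_op`` and ``spark_resource``); the generated op requires a ``spark`` resource, supplied here from the resources module below:

.. code-block:: python

    from dagster import job
    from dagster_spark import create_spark_op, spark_resource

    # Builds an op that runs spark-submit for the given main class,
    # using the config schema from define_spark_config().
    run_etl = create_spark_op("run_etl", main_class="com.example.EtlJob")

    @job(resource_defs={"spark": spark_resource})
    def spark_job():
        run_etl()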
", "current_page_name": "_modules/dagster_spark/ops", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_spark.ops"}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_spark.resources

\nimport os\nimport subprocess\n\nimport dagster._check as check\nfrom dagster import resource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster._core.log_manager import DagsterLogManager\n\nfrom .types import SparkOpError\nfrom .utils import construct_spark_shell_command\n\n\nclass SparkResource:\n    def __init__(self, logger):\n        self.logger = check.inst_param(logger, "logger", DagsterLogManager)\n\n    def run_spark_job(self, config, main_class):\n        check.dict_param(config, "config")\n        check.str_param(main_class, "main_class")\n\n        # Extract parameters from config\n        (\n            master_url,\n            deploy_mode,\n            application_jar,\n            spark_conf,\n            application_arguments,\n            spark_home,\n        ) = [\n            config.get(k)\n            for k in (\n                "master_url",\n                "deploy_mode",\n                "application_jar",\n                "spark_conf",\n                "application_arguments",\n                "spark_home",\n            )\n        ]\n\n        if not os.path.exists(application_jar):\n            raise SparkOpError(\n                f"Application jar {application_jar} does not exist. A valid jar must be "\n                "built before running this op."\n            )\n\n        spark_shell_cmd = construct_spark_shell_command(\n            application_jar=application_jar,\n            main_class=main_class,\n            master_url=master_url,\n            spark_conf=spark_conf,\n            deploy_mode=deploy_mode,\n            application_arguments=application_arguments,\n            spark_home=spark_home,\n        )\n        self.logger.info("Running spark-submit: " + " ".join(spark_shell_cmd))\n\n        retcode = subprocess.call(" ".join(spark_shell_cmd), shell=True)\n\n        if retcode != 0:\n            raise SparkOpError("Spark job failed. Please consult your logs.")\n\n\n
[docs]@dagster_maintained_resource\n@resource\ndef spark_resource(context):\n return SparkResource(context.log)
\n
", "current_page_name": "_modules/dagster_spark/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_spark.resources"}, "types": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_spark.types

\nfrom dagster import Enum, EnumValue\n\nSparkDeployModeCluster = EnumValue("cluster")\nSparkDeployModeClient = EnumValue("client")\nSparkDeployMode = Enum(\n    name="SparkDeployMode", enum_values=[SparkDeployModeCluster, SparkDeployModeClient]\n)\n\n\n
[docs]class SparkOpError(Exception):\n pass
\n
", "current_page_name": "_modules/dagster_spark/types", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_spark.types"}, "utils": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_spark.utils

\nimport itertools\nimport os\n\nimport dagster._check as check\n\nfrom .types import SparkOpError\n\n\ndef flatten_dict(d):\n    def _flatten_dict(d, result, key_path=None):\n        """Iterates an arbitrarily nested dictionary and yield dot-notation key:value tuples.\n\n        {'foo': {'bar': 3, 'baz': 1}, {'other': {'key': 1}} =>\n            [('foo.bar', 3), ('foo.baz', 1), ('other.key', 1)]\n\n        """\n        for k, v in d.items():\n            new_key_path = (key_path or []) + [k]\n            if isinstance(v, dict):\n                _flatten_dict(v, result, new_key_path)\n            else:\n                result.append((".".join(new_key_path), v))\n\n    result = []\n    if d is not None:\n        _flatten_dict(d, result)\n    return result\n\n\ndef parse_spark_config(spark_conf):\n    """Convert spark conf dict to list of CLI arguments.\n\n    For each key-value pair in spark conf, we need to pass to CLI in format:\n\n    --conf "key=value"\n    """\n    spark_conf_list = flatten_dict(spark_conf)\n    return format_for_cli(spark_conf_list)\n\n\ndef format_for_cli(spark_conf_list):\n    return list(\n        itertools.chain.from_iterable([("--conf", "{}={}".format(*c)) for c in spark_conf_list])\n    )\n\n\n
[docs]def construct_spark_shell_command(\n application_jar,\n main_class,\n master_url=None,\n spark_conf=None,\n deploy_mode=None,\n application_arguments=None,\n spark_home=None,\n):\n """Constructs the spark-submit command for a Spark job."""\n check.opt_str_param(master_url, "master_url")\n check.str_param(application_jar, "application_jar")\n spark_conf = check.opt_dict_param(spark_conf, "spark_conf")\n check.opt_str_param(deploy_mode, "deploy_mode")\n check.opt_str_param(application_arguments, "application_arguments")\n check.opt_str_param(spark_home, "spark_home")\n\n spark_home = spark_home if spark_home else os.environ.get("SPARK_HOME")\n if spark_home is None:\n raise SparkOpError(\n "No spark home set. You must either pass spark_home in config or "\n "set $SPARK_HOME in your environment (got None)."\n )\n\n master_url = ["--master", master_url] if master_url else []\n deploy_mode = ["--deploy-mode", deploy_mode] if deploy_mode else []\n\n spark_shell_cmd = (\n [f"{spark_home}/bin/spark-submit", "--class", main_class]\n + master_url\n + deploy_mode\n + parse_spark_config(spark_conf)\n + [application_jar]\n + [application_arguments]\n )\n return spark_shell_cmd
\n
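To make the CLI translation concrete, a small usage sketch of ``parse_spark_config`` (mirroring the ``flatten_dict`` docstring above):

.. code-block:: python

    from dagster_spark.utils import parse_spark_config

    spark_conf = {"spark": {"executor": {"memory": "4g"}, "app": {"name": "my_app"}}}

    # Each dotted key becomes its own --conf key=value pair for spark-submit.
    print(parse_spark_config(spark_conf))
    # ['--conf', 'spark.executor.memory=4g', '--conf', 'spark.app.name=my_app']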
", "current_page_name": "_modules/dagster_spark/utils", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_spark.utils"}}, "dagster_ssh": {"resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_ssh.resources

\nimport getpass\nimport os\nfrom io import StringIO\n\nimport paramiko\nfrom dagster import (\n    BoolSource,\n    Field,\n    IntSource,\n    StringSource,\n    _check as check,\n    resource,\n)\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster._utils import mkdir_p\nfrom dagster._utils.merger import merge_dicts\nfrom paramiko.config import SSH_PORT\nfrom sshtunnel import SSHTunnelForwarder\n\n\ndef key_from_str(key_str):\n    """Creates a paramiko SSH key from a string."""\n    check.str_param(key_str, "key_str")\n\n    # py2 StringIO doesn't support with\n    key_file = StringIO(key_str)\n    result = paramiko.RSAKey.from_private_key(key_file)\n    key_file.close()\n    return result\n\n\nclass SSHResource:\n    """Resource for ssh remote execution using Paramiko.\n\n    ref: https://github.com/paramiko/paramiko\n    """\n\n    def __init__(\n        self,\n        remote_host,\n        remote_port,\n        username=None,\n        password=None,\n        key_file=None,\n        key_string=None,\n        timeout=10,\n        keepalive_interval=30,\n        compress=True,\n        no_host_key_check=True,\n        allow_host_key_change=False,\n        logger=None,\n    ):\n        self.remote_host = check.str_param(remote_host, "remote_host")\n        self.remote_port = check.opt_int_param(remote_port, "remote_port")\n        self.username = check.opt_str_param(username, "username")\n        self.password = check.opt_str_param(password, "password")\n        self.key_file = check.opt_str_param(key_file, "key_file")\n        self.timeout = check.opt_int_param(timeout, "timeout")\n        self.keepalive_interval = check.opt_int_param(keepalive_interval, "keepalive_interval")\n        self.compress = check.opt_bool_param(compress, "compress")\n        self.no_host_key_check = check.opt_bool_param(no_host_key_check, "no_host_key_check")\n        self.log = logger\n\n        self.host_proxy = None\n\n        # Create RSAKey object from private key string\n        self.key_obj = key_from_str(key_string) if key_string is not None else None\n\n        # Auto detecting username values from system\n        if not self.username:\n            logger.debug(\n                "username to ssh to host: %s is not specified. Using system's default provided by"\n                " getpass.getuser()"\n                % self.remote_host\n            )\n            self.username = getpass.getuser()\n\n        user_ssh_config_filename = os.path.expanduser("~/.ssh/config")\n        if os.path.isfile(user_ssh_config_filename):\n            ssh_conf = paramiko.SSHConfig()\n            ssh_conf.parse(open(user_ssh_config_filename, encoding="utf8"))\n            host_info = ssh_conf.lookup(self.remote_host)\n            if host_info and host_info.get("proxycommand"):\n                self.host_proxy = paramiko.ProxyCommand(host_info.get("proxycommand"))\n\n            if not (self.password or self.key_file):\n                if host_info and host_info.get("identityfile"):\n                    self.key_file = host_info.get("identityfile")[0]\n\n    def get_connection(self):\n        """Opens a SSH connection to the remote host.\n\n        :rtype: paramiko.client.SSHClient\n        """\n        client = paramiko.SSHClient()\n        client.load_system_host_keys()\n        if self.no_host_key_check:\n            self.log.warning(\n                "No Host Key Verification. 
This won't protect against Man-In-The-Middle attacks"\n            )\n            # Default is RejectPolicy\n            client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n        if self.password and self.password.strip():\n            client.connect(\n                hostname=self.remote_host,\n                username=self.username,\n                password=self.password,\n                key_filename=self.key_file,\n                pkey=self.key_obj,\n                timeout=self.timeout,\n                compress=self.compress,\n                port=self.remote_port,\n                sock=self.host_proxy,\n                look_for_keys=False,\n            )\n        else:\n            client.connect(\n                hostname=self.remote_host,\n                username=self.username,\n                key_filename=self.key_file,\n                pkey=self.key_obj,\n                timeout=self.timeout,\n                compress=self.compress,\n                port=self.remote_port,\n                sock=self.host_proxy,\n            )\n\n        if self.keepalive_interval:\n            client.get_transport().set_keepalive(self.keepalive_interval)\n\n        return client\n\n    def get_tunnel(self, remote_port, remote_host="localhost", local_port=None):\n        check.int_param(remote_port, "remote_port")\n        check.str_param(remote_host, "remote_host")\n        check.opt_int_param(local_port, "local_port")\n\n        if local_port is not None:\n            local_bind_address = ("localhost", local_port)\n        else:\n            local_bind_address = ("localhost",)\n\n        # Will prefer key string if specified, otherwise use the key file\n        pkey = self.key_obj if self.key_obj else self.key_file\n\n        if self.password and self.password.strip():\n            client = SSHTunnelForwarder(\n                self.remote_host,\n                ssh_port=self.remote_port,\n                ssh_username=self.username,\n                ssh_password=self.password,\n                ssh_pkey=pkey,\n                ssh_proxy=self.host_proxy,\n                local_bind_address=local_bind_address,\n                remote_bind_address=(remote_host, remote_port),\n                logger=self.log,\n            )\n        else:\n            client = SSHTunnelForwarder(\n                self.remote_host,\n                ssh_port=self.remote_port,\n                ssh_username=self.username,\n                ssh_pkey=pkey,\n                ssh_proxy=self.host_proxy,\n                local_bind_address=local_bind_address,\n                remote_bind_address=(remote_host, remote_port),\n                host_pkey_directories=[],\n                logger=self.log,\n            )\n\n        return client\n\n    def sftp_get(self, remote_filepath, local_filepath):\n        check.str_param(remote_filepath, "remote_filepath")\n        check.str_param(local_filepath, "local_filepath")\n        conn = self.get_connection()\n        with conn.open_sftp() as sftp_client:\n            local_folder = os.path.dirname(local_filepath)\n\n            # Create intermediate directories if they don't exist\n            mkdir_p(local_folder)\n\n            self.log.info(f"Starting to transfer from {remote_filepath} to {local_filepath}")\n\n            sftp_client.get(remote_filepath, local_filepath)\n\n        conn.close()\n        return local_filepath\n\n    def sftp_put(self, remote_filepath, local_filepath, confirm=True):\n        check.str_param(remote_filepath, "remote_filepath")\n        
check.str_param(local_filepath, "local_filepath")\n        conn = self.get_connection()\n        with conn.open_sftp() as sftp_client:\n            self.log.info(f"Starting to transfer file from {local_filepath} to {remote_filepath}")\n\n            sftp_client.put(local_filepath, remote_filepath, confirm=confirm)\n\n        conn.close()\n        return local_filepath\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n config_schema={\n "remote_host": Field(\n StringSource, description="remote host to connect to", is_required=True\n ),\n "remote_port": Field(\n IntSource,\n description="port of remote host to connect (Default is paramiko SSH_PORT)",\n is_required=False,\n default_value=SSH_PORT,\n ),\n "username": Field(\n StringSource, description="username to connect to the remote_host", is_required=False\n ),\n "password": Field(\n StringSource,\n description="password of the username to connect to the remote_host",\n is_required=False,\n ),\n "key_file": Field(\n StringSource,\n description="key file to use to connect to the remote_host.",\n is_required=False,\n ),\n "key_string": Field(\n StringSource,\n description="key string to use to connect to remote_host",\n is_required=False,\n ),\n "timeout": Field(\n IntSource,\n description="timeout for the attempt to connect to the remote_host.",\n is_required=False,\n default_value=10,\n ),\n "keepalive_interval": Field(\n IntSource,\n description="send a keepalive packet to remote host every keepalive_interval seconds",\n is_required=False,\n default_value=30,\n ),\n "compress": Field(BoolSource, is_required=False, default_value=True),\n "no_host_key_check": Field(BoolSource, is_required=False, default_value=True),\n "allow_host_key_change": Field(\n BoolSource, description="[Deprecated]", is_required=False, default_value=False\n ),\n }\n)\ndef ssh_resource(init_context):\n args = init_context.resource_config\n args = merge_dicts(init_context.resource_config, {"logger": init_context.log})\n return SSHResource(**args)
\n
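A hedged sketch of wiring the resource into an op that pulls a file over SFTP (host, paths, and credentials are placeholders, and it assumes ``ssh_resource`` is importable from the package root; the config keys match the schema above):

.. code-block:: python

    from dagster import job, op
    from dagster_ssh import ssh_resource

    @op(required_resource_keys={"ssh"})
    def fetch_report(context):
        # sftp_get creates any missing local directories before downloading.
        context.resources.ssh.sftp_get("/remote/reports/latest.csv", "/tmp/latest.csv")

    @job(
        resource_defs={
            "ssh": ssh_resource.configured(
                {
                    "remote_host": "example.com",
                    "remote_port": 22,
                    "username": "deploy",
                    "key_file": "/home/deploy/.ssh/id_rsa",
                }
            )
        }
    )
    def sftp_job():
        fetch_report()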
", "current_page_name": "_modules/dagster_ssh/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_ssh.resources"}}, "dagster_twilio": {"resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_twilio.resources

\nfrom dagster import ConfigurableResource, resource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster._core.execution.context.init import InitResourceContext\nfrom pydantic import Field\nfrom twilio.rest import Client\n\n\n
[docs]class TwilioResource(ConfigurableResource):\n """This resource is for connecting to Twilio."""\n\n account_sid: str = Field(\n description=(\n "Twilio Account SID, created with your Twilio account. This can be found on your Twilio"\n " dashboard, see"\n " https://www.twilio.com/blog/twilio-access-tokens-python"\n ),\n )\n auth_token: str = Field(\n description=(\n "Twilio Authentication Token, created with your Twilio account. This can be found on"\n " your Twilio dashboard, see https://www.twilio.com/blog/twilio-access-tokens-python"\n ),\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def create_client(self) -> Client:\n return Client(self.account_sid, self.auth_token)
\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n config_schema=TwilioResource.to_config_schema(),\n description="This resource is for connecting to Twilio",\n)\ndef twilio_resource(context: InitResourceContext) -> Client:\n return TwilioResource.from_resource_context(context).create_client()
\n
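A minimal sketch of using the Pythonic resource from an asset (the asset and phone numbers are placeholders, and it assumes the top-level export of ``TwilioResource``; ``create_client`` returns a standard ``twilio.rest.Client``):

.. code-block:: python

    from dagster import Definitions, EnvVar, asset
    from dagster_twilio import TwilioResource

    @asset
    def refresh_notification(twilio: TwilioResource) -> None:
        # Send an SMS via the underlying twilio.rest.Client.
        client = twilio.create_client()
        client.messages.create(
            body="Nightly tables refreshed",
            from_="+15550000001",
            to="+15550000002",
        )

    defs = Definitions(
        assets=[refresh_notification],
        resources={
            "twilio": TwilioResource(
                account_sid=EnvVar("TWILIO_ACCOUNT_SID"),
                auth_token=EnvVar("TWILIO_AUTH_TOKEN"),
            )
        },
    )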
", "current_page_name": "_modules/dagster_twilio/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_twilio.resources"}}, "dagster_wandb": {"io_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_wandb.io_manager

\nimport datetime\nimport os\nimport pickle\nimport platform\nimport shutil\nimport sys\nimport time\nimport uuid\nfrom contextlib import contextmanager\nfrom typing import List, Optional\n\nfrom dagster import (\n    Field,\n    InitResourceContext,\n    InputContext,\n    Int,\n    IOManager,\n    MetadataValue,\n    OutputContext,\n    String,\n    io_manager,\n)\nfrom dagster._core.storage.io_manager import dagster_maintained_io_manager\nfrom wandb import Artifact\nfrom wandb.data_types import WBValue\n\nfrom .resources import WANDB_CLOUD_HOST\nfrom .utils.errors import (\n    WandbArtifactsIOManagerError,\n    raise_on_empty_configuration,\n    raise_on_unknown_partition_keys,\n    raise_on_unknown_read_configuration_keys,\n    raise_on_unknown_write_configuration_keys,\n)\nfrom .utils.pickling import (\n    ACCEPTED_SERIALIZATION_MODULES,\n    pickle_artifact_content,\n    unpickle_artifact_content,\n)\nfrom .version import __version__\n\nif sys.version_info >= (3, 8):\n    from typing import TypedDict\nelse:\n    from typing_extensions import TypedDict\n\n\nclass Config(TypedDict):\n    dagster_run_id: str\n    wandb_host: str\n    wandb_entity: str\n    wandb_project: str\n    wandb_run_name: Optional[str]\n    wandb_run_id: Optional[str]\n    wandb_run_tags: Optional[List[str]]\n    base_dir: str\n    cache_duration_in_minutes: Optional[int]\n\n\nclass ArtifactsIOManager(IOManager):\n    """IO Manager to handle Artifacts in Weights & Biases (W&B) .\n\n    It handles 3 different inputs:\n    - Pickable objects (the serialization module is configurable)\n    - W&B Objects (Audio, Table, Image, etc)\n    - W&B Artifacts\n    """\n\n    def __init__(self, wandb_client, config: Config):\n        self.wandb = wandb_client\n\n        dagster_run_id = config["dagster_run_id"]\n        self.dagster_run_id = dagster_run_id\n        self.wandb_host = config["wandb_host"]\n        self.wandb_entity = config["wandb_entity"]\n        self.wandb_project = config["wandb_project"]\n        self.wandb_run_id = config.get("wandb_run_id") or dagster_run_id\n        self.wandb_run_name = config.get("wandb_run_name") or f"dagster-run-{dagster_run_id[0:8]}"\n        # augments the run tags\n        wandb_run_tags = config["wandb_run_tags"] or []\n        if "dagster_wandb" not in wandb_run_tags:\n            wandb_run_tags = [*wandb_run_tags, "dagster_wandb"]\n        self.wandb_run_tags = wandb_run_tags\n\n        self.base_dir = config["base_dir"]\n        cache_duration_in_minutes = config["cache_duration_in_minutes"]\n        default_cache_expiration_in_minutes = 60 * 24 * 30  # 60 minutes * 24 hours * 30 days\n        self.cache_duration_in_minutes = (\n            cache_duration_in_minutes\n            if cache_duration_in_minutes is not None\n            else default_cache_expiration_in_minutes\n        )\n\n    def _get_local_storage_path(self):\n        path = self.base_dir\n        if os.path.basename(path) != "storage":\n            path = os.path.join(path, "storage")\n        path = os.path.join(path, "wandb_artifacts_manager")\n        os.makedirs(path, exist_ok=True)\n        return path\n\n    def _get_artifacts_path(self, name, version):\n        local_storage_path = self._get_local_storage_path()\n        path = os.path.join(local_storage_path, "artifacts", f"{name}.{version}")\n        os.makedirs(path, exist_ok=True)\n        return path\n\n    def _get_wandb_logs_path(self):\n        local_storage_path = self._get_local_storage_path()\n        # Adding a random uuid to avoid 
collisions in multi-process context\n        path = os.path.join(local_storage_path, "runs", self.dagster_run_id, str(uuid.uuid4()))\n        os.makedirs(path, exist_ok=True)\n        return path\n\n    def _clean_local_storage_path(self):\n        local_storage_path = self._get_local_storage_path()\n        cache_duration_in_minutes = self.cache_duration_in_minutes\n        current_timestamp = int(time.time())\n        expiration_timestamp = current_timestamp - (\n            cache_duration_in_minutes * 60  # convert to seconds\n        )\n\n        for root, dirs, files in os.walk(local_storage_path, topdown=False):\n            for name in files:\n                current_file_path = os.path.join(root, name)\n                most_recent_access = os.lstat(current_file_path).st_atime\n                if most_recent_access <= expiration_timestamp or cache_duration_in_minutes == 0:\n                    os.remove(current_file_path)\n            for name in dirs:\n                current_dir_path = os.path.join(root, name)\n                if not os.path.islink(current_dir_path):\n                    if len(os.listdir(current_dir_path)) == 0 or cache_duration_in_minutes == 0:\n                        shutil.rmtree(current_dir_path)\n\n    @contextmanager\n    def wandb_run(self):\n        self.wandb.init(\n            id=self.wandb_run_id,\n            name=self.wandb_run_name,\n            project=self.wandb_project,\n            entity=self.wandb_entity,\n            dir=self._get_wandb_logs_path(),\n            tags=self.wandb_run_tags,\n            anonymous="never",\n            resume="allow",\n        )\n        try:\n            yield self.wandb.run\n        finally:\n            self.wandb.finish()\n            self._clean_local_storage_path()\n\n    def _upload_artifact(self, context: OutputContext, obj):\n        if not context.has_partition_key and context.has_asset_partitions:\n            raise WandbArtifactsIOManagerError(\n                "Sorry, but the Weights & Biases (W&B) IO Manager can't handle processing several"\n                " partitions at the same time within a single run. Please process each partition"\n                " separately. If you think this might be an error, don't hesitate to reach out to"\n                " Weights & Biases Support."\n            )\n\n        with self.wandb_run() as run:\n            parameters = {}\n            if context.metadata is not None:\n                parameters = context.metadata.get("wandb_artifact_configuration", {})\n\n            raise_on_unknown_write_configuration_keys(parameters)\n\n            serialization_module = parameters.get("serialization_module", {})\n            serialization_module_name = serialization_module.get("name", "pickle")\n\n            if serialization_module_name not in ACCEPTED_SERIALIZATION_MODULES:\n                raise WandbArtifactsIOManagerError(\n                    f"Oops! It looks like the value you provided, '{serialization_module_name}',"\n                    " isn't recognized as a valid serialization module. 
Here are the ones we do"\n                    f" support: {ACCEPTED_SERIALIZATION_MODULES}."\n                )\n\n            serialization_module_parameters = serialization_module.get("parameters", {})\n            serialization_module_parameters_with_protocol = {\n                "protocol": (\n                    pickle.HIGHEST_PROTOCOL\n                ),  # we use the highest available protocol if we don't pass one\n                **serialization_module_parameters,\n            }\n\n            artifact_type = parameters.get("type", "artifact")\n            artifact_description = parameters.get("description")\n            artifact_metadata = {\n                "source_integration": "dagster_wandb",\n                "source_integration_version": __version__,\n                "source_dagster_run_id": self.dagster_run_id,\n                "source_created_at": datetime.datetime.now(datetime.timezone.utc).isoformat(),\n                "source_python_version": platform.python_version(),\n            }\n            if isinstance(obj, Artifact):\n                if parameters.get("name") is not None:\n                    raise WandbArtifactsIOManagerError(\n                        "You've provided a 'name' property in the 'wandb_artifact_configuration'"\n                        " settings. However, this 'name' property should only be used when the"\n                        " output isn't already an Artifact object."\n                    )\n\n                if parameters.get("type") is not None:\n                    raise WandbArtifactsIOManagerError(\n                        "You've provided a 'type' property in the 'wandb_artifact_configuration'"\n                        " settings. However, this 'type' property should only be used when the"\n                        " output isn't already an Artifact object."\n                    )\n\n                if obj.name is None:\n                    raise WandbArtifactsIOManagerError(\n                        "The Weights & Biases (W&B) Artifact you provided is missing a name."\n                        " Please, assign a name to your Artifact."\n                    )\n\n                if context.has_asset_key and obj.name != context.get_asset_identifier()[0]:\n                    asset_identifier = context.get_asset_identifier()[0]\n                    context.log.warning(\n                        f"Please note, the name '{obj.name}' of your Artifact is overwritten by the"\n                        f" name derived from the AssetKey '{asset_identifier}'. For consistency and"\n                        " to avoid confusion, we advise sharing a constant for both your asset's"\n                        " name and the artifact's name."\n                    )\n                    obj._name = asset_identifier  # noqa: SLF001\n\n                if context.has_partition_key:\n                    artifact_name = f"{obj.name}.{context.partition_key}"\n                    # The Artifact provided is produced in a partitioned execution we add the\n                    # partition as a suffix to the Artifact name\n                    obj._name = artifact_name  # noqa: SLF001\n\n                if len(serialization_module) != 0:  # not an empty dict\n                    context.log.warning(\n                        "You've included a 'serialization_module' in the"\n                        " 'wandb_artifact_configuration' settings. 
However, this doesn't have any"\n                        " impact when the output is already an Artifact object."\n                    )\n\n                # The obj is already an Artifact we augment its metadata\n                artifact = obj\n\n                artifact.metadata = {**artifact.metadata, **artifact_metadata}\n\n                if artifact.description is not None and artifact_description is not None:\n                    raise WandbArtifactsIOManagerError(\n                        "You've given a 'description' in the 'wandb_artifact_configuration'"\n                        " settings for an existing Artifact that already has a description. Please,"\n                        " either set the description using 'wandb_artifact_argument' or when"\n                        " creating your Artifact."\n                    )\n                if artifact_description is not None:\n                    artifact.description = artifact_description\n            else:\n                if context.has_asset_key:\n                    if parameters.get("name") is not None:\n                        raise WandbArtifactsIOManagerError(\n                            "You've included a 'name' property in the"\n                            " 'wandb_artifact_configuration' settings. But, a 'name' is only needed"\n                            " when there's no 'AssetKey'. When an Artifact is created from an"\n                            " @asset, it uses the asset name. When it's created from an @op with an"\n                            " 'asset_key' for the output, that value is used. Please remove the"\n                            " 'name' property."\n                        )\n                    artifact_name = context.get_asset_identifier()[0]  # name of asset\n                else:\n                    name_parameter = parameters.get("name")\n                    if name_parameter is None:\n                        raise WandbArtifactsIOManagerError(\n                            "The 'name' property is missing in the 'wandb_artifact_configuration'"\n                            " settings. For Artifacts created from an @op, a 'name' property is"\n                            " needed. You could also use an @asset as an alternative."\n                        )\n                    assert name_parameter is not None\n                    artifact_name = name_parameter\n\n                if context.has_partition_key:\n                    artifact_name = f"{artifact_name}.{context.partition_key}"\n\n                # We replace the | character with - because it is not allowed in artifact names\n                # The | character is used in multi-dimensional partition keys\n                artifact_name = str(artifact_name).replace("|", "-")\n\n                # Creates an artifact to hold the obj\n                artifact = self.wandb.Artifact(\n                    name=artifact_name,\n                    type=artifact_type,\n                    description=artifact_description,\n                    metadata=artifact_metadata,\n                )\n                if isinstance(obj, WBValue):\n                    if len(serialization_module) != 0:  # not an empty dict\n                        context.log.warning(\n                            "You've included a 'serialization_module' in the"\n                            " 'wandb_artifact_configuration' settings. 
However, this doesn't have"\n                            " any impact when the output is already an W&B object like e.g Table or"\n                            " Image."\n                        )\n                    # Adds the WBValue object using the class name as the name for the file\n                    artifact.add(obj, obj.__class__.__name__)\n                elif obj is not None:\n                    # The output is not a native wandb Object, we serialize it\n                    pickle_artifact_content(\n                        context,\n                        serialization_module_name,\n                        serialization_module_parameters_with_protocol,\n                        artifact,\n                        obj,\n                    )\n\n            # Add any files: https://docs.wandb.ai/ref/python/artifact#add_file\n            add_files = parameters.get("add_files")\n            if add_files is not None and len(add_files) > 0:\n                for add_file in add_files:\n                    artifact.add_file(**add_file)\n\n            # Add any dirs: https://docs.wandb.ai/ref/python/artifact#add_dir\n            add_dirs = parameters.get("add_dirs")\n            if add_dirs is not None and len(add_dirs) > 0:\n                for add_dir in add_dirs:\n                    artifact.add_dir(**add_dir)\n\n            # Add any reference: https://docs.wandb.ai/ref/python/artifact#add_reference\n            add_references = parameters.get("add_references")\n            if add_references is not None and len(add_references) > 0:\n                for add_reference in add_references:\n                    artifact.add_reference(**add_reference)\n\n            # Augments the aliases\n            aliases = parameters.get("aliases", [])\n            aliases.append(f"dagster-run-{self.dagster_run_id[0:8]}")\n            if "latest" not in aliases:\n                aliases.append("latest")\n\n            # Logs the artifact\n            self.wandb.log_artifact(artifact, aliases=aliases)\n            artifact.wait()\n\n            # Adds useful metadata to the output or Asset\n            artifacts_base_url = (\n                "https://wandb.ai"\n                if self.wandb_host == WANDB_CLOUD_HOST\n                else self.wandb_host.rstrip("/")\n            )\n            assert artifact.id is not None\n            output_metadata = {\n                "dagster_run_id": MetadataValue.dagster_run(self.dagster_run_id),\n                "wandb_artifact_id": MetadataValue.text(artifact.id),\n                "wandb_artifact_type": MetadataValue.text(artifact.type),\n                "wandb_artifact_version": MetadataValue.text(artifact.version),\n                "wandb_artifact_size": MetadataValue.int(artifact.size),\n                "wandb_artifact_url": MetadataValue.url(\n                    f"{artifacts_base_url}/{run.entity}/{run.project}/artifacts/{artifact.type}/{'/'.join(artifact.name.rsplit(':', 1))}"\n                ),\n                "wandb_entity": MetadataValue.text(run.entity),\n                "wandb_project": MetadataValue.text(run.project),\n                "wandb_run_id": MetadataValue.text(run.id),\n                "wandb_run_name": MetadataValue.text(run.name),\n                "wandb_run_path": MetadataValue.text(run.path),\n                "wandb_run_url": MetadataValue.url(run.url),\n            }\n            context.add_output_metadata(output_metadata)\n\n    def _download_artifact(self, context: InputContext):\n        with self.wandb_run() as run:\n           
 parameters = {}\n            if context.metadata is not None:\n                parameters = context.metadata.get("wandb_artifact_configuration", {})\n\n            raise_on_unknown_read_configuration_keys(parameters)\n\n            partitions_configuration = parameters.get("partitions", {})\n\n            if not context.has_asset_partitions and len(partitions_configuration) > 0:\n                raise WandbArtifactsIOManagerError(\n                    "You've included a 'partitions' value in the 'wandb_artifact_configuration'"\n                    " settings but it's not within a partitioned execution. Please only use"\n                    " 'partitions' within a partitioned context."\n                )\n\n            if context.has_asset_partitions:\n                # Note: this is currently impossible to unit test with current Dagster APIs but was\n                # tested thoroughly manually\n                name = parameters.get("get")\n                path = parameters.get("get_path")\n                if name is not None or path is not None:\n                    raise WandbArtifactsIOManagerError(\n                        "You've given a value for 'get' and/or 'get_path' in the"\n                        " 'wandb_artifact_configuration' settings during a partitioned execution."\n                        " Please use the 'partitions' property to set 'get' or 'get_path' for each"\n                        " individual partition. To set a default value for all partitions, use '*'."\n                    )\n\n                artifact_name = parameters.get("name")\n                if artifact_name is None:\n                    artifact_name = context.asset_key[0][0]  # name of asset\n\n                partitions = [\n                    (key, f"{artifact_name}.{ str(key).replace('|', '-')}")\n                    for key in context.asset_partition_keys\n                ]\n\n                output = {}\n\n                for key, artifact_name in partitions:\n                    context.log.info(f"Handling partition with key '{key}'")\n                    partition_configuration = partitions_configuration.get(\n                        key, partitions_configuration.get("*")\n                    )\n\n                    raise_on_empty_configuration(key, partition_configuration)\n                    raise_on_unknown_partition_keys(key, partition_configuration)\n\n                    partition_version = None\n                    partition_alias = None\n                    if partition_configuration and partition_configuration is not None:\n                        partition_version = partition_configuration.get("version")\n                        partition_alias = partition_configuration.get("alias")\n                        if partition_version is not None and partition_alias is not None:\n                            raise WandbArtifactsIOManagerError(\n                                "You've provided both 'version' and 'alias' for the partition with"\n                                " key '{key}'. You should only use one of these properties at a"\n                                " time. If you choose not to use any, the latest version will be"\n                                " used by default. 
If this partition is configured with the '*'"\n                                " key, please correct the wildcard configuration."\n                            )\n                    partition_identifier = partition_version or partition_alias or "latest"\n\n                    artifact_uri = (\n                        f"{run.entity}/{run.project}/{artifact_name}:{partition_identifier}"\n                    )\n                    try:\n                        api = self.wandb.Api()\n                        api.artifact(artifact_uri)\n                    except Exception as exception:\n                        raise WandbArtifactsIOManagerError(\n                            "The artifact you're attempting to download might not exist, or you"\n                            " might have forgotten to include the 'name' property in the"\n                            " 'wandb_artifact_configuration' settings."\n                        ) from exception\n\n                    artifact = run.use_artifact(artifact_uri)\n\n                    artifacts_path = self._get_artifacts_path(artifact_name, artifact.version)\n                    if partition_configuration and partition_configuration is not None:\n                        partition_name = partition_configuration.get("get")\n                        partition_path = partition_configuration.get("get_path")\n                        if partition_name is not None and partition_path is not None:\n                            raise WandbArtifactsIOManagerError(\n                                "You've provided both 'get' and 'get_path' in the"\n                                " 'wandb_artifact_configuration' settings for the partition with"\n                                " key '{key}'. Only one of these properties should be used. If you"\n                                " choose not to use any, the whole Artifact will be returned. 
If"\n                                " this partition is configured with the '*' key, please correct the"\n                                " wildcard configuration."\n                            )\n\n                        if partition_name is not None:\n                            wandb_object = artifact.get(partition_name)\n                            if wandb_object is not None:\n                                output[key] = wandb_object\n                                continue\n\n                        if partition_path is not None:\n                            path = artifact.get_path(partition_path)\n                            download_path = path.download(root=artifacts_path)\n                            if download_path is not None:\n                                output[key] = download_path\n                                continue\n\n                    artifact_dir = artifact.download(root=artifacts_path, recursive=True)\n                    unpickled_content = unpickle_artifact_content(artifact_dir)\n                    if unpickled_content is not None:\n                        output[key] = unpickled_content\n                        continue\n\n                    artifact.verify(root=artifacts_path)\n                    output[key] = artifact\n\n                if len(output) == 1:\n                    # If there's only one partition, return the value directly\n                    return next(iter(output.values()))\n\n                return output\n\n            elif context.has_asset_key:\n                # Input is an asset\n                if parameters.get("name") is not None:\n                    raise WandbArtifactsIOManagerError(\n                        "A conflict has been detected in the provided configuration settings. The"\n                        " 'name' parameter appears to be specified twice - once in the"\n                        " 'wandb_artifact_configuration' metadata dictionary, and again as an"\n                        " AssetKey. Kindly avoid setting the name directly, since the AssetKey will"\n                        " be used for this purpose."\n                    )\n                artifact_name = context.get_asset_identifier()[0]  # name of asset\n            else:\n                artifact_name = parameters.get("name")\n                if artifact_name is None:\n                    raise WandbArtifactsIOManagerError(\n                        "The 'name' property is missing in the 'wandb_artifact_configuration'"\n                        " settings. For Artifacts used in an @op, a 'name' property is required."\n                        " You could use an @asset as an alternative."\n                    )\n\n            if context.has_partition_key:\n                artifact_name = f"{artifact_name}.{context.partition_key}"\n\n            artifact_alias = parameters.get("alias")\n            artifact_version = parameters.get("version")\n\n            if artifact_alias is not None and artifact_version is not None:\n                raise WandbArtifactsIOManagerError(\n                    "You've provided both 'version' and 'alias' in the"\n                    " 'wandb_artifact_configuration' settings. 
Only one should be used at a time."\n                    " If you decide not to use any, the latest version will be applied"\n                    " automatically."\n                )\n\n            artifact_identifier = artifact_alias or artifact_version or "latest"\n            artifact_uri = f"{run.entity}/{run.project}/{artifact_name}:{artifact_identifier}"\n\n            # This try/except block is a workaround for a bug in the W&B SDK, this should be removed\n            # once the bug is fixed.\n            try:\n                artifact = run.use_artifact(artifact_uri)\n            except Exception:\n                api = self.wandb.Api()\n                artifact = api.artifact(artifact_uri)\n\n            name = parameters.get("get")\n            path = parameters.get("get_path")\n            if name is not None and path is not None:\n                raise WandbArtifactsIOManagerError(\n                    "You've provided both 'get' and 'get_path' in the"\n                    " 'wandb_artifact_configuration' settings. Only one should be used at a time."\n                    " If you decide not to use any, the entire Artifact will be returned."\n                )\n\n            if name is not None:\n                return artifact.get(name)\n\n            artifacts_path = self._get_artifacts_path(artifact_name, artifact.version)\n            if path is not None:\n                path = artifact.get_path(path)\n                return path.download(root=artifacts_path)\n\n            artifact_dir = artifact.download(root=artifacts_path, recursive=True)\n\n            unpickled_content = unpickle_artifact_content(artifact_dir)\n            if unpickled_content is not None:\n                return unpickled_content\n\n            artifact.verify(root=artifacts_path)\n            return artifact\n\n    def handle_output(self, context: OutputContext, obj) -> None:\n        if obj is None:\n            context.log.warning(\n                "The output value given to the Weights & Biases (W&B) IO Manager is empty. If this"\n                " was intended, you can disregard this warning."\n            )\n        else:\n            try:\n                self._upload_artifact(context, obj)\n            except WandbArtifactsIOManagerError as exception:\n                raise exception\n            except Exception as exception:\n                raise WandbArtifactsIOManagerError() from exception\n\n    def load_input(self, context: InputContext):\n        try:\n            return self._download_artifact(context)\n        except WandbArtifactsIOManagerError as exception:\n            raise exception\n        except Exception as exception:\n            raise WandbArtifactsIOManagerError() from exception\n\n\n
[docs]@dagster_maintained_io_manager\n@io_manager(\n required_resource_keys={"wandb_resource", "wandb_config"},\n description="IO manager to read and write W&B Artifacts",\n config_schema={\n "run_name": Field(\n String,\n is_required=False,\n description=(\n "Short display name for this run, which is how you'll identify this run in the UI."\n " By default, it`s set to a string with the following format dagster-run-[8 first"\n " characters of the Dagster Run ID] e.g. dagster-run-7e4df022."\n ),\n ),\n "run_id": Field(\n String,\n is_required=False,\n description=(\n "Unique ID for this run, used for resuming. It must be unique in the project, and"\n " if you delete a run you can't reuse the ID. Use the name field for a short"\n " descriptive name, or config for saving hyperparameters to compare across runs."\n r" The ID cannot contain the following special characters: /\\#?%:.. You need to set"\n " the Run ID when you are doing experiment tracking inside Dagster to allow the IO"\n " Manager to resume the run. By default it`s set to the Dagster Run ID e.g "\n " 7e4df022-1bf2-44b5-a383-bb852df4077e."\n ),\n ),\n "run_tags": Field(\n [String],\n is_required=False,\n description=(\n "A list of strings, which will populate the list of tags on this run in the UI."\n " Tags are useful for organizing runs together, or applying temporary labels like"\n " 'baseline' or 'production'. It's easy to add and remove tags in the UI, or filter"\n " down to just runs with a specific tag. Any W&B Run used by the integration will"\n " have the dagster_wandb tag."\n ),\n ),\n "base_dir": Field(\n String,\n is_required=False,\n description=(\n "Base directory used for local storage and caching. W&B Artifacts and W&B Run logs"\n " will be written and read from that directory. By default, it`s using the"\n " DAGSTER_HOME directory."\n ),\n ),\n "cache_duration_in_minutes": Field(\n Int,\n is_required=False,\n description=(\n "Defines the amount of time W&B Artifacts and W&B Run logs should be kept in the"\n " local storage. Only files and directories that were not opened for that amount of"\n " time are removed from the cache. Cache purging happens at the end of an IO"\n " Manager execution. You can set it to 0, if you want to disable caching"\n " completely. Caching improves speed when an Artifact is reused between jobs"\n " running on the same machine. It defaults to 30 days."\n ),\n ),\n },\n)\ndef wandb_artifacts_io_manager(context: InitResourceContext):\n """Dagster IO Manager to create and consume W&B Artifacts.\n\n It allows any Dagster @op or @asset to create and consume W&B Artifacts natively.\n\n For a complete set of documentation, see `Dagster integration <https://docs.wandb.ai/guides/integrations/dagster>`_.\n\n **Example:**\n\n .. 
code-block:: python\n\n @repository\n def my_repository():\n return [\n *with_resources(\n load_assets_from_current_module(),\n resource_defs={\n "wandb_config": make_values_resource(\n entity=str,\n project=str,\n ),\n "wandb_resource": wandb_resource.configured(\n {"api_key": {"env": "WANDB_API_KEY"}}\n ),\n "wandb_artifacts_manager": wandb_artifacts_io_manager.configured(\n {"cache_duration_in_minutes": 60} # only cache files for one hour\n ),\n },\n resource_config_by_key={\n "wandb_config": {\n "config": {\n "entity": "my_entity",\n "project": "my_project"\n }\n }\n },\n ),\n ]\n\n\n @asset(\n name="my_artifact",\n metadata={\n "wandb_artifact_configuration": {\n "type": "dataset",\n }\n },\n io_manager_key="wandb_artifacts_manager",\n )\n def create_dataset():\n return [1, 2, 3]\n\n """\n wandb_client = context.resources.wandb_resource["sdk"]\n wandb_host = context.resources.wandb_resource["host"]\n wandb_entity = context.resources.wandb_config["entity"]\n wandb_project = context.resources.wandb_config["project"]\n\n wandb_run_name = None\n wandb_run_id = None\n wandb_run_tags = None\n base_dir = (\n context.instance.storage_directory() if context.instance else os.environ["DAGSTER_HOME"]\n )\n cache_duration_in_minutes = None\n if context.resource_config is not None:\n wandb_run_name = context.resource_config.get("run_name")\n wandb_run_id = context.resource_config.get("run_id")\n wandb_run_tags = context.resource_config.get("run_tags")\n base_dir = context.resource_config.get("base_dir", base_dir)\n cache_duration_in_minutes = context.resource_config.get("cache_duration_in_minutes")\n\n if "PYTEST_CURRENT_TEST" in os.environ:\n dagster_run_id = "unit-testing"\n else:\n dagster_run_id = context.run_id\n\n assert dagster_run_id is not None\n\n config: Config = {\n "dagster_run_id": dagster_run_id,\n "wandb_host": wandb_host,\n "wandb_entity": wandb_entity,\n "wandb_project": wandb_project,\n "wandb_run_name": wandb_run_name,\n "wandb_run_id": wandb_run_id,\n "wandb_run_tags": wandb_run_tags,\n "base_dir": base_dir,\n "cache_duration_in_minutes": cache_duration_in_minutes,\n }\n return ArtifactsIOManager(wandb_client, config)
\n
", "current_page_name": "_modules/dagster_wandb/io_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_wandb.io_manager"}, "launch": {"ops": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_wandb.launch.ops

\nfrom dagster import OpExecutionContext, op\nfrom wandb.sdk.launch import launch\nfrom wandb.sdk.launch.launch_add import launch_add\n\nfrom .configs import launch_agent_config, launch_config\n\n\ndef raise_on_invalid_config(context: OpExecutionContext):\n    entity = context.resources.wandb_config["entity"]\n    if entity == "":\n        raise RuntimeError(\n            "(dagster_wandb) An empty string was provided for the 'entity' property of the"\n            " 'wandb_config'."\n        )\n\n    project = context.resources.wandb_config["project"]\n    if project == "":\n        raise RuntimeError(\n            "(dagster_wandb) An empty string was provided for the 'project' property of the"\n            " 'wandb_config'."\n        )\n\n\n
[docs]@op(\n required_resource_keys={"wandb_resource", "wandb_config"},\n config_schema=launch_agent_config(),\n)\ndef run_launch_agent(context: OpExecutionContext):\n """Starts a Launch Agent and runs it as a long-running process until stopped manually.\n\n Agents are processes that poll launch queues and execute the jobs (or dispatch them to external\n services to be executed) in order.\n\n **Example:**\n\n .. code-block:: YAML\n\n # config.yaml\n\n resources:\n wandb_config:\n config:\n entity: my_entity\n project: my_project\n ops:\n run_launch_agent:\n config:\n max_jobs: -1\n queues:\n - my_dagster_queue\n\n .. code-block:: python\n\n from dagster_wandb.launch.ops import run_launch_agent\n from dagster_wandb.resources import wandb_resource\n\n from dagster import job, make_values_resource\n\n\n @job(\n resource_defs={\n "wandb_config": make_values_resource(\n entity=str,\n project=str,\n ),\n "wandb_resource": wandb_resource.configured(\n {"api_key": {"env": "WANDB_API_KEY"}}\n ),\n },\n )\n def run_launch_agent_example():\n run_launch_agent()\n\n """\n raise_on_invalid_config(context)\n config = {\n "entity": context.resources.wandb_config["entity"],\n "project": context.resources.wandb_config["project"],\n **context.op_config,\n }\n context.log.info(f"Launch agent configuration: {config}")\n context.log.info("Running Launch agent...")\n launch.create_and_run_agent(api=context.resources.wandb_resource["api"], config=config)
\n\n\n
[docs]@op(\n required_resource_keys={\n "wandb_resource",\n "wandb_config",\n },\n config_schema=launch_config(),\n)\ndef run_launch_job(context: OpExecutionContext):\n """Executes a Launch job.\n\n A Launch job is assigned to a queue in order to be executed. You can create a queue or use the\n default one. Make sure you have an active agent listening to that queue. You can run an agent\n inside your Dagster instance but can also consider using a deployable agent in Kubernetes.\n\n **Example:**\n\n .. code-block:: YAML\n\n # config.yaml\n\n resources:\n wandb_config:\n config:\n entity: my_entity\n project: my_project\n ops:\n my_launched_job:\n config:\n entry_point:\n - python\n - train.py\n queue: my_dagster_queue\n uri: https://github.com/wandb/example-dagster-integration-with-launch\n\n .. code-block:: python\n\n from dagster_wandb.launch.ops import run_launch_job\n from dagster_wandb.resources import wandb_resource\n\n from dagster import job, make_values_resource\n\n\n @job(\n resource_defs={\n "wandb_config": make_values_resource(\n entity=str,\n project=str,\n ),\n "wandb_resource": wandb_resource.configured(\n {"api_key": {"env": "WANDB_API_KEY"}}\n ),\n },\n )\n def run_launch_job_example():\n run_launch_job.alias("my_launched_job")() # we rename the job with an alias\n\n """\n raise_on_invalid_config(context)\n config = {\n "entity": context.resources.wandb_config["entity"],\n "project": context.resources.wandb_config["project"],\n **context.op_config,\n }\n context.log.info(f"Launch job configuration: {config}")\n\n queue = context.op_config.get("queue")\n if queue is None:\n context.log.info("No queue provided, running Launch job locally")\n launch.run(api=context.resources.wandb_resource["api"], config=config)\n else:\n synchronous = config.get("synchronous", True)\n config.pop("synchronous", None)\n queued_run = launch_add(**config)\n if synchronous is True:\n context.log.info(\n f"Synchronous Launch job added to queue with name={queue}. Waiting for"\n " completion..."\n )\n queued_run.wait_until_finished()\n else:\n context.log.info(f"Asynchronous Launch job added to queue with name={queue}")
\n
", "current_page_name": "_modules/dagster_wandb/launch/ops", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_wandb.launch.ops"}}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_wandb.resources

\nfrom typing import Any, Dict\n\nimport wandb\nfrom dagster import Field, InitResourceContext, String, StringSource, resource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom wandb.sdk.internal.internal_api import Api\n\nWANDB_CLOUD_HOST: str = "https://api.wandb.ai"\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n config_schema={\n "api_key": Field(\n StringSource,\n description="W&B API key necessary to communicate with the W&B API.",\n is_required=True,\n ),\n "host": Field(\n String,\n description=(\n "API host server you wish to use. Only required if you are using W&B Server."\n ),\n is_required=False,\n default_value=WANDB_CLOUD_HOST,\n ),\n },\n description="Resource for interacting with Weights & Biases",\n)\ndef wandb_resource(context: InitResourceContext) -> Dict[str, Any]:\n """Dagster resource used to communicate with the W&B API. It's useful when you want to use the\n wandb client within your ops and assets. It's a required resource if you are using the W&B IO\n Manager.\n\n It automatically authenticates using the provided API key.\n\n For a complete set of documentation, see `Dagster integration <https://docs.wandb.ai/guides/integrations/dagster>`_.\n\n To configure this resource, we recommend using the `configured\n <https://docs.dagster.io/concepts/configuration/configured>`_ method.\n\n **Example:**\n\n .. code-block:: python\n\n from dagster import job\n from dagster_wandb import wandb_resource\n\n my_wandb_resource = wandb_resource.configured({"api_key": {"env": "WANDB_API_KEY"}})\n\n @job(resource_defs={"wandb_resource": my_wandb_resource})\n def my_wandb_job():\n ...\n\n """\n api_key = context.resource_config["api_key"]\n host = context.resource_config["host"]\n wandb.login(\n key=api_key,\n host=host,\n anonymous="never",\n )\n client_settings = wandb.Settings(\n api_key=api_key,\n base_url=host,\n anonymous="never",\n launch=True,\n )\n api = Api(default_settings=client_settings, load_settings=False)\n return {"sdk": wandb, "api": api, "host": host}
\n
", "current_page_name": "_modules/dagster_wandb/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_wandb.resources"}, "types": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_wandb.types

\nimport sys\n\nif sys.version_info >= (3, 8):\n    from typing import TypedDict\nelse:\n    from typing_extensions import TypedDict\n\nfrom typing import Any, Dict, List\n\n\n
[docs]class SerializationModule(TypedDict, total=False):\n """Serialization module configuration for the W&B Artifacts IO Manager. Useful for type checking."""\n\n name: str\n parameters: Dict[str, Any]
\n\n\n
[docs]class WandbArtifactConfiguration(TypedDict, total=False):\n """W&B Artifacts IO Manager configuration. Useful for type checking."""\n\n name: str\n type: str\n description: str\n aliases: List[str]\n add_dirs: List[Dict[str, Any]]\n add_files: List[Dict[str, Any]]\n add_references: List[Dict[str, Any]]\n serialization_module: SerializationModule\n partitions: Dict[str, Dict[str, Any]]
\n
", "current_page_name": "_modules/dagster_wandb/types", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_wandb.types"}, "utils": {"errors": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_wandb.utils.errors

\n
[docs]class WandbArtifactsIOManagerError(Exception):\n """Represents an execution error of the W&B Artifacts IO Manager."""\n\n def __init__(self, message="A W&B Artifacts IO Manager error occurred."):\n self.message = message\n super().__init__(self.message)
\n\n\nSUPPORTED_READ_CONFIG_KEYS = [\n "alias",\n "get_path",\n "get",\n "name",\n "partitions",\n "version",\n]\nSUPPORTED_WRITE_CONFIG_KEYS = [\n "add_dirs",\n "add_files",\n "add_references",\n "aliases",\n "description",\n "name",\n "partitions",\n "serialization_module",\n "type",\n]\nSUPPORTED_PARTITION_CONFIG_KEYS = ["get", "get_path", "version", "alias"]\n\n\ndef raise_on_empty_configuration(partition_key, dictionary):\n if dictionary is not None and len(dictionary) == 0:\n raise WandbArtifactsIOManagerError(\n f"The configuration is empty for the partition identified by the key '{partition_key}'."\n " This happened within the 'wandb_artifact_configuration' metadata dictionary."\n )\n\n\ndef raise_on_unknown_keys(supported_config_keys, dictionary, is_read_config):\n if dictionary is None:\n return\n\n unsupported_keys = [key for key in dictionary.keys() if key not in supported_config_keys]\n if len(unsupported_keys) > 0:\n if is_read_config:\n raise WandbArtifactsIOManagerError(\n f"The configuration keys '{unsupported_keys}' you are trying to use are not"\n " supported within the 'wandb_artifact_configuration' metadata dictionary when"\n " reading an Artifact."\n )\n else:\n raise WandbArtifactsIOManagerError(\n f"The configuration keys '{unsupported_keys}' you are trying to use are not"\n " supported within the 'wandb_artifact_configuration' metadata dictionary when"\n " writing an Artifact."\n )\n\n\ndef raise_on_unknown_write_configuration_keys(dictionary):\n raise_on_unknown_keys(SUPPORTED_WRITE_CONFIG_KEYS, dictionary, False)\n\n\ndef raise_on_unknown_read_configuration_keys(dictionary):\n raise_on_unknown_keys(SUPPORTED_READ_CONFIG_KEYS, dictionary, True)\n\n\ndef raise_on_unknown_partition_keys(partition_key, dictionary):\n if dictionary is None:\n return\n\n unsupported_keys = [\n key for key in dictionary.keys() if key not in SUPPORTED_PARTITION_CONFIG_KEYS\n ]\n if len(unsupported_keys) > 0:\n raise WandbArtifactsIOManagerError(\n f"The configuration keys '{unsupported_keys}' you are trying to use are not supported"\n f" for the partition identified by the key '{partition_key}'. This happened within the"\n " 'wandb_artifact_configuration' metadata dictionary."\n )\n
", "current_page_name": "_modules/dagster_wandb/utils/errors", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_wandb.utils.errors"}}}, "dagstermill": {"asset_factory": {"alabaster_version": "0.7.13", "body": "

Source code for dagstermill.asset_factory

\nimport pickle\nimport tempfile\nfrom typing import Any, Callable, Iterable, Mapping, Optional, Set, Type, Union, cast\n\nimport dagster._check as check\nfrom dagster import (\n    AssetIn,\n    AssetKey,\n    AssetsDefinition,\n    Failure,\n    Output,\n    PartitionsDefinition,\n    ResourceDefinition,\n    RetryPolicy,\n    RetryRequested,\n    SourceAsset,\n    asset,\n)\nfrom dagster._config.pythonic_config import Config, infer_schema_from_config_class\nfrom dagster._config.pythonic_config.type_check_utils import safe_is_subclass\nfrom dagster._core.definitions.events import CoercibleToAssetKey, CoercibleToAssetKeyPrefix\nfrom dagster._core.definitions.utils import validate_tags\nfrom dagster._core.execution.context.compute import OpExecutionContext\n\nfrom dagstermill.factory import _clean_path_for_windows, execute_notebook\n\n\ndef _make_dagstermill_asset_compute_fn(\n    name: str,\n    notebook_path: str,\n    save_notebook_on_failure: bool,\n) -> Callable:\n    def _t_fn(context: OpExecutionContext, **inputs) -> Iterable:\n        check.param_invariant(\n            isinstance(context.run_config, dict),\n            "context",\n            "StepExecutionContext must have valid run_config",\n        )\n\n        with tempfile.TemporaryDirectory() as output_notebook_dir:\n            executed_notebook_path = execute_notebook(\n                context.get_step_execution_context(),\n                name=name,\n                inputs=inputs,\n                save_notebook_on_failure=save_notebook_on_failure,\n                notebook_path=notebook_path,\n                output_notebook_dir=output_notebook_dir,\n            )\n\n            with open(executed_notebook_path, "rb") as fd:\n                yield Output(fd.read())\n\n            # deferred import for perf\n            import scrapbook\n\n            output_nb = scrapbook.read_notebook(executed_notebook_path)\n\n            for key, value in output_nb.scraps.items():\n                if key.startswith("event-"):\n                    with open(value.data, "rb") as fd:\n                        event = pickle.loads(fd.read())\n                        if isinstance(event, (Failure, RetryRequested)):\n                            raise event\n                        else:\n                            yield event\n\n    return _t_fn\n\n\n
[docs]def define_dagstermill_asset(\n name: str,\n notebook_path: str,\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n ins: Optional[Mapping[str, AssetIn]] = None,\n deps: Optional[Iterable[Union[CoercibleToAssetKey, AssetsDefinition, SourceAsset]]] = None,\n metadata: Optional[Mapping[str, Any]] = None,\n config_schema: Optional[Union[Any, Mapping[str, Any]]] = None,\n required_resource_keys: Optional[Set[str]] = None,\n resource_defs: Optional[Mapping[str, ResourceDefinition]] = None,\n description: Optional[str] = None,\n partitions_def: Optional[PartitionsDefinition] = None,\n op_tags: Optional[Mapping[str, Any]] = None,\n group_name: Optional[str] = None,\n io_manager_key: Optional[str] = None,\n retry_policy: Optional[RetryPolicy] = None,\n save_notebook_on_failure: bool = False,\n non_argument_deps: Optional[Union[Set[AssetKey], Set[str]]] = None,\n) -> AssetsDefinition:\n """Creates a Dagster asset for a Jupyter notebook.\n\n Arguments:\n name (str): The name for the asset\n notebook_path (str): Path to the backing notebook\n key_prefix (Optional[Union[str, Sequence[str]]]): If provided, the asset's key is the\n concatenation of the key_prefix and the asset's name, which defaults to the name of\n the decorated function. Each item in key_prefix must be a valid name in dagster (ie only\n contains letters, numbers, and _) and may not contain python reserved keywords.\n ins (Optional[Mapping[str, AssetIn]]): A dictionary that maps input names to information\n about the input.\n deps (Optional[Sequence[Union[AssetsDefinition, SourceAsset, AssetKey, str]]]): The assets\n that are upstream dependencies, but do not pass an input value to the notebook.\n config_schema (Optional[ConfigSchema): The configuration schema for the asset's underlying\n op. If set, Dagster will check that config provided for the op matches this schema and fail\n if it does not. If not set, Dagster will accept any config provided for the op.\n metadata (Optional[Dict[str, Any]]): A dict of metadata entries for the asset.\n required_resource_keys (Optional[Set[str]]): Set of resource handles required by the notebook.\n description (Optional[str]): Description of the asset to display in the Dagster UI.\n partitions_def (Optional[PartitionsDefinition]): Defines the set of partition keys that\n compose the asset.\n op_tags (Optional[Dict[str, Any]]): A dictionary of tags for the op that computes the asset.\n Frameworks may expect and require certain metadata to be attached to a op. Values that\n are not strings will be json encoded and must meet the criteria that\n `json.loads(json.dumps(value)) == value`.\n group_name (Optional[str]): A string name used to organize multiple assets into groups. If not provided,\n the name "default" is used.\n resource_defs (Optional[Mapping[str, ResourceDefinition]]):\n (Experimental) A mapping of resource keys to resource definitions. These resources\n will be initialized during execution, and can be accessed from the\n context within the notebook.\n io_manager_key (Optional[str]): A string key for the IO manager used to store the output notebook.\n If not provided, the default key output_notebook_io_manager will be used.\n retry_policy (Optional[RetryPolicy]): The retry policy for the op that computes the asset.\n save_notebook_on_failure (bool): If True and the notebook fails during execution, the failed notebook will be\n written to the Dagster storage directory. 
The location of the file will be printed in the Dagster logs.\n Defaults to False.\n non_argument_deps (Optional[Union[Set[AssetKey], Set[str]]]): Deprecated, use deps instead. Set of asset keys that are\n upstream dependencies, but do not pass an input to the asset.\n\n Examples:\n .. code-block:: python\n\n from dagstermill import define_dagstermill_asset\n from dagster import asset, AssetIn, AssetKey\n from sklearn import datasets\n import pandas as pd\n import numpy as np\n\n @asset\n def iris_dataset():\n sk_iris = datasets.load_iris()\n return pd.DataFrame(\n data=np.c_[sk_iris["data"], sk_iris["target"]],\n columns=sk_iris["feature_names"] + ["target"],\n )\n\n iris_kmeans_notebook = define_dagstermill_asset(\n name="iris_kmeans_notebook",\n notebook_path="/path/to/iris_kmeans.ipynb",\n ins={\n "iris": AssetIn(key=AssetKey("iris_dataset"))\n }\n )\n """\n check.str_param(name, "name")\n check.str_param(notebook_path, "notebook_path")\n check.bool_param(save_notebook_on_failure, "save_notebook_on_failure")\n\n required_resource_keys = set(\n check.opt_set_param(required_resource_keys, "required_resource_keys", of_type=str)\n )\n ins = check.opt_mapping_param(ins, "ins", key_type=str, value_type=AssetIn)\n\n if isinstance(key_prefix, str):\n key_prefix = [key_prefix]\n\n key_prefix = check.opt_list_param(key_prefix, "key_prefix", of_type=str)\n\n default_description = f"This asset is backed by the notebook at {notebook_path}"\n description = check.opt_str_param(description, "description", default=default_description)\n\n io_mgr_key = check.opt_str_param(\n io_manager_key, "io_manager_key", default="output_notebook_io_manager"\n )\n\n user_tags = validate_tags(op_tags)\n if op_tags is not None:\n check.invariant(\n "notebook_path" not in op_tags,\n "user-defined op tags contains the `notebook_path` key, but the `notebook_path` key"\n " is reserved for use by Dagster",\n )\n check.invariant(\n "kind" not in op_tags,\n "user-defined op tags contains the `kind` key, but the `kind` key is reserved for"\n " use by Dagster",\n )\n\n default_tags = {"notebook_path": _clean_path_for_windows(notebook_path), "kind": "ipynb"}\n\n if safe_is_subclass(config_schema, Config):\n config_schema = infer_schema_from_config_class(cast(Type[Config], config_schema))\n\n return asset(\n name=name,\n key_prefix=key_prefix,\n ins=ins,\n deps=deps,\n metadata=metadata,\n description=description,\n config_schema=config_schema,\n required_resource_keys=required_resource_keys,\n resource_defs=resource_defs,\n partitions_def=partitions_def,\n op_tags={**user_tags, **default_tags},\n group_name=group_name,\n output_required=False,\n io_manager_key=io_mgr_key,\n retry_policy=retry_policy,\n non_argument_deps=non_argument_deps,\n )(\n _make_dagstermill_asset_compute_fn(\n name=name,\n notebook_path=notebook_path,\n save_notebook_on_failure=save_notebook_on_failure,\n )\n )
\n
", "current_page_name": "_modules/dagstermill/asset_factory", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagstermill.asset_factory"}, "context": {"alabaster_version": "0.7.13", "body": "

Source code for dagstermill.context

\nfrom typing import AbstractSet, Any, Mapping, Optional, cast\n\nfrom dagster import (\n    DagsterRun,\n    JobDefinition,\n    OpDefinition,\n    _check as check,\n)\nfrom dagster._annotations import public\nfrom dagster._core.definitions.dependency import Node, NodeHandle\nfrom dagster._core.execution.context.compute import AbstractComputeExecutionContext\nfrom dagster._core.execution.context.system import PlanExecutionContext, StepExecutionContext\nfrom dagster._core.log_manager import DagsterLogManager\nfrom dagster._core.system_config.objects import ResolvedRunConfig\n\n\n
[docs]class DagstermillExecutionContext(AbstractComputeExecutionContext):\n """Dagstermill-specific execution context.\n\n Do not initialize directly: use :func:`dagstermill.get_context`.\n """\n\n def __init__(\n self,\n job_context: PlanExecutionContext,\n job_def: JobDefinition,\n resource_keys_to_init: AbstractSet[str],\n op_name: str,\n node_handle: NodeHandle,\n op_config: Any = None,\n ):\n self._job_context = check.inst_param(job_context, "job_context", PlanExecutionContext)\n self._job_def = check.inst_param(job_def, "job_def", JobDefinition)\n self._resource_keys_to_init = check.set_param(\n resource_keys_to_init, "resource_keys_to_init", of_type=str\n )\n self.op_name = check.str_param(op_name, "op_name")\n self.node_handle = check.inst_param(node_handle, "node_handle", NodeHandle)\n self._op_config = op_config\n\n def has_tag(self, key: str) -> bool:\n """Check if a logging tag is defined on the context.\n\n Args:\n key (str): The key to check.\n\n Returns:\n bool\n """\n check.str_param(key, "key")\n return self._job_context.has_tag(key)\n\n def get_tag(self, key: str) -> Optional[str]:\n """Get a logging tag defined on the context.\n\n Args:\n key (str): The key to get.\n\n Returns:\n str\n """\n check.str_param(key, "key")\n return self._job_context.get_tag(key)\n\n @public\n @property\n def run_id(self) -> str:\n """str: The run_id for the context."""\n return self._job_context.run_id\n\n @public\n @property\n def run_config(self) -> Mapping[str, Any]:\n """dict: The run_config for the context."""\n return self._job_context.run_config\n\n @property\n def resolved_run_config(self) -> ResolvedRunConfig:\n """:class:`dagster.ResolvedRunConfig`: The resolved_run_config for the context."""\n return self._job_context.resolved_run_config\n\n @public\n @property\n def logging_tags(self) -> Mapping[str, str]:\n """dict: The logging tags for the context."""\n return self._job_context.logging_tags\n\n @public\n @property\n def job_name(self) -> str:\n """str: The name of the executing job."""\n return self._job_context.job_name\n\n @public\n @property\n def job_def(self) -> JobDefinition:\n """:class:`dagster.JobDefinition`: The job definition for the context.\n\n This will be a dagstermill-specific shim.\n """\n return self._job_def\n\n @property\n def resources(self) -> Any:\n """collections.namedtuple: A dynamically-created type whose properties allow access to\n resources.\n """\n return self._job_context.scoped_resources_builder.build(\n required_resource_keys=self._resource_keys_to_init,\n )\n\n @public\n @property\n def run(self) -> DagsterRun:\n """:class:`dagster.DagsterRun`: The job run for the context."""\n return cast(DagsterRun, self._job_context.dagster_run)\n\n @property\n def log(self) -> DagsterLogManager:\n """:class:`dagster.DagsterLogManager`: The log manager for the context.\n\n Call, e.g., ``log.info()`` to log messages through the Dagster machinery.\n """\n return self._job_context.log\n\n @public\n @property\n def op_def(self) -> OpDefinition:\n """:class:`dagster.OpDefinition`: The op definition for the context.\n\n In interactive contexts, this may be a dagstermill-specific shim, depending whether an\n op definition was passed to ``dagstermill.get_context``.\n """\n return cast(OpDefinition, self._job_def.node_def_named(self.op_name))\n\n @property\n def node(self) -> Node:\n """:class:`dagster.Node`: The node for the context.\n\n In interactive contexts, this may be a dagstermill-specific shim, depending whether an\n op definition was passed to 
``dagstermill.get_context``.\n """\n return self.job_def.get_node(self.node_handle)\n\n @public\n @property\n def op_config(self) -> Any:\n """collections.namedtuple: A dynamically-created type whose properties allow access to\n op-specific config.\n """\n if self._op_config:\n return self._op_config\n\n op_config = self.resolved_run_config.ops.get(self.op_name)\n return op_config.config if op_config else None
\n\n\nclass DagstermillRuntimeExecutionContext(DagstermillExecutionContext):\n def __init__(\n self,\n job_context: PlanExecutionContext,\n job_def: JobDefinition,\n resource_keys_to_init: AbstractSet[str],\n op_name: str,\n step_context: StepExecutionContext,\n node_handle: NodeHandle,\n op_config: Any = None,\n ):\n self._step_context = check.inst_param(step_context, "step_context", StepExecutionContext)\n super().__init__(\n job_context,\n job_def,\n resource_keys_to_init,\n op_name,\n node_handle,\n op_config,\n )\n\n @property\n def step_context(self) -> StepExecutionContext:\n return self._step_context\n
", "current_page_name": "_modules/dagstermill/context", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagstermill.context"}, "errors": {"alabaster_version": "0.7.13", "body": "

Source code for dagstermill.errors

\nfrom dagster._core.errors import DagsterError\n\n\n
[docs]class DagstermillError(DagsterError):\n """Base class for errors raised by dagstermill."""
\n
", "current_page_name": "_modules/dagstermill/errors", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagstermill.errors"}, "factory": {"alabaster_version": "0.7.13", "body": "

Source code for dagstermill.factory

\nimport copy\nimport os\nimport pickle\nimport sys\nimport tempfile\nimport uuid\nfrom typing import Any, Callable, Iterable, Mapping, Optional, Sequence, Set, Type, Union, cast\n\nimport nbformat\nimport papermill\nfrom dagster import (\n    In,\n    OpDefinition,\n    Out,\n    Output,\n    _check as check,\n    _seven,\n)\nfrom dagster._config.pythonic_config import Config, infer_schema_from_config_class\nfrom dagster._config.pythonic_config.type_check_utils import safe_is_subclass\nfrom dagster._core.definitions.events import AssetMaterialization, Failure, RetryRequested\nfrom dagster._core.definitions.metadata import MetadataValue\nfrom dagster._core.definitions.reconstruct import ReconstructableJob\nfrom dagster._core.definitions.utils import validate_tags\nfrom dagster._core.execution.context.compute import OpExecutionContext\nfrom dagster._core.execution.context.input import build_input_context\nfrom dagster._core.execution.context.system import StepExecutionContext\nfrom dagster._core.execution.plan.outputs import StepOutputHandle\nfrom dagster._serdes import pack_value\nfrom dagster._seven import get_system_temp_directory\nfrom dagster._utils import mkdir_p, safe_tempfile_path\nfrom dagster._utils.error import serializable_error_info_from_exc_info\nfrom papermill.engines import papermill_engines\nfrom papermill.iorw import load_notebook_node, write_ipynb\n\nfrom .compat import ExecutionError\nfrom .engine import DagstermillEngine\nfrom .errors import DagstermillError\nfrom .translator import DagsterTranslator\n\n\ndef _clean_path_for_windows(notebook_path: str) -> str:\n    """In windows, the notebook can't render in the Dagster UI unless the C: prefix is removed.\n    os.path.splitdrive will split the path into (drive, tail), so just return the tail.\n    """\n    return os.path.splitdrive(notebook_path)[1]\n\n\n# https://github.com/nteract/papermill/blob/17d4bbb3960c30c263bca835e48baf34322a3530/papermill/parameterize.py\ndef _find_first_tagged_cell_index(nb, tag):\n    parameters_indices = []\n    for idx, cell in enumerate(nb.cells):\n        if tag in cell.metadata.tags:\n            parameters_indices.append(idx)\n    if not parameters_indices:\n        return -1\n    return parameters_indices[0]\n\n\n# This is based on papermill.parameterize.parameterize_notebook\n# Typically, papermill injects the injected-parameters cell *below* the parameters cell\n# but we want to *replace* the parameters cell, which is what this function does.\ndef replace_parameters(context, nb, parameters):\n    """Assigned parameters into the appropriate place in the input notebook.\n\n    Args:\n        nb (NotebookNode): Executable notebook object\n        parameters (dict): Arbitrary keyword arguments to pass to the notebook parameters.\n    """\n    check.dict_param(parameters, "parameters")\n\n    # Copy the nb object to avoid polluting the input\n    nb = copy.deepcopy(nb)\n\n    # papermill method chooses translator based on kernel_name and language, but we just call the\n    # DagsterTranslator to generate parameter content based on the kernel_name\n    param_content = DagsterTranslator.codify(parameters)\n\n    newcell = nbformat.v4.new_code_cell(source=param_content)\n    newcell.metadata["tags"] = ["injected-parameters"]\n\n    param_cell_index = _find_first_tagged_cell_index(nb, "parameters")\n    injected_cell_index = _find_first_tagged_cell_index(nb, "injected-parameters")\n    if injected_cell_index >= 0:\n        # Replace the injected cell with a new version\n        before = 
nb.cells[:injected_cell_index]\n        after = nb.cells[injected_cell_index + 1 :]\n        check.int_value_param(param_cell_index, -1, "param_cell_index")\n        # We should have blown away the parameters cell if there is an injected-parameters cell\n    elif param_cell_index >= 0:\n        # Replace the parameter cell with the injected-parameters cell\n        before = nb.cells[:param_cell_index]\n        after = nb.cells[param_cell_index + 1 :]\n    else:\n        # Inject to the top of the notebook, presumably first cell includes dagstermill import\n        context.log.debug(\n            "Executing notebook with no tagged parameters cell: injecting boilerplate in first "\n            "cell."\n        )\n        before = []\n        after = nb.cells\n\n    nb.cells = before + [newcell] + after\n    nb.metadata.papermill["parameters"] = _seven.json.dumps(parameters)\n\n    return nb\n\n\ndef get_papermill_parameters(\n    step_context: StepExecutionContext,\n    inputs: Mapping[str, object],\n    output_log_path: str,\n    compute_descriptor: str,\n) -> Mapping[str, object]:\n    check.param_invariant(\n        isinstance(step_context.run_config, dict),\n        "step_context",\n        "StepExecutionContext must have valid run_config",\n    )\n\n    run_id = step_context.run_id\n    temp_dir = get_system_temp_directory()\n    marshal_dir = os.path.normpath(os.path.join(temp_dir, "dagstermill", str(run_id), "marshal"))\n    mkdir_p(marshal_dir)\n\n    if not isinstance(step_context.job, ReconstructableJob):\n        if compute_descriptor == "asset":\n            raise DagstermillError(\n                "Can't execute a dagstermill asset that is not reconstructable. "\n                "Use the reconstructable() function if executing from python"\n            )\n        else:\n            raise DagstermillError(\n                "Can't execute a dagstermill op from a job that is not reconstructable. 
"\n                "Use the reconstructable() function if executing from python"\n            )\n\n    dm_executable_dict = step_context.job.to_dict()\n\n    dm_context_dict = {\n        "output_log_path": output_log_path,\n        "marshal_dir": marshal_dir,\n        "run_config": step_context.run_config,\n    }\n\n    dm_node_handle_kwargs = step_context.node_handle._asdict()\n    dm_step_key = step_context.step.key\n\n    parameters = {}\n\n    parameters["__dm_context"] = dm_context_dict\n    parameters["__dm_executable_dict"] = dm_executable_dict\n    parameters["__dm_pipeline_run_dict"] = pack_value(step_context.dagster_run)\n    parameters["__dm_node_handle_kwargs"] = dm_node_handle_kwargs\n    parameters["__dm_instance_ref_dict"] = pack_value(step_context.instance.get_ref())\n    parameters["__dm_step_key"] = dm_step_key\n    parameters["__dm_input_names"] = list(inputs.keys())\n\n    return parameters\n\n\ndef execute_notebook(\n    step_context: StepExecutionContext,\n    name: str,\n    save_notebook_on_failure: bool,\n    notebook_path: str,\n    output_notebook_dir: str,\n    inputs: Mapping[str, object],\n) -> str:\n    with safe_tempfile_path() as output_log_path:\n        prefix = str(uuid.uuid4())\n        parameterized_notebook_path = os.path.join(output_notebook_dir, f"{prefix}-inter.ipynb")\n\n        executed_notebook_path = os.path.join(output_notebook_dir, f"{prefix}-out.ipynb")\n\n        # Scaffold the registration here\n        nb = load_notebook_node(notebook_path)\n        compute_descriptor = "op"\n        nb_no_parameters = replace_parameters(\n            step_context,\n            nb,\n            get_papermill_parameters(\n                step_context,\n                inputs,\n                output_log_path,\n                compute_descriptor,\n            ),\n        )\n        write_ipynb(nb_no_parameters, parameterized_notebook_path)\n\n        try:\n            papermill_engines.register("dagstermill", DagstermillEngine)\n            papermill.execute_notebook(\n                input_path=parameterized_notebook_path,\n                output_path=executed_notebook_path,\n                engine_name="dagstermill",\n                log_output=True,\n            )\n\n        except Exception as ex:\n            step_context.log.warn(\n                "Error when attempting to materialize executed notebook: {exc}".format(\n                    exc=str(serializable_error_info_from_exc_info(sys.exc_info()))\n                )\n            )\n\n            if isinstance(ex, ExecutionError):\n                exception_name = ex.ename  # type: ignore\n                if exception_name in ["RetryRequested", "Failure"]:\n                    step_context.log.warn(\n                        f"Encountered raised {exception_name} in notebook. 
Use"\n                        " dagstermill.yield_event with RetryRequested or Failure to trigger"\n                        " their behavior."\n                    )\n\n            if save_notebook_on_failure:\n                storage_dir = step_context.instance.storage_directory()\n                storage_path = os.path.join(storage_dir, f"{prefix}-out.ipynb")\n                with open(storage_path, "wb") as dest_file_obj:\n                    with open(executed_notebook_path, "rb") as obj:\n                        dest_file_obj.write(obj.read())\n\n                step_context.log.info(f"Failed notebook written to {storage_path}")\n\n            raise\n\n    step_context.log.debug(f"Notebook execution complete for {name} at {executed_notebook_path}.")\n\n    return executed_notebook_path\n\n\ndef _handle_events_from_notebook(\n    step_context: StepExecutionContext, executed_notebook_path: str\n) -> Iterable:\n    # deferred import for perf\n    import scrapbook\n\n    output_nb = scrapbook.read_notebook(executed_notebook_path)\n\n    for output_name in step_context.op_def.output_dict.keys():\n        data_dict = output_nb.scraps.data_dict\n        if output_name in data_dict:\n            # read outputs that were passed out of process via io manager from `yield_result`\n            step_output_handle = StepOutputHandle(\n                step_key=step_context.step.key,\n                output_name=output_name,\n            )\n            output_context = step_context.get_output_context(step_output_handle)\n            io_manager = step_context.get_io_manager(step_output_handle)\n            value = io_manager.load_input(\n                build_input_context(\n                    upstream_output=output_context, dagster_type=output_context.dagster_type\n                )\n            )\n\n            yield Output(value, output_name)\n\n    for key, value in output_nb.scraps.items():\n        if key.startswith("event-"):\n            with open(value.data, "rb") as fd:\n                event = pickle.loads(fd.read())\n                if isinstance(event, (Failure, RetryRequested)):\n                    raise event\n                else:\n                    yield event\n\n\ndef _make_dagstermill_compute_fn(\n    dagster_factory_name: str,\n    name: str,\n    notebook_path: str,\n    output_notebook_name: Optional[str] = None,\n    asset_key_prefix: Optional[Sequence[str]] = None,\n    output_notebook: Optional[str] = None,\n    save_notebook_on_failure: bool = False,\n) -> Callable:\n    def _t_fn(op_context: OpExecutionContext, inputs: Mapping[str, object]) -> Iterable:\n        check.param_invariant(\n            isinstance(op_context.run_config, dict),\n            "context",\n            "StepExecutionContext must have valid run_config",\n        )\n\n        step_context = op_context.get_step_execution_context()\n\n        with tempfile.TemporaryDirectory() as output_notebook_dir:\n            executed_notebook_path = execute_notebook(\n                step_context,\n                name=name,\n                inputs=inputs,\n                save_notebook_on_failure=save_notebook_on_failure,\n                notebook_path=notebook_path,\n                output_notebook_dir=output_notebook_dir,\n            )\n\n            if output_notebook_name is not None:\n                # yield output notebook binary stream as an op output\n                with open(executed_notebook_path, "rb") as fd:\n                    yield Output(fd.read(), output_notebook_name)\n\n            else:\n      
          # backcompat\n                executed_notebook_file_handle = None\n                try:\n                    # use binary mode when when moving the file since certain file_managers such as S3\n                    # may try to hash the contents\n                    with open(executed_notebook_path, "rb") as fd:\n                        executed_notebook_file_handle = op_context.resources.file_manager.write(\n                            fd, mode="wb", ext="ipynb"\n                        )\n                        executed_notebook_materialization_path = (\n                            executed_notebook_file_handle.path_desc\n                        )\n\n                    yield AssetMaterialization(\n                        asset_key=[*(asset_key_prefix or []), f"{name}_output_notebook"],\n                        description="Location of output notebook in file manager",\n                        metadata={\n                            "path": MetadataValue.path(executed_notebook_materialization_path),\n                        },\n                    )\n\n                except Exception:\n                    # if file manager writing errors, e.g. file manager is not provided, we throw a warning\n                    # and fall back to the previously stored temp executed notebook.\n                    op_context.log.warning(\n                        "Error when attempting to materialize executed notebook using file"\n                        " manager:"\n                        f" {serializable_error_info_from_exc_info(sys.exc_info())}\\nNow"\n                        " falling back to local: notebook execution was temporarily materialized"\n                        f" at {executed_notebook_path}\\nIf you have supplied a file manager and"\n                        " expect to use it for materializing the notebook, please include"\n                        ' "file_manager" in the `required_resource_keys` argument to'\n                        f" `{dagster_factory_name}`"\n                    )\n\n                if output_notebook is not None:\n                    yield Output(executed_notebook_file_handle, output_notebook)\n\n            yield from _handle_events_from_notebook(step_context, executed_notebook_path)\n\n    return _t_fn\n\n\n
[docs]def define_dagstermill_op(\n name: str,\n notebook_path: str,\n ins: Optional[Mapping[str, In]] = None,\n outs: Optional[Mapping[str, Out]] = None,\n config_schema: Optional[Union[Any, Mapping[str, Any]]] = None,\n required_resource_keys: Optional[Set[str]] = None,\n output_notebook_name: Optional[str] = None,\n asset_key_prefix: Optional[Union[Sequence[str], str]] = None,\n description: Optional[str] = None,\n tags: Optional[Mapping[str, Any]] = None,\n io_manager_key: Optional[str] = None,\n save_notebook_on_failure: bool = False,\n) -> OpDefinition:\n """Wrap a Jupyter notebook in a op.\n\n Arguments:\n name (str): The name of the op.\n notebook_path (str): Path to the backing notebook.\n ins (Optional[Mapping[str, In]]): The op's inputs.\n outs (Optional[Mapping[str, Out]]): The op's outputs. Your notebook should\n call :py:func:`~dagstermill.yield_result` to yield each of these outputs.\n required_resource_keys (Optional[Set[str]]): The string names of any required resources.\n output_notebook_name: (Optional[str]): If set, will be used as the name of an injected output\n of type of :py:class:`~dagster.BufferedIOBase` that is the file object of the executed\n notebook (in addition to the :py:class:`~dagster.AssetMaterialization` that is always\n created). It allows the downstream ops to access the executed notebook via a file\n object.\n asset_key_prefix (Optional[Union[List[str], str]]): If set, will be used to prefix the\n asset keys for materialized notebooks.\n description (Optional[str]): If set, description used for op.\n tags (Optional[Dict[str, str]]): If set, additional tags used to annotate op.\n Dagster uses the tag keys `notebook_path` and `kind`, which cannot be\n overwritten by the user.\n io_manager_key (Optional[str]): If using output_notebook_name, you can additionally provide\n a string key for the IO manager used to store the output notebook.\n If not provided, the default key output_notebook_io_manager will be used.\n save_notebook_on_failure (bool): If True and the notebook fails during execution, the failed notebook will be\n written to the Dagster storage directory. 
The location of the file will be printed in the Dagster logs.\n Defaults to False.\n\n Returns:\n :py:class:`~dagster.OpDefinition`\n """\n check.str_param(name, "name")\n check.str_param(notebook_path, "notebook_path")\n check.bool_param(save_notebook_on_failure, "save_notebook_on_failure")\n\n required_resource_keys = set(\n check.opt_set_param(required_resource_keys, "required_resource_keys", of_type=str)\n )\n outs = check.opt_mapping_param(outs, "outs", key_type=str, value_type=Out)\n ins = check.opt_mapping_param(ins, "ins", key_type=str, value_type=In)\n\n if output_notebook_name is not None:\n io_mgr_key = check.opt_str_param(\n io_manager_key, "io_manager_key", default="output_notebook_io_manager"\n )\n required_resource_keys.add(io_mgr_key)\n outs = {\n **outs,\n cast(str, output_notebook_name): Out(io_manager_key=io_mgr_key),\n }\n\n if isinstance(asset_key_prefix, str):\n asset_key_prefix = [asset_key_prefix]\n\n asset_key_prefix = check.opt_list_param(asset_key_prefix, "asset_key_prefix", of_type=str)\n\n default_description = f"This op is backed by the notebook at {notebook_path}"\n description = check.opt_str_param(description, "description", default=default_description)\n\n user_tags = validate_tags(tags)\n if tags is not None:\n check.invariant(\n "notebook_path" not in tags,\n "user-defined op tags contains the `notebook_path` key, but the `notebook_path` key"\n " is reserved for use by Dagster",\n )\n check.invariant(\n "kind" not in tags,\n "user-defined op tags contains the `kind` key, but the `kind` key is reserved for"\n " use by Dagster",\n )\n default_tags = {"notebook_path": _clean_path_for_windows(notebook_path), "kind": "ipynb"}\n\n if safe_is_subclass(config_schema, Config):\n config_schema = infer_schema_from_config_class(cast(Type[Config], config_schema))\n\n return OpDefinition(\n name=name,\n compute_fn=_make_dagstermill_compute_fn(\n "define_dagstermill_op",\n name,\n notebook_path,\n output_notebook_name,\n asset_key_prefix=asset_key_prefix,\n save_notebook_on_failure=save_notebook_on_failure,\n ),\n ins=ins,\n outs=outs,\n config_schema=config_schema,\n required_resource_keys=required_resource_keys,\n description=description,\n tags={**user_tags, **default_tags},\n )
\n
", "current_page_name": "_modules/dagstermill/factory", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagstermill.factory"}, "io_managers": {"alabaster_version": "0.7.13", "body": "

Source code for dagstermill.io_managers

\nimport os\nfrom pathlib import Path\nfrom typing import Any, List, Optional, Sequence\n\nimport dagster._check as check\nfrom dagster import (\n    AssetKey,\n    AssetMaterialization,\n    ConfigurableIOManagerFactory,\n    InitResourceContext,\n    IOManager,\n)\nfrom dagster._core.definitions.metadata import MetadataValue\nfrom dagster._core.execution.context.input import InputContext\nfrom dagster._core.execution.context.output import OutputContext\nfrom dagster._core.storage.io_manager import dagster_maintained_io_manager, io_manager\nfrom dagster._utils import mkdir_p\nfrom pydantic import Field\n\nfrom dagstermill.factory import _clean_path_for_windows\n\n\nclass OutputNotebookIOManager(IOManager):\n    def __init__(self, asset_key_prefix: Optional[Sequence[str]] = None):\n        self.asset_key_prefix = asset_key_prefix if asset_key_prefix else []\n\n    def handle_output(self, context: OutputContext, obj: bytes):\n        raise NotImplementedError\n\n    def load_input(self, context: InputContext) -> Any:\n        raise NotImplementedError\n\n\nclass LocalOutputNotebookIOManager(OutputNotebookIOManager):\n    def __init__(self, base_dir: str, asset_key_prefix: Optional[Sequence[str]] = None):\n        super(LocalOutputNotebookIOManager, self).__init__(asset_key_prefix=asset_key_prefix)\n        self.base_dir = base_dir\n        self.write_mode = "wb"\n        self.read_mode = "rb"\n\n    def _get_path(self, context: OutputContext) -> str:\n        """Automatically construct filepath."""\n        if context.has_asset_key:\n            keys = context.get_asset_identifier()\n        else:\n            keys = context.get_run_scoped_output_identifier()\n        return str(Path(self.base_dir, *keys).with_suffix(".ipynb"))\n\n    def handle_output(self, context: OutputContext, obj: bytes):\n        """obj: bytes."""\n        check.inst_param(context, "context", OutputContext)\n\n        # the output notebook itself is stored at output_file_path\n        output_notebook_path = self._get_path(context)\n        mkdir_p(os.path.dirname(output_notebook_path))\n        with open(output_notebook_path, self.write_mode) as dest_file_obj:\n            dest_file_obj.write(obj)\n\n        metadata = {\n            "Executed notebook": MetadataValue.notebook(\n                _clean_path_for_windows(output_notebook_path)\n            )\n        }\n\n        if context.has_asset_key:\n            context.add_output_metadata(metadata)\n        else:\n            context.log_event(\n                AssetMaterialization(\n                    asset_key=AssetKey(\n                        [*self.asset_key_prefix, f"{context.step_key}_output_notebook"]\n                    ),\n                    metadata=metadata,\n                )\n            )\n\n    def load_input(self, context: InputContext) -> bytes:\n        check.inst_param(context, "context", InputContext)\n        # pass output notebook to downstream ops as File Object\n        output_context = check.not_none(context.upstream_output)\n        with open(self._get_path(output_context), self.read_mode) as file_obj:\n            return file_obj.read()\n\n\n
[docs]class ConfigurableLocalOutputNotebookIOManager(ConfigurableIOManagerFactory):\n """Built-in IO Manager for handling output notebook."""\n\n base_dir: Optional[str] = Field(\n default=None,\n description=(\n "Base directory to use for output notebooks. Defaults to the Dagster instance storage"\n " directory if not provided."\n ),\n )\n asset_key_prefix: List[str] = Field(\n default=[],\n description=(\n "Asset key prefix to apply to assets materialized for output notebooks. Defaults to no"\n " prefix."\n ),\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def create_io_manager(self, context: InitResourceContext) -> "LocalOutputNotebookIOManager":\n return LocalOutputNotebookIOManager(\n base_dir=self.base_dir or check.not_none(context.instance).storage_directory(),\n asset_key_prefix=self.asset_key_prefix,\n )
\n\n\n@dagster_maintained_io_manager\n@io_manager(config_schema=ConfigurableLocalOutputNotebookIOManager.to_config_schema())\ndef local_output_notebook_io_manager(init_context) -> LocalOutputNotebookIOManager:\n """Built-in IO Manager that handles output notebooks."""\n return ConfigurableLocalOutputNotebookIOManager.from_resource_context(init_context)\n
", "current_page_name": "_modules/dagstermill/io_managers", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagstermill.io_managers"}, "manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagstermill.manager

\nimport os\nimport pickle\nimport uuid\nfrom typing import TYPE_CHECKING, AbstractSet, Any, Mapping, Optional, cast\n\nfrom dagster import (\n    AssetMaterialization,\n    AssetObservation,\n    ExpectationResult,\n    Failure,\n    LoggerDefinition,\n    ResourceDefinition,\n    StepExecutionContext,\n    TypeCheck,\n    _check as check,\n)\nfrom dagster._core.definitions.dependency import NodeHandle\nfrom dagster._core.definitions.events import RetryRequested\nfrom dagster._core.definitions.graph_definition import GraphDefinition\nfrom dagster._core.definitions.job_base import InMemoryJob\nfrom dagster._core.definitions.job_definition import JobDefinition\nfrom dagster._core.definitions.op_definition import OpDefinition\nfrom dagster._core.definitions.reconstruct import ReconstructableJob\nfrom dagster._core.definitions.resource_definition import ScopedResourcesBuilder\nfrom dagster._core.events import DagsterEvent\nfrom dagster._core.execution.api import create_execution_plan, scoped_job_context\nfrom dagster._core.execution.plan.outputs import StepOutputHandle\nfrom dagster._core.execution.plan.plan import ExecutionPlan\nfrom dagster._core.execution.plan.state import KnownExecutionState\nfrom dagster._core.execution.plan.step import ExecutionStep\nfrom dagster._core.execution.resources_init import (\n    get_required_resource_keys_to_init,\n    resource_initialization_event_generator,\n)\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._core.instance.ref import InstanceRef\nfrom dagster._core.log_manager import DagsterLogManager\nfrom dagster._core.storage.dagster_run import DagsterRun, DagsterRunStatus\nfrom dagster._core.system_config.objects import ResolvedRunConfig, ResourceConfig\nfrom dagster._core.utils import make_new_run_id\nfrom dagster._loggers import colored_console_logger\nfrom dagster._serdes import unpack_value\nfrom dagster._utils import EventGenerationManager\n\nfrom .context import DagstermillExecutionContext, DagstermillRuntimeExecutionContext\nfrom .errors import DagstermillError\nfrom .serialize import PICKLE_PROTOCOL\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.node_definition import NodeDefinition\n\n\nclass DagstermillResourceEventGenerationManager(EventGenerationManager):\n    """Utility class to explicitly manage setup/teardown of resource events. 
Overrides the default\n    `generate_teardown_events` method so that teardown is deferred until explicitly called by the\n    dagstermill Manager.\n    """\n\n    def generate_teardown_events(self):\n        return iter(())\n\n    def teardown(self):\n        return [\n            teardown_event\n            for teardown_event in super(\n                DagstermillResourceEventGenerationManager, self\n            ).generate_teardown_events()\n        ]\n\n\nclass Manager:\n    def __init__(self):\n        self.job = None\n        self.op_def: Optional[NodeDefinition] = None\n        self.in_job: bool = False\n        self.marshal_dir: Optional[str] = None\n        self.context = None\n        self.resource_manager = None\n\n    def _setup_resources(\n        self,\n        resource_defs: Mapping[str, ResourceDefinition],\n        resource_configs: Mapping[str, ResourceConfig],\n        log_manager: DagsterLogManager,\n        execution_plan: Optional[ExecutionPlan],\n        dagster_run: Optional[DagsterRun],\n        resource_keys_to_init: Optional[AbstractSet[str]],\n        instance: Optional[DagsterInstance],\n        emit_persistent_events: Optional[bool],\n    ):\n        """Drop-in replacement for\n        `dagster._core.execution.resources_init.resource_initialization_manager`.  It uses a\n        `DagstermillResourceEventGenerationManager` and explicitly calls `teardown` on it.\n        """\n        generator = resource_initialization_event_generator(\n            resource_defs=resource_defs,\n            resource_configs=resource_configs,\n            log_manager=log_manager,\n            execution_plan=execution_plan,\n            dagster_run=dagster_run,\n            resource_keys_to_init=resource_keys_to_init,\n            instance=instance,\n            emit_persistent_events=emit_persistent_events,\n        )\n        self.resource_manager = DagstermillResourceEventGenerationManager(\n            generator, ScopedResourcesBuilder\n        )\n        return self.resource_manager\n\n    def reconstitute_job_context(\n        self,\n        executable_dict: Mapping[str, Any],\n        job_run_dict: Mapping[str, Any],\n        node_handle_kwargs: Mapping[str, Any],\n        instance_ref_dict: Mapping[str, Any],\n        step_key: str,\n        output_log_path: Optional[str] = None,\n        marshal_dir: Optional[str] = None,\n        run_config: Optional[Mapping[str, Any]] = None,\n    ):\n        """Reconstitutes a context for dagstermill-managed execution.\n\n        You'll see this function called to reconstruct a job context within the ``injected\n        parameters`` cell of a dagstermill output notebook. Users should not call this function\n        interactively except when debugging output notebooks.\n\n        Use :func:`dagstermill.get_context` in the ``parameters`` cell of your notebook to define a\n        context for interactive exploration and development. 
This call will be replaced by one to\n        :func:`dagstermill.reconstitute_job_context` when the notebook is executed by\n        dagstermill.\n        """\n        check.opt_str_param(output_log_path, "output_log_path")\n        check.opt_str_param(marshal_dir, "marshal_dir")\n        run_config = check.opt_mapping_param(run_config, "run_config", key_type=str)\n        check.mapping_param(job_run_dict, "job_run_dict")\n        check.mapping_param(executable_dict, "executable_dict")\n        check.mapping_param(node_handle_kwargs, "node_handle_kwargs")\n        check.mapping_param(instance_ref_dict, "instance_ref_dict")\n        check.str_param(step_key, "step_key")\n\n        job = ReconstructableJob.from_dict(executable_dict)\n        job_def = job.get_definition()\n\n        try:\n            instance_ref = unpack_value(instance_ref_dict, InstanceRef)\n            instance = DagsterInstance.from_ref(instance_ref)\n        except Exception as err:\n            raise DagstermillError(\n                "Error when attempting to resolve DagsterInstance from serialized InstanceRef"\n            ) from err\n\n        dagster_run = unpack_value(job_run_dict, DagsterRun)\n\n        node_handle = NodeHandle.from_dict(node_handle_kwargs)\n        op = job_def.get_node(node_handle)\n        op_def = op.definition\n\n        self.marshal_dir = marshal_dir\n        self.in_job = True\n        self.op_def = op_def\n        self.job = job\n\n        ResolvedRunConfig.build(job_def, run_config)\n\n        execution_plan = create_execution_plan(\n            self.job,\n            run_config,\n            step_keys_to_execute=dagster_run.step_keys_to_execute,\n        )\n\n        with scoped_job_context(\n            execution_plan,\n            job,\n            run_config,\n            dagster_run,\n            instance,\n            scoped_resources_builder_cm=self._setup_resources,\n            # Set this flag even though we're not in test for clearer error reporting\n            raise_on_error=True,\n        ) as job_context:\n            known_state = None\n            if dagster_run.parent_run_id:\n                known_state = KnownExecutionState.build_for_reexecution(\n                    instance=instance,\n                    parent_run=check.not_none(instance.get_run_by_id(dagster_run.parent_run_id)),\n                )\n            self.context = DagstermillRuntimeExecutionContext(\n                job_context=job_context,\n                job_def=job_def,\n                op_config=run_config.get("ops", {}).get(op.name, {}).get("config"),\n                resource_keys_to_init=get_required_resource_keys_to_init(\n                    execution_plan,\n                    job_def,\n                ),\n                op_name=op.name,\n                node_handle=node_handle,\n                step_context=cast(\n                    StepExecutionContext,\n                    job_context.for_step(\n                        cast(ExecutionStep, execution_plan.get_step_by_key(step_key)),\n                        known_state=known_state,\n                    ),\n                ),\n            )\n\n        return self.context\n\n    def get_context(\n        self,\n        op_config: Any = None,\n        resource_defs: Optional[Mapping[str, ResourceDefinition]] = None,\n        logger_defs: Optional[Mapping[str, LoggerDefinition]] = None,\n        run_config: Optional[dict] = None,\n    ) -> DagstermillExecutionContext:\n        """Get a dagstermill execution context for interactive exploration 
and development.\n\n        Args:\n            op_config (Optional[Any]): If specified, this value will be made available on the\n                context as its ``op_config`` property.\n            resource_defs (Optional[Mapping[str, ResourceDefinition]]): Specifies resources to provide to context.\n            logger_defs (Optional[Mapping[str, LoggerDefinition]]): Specifies loggers to provide to context.\n            run_config(Optional[dict]): The config dict with which to construct\n                the context.\n\n        Returns:\n            :py:class:`~dagstermill.DagstermillExecutionContext`\n        """\n        run_config = check.opt_dict_param(run_config, "run_config", key_type=str)\n\n        # If we are running non-interactively, and there is already a context reconstituted, return\n        # that context rather than overwriting it.\n        if self.context is not None and isinstance(\n            self.context, DagstermillRuntimeExecutionContext\n        ):\n            return self.context\n\n        if not logger_defs:\n            logger_defs = {"dagstermill": colored_console_logger}\n            run_config["loggers"] = {"dagstermill": {}}\n        logger_defs = check.opt_mapping_param(logger_defs, "logger_defs")\n        resource_defs = check.opt_mapping_param(resource_defs, "resource_defs")\n\n        op_def = OpDefinition(\n            name="this_op",\n            compute_fn=lambda *args, **kwargs: None,\n            description="Ephemeral op constructed by dagstermill.get_context()",\n            required_resource_keys=set(resource_defs.keys()),\n        )\n\n        job_def = JobDefinition(\n            graph_def=GraphDefinition(name="ephemeral_dagstermill_pipeline", node_defs=[op_def]),\n            logger_defs=logger_defs,\n            resource_defs=resource_defs,\n        )\n\n        run_id = make_new_run_id()\n\n        # construct stubbed DagsterRun for notebook exploration...\n        # The actual dagster run during job execution will be serialized and reconstituted\n        # in the `reconstitute_job_context` call\n        dagster_run = DagsterRun(\n            job_name=job_def.name,\n            run_id=run_id,\n            run_config=run_config,\n            step_keys_to_execute=None,\n            status=DagsterRunStatus.NOT_STARTED,\n            tags=None,\n        )\n\n        self.in_job = False\n        self.op_def = op_def\n        self.job = job_def\n\n        job = InMemoryJob(job_def)\n        execution_plan = create_execution_plan(job, run_config)\n\n        with scoped_job_context(\n            execution_plan,\n            job,\n            run_config,\n            dagster_run,\n            DagsterInstance.ephemeral(),\n            scoped_resources_builder_cm=self._setup_resources,\n        ) as job_context:\n            self.context = DagstermillExecutionContext(\n                job_context=job_context,\n                job_def=job_def,\n                op_config=op_config,\n                resource_keys_to_init=get_required_resource_keys_to_init(\n                    execution_plan,\n                    job_def,\n                ),\n                op_name=op_def.name,\n                node_handle=NodeHandle(op_def.name, parent=None),\n            )\n\n        return self.context\n\n    def yield_result(self, value, output_name="result"):\n        """Yield a result directly from notebook code.\n\n        When called interactively or in development, returns its input.\n\n        Args:\n            value (Any): The value to yield.\n            
output_name (Optional[str]): The name of the result to yield (default: ``'result'``).\n        """\n        if not self.in_job:\n            return value\n\n        # deferred import for perf\n        import scrapbook\n\n        if not self.op_def.has_output(output_name):\n            raise DagstermillError(\n                f"Op {self.op_def.name} does not have output named {output_name}.Expected one of"\n                f" {[str(output_def.name) for output_def in self.op_def.output_defs]}"\n            )\n\n        # pass output value cross process boundary using io manager\n        step_context = self.context._step_context  # noqa: SLF001\n        # Note: yield_result currently does not support DynamicOutput\n\n        # dagstermill assets do not support yielding additional results within the notebook:\n        if len(step_context.job_def.asset_layer.asset_keys) > 0:\n            raise DagstermillError(\n                "dagstermill assets do not currently support dagstermill.yield_result"\n            )\n\n        step_output_handle = StepOutputHandle(\n            step_key=step_context.step.key, output_name=output_name\n        )\n        output_context = step_context.get_output_context(step_output_handle)\n        io_manager = step_context.get_io_manager(step_output_handle)\n\n        # Note that we assume io manager is symmetric, i.e handle_input(handle_output(X)) == X\n        io_manager.handle_output(output_context, value)\n\n        # record that the output has been yielded\n        scrapbook.glue(output_name, "")\n\n    def yield_event(self, dagster_event):\n        """Yield a dagster event directly from notebook code.\n\n        When called interactively or in development, returns its input.\n\n        Args:\n            dagster_event (Union[:class:`dagster.AssetMaterialization`, :class:`dagster.ExpectationResult`, :class:`dagster.TypeCheck`, :class:`dagster.Failure`, :class:`dagster.RetryRequested`]):\n                An event to yield back to Dagster.\n        """\n        valid_types = (\n            AssetMaterialization,\n            AssetObservation,\n            ExpectationResult,\n            TypeCheck,\n            Failure,\n            RetryRequested,\n        )\n        if not isinstance(dagster_event, valid_types):\n            raise DagstermillError(\n                f"Received invalid type {dagster_event} in yield_event. 
Expected a Dagster event"\n                f" type, one of {valid_types}."\n            )\n\n        if not self.in_job:\n            return dagster_event\n\n        # deferred import for perf\n        import scrapbook\n\n        event_id = f"event-{uuid.uuid4()}"\n        out_file_path = os.path.join(self.marshal_dir, event_id)\n        with open(out_file_path, "wb") as fd:\n            fd.write(pickle.dumps(dagster_event, PICKLE_PROTOCOL))\n\n        scrapbook.glue(event_id, out_file_path)\n\n    def teardown_resources(self):\n        if self.resource_manager is not None:\n            self.resource_manager.teardown()\n\n    def load_input_parameter(self, input_name: str):\n        # load input from source\n        dm_context = check.not_none(self.context)\n        if not isinstance(dm_context, DagstermillRuntimeExecutionContext):\n            check.failed("Expected DagstermillRuntimeExecutionContext")\n        step_context = dm_context.step_context\n        step_input = step_context.step.step_input_named(input_name)\n        input_def = step_context.op_def.input_def_named(input_name)\n        for event_or_input_value in step_input.source.load_input_object(step_context, input_def):\n            if isinstance(event_or_input_value, DagsterEvent):\n                continue\n            else:\n                return event_or_input_value\n\n\nMANAGER_FOR_NOTEBOOK_INSTANCE = Manager()\n
", "current_page_name": "_modules/dagstermill/manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagstermill.manager"}}} \ No newline at end of file +{"": {"dagster_pandera": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_pandera

\nimport itertools\nimport re\nfrom typing import TYPE_CHECKING, Callable, Sequence, Type, Union\n\nimport dagster._check as check\nimport pandas as pd\nimport pandera as pa\nfrom dagster import (\n    DagsterType,\n    TableColumn,\n    TableColumnConstraints,\n    TableConstraints,\n    TableSchema,\n    TypeCheck,\n    TypeCheckContext,\n)\nfrom dagster._core.definitions.metadata import MetadataValue\nfrom dagster._core.libraries import DagsterLibraryRegistry\n\nfrom .version import __version__\n\n# NOTE: Pandera supports multiple dataframe libraries. Most of the alternatives\n# to pandas implement a pandas-like API wrapper around an underlying library\n# that can handle big data (a weakness of pandas). Typically this means the\n# data is only partly loaded into memory, or is distributed across multiple\n# nodes. Because Dagster types perform runtime validation within a single\n# Python process, it's not clear at present how to interface the more complex\n# validation computations on distributed dataframes with Dagster Types.\n\n# Therefore, for the time being dagster-pandera only supports pandas dataframes.\n# However, some commented-out scaffolding has been left in place for support of\n# alternatives in the future. These sections are marked with "TODO: pending\n# alternative dataframe support".\n\nif TYPE_CHECKING:\n    ValidatableDataFrame = pd.DataFrame\n\nDagsterLibraryRegistry.register("dagster-pandera", __version__)\n\n# ########################\n# ##### VALID DATAFRAME CLASSES\n# ########################\n\n# This layer of indirection is used because we may support alternative dataframe classes in the\n# future.\nVALID_DATAFRAME_CLASSES = (pd.DataFrame,)\n\n\n# ########################\n# ##### PANDERA SCHEMA TO DAGSTER TYPE\n# ########################\n\n\n
[docs]def pandera_schema_to_dagster_type(\n schema: Union[pa.DataFrameSchema, Type[pa.SchemaModel]],\n) -> DagsterType:\n """Convert a Pandera dataframe schema to a `DagsterType`.\n\n The generated Dagster type will be given an automatically generated `name`. The schema's `title`\n property, `name` property, or class name (in that order) will be used. If neither `title` or\n `name` is defined, a name of the form `DagsterPanderaDataframe<n>` is generated.\n\n Additional metadata is also extracted from the Pandera schema and attached to the returned\n `DagsterType` as a metadata dictionary. The extracted metadata includes:\n\n - Descriptions on the schema and constituent columns and checks.\n - Data types for each column.\n - String representations of all column-wise checks.\n - String representations of all row-wise (i.e. "wide") checks.\n\n The returned `DagsterType` type will call the Pandera schema's `validate()` method in its type\n check function. Validation is done in `lazy` mode, i.e. pandera will attempt to validate all\n values in the dataframe, rather than stopping on the first error.\n\n If validation fails, the returned `TypeCheck` object will contain two pieces of metadata:\n\n - `num_failures` total number of validation errors.\n - `failure_sample` a table containing up to the first 10 validation errors.\n\n Args:\n schema (Union[pa.DataFrameSchema, Type[pa.SchemaModel]]):\n\n Returns:\n DagsterType: Dagster Type constructed from the Pandera schema.\n\n """\n if not (\n isinstance(schema, pa.DataFrameSchema)\n or (isinstance(schema, type) and issubclass(schema, pa.SchemaModel))\n ):\n raise TypeError(\n "schema must be a pandera `DataFrameSchema` or a subclass of a pandera `SchemaModel`"\n )\n\n name = _extract_name_from_pandera_schema(schema)\n norm_schema = (\n schema.to_schema()\n if isinstance(schema, type) and issubclass(schema, pa.SchemaModel)\n else schema\n )\n tschema = _pandera_schema_to_table_schema(norm_schema)\n type_check_fn = _pandera_schema_to_type_check_fn(norm_schema, tschema)\n\n return DagsterType(\n type_check_fn=type_check_fn,\n name=name,\n description=norm_schema.description,\n metadata={\n "schema": MetadataValue.table_schema(tschema),\n },\n typing_type=pd.DataFrame,\n )
\n\n\n# call next() on this to generate next unique Dagster Type name for anonymous schemas\n_anonymous_schema_name_generator = (f"DagsterPanderaDataframe{i}" for i in itertools.count(start=1))\n\n\ndef _extract_name_from_pandera_schema(\n schema: Union[pa.DataFrameSchema, Type[pa.SchemaModel]],\n) -> str:\n if isinstance(schema, type) and issubclass(schema, pa.SchemaModel):\n return (\n getattr(schema.Config, "title", None)\n or getattr(schema.Config, "name", None)\n or schema.__name__\n )\n elif isinstance(schema, pa.DataFrameSchema):\n return schema.title or schema.name or next(_anonymous_schema_name_generator)\n\n\ndef _pandera_schema_to_type_check_fn(\n schema: pa.DataFrameSchema,\n table_schema: TableSchema,\n) -> Callable[[TypeCheckContext, object], TypeCheck]:\n def type_check_fn(_context, value: object) -> TypeCheck:\n if isinstance(value, VALID_DATAFRAME_CLASSES):\n try:\n # `lazy` instructs pandera to capture every (not just the first) validation error\n schema.validate(value, lazy=True)\n except pa.errors.SchemaErrors as e:\n return _pandera_errors_to_type_check(e, table_schema)\n except Exception as e:\n return TypeCheck(\n success=False,\n description=f"Unexpected error during validation: {e}",\n )\n else:\n return TypeCheck(\n success=False,\n description=(\n f"Must be one of {VALID_DATAFRAME_CLASSES}, not {type(value).__name__}."\n ),\n )\n\n return TypeCheck(success=True)\n\n return type_check_fn\n\n\nPANDERA_FAILURE_CASES_SCHEMA = TableSchema(\n columns=[\n TableColumn(\n name="schema_context",\n type="string",\n description="`Column` for column-wise checks, or `DataFrameSchema`",\n ),\n TableColumn(\n name="column",\n type="string",\n description="Column of value that failed the check, or `None` for wide checks.",\n ),\n TableColumn(\n name="check", type="string", description="Description of the failed Pandera check."\n ),\n TableColumn(name="check_number", description="Index of the failed check."),\n TableColumn(\n name="failure_case", type="number | string", description="Value that failed a check."\n ),\n TableColumn(\n name="index",\n type="number | string",\n description="Index (row) of value that failed a check.",\n ),\n ]\n)\n\n\ndef _pandera_errors_to_type_check(\n error: pa.errors.SchemaErrors, _table_schema: TableSchema\n) -> TypeCheck:\n return TypeCheck(\n success=False,\n description=str(error),\n )\n\n\ndef _pandera_schema_to_table_schema(schema: pa.DataFrameSchema) -> TableSchema:\n df_constraints = _pandera_schema_wide_checks_to_table_constraints(schema.checks)\n columns = [_pandera_column_to_table_column(col) for k, col in schema.columns.items()]\n return TableSchema(columns=columns, constraints=df_constraints)\n\n\ndef _pandera_schema_wide_checks_to_table_constraints(\n checks: Sequence[Union[pa.Check, pa.Hypothesis]]\n) -> TableConstraints:\n return TableConstraints(other=[_pandera_check_to_table_constraint(check) for check in checks])\n\n\ndef _pandera_check_to_table_constraint(pa_check: Union[pa.Check, pa.Hypothesis]) -> str:\n return _get_pandera_check_identifier(pa_check)\n\n\ndef _pandera_column_to_table_column(pa_column: pa.Column) -> TableColumn:\n constraints = TableColumnConstraints(\n nullable=pa_column.nullable,\n unique=pa_column.unique,\n other=[_pandera_check_to_column_constraint(pa_check) for pa_check in pa_column.checks],\n )\n name = check.not_none(pa_column.name, "name")\n name = name if isinstance(name, str) else "/".join(name)\n return TableColumn(\n name=name,\n type=str(pa_column.dtype),\n description=pa_column.description,\n 
constraints=constraints,\n )\n\n\nCHECK_OPERATORS = {\n "equal_to": "==",\n "not_equal_to": "!=",\n "less_than": "<",\n "less_than_or_equal_to": "<=",\n "greater_than": ">",\n "greater_than_or_equal_to": ">=",\n}\n\n\ndef _extract_operand(error_str: str) -> str:\n match = re.search(r"(?<=\\().+(?=\\))", error_str)\n return match.group(0) if match else ""\n\n\ndef _pandera_check_to_column_constraint(pa_check: pa.Check) -> str:\n if pa_check.description:\n return pa_check.description\n elif pa_check.name in CHECK_OPERATORS:\n assert isinstance(\n pa_check.error, str\n ), "Expected pandera check to have string `error` attr."\n return f"{CHECK_OPERATORS[pa_check.name]} {_extract_operand(pa_check.error)}"\n else:\n return _get_pandera_check_identifier(pa_check)\n\n\ndef _get_pandera_check_identifier(pa_check: Union[pa.Check, pa.Hypothesis]) -> str:\n return pa_check.description or pa_check.error or pa_check.name or str(pa_check)\n\n\n__all__ = [\n "pandera_schema_to_dagster_type",\n]\n
", "current_page_name": "_modules/dagster_pandera", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_pandera"}, "dagster_pipes": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_pipes

\nimport base64\nimport datetime\nimport json\nimport logging\nimport os\nimport sys\nimport time\nimport warnings\nimport zlib\nfrom abc import ABC, abstractmethod\nfrom contextlib import ExitStack, contextmanager\nfrom io import StringIO\nfrom queue import Queue\nfrom threading import Event, Thread\nfrom typing import (\n    IO,\n    TYPE_CHECKING,\n    Any,\n    ClassVar,\n    Dict,\n    Generic,\n    Iterable,\n    Iterator,\n    Literal,\n    Mapping,\n    Optional,\n    Sequence,\n    Set,\n    TextIO,\n    Type,\n    TypedDict,\n    TypeVar,\n    Union,\n    cast,\n    get_args,\n)\n\nif TYPE_CHECKING:\n    from unittest.mock import MagicMock\n\n# ########################\n# ##### PROTOCOL\n# ########################\n\n# This represents the version of the protocol, rather than the version of the package. It must be\n# manually updated whenever there are changes to the protocol.\nPIPES_PROTOCOL_VERSION = "0.1"\n\nPipesExtras = Mapping[str, Any]\nPipesParams = Mapping[str, Any]\n\n\n# ##### MESSAGE\n\n\ndef _make_message(method: str, params: Optional[Mapping[str, Any]]) -> "PipesMessage":\n    return {\n        PIPES_PROTOCOL_VERSION_FIELD: PIPES_PROTOCOL_VERSION,\n        "method": method,\n        "params": params,\n    }\n\n\n# Can't use a constant for TypedDict key so this value is repeated in `ExtMessage` defn.\nPIPES_PROTOCOL_VERSION_FIELD = "__dagster_pipes_version"\n\n\nclass PipesMessage(TypedDict):\n    """A message sent from the external process to the orchestration process."""\n\n    __dagster_pipes_version: str\n    method: str\n    params: Optional[Mapping[str, Any]]\n\n\n###### PIPES CONTEXT\n\n\nclass PipesContextData(TypedDict):\n    """The serializable data passed from the orchestration process to the external process. This gets\n    wrapped in a :py:class:`PipesContext`.\n    """\n\n    asset_keys: Optional[Sequence[str]]\n    code_version_by_asset_key: Optional[Mapping[str, Optional[str]]]\n    provenance_by_asset_key: Optional[Mapping[str, Optional["PipesDataProvenance"]]]\n    partition_key: Optional[str]\n    partition_key_range: Optional["PipesPartitionKeyRange"]\n    partition_time_window: Optional["PipesTimeWindow"]\n    run_id: str\n    job_name: Optional[str]\n    retry_number: int\n    extras: Mapping[str, Any]\n\n\nclass PipesPartitionKeyRange(TypedDict):\n    """A range of partition keys."""\n\n    start: str\n    end: str\n\n\nclass PipesTimeWindow(TypedDict):\n    """A span of time delimited by a start and end timestamp. This is defined for time-based partitioning schemes."""\n\n    start: str  # timestamp\n    end: str  # timestamp\n\n\nclass PipesDataProvenance(TypedDict):\n    """Provenance information for an asset."""\n\n    code_version: str\n    input_data_versions: Mapping[str, str]\n    is_user_provided: bool\n\n\nPipesAssetCheckSeverity = Literal["WARN", "ERROR"]\n\nPipesMetadataRawValue = Union[int, float, str, Mapping[str, Any], Sequence[Any], bool, None]\n\n\nclass PipesMetadataValue(TypedDict):\n    type: "PipesMetadataType"\n    raw_value: PipesMetadataRawValue\n\n\n# Infer the type from the raw value on the orchestration end\nPIPES_METADATA_TYPE_INFER = "__infer__"\n\nPipesMetadataType = Literal[\n    "__infer__",\n    "text",\n    "url",\n    "path",\n    "notebook",\n    "json",\n    "md",\n    "float",\n    "int",\n    "bool",\n    "dagster_run",\n    "asset",\n    "null",\n]\n\n# ########################\n# ##### UTIL\n# ########################\n\n_T = TypeVar("_T")\n\n\n
[docs]class DagsterPipesError(Exception):\n pass
\n\n\n
[docs]class DagsterPipesWarning(Warning):\n pass
\n\n\ndef _assert_not_none(value: Optional[_T], desc: Optional[str] = None) -> _T:\n if value is None:\n raise DagsterPipesError(f"Missing required property: {desc}")\n return value\n\n\ndef _assert_defined_asset_property(value: Optional[_T], key: str) -> _T:\n return _assert_not_none(value, f"`{key}` is undefined. Current step does not target an asset.")\n\n\n# This should only be called under the precondition that the current step targets assets.\ndef _assert_single_asset(data: PipesContextData, key: str) -> None:\n asset_keys = data["asset_keys"]\n assert asset_keys is not None\n if len(asset_keys) != 1:\n raise DagsterPipesError(f"`{key}` is undefined. Current step targets multiple assets.")\n\n\ndef _resolve_optionally_passed_asset_key(\n data: PipesContextData,\n asset_key: Optional[str],\n method: str,\n) -> str:\n asset_key = _assert_opt_param_type(asset_key, str, method, "asset_key")\n\n defined_asset_keys = data["asset_keys"]\n if defined_asset_keys:\n if asset_key and asset_key not in defined_asset_keys:\n raise DagsterPipesError(\n f"Invalid asset key. Expected one of `{defined_asset_keys}`, got `{asset_key}`."\n )\n if not asset_key:\n if len(defined_asset_keys) != 1:\n raise DagsterPipesError(\n f"Calling `{method}` without passing an asset key is undefined. Current step"\n " targets multiple assets."\n )\n asset_key = defined_asset_keys[0]\n\n if not asset_key:\n raise DagsterPipesError(\n f"Calling `{method}` without passing an asset key is undefined. Current step"\n " does not target a specific asset."\n )\n\n return asset_key\n\n\ndef _assert_defined_partition_property(value: Optional[_T], key: str) -> _T:\n return _assert_not_none(\n value, f"`{key}` is undefined. Current step does not target any partitions."\n )\n\n\n# This should only be called under the precondition that the current steps targets assets.\ndef _assert_single_partition(data: PipesContextData, key: str) -> None:\n partition_key_range = data["partition_key_range"]\n assert partition_key_range is not None\n if partition_key_range["start"] != partition_key_range["end"]:\n raise DagsterPipesError(f"`{key}` is undefined. Current step targets multiple partitions.")\n\n\ndef _assert_defined_extra(extras: PipesExtras, key: str) -> Any:\n if key not in extras:\n raise DagsterPipesError(f"Extra `{key}` is undefined. Extras must be provided by user.")\n return extras[key]\n\n\ndef _assert_param_type(value: _T, expected_type: Any, method: str, param: str) -> _T:\n if not isinstance(value, expected_type):\n raise DagsterPipesError(\n f"Invalid type for parameter `{param}` of `{method}`. Expected `{expected_type}`, got"\n f" `{type(value)}`."\n )\n return value\n\n\ndef _assert_opt_param_type(value: _T, expected_type: Any, method: str, param: str) -> _T:\n if not (isinstance(value, expected_type) or value is None):\n raise DagsterPipesError(\n f"Invalid type for parameter `{param}` of `{method}`. Expected"\n f" `Optional[{expected_type}]`, got `{type(value)}`."\n )\n return value\n\n\ndef _assert_env_param_type(\n env_params: PipesParams, key: str, expected_type: Type[_T], cls: Type\n) -> _T:\n value = env_params.get(key)\n if not isinstance(value, expected_type):\n raise DagsterPipesError(\n f"Invalid type for parameter `{key}` passed from orchestration side to"\n f" `{cls.__name__}`. 
Expected `{expected_type}`, got `{type(value)}`."\n )\n return value\n\n\ndef _assert_opt_env_param_type(\n env_params: PipesParams, key: str, expected_type: Type[_T], cls: Type\n) -> Optional[_T]:\n value = env_params.get(key)\n if value is not None and not isinstance(value, expected_type):\n raise DagsterPipesError(\n f"Invalid type for parameter `{key}` passed from orchestration side to"\n f" `{cls.__name__}`. Expected `Optional[{expected_type}]`, got `{type(value)}`."\n )\n return value\n\n\ndef _assert_param_value(value: _T, expected_values: Iterable[_T], method: str, param: str) -> _T:\n if value not in expected_values:\n raise DagsterPipesError(\n f"Invalid value for parameter `{param}` of `{method}`. Expected one of"\n f" `{expected_values}`, got `{value}`."\n )\n return value\n\n\ndef _assert_opt_param_value(\n value: _T, expected_values: Sequence[_T], method: str, param: str\n) -> _T:\n if value is not None and value not in expected_values:\n raise DagsterPipesError(\n f"Invalid value for parameter `{param}` of `{method}`. Expected one of"\n f" `{expected_values}`, got `{value}`."\n )\n return value\n\n\ndef _json_serialize_param(value: Any, method: str, param: str) -> str:\n try:\n serialized = json.dumps(value)\n except (TypeError, OverflowError):\n raise DagsterPipesError(\n f"Invalid type for parameter `{param}` of `{method}`. Expected a JSON-serializable"\n f" type, got `{type(value)}`."\n )\n return serialized\n\n\n_METADATA_VALUE_KEYS = frozenset(PipesMetadataValue.__annotations__.keys())\n_METADATA_TYPES = frozenset(get_args(PipesMetadataType))\n\n\ndef _normalize_param_metadata(\n metadata: Mapping[str, Union[PipesMetadataRawValue, PipesMetadataValue]],\n method: str,\n param: str,\n) -> Mapping[str, Union[PipesMetadataRawValue, PipesMetadataValue]]:\n _assert_param_type(metadata, dict, method, param)\n new_metadata: Dict[str, PipesMetadataValue] = {}\n for key, value in metadata.items():\n if not isinstance(key, str):\n raise DagsterPipesError(\n f"Invalid type for parameter `{param}` of `{method}`. Expected a dict with string"\n f" keys, got a key `{key}` of type `{type(key)}`."\n )\n elif isinstance(value, dict):\n if not {*value.keys()} == _METADATA_VALUE_KEYS:\n raise DagsterPipesError(\n f"Invalid type for parameter `{param}` of `{method}`. Expected a dict with"\n " string keys and values that are either raw metadata values or dictionaries"\n f" with schema `{{raw_value: ..., type: ...}}`. Got a value `{value}`."\n )\n _assert_param_value(value["type"], _METADATA_TYPES, method, f"{param}.{key}.type")\n new_metadata[key] = cast(PipesMetadataValue, value)\n else:\n new_metadata[key] = {"raw_value": value, "type": PIPES_METADATA_TYPE_INFER}\n return new_metadata\n\n\ndef _param_from_env_var(env_var: str) -> Any:\n raw_value = os.environ.get(env_var)\n return decode_env_var(raw_value) if raw_value is not None else None\n\n\n
[docs]def encode_env_var(value: Any) -> str:\n """Encode value by serializing to JSON, compressing with zlib, and finally encoding with base64.\n `base64_encode(compress(to_json(value)))` in function notation.\n\n Args:\n value (Any): The value to encode. Must be JSON-serializable.\n\n Returns:\n str: The encoded value.\n """\n serialized = _json_serialize_param(value, "encode_env_var", "value")\n compressed = zlib.compress(serialized.encode("utf-8"))\n encoded = base64.b64encode(compressed)\n return encoded.decode("utf-8") # as string
\n\n\n
[docs]def decode_env_var(value: str) -> Any:\n """Decode a value by decoding from base64, decompressing with zlib, and finally deserializing from\n JSON. `from_json(decompress(base64_decode(value)))` in function notation.\n\n Args:\n value (Any): The value to decode.\n\n Returns:\n Any: The decoded value.\n """\n decoded = base64.b64decode(value)\n decompressed = zlib.decompress(decoded)\n return json.loads(decompressed.decode("utf-8"))
\n\n\ndef _emit_orchestration_inactive_warning() -> None:\n warnings.warn(\n "This process was not launched by a Dagster orchestration process. All calls to the"\n " `dagster-pipes` context or attempts to initialize `dagster-pipes` abstractions"\n " are no-ops.",\n category=DagsterPipesWarning,\n )\n\n\ndef _get_mock() -> "MagicMock":\n from unittest.mock import MagicMock\n\n return MagicMock()\n\n\nclass _PipesLogger(logging.Logger):\n def __init__(self, context: "PipesContext") -> None:\n super().__init__(name="dagster-pipes")\n self.addHandler(_PipesLoggerHandler(context))\n\n\nclass _PipesLoggerHandler(logging.Handler):\n def __init__(self, context: "PipesContext") -> None:\n super().__init__()\n self._context = context\n\n def emit(self, record: logging.LogRecord) -> None:\n self._context._write_message( # noqa: SLF001\n "log", {"message": record.getMessage(), "level": record.levelname}\n )\n\n\n# ########################\n# ##### IO - BASE\n# ########################\n\n\n
[docs]class PipesContextLoader(ABC):\n
[docs] @abstractmethod\n @contextmanager\n def load_context(self, params: PipesParams) -> Iterator[PipesContextData]:\n """A `@contextmanager` that loads context data injected by the orchestration process.\n\n This method should read and yield the context data from the location specified by the passed in\n `PipesParams`.\n\n Args:\n params (PipesParams): The params provided by the context injector in the orchestration\n process.\n\n Yields:\n PipesContextData: The context data.\n """
\n\n\nT_MessageChannel = TypeVar("T_MessageChannel", bound="PipesMessageWriterChannel")\n\n\n
[docs]class PipesMessageWriter(ABC, Generic[T_MessageChannel]):\n
[docs]    @abstractmethod\n    @contextmanager\n    def open(self, params: PipesParams) -> Iterator[T_MessageChannel]:\n        """A `@contextmanager` that initializes a channel for writing messages back to Dagster.\n\n        This method should take the params passed by the orchestration-side\n        :py:class:`PipesMessageReader` and use them to construct and yield a\n        :py:class:`PipesMessageWriterChannel`.\n\n        Args:\n            params (PipesParams): The params provided by the message reader in the orchestration\n                process.\n\n        Yields:\n            PipesMessageWriterChannel: Channel for writing messages back to Dagster.\n        """
\n\n\n
[docs]class PipesMessageWriterChannel(ABC, Generic[T_MessageChannel]):\n """Object that writes messages back to the Dagster orchestration process."""\n\n
[docs] @abstractmethod\n def write_message(self, message: PipesMessage) -> None:\n """Write a message to the orchestration process.\n\n Args:\n message (PipesMessage): The message to write.\n """
\n\n\n
[docs]class PipesParamsLoader(ABC):\n """Object that loads params passed from the orchestration process by the context injector and\n message reader. These params are used to respectively bootstrap the\n :py:class:`PipesContextLoader` and :py:class:`PipesMessageWriter`.\n """\n\n
[docs]    @abstractmethod\n    def is_dagster_pipes_process(self) -> bool:\n        """Whether or not this process has been provided with information to create\n        a PipesContext or should instead return a mock.\n        """
\n\n
[docs] @abstractmethod\n def load_context_params(self) -> PipesParams:\n """PipesParams: Load params passed by the orchestration-side context injector."""
\n\n
[docs] @abstractmethod\n def load_messages_params(self) -> PipesParams:\n """PipesParams: Load params passed by the orchestration-side message reader."""
\n\n\nT_BlobStoreMessageWriterChannel = TypeVar(\n "T_BlobStoreMessageWriterChannel", bound="PipesBlobStoreMessageWriterChannel"\n)\n\n\n
[docs]class PipesBlobStoreMessageWriter(PipesMessageWriter[T_BlobStoreMessageWriterChannel]):\n    """Message writer that periodically uploads message chunks to some blob store endpoint."""\n\n    def __init__(self, *, interval: float = 10):\n        self.interval = interval\n\n
[docs] @contextmanager\n def open(self, params: PipesParams) -> Iterator[T_BlobStoreMessageWriterChannel]:\n """Construct and yield a :py:class:`PipesBlobStoreMessageWriterChannel`.\n\n Args:\n params (PipesParams): The params provided by the message reader in the orchestration\n process.\n\n Yields:\n PipesBlobStoreMessageWriterChannel: Channel that periodically uploads message chunks to\n a blob store.\n """\n channel = self.make_channel(params)\n with channel.buffered_upload_loop():\n yield channel
\n\n
[docs] @abstractmethod\n def make_channel(self, params: PipesParams) -> T_BlobStoreMessageWriterChannel: ...
\n\n\n
[docs]class PipesBlobStoreMessageWriterChannel(PipesMessageWriterChannel):\n """Message writer channel that periodically uploads message chunks to some blob store endpoint."""\n\n def __init__(self, *, interval: float = 10):\n self._interval = interval\n self._buffer: Queue[PipesMessage] = Queue()\n self._counter = 1\n\n
[docs] def write_message(self, message: PipesMessage) -> None:\n self._buffer.put(message)
\n\n
[docs] def flush_messages(self) -> Sequence[PipesMessage]:\n items = []\n while not self._buffer.empty():\n items.append(self._buffer.get())\n return items
\n\n
[docs] @abstractmethod\n def upload_messages_chunk(self, payload: StringIO, index: int) -> None: ...
\n\n
[docs] @contextmanager\n def buffered_upload_loop(self) -> Iterator[None]:\n thread = None\n is_task_complete = Event()\n try:\n thread = Thread(target=self._upload_loop, args=(is_task_complete,), daemon=True)\n thread.start()\n yield\n finally:\n is_task_complete.set()\n if thread:\n thread.join(timeout=60)
\n\n def _upload_loop(self, is_task_complete: Event) -> None:\n start_or_last_upload = datetime.datetime.now()\n while True:\n now = datetime.datetime.now()\n if self._buffer.empty() and is_task_complete.is_set():\n break\n elif is_task_complete.is_set() or (now - start_or_last_upload).seconds > self._interval:\n payload = "\\n".join([json.dumps(message) for message in self.flush_messages()])\n if len(payload) > 0:\n self.upload_messages_chunk(StringIO(payload), self._counter)\n start_or_last_upload = now\n self._counter += 1\n time.sleep(1)
\n\n\n
[docs]class PipesBufferedFilesystemMessageWriterChannel(PipesBlobStoreMessageWriterChannel):\n """Message writer channel that periodically writes message chunks to an endpoint mounted on the filesystem.\n\n Args:\n interval (float): interval in seconds between chunk uploads\n """\n\n def __init__(self, path: str, *, interval: float = 10):\n super().__init__(interval=interval)\n self._path = path\n\n
[docs] def upload_messages_chunk(self, payload: IO, index: int) -> None:\n message_path = os.path.join(self._path, f"{index}.json")\n with open(message_path, "w") as f:\n f.write(payload.read())
\n\n\n# ########################\n# ##### IO - DEFAULT\n# ########################\n\n\n
[docs]class PipesDefaultContextLoader(PipesContextLoader):\n """Context loader that loads context data from either a file or directly from the provided params.\n\n The location of the context data is configured by the params received by the loader. If the params\n include a key `path`, then the context data will be loaded from a file at the specified path. If\n the params instead include a key `data`, then the corresponding value should be a dict\n representing the context data.\n """\n\n FILE_PATH_KEY = "path"\n DIRECT_KEY = "data"\n\n
[docs] @contextmanager\n def load_context(self, params: PipesParams) -> Iterator[PipesContextData]:\n if self.FILE_PATH_KEY in params:\n path = _assert_env_param_type(params, self.FILE_PATH_KEY, str, self.__class__)\n with open(path, "r") as f:\n data = json.load(f)\n yield data\n elif self.DIRECT_KEY in params:\n data = _assert_env_param_type(params, self.DIRECT_KEY, dict, self.__class__)\n yield cast(PipesContextData, data)\n else:\n raise DagsterPipesError(\n f'Invalid params for {self.__class__.__name__}, expected key "{self.FILE_PATH_KEY}"'\n f' or "{self.DIRECT_KEY}", received {params}',\n )
\n\n\n
[docs]class PipesDefaultMessageWriter(PipesMessageWriter):\n    """Message writer that writes messages to either a file or the stdout or stderr stream.\n\n    The write location is configured by the params received by the writer. If the params include a\n    key `path`, then messages will be written to a file at the specified path. If the params instead\n    include a key `stdio`, then the corresponding value must specify either `stderr`\n    or `stdout`, and messages will be written to the selected stream.\n    """\n\n    FILE_PATH_KEY = "path"\n    STDIO_KEY = "stdio"\n    STDERR = "stderr"\n    STDOUT = "stdout"\n\n
[docs] @contextmanager\n def open(self, params: PipesParams) -> Iterator[PipesMessageWriterChannel]:\n if self.FILE_PATH_KEY in params:\n path = _assert_env_param_type(params, self.FILE_PATH_KEY, str, self.__class__)\n yield PipesFileMessageWriterChannel(path)\n elif self.STDIO_KEY in params:\n stream = _assert_env_param_type(params, self.STDIO_KEY, str, self.__class__)\n if stream == self.STDERR:\n yield PipesStreamMessageWriterChannel(sys.stderr)\n elif stream == self.STDOUT:\n yield PipesStreamMessageWriterChannel(sys.stdout)\n else:\n raise DagsterPipesError(\n f'Invalid value for key "std", expected "{self.STDERR}" or "{self.STDOUT}" but'\n f" received {stream}"\n )\n else:\n raise DagsterPipesError(\n f'Invalid params for {self.__class__.__name__}, expected key "path" or "std",'\n f" received {params}"\n )
\n\n\n
[docs]class PipesFileMessageWriterChannel(PipesMessageWriterChannel):\n """Message writer channel that writes one message per line to a file."""\n\n def __init__(self, path: str):\n self._path = path\n\n
[docs] def write_message(self, message: PipesMessage) -> None:\n with open(self._path, "a") as f:\n f.write(json.dumps(message) + "\\n")
\n\n\n
[docs]class PipesStreamMessageWriterChannel(PipesMessageWriterChannel):\n """Message writer channel that writes one message per line to a `TextIO` stream."""\n\n def __init__(self, stream: TextIO):\n self._stream = stream\n\n
[docs] def write_message(self, message: PipesMessage) -> None:\n self._stream.writelines((json.dumps(message), "\\n"))
\n\n\nDAGSTER_PIPES_CONTEXT_ENV_VAR = "DAGSTER_PIPES_CONTEXT"\nDAGSTER_PIPES_MESSAGES_ENV_VAR = "DAGSTER_PIPES_MESSAGES"\n\n\n
[docs]class PipesEnvVarParamsLoader(PipesParamsLoader):\n """Params loader that extracts params from environment variables."""\n\n
[docs] def is_dagster_pipes_process(self) -> bool:\n # use the presence of DAGSTER_PIPES_CONTEXT to discern if we are in a pipes process\n return DAGSTER_PIPES_CONTEXT_ENV_VAR in os.environ
\n\n
[docs] def load_context_params(self) -> PipesParams:\n return _param_from_env_var(DAGSTER_PIPES_CONTEXT_ENV_VAR)
\n\n
[docs] def load_messages_params(self) -> PipesParams:\n return _param_from_env_var(DAGSTER_PIPES_MESSAGES_ENV_VAR)
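For orientation, a short sketch of how the env-var loader is typically consulted before any context or message machinery is set up. It assumes a process launched by Dagster Pipes has `DAGSTER_PIPES_CONTEXT` and `DAGSTER_PIPES_MESSAGES` set to encoded param payloads; the fallback branch is illustrative.

.. code-block:: python

    from dagster_pipes import PipesEnvVarParamsLoader

    loader = PipesEnvVarParamsLoader()
    if loader.is_dagster_pipes_process():
        # Both calls decode the env vars set by the orchestration process.
        context_params = loader.load_context_params()
        messages_params = loader.load_messages_params()
    else:
        # Not launched by Dagster; callers typically fall back to no-op behavior.
        context_params, messages_params = {}, {}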
\n\n\n# ########################\n# ##### IO - S3\n# ########################\n\n\n
[docs]class PipesS3MessageWriter(PipesBlobStoreMessageWriter):\n """Message writer that writes messages by periodically writing message chunks to an S3 bucket.\n\n Args:\n client (Any): A boto3.client("s3") object.\n interval (float): interval in seconds between chunk uploads\n """\n\n # client is a boto3.client("s3") object\n def __init__(self, client: Any, *, interval: float = 10):\n super().__init__(interval=interval)\n # Not checking client type for now because it's a boto3.client object and we don't want to\n # depend on boto3.\n self._client = client\n\n
[docs] def make_channel(\n self,\n params: PipesParams,\n ) -> "PipesS3MessageWriterChannel":\n bucket = _assert_env_param_type(params, "bucket", str, self.__class__)\n key_prefix = _assert_opt_env_param_type(params, "key_prefix", str, self.__class__)\n return PipesS3MessageWriterChannel(\n client=self._client,\n bucket=bucket,\n key_prefix=key_prefix,\n interval=self.interval,\n )
\n\n\n
[docs]class PipesS3MessageWriterChannel(PipesBlobStoreMessageWriterChannel):\n """Message writer channel for writing messages by periodically writing message chunks to an S3 bucket.\n\n Args:\n client (Any): A boto3.client("s3") object.\n bucket (str): The name of the S3 bucket to write to.\n key_prefix (Optional[str]): An optional prefix to use for the keys of written blobs.\n interval (float): interval in seconds between chunk uploads\n """\n\n # client is a boto3.client("s3") object\n def __init__(\n self, client: Any, bucket: str, key_prefix: Optional[str], *, interval: float = 10\n ):\n super().__init__(interval=interval)\n self._client = client\n self._bucket = bucket\n self._key_prefix = key_prefix\n\n
[docs] def upload_messages_chunk(self, payload: IO, index: int) -> None:\n key = f"{self._key_prefix}/{index}.json" if self._key_prefix else f"{index}.json"\n self._client.put_object(\n Body=payload.read(),\n Bucket=self._bucket,\n Key=key,\n )
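A hedged wiring sketch for the S3-backed writer (assumes `boto3` is installed and credentials are configured; the bucket and key prefix come from params injected by the Dagster side, not from this snippet). The external process only needs to hand the writer a `boto3` client.

.. code-block:: python

    import boto3

    from dagster_pipes import PipesS3MessageWriter, open_dagster_pipes

    # The writer buffers messages and uploads a chunk at most every `interval` seconds.
    message_writer = PipesS3MessageWriter(boto3.client("s3"), interval=10)

    with open_dagster_pipes(message_writer=message_writer) as pipes:
        pipes.log.info("hello from an S3-backed pipes session")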
\n\n\n# ########################\n# ##### IO - DBFS\n# ########################\n\n\n
[docs]class PipesDbfsContextLoader(PipesContextLoader):\n """Context loader that reads context from a JSON file on DBFS."""\n\n
[docs] @contextmanager\n def load_context(self, params: PipesParams) -> Iterator[PipesContextData]:\n unmounted_path = _assert_env_param_type(params, "path", str, self.__class__)\n path = os.path.join("/dbfs", unmounted_path.lstrip("/"))\n with open(path, "r") as f:\n yield json.load(f)
\n\n\n
[docs]class PipesDbfsMessageWriter(PipesBlobStoreMessageWriter):\n """Message writer that writes messages by periodically writing message chunks to a directory on DBFS."""\n\n
[docs] def make_channel(\n self,\n params: PipesParams,\n ) -> "PipesBufferedFilesystemMessageWriterChannel":\n unmounted_path = _assert_env_param_type(params, "path", str, self.__class__)\n return PipesBufferedFilesystemMessageWriterChannel(\n path=os.path.join("/dbfs", unmounted_path.lstrip("/")),\n interval=self.interval,\n )
\n\n\n# ########################\n# ##### CONTEXT\n# ########################\n\n\n
[docs]def open_dagster_pipes(\n *,\n context_loader: Optional[PipesContextLoader] = None,\n message_writer: Optional[PipesMessageWriter] = None,\n params_loader: Optional[PipesParamsLoader] = None,\n) -> "PipesContext":\n """Initialize the Dagster Pipes context.\n\n This function should be called near the entry point of a pipes process. It will load injected\n context information from Dagster and spin up the machinery for streaming messages back to\n Dagster.\n\n If the process was not launched by Dagster, this function will emit a warning and return a\n `MagicMock` object. This should make all operations on the context no-ops and prevent your code\n from crashing.\n\n Args:\n context_loader (Optional[PipesContextLoader]): The context loader to use. Defaults to\n :py:class:`PipesDefaultContextLoader`.\n message_writer (Optional[PipesMessageWriter]): The message writer to use. Defaults to\n :py:class:`PipesDefaultMessageWriter`.\n params_loader (Optional[PipesParamsLoader]): The params loader to use. Defaults to\n :py:class:`PipesEnvVarParamsLoader`.\n\n Returns:\n PipesContext: The initialized context.\n """\n if PipesContext.is_initialized():\n return PipesContext.get()\n\n params_loader = params_loader or PipesEnvVarParamsLoader()\n if params_loader.is_dagster_pipes_process():\n context_loader = context_loader or PipesDefaultContextLoader()\n message_writer = message_writer or PipesDefaultMessageWriter()\n context = PipesContext(params_loader, context_loader, message_writer)\n else:\n _emit_orchestration_inactive_warning()\n context = _get_mock()\n PipesContext.set(context)\n return context
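A minimal end-to-end sketch of calling `open_dagster_pipes()` near the entry point of an externally launched script. The metadata key and the log line are illustrative; when the script is not launched by Dagster the returned context is a `MagicMock`, so these calls become no-ops.

.. code-block:: python

    from dagster_pipes import open_dagster_pipes

    with open_dagster_pipes() as pipes:
        # ... do the actual work of the external computation here ...
        pipes.log.info("finished processing")
        # Stream a materialization event for the asset in scope back to Dagster.
        pipes.report_asset_materialization(metadata={"row_count": 42})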
\n\n\n
[docs]class PipesContext:\n """The context for a Dagster Pipes process.\n\n This class is analogous to :py:class:`~dagster.OpExecutionContext` on the Dagster side of the Pipes\n connection. It provides access to information such as the asset key(s) and partition key(s) in\n scope for the current step. It also provides methods for logging and emitting results that will\n be streamed back to Dagster.\n\n This class should not be directly instantiated by the user. Instead it should be initialized by\n calling :py:func:`open_dagster_pipes()`, which will return the singleton instance of this class.\n After `open_dagster_pipes()` has been called, the singleton instance can also be retrieved by\n calling :py:func:`PipesContext.get`.\n """\n\n _instance: ClassVar[Optional["PipesContext"]] = None\n\n
[docs] @classmethod\n def is_initialized(cls) -> bool:\n """bool: Whether the context has been initialized."""\n return cls._instance is not None
\n\n
[docs] @classmethod\n def set(cls, context: "PipesContext") -> None:\n """Set the singleton instance of the context."""\n cls._instance = context
\n\n
[docs] @classmethod\n def get(cls) -> "PipesContext":\n """Get the singleton instance of the context. Raises an error if the context has not been initialized."""\n if cls._instance is None:\n raise Exception(\n "PipesContext has not been initialized. You must call `open_dagster_pipes()`."\n )\n return cls._instance
\n\n def __init__(\n self,\n params_loader: PipesParamsLoader,\n context_loader: PipesContextLoader,\n message_writer: PipesMessageWriter,\n ) -> None:\n context_params = params_loader.load_context_params()\n messages_params = params_loader.load_messages_params()\n self._io_stack = ExitStack()\n self._data = self._io_stack.enter_context(context_loader.load_context(context_params))\n self._message_channel = self._io_stack.enter_context(message_writer.open(messages_params))\n self._message_channel.write_message(_make_message("opened", {}))\n self._logger = _PipesLogger(self)\n self._materialized_assets: Set[str] = set()\n self._closed: bool = False\n\n def __enter__(self) -> "PipesContext":\n return self\n\n def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:\n self.close()\n\n
[docs] def close(self) -> None:\n """Close the pipes connection. This will flush all buffered messages to the orchestration\n process and cause any further attempt to write a message to raise an error. This method is\n idempotent-- subsequent calls after the first have no effect.\n """\n if not self._closed:\n self._message_channel.write_message(_make_message("closed", {}))\n self._io_stack.close()\n self._closed = True
\n\n @property\n def is_closed(self) -> bool:\n """bool: Whether the context has been closed."""\n return self._closed\n\n def _write_message(self, method: str, params: Optional[Mapping[str, Any]] = None) -> None:\n if self._closed:\n raise DagsterPipesError("Cannot send message after pipes context is closed.")\n message = _make_message(method, params)\n self._message_channel.write_message(message)\n\n # ########################\n # ##### PUBLIC API\n # ########################\n\n @property\n def is_asset_step(self) -> bool:\n """bool: Whether the current step targets assets."""\n return self._data["asset_keys"] is not None\n\n @property\n def asset_key(self) -> str:\n """str: The AssetKey for the currently scoped asset. Raises an error if 0 or multiple assets\n are in scope.\n """\n asset_keys = _assert_defined_asset_property(self._data["asset_keys"], "asset_key")\n _assert_single_asset(self._data, "asset_key")\n return asset_keys[0]\n\n @property\n def asset_keys(self) -> Sequence[str]:\n """Sequence[str]: The AssetKeys for the currently scoped assets. Raises an error if no\n assets are in scope.\n """\n asset_keys = _assert_defined_asset_property(self._data["asset_keys"], "asset_keys")\n return asset_keys\n\n @property\n def provenance(self) -> Optional[PipesDataProvenance]:\n """Optional[PipesDataProvenance]: The provenance for the currently scoped asset. Raises an\n error if 0 or multiple assets are in scope.\n """\n provenance_by_asset_key = _assert_defined_asset_property(\n self._data["provenance_by_asset_key"], "provenance"\n )\n _assert_single_asset(self._data, "provenance")\n return next(iter(provenance_by_asset_key.values()))\n\n @property\n def provenance_by_asset_key(self) -> Mapping[str, Optional[PipesDataProvenance]]:\n """Mapping[str, Optional[PipesDataProvenance]]: Mapping of asset key to provenance for the\n currently scoped assets. Raises an error if no assets are in scope.\n """\n provenance_by_asset_key = _assert_defined_asset_property(\n self._data["provenance_by_asset_key"], "provenance_by_asset_key"\n )\n return provenance_by_asset_key\n\n @property\n def code_version(self) -> Optional[str]:\n """Optional[str]: The code version for the currently scoped asset. Raises an error if 0 or\n multiple assets are in scope.\n """\n code_version_by_asset_key = _assert_defined_asset_property(\n self._data["code_version_by_asset_key"], "code_version"\n )\n _assert_single_asset(self._data, "code_version")\n return next(iter(code_version_by_asset_key.values()))\n\n @property\n def code_version_by_asset_key(self) -> Mapping[str, Optional[str]]:\n """Mapping[str, Optional[str]]: Mapping of asset key to code version for the currently\n scoped assets. Raises an error if no assets are in scope.\n """\n code_version_by_asset_key = _assert_defined_asset_property(\n self._data["code_version_by_asset_key"], "code_version_by_asset_key"\n )\n return code_version_by_asset_key\n\n @property\n def is_partition_step(self) -> bool:\n """bool: Whether the current step is scoped to one or more partitions."""\n return self._data["partition_key_range"] is not None\n\n @property\n def partition_key(self) -> str:\n """str: The partition key for the currently scoped partition. 
Raises an error if 0 or\n multiple partitions are in scope.\n """\n partition_key = _assert_defined_partition_property(\n self._data["partition_key"], "partition_key"\n )\n return partition_key\n\n @property\n def partition_key_range(self) -> "PipesPartitionKeyRange":\n """PipesPartitionKeyRange: The partition key range for the currently scoped partition or\n partitions. Raises an error if no partitions are in scope.\n """\n partition_key_range = _assert_defined_partition_property(\n self._data["partition_key_range"], "partition_key_range"\n )\n return partition_key_range\n\n @property\n def partition_time_window(self) -> Optional["PipesTimeWindow"]:\n """Optional[PipesTimeWindow]: The partition time window for the currently scoped partition\n or partitions. Returns None if partitions in scope are not temporal. Raises an error if no\n partitions are in scope.\n """\n # None is a valid value for partition_time_window, but we check that a partition key range\n # is defined.\n _assert_defined_partition_property(\n self._data["partition_key_range"], "partition_time_window"\n )\n return self._data["partition_time_window"]\n\n @property\n def run_id(self) -> str:\n """str: The run ID for the currently executing pipeline run."""\n return self._data["run_id"]\n\n @property\n def job_name(self) -> Optional[str]:\n """Optional[str]: The job name for the currently executing run. Returns None if the run is\n not derived from a job.\n """\n return self._data["job_name"]\n\n @property\n def retry_number(self) -> int:\n """int: The retry number for the currently executing run."""\n return self._data["retry_number"]\n\n
[docs] def get_extra(self, key: str) -> Any:\n """Get the value of an extra provided by the user. Raises an error if the extra is not defined.\n\n Args:\n key (str): The key of the extra.\n\n Returns:\n Any: The value of the extra.\n """\n return _assert_defined_extra(self._data["extras"], key)
\n\n @property\n def extras(self) -> Mapping[str, Any]:\n """Mapping[str, Any]: Key-value map for all extras provided by the user."""\n return self._data["extras"]\n\n # ##### WRITE\n\n
[docs] def report_asset_materialization(\n self,\n metadata: Optional[Mapping[str, Union[PipesMetadataRawValue, PipesMetadataValue]]] = None,\n data_version: Optional[str] = None,\n asset_key: Optional[str] = None,\n ) -> None:\n """Report to Dagster that an asset has been materialized. Streams a payload containing\n materialization information back to Dagster. If no assets are in scope, raises an error.\n\n Args:\n metadata (Optional[Mapping[str, Union[PipesMetadataRawValue, PipesMetadataValue]]]):\n Metadata for the materialized asset. Defaults to None.\n data_version (Optional[str]): The data version for the materialized asset.\n Defaults to None.\n asset_key (Optional[str]): The asset key for the materialized asset. If only a\n single asset is in scope, default to that asset's key. If multiple assets are in scope,\n this must be set explicitly or an error will be raised.\n """\n asset_key = _resolve_optionally_passed_asset_key(\n self._data, asset_key, "report_asset_materialization"\n )\n if asset_key in self._materialized_assets:\n raise DagsterPipesError(\n f"Calling `report_asset_materialization` with asset key `{asset_key}` is undefined."\n " Asset has already been materialized, so no additional data can be reported"\n " for it."\n )\n metadata = (\n _normalize_param_metadata(metadata, "report_asset_materialization", "metadata")\n if metadata\n else None\n )\n data_version = _assert_opt_param_type(\n data_version, str, "report_asset_materialization", "data_version"\n )\n self._write_message(\n "report_asset_materialization",\n {"asset_key": asset_key, "data_version": data_version, "metadata": metadata},\n )\n self._materialized_assets.add(asset_key)
\n\n
[docs] def report_asset_check(\n self,\n check_name: str,\n passed: bool,\n severity: PipesAssetCheckSeverity = "ERROR",\n metadata: Optional[Mapping[str, Union[PipesMetadataRawValue, PipesMetadataValue]]] = None,\n asset_key: Optional[str] = None,\n ) -> None:\n """Report to Dagster that an asset check has been performed. Streams a payload containing\n check result information back to Dagster. If no assets or associated checks are in scope, raises an error.\n\n Args:\n check_name (str): The name of the check.\n passed (bool): Whether the check passed.\n severity (PipesAssetCheckSeverity): The severity of the check. Defaults to "ERROR".\n metadata (Optional[Mapping[str, Union[PipesMetadataRawValue, PipesMetadataValue]]]):\n Metadata for the check. Defaults to None.\n asset_key (Optional[str]): The asset key for the check. If only a single asset is in\n scope, default to that asset's key. If multiple assets are in scope, this must be\n set explicitly or an error will be raised.\n """\n asset_key = _resolve_optionally_passed_asset_key(\n self._data, asset_key, "report_asset_check"\n )\n check_name = _assert_param_type(check_name, str, "report_asset_check", "check_name")\n passed = _assert_param_type(passed, bool, "report_asset_check", "passed")\n metadata = (\n _normalize_param_metadata(metadata, "report_asset_check", "metadata")\n if metadata\n else None\n )\n self._write_message(\n "report_asset_check",\n {\n "asset_key": asset_key,\n "check_name": check_name,\n "passed": passed,\n "metadata": metadata,\n "severity": severity,\n },\n )
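An illustrative follow-on to the materialization sketch above. The check name `"not_empty"` is hypothetical and must correspond to an asset check defined on the Dagster side for the asset in scope.

.. code-block:: python

    from dagster_pipes import PipesContext

    pipes = PipesContext.get()  # valid after open_dagster_pipes() has run
    pipes.report_asset_check(
        check_name="not_empty",
        passed=True,
        severity="WARN",  # defaults to "ERROR" when omitted
        metadata={"row_count": 42},
    )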
\n\n @property\n def log(self) -> logging.Logger:\n """logging.Logger: A logger that streams log messages back to Dagster."""\n return self._logger
\n
", "current_page_name": "_modules/dagster_pipes", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_pipes"}, "index": {"alabaster_version": "0.7.13", "body": "

All modules for which code is available

\n", "current_page_name": "_modules/index", "customsidebar": null, "favicon_url": null, "logo_url": null, "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "Overview: module code"}}, "dagster": {"_config": {"config_schema": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._config.config_schema

\nfrom typing import TYPE_CHECKING, Any, Dict, List, Mapping, Sequence, Type, Union\n\nfrom typing_extensions import TypeAlias\n\nif TYPE_CHECKING:\n    from dagster._config import ConfigType, Field\n\n# Eventually, the below `UserConfigSchema` should be renamed to `ConfigSchema` and the class\n# definition should be dropped. The reason we don't do this now is that sphinx autodoc doesn't\n# support type aliases, so there is no good way to gracefully attach a docstring to this and have it\n# show up in the docs. See: https://github.com/sphinx-doc/sphinx/issues/8934\n#\n# Unfortunately mypy doesn't support recursive types, which would be used to properly define the\n# List/Dict elements of this union: `Dict[str, ConfigSchema]`, `List[ConfigSchema]`.\nUserConfigSchema: TypeAlias = Union[\n    Type[Union[bool, float, int, str]],\n    Type[Union[Dict[Any, Any], List[Any]]],\n    "ConfigType",\n    "Field",\n    Mapping[str, Any],\n    Sequence[Any],\n]\n\n\n
[docs]class ConfigSchema:\n """Placeholder type for config schemas.\n\n Any time that it appears in documentation, it means that any of the following types are\n acceptable:\n\n #. A Python scalar type that resolves to a Dagster config type\n (:py:class:`~python:int`, :py:class:`~python:float`, :py:class:`~python:bool`,\n or :py:class:`~python:str`). For example:\n\n * ``@op(config_schema=int)``\n * ``@op(config_schema=str)``\n\n #. A built-in python collection (:py:class:`~python:list`, or :py:class:`~python:dict`).\n :py:class:`~python:list` is exactly equivalent to :py:class:`~dagster.Array` [\n :py:class:`~dagster.Any` ] and :py:class:`~python:dict` is equivalent to\n :py:class:`~dagster.Permissive`. For example:\n\n * ``@op(config_schema=list)``\n * ``@op(config_schema=dict)``\n\n #. A Dagster config type:\n\n * :py:data:`~dagster.Any`\n * :py:class:`~dagster.Array`\n * :py:data:`~dagster.Bool`\n * :py:data:`~dagster.Enum`\n * :py:data:`~dagster.Float`\n * :py:data:`~dagster.Int`\n * :py:data:`~dagster.IntSource`\n * :py:data:`~dagster.Noneable`\n * :py:class:`~dagster.Permissive`\n * :py:class:`~dagster.Map`\n * :py:class:`~dagster.ScalarUnion`\n * :py:class:`~dagster.Selector`\n * :py:class:`~dagster.Shape`\n * :py:data:`~dagster.String`\n * :py:data:`~dagster.StringSource`\n\n\n #. A bare python dictionary, which will be automatically wrapped in\n :py:class:`~dagster.Shape`. Values of the dictionary are resolved recursively\n according to the same rules. For example:\n\n * ``{'some_config': str}`` is equivalent to ``Shape({'some_config: str})``.\n\n * ``{'some_config1': {'some_config2': str}}`` is equivalent to\n ``Shape({'some_config1: Shape({'some_config2: str})})``.\n\n #. A bare python list of length one, whose single element will be wrapped in a\n :py:class:`~dagster.Array` is resolved recursively according to the same\n rules. For example:\n\n * ``[str]`` is equivalent to ``Array[str]``.\n\n * ``[[str]]`` is equivalent to ``Array[Array[str]]``.\n\n * ``[{'some_config': str}]`` is equivalent to ``Array(Shape({'some_config: str}))``.\n\n #. An instance of :py:class:`~dagster.Field`.\n """\n\n def __init__(self):\n raise NotImplementedError(\n "ConfigSchema is a placeholder type and should not be instantiated."\n )
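To make the equivalences listed above concrete, here is a small sketch (the ops and field names are invented for illustration) showing three spellings of config schema that all resolve through the rules in the docstring:

.. code-block:: python

    from dagster import Field, Shape, op

    # 1. A bare python dict is wrapped in Shape; values resolve recursively.
    @op(config_schema={"word": str, "repeats": int})
    def repeat_word(context):
        return context.op_config["word"] * context.op_config["repeats"]

    # 2. The same schema, spelled explicitly with Shape and Field.
    @op(config_schema=Shape({"word": Field(str), "repeats": Field(int)}))
    def repeat_word_explicit(context):
        return context.op_config["word"] * context.op_config["repeats"]

    # 3. A bare single-element list becomes an Array; [str] resolves like Array(str).
    @op(config_schema=[str])
    def upper_all(context):
        return [s.upper() for s in context.op_config]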
\n
", "current_page_name": "_modules/dagster/_config/config_schema", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._config.config_schema"}, "config_type": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._config.config_type

\nimport typing\nfrom enum import Enum as PythonEnum\nfrom typing import TYPE_CHECKING, Dict, Iterator, Optional, Sequence, cast\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._builtins import BuiltinEnum\nfrom dagster._config import UserConfigSchema\nfrom dagster._serdes import whitelist_for_serdes\n\nif TYPE_CHECKING:\n    from .snap import ConfigSchemaSnapshot, ConfigTypeSnap\n\n\n@whitelist_for_serdes\nclass ConfigTypeKind(PythonEnum):\n    ANY = "ANY"\n    SCALAR = "SCALAR"\n    ENUM = "ENUM"\n\n    SELECTOR = "SELECTOR"\n    STRICT_SHAPE = "STRICT_SHAPE"\n    PERMISSIVE_SHAPE = "PERMISSIVE_SHAPE"\n    SCALAR_UNION = "SCALAR_UNION"\n\n    MAP = "MAP"\n\n    # Closed generic types\n    ARRAY = "ARRAY"\n    NONEABLE = "NONEABLE"\n\n    @staticmethod\n    def has_fields(kind: "ConfigTypeKind") -> bool:\n        check.inst_param(kind, "kind", ConfigTypeKind)\n        return kind == ConfigTypeKind.SELECTOR or ConfigTypeKind.is_shape(kind)\n\n    @staticmethod\n    def is_closed_generic(kind: "ConfigTypeKind") -> bool:\n        check.inst_param(kind, "kind", ConfigTypeKind)\n        return (\n            kind == ConfigTypeKind.ARRAY\n            or kind == ConfigTypeKind.NONEABLE\n            or kind == ConfigTypeKind.SCALAR_UNION\n            or kind == ConfigTypeKind.MAP\n        )\n\n    @staticmethod\n    def is_shape(kind: "ConfigTypeKind") -> bool:\n        check.inst_param(kind, "kind", ConfigTypeKind)\n        return kind == ConfigTypeKind.STRICT_SHAPE or kind == ConfigTypeKind.PERMISSIVE_SHAPE\n\n    @staticmethod\n    def is_selector(kind: "ConfigTypeKind") -> bool:\n        check.inst_param(kind, "kind", ConfigTypeKind)\n        return kind == ConfigTypeKind.SELECTOR\n\n\nclass ConfigType:\n    """The class backing DagsterTypes as they are used processing configuration data."""\n\n    def __init__(\n        self,\n        key: str,\n        kind: ConfigTypeKind,\n        given_name: Optional[str] = None,\n        description: Optional[str] = None,\n        type_params: Optional[Sequence["ConfigType"]] = None,\n    ):\n        self.key: str = check.str_param(key, "key")\n        self.kind: ConfigTypeKind = check.inst_param(kind, "kind", ConfigTypeKind)\n        self.given_name: Optional[str] = check.opt_str_param(given_name, "given_name")\n        self._description: Optional[str] = check.opt_str_param(description, "description")\n        self.type_params: Optional[Sequence[ConfigType]] = (\n            check.sequence_param(type_params, "type_params", of_type=ConfigType)\n            if type_params\n            else None\n        )\n\n        # memoized snap representation\n        self._snap: Optional["ConfigTypeSnap"] = None\n\n    @property\n    def description(self) -> Optional[str]:\n        return self._description\n\n    @staticmethod\n    def from_builtin_enum(builtin_enum: typing.Any) -> "ConfigType":\n        check.invariant(BuiltinEnum.contains(builtin_enum), "param must be member of BuiltinEnum")\n        return _CONFIG_MAP[builtin_enum]\n\n    def post_process(self, value):\n        """Implement this in order to take a value provided by the user\n        and perform computation on it. This can be done to coerce data types,\n        fetch things from the environment (e.g. environment variables), or\n        to do custom validation. If the value is not valid, throw a\n        PostProcessingError. 
Otherwise return the coerced value.\n        """\n        return value\n\n    def get_snapshot(self) -> "ConfigTypeSnap":\n        from .snap import snap_from_config_type\n\n        if self._snap is None:\n            self._snap = snap_from_config_type(self)\n\n        return self._snap\n\n    def type_iterator(self) -> Iterator["ConfigType"]:\n        yield self\n\n    def get_schema_snapshot(self) -> "ConfigSchemaSnapshot":\n        from .snap import ConfigSchemaSnapshot\n\n        return ConfigSchemaSnapshot({ct.key: ct.get_snapshot() for ct in self.type_iterator()})\n\n\n@whitelist_for_serdes\nclass ConfigScalarKind(PythonEnum):\n    INT = "INT"\n    STRING = "STRING"\n    FLOAT = "FLOAT"\n    BOOL = "BOOL"\n\n\n# Scalars, Composites, Selectors, Lists, Optional, Any\n\n\nclass ConfigScalar(ConfigType):\n    def __init__(\n        self,\n        key: str,\n        given_name: Optional[str],\n        scalar_kind: ConfigScalarKind,\n        **kwargs: typing.Any,\n    ):\n        self.scalar_kind = check.inst_param(scalar_kind, "scalar_kind", ConfigScalarKind)\n        super(ConfigScalar, self).__init__(\n            key, kind=ConfigTypeKind.SCALAR, given_name=given_name, **kwargs\n        )\n\n\nclass BuiltinConfigScalar(ConfigScalar):\n    def __init__(self, scalar_kind, description=None):\n        super(BuiltinConfigScalar, self).__init__(\n            key=type(self).__name__,\n            given_name=type(self).__name__,\n            scalar_kind=scalar_kind,\n            description=description,\n        )\n\n\nclass Int(BuiltinConfigScalar):\n    def __init__(self):\n        super(Int, self).__init__(scalar_kind=ConfigScalarKind.INT, description="")\n\n\nclass String(BuiltinConfigScalar):\n    def __init__(self):\n        super(String, self).__init__(scalar_kind=ConfigScalarKind.STRING, description="")\n\n\nclass Bool(BuiltinConfigScalar):\n    def __init__(self):\n        super(Bool, self).__init__(scalar_kind=ConfigScalarKind.BOOL, description="")\n\n\nclass Float(BuiltinConfigScalar):\n    def __init__(self):\n        super(Float, self).__init__(scalar_kind=ConfigScalarKind.FLOAT, description="")\n\n    def post_process(self, value):\n        return float(value)\n\n\nclass Any(ConfigType):\n    def __init__(self):\n        super(Any, self).__init__(\n            key="Any",\n            given_name="Any",\n            kind=ConfigTypeKind.ANY,\n        )\n\n\n
[docs]class Noneable(ConfigType):\n """Defines a configuration type that is the union of ``NoneType`` and the type ``inner_type``.\n\n Args:\n inner_type (type):\n The type of the values that this configuration type can contain.\n\n **Examples:**\n\n .. code-block:: python\n\n config_schema={"name": Noneable(str)}\n\n config={"name": "Hello"} # Ok\n config={"name": None} # Ok\n config={} # Error\n """\n\n def __init__(self, inner_type: object):\n from .field import resolve_to_config_type\n\n self.inner_type = cast(ConfigType, resolve_to_config_type(inner_type))\n super(Noneable, self).__init__(\n key=f"Noneable.{self.inner_type.key}",\n kind=ConfigTypeKind.NONEABLE,\n type_params=[self.inner_type],\n )\n\n def type_iterator(self) -> Iterator["ConfigType"]:\n yield from self.inner_type.type_iterator()\n yield from super().type_iterator()
\n\n\n
[docs]class Array(ConfigType):\n """Defines an array (list) configuration type that contains values of type ``inner_type``.\n\n Args:\n inner_type (type):\n The type of the values that this configuration type can contain.\n """\n\n def __init__(self, inner_type: object):\n from .field import resolve_to_config_type\n\n self.inner_type = cast(ConfigType, resolve_to_config_type(inner_type))\n super(Array, self).__init__(\n key=f"Array.{self.inner_type.key}",\n type_params=[self.inner_type],\n kind=ConfigTypeKind.ARRAY,\n )\n\n @public\n @property\n def description(self) -> str:\n """A human-readable description of this Array type."""\n return f"List of {self.key}"\n\n def type_iterator(self) -> Iterator["ConfigType"]:\n yield from self.inner_type.type_iterator()\n yield from super().type_iterator()
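Since `Array` has no inline example above, a brief hedged sketch (the op name and config values are invented): `Array(str)` and the shorthand `[str]` accept the same config.

.. code-block:: python

    from dagster import Array, Field, op

    @op(config_schema=Field(Array(str), default_value=["a", "b"], is_required=False))
    def join_names(context):
        # op_config is the configured list of strings.
        return ", ".join(context.op_config)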
\n\n\n
[docs]class EnumValue:\n """Define an entry in a :py:class:`Enum`.\n\n Args:\n config_value (str):\n The string representation of the config to accept when passed.\n python_value (Optional[Any]):\n The python value to convert the enum entry into. Defaults to the ``config_value``.\n description (Optional[str]):\n A human-readable description of the enum entry.\n\n """\n\n def __init__(\n self,\n config_value: str,\n python_value: Optional[object] = None,\n description: Optional[str] = None,\n ):\n self.config_value = check.str_param(config_value, "config_value")\n self.python_value = config_value if python_value is None else python_value\n self.description = check.opt_str_param(description, "description")
\n\n\n
[docs]class Enum(ConfigType):\n """Defines a enum configuration type that allows one of a defined set of possible values.\n\n Args:\n name (str):\n The name of the enum configuration type.\n enum_values (List[EnumValue]):\n The set of possible values for the enum configuration type.\n\n **Examples:**\n\n .. code-block:: python\n\n @op(\n config_schema=Field(\n Enum(\n 'CowboyType',\n [\n EnumValue('good'),\n EnumValue('bad'),\n EnumValue('ugly'),\n ]\n )\n )\n )\n def resolve_standoff(context):\n # ...\n """\n\n def __init__(self, name: str, enum_values: Sequence[EnumValue]):\n check.str_param(name, "name")\n super(Enum, self).__init__(key=name, given_name=name, kind=ConfigTypeKind.ENUM)\n self.enum_values = check.sequence_param(enum_values, "enum_values", of_type=EnumValue)\n self._valid_python_values = {ev.python_value for ev in enum_values}\n check.invariant(len(self._valid_python_values) == len(enum_values))\n self._valid_config_values = {ev.config_value for ev in enum_values}\n check.invariant(len(self._valid_config_values) == len(enum_values))\n\n @property\n def config_values(self):\n return [ev.config_value for ev in self.enum_values]\n\n def is_valid_config_enum_value(self, config_value):\n return config_value in self._valid_config_values\n\n def post_process(self, value: typing.Any) -> typing.Any:\n if isinstance(value, PythonEnum):\n value = value.name\n\n for ev in self.enum_values:\n if ev.config_value == value:\n return ev.python_value\n\n check.failed(f"Should never reach this. config_value should be pre-validated. Got {value}")\n\n @classmethod\n def from_python_enum(cls, enum, name=None):\n """Create a Dagster enum corresponding to an existing Python enum.\n\n Args:\n enum (enum.EnumMeta):\n The class representing the enum.\n name (Optional[str]):\n The name for the enum. If not present, `enum.__name__` will be used.\n\n Example:\n .. code-block:: python\n\n class Color(enum.Enum):\n RED = enum.auto()\n GREEN = enum.auto()\n BLUE = enum.auto()\n\n @op(\n config_schema={"color": Field(Enum.from_python_enum(Color))}\n )\n def select_color(context):\n assert context.op_config["color"] == Color.RED\n """\n if name is None:\n name = enum.__name__\n return cls(name, [EnumValue(v.name, python_value=v) for v in enum])\n\n @classmethod\n def from_python_enum_direct_values(cls, enum, name=None):\n """Create a Dagster enum corresponding to an existing Python enum, where the direct values are passed instead of symbolic values (IE, enum.symbol.value as opposed to enum.symbol).\n\n This is necessary for internal usage, as the symbolic values are not serializable.\n\n Args:\n enum (enum.EnumMeta):\n The class representing the enum.\n name (Optional[str]):\n The name for the enum. If not present, `enum.__name__` will be used.\n\n Example:\n .. code-block:: python\n\n class Color(enum.Enum):\n RED = enum.auto()\n GREEN = enum.auto()\n BLUE = enum.auto()\n\n @op(\n config_schema={"color": Field(Enum.from_python_enum(Color))}\n )\n def select_color(context):\n assert context.op_config["color"] == Color.RED.value\n """\n if name is None:\n name = enum.__name__\n return cls(name, [EnumValue(v.name, python_value=v.value) for v in enum])
\n\n\n
[docs]class ScalarUnion(ConfigType):\n """Defines a configuration type that accepts a scalar value OR a non-scalar value like a\n :py:class:`~dagster.List`, :py:class:`~dagster.Dict`, or :py:class:`~dagster.Selector`.\n\n This allows runtime scalars to be configured without a dictionary with the key ``value`` and\n instead just use the scalar value directly. However this still leaves the option to\n load scalars from a json or pickle file.\n\n Args:\n scalar_type (type):\n The scalar type of values that this configuration type can hold. For example,\n :py:class:`~python:int`, :py:class:`~python:float`, :py:class:`~python:bool`,\n or :py:class:`~python:str`.\n non_scalar_schema (ConfigSchema):\n The schema of a non-scalar Dagster configuration type. For example, :py:class:`List`,\n :py:class:`Dict`, or :py:class:`~dagster.Selector`.\n key (Optional[str]):\n The configuation type's unique key. If not set, then the key will be set to\n ``ScalarUnion.{scalar_type}-{non_scalar_schema}``.\n\n **Examples:**\n\n .. code-block:: yaml\n\n graph:\n transform_word:\n inputs:\n word:\n value: foobar\n\n\n becomes, optionally,\n\n\n .. code-block:: yaml\n\n graph:\n transform_word:\n inputs:\n word: foobar\n """\n\n def __init__(\n self,\n scalar_type: typing.Any,\n non_scalar_schema: UserConfigSchema,\n _key: Optional[str] = None,\n ):\n from .field import resolve_to_config_type\n\n self.scalar_type = check.inst(\n cast(ConfigType, resolve_to_config_type(scalar_type)), ConfigType\n )\n self.non_scalar_type = resolve_to_config_type(non_scalar_schema)\n\n check.param_invariant(self.scalar_type.kind == ConfigTypeKind.SCALAR, "scalar_type")\n check.param_invariant(\n self.non_scalar_type.kind\n in {ConfigTypeKind.STRICT_SHAPE, ConfigTypeKind.SELECTOR, ConfigTypeKind.ARRAY},\n "non_scalar_type",\n )\n\n # https://github.com/dagster-io/dagster/issues/2133\n key = check.opt_str_param(\n _key, "_key", f"ScalarUnion.{self.scalar_type.key}-{self.non_scalar_type.key}"\n )\n\n super(ScalarUnion, self).__init__(\n key=key,\n kind=ConfigTypeKind.SCALAR_UNION,\n type_params=[self.scalar_type, self.non_scalar_type],\n )\n\n def type_iterator(self) -> Iterator["ConfigType"]:\n yield from self.scalar_type.type_iterator()\n yield from self.non_scalar_type.type_iterator()\n yield from super().type_iterator()
\n\n\nConfigAnyInstance: Any = Any()\nConfigBoolInstance: Bool = Bool()\nConfigFloatInstance: Float = Float()\nConfigIntInstance: Int = Int()\nConfigStringInstance: String = String()\n\n_CONFIG_MAP: Dict[check.TypeOrTupleOfTypes, ConfigType] = {\n BuiltinEnum.ANY: ConfigAnyInstance,\n BuiltinEnum.BOOL: ConfigBoolInstance,\n BuiltinEnum.FLOAT: ConfigFloatInstance,\n BuiltinEnum.INT: ConfigIntInstance,\n BuiltinEnum.STRING: ConfigStringInstance,\n}\n\n\n_CONFIG_MAP_BY_NAME: Dict[str, ConfigType] = {\n "Any": ConfigAnyInstance,\n "Bool": ConfigBoolInstance,\n "Float": ConfigFloatInstance,\n "Int": ConfigIntInstance,\n "String": ConfigStringInstance,\n}\n\nALL_CONFIG_BUILTINS = set(_CONFIG_MAP.values())\n\n\ndef get_builtin_scalar_by_name(type_name: str):\n if type_name not in _CONFIG_MAP_BY_NAME:\n check.failed(f"Scalar {type_name} is not supported")\n return _CONFIG_MAP_BY_NAME[type_name]\n
", "current_page_name": "_modules/dagster/_config/config_type", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._config.config_type"}, "field": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._config.field

\nfrom typing import Any, Optional, Union, cast, overload\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._builtins import BuiltinEnum\nfrom dagster._config import UserConfigSchema\nfrom dagster._core.errors import DagsterInvalidConfigError, DagsterInvalidDefinitionError\nfrom dagster._serdes import serialize_value\nfrom dagster._seven import is_subclass\nfrom dagster._utils import is_enum_value\nfrom dagster._utils.typing_api import is_closed_python_optional_type, is_typing_type\n\nfrom .config_type import Array, ConfigAnyInstance, ConfigType, ConfigTypeKind\nfrom .field_utils import FIELD_NO_DEFAULT_PROVIDED, Map, all_optional_type\n\n\ndef _is_config_type_class(obj) -> bool:\n    return isinstance(obj, type) and is_subclass(obj, ConfigType)\n\n\ndef helpful_list_error_string() -> str:\n    return "Please use a python list (e.g. [int]) or dagster.Array (e.g. Array(int)) instead."\n\n\nVALID_CONFIG_DESC = """\n1. A Python primitive type that resolve to dagster config\n   types: int, float, bool, str.\n\n2. A dagster config type: Int, Float, Bool, String, StringSource, Path, Any,\n   Array, Noneable, Selector, Shape, Permissive, etc.\n\n3. A bare python dictionary, which is wrapped in Shape. Any\n   values in the dictionary get resolved by the same rules, recursively.\n\n4. A bare python list of length one which itself is config type.\n   Becomes Array with list element as an argument.\n"""\n\n\n@overload\ndef resolve_to_config_type(obj: Union[ConfigType, UserConfigSchema]) -> ConfigType:\n    pass\n\n\n@overload\ndef resolve_to_config_type(obj: object) -> Union[ConfigType, bool]:\n    pass\n\n\ndef resolve_to_config_type(obj: object) -> Union[ConfigType, bool]:\n    from .field_utils import convert_fields_to_dict_type\n\n    # Short circuit if it's already a Config Type\n    if isinstance(obj, ConfigType):\n        return obj\n\n    if isinstance(obj, dict):\n        # Dicts of the special form {type: value} are treated as Maps\n        # mapping from the type to value type, otherwise treat as dict type\n        if len(obj) == 1:\n            key = next(iter(obj.keys()))\n            key_type = resolve_to_config_type(key)\n            if not isinstance(key, str):\n                if not key_type:\n                    raise DagsterInvalidDefinitionError(\n                        f"Invalid key in map specification: {key!r} in map {obj}"\n                    )\n\n                if not key_type.kind == ConfigTypeKind.SCALAR:  # type: ignore\n                    raise DagsterInvalidDefinitionError(\n                        f"Non-scalar key in map specification: {key!r} in map {obj}"\n                    )\n\n                inner_type = resolve_to_config_type(obj[key])\n\n                if not inner_type:\n                    raise DagsterInvalidDefinitionError(\n                        f"Invalid value in map specification: {obj[str]!r} in map {obj}"\n                    )\n                return Map(key_type, inner_type)\n        return convert_fields_to_dict_type(obj)\n\n    if isinstance(obj, list):\n        if len(obj) != 1:\n            raise DagsterInvalidDefinitionError("Array specifications must only be of length 1")\n\n        inner_type = resolve_to_config_type(obj[0])\n\n        if not inner_type:\n            raise DagsterInvalidDefinitionError(\n                f"Invalid member of array specification: {obj[0]!r} in list {obj}"\n            )\n        return Array(inner_type)\n\n    if BuiltinEnum.contains(obj):\n        return 
ConfigType.from_builtin_enum(obj)\n\n    from .primitive_mapping import (\n        is_supported_config_python_builtin,\n        remap_python_builtin_for_config,\n    )\n\n    if is_supported_config_python_builtin(obj):\n        return remap_python_builtin_for_config(obj)\n\n    if obj is None:\n        return ConfigAnyInstance\n\n    # Special error messages for passing a DagsterType\n    from dagster._core.types.dagster_type import DagsterType, List, ListType\n    from dagster._core.types.python_set import Set, _TypedPythonSet\n    from dagster._core.types.python_tuple import Tuple, _TypedPythonTuple\n\n    if _is_config_type_class(obj):\n        check.param_invariant(\n            False,\n            "dagster_type",\n            f"Cannot pass config type class {obj} to resolve_to_config_type. This error usually"\n            " occurs when you pass a dagster config type class instead of a class instance into"\n            ' another dagster config type. E.g. "Noneable(Permissive)" should instead be'\n            ' "Noneable(Permissive())".',\n        )\n\n    if isinstance(obj, type) and is_subclass(obj, DagsterType):\n        raise DagsterInvalidDefinitionError(\n            f"You have passed a DagsterType class {obj!r} to the config system. "\n            "The DagsterType and config schema systems are separate. "\n            f"Valid config values are:\\n{VALID_CONFIG_DESC}"\n        )\n\n    if is_closed_python_optional_type(obj):\n        raise DagsterInvalidDefinitionError(\n            "Cannot use typing.Optional as a config type. If you want this field to be "\n            "optional, please use Field(<type>, is_required=False), and if you want this field to "\n            "be required, but accept a value of None, use dagster.Noneable(<type>)."\n        )\n\n    if is_typing_type(obj):\n        raise DagsterInvalidDefinitionError(\n            f"You have passed in {obj} to the config system. Types from "\n            "the typing module in python are not allowed in the config system. "\n            "You must use types that are imported from dagster or primitive types "\n            "such as bool, int, etc."\n        )\n\n    if obj is List or isinstance(obj, ListType):\n        raise DagsterInvalidDefinitionError(\n            "Cannot use List in the context of config. " + helpful_list_error_string()\n        )\n\n    if obj is Set or isinstance(obj, _TypedPythonSet):\n        raise DagsterInvalidDefinitionError(\n            "Cannot use Set in the context of a config field. " + helpful_list_error_string()\n        )\n\n    if obj is Tuple or isinstance(obj, _TypedPythonTuple):\n        raise DagsterInvalidDefinitionError(\n            "Cannot use Tuple in the context of a config field. " + helpful_list_error_string()\n        )\n\n    if isinstance(obj, DagsterType):\n        raise DagsterInvalidDefinitionError(\n            f"You have passed an instance of DagsterType {obj.display_name} to the config "\n            f"system (Repr of type: {obj!r}). "\n            "The DagsterType and config schema systems are separate. "\n            f"Valid config values are:\\n{VALID_CONFIG_DESC}",\n        )\n\n    # This means that this is an error and we are return False to a callsite\n    # We do the error reporting there because those callsites have more context\n    return False\n\n\ndef has_implicit_default(config_type):\n    if config_type.kind == ConfigTypeKind.NONEABLE:\n        return True\n\n    return all_optional_type(config_type)\n\n\n
[docs]class Field:\n """Defines the schema for a configuration field.\n\n Fields are used in config schema instead of bare types when one wants to add a description,\n a default value, or to mark it as not required.\n\n Config fields are parsed according to their schemas in order to yield values available at\n job execution time through the config system. Config fields can be set on ops, on\n loaders for custom, and on other pluggable components of the system, such as resources, loggers,\n and executors.\n\n\n Args:\n config (Any): The schema for the config. This value can be any of:\n\n 1. A Python primitive type that resolves to a Dagster config type\n (:py:class:`~python:int`, :py:class:`~python:float`, :py:class:`~python:bool`,\n :py:class:`~python:str`, or :py:class:`~python:list`).\n\n 2. A Dagster config type:\n\n * :py:data:`~dagster.Any`\n * :py:class:`~dagster.Array`\n * :py:data:`~dagster.Bool`\n * :py:data:`~dagster.Enum`\n * :py:data:`~dagster.Float`\n * :py:data:`~dagster.Int`\n * :py:data:`~dagster.IntSource`\n * :py:data:`~dagster.Noneable`\n * :py:class:`~dagster.Permissive`\n * :py:class:`~dagster.ScalarUnion`\n * :py:class:`~dagster.Selector`\n * :py:class:`~dagster.Shape`\n * :py:data:`~dagster.String`\n * :py:data:`~dagster.StringSource`\n\n 3. A bare python dictionary, which will be automatically wrapped in\n :py:class:`~dagster.Shape`. Values of the dictionary are resolved recursively\n according to the same rules.\n\n 4. A bare python list of length one which itself is config type.\n Becomes :py:class:`Array` with list element as an argument.\n\n default_value (Any):\n A default value for this field, conformant to the schema set by the ``dagster_type``\n argument. If a default value is provided, ``is_required`` should be ``False``.\n\n Note: for config types that do post processing such as Enum, this value must be\n the pre processed version, ie use ``ExampleEnum.VALUE.name`` instead of\n ``ExampleEnum.VALUE``\n\n is_required (bool):\n Whether the presence of this field is required. Defaults to true. If ``is_required``\n is ``True``, no default value should be provided.\n\n description (str):\n A human-readable description of this config field.\n\n Examples:\n .. code-block:: python\n\n @op(\n config_schema={\n 'word': Field(str, description='I am a word.'),\n 'repeats': Field(Int, default_value=1, is_required=False),\n }\n )\n def repeat_word(context):\n return context.op_config['word'] * context.op_config['repeats']\n """\n\n def _resolve_config_arg(self, config):\n if isinstance(config, ConfigType):\n return config\n\n config_type = resolve_to_config_type(config)\n if not config_type:\n raise DagsterInvalidDefinitionError(\n f"Attempted to pass {config!r} to a Field that expects a valid "\n "dagster type usable in config (e.g. 
Dict, Int, String et al)."\n )\n return config_type\n\n def __init__(\n self,\n config: Any,\n default_value: Any = FIELD_NO_DEFAULT_PROVIDED,\n is_required: Optional[bool] = None,\n description: Optional[str] = None,\n ):\n from .post_process import resolve_defaults\n from .validate import validate_config\n\n self.config_type = check.inst(self._resolve_config_arg(config), ConfigType)\n\n self._description = check.opt_str_param(description, "description")\n\n check.opt_bool_param(is_required, "is_required")\n\n if default_value != FIELD_NO_DEFAULT_PROVIDED:\n check.param_invariant(\n not (callable(default_value)), "default_value", "default_value cannot be a callable"\n )\n\n if is_required is True:\n check.param_invariant(\n default_value == FIELD_NO_DEFAULT_PROVIDED,\n "default_value",\n "required arguments should not specify default values",\n )\n\n self._default_value = default_value\n\n # check explicit default value\n if self.default_provided:\n if self.config_type.kind == ConfigTypeKind.ENUM and is_enum_value(default_value):\n raise DagsterInvalidDefinitionError(\n (\n "You have passed into a python enum value as the default value "\n "into of a config enum type {name}. You must pass in the underlying "\n "string represention as the default value. One of {value_set}."\n ).format(\n value_set=[ev.config_value for ev in self.config_type.enum_values],\n name=self.config_type.given_name,\n )\n )\n\n evr = validate_config(self.config_type, default_value)\n if not evr.success:\n raise DagsterInvalidConfigError(\n "Invalid default_value for Field.",\n evr.errors,\n default_value,\n )\n\n if is_required is None:\n is_optional = has_implicit_default(self.config_type) or self.default_provided\n is_required = not is_optional\n\n # on implicitly optional - set the default value\n # by resolving the defaults of the type\n if is_optional and not self.default_provided:\n evr = resolve_defaults(self.config_type, None)\n if not evr.success:\n raise DagsterInvalidConfigError(\n "Unable to resolve implicit default_value for Field.",\n evr.errors,\n None,\n )\n self._default_value = evr.value\n self._is_required = is_required\n\n @public\n @property\n def is_required(self) -> bool:\n """Whether a value for this field must be provided at runtime.\n\n Cannot be True if a default value is provided.\n """\n return self._is_required\n\n @public\n @property\n def default_provided(self) -> bool:\n """Was a default value provided.\n\n Returns:\n bool: Yes or no\n """\n return self._default_value != FIELD_NO_DEFAULT_PROVIDED\n\n @public\n @property\n def default_value(self) -> Any:\n """The default value for the field.\n\n Raises an exception if no default value was provided.\n """\n check.invariant(self.default_provided, "Asking for default value when none was provided")\n return self._default_value\n\n @public\n @property\n def description(self) -> Optional[str]:\n """A human-readable description of this config field, if provided."""\n return self._description\n\n @property\n def default_value_as_json_str(self) -> str:\n check.invariant(self.default_provided, "Asking for default value when none was provided")\n return serialize_value(self.default_value)\n\n def __repr__(self) -> str:\n return ("Field({config_type}, default={default}, is_required={is_required})").format(\n config_type=self.config_type,\n default=(\n "@" if self._default_value == FIELD_NO_DEFAULT_PROVIDED else self._default_value\n ),\n is_required=self.is_required,\n )
\n\n\ndef check_opt_field_param(obj: object, param_name: str) -> Optional[Field]:\n return check.opt_inst_param(cast(Optional[Field], obj), param_name, Field)\n
", "current_page_name": "_modules/dagster/_config/field", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._config.field"}, "field_utils": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._config.field_utils

\n# encoding: utf-8\nimport hashlib\nimport os\nfrom typing import TYPE_CHECKING, Any, Dict, Iterator, List, Mapping, Optional, Sequence, Type\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.errors import DagsterInvalidConfigDefinitionError\n\nfrom .config_type import Array, ConfigType, ConfigTypeKind\n\nif TYPE_CHECKING:\n    from dagster._config import Field\n\n\ndef all_optional_type(config_type: ConfigType) -> bool:\n    check.inst_param(config_type, "config_type", ConfigType)\n\n    if ConfigTypeKind.is_shape(config_type.kind):\n        for field in config_type.fields.values():  # type: ignore\n            if field.is_required:\n                return False\n        return True\n\n    if ConfigTypeKind.is_selector(config_type.kind):\n        if len(config_type.fields) == 1:  # type: ignore\n            for field in config_type.fields.values():  # type: ignore\n                if field.is_required:\n                    return False\n            return True\n\n    return False\n\n\nclass __FieldValueSentinel:\n    pass\n\n\nclass __InferOptionalCompositeFieldSentinel:\n    pass\n\n\nFIELD_NO_DEFAULT_PROVIDED = __FieldValueSentinel\n\nINFER_OPTIONAL_COMPOSITE_FIELD = __InferOptionalCompositeFieldSentinel\n\n\nclass _ConfigHasFields(ConfigType):\n    def __init__(self, fields, **kwargs):\n        self.fields = expand_fields_dict(fields)\n        super(_ConfigHasFields, self).__init__(**kwargs)\n\n    def type_iterator(self) -> Iterator["ConfigType"]:\n        for field in self.fields.values():\n            yield from field.config_type.type_iterator()\n        yield from super().type_iterator()\n\n\nFIELD_HASH_CACHE: Dict[str, Any] = {}\n\n\ndef _memoize_inst_in_field_cache(passed_cls, defined_cls, key):\n    if key in FIELD_HASH_CACHE:\n        return FIELD_HASH_CACHE[key]\n\n    defined_cls_inst = super(defined_cls, passed_cls).__new__(defined_cls)\n    defined_cls_inst._initialized = False  # noqa: SLF001\n    FIELD_HASH_CACHE[key] = defined_cls_inst\n    return defined_cls_inst\n\n\ndef _add_hash(m, string):\n    m.update(string.encode("utf-8"))\n\n\ndef compute_fields_hash(fields, description, field_aliases=None):\n    m = hashlib.sha1()  # so that hexdigest is 40, not 64 bytes\n    if description:\n        _add_hash(m, ":description: " + description)\n\n    for field_name in sorted(list(fields.keys())):\n        field = fields[field_name]\n        _add_hash(m, ":fieldname:" + field_name)\n        if field.default_provided:\n            _add_hash(m, ":default_value: " + field.default_value_as_json_str)\n        _add_hash(m, ":is_required: " + str(field.is_required))\n        _add_hash(m, ":type_key: " + field.config_type.key)\n        if field.description:\n            _add_hash(m, ":description: " + field.description)\n\n    field_aliases = check.opt_dict_param(\n        field_aliases, "field_aliases", key_type=str, value_type=str\n    )\n    for field_name in sorted(list(field_aliases.keys())):\n        field_alias = field_aliases[field_name]\n        _add_hash(m, ":fieldname: " + field_name)\n        _add_hash(m, ":fieldalias: " + field_alias)\n\n    return m.hexdigest()\n\n\ndef _define_shape_key_hash(fields, description, field_aliases):\n    return "Shape." + compute_fields_hash(fields, description, field_aliases=field_aliases)\n\n\n
[docs]class Shape(_ConfigHasFields):\n """Schema for configuration data with string keys and typed values via :py:class:`Field`.\n\n Unlike :py:class:`Permissive`, unspecified fields are not allowed and will throw a\n :py:class:`~dagster.DagsterInvalidConfigError`.\n\n Args:\n fields (Dict[str, Field]):\n The specification of the config dict.\n field_aliases (Dict[str, str]):\n Maps a string key to an alias that can be used instead of the original key. For example,\n an entry {"foo": "bar"} means that someone could use "bar" instead of "foo" as a\n top level string key.\n """\n\n def __new__(\n cls,\n fields,\n description=None,\n field_aliases=None,\n ):\n return _memoize_inst_in_field_cache(\n cls,\n Shape,\n _define_shape_key_hash(expand_fields_dict(fields), description, field_aliases),\n )\n\n def __init__(\n self,\n fields,\n description=None,\n field_aliases=None,\n ):\n # if we hit in the field cache - skip double init\n if self._initialized:\n return\n\n fields = expand_fields_dict(fields)\n super(Shape, self).__init__(\n kind=ConfigTypeKind.STRICT_SHAPE,\n key=_define_shape_key_hash(fields, description, field_aliases),\n description=description,\n fields=fields,\n )\n self.field_aliases = check.opt_dict_param(\n field_aliases, "field_aliases", key_type=str, value_type=str\n )\n self._initialized = True
\n\n\n
[docs]class Map(ConfigType):\n """Defines a config dict with arbitrary scalar keys and typed values.\n\n A map can contrain arbitrary keys of the specified scalar type, each of which has\n type checked values. Unlike :py:class:`Shape` and :py:class:`Permissive`, scalar\n keys other than strings can be used, and unlike :py:class:`Permissive`, all\n values are type checked.\n\n Args:\n key_type (type):\n The type of keys this map can contain. Must be a scalar type.\n inner_type (type):\n The type of the values that this map type can contain.\n key_label_name (string):\n Optional name which describes the role of keys in the map.\n\n **Examples:**\n\n .. code-block:: python\n\n @op(config_schema=Field(Map({str: int})))\n def partially_specified_config(context) -> List:\n return sorted(list(context.op_config.items()))\n """\n\n def __init__(self, key_type, inner_type, key_label_name=None):\n from .field import resolve_to_config_type\n\n self.key_type = resolve_to_config_type(key_type)\n self.inner_type = resolve_to_config_type(inner_type)\n self.given_name = key_label_name\n\n check.inst_param(self.key_type, "key_type", ConfigType)\n check.inst_param(self.inner_type, "inner_type", ConfigType)\n check.param_invariant(\n self.key_type.kind == ConfigTypeKind.SCALAR, "key_type", "Key type must be a scalar"\n )\n check.opt_str_param(self.given_name, "name")\n\n super(Map, self).__init__(\n key="Map.{key_type}.{inner_type}{name_key}".format(\n key_type=self.key_type.key,\n inner_type=self.inner_type.key,\n name_key=f":name: {key_label_name}" if key_label_name else "",\n ),\n # We use the given name field to store the key label name\n # this is used elsewhere to give custom types names\n given_name=key_label_name,\n type_params=[self.key_type, self.inner_type],\n kind=ConfigTypeKind.MAP,\n )\n\n @public\n @property\n def key_label_name(self) -> Optional[str]:\n """Name which describes the role of keys in the map, if provided."""\n return self.given_name\n\n def type_iterator(self) -> Iterator["ConfigType"]:\n yield from self.key_type.type_iterator()\n yield from self.inner_type.type_iterator()\n yield from super().type_iterator()
\n\n\ndef _define_permissive_dict_key(fields, description):\n return (\n "Permissive." + compute_fields_hash(fields, description=description)\n if fields\n else "Permissive"\n )\n\n\n
[docs]class Permissive(_ConfigHasFields):\n """Defines a config dict with a partially specified schema.\n\n A permissive dict allows partial specification of the config schema. Any fields with a\n specified schema will be type checked. Other fields will be allowed, but will be ignored by\n the type checker.\n\n Args:\n fields (Dict[str, Field]): The partial specification of the config dict.\n\n **Examples:**\n\n .. code-block:: python\n\n @op(config_schema=Field(Permissive({'required': Field(String)})))\n def map_config_op(context) -> List:\n return sorted(list(context.op_config.items()))\n """\n\n def __new__(cls, fields=None, description=None):\n return _memoize_inst_in_field_cache(\n cls,\n Permissive,\n _define_permissive_dict_key(\n expand_fields_dict(fields) if fields else None, description\n ),\n )\n\n def __init__(self, fields=None, description=None):\n # if we hit in field cache avoid double init\n if self._initialized:\n return\n\n fields = expand_fields_dict(fields) if fields else None\n super(Permissive, self).__init__(\n key=_define_permissive_dict_key(fields, description),\n kind=ConfigTypeKind.PERMISSIVE_SHAPE,\n fields=fields or dict(),\n description=description,\n )\n self._initialized = True
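# Illustrative sketch, not part of dagster itself: a Permissive shape type-checks only the
# declared fields and lets any extra, undeclared keys pass through unchecked.
def _example_permissive_fields():
    from dagster import Field

    permissive = Permissive({"required": Field(str)})
    assert permissive.kind == ConfigTypeKind.PERMISSIVE_SHAPE
    assert set(permissive.fields) == {"required"}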
\n\n\ndef _define_selector_key(fields, description):\n return "Selector." + compute_fields_hash(fields, description=description)\n\n\n
[docs]class Selector(_ConfigHasFields):\n """Define a config field requiring the user to select one option.\n\n Selectors are used when you want to be able to present several different options in config but\n allow only one to be selected. For example, a single input might be read in from either a csv\n file or a parquet file, but not both at once.\n\n Note that in some other type systems this might be called an 'input union'.\n\n Functionally, a selector is like a :py:class:`Dict`, except that only one key from the dict can\n be specified in valid config.\n\n Args:\n fields (Dict[str, Field]): The fields from which the user must select.\n\n **Examples:**\n\n .. code-block:: python\n\n @op(\n config_schema=Field(\n Selector(\n {\n 'haw': {'whom': Field(String, default_value='honua', is_required=False)},\n 'cn': {'whom': Field(String, default_value='\u4e16\u754c', is_required=False)},\n 'en': {'whom': Field(String, default_value='world', is_required=False)},\n }\n ),\n is_required=False,\n default_value={'en': {'whom': 'world'}},\n )\n )\n def hello_world_with_default(context):\n if 'haw' in context.op_config:\n return 'Aloha {whom}!'.format(whom=context.op_config['haw']['whom'])\n if 'cn' in context.op_config:\n return '\u4f60\u597d, {whom}!'.format(whom=context.op_config['cn']['whom'])\n if 'en' in context.op_config:\n return 'Hello, {whom}!'.format(whom=context.op_config['en']['whom'])\n """\n\n def __new__(cls, fields, description=None):\n return _memoize_inst_in_field_cache(\n cls,\n Selector,\n _define_selector_key(expand_fields_dict(fields), description),\n )\n\n def __init__(self, fields, description=None):\n # if we hit in field cache avoid double init\n if self._initialized:\n return\n\n fields = expand_fields_dict(fields)\n super(Selector, self).__init__(\n key=_define_selector_key(fields, description),\n kind=ConfigTypeKind.SELECTOR,\n fields=fields,\n description=description,\n )\n self._initialized = True
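# Illustrative sketch, not part of dagster itself: a Selector exposes several keys in its
# schema, but valid config must supply exactly one of them.
def _example_selector_fields():
    from dagster import Field

    source = Selector({"csv": Field(str), "parquet": Field(str)})
    assert source.kind == ConfigTypeKind.SELECTOR
    assert set(source.fields) == {"csv", "parquet"}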
\n\n\n# Config syntax expansion code below\n\n\ndef is_potential_field(potential_field: object) -> bool:\n from .field import Field, resolve_to_config_type\n\n return isinstance(potential_field, (Field, dict, list)) or bool(\n resolve_to_config_type(potential_field)\n )\n\n\ndef convert_fields_to_dict_type(fields: Mapping[str, object]):\n return _convert_fields_to_dict_type(fields, fields, [])\n\n\ndef _convert_fields_to_dict_type(\n original_root: object, fields: Mapping[str, object], stack: List[str]\n) -> Shape:\n return Shape(_expand_fields_dict(original_root, fields, stack))\n\n\ndef expand_fields_dict(fields: Mapping[str, object]) -> Mapping[str, "Field"]:\n return _expand_fields_dict(fields, fields, [])\n\n\ndef _expand_fields_dict(\n original_root: object, fields: Mapping[str, object], stack: List[str]\n) -> Mapping[str, "Field"]:\n check.mapping_param(fields, "fields")\n return {\n name: _convert_potential_field(original_root, value, stack + [name])\n for name, value in fields.items()\n }\n\n\ndef expand_list(original_root: object, the_list: Sequence[object], stack: List[str]) -> Array:\n if len(the_list) != 1:\n raise DagsterInvalidConfigDefinitionError(\n original_root, the_list, stack, "List must be of length 1"\n )\n\n inner_type = _convert_potential_type(original_root, the_list[0], stack)\n if not inner_type:\n raise DagsterInvalidConfigDefinitionError(\n original_root,\n the_list,\n stack,\n "List have a single item and contain a valid type i.e. [int]. Got item {}".format(\n repr(the_list[0])\n ),\n )\n\n return Array(inner_type)\n\n\ndef expand_map(original_root: object, the_dict: Mapping[object, object], stack: List[str]) -> Map:\n if len(the_dict) != 1:\n raise DagsterInvalidConfigDefinitionError(\n original_root, the_dict, stack, "Map dict must be of length 1"\n )\n\n key = next(iter(the_dict.keys()))\n key_type = _convert_potential_type(original_root, key, stack)\n if not key_type or not key_type.kind == ConfigTypeKind.SCALAR:\n raise DagsterInvalidConfigDefinitionError(\n original_root,\n the_dict,\n stack,\n f"Map dict must have a scalar type as its only key. Got key {key!r}",\n )\n\n inner_type = _convert_potential_type(original_root, the_dict[key], stack)\n if not inner_type:\n raise DagsterInvalidConfigDefinitionError(\n original_root,\n the_dict,\n stack,\n "Map must have a single value and contain a valid type i.e. {{str: int}}. 
Got item {}"\n .format(repr(the_dict[key])),\n )\n\n return Map(key_type, inner_type)\n\n\ndef convert_potential_field(potential_field: object) -> "Field":\n return _convert_potential_field(potential_field, potential_field, [])\n\n\ndef _convert_potential_type(original_root: object, potential_type, stack: List[str]):\n from .field import resolve_to_config_type\n\n if isinstance(potential_type, Mapping):\n # A dictionary, containing a single key which is a type (int, str, etc) and not a string is interpreted as a Map\n if len(potential_type) == 1:\n key = next(iter(potential_type.keys()))\n if not isinstance(key, str) and _convert_potential_type(original_root, key, stack):\n return expand_map(original_root, potential_type, stack)\n\n # Otherwise, the dictionary is interpreted as a Shape\n return Shape(_expand_fields_dict(original_root, potential_type, stack))\n\n if isinstance(potential_type, list):\n return expand_list(original_root, potential_type, stack)\n\n return resolve_to_config_type(potential_type)\n\n\ndef _convert_potential_field(\n original_root: object, potential_field: object, stack: List[str]\n) -> "Field":\n from .field import Field\n\n if potential_field is None:\n raise DagsterInvalidConfigDefinitionError(\n original_root, potential_field, stack, reason="Fields cannot be None"\n )\n\n if not is_potential_field(potential_field):\n raise DagsterInvalidConfigDefinitionError(original_root, potential_field, stack)\n\n if isinstance(potential_field, Field):\n return potential_field\n\n return Field(_convert_potential_type(original_root, potential_field, stack))\n\n\ndef config_dictionary_from_values(\n values: Mapping[str, Any], config_field: "Field"\n) -> Dict[str, Any]:\n """Converts a set of config values into a dictionary representation,\n in particular converting EnvVar objects into Dagster config inputs\n and processing data structures such as dicts, lists, and structured Config classes.\n """\n assert ConfigTypeKind.is_shape(config_field.config_type.kind)\n\n from dagster._config.pythonic_config import _config_value_to_dict_representation\n\n return check.is_dict(_config_value_to_dict_representation(None, values))\n\n\ndef _create_direct_access_exception(cls: Type, env_var_name: str) -> Exception:\n return RuntimeError(\n f'Attempted to directly retrieve environment variable {cls.__name__}("{env_var_name}").'\n f" {cls.__name__} defers resolution of the environment variable value until run time, and"\n " should only be used as input to Dagster config or resources.\\n\\nTo access the"\n f" environment variable value, call `get_value` on the {cls.__name__}, or use os.getenv"\n " directly."\n )\n\n\nclass IntEnvVar(int):\n """Class used to represent an environment variable in the Dagster config system.\n\n The environment variable will be resolved to an int value when the config is\n loaded.\n """\n\n name: str\n\n @classmethod\n def create(cls, name: str) -> "IntEnvVar":\n var = IntEnvVar(0)\n var.name = name\n return var\n\n def __int__(self) -> int:\n """Raises an exception of the EnvVar value is directly accessed. Users should instead use\n the `get_value` method, or use the EnvVar as an input to Dagster config or resources.\n """\n raise _create_direct_access_exception(self.__class__, self.env_var_name)\n\n def __str__(self) -> str:\n return str(int(self))\n\n def get_value(self, default: Optional[int] = None) -> Optional[int]:\n """Returns the value of the environment variable, or the default value if the\n environment variable is not set. 
If no default is provided, None will be returned.\n """\n value = os.getenv(self.name, default=default)\n return int(value) if value else None\n\n @property\n def env_var_name(self) -> str:\n """Returns the name of the environment variable."""\n return self.name\n\n\nclass EnvVar(str):\n """Class used to represent an environment variable in the Dagster config system.\n\n This class is intended to be used to populate config fields or resources.\n The environment variable will be resolved to a string value when the config is\n loaded.\n\n To access the value of the environment variable, use the `get_value` method.\n """\n\n @classmethod\n def int(cls, name: str) -> "IntEnvVar":\n return IntEnvVar.create(name=name)\n\n def __str__(self) -> str:\n """Raises an exception of the EnvVar value is directly accessed. Users should instead use\n the `get_value` method, or use the EnvVar as an input to Dagster config or resources.\n """\n raise _create_direct_access_exception(self.__class__, self.env_var_name)\n\n @property\n def env_var_name(self) -> str:\n """Returns the name of the environment variable."""\n return super().__str__()\n\n def get_value(self, default: Optional[str] = None) -> Optional[str]:\n """Returns the value of the environment variable, or the default value if the\n environment variable is not set. If no default is provided, None will be returned.\n """\n return os.getenv(self.env_var_name, default=default)\n
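# Illustrative sketch, not part of dagster itself (the environment variable name is
# hypothetical): EnvVar defers resolution until config time, so the raw value is read via
# get_value, while casting the EnvVar to str directly raises to prevent accidental use.
def _example_env_var_deferred_resolution():
    os.environ["EXAMPLE_DB_URL"] = "postgres://localhost"
    url = EnvVar("EXAMPLE_DB_URL")
    assert url.env_var_name == "EXAMPLE_DB_URL"
    assert url.get_value() == "postgres://localhost"
    # str(url) would raise a RuntimeError directing the caller to get_value instead.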
", "current_page_name": "_modules/dagster/_config/field_utils", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._config.field_utils"}, "pythonic_config": {"config": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._config.pythonic_config.config

\nimport re\nfrom enum import Enum\nfrom typing import (\n    Any,\n    Dict,\n    List,\n    Mapping,\n    Optional,\n    Set,\n    Type,\n    cast,\n)\n\nfrom pydantic import BaseModel\nfrom typing_extensions import TypeVar\n\nimport dagster._check as check\nfrom dagster import (\n    Field as DagsterField,\n    Shape,\n)\nfrom dagster._config.field_utils import (\n    EnvVar,\n    IntEnvVar,\n    Permissive,\n)\nfrom dagster._core.definitions.definition_config_schema import (\n    DefinitionConfigSchema,\n)\nfrom dagster._core.errors import (\n    DagsterInvalidConfigDefinitionError,\n    DagsterInvalidDefinitionError,\n    DagsterInvalidInvocationError,\n    DagsterInvalidPythonicConfigDefinitionError,\n)\nfrom dagster._utils.cached_method import CACHED_METHOD_FIELD_SUFFIX\n\nfrom .attach_other_object_to_context import (\n    IAttachDifferentObjectToOpContext as IAttachDifferentObjectToOpContext,\n)\nfrom .conversion_utils import _convert_pydantic_field, safe_is_subclass\nfrom .pydantic_compat_layer import (\n    USING_PYDANTIC_2,\n    ModelFieldCompat,\n    model_config,\n    model_fields,\n)\nfrom .typing_utils import BaseConfigMeta\n\ntry:\n    from functools import cached_property  # type: ignore  # (py37 compat)\nexcept ImportError:\n\n    class cached_property:\n        pass\n\n\nINTERNAL_MARKER = "__internal__"\n\n# ensure that this ends with the internal marker so we can do a single check\nassert CACHED_METHOD_FIELD_SUFFIX.endswith(INTERNAL_MARKER)\n\n\ndef _is_frozen_pydantic_error(e: Exception) -> bool:\n    """Parses an error to determine if it is a Pydantic error indicating that the instance\n    is immutable. We use this to attach a more helpful error message.\n    """\n    return "Instance is frozen" in str(  # Pydantic 2.x error\n        e\n    ) or "is immutable and does not support item assignment" in str(  # Pydantic 1.x error\n        e\n    )\n\n\nclass MakeConfigCacheable(BaseModel):\n    """This class centralizes and implements all the chicanery we need in order\n    to support caching decorators. If we decide this is a bad idea we can remove it\n    all in one go.\n    """\n\n    # Pydantic config for this class\n    # Cannot use kwargs for base class as this is not support for pydnatic<1.8\n    class Config:\n        # Various pydantic model config (https://docs.pydantic.dev/usage/model_config/)\n        # Necessary to allow for caching decorators\n        arbitrary_types_allowed = True\n        # Avoid pydantic reading a cached property class as part of the schema\n        if USING_PYDANTIC_2:\n            ignored_types = (cached_property,)\n        else:\n            keep_untouched = (cached_property,)\n        # Ensure the class is serializable, for caching purposes\n        frozen = True\n\n    def __setattr__(self, name: str, value: Any):\n        from .resource import ConfigurableResourceFactory\n\n        # This is a hack to allow us to set attributes on the class that are not part of the\n        # config schema. 
Pydantic will normally raise an error if you try to set an attribute\n        # that is not part of the schema.\n\n        if self._is_field_internal(name):\n            object.__setattr__(self, name, value)\n            return\n\n        try:\n            return super().__setattr__(name, value)\n        except (TypeError, ValueError) as e:\n            clsname = self.__class__.__name__\n            if _is_frozen_pydantic_error(e):\n                if isinstance(self, ConfigurableResourceFactory):\n                    raise DagsterInvalidInvocationError(\n                        f"'{clsname}' is a Pythonic resource and does not support item assignment,"\n                        " as it inherits from 'pydantic.BaseModel' with frozen=True. If trying to"\n                        " maintain state on this resource, consider building a separate, stateful"\n                        " client class, and provide a method on the resource to construct and"\n                        " return the stateful client."\n                    ) from e\n                else:\n                    raise DagsterInvalidInvocationError(\n                        f"'{clsname}' is a Pythonic config class and does not support item"\n                        " assignment, as it inherits from 'pydantic.BaseModel' with frozen=True."\n                    ) from e\n            elif "object has no field" in str(e):\n                field_name = check.not_none(\n                    re.search(r"object has no field \\"(.*)\\"", str(e))\n                ).group(1)\n                if isinstance(self, ConfigurableResourceFactory):\n                    raise DagsterInvalidInvocationError(\n                        f"'{clsname}' is a Pythonic resource and does not support manipulating"\n                        f" undeclared attribute '{field_name}' as it inherits from"\n                        " 'pydantic.BaseModel' without extra=\\"allow\\". If trying to maintain"\n                        " state on this resource, consider building a separate, stateful client"\n                        " class, and provide a method on the resource to construct and return the"\n                        " stateful client."\n                    ) from e\n                else:\n                    raise DagsterInvalidInvocationError(\n                        f"'{clsname}' is a Pythonic config class and does not support manipulating"\n                        f" undeclared attribute '{field_name}' as it inherits from"\n                        " 'pydantic.BaseModel' without extra=\\"allow\\"."\n                    ) from e\n            else:\n                raise\n\n    def _is_field_internal(self, name: str) -> bool:\n        return name.endswith(INTERNAL_MARKER)\n\n\nT = TypeVar("T")\n\n\ndef ensure_env_vars_set_post_init(set_value: T, input_value: Any) -> T:\n    """Pydantic 2.x utility. 
Ensures that Pydantic field values are set to the appropriate\n    EnvVar or IntEnvVar objects post-model-instantiation, since Pydantic 2.x will cast\n    EnvVar or IntEnvVar values to raw strings or ints as part of the model instantiation process.\n    """\n    if isinstance(set_value, dict) and isinstance(input_value, dict):\n        for key, value in input_value.items():\n            if isinstance(value, (EnvVar, IntEnvVar)):\n                set_value[key] = value\n            elif isinstance(value, (dict, list)):\n                set_value[key] = ensure_env_vars_set_post_init(set_value[key], value)\n    if isinstance(set_value, List) and isinstance(input_value, List):\n        for i in range(len(set_value)):\n            value = input_value[i]\n            if isinstance(value, (EnvVar, IntEnvVar)):\n                set_value[i] = value\n            elif isinstance(value, (dict, list)):\n                set_value[i] = ensure_env_vars_set_post_init(set_value[i], value)\n\n    return set_value\n\n\n
[docs]class Config(MakeConfigCacheable, metaclass=BaseConfigMeta):\n """Base class for Dagster configuration models, used to specify config schema for\n ops and assets. Subclasses :py:class:`pydantic.BaseModel`.\n\n Example definition:\n\n .. code-block:: python\n\n from pydantic import Field\n\n class MyAssetConfig(Config):\n my_str: str = "my_default_string"\n my_int_list: List[int]\n my_bool_with_metadata: bool = Field(default=False, description="A bool field")\n\n\n Example usage:\n\n .. code-block:: python\n\n @asset\n def asset_with_config(config: MyAssetConfig):\n assert config.my_str == "my_default_string"\n assert config.my_int_list == [1, 2, 3]\n assert config.my_bool_with_metadata == False\n\n asset_with_config(MyAssetConfig(my_int_list=[1, 2, 3], my_bool_with_metadata=True))\n\n """\n\n def __init__(self, **config_dict) -> None:\n """This constructor is overridden to handle any remapping of raw config dicts to\n the appropriate config classes. For example, discriminated unions are represented\n in Dagster config as dicts with a single key, which is the discriminator value.\n """\n modified_data = {}\n for key, value in config_dict.items():\n field = model_fields(self).get(key)\n\n # This is useful in Pydantic 2.x when reconstructing a config object from a dict\n # e.g. when instantiating a resource at runtime from its config dict\n # In Pydantic 1.x, this is a no-op, since a non-required field without a\n # value provided will default to None (required & optional are the same in 1.x)\n if field and not field.is_required() and value is None:\n continue\n\n if field and field.discriminator:\n nested_dict = value\n\n discriminator_key = check.not_none(field.discriminator)\n if isinstance(value, Config):\n nested_dict = _discriminated_union_config_dict_to_selector_config_dict(\n discriminator_key,\n value._get_non_none_public_field_values(), # noqa: SLF001\n )\n\n nested_items = list(check.is_dict(nested_dict).items())\n check.invariant(\n len(nested_items) == 1,\n "Discriminated union must have exactly one key",\n )\n discriminated_value, nested_values = nested_items[0]\n\n modified_data[key] = {\n **nested_values,\n discriminator_key: discriminated_value,\n }\n else:\n modified_data[key] = value\n\n for key, field in model_fields(self).items():\n if field.is_required() and key not in modified_data:\n modified_data[key] = None\n\n super().__init__(**modified_data)\n if USING_PYDANTIC_2:\n self.__dict__ = ensure_env_vars_set_post_init(self.__dict__, modified_data)\n\n def _convert_to_config_dictionary(self) -> Mapping[str, Any]:\n """Converts this Config object to a Dagster config dictionary, in the same format as the dictionary\n accepted as run config or as YAML in the launchpad.\n\n Inner fields are recursively converted to dictionaries, meaning nested config objects\n or EnvVars will be converted to the appropriate dictionary representation.\n """\n public_fields = self._get_non_none_public_field_values()\n return {\n k: _config_value_to_dict_representation(model_fields(self).get(k), v)\n for k, v in public_fields.items()\n }\n\n def _get_non_none_public_field_values(self) -> Mapping[str, Any]:\n """Returns a dictionary representation of this config object,\n ignoring any private fields, and any optional fields that are None.\n\n Inner fields are returned as-is in the dictionary,\n meaning any nested config objects will be returned as config objects, not dictionaries.\n """\n output = {}\n for key, value in self.__dict__.items():\n if self._is_field_internal(key):\n continue\n 
field = model_fields(self).get(key)\n\n if field:\n resolved_field_name = field.alias or key\n output[resolved_field_name] = value\n else:\n output[key] = value\n return output\n\n @classmethod\n def to_config_schema(cls) -> DefinitionConfigSchema:\n """Converts the config structure represented by this class into a DefinitionConfigSchema."""\n return DefinitionConfigSchema(infer_schema_from_config_class(cls))\n\n @classmethod\n def to_fields_dict(cls) -> Dict[str, DagsterField]:\n """Converts the config structure represented by this class into a dictionary of dagster.Fields.\n This is useful when interacting with legacy code that expects a dictionary of fields but you\n want the source of truth to be a config class.\n """\n return cast(Shape, cls.to_config_schema().as_field().config_type).fields
\n\n\ndef _discriminated_union_config_dict_to_selector_config_dict(\n discriminator_key: str, config_dict: Mapping[str, Any]\n):\n """Remaps a config dictionary which is a member of a discriminated union to\n the appropriate structure for a Dagster config selector.\n\n A discriminated union with key "my_key" and value "my_value" will be represented\n as {"my_key": "my_value", "my_field": "my_field_value"}. When converted to a selector,\n this should be represented as {"my_value": {"my_field": "my_field_value"}}.\n """\n updated_dict = dict(config_dict)\n discriminator_value = updated_dict.pop(discriminator_key)\n wrapped_dict = {discriminator_value: updated_dict}\n return wrapped_dict\n\n\ndef _config_value_to_dict_representation(field: Optional[ModelFieldCompat], value: Any):\n """Converts a config value to a dictionary representation. If a field is provided, it will be used\n to determine the appropriate dictionary representation in the case of discriminated unions.\n """\n from dagster._config.field_utils import EnvVar, IntEnvVar\n\n if isinstance(value, dict):\n return {k: _config_value_to_dict_representation(None, v) for k, v in value.items()}\n elif isinstance(value, list):\n return [_config_value_to_dict_representation(None, v) for v in value]\n elif isinstance(value, EnvVar):\n return {"env": value.env_var_name}\n elif isinstance(value, IntEnvVar):\n return {"env": value.name}\n if isinstance(value, Config):\n if field and field.discriminator:\n return {\n k: v\n for k, v in _discriminated_union_config_dict_to_selector_config_dict(\n field.discriminator,\n value._convert_to_config_dictionary(), # noqa: SLF001\n ).items()\n }\n else:\n return {k: v for k, v in value._convert_to_config_dictionary().items()} # noqa: SLF001\n elif isinstance(value, Enum):\n return value.name\n\n return value\n\n\n
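# Illustrative sketch, not part of dagster itself (the class and variable names are
# hypothetical): a Config subclass can hand its schema to legacy Field-dict APIs via
# to_fields_dict, and EnvVar values convert to {"env": ...} markers rather than raw strings.
def _example_config_schema_roundtrip():
    from dagster import EnvVar

    class ExampleConnectionConfig(Config):
        host: str
        port: int = 5432

    fields = ExampleConnectionConfig.to_fields_dict()
    assert set(fields) == {"host", "port"}
    assert _config_value_to_dict_representation(None, EnvVar("EXAMPLE_HOST")) == {
        "env": "EXAMPLE_HOST"
    }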
[docs]class PermissiveConfig(Config):\n """Subclass of :py:class:`Config` that allows arbitrary extra fields. This is useful for\n config classes which may have open-ended inputs.\n\n Example definition:\n\n .. code-block:: python\n\n class MyPermissiveOpConfig(PermissiveConfig):\n my_explicit_parameter: bool\n my_other_explicit_parameter: str\n\n\n Example usage:\n\n .. code-block:: python\n\n @op\n def op_with_config(config: MyPermissiveOpConfig):\n assert config.my_explicit_parameter == True\n assert config.my_other_explicit_parameter == "foo"\n assert config.dict().get("my_implicit_parameter") == "bar"\n\n op_with_config(\n MyPermissiveOpConfig(\n my_explicit_parameter=True,\n my_other_explicit_parameter="foo",\n my_implicit_parameter="bar"\n )\n )\n\n """\n\n # Pydantic config for this class\n # Cannot use kwargs for base class as this is not support for pydantic<1.8\n class Config:\n extra = "allow"
\n\n\ndef infer_schema_from_config_class(\n model_cls: Type["Config"],\n description: Optional[str] = None,\n fields_to_omit: Optional[Set[str]] = None,\n) -> DagsterField:\n from .config import Config\n from .resource import ConfigurableResourceFactory, _is_annotated_as_resource_type\n\n """Parses a structured config class and returns a corresponding Dagster config Field."""\n fields_to_omit = fields_to_omit or set()\n\n check.param_invariant(\n safe_is_subclass(model_cls, Config),\n "Config type annotation must inherit from dagster.Config",\n )\n\n fields: Dict[str, DagsterField] = {}\n for key, pydantic_field_info in model_fields(model_cls).items():\n if _is_annotated_as_resource_type(\n pydantic_field_info.annotation, pydantic_field_info.metadata\n ):\n continue\n\n resolved_field_name = pydantic_field_info.alias if pydantic_field_info.alias else key\n if key not in fields_to_omit:\n if isinstance(pydantic_field_info.default, DagsterField):\n raise DagsterInvalidDefinitionError(\n "Using 'dagster.Field' is not supported within a Pythonic config or resource"\n " definition. 'dagster.Field' should only be used in legacy Dagster config"\n " schemas. Did you mean to use 'pydantic.Field' instead?"\n )\n\n try:\n fields[resolved_field_name] = _convert_pydantic_field(pydantic_field_info)\n except DagsterInvalidConfigDefinitionError as e:\n raise DagsterInvalidPythonicConfigDefinitionError(\n config_class=model_cls,\n field_name=key,\n invalid_type=e.current_value,\n is_resource=model_cls is not None\n and safe_is_subclass(model_cls, ConfigurableResourceFactory),\n )\n\n shape_cls = Permissive if model_config(model_cls).get("extra") == "allow" else Shape\n\n docstring = model_cls.__doc__.strip() if model_cls.__doc__ else None\n\n return DagsterField(config=shape_cls(fields), description=description or docstring)\n
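# Illustrative sketch, not part of dagster itself (the config class is hypothetical):
# infer_schema_from_config_class turns a Config subclass into a single dagster.Field whose
# config type is a Shape (or Permissive when the class allows extra fields).
def _example_infer_schema():
    class ExampleOpConfig(Config):
        path: str
        overwrite: bool = False

    inferred = infer_schema_from_config_class(ExampleOpConfig)
    assert isinstance(inferred, DagsterField)
    assert set(inferred.config_type.fields) == {"path", "overwrite"}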
", "current_page_name": "_modules/dagster/_config/pythonic_config/config", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._config.pythonic_config.config"}, "io_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._config.pythonic_config.io_manager

\nfrom abc import abstractmethod\nfrom typing import (\n    AbstractSet,\n    Any,\n    Callable,\n    Dict,\n    Generic,\n    Mapping,\n    Optional,\n    Type,\n    Union,\n    cast,\n)\n\nfrom typing_extensions import TypeVar\n\nfrom dagster._core.definitions.definition_config_schema import (\n    CoercableToConfigSchema,\n)\nfrom dagster._core.definitions.resource_definition import (\n    ResourceDefinition,\n    ResourceFunction,\n)\nfrom dagster._core.execution.context.init import InitResourceContext\nfrom dagster._core.storage.io_manager import IOManager, IOManagerDefinition\nfrom dagster._utils.cached_method import cached_method\n\nfrom .attach_other_object_to_context import (\n    IAttachDifferentObjectToOpContext as IAttachDifferentObjectToOpContext,\n)\nfrom .config import Config\nfrom .conversion_utils import TResValue\nfrom .resource import (\n    AllowDelayedDependencies,\n    ConfigurableResourceFactory,\n    PartialResource,\n    ResourceId,\n    ResourceWithKeyMapping,\n    Self,\n)\nfrom .type_check_utils import safe_is_subclass\n\ntry:\n    from functools import cached_property  # type: ignore  # (py37 compat)\nexcept ImportError:\n\n    class cached_property:\n        pass\n\n\nTIOManagerValue = TypeVar("TIOManagerValue", bound=IOManager)\n\n\nclass ConfigurableIOManagerFactoryResourceDefinition(IOManagerDefinition, AllowDelayedDependencies):\n    def __init__(\n        self,\n        configurable_resource_cls: Type,\n        resource_fn: ResourceFunction,\n        config_schema: Any,\n        description: Optional[str],\n        resolve_resource_keys: Callable[[Mapping[int, str]], AbstractSet[str]],\n        nested_resources: Mapping[str, Any],\n        input_config_schema: Optional[Union[CoercableToConfigSchema, Type[Config]]] = None,\n        output_config_schema: Optional[Union[CoercableToConfigSchema, Type[Config]]] = None,\n        dagster_maintained: bool = False,\n    ):\n        input_config_schema_resolved: CoercableToConfigSchema = (\n            cast(Type[Config], input_config_schema).to_config_schema()\n            if safe_is_subclass(input_config_schema, Config)\n            else cast(CoercableToConfigSchema, input_config_schema)\n        )\n        output_config_schema_resolved: CoercableToConfigSchema = (\n            cast(Type[Config], output_config_schema).to_config_schema()\n            if safe_is_subclass(output_config_schema, Config)\n            else cast(CoercableToConfigSchema, output_config_schema)\n        )\n        super().__init__(\n            resource_fn=resource_fn,\n            config_schema=config_schema,\n            description=description,\n            input_config_schema=input_config_schema_resolved,\n            output_config_schema=output_config_schema_resolved,\n        )\n        self._resolve_resource_keys = resolve_resource_keys\n        self._nested_resources = nested_resources\n        self._configurable_resource_cls = configurable_resource_cls\n        self._dagster_maintained = dagster_maintained\n\n    @property\n    def configurable_resource_cls(self) -> Type:\n        return self._configurable_resource_cls\n\n    @property\n    def nested_resources(\n        self,\n    ) -> Mapping[str, Any]:\n        return self._nested_resources\n\n    def _resolve_required_resource_keys(\n        self, resource_mapping: Mapping[int, str]\n    ) -> AbstractSet[str]:\n        return self._resolve_resource_keys(resource_mapping)\n\n\nclass IOManagerWithKeyMapping(ResourceWithKeyMapping, IOManagerDefinition):\n    """Version of 
ResourceWithKeyMapping wrapper that also implements IOManagerDefinition."""\n\n    def __init__(\n        self,\n        resource: ResourceDefinition,\n        resource_id_to_key_mapping: Dict[ResourceId, str],\n    ):\n        ResourceWithKeyMapping.__init__(self, resource, resource_id_to_key_mapping)\n        IOManagerDefinition.__init__(\n            self, resource_fn=self.resource_fn, config_schema=resource.config_schema\n        )\n\n\n
[docs]class ConfigurableIOManagerFactory(ConfigurableResourceFactory[TIOManagerValue]):\n """Base class for Dagster IO managers that utilize structured config. This base class\n is useful for cases in which the returned IO manager is not the same as the class itself\n (e.g. when it is a wrapper around the actual IO manager implementation).\n\n This class is a subclass of both :py:class:`IOManagerDefinition` and :py:class:`Config`.\n Implementers should provide an implementation of the :py:meth:`resource_function` method,\n which should return an instance of :py:class:`IOManager`.\n\n\n Example definition:\n\n .. code-block:: python\n\n class ExternalIOManager(IOManager):\n\n def __init__(self, connection):\n self._connection = connection\n\n def handle_output(self, context, obj):\n ...\n\n def load_input(self, context):\n ...\n\n class ConfigurableExternalIOManager(ConfigurableIOManagerFactory):\n username: str\n password: str\n\n def create_io_manager(self, context) -> IOManager:\n with database.connect(username, password) as connection:\n return MyExternalIOManager(connection)\n\n defs = Definitions(\n ...,\n resources={\n "io_manager": ConfigurableExternalIOManager(\n username="dagster",\n password=EnvVar("DB_PASSWORD")\n )\n }\n )\n\n """\n\n def __init__(self, **data: Any):\n ConfigurableResourceFactory.__init__(self, **data)\n\n @abstractmethod\n def create_io_manager(self, context) -> TIOManagerValue:\n """Implement as one would implement a @io_manager decorator function."""\n raise NotImplementedError()\n\n def create_resource(self, context: InitResourceContext) -> TIOManagerValue:\n return self.create_io_manager(context)\n\n @classmethod\n def configure_at_launch(cls: "Type[Self]", **kwargs) -> "PartialIOManager[Self]":\n """Returns a partially initialized copy of the IO manager, with remaining config fields\n set at runtime.\n """\n return PartialIOManager(cls, data=kwargs)\n\n @cached_method\n def get_resource_definition(self) -> ConfigurableIOManagerFactoryResourceDefinition:\n return ConfigurableIOManagerFactoryResourceDefinition(\n self.__class__,\n resource_fn=self._get_initialize_and_run_fn(),\n config_schema=self._config_schema,\n description=self.__doc__,\n resolve_resource_keys=self._resolve_required_resource_keys,\n nested_resources=self.nested_resources,\n input_config_schema=self.__class__.input_config_schema(),\n output_config_schema=self.__class__.output_config_schema(),\n dagster_maintained=self._is_dagster_maintained(),\n )\n\n @classmethod\n def input_config_schema(\n cls,\n ) -> Optional[Union[CoercableToConfigSchema, Type[Config]]]:\n return None\n\n @classmethod\n def output_config_schema(\n cls,\n ) -> Optional[Union[CoercableToConfigSchema, Type[Config]]]:\n return None
\n\n\nclass PartialIOManager(Generic[TResValue], PartialResource[TResValue]):\n def __init__(\n self,\n resource_cls: Type[ConfigurableResourceFactory[TResValue]],\n data: Dict[str, Any],\n ):\n PartialResource.__init__(self, resource_cls, data)\n\n @cached_method\n def get_resource_definition(self) -> ConfigurableIOManagerFactoryResourceDefinition:\n input_config_schema = None\n output_config_schema = None\n if safe_is_subclass(self.resource_cls, ConfigurableIOManagerFactory):\n factory_cls: Type[ConfigurableIOManagerFactory] = cast(\n Type[ConfigurableIOManagerFactory], self.resource_cls\n )\n input_config_schema = factory_cls.input_config_schema()\n output_config_schema = factory_cls.output_config_schema()\n\n return ConfigurableIOManagerFactoryResourceDefinition(\n self.resource_cls,\n resource_fn=self._state__internal__.resource_fn,\n config_schema=self._state__internal__.config_schema,\n description=self._state__internal__.description,\n resolve_resource_keys=self._resolve_required_resource_keys,\n nested_resources=self._state__internal__.nested_resources,\n input_config_schema=input_config_schema,\n output_config_schema=output_config_schema,\n dagster_maintained=self.resource_cls._is_dagster_maintained(), # noqa: SLF001\n )\n\n\n
[docs]class ConfigurableIOManager(ConfigurableIOManagerFactory, IOManager):\n """Base class for Dagster IO managers that utilize structured config.\n\n This class is a subclass of both :py:class:`IOManagerDefinition`, :py:class:`Config`,\n and :py:class:`IOManager`. Implementers must provide an implementation of the\n :py:meth:`handle_output` and :py:meth:`load_input` methods.\n\n Example definition:\n\n .. code-block:: python\n\n class MyIOManager(ConfigurableIOManager):\n path_prefix: List[str]\n\n def _get_path(self, context) -> str:\n return "/".join(context.asset_key.path)\n\n def handle_output(self, context, obj):\n write_csv(self._get_path(context), obj)\n\n def load_input(self, context):\n return read_csv(self._get_path(context))\n\n defs = Definitions(\n ...,\n resources={\n "io_manager": MyIOManager(path_prefix=["my", "prefix"])\n }\n )\n\n """\n\n def create_io_manager(self, context) -> IOManager:\n return self
\n\n\nclass ConfigurableLegacyIOManagerAdapter(ConfigurableIOManagerFactory):\n """Adapter base class for wrapping a decorated, function-style I/O manager\n with structured config.\n\n To use this class, subclass it, define config schema fields using Pydantic,\n and implement the ``wrapped_io_manager`` method.\n\n Example:\n .. code-block:: python\n\n class OldIOManager(IOManager):\n def __init__(self, base_path: str):\n ...\n\n @io_manager(config_schema={"base_path": str})\n def old_io_manager(context):\n base_path = context.resource_config["base_path"]\n\n return OldIOManager(base_path)\n\n class MyIOManager(ConfigurableLegacyIOManagerAdapter):\n base_path: str\n\n @property\n def wrapped_io_manager(self) -> IOManagerDefinition:\n return old_io_manager\n """\n\n @property\n @abstractmethod\n def wrapped_io_manager(self) -> IOManagerDefinition:\n raise NotImplementedError()\n\n def create_io_manager(self, context) -> IOManager:\n raise NotImplementedError(\n "Because we override resource_fn in the adapter, this is never called."\n )\n\n @cached_method\n def get_resource_definition(self) -> ConfigurableIOManagerFactoryResourceDefinition:\n return ConfigurableIOManagerFactoryResourceDefinition(\n self.__class__,\n resource_fn=self.wrapped_io_manager.resource_fn,\n config_schema=self._config_schema,\n description=self.__doc__,\n resolve_resource_keys=self._resolve_required_resource_keys,\n nested_resources=self.nested_resources,\n dagster_maintained=self._is_dagster_maintained(),\n )\n
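# Illustrative sketch, not part of dagster itself (the IO manager class is hypothetical):
# ConfigurableIOManager.create_io_manager returns the instance itself, so a fully configured
# instance doubles as the runtime IOManager.
def _example_inline_io_manager():
    class ExamplePrefixIOManager(ConfigurableIOManager):
        prefix: str

        def handle_output(self, context, obj):
            pass

        def load_input(self, context):
            return None

    manager = ExamplePrefixIOManager(prefix="warehouse")
    assert manager.create_io_manager(None) is manager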
", "current_page_name": "_modules/dagster/_config/pythonic_config/io_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._config.pythonic_config.io_manager"}, "resource": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._config.pythonic_config.resource

\nimport contextlib\nimport inspect\nfrom typing import (\n    AbstractSet,\n    Any,\n    Callable,\n    Dict,\n    Generator,\n    Generic,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Set,\n    Type,\n    TypeVar,\n    Union,\n    cast,\n)\n\nfrom typing_extensions import TypeAlias, TypeGuard, get_args, get_origin\n\nfrom dagster import (\n    Field as DagsterField,\n)\nfrom dagster._annotations import deprecated\nfrom dagster._config.field_utils import config_dictionary_from_values\nfrom dagster._config.pythonic_config.typing_utils import (\n    TypecheckAllowPartialResourceInitParams,\n)\nfrom dagster._config.validate import validate_config\nfrom dagster._core.definitions.definition_config_schema import (\n    ConfiguredDefinitionConfigSchema,\n    DefinitionConfigSchema,\n)\nfrom dagster._core.errors import DagsterInvalidConfigError\nfrom dagster._core.execution.context.init import InitResourceContext, build_init_resource_context\nfrom dagster._utils.cached_method import cached_method\n\nfrom .attach_other_object_to_context import (\n    IAttachDifferentObjectToOpContext as IAttachDifferentObjectToOpContext,\n)\nfrom .pydantic_compat_layer import (\n    model_fields,\n)\n\ntry:\n    from functools import cached_property  # type: ignore  # (py37 compat)\nexcept ImportError:\n\n    class cached_property:\n        pass\n\n\nfrom abc import ABC, abstractmethod\n\nfrom pydantic import BaseModel\n\nimport dagster._check as check\nfrom dagster._core.decorator_utils import get_function_params\nfrom dagster._core.definitions.resource_definition import (\n    ResourceDefinition,\n    ResourceFunction,\n    ResourceFunctionWithContext,\n    ResourceFunctionWithoutContext,\n    has_at_least_one_parameter,\n)\nfrom dagster._core.storage.io_manager import IOManagerDefinition\n\nfrom .config import Config, MakeConfigCacheable, infer_schema_from_config_class\nfrom .conversion_utils import (\n    TResValue,\n    _curry_config_schema,\n)\nfrom .typing_utils import BaseResourceMeta, LateBoundTypesForResourceTypeChecking\n\nSelf = TypeVar("Self", bound="ConfigurableResourceFactory")\nResourceId: TypeAlias = int\n\n\nclass AllowDelayedDependencies:\n    _nested_partial_resources: Mapping[str, ResourceDefinition] = {}\n\n    def _resolve_required_resource_keys(\n        self, resource_mapping: Mapping[int, str]\n    ) -> AbstractSet[str]:\n        from dagster._core.execution.build_resources import wrap_resource_for_execution\n\n        # All dependent resources which are not fully configured\n        # must be specified to the Definitions object so that the\n        # resource can be configured at runtime by the user\n        nested_partial_resource_keys = {\n            attr_name: resource_mapping.get(id(resource_def))\n            for attr_name, resource_def in self._nested_partial_resources.items()\n        }\n        check.invariant(\n            all(pointer_key is not None for pointer_key in nested_partial_resource_keys.values()),\n            "Any partially configured, nested resources must be provided to Definitions"\n            f" object: {nested_partial_resource_keys}",\n        )\n\n        # Recursively get all nested resource keys\n        nested_resource_required_keys: Set[str] = set()\n        for v in self._nested_partial_resources.values():\n            nested_resource_required_keys.update(\n                _resolve_required_resource_keys_for_resource(v, resource_mapping)\n            )\n\n        resources, _ = separate_resource_params(\n            
cast(Type[BaseModel], self.__class__), self.__dict__\n        )\n        for v in resources.values():\n            nested_resource_required_keys.update(\n                _resolve_required_resource_keys_for_resource(\n                    wrap_resource_for_execution(v), resource_mapping\n                )\n            )\n\n        out = set(cast(Set[str], nested_partial_resource_keys.values())).union(\n            nested_resource_required_keys\n        )\n        return out\n\n\nclass InitResourceContextWithKeyMapping(InitResourceContext):\n    """Passes along a mapping from ResourceDefinition id to resource key alongside the\n    InitResourceContext. This is used to resolve the required resource keys for\n    resources which may hold nested partial resources.\n    """\n\n    def __init__(\n        self,\n        context: InitResourceContext,\n        resource_id_to_key_mapping: Mapping[ResourceId, str],\n    ):\n        super().__init__(\n            resource_config=context.resource_config,\n            resources=context.resources,\n            instance=context.instance,\n            resource_def=context.resource_def,\n            dagster_run=context.dagster_run,\n            log_manager=context.log,\n        )\n        self._resource_id_to_key_mapping = resource_id_to_key_mapping\n        self._resources_by_id = {\n            resource_id: getattr(context.resources, resource_key, None)\n            for resource_id, resource_key in resource_id_to_key_mapping.items()\n        }\n\n    @property\n    def resources_by_id(self) -> Mapping[ResourceId, Any]:\n        return self._resources_by_id\n\n    def replace_config(self, config: Any) -> "InitResourceContext":\n        return InitResourceContextWithKeyMapping(\n            super().replace_config(config), self._resource_id_to_key_mapping\n        )\n\n\nclass ResourceWithKeyMapping(ResourceDefinition):\n    """Wrapper around a ResourceDefinition which helps the inner resource resolve its required\n    resource keys. This is useful for resources which may hold nested resources. 
At construction\n    time, they are unaware of the resource keys of their nested resources - the resource id to\n    key mapping is used to resolve this.\n    """\n\n    def __init__(\n        self,\n        resource: ResourceDefinition,\n        resource_id_to_key_mapping: Dict[ResourceId, str],\n    ):\n        self._resource = resource\n        self._resource_id_to_key_mapping = resource_id_to_key_mapping\n\n        ResourceDefinition.__init__(\n            self,\n            resource_fn=self.setup_context_resources_and_call,\n            config_schema=resource.config_schema,\n            description=resource.description,\n            version=resource.version,\n        )\n\n    def setup_context_resources_and_call(self, context: InitResourceContext):\n        """Wrapper around the wrapped resource's resource_fn which attaches its\n        resource id to key mapping to the context, and then calls the nested resource's resource_fn.\n        """\n        context_with_key_mapping = InitResourceContextWithKeyMapping(\n            context, self._resource_id_to_key_mapping\n        )\n\n        if has_at_least_one_parameter(self._resource.resource_fn):\n            return self._resource.resource_fn(context_with_key_mapping)\n        else:\n            return cast(ResourceFunctionWithoutContext, self._resource.resource_fn)()\n\n    @property\n    def required_resource_keys(self) -> AbstractSet[str]:\n        return _resolve_required_resource_keys_for_resource(\n            self._resource, self._resource_id_to_key_mapping\n        )\n\n    @property\n    def wrapped_resource(self) -> ResourceDefinition:\n        return self._resource\n\n    @property\n    def inner_resource(self):\n        return self._resource\n\n\ndef attach_resource_id_to_key_mapping(\n    resource_def: Any, resource_id_to_key_mapping: Dict[ResourceId, str]\n) -> Any:\n    from .io_manager import IOManagerWithKeyMapping\n\n    if isinstance(resource_def, (ConfigurableResourceFactory, PartialResource)):\n        defn = resource_def.get_resource_definition()\n        return (\n            IOManagerWithKeyMapping(defn, resource_id_to_key_mapping)\n            if isinstance(defn, IOManagerDefinition)\n            else ResourceWithKeyMapping(defn, resource_id_to_key_mapping)\n        )\n    return resource_def\n\n\nCoercibleToResource: TypeAlias = Union[\n    ResourceDefinition, "ConfigurableResourceFactory", "PartialResource"\n]\n\n\ndef is_coercible_to_resource(val: Any) -> TypeGuard[CoercibleToResource]:\n    return isinstance(val, (ResourceDefinition, ConfigurableResourceFactory, PartialResource))\n\n\nclass ConfigurableResourceFactoryResourceDefinition(ResourceDefinition, AllowDelayedDependencies):\n    def __init__(\n        self,\n        configurable_resource_cls: Type,\n        resource_fn: ResourceFunction,\n        config_schema: Any,\n        description: Optional[str],\n        resolve_resource_keys: Callable[[Mapping[int, str]], AbstractSet[str]],\n        nested_resources: Mapping[str, Any],\n        dagster_maintained: bool = False,\n    ):\n        super().__init__(\n            resource_fn=resource_fn,\n            config_schema=config_schema,\n            description=description,\n        )\n        self._configurable_resource_cls = configurable_resource_cls\n        self._resolve_resource_keys = resolve_resource_keys\n        self._nested_resources = nested_resources\n        self._dagster_maintained = dagster_maintained\n\n    @property\n    def configurable_resource_cls(self) -> Type:\n        return 
self._configurable_resource_cls\n\n    @property\n    def nested_resources(\n        self,\n    ) -> Mapping[str, Any]:\n        return self._nested_resources\n\n    def _resolve_required_resource_keys(\n        self, resource_mapping: Mapping[int, str]\n    ) -> AbstractSet[str]:\n        return self._resolve_resource_keys(resource_mapping)\n\n    def _is_dagster_maintained(self) -> bool:\n        return self._dagster_maintained\n\n\nclass ConfigurableResourceFactoryState(NamedTuple):\n    nested_partial_resources: Mapping[str, Any]\n    resolved_config_dict: Dict[str, Any]\n    config_schema: DefinitionConfigSchema\n    schema: DagsterField\n    nested_resources: Dict[str, Any]\n    resource_context: Optional[InitResourceContext]\n\n\nclass ConfigurableResourceFactory(\n    Generic[TResValue],\n    Config,\n    TypecheckAllowPartialResourceInitParams,\n    AllowDelayedDependencies,\n    ABC,\n    metaclass=BaseResourceMeta,\n):\n    """Base class for creating and managing the lifecycle of Dagster resources that utilize structured config.\n\n    Users should directly inherit from this class when they want the object passed to user-defined\n    code (such as an asset or op) to be different than the object that defines the configuration\n    schema and is passed to the :py:class:`Definitions` object. Cases where this is useful include is\n    when the object passed to user code is:\n\n    * An existing class from a third-party library that the user does not control.\n    * A complex class that requires substantial internal state management or itself requires arguments beyond its config values.\n    * A class with expensive initialization that should not be invoked on code location load, but rather lazily on first use in an op or asset during a run.\n    * A class that you desire to be a plain Python class, rather than a Pydantic class, for whatever reason.\n\n    This class is a subclass of both :py:class:`ResourceDefinition` and :py:class:`Config`, and\n    must implement ``create_resource``, which creates the resource to pass to user code.\n\n    Example definition:\n\n    .. code-block:: python\n\n        class DatabaseResource(ConfigurableResourceFactory[Database]):\n            connection_uri: str\n\n            def create_resource(self, _init_context) -> Database:\n                # For example Database could be from a third-party library or require expensive setup.\n                # Or you could just prefer to separate the concerns of configuration and runtime representation\n                return Database(self.connection_uri)\n\n    To use a resource created by a factory in a job, you must use the Resource type annotation.\n\n    Example usage:\n\n\n    .. 
code-block:: python\n\n        @asset\n        def asset_that_uses_database(database: ResourceParam[Database]):\n            # Database used directly in user code\n            database.query("SELECT * FROM table")\n\n        defs = Definitions(\n            assets=[asset_that_uses_database],\n            resources={"database": DatabaseResource(connection_uri="some_uri")},\n        )\n\n    """\n\n    def __init__(self, **data: Any):\n        resource_pointers, data_without_resources = separate_resource_params(self.__class__, data)\n\n        schema = infer_schema_from_config_class(\n            self.__class__, fields_to_omit=set(resource_pointers.keys())\n        )\n\n        # Populate config values\n        Config.__init__(self, **{**data_without_resources, **resource_pointers})\n\n        # We pull the values from the Pydantic config object, which may cast values\n        # to the correct type under the hood - useful in particular for enums\n        casted_data_without_resources = {\n            k: v\n            for k, v in self._convert_to_config_dictionary().items()\n            if k in data_without_resources\n        }\n        resolved_config_dict = config_dictionary_from_values(casted_data_without_resources, schema)\n\n        self._state__internal__ = ConfigurableResourceFactoryState(\n            # We keep track of any resources we depend on which are not fully configured\n            # so that we can retrieve them at runtime\n            nested_partial_resources={\n                k: v for k, v in resource_pointers.items() if (not _is_fully_configured(v))\n            },\n            resolved_config_dict=resolved_config_dict,\n            # These are unfortunately named very similarily\n            config_schema=_curry_config_schema(schema, resolved_config_dict),\n            schema=schema,\n            nested_resources={k: v for k, v in resource_pointers.items()},\n            resource_context=None,\n        )\n\n    @property\n    def _schema(self):\n        return self._state__internal__.schema\n\n    @property\n    def _config_schema(self):\n        return self._state__internal__.config_schema\n\n    @property\n    def _nested_partial_resources(self):\n        return self._state__internal__.nested_partial_resources\n\n    @property\n    def _nested_resources(self):\n        return self._state__internal__.nested_resources\n\n    @property\n    def _resolved_config_dict(self):\n        return self._state__internal__.resolved_config_dict\n\n    @classmethod\n    def _is_dagster_maintained(cls) -> bool:\n        """This should be overridden to return True by all dagster maintained resources and IO managers."""\n        return False\n\n    @classmethod\n    def _is_cm_resource_cls(cls: Type["ConfigurableResourceFactory"]) -> bool:\n        return (\n            cls.yield_for_execution != ConfigurableResourceFactory.yield_for_execution\n            or cls.teardown_after_execution != ConfigurableResourceFactory.teardown_after_execution\n        )\n\n    @property\n    def _is_cm_resource(self) -> bool:\n        return self.__class__._is_cm_resource_cls()  # noqa: SLF001\n\n    def _get_initialize_and_run_fn(self) -> Callable:\n        return self._initialize_and_run_cm if self._is_cm_resource else self._initialize_and_run\n\n    @cached_method\n    def get_resource_definition(self) -> ConfigurableResourceFactoryResourceDefinition:\n        return ConfigurableResourceFactoryResourceDefinition(\n            self.__class__,\n            
resource_fn=self._get_initialize_and_run_fn(),\n            config_schema=self._config_schema,\n            description=self.__doc__,\n            resolve_resource_keys=self._resolve_required_resource_keys,\n            nested_resources=self.nested_resources,\n            dagster_maintained=self._is_dagster_maintained(),\n        )\n\n    @abstractmethod\n    def create_resource(self, context: InitResourceContext) -> TResValue:\n        """Returns the object that this resource hands to user code, accessible by ops or assets\n        through the context or resource parameters. This works like the function decorated\n        with @resource when using function-based resources.\n        """\n        raise NotImplementedError()\n\n    @property\n    def nested_resources(\n        self,\n    ) -> Mapping[str, Any]:\n        return self._nested_resources\n\n    @classmethod\n    def configure_at_launch(cls: "Type[Self]", **kwargs) -> "PartialResource[Self]":\n        """Returns a partially initialized copy of the resource, with remaining config fields\n        set at runtime.\n        """\n        return PartialResource(cls, data=kwargs)\n\n    def _with_updated_values(\n        self, values: Optional[Mapping[str, Any]]\n    ) -> "ConfigurableResourceFactory[TResValue]":\n        """Returns a new instance of the resource with the given values.\n        Used when initializing a resource at runtime.\n        """\n        values = check.opt_mapping_param(values, "values", key_type=str)\n        # Since Resource extends BaseModel and is a dataclass, we know that the\n        # signature of any __init__ method will always consist of the fields\n        # of this class. We can therefore safely pass in the values as kwargs.\n        out = self.__class__(**{**self._get_non_none_public_field_values(), **values})\n        out._state__internal__ = out._state__internal__._replace(  # noqa: SLF001\n            resource_context=self._state__internal__.resource_context\n        )\n        return out\n\n    @contextlib.contextmanager\n    def _resolve_and_update_nested_resources(\n        self, context: InitResourceContext\n    ) -> Generator["ConfigurableResourceFactory[TResValue]", None, None]:\n        """Updates any nested resources with the resource values from the context.\n        In this case, populating partially configured resources or\n        resources that return plain Python types.\n\n        Returns a new instance of the resource.\n        """\n        from dagster._core.execution.build_resources import wrap_resource_for_execution\n\n        partial_resources_to_update: Dict[str, Any] = {}\n        if self._nested_partial_resources:\n            context_with_mapping = cast(\n                InitResourceContextWithKeyMapping,\n                check.inst(\n                    context,\n                    InitResourceContextWithKeyMapping,\n                    "This ConfiguredResource contains unresolved partially-specified nested"\n                    " resources, and so can only be initialized using a"\n                    " InitResourceContextWithKeyMapping",\n                ),\n            )\n            partial_resources_to_update = {\n                attr_name: context_with_mapping.resources_by_id[id(resource)]\n                for attr_name, resource in self._nested_partial_resources.items()\n            }\n\n        # Also evaluate any resources that are not partial\n        with contextlib.ExitStack() as stack:\n            resources_to_update, _ = separate_resource_params(self.__class__, 
self.__dict__)\n            resources_to_update = {\n                attr_name: _call_resource_fn_with_default(\n                    stack, wrap_resource_for_execution(resource), context\n                )\n                for attr_name, resource in resources_to_update.items()\n                if attr_name not in partial_resources_to_update\n            }\n\n            to_update = {**resources_to_update, **partial_resources_to_update}\n            yield self._with_updated_values(to_update)\n\n    @deprecated(\n        breaking_version="2.0", additional_warn_text="Use `with_replaced_resource_context` instead"\n    )\n    def with_resource_context(\n        self, resource_context: InitResourceContext\n    ) -> "ConfigurableResourceFactory[TResValue]":\n        return self.with_replaced_resource_context(resource_context)\n\n    def with_replaced_resource_context(\n        self, resource_context: InitResourceContext\n    ) -> "ConfigurableResourceFactory[TResValue]":\n        """Returns a new instance of the resource with the given resource init context bound."""\n        # This utility is used to create a copy of this resource, without adjusting\n        # any values in this case\n        copy = self._with_updated_values({})\n        copy._state__internal__ = copy._state__internal__._replace(  # noqa: SLF001\n            resource_context=resource_context\n        )\n        return copy\n\n    def _initialize_and_run(self, context: InitResourceContext) -> TResValue:\n        with self._resolve_and_update_nested_resources(context) as has_nested_resource:\n            updated_resource = has_nested_resource.with_replaced_resource_context(  # noqa: SLF001\n                context\n            )._with_updated_values(context.resource_config)\n\n            updated_resource.setup_for_execution(context)\n            return updated_resource.create_resource(context)\n\n    @contextlib.contextmanager\n    def _initialize_and_run_cm(\n        self, context: InitResourceContext\n    ) -> Generator[TResValue, None, None]:\n        with self._resolve_and_update_nested_resources(context) as has_nested_resource:\n            updated_resource = has_nested_resource.with_replaced_resource_context(  # noqa: SLF001\n                context\n            )._with_updated_values(context.resource_config)\n\n            with updated_resource.yield_for_execution(context) as value:\n                yield value\n\n    def setup_for_execution(self, context: InitResourceContext) -> None:\n        """Optionally override this method to perform any pre-execution steps\n        needed before the resource is used in execution.\n        """\n        pass\n\n    def teardown_after_execution(self, context: InitResourceContext) -> None:\n        """Optionally override this method to perform any post-execution steps\n        needed after the resource is used in execution.\n\n        teardown_after_execution will be called even if any part of the run fails.\n        It will not be called if setup_for_execution fails.\n        """\n        pass\n\n    @contextlib.contextmanager\n    def yield_for_execution(self, context: InitResourceContext) -> Generator[TResValue, None, None]:\n        """Optionally override this method to perform any lifecycle steps\n        before or after the resource is used in execution. 
By default, calls\n        setup_for_execution before yielding, and teardown_after_execution after yielding.\n\n        Note that if you override this method and want setup_for_execution or\n        teardown_after_execution to be called, you must invoke them yourself.\n        """\n        self.setup_for_execution(context)\n        try:\n            yield self.create_resource(context)\n        finally:\n            self.teardown_after_execution(context)\n\n    def get_resource_context(self) -> InitResourceContext:\n        """Returns the context that this resource was initialized with."""\n        return check.not_none(\n            self._state__internal__.resource_context,\n            additional_message="Attempted to get context before resource was initialized.",\n        )\n\n    def process_config_and_initialize(self) -> TResValue:\n        """Initializes this resource, fully processing its config and returning the prepared\n        resource value.\n        """\n        from dagster._config.post_process import post_process_config\n\n        return self.from_resource_context(\n            build_init_resource_context(\n                config=post_process_config(\n                    self._config_schema.config_type, self._convert_to_config_dictionary()\n                ).value\n            )\n        )\n\n    @classmethod\n    def from_resource_context(cls, context: InitResourceContext) -> TResValue:\n        """Creates a new instance of this resource from a populated InitResourceContext.\n        Useful when creating a resource from a function-based resource, for backwards\n        compatibility purposes.\n\n        For resources that have custom teardown behavior, use from_resource_context_cm instead.\n\n        Example usage:\n\n        .. code-block:: python\n\n            class MyResource(ConfigurableResource):\n                my_str: str\n\n            @resource(config_schema=MyResource.to_config_schema())\n            def my_resource(context: InitResourceContext) -> MyResource:\n                return MyResource.from_resource_context(context)\n\n        """\n        check.invariant(\n            not cls._is_cm_resource_cls(),\n            "Use from_resource_context_cm for resources which have custom teardown behavior,"\n            " e.g. overriding yield_for_execution or teardown_after_execution",\n        )\n        return cls(**context.resource_config or {})._initialize_and_run(context)  # noqa: SLF001\n\n    @classmethod\n    @contextlib.contextmanager\n    def from_resource_context_cm(\n        cls, context: InitResourceContext\n    ) -> Generator[TResValue, None, None]:\n        """Context which generates a new instance of this resource from a populated InitResourceContext.\n        Useful when creating a resource from a function-based resource, for backwards\n        compatibility purposes. Handles custom teardown behavior.\n\n        Example usage:\n\n        .. code-block:: python\n\n            class MyResource(ConfigurableResource):\n                my_str: str\n\n            @resource(config_schema=MyResource.to_config_schema())\n            def my_resource(context: InitResourceContext) -> Generator[MyResource, None, None]:\n                with MyResource.from_resource_context_cm(context) as my_resource:\n                    yield my_resource\n\n        """\n        with cls(**context.resource_config or {})._initialize_and_run_cm(  # noqa: SLF001\n            context\n        ) as value:\n            yield value\n\n\n
[docs]class ConfigurableResource(ConfigurableResourceFactory[TResValue]):\n """Base class for Dagster resources that utilize structured config.\n\n This class is a subclass of both :py:class:`ResourceDefinition` and :py:class:`Config`.\n\n Example definition:\n\n .. code-block:: python\n\n class WriterResource(ConfigurableResource):\n prefix: str\n\n def output(self, text: str) -> None:\n print(f"{self.prefix}{text}")\n\n Example usage:\n\n .. code-block:: python\n\n @asset\n def asset_that_uses_writer(writer: WriterResource):\n writer.output("text")\n\n defs = Definitions(\n assets=[asset_that_uses_writer],\n resources={"writer": WriterResource(prefix="a_prefix")},\n )\n\n """\n\n def create_resource(self, context: InitResourceContext) -> TResValue:\n """Returns the object that this resource hands to user code, accessible by ops or assets\n through the context or resource parameters. This works like the function decorated\n with @resource when using function-based resources.\n\n For ConfigurableResource, this function will return itself, passing\n the actual ConfigurableResource object to user code.\n """\n return cast(TResValue, self)
\n\n\ndef _is_fully_configured(resource: CoercibleToResource) -> bool:\n from dagster._core.execution.build_resources import wrap_resource_for_execution\n\n actual_resource = wrap_resource_for_execution(resource)\n res = (\n validate_config(\n actual_resource.config_schema.config_type,\n (\n actual_resource.config_schema.default_value\n if actual_resource.config_schema.default_provided\n else {}\n ),\n ).success\n is True\n )\n\n return res\n\n\nclass PartialResourceState(NamedTuple):\n nested_partial_resources: Dict[str, Any]\n config_schema: DagsterField\n resource_fn: Callable[[InitResourceContext], Any]\n description: Optional[str]\n nested_resources: Dict[str, Any]\n\n\nclass PartialResource(Generic[TResValue], AllowDelayedDependencies, MakeConfigCacheable):\n data: Dict[str, Any]\n resource_cls: Type[ConfigurableResourceFactory[TResValue]]\n\n def __init__(\n self,\n resource_cls: Type[ConfigurableResourceFactory[TResValue]],\n data: Dict[str, Any],\n ):\n resource_pointers, _data_without_resources = separate_resource_params(resource_cls, data)\n\n MakeConfigCacheable.__init__(self, data=data, resource_cls=resource_cls) # type: ignore # extends BaseModel, takes kwargs\n\n def resource_fn(context: InitResourceContext):\n instantiated = resource_cls(\n **{**data, **context.resource_config}\n ) # So that collisions are resolved in favor of the latest provided run config\n return instantiated._get_initialize_and_run_fn()(context) # noqa: SLF001\n\n self._state__internal__ = PartialResourceState(\n # We keep track of any resources we depend on which are not fully configured\n # so that we can retrieve them at runtime\n nested_partial_resources={\n k: v for k, v in resource_pointers.items() if (not _is_fully_configured(v))\n },\n config_schema=infer_schema_from_config_class(\n resource_cls, fields_to_omit=set(resource_pointers.keys())\n ),\n resource_fn=resource_fn,\n description=resource_cls.__doc__,\n nested_resources={k: v for k, v in resource_pointers.items()},\n )\n\n # to make AllowDelayedDependencies work\n @property\n def _nested_partial_resources(\n self,\n ) -> Mapping[str, Any]:\n return self._state__internal__.nested_partial_resources\n\n @property\n def nested_resources(\n self,\n ) -> Mapping[str, Any]:\n return self._state__internal__.nested_resources\n\n @cached_method\n def get_resource_definition(self) -> ConfigurableResourceFactoryResourceDefinition:\n return ConfigurableResourceFactoryResourceDefinition(\n self.resource_cls,\n resource_fn=self._state__internal__.resource_fn,\n config_schema=self._state__internal__.config_schema,\n description=self._state__internal__.description,\n resolve_resource_keys=self._resolve_required_resource_keys,\n nested_resources=self.nested_resources,\n dagster_maintained=self.resource_cls._is_dagster_maintained(), # noqa: SLF001\n )\n\n\nResourceOrPartial: TypeAlias = Union[\n ConfigurableResourceFactory[TResValue], PartialResource[TResValue]\n]\nResourceOrPartialOrValue: TypeAlias = Union[\n ConfigurableResourceFactory[TResValue],\n PartialResource[TResValue],\n ResourceDefinition,\n TResValue,\n]\n\n\nV = TypeVar("V")\n\n\nclass ResourceDependency(Generic[V]):\n def __set_name__(self, _owner, name):\n self._name = name\n\n def __get__(self, obj: "ConfigurableResourceFactory", __owner: Any) -> V:\n return getattr(obj, self._name)\n\n def __set__(self, obj: Optional[object], value: ResourceOrPartialOrValue[V]) -> None:\n setattr(obj, self._name, value)\n\n\nclass ConfigurableLegacyResourceAdapter(ConfigurableResource, ABC):\n """Adapter 
base class for wrapping a decorated, function-style resource\n with structured config.\n\n To use this class, subclass it, define config schema fields using Pydantic,\n and implement the ``wrapped_resource`` method.\n\n Example:\n .. code-block:: python\n\n @resource(config_schema={"prefix": str})\n def writer_resource(context):\n prefix = context.resource_config["prefix"]\n\n def output(text: str) -> None:\n out_txt.append(f"{prefix}{text}")\n\n return output\n\n class WriterResource(ConfigurableLegacyResourceAdapter):\n prefix: str\n\n @property\n def wrapped_resource(self) -> ResourceDefinition:\n return writer_resource\n """\n\n @property\n @abstractmethod\n def wrapped_resource(self) -> ResourceDefinition:\n raise NotImplementedError()\n\n @cached_method\n def get_resource_definition(self) -> ConfigurableResourceFactoryResourceDefinition:\n return ConfigurableResourceFactoryResourceDefinition(\n self.__class__,\n resource_fn=self.wrapped_resource.resource_fn,\n config_schema=self._config_schema,\n description=self.__doc__,\n resolve_resource_keys=self._resolve_required_resource_keys,\n nested_resources=self.nested_resources,\n dagster_maintained=self._is_dagster_maintained(),\n )\n\n def __call__(self, *args, **kwargs):\n return self.wrapped_resource(*args, **kwargs)\n\n\nclass SeparatedResourceParams(NamedTuple):\n resources: Dict[str, Any]\n non_resources: Dict[str, Any]\n\n\ndef _is_annotated_as_resource_type(annotation: Type, metadata: List[str]) -> bool:\n """Determines if a field in a structured config class is annotated as a resource type or not."""\n from .type_check_utils import safe_is_subclass\n\n if metadata and metadata[0] == "resource_dependency":\n return True\n\n is_annotated_as_resource_dependency = get_origin(annotation) == ResourceDependency or getattr(\n annotation, "__metadata__", None\n ) == ("resource_dependency",)\n\n return is_annotated_as_resource_dependency or safe_is_subclass(\n annotation, (ResourceDefinition, ConfigurableResourceFactory)\n )\n\n\nclass ResourceDataWithAnnotation(NamedTuple):\n key: str\n value: Any\n annotation: Type\n annotation_metadata: List[str]\n\n\ndef separate_resource_params(cls: Type[BaseModel], data: Dict[str, Any]) -> SeparatedResourceParams:\n """Separates out the key/value inputs of fields in a structured config Resource class which\n are marked as resources (ie, using ResourceDependency) from those which are not.\n """\n fields_by_resolved_field_name = {\n field.alias if field.alias else key: field for key, field in model_fields(cls).items()\n }\n data_with_annotation: List[ResourceDataWithAnnotation] = [\n # No longer exists in Pydantic 2.x, will need to be updated when we upgrade\n ResourceDataWithAnnotation(\n key=field_name,\n value=field_value,\n annotation=fields_by_resolved_field_name[field_name].annotation,\n annotation_metadata=fields_by_resolved_field_name[field_name].metadata,\n )\n for field_name, field_value in data.items()\n if field_name in fields_by_resolved_field_name\n ]\n # We need to grab metadata from the annotation in order to tell if\n # this key was annotated with a typing.Annotated annotation (which we use for resource/resource deps),\n # since Pydantic 2.0 strips that info out and sticks any Annotated metadata in the\n # metadata field\n out = SeparatedResourceParams(\n resources={\n d.key: d.value\n for d in data_with_annotation\n if _is_annotated_as_resource_type(\n d.annotation,\n d.annotation_metadata,\n )\n },\n non_resources={\n d.key: d.value\n for d in data_with_annotation\n if not 
_is_annotated_as_resource_type(\n d.annotation,\n d.annotation_metadata,\n )\n },\n )\n return out\n\n\ndef _call_resource_fn_with_default(\n stack: contextlib.ExitStack, obj: ResourceDefinition, context: InitResourceContext\n) -> Any:\n from dagster._config.validate import process_config\n\n if isinstance(obj.config_schema, ConfiguredDefinitionConfigSchema):\n value = cast(Dict[str, Any], obj.config_schema.resolve_config({}).value)\n context = context.replace_config(value["config"])\n elif obj.config_schema.default_provided:\n # To explain why we need to process config here;\n # - The resource available on the init context (context.resource_config) has already been processed\n # - The nested resource's config has also already been processed, but is only available in the broader run config dictionary.\n # - The only information we have access to here is the unprocessed default value, so we need to process it a second time.\n unprocessed_config = obj.config_schema.default_value\n evr = process_config(\n {"config": obj.config_schema.config_type}, {"config": unprocessed_config}\n )\n if not evr.success:\n raise DagsterInvalidConfigError(\n "Error in config for nested resource ",\n evr.errors,\n unprocessed_config,\n )\n context = context.replace_config(cast(dict, evr.value)["config"])\n\n if has_at_least_one_parameter(obj.resource_fn):\n result = cast(ResourceFunctionWithContext, obj.resource_fn)(context)\n else:\n result = cast(ResourceFunctionWithoutContext, obj.resource_fn)()\n\n is_fn_generator = inspect.isgenerator(obj.resource_fn) or isinstance(\n obj.resource_fn, contextlib.ContextDecorator\n )\n if is_fn_generator:\n return stack.enter_context(cast(contextlib.AbstractContextManager, result))\n else:\n return result\n\n\nLateBoundTypesForResourceTypeChecking.set_actual_types_for_type_checking(\n resource_dep_type=ResourceDependency,\n resource_type=ConfigurableResourceFactory,\n partial_resource_type=PartialResource,\n)\n\n\ndef validate_resource_annotated_function(fn) -> None:\n """Validates any parameters on the decorated function that are annotated with\n :py:class:`dagster.ResourceDefinition`, raising a :py:class:`dagster.DagsterInvalidDefinitionError`\n if any are not also instances of :py:class:`dagster.ConfigurableResource` (these resources should\n instead be wrapped in the :py:func:`dagster.Resource` Annotation).\n """\n from dagster import DagsterInvalidDefinitionError\n from dagster._config.pythonic_config.resource import (\n ConfigurableResource,\n ConfigurableResourceFactory,\n TResValue,\n )\n\n from .type_check_utils import safe_is_subclass\n\n malformed_params = [\n param\n for param in get_function_params(fn)\n if safe_is_subclass(param.annotation, (ResourceDefinition, ConfigurableResourceFactory))\n and not safe_is_subclass(param.annotation, ConfigurableResource)\n ]\n if len(malformed_params) > 0:\n malformed_param = malformed_params[0]\n output_type = None\n if safe_is_subclass(malformed_param.annotation, ConfigurableResourceFactory):\n orig_bases = getattr(malformed_param.annotation, "__orig_bases__", None)\n output_type = get_args(orig_bases[0])[0] if orig_bases and len(orig_bases) > 0 else None\n if output_type == TResValue:\n output_type = None\n\n output_type_name = getattr(output_type, "__name__", str(output_type))\n raise DagsterInvalidDefinitionError(\n """Resource param '{param_name}' is annotated as '{annotation_type}', but '{annotation_type}' outputs {value_message} value to user code such as @ops and @assets. 
This annotation should instead be {annotation_suggestion}""".format(\n param_name=malformed_param.name,\n annotation_type=malformed_param.annotation,\n value_message=f"a '{output_type}'" if output_type else "an unknown",\n annotation_suggestion=(\n f"'ResourceParam[{output_type_name}]'"\n if output_type\n else "'ResourceParam[Any]' or 'ResourceParam[<output type>]'"\n ),\n )\n )\n\n\ndef _resolve_required_resource_keys_for_resource(\n resource: ResourceDefinition, resource_id_to_key_mapping: Mapping[ResourceId, str]\n) -> AbstractSet[str]:\n """Gets the required resource keys for the provided resource, with the assistance of the passed\n resource-id-to-key mapping. For resources which may hold nested partial resources,\n this mapping is used to obtain the top-level resource keys to depend on.\n """\n if isinstance(resource, AllowDelayedDependencies):\n return resource._resolve_required_resource_keys(resource_id_to_key_mapping) # noqa: SLF001\n return resource.required_resource_keys\n
", "current_page_name": "_modules/dagster/_config/pythonic_config/resource", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._config.pythonic_config.resource"}}, "source": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._config.source

\nimport os\n\nimport dagster._check as check\n\nfrom .config_type import ScalarUnion\nfrom .errors import PostProcessingError\nfrom .field_utils import Selector\n\nVALID_STRING_SOURCE_TYPES = (str, dict)\n\n\ndef _ensure_env_variable(var):\n    check.str_param(var, "var")\n    value = os.getenv(var)\n    if value is None:\n        raise PostProcessingError(\n            f'You have attempted to fetch the environment variable "{var}" '\n            "which is not set. In order for this execution to succeed it "\n            "must be set in this environment."\n        )\n    return value\n\n\nclass StringSourceType(ScalarUnion):\n    def __init__(self):\n        super(StringSourceType, self).__init__(\n            scalar_type=str,\n            non_scalar_schema=Selector({"env": str}),\n            _key="StringSourceType",\n        )\n\n    def post_process(self, value):\n        check.param_invariant(isinstance(value, VALID_STRING_SOURCE_TYPES), "value")\n\n        if not isinstance(value, dict):\n            return value\n\n        key, cfg = next(iter(value.items()))\n        check.invariant(key == "env", "Only valid key is env")\n        return str(_ensure_env_variable(cfg))\n\n\nclass IntSourceType(ScalarUnion):\n    def __init__(self):\n        super(IntSourceType, self).__init__(\n            scalar_type=int,\n            non_scalar_schema=Selector({"env": str}),\n            _key="IntSourceType",\n        )\n\n    def post_process(self, value):\n        check.param_invariant(isinstance(value, (dict, int)), "value", "Should be pre-validated")\n\n        if not isinstance(value, dict):\n            return value\n\n        check.invariant(len(value) == 1, "Selector should have one entry")\n\n        key, cfg = next(iter(value.items()))\n        check.invariant(key == "env", "Only valid key is env")\n        value = _ensure_env_variable(cfg)\n        try:\n            return int(value)\n        except ValueError as e:\n            raise PostProcessingError(\n                f'Value "{value}" stored in env variable "{cfg}" cannot be coerced into an int.'\n            ) from e\n\n\nclass BoolSourceType(ScalarUnion):\n    def __init__(self):\n        super(BoolSourceType, self).__init__(\n            scalar_type=bool,\n            non_scalar_schema=Selector({"env": str}),\n            _key="BoolSourceType",\n        )\n\n    def post_process(self, value):\n        check.param_invariant(isinstance(value, (dict, bool)), "value", "Should be pre-validated")\n\n        if not isinstance(value, dict):\n            return value\n\n        check.invariant(len(value) == 1, "Selector should have one entry")\n\n        key, cfg = next(iter(value.items()))\n        check.invariant(key == "env", "Only valid key is env")\n        value = _ensure_env_variable(cfg)\n        try:\n            return bool(value)\n        except ValueError as e:\n            raise PostProcessingError(\n                (\n                    'Value "{value}" stored in env variable "{var}" cannot be coerced into an bool.'\n                ).format(value=value, var=cfg)\n            ) from e\n\n\nStringSource: StringSourceType = StringSourceType()\nIntSource: IntSourceType = IntSourceType()\nBoolSource: BoolSourceType = BoolSourceType()\n
", "current_page_name": "_modules/dagster/_config/source", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._config.source"}}, "_core": {"definitions": {"asset_check_result": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.asset_check_result

\nfrom typing import TYPE_CHECKING, Mapping, NamedTuple, Optional\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, experimental\nfrom dagster._core.definitions.asset_check_evaluation import (\n    AssetCheckEvaluation,\n    AssetCheckEvaluationTargetMaterializationData,\n)\nfrom dagster._core.definitions.asset_check_spec import AssetCheckSeverity\nfrom dagster._core.definitions.events import (\n    AssetKey,\n    CoercibleToAssetKey,\n    MetadataValue,\n    RawMetadataValue,\n    normalize_metadata,\n)\nfrom dagster._core.errors import DagsterInvariantViolationError\n\nif TYPE_CHECKING:\n    from dagster._core.execution.context.compute import StepExecutionContext\n\n\n
[docs]@experimental\nclass AssetCheckResult(\n NamedTuple(\n "_AssetCheckResult",\n [\n ("passed", PublicAttr[bool]),\n ("asset_key", PublicAttr[Optional[AssetKey]]),\n ("check_name", PublicAttr[Optional[str]]),\n ("metadata", PublicAttr[Mapping[str, MetadataValue]]),\n ("severity", PublicAttr[AssetCheckSeverity]),\n ],\n )\n):\n """The result of an asset check.\n\n Attributes:\n asset_key (Optional[AssetKey]):\n The asset key that was checked.\n check_name (Optional[str]):\n The name of the check.\n passed (bool):\n The pass/fail result of the check.\n metadata (Optional[Dict[str, RawMetadataValue]]):\n Arbitrary metadata about the asset. Keys are displayed string labels, and values are\n one of the following: string, float, int, JSON-serializable dict, JSON-serializable\n list, and one of the data classes returned by a MetadataValue static method.\n severity (AssetCheckSeverity):\n Severity of the check. Defaults to ERROR.\n\n """\n\n def __new__(\n cls,\n *,\n passed: bool,\n asset_key: Optional[CoercibleToAssetKey] = None,\n check_name: Optional[str] = None,\n metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n severity: AssetCheckSeverity = AssetCheckSeverity.ERROR,\n ):\n normalized_metadata = normalize_metadata(\n check.opt_mapping_param(metadata, "metadata", key_type=str),\n )\n return super().__new__(\n cls,\n asset_key=AssetKey.from_coercible(asset_key) if asset_key is not None else None,\n check_name=check.opt_str_param(check_name, "check_name"),\n passed=check.bool_param(passed, "passed"),\n metadata=normalized_metadata,\n severity=check.inst_param(severity, "severity", AssetCheckSeverity),\n )\n\n def to_asset_check_evaluation(\n self, step_context: "StepExecutionContext"\n ) -> AssetCheckEvaluation:\n spec_check_names_by_asset_key = (\n step_context.job_def.asset_layer.get_check_names_by_asset_key_for_node_handle(\n step_context.node_handle.root\n )\n )\n\n asset_keys_with_specs = spec_check_names_by_asset_key.keys()\n\n if self.asset_key is not None:\n if self.asset_key not in asset_keys_with_specs:\n raise DagsterInvariantViolationError(\n "Received unexpected AssetCheckResult. It targets asset"\n f" '{self.asset_key.to_user_string()}' which is not targeted by any of the"\n " checks currently being evaluated. Targeted assets:"\n f" {[asset_key.to_user_string() for asset_key in asset_keys_with_specs]}."\n )\n\n resolved_asset_key = self.asset_key\n\n else:\n if len(spec_check_names_by_asset_key) > 1:\n raise DagsterInvariantViolationError(\n "AssetCheckResult didn't specify an asset key, but there are multiple assets"\n " to choose from:"\n f" {[asset_key.to_user_string() for asset_key in spec_check_names_by_asset_key.keys()]}"\n )\n\n resolved_asset_key = next(iter(asset_keys_with_specs))\n\n check_names_with_specs = spec_check_names_by_asset_key[resolved_asset_key]\n if self.check_name is not None:\n if self.check_name not in check_names_with_specs:\n raise DagsterInvariantViolationError(\n "Received unexpected AssetCheckResult. No checks currently being evaluated"\n f" target asset '{resolved_asset_key.to_user_string()}' and have name"\n f" '{self.check_name}'. 
Checks being evaluated for this asset:"\n f" {check_names_with_specs}"\n )\n\n resolved_check_name = self.check_name\n else:\n if len(check_names_with_specs) > 1:\n raise DagsterInvariantViolationError(\n "AssetCheckResult didn't specify a check name, but there are multiple"\n " checks to choose from for this asset key:"\n f" {check_names_with_specs}"\n )\n\n resolved_check_name = next(iter(check_names_with_specs))\n\n input_asset_info = step_context.get_input_asset_version_info(resolved_asset_key)\n if input_asset_info is not None:\n target_materialization_data = AssetCheckEvaluationTargetMaterializationData(\n run_id=input_asset_info.run_id,\n storage_id=input_asset_info.storage_id,\n timestamp=input_asset_info.timestamp,\n )\n else:\n target_materialization_data = None\n\n return AssetCheckEvaluation(\n check_name=resolved_check_name,\n asset_key=resolved_asset_key,\n passed=self.passed,\n metadata=self.metadata,\n target_materialization_data=target_materialization_data,\n severity=self.severity,\n )\n\n def get_spec_python_identifier(\n self, *, asset_key: Optional[AssetKey] = None, check_name: Optional[str] = None\n ) -> str:\n """Returns a string uniquely identifying the asset check spec associated with this result.\n This is used for the output name associated with an `AssetCheckResult`.\n """\n asset_key = asset_key or self.asset_key\n check_name = check_name or self.check_name\n assert asset_key is not None, "Asset key must be provided if not set on spec"\n assert check_name is not None, "Check name must be provided if not set on spec"\n return f"{asset_key.to_python_identifier()}_{check_name}"
\n
", "current_page_name": "_modules/dagster/_core/definitions/asset_check_result", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.asset_check_result"}, "asset_check_spec": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.asset_check_spec

\nfrom enum import Enum\nfrom typing import TYPE_CHECKING, Any, Mapping, NamedTuple, Optional, Union\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, experimental\nfrom dagster._core.definitions.events import AssetKey, CoercibleToAssetKey\nfrom dagster._serdes.serdes import whitelist_for_serdes\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.assets import AssetsDefinition\n    from dagster._core.definitions.source_asset import SourceAsset\n\n\n
[docs]@experimental\n@whitelist_for_serdes\nclass AssetCheckSeverity(Enum):\n """Severity level for an asset check.\n\n Severities:\n\n - WARN: If the check fails, don't fail the step.\n - ERROR: If the check fails, fail the step and, within the run, skip materialization of any\n assets that are downstream of the asset being checked.\n """\n\n WARN = "WARN"\n ERROR = "ERROR"
\n\n\n
[docs]@experimental\n@whitelist_for_serdes(old_storage_names={"AssetCheckHandle"})\nclass AssetCheckKey(NamedTuple):\n """Check names are expected to be unique per-asset. Thus, this combination of asset key and\n check name uniquely identifies an asset check within a deployment.\n """\n\n asset_key: PublicAttr[AssetKey]\n name: PublicAttr[str]\n\n @staticmethod\n def from_graphql_input(graphql_input: Mapping[str, Any]) -> "AssetCheckKey":\n return AssetCheckKey(\n asset_key=AssetKey.from_graphql_input(graphql_input["assetKey"]),\n name=graphql_input["name"],\n )
\n\n\n
[docs]@experimental\nclass AssetCheckSpec(\n NamedTuple(\n "_AssetCheckSpec",\n [\n ("name", PublicAttr[str]),\n ("asset_key", PublicAttr[AssetKey]),\n ("description", PublicAttr[Optional[str]]),\n ],\n )\n):\n """Defines information about an asset check, except how to execute it.\n\n AssetCheckSpec is often used as an argument to decorators that decorate a function that can\n execute multiple checks - e.g. `@asset` and `@multi_asset`. It defines one of the checks that\n will be executed inside that function.\n\n Args:\n name (str): Name of the check.\n asset (Union[AssetKey, Sequence[str], str, AssetsDefinition, SourceAsset]): The asset that\n the check applies to.\n description (Optional[str]): Description for the check.\n """\n\n def __new__(\n cls,\n name: str,\n *,\n asset: Union[CoercibleToAssetKey, "AssetsDefinition", "SourceAsset"],\n description: Optional[str] = None,\n ):\n return super().__new__(\n cls,\n name=check.str_param(name, "name"),\n asset_key=AssetKey.from_coercible_or_definition(asset),\n description=check.opt_str_param(description, "description"),\n )\n\n def get_python_identifier(self) -> str:\n """Returns a string uniquely identifying the asset check, that uses only the characters\n allowed in a Python identifier.\n """\n return f"{self.asset_key.to_python_identifier()}_{self.name}"\n\n @property\n def key(self) -> AssetCheckKey:\n return AssetCheckKey(self.asset_key, self.name)
\n
", "current_page_name": "_modules/dagster/_core/definitions/asset_check_spec", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.asset_check_spec"}, "asset_dep": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.asset_dep

\nfrom typing import NamedTuple, Optional, Union\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, experimental\nfrom dagster._core.definitions.asset_spec import AssetSpec\nfrom dagster._core.definitions.assets import AssetsDefinition\nfrom dagster._core.definitions.partition_mapping import PartitionMapping\nfrom dagster._core.definitions.source_asset import SourceAsset\nfrom dagster._core.errors import DagsterInvalidDefinitionError\n\nfrom .events import (\n    AssetKey,\n    CoercibleToAssetKey,\n)\n\nCoercibleToAssetDep = Union[\n    CoercibleToAssetKey, AssetSpec, AssetsDefinition, SourceAsset, "AssetDep"\n]\n\n\n
[docs]@experimental\nclass AssetDep(\n NamedTuple(\n "_AssetDep",\n [\n ("asset_key", PublicAttr[AssetKey]),\n ("partition_mapping", PublicAttr[Optional[PartitionMapping]]),\n ],\n )\n):\n """Specifies a dependency on an upstream asset.\n\n Attributes:\n asset (Union[AssetKey, str, AssetSpec, AssetsDefinition, SourceAsset]): The upstream asset to depend on.\n partition_mapping (Optional[PartitionMapping]): Defines what partitions to depend on in\n the upstream asset. If not provided and the upstream asset is partitioned, defaults to\n the default partition mapping for the partitions definition, which is typically maps\n partition keys to the same partition keys in upstream assets.\n\n Examples:\n .. code-block:: python\n\n upstream_asset = AssetSpec("upstream_asset")\n downstream_asset = AssetSpec(\n "downstream_asset",\n deps=[\n AssetDep(\n upstream_asset,\n partition_mapping=TimeWindowPartitionMapping(start_offset=-1, end_offset=-1)\n )\n ]\n )\n """\n\n def __new__(\n cls,\n asset: Union[CoercibleToAssetKey, AssetSpec, AssetsDefinition, SourceAsset],\n *,\n partition_mapping: Optional[PartitionMapping] = None,\n ):\n if isinstance(asset, list):\n check.list_param(asset, "asset", of_type=str)\n else:\n check.inst_param(\n asset, "asset", (AssetKey, str, AssetSpec, AssetsDefinition, SourceAsset)\n )\n if isinstance(asset, AssetsDefinition) and len(asset.keys) > 1:\n # Only AssetsDefinition with a single asset can be passed\n raise DagsterInvalidDefinitionError(\n "Cannot create an AssetDep from a multi_asset AssetsDefinition."\n " Instead, specify dependencies on the assets created by the multi_asset"\n f" via AssetKeys or strings. For the multi_asset {asset.node_def.name}, the"\n f" available keys are: {asset.keys}."\n )\n\n asset_key = _get_asset_key(asset)\n\n return super().__new__(\n cls,\n asset_key=asset_key,\n partition_mapping=check.opt_inst_param(\n partition_mapping,\n "partition_mapping",\n PartitionMapping,\n ),\n )\n\n @staticmethod\n def from_coercible(arg: "CoercibleToAssetDep") -> "AssetDep":\n # if arg is AssetDep, return the original object to retain partition_mapping\n return arg if isinstance(arg, AssetDep) else AssetDep(asset=arg)
\n\n\ndef _get_asset_key(arg: "CoercibleToAssetDep") -> AssetKey:\n if isinstance(arg, (AssetsDefinition, SourceAsset, AssetSpec)):\n return arg.key\n elif isinstance(arg, AssetDep):\n return arg.asset_key\n else:\n return AssetKey.from_coercible(arg)\n
", "current_page_name": "_modules/dagster/_core/definitions/asset_dep", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.asset_dep"}, "asset_in": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.asset_in

\nfrom typing import Mapping, NamedTuple, Optional, Sequence, Type, Union\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr\nfrom dagster._core.definitions.events import (\n    AssetKey,\n    CoercibleToAssetKey,\n    CoercibleToAssetKeyPrefix,\n)\nfrom dagster._core.definitions.input import NoValueSentinel\nfrom dagster._core.definitions.metadata import ArbitraryMetadataMapping\nfrom dagster._core.types.dagster_type import DagsterType, resolve_dagster_type\n\nfrom .partition_mapping import PartitionMapping\n\n\n
[docs]class AssetIn(\n NamedTuple(\n "_AssetIn",\n [\n ("key", PublicAttr[Optional[AssetKey]]),\n ("metadata", PublicAttr[Optional[ArbitraryMetadataMapping]]),\n ("key_prefix", PublicAttr[Optional[Sequence[str]]]),\n ("input_manager_key", PublicAttr[Optional[str]]),\n ("partition_mapping", PublicAttr[Optional[PartitionMapping]]),\n ("dagster_type", PublicAttr[Union[DagsterType, Type[NoValueSentinel]]]),\n ],\n )\n):\n """Defines an asset dependency.\n\n Attributes:\n key_prefix (Optional[Union[str, Sequence[str]]]): If provided, the asset's key is the\n concatenation of the key_prefix and the input name. Only one of the "key_prefix" and\n "key" arguments should be provided.\n key (Optional[Union[str, Sequence[str], AssetKey]]): The asset's key. Only one of the\n "key_prefix" and "key" arguments should be provided.\n metadata (Optional[Dict[str, Any]]): A dict of the metadata for the input.\n For example, if you only need a subset of columns from an upstream table, you could\n include that in metadata and the IO manager that loads the upstream table could use the\n metadata to determine which columns to load.\n partition_mapping (Optional[PartitionMapping]): Defines what partitions to depend on in\n the upstream asset. If not provided, defaults to the default partition mapping for the\n partitions definition, which is typically maps partition keys to the same partition keys\n in upstream assets.\n dagster_type (DagsterType): Allows specifying type validation functions that\n will be executed on the input of the decorated function before it runs.\n """\n\n def __new__(\n cls,\n key: Optional[CoercibleToAssetKey] = None,\n metadata: Optional[ArbitraryMetadataMapping] = None,\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n input_manager_key: Optional[str] = None,\n partition_mapping: Optional[PartitionMapping] = None,\n dagster_type: Union[DagsterType, Type[NoValueSentinel]] = NoValueSentinel,\n ):\n if isinstance(key_prefix, str):\n key_prefix = [key_prefix]\n\n check.invariant(\n not (key and key_prefix), "key and key_prefix cannot both be set on AssetIn"\n )\n\n return super(AssetIn, cls).__new__(\n cls,\n key=AssetKey.from_coercible(key) if key is not None else None,\n metadata=check.opt_inst_param(metadata, "metadata", Mapping),\n key_prefix=check.opt_list_param(key_prefix, "key_prefix", of_type=str),\n input_manager_key=check.opt_str_param(input_manager_key, "input_manager_key"),\n partition_mapping=check.opt_inst_param(\n partition_mapping, "partition_mapping", PartitionMapping\n ),\n dagster_type=(\n NoValueSentinel\n if dagster_type is NoValueSentinel\n else resolve_dagster_type(dagster_type)\n ),\n )
\n
", "current_page_name": "_modules/dagster/_core/definitions/asset_in", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.asset_in"}, "asset_out": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.asset_out

\nfrom typing import Any, Mapping, NamedTuple, Optional, Sequence, Type, Union\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr\nfrom dagster._core.definitions.auto_materialize_policy import AutoMaterializePolicy\nfrom dagster._core.definitions.backfill_policy import BackfillPolicy\nfrom dagster._core.definitions.events import (\n    AssetKey,\n    CoercibleToAssetKey,\n    CoercibleToAssetKeyPrefix,\n)\nfrom dagster._core.definitions.freshness_policy import FreshnessPolicy\nfrom dagster._core.definitions.input import NoValueSentinel\nfrom dagster._core.definitions.metadata import MetadataUserInput\nfrom dagster._core.definitions.output import Out\nfrom dagster._core.definitions.utils import DEFAULT_IO_MANAGER_KEY\nfrom dagster._core.types.dagster_type import DagsterType, resolve_dagster_type\n\n\n
[docs]class AssetOut(\n NamedTuple(\n "_AssetOut",\n [\n ("key", PublicAttr[Optional[AssetKey]]),\n ("key_prefix", PublicAttr[Optional[Sequence[str]]]),\n ("metadata", PublicAttr[Optional[Mapping[str, Any]]]),\n ("io_manager_key", PublicAttr[str]),\n ("description", PublicAttr[Optional[str]]),\n ("is_required", PublicAttr[bool]),\n ("dagster_type", PublicAttr[Union[DagsterType, Type[NoValueSentinel]]]),\n ("group_name", PublicAttr[Optional[str]]),\n ("code_version", PublicAttr[Optional[str]]),\n ("freshness_policy", PublicAttr[Optional[FreshnessPolicy]]),\n ("auto_materialize_policy", PublicAttr[Optional[AutoMaterializePolicy]]),\n ("backfill_policy", PublicAttr[Optional[BackfillPolicy]]),\n ],\n )\n):\n """Defines one of the assets produced by a :py:func:`@multi_asset <multi_asset>`.\n\n Attributes:\n key_prefix (Optional[Union[str, Sequence[str]]]): If provided, the asset's key is the\n concatenation of the key_prefix and the asset's name. When using ``@multi_asset``, the\n asset name defaults to the key of the "outs" dictionary Only one of the "key_prefix" and\n "key" arguments should be provided.\n key (Optional[Union[str, Sequence[str], AssetKey]]): The asset's key. Only one of the\n "key_prefix" and "key" arguments should be provided.\n dagster_type (Optional[Union[Type, DagsterType]]]):\n The type of this output. Should only be set if the correct type can not\n be inferred directly from the type signature of the decorated function.\n description (Optional[str]): Human-readable description of the output.\n is_required (bool): Whether the presence of this field is required. (default: True)\n io_manager_key (Optional[str]): The resource key of the IO manager used for this output.\n (default: "io_manager").\n metadata (Optional[Dict[str, Any]]): A dict of the metadata for the output.\n For example, users can provide a file path if the data object will be stored in a\n filesystem, or provide information of a database table when it is going to load the data\n into the table.\n group_name (Optional[str]): A string name used to organize multiple assets into groups. 
If\n not provided, the name "default" is used.\n code_version (Optional[str]): The version of the code that generates this asset.\n freshness_policy (Optional[FreshnessPolicy]): A policy which indicates how up to date this\n asset is intended to be.\n auto_materialize_policy (Optional[AutoMaterializePolicy]): AutoMaterializePolicy to apply to\n the specified asset.\n backfill_policy (Optional[BackfillPolicy]): BackfillPolicy to apply to the specified asset.\n """\n\n def __new__(\n cls,\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n key: Optional[CoercibleToAssetKey] = None,\n dagster_type: Union[Type, DagsterType] = NoValueSentinel,\n description: Optional[str] = None,\n is_required: bool = True,\n io_manager_key: Optional[str] = None,\n metadata: Optional[MetadataUserInput] = None,\n group_name: Optional[str] = None,\n code_version: Optional[str] = None,\n freshness_policy: Optional[FreshnessPolicy] = None,\n auto_materialize_policy: Optional[AutoMaterializePolicy] = None,\n backfill_policy: Optional[BackfillPolicy] = None,\n ):\n if isinstance(key_prefix, str):\n key_prefix = [key_prefix]\n\n return super(AssetOut, cls).__new__(\n cls,\n key=AssetKey.from_coercible(key) if key is not None else None,\n key_prefix=check.opt_list_param(key_prefix, "key_prefix", of_type=str),\n dagster_type=(\n NoValueSentinel\n if dagster_type is NoValueSentinel\n else resolve_dagster_type(dagster_type)\n ),\n description=check.opt_str_param(description, "description"),\n is_required=check.bool_param(is_required, "is_required"),\n io_manager_key=check.opt_str_param(\n io_manager_key, "io_manager_key", default=DEFAULT_IO_MANAGER_KEY\n ),\n metadata=check.opt_mapping_param(metadata, "metadata", key_type=str),\n group_name=check.opt_str_param(group_name, "group_name"),\n code_version=check.opt_str_param(code_version, "code_version"),\n freshness_policy=check.opt_inst_param(\n freshness_policy, "freshness_policy", FreshnessPolicy\n ),\n auto_materialize_policy=check.opt_inst_param(\n auto_materialize_policy, "auto_materialize_policy", AutoMaterializePolicy\n ),\n backfill_policy=check.opt_inst_param(\n backfill_policy, "backfill_policy", BackfillPolicy\n ),\n )\n\n def to_out(self) -> Out:\n return Out(\n dagster_type=self.dagster_type,\n description=self.description,\n metadata=self.metadata,\n is_required=self.is_required,\n io_manager_key=self.io_manager_key,\n code_version=self.code_version,\n )
\n
", "current_page_name": "_modules/dagster/_core/definitions/asset_out", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.asset_out"}, "asset_selection": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.asset_selection

\nimport collections.abc\nimport operator\nfrom abc import ABC, abstractmethod\nfrom functools import reduce\nfrom typing import AbstractSet, Iterable, Optional, Sequence, Union, cast\n\nfrom typing_extensions import TypeAlias\n\nimport dagster._check as check\nfrom dagster._annotations import deprecated, public\nfrom dagster._core.definitions.asset_checks import AssetChecksDefinition\nfrom dagster._core.errors import DagsterInvalidSubsetError\nfrom dagster._core.selector.subset_selector import (\n    fetch_connected,\n    fetch_sinks,\n    fetch_sources,\n    parse_clause,\n)\n\nfrom .asset_check_spec import AssetCheckKey\nfrom .asset_graph import AssetGraph, InternalAssetGraph\nfrom .assets import AssetsDefinition\nfrom .events import (\n    AssetKey,\n    CoercibleToAssetKey,\n    CoercibleToAssetKeyPrefix,\n    key_prefix_from_coercible,\n)\nfrom .source_asset import SourceAsset\n\nCoercibleToAssetSelection: TypeAlias = Union[\n    str,\n    Sequence[str],\n    Sequence[AssetKey],\n    Sequence[Union["AssetsDefinition", "SourceAsset"]],\n    "AssetSelection",\n]\n\n\n
[docs]class AssetSelection(ABC):\n """An AssetSelection defines a query over a set of assets and asset checks, normally all that are defined in a code location.\n\n You can use the "|", "&", and "-" operators to create unions, intersections, and differences of selections, respectively.\n\n AssetSelections are typically used with :py:func:`define_asset_job`.\n\n By default, selecting assets will also select all of the asset checks that target those assets.\n\n Examples:\n .. code-block:: python\n\n # Select all assets in group "marketing":\n AssetSelection.groups("marketing")\n\n # Select all assets in group "marketing", as well as the asset with key "promotion":\n AssetSelection.groups("marketing") | AssetSelection.keys("promotion")\n\n # Select all assets in group "marketing" that are downstream of asset "leads":\n AssetSelection.groups("marketing") & AssetSelection.keys("leads").downstream()\n\n # Select a list of assets:\n AssetSelection.assets(*my_assets_list)\n\n # Select all assets except for those in group "marketing"\n AssetSelection.all() - AssetSelection.groups("marketing")\n\n # Select all assets which are materialized by the same op as "projections":\n AssetSelection.keys("projections").required_multi_asset_neighbors()\n\n # Select all assets in group "marketing" and exclude their asset checks:\n AssetSelection.groups("marketing") - AssetSelection.all_asset_checks()\n\n # Select all asset checks that target a list of assets:\n AssetSelection.checks_for_assets(*my_assets_list)\n\n # Select a specific asset check:\n AssetSelection.checks(my_asset_check)\n\n """\n\n
[docs] @public\n @staticmethod\n def all() -> "AllSelection":\n """Returns a selection that includes all assets and asset checks."""\n return AllSelection()
\n\n
[docs] @public\n @staticmethod\n def all_asset_checks() -> "AllAssetCheckSelection":\n """Returns a selection that includes all asset checks."""\n return AllAssetCheckSelection()
\n\n
[docs] @public\n @staticmethod\n def assets(*assets_defs: AssetsDefinition) -> "KeysAssetSelection":\n """Returns a selection that includes all of the provided assets and asset checks that target them."""\n return KeysAssetSelection(*(key for assets_def in assets_defs for key in assets_def.keys))
\n\n
[docs] @public\n @staticmethod\n def keys(*asset_keys: CoercibleToAssetKey) -> "KeysAssetSelection":\n """Returns a selection that includes assets with any of the provided keys and all asset checks that target them.\n\n Examples:\n .. code-block:: python\n\n AssetSelection.keys(AssetKey(["a"]))\n\n AssetSelection.keys("a")\n\n AssetSelection.keys(AssetKey(["a"]), AssetKey(["b"]))\n\n AssetSelection.keys("a", "b")\n\n asset_key_list = [AssetKey(["a"]), AssetKey(["b"])]\n AssetSelection.keys(*asset_key_list)\n """\n _asset_keys = [\n AssetKey.from_user_string(key) if isinstance(key, str) else AssetKey.from_coercible(key)\n for key in asset_keys\n ]\n return KeysAssetSelection(*_asset_keys)
\n\n
[docs] @public\n @staticmethod\n def key_prefixes(\n *key_prefixes: CoercibleToAssetKeyPrefix, include_sources: bool = False\n ) -> "KeyPrefixesAssetSelection":\n """Returns a selection that includes assets that match any of the provided key prefixes and all the asset checks that target them.\n\n Args:\n include_sources (bool): If True, then include source assets matching the key prefix(es)\n in the selection.\n\n Examples:\n .. code-block:: python\n\n # match any asset key where the first segment is equal to "a" or "b"\n # e.g. AssetKey(["a", "b", "c"]) would match, but AssetKey(["abc"]) would not.\n AssetSelection.key_prefixes("a", "b")\n\n # match any asset key where the first two segments are ["a", "b"] or ["a", "c"]\n AssetSelection.key_prefixes(["a", "b"], ["a", "c"])\n """\n _asset_key_prefixes = [key_prefix_from_coercible(key_prefix) for key_prefix in key_prefixes]\n return KeyPrefixesAssetSelection(*_asset_key_prefixes, include_sources=include_sources)
\n\n
[docs] @public\n @staticmethod\n def groups(*group_strs, include_sources: bool = False) -> "GroupsAssetSelection":\n """Returns a selection that includes materializable assets that belong to any of the\n provided groups and all the asset checks that target them.\n\n Args:\n include_sources (bool): If True, then include source assets matching the group in the\n selection.\n """\n check.tuple_param(group_strs, "group_strs", of_type=str)\n return GroupsAssetSelection(*group_strs, include_sources=include_sources)
\n\n
[docs] @public\n @staticmethod\n def checks_for_assets(*assets_defs: AssetsDefinition) -> "AssetChecksForAssetKeys":\n """Returns a selection with the asset checks that target the provided assets."""\n return AssetChecksForAssetKeys(\n [key for assets_def in assets_defs for key in assets_def.keys]\n )
\n\n
[docs] @public\n @staticmethod\n def checks(*asset_checks: AssetChecksDefinition) -> "AssetChecksForHandles":\n """Returns a selection that includes all of the provided asset checks."""\n return AssetChecksForHandles(\n [\n AssetCheckKey(asset_key=AssetKey.from_coercible(spec.asset_key), name=spec.name)\n for checks_def in asset_checks\n for spec in checks_def.specs\n ]\n )
\n\n
[docs] @public\n def downstream(\n self, depth: Optional[int] = None, include_self: bool = True\n ) -> "DownstreamAssetSelection":\n """Returns a selection that includes all assets that are downstream of any of the assets in\n this selection, selecting the assets in this selection by default. Includes the asset checks targeting the returned assets. Iterates through each\n asset in this selection and returns the union of all downstream assets.\n\n Args:\n depth (Optional[int]): If provided, then only include assets to the given depth. A depth\n of 2 means all assets that are children or grandchildren of the assets in this\n selection.\n include_self (bool): If True, then include the assets in this selection in the result.\n If the include_self flag is False, return each downstream asset that is not part of the\n original selection. By default, set to True.\n """\n check.opt_int_param(depth, "depth")\n check.opt_bool_param(include_self, "include_self")\n return DownstreamAssetSelection(self, depth=depth, include_self=include_self)
\n\n
[docs] @public\n def upstream(\n self, depth: Optional[int] = None, include_self: bool = True\n ) -> "UpstreamAssetSelection":\n """Returns a selection that includes all materializable assets that are upstream of any of\n the assets in this selection, selecting the assets in this selection by default. Includes the asset checks targeting the returned assets. Iterates\n through each asset in this selection and returns the union of all upstream assets.\n\n Because mixed selections of source and materializable assets are currently not supported,\n keys corresponding to `SourceAssets` will not be included as upstream of regular assets.\n\n Args:\n depth (Optional[int]): If provided, then only include assets to the given depth. A depth\n of 2 means all assets that are parents or grandparents of the assets in this\n selection.\n include_self (bool): If True, then include the assets in this selection in the result.\n If the include_self flag is False, return each upstream asset that is not part of the\n original selection. By default, set to True.\n """\n check.opt_int_param(depth, "depth")\n check.opt_bool_param(include_self, "include_self")\n return UpstreamAssetSelection(self, depth=depth, include_self=include_self)
\n\n
[docs] @public\n def sinks(self) -> "SinkAssetSelection":\n """Given an asset selection, returns a new asset selection that contains all of the sink\n assets within the original asset selection. Includes the asset checks targeting the returned assets.\n\n A sink asset is an asset that has no downstream dependencies within the asset selection.\n The sink asset can have downstream dependencies outside of the asset selection.\n """\n return SinkAssetSelection(self)
\n\n
[docs] @public\n def required_multi_asset_neighbors(self) -> "RequiredNeighborsAssetSelection":\n """Given an asset selection in which some assets are output from a multi-asset compute op\n which cannot be subset, returns a new asset selection that contains all of the assets\n required to execute the original asset selection. Includes the asset checks targeting the returned assets.\n """\n return RequiredNeighborsAssetSelection(self)
\n\n
[docs] @public\n def roots(self) -> "RootAssetSelection":\n """Given an asset selection, returns a new asset selection that contains all of the root\n assets within the original asset selection. Includes the asset checks targeting the returned assets.\n\n A root asset is an asset that has no upstream dependencies within the asset selection.\n The root asset can have downstream dependencies outside of the asset selection.\n\n Because mixed selections of source and materializable assets are currently not supported,\n keys corresponding to `SourceAssets` will not be included as roots. To select source assets,\n use the `upstream_source_assets` method.\n """\n return RootAssetSelection(self)
\n\n
[docs] @public\n @deprecated(breaking_version="2.0", additional_warn_text="Use AssetSelection.roots instead.")\n def sources(self) -> "RootAssetSelection":\n """Given an asset selection, returns a new asset selection that contains all of the root\n assets within the original asset selection. Includes the asset checks targeting the returned assets.\n\n A root asset is a materializable asset that has no upstream dependencies within the asset\n selection. The root asset can have downstream dependencies outside of the asset selection.\n\n Because mixed selections of source and materializable assets are currently not supported,\n keys corresponding to `SourceAssets` will not be included as roots. To select source assets,\n use the `upstream_source_assets` method.\n """\n return self.roots()
\n\n
[docs] @public\n def upstream_source_assets(self) -> "SourceAssetSelection":\n """Given an asset selection, returns a new asset selection that contains all of the source\n assets upstream of assets in the original selection. Includes the asset checks targeting the returned assets.\n """\n return SourceAssetSelection(self)
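A sketch of `upstream_source_assets()`, assuming a hypothetical asset key `cleaned_data` fed by one or more `SourceAssets`:

```python
from dagster import AssetSelection

# Because a selection cannot mix source assets and materializable assets,
# the SourceAssets feeding a selection are picked out explicitly.
feeding_sources = AssetSelection.keys("cleaned_data").upstream_source_assets()
```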
\n\n
[docs] @public\n def without_checks(self) -> "AssetSelection":\n """Removes all asset checks in the selection."""\n return self - AssetSelection.all_asset_checks()
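The operators defined below compose selections as sets; a sketch using hypothetical group names:

```python
from dagster import AssetSelection

marketing = AssetSelection.groups("marketing")
finance = AssetSelection.groups("finance")

either = marketing | finance          # union
shared = marketing & finance          # intersection
only_marketing = marketing - finance  # difference

# Drop any asset checks that target the selected assets.
assets_only = either.without_checks()
```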
\n\n def __or__(self, other: "AssetSelection") -> "OrAssetSelection":\n check.inst_param(other, "other", AssetSelection)\n return OrAssetSelection(self, other)\n\n def __and__(self, other: "AssetSelection") -> "AndAssetSelection":\n check.inst_param(other, "other", AssetSelection)\n return AndAssetSelection(self, other)\n\n def __sub__(self, other: "AssetSelection") -> "SubAssetSelection":\n check.inst_param(other, "other", AssetSelection)\n return SubAssetSelection(self, other)\n\n def resolve(\n self, all_assets: Union[Iterable[Union[AssetsDefinition, SourceAsset]], AssetGraph]\n ) -> AbstractSet[AssetKey]:\n if isinstance(all_assets, AssetGraph):\n asset_graph = all_assets\n else:\n check.iterable_param(all_assets, "all_assets", (AssetsDefinition, SourceAsset))\n asset_graph = AssetGraph.from_assets(all_assets)\n\n resolved = self.resolve_inner(asset_graph)\n resolved_source_assets = asset_graph.source_asset_keys & resolved\n resolved_regular_assets = resolved - asset_graph.source_asset_keys\n check.invariant(\n not (len(resolved_source_assets) > 0 and len(resolved_regular_assets) > 0),\n "Asset selection specified both regular assets and source assets. This is not"\n " currently supported. Selections must be all regular assets or all source assets.",\n )\n return resolved\n\n @abstractmethod\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n raise NotImplementedError()\n\n def resolve_checks(self, asset_graph: InternalAssetGraph) -> AbstractSet[AssetCheckKey]:\n """We don't need this method currently, but it makes things consistent with resolve_inner. Currently\n we don't store checks in the ExternalAssetGraph, so we only support InternalAssetGraph.\n """\n return self.resolve_checks_inner(asset_graph)\n\n def resolve_checks_inner(self, asset_graph: InternalAssetGraph) -> AbstractSet[AssetCheckKey]:\n """By default, resolve to checks that target the selected assets. 
This is overriden for particular selections."""\n asset_keys = self.resolve(asset_graph)\n return {handle for handle in asset_graph.asset_check_keys if handle.asset_key in asset_keys}\n\n @staticmethod\n def _selection_from_string(string: str) -> "AssetSelection":\n from dagster._core.definitions import AssetSelection\n\n if string == "*":\n return AssetSelection.all()\n\n parts = parse_clause(string)\n if not parts:\n check.failed(f"Invalid selection string: {string}")\n u, item, d = parts\n\n selection: AssetSelection = AssetSelection.keys(item)\n if u:\n selection = selection.upstream(u)\n if d:\n selection = selection.downstream(d)\n return selection\n\n @classmethod\n def from_coercible(cls, selection: CoercibleToAssetSelection) -> "AssetSelection":\n if isinstance(selection, str):\n return cls._selection_from_string(selection)\n elif isinstance(selection, AssetSelection):\n return selection\n elif isinstance(selection, collections.abc.Sequence) and all(\n isinstance(el, str) for el in selection\n ):\n return reduce(\n operator.or_, [cls._selection_from_string(cast(str, s)) for s in selection]\n )\n elif isinstance(selection, collections.abc.Sequence) and all(\n isinstance(el, (AssetsDefinition, SourceAsset)) for el in selection\n ):\n return AssetSelection.keys(\n *(\n key\n for el in selection\n for key in (\n el.keys if isinstance(el, AssetsDefinition) else [cast(SourceAsset, el).key]\n )\n )\n )\n elif isinstance(selection, collections.abc.Sequence) and all(\n isinstance(el, AssetKey) for el in selection\n ):\n return cls.keys(*cast(Sequence[AssetKey], selection))\n else:\n check.failed(\n "selection argument must be one of str, Sequence[str], Sequence[AssetKey],"\n " Sequence[AssetsDefinition], Sequence[SourceAsset], AssetSelection. Was"\n f" {type(selection)}."\n )
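A sketch of the coercion rules above, using hypothetical asset keys; a `+` prefix or suffix in a selection string expands the selection one hop upstream or downstream via `parse_clause`:

```python
from dagster import AssetSelection

everything = AssetSelection.from_coercible("*")

# One hop upstream and one hop downstream of a hypothetical "my_asset".
neighborhood = AssetSelection.from_coercible("+my_asset+")

# A sequence of strings resolves to the union of the individual selections.
pair = AssetSelection.from_coercible(["asset_a", "asset_b"])
```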
\n\n\nclass AllSelection(AssetSelection):\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n return asset_graph.materializable_asset_keys\n\n\nclass AllAssetCheckSelection(AssetSelection):\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n return set()\n\n def resolve_checks_inner(self, asset_graph: InternalAssetGraph) -> AbstractSet[AssetCheckKey]:\n return asset_graph.asset_check_keys\n\n\nclass AssetChecksForAssetKeys(AssetSelection):\n def __init__(self, keys: Sequence[AssetKey]):\n self._keys = keys\n\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n return set()\n\n def resolve_checks_inner(self, asset_graph: InternalAssetGraph) -> AbstractSet[AssetCheckKey]:\n return {handle for handle in asset_graph.asset_check_keys if handle.asset_key in self._keys}\n\n\nclass AssetChecksForHandles(AssetSelection):\n def __init__(self, asset_check_keys: Sequence[AssetCheckKey]):\n self._asset_check_keys = asset_check_keys\n\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n return set()\n\n def resolve_checks_inner(self, asset_graph: InternalAssetGraph) -> AbstractSet[AssetCheckKey]:\n return {\n handle for handle in asset_graph.asset_check_keys if handle in self._asset_check_keys\n }\n\n\nclass AndAssetSelection(AssetSelection):\n def __init__(self, left: AssetSelection, right: AssetSelection):\n self._left = left\n self._right = right\n\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n return self._left.resolve_inner(asset_graph) & self._right.resolve_inner(asset_graph)\n\n def resolve_checks_inner(self, asset_graph: InternalAssetGraph) -> AbstractSet[AssetCheckKey]:\n return self._left.resolve_checks_inner(asset_graph) & self._right.resolve_checks_inner(\n asset_graph\n )\n\n\nclass SubAssetSelection(AssetSelection):\n def __init__(self, left: AssetSelection, right: AssetSelection):\n self._left = left\n self._right = right\n\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n return self._left.resolve_inner(asset_graph) - self._right.resolve_inner(asset_graph)\n\n def resolve_checks_inner(self, asset_graph: InternalAssetGraph) -> AbstractSet[AssetCheckKey]:\n return self._left.resolve_checks_inner(asset_graph) - self._right.resolve_checks_inner(\n asset_graph\n )\n\n\nclass SinkAssetSelection(AssetSelection):\n def __init__(self, child: AssetSelection):\n self._child = child\n\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n selection = self._child.resolve_inner(asset_graph)\n return fetch_sinks(asset_graph.asset_dep_graph, selection)\n\n\nclass RequiredNeighborsAssetSelection(AssetSelection):\n def __init__(self, child: AssetSelection):\n self._child = child\n\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n selection = self._child.resolve_inner(asset_graph)\n output = set(selection)\n for asset_key in selection:\n output.update(asset_graph.get_required_multi_asset_keys(asset_key))\n return output\n\n\nclass RootAssetSelection(AssetSelection):\n def __init__(self, child: AssetSelection):\n self._child = child\n\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n selection = self._child.resolve_inner(asset_graph)\n return fetch_sources(asset_graph.asset_dep_graph, selection)\n\n\nclass DownstreamAssetSelection(AssetSelection):\n def __init__(\n self,\n child: AssetSelection,\n *,\n depth: Optional[int] = None,\n include_self: Optional[bool] = 
True,\n ):\n self._child = child\n self.depth = depth\n self.include_self = include_self\n\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n selection = self._child.resolve_inner(asset_graph)\n return operator.sub(\n reduce(\n operator.or_,\n [\n {asset_key}\n | fetch_connected(\n item=asset_key,\n graph=asset_graph.asset_dep_graph,\n direction="downstream",\n depth=self.depth,\n )\n for asset_key in selection\n ],\n ),\n selection if not self.include_self else set(),\n )\n\n\nclass GroupsAssetSelection(AssetSelection):\n def __init__(self, *groups: str, include_sources: bool):\n self._groups = groups\n self._include_sources = include_sources\n\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n base_set = (\n asset_graph.all_asset_keys\n if self._include_sources\n else asset_graph.materializable_asset_keys\n )\n return {\n asset_key\n for asset_key, group in asset_graph.group_names_by_key.items()\n if group in self._groups and asset_key in base_set\n }\n\n\nclass KeysAssetSelection(AssetSelection):\n def __init__(self, *keys: AssetKey):\n self._keys = keys\n\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n specified_keys = set(self._keys)\n invalid_keys = {key for key in specified_keys if key not in asset_graph.all_asset_keys}\n if invalid_keys:\n raise DagsterInvalidSubsetError(\n f"AssetKey(s) {invalid_keys} were selected, but no AssetsDefinition objects supply "\n "these keys. Make sure all keys are spelled correctly, and all AssetsDefinitions "\n "are correctly added to the `Definitions`."\n )\n return specified_keys\n\n\nclass KeyPrefixesAssetSelection(AssetSelection):\n def __init__(self, *key_prefixes: Sequence[str], include_sources: bool):\n self._key_prefixes = key_prefixes\n self._include_sources = include_sources\n\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n base_set = (\n asset_graph.all_asset_keys\n if self._include_sources\n else asset_graph.materializable_asset_keys\n )\n return {\n key for key in base_set if any(key.has_prefix(prefix) for prefix in self._key_prefixes)\n }\n\n\nclass OrAssetSelection(AssetSelection):\n def __init__(self, left: AssetSelection, right: AssetSelection):\n self._left = left\n self._right = right\n\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n return self._left.resolve_inner(asset_graph) | self._right.resolve_inner(asset_graph)\n\n def resolve_checks_inner(self, asset_graph: InternalAssetGraph) -> AbstractSet[AssetCheckKey]:\n return self._left.resolve_checks_inner(asset_graph) | self._right.resolve_checks_inner(\n asset_graph\n )\n\n\ndef _fetch_all_upstream(\n selection: AbstractSet[AssetKey],\n asset_graph: AssetGraph,\n depth: Optional[int] = None,\n include_self: bool = True,\n) -> AbstractSet[AssetKey]:\n return operator.sub(\n reduce(\n operator.or_,\n [\n {asset_key}\n | fetch_connected(\n item=asset_key,\n graph=asset_graph.asset_dep_graph,\n direction="upstream",\n depth=depth,\n )\n for asset_key in selection\n ],\n set(),\n ),\n selection if not include_self else set(),\n )\n\n\nclass UpstreamAssetSelection(AssetSelection):\n def __init__(\n self,\n child: AssetSelection,\n *,\n depth: Optional[int] = None,\n include_self: bool = True,\n ):\n self._child = child\n self.depth = depth\n self.include_self = include_self\n\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n selection = self._child.resolve_inner(asset_graph)\n if len(selection) == 0:\n return 
selection\n all_upstream = _fetch_all_upstream(selection, asset_graph, self.depth, self.include_self)\n return {key for key in all_upstream if key not in asset_graph.source_asset_keys}\n\n\nclass SourceAssetSelection(AssetSelection):\n def __init__(self, child: AssetSelection):\n self._child = child\n\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n selection = self._child.resolve_inner(asset_graph)\n if len(selection) == 0:\n return selection\n all_upstream = _fetch_all_upstream(selection, asset_graph)\n return {key for key in all_upstream if key in asset_graph.source_asset_keys}\n
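A sketch of resolving a selection against concrete definitions, with hypothetical assets:

```python
from dagster import AssetKey, AssetSelection, asset

@asset
def upstream_asset():
    return 1

@asset
def downstream_asset(upstream_asset):
    return upstream_asset + 1

resolved = AssetSelection.keys("downstream_asset").upstream().resolve(
    [upstream_asset, downstream_asset]
)
assert resolved == {AssetKey("upstream_asset"), AssetKey("downstream_asset")}
```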
", "current_page_name": "_modules/dagster/_core/definitions/asset_selection", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.asset_selection"}, "asset_sensor_definition": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.asset_sensor_definition

\nimport inspect\nfrom typing import Any, Callable, NamedTuple, Optional, Sequence, Set\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.decorator_utils import get_function_params\nfrom dagster._core.definitions.resource_annotation import get_resource_args\n\nfrom .events import AssetKey\nfrom .run_request import RunRequest, SkipReason\nfrom .sensor_definition import (\n    DefaultSensorStatus,\n    RawSensorEvaluationFunctionReturn,\n    SensorDefinition,\n    SensorType,\n    validate_and_get_resource_dict,\n)\nfrom .target import ExecutableDefinition\nfrom .utils import check_valid_name\n\n\nclass AssetSensorParamNames(NamedTuple):\n    context_param_name: Optional[str]\n    event_log_entry_param_name: Optional[str]\n\n\ndef get_asset_sensor_param_names(fn: Callable) -> AssetSensorParamNames:\n    """Determines the names of the context and event log entry parameters for an asset sensor function.\n    These are assumed to be the first two non-resource params, in order (context param before event log entry).\n    """\n    resource_params = {param.name for param in get_resource_args(fn)}\n\n    non_resource_params = [\n        param.name for param in get_function_params(fn) if param.name not in resource_params\n    ]\n\n    context_param_name = non_resource_params[0] if len(non_resource_params) > 0 else None\n    event_log_entry_param_name = non_resource_params[1] if len(non_resource_params) > 1 else None\n\n    return AssetSensorParamNames(\n        context_param_name=context_param_name, event_log_entry_param_name=event_log_entry_param_name\n    )\n\n\n
[docs]class AssetSensorDefinition(SensorDefinition):\n """Define an asset sensor that initiates a set of runs based on the materialization of a given\n asset.\n\n If the asset has been materialized multiple times between since the last sensor tick, the\n evaluation function will only be invoked once, with the latest materialization.\n\n Args:\n name (str): The name of the sensor to create.\n asset_key (AssetKey): The asset_key this sensor monitors.\n asset_materialization_fn (Callable[[SensorEvaluationContext, EventLogEntry], Union[Iterator[Union[RunRequest, SkipReason]], RunRequest, SkipReason]]): The core\n evaluation function for the sensor, which is run at an interval to determine whether a\n run should be launched or not. Takes a :py:class:`~dagster.SensorEvaluationContext` and\n an EventLogEntry corresponding to an AssetMaterialization event.\n\n This function must return a generator, which must yield either a single SkipReason\n or one or more RunRequest objects.\n minimum_interval_seconds (Optional[int]): The minimum number of seconds that will elapse\n between sensor evaluations.\n description (Optional[str]): A human-readable description of the sensor.\n job (Optional[Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]]): The job\n object to target with this sensor.\n jobs (Optional[Sequence[Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]]]):\n (experimental) A list of jobs to be executed when the sensor fires.\n default_status (DefaultSensorStatus): Whether the sensor starts as running or not. The default\n status can be overridden from the Dagster UI or via the GraphQL API.\n """\n\n def __init__(\n self,\n name: str,\n asset_key: AssetKey,\n job_name: Optional[str],\n asset_materialization_fn: Callable[\n ...,\n RawSensorEvaluationFunctionReturn,\n ],\n minimum_interval_seconds: Optional[int] = None,\n description: Optional[str] = None,\n job: Optional[ExecutableDefinition] = None,\n jobs: Optional[Sequence[ExecutableDefinition]] = None,\n default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,\n required_resource_keys: Optional[Set[str]] = None,\n ):\n self._asset_key = check.inst_param(asset_key, "asset_key", AssetKey)\n\n from dagster._core.events import DagsterEventType\n from dagster._core.storage.event_log.base import EventRecordsFilter\n\n resource_arg_names: Set[str] = {\n arg.name for arg in get_resource_args(asset_materialization_fn)\n }\n\n combined_required_resource_keys = (\n check.opt_set_param(required_resource_keys, "required_resource_keys", of_type=str)\n | resource_arg_names\n )\n\n def _wrap_asset_fn(materialization_fn) -> Any:\n def _fn(context) -> Any:\n after_cursor = None\n if context.cursor:\n try:\n after_cursor = int(context.cursor)\n except ValueError:\n after_cursor = None\n\n event_records = context.instance.get_event_records(\n EventRecordsFilter(\n event_type=DagsterEventType.ASSET_MATERIALIZATION,\n asset_key=self._asset_key,\n after_cursor=after_cursor,\n ),\n ascending=False,\n limit=1,\n )\n\n if not event_records:\n yield SkipReason(\n f"No new materialization events found for asset key {self._asset_key}"\n )\n return\n\n event_record = event_records[0]\n\n (\n context_param_name,\n event_log_entry_param_name,\n ) = get_asset_sensor_param_names(materialization_fn)\n\n resource_args_populated = validate_and_get_resource_dict(\n context.resources, name, resource_arg_names\n )\n\n # Build asset sensor function args, which can include any subset of\n # context arg, event log entry arg, and any 
resource args\n args = resource_args_populated\n if context_param_name:\n args[context_param_name] = context\n if event_log_entry_param_name:\n args[event_log_entry_param_name] = event_record.event_log_entry\n\n result = materialization_fn(**args)\n if inspect.isgenerator(result) or isinstance(result, list):\n for item in result:\n yield item\n elif isinstance(result, (SkipReason, RunRequest)):\n yield result\n context.update_cursor(str(event_record.storage_id))\n\n return _fn\n\n super(AssetSensorDefinition, self).__init__(\n name=check_valid_name(name),\n job_name=job_name,\n evaluation_fn=_wrap_asset_fn(\n check.callable_param(asset_materialization_fn, "asset_materialization_fn"),\n ),\n minimum_interval_seconds=minimum_interval_seconds,\n description=description,\n job=job,\n jobs=jobs,\n default_status=default_status,\n required_resource_keys=combined_required_resource_keys,\n )\n\n @public\n @property\n def asset_key(self) -> AssetKey:\n """AssetKey: The key of the asset targeted by this sensor."""\n return self._asset_key\n\n @property\n def sensor_type(self) -> SensorType:\n return SensorType.ASSET
\n
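`AssetSensorDefinition` is usually constructed through the `@asset_sensor` decorator rather than directly; a sketch with hypothetical names:

```python
from dagster import AssetKey, RunRequest, asset_sensor, job, op

@op
def process_update():
    ...

@job
def downstream_job():
    process_update()

# Fires with the latest materialization of "my_table" since the stored
# cursor, at most once per tick.
@asset_sensor(asset_key=AssetKey("my_table"), job=downstream_job)
def my_table_sensor(context, asset_event):
    yield RunRequest(run_key=context.cursor)
```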
", "current_page_name": "_modules/dagster/_core/definitions/asset_sensor_definition", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.asset_sensor_definition"}, "asset_spec": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.asset_spec

\nfrom enum import Enum\nfrom typing import TYPE_CHECKING, Any, Iterable, Mapping, NamedTuple, Optional\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, experimental\nfrom dagster._core.errors import DagsterInvariantViolationError\n\nfrom .auto_materialize_policy import AutoMaterializePolicy\nfrom .events import (\n    AssetKey,\n    CoercibleToAssetKey,\n)\nfrom .freshness_policy import FreshnessPolicy\nfrom .metadata import MetadataUserInput\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.asset_dep import AssetDep, CoercibleToAssetDep\n\n# SYSTEM_METADATA_KEY_ASSET_EXECUTION_TYPE lives on the metadata of an asset\n# (which currently ends up on the Output associated with the asset key)\n# which encodes the execution type of the asset. "Unexecutable" assets are assets\n# that cannot be materialized in Dagster, but can have events in the event\n# log keyed off of them, making Dagster usable as an observability and lineage tool\n# for externally materialized assets.\nSYSTEM_METADATA_KEY_ASSET_EXECUTION_TYPE = "dagster/asset_execution_type"\n\n\nclass AssetExecutionType(Enum):\n    OBSERVATION = "OBSERVATION"\n    UNEXECUTABLE = "UNEXECUTABLE"\n    MATERIALIZATION = "MATERIALIZATION"\n\n    @staticmethod\n    def is_executable(varietal_str: Optional[str]) -> bool:\n        return AssetExecutionType.str_to_enum(varietal_str) in {\n            AssetExecutionType.MATERIALIZATION,\n            AssetExecutionType.OBSERVATION,\n        }\n\n    @staticmethod\n    def str_to_enum(varietal_str: Optional[str]) -> "AssetExecutionType":\n        return (\n            AssetExecutionType.MATERIALIZATION\n            if varietal_str is None\n            else AssetExecutionType(varietal_str)\n        )\n\n\n
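The helpers above imply the following behavior, shown as a small sketch (the import path is the module's own):

```python
from dagster._core.definitions.asset_spec import AssetExecutionType

# Missing metadata defaults to MATERIALIZATION, which counts as executable;
# UNEXECUTABLE assets only carry events and cannot be materialized by Dagster.
assert AssetExecutionType.str_to_enum(None) == AssetExecutionType.MATERIALIZATION
assert AssetExecutionType.is_executable(None)
assert AssetExecutionType.is_executable("OBSERVATION")
assert not AssetExecutionType.is_executable("UNEXECUTABLE")
```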
[docs]@experimental\nclass AssetSpec(\n NamedTuple(\n "_AssetSpec",\n [\n ("key", PublicAttr[AssetKey]),\n ("deps", PublicAttr[Iterable["AssetDep"]]),\n ("description", PublicAttr[Optional[str]]),\n ("metadata", PublicAttr[Optional[Mapping[str, Any]]]),\n ("group_name", PublicAttr[Optional[str]]),\n ("skippable", PublicAttr[bool]),\n ("code_version", PublicAttr[Optional[str]]),\n ("freshness_policy", PublicAttr[Optional[FreshnessPolicy]]),\n ("auto_materialize_policy", PublicAttr[Optional[AutoMaterializePolicy]]),\n ],\n )\n):\n """Specifies the core attributes of an asset. This object is attached to the decorated\n function that defines how it materialized.\n\n Attributes:\n key (AssetKey): The unique identifier for this asset.\n deps (Optional[AbstractSet[AssetKey]]): The asset keys for the upstream assets that\n materializing this asset depends on.\n description (Optional[str]): Human-readable description of this asset.\n metadata (Optional[Dict[str, Any]]): A dict of static metadata for this asset.\n For example, users can provide information about the database table this\n asset corresponds to.\n skippable (bool): Whether this asset can be omitted during materialization, causing downstream\n dependencies to skip.\n group_name (Optional[str]): A string name used to organize multiple assets into groups. If\n not provided, the name "default" is used.\n code_version (Optional[str]): The version of the code for this specific asset,\n overriding the code version of the materialization function\n freshness_policy (Optional[FreshnessPolicy]): A policy which indicates how up to date this\n asset is intended to be.\n auto_materialize_policy (Optional[AutoMaterializePolicy]): AutoMaterializePolicy to apply to\n the specified asset.\n backfill_policy (Optional[BackfillPolicy]): BackfillPolicy to apply to the specified asset.\n """\n\n def __new__(\n cls,\n key: CoercibleToAssetKey,\n *,\n deps: Optional[Iterable["CoercibleToAssetDep"]] = None,\n description: Optional[str] = None,\n metadata: Optional[MetadataUserInput] = None,\n skippable: bool = False,\n group_name: Optional[str] = None,\n code_version: Optional[str] = None,\n freshness_policy: Optional[FreshnessPolicy] = None,\n auto_materialize_policy: Optional[AutoMaterializePolicy] = None,\n ):\n from dagster._core.definitions.asset_dep import AssetDep\n\n dep_set = {}\n if deps:\n for dep in deps:\n asset_dep = AssetDep.from_coercible(dep)\n\n # we cannot do deduplication via a set because MultiPartitionMappings have an internal\n # dictionary that cannot be hashed. Instead deduplicate by making a dictionary and checking\n # for existing keys.\n if asset_dep.asset_key in dep_set.keys():\n raise DagsterInvariantViolationError(\n f"Cannot set a dependency on asset {asset_dep.asset_key} more than once for"\n f" AssetSpec {key}"\n )\n dep_set[asset_dep.asset_key] = asset_dep\n\n return super().__new__(\n cls,\n key=AssetKey.from_coercible(key),\n deps=list(dep_set.values()),\n description=check.opt_str_param(description, "description"),\n metadata=check.opt_mapping_param(metadata, "metadata", key_type=str),\n skippable=check.bool_param(skippable, "skippable"),\n group_name=check.opt_str_param(group_name, "group_name"),\n code_version=check.opt_str_param(code_version, "code_version"),\n freshness_policy=check.opt_inst_param(\n freshness_policy,\n "freshness_policy",\n FreshnessPolicy,\n ),\n auto_materialize_policy=check.opt_inst_param(\n auto_materialize_policy,\n "auto_materialize_policy",\n AutoMaterializePolicy,\n ),\n )
\n
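A sketch of declaring external assets from `AssetSpec`s, in the spirit of this release's external-assets docs; the asset keys are hypothetical and `external_assets_from_specs` is assumed to be exported by this version of `dagster`:

```python
from dagster import AssetSpec, Definitions, external_assets_from_specs

raw_logs = AssetSpec("raw_logs", description="Landed by an external system")
processed_logs = AssetSpec("processed_logs", deps=["raw_logs"])

# Wrap the specs as unexecutable (external) asset definitions.
defs = Definitions(assets=external_assets_from_specs([raw_logs, processed_logs]))
```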
", "current_page_name": "_modules/dagster/_core/definitions/asset_spec", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.asset_spec"}, "assets": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.assets

\nimport hashlib\nimport json\nimport warnings\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Dict,\n    Iterable,\n    Iterator,\n    List,\n    Mapping,\n    Optional,\n    Sequence,\n    Set,\n    Union,\n    cast,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import experimental_param, public\nfrom dagster._core.definitions.asset_check_spec import AssetCheckKey, AssetCheckSpec\nfrom dagster._core.definitions.asset_layer import get_dep_node_handles_of_graph_backed_asset\nfrom dagster._core.definitions.asset_spec import AssetExecutionType\nfrom dagster._core.definitions.auto_materialize_policy import AutoMaterializePolicy\nfrom dagster._core.definitions.backfill_policy import BackfillPolicy, BackfillPolicyType\nfrom dagster._core.definitions.freshness_policy import FreshnessPolicy\nfrom dagster._core.definitions.metadata import ArbitraryMetadataMapping\nfrom dagster._core.definitions.multi_dimensional_partitions import MultiPartitionsDefinition\nfrom dagster._core.definitions.op_invocation import direct_invocation_result\nfrom dagster._core.definitions.op_selection import get_graph_subset\nfrom dagster._core.definitions.partition_mapping import MultiPartitionMapping\nfrom dagster._core.definitions.resource_requirement import (\n    RequiresResources,\n    ResourceAddable,\n    ResourceRequirement,\n    merge_resource_defs,\n)\nfrom dagster._core.definitions.time_window_partition_mapping import TimeWindowPartitionMapping\nfrom dagster._core.definitions.time_window_partitions import TimeWindowPartitionsDefinition\nfrom dagster._core.errors import (\n    DagsterInvalidDefinitionError,\n    DagsterInvalidInvocationError,\n    DagsterInvariantViolationError,\n)\nfrom dagster._utils import IHasInternalInit\nfrom dagster._utils.merger import merge_dicts\nfrom dagster._utils.warnings import (\n    disable_dagster_warnings,\n)\n\nfrom .dependency import NodeHandle\nfrom .events import AssetKey, CoercibleToAssetKey, CoercibleToAssetKeyPrefix\nfrom .node_definition import NodeDefinition\nfrom .op_definition import OpDefinition\nfrom .partition import PartitionsDefinition\nfrom .partition_mapping import (\n    PartitionMapping,\n    get_builtin_partition_mapping_types,\n    infer_partition_mapping,\n)\nfrom .resource_definition import ResourceDefinition\nfrom .source_asset import SourceAsset\nfrom .utils import DEFAULT_GROUP_NAME, validate_group_name\n\nif TYPE_CHECKING:\n    from .graph_definition import GraphDefinition\n\n\n
[docs]class AssetsDefinition(ResourceAddable, RequiresResources, IHasInternalInit):\n """Defines a set of assets that are produced by the same op or graph.\n\n AssetsDefinitions are typically not instantiated directly, but rather produced using the\n :py:func:`@asset <asset>` or :py:func:`@multi_asset <multi_asset>` decorators.\n """\n\n _node_def: NodeDefinition\n _keys_by_input_name: Mapping[str, AssetKey]\n _keys_by_output_name: Mapping[str, AssetKey]\n _partitions_def: Optional[PartitionsDefinition]\n _partition_mappings: Mapping[AssetKey, PartitionMapping]\n _asset_deps: Mapping[AssetKey, AbstractSet[AssetKey]]\n _resource_defs: Mapping[str, ResourceDefinition]\n _group_names_by_key: Mapping[AssetKey, str]\n _selected_asset_keys: AbstractSet[AssetKey]\n _can_subset: bool\n _metadata_by_key: Mapping[AssetKey, ArbitraryMetadataMapping]\n _freshness_policies_by_key: Mapping[AssetKey, FreshnessPolicy]\n _auto_materialize_policies_by_key: Mapping[AssetKey, AutoMaterializePolicy]\n _backfill_policy: Optional[BackfillPolicy]\n _code_versions_by_key: Mapping[AssetKey, Optional[str]]\n _descriptions_by_key: Mapping[AssetKey, str]\n _selected_asset_check_keys: AbstractSet[AssetCheckKey]\n\n def __init__(\n self,\n *,\n keys_by_input_name: Mapping[str, AssetKey],\n keys_by_output_name: Mapping[str, AssetKey],\n node_def: NodeDefinition,\n partitions_def: Optional[PartitionsDefinition] = None,\n partition_mappings: Optional[Mapping[AssetKey, PartitionMapping]] = None,\n asset_deps: Optional[Mapping[AssetKey, AbstractSet[AssetKey]]] = None,\n selected_asset_keys: Optional[AbstractSet[AssetKey]] = None,\n can_subset: bool = False,\n resource_defs: Optional[Mapping[str, object]] = None,\n group_names_by_key: Optional[Mapping[AssetKey, str]] = None,\n metadata_by_key: Optional[Mapping[AssetKey, ArbitraryMetadataMapping]] = None,\n freshness_policies_by_key: Optional[Mapping[AssetKey, FreshnessPolicy]] = None,\n auto_materialize_policies_by_key: Optional[Mapping[AssetKey, AutoMaterializePolicy]] = None,\n backfill_policy: Optional[BackfillPolicy] = None,\n descriptions_by_key: Optional[Mapping[AssetKey, str]] = None,\n check_specs_by_output_name: Optional[Mapping[str, AssetCheckSpec]] = None,\n selected_asset_check_keys: Optional[AbstractSet[AssetCheckKey]] = None,\n # if adding new fields, make sure to handle them in the with_attributes, from_graph, and\n # get_attributes_dict methods\n ):\n from dagster._core.execution.build_resources import wrap_resources_for_execution\n\n from .graph_definition import GraphDefinition\n\n if isinstance(node_def, GraphDefinition):\n _validate_graph_def(node_def)\n\n self._node_def = node_def\n self._keys_by_input_name = check.mapping_param(\n keys_by_input_name,\n "keys_by_input_name",\n key_type=str,\n value_type=AssetKey,\n )\n self._keys_by_output_name = check.mapping_param(\n keys_by_output_name,\n "keys_by_output_name",\n key_type=str,\n value_type=AssetKey,\n )\n\n check.opt_mapping_param(\n check_specs_by_output_name,\n "check_specs_by_output_name",\n key_type=str,\n value_type=AssetCheckSpec,\n )\n\n # if not specified assume all output assets depend on all input assets\n all_asset_keys = set(keys_by_output_name.values())\n input_asset_keys = set(keys_by_input_name.values())\n\n self._partitions_def = partitions_def\n self._partition_mappings = partition_mappings or {}\n builtin_partition_mappings = get_builtin_partition_mapping_types()\n for asset_key, partition_mapping in self._partition_mappings.items():\n if not isinstance(partition_mapping, 
builtin_partition_mappings):\n warnings.warn(\n f"Non-built-in PartitionMappings, such as {type(partition_mapping).__name__} "\n "are deprecated and will not work with asset reconciliation. The built-in "\n "partition mappings are "\n + ", ".join(\n builtin_partition_mapping.__name__\n for builtin_partition_mapping in builtin_partition_mappings\n )\n + ".",\n category=DeprecationWarning,\n )\n\n if asset_key not in input_asset_keys:\n check.failed(\n f"While constructing AssetsDefinition outputting {all_asset_keys}, received a"\n f" partition mapping for {asset_key} that is not defined in the set of upstream"\n f" assets: {input_asset_keys}"\n )\n\n self._asset_deps = asset_deps or {\n out_asset_key: set(keys_by_input_name.values()) for out_asset_key in all_asset_keys\n }\n check.invariant(\n set(self._asset_deps.keys()) == all_asset_keys,\n "The set of asset keys with dependencies specified in the asset_deps argument must "\n "equal the set of asset keys produced by this AssetsDefinition. \\n"\n f"asset_deps keys: {set(self._asset_deps.keys())} \\n"\n f"expected keys: {all_asset_keys}",\n )\n self._resource_defs = wrap_resources_for_execution(\n check.opt_mapping_param(resource_defs, "resource_defs")\n )\n\n group_names_by_key = (\n check.mapping_param(group_names_by_key, "group_names_by_key")\n if group_names_by_key\n else {}\n )\n self._group_names_by_key = {}\n # assets that don't have a group name get a DEFAULT_GROUP_NAME\n for key in all_asset_keys:\n group_name = group_names_by_key.get(key)\n self._group_names_by_key[key] = validate_group_name(group_name)\n\n all_check_keys = {spec.key for spec in (check_specs_by_output_name or {}).values()}\n\n # NOTE: this logic mirrors subsetting at the asset layer. This is ripe for consolidation.\n if selected_asset_keys is None and selected_asset_check_keys is None:\n # if no selections, include everything\n self._selected_asset_keys = all_asset_keys\n self._selected_asset_check_keys = all_check_keys\n else:\n self._selected_asset_keys = selected_asset_keys or set()\n\n if selected_asset_check_keys is None:\n # if assets were selected but checks are None, then include all checks for selected\n # assets\n self._selected_asset_check_keys = {\n key for key in all_check_keys if key.asset_key in self._selected_asset_keys\n }\n else:\n # otherwise, use the selected checks\n self._selected_asset_check_keys = selected_asset_check_keys\n\n self._check_specs_by_output_name = {\n name: spec\n for name, spec in (check_specs_by_output_name or {}).items()\n if spec.key in self._selected_asset_check_keys\n }\n self._check_specs_by_key = {\n spec.key: spec for spec in self._check_specs_by_output_name.values()\n }\n\n self._can_subset = can_subset\n\n self._code_versions_by_key = {}\n self._metadata_by_key = dict(\n check.opt_mapping_param(\n metadata_by_key, "metadata_by_key", key_type=AssetKey, value_type=dict\n )\n )\n self._descriptions_by_key = dict(\n check.opt_mapping_param(\n descriptions_by_key, "descriptions_by_key", key_type=AssetKey, value_type=str\n )\n )\n for output_name, asset_key in keys_by_output_name.items():\n output_def, _ = node_def.resolve_output_to_origin(output_name, None)\n self._metadata_by_key[asset_key] = merge_dicts(\n output_def.metadata,\n self._metadata_by_key.get(asset_key, {}),\n )\n # We construct description from three sources of truth here. This\n # highly unfortunate. 
See commentary in @multi_asset's call to dagster_internal_init.\n description = (\n self._descriptions_by_key.get(asset_key, output_def.description)\n or node_def.description\n )\n if description:\n self._descriptions_by_key[asset_key] = description\n self._code_versions_by_key[asset_key] = output_def.code_version\n\n for key, freshness_policy in (freshness_policies_by_key or {}).items():\n check.param_invariant(\n not (\n freshness_policy\n and self._partitions_def is not None\n and not isinstance(self._partitions_def, TimeWindowPartitionsDefinition)\n ),\n "freshness_policies_by_key",\n "FreshnessPolicies are currently unsupported for assets with partitions of type"\n f" {type(self._partitions_def)}.",\n )\n\n self._freshness_policies_by_key = check.opt_mapping_param(\n freshness_policies_by_key,\n "freshness_policies_by_key",\n key_type=AssetKey,\n value_type=FreshnessPolicy,\n )\n\n self._auto_materialize_policies_by_key = check.opt_mapping_param(\n auto_materialize_policies_by_key,\n "auto_materialize_policies_by_key",\n key_type=AssetKey,\n value_type=AutoMaterializePolicy,\n )\n\n self._backfill_policy = check.opt_inst_param(\n backfill_policy, "backfill_policy", BackfillPolicy\n )\n\n if self._partitions_def is None:\n # check if backfill policy is BackfillPolicyType.SINGLE_RUN if asset is not partitioned\n check.param_invariant(\n (\n backfill_policy.policy_type is BackfillPolicyType.SINGLE_RUN\n if backfill_policy\n else True\n ),\n "backfill_policy",\n "Non partitioned asset can only have single run backfill policy",\n )\n\n _validate_self_deps(\n input_keys=self._keys_by_input_name.values(),\n output_keys=self._selected_asset_keys,\n partition_mappings=self._partition_mappings,\n partitions_def=self._partitions_def,\n )\n\n @staticmethod\n def dagster_internal_init(\n *,\n keys_by_input_name: Mapping[str, AssetKey],\n keys_by_output_name: Mapping[str, AssetKey],\n node_def: NodeDefinition,\n partitions_def: Optional[PartitionsDefinition],\n partition_mappings: Optional[Mapping[AssetKey, PartitionMapping]],\n asset_deps: Optional[Mapping[AssetKey, AbstractSet[AssetKey]]],\n selected_asset_keys: Optional[AbstractSet[AssetKey]],\n can_subset: bool,\n resource_defs: Optional[Mapping[str, object]],\n group_names_by_key: Optional[Mapping[AssetKey, str]],\n metadata_by_key: Optional[Mapping[AssetKey, ArbitraryMetadataMapping]],\n freshness_policies_by_key: Optional[Mapping[AssetKey, FreshnessPolicy]],\n auto_materialize_policies_by_key: Optional[Mapping[AssetKey, AutoMaterializePolicy]],\n backfill_policy: Optional[BackfillPolicy],\n descriptions_by_key: Optional[Mapping[AssetKey, str]],\n check_specs_by_output_name: Optional[Mapping[str, AssetCheckSpec]],\n selected_asset_check_keys: Optional[AbstractSet[AssetCheckKey]],\n ) -> "AssetsDefinition":\n return AssetsDefinition(\n keys_by_input_name=keys_by_input_name,\n keys_by_output_name=keys_by_output_name,\n node_def=node_def,\n partitions_def=partitions_def,\n partition_mappings=partition_mappings,\n asset_deps=asset_deps,\n selected_asset_keys=selected_asset_keys,\n can_subset=can_subset,\n resource_defs=resource_defs,\n group_names_by_key=group_names_by_key,\n metadata_by_key=metadata_by_key,\n freshness_policies_by_key=freshness_policies_by_key,\n auto_materialize_policies_by_key=auto_materialize_policies_by_key,\n backfill_policy=backfill_policy,\n descriptions_by_key=descriptions_by_key,\n check_specs_by_output_name=check_specs_by_output_name,\n selected_asset_check_keys=selected_asset_check_keys,\n )\n\n def __call__(self, 
*args: object, **kwargs: object) -> object:\n from .composition import is_in_composition\n from .graph_definition import GraphDefinition\n\n # defer to GraphDefinition.__call__ for graph backed assets, or if invoked in composition\n if isinstance(self.node_def, GraphDefinition) or is_in_composition():\n return self._node_def(*args, **kwargs)\n\n # invoke against self to allow assets def information to be used\n return direct_invocation_result(self, *args, **kwargs)\n\n
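As the class docstring notes, an `AssetsDefinition` usually comes from a decorator; calling it invokes the underlying computation directly, per `__call__` above. A sketch with a hypothetical asset:

```python
from dagster import AssetsDefinition, asset

@asset
def my_table():
    return [1, 2, 3]

assert isinstance(my_table, AssetsDefinition)
assert my_table() == [1, 2, 3]  # direct invocation of the underlying op
```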
[docs] @public\n @experimental_param(param="resource_defs")\n @staticmethod\n def from_graph(\n graph_def: "GraphDefinition",\n *,\n keys_by_input_name: Optional[Mapping[str, AssetKey]] = None,\n keys_by_output_name: Optional[Mapping[str, AssetKey]] = None,\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n internal_asset_deps: Optional[Mapping[str, Set[AssetKey]]] = None,\n partitions_def: Optional[PartitionsDefinition] = None,\n partition_mappings: Optional[Mapping[str, PartitionMapping]] = None,\n resource_defs: Optional[Mapping[str, ResourceDefinition]] = None,\n group_name: Optional[str] = None,\n group_names_by_output_name: Optional[Mapping[str, Optional[str]]] = None,\n descriptions_by_output_name: Optional[Mapping[str, str]] = None,\n metadata_by_output_name: Optional[Mapping[str, Optional[ArbitraryMetadataMapping]]] = None,\n freshness_policies_by_output_name: Optional[Mapping[str, Optional[FreshnessPolicy]]] = None,\n auto_materialize_policies_by_output_name: Optional[\n Mapping[str, Optional[AutoMaterializePolicy]]\n ] = None,\n backfill_policy: Optional[BackfillPolicy] = None,\n can_subset: bool = False,\n check_specs: Optional[Sequence[AssetCheckSpec]] = None,\n ) -> "AssetsDefinition":\n """Constructs an AssetsDefinition from a GraphDefinition.\n\n Args:\n graph_def (GraphDefinition): The GraphDefinition that is an asset.\n keys_by_input_name (Optional[Mapping[str, AssetKey]]): A mapping of the input\n names of the decorated graph to their corresponding asset keys. If not provided,\n the input asset keys will be created from the graph input names.\n keys_by_output_name (Optional[Mapping[str, AssetKey]]): A mapping of the output\n names of the decorated graph to their corresponding asset keys. If not provided,\n the output asset keys will be created from the graph output names.\n key_prefix (Optional[Union[str, Sequence[str]]]): If provided, key_prefix will be prepended\n to each key in keys_by_output_name. Each item in key_prefix must be a valid name in\n dagster (ie only contains letters, numbers, and _) and may not contain python\n reserved keywords.\n internal_asset_deps (Optional[Mapping[str, Set[AssetKey]]]): By default, it is assumed\n that all assets produced by the graph depend on all assets that are consumed by that\n graph. If this default is not correct, you pass in a map of output names to a\n corrected set of AssetKeys that they depend on. Any AssetKeys in this list must be\n either used as input to the asset or produced within the graph.\n partitions_def (Optional[PartitionsDefinition]): Defines the set of partition keys that\n compose the assets.\n partition_mappings (Optional[Mapping[str, PartitionMapping]]): Defines how to map partition\n keys for this asset to partition keys of upstream assets. Each key in the dictionary\n correponds to one of the input assets, and each value is a PartitionMapping.\n If no entry is provided for a particular asset dependency, the partition mapping defaults\n to the default partition mapping for the partitions definition, which is typically maps\n partition keys to the same partition keys in upstream assets.\n resource_defs (Optional[Mapping[str, ResourceDefinition]]):\n (Experimental) A mapping of resource keys to resource definitions. These resources\n will be initialized during execution, and can be accessed from the\n body of ops in the graph during execution.\n group_name (Optional[str]): A group name for the constructed asset. 
Assets without a\n group name are assigned to a group called "default".\n group_names_by_output_name (Optional[Mapping[str, Optional[str]]]): Defines a group name to be\n associated with some or all of the output assets for this node. Keys are names of the\n outputs, and values are the group name. Cannot be used with the group_name argument.\n descriptions_by_output_name (Optional[Mapping[str, Optional[str]]]): Defines a description to be\n associated with each of the output asstes for this graph.\n metadata_by_output_name (Optional[Mapping[str, Optional[MetadataUserInput]]]): Defines metadata to\n be associated with each of the output assets for this node. Keys are names of the\n outputs, and values are dictionaries of metadata to be associated with the related\n asset.\n freshness_policies_by_output_name (Optional[Mapping[str, Optional[FreshnessPolicy]]]): Defines a\n FreshnessPolicy to be associated with some or all of the output assets for this node.\n Keys are the names of the outputs, and values are the FreshnessPolicies to be attached\n to the associated asset.\n auto_materialize_policies_by_output_name (Optional[Mapping[str, Optional[AutoMaterializePolicy]]]): Defines an\n AutoMaterializePolicy to be associated with some or all of the output assets for this node.\n Keys are the names of the outputs, and values are the AutoMaterializePolicies to be attached\n to the associated asset.\n backfill_policy (Optional[BackfillPolicy]): Defines this asset's BackfillPolicy\n """\n return AssetsDefinition._from_node(\n node_def=graph_def,\n keys_by_input_name=keys_by_input_name,\n keys_by_output_name=keys_by_output_name,\n key_prefix=key_prefix,\n internal_asset_deps=internal_asset_deps,\n partitions_def=partitions_def,\n partition_mappings=partition_mappings,\n resource_defs=resource_defs,\n group_name=group_name,\n group_names_by_output_name=group_names_by_output_name,\n descriptions_by_output_name=descriptions_by_output_name,\n metadata_by_output_name=metadata_by_output_name,\n freshness_policies_by_output_name=freshness_policies_by_output_name,\n auto_materialize_policies_by_output_name=auto_materialize_policies_by_output_name,\n backfill_policy=backfill_policy,\n can_subset=can_subset,\n check_specs=check_specs,\n )
\n\n
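A minimal `from_graph` sketch with hypothetical ops; the graph's single output becomes the asset:

```python
from dagster import AssetsDefinition, graph, op

@op
def fetch():
    return 1

@op
def clean(raw):
    return raw + 1

@graph
def cleaned_data():
    return clean(fetch())

cleaned_data_asset = AssetsDefinition.from_graph(cleaned_data, group_name="ingestion")
```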
[docs] @public\n @staticmethod\n def from_op(\n op_def: OpDefinition,\n *,\n keys_by_input_name: Optional[Mapping[str, AssetKey]] = None,\n keys_by_output_name: Optional[Mapping[str, AssetKey]] = None,\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n internal_asset_deps: Optional[Mapping[str, Set[AssetKey]]] = None,\n partitions_def: Optional[PartitionsDefinition] = None,\n partition_mappings: Optional[Mapping[str, PartitionMapping]] = None,\n group_name: Optional[str] = None,\n group_names_by_output_name: Optional[Mapping[str, Optional[str]]] = None,\n descriptions_by_output_name: Optional[Mapping[str, str]] = None,\n metadata_by_output_name: Optional[Mapping[str, Optional[ArbitraryMetadataMapping]]] = None,\n freshness_policies_by_output_name: Optional[Mapping[str, Optional[FreshnessPolicy]]] = None,\n auto_materialize_policies_by_output_name: Optional[\n Mapping[str, Optional[AutoMaterializePolicy]]\n ] = None,\n backfill_policy: Optional[BackfillPolicy] = None,\n can_subset: bool = False,\n ) -> "AssetsDefinition":\n """Constructs an AssetsDefinition from an OpDefinition.\n\n Args:\n op_def (OpDefinition): The OpDefinition that is an asset.\n keys_by_input_name (Optional[Mapping[str, AssetKey]]): A mapping of the input\n names of the decorated op to their corresponding asset keys. If not provided,\n the input asset keys will be created from the op input names.\n keys_by_output_name (Optional[Mapping[str, AssetKey]]): A mapping of the output\n names of the decorated op to their corresponding asset keys. If not provided,\n the output asset keys will be created from the op output names.\n key_prefix (Optional[Union[str, Sequence[str]]]): If provided, key_prefix will be prepended\n to each key in keys_by_output_name. Each item in key_prefix must be a valid name in\n dagster (ie only contains letters, numbers, and _) and may not contain python\n reserved keywords.\n internal_asset_deps (Optional[Mapping[str, Set[AssetKey]]]): By default, it is assumed\n that all assets produced by the op depend on all assets that are consumed by that\n op. If this default is not correct, you pass in a map of output names to a\n corrected set of AssetKeys that they depend on. Any AssetKeys in this list must be\n either used as input to the asset or produced within the op.\n partitions_def (Optional[PartitionsDefinition]): Defines the set of partition keys that\n compose the assets.\n partition_mappings (Optional[Mapping[str, PartitionMapping]]): Defines how to map partition\n keys for this asset to partition keys of upstream assets. Each key in the dictionary\n correponds to one of the input assets, and each value is a PartitionMapping.\n If no entry is provided for a particular asset dependency, the partition mapping defaults\n to the default partition mapping for the partitions definition, which is typically maps\n partition keys to the same partition keys in upstream assets.\n group_name (Optional[str]): A group name for the constructed asset. Assets without a\n group name are assigned to a group called "default".\n group_names_by_output_name (Optional[Mapping[str, Optional[str]]]): Defines a group name to be\n associated with some or all of the output assets for this node. Keys are names of the\n outputs, and values are the group name. 
Cannot be used with the group_name argument.\n descriptions_by_output_name (Optional[Mapping[str, Optional[str]]]): Defines a description to be\n associated with each of the output asstes for this graph.\n metadata_by_output_name (Optional[Mapping[str, Optional[MetadataUserInput]]]): Defines metadata to\n be associated with each of the output assets for this node. Keys are names of the\n outputs, and values are dictionaries of metadata to be associated with the related\n asset.\n freshness_policies_by_output_name (Optional[Mapping[str, Optional[FreshnessPolicy]]]): Defines a\n FreshnessPolicy to be associated with some or all of the output assets for this node.\n Keys are the names of the outputs, and values are the FreshnessPolicies to be attached\n to the associated asset.\n auto_materialize_policies_by_output_name (Optional[Mapping[str, Optional[AutoMaterializePolicy]]]): Defines an\n AutoMaterializePolicy to be associated with some or all of the output assets for this node.\n Keys are the names of the outputs, and values are the AutoMaterializePolicies to be attached\n to the associated asset.\n backfill_policy (Optional[BackfillPolicy]): Defines this asset's BackfillPolicy\n """\n return AssetsDefinition._from_node(\n node_def=op_def,\n keys_by_input_name=keys_by_input_name,\n keys_by_output_name=keys_by_output_name,\n key_prefix=key_prefix,\n internal_asset_deps=internal_asset_deps,\n partitions_def=partitions_def,\n partition_mappings=partition_mappings,\n group_name=group_name,\n group_names_by_output_name=group_names_by_output_name,\n descriptions_by_output_name=descriptions_by_output_name,\n metadata_by_output_name=metadata_by_output_name,\n freshness_policies_by_output_name=freshness_policies_by_output_name,\n auto_materialize_policies_by_output_name=auto_materialize_policies_by_output_name,\n backfill_policy=backfill_policy,\n can_subset=can_subset,\n )
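A minimal `from_op` sketch mapping the op's default `result` output to an explicit key; names are hypothetical:

```python
from dagster import AssetKey, AssetsDefinition, op

@op
def sync_users_table():
    ...

users_table_asset = AssetsDefinition.from_op(
    sync_users_table,
    keys_by_output_name={"result": AssetKey("users_table")},
)
```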
\n\n @staticmethod\n def _from_node(\n node_def: Union[OpDefinition, "GraphDefinition"],\n *,\n keys_by_input_name: Optional[Mapping[str, AssetKey]] = None,\n keys_by_output_name: Optional[Mapping[str, AssetKey]] = None,\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n internal_asset_deps: Optional[Mapping[str, Set[AssetKey]]] = None,\n partitions_def: Optional[PartitionsDefinition] = None,\n partition_mappings: Optional[Mapping[str, PartitionMapping]] = None,\n resource_defs: Optional[Mapping[str, ResourceDefinition]] = None,\n group_name: Optional[str] = None,\n group_names_by_output_name: Optional[Mapping[str, Optional[str]]] = None,\n descriptions_by_output_name: Optional[Mapping[str, str]] = None,\n metadata_by_output_name: Optional[Mapping[str, Optional[ArbitraryMetadataMapping]]] = None,\n freshness_policies_by_output_name: Optional[Mapping[str, Optional[FreshnessPolicy]]] = None,\n auto_materialize_policies_by_output_name: Optional[\n Mapping[str, Optional[AutoMaterializePolicy]]\n ] = None,\n backfill_policy: Optional[BackfillPolicy] = None,\n can_subset: bool = False,\n check_specs: Optional[Sequence[AssetCheckSpec]] = None,\n ) -> "AssetsDefinition":\n from dagster._core.definitions.decorators.asset_decorator import (\n _validate_and_assign_output_names_to_check_specs,\n )\n\n node_def = check.inst_param(node_def, "node_def", NodeDefinition)\n keys_by_input_name = _infer_keys_by_input_names(\n node_def,\n check.opt_mapping_param(\n keys_by_input_name, "keys_by_input_name", key_type=str, value_type=AssetKey\n ),\n )\n keys_by_output_name = check.opt_mapping_param(\n keys_by_output_name,\n "keys_by_output_name",\n key_type=str,\n value_type=AssetKey,\n )\n internal_asset_deps = check.opt_mapping_param(\n internal_asset_deps, "internal_asset_deps", key_type=str, value_type=set\n )\n resource_defs = check.opt_mapping_param(\n resource_defs, "resource_defs", key_type=str, value_type=ResourceDefinition\n )\n transformed_internal_asset_deps: Dict[AssetKey, AbstractSet[AssetKey]] = {}\n if internal_asset_deps:\n for output_name, asset_keys in internal_asset_deps.items():\n check.invariant(\n output_name in keys_by_output_name,\n f"output_name {output_name} specified in internal_asset_deps does not exist"\n " in the decorated function",\n )\n transformed_internal_asset_deps[keys_by_output_name[output_name]] = asset_keys\n\n check_specs_by_output_name = _validate_and_assign_output_names_to_check_specs(\n check_specs, list(keys_by_output_name.values())\n )\n\n keys_by_output_name = _infer_keys_by_output_names(\n node_def, keys_by_output_name or {}, check_specs_by_output_name\n )\n\n keys_by_output_name_with_prefix: Dict[str, AssetKey] = {}\n key_prefix_list = [key_prefix] if isinstance(key_prefix, str) else key_prefix\n for output_name, key in keys_by_output_name.items():\n # add key_prefix to the beginning of each asset key\n key_with_key_prefix = AssetKey(\n list(filter(None, [*(key_prefix_list or []), *key.path]))\n )\n keys_by_output_name_with_prefix[output_name] = key_with_key_prefix\n\n check.param_invariant(\n group_name is None or group_names_by_output_name is None,\n "group_name",\n "Cannot use both group_name and group_names_by_output_name",\n )\n\n if group_name:\n group_names_by_key = {\n asset_key: group_name for asset_key in keys_by_output_name_with_prefix.values()\n }\n elif group_names_by_output_name:\n group_names_by_key = {\n keys_by_output_name_with_prefix[output_name]: group_name\n for output_name, group_name in group_names_by_output_name.items()\n if 
group_name is not None\n }\n else:\n group_names_by_key = None\n\n return AssetsDefinition.dagster_internal_init(\n keys_by_input_name=keys_by_input_name,\n keys_by_output_name=keys_by_output_name_with_prefix,\n node_def=node_def,\n asset_deps=transformed_internal_asset_deps or None,\n partitions_def=check.opt_inst_param(\n partitions_def,\n "partitions_def",\n PartitionsDefinition,\n ),\n group_names_by_key=group_names_by_key,\n resource_defs=resource_defs,\n partition_mappings=(\n {\n keys_by_input_name[input_name]: partition_mapping\n for input_name, partition_mapping in partition_mappings.items()\n }\n if partition_mappings\n else None\n ),\n metadata_by_key=(\n {\n keys_by_output_name_with_prefix[output_name]: metadata\n for output_name, metadata in metadata_by_output_name.items()\n if metadata is not None\n }\n if metadata_by_output_name\n else None\n ),\n freshness_policies_by_key=(\n {\n keys_by_output_name_with_prefix[output_name]: freshness_policy\n for output_name, freshness_policy in freshness_policies_by_output_name.items()\n if freshness_policy is not None\n }\n if freshness_policies_by_output_name\n else None\n ),\n auto_materialize_policies_by_key=(\n {\n keys_by_output_name_with_prefix[output_name]: auto_materialize_policy\n for output_name, auto_materialize_policy in auto_materialize_policies_by_output_name.items()\n if auto_materialize_policy is not None\n }\n if auto_materialize_policies_by_output_name\n else None\n ),\n backfill_policy=check.opt_inst_param(\n backfill_policy, "backfill_policy", BackfillPolicy\n ),\n descriptions_by_key=(\n {\n keys_by_output_name_with_prefix[output_name]: description\n for output_name, description in descriptions_by_output_name.items()\n if description is not None\n }\n if descriptions_by_output_name\n else None\n ),\n can_subset=can_subset,\n selected_asset_keys=None, # node has no subselection info\n check_specs_by_output_name=check_specs_by_output_name,\n selected_asset_check_keys=None,\n )\n\n @public\n @property\n def can_subset(self) -> bool:\n """bool: If True, indicates that this AssetsDefinition may materialize any subset of its\n asset keys in a given computation (as opposed to being required to materialize all asset\n keys).\n """\n return self._can_subset\n\n @public\n @property\n def group_names_by_key(self) -> Mapping[AssetKey, str]:\n """Mapping[AssetKey, str]: Returns a mapping from the asset keys in this AssetsDefinition\n to the group names assigned to them. If there is no assigned group name for a given AssetKey,\n it will not be present in this dictionary.\n """\n return self._group_names_by_key\n\n @public\n @property\n def descriptions_by_key(self) -> Mapping[AssetKey, str]:\n """Mapping[AssetKey, str]: Returns a mapping from the asset keys in this AssetsDefinition\n to the descriptions assigned to them. 
If there is no assigned description for a given AssetKey,\n it will not be present in this dictionary.\n """\n return self._descriptions_by_key\n\n @public\n @property\n def op(self) -> OpDefinition:\n """OpDefinition: Returns the OpDefinition that is used to materialize the assets in this\n AssetsDefinition.\n """\n check.invariant(\n isinstance(self._node_def, OpDefinition),\n "The NodeDefinition for this AssetsDefinition is not of type OpDefinition.",\n )\n return cast(OpDefinition, self._node_def)\n\n @public\n @property\n def node_def(self) -> NodeDefinition:\n """NodeDefinition: Returns the OpDefinition or GraphDefinition that is used to materialize\n the assets in this AssetsDefinition.\n """\n return self._node_def\n\n @public\n @property\n def asset_deps(self) -> Mapping[AssetKey, AbstractSet[AssetKey]]:\n """Maps assets that are produced by this definition to assets that they depend on. The\n dependencies can be either "internal", meaning that they refer to other assets that are\n produced by this definition, or "external", meaning that they refer to assets that aren't\n produced by this definition.\n """\n return self._asset_deps\n\n @property\n def input_names(self) -> Iterable[str]:\n """Iterable[str]: The set of input names of the underlying NodeDefinition for this\n AssetsDefinition.\n """\n return self.keys_by_input_name.keys()\n\n @public\n @property\n def key(self) -> AssetKey:\n """AssetKey: The asset key associated with this AssetsDefinition. If this AssetsDefinition\n has more than one asset key, this will produce an error.\n """\n check.invariant(\n len(self.keys) == 1,\n "Tried to retrieve asset key from an assets definition with multiple asset keys: "\n + ", ".join([str(ak.to_string()) for ak in self._keys_by_output_name.values()]),\n )\n\n return next(iter(self.keys))\n\n @public\n @property\n def resource_defs(self) -> Mapping[str, ResourceDefinition]:\n """Mapping[str, ResourceDefinition]: A mapping from resource name to ResourceDefinition for\n the resources bound to this AssetsDefinition.\n """\n return dict(self._resource_defs)\n\n @public\n @property\n def keys(self) -> AbstractSet[AssetKey]:\n """AbstractSet[AssetKey]: The asset keys associated with this AssetsDefinition."""\n return self._selected_asset_keys\n\n @public\n @property\n def dependency_keys(self) -> Iterable[AssetKey]:\n """Iterable[AssetKey]: The asset keys which are upstream of any asset included in this\n AssetsDefinition.\n """\n # the input asset keys that are directly upstream of a selected asset key\n upstream_keys = {dep_key for key in self.keys for dep_key in self.asset_deps[key]}\n input_keys = set(self._keys_by_input_name.values())\n return upstream_keys.intersection(input_keys)\n\n @property\n def node_keys_by_output_name(self) -> Mapping[str, AssetKey]:\n """AssetKey for each output on the underlying NodeDefinition."""\n return self._keys_by_output_name\n\n @property\n def node_keys_by_input_name(self) -> Mapping[str, AssetKey]:\n """AssetKey for each input on the underlying NodeDefinition."""\n return self._keys_by_input_name\n\n @property\n def check_specs_by_output_name(self) -> Mapping[str, AssetCheckSpec]:\n return self._check_specs_by_output_name\n\n def get_spec_for_check_key(self, asset_check_key: AssetCheckKey) -> AssetCheckSpec:\n return self._check_specs_by_key[asset_check_key]\n\n @property\n def keys_by_output_name(self) -> Mapping[str, AssetKey]:\n return {\n name: key for name, key in self.node_keys_by_output_name.items() if key in self.keys\n }\n\n @property\n def 
keys_by_input_name(self) -> Mapping[str, AssetKey]:\n upstream_keys = {dep_key for key in self.keys for dep_key in self.asset_deps[key]}\n return {\n name: key for name, key in self.node_keys_by_input_name.items() if key in upstream_keys\n }\n\n @property\n def freshness_policies_by_key(self) -> Mapping[AssetKey, FreshnessPolicy]:\n return self._freshness_policies_by_key\n\n @property\n def auto_materialize_policies_by_key(self) -> Mapping[AssetKey, AutoMaterializePolicy]:\n return self._auto_materialize_policies_by_key\n\n @property\n def backfill_policy(self) -> Optional[BackfillPolicy]:\n return self._backfill_policy\n\n @public\n @property\n def partitions_def(self) -> Optional[PartitionsDefinition]:\n """Optional[PartitionsDefinition]: The PartitionsDefinition for this AssetsDefinition (if any)."""\n return self._partitions_def\n\n @property\n def metadata_by_key(self) -> Mapping[AssetKey, ArbitraryMetadataMapping]:\n return self._metadata_by_key\n\n @property\n def code_versions_by_key(self) -> Mapping[AssetKey, Optional[str]]:\n return self._code_versions_by_key\n\n @property\n def partition_mappings(self) -> Mapping[AssetKey, PartitionMapping]:\n return self._partition_mappings\n\n
[docs] @public\n def get_partition_mapping(self, in_asset_key: AssetKey) -> Optional[PartitionMapping]:\n """Returns the partition mapping between keys in this AssetsDefinition and a given input\n asset key (if any).\n """\n return self._partition_mappings.get(in_asset_key)
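
A minimal sketch of how `get_partition_mapping` can be exercised from user code, assuming a pair of hypothetical daily-partitioned assets (`events`, `rollup`) where the downstream input declares a `TimeWindowPartitionMapping` via `AssetIn`:

```python
from dagster import (
    AssetIn,
    AssetKey,
    DailyPartitionsDefinition,
    TimeWindowPartitionMapping,
    asset,
)

daily = DailyPartitionsDefinition(start_date="2023-01-01")


@asset(partitions_def=daily)
def events():
    return [1, 2, 3]


@asset(
    partitions_def=daily,
    ins={
        "events": AssetIn(
            partition_mapping=TimeWindowPartitionMapping(start_offset=-1, end_offset=-1)
        )
    },
)
def rollup(events):
    return len(events)


# Returns the TimeWindowPartitionMapping declared on the "events" input, or None
# if no mapping was declared for that upstream key.
mapping = rollup.get_partition_mapping(AssetKey("events"))
```
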
\n\n @public\n @property\n def check_specs(self) -> Iterable[AssetCheckSpec]:\n """Returns the asset check specs defined on this AssetsDefinition, i.e. the checks that can\n be executed while materializing the assets.\n\n Returns:\n Iterable[AssetsCheckSpec]:\n """\n return self._check_specs_by_output_name.values()\n\n @property\n def check_keys(self) -> AbstractSet[AssetCheckKey]:\n """Returns the selected asset checks associated by this AssetsDefinition.\n\n Returns:\n AbstractSet[Tuple[AssetKey, str]]: The selected asset checks. An asset check is\n identified by the asset key and the name of the check.\n """\n return self._selected_asset_check_keys\n\n def is_asset_executable(self, asset_key: AssetKey) -> bool:\n """Returns True if the asset key is materializable by this AssetsDefinition.\n\n Args:\n asset_key (AssetKey): The asset key to check.\n\n Returns:\n bool: True if the asset key is materializable by this AssetsDefinition.\n """\n from dagster._core.definitions.asset_spec import (\n SYSTEM_METADATA_KEY_ASSET_EXECUTION_TYPE,\n AssetExecutionType,\n )\n\n return AssetExecutionType.is_executable(\n self._metadata_by_key.get(asset_key, {}).get(SYSTEM_METADATA_KEY_ASSET_EXECUTION_TYPE)\n )\n\n def asset_execution_type_for_asset(self, asset_key: AssetKey) -> AssetExecutionType:\n from dagster._core.definitions.asset_spec import (\n SYSTEM_METADATA_KEY_ASSET_EXECUTION_TYPE,\n AssetExecutionType,\n )\n\n return AssetExecutionType.str_to_enum(\n self._metadata_by_key.get(asset_key, {}).get(SYSTEM_METADATA_KEY_ASSET_EXECUTION_TYPE)\n )\n\n def get_partition_mapping_for_input(self, input_name: str) -> Optional[PartitionMapping]:\n return self._partition_mappings.get(self._keys_by_input_name[input_name])\n\n def infer_partition_mapping(\n self, upstream_asset_key: AssetKey, upstream_partitions_def: Optional[PartitionsDefinition]\n ) -> PartitionMapping:\n with disable_dagster_warnings():\n partition_mapping = self._partition_mappings.get(upstream_asset_key)\n return infer_partition_mapping(\n partition_mapping, self._partitions_def, upstream_partitions_def\n )\n\n def get_output_name_for_asset_key(self, key: AssetKey) -> str:\n for output_name, asset_key in self.keys_by_output_name.items():\n if key == asset_key:\n return output_name\n\n raise DagsterInvariantViolationError(\n f"Asset key {key.to_user_string()} not found in AssetsDefinition"\n )\n\n def get_op_def_for_asset_key(self, key: AssetKey) -> OpDefinition:\n """If this is an op-backed asset, returns the op def. 
If it's a graph-backed asset,\n returns the op def within the graph that produces the given asset key.\n """\n output_name = self.get_output_name_for_asset_key(key)\n return self.node_def.resolve_output_to_origin_op_def(output_name)\n\n def with_attributes(\n self,\n *,\n output_asset_key_replacements: Optional[Mapping[AssetKey, AssetKey]] = None,\n input_asset_key_replacements: Optional[Mapping[AssetKey, AssetKey]] = None,\n group_names_by_key: Optional[Mapping[AssetKey, str]] = None,\n descriptions_by_key: Optional[Mapping[AssetKey, str]] = None,\n metadata_by_key: Optional[Mapping[AssetKey, ArbitraryMetadataMapping]] = None,\n freshness_policy: Optional[\n Union[FreshnessPolicy, Mapping[AssetKey, FreshnessPolicy]]\n ] = None,\n auto_materialize_policy: Optional[\n Union[AutoMaterializePolicy, Mapping[AssetKey, AutoMaterializePolicy]]\n ] = None,\n backfill_policy: Optional[BackfillPolicy] = None,\n ) -> "AssetsDefinition":\n output_asset_key_replacements = check.opt_mapping_param(\n output_asset_key_replacements,\n "output_asset_key_replacements",\n key_type=AssetKey,\n value_type=AssetKey,\n )\n input_asset_key_replacements = check.opt_mapping_param(\n input_asset_key_replacements,\n "input_asset_key_replacements",\n key_type=AssetKey,\n value_type=AssetKey,\n )\n group_names_by_key = check.opt_mapping_param(\n group_names_by_key, "group_names_by_key", key_type=AssetKey, value_type=str\n )\n descriptions_by_key = check.opt_mapping_param(\n descriptions_by_key, "descriptions_by_key", key_type=AssetKey, value_type=str\n )\n metadata_by_key = check.opt_mapping_param(\n metadata_by_key, "metadata_by_key", key_type=AssetKey, value_type=dict\n )\n\n backfill_policy = check.opt_inst_param(backfill_policy, "backfill_policy", BackfillPolicy)\n\n if group_names_by_key:\n group_name_conflicts = [\n asset_key\n for asset_key in group_names_by_key\n if asset_key in self.group_names_by_key\n and self.group_names_by_key[asset_key] != DEFAULT_GROUP_NAME\n ]\n if group_name_conflicts:\n raise DagsterInvalidDefinitionError(\n "Group name already exists on assets"\n f" {', '.join(asset_key.to_user_string() for asset_key in group_name_conflicts)}"\n )\n\n replaced_group_names_by_key = {\n output_asset_key_replacements.get(key, key): group_name\n for key, group_name in self.group_names_by_key.items()\n }\n\n if freshness_policy:\n freshness_policy_conflicts = (\n self.freshness_policies_by_key.keys()\n if isinstance(freshness_policy, FreshnessPolicy)\n else (freshness_policy.keys() & self.freshness_policies_by_key.keys())\n )\n if freshness_policy_conflicts:\n raise DagsterInvalidDefinitionError(\n "FreshnessPolicy already exists on assets"\n f" {', '.join(key.to_string() for key in freshness_policy_conflicts)}"\n )\n\n replaced_freshness_policies_by_key = {}\n for key in self.keys:\n if isinstance(freshness_policy, FreshnessPolicy):\n replaced_freshness_policy = freshness_policy\n elif freshness_policy:\n replaced_freshness_policy = freshness_policy.get(key)\n else:\n replaced_freshness_policy = self.freshness_policies_by_key.get(key)\n\n if replaced_freshness_policy:\n replaced_freshness_policies_by_key[output_asset_key_replacements.get(key, key)] = (\n replaced_freshness_policy\n )\n\n if auto_materialize_policy:\n auto_materialize_policy_conflicts = (\n self.auto_materialize_policies_by_key.keys()\n if isinstance(auto_materialize_policy, AutoMaterializePolicy)\n else (auto_materialize_policy.keys() & self.auto_materialize_policies_by_key.keys())\n )\n if auto_materialize_policy_conflicts:\n raise 
DagsterInvalidDefinitionError(\n "AutoMaterializePolicy already exists on assets"\n f" {', '.join(key.to_string() for key in auto_materialize_policy_conflicts)}"\n )\n\n replaced_auto_materialize_policies_by_key = {}\n for key in self.keys:\n if isinstance(auto_materialize_policy, AutoMaterializePolicy):\n replaced_auto_materialize_policy = auto_materialize_policy\n elif auto_materialize_policy:\n replaced_auto_materialize_policy = auto_materialize_policy.get(key)\n else:\n replaced_auto_materialize_policy = self.auto_materialize_policies_by_key.get(key)\n\n if replaced_auto_materialize_policy:\n replaced_auto_materialize_policies_by_key[\n output_asset_key_replacements.get(key, key)\n ] = replaced_auto_materialize_policy\n\n replaced_descriptions_by_key = {\n output_asset_key_replacements.get(key, key): description\n for key, description in descriptions_by_key.items()\n }\n\n if not metadata_by_key:\n metadata_by_key = self.metadata_by_key\n\n replaced_metadata_by_key = {\n output_asset_key_replacements.get(key, key): metadata\n for key, metadata in metadata_by_key.items()\n }\n\n replaced_attributes = dict(\n keys_by_input_name={\n input_name: input_asset_key_replacements.get(key, key)\n for input_name, key in self._keys_by_input_name.items()\n },\n keys_by_output_name={\n output_name: output_asset_key_replacements.get(key, key)\n for output_name, key in self._keys_by_output_name.items()\n },\n partition_mappings={\n input_asset_key_replacements.get(key, key): partition_mapping\n for key, partition_mapping in self._partition_mappings.items()\n },\n asset_deps={\n # replace both the keys and the values in this mapping\n output_asset_key_replacements.get(key, key): {\n input_asset_key_replacements.get(\n upstream_key,\n output_asset_key_replacements.get(upstream_key, upstream_key),\n )\n for upstream_key in value\n }\n for key, value in self.asset_deps.items()\n },\n selected_asset_keys={\n output_asset_key_replacements.get(key, key) for key in self._selected_asset_keys\n },\n group_names_by_key={\n **replaced_group_names_by_key,\n **group_names_by_key,\n },\n metadata_by_key=replaced_metadata_by_key,\n freshness_policies_by_key=replaced_freshness_policies_by_key,\n auto_materialize_policies_by_key=replaced_auto_materialize_policies_by_key,\n backfill_policy=backfill_policy if backfill_policy else self.backfill_policy,\n descriptions_by_key=replaced_descriptions_by_key,\n )\n\n return self.__class__(**merge_dicts(self.get_attributes_dict(), replaced_attributes))\n\n def _subset_graph_backed_asset(\n self,\n selected_asset_keys: AbstractSet[AssetKey],\n ):\n from dagster._core.definitions.graph_definition import GraphDefinition\n\n if not isinstance(self.node_def, GraphDefinition):\n raise DagsterInvalidInvocationError(\n "Method _subset_graph_backed_asset cannot subset an asset that is not a graph"\n )\n\n # All asset keys in selected_asset_keys are outputted from the same top-level graph backed asset\n dep_node_handles_by_asset_key = get_dep_node_handles_of_graph_backed_asset(\n self.node_def, self\n )\n op_selection: List[str] = []\n for asset_key in selected_asset_keys:\n dep_node_handles = dep_node_handles_by_asset_key[asset_key]\n for dep_node_handle in dep_node_handles:\n op_selection.append(".".join(dep_node_handle.path[1:]))\n\n return get_graph_subset(self.node_def, op_selection)\n\n def subset_for(\n self,\n selected_asset_keys: AbstractSet[AssetKey],\n selected_asset_check_keys: Optional[AbstractSet[AssetCheckKey]],\n ) -> "AssetsDefinition":\n """Create a subset of this 
AssetsDefinition that will only materialize the assets and checks\n in the selected set.\n\n Args:\n selected_asset_keys (AbstractSet[AssetKey]): The total set of asset keys\n selected_asset_check_keys (AbstractSet[AssetCheckKey]): The selected asset checks\n """\n from dagster._core.definitions.graph_definition import GraphDefinition\n\n check.invariant(\n self.can_subset,\n f"Attempted to subset AssetsDefinition for {self.node_def.name}, but can_subset=False.",\n )\n\n # Set of assets within selected_asset_keys which are outputted by this AssetDefinition\n asset_subselection = selected_asset_keys & self.keys\n if selected_asset_check_keys is None:\n # filter to checks that target selected asset keys\n asset_check_subselection = {\n key for key in self.check_keys if key.asset_key in asset_subselection\n }\n else:\n asset_check_subselection = selected_asset_check_keys & self.check_keys\n\n # Early escape if all assets in AssetsDefinition are selected\n if asset_subselection == self.keys and asset_check_subselection == self.check_keys:\n return self\n elif isinstance(self.node_def, GraphDefinition): # Node is graph-backed asset\n check.invariant(\n selected_asset_check_keys == self.check_keys,\n "Subsetting graph-backed assets with checks is not yet supported",\n )\n\n subsetted_node = self._subset_graph_backed_asset(\n asset_subselection,\n )\n\n # The subsetted node should only include asset inputs that are dependencies of the\n # selected set of assets.\n subsetted_input_names = [input_def.name for input_def in subsetted_node.input_defs]\n subsetted_keys_by_input_name = {\n key: value\n for key, value in self.node_keys_by_input_name.items()\n if key in subsetted_input_names\n }\n\n subsetted_output_names = [output_def.name for output_def in subsetted_node.output_defs]\n subsetted_keys_by_output_name = {\n key: value\n for key, value in self.node_keys_by_output_name.items()\n if key in subsetted_output_names\n }\n\n # An op within the graph-backed asset that yields multiple assets will be run\n # any time any of its output assets are selected. Thus, if an op yields multiple assets\n # and only one of them is selected, the op will still run and potentially unexpectedly\n # materialize the unselected asset.\n #\n # Thus, we include unselected assets that may be accidentally materialized in\n # keys_by_output_name and asset_deps so that the webserver can populate an warning when\n # this occurs. This is the same behavior as multi-asset subsetting.\n\n subsetted_asset_deps = {\n out_asset_key: set(self._keys_by_input_name.values())\n for out_asset_key in subsetted_keys_by_output_name.values()\n }\n\n replaced_attributes = dict(\n keys_by_input_name=subsetted_keys_by_input_name,\n keys_by_output_name=subsetted_keys_by_output_name,\n node_def=subsetted_node,\n asset_deps=subsetted_asset_deps,\n selected_asset_keys=selected_asset_keys & self.keys,\n )\n\n return self.__class__(**merge_dicts(self.get_attributes_dict(), replaced_attributes))\n else:\n # multi_asset subsetting\n replaced_attributes = {\n "selected_asset_keys": asset_subselection,\n "selected_asset_check_keys": asset_check_subselection,\n }\n return self.__class__(**merge_dicts(self.get_attributes_dict(), replaced_attributes))\n\n
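
`subset_for` is invoked by the framework when a run selects only some of a definition's keys; the user-facing way to opt into that behavior is `can_subset=True` on a multi-asset. A short sketch with hypothetical asset names (`orders`, `users`), where the body only yields outputs for the keys selected in the current run:

```python
from dagster import AssetKey, AssetOut, Output, materialize, multi_asset


@multi_asset(
    outs={
        "orders": AssetOut(is_required=False),
        "users": AssetOut(is_required=False),
    },
    can_subset=True,
)
def raw_tables(context):
    # Only emit the outputs that were actually selected for this run.
    if AssetKey("orders") in context.selected_asset_keys:
        yield Output([1, 2, 3], output_name="orders")
    if AssetKey("users") in context.selected_asset_keys:
        yield Output(["a", "b"], output_name="users")


# Materialize only "orders"; the definition is subsetted to that key.
result = materialize([raw_tables], selection=["orders"])
```
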
[docs] @public\n def to_source_assets(self) -> Sequence[SourceAsset]:\n """Returns a SourceAsset for each asset in this definition.\n\n Each produced SourceAsset will have the same key, metadata, io_manager_key, etc. as the\n corresponding asset\n """\n return [\n self._output_to_source_asset(output_name)\n for output_name in self.keys_by_output_name.keys()\n ]
\n\n
[docs] @public\n def to_source_asset(self, key: Optional[CoercibleToAssetKey] = None) -> SourceAsset:\n """Returns a representation of this asset as a :py:class:`SourceAsset`.\n\n If this is a multi-asset, the "key" argument allows selecting which asset to return a\n SourceAsset representation of.\n\n Args:\n key (Optional[Union[str, Sequence[str], AssetKey]]]): If this is a multi-asset, select\n which asset to return a SourceAsset representation of. If not a multi-asset, this\n can be left as None.\n\n Returns:\n SourceAsset\n """\n if len(self.keys) > 1:\n check.invariant(\n key is not None,\n "The 'key' argument is required when there are multiple assets to choose from",\n )\n\n if key is not None:\n resolved_key = AssetKey.from_coercible(key)\n check.invariant(\n resolved_key in self.keys, f"Key {resolved_key} not found in AssetsDefinition"\n )\n else:\n resolved_key = self.key\n\n output_names = [\n output_name\n for output_name, ak in self.keys_by_output_name.items()\n if ak == resolved_key\n ]\n check.invariant(len(output_names) == 1)\n return self._output_to_source_asset(output_names[0])
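
A brief usage sketch for the two conversion methods above, using a hypothetical single-key asset named `raw_orders` (for a multi-asset, `to_source_asset` would need an explicit `key` argument):

```python
from dagster import asset


@asset
def raw_orders():
    return [1, 2, 3]


# A SourceAsset pointing at raw_orders, e.g. to reference it from another code location.
raw_orders_source = raw_orders.to_source_asset()

# For a single-asset definition, to_source_assets() returns a one-element sequence.
(raw_orders_source_again,) = raw_orders.to_source_assets()
```
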
\n\n def _output_to_source_asset(self, output_name: str) -> SourceAsset:\n with disable_dagster_warnings():\n output_def = self.node_def.resolve_output_to_origin(\n output_name, NodeHandle(self.node_def.name, parent=None)\n )[0]\n key = self._keys_by_output_name[output_name]\n\n return SourceAsset(\n key=key,\n metadata=output_def.metadata,\n io_manager_key=output_def.io_manager_key,\n description=output_def.description,\n resource_defs=self.resource_defs,\n partitions_def=self.partitions_def,\n group_name=self.group_names_by_key[key],\n )\n\n def get_io_manager_key_for_asset_key(self, key: AssetKey) -> str:\n output_name = self.get_output_name_for_asset_key(key)\n return self.node_def.resolve_output_to_origin(\n output_name, NodeHandle(self.node_def.name, parent=None)\n )[0].io_manager_key\n\n def get_resource_requirements(self) -> Iterator[ResourceRequirement]:\n yield from self.node_def.get_resource_requirements() # type: ignore[attr-defined]\n for source_key, resource_def in self.resource_defs.items():\n yield from resource_def.get_resource_requirements(outer_context=source_key)\n\n @public\n @property\n def required_resource_keys(self) -> Set[str]:\n """Set[str]: The set of keys for resources that must be provided to this AssetsDefinition."""\n return {requirement.key for requirement in self.get_resource_requirements()}\n\n def __str__(self):\n if len(self.keys) == 1:\n return f"AssetsDefinition with key {self.key.to_string()}"\n else:\n asset_keys = ", ".join(sorted(([asset_key.to_string() for asset_key in self.keys])))\n return f"AssetsDefinition with keys {asset_keys}"\n\n @property\n def unique_id(self) -> str:\n """A unique identifier for the AssetsDefinition that's stable across processes."""\n return hashlib.md5((json.dumps(sorted(self.keys))).encode("utf-8")).hexdigest()\n\n def with_resources(self, resource_defs: Mapping[str, ResourceDefinition]) -> "AssetsDefinition":\n attributes_dict = self.get_attributes_dict()\n attributes_dict["resource_defs"] = merge_resource_defs(\n old_resource_defs=self.resource_defs,\n resource_defs_to_merge_in=resource_defs,\n requires_resources=self,\n )\n return self.__class__(**attributes_dict)\n\n def get_attributes_dict(self) -> Dict[str, Any]:\n return dict(\n keys_by_input_name=self._keys_by_input_name,\n keys_by_output_name=self._keys_by_output_name,\n node_def=self._node_def,\n partitions_def=self._partitions_def,\n partition_mappings=self._partition_mappings,\n asset_deps=self.asset_deps,\n selected_asset_keys=self._selected_asset_keys,\n can_subset=self._can_subset,\n resource_defs=self._resource_defs,\n group_names_by_key=self._group_names_by_key,\n metadata_by_key=self._metadata_by_key,\n freshness_policies_by_key=self._freshness_policies_by_key,\n auto_materialize_policies_by_key=self._auto_materialize_policies_by_key,\n backfill_policy=self._backfill_policy,\n descriptions_by_key=self._descriptions_by_key,\n check_specs_by_output_name=self._check_specs_by_output_name,\n selected_asset_check_keys=self._selected_asset_check_keys,\n )
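
A small sketch tying together `required_resource_keys` and late resource binding via the top-level `with_resources` helper; the asset name `users` and the `"warehouse"` resource key are illustrative assumptions:

```python
from dagster import ResourceDefinition, asset, with_resources


@asset(required_resource_keys={"warehouse"})
def users(context):
    return context.resources.warehouse


# The op-level resource requirement shows up on the AssetsDefinition.
assert "warehouse" in users.required_resource_keys

# Bind a concrete resource; with_resources returns new AssetsDefinition objects.
(users_with_warehouse,) = with_resources(
    [users],
    {"warehouse": ResourceDefinition.hardcoded_resource("duckdb://local")},
)
```
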
\n\n\ndef _infer_keys_by_input_names(\n node_def: Union["GraphDefinition", OpDefinition], keys_by_input_name: Mapping[str, AssetKey]\n) -> Mapping[str, AssetKey]:\n all_input_names = [input_def.name for input_def in node_def.input_defs]\n if keys_by_input_name:\n check.invariant(\n set(keys_by_input_name.keys()) == set(all_input_names),\n "The set of input names keys specified in the keys_by_input_name argument must "\n f"equal the set of asset keys inputted by '{node_def.name}'. \\n"\n f"keys_by_input_name keys: {set(keys_by_input_name.keys())} \\n"\n f"expected keys: {all_input_names}",\n )\n\n # If asset key is not supplied in keys_by_input_name, create asset key\n # from input name\n inferred_input_names_by_asset_key: Dict[str, AssetKey] = {\n input_name: keys_by_input_name.get(input_name, AssetKey([input_name]))\n for input_name in all_input_names\n }\n\n return inferred_input_names_by_asset_key\n\n\ndef _infer_keys_by_output_names(\n node_def: Union["GraphDefinition", OpDefinition],\n keys_by_output_name: Mapping[str, AssetKey],\n check_specs_by_output_name: Mapping[str, AssetCheckSpec],\n) -> Mapping[str, AssetKey]:\n output_names = [output_def.name for output_def in node_def.output_defs]\n if keys_by_output_name:\n overlapping_asset_and_check_outputs = set(keys_by_output_name.keys()) & set(\n check_specs_by_output_name.keys()\n )\n check.invariant(\n not overlapping_asset_and_check_outputs,\n "The set of output names associated with asset keys and checks overlap:"\n f" {overlapping_asset_and_check_outputs}",\n )\n\n union_asset_and_check_outputs = set(keys_by_output_name.keys()) | set(\n check_specs_by_output_name.keys()\n )\n check.invariant(\n union_asset_and_check_outputs == set(output_names),\n "The union of the set of output names keys specified in the keys_by_output_name and"\n " check_specs_by_output_name arguments must equal the set of asset keys outputted by"\n f" {node_def.name}. 
union keys:"\n f" {union_asset_and_check_outputs} \\nexpected keys: {set(output_names)}",\n )\n\n inferred_keys_by_output_names: Dict[str, AssetKey] = {\n output_name: asset_key for output_name, asset_key in keys_by_output_name.items()\n }\n\n if (\n len(output_names) == 1\n and output_names[0] not in keys_by_output_name\n and output_names[0] not in check_specs_by_output_name\n and output_names[0] == "result"\n ):\n # If there is only one output and the name is the default "result", generate asset key\n # from the name of the node\n inferred_keys_by_output_names[output_names[0]] = AssetKey([node_def.name])\n\n for output_name in output_names:\n if (\n output_name not in inferred_keys_by_output_names\n and output_name not in check_specs_by_output_name\n ):\n inferred_keys_by_output_names[output_name] = AssetKey([output_name])\n return inferred_keys_by_output_names\n\n\ndef _validate_graph_def(graph_def: "GraphDefinition", prefix: Optional[Sequence[str]] = None):\n """Ensure that all leaf nodes are mapped to graph outputs."""\n from dagster._core.definitions.graph_definition import GraphDefinition, create_adjacency_lists\n\n prefix = check.opt_sequence_param(prefix, "prefix")\n\n # recursively validate any sub-graphs\n for inner_node_def in graph_def.node_defs:\n if isinstance(inner_node_def, GraphDefinition):\n _validate_graph_def(inner_node_def, prefix=[*prefix, graph_def.name])\n\n # leaf nodes have no downstream nodes\n forward_edges, _ = create_adjacency_lists(graph_def.nodes, graph_def.dependency_structure)\n leaf_nodes = {\n node_name for node_name, downstream_nodes in forward_edges.items() if not downstream_nodes\n }\n\n # set of nodes that have outputs mapped to a graph output\n mapped_output_nodes = {\n output_mapping.maps_from.node_name for output_mapping in graph_def.output_mappings\n }\n\n # leaf nodes which do not have an associated mapped output\n unmapped_leaf_nodes = {".".join([*prefix, node]) for node in leaf_nodes - mapped_output_nodes}\n\n check.invariant(\n not unmapped_leaf_nodes,\n f"All leaf nodes within graph '{graph_def.name}' must generate outputs which are mapped"\n " to outputs of the graph, and produce assets. The following leaf node(s) are"\n f" non-asset producing ops: {unmapped_leaf_nodes}. This behavior is not currently"\n " supported because these ops are not required for the creation of the associated"\n " asset(s).",\n )\n\n\ndef _validate_self_deps(\n input_keys: Iterable[AssetKey],\n output_keys: Iterable[AssetKey],\n partition_mappings: Mapping[AssetKey, PartitionMapping],\n partitions_def: Optional[PartitionsDefinition],\n) -> None:\n output_keys_set = set(output_keys)\n for input_key in input_keys:\n if input_key in output_keys_set:\n if input_key in partition_mappings:\n partition_mapping = partition_mappings[input_key]\n time_window_partition_mapping = get_self_dep_time_window_partition_mapping(\n partition_mapping, partitions_def\n )\n if (\n time_window_partition_mapping is not None\n and (time_window_partition_mapping.start_offset or 0) < 0\n and (time_window_partition_mapping.end_offset or 0) < 0\n ):\n continue\n\n raise DagsterInvalidDefinitionError(\n f'Asset "{input_key.to_user_string()}" depends on itself. 
Assets can only depend'\n " on themselves if they are:\\n(a) time-partitioned and each partition depends on"\n " earlier partitions\\n(b) multipartitioned, with one time dimension that depends"\n " on earlier time partitions"\n )\n\n\ndef get_self_dep_time_window_partition_mapping(\n partition_mapping: Optional[PartitionMapping], partitions_def: Optional[PartitionsDefinition]\n) -> Optional[TimeWindowPartitionMapping]:\n """Returns a time window partition mapping dimension of the provided partition mapping,\n if exists.\n """\n if isinstance(partition_mapping, TimeWindowPartitionMapping):\n return partition_mapping\n elif isinstance(partition_mapping, MultiPartitionMapping):\n if not isinstance(partitions_def, MultiPartitionsDefinition):\n return None\n\n time_partition_mapping = partition_mapping.downstream_mappings_by_upstream_dimension.get(\n partitions_def.time_window_dimension.name\n )\n\n if time_partition_mapping is None or not isinstance(\n time_partition_mapping.partition_mapping, TimeWindowPartitionMapping\n ):\n return None\n\n return time_partition_mapping.partition_mapping\n return None\n
", "current_page_name": "_modules/dagster/_core/definitions/assets", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.assets"}, "auto_materialize_policy": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.auto_materialize_policy

\nfrom enum import Enum\nfrom typing import TYPE_CHECKING, AbstractSet, Dict, FrozenSet, NamedTuple, Optional, Sequence\n\nimport dagster._check as check\nfrom dagster._annotations import experimental, public\nfrom dagster._serdes.serdes import (\n    NamedTupleSerializer,\n    UnpackContext,\n    UnpackedValue,\n    whitelist_for_serdes,\n)\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.auto_materialize_rule import (\n        AutoMaterializeRule,\n        AutoMaterializeRuleSnapshot,\n    )\n\n\nclass AutoMaterializePolicySerializer(NamedTupleSerializer):\n    def before_unpack(\n        self, context: UnpackContext, unpacked_dict: Dict[str, UnpackedValue]\n    ) -> Dict[str, UnpackedValue]:\n        from dagster._core.definitions.auto_materialize_rule import AutoMaterializeRule\n\n        backcompat_map = {\n            "on_missing": AutoMaterializeRule.materialize_on_missing(),\n            "on_new_parent_data": AutoMaterializeRule.materialize_on_parent_updated(),\n            "for_freshness": AutoMaterializeRule.materialize_on_required_for_freshness(),\n        }\n\n        # determine if this namedtuple was serialized with the old format (booleans for rules)\n        if any(backcompat_key in unpacked_dict for backcompat_key in backcompat_map):\n            # all old policies had these rules by default\n            rules = {\n                AutoMaterializeRule.skip_on_parent_outdated(),\n                AutoMaterializeRule.skip_on_parent_missing(),\n            }\n            for backcompat_key, rule in backcompat_map.items():\n                if unpacked_dict.get(backcompat_key):\n                    rules.add(rule)\n            unpacked_dict["rules"] = frozenset(rules)\n\n        return unpacked_dict\n\n\nclass AutoMaterializePolicyType(Enum):\n    EAGER = "EAGER"\n    LAZY = "LAZY"\n\n\n
[docs]@experimental\n@whitelist_for_serdes(\n old_fields={"time_window_partition_scope_minutes": 1e-6},\n serializer=AutoMaterializePolicySerializer,\n)\nclass AutoMaterializePolicy(\n NamedTuple(\n "_AutoMaterializePolicy",\n [\n ("rules", FrozenSet["AutoMaterializeRule"]),\n ("max_materializations_per_minute", Optional[int]),\n ],\n )\n):\n """An AutoMaterializePolicy specifies how Dagster should attempt to keep an asset up-to-date.\n\n Each policy consists of a set of AutoMaterializeRules, which are used to determine whether an\n asset or a partition of an asset should or should not be auto-materialized.\n\n The most common policy is `AutoMaterializePolicy.eager()`, which consists of the following rules:\n\n - `AutoMaterializeRule.materialize_on_missing()`\n Materialize an asset or a partition if it has never been materialized.\n - `AutoMaterializeRule.materialize_on_parent_updated()`\n Materialize an asset or a partition if one of its parents have been updated more recently\n than it has.\n - `AutoMaterializeRule.materialize_on_required_for_freshness()`\n Materialize an asset or a partition if it is required to satisfy a freshness policy.\n - `AutoMaterializeRule.skip_on_parent_outdated()`\n Skip materializing an asset or partition if any of its parents have ancestors that have\n been materialized more recently.\n - `AutoMaterializeRule.skip_on_parent_missing()`\n Skip materializing an asset or a partition if any parent has never been materialized or\n observed.\n\n Policies can be customized by adding or removing rules. For example, if you'd like to allow\n an asset to be materialized even if some of its parent partitions are missing:\n\n .. code-block:: python\n\n from dagster import AutoMaterializePolicy, AutoMaterializeRule\n\n my_policy = AutoMaterializePolicy.eager().without_rules(\n AutoMaterializeRule.skip_on_parent_missing(),\n )\n\n If you'd like an asset to wait for all of its parents to be updated before materializing:\n\n .. code-block:: python\n\n from dagster import AutoMaterializePolicy, AutoMaterializeRule\n\n my_policy = AutoMaterializePolicy.eager().with_rules(\n AutoMaterializeRule.skip_on_all_parents_not_updated(),\n )\n\n Lastly, the `max_materializations_per_minute` parameter, which is set to 1 by default,\n rate-limits the number of auto-materializations that can occur for a particular asset within\n a short time interval. This mainly matters for partitioned assets. Its purpose is to provide a\n safeguard against "surprise backfills", where user-error causes auto-materialize to be\n accidentally triggered for large numbers of partitions at once.\n\n **Warning:**\n\n Constructing an AutoMaterializePolicy directly is not recommended as the API is subject to change.\n AutoMaterializePolicy.eager() and AutoMaterializePolicy.lazy() are the recommended API.\n\n """\n\n def __new__(\n cls,\n rules: AbstractSet["AutoMaterializeRule"],\n max_materializations_per_minute: Optional[int] = 1,\n ):\n from dagster._core.definitions.auto_materialize_rule import AutoMaterializeRule\n\n check.invariant(\n max_materializations_per_minute is None or max_materializations_per_minute > 0,\n "max_materializations_per_minute must be positive. To disable rate-limiting, set it"\n " to None. 
To disable auto materializing, remove the policy.",\n )\n\n return super(AutoMaterializePolicy, cls).__new__(\n cls,\n rules=frozenset(check.set_param(rules, "rules", of_type=AutoMaterializeRule)),\n max_materializations_per_minute=max_materializations_per_minute,\n )\n\n @property\n def materialize_rules(self) -> AbstractSet["AutoMaterializeRule"]:\n from dagster._core.definitions.auto_materialize_rule import AutoMaterializeDecisionType\n\n return {\n rule\n for rule in self.rules\n if rule.decision_type == AutoMaterializeDecisionType.MATERIALIZE\n }\n\n @property\n def skip_rules(self) -> AbstractSet["AutoMaterializeRule"]:\n from dagster._core.definitions.auto_materialize_rule import AutoMaterializeDecisionType\n\n return {\n rule for rule in self.rules if rule.decision_type == AutoMaterializeDecisionType.SKIP\n }\n\n
[docs] @public\n @staticmethod\n def eager(max_materializations_per_minute: Optional[int] = 1) -> "AutoMaterializePolicy":\n """Constructs an eager AutoMaterializePolicy.\n\n Args:\n max_materializations_per_minute (Optional[int]): The maximum number of\n auto-materializations for this asset that may be initiated per minute. If this limit\n is exceeded, the partitions which would have been materialized will be discarded,\n and will require manual materialization in order to be updated. Defaults to 1.\n """\n from dagster._core.definitions.auto_materialize_rule import AutoMaterializeRule\n\n return AutoMaterializePolicy(\n rules={\n AutoMaterializeRule.materialize_on_missing(),\n AutoMaterializeRule.materialize_on_parent_updated(),\n AutoMaterializeRule.materialize_on_required_for_freshness(),\n AutoMaterializeRule.skip_on_parent_outdated(),\n AutoMaterializeRule.skip_on_parent_missing(),\n },\n max_materializations_per_minute=check.opt_int_param(\n max_materializations_per_minute, "max_materializations_per_minute"\n ),\n )
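
A minimal sketch of attaching the eager policy to an asset and relaxing the default rate limit; the asset name `daily_summary` is hypothetical:

```python
from dagster import AutoMaterializePolicy, asset


@asset(auto_materialize_policy=AutoMaterializePolicy.eager())
def daily_summary():
    return "ok"


# Allow up to 5 auto-materializations per minute instead of the default of 1.
relaxed_policy = AutoMaterializePolicy.eager(max_materializations_per_minute=5)
```
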
\n\n
[docs] @public\n @staticmethod\n def lazy(max_materializations_per_minute: Optional[int] = 1) -> "AutoMaterializePolicy":\n """Constructs a lazy AutoMaterializePolicy.\n\n Args:\n max_materializations_per_minute (Optional[int]): The maximum number of\n auto-materializations for this asset that may be initiated per minute. If this limit\n is exceeded, the partitions which would have been materialized will be discarded,\n and will require manual materialization in order to be updated. Defaults to 1.\n """\n from dagster._core.definitions.auto_materialize_rule import AutoMaterializeRule\n\n return AutoMaterializePolicy(\n rules={\n AutoMaterializeRule.materialize_on_required_for_freshness(),\n AutoMaterializeRule.skip_on_parent_outdated(),\n AutoMaterializeRule.skip_on_parent_missing(),\n },\n max_materializations_per_minute=check.opt_int_param(\n max_materializations_per_minute, "max_materializations_per_minute"\n ),\n )
\n\n
[docs] @public\n def without_rules(self, *rules_to_remove: "AutoMaterializeRule") -> "AutoMaterializePolicy":\n """Constructs a copy of this policy with the specified rules removed. Raises an error\n if any of the arguments are not rules in this policy.\n """\n non_matching_rules = set(rules_to_remove).difference(self.rules)\n check.param_invariant(\n not non_matching_rules,\n "rules_to_remove",\n f"Rules {[rule for rule in rules_to_remove if rule in non_matching_rules]} do not"\n " exist in this policy.",\n )\n return self._replace(\n rules=self.rules.difference(set(rules_to_remove)),\n )
\n\n
[docs] @public\n def with_rules(self, *rules_to_add: "AutoMaterializeRule") -> "AutoMaterializePolicy":\n """Constructs a copy of this policy with the specified rules added."""\n return self._replace(rules=self.rules.union(set(rules_to_add)))
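
A short sketch of customizing a policy with `with_rules` and `without_rules`, using the `skip_on_not_all_parents_updated` and `skip_on_parent_missing` rules defined elsewhere in this module; the variable names are illustrative:

```python
from dagster import AutoMaterializePolicy, AutoMaterializeRule

# Eager policy that additionally waits until every parent has been updated.
strict_policy = AutoMaterializePolicy.eager().with_rules(
    AutoMaterializeRule.skip_on_not_all_parents_updated(),
)

# Eager policy that tolerates missing parent partitions.
lenient_policy = AutoMaterializePolicy.eager().without_rules(
    AutoMaterializeRule.skip_on_parent_missing(),
)
```
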
\n\n @property\n def policy_type(self) -> AutoMaterializePolicyType:\n from dagster._core.definitions.auto_materialize_rule import AutoMaterializeRule\n\n if AutoMaterializeRule.materialize_on_parent_updated() in self.rules:\n return AutoMaterializePolicyType.EAGER\n return AutoMaterializePolicyType.LAZY\n\n @property\n def rule_snapshots(self) -> Sequence["AutoMaterializeRuleSnapshot"]:\n return [rule.to_snapshot() for rule in self.rules]
\n
", "current_page_name": "_modules/dagster/_core/definitions/auto_materialize_policy", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.auto_materialize_policy"}, "auto_materialize_rule": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.auto_materialize_rule

\nimport datetime\nfrom abc import ABC, abstractmethod, abstractproperty\nfrom collections import defaultdict\nfrom enum import Enum\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Dict,\n    FrozenSet,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n    cast,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.definitions.data_time import CachingDataTimeResolver\nfrom dagster._core.definitions.events import AssetKey, AssetKeyPartitionKey\nfrom dagster._core.definitions.freshness_based_auto_materialize import (\n    freshness_evaluation_results_for_asset_key,\n)\nfrom dagster._core.definitions.partition_mapping import IdentityPartitionMapping\nfrom dagster._core.definitions.time_window_partition_mapping import TimeWindowPartitionMapping\nfrom dagster._serdes.serdes import (\n    NamedTupleSerializer,\n    UnpackContext,\n    UnpackedValue,\n    WhitelistMap,\n    whitelist_for_serdes,\n)\nfrom dagster._utils.caching_instance_queryer import CachingInstanceQueryer\n\nfrom .asset_graph import AssetGraph, sort_key_for_asset_partition\nfrom .partition import SerializedPartitionsSubset\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.asset_daemon_context import AssetDaemonContext\n    from dagster._core.definitions.asset_daemon_cursor import AssetDaemonCursor\n    from dagster._core.instance import DynamicPartitionsStore\n\n\n@whitelist_for_serdes\nclass AutoMaterializeDecisionType(Enum):\n    """Represents the set of results of the auto-materialize logic.\n\n    MATERIALIZE: The asset should be materialized by a run kicked off on this tick\n    SKIP: The asset should not be materialized by a run kicked off on this tick, because future\n        ticks are expected to materialize it.\n    DISCARD: The asset should not be materialized by a run kicked off on this tick, but future\n        ticks are not expected to materialize it.\n    """\n\n    MATERIALIZE = "MATERIALIZE"\n    SKIP = "SKIP"\n    DISCARD = "DISCARD"\n\n\nclass AutoMaterializeRuleEvaluationData(ABC):\n    pass\n\n\n@whitelist_for_serdes\nclass TextRuleEvaluationData(\n    AutoMaterializeRuleEvaluationData,\n    NamedTuple("_TextRuleEvaluationData", [("text", str)]),\n):\n    pass\n\n\n@whitelist_for_serdes\nclass ParentUpdatedRuleEvaluationData(\n    AutoMaterializeRuleEvaluationData,\n    NamedTuple(\n        "_ParentUpdatedRuleEvaluationData",\n        [\n            ("updated_asset_keys", FrozenSet[AssetKey]),\n            ("will_update_asset_keys", FrozenSet[AssetKey]),\n        ],\n    ),\n):\n    pass\n\n\n@whitelist_for_serdes\nclass WaitingOnAssetsRuleEvaluationData(\n    AutoMaterializeRuleEvaluationData,\n    NamedTuple(\n        "_WaitingOnParentRuleEvaluationData",\n        [("waiting_on_asset_keys", FrozenSet[AssetKey])],\n    ),\n):\n    pass\n\n\n@whitelist_for_serdes\nclass AutoMaterializeRuleSnapshot(NamedTuple):\n    """A serializable snapshot of an AutoMaterializeRule for historical evaluations."""\n\n    class_name: str\n    description: str\n    decision_type: AutoMaterializeDecisionType\n\n    @staticmethod\n    def from_rule(rule: "AutoMaterializeRule") -> "AutoMaterializeRuleSnapshot":\n        return AutoMaterializeRuleSnapshot(\n            class_name=rule.__class__.__name__,\n            description=rule.description,\n            decision_type=rule.decision_type,\n        )\n\n\n@whitelist_for_serdes\nclass AutoMaterializeRuleEvaluation(NamedTuple):\n    rule_snapshot: AutoMaterializeRuleSnapshot\n    
evaluation_data: Optional[AutoMaterializeRuleEvaluationData]\n\n\nclass RuleEvaluationContext(NamedTuple):\n    asset_key: AssetKey\n    cursor: "AssetDaemonCursor"\n    instance_queryer: CachingInstanceQueryer\n    data_time_resolver: CachingDataTimeResolver\n    will_materialize_mapping: Mapping[AssetKey, AbstractSet[AssetKeyPartitionKey]]\n    expected_data_time_mapping: Mapping[AssetKey, Optional[datetime.datetime]]\n    candidates: AbstractSet[AssetKeyPartitionKey]\n    daemon_context: "AssetDaemonContext"\n\n    @property\n    def asset_graph(self) -> AssetGraph:\n        return self.instance_queryer.asset_graph\n\n    def materializable_in_same_run(self, child_key: AssetKey, parent_key: AssetKey) -> bool:\n        """Returns whether a child asset can be materialized in the same run as a parent asset."""\n        from dagster._core.definitions.external_asset_graph import ExternalAssetGraph\n\n        return (\n            # both assets must be materializable\n            child_key in self.asset_graph.materializable_asset_keys\n            and parent_key in self.asset_graph.materializable_asset_keys\n            # the parent must have the same partitioning\n            and self.asset_graph.have_same_partitioning(child_key, parent_key)\n            # the parent must have a simple partition mapping to the child\n            and (\n                not self.asset_graph.is_partitioned(parent_key)\n                or isinstance(\n                    self.asset_graph.get_partition_mapping(child_key, parent_key),\n                    (TimeWindowPartitionMapping, IdentityPartitionMapping),\n                )\n            )\n            # the parent must be in the same repository to be materialized alongside the candidate\n            and (\n                not isinstance(self.asset_graph, ExternalAssetGraph)\n                or self.asset_graph.get_repository_handle(child_key)\n                == self.asset_graph.get_repository_handle(parent_key)\n            )\n        )\n\n    def get_parents_that_will_not_be_materialized_on_current_tick(\n        self, *, asset_partition: AssetKeyPartitionKey\n    ) -> AbstractSet[AssetKeyPartitionKey]:\n        """Returns the set of parent asset partitions that will not be updated in the same run of\n        this asset partition if we launch a run of this asset partition on this tick.\n        """\n        return {\n            parent\n            for parent in self.asset_graph.get_parents_partitions(\n                dynamic_partitions_store=self.instance_queryer,\n                current_time=self.instance_queryer.evaluation_time,\n                asset_key=asset_partition.asset_key,\n                partition_key=asset_partition.partition_key,\n            ).parent_partitions\n            if parent not in self.will_materialize_mapping.get(parent.asset_key, set())\n            or not self.materializable_in_same_run(asset_partition.asset_key, parent.asset_key)\n        }\n\n    def get_asset_partitions_by_asset_key(\n        self,\n        asset_partitions: AbstractSet[AssetKeyPartitionKey],\n    ) -> Mapping[AssetKey, Set[AssetKeyPartitionKey]]:\n        asset_partitions_by_asset_key: Dict[AssetKey, Set[AssetKeyPartitionKey]] = defaultdict(set)\n        for parent in asset_partitions:\n            asset_partitions_by_asset_key[parent.asset_key].add(parent)\n\n        return asset_partitions_by_asset_key\n\n\nRuleEvaluationResults = Sequence[Tuple[Optional[AutoMaterializeRuleEvaluationData], AbstractSet]]\n\n\n
[docs]class AutoMaterializeRule(ABC):\n """An AutoMaterializeRule defines a bit of logic which helps determine if a materialization\n should be kicked off for a given asset partition.\n\n Each rule can have one of two decision types, `MATERIALIZE` (indicating that an asset partition\n should be materialized) or `SKIP` (indicating that the asset partition should not be\n materialized).\n\n Materialize rules are evaluated first, and skip rules operate over the set of candidates that\n are produced by the materialize rules. Other than that, there is no ordering between rules.\n """\n\n @abstractproperty\n def decision_type(self) -> AutoMaterializeDecisionType:\n """The decision type of the rule (either `MATERIALIZE` or `SKIP`)."""\n ...\n\n @abstractproperty\n def description(self) -> str:\n """A human-readable description of this rule. As a basic guideline, this string should\n complete the sentence: 'Indicates an asset should be (materialize/skipped) when ____'.\n """\n ...\n\n @abstractmethod\n def evaluate_for_asset(self, context: RuleEvaluationContext) -> RuleEvaluationResults:\n """The core evaluation function for the rule. This function takes in a context object and\n returns a mapping from evaluated rules to the set of asset partitions that the rule applies\n to.\n """\n ...\n\n
[docs] @public\n @staticmethod\n def materialize_on_required_for_freshness() -> "MaterializeOnRequiredForFreshnessRule":\n """Materialize an asset partition if it is required to satisfy a freshness policy of this\n asset or one of its downstream assets.\n\n Note: This rule has no effect on partitioned assets.\n """\n return MaterializeOnRequiredForFreshnessRule()
\n\n
[docs] @public\n @staticmethod\n def materialize_on_parent_updated() -> "MaterializeOnParentUpdatedRule":\n """Materialize an asset partition if one of its parents has been updated more recently\n than it has.\n\n Note: For time-partitioned or dynamic-partitioned assets downstream of an unpartitioned\n asset, this rule will only fire for the most recent partition of the downstream.\n """\n return MaterializeOnParentUpdatedRule()
\n\n
[docs] @public\n @staticmethod\n def materialize_on_missing() -> "MaterializeOnMissingRule":\n """Materialize an asset partition if it has never been materialized before. This rule will\n not fire for non-root assets unless that asset's parents have been updated.\n """\n return MaterializeOnMissingRule()
\n\n
[docs] @public\n @staticmethod\n def skip_on_parent_missing() -> "SkipOnParentMissingRule":\n """Skip materializing an asset partition if one of its parent asset partitions has never\n been materialized (for regular assets) or observed (for observable source assets).\n """\n return SkipOnParentMissingRule()
\n\n
[docs] @public\n @staticmethod\n def skip_on_parent_outdated() -> "SkipOnParentOutdatedRule":\n """Skip materializing an asset partition if any of its parents has not incorporated the\n latest data from its ancestors.\n """\n return SkipOnParentOutdatedRule()
\n\n
[docs] @public\n @staticmethod\n def skip_on_not_all_parents_updated(\n require_update_for_all_parent_partitions: bool = False,\n ) -> "SkipOnNotAllParentsUpdatedRule":\n """Skip materializing an asset partition if any of its parents have not been updated since\n the asset's last materialization.\n\n Attributes:\n require_update_for_all_parent_partitions (Optional[bool]): Applies only to an unpartitioned\n asset or an asset partition that depends on more than one partition in any upstream asset.\n If true, requires all upstream partitions in each upstream asset to be materialized since\n the downstream asset's last materialization in order to update it. If false, requires at\n least one upstream partition in each upstream asset to be materialized since the downstream\n asset's last materialization in order to update it. Defaults to false.\n """\n return SkipOnNotAllParentsUpdatedRule(require_update_for_all_parent_partitions)
\n\n def to_snapshot(self) -> AutoMaterializeRuleSnapshot:\n """Returns a serializable snapshot of this rule for historical evaluations."""\n return AutoMaterializeRuleSnapshot.from_rule(self)\n\n def __eq__(self, other) -> bool:\n # override the default NamedTuple __eq__ method to factor in types\n return type(self) == type(other) and super().__eq__(other)\n\n def __hash__(self) -> int:\n # override the default NamedTuple __hash__ method to factor in types\n return hash(hash(type(self)) + super().__hash__())
\n\n\n@whitelist_for_serdes\nclass MaterializeOnRequiredForFreshnessRule(\n AutoMaterializeRule, NamedTuple("_MaterializeOnRequiredForFreshnessRule", [])\n):\n @property\n def decision_type(self) -> AutoMaterializeDecisionType:\n return AutoMaterializeDecisionType.MATERIALIZE\n\n @property\n def description(self) -> str:\n return "required to meet this or downstream asset's freshness policy"\n\n def evaluate_for_asset(self, context: RuleEvaluationContext) -> RuleEvaluationResults:\n freshness_conditions = freshness_evaluation_results_for_asset_key(\n asset_key=context.asset_key,\n data_time_resolver=context.data_time_resolver,\n asset_graph=context.asset_graph,\n current_time=context.instance_queryer.evaluation_time,\n will_materialize_mapping=context.will_materialize_mapping,\n expected_data_time_mapping=context.expected_data_time_mapping,\n )\n return freshness_conditions\n\n\n@whitelist_for_serdes\nclass MaterializeOnParentUpdatedRule(\n AutoMaterializeRule, NamedTuple("_MaterializeOnParentUpdatedRule", [])\n):\n @property\n def decision_type(self) -> AutoMaterializeDecisionType:\n return AutoMaterializeDecisionType.MATERIALIZE\n\n @property\n def description(self) -> str:\n return "upstream data has changed since latest materialization"\n\n def evaluate_for_asset(self, context: RuleEvaluationContext) -> RuleEvaluationResults:\n """Evaluates the set of asset partitions of this asset whose parents have been updated,\n or will update on this tick.\n """\n conditions = defaultdict(set)\n has_parents_that_will_update = set()\n\n # first, get the set of parents that will be materialized this tick, and see if we\n # can materialize this asset with those parents\n will_update_parents_by_asset_partition = defaultdict(set)\n for parent_key in context.asset_graph.get_parents(context.asset_key):\n if not context.materializable_in_same_run(context.asset_key, parent_key):\n continue\n for parent_partition in context.will_materialize_mapping.get(parent_key, set()):\n asset_partition = AssetKeyPartitionKey(\n context.asset_key, parent_partition.partition_key\n )\n will_update_parents_by_asset_partition[asset_partition].add(parent_key)\n has_parents_that_will_update.add(asset_partition)\n\n # next, for each asset partition of this asset which has newly-updated parents, or\n # has a parent that will update, create a ParentUpdatedRuleEvaluationData\n has_or_will_update = (\n context.daemon_context.get_asset_partitions_with_newly_updated_parents_for_key(\n context.asset_key\n )\n | has_parents_that_will_update\n )\n for asset_partition in has_or_will_update:\n parent_asset_partitions = context.asset_graph.get_parents_partitions(\n dynamic_partitions_store=context.instance_queryer,\n current_time=context.instance_queryer.evaluation_time,\n asset_key=asset_partition.asset_key,\n partition_key=asset_partition.partition_key,\n ).parent_partitions\n\n updated_parent_asset_partitions = context.instance_queryer.get_parent_asset_partitions_updated_after_child(\n asset_partition,\n parent_asset_partitions,\n # do a precise check for updated parents, factoring in data versions, as long as\n # we're within reasonable limits on the number of partitions to check\n respect_materialization_data_versions=context.daemon_context.respect_materialization_data_versions\n and len(parent_asset_partitions | has_or_will_update) < 100,\n # ignore self-dependencies when checking for updated parents, to avoid historical\n # rematerializations from causing a chain of materializations to be kicked off\n 
ignored_parent_keys={context.asset_key},\n )\n updated_parents = {parent.asset_key for parent in updated_parent_asset_partitions}\n will_update_parents = will_update_parents_by_asset_partition[asset_partition]\n\n if updated_parents or will_update_parents:\n conditions[\n ParentUpdatedRuleEvaluationData(\n updated_asset_keys=frozenset(updated_parents),\n will_update_asset_keys=frozenset(will_update_parents),\n )\n ].add(asset_partition)\n if conditions:\n return [(k, v) for k, v in conditions.items()]\n return []\n\n\n@whitelist_for_serdes\nclass MaterializeOnMissingRule(AutoMaterializeRule, NamedTuple("_MaterializeOnMissingRule", [])):\n @property\n def decision_type(self) -> AutoMaterializeDecisionType:\n return AutoMaterializeDecisionType.MATERIALIZE\n\n @property\n def description(self) -> str:\n return "materialization is missing"\n\n def evaluate_for_asset(self, context: RuleEvaluationContext) -> RuleEvaluationResults:\n """Evaluates the set of asset partitions for this asset which are missing and were not\n previously discarded. Currently only applies to root asset partitions and asset partitions\n with updated parents.\n """\n missing_asset_partitions = (\n context.daemon_context.get_never_handled_root_asset_partitions_for_key(\n context.asset_key\n )\n )\n # in addition to missing root asset partitions, check any asset partitions with updated\n # parents to see if they're missing\n for (\n candidate\n ) in context.daemon_context.get_asset_partitions_with_newly_updated_parents_for_key(\n context.asset_key\n ):\n if not context.instance_queryer.asset_partition_has_materialization_or_observation(\n candidate\n ):\n missing_asset_partitions |= {candidate}\n if missing_asset_partitions:\n return [(None, missing_asset_partitions)]\n return []\n\n\n@whitelist_for_serdes\nclass SkipOnParentOutdatedRule(AutoMaterializeRule, NamedTuple("_SkipOnParentOutdatedRule", [])):\n @property\n def decision_type(self) -> AutoMaterializeDecisionType:\n return AutoMaterializeDecisionType.SKIP\n\n @property\n def description(self) -> str:\n return "waiting on upstream data to be up to date"\n\n def evaluate_for_asset(self, context: RuleEvaluationContext) -> RuleEvaluationResults:\n asset_partitions_by_waiting_on_asset_keys = defaultdict(set)\n for candidate in context.candidates:\n outdated_ancestors = set()\n # find the root cause of why this asset partition's parents are outdated (if any)\n for parent in context.get_parents_that_will_not_be_materialized_on_current_tick(\n asset_partition=candidate\n ):\n outdated_ancestors.update(\n context.instance_queryer.get_outdated_ancestors(asset_partition=parent)\n )\n if outdated_ancestors:\n asset_partitions_by_waiting_on_asset_keys[frozenset(outdated_ancestors)].add(\n candidate\n )\n if asset_partitions_by_waiting_on_asset_keys:\n return [\n (WaitingOnAssetsRuleEvaluationData(waiting_on_asset_keys=k), v)\n for k, v in asset_partitions_by_waiting_on_asset_keys.items()\n ]\n return []\n\n\n@whitelist_for_serdes\nclass SkipOnParentMissingRule(AutoMaterializeRule, NamedTuple("_SkipOnParentMissingRule", [])):\n @property\n def decision_type(self) -> AutoMaterializeDecisionType:\n return AutoMaterializeDecisionType.SKIP\n\n @property\n def description(self) -> str:\n return "waiting on upstream data to be present"\n\n def evaluate_for_asset(\n self,\n context: RuleEvaluationContext,\n ) -> RuleEvaluationResults:\n asset_partitions_by_waiting_on_asset_keys = defaultdict(set)\n for candidate in context.candidates:\n missing_parent_asset_keys = set()\n for parent in 
context.get_parents_that_will_not_be_materialized_on_current_tick(\n asset_partition=candidate\n ):\n # ignore non-observable sources, which will never have a materialization or observation\n if context.asset_graph.is_source(\n parent.asset_key\n ) and not context.asset_graph.is_observable(parent.asset_key):\n continue\n if not context.instance_queryer.asset_partition_has_materialization_or_observation(\n parent\n ):\n missing_parent_asset_keys.add(parent.asset_key)\n if missing_parent_asset_keys:\n asset_partitions_by_waiting_on_asset_keys[frozenset(missing_parent_asset_keys)].add(\n candidate\n )\n if asset_partitions_by_waiting_on_asset_keys:\n return [\n (WaitingOnAssetsRuleEvaluationData(waiting_on_asset_keys=k), v)\n for k, v in asset_partitions_by_waiting_on_asset_keys.items()\n ]\n return []\n\n\n@whitelist_for_serdes\nclass SkipOnNotAllParentsUpdatedRule(\n AutoMaterializeRule,\n NamedTuple(\n "_SkipOnNotAllParentsUpdatedRule", [("require_update_for_all_parent_partitions", bool)]\n ),\n):\n """An auto-materialize rule that enforces that an asset can only be materialized if all parents\n have been materialized since the asset's last materialization.\n\n Attributes:\n require_update_for_all_parent_partitions (Optional[bool]): Applies only to an unpartitioned\n asset or an asset partition that depends on more than one partition in any upstream asset.\n If true, requires all upstream partitions in each upstream asset to be materialized since\n the downstream asset's last materialization in order to update it. If false, requires at\n least one upstream partition in each upstream asset to be materialized since the downstream\n asset's last materialization in order to update it. Defaults to false.\n """\n\n @property\n def decision_type(self) -> AutoMaterializeDecisionType:\n return AutoMaterializeDecisionType.SKIP\n\n @property\n def description(self) -> str:\n if self.require_update_for_all_parent_partitions is False:\n return "waiting on upstream data to be updated"\n else:\n return "waiting until all upstream partitions are updated"\n\n def evaluate_for_asset(\n self,\n context: RuleEvaluationContext,\n ) -> RuleEvaluationResults:\n asset_partitions_by_waiting_on_asset_keys = defaultdict(set)\n for candidate in context.candidates:\n parent_partitions = context.asset_graph.get_parents_partitions(\n context.instance_queryer,\n context.instance_queryer.evaluation_time,\n context.asset_key,\n candidate.partition_key,\n ).parent_partitions\n\n updated_parent_partitions = (\n context.instance_queryer.get_parent_asset_partitions_updated_after_child(\n candidate,\n parent_partitions,\n context.daemon_context.respect_materialization_data_versions,\n ignored_parent_keys=set(),\n )\n | set().union(\n *[\n context.will_materialize_mapping.get(parent, set())\n for parent in context.asset_graph.get_parents(context.asset_key)\n ]\n )\n )\n\n if self.require_update_for_all_parent_partitions:\n # All upstream partitions must be updated in order for the candidate to be updated\n non_updated_parent_keys = {\n parent.asset_key for parent in parent_partitions - updated_parent_partitions\n }\n else:\n # At least one upstream partition in each upstream asset must be updated in order\n # for the candidate to be updated\n parent_asset_keys = context.asset_graph.get_parents(context.asset_key)\n updated_parent_partitions_by_asset_key = context.get_asset_partitions_by_asset_key(\n updated_parent_partitions\n )\n non_updated_parent_keys = {\n parent\n for parent in parent_asset_keys\n if not 
updated_parent_partitions_by_asset_key.get(parent)\n }\n\n # do not require past partitions of this asset to be updated\n non_updated_parent_keys -= {context.asset_key}\n\n if non_updated_parent_keys:\n asset_partitions_by_waiting_on_asset_keys[frozenset(non_updated_parent_keys)].add(\n candidate\n )\n\n if asset_partitions_by_waiting_on_asset_keys:\n return [\n (WaitingOnAssetsRuleEvaluationData(waiting_on_asset_keys=k), v)\n for k, v in asset_partitions_by_waiting_on_asset_keys.items()\n ]\n return []\n\n\n@whitelist_for_serdes\nclass DiscardOnMaxMaterializationsExceededRule(\n AutoMaterializeRule, NamedTuple("_DiscardOnMaxMaterializationsExceededRule", [("limit", int)])\n):\n @property\n def decision_type(self) -> AutoMaterializeDecisionType:\n return AutoMaterializeDecisionType.DISCARD\n\n @property\n def description(self) -> str:\n return f"exceeds {self.limit} materialization(s) per minute"\n\n def evaluate_for_asset(self, context: RuleEvaluationContext) -> RuleEvaluationResults:\n # the set of asset partitions which exceed the limit\n rate_limited_asset_partitions = set(\n sorted(\n context.candidates,\n key=lambda x: sort_key_for_asset_partition(context.asset_graph, x),\n )[self.limit :]\n )\n if rate_limited_asset_partitions:\n return [(None, rate_limited_asset_partitions)]\n return []\n\n\n@whitelist_for_serdes\nclass AutoMaterializeAssetEvaluation(NamedTuple):\n """Represents the results of the auto-materialize logic for a single asset.\n\n Properties:\n asset_key (AssetKey): The asset key that was evaluated.\n partition_subsets_by_condition: The rule evaluations that impact if the asset should be\n materialized, skipped, or discarded. If the asset is partitioned, this will be a list of\n tuples, where the first element is the condition and the second element is the\n serialized subset of partitions that the condition applies to. 
If it's not partitioned,\n the second element will be None.\n """\n\n asset_key: AssetKey\n partition_subsets_by_condition: Sequence[\n Tuple["AutoMaterializeRuleEvaluation", Optional[SerializedPartitionsSubset]]\n ]\n num_requested: int\n num_skipped: int\n num_discarded: int\n run_ids: Set[str] = set()\n rule_snapshots: Optional[Sequence[AutoMaterializeRuleSnapshot]] = None\n\n @staticmethod\n def from_rule_evaluation_results(\n asset_graph: AssetGraph,\n asset_key: AssetKey,\n asset_partitions_by_rule_evaluation: Sequence[\n Tuple[AutoMaterializeRuleEvaluation, AbstractSet[AssetKeyPartitionKey]]\n ],\n num_requested: int,\n num_skipped: int,\n num_discarded: int,\n dynamic_partitions_store: "DynamicPartitionsStore",\n ) -> "AutoMaterializeAssetEvaluation":\n auto_materialize_policy = asset_graph.auto_materialize_policies_by_key.get(asset_key)\n\n if not auto_materialize_policy:\n check.failed(f"Expected auto materialize policy on asset {asset_key}")\n\n partitions_def = asset_graph.get_partitions_def(asset_key)\n if partitions_def is None:\n return AutoMaterializeAssetEvaluation(\n asset_key=asset_key,\n partition_subsets_by_condition=[\n (rule_evaluation, None)\n for rule_evaluation, _ in asset_partitions_by_rule_evaluation\n ],\n num_requested=num_requested,\n num_skipped=num_skipped,\n num_discarded=num_discarded,\n rule_snapshots=auto_materialize_policy.rule_snapshots,\n )\n else:\n return AutoMaterializeAssetEvaluation(\n asset_key=asset_key,\n partition_subsets_by_condition=[\n (\n rule_evaluation,\n SerializedPartitionsSubset.from_subset(\n subset=partitions_def.empty_subset().with_partition_keys(\n check.not_none(ap.partition_key) for ap in asset_partitions\n ),\n partitions_def=partitions_def,\n dynamic_partitions_store=dynamic_partitions_store,\n ),\n )\n for rule_evaluation, asset_partitions in asset_partitions_by_rule_evaluation\n ],\n num_requested=num_requested,\n num_skipped=num_skipped,\n num_discarded=num_discarded,\n rule_snapshots=auto_materialize_policy.rule_snapshots,\n )\n\n\n# BACKCOMPAT GRAVEYARD\n\n\nclass BackcompatAutoMaterializeConditionSerializer(NamedTupleSerializer):\n """This handles backcompat for the old AutoMaterializeCondition objects, turning them into the\n proper AutoMaterializeRuleEvaluation objects. 
This is necessary because old\n AutoMaterializeAssetEvaluation objects will have serialized AutoMaterializeCondition objects,\n and we need to be able to deserialize them.\n\n In theory, as these serialized objects happen to be purged periodically, we can remove this\n backcompat logic at some point in the future.\n """\n\n def unpack(\n self,\n unpacked_dict: Dict[str, UnpackedValue],\n whitelist_map: WhitelistMap,\n context: UnpackContext,\n ) -> AutoMaterializeRuleEvaluation:\n if self.klass in (\n FreshnessAutoMaterializeCondition,\n DownstreamFreshnessAutoMaterializeCondition,\n ):\n return AutoMaterializeRuleEvaluation(\n rule_snapshot=AutoMaterializeRule.materialize_on_required_for_freshness().to_snapshot(),\n evaluation_data=None,\n )\n elif self.klass == MissingAutoMaterializeCondition:\n return AutoMaterializeRuleEvaluation(\n rule_snapshot=AutoMaterializeRule.materialize_on_missing().to_snapshot(),\n evaluation_data=None,\n )\n elif self.klass == ParentMaterializedAutoMaterializeCondition:\n updated_asset_keys = unpacked_dict.get("updated_asset_keys")\n if isinstance(updated_asset_keys, set):\n updated_asset_keys = cast(FrozenSet[AssetKey], frozenset(updated_asset_keys))\n else:\n updated_asset_keys = frozenset()\n will_update_asset_keys = unpacked_dict.get("will_update_asset_keys")\n if isinstance(will_update_asset_keys, set):\n will_update_asset_keys = cast(\n FrozenSet[AssetKey], frozenset(will_update_asset_keys)\n )\n else:\n will_update_asset_keys = frozenset()\n return AutoMaterializeRuleEvaluation(\n rule_snapshot=AutoMaterializeRule.materialize_on_parent_updated().to_snapshot(),\n evaluation_data=ParentUpdatedRuleEvaluationData(\n updated_asset_keys=updated_asset_keys,\n will_update_asset_keys=will_update_asset_keys,\n ),\n )\n elif self.klass == ParentOutdatedAutoMaterializeCondition:\n waiting_on_asset_keys = unpacked_dict.get("waiting_on_asset_keys")\n if isinstance(waiting_on_asset_keys, set):\n waiting_on_asset_keys = cast(FrozenSet[AssetKey], frozenset(waiting_on_asset_keys))\n else:\n waiting_on_asset_keys = frozenset()\n return AutoMaterializeRuleEvaluation(\n rule_snapshot=AutoMaterializeRule.skip_on_parent_outdated().to_snapshot(),\n evaluation_data=WaitingOnAssetsRuleEvaluationData(\n waiting_on_asset_keys=waiting_on_asset_keys\n ),\n )\n elif self.klass == MaxMaterializationsExceededAutoMaterializeCondition:\n return AutoMaterializeRuleEvaluation(\n rule_snapshot=DiscardOnMaxMaterializationsExceededRule(limit=1).to_snapshot(),\n evaluation_data=None,\n )\n check.failed(f"Unexpected class {self.klass}")\n\n\n@whitelist_for_serdes(serializer=BackcompatAutoMaterializeConditionSerializer)\nclass FreshnessAutoMaterializeCondition(NamedTuple): ...\n\n\n@whitelist_for_serdes(serializer=BackcompatAutoMaterializeConditionSerializer)\nclass DownstreamFreshnessAutoMaterializeCondition(NamedTuple): ...\n\n\n@whitelist_for_serdes(serializer=BackcompatAutoMaterializeConditionSerializer)\nclass ParentMaterializedAutoMaterializeCondition(NamedTuple): ...\n\n\n@whitelist_for_serdes(serializer=BackcompatAutoMaterializeConditionSerializer)\nclass MissingAutoMaterializeCondition(NamedTuple): ...\n\n\n@whitelist_for_serdes(serializer=BackcompatAutoMaterializeConditionSerializer)\nclass ParentOutdatedAutoMaterializeCondition(NamedTuple): ...\n\n\n@whitelist_for_serdes(serializer=BackcompatAutoMaterializeConditionSerializer)\nclass MaxMaterializationsExceededAutoMaterializeCondition(NamedTuple): ...\n
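The rules listed above (for example `MaterializeOnMissingRule` and the skip/discard rules) are not normally constructed by hand; they are evaluated for assets that carry an `AutoMaterializePolicy`. As a usage illustration only, here is a minimal sketch assuming `AutoMaterializePolicy.eager()` and the `auto_materialize_policy` argument to `@asset`; the asset name is hypothetical.

.. code-block:: python

    # Minimal sketch (not part of this module): attach an eager policy so the
    # daemon evaluates rules such as MaterializeOnMissingRule and
    # MaterializeOnParentUpdatedRule for this asset.
    from dagster import AutoMaterializePolicy, asset

    @asset(auto_materialize_policy=AutoMaterializePolicy.eager())
    def derived_table() -> None:
        # the function body computes and persists the asset; the policy only
        # controls when the daemon requests a materialization
        ...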
", "current_page_name": "_modules/dagster/_core/definitions/auto_materialize_rule", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.auto_materialize_rule"}, "backfill_policy": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.backfill_policy

\nfrom enum import Enum\nfrom typing import NamedTuple, Optional\n\nimport dagster._check as check\nfrom dagster._annotations import experimental, public\nfrom dagster._serdes import whitelist_for_serdes\n\n\nclass BackfillPolicyType(Enum):\n    SINGLE_RUN = "SINGLE_RUN"\n    MULTI_RUN = "MULTI_RUN"\n\n\n
[docs]@experimental\n@whitelist_for_serdes\nclass BackfillPolicy(\n NamedTuple(\n "_BackfillPolicy",\n [\n ("max_partitions_per_run", Optional[int]),\n ],\n )\n):\n """A BackfillPolicy specifies how Dagster should attempt to backfill a partitioned asset.\n\n There are two main kinds of backfill policies: single-run and multi-run.\n\n An asset with a single-run backfill policy will take a single run to backfill all of its\n partitions at once.\n\n An asset with a multi-run backfill policy will take multiple runs to backfill all of its\n partitions. Each run will backfill a subset of the partitions. The number of partitions to\n backfill in each run is controlled by the `max_partitions_per_run` parameter.\n\n For example:\n\n - If an asset has 100 partitions, and the `max_partitions_per_run` is set to 10, then it will\n be backfilled in 10 runs; each run will backfill 10 partitions.\n\n - If an asset has 100 partitions, and the `max_partitions_per_run` is set to 11, then it will\n be backfilled in 10 runs; the first 9 runs will backfill 11 partitions each, and the last run\n will backfill the remaining 1 partition.\n\n **Warning:**\n\n Constructing a BackfillPolicy directly is not recommended as the API is subject to change.\n BackfillPolicy.single_run() and BackfillPolicy.multi_run(max_partitions_per_run=x) are the\n recommended APIs.\n """\n\n def __new__(cls, max_partitions_per_run: Optional[int] = 1):\n return super(BackfillPolicy, cls).__new__(\n cls,\n max_partitions_per_run=max_partitions_per_run,\n )\n\n
[docs] @public\n @staticmethod\n def single_run() -> "BackfillPolicy":\n """Creates a BackfillPolicy that executes the entire backfill in a single run."""\n return BackfillPolicy(max_partitions_per_run=None)
\n\n
[docs] @public\n @staticmethod\n def multi_run(max_partitions_per_run: int = 1) -> "BackfillPolicy":\n """Creates a BackfillPolicy that executes the entire backfill in multiple runs.\n Each run will backfill at most `max_partitions_per_run` partitions.\n\n Args:\n max_partitions_per_run (Optional[int]): The maximum number of partitions to backfill in\n each of the runs. Defaults to 1.\n """\n return BackfillPolicy(\n max_partitions_per_run=check.int_param(max_partitions_per_run, "max_partitions_per_run")\n )
\n\n @property\n def policy_type(self) -> BackfillPolicyType:\n if self.max_partitions_per_run:\n return BackfillPolicyType.MULTI_RUN\n else:\n return BackfillPolicyType.SINGLE_RUN
\n
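To make the two factory methods above concrete, the following is a minimal sketch; `DailyPartitionsDefinition` and the asset names are assumptions, while `partitions_def` and `backfill_policy` are the `@asset` parameters documented later in this listing.

.. code-block:: python

    # Minimal sketch: one asset backfilled in chunks of at most 10 partitions
    # per run, another backfilled in a single run. Names are hypothetical.
    from dagster import BackfillPolicy, DailyPartitionsDefinition, asset

    daily = DailyPartitionsDefinition(start_date="2023-01-01")

    @asset(
        partitions_def=daily,
        backfill_policy=BackfillPolicy.multi_run(max_partitions_per_run=10),
    )
    def daily_events() -> None:
        ...

    @asset(partitions_def=daily, backfill_policy=BackfillPolicy.single_run())
    def daily_events_summary() -> None:
        ...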
", "current_page_name": "_modules/dagster/_core/definitions/backfill_policy", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.backfill_policy"}, "config": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.config

\nfrom typing import Any, Callable, Mapping, NamedTuple, Optional, Union, cast\n\nfrom typing_extensions import TypeAlias\n\nimport dagster._check as check\nfrom dagster._builtins import BuiltinEnum\nfrom dagster._config import (\n    ConfigType,\n    is_supported_config_python_builtin,\n    process_config,\n    resolve_defaults,\n    validate_config,\n)\nfrom dagster._core.definitions.definition_config_schema import IDefinitionConfigSchema\nfrom dagster._core.errors import DagsterInvalidConfigError\n\nfrom .definition_config_schema import convert_user_facing_definition_config_schema\n\nConfigMappingFn: TypeAlias = Callable[[Any], Any]\n\n\ndef is_callable_valid_config_arg(config: Union[Callable[..., Any], Mapping[str, object]]) -> bool:\n    return BuiltinEnum.contains(config) or is_supported_config_python_builtin(config)\n\n\n
[docs]class ConfigMapping(\n NamedTuple(\n "_ConfigMapping",\n [\n ("config_fn", Callable[[Any], Any]),\n ("config_schema", IDefinitionConfigSchema),\n ("receive_processed_config_values", Optional[bool]),\n ],\n )\n):\n """Defines a config mapping for a graph (or job).\n\n By specifying a config mapping function, you can override the configuration for the child\n ops and graphs contained within a graph.\n\n Config mappings require the configuration schema to be specified as ``config_schema``, which will\n be exposed as the configuration schema for the graph, as well as a configuration mapping\n function, ``config_fn``, which maps the config provided to the graph to the config\n that will be provided to the child nodes.\n\n Args:\n config_fn (Callable[[dict], dict]): The function that will be called\n to map the graph config to a config appropriate for the child nodes.\n config_schema (ConfigSchema): The schema of the graph config.\n receive_processed_config_values (Optional[bool]): If true, config values provided to the config_fn\n will be converted to their dagster types before being passed in. For example, if this\n value is true, enum config passed to config_fn will be actual enums, while if false,\n then enum config passed to config_fn will be strings.\n """\n\n def __new__(\n cls,\n config_fn: ConfigMappingFn,\n config_schema: Optional[Any] = None,\n receive_processed_config_values: Optional[bool] = None,\n ):\n return super(ConfigMapping, cls).__new__(\n cls,\n config_fn=check.callable_param(config_fn, "config_fn"),\n config_schema=convert_user_facing_definition_config_schema(config_schema),\n receive_processed_config_values=check.opt_bool_param(\n receive_processed_config_values, "receive_processed_config_values"\n ),\n )\n\n def resolve_from_unvalidated_config(self, config: Any) -> Any:\n """Validates config against outer config schema, and calls mapping against validated config."""\n receive_processed_config_values = check.opt_bool_param(\n self.receive_processed_config_values, "receive_processed_config_values", default=True\n )\n if receive_processed_config_values:\n outer_evr = process_config(\n self.config_schema.config_type,\n config,\n )\n else:\n outer_evr = validate_config(\n self.config_schema.config_type,\n config,\n )\n if not outer_evr.success:\n raise DagsterInvalidConfigError(\n "Error in config mapping ",\n outer_evr.errors,\n config,\n )\n\n outer_config = outer_evr.value\n if not receive_processed_config_values:\n outer_config = resolve_defaults(\n cast(ConfigType, self.config_schema.config_type),\n outer_config,\n ).value\n\n return self.config_fn(outer_config)\n\n def resolve_from_validated_config(self, config: Any) -> Any:\n if self.receive_processed_config_values is not None:\n check.failed(\n "`receive_processed_config_values` parameter has been set, but only applies to "\n "unvalidated config."\n )\n\n return self.config_fn(config)
\n
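As a usage illustration of the `config_fn` resolution described above, here is a minimal sketch of a `ConfigMapping` attached to a graph; the op and graph names are hypothetical, and the mapped config is keyed by the child node's name.

.. code-block:: python

    # Minimal sketch: the graph exposes a small outer schema and config_fn
    # expands it into config for the child op. Names are hypothetical.
    from dagster import ConfigMapping, graph, op

    @op(config_schema={"message": str})
    def emit_greeting(context) -> None:
        context.log.info(context.op_config["message"])

    @graph(
        config=ConfigMapping(
            config_schema={"name": str},
            config_fn=lambda outer: {
                "emit_greeting": {"config": {"message": f"hello, {outer['name']}"}}
            },
        )
    )
    def greeting_graph():
        emit_greeting()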
", "current_page_name": "_modules/dagster/_core/definitions/config", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.config"}, "configurable": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.configurable

\nfrom abc import ABC, abstractmethod\nfrom typing import Any, Callable, NamedTuple, Optional, Type, TypeVar, Union, cast\n\nfrom typing_extensions import Self\n\nfrom dagster import (\n    Field,\n    _check as check,\n)\nfrom dagster._config import EvaluateValueResult\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._core.decorator_utils import get_function_params\n\nfrom .definition_config_schema import (\n    CoercableToConfigSchema,\n    ConfiguredDefinitionConfigSchema,\n    IDefinitionConfigSchema,\n    convert_user_facing_definition_config_schema,\n)\n\n\nclass ConfigurableDefinition(ABC):\n    @property\n    @abstractmethod\n    def config_schema(self) -> Optional[IDefinitionConfigSchema]:\n        raise NotImplementedError()\n\n    @property\n    def has_config_field(self) -> bool:\n        return self.config_schema is not None and bool(self.config_schema.as_field())\n\n    @property\n    def config_field(self) -> Optional[Field]:\n        return None if not self.config_schema else self.config_schema.as_field()\n\n    # getter for typed access\n    def get_config_field(self) -> Field:\n        field = self.config_field\n        if field is None:\n            check.failed("Must check has_config_Field before calling get_config_field")\n        return field\n\n    def apply_config_mapping(self, config: Any) -> EvaluateValueResult:\n        """Applies user-provided config mapping functions to the given configuration and validates the\n        results against the respective config schema.\n\n        Expects incoming config to be validated and have fully-resolved values (StringSource values\n        resolved, Enum types hydrated, etc.) via process_config() during ResolvedRunConfig\n        construction and Graph config mapping.\n\n        Args:\n            config (Any): A validated and resolved configuration dictionary matching this object's\n            config_schema\n\n        Returns (EvaluateValueResult):\n            If successful, the value is a validated and resolved configuration dictionary for the\n            innermost wrapped object after applying the config mapping transformation function.\n        """\n        # If schema is on a mapped schema this is the innermost resource (base case),\n        # so we aren't responsible for validating against anything farther down.\n        # Returns an EVR for type consistency with config_mapping_fn.\n        return (\n            self.config_schema.resolve_config(config)\n            if isinstance(self.config_schema, ConfiguredDefinitionConfigSchema)\n            else EvaluateValueResult.for_value(config)\n        )\n\n\nclass AnonymousConfigurableDefinition(ConfigurableDefinition):\n    """An interface that makes the `configured` method not accept a name argument."""\n\n    def configured(\n        self,\n        config_or_config_fn: Any,\n        config_schema: CoercableToConfigSchema = None,\n        description: Optional[str] = None,\n    ) -> Self:\n        """Wraps this object in an object of the same type that provides configuration to the inner\n        object.\n\n        Using ``configured`` may result in config values being displayed in\n        the Dagster UI, so it is not recommended to use this API with sensitive values,\n        such as secrets.\n\n        Args:\n            config_or_config_fn (Union[Any, Callable[[Any], Any]]): Either (1) Run configuration\n                that fully satisfies this object's config schema or (2) A function that accepts run\n                configuration and 
returns run configuration that fully satisfies this object's\n                config schema.  In the latter case, config_schema must be specified.  When\n                passing a function, it's easiest to use :py:func:`configured`.\n            config_schema (ConfigSchema): If config_or_config_fn is a function, the config schema\n                that its input must satisfy.\n            description (Optional[str]): Description of the new definition. If not specified,\n                inherits the description of the definition being configured.\n\n        Returns (ConfigurableDefinition): A configured version of this object.\n        """\n        new_config_schema = ConfiguredDefinitionConfigSchema(\n            self, convert_user_facing_definition_config_schema(config_schema), config_or_config_fn\n        )\n\n        return self.copy_for_configured(description, new_config_schema)\n\n    @abstractmethod\n    def copy_for_configured(\n        self,\n        description: Optional[str],\n        config_schema: IDefinitionConfigSchema,\n    ) -> Self:\n        raise NotImplementedError()\n\n\nclass NamedConfigurableDefinition(ConfigurableDefinition):\n    """An interface that makes the `configured` method require a positional `name` argument."""\n\n    def configured(\n        self,\n        config_or_config_fn: Any,\n        name: str,\n        config_schema: Optional[UserConfigSchema] = None,\n        description: Optional[str] = None,\n    ) -> Self:\n        """Wraps this object in an object of the same type that provides configuration to the inner\n        object.\n\n        Using ``configured`` may result in config values being displayed in\n        the Dagster UI, so it is not recommended to use this API with sensitive values,\n        such as secrets.\n\n        Args:\n            config_or_config_fn (Union[Any, Callable[[Any], Any]]): Either (1) Run configuration\n                that fully satisfies this object's config schema or (2) A function that accepts run\n                configuration and returns run configuration that fully satisfies this object's\n                config schema.  In the latter case, config_schema must be specified.  When\n                passing a function, it's easiest to use :py:func:`configured`.\n            name (str): Name of the new definition. This is a required argument, as this definition\n                type has a name uniqueness constraint.\n            config_schema (ConfigSchema): If config_or_config_fn is a function, the config schema\n                that its input must satisfy.\n            description (Optional[str]): Description of the new definition. 
If not specified,\n                inherits the description of the definition being configured.\n\n        Returns (ConfigurableDefinition): A configured version of this object.\n        """\n        name = check.str_param(name, "name")\n\n        new_config_schema = ConfiguredDefinitionConfigSchema(\n            self, convert_user_facing_definition_config_schema(config_schema), config_or_config_fn\n        )\n\n        return self.copy_for_configured(name, description, new_config_schema)\n\n    @abstractmethod\n    def copy_for_configured(\n        self,\n        name: str,\n        description: Optional[str],\n        config_schema: IDefinitionConfigSchema,\n    ) -> Self: ...\n\n\ndef _check_configurable_param(configurable: ConfigurableDefinition) -> None:\n    from dagster._core.definitions.composition import PendingNodeInvocation\n\n    check.param_invariant(\n        not isinstance(configurable, PendingNodeInvocation),\n        "configurable",\n        "You have invoked `configured` on a PendingNodeInvocation (an intermediate type), which"\n        " is produced by aliasing or tagging a node definition. To configure a node, you must"\n        " call `configured` on either an OpDefinition and GraphDefinition. To fix"\n        " this error, make sure to call `configured` on the definition object *before* using"\n        " the `tag` or `alias` methods. For usage examples, see"\n        " https://docs.dagster.io/concepts/configuration/configured",\n    )\n    check.inst_param(\n        configurable,\n        "configurable",\n        ConfigurableDefinition,\n        "Only the following types can be used with the `configured` method: ResourceDefinition,"\n        " ExecutorDefinition, GraphDefinition, NodeDefinition, and LoggerDefinition."\n        " For usage examples of `configured`, see"\n        " https://docs.dagster.io/concepts/configuration/configured",\n    )\n\n\nT_Configurable = TypeVar(\n    "T_Configurable", bound=Union["AnonymousConfigurableDefinition", "NamedConfigurableDefinition"]\n)\n\n\nclass FunctionAndConfigSchema(NamedTuple):\n    function: Callable[[Any], Any]\n    config_schema: Optional[UserConfigSchema]\n\n\ndef _wrap_user_fn_if_pythonic_config(\n    user_fn: Any, config_schema: Optional[UserConfigSchema]\n) -> FunctionAndConfigSchema:\n    """Helper function which allows users to provide a Pythonic config object to a @configurable\n    function. Detects if the function has a single parameter annotated with a Config class.\n    If so, wraps the function to convert the config dictionary into the appropriate Config object.\n    """\n    from dagster._config.pythonic_config import (\n        Config,\n        infer_schema_from_config_annotation,\n        safe_is_subclass,\n    )\n\n    if not isinstance(user_fn, Callable):\n        return FunctionAndConfigSchema(function=user_fn, config_schema=config_schema)\n\n    config_fn_params = get_function_params(user_fn)\n    check.invariant(\n        len(config_fn_params) == 1, "@configured function should have exactly one parameter"\n    )\n\n    param = config_fn_params[0]\n\n    # If the parameter is a subclass of Config, we can infer the config schema from the\n    # type annotation. 
We'll also wrap the config mapping function to convert the config\n    # dictionary into the appropriate Config object.\n    if not safe_is_subclass(param.annotation, Config):\n        return FunctionAndConfigSchema(function=user_fn, config_schema=config_schema)\n\n    check.invariant(\n        config_schema is None,\n        "Cannot provide config_schema to @configured function with Config-annotated param",\n    )\n\n    config_schema_from_class = infer_schema_from_config_annotation(param.annotation, param.default)\n    config_cls = cast(Type[Config], param.annotation)\n\n    param_name = param.name\n\n    def wrapped_fn(config_as_dict) -> Any:\n        config_input = config_cls(**config_as_dict)\n        output = user_fn(**{param_name: config_input})\n\n        if isinstance(output, Config):\n            return output._convert_to_config_dictionary()  # noqa: SLF001\n        else:\n            return output\n\n    return FunctionAndConfigSchema(function=wrapped_fn, config_schema=config_schema_from_class)\n\n\n
[docs]def configured(\n configurable: T_Configurable,\n config_schema: Optional[UserConfigSchema] = None,\n **kwargs: Any,\n) -> Callable[[object], T_Configurable]:\n """A decorator that makes it easy to create a function-configured version of an object.\n\n The following definition types can be configured using this function:\n\n * :py:class:`GraphDefinition`\n * :py:class:`ExecutorDefinition`\n * :py:class:`LoggerDefinition`\n * :py:class:`ResourceDefinition`\n * :py:class:`OpDefinition`\n\n Using ``configured`` may result in config values being displayed in the Dagster UI,\n so it is not recommended to use this API with sensitive values, such as\n secrets.\n\n If the config that will be supplied to the object is constant, you may alternatively invoke this\n and call the result with a dict of config values to be curried. Examples of both strategies\n below.\n\n Args:\n configurable (ConfigurableDefinition): An object that can be configured.\n config_schema (ConfigSchema): The config schema that the inputs to the decorated function\n must satisfy. Alternatively, annotate the config parameter to the decorated function\n with a subclass of :py:class:`Config` and omit this argument.\n **kwargs: Arbitrary keyword arguments that will be passed to the initializer of the returned\n object.\n\n Returns:\n (Callable[[Union[Any, Callable[[Any], Any]]], ConfigurableDefinition])\n\n **Examples:**\n\n .. code-block:: python\n\n class GreetingConfig(Config):\n message: str\n\n @op\n def greeting_op(config: GreetingConfig):\n print(config.message)\n\n class HelloConfig(Config):\n name: str\n\n @configured(greeting_op)\n def hello_op(config: HelloConfig):\n return GreetingConfig(message=f"Hello, {config.name}!")\n\n .. code-block:: python\n\n dev_s3 = configured(S3Resource, name="dev_s3")({'bucket': 'dev'})\n\n @configured(S3Resource)\n def dev_s3(_):\n return {'bucket': 'dev'}\n\n @configured(S3Resource, {'bucket_prefix', str})\n def dev_s3(config):\n return {'bucket': config['bucket_prefix'] + 'dev'}\n\n """\n _check_configurable_param(configurable)\n\n if isinstance(configurable, NamedConfigurableDefinition):\n\n def _configured(config_or_config_fn: object) -> T_Configurable:\n fn_name = (\n getattr(config_or_config_fn, "__name__", None)\n if callable(config_or_config_fn)\n else None\n )\n name: str = check.not_none(kwargs.get("name") or fn_name)\n\n updated_fn, new_config_schema = _wrap_user_fn_if_pythonic_config(\n config_or_config_fn, config_schema\n )\n return configurable.configured(\n config_or_config_fn=updated_fn,\n name=name,\n config_schema=new_config_schema,\n **{k: v for k, v in kwargs.items() if k != "name"},\n )\n\n return _configured\n elif isinstance(configurable, AnonymousConfigurableDefinition):\n\n def _configured(config_or_config_fn: object) -> T_Configurable:\n updated_fn, new_config_schema = _wrap_user_fn_if_pythonic_config(\n config_or_config_fn, config_schema\n )\n return configurable.configured(\n config_schema=new_config_schema, config_or_config_fn=updated_fn, **kwargs\n )\n\n return _configured\n else:\n check.failed(f"Invalid configurable definition type: {type(configurable)}")
\n
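In addition to the decorator form shown in the docstring, the curried form can be applied to a named definition such as an executor. A minimal sketch, assuming the built-in `multiprocess_executor` and its `max_concurrent` config field:

.. code-block:: python

    # Minimal sketch of the curried form of `configured`: constant config is
    # supplied immediately and a new, named executor definition is returned.
    from dagster import configured, multiprocess_executor

    local_executor = configured(multiprocess_executor, name="local_executor")(
        {"max_concurrent": 4}
    )

Because an executor is a named configurable definition and a plain config dict carries no `__name__`, the `name` keyword is required in this curried-dict form, as the name-resolution logic above shows.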
", "current_page_name": "_modules/dagster/_core/definitions/configurable", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.configurable"}, "decorators": {"asset_check_decorator": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.decorators.asset_check_decorator

\nfrom typing import Any, Callable, Mapping, Optional, Set, Tuple, Union, cast\n\nfrom dagster import _check as check\nfrom dagster._annotations import experimental\nfrom dagster._builtins import Nothing\nfrom dagster._config import UserConfigSchema\nfrom dagster._core.definitions.asset_check_result import AssetCheckResult\nfrom dagster._core.definitions.asset_check_spec import AssetCheckSpec\nfrom dagster._core.definitions.asset_checks import (\n    AssetChecksDefinition,\n    AssetChecksDefinitionInputOutputProps,\n)\nfrom dagster._core.definitions.assets import AssetsDefinition\nfrom dagster._core.definitions.events import AssetKey, CoercibleToAssetKey\nfrom dagster._core.definitions.output import Out\nfrom dagster._core.definitions.policy import RetryPolicy\nfrom dagster._core.definitions.source_asset import SourceAsset\nfrom dagster._core.definitions.utils import NoValueSentinel\nfrom dagster._core.errors import DagsterInvalidDefinitionError\n\nfrom ..input import In\nfrom .asset_decorator import (\n    get_function_params_without_context_or_config_or_resources,\n    stringify_asset_key_to_input_name,\n)\nfrom .op_decorator import _Op\n\nAssetCheckFunctionReturn = AssetCheckResult\nAssetCheckFunction = Callable[..., AssetCheckFunctionReturn]\n\n\ndef _build_asset_check_input(\n    name: str, asset_key: AssetKey, fn: Callable\n) -> Mapping[AssetKey, Tuple[str, In]]:\n    asset_params = get_function_params_without_context_or_config_or_resources(fn)\n\n    if len(asset_params) == 0:\n        input_name = stringify_asset_key_to_input_name(asset_key)\n        in_def = In(cast(type, Nothing))\n    elif len(asset_params) == 1:\n        input_name = asset_params[0].name\n        in_def = In(metadata={}, input_manager_key=None, dagster_type=NoValueSentinel)\n    else:\n        raise DagsterInvalidDefinitionError(\n            f"When defining check '{name}', multiple target assets provided as parameters:"\n            f" {[param.name for param in asset_params]}. Only one"\n            " is allowed."\n        )\n\n    return {\n        asset_key: (\n            input_name,\n            in_def,\n        )\n    }\n\n\n
[docs]@experimental\ndef asset_check(\n *,\n asset: Union[CoercibleToAssetKey, AssetsDefinition, SourceAsset],\n name: Optional[str] = None,\n description: Optional[str] = None,\n required_resource_keys: Optional[Set[str]] = None,\n resource_defs: Optional[Mapping[str, object]] = None,\n config_schema: Optional[UserConfigSchema] = None,\n compute_kind: Optional[str] = None,\n op_tags: Optional[Mapping[str, Any]] = None,\n retry_policy: Optional[RetryPolicy] = None,\n) -> Callable[[AssetCheckFunction], AssetChecksDefinition]:\n """Create a definition for how to execute an asset check.\n\n Args:\n asset (Union[AssetKey, Sequence[str], str, AssetsDefinition, SourceAsset]): The\n asset that the check applies to.\n name (Optional[str]): The name of the check. If not specified, the name of the decorated\n function will be used. Checks for the same asset must have unique names.\n description (Optional[str]): The description of the check.\n required_resource_keys (Optional[Set[str]]): A set of keys for resources that are required\n by the function that execute the check. These can alternatively be specified by\n including resource-typed parameters in the function signature.\n config_schema (Optional[ConfigSchema): The configuration schema for the check's underlying\n op. If set, Dagster will check that config provided for the op matches this schema and fail\n if it does not. If not set, Dagster will accept any config provided for the op.\n op_tags (Optional[Dict[str, Any]]): A dictionary of tags for the op that executes the check.\n Frameworks may expect and require certain metadata to be attached to a op. Values that\n are not strings will be json encoded and must meet the criteria that\n `json.loads(json.dumps(value)) == value`.\n compute_kind (Optional[str]): A string to represent the kind of computation that executes\n the check, e.g. "dbt" or "spark".\n retry_policy (Optional[RetryPolicy]): The retry policy for the op that executes the check.\n\n\n Produces an :py:class:`AssetChecksDefinition` object.\n\n\n Example:\n .. code-block:: python\n\n from dagster import asset, asset_check, AssetCheckResult\n\n @asset\n def my_asset() -> None:\n ...\n\n @asset_check(asset=my_asset, description="Check that my asset has enough rows")\n def my_asset_has_enough_rows() -> AssetCheckResult:\n num_rows = ...\n return AssetCheckResult(passed=num_rows > 5, metadata={"num_rows": num_rows})\n\n\n Example with a DataFrame Output:\n .. 
code-block:: python\n\n from dagster import asset, asset_check, AssetCheckResult\n from pandas import DataFrame\n\n @asset\n def my_asset() -> DataFrame:\n ...\n\n @asset_check(asset=my_asset, description="Check that my asset has enough rows")\n def my_asset_has_enough_rows(my_asset: DataFrame) -> AssetCheckResult:\n num_rows = my_asset.shape[0]\n return AssetCheckResult(passed=num_rows > 5, metadata={"num_rows": num_rows})\n """\n\n def inner(fn: AssetCheckFunction) -> AssetChecksDefinition:\n check.callable_param(fn, "fn")\n resolved_name = name or fn.__name__\n asset_key = AssetKey.from_coercible_or_definition(asset)\n\n out = Out(dagster_type=None)\n input_tuples_by_asset_key = _build_asset_check_input(resolved_name, asset_key, fn)\n if len(input_tuples_by_asset_key) == 0:\n raise DagsterInvalidDefinitionError(\n f"No target asset provided when defining check '{resolved_name}'"\n )\n\n if len(input_tuples_by_asset_key) > 1:\n raise DagsterInvalidDefinitionError(\n f"When defining check '{resolved_name}', Multiple target assets provided:"\n f" {[key.to_user_string() for key in input_tuples_by_asset_key.keys()]}. Only one"\n " is allowed."\n )\n\n resolved_asset_key = next(iter(input_tuples_by_asset_key.keys()))\n spec = AssetCheckSpec(\n name=resolved_name,\n description=description,\n asset=resolved_asset_key,\n )\n\n op_def = _Op(\n name=spec.get_python_identifier(),\n ins=dict(input_tuples_by_asset_key.values()),\n out=out,\n # Any resource requirements specified as arguments will be identified as\n # part of the Op definition instantiation\n required_resource_keys=required_resource_keys,\n tags={\n **({"kind": compute_kind} if compute_kind else {}),\n **(op_tags or {}),\n },\n config_schema=config_schema,\n retry_policy=retry_policy,\n )(fn)\n\n checks_def = AssetChecksDefinition(\n node_def=op_def,\n resource_defs={},\n specs=[spec],\n input_output_props=AssetChecksDefinitionInputOutputProps(\n asset_keys_by_input_name={\n input_tuples_by_asset_key[resolved_asset_key][0]: resolved_asset_key\n },\n asset_check_keys_by_output_name={op_def.output_defs[0].name: spec.key},\n ),\n )\n\n return checks_def\n\n return inner
\n
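The decorator returns an `AssetChecksDefinition`, which still has to be included in a code location before it will execute. A minimal sketch, assuming a `Definitions` object with an (experimental) `asset_checks` argument and hypothetical asset and check names:

.. code-block:: python

    # Minimal sketch: defining a check and registering it alongside its asset.
    # The row count is a placeholder for a real query.
    from dagster import AssetCheckResult, Definitions, asset, asset_check

    @asset
    def orders() -> None:
        ...

    @asset_check(asset=orders, description="orders should not be empty")
    def orders_is_nonempty() -> AssetCheckResult:
        row_count = 10  # placeholder
        return AssetCheckResult(passed=row_count > 0, metadata={"row_count": row_count})

    defs = Definitions(assets=[orders], asset_checks=[orders_is_nonempty])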
", "current_page_name": "_modules/dagster/_core/definitions/decorators/asset_check_decorator", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.decorators.asset_check_decorator"}, "asset_decorator": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.decorators.asset_decorator

\nfrom collections import Counter\nfrom inspect import Parameter\nfrom typing import (\n    AbstractSet,\n    Any,\n    Callable,\n    Dict,\n    Iterable,\n    List,\n    Mapping,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n    Union,\n    cast,\n    overload,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import deprecated_param, experimental_param\nfrom dagster._builtins import Nothing\nfrom dagster._config import UserConfigSchema\nfrom dagster._core.decorator_utils import get_function_params, get_valid_name_permutations\nfrom dagster._core.definitions.asset_dep import AssetDep, CoercibleToAssetDep\nfrom dagster._core.definitions.auto_materialize_policy import AutoMaterializePolicy\nfrom dagster._core.definitions.config import ConfigMapping\nfrom dagster._core.definitions.freshness_policy import FreshnessPolicy\nfrom dagster._core.definitions.metadata import ArbitraryMetadataMapping, MetadataUserInput\nfrom dagster._core.definitions.partition_mapping import PartitionMapping\nfrom dagster._core.definitions.resource_annotation import (\n    get_resource_args,\n)\nfrom dagster._core.errors import DagsterInvalidDefinitionError, DagsterInvariantViolationError\nfrom dagster._core.types.dagster_type import DagsterType\nfrom dagster._utils.warnings import (\n    disable_dagster_warnings,\n)\n\nfrom ..asset_check_spec import AssetCheckSpec\nfrom ..asset_in import AssetIn\nfrom ..asset_out import AssetOut\nfrom ..asset_spec import AssetSpec\nfrom ..assets import AssetsDefinition\nfrom ..backfill_policy import BackfillPolicy, BackfillPolicyType\nfrom ..decorators.graph_decorator import graph\nfrom ..decorators.op_decorator import _Op\nfrom ..events import AssetKey, CoercibleToAssetKey, CoercibleToAssetKeyPrefix\nfrom ..input import GraphIn, In\nfrom ..output import GraphOut, Out\nfrom ..partition import PartitionsDefinition\nfrom ..policy import RetryPolicy\nfrom ..resource_definition import ResourceDefinition\nfrom ..utils import DEFAULT_IO_MANAGER_KEY, DEFAULT_OUTPUT, NoValueSentinel\n\n\n@overload\ndef asset(\n    compute_fn: Callable,\n) -> AssetsDefinition: ...\n\n\n@overload\ndef asset(\n    *,\n    name: Optional[str] = ...,\n    key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n    ins: Optional[Mapping[str, AssetIn]] = ...,\n    deps: Optional[Iterable[CoercibleToAssetDep]] = ...,\n    metadata: Optional[Mapping[str, Any]] = ...,\n    description: Optional[str] = ...,\n    config_schema: Optional[UserConfigSchema] = None,\n    required_resource_keys: Optional[Set[str]] = ...,\n    resource_defs: Optional[Mapping[str, object]] = ...,\n    io_manager_def: Optional[object] = ...,\n    io_manager_key: Optional[str] = ...,\n    compute_kind: Optional[str] = ...,\n    dagster_type: Optional[DagsterType] = ...,\n    partitions_def: Optional[PartitionsDefinition] = ...,\n    op_tags: Optional[Mapping[str, Any]] = ...,\n    group_name: Optional[str] = ...,\n    output_required: bool = ...,\n    freshness_policy: Optional[FreshnessPolicy] = ...,\n    auto_materialize_policy: Optional[AutoMaterializePolicy] = ...,\n    backfill_policy: Optional[BackfillPolicy] = ...,\n    retry_policy: Optional[RetryPolicy] = ...,\n    code_version: Optional[str] = ...,\n    key: Optional[CoercibleToAssetKey] = None,\n    non_argument_deps: Optional[Union[Set[AssetKey], Set[str]]] = ...,\n    check_specs: Optional[Sequence[AssetCheckSpec]] = ...,\n) -> Callable[[Callable[..., Any]], AssetsDefinition]: ...\n\n\n
[docs]@experimental_param(param="resource_defs")\n@experimental_param(param="io_manager_def")\n@experimental_param(param="auto_materialize_policy")\n@experimental_param(param="backfill_policy")\n@deprecated_param(\n param="non_argument_deps", breaking_version="2.0.0", additional_warn_text="use `deps` instead."\n)\ndef asset(\n compute_fn: Optional[Callable] = None,\n *,\n name: Optional[str] = None,\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n ins: Optional[Mapping[str, AssetIn]] = None,\n deps: Optional[Iterable[CoercibleToAssetDep]] = None,\n metadata: Optional[ArbitraryMetadataMapping] = None,\n description: Optional[str] = None,\n config_schema: Optional[UserConfigSchema] = None,\n required_resource_keys: Optional[Set[str]] = None,\n resource_defs: Optional[Mapping[str, object]] = None,\n io_manager_def: Optional[object] = None,\n io_manager_key: Optional[str] = None,\n compute_kind: Optional[str] = None,\n dagster_type: Optional[DagsterType] = None,\n partitions_def: Optional[PartitionsDefinition] = None,\n op_tags: Optional[Mapping[str, Any]] = None,\n group_name: Optional[str] = None,\n output_required: bool = True,\n freshness_policy: Optional[FreshnessPolicy] = None,\n auto_materialize_policy: Optional[AutoMaterializePolicy] = None,\n backfill_policy: Optional[BackfillPolicy] = None,\n retry_policy: Optional[RetryPolicy] = None,\n code_version: Optional[str] = None,\n key: Optional[CoercibleToAssetKey] = None,\n non_argument_deps: Optional[Union[Set[AssetKey], Set[str]]] = None,\n check_specs: Optional[Sequence[AssetCheckSpec]] = None,\n) -> Union[AssetsDefinition, Callable[[Callable[..., Any]], AssetsDefinition]]:\n """Create a definition for how to compute an asset.\n\n A software-defined asset is the combination of:\n 1. An asset key, e.g. the name of a table.\n 2. A function, which can be run to compute the contents of the asset.\n 3. A set of upstream assets that are provided as inputs to the function when computing the asset.\n\n Unlike an op, whose dependencies are determined by the graph it lives inside, an asset knows\n about the upstream assets it depends on. The upstream assets are inferred from the arguments\n to the decorated function. The name of the argument designates the name of the upstream asset.\n\n An asset has an op inside it to represent the function that computes it. The name of the op\n will be the segments of the asset key, separated by double-underscores.\n\n Args:\n name (Optional[str]): The name of the asset. If not provided, defaults to the name of the\n decorated function. The asset's name must be a valid name in dagster (ie only contains\n letters, numbers, and _) and may not contain python reserved keywords.\n key_prefix (Optional[Union[str, Sequence[str]]]): If provided, the asset's key is the\n concatenation of the key_prefix and the asset's name, which defaults to the name of\n the decorated function. Each item in key_prefix must be a valid name in dagster (ie only\n contains letters, numbers, and _) and may not contain python reserved keywords.\n ins (Optional[Mapping[str, AssetIn]]): A dictionary that maps input names to information\n about the input.\n deps (Optional[Sequence[Union[AssetDep, AssetsDefinition, SourceAsset, AssetKey, str]]]):\n The assets that are upstream dependencies, but do not correspond to a parameter of the\n decorated function. 
If the AssetsDefinition for a multi_asset is provided, dependencies on\n all assets created by the multi_asset will be created.\n config_schema (Optional[ConfigSchema): The configuration schema for the asset's underlying\n op. If set, Dagster will check that config provided for the op matches this schema and fail\n if it does not. If not set, Dagster will accept any config provided for the op.\n metadata (Optional[Dict[str, Any]]): A dict of metadata entries for the asset.\n required_resource_keys (Optional[Set[str]]): Set of resource handles required by the op.\n io_manager_key (Optional[str]): The resource key of the IOManager used\n for storing the output of the op as an asset, and for loading it in downstream ops\n (default: "io_manager"). Only one of io_manager_key and io_manager_def can be provided.\n io_manager_def (Optional[object]): (Experimental) The IOManager used for\n storing the output of the op as an asset, and for loading it in\n downstream ops. Only one of io_manager_def and io_manager_key can be provided.\n compute_kind (Optional[str]): A string to represent the kind of computation that produces\n the asset, e.g. "dbt" or "spark". It will be displayed in the Dagster UI as a badge on the asset.\n dagster_type (Optional[DagsterType]): Allows specifying type validation functions that\n will be executed on the output of the decorated function after it runs.\n partitions_def (Optional[PartitionsDefinition]): Defines the set of partition keys that\n compose the asset.\n op_tags (Optional[Dict[str, Any]]): A dictionary of tags for the op that computes the asset.\n Frameworks may expect and require certain metadata to be attached to a op. Values that\n are not strings will be json encoded and must meet the criteria that\n `json.loads(json.dumps(value)) == value`.\n group_name (Optional[str]): A string name used to organize multiple assets into groups. If not provided,\n the name "default" is used.\n resource_defs (Optional[Mapping[str, object]]):\n (Experimental) A mapping of resource keys to resources. These resources\n will be initialized during execution, and can be accessed from the\n context within the body of the function.\n output_required (bool): Whether the decorated function will always materialize an asset.\n Defaults to True. If False, the function can return None, which will not be materialized to\n storage and will halt execution of downstream assets.\n freshness_policy (FreshnessPolicy): A constraint telling Dagster how often this asset is intended to be updated\n with respect to its root data.\n auto_materialize_policy (AutoMaterializePolicy): (Experimental) Configure Dagster to automatically materialize\n this asset according to its FreshnessPolicy and when upstream dependencies change.\n backfill_policy (BackfillPolicy): (Experimental) Configure Dagster to backfill this asset according to its\n BackfillPolicy.\n retry_policy (Optional[RetryPolicy]): The retry policy for the op that computes the asset.\n code_version (Optional[str]): (Experimental) Version of the code that generates this asset. 
In\n general, versions should be set only for code that deterministically produces the same\n output when given the same inputs.\n check_specs (Optional[Sequence[AssetCheckSpec]]): (Experimental) Specs for asset checks that\n execute in the decorated function after materializing the asset.\n non_argument_deps (Optional[Union[Set[AssetKey], Set[str]]]): Deprecated, use deps instead.\n Set of asset keys that are upstream dependencies, but do not pass an input to the asset.\n key (Optional[CoeercibleToAssetKey]): The key for this asset. If provided, cannot specify key_prefix or name.\n\n Examples:\n .. code-block:: python\n\n @asset\n def my_asset(my_upstream_asset: int) -> int:\n return my_upstream_asset + 1\n """\n\n def create_asset():\n upstream_asset_deps = _deps_and_non_argument_deps_to_asset_deps(\n deps=deps, non_argument_deps=non_argument_deps\n )\n\n return _Asset(\n name=cast(Optional[str], name), # (mypy bug that it can't infer name is Optional[str])\n key_prefix=key_prefix,\n ins=ins,\n deps=upstream_asset_deps,\n metadata=metadata,\n description=description,\n config_schema=config_schema,\n required_resource_keys=required_resource_keys,\n resource_defs=resource_defs,\n io_manager_key=io_manager_key,\n io_manager_def=io_manager_def,\n compute_kind=check.opt_str_param(compute_kind, "compute_kind"),\n dagster_type=dagster_type,\n partitions_def=partitions_def,\n op_tags=op_tags,\n group_name=group_name,\n output_required=output_required,\n freshness_policy=freshness_policy,\n auto_materialize_policy=auto_materialize_policy,\n backfill_policy=backfill_policy,\n retry_policy=retry_policy,\n code_version=code_version,\n check_specs=check_specs,\n key=key,\n )\n\n if compute_fn is not None:\n return create_asset()(compute_fn)\n\n def inner(fn: Callable[..., Any]) -> AssetsDefinition:\n check.invariant(\n not (io_manager_key and io_manager_def),\n "Both io_manager_key and io_manager_def were provided to `@asset` decorator. Please"\n " provide one or the other. ",\n )\n return create_asset()(fn)\n\n return inner
\n\n\ndef _resolve_key_and_name(\n *,\n key: Optional[CoercibleToAssetKey],\n key_prefix: Optional[CoercibleToAssetKeyPrefix],\n name: Optional[str],\n decorator: str,\n fn: Callable[..., Any],\n) -> Tuple[AssetKey, str]:\n if (name or key_prefix) and key:\n raise DagsterInvalidDefinitionError(\n f"Cannot specify a name or key prefix for {decorator} when the key"\n " argument is provided."\n )\n key_prefix_list = [key_prefix] if isinstance(key_prefix, str) else key_prefix\n key = AssetKey.from_coercible(key) if key else None\n assigned_name = name or fn.__name__\n return (\n (\n # the filter here appears unnecessary per typing, but this exists\n # historically so keeping it here to be conservative in case users\n # can get Nones into the key_prefix_list somehow\n AssetKey(list(filter(None, [*(key_prefix_list or []), assigned_name])))\n if not key\n else key\n ),\n assigned_name,\n )\n\n\nclass _Asset:\n def __init__(\n self,\n name: Optional[str] = None,\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n ins: Optional[Mapping[str, AssetIn]] = None,\n deps: Optional[Iterable[AssetDep]] = None,\n metadata: Optional[ArbitraryMetadataMapping] = None,\n description: Optional[str] = None,\n config_schema: Optional[UserConfigSchema] = None,\n required_resource_keys: Optional[Set[str]] = None,\n resource_defs: Optional[Mapping[str, object]] = None,\n io_manager_key: Optional[str] = None,\n io_manager_def: Optional[object] = None,\n compute_kind: Optional[str] = None,\n dagster_type: Optional[DagsterType] = None,\n partitions_def: Optional[PartitionsDefinition] = None,\n op_tags: Optional[Mapping[str, Any]] = None,\n group_name: Optional[str] = None,\n output_required: bool = True,\n freshness_policy: Optional[FreshnessPolicy] = None,\n auto_materialize_policy: Optional[AutoMaterializePolicy] = None,\n backfill_policy: Optional[BackfillPolicy] = None,\n retry_policy: Optional[RetryPolicy] = None,\n code_version: Optional[str] = None,\n key: Optional[CoercibleToAssetKey] = None,\n check_specs: Optional[Sequence[AssetCheckSpec]] = None,\n ):\n self.name = name\n self.key_prefix = key_prefix\n self.ins = ins or {}\n self.deps = deps or []\n self.metadata = metadata\n self.description = description\n self.required_resource_keys = check.opt_set_param(\n required_resource_keys, "required_resource_keys"\n )\n self.io_manager_key = io_manager_key\n self.io_manager_def = io_manager_def\n self.config_schema = config_schema\n self.compute_kind = compute_kind\n self.dagster_type = dagster_type\n self.partitions_def = partitions_def\n self.op_tags = op_tags\n self.resource_defs = dict(check.opt_mapping_param(resource_defs, "resource_defs"))\n self.group_name = group_name\n self.output_required = output_required\n self.freshness_policy = freshness_policy\n self.retry_policy = retry_policy\n self.auto_materialize_policy = auto_materialize_policy\n self.backfill_policy = backfill_policy\n self.code_version = code_version\n self.check_specs = check_specs\n self.key = key\n\n def __call__(self, fn: Callable) -> AssetsDefinition:\n from dagster._config.pythonic_config import (\n validate_resource_annotated_function,\n )\n from dagster._core.execution.build_resources import wrap_resources_for_execution\n\n validate_resource_annotated_function(fn)\n\n asset_ins = build_asset_ins(fn, self.ins or {}, {dep.asset_key for dep in self.deps})\n\n out_asset_key, asset_name = _resolve_key_and_name(\n key=self.key,\n key_prefix=self.key_prefix,\n name=self.name,\n fn=fn,\n decorator="@asset",\n )\n\n with 
disable_dagster_warnings():\n arg_resource_keys = {arg.name for arg in get_resource_args(fn)}\n\n bare_required_resource_keys = set(self.required_resource_keys)\n\n resource_defs_dict = self.resource_defs\n resource_defs_keys = set(resource_defs_dict.keys())\n decorator_resource_keys = bare_required_resource_keys | resource_defs_keys\n\n io_manager_key = self.io_manager_key\n if self.io_manager_def:\n if not io_manager_key:\n io_manager_key = out_asset_key.to_python_identifier("io_manager")\n\n if (\n io_manager_key in self.resource_defs\n and self.resource_defs[io_manager_key] != self.io_manager_def\n ):\n raise DagsterInvalidDefinitionError(\n f"Provided conflicting definitions for io manager key '{io_manager_key}'."\n " Please provide only one definition per key."\n )\n\n resource_defs_dict[io_manager_key] = self.io_manager_def\n\n wrapped_resource_defs = wrap_resources_for_execution(resource_defs_dict)\n\n check.param_invariant(\n len(bare_required_resource_keys) == 0 or len(arg_resource_keys) == 0,\n "Cannot specify resource requirements in both @asset decorator and as arguments"\n " to the decorated function",\n )\n\n io_manager_key = cast(str, io_manager_key) if io_manager_key else DEFAULT_IO_MANAGER_KEY\n\n out = Out(\n metadata=self.metadata or {},\n io_manager_key=io_manager_key,\n dagster_type=self.dagster_type if self.dagster_type else NoValueSentinel,\n description=self.description,\n is_required=self.output_required,\n code_version=self.code_version,\n )\n\n check_specs_by_output_name = _validate_and_assign_output_names_to_check_specs(\n self.check_specs, [out_asset_key]\n )\n check_outs: Mapping[str, Out] = {\n output_name: Out(dagster_type=None)\n for output_name in check_specs_by_output_name.keys()\n }\n\n op_required_resource_keys = decorator_resource_keys - arg_resource_keys\n\n op = _Op(\n name=out_asset_key.to_python_identifier(),\n description=self.description,\n ins=dict(asset_ins.values()),\n out={DEFAULT_OUTPUT: out, **check_outs},\n # Any resource requirements specified as arguments will be identified as\n # part of the Op definition instantiation\n required_resource_keys=op_required_resource_keys,\n tags={\n **({"kind": self.compute_kind} if self.compute_kind else {}),\n **(self.op_tags or {}),\n },\n config_schema=self.config_schema,\n retry_policy=self.retry_policy,\n code_version=self.code_version,\n )(fn)\n\n # check backfill policy is BackfillPolicyType.SINGLE_RUN for non-partitioned asset\n if self.partitions_def is None:\n check.param_invariant(\n (\n self.backfill_policy.policy_type is BackfillPolicyType.SINGLE_RUN\n if self.backfill_policy\n else True\n ),\n "backfill_policy",\n "Non partitioned asset can only have single run backfill policy",\n )\n\n keys_by_input_name = {\n input_name: asset_key for asset_key, (input_name, _) in asset_ins.items()\n }\n partition_mappings = {\n keys_by_input_name[input_name]: asset_in.partition_mapping\n for input_name, asset_in in self.ins.items()\n if asset_in.partition_mapping is not None\n }\n\n partition_mappings = _get_partition_mappings_from_deps(\n partition_mappings=partition_mappings, deps=self.deps, asset_name=asset_name\n )\n\n return AssetsDefinition.dagster_internal_init(\n keys_by_input_name=keys_by_input_name,\n keys_by_output_name={"result": out_asset_key},\n node_def=op,\n partitions_def=self.partitions_def,\n partition_mappings=partition_mappings if partition_mappings else None,\n resource_defs=wrapped_resource_defs,\n group_names_by_key={out_asset_key: self.group_name} if self.group_name else None,\n 
freshness_policies_by_key=(\n {out_asset_key: self.freshness_policy} if self.freshness_policy else None\n ),\n auto_materialize_policies_by_key=(\n {out_asset_key: self.auto_materialize_policy}\n if self.auto_materialize_policy\n else None\n ),\n backfill_policy=self.backfill_policy,\n asset_deps=None, # no asset deps in single-asset decorator\n selected_asset_keys=None, # no subselection in decorator\n can_subset=False,\n metadata_by_key={out_asset_key: self.metadata} if self.metadata else None,\n # see comment in @multi_asset's call to dagster_internal_init for the gory details\n # this is best understood as an _override_ which @asset does not support\n descriptions_by_key=None,\n check_specs_by_output_name=check_specs_by_output_name,\n selected_asset_check_keys=None, # no subselection in decorator\n )\n\n\n
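Beyond the single-argument example in the docstring, the `deps`, `key_prefix`, and `group_name` parameters described above can be combined as in the following minimal sketch; the asset key "raw_events" and the other names are hypothetical.

.. code-block:: python

    # Minimal sketch: a non-argument dependency plus key prefix and group name.
    from dagster import AssetKey, asset

    @asset(
        deps=[AssetKey("raw_events")],
        key_prefix=["analytics"],
        group_name="reporting",
        compute_kind="python",
    )
    def event_counts() -> None:
        # the dependency is declared for lineage and scheduling only; loading
        # the upstream data is handled inside the function body
        ...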
[docs]@experimental_param(param="resource_defs")\n@deprecated_param(\n param="non_argument_deps", breaking_version="2.0.0", additional_warn_text="use `deps` instead."\n)\ndef multi_asset(\n *,\n outs: Optional[Mapping[str, AssetOut]] = None,\n name: Optional[str] = None,\n ins: Optional[Mapping[str, AssetIn]] = None,\n deps: Optional[Iterable[CoercibleToAssetDep]] = None,\n description: Optional[str] = None,\n config_schema: Optional[UserConfigSchema] = None,\n required_resource_keys: Optional[Set[str]] = None,\n compute_kind: Optional[str] = None,\n internal_asset_deps: Optional[Mapping[str, Set[AssetKey]]] = None,\n partitions_def: Optional[PartitionsDefinition] = None,\n backfill_policy: Optional[BackfillPolicy] = None,\n op_tags: Optional[Mapping[str, Any]] = None,\n can_subset: bool = False,\n resource_defs: Optional[Mapping[str, object]] = None,\n group_name: Optional[str] = None,\n retry_policy: Optional[RetryPolicy] = None,\n code_version: Optional[str] = None,\n specs: Optional[Sequence[AssetSpec]] = None,\n check_specs: Optional[Sequence[AssetCheckSpec]] = None,\n # deprecated\n non_argument_deps: Optional[Union[Set[AssetKey], Set[str]]] = None,\n) -> Callable[[Callable[..., Any]], AssetsDefinition]:\n """Create a combined definition of multiple assets that are computed using the same op and same\n upstream assets.\n\n Each argument to the decorated function references an upstream asset that this asset depends on.\n The name of the argument designates the name of the upstream asset.\n\n You can set I/O managers keys, auto-materialize policies, freshness policies, group names, etc.\n on an individual asset within the multi-asset by attaching them to the :py:class:`AssetOut`\n corresponding to that asset in the `outs` parameter.\n\n Args:\n name (Optional[str]): The name of the op.\n outs: (Optional[Dict[str, AssetOut]]): The AssetOuts representing the assets materialized by\n this function. AssetOuts detail the output, IO management, and core asset properties.\n This argument is required except when AssetSpecs are used.\n ins (Optional[Mapping[str, AssetIn]]): A dictionary that maps input names to information\n about the input.\n deps (Optional[Sequence[Union[AssetsDefinition, SourceAsset, AssetKey, str]]]):\n The assets that are upstream dependencies, but do not correspond to a parameter of the\n decorated function. If the AssetsDefinition for a multi_asset is provided, dependencies on\n all assets created by the multi_asset will be created.\n config_schema (Optional[ConfigSchema): The configuration schema for the asset's underlying\n op. If set, Dagster will check that config provided for the op matches this schema and fail\n if it does not. If not set, Dagster will accept any config provided for the op.\n required_resource_keys (Optional[Set[str]]): Set of resource handles required by the underlying op.\n compute_kind (Optional[str]): A string to represent the kind of computation that produces\n the asset, e.g. "dbt" or "spark". It will be displayed in the Dagster UI as a badge on the asset.\n internal_asset_deps (Optional[Mapping[str, Set[AssetKey]]]): By default, it is assumed\n that all assets produced by a multi_asset depend on all assets that are consumed by that\n multi asset. If this default is not correct, you pass in a map of output names to a\n corrected set of AssetKeys that they depend on. 
Any AssetKeys in this list must be either\n used as input to the asset or produced within the op.\n partitions_def (Optional[PartitionsDefinition]): Defines the set of partition keys that\n compose the assets.\n backfill_policy (Optional[BackfillPolicy]): The backfill policy for the op that computes the asset.\n op_tags (Optional[Dict[str, Any]]): A dictionary of tags for the op that computes the asset.\n Frameworks may expect and require certain metadata to be attached to a op. Values that\n are not strings will be json encoded and must meet the criteria that\n `json.loads(json.dumps(value)) == value`.\n can_subset (bool): If this asset's computation can emit a subset of the asset\n keys based on the context.selected_assets argument. Defaults to False.\n resource_defs (Optional[Mapping[str, object]]):\n (Experimental) A mapping of resource keys to resources. These resources\n will be initialized during execution, and can be accessed from the\n context within the body of the function.\n group_name (Optional[str]): A string name used to organize multiple assets into groups. This\n group name will be applied to all assets produced by this multi_asset.\n retry_policy (Optional[RetryPolicy]): The retry policy for the op that computes the asset.\n code_version (Optional[str]): (Experimental) Version of the code encapsulated by the multi-asset. If set,\n this is used as a default code version for all defined assets.\n specs (Optional[Sequence[AssetSpec]]): (Experimental) The specifications for the assets materialized\n by this function.\n check_specs (Optional[Sequence[AssetCheckSpec]]): (Experimental) Specs for asset checks that\n execute in the decorated function after materializing the assets.\n non_argument_deps (Optional[Union[Set[AssetKey], Set[str]]]): Deprecated, use deps instead. Set of asset keys that are upstream\n dependencies, but do not pass an input to the multi_asset.\n\n Examples:\n .. 
code-block:: python\n\n # Use IO managers to handle I/O:\n @multi_asset(\n outs={\n "my_string_asset": AssetOut(),\n "my_int_asset": AssetOut(),\n }\n )\n def my_function(upstream_asset: int):\n result = upstream_asset + 1\n return str(result), result\n\n # Handle I/O on your own:\n @multi_asset(\n outs={\n "asset1": AssetOut(),\n "asset2": AssetOut(),\n },\n deps=["asset0"],\n )\n def my_function():\n asset0_value = load(path="asset0")\n asset1_result, asset2_result = do_some_transformation(asset0_value)\n write(asset1_result, path="asset1")\n write(asset2_result, path="asset2")\n return None, None\n """\n from dagster._core.execution.build_resources import wrap_resources_for_execution\n\n specs = check.opt_list_param(specs, "specs", of_type=AssetSpec)\n\n upstream_asset_deps = _deps_and_non_argument_deps_to_asset_deps(\n deps=deps, non_argument_deps=non_argument_deps\n )\n\n asset_deps = check.opt_mapping_param(\n internal_asset_deps, "internal_asset_deps", key_type=str, value_type=set\n )\n required_resource_keys = check.opt_set_param(\n required_resource_keys, "required_resource_keys", of_type=str\n )\n resource_defs = wrap_resources_for_execution(\n check.opt_mapping_param(resource_defs, "resource_defs", key_type=str)\n )\n\n _config_schema = check.opt_mapping_param(\n config_schema, # type: ignore\n "config_schema",\n additional_message="Only dicts are supported for asset config_schema.",\n )\n\n bare_required_resource_keys = set(required_resource_keys)\n resource_defs_keys = set(resource_defs.keys())\n required_resource_keys = bare_required_resource_keys | resource_defs_keys\n\n asset_out_map: Mapping[str, AssetOut] = {} if outs is None else outs\n\n def inner(fn: Callable[..., Any]) -> AssetsDefinition:\n op_name = name or fn.__name__\n\n if asset_out_map and specs:\n raise DagsterInvalidDefinitionError("Must specify only outs or specs but not both.")\n elif specs:\n output_tuples_by_asset_key = {}\n for asset_spec in specs:\n # output names are asset keys joined with _\n output_name = "_".join(asset_spec.key.path)\n output_tuples_by_asset_key[asset_spec.key] = (\n output_name,\n Out(\n Nothing,\n is_required=not (can_subset or asset_spec.skippable),\n description=asset_spec.description,\n ),\n )\n if upstream_asset_deps:\n raise DagsterInvalidDefinitionError(\n "Can not pass deps and specs to @multi_asset, specify deps on the AssetSpecs"\n " directly."\n )\n if internal_asset_deps:\n raise DagsterInvalidDefinitionError(\n "Can not pass internal_asset_deps and specs to @multi_asset, specify deps on"\n " the AssetSpecs directly."\n )\n\n upstream_keys = set()\n for spec in specs:\n for dep in spec.deps:\n if dep.asset_key not in output_tuples_by_asset_key:\n upstream_keys.add(dep.asset_key)\n if (\n dep.asset_key in output_tuples_by_asset_key\n and dep.partition_mapping is not None\n ):\n # self-dependent asset also needs to be considered an upstream_key\n upstream_keys.add(dep.asset_key)\n\n explicit_ins = ins or {}\n # get which asset keys have inputs set\n loaded_upstreams = build_asset_ins(fn, explicit_ins, deps=set())\n unexpected_upstreams = {\n key for key in loaded_upstreams.keys() if key not in upstream_keys\n }\n if unexpected_upstreams:\n raise DagsterInvalidDefinitionError(\n f"Asset inputs {unexpected_upstreams} do not have dependencies on the passed"\n " AssetSpec(s). 
Set the deps on the appropriate AssetSpec(s)."\n )\n remaining_upstream_keys = {key for key in upstream_keys if key not in loaded_upstreams}\n asset_ins = build_asset_ins(fn, explicit_ins, deps=remaining_upstream_keys)\n else:\n asset_ins = build_asset_ins(\n fn,\n ins or {},\n deps=(\n {dep.asset_key for dep in upstream_asset_deps} if upstream_asset_deps else set()\n ),\n )\n output_tuples_by_asset_key = build_asset_outs(asset_out_map)\n # validate that the asset_deps make sense\n valid_asset_deps = set(asset_ins.keys()) | set(output_tuples_by_asset_key.keys())\n for out_name, asset_keys in asset_deps.items():\n if asset_out_map and out_name not in asset_out_map:\n check.failed(\n f"Invalid out key '{out_name}' supplied to `internal_asset_deps` argument"\n f" for multi-asset {op_name}. Must be one of the outs for this multi-asset"\n f" {list(asset_out_map.keys())[:20]}.",\n )\n invalid_asset_deps = asset_keys.difference(valid_asset_deps)\n check.invariant(\n not invalid_asset_deps,\n f"Invalid asset dependencies: {invalid_asset_deps} specified in"\n f" `internal_asset_deps` argument for multi-asset '{op_name}' on key"\n f" '{out_name}'. Each specified asset key must be associated with an input to"\n " the asset or produced by this asset. Valid keys:"\n f" {list(valid_asset_deps)[:20]}",\n )\n\n arg_resource_keys = {arg.name for arg in get_resource_args(fn)}\n check.param_invariant(\n len(bare_required_resource_keys or []) == 0 or len(arg_resource_keys) == 0,\n "Cannot specify resource requirements in both @multi_asset decorator and as"\n " arguments to the decorated function",\n )\n\n asset_outs_by_output_name: Mapping[str, Out] = dict(output_tuples_by_asset_key.values())\n\n check_specs_by_output_name = _validate_and_assign_output_names_to_check_specs(\n check_specs, list(output_tuples_by_asset_key.keys())\n )\n check_outs_by_output_name: Mapping[str, Out] = {\n output_name: Out(dagster_type=None, is_required=not can_subset)\n for output_name in check_specs_by_output_name.keys()\n }\n overlapping_output_names = (\n asset_outs_by_output_name.keys() & check_outs_by_output_name.keys()\n )\n check.invariant(\n len(overlapping_output_names) == 0,\n f"Check output names overlap with asset output names: {overlapping_output_names}",\n )\n combined_outs_by_output_name: Mapping[str, Out] = {\n **asset_outs_by_output_name,\n **check_outs_by_output_name,\n }\n\n with disable_dagster_warnings():\n op_required_resource_keys = required_resource_keys - arg_resource_keys\n\n op = _Op(\n name=op_name,\n description=description,\n ins=dict(asset_ins.values()),\n out=combined_outs_by_output_name,\n required_resource_keys=op_required_resource_keys,\n tags={\n **({"kind": compute_kind} if compute_kind else {}),\n **(op_tags or {}),\n },\n config_schema=_config_schema,\n retry_policy=retry_policy,\n code_version=code_version,\n )(fn)\n\n keys_by_input_name = {\n input_name: asset_key for asset_key, (input_name, _) in asset_ins.items()\n }\n keys_by_output_name = {\n output_name: asset_key\n for asset_key, (output_name, _) in output_tuples_by_asset_key.items()\n }\n partition_mappings = {\n keys_by_input_name[input_name]: asset_in.partition_mapping\n for input_name, asset_in in (ins or {}).items()\n if asset_in.partition_mapping is not None\n }\n\n if upstream_asset_deps:\n partition_mappings = _get_partition_mappings_from_deps(\n partition_mappings=partition_mappings, deps=upstream_asset_deps, asset_name=op_name\n )\n\n if specs:\n internal_deps = {\n spec.key: {dep.asset_key for dep in spec.deps}\n for spec 
in specs\n if spec.deps is not None\n }\n props_by_asset_key: Mapping[AssetKey, Union[AssetSpec, AssetOut]] = {\n spec.key: spec for spec in specs\n }\n # Add PartitionMappings specified via AssetSpec.deps to partition_mappings dictionary. Error on duplicates\n for spec in specs:\n for dep in spec.deps:\n if dep.partition_mapping is None:\n continue\n if partition_mappings.get(dep.asset_key, None) is None:\n partition_mappings[dep.asset_key] = dep.partition_mapping\n continue\n if partition_mappings[dep.asset_key] == dep.partition_mapping:\n continue\n else:\n raise DagsterInvalidDefinitionError(\n f"Two different PartitionMappings for {dep.asset_key} provided for"\n f" multi_asset {op_name}. Please use the same PartitionMapping for"\n f" {dep.asset_key}."\n )\n\n else:\n internal_deps = {keys_by_output_name[name]: asset_deps[name] for name in asset_deps}\n props_by_asset_key = {\n keys_by_output_name[output_name]: asset_out\n for output_name, asset_out in asset_out_map.items()\n }\n\n # handle properties defined ons AssetSpecs or AssetOuts\n group_names_by_key = {\n asset_key: props.group_name\n for asset_key, props in props_by_asset_key.items()\n if props.group_name is not None\n }\n if group_name:\n check.invariant(\n not group_names_by_key,\n "Cannot set group_name parameter on multi_asset if one or more of the"\n " AssetSpecs/AssetOuts supplied to this multi_asset have a group_name defined.",\n )\n group_names_by_key = {asset_key: group_name for asset_key in props_by_asset_key}\n\n freshness_policies_by_key = {\n asset_key: props.freshness_policy\n for asset_key, props in props_by_asset_key.items()\n if props.freshness_policy is not None\n }\n auto_materialize_policies_by_key = {\n asset_key: props.auto_materialize_policy\n for asset_key, props in props_by_asset_key.items()\n if props.auto_materialize_policy is not None\n }\n metadata_by_key = {\n asset_key: props.metadata\n for asset_key, props in props_by_asset_key.items()\n if props.metadata is not None\n }\n\n return AssetsDefinition.dagster_internal_init(\n keys_by_input_name=keys_by_input_name,\n keys_by_output_name=keys_by_output_name,\n node_def=op,\n asset_deps=internal_deps,\n partitions_def=partitions_def,\n partition_mappings=partition_mappings if partition_mappings else None,\n can_subset=can_subset,\n resource_defs=resource_defs,\n group_names_by_key=group_names_by_key,\n freshness_policies_by_key=freshness_policies_by_key,\n auto_materialize_policies_by_key=auto_materialize_policies_by_key,\n backfill_policy=backfill_policy,\n selected_asset_keys=None, # no subselection in decorator\n # descriptions by key is more accurately understood as _overriding_ the descriptions\n # by key that are in the OutputDefinitions associated with the asset key.\n # This is a dangerous construction liable for bugs. Instead there should be a\n # canonical source of asset descriptions in AssetsDefinintion and if we need\n # to create a memoized cached dictionary of asset keys for perf or something we do\n # that in the `__init__` or on demand.\n #\n # This is actually an override. We do not override descriptions\n # in OutputDefinitions in @multi_asset\n descriptions_by_key=None,\n metadata_by_key=metadata_by_key,\n check_specs_by_output_name=check_specs_by_output_name,\n selected_asset_check_keys=None, # no subselection in decorator\n )\n\n return inner
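# --- Illustrative sketch (not part of this module) ---
# Shows how `internal_asset_deps` narrows the default "every output depends on every
# input" assumption described in the @multi_asset docstring above. The asset and output
# names ("upstream_asset", "my_string_asset", "my_int_asset") are hypothetical.
from dagster import AssetKey, AssetOut, multi_asset


@multi_asset(
    outs={
        "my_string_asset": AssetOut(),
        "my_int_asset": AssetOut(),
    },
    internal_asset_deps={
        # my_string_asset depends only on my_int_asset, which is produced within this op
        "my_string_asset": {AssetKey("my_int_asset")},
        # my_int_asset depends only on the upstream input
        "my_int_asset": {AssetKey("upstream_asset")},
    },
)
def my_function(upstream_asset: int):
    result = upstream_asset + 1
    return str(result), result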
\n\n\ndef get_function_params_without_context_or_config_or_resources(fn: Callable) -> List[Parameter]:\n params = get_function_params(fn)\n is_context_provided = len(params) > 0 and params[0].name in get_valid_name_permutations(\n "context"\n )\n input_params = params[1:] if is_context_provided else params\n\n resource_arg_names = {arg.name for arg in get_resource_args(fn)}\n\n new_input_args = []\n for input_arg in input_params:\n if input_arg.name != "config" and input_arg.name not in resource_arg_names:\n new_input_args.append(input_arg)\n\n return new_input_args\n\n\ndef stringify_asset_key_to_input_name(asset_key: AssetKey) -> str:\n return "_".join(asset_key.path).replace("-", "_")\n\n\ndef build_asset_ins(\n fn: Callable,\n asset_ins: Mapping[str, AssetIn],\n deps: Optional[AbstractSet[AssetKey]],\n) -> Mapping[AssetKey, Tuple[str, In]]:\n """Creates a mapping from AssetKey to (name of input, In object)."""\n deps = check.opt_set_param(deps, "deps", AssetKey)\n\n new_input_args = get_function_params_without_context_or_config_or_resources(fn)\n\n non_var_input_param_names = [\n param.name for param in new_input_args if param.kind == Parameter.POSITIONAL_OR_KEYWORD\n ]\n has_kwargs = any(param.kind == Parameter.VAR_KEYWORD for param in new_input_args)\n\n all_input_names = set(non_var_input_param_names) | asset_ins.keys()\n\n if not has_kwargs:\n for in_key, asset_in in asset_ins.items():\n if in_key not in non_var_input_param_names and (\n not isinstance(asset_in.dagster_type, DagsterType)\n or not asset_in.dagster_type.is_nothing\n ):\n raise DagsterInvalidDefinitionError(\n f"Key '{in_key}' in provided ins dict does not correspond to any of the names "\n "of the arguments to the decorated function"\n )\n\n ins_by_asset_key: Dict[AssetKey, Tuple[str, In]] = {}\n for input_name in all_input_names:\n asset_key = None\n\n if input_name in asset_ins:\n asset_key = asset_ins[input_name].key\n metadata = asset_ins[input_name].metadata or {}\n key_prefix = asset_ins[input_name].key_prefix\n input_manager_key = asset_ins[input_name].input_manager_key\n dagster_type = asset_ins[input_name].dagster_type\n else:\n metadata = {}\n key_prefix = None\n input_manager_key = None\n dagster_type = NoValueSentinel\n\n asset_key = asset_key or AssetKey(list(filter(None, [*(key_prefix or []), input_name])))\n\n ins_by_asset_key[asset_key] = (\n input_name.replace("-", "_"),\n In(metadata=metadata, input_manager_key=input_manager_key, dagster_type=dagster_type),\n )\n\n for asset_key in deps:\n if asset_key in ins_by_asset_key:\n raise DagsterInvalidDefinitionError(\n f"deps value {asset_key} also declared as input/AssetIn"\n )\n # mypy doesn't realize that Nothing is a valid type here\n ins_by_asset_key[asset_key] = (\n stringify_asset_key_to_input_name(asset_key),\n In(cast(type, Nothing)),\n )\n\n return ins_by_asset_key\n\n\n@overload\ndef graph_asset(\n compose_fn: Callable,\n) -> AssetsDefinition: ...\n\n\n@overload\ndef graph_asset(\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n ins: Optional[Mapping[str, AssetIn]] = None,\n config: Optional[Union[ConfigMapping, Mapping[str, Any]]] = None,\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n group_name: Optional[str] = None,\n partitions_def: Optional[PartitionsDefinition] = None,\n metadata: Optional[MetadataUserInput] = ...,\n freshness_policy: Optional[FreshnessPolicy] = ...,\n auto_materialize_policy: Optional[AutoMaterializePolicy] = ...,\n backfill_policy: Optional[BackfillPolicy] = ...,\n resource_defs: 
Optional[Mapping[str, ResourceDefinition]] = ...,\n check_specs: Optional[Sequence[AssetCheckSpec]] = None,\n key: Optional[CoercibleToAssetKey] = None,\n) -> Callable[[Callable[..., Any]], AssetsDefinition]: ...\n\n\n
[docs]def graph_asset(\n compose_fn: Optional[Callable] = None,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n ins: Optional[Mapping[str, AssetIn]] = None,\n config: Optional[Union[ConfigMapping, Mapping[str, Any]]] = None,\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n group_name: Optional[str] = None,\n partitions_def: Optional[PartitionsDefinition] = None,\n metadata: Optional[MetadataUserInput] = None,\n freshness_policy: Optional[FreshnessPolicy] = None,\n auto_materialize_policy: Optional[AutoMaterializePolicy] = None,\n backfill_policy: Optional[BackfillPolicy] = None,\n resource_defs: Optional[Mapping[str, ResourceDefinition]] = None,\n check_specs: Optional[Sequence[AssetCheckSpec]] = None,\n key: Optional[CoercibleToAssetKey] = None,\n) -> Union[AssetsDefinition, Callable[[Callable[..., Any]], AssetsDefinition]]:\n """Creates a software-defined asset that's computed using a graph of ops.\n\n This decorator is meant to decorate a function that composes a set of ops or graphs to define\n the dependencies between them.\n\n Args:\n name (Optional[str]): The name of the asset. If not provided, defaults to the name of the\n decorated function. The asset's name must be a valid name in Dagster (ie only contains\n letters, numbers, and underscores) and may not contain Python reserved keywords.\n description (Optional[str]):\n A human-readable description of the asset.\n ins (Optional[Mapping[str, AssetIn]]): A dictionary that maps input names to information\n about the input.\n config (Optional[Union[ConfigMapping], Mapping[str, Any]):\n Describes how the graph underlying the asset is configured at runtime.\n\n If a :py:class:`ConfigMapping` object is provided, then the graph takes on the config\n schema of this object. The mapping will be applied at runtime to generate the config for\n the graph's constituent nodes.\n\n If a dictionary is provided, then it will be used as the default run config for the\n graph. This means it must conform to the config schema of the underlying nodes. Note\n that the values provided will be viewable and editable in the Dagster UI, so be careful\n with secrets. its constituent nodes.\n\n If no value is provided, then the config schema for the graph is the default (derived\n from the underlying nodes).\n key_prefix (Optional[Union[str, Sequence[str]]]): If provided, the asset's key is the\n concatenation of the key_prefix and the asset's name, which defaults to the name of\n the decorated function. Each item in key_prefix must be a valid name in Dagster (ie only\n contains letters, numbers, and underscores) and may not contain Python reserved keywords.\n group_name (Optional[str]): A string name used to organize multiple assets into groups. If\n not provided, the name "default" is used.\n partitions_def (Optional[PartitionsDefinition]): Defines the set of partition keys that\n compose the asset.\n metadata (Optional[MetadataUserInput]): Dictionary of metadata to be associated with\n the asset.\n freshness_policy (Optional[FreshnessPolicy]): A constraint telling Dagster how often this asset is\n intended to be updated with respect to its root data.\n auto_materialize_policy (Optional[AutoMaterializePolicy]): The AutoMaterializePolicy to use\n for this asset.\n backfill_policy (Optional[BackfillPolicy]): The BackfillPolicy to use for this asset.\n key (Optional[CoeercibleToAssetKey]): The key for this asset. If provided, cannot specify key_prefix or name.\n\n Examples:\n .. 
code-block:: python\n\n @op\n def fetch_files_from_slack(context) -> pd.DataFrame:\n ...\n\n @op\n def store_files_in_table(files) -> None:\n files.to_sql(name="slack_files", con=create_db_connection())\n\n @graph_asset\n def slack_files_table():\n return store_files(fetch_files_from_slack())\n """\n if compose_fn is None:\n return lambda fn: graph_asset( # type: ignore # (decorator pattern)\n fn,\n name=name,\n description=description,\n ins=ins,\n config=config,\n key_prefix=key_prefix,\n group_name=group_name,\n partitions_def=partitions_def,\n metadata=metadata,\n freshness_policy=freshness_policy,\n auto_materialize_policy=auto_materialize_policy,\n backfill_policy=backfill_policy,\n resource_defs=resource_defs,\n check_specs=check_specs,\n key=key,\n )\n else:\n return graph_asset_no_defaults(\n compose_fn=compose_fn,\n name=name,\n description=description,\n ins=ins,\n config=config,\n key_prefix=key_prefix,\n group_name=group_name,\n partitions_def=partitions_def,\n metadata=metadata,\n freshness_policy=freshness_policy,\n auto_materialize_policy=auto_materialize_policy,\n backfill_policy=backfill_policy,\n resource_defs=resource_defs,\n check_specs=check_specs,\n key=key,\n )
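# --- Illustrative sketch (not part of this module) ---
# A @graph_asset whose key is prefixed and that is placed in a custom group; both
# constituent ops run whenever the asset is materialized. Op and asset names are
# hypothetical.
from dagster import graph_asset, op


@op
def fetch_users():
    return [{"name": "alice"}, {"name": "bob"}]


@op
def count_users(users) -> int:
    return len(users)


# Resulting asset key is ["analytics", "user_count"].
@graph_asset(key_prefix=["analytics"], group_name="metrics")
def user_count():
    return count_users(fetch_users())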
\n\n\ndef graph_asset_no_defaults(\n *,\n compose_fn: Callable,\n name: Optional[str],\n description: Optional[str],\n ins: Optional[Mapping[str, AssetIn]],\n config: Optional[Union[ConfigMapping, Mapping[str, Any]]],\n key_prefix: Optional[CoercibleToAssetKeyPrefix],\n group_name: Optional[str],\n partitions_def: Optional[PartitionsDefinition],\n metadata: Optional[MetadataUserInput],\n freshness_policy: Optional[FreshnessPolicy],\n auto_materialize_policy: Optional[AutoMaterializePolicy],\n backfill_policy: Optional[BackfillPolicy],\n resource_defs: Optional[Mapping[str, ResourceDefinition]],\n check_specs: Optional[Sequence[AssetCheckSpec]],\n key: Optional[CoercibleToAssetKey],\n) -> AssetsDefinition:\n ins = ins or {}\n asset_ins = build_asset_ins(compose_fn, ins or {}, set())\n out_asset_key, _asset_name = _resolve_key_and_name(\n key=key,\n key_prefix=key_prefix,\n name=name,\n decorator="@graph_asset",\n fn=compose_fn,\n )\n\n keys_by_input_name = {input_name: asset_key for asset_key, (input_name, _) in asset_ins.items()}\n partition_mappings = {\n input_name: asset_in.partition_mapping\n for input_name, asset_in in ins.items()\n if asset_in.partition_mapping\n }\n\n check_specs_by_output_name = _validate_and_assign_output_names_to_check_specs(\n check_specs, [out_asset_key]\n )\n check_outs_by_output_name: Mapping[str, GraphOut] = {\n output_name: GraphOut() for output_name in check_specs_by_output_name.keys()\n }\n\n combined_outs_by_output_name: Mapping = {\n "result": GraphOut(),\n **check_outs_by_output_name,\n }\n\n op_graph = graph(\n name=out_asset_key.to_python_identifier(),\n description=description,\n config=config,\n ins={input_name: GraphIn() for _, (input_name, _) in asset_ins.items()},\n out=combined_outs_by_output_name,\n )(compose_fn)\n return AssetsDefinition.from_graph(\n op_graph,\n keys_by_input_name=keys_by_input_name,\n keys_by_output_name={"result": out_asset_key},\n partitions_def=partitions_def,\n partition_mappings=partition_mappings if partition_mappings else None,\n group_name=group_name,\n metadata_by_output_name={"result": metadata} if metadata else None,\n freshness_policies_by_output_name=(\n {"result": freshness_policy} if freshness_policy else None\n ),\n auto_materialize_policies_by_output_name=(\n {"result": auto_materialize_policy} if auto_materialize_policy else None\n ),\n backfill_policy=backfill_policy,\n descriptions_by_output_name={"result": description} if description else None,\n resource_defs=resource_defs,\n check_specs=check_specs,\n )\n\n\n
[docs]def graph_multi_asset(\n *,\n outs: Mapping[str, AssetOut],\n name: Optional[str] = None,\n ins: Optional[Mapping[str, AssetIn]] = None,\n partitions_def: Optional[PartitionsDefinition] = None,\n backfill_policy: Optional[BackfillPolicy] = None,\n group_name: Optional[str] = None,\n can_subset: bool = False,\n resource_defs: Optional[Mapping[str, ResourceDefinition]] = None,\n check_specs: Optional[Sequence[AssetCheckSpec]] = None,\n) -> Callable[[Callable[..., Any]], AssetsDefinition]:\n """Create a combined definition of multiple assets that are computed using the same graph of\n ops, and the same upstream assets.\n\n Each argument to the decorated function references an upstream asset that this asset depends on.\n The name of the argument designates the name of the upstream asset.\n\n Args:\n name (Optional[str]): The name of the graph.\n outs: (Optional[Dict[str, AssetOut]]): The AssetOuts representing the produced assets.\n ins (Optional[Mapping[str, AssetIn]]): A dictionary that maps input names to information\n about the input.\n partitions_def (Optional[PartitionsDefinition]): Defines the set of partition keys that\n compose the assets.\n backfill_policy (Optional[BackfillPolicy]): The backfill policy for the asset.\n group_name (Optional[str]): A string name used to organize multiple assets into groups. This\n group name will be applied to all assets produced by this multi_asset.\n can_subset (bool): Whether this asset's computation can emit a subset of the asset\n keys based on the context.selected_assets argument. Defaults to False.\n """\n\n def inner(fn: Callable) -> AssetsDefinition:\n partition_mappings = {\n input_name: asset_in.partition_mapping\n for input_name, asset_in in (ins or {}).items()\n if asset_in.partition_mapping\n }\n\n asset_ins = build_asset_ins(fn, ins or {}, set())\n keys_by_input_name = {\n input_name: asset_key for asset_key, (input_name, _) in asset_ins.items()\n }\n asset_outs = build_asset_outs(outs)\n\n check_specs_by_output_name = _validate_and_assign_output_names_to_check_specs(\n check_specs, list(asset_outs.keys())\n )\n check_outs_by_output_name: Mapping[str, GraphOut] = {\n output_name: GraphOut() for output_name in check_specs_by_output_name.keys()\n }\n\n combined_outs_by_output_name = {\n **{output_name: GraphOut() for output_name, _ in asset_outs.values()},\n **check_outs_by_output_name,\n }\n\n op_graph = graph(\n name=name or fn.__name__,\n out=combined_outs_by_output_name,\n )(fn)\n\n # source metadata from the AssetOuts (if any)\n metadata_by_output_name = {\n output_name: out.metadata\n for output_name, out in outs.items()\n if isinstance(out, AssetOut) and out.metadata is not None\n }\n\n # source freshness policies from the AssetOuts (if any)\n freshness_policies_by_output_name = {\n output_name: out.freshness_policy\n for output_name, out in outs.items()\n if isinstance(out, AssetOut) and out.freshness_policy is not None\n }\n\n # source auto materialize policies from the AssetOuts (if any)\n auto_materialize_policies_by_output_name = {\n output_name: out.auto_materialize_policy\n for output_name, out in outs.items()\n if isinstance(out, AssetOut) and out.auto_materialize_policy is not None\n }\n\n # source descriptions from the AssetOuts (if any)\n descriptions_by_output_name = {\n output_name: out.description\n for output_name, out in outs.items()\n if isinstance(out, AssetOut) and out.description is not None\n }\n\n return AssetsDefinition.from_graph(\n op_graph,\n keys_by_input_name=keys_by_input_name,\n 
keys_by_output_name={\n output_name: asset_key for asset_key, (output_name, _) in asset_outs.items()\n },\n partitions_def=partitions_def,\n partition_mappings=partition_mappings if partition_mappings else None,\n group_name=group_name,\n can_subset=can_subset,\n metadata_by_output_name=metadata_by_output_name,\n freshness_policies_by_output_name=freshness_policies_by_output_name,\n auto_materialize_policies_by_output_name=auto_materialize_policies_by_output_name,\n backfill_policy=backfill_policy,\n descriptions_by_output_name=descriptions_by_output_name,\n resource_defs=resource_defs,\n check_specs=check_specs,\n )\n\n return inner
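# --- Illustrative sketch (not part of this module) ---
# A @graph_multi_asset that fans one upstream op result out into two assets by returning
# a dictionary keyed by the out names, as the graph composition rules describe. Names are
# hypothetical.
from dagster import AssetOut, graph_multi_asset, op


@op
def load_raw_numbers():
    return [1, 2, 3]


@op
def compute_sum(raw) -> int:
    return sum(raw)


@op
def compute_count(raw) -> int:
    return len(raw)


@graph_multi_asset(
    outs={"numbers_sum": AssetOut(), "numbers_count": AssetOut()},
    group_name="metrics",
)
def number_stats():
    raw = load_raw_numbers()
    # dict keys map each op output to the corresponding AssetOut above
    return {"numbers_sum": compute_sum(raw), "numbers_count": compute_count(raw)}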
\n\n\ndef build_asset_outs(asset_outs: Mapping[str, AssetOut]) -> Mapping[AssetKey, Tuple[str, Out]]:\n """Creates a mapping from AssetKey to (name of output, Out object)."""\n outs_by_asset_key: Dict[AssetKey, Tuple[str, Out]] = {}\n for output_name, asset_out in asset_outs.items():\n out = asset_out.to_out()\n asset_key = asset_out.key or AssetKey(\n list(filter(None, [*(asset_out.key_prefix or []), output_name]))\n )\n\n outs_by_asset_key[asset_key] = (output_name.replace("-", "_"), out)\n\n return outs_by_asset_key\n\n\ndef _deps_and_non_argument_deps_to_asset_deps(\n deps: Optional[Iterable[CoercibleToAssetDep]],\n non_argument_deps: Optional[Union[Set[AssetKey], Set[str]]],\n) -> Optional[Iterable[AssetDep]]:\n """Helper function for managing deps and non_argument_deps while non_argument_deps is still an accepted parameter.\n Ensures only one of deps and non_argument_deps is provided, then converts the deps to AssetDeps.\n """\n if non_argument_deps is not None and deps is not None:\n raise DagsterInvalidDefinitionError(\n "Cannot specify both deps and non_argument_deps to @asset. Use only deps instead."\n )\n\n if deps is not None:\n return _make_asset_deps(deps)\n\n if non_argument_deps is not None:\n check.set_param(non_argument_deps, "non_argument_deps", of_type=(AssetKey, str))\n return _make_asset_deps(non_argument_deps)\n\n\ndef _make_asset_deps(deps: Optional[Iterable[CoercibleToAssetDep]]) -> Optional[Iterable[AssetDep]]:\n if deps is None:\n return None\n\n # expand any multi_assets into a list of keys\n all_deps = []\n for dep in deps:\n if isinstance(dep, AssetsDefinition) and len(dep.keys) > 1:\n all_deps.extend(dep.keys)\n else:\n all_deps.append(dep)\n\n with disable_dagster_warnings():\n dep_dict = {}\n for dep in all_deps:\n asset_dep = AssetDep.from_coercible(dep)\n\n # we cannot do deduplication via a set because MultiPartitionMappings have an internal\n # dictionary that cannot be hashed. Instead deduplicate by making a dictionary and checking\n # for existing keys. If an asset is specified as a dependency more than once, only error if the\n # dependency is different (ie has a different PartitionMapping)\n if (\n asset_dep.asset_key in dep_dict.keys()\n and asset_dep != dep_dict[asset_dep.asset_key]\n ):\n raise DagsterInvariantViolationError(\n f"Cannot set a dependency on asset {asset_dep.asset_key} more than once per"\n " asset."\n )\n dep_dict[asset_dep.asset_key] = asset_dep\n\n return list(dep_dict.values())\n\n\ndef _validate_and_assign_output_names_to_check_specs(\n check_specs: Optional[Sequence[AssetCheckSpec]], valid_asset_keys: Sequence[AssetKey]\n) -> Mapping[str, AssetCheckSpec]:\n check_specs_by_output_name = {spec.get_python_identifier(): spec for spec in check_specs or []}\n if check_specs and len(check_specs_by_output_name) != len(check_specs):\n duplicates = {\n item: count\n for item, count in Counter(\n [(spec.asset_key, spec.name) for spec in check_specs]\n ).items()\n if count > 1\n }\n\n raise DagsterInvalidDefinitionError(f"Duplicate check specs: {duplicates}")\n\n for spec in check_specs_by_output_name.values():\n if spec.asset_key not in valid_asset_keys:\n raise DagsterInvalidDefinitionError(\n f"Invalid asset key {spec.asset_key} in check spec {spec.name}. 
Must be one of"\n f" {valid_asset_keys}"\n )\n\n return check_specs_by_output_name\n\n\ndef _get_partition_mappings_from_deps(\n partition_mappings: Dict[AssetKey, PartitionMapping], deps: Iterable[AssetDep], asset_name: str\n):\n # Add PartitionMappings specified via AssetDeps to partition_mappings dictionary. Error on duplicates\n for dep in deps:\n if dep.partition_mapping is None:\n continue\n if partition_mappings.get(dep.asset_key, None) is None:\n partition_mappings[dep.asset_key] = dep.partition_mapping\n continue\n if partition_mappings[dep.asset_key] == dep.partition_mapping:\n continue\n else:\n raise DagsterInvalidDefinitionError(\n f"Two different PartitionMappings for {dep.asset_key} provided for"\n f" asset {asset_name}. Please use the same PartitionMapping for"\n f" {dep.asset_key}."\n )\n\n return partition_mappings\n
", "current_page_name": "_modules/dagster/_core/definitions/decorators/asset_decorator", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.decorators.asset_decorator"}, "graph_decorator": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.decorators.graph_decorator

\nfrom functools import update_wrapper\nfrom typing import Any, Callable, Mapping, Optional, Sequence, Union, overload\n\nimport dagster._check as check\nfrom dagster._core.decorator_utils import format_docstring_for_description\n\nfrom ..config import ConfigMapping\nfrom ..graph_definition import GraphDefinition\nfrom ..input import GraphIn, InputDefinition\nfrom ..output import GraphOut, OutputDefinition\n\n\nclass _Graph:\n    name: Optional[str]\n    description: Optional[str]\n    input_defs: Sequence[InputDefinition]\n    output_defs: Optional[Sequence[OutputDefinition]]\n    ins: Optional[Mapping[str, GraphIn]]\n    out: Optional[Union[GraphOut, Mapping[str, GraphOut]]]\n    tags: Optional[Mapping[str, str]]\n    config_mapping: Optional[ConfigMapping]\n\n    def __init__(\n        self,\n        name: Optional[str] = None,\n        description: Optional[str] = None,\n        input_defs: Optional[Sequence[InputDefinition]] = None,\n        output_defs: Optional[Sequence[OutputDefinition]] = None,\n        ins: Optional[Mapping[str, GraphIn]] = None,\n        out: Optional[Union[GraphOut, Mapping[str, GraphOut]]] = None,\n        tags: Optional[Mapping[str, Any]] = None,\n        config_mapping: Optional[ConfigMapping] = None,\n    ):\n        self.name = check.opt_str_param(name, "name")\n        self.description = check.opt_str_param(description, "description")\n        self.input_defs = check.opt_sequence_param(\n            input_defs, "input_defs", of_type=InputDefinition\n        )\n        self.did_pass_outputs = output_defs is not None or out is not None\n        self.output_defs = check.opt_nullable_sequence_param(\n            output_defs, "output_defs", of_type=OutputDefinition\n        )\n        self.ins = ins\n        self.out = out\n        self.tags = tags\n        self.config_mapping = check.opt_inst_param(config_mapping, "config_mapping", ConfigMapping)\n\n    def __call__(self, fn: Callable[..., Any]) -> GraphDefinition:\n        check.callable_param(fn, "fn")\n\n        if not self.name:\n            self.name = fn.__name__\n\n        if self.ins is not None:\n            input_defs = [inp.to_definition(name) for name, inp in self.ins.items()]\n        else:\n            input_defs = check.opt_list_param(\n                self.input_defs, "input_defs", of_type=InputDefinition\n            )\n\n        if self.out is None:\n            output_defs = self.output_defs\n        elif isinstance(self.out, GraphOut):\n            output_defs = [self.out.to_definition(name=None)]\n        else:\n            check.dict_param(self.out, "out", key_type=str, value_type=GraphOut)\n            output_defs = [out.to_definition(name=name) for name, out in self.out.items()]\n\n        from dagster._core.definitions.composition import do_composition\n\n        (\n            input_mappings,\n            output_mappings,\n            dependencies,\n            node_defs,\n            config_mapping,\n            positional_inputs,\n            node_input_source_assets,\n        ) = do_composition(\n            decorator_name="@graph",\n            graph_name=self.name,\n            fn=fn,\n            provided_input_defs=input_defs,\n            provided_output_defs=output_defs,\n            ignore_output_from_composition_fn=False,\n            config_mapping=self.config_mapping,\n        )\n\n        graph_def = GraphDefinition(\n            name=self.name,\n            dependencies=dependencies,\n            node_defs=node_defs,\n            description=self.description or 
format_docstring_for_description(fn),\n            input_mappings=input_mappings,\n            output_mappings=output_mappings,\n            config=config_mapping,\n            positional_inputs=positional_inputs,\n            tags=self.tags,\n            node_input_source_assets=node_input_source_assets,\n        )\n        update_wrapper(graph_def, fn)\n        return graph_def\n\n\n@overload\ndef graph(compose_fn: Callable) -> GraphDefinition: ...\n\n\n@overload\ndef graph(\n    *,\n    name: Optional[str] = ...,\n    description: Optional[str] = ...,\n    input_defs: Optional[Sequence[InputDefinition]] = ...,\n    output_defs: Optional[Sequence[OutputDefinition]] = ...,\n    ins: Optional[Mapping[str, GraphIn]] = ...,\n    out: Optional[Union[GraphOut, Mapping[str, GraphOut]]] = ...,\n    tags: Optional[Mapping[str, Any]] = ...,\n    config: Optional[Union[ConfigMapping, Mapping[str, Any]]] = ...,\n) -> _Graph: ...\n\n\n
[docs]def graph(\n compose_fn: Optional[Callable] = None,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n input_defs: Optional[Sequence[InputDefinition]] = None,\n output_defs: Optional[Sequence[OutputDefinition]] = None,\n ins: Optional[Mapping[str, GraphIn]] = None,\n out: Optional[Union[GraphOut, Mapping[str, GraphOut]]] = None,\n tags: Optional[Mapping[str, Any]] = None,\n config: Optional[Union[ConfigMapping, Mapping[str, Any]]] = None,\n) -> Union[GraphDefinition, _Graph]:\n """Create an op graph with the specified parameters from the decorated composition function.\n\n Using this decorator allows you to build up a dependency graph by writing a\n function that invokes ops (or other graphs) and passes the output to subsequent invocations.\n\n Args:\n name (Optional[str]):\n The name of the op graph. Must be unique within any :py:class:`RepositoryDefinition` containing the graph.\n description (Optional[str]):\n A human-readable description of the graph.\n input_defs (Optional[List[InputDefinition]]):\n Information about the inputs that this graph maps. Information provided here\n will be combined with what can be inferred from the function signature, with these\n explicit InputDefinitions taking precedence.\n\n Uses of inputs in the body of the decorated composition function will determine\n the :py:class:`InputMappings <InputMapping>` passed to the underlying\n :py:class:`GraphDefinition`.\n output_defs (Optional[List[OutputDefinition]]):\n Output definitions for the graph. If not provided explicitly, these will be inferred from typehints.\n\n Uses of these outputs in the body of the decorated composition function, as well as the\n return value of the decorated function, will be used to infer the appropriate set of\n :py:class:`OutputMappings <OutputMapping>` for the underlying\n :py:class:`GraphDefinition`.\n\n To map multiple outputs, return a dictionary from the composition function.\n ins (Optional[Dict[str, GraphIn]]):\n Information about the inputs that this graph maps. Information provided here\n will be combined with what can be inferred from the function signature, with these\n explicit GraphIn taking precedence.\n out (Optional[Union[GraphOut, Dict[str, GraphOut]]]):\n Information about the outputs that this graph maps. Information provided here will be\n combined with what can be inferred from the return type signature if the function does\n not use yield.\n\n To map multiple outputs, return a dictionary from the composition function.\n tags (Optional[Dict[str, Any]]): Arbitrary metadata for any execution run of the graph.\n Values that are not strings will be json encoded and must meet the criteria that\n `json.loads(json.dumps(value)) == value`. These tag values may be overwritten by tag\n values provided at invocation time.\n\n config (Optional[Union[ConfigMapping], Mapping[str, Any]):\n Describes how the graph is configured at runtime.\n\n If a :py:class:`ConfigMapping` object is provided, then the graph takes on the config\n schema of this object. The mapping will be applied at runtime to generate the config for\n the graph's constituent nodes.\n\n If a dictionary is provided, then it will be used as the default run config for the\n graph. This means it must conform to the config schema of the underlying nodes. Note\n that the values provided will be viewable and editable in the Dagster UI, so be careful\n with secrets. 
its constituent nodes.\n\n If no value is provided, then the config schema for the graph is the default (derived\n from the underlying nodes).\n """\n if compose_fn is not None:\n check.invariant(description is None)\n return _Graph()(compose_fn)\n\n config_mapping = None\n # Case 1: a dictionary of config is provided, convert to config mapping.\n if config is not None and not isinstance(config, ConfigMapping):\n config = check.dict_param(config, "config", key_type=str)\n config_mapping = ConfigMapping(config_fn=lambda _: config, config_schema=None)\n # Case 2: actual config mapping is provided.\n else:\n config_mapping = config\n\n return _Graph(\n name=name,\n description=description,\n input_defs=input_defs,\n output_defs=output_defs,\n ins=ins,\n out=out,\n tags=tags,\n config_mapping=config_mapping,\n )
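# --- Illustrative sketch (not part of this module) ---
# Composing ops with @graph and binding the graph into an executable job. Op and graph
# names are hypothetical.
from dagster import graph, op


@op
def extract():
    return [1, 2, 3]


@op
def load(rows):
    # stand-in for writing rows somewhere
    print(f"loaded {len(rows)} rows")


@graph
def etl():
    load(extract())


# A graph is not executable on its own; bind it into a job to run it.
etl_job = etl.to_job()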
\n
", "current_page_name": "_modules/dagster/_core/definitions/decorators/graph_decorator", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.decorators.graph_decorator"}, "hook_decorator": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.decorators.hook_decorator

\nfrom functools import update_wrapper\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Callable,\n    Optional,\n    Sequence,\n    Union,\n    cast,\n    overload,\n)\n\nimport dagster._check as check\nfrom dagster._core.errors import DagsterInvalidDefinitionError\n\nfrom ...decorator_utils import get_function_params, validate_expected_params\nfrom ..events import HookExecutionResult\nfrom ..hook_definition import HookDefinition\n\nif TYPE_CHECKING:\n    from dagster._core.events import DagsterEvent\n    from dagster._core.execution.context.hook import HookContext\n\n\ndef _validate_hook_fn_params(fn, expected_positionals):\n    params = get_function_params(fn)\n    missing_positional = validate_expected_params(params, expected_positionals)\n    if missing_positional:\n        raise DagsterInvalidDefinitionError(\n            f"'{fn.__name__}' decorated function does not have required positional "\n            f"parameter '{missing_positional}'. Hook functions should only have keyword arguments "\n            "that match input names and a first positional parameter named 'context' and "\n            "a second positional parameter named 'event_list'."\n        )\n\n\nclass _Hook:\n    def __init__(\n        self,\n        name: Optional[str] = None,\n        required_resource_keys: Optional[AbstractSet[str]] = None,\n        decorated_fn: Optional[Callable[..., Any]] = None,\n    ):\n        self.name = check.opt_str_param(name, "name")\n        self.required_resource_keys = check.opt_set_param(\n            required_resource_keys, "required_resource_keys"\n        )\n        self.decorated_fn = check.opt_callable_param(decorated_fn, "decorated_fn")\n\n    def __call__(self, fn) -> HookDefinition:\n        check.callable_param(fn, "fn")\n\n        if not self.name:\n            self.name = fn.__name__\n\n        expected_positionals = ["context", "event_list"]\n\n        _validate_hook_fn_params(fn, expected_positionals)\n\n        hook_def = HookDefinition(\n            name=self.name or "",\n            hook_fn=fn,\n            required_resource_keys=self.required_resource_keys,\n            decorated_fn=self.decorated_fn or fn,\n        )\n        update_wrapper(cast(Callable[..., Any], hook_def), fn)\n        return hook_def\n\n\n@overload\ndef event_list_hook(\n    hook_fn: Callable,\n) -> HookDefinition:\n    pass\n\n\n@overload\ndef event_list_hook(\n    *,\n    name: Optional[str] = ...,\n    required_resource_keys: Optional[AbstractSet[str]] = ...,\n    decorated_fn: Optional[Callable[..., Any]] = ...,\n) -> _Hook:\n    pass\n\n\ndef event_list_hook(\n    hook_fn: Optional[Callable] = None,\n    *,\n    name: Optional[str] = None,\n    required_resource_keys: Optional[AbstractSet[str]] = None,\n    decorated_fn: Optional[Callable[..., Any]] = None,\n) -> Union[HookDefinition, _Hook]:\n    """Create a generic hook with the specified parameters from the decorated function.\n\n    This decorator is currently used internally by Dagster machinery to support success_hook and\n    failure_hook.\n\n    The user-defined hook function requires two parameters:\n    - A `context` object is passed as the first parameter. The context is an instance of\n        :py:class:`context <HookContext>`, and provides access to system\n        information, such as loggers (context.log), resources (context.resources), the op\n        (context.op) and its execution step (context.step) which triggers this hook.\n    - An `event_list` object is passed as the second paramter. 
It provides the full event list of the\n        associated execution step.\n\n    Args:\n        name (Optional[str]): The name of this hook.\n        required_resource_keys (Optional[AbstractSet[str]]): Keys for the resources required by the\n            hook.\n\n    Examples:\n        .. code-block:: python\n\n            @event_list_hook(required_resource_keys={'slack'})\n            def slack_on_materializations(context, event_list):\n                for event in event_list:\n                    if event.event_type == DagsterEventType.ASSET_MATERIALIZATION:\n                        message = f'{context.op_name} has materialized an asset {event.asset_key}.'\n                        # send a slack message every time a materialization event occurs\n                        context.resources.slack.send_message(message)\n\n\n    """\n    # This case is for when decorator is used bare, without arguments.\n    # e.g. @event_list_hook versus @event_list_hook()\n    if hook_fn is not None:\n        check.invariant(required_resource_keys is None)\n        return _Hook()(hook_fn)\n\n    return _Hook(\n        name=name, required_resource_keys=required_resource_keys, decorated_fn=decorated_fn\n    )\n\n\nSuccessOrFailureHookFn = Callable[["HookContext"], Any]\n\n\n@overload\ndef success_hook(hook_fn: SuccessOrFailureHookFn) -> HookDefinition: ...\n\n\n@overload\ndef success_hook(\n    *,\n    name: Optional[str] = ...,\n    required_resource_keys: Optional[AbstractSet[str]] = ...,\n) -> Callable[[SuccessOrFailureHookFn], HookDefinition]: ...\n\n\n
[docs]def success_hook(\n hook_fn: Optional[SuccessOrFailureHookFn] = None,\n *,\n name: Optional[str] = None,\n required_resource_keys: Optional[AbstractSet[str]] = None,\n) -> Union[HookDefinition, Callable[[SuccessOrFailureHookFn], HookDefinition]]:\n """Create a hook on step success events with the specified parameters from the decorated function.\n\n Args:\n name (Optional[str]): The name of this hook.\n required_resource_keys (Optional[AbstractSet[str]]): Keys for the resources required by the\n hook.\n\n Examples:\n .. code-block:: python\n\n @success_hook(required_resource_keys={'slack'})\n def slack_message_on_success(context):\n message = 'op {} succeeded'.format(context.op.name)\n context.resources.slack.send_message(message)\n\n @success_hook\n def do_something_on_success(context):\n do_something()\n\n\n """\n\n def wrapper(fn: SuccessOrFailureHookFn) -> HookDefinition:\n check.callable_param(fn, "fn")\n\n expected_positionals = ["context"]\n _validate_hook_fn_params(fn, expected_positionals)\n\n if name is None or callable(name):\n _name = fn.__name__\n else:\n _name = name\n\n @event_list_hook(name=_name, required_resource_keys=required_resource_keys, decorated_fn=fn)\n def _success_hook(\n context: "HookContext", event_list: Sequence["DagsterEvent"]\n ) -> HookExecutionResult:\n for event in event_list:\n if event.is_step_success:\n fn(context)\n return HookExecutionResult(hook_name=_name, is_skipped=False)\n\n # hook is skipped when fn didn't run\n return HookExecutionResult(hook_name=_name, is_skipped=True)\n\n return _success_hook\n\n # This case is for when decorator is used bare, without arguments, i.e. @success_hook\n if hook_fn is not None:\n check.invariant(required_resource_keys is None)\n return wrapper(hook_fn)\n\n return wrapper
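# --- Illustrative sketch (not part of this module) ---
# Defining a resource-free success hook and attaching it to every op in a job via the
# `hooks` argument of @job (see the job decorator below). Names are hypothetical.
from dagster import job, op, success_hook


@success_hook
def log_success(context):
    context.log.info(f"{context.op.name} finished successfully")


@op
def build_report():
    return "report"


@job(hooks={log_success})
def reporting_job():
    build_report()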
\n\n\n@overload\ndef failure_hook(name: SuccessOrFailureHookFn) -> HookDefinition: ...\n\n\n@overload\ndef failure_hook(\n name: Optional[str] = ...,\n required_resource_keys: Optional[AbstractSet[str]] = ...,\n) -> Callable[[SuccessOrFailureHookFn], HookDefinition]: ...\n\n\n
[docs]def failure_hook(\n name: Optional[Union[SuccessOrFailureHookFn, str]] = None,\n required_resource_keys: Optional[AbstractSet[str]] = None,\n) -> Union[HookDefinition, Callable[[SuccessOrFailureHookFn], HookDefinition]]:\n """Create a hook on step failure events with the specified parameters from the decorated function.\n\n Args:\n name (Optional[str]): The name of this hook.\n required_resource_keys (Optional[AbstractSet[str]]): Keys for the resources required by the\n hook.\n\n Examples:\n .. code-block:: python\n\n @failure_hook(required_resource_keys={'slack'})\n def slack_message_on_failure(context):\n message = 'op {} failed'.format(context.op.name)\n context.resources.slack.send_message(message)\n\n @failure_hook\n def do_something_on_failure(context):\n do_something()\n\n\n """\n\n def wrapper(fn: Callable[["HookContext"], Any]) -> HookDefinition:\n check.callable_param(fn, "fn")\n\n expected_positionals = ["context"]\n _validate_hook_fn_params(fn, expected_positionals)\n\n if name is None or callable(name):\n _name = fn.__name__\n else:\n _name = name\n\n @event_list_hook(name=_name, required_resource_keys=required_resource_keys, decorated_fn=fn)\n def _failure_hook(\n context: "HookContext", event_list: Sequence["DagsterEvent"]\n ) -> HookExecutionResult:\n for event in event_list:\n if event.is_step_failure:\n fn(context)\n return HookExecutionResult(hook_name=_name, is_skipped=False)\n\n # hook is skipped when fn didn't run\n return HookExecutionResult(hook_name=_name, is_skipped=True)\n\n return _failure_hook\n\n # This case is for when decorator is used bare, without arguments, i.e. @failure_hook\n if callable(name):\n check.invariant(required_resource_keys is None)\n return wrapper(name)\n\n return wrapper
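# --- Illustrative sketch (not part of this module) ---
# Scoping a failure hook to a single op invocation rather than to the whole job. This
# assumes the `.with_hooks(...)` method on an op definition, which attaches the hook to
# that invocation only; op and hook names are hypothetical.
from dagster import failure_hook, job, op


@failure_hook
def log_failure(context):
    context.log.error(f"{context.op.name} failed")


@op
def flaky_op():
    raise Exception("boom")


@op
def stable_op():
    return 1


@job
def partially_hooked_job():
    # only flaky_op is wrapped with log_failure; stable_op is unaffected
    flaky_op.with_hooks({log_failure})()
    stable_op()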
\n
", "current_page_name": "_modules/dagster/_core/definitions/decorators/hook_decorator", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.decorators.hook_decorator"}, "job_decorator": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.decorators.job_decorator

\nfrom functools import update_wrapper\nfrom typing import TYPE_CHECKING, AbstractSet, Any, Callable, Mapping, Optional, Union, overload\n\nimport dagster._check as check\nfrom dagster._annotations import deprecated_param\nfrom dagster._core.decorator_utils import format_docstring_for_description\n\nfrom ..config import ConfigMapping\nfrom ..graph_definition import GraphDefinition\nfrom ..hook_definition import HookDefinition\nfrom ..job_definition import JobDefinition\nfrom ..logger_definition import LoggerDefinition\nfrom ..metadata import RawMetadataValue\nfrom ..policy import RetryPolicy\nfrom ..resource_definition import ResourceDefinition\nfrom ..version_strategy import VersionStrategy\n\nif TYPE_CHECKING:\n    from ..executor_definition import ExecutorDefinition\n    from ..partition import PartitionedConfig, PartitionsDefinition\n    from ..run_config import RunConfig\n\n\nclass _Job:\n    def __init__(\n        self,\n        name: Optional[str] = None,\n        description: Optional[str] = None,\n        tags: Optional[Mapping[str, Any]] = None,\n        metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n        resource_defs: Optional[Mapping[str, ResourceDefinition]] = None,\n        config: Optional[\n            Union[ConfigMapping, Mapping[str, Any], "RunConfig", "PartitionedConfig"]\n        ] = None,\n        logger_defs: Optional[Mapping[str, LoggerDefinition]] = None,\n        executor_def: Optional["ExecutorDefinition"] = None,\n        hooks: Optional[AbstractSet[HookDefinition]] = None,\n        op_retry_policy: Optional[RetryPolicy] = None,\n        version_strategy: Optional[VersionStrategy] = None,\n        partitions_def: Optional["PartitionsDefinition"] = None,\n        input_values: Optional[Mapping[str, object]] = None,\n    ):\n        from dagster._core.definitions.run_config import convert_config_input\n\n        self.name = name\n        self.description = description\n        self.tags = tags\n        self.metadata = metadata\n        self.resource_defs = resource_defs\n        self.config = convert_config_input(config)\n        self.logger_defs = logger_defs\n        self.executor_def = executor_def\n        self.hooks = hooks\n        self.op_retry_policy = op_retry_policy\n        self.version_strategy = version_strategy\n        self.partitions_def = partitions_def\n        self.input_values = input_values\n\n    def __call__(self, fn: Callable[..., Any]) -> JobDefinition:\n        check.callable_param(fn, "fn")\n\n        if not self.name:\n            self.name = fn.__name__\n\n        from dagster._core.definitions.composition import do_composition\n\n        (\n            input_mappings,\n            output_mappings,\n            dependencies,\n            node_defs,\n            config_mapping,\n            positional_inputs,\n            node_input_source_assets,\n        ) = do_composition(\n            decorator_name="@job",\n            graph_name=self.name,\n            fn=fn,\n            provided_input_defs=[],\n            provided_output_defs=[],\n            ignore_output_from_composition_fn=False,\n            config_mapping=None,\n        )\n\n        graph_def = GraphDefinition(\n            name=self.name,\n            dependencies=dependencies,\n            node_defs=node_defs,\n            description=self.description or format_docstring_for_description(fn),\n            input_mappings=input_mappings,\n            output_mappings=output_mappings,\n            config=config_mapping,\n            
positional_inputs=positional_inputs,\n            tags=self.tags,\n            node_input_source_assets=node_input_source_assets,\n        )\n\n        job_def = graph_def.to_job(\n            description=self.description or format_docstring_for_description(fn),\n            resource_defs=self.resource_defs,\n            config=self.config,\n            tags=self.tags,\n            metadata=self.metadata,\n            logger_defs=self.logger_defs,\n            executor_def=self.executor_def,\n            hooks=self.hooks,\n            op_retry_policy=self.op_retry_policy,\n            version_strategy=self.version_strategy,\n            partitions_def=self.partitions_def,\n            input_values=self.input_values,\n        )\n        update_wrapper(job_def, fn)\n        return job_def\n\n\n@overload\ndef job(compose_fn: Callable[..., Any]) -> JobDefinition: ...\n\n\n@overload\ndef job(\n    *,\n    name: Optional[str] = ...,\n    description: Optional[str] = ...,\n    resource_defs: Optional[Mapping[str, object]] = ...,\n    config: Union[ConfigMapping, Mapping[str, Any], "RunConfig", "PartitionedConfig"] = ...,\n    tags: Optional[Mapping[str, Any]] = ...,\n    metadata: Optional[Mapping[str, RawMetadataValue]] = ...,\n    logger_defs: Optional[Mapping[str, LoggerDefinition]] = ...,\n    executor_def: Optional["ExecutorDefinition"] = ...,\n    hooks: Optional[AbstractSet[HookDefinition]] = ...,\n    op_retry_policy: Optional[RetryPolicy] = ...,\n    version_strategy: Optional[VersionStrategy] = ...,\n    partitions_def: Optional["PartitionsDefinition"] = ...,\n    input_values: Optional[Mapping[str, object]] = ...,\n) -> _Job: ...\n\n\n
[docs]@deprecated_param(\n param="version_strategy",\n breaking_version="2.0",\n additional_warn_text="Use asset versioning instead.",\n)\ndef job(\n compose_fn: Optional[Callable[..., Any]] = None,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n resource_defs: Optional[Mapping[str, object]] = None,\n config: Optional[\n Union[ConfigMapping, Mapping[str, Any], "RunConfig", "PartitionedConfig"]\n ] = None,\n tags: Optional[Mapping[str, Any]] = None,\n metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n logger_defs: Optional[Mapping[str, LoggerDefinition]] = None,\n executor_def: Optional["ExecutorDefinition"] = None,\n hooks: Optional[AbstractSet[HookDefinition]] = None,\n op_retry_policy: Optional[RetryPolicy] = None,\n version_strategy: Optional[VersionStrategy] = None,\n partitions_def: Optional["PartitionsDefinition"] = None,\n input_values: Optional[Mapping[str, object]] = None,\n) -> Union[JobDefinition, _Job]:\n """Creates a job with the specified parameters from the decorated graph/op invocation function.\n\n Using this decorator allows you to build an executable job by writing a function that invokes\n ops (or graphs).\n\n Args:\n compose_fn (Callable[..., Any]:\n The decorated function. The body should contain op or graph invocations. Unlike op\n functions, does not accept a context argument.\n name (Optional[str]):\n The name for the Job. Defaults to the name of the this graph.\n resource_defs (Optional[Mapping[str, object]]):\n Resources that are required by this graph for execution.\n If not defined, `io_manager` will default to filesystem.\n config:\n Describes how the job is parameterized at runtime.\n\n If no value is provided, then the schema for the job's run config is a standard\n format based on its ops and resources.\n\n If a dictionary is provided, then it must conform to the standard config schema, and\n it will be used as the job's run config for the job whenever the job is executed.\n The values provided will be viewable and editable in the Dagster UI, so be\n careful with secrets.\n\n If a :py:class:`RunConfig` object is provided, then it will be used directly as the run config\n for the job whenever the job is executed, similar to providing a dictionary.\n\n If a :py:class:`ConfigMapping` object is provided, then the schema for the job's run config is\n determined by the config mapping, and the ConfigMapping, which should return\n configuration in the standard format to configure the job.\n\n If a :py:class:`PartitionedConfig` object is provided, then it defines a discrete set of config\n values that can parameterize the job, as well as a function for mapping those\n values to the base config. The values provided will be viewable and editable in the\n Dagster UI, so be careful with secrets.\n tags (Optional[Dict[str, Any]]):\n Arbitrary information that will be attached to the execution of the Job.\n Values that are not strings will be json encoded and must meet the criteria that\n `json.loads(json.dumps(value)) == value`. 
These tag values may be overwritten by tag\n values provided at invocation time.\n metadata (Optional[Dict[str, RawMetadataValue]]):\n Arbitrary information that will be attached to the JobDefinition and be viewable in the Dagster UI.\n Keys must be strings, and values must be python primitive types or one of the provided\n MetadataValue types\n logger_defs (Optional[Dict[str, LoggerDefinition]]):\n A dictionary of string logger identifiers to their implementations.\n executor_def (Optional[ExecutorDefinition]):\n How this Job will be executed. Defaults to :py:class:`multiprocess_executor` .\n op_retry_policy (Optional[RetryPolicy]): The default retry policy for all ops in this job.\n Only used if retry policy is not defined on the op definition or op invocation.\n version_strategy (Optional[VersionStrategy]):\n Defines how each op (and optionally, resource) in the job can be versioned. If\n provided, memoization will be enabled for this job.\n partitions_def (Optional[PartitionsDefinition]): Defines a discrete set of partition keys\n that can parameterize the job. If this argument is supplied, the config argument\n can't also be supplied.\n input_values (Optional[Mapping[str, Any]]):\n A dictionary that maps python objects to the top-level inputs of a job.\n\n Examples:\n .. code-block:: python\n\n @op\n def return_one():\n return 1\n\n @op\n def add_one(in1):\n return in1 + 1\n\n @job\n def job1():\n add_one(return_one())\n """\n if compose_fn is not None:\n check.invariant(description is None)\n return _Job()(compose_fn)\n\n from dagster._core.execution.build_resources import wrap_resources_for_execution\n\n return _Job(\n name=name,\n description=description,\n resource_defs=wrap_resources_for_execution(resource_defs),\n config=config,\n tags=tags,\n metadata=metadata,\n logger_defs=logger_defs,\n executor_def=executor_def,\n hooks=hooks,\n op_retry_policy=op_retry_policy,\n version_strategy=version_strategy,\n partitions_def=partitions_def,\n input_values=input_values,\n )
\n
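A minimal usage sketch of the decorator above, assuming illustrative names (``double`` and ``config_example_job`` are not part of this module): it shows the dictionary form of the ``config`` argument described in the docstring, plus in-process execution of the resulting ``JobDefinition``.

.. code-block:: python

    from dagster import job, op


    @op(config_schema={"factor": int})
    def double(context):
        # reads the value supplied under ops.double.config in run config
        return 2 * context.op_config["factor"]


    # a plain dict supplied to `config` is used as the job's run config on every execution
    @job(config={"ops": {"double": {"config": {"factor": 21}}}})
    def config_example_job():
        double()


    if __name__ == "__main__":
        # runs the job synchronously and returns a result object
        result = config_example_job.execute_in_process()
        assert result.success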
", "current_page_name": "_modules/dagster/_core/definitions/decorators/job_decorator", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.decorators.job_decorator"}, "op_decorator": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.decorators.op_decorator

\nfrom functools import lru_cache, update_wrapper\nfrom inspect import Parameter\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Callable,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Union,\n    cast,\n    overload,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import deprecated_param\nfrom dagster._config import UserConfigSchema\nfrom dagster._core.decorator_utils import (\n    format_docstring_for_description,\n    get_function_params,\n    get_valid_name_permutations,\n    param_is_var_keyword,\n    positional_arg_name_list,\n)\nfrom dagster._core.definitions.inference import infer_input_props\nfrom dagster._core.definitions.resource_annotation import (\n    get_resource_args,\n)\nfrom dagster._core.errors import DagsterInvalidDefinitionError\nfrom dagster._core.types.dagster_type import DagsterTypeKind\nfrom dagster._utils.warnings import normalize_renamed_param\n\nfrom ..input import In, InputDefinition\nfrom ..output import Out\nfrom ..policy import RetryPolicy\nfrom ..utils import DEFAULT_OUTPUT\n\nif TYPE_CHECKING:\n    from ..op_definition import OpDefinition\n\n\nclass _Op:\n    def __init__(\n        self,\n        name: Optional[str] = None,\n        description: Optional[str] = None,\n        required_resource_keys: Optional[AbstractSet[str]] = None,\n        config_schema: Optional[Union[Any, Mapping[str, Any]]] = None,\n        tags: Optional[Mapping[str, Any]] = None,\n        code_version: Optional[str] = None,\n        decorator_takes_context: Optional[bool] = True,\n        retry_policy: Optional[RetryPolicy] = None,\n        ins: Optional[Mapping[str, In]] = None,\n        out: Optional[Union[Out, Mapping[str, Out]]] = None,\n    ):\n        self.name = check.opt_str_param(name, "name")\n        self.decorator_takes_context = check.bool_param(\n            decorator_takes_context, "decorator_takes_context"\n        )\n\n        self.description = check.opt_str_param(description, "description")\n\n        # these will be checked within OpDefinition\n        self.required_resource_keys = required_resource_keys\n        self.tags = tags\n        self.code_version = code_version\n        self.retry_policy = retry_policy\n\n        # config will be checked within OpDefinition\n        self.config_schema = config_schema\n\n        self.ins = check.opt_nullable_mapping_param(ins, "ins", key_type=str, value_type=In)\n        self.out = out\n\n    def __call__(self, fn: Callable[..., Any]) -> "OpDefinition":\n        from dagster._config.pythonic_config import validate_resource_annotated_function\n\n        from ..op_definition import OpDefinition\n\n        validate_resource_annotated_function(fn)\n\n        if not self.name:\n            self.name = fn.__name__\n\n        compute_fn = (\n            DecoratedOpFunction(decorated_fn=fn)\n            if self.decorator_takes_context\n            else NoContextDecoratedOpFunction(decorated_fn=fn)\n        )\n\n        if compute_fn.has_config_arg():\n            check.param_invariant(\n                self.config_schema is None or self.config_schema == {},\n                "If the @op has a config arg, you cannot specify a config schema",\n            )\n\n            from dagster._config.pythonic_config import infer_schema_from_config_annotation\n\n            # Parse schema from the type annotation of the config arg\n            config_arg = compute_fn.get_config_arg()\n            config_arg_type = config_arg.annotation\n            config_arg_default 
= config_arg.default\n            self.config_schema = infer_schema_from_config_annotation(\n                config_arg_type, config_arg_default\n            )\n\n        outs: Optional[Mapping[str, Out]] = None\n        if self.out is not None and isinstance(self.out, Out):\n            outs = {DEFAULT_OUTPUT: self.out}\n        elif self.out is not None:\n            outs = check.mapping_param(self.out, "out", key_type=str, value_type=Out)\n\n        arg_resource_keys = {arg.name for arg in compute_fn.get_resource_args()}\n        decorator_resource_keys = set(self.required_resource_keys or [])\n        check.param_invariant(\n            len(decorator_resource_keys) == 0 or len(arg_resource_keys) == 0,\n            "Cannot specify resource requirements in both @op decorator and as arguments to the"\n            " decorated function",\n        )\n        resolved_resource_keys = decorator_resource_keys.union(arg_resource_keys)\n\n        op_def = OpDefinition.dagster_internal_init(\n            name=self.name,\n            ins=self.ins,\n            outs=outs,\n            compute_fn=compute_fn,\n            config_schema=self.config_schema,\n            description=self.description or format_docstring_for_description(fn),\n            required_resource_keys=resolved_resource_keys,\n            tags=self.tags,\n            code_version=self.code_version,\n            retry_policy=self.retry_policy,\n            version=None,  # code_version has replaced version\n        )\n        update_wrapper(op_def, compute_fn.decorated_fn)\n        return op_def\n\n\n@overload\ndef op(compute_fn: Callable[..., Any]) -> "OpDefinition": ...\n\n\n@overload\ndef op(\n    *,\n    name: Optional[str] = ...,\n    description: Optional[str] = ...,\n    ins: Optional[Mapping[str, In]] = ...,\n    out: Optional[Union[Out, Mapping[str, Out]]] = ...,\n    config_schema: Optional[UserConfigSchema] = ...,\n    required_resource_keys: Optional[AbstractSet[str]] = ...,\n    tags: Optional[Mapping[str, Any]] = ...,\n    version: Optional[str] = ...,\n    retry_policy: Optional[RetryPolicy] = ...,\n    code_version: Optional[str] = ...,\n) -> _Op: ...\n\n\n
[docs]@deprecated_param(\n param="version", breaking_version="2.0", additional_warn_text="Use `code_version` instead"\n)\ndef op(\n compute_fn: Optional[Callable] = None,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n ins: Optional[Mapping[str, In]] = None,\n out: Optional[Union[Out, Mapping[str, Out]]] = None,\n config_schema: Optional[UserConfigSchema] = None,\n required_resource_keys: Optional[AbstractSet[str]] = None,\n tags: Optional[Mapping[str, Any]] = None,\n version: Optional[str] = None,\n retry_policy: Optional[RetryPolicy] = None,\n code_version: Optional[str] = None,\n) -> Union["OpDefinition", _Op]:\n """Create an op with the specified parameters from the decorated function.\n\n Ins and outs will be inferred from the type signature of the decorated function\n if not explicitly provided.\n\n The decorated function will be used as the op's compute function. The signature of the\n decorated function is more flexible than that of the ``compute_fn`` in the core API; it may:\n\n 1. Return a value. This value will be wrapped in an :py:class:`Output` and yielded by the compute function.\n 2. Return an :py:class:`Output`. This output will be yielded by the compute function.\n 3. Yield :py:class:`Output` or other :ref:`event objects <events>`. Same as default compute behavior.\n\n Note that options 1) and 2) are incompatible with yielding other events -- if you would like\n to decorate a function that yields events, it must also wrap its eventual output in an\n :py:class:`Output` and yield it.\n\n @op supports ``async def`` functions as well, including async generators when yielding multiple\n events or outputs. Note that async ops will generally be run on their own unless using a custom\n :py:class:`Executor` implementation that supports running them together.\n\n Args:\n name (Optional[str]): Name of op. Must be unique within any :py:class:`GraphDefinition`\n using the op.\n description (Optional[str]): Human-readable description of this op. If not provided, and\n the decorated function has docstring, that docstring will be used as the description.\n ins (Optional[Dict[str, In]]):\n Information about the inputs to the op. Information provided here will be combined\n with what can be inferred from the function signature.\n out (Optional[Union[Out, Dict[str, Out]]]):\n Information about the op outputs. Information provided here will be combined with\n what can be inferred from the return type signature if the function does not use yield.\n config_schema (Optional[ConfigSchema): The schema for the config. If set, Dagster will check\n that config provided for the op matches this schema and fail if it does not. If not\n set, Dagster will accept any config provided for the op.\n required_resource_keys (Optional[Set[str]]): Set of resource handles required by this op.\n tags (Optional[Dict[str, Any]]): Arbitrary metadata for the op. Frameworks may\n expect and require certain metadata to be attached to a op. Values that are not strings\n will be json encoded and must meet the criteria that `json.loads(json.dumps(value)) == value`.\n code_version (Optional[str]): (Experimental) Version of the logic encapsulated by the op. If set,\n this is used as a default version for all outputs.\n retry_policy (Optional[RetryPolicy]): The retry policy for this op.\n\n Examples:\n .. 
code-block:: python\n\n @op\n def hello_world():\n print('hello')\n\n @op\n def echo(msg: str) -> str:\n return msg\n\n @op(\n ins={'msg': In(str)},\n out=Out(str)\n )\n def echo_2(msg): # same as above\n return msg\n\n @op(\n out={'word': Out(), 'num': Out()}\n )\n def multi_out() -> Tuple[str, int]:\n return 'cool', 4\n """\n code_version = normalize_renamed_param(\n code_version,\n "code_version",\n version,\n "version",\n )\n\n if compute_fn is not None:\n check.invariant(description is None)\n check.invariant(config_schema is None)\n check.invariant(required_resource_keys is None)\n check.invariant(tags is None)\n check.invariant(version is None)\n\n return _Op()(compute_fn)\n\n return _Op(\n name=name,\n description=description,\n config_schema=config_schema,\n required_resource_keys=required_resource_keys,\n tags=tags,\n code_version=code_version,\n retry_policy=retry_policy,\n ins=ins,\n out=out,\n )
\n\n\nclass DecoratedOpFunction(NamedTuple):\n """Wrapper around the decorated op function to provide commonly used util methods."""\n\n decorated_fn: Callable[..., Any]\n\n @property\n def name(self):\n return self.decorated_fn.__name__\n\n @lru_cache(maxsize=1)\n def has_context_arg(self) -> bool:\n return is_context_provided(get_function_params(self.decorated_fn))\n\n def get_context_arg(self) -> Parameter:\n if self.has_context_arg():\n return get_function_params(self.decorated_fn)[0]\n check.failed("Requested context arg on function that does not have one")\n\n @lru_cache(maxsize=1)\n def _get_function_params(self) -> Sequence[Parameter]:\n return get_function_params(self.decorated_fn)\n\n def has_config_arg(self) -> bool:\n for param in get_function_params(self.decorated_fn):\n if param.name == "config":\n return True\n\n return False\n\n def get_config_arg(self) -> Parameter:\n for param in get_function_params(self.decorated_fn):\n if param.name == "config":\n return param\n\n check.failed("Requested config arg on function that does not have one")\n\n def get_resource_args(self) -> Sequence[Parameter]:\n return get_resource_args(self.decorated_fn)\n\n def positional_inputs(self) -> Sequence[str]:\n params = self._get_function_params()\n input_args = params[1:] if self.has_context_arg() else params\n resource_arg_names = [arg.name for arg in self.get_resource_args()]\n input_args_filtered = [\n input_arg\n for input_arg in input_args\n if input_arg.name != "config" and input_arg.name not in resource_arg_names\n ]\n return positional_arg_name_list(input_args_filtered)\n\n def has_var_kwargs(self) -> bool:\n params = self._get_function_params()\n # var keyword arg has to be the last argument\n return len(params) > 0 and param_is_var_keyword(params[-1])\n\n def get_output_annotation(self) -> Any:\n from ..inference import infer_output_props\n\n return infer_output_props(self.decorated_fn).annotation\n\n\nclass NoContextDecoratedOpFunction(DecoratedOpFunction):\n """Wrapper around a decorated op function, when the decorator does not permit a context\n parameter.\n """\n\n @lru_cache(maxsize=1)\n def has_context_arg(self) -> bool:\n return False\n\n\ndef is_context_provided(params: Sequence[Parameter]) -> bool:\n if len(params) == 0:\n return False\n return params[0].name in get_valid_name_permutations("context")\n\n\ndef resolve_checked_op_fn_inputs(\n decorator_name: str,\n fn_name: str,\n compute_fn: DecoratedOpFunction,\n explicit_input_defs: Sequence[InputDefinition],\n exclude_nothing: bool,\n) -> Sequence[InputDefinition]:\n """Validate provided input definitions and infer the remaining from the type signature of the compute_fn.\n Returns the resolved set of InputDefinitions.\n\n Args:\n decorator_name (str): Name of the decorator that is wrapping the op function.\n fn_name (str): Name of the decorated function.\n compute_fn (DecoratedOpFunction): The decorated function, wrapped in the\n DecoratedOpFunction wrapper.\n explicit_input_defs (List[InputDefinition]): The input definitions that were explicitly\n provided in the decorator.\n exclude_nothing (bool): True if Nothing type inputs should be excluded from compute_fn\n arguments.\n """\n explicit_names = set()\n if exclude_nothing:\n explicit_names = set(\n inp.name\n for inp in explicit_input_defs\n if not inp.dagster_type.kind == DagsterTypeKind.NOTHING\n )\n nothing_names = set(\n inp.name\n for inp in explicit_input_defs\n if inp.dagster_type.kind == DagsterTypeKind.NOTHING\n )\n else:\n explicit_names = set(inp.name for inp 
in explicit_input_defs)\n nothing_names = set()\n\n params = get_function_params(compute_fn.decorated_fn)\n\n input_args = params[1:] if compute_fn.has_context_arg() else params\n\n # filter out config arg\n resource_arg_names = {arg.name for arg in compute_fn.get_resource_args()}\n explicit_names = explicit_names - resource_arg_names\n\n if compute_fn.has_config_arg() or resource_arg_names:\n new_input_args = []\n for input_arg in input_args:\n if input_arg.name != "config" and input_arg.name not in resource_arg_names:\n new_input_args.append(input_arg)\n input_args = new_input_args\n\n # Validate input arguments\n used_inputs = set()\n inputs_to_infer = set()\n has_kwargs = False\n\n for param in cast(List[Parameter], input_args):\n if param.kind == Parameter.VAR_KEYWORD:\n has_kwargs = True\n elif param.kind == Parameter.VAR_POSITIONAL:\n raise DagsterInvalidDefinitionError(\n f"{decorator_name} '{fn_name}' decorated function has positional vararg parameter "\n f"'{param}'. {decorator_name} decorated functions should only have keyword "\n "arguments that match input names and, if system information is required, a first "\n "positional parameter named 'context'."\n )\n\n else:\n if param.name not in explicit_names:\n if param.name in nothing_names:\n raise DagsterInvalidDefinitionError(\n f"{decorator_name} '{fn_name}' decorated function has parameter"\n f" '{param.name}' that is one of the input_defs of type 'Nothing' which"\n " should not be included since no data will be passed for it. "\n )\n else:\n inputs_to_infer.add(param.name)\n\n else:\n used_inputs.add(param.name)\n\n undeclared_inputs = explicit_names - used_inputs\n if not has_kwargs and undeclared_inputs:\n undeclared_inputs_printed = ", '".join(undeclared_inputs)\n raise DagsterInvalidDefinitionError(\n f"{decorator_name} '{fn_name}' decorated function does not have argument(s)"\n f" '{undeclared_inputs_printed}'. {decorator_name}-decorated functions should have a"\n " keyword argument for each of their Ins, except for Ins that have the Nothing"\n " dagster_type. Alternatively, they can accept **kwargs."\n )\n\n inferred_props = {\n inferred.name: inferred\n for inferred in infer_input_props(compute_fn.decorated_fn, compute_fn.has_context_arg())\n }\n input_defs = []\n for input_def in explicit_input_defs:\n if input_def.name in inferred_props:\n # combine any information missing on the explicit def that can be inferred\n input_defs.append(input_def.combine_with_inferred(inferred_props[input_def.name]))\n else:\n # pass through those that don't have any inference info, such as Nothing type inputs\n input_defs.append(input_def)\n\n # build defs from the inferred props for those without explicit entries\n inferred_input_defs = [\n InputDefinition.create_from_inferred(inferred)\n for inferred in inferred_props.values()\n if inferred.name in inputs_to_infer\n ]\n\n if exclude_nothing:\n for in_def in inferred_input_defs:\n if in_def.dagster_type.is_nothing:\n raise DagsterInvalidDefinitionError(\n f"Input parameter {in_def.name} is annotated with"\n f" {in_def.dagster_type.display_name} which is a type that represents passing"\n " no data. This type must be used via In() and no parameter should be included"\n f" in the {decorator_name} decorated function."\n )\n\n input_defs.extend(inferred_input_defs)\n\n return input_defs\n
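The ``op`` docstring above notes that ``async def`` compute functions are supported; a small sketch follows, with hypothetical op and job names.

.. code-block:: python

    import asyncio

    from dagster import job, op


    @op
    async def fetch_value() -> int:
        # stand-in for an awaited call to an external service
        await asyncio.sleep(0)
        return 42


    @op
    def log_value(context, value: int) -> None:
        context.log.info(f"fetched {value}")


    @job
    def async_example_job():
        log_value(fetch_value())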
", "current_page_name": "_modules/dagster/_core/definitions/decorators/op_decorator", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.decorators.op_decorator"}, "repository_decorator": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.decorators.repository_decorator

\nfrom functools import update_wrapper\nfrom typing import (\n    Callable,\n    Dict,\n    Iterable,\n    Iterator,\n    List,\n    Mapping,\n    Optional,\n    Sequence,\n    TypeVar,\n    Union,\n    overload,\n)\n\nfrom typing_extensions import TypeAlias\n\nimport dagster._check as check\nfrom dagster._core.decorator_utils import get_function_params\nfrom dagster._core.definitions.metadata import (\n    RawMetadataValue,\n    normalize_metadata,\n)\nfrom dagster._core.definitions.resource_definition import ResourceDefinition\nfrom dagster._core.errors import DagsterInvalidDefinitionError\n\nfrom ..asset_checks import AssetChecksDefinition\nfrom ..executor_definition import ExecutorDefinition\nfrom ..graph_definition import GraphDefinition\nfrom ..job_definition import JobDefinition\nfrom ..logger_definition import LoggerDefinition\nfrom ..partitioned_schedule import UnresolvedPartitionedAssetScheduleDefinition\nfrom ..repository_definition import (\n    VALID_REPOSITORY_DATA_DICT_KEYS,\n    CachingRepositoryData,\n    PendingRepositoryDefinition,\n    PendingRepositoryListDefinition,\n    RepositoryData,\n    RepositoryDefinition,\n    RepositoryListDefinition,\n)\nfrom ..schedule_definition import ScheduleDefinition\nfrom ..sensor_definition import SensorDefinition\nfrom ..unresolved_asset_job_definition import UnresolvedAssetJobDefinition\n\nT = TypeVar("T")\n\nRepositoryDictSpec: TypeAlias = Dict[str, Dict[str, RepositoryListDefinition]]\n\n\ndef _flatten(items: Iterable[Union[T, List[T]]]) -> Iterator[T]:\n    for x in items:\n        if isinstance(x, List):\n            # switch to `yield from _flatten(x)` to support multiple layers of nesting\n            yield from x\n        else:\n            yield x\n\n\nclass _Repository:\n    def __init__(\n        self,\n        name: Optional[str] = None,\n        description: Optional[str] = None,\n        metadata: Optional[Dict[str, RawMetadataValue]] = None,\n        default_executor_def: Optional[ExecutorDefinition] = None,\n        default_logger_defs: Optional[Mapping[str, LoggerDefinition]] = None,\n        top_level_resources: Optional[Mapping[str, ResourceDefinition]] = None,\n        resource_key_mapping: Optional[Mapping[int, str]] = None,\n    ):\n        self.name = check.opt_str_param(name, "name")\n        self.description = check.opt_str_param(description, "description")\n        self.metadata = normalize_metadata(\n            check.opt_mapping_param(metadata, "metadata", key_type=str)\n        )\n        self.default_executor_def = check.opt_inst_param(\n            default_executor_def, "default_executor_def", ExecutorDefinition\n        )\n        self.default_logger_defs = check.opt_mapping_param(\n            default_logger_defs, "default_logger_defs", key_type=str, value_type=LoggerDefinition\n        )\n        self.top_level_resources = check.opt_mapping_param(\n            top_level_resources, "top_level_resources", key_type=str, value_type=ResourceDefinition\n        )\n        self.resource_key_mapping = check.opt_mapping_param(\n            resource_key_mapping, "resource_key_mapping", key_type=int, value_type=str\n        )\n\n    @overload\n    def __call__(\n        self,\n        fn: Union[\n            Callable[[], Sequence[RepositoryListDefinition]],\n            Callable[[], RepositoryDictSpec],\n        ],\n    ) -> RepositoryDefinition: ...\n\n    @overload\n    def __call__(\n        self, fn: Callable[[], Sequence[PendingRepositoryListDefinition]]\n    ) -> PendingRepositoryDefinition: ...\n\n    
def __call__(\n        self,\n        fn: Union[\n            Callable[[], Sequence[PendingRepositoryListDefinition]],\n            Callable[[], RepositoryDictSpec],\n        ],\n    ) -> Union[RepositoryDefinition, PendingRepositoryDefinition]:\n        from dagster._core.definitions import AssetsDefinition, SourceAsset\n        from dagster._core.definitions.cacheable_assets import CacheableAssetsDefinition\n\n        check.callable_param(fn, "fn")\n\n        if not self.name:\n            self.name = fn.__name__\n\n        repository_definitions = fn()\n\n        repository_data: Optional[Union[CachingRepositoryData, RepositoryData]]\n        if isinstance(repository_definitions, list):\n            bad_defns = []\n            repository_defns = []\n            defer_repository_data = False\n            for i, definition in enumerate(_flatten(repository_definitions)):\n                if isinstance(definition, CacheableAssetsDefinition):\n                    defer_repository_data = True\n                elif not isinstance(\n                    definition,\n                    (\n                        JobDefinition,\n                        ScheduleDefinition,\n                        UnresolvedPartitionedAssetScheduleDefinition,\n                        SensorDefinition,\n                        GraphDefinition,\n                        AssetsDefinition,\n                        SourceAsset,\n                        UnresolvedAssetJobDefinition,\n                        AssetChecksDefinition,\n                    ),\n                ):\n                    bad_defns.append((i, type(definition)))\n                else:\n                    repository_defns.append(definition)\n\n            if bad_defns:\n                bad_definitions_str = ", ".join(\n                    [f"value of type {type_} at index {i}" for i, type_ in bad_defns]\n                )\n                raise DagsterInvalidDefinitionError(\n                    "Bad return value from repository construction function: all elements of list "\n                    "must be of type JobDefinition, GraphDefinition, "\n                    "ScheduleDefinition, SensorDefinition, "\n                    "AssetsDefinition, SourceAsset, or AssetChecksDefinition."\n                    f"Got {bad_definitions_str}."\n                )\n\n            repository_data = (\n                None\n                if defer_repository_data\n                else CachingRepositoryData.from_list(\n                    repository_defns,\n                    default_executor_def=self.default_executor_def,\n                    default_logger_defs=self.default_logger_defs,\n                    top_level_resources=self.top_level_resources,\n                    resource_key_mapping=self.resource_key_mapping,\n                )\n            )\n\n        elif isinstance(repository_definitions, dict):\n            if not set(repository_definitions.keys()).issubset(VALID_REPOSITORY_DATA_DICT_KEYS):\n                raise DagsterInvalidDefinitionError(\n                    "Bad return value from repository construction function: dict must not contain "\n                    "keys other than {{'schedules', 'sensors', 'jobs'}}: found "\n                    "{bad_keys}".format(\n                        bad_keys=", ".join(\n                            [\n                                f"'{key}'"\n                                for key in repository_definitions.keys()\n                                if key not in VALID_REPOSITORY_DATA_DICT_KEYS\n             
               ]\n                        )\n                    )\n                )\n            repository_data = CachingRepositoryData.from_dict(repository_definitions)\n        elif isinstance(repository_definitions, RepositoryData):\n            repository_data = repository_definitions\n        else:\n            raise DagsterInvalidDefinitionError(\n                "Bad return value of type {type_} from repository construction function: must "\n                "return list, dict, or RepositoryData. See the @repository decorator docstring for "\n                "details and examples".format(type_=type(repository_definitions)),\n            )\n\n        if isinstance(repository_definitions, list) and repository_data is None:\n            return PendingRepositoryDefinition(\n                self.name,\n                repository_definitions=list(_flatten(repository_definitions)),\n                description=self.description,\n                metadata=self.metadata,\n                default_executor_def=self.default_executor_def,\n                default_logger_defs=self.default_logger_defs,\n                _top_level_resources=self.top_level_resources,\n            )\n        else:\n            repository_def = RepositoryDefinition(\n                name=self.name,\n                description=self.description,\n                metadata=self.metadata,\n                repository_data=repository_data,\n            )\n\n            update_wrapper(repository_def, fn)\n            return repository_def\n\n\n@overload\ndef repository(\n    definitions_fn: Union[\n        Callable[[], Sequence[RepositoryListDefinition]], Callable[[], RepositoryDictSpec]\n    ],\n) -> RepositoryDefinition: ...\n\n\n@overload\ndef repository(\n    definitions_fn: Callable[..., Sequence[PendingRepositoryListDefinition]]\n) -> PendingRepositoryDefinition: ...\n\n\n@overload\ndef repository(\n    *,\n    name: Optional[str] = ...,\n    description: Optional[str] = ...,\n    metadata: Optional[Dict[str, RawMetadataValue]] = ...,\n    default_executor_def: Optional[ExecutorDefinition] = ...,\n    default_logger_defs: Optional[Mapping[str, LoggerDefinition]] = ...,\n    _top_level_resources: Optional[Mapping[str, ResourceDefinition]] = ...,\n    _resource_key_mapping: Optional[Mapping[int, str]] = ...,\n) -> _Repository: ...\n\n\n
[docs]def repository(\n definitions_fn: Optional[\n Union[\n Callable[[], Sequence[PendingRepositoryListDefinition]],\n Callable[[], RepositoryDictSpec],\n ]\n ] = None,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n metadata: Optional[Dict[str, RawMetadataValue]] = None,\n default_executor_def: Optional[ExecutorDefinition] = None,\n default_logger_defs: Optional[Mapping[str, LoggerDefinition]] = None,\n _top_level_resources: Optional[Mapping[str, ResourceDefinition]] = None,\n _resource_key_mapping: Optional[Mapping[int, str]] = None,\n) -> Union[RepositoryDefinition, PendingRepositoryDefinition, _Repository]:\n """Create a repository from the decorated function.\n\n The decorated function should take no arguments and its return value should one of:\n\n 1. ``List[Union[JobDefinition, ScheduleDefinition, SensorDefinition]]``.\n Use this form when you have no need to lazy load jobs or other definitions. This is the\n typical use case.\n\n 2. A dict of the form:\n\n .. code-block:: python\n\n {\n 'jobs': Dict[str, Callable[[], JobDefinition]],\n 'schedules': Dict[str, Callable[[], ScheduleDefinition]]\n 'sensors': Dict[str, Callable[[], SensorDefinition]]\n }\n\n This form is intended to allow definitions to be created lazily when accessed by name,\n which can be helpful for performance when there are many definitions in a repository, or\n when constructing the definitions is costly.\n\n 3. A :py:class:`RepositoryData`. Return this object if you need fine-grained\n control over the construction and indexing of definitions within the repository, e.g., to\n create definitions dynamically from .yaml files in a directory.\n\n Args:\n name (Optional[str]): The name of the repository. Defaults to the name of the decorated\n function.\n description (Optional[str]): A string description of the repository.\n metadata (Optional[Dict[str, RawMetadataValue]]): Arbitrary metadata for the repository.\n top_level_resources (Optional[Mapping[str, ResourceDefinition]]): A dict of top-level\n resource keys to defintions, for resources which should be displayed in the UI.\n\n Example:\n .. 
code-block:: python\n\n ######################################################################\n # A simple repository using the first form of the decorated function\n ######################################################################\n\n @op(config_schema={n: Field(Int)})\n def return_n(context):\n return context.op_config['n']\n\n @job\n def simple_job():\n return_n()\n\n @job\n def some_job():\n ...\n\n @sensor(job=some_job)\n def some_sensor():\n if foo():\n yield RunRequest(\n run_key= ...,\n run_config={\n 'ops': {'return_n': {'config': {'n': bar()}}}\n }\n )\n\n @job\n def my_job():\n ...\n\n my_schedule = ScheduleDefinition(cron_schedule="0 0 * * *", job=my_job)\n\n @repository\n def simple_repository():\n return [simple_job, some_sensor, my_schedule]\n\n ######################################################################\n # A simple repository using the first form of the decorated function\n # and custom metadata that will be displayed in the UI\n ######################################################################\n\n ...\n\n @repository(\n name='my_repo',\n metadata={\n 'team': 'Team A',\n 'repository_version': '1.2.3',\n 'environment': 'production',\n })\n def simple_repository():\n return [simple_job, some_sensor, my_schedule]\n\n ######################################################################\n # A lazy-loaded repository\n ######################################################################\n\n def make_expensive_job():\n @job\n def expensive_job():\n for i in range(10000):\n return_n.alias(f'return_n_{i}')()\n\n return expensive_job\n\n def make_expensive_schedule():\n @job\n def other_expensive_job():\n for i in range(11000):\n return_n.alias(f'my_return_n_{i}')()\n\n return ScheduleDefinition(cron_schedule="0 0 * * *", job=other_expensive_job)\n\n @repository\n def lazy_loaded_repository():\n return {\n 'jobs': {'expensive_job': make_expensive_job},\n 'schedules': {'expensive_schedule': make_expensive_schedule}\n }\n\n\n ######################################################################\n # A complex repository that lazily constructs jobs from a directory\n # of files in a bespoke YAML format\n ######################################################################\n\n class ComplexRepositoryData(RepositoryData):\n def __init__(self, yaml_directory):\n self._yaml_directory = yaml_directory\n\n def get_all_jobs(self):\n return [\n self._construct_job_def_from_yaml_file(\n self._yaml_file_for_job_name(file_name)\n )\n for file_name in os.listdir(self._yaml_directory)\n ]\n\n ...\n\n @repository\n def complex_repository():\n return ComplexRepositoryData('some_directory')\n """\n if definitions_fn is not None:\n check.invariant(description is None)\n check.invariant(len(get_function_params(definitions_fn)) == 0)\n\n return _Repository()(definitions_fn)\n\n return _Repository(\n name=name,\n description=description,\n metadata=metadata,\n default_executor_def=default_executor_def,\n default_logger_defs=default_logger_defs,\n top_level_resources=_top_level_resources,\n resource_key_mapping=_resource_key_mapping,\n )
\n
", "current_page_name": "_modules/dagster/_core/definitions/decorators/repository_decorator", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.decorators.repository_decorator"}, "schedule_decorator": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.decorators.schedule_decorator

\nimport copy\nfrom functools import update_wrapper\nfrom typing import (\n    Callable,\n    List,\n    Mapping,\n    Optional,\n    Sequence,\n    Set,\n    Union,\n    cast,\n)\n\nimport dagster._check as check\nfrom dagster._core.definitions.resource_annotation import (\n    get_resource_args,\n)\nfrom dagster._core.definitions.sensor_definition import get_context_param_name\nfrom dagster._core.errors import (\n    DagsterInvalidDefinitionError,\n    ScheduleExecutionError,\n    user_code_error_boundary,\n)\nfrom dagster._utils import ensure_gen\n\nfrom ..run_request import RunRequest, SkipReason\nfrom ..schedule_definition import (\n    DecoratedScheduleFunction,\n    DefaultScheduleStatus,\n    RawScheduleEvaluationFunction,\n    RunRequestIterator,\n    ScheduleDefinition,\n    ScheduleEvaluationContext,\n    has_at_least_one_parameter,\n    validate_and_get_schedule_resource_dict,\n)\nfrom ..target import ExecutableDefinition\nfrom ..utils import validate_tags\n\n\n
[docs]def schedule(\n cron_schedule: Union[str, Sequence[str]],\n *,\n job_name: Optional[str] = None,\n name: Optional[str] = None,\n tags: Optional[Mapping[str, str]] = None,\n tags_fn: Optional[Callable[[ScheduleEvaluationContext], Optional[Mapping[str, str]]]] = None,\n should_execute: Optional[Callable[[ScheduleEvaluationContext], bool]] = None,\n environment_vars: Optional[Mapping[str, str]] = None,\n execution_timezone: Optional[str] = None,\n description: Optional[str] = None,\n job: Optional[ExecutableDefinition] = None,\n default_status: DefaultScheduleStatus = DefaultScheduleStatus.STOPPED,\n required_resource_keys: Optional[Set[str]] = None,\n) -> Callable[[RawScheduleEvaluationFunction], ScheduleDefinition]:\n """Creates a schedule following the provided cron schedule and requests runs for the provided job.\n\n The decorated function takes in a :py:class:`~dagster.ScheduleEvaluationContext` as its only\n argument, and does one of the following:\n\n 1. Return a `RunRequest` object.\n 2. Return a list of `RunRequest` objects.\n 3. Return a `SkipReason` object, providing a descriptive message of why no runs were requested.\n 4. Return nothing (skipping without providing a reason)\n 5. Return a run config dictionary.\n 6. Yield a `SkipReason` or yield one ore more `RunRequest` objects.\n\n Returns a :py:class:`~dagster.ScheduleDefinition`.\n\n Args:\n cron_schedule (Union[str, Sequence[str]]): A valid cron string or sequence of cron strings\n specifying when the schedule will run, e.g., ``'45 23 * * 6'`` for a schedule that runs\n at 11:45 PM every Saturday. If a sequence is provided, then the schedule will run for\n the union of all execution times for the provided cron strings, e.g.,\n ``['45 23 * * 6', '30 9 * * 0]`` for a schedule that runs at 11:45 PM every Saturday and\n 9:30 AM every Sunday.\n name (Optional[str]): The name of the schedule to create.\n tags (Optional[Dict[str, str]]): A dictionary of tags (string key-value pairs) to attach\n to the scheduled runs.\n tags_fn (Optional[Callable[[ScheduleEvaluationContext], Optional[Dict[str, str]]]]): A function\n that generates tags to attach to the schedules runs. Takes a\n :py:class:`~dagster.ScheduleEvaluationContext` and returns a dictionary of tags (string\n key-value pairs). You may set only one of ``tags`` and ``tags_fn``.\n should_execute (Optional[Callable[[ScheduleEvaluationContext], bool]]): A function that runs at\n schedule execution time to determine whether a schedule should execute or skip. Takes a\n :py:class:`~dagster.ScheduleEvaluationContext` and returns a boolean (``True`` if the\n schedule should execute). Defaults to a function that always returns ``True``.\n execution_timezone (Optional[str]): Timezone in which the schedule should run.\n Supported strings for timezones are the ones provided by the\n `IANA time zone database <https://www.iana.org/time-zones>` - e.g. "America/Los_Angeles".\n description (Optional[str]): A human-readable description of the schedule.\n job (Optional[Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]]): The job\n that should execute when this schedule runs.\n default_status (DefaultScheduleStatus): Whether the schedule starts as running or not. 
The default\n status can be overridden from the Dagster UI or via the GraphQL API.\n required_resource_keys (Optional[Set[str]]): The set of resource keys required by the schedule.\n """\n\n def inner(fn: RawScheduleEvaluationFunction) -> ScheduleDefinition:\n from dagster._config.pythonic_config import validate_resource_annotated_function\n\n check.callable_param(fn, "fn")\n validate_resource_annotated_function(fn)\n\n schedule_name = name or fn.__name__\n\n validated_tags = None\n\n # perform upfront validation of schedule tags\n if tags_fn and tags:\n raise DagsterInvalidDefinitionError(\n "Attempted to provide both tags_fn and tags as arguments"\n " to ScheduleDefinition. Must provide only one of the two."\n )\n elif tags:\n validated_tags = validate_tags(tags, allow_reserved_tags=False)\n\n context_param_name = get_context_param_name(fn)\n resource_arg_names: Set[str] = {arg.name for arg in get_resource_args(fn)}\n\n def _wrapped_fn(context: ScheduleEvaluationContext) -> RunRequestIterator:\n if should_execute:\n with user_code_error_boundary(\n ScheduleExecutionError,\n lambda: (\n "Error occurred during the execution of should_execute for schedule"\n f" {schedule_name}"\n ),\n ):\n if not should_execute(context):\n yield SkipReason(\n f"should_execute function for {schedule_name} returned false."\n )\n return\n resources = validate_and_get_schedule_resource_dict(\n context.resources, schedule_name, resource_arg_names\n )\n\n with user_code_error_boundary(\n ScheduleExecutionError,\n lambda: f"Error occurred during the evaluation of schedule {schedule_name}",\n ):\n context_param = {context_param_name: context} if context_param_name else {}\n result = fn(**context_param, **resources)\n\n if isinstance(result, dict):\n # this is the run-config based decorated function, wrap the evaluated run config\n # and tags in a RunRequest\n evaluated_run_config = copy.deepcopy(result)\n evaluated_tags = (\n validated_tags\n or (tags_fn and validate_tags(tags_fn(context), allow_reserved_tags=False))\n or None\n )\n yield RunRequest(\n run_key=None,\n run_config=evaluated_run_config,\n tags=evaluated_tags,\n )\n elif isinstance(result, list):\n yield from cast(List[RunRequest], result)\n else:\n # this is a run-request based decorated function\n yield from cast(RunRequestIterator, ensure_gen(result))\n\n has_context_arg = has_at_least_one_parameter(fn)\n evaluation_fn = DecoratedScheduleFunction(\n decorated_fn=fn,\n wrapped_fn=_wrapped_fn,\n has_context_arg=has_context_arg,\n )\n\n schedule_def = ScheduleDefinition.dagster_internal_init(\n name=schedule_name,\n cron_schedule=cron_schedule,\n job_name=job_name,\n environment_vars=environment_vars,\n execution_timezone=execution_timezone,\n description=description,\n execution_fn=evaluation_fn,\n job=job,\n default_status=default_status,\n required_resource_keys=required_resource_keys,\n run_config=None, # cannot supply run_config or run_config_fn to decorator\n run_config_fn=None,\n tags=None, # cannot supply tags or tags_fn to decorator\n tags_fn=None,\n should_execute=None, # already encompassed in evaluation_fn\n )\n\n update_wrapper(schedule_def, wrapped=fn)\n\n return schedule_def\n\n return inner
\n
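A brief sketch of the ``schedule`` decorator in use, assuming a hypothetical ``my_job`` whose ``process_date`` op takes a ``date`` config field; it returns a run config dictionary, one of the return forms enumerated in the docstring.

.. code-block:: python

    from dagster import schedule


    @schedule(
        cron_schedule="45 23 * * 6",
        job=my_job,
        execution_timezone="America/Los_Angeles",
    )
    def my_saturday_schedule(context):
        # a returned run config dict is wrapped in a RunRequest by the decorator
        date_str = context.scheduled_execution_time.strftime("%Y-%m-%d")
        return {"ops": {"process_date": {"config": {"date": date_str}}}}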
", "current_page_name": "_modules/dagster/_core/definitions/decorators/schedule_decorator", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.decorators.schedule_decorator"}, "sensor_decorator": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.decorators.sensor_decorator

\nimport collections.abc\nimport inspect\nfrom functools import update_wrapper\nfrom typing import Any, Callable, Optional, Sequence, Set, Union\n\nimport dagster._check as check\nfrom dagster._annotations import experimental\nfrom dagster._core.definitions.asset_selection import AssetSelection\n\nfrom ...errors import DagsterInvariantViolationError\nfrom ..asset_sensor_definition import AssetSensorDefinition\nfrom ..events import AssetKey\nfrom ..multi_asset_sensor_definition import (\n    AssetMaterializationFunction,\n    MultiAssetMaterializationFunction,\n    MultiAssetSensorDefinition,\n)\nfrom ..run_request import SensorResult\nfrom ..sensor_definition import (\n    DefaultSensorStatus,\n    RawSensorEvaluationFunction,\n    RunRequest,\n    SensorDefinition,\n    SkipReason,\n)\nfrom ..target import ExecutableDefinition\n\n\n
[docs]def sensor(\n job_name: Optional[str] = None,\n *,\n name: Optional[str] = None,\n minimum_interval_seconds: Optional[int] = None,\n description: Optional[str] = None,\n job: Optional[ExecutableDefinition] = None,\n jobs: Optional[Sequence[ExecutableDefinition]] = None,\n default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,\n asset_selection: Optional[AssetSelection] = None,\n required_resource_keys: Optional[Set[str]] = None,\n) -> Callable[[RawSensorEvaluationFunction], SensorDefinition]:\n """Creates a sensor where the decorated function is used as the sensor's evaluation function.\n\n The decorated function may:\n\n 1. Return a `RunRequest` object.\n 2. Return a list of `RunRequest` objects.\n 3. Return a `SkipReason` object, providing a descriptive message of why no runs were requested.\n 4. Return nothing (skipping without providing a reason)\n 5. Yield a `SkipReason` or yield one or more `RunRequest` objects.\n\n Takes a :py:class:`~dagster.SensorEvaluationContext`.\n\n Args:\n name (Optional[str]): The name of the sensor. Defaults to the name of the decorated\n function.\n minimum_interval_seconds (Optional[int]): The minimum number of seconds that will elapse\n between sensor evaluations.\n description (Optional[str]): A human-readable description of the sensor.\n job (Optional[Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]]):\n The job to be executed when the sensor fires.\n jobs (Optional[Sequence[Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]]]):\n (experimental) A list of jobs to be executed when the sensor fires.\n default_status (DefaultSensorStatus): Whether the sensor starts as running or not. The default\n status can be overridden from the Dagster UI or via the GraphQL API.\n asset_selection (AssetSelection): (Experimental) an asset selection to launch a run for if\n the sensor condition is met. This can be provided instead of specifying a job.\n """\n check.opt_str_param(name, "name")\n\n def inner(fn: RawSensorEvaluationFunction) -> SensorDefinition:\n check.callable_param(fn, "fn")\n\n sensor_def = SensorDefinition.dagster_internal_init(\n name=name,\n job_name=job_name,\n evaluation_fn=fn,\n minimum_interval_seconds=minimum_interval_seconds,\n description=description,\n job=job,\n jobs=jobs,\n default_status=default_status,\n asset_selection=asset_selection,\n required_resource_keys=required_resource_keys,\n )\n\n update_wrapper(sensor_def, wrapped=fn)\n\n return sensor_def\n\n return inner
\n\n\n
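A short sketch of the ``sensor`` decorator above, with a hypothetical ``my_job`` and file path; it yields a ``RunRequest`` keyed on the file's modification time so that repeat evaluations are de-duplicated.

.. code-block:: python

    import os

    from dagster import RunRequest, SkipReason, sensor


    @sensor(job=my_job, minimum_interval_seconds=60)
    def new_file_sensor(context):
        filename = "/tmp/data/latest.csv"
        if not os.path.exists(filename):
            yield SkipReason(f"{filename} is not present")
            return
        # run_key de-duplicates requests across evaluations of the sensor
        yield RunRequest(run_key=str(os.path.getmtime(filename)))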
[docs]def asset_sensor(\n asset_key: AssetKey,\n *,\n job_name: Optional[str] = None,\n name: Optional[str] = None,\n minimum_interval_seconds: Optional[int] = None,\n description: Optional[str] = None,\n job: Optional[ExecutableDefinition] = None,\n jobs: Optional[Sequence[ExecutableDefinition]] = None,\n default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,\n required_resource_keys: Optional[Set[str]] = None,\n) -> Callable[[AssetMaterializationFunction,], AssetSensorDefinition,]:\n """Creates an asset sensor where the decorated function is used as the asset sensor's evaluation\n function.\n\n If the asset has been materialized multiple times between since the last sensor tick, the\n evaluation function will only be invoked once, with the latest materialization.\n\n The decorated function may:\n\n 1. Return a `RunRequest` object.\n 2. Return a list of `RunRequest` objects.\n 3. Return a `SkipReason` object, providing a descriptive message of why no runs were requested.\n 4. Return nothing (skipping without providing a reason)\n 5. Yield a `SkipReason` or yield one or more `RunRequest` objects.\n\n Takes a :py:class:`~dagster.SensorEvaluationContext` and an EventLogEntry corresponding to an\n AssetMaterialization event.\n\n Args:\n asset_key (AssetKey): The asset_key this sensor monitors.\n name (Optional[str]): The name of the sensor. Defaults to the name of the decorated\n function.\n minimum_interval_seconds (Optional[int]): The minimum number of seconds that will elapse\n between sensor evaluations.\n description (Optional[str]): A human-readable description of the sensor.\n job (Optional[Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]]): The\n job to be executed when the sensor fires.\n jobs (Optional[Sequence[Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]]]):\n (experimental) A list of jobs to be executed when the sensor fires.\n default_status (DefaultSensorStatus): Whether the sensor starts as running or not. The default\n status can be overridden from the Dagster UI or via the GraphQL API.\n\n\n Example:\n .. code-block:: python\n\n from dagster import AssetKey, EventLogEntry, SensorEvaluationContext, asset_sensor\n\n\n @asset_sensor(asset_key=AssetKey("my_table"), job=my_job)\n def my_asset_sensor(context: SensorEvaluationContext, asset_event: EventLogEntry):\n return RunRequest(\n run_key=context.cursor,\n run_config={\n "ops": {\n "read_materialization": {\n "config": {\n "asset_key": asset_event.dagster_event.asset_key.path,\n }\n }\n }\n },\n )\n """\n check.opt_str_param(name, "name")\n\n def inner(fn: AssetMaterializationFunction) -> AssetSensorDefinition:\n check.callable_param(fn, "fn")\n sensor_name = name or fn.__name__\n\n def _wrapped_fn(*args, **kwargs) -> Any:\n result = fn(*args, **kwargs)\n\n if inspect.isgenerator(result) or isinstance(result, list):\n for item in result:\n yield item\n elif isinstance(result, (RunRequest, SkipReason)):\n yield result\n\n elif isinstance(result, SensorResult):\n if result.cursor:\n raise DagsterInvariantViolationError(\n f"Error in asset sensor {sensor_name}: Sensor returned a SensorResult"\n " with a cursor value. The cursor is managed by the asset sensor and"\n " should not be modified by a user."\n )\n yield result\n\n elif result is not None:\n raise DagsterInvariantViolationError(\n f"Error in sensor {sensor_name}: Sensor unexpectedly returned output "\n f"{result} of type {type(result)}. 
Should only return SkipReason or "\n "RunRequest objects."\n )\n\n # Preserve any resource arguments from the underlying function, for when we inspect the\n # wrapped function later on\n _wrapped_fn = update_wrapper(_wrapped_fn, wrapped=fn)\n\n return AssetSensorDefinition(\n name=sensor_name,\n asset_key=asset_key,\n job_name=job_name,\n asset_materialization_fn=_wrapped_fn,\n minimum_interval_seconds=minimum_interval_seconds,\n description=description,\n job=job,\n jobs=jobs,\n default_status=default_status,\n required_resource_keys=required_resource_keys,\n )\n\n return inner
\n\n\n
[docs]@experimental\ndef multi_asset_sensor(\n monitored_assets: Union[Sequence[AssetKey], AssetSelection],\n *,\n job_name: Optional[str] = None,\n name: Optional[str] = None,\n minimum_interval_seconds: Optional[int] = None,\n description: Optional[str] = None,\n job: Optional[ExecutableDefinition] = None,\n jobs: Optional[Sequence[ExecutableDefinition]] = None,\n default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,\n request_assets: Optional[AssetSelection] = None,\n required_resource_keys: Optional[Set[str]] = None,\n) -> Callable[[MultiAssetMaterializationFunction,], MultiAssetSensorDefinition,]:\n """Creates an asset sensor that can monitor multiple assets.\n\n The decorated function is used as the asset sensor's evaluation\n function. The decorated function may:\n\n 1. Return a `RunRequest` object.\n 2. Return a list of `RunRequest` objects.\n 3. Return a `SkipReason` object, providing a descriptive message of why no runs were requested.\n 4. Return nothing (skipping without providing a reason)\n 5. Yield a `SkipReason` or yield one or more `RunRequest` objects.\n\n Takes a :py:class:`~dagster.MultiAssetSensorEvaluationContext`.\n\n Args:\n monitored_assets (Union[Sequence[AssetKey], AssetSelection]): The assets this\n sensor monitors. If an AssetSelection object is provided, it will only apply to assets\n within the Definitions that this sensor is part of.\n name (Optional[str]): The name of the sensor. Defaults to the name of the decorated\n function.\n minimum_interval_seconds (Optional[int]): The minimum number of seconds that will elapse\n between sensor evaluations.\n description (Optional[str]): A human-readable description of the sensor.\n job (Optional[Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]]): The\n job to be executed when the sensor fires.\n jobs (Optional[Sequence[Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]]]):\n (experimental) A list of jobs to be executed when the sensor fires.\n default_status (DefaultSensorStatus): Whether the sensor starts as running or not. The default\n status can be overridden from the Dagster UI or via the GraphQL API.\n request_assets (Optional[AssetSelection]): (Experimental) an asset selection to launch a run\n for if the sensor condition is met. This can be provided instead of specifying a job.\n """\n check.opt_str_param(name, "name")\n\n if not isinstance(monitored_assets, AssetSelection) and not (\n isinstance(monitored_assets, collections.abc.Sequence)\n and all(isinstance(el, AssetKey) for el in monitored_assets)\n ):\n check.failed(\n "The value passed to monitored_assets param must be either an AssetSelection"\n f" or a Sequence of AssetKeys, but was a {type(monitored_assets)}"\n )\n\n def inner(fn: MultiAssetMaterializationFunction) -> MultiAssetSensorDefinition:\n check.callable_param(fn, "fn")\n sensor_name = name or fn.__name__\n\n sensor_def = MultiAssetSensorDefinition(\n name=sensor_name,\n monitored_assets=monitored_assets,\n job_name=job_name,\n asset_materialization_fn=fn,\n minimum_interval_seconds=minimum_interval_seconds,\n description=description,\n job=job,\n jobs=jobs,\n default_status=default_status,\n request_assets=request_assets,\n required_resource_keys=required_resource_keys,\n )\n update_wrapper(sensor_def, wrapped=fn)\n return sensor_def\n\n return inner
\n
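A small sketch of ``multi_asset_sensor`` monitoring two assets, assuming hypothetical asset keys and a ``downstream_job``; it requests a run only once both monitored assets have new materializations.

.. code-block:: python

    from dagster import AssetKey, RunRequest, multi_asset_sensor


    @multi_asset_sensor(
        monitored_assets=[AssetKey("asset_a"), AssetKey("asset_b")],
        job=downstream_job,
    )
    def both_assets_sensor(context):
        records = context.latest_materialization_records_by_key()
        if all(records.values()):
            # mark the observed materializations as consumed before requesting a run
            context.advance_all_cursors()
            return RunRequest()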
", "current_page_name": "_modules/dagster/_core/definitions/decorators/sensor_decorator", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.decorators.sensor_decorator"}}, "definitions_class": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.definitions_class

\nfrom typing import (\n    TYPE_CHECKING,\n    Any,\n    Dict,\n    Iterable,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Type,\n    Union,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import deprecated, experimental, public\nfrom dagster._config.pythonic_config import (\n    attach_resource_id_to_key_mapping,\n)\nfrom dagster._core.definitions.asset_checks import AssetChecksDefinition\nfrom dagster._core.definitions.asset_graph import InternalAssetGraph\nfrom dagster._core.definitions.events import AssetKey, CoercibleToAssetKey\nfrom dagster._core.definitions.executor_definition import ExecutorDefinition\nfrom dagster._core.definitions.logger_definition import LoggerDefinition\nfrom dagster._core.errors import DagsterInvariantViolationError\nfrom dagster._core.execution.build_resources import wrap_resources_for_execution\nfrom dagster._core.execution.with_resources import with_resources\nfrom dagster._core.executor.base import Executor\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._utils.cached_method import cached_method\n\nfrom .assets import AssetsDefinition, SourceAsset\nfrom .cacheable_assets import CacheableAssetsDefinition\nfrom .decorators import repository\nfrom .job_definition import JobDefinition, default_job_io_manager\nfrom .partitioned_schedule import UnresolvedPartitionedAssetScheduleDefinition\nfrom .repository_definition import (\n    SINGLETON_REPOSITORY_NAME,\n    PendingRepositoryDefinition,\n    RepositoryDefinition,\n)\nfrom .schedule_definition import ScheduleDefinition\nfrom .sensor_definition import SensorDefinition\nfrom .unresolved_asset_job_definition import UnresolvedAssetJobDefinition\n\nif TYPE_CHECKING:\n    from dagster._core.storage.asset_value_loader import AssetValueLoader\n\n\n
[docs]@public\n@experimental\ndef create_repository_using_definitions_args(\n name: str,\n assets: Optional[\n Iterable[Union[AssetsDefinition, SourceAsset, CacheableAssetsDefinition]]\n ] = None,\n schedules: Optional[\n Iterable[Union[ScheduleDefinition, UnresolvedPartitionedAssetScheduleDefinition]]\n ] = None,\n sensors: Optional[Iterable[SensorDefinition]] = None,\n jobs: Optional[Iterable[Union[JobDefinition, UnresolvedAssetJobDefinition]]] = None,\n resources: Optional[Mapping[str, Any]] = None,\n executor: Optional[Union[ExecutorDefinition, Executor]] = None,\n loggers: Optional[Mapping[str, LoggerDefinition]] = None,\n asset_checks: Optional[Iterable[AssetChecksDefinition]] = None,\n) -> Union[RepositoryDefinition, PendingRepositoryDefinition]:\n """Create a named repository using the same arguments as :py:class:`Definitions`. In older\n versions of Dagster, repositories were the mechanism for organizing assets, schedules, sensors,\n and jobs. There could be many repositories per code location. This was a complicated ontology but\n gave users a way to organize code locations that contained large numbers of heterogenous definitions.\n\n As a stopgap for those who both want to 1) use the new :py:class:`Definitions` API and 2) but still\n want multiple logical groups of assets in the same code location, we have introduced this function.\n\n Example usage:\n\n .. code-block:: python\n\n named_repo = create_repository_using_definitions_args(\n name="a_repo",\n assets=[asset_one, asset_two],\n schedules=[a_schedule],\n sensors=[a_sensor],\n jobs=[a_job],\n resources={\n "a_resource": some_resource,\n }\n )\n\n """\n return _create_repository_using_definitions_args(\n name=name,\n assets=assets,\n schedules=schedules,\n sensors=sensors,\n jobs=jobs,\n resources=resources,\n executor=executor,\n loggers=loggers,\n asset_checks=asset_checks,\n )
\n\n\nclass _AttachedObjects(NamedTuple):\n jobs: Iterable[Union[JobDefinition, UnresolvedAssetJobDefinition]]\n schedules: Iterable[Union[ScheduleDefinition, UnresolvedPartitionedAssetScheduleDefinition]]\n sensors: Iterable[SensorDefinition]\n\n\ndef _io_manager_needs_replacement(job: JobDefinition, resource_defs: Mapping[str, Any]) -> bool:\n """Explicitly replace the default IO manager in jobs that don't specify one, if a top-level\n I/O manager is provided to Definitions.\n """\n return (\n job.resource_defs.get("io_manager") == default_job_io_manager\n and "io_manager" in resource_defs\n )\n\n\ndef _jobs_which_will_have_io_manager_replaced(\n jobs: Optional[Iterable[Union[JobDefinition, UnresolvedAssetJobDefinition]]],\n resource_defs: Mapping[str, Any],\n) -> List[Union[JobDefinition, UnresolvedAssetJobDefinition]]:\n """Returns whether any jobs will have their I/O manager replaced by an `io_manager` override from\n the top-level `resource_defs` provided to `Definitions` in 1.3. We will warn users if this is\n the case.\n """\n jobs = jobs or []\n return [\n job\n for job in jobs\n if isinstance(job, JobDefinition) and _io_manager_needs_replacement(job, resource_defs)\n ]\n\n\ndef _attach_resources_to_jobs_and_instigator_jobs(\n jobs: Optional[Iterable[Union[JobDefinition, UnresolvedAssetJobDefinition]]],\n schedules: Optional[\n Iterable[Union[ScheduleDefinition, UnresolvedPartitionedAssetScheduleDefinition]]\n ],\n sensors: Optional[Iterable[SensorDefinition]],\n resource_defs: Mapping[str, Any],\n) -> _AttachedObjects:\n """Given a list of jobs, schedules, and sensors along with top-level resource definitions,\n attach the resource definitions to the jobs, schedules, and sensors which require them.\n """\n jobs = jobs or []\n schedules = schedules or []\n sensors = sensors or []\n\n # Add jobs in schedules and sensors as well\n jobs = [\n *jobs,\n *[\n schedule.job\n for schedule in schedules\n if isinstance(schedule, ScheduleDefinition)\n and schedule.has_loadable_target()\n and isinstance(schedule.job, (JobDefinition, UnresolvedAssetJobDefinition))\n ],\n *[\n job\n for sensor in sensors\n if sensor.has_loadable_targets()\n for job in sensor.jobs\n if isinstance(job, (JobDefinition, UnresolvedAssetJobDefinition))\n ],\n ]\n # Dedupe\n jobs = list({id(job): job for job in jobs}.values())\n\n # Find unsatisfied jobs\n unsatisfied_jobs = [\n job\n for job in jobs\n if isinstance(job, JobDefinition)\n and (\n job.is_missing_required_resources() or _io_manager_needs_replacement(job, resource_defs)\n )\n ]\n\n # Create a mapping of job id to a version of the job with the resource defs bound\n unsatisfied_job_to_resource_bound_job = {\n id(job): job.with_top_level_resources(\n {\n **resource_defs,\n **job.resource_defs,\n # special case for IO manager - the job-level IO manager does not take precedence\n # if it is the default and a top-level IO manager is provided\n **(\n {"io_manager": resource_defs["io_manager"]}\n if _io_manager_needs_replacement(job, resource_defs)\n else {}\n ),\n }\n )\n for job in jobs\n if job in unsatisfied_jobs\n }\n\n # Update all jobs to use the resource bound version\n jobs_with_resources = [\n unsatisfied_job_to_resource_bound_job[id(job)] if job in unsatisfied_jobs else job\n for job in jobs\n ]\n\n # Update all schedules and sensors to use the resource bound version\n updated_schedules = [\n (\n schedule.with_updated_job(unsatisfied_job_to_resource_bound_job[id(schedule.job)])\n if (\n isinstance(schedule, ScheduleDefinition)\n and 
schedule.has_loadable_target()\n and schedule.job in unsatisfied_jobs\n )\n else schedule\n )\n for schedule in schedules\n ]\n updated_sensors = [\n (\n sensor.with_updated_jobs(\n [\n (\n unsatisfied_job_to_resource_bound_job[id(job)]\n if job in unsatisfied_jobs\n else job\n )\n for job in sensor.jobs\n ]\n )\n if sensor.has_loadable_targets() and any(job in unsatisfied_jobs for job in sensor.jobs)\n else sensor\n )\n for sensor in sensors\n ]\n\n return _AttachedObjects(jobs_with_resources, updated_schedules, updated_sensors)\n\n\ndef _create_repository_using_definitions_args(\n name: str,\n assets: Optional[\n Iterable[Union[AssetsDefinition, SourceAsset, CacheableAssetsDefinition]]\n ] = None,\n schedules: Optional[\n Iterable[Union[ScheduleDefinition, UnresolvedPartitionedAssetScheduleDefinition]]\n ] = None,\n sensors: Optional[Iterable[SensorDefinition]] = None,\n jobs: Optional[Iterable[Union[JobDefinition, UnresolvedAssetJobDefinition]]] = None,\n resources: Optional[Mapping[str, Any]] = None,\n executor: Optional[Union[ExecutorDefinition, Executor]] = None,\n loggers: Optional[Mapping[str, LoggerDefinition]] = None,\n asset_checks: Optional[Iterable[AssetChecksDefinition]] = None,\n):\n check.opt_iterable_param(\n assets, "assets", (AssetsDefinition, SourceAsset, CacheableAssetsDefinition)\n )\n check.opt_iterable_param(\n schedules, "schedules", (ScheduleDefinition, UnresolvedPartitionedAssetScheduleDefinition)\n )\n check.opt_iterable_param(sensors, "sensors", SensorDefinition)\n check.opt_iterable_param(jobs, "jobs", (JobDefinition, UnresolvedAssetJobDefinition))\n\n check.opt_inst_param(executor, "executor", (ExecutorDefinition, Executor))\n executor_def = (\n executor\n if isinstance(executor, ExecutorDefinition) or executor is None\n else ExecutorDefinition.hardcoded_executor(executor)\n )\n\n # Generate a mapping from each top-level resource instance ID to its resource key\n resource_key_mapping = {id(v): k for k, v in resources.items()} if resources else {}\n\n # Provide this mapping to each resource instance so that it can be used to resolve\n # nested resources\n resources_with_key_mapping = (\n {\n k: attach_resource_id_to_key_mapping(v, resource_key_mapping)\n for k, v in resources.items()\n }\n if resources\n else {}\n )\n\n resource_defs = wrap_resources_for_execution(resources_with_key_mapping)\n\n check.opt_mapping_param(loggers, "loggers", key_type=str, value_type=LoggerDefinition)\n\n # Binds top-level resources to jobs and any jobs attached to schedules or sensors\n (\n jobs_with_resources,\n schedules_with_resources,\n sensors_with_resources,\n ) = _attach_resources_to_jobs_and_instigator_jobs(jobs, schedules, sensors, resource_defs)\n\n @repository(\n name=name,\n default_executor_def=executor_def,\n default_logger_defs=loggers,\n _top_level_resources=resource_defs,\n _resource_key_mapping=resource_key_mapping,\n )\n def created_repo():\n return [\n *with_resources(assets or [], resource_defs),\n *with_resources(asset_checks or [], resource_defs),\n *(schedules_with_resources),\n *(sensors_with_resources),\n *(jobs_with_resources),\n ]\n\n return created_repo\n\n\n@deprecated(\n breaking_version="2.0",\n additional_warn_text=(\n "Instantiations can be removed. Since it's behavior is now the default, this class is now a"\n " no-op."\n ),\n)\nclass BindResourcesToJobs(list):\n """Used to instruct Dagster to bind top-level resources to jobs and any jobs attached to schedules\n and sensors. Now deprecated since this behavior is the default.\n """\n\n\n
[docs]class Definitions:\n """A set of definitions explicitly available and loadable by Dagster tools.\n\n Parameters:\n assets (Optional[Iterable[Union[AssetsDefinition, SourceAsset, CacheableAssetsDefinition]]]):\n A list of assets. Assets can be created by annotating\n a function with :py:func:`@asset <asset>` or\n :py:func:`@observable_source_asset <observable_source_asset>`.\n Or they can by directly instantiating :py:class:`AssetsDefinition`,\n :py:class:`SourceAsset`, or :py:class:`CacheableAssetsDefinition`.\n\n asset_checks (Optional[Iterable[AssetChecksDefinition]]):\n A list of asset checks.\n\n schedules (Optional[Iterable[Union[ScheduleDefinition, UnresolvedPartitionedAssetScheduleDefinition]]]):\n List of schedules.\n\n sensors (Optional[Iterable[SensorDefinition]]):\n List of sensors, typically created with :py:func:`@sensor <sensor>`.\n\n jobs (Optional[Iterable[Union[JobDefinition, UnresolvedAssetJobDefinition]]]):\n List of jobs. Typically created with :py:func:`define_asset_job <define_asset_job>`\n or with :py:func:`@job <job>` for jobs defined in terms of ops directly.\n Jobs created with :py:func:`@job <job>` must already have resources bound\n at job creation time. They do not respect the `resources` argument here.\n\n resources (Optional[Mapping[str, Any]]): Dictionary of resources to bind to assets.\n The resources dictionary takes raw Python objects,\n not just instances of :py:class:`ResourceDefinition`. If that raw object inherits from\n :py:class:`IOManager`, it gets coerced to an :py:class:`IOManagerDefinition`.\n Any other object is coerced to a :py:class:`ResourceDefinition`.\n These resources will be automatically bound\n to any assets passed to this Definitions instance using\n :py:func:`with_resources <with_resources>`. Assets passed to Definitions with\n resources already bound using :py:func:`with_resources <with_resources>` will\n override this dictionary.\n\n executor (Optional[Union[ExecutorDefinition, Executor]]):\n Default executor for jobs. Individual jobs can override this and define their own executors\n by setting the executor on :py:func:`@job <job>` or :py:func:`define_asset_job <define_asset_job>`\n explicitly. This executor will also be used for materializing assets directly\n outside of the context of jobs. If an :py:class:`Executor` is passed, it is coerced into\n an :py:class:`ExecutorDefinition`.\n\n loggers (Optional[Mapping[str, LoggerDefinition]):\n Default loggers for jobs. Individual jobs\n can define their own loggers by setting them explictly.\n\n Example usage:\n\n .. code-block:: python\n\n defs = Definitions(\n assets=[asset_one, asset_two],\n schedules=[a_schedule],\n sensors=[a_sensor],\n jobs=[a_job],\n resources={\n "a_resource": some_resource,\n },\n asset_checks=[asset_one_check_one]\n )\n\n Dagster separates user-defined code from system tools such the web server and\n the daemon. Rather than loading code directly into process, a tool such as the\n webserver interacts with user-defined code over a serialization boundary.\n\n These tools must be able to locate and load this code when they start. 
Via CLI\n arguments or config, they specify a Python module to inspect.\n\n A Python module is loadable by Dagster tools if there is a top-level variable\n that is an instance of :py:class:`Definitions`.\n\n Before the introduction of :py:class:`Definitions`,\n :py:func:`@repository <repository>` was the API for organizing definitions.\n :py:class:`Definitions` provides a few conveniences for dealing with resources\n that do not apply to old-style :py:func:`@repository <repository>` declarations:\n\n * It takes a dictionary of top-level resources which are automatically bound\n (via :py:func:`with_resources <with_resources>`) to any asset passed to it.\n If you need to apply different resources to different assets, use legacy\n :py:func:`@repository <repository>` and use\n :py:func:`with_resources <with_resources>` as before.\n * The resources dictionary takes raw Python objects, not just instances\n of :py:class:`ResourceDefinition`. If that raw object inherits from\n :py:class:`IOManager`, it gets coerced to an :py:class:`IOManagerDefinition`.\n Any other object is coerced to a :py:class:`ResourceDefinition`.\n """\n\n def __init__(\n self,\n assets: Optional[\n Iterable[Union[AssetsDefinition, SourceAsset, CacheableAssetsDefinition]]\n ] = None,\n schedules: Optional[\n Iterable[Union[ScheduleDefinition, UnresolvedPartitionedAssetScheduleDefinition]]\n ] = None,\n sensors: Optional[Iterable[SensorDefinition]] = None,\n jobs: Optional[Iterable[Union[JobDefinition, UnresolvedAssetJobDefinition]]] = None,\n resources: Optional[Mapping[str, Any]] = None,\n executor: Optional[Union[ExecutorDefinition, Executor]] = None,\n loggers: Optional[Mapping[str, LoggerDefinition]] = None,\n asset_checks: Optional[Iterable[AssetChecksDefinition]] = None,\n ):\n self._created_pending_or_normal_repo = _create_repository_using_definitions_args(\n name=SINGLETON_REPOSITORY_NAME,\n assets=assets,\n schedules=schedules,\n sensors=sensors,\n jobs=jobs,\n resources=resources,\n executor=executor,\n loggers=loggers,\n asset_checks=asset_checks,\n )\n\n
[docs] @public\n def get_job_def(self, name: str) -> JobDefinition:\n """Get a job definition by name. If you passed in an :py:class:`UnresolvedAssetJobDefinition`\n (the return value of :py:func:`define_asset_job`), it will be resolved to a :py:class:`JobDefinition` when returned\n from this function.\n """\n check.str_param(name, "name")\n return self.get_repository_def().get_job(name)
\n\n
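For illustration, a minimal sketch of resolving an asset job by name via ``get_job_def`` (the asset and job names here are hypothetical, not taken from the source above):

.. code-block:: python

    from dagster import Definitions, asset, define_asset_job

    @asset
    def my_asset():
        return 1

    # define_asset_job returns an UnresolvedAssetJobDefinition
    all_assets_job = define_asset_job(name="all_assets_job", selection="*")

    defs = Definitions(assets=[my_asset], jobs=[all_assets_job])

    # resolved to a JobDefinition when retrieved by name
    job_def = defs.get_job_def("all_assets_job")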
[docs] @public\n def get_sensor_def(self, name: str) -> SensorDefinition:\n """Get a sensor definition by name."""\n check.str_param(name, "name")\n return self.get_repository_def().get_sensor_def(name)
\n\n
[docs] @public\n def get_schedule_def(self, name: str) -> ScheduleDefinition:\n """Get a schedule definition by name."""\n check.str_param(name, "name")\n return self.get_repository_def().get_schedule_def(name)
\n\n
[docs] @public\n def load_asset_value(\n self,\n asset_key: CoercibleToAssetKey,\n *,\n python_type: Optional[Type] = None,\n instance: Optional[DagsterInstance] = None,\n partition_key: Optional[str] = None,\n metadata: Optional[Dict[str, Any]] = None,\n ) -> object:\n """Load the contents of an asset as a Python object.\n\n Invokes `load_input` on the :py:class:`IOManager` associated with the asset.\n\n If you want to load the values of multiple assets, it's more efficient to use\n :py:meth:`~dagster.Definitions.get_asset_value_loader`, which avoids spinning up\n resources separately for each asset.\n\n Args:\n asset_key (Union[AssetKey, Sequence[str], str]): The key of the asset to load.\n python_type (Optional[Type]): The python type to load the asset as. This is what will\n be returned inside `load_input` by `context.dagster_type.typing_type`.\n partition_key (Optional[str]): The partition of the asset to load.\n metadata (Optional[Dict[str, Any]]): Input metadata to pass to the :py:class:`IOManager`\n (is equivalent to setting the metadata argument in `In` or `AssetIn`).\n\n Returns:\n The contents of an asset as a Python object.\n """\n return self.get_repository_def().load_asset_value(\n asset_key=asset_key,\n python_type=python_type,\n instance=instance,\n partition_key=partition_key,\n metadata=metadata,\n )
\n\n
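As a hedged usage sketch of ``load_asset_value`` (the asset name and the ephemeral-instance setup are illustrative assumptions), the value is read back through the asset's I/O manager, so the asset must already be materialized on the instance being queried:

.. code-block:: python

    from dagster import DagsterInstance, Definitions, asset, materialize

    @asset
    def numbers():
        return [1, 2, 3]

    defs = Definitions(assets=[numbers])

    # materialize first so the default I/O manager has something to load
    instance = DagsterInstance.ephemeral()
    materialize([numbers], instance=instance)

    value = defs.load_asset_value("numbers", instance=instance, python_type=list)
    assert value == [1, 2, 3]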
[docs] @public\n def get_asset_value_loader(\n self, instance: Optional[DagsterInstance] = None\n ) -> "AssetValueLoader":\n """Returns an object that can load the contents of assets as Python objects.\n\n Invokes `load_input` on the :py:class:`IOManager` associated with the assets. Avoids\n spinning up resources separately for each asset.\n\n Usage:\n\n .. code-block:: python\n\n with defs.get_asset_value_loader() as loader:\n asset1 = loader.load_asset_value("asset1")\n asset2 = loader.load_asset_value("asset2")\n """\n return self.get_repository_def().get_asset_value_loader(\n instance=instance,\n )
\n\n def get_all_job_defs(self) -> Sequence[JobDefinition]:\n """Get all the Job definitions in the code location."""\n return self.get_repository_def().get_all_jobs()\n\n def has_implicit_global_asset_job_def(self) -> bool:\n return self.get_repository_def().has_implicit_global_asset_job_def()\n\n def get_implicit_global_asset_job_def(self) -> JobDefinition:\n """A useful convenience method when there is a single defined global asset job.\n This occurs when all assets in the code location use a single partitioning scheme.\n If there are multiple partitioning schemes, you must use get_implicit_job_def_for_assets\n instead to access the correct implicit asset job.\n """\n return self.get_repository_def().get_implicit_global_asset_job_def()\n\n def get_implicit_job_def_for_assets(\n self, asset_keys: Iterable[AssetKey]\n ) -> Optional[JobDefinition]:\n return self.get_repository_def().get_implicit_job_def_for_assets(asset_keys)\n\n def get_assets_def(self, key: CoercibleToAssetKey) -> AssetsDefinition:\n asset_key = AssetKey.from_coercible(key)\n for assets_def in self.get_asset_graph().assets:\n if asset_key in assets_def.keys:\n return assets_def\n\n raise DagsterInvariantViolationError(f"Could not find asset {asset_key}")\n\n @cached_method\n def get_repository_def(self) -> RepositoryDefinition:\n """Definitions is implemented by wrapping RepositoryDefinition. Get that underlying object\n in order to access any functionality that is not exposed on Definitions. This method\n also resolves a PendingRepositoryDefinition to a RepositoryDefinition.\n """\n return (\n self._created_pending_or_normal_repo.compute_repository_definition()\n if isinstance(self._created_pending_or_normal_repo, PendingRepositoryDefinition)\n else self._created_pending_or_normal_repo\n )\n\n def get_inner_repository_for_loading_process(\n self,\n ) -> Union[RepositoryDefinition, PendingRepositoryDefinition]:\n """This method is used internally to access the inner repository during the loading process\n at CLI entry points. We explicitly do not want to resolve the pending repo because the entire\n point is to defer that resolution until later.\n """\n return self._created_pending_or_normal_repo\n\n def get_asset_graph(self) -> InternalAssetGraph:\n """Get the AssetGraph for this set of definitions."""\n return self.get_repository_def().asset_graph
\n
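As a sketch of the resource coercion described in the ``Definitions`` docstring (the resource key and class are hypothetical), a plain Python object supplied in ``resources`` is wrapped into a :py:class:`ResourceDefinition` and bound to the assets that request it:

.. code-block:: python

    from dagster import Definitions, asset

    class PlainClient:
        """Not a ResourceDefinition; Definitions coerces it into one."""

        def fetch(self):
            return 42

    @asset(required_resource_keys={"client"})
    def answer(context):
        # the coerced resource is available under the key it was registered with
        return context.resources.client.fetch()

    defs = Definitions(assets=[answer], resources={"client": PlainClient()})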
", "current_page_name": "_modules/dagster/_core/definitions/definitions_class", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.definitions_class"}, "dependency": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.dependency

\nfrom abc import ABC, abstractmethod\nfrom collections import defaultdict\nfrom enum import Enum\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    DefaultDict,\n    Dict,\n    Iterable,\n    Iterator,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n    Type,\n    Union,\n    cast,\n)\n\nfrom typing_extensions import TypeAlias, TypeVar\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, public\nfrom dagster._core.definitions.policy import RetryPolicy\nfrom dagster._core.errors import DagsterInvalidDefinitionError\nfrom dagster._serdes.serdes import (\n    whitelist_for_serdes,\n)\nfrom dagster._utils import hash_collection\n\nfrom .hook_definition import HookDefinition\nfrom .input import FanInInputPointer, InputDefinition, InputMapping, InputPointer\nfrom .output import OutputDefinition\nfrom .utils import DEFAULT_OUTPUT, struct_to_string, validate_tags\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.op_definition import OpDefinition\n\n    from .asset_layer import AssetLayer\n    from .composition import MappedInputPlaceholder\n    from .graph_definition import GraphDefinition\n    from .node_definition import NodeDefinition\n    from .resource_requirement import ResourceRequirement\n\nT_DependencyKey = TypeVar("T_DependencyKey", str, "NodeInvocation")\nDependencyMapping: TypeAlias = Mapping[T_DependencyKey, Mapping[str, "IDependencyDefinition"]]\n\n\n
[docs]class NodeInvocation(\n NamedTuple(\n "Node",\n [\n ("name", PublicAttr[str]),\n ("alias", PublicAttr[Optional[str]]),\n ("tags", PublicAttr[Mapping[str, Any]]),\n ("hook_defs", PublicAttr[AbstractSet[HookDefinition]]),\n ("retry_policy", PublicAttr[Optional[RetryPolicy]]),\n ],\n )\n):\n """Identifies an instance of a node in a graph dependency structure.\n\n Args:\n name (str): Name of the node of which this is an instance.\n alias (Optional[str]): Name specific to this instance of the node. Necessary when there are\n multiple instances of the same node.\n tags (Optional[Dict[str, Any]]): Optional tags values to extend or override those\n set on the node definition.\n hook_defs (Optional[AbstractSet[HookDefinition]]): A set of hook definitions applied to the\n node instance.\n\n Examples:\n In general, users should prefer not to construct this class directly or use the\n :py:class:`JobDefinition` API that requires instances of this class. Instead, use the\n :py:func:`@job <job>` API:\n\n .. code-block:: python\n\n from dagster import job\n\n @job\n def my_job():\n other_name = some_op.alias('other_name')\n some_graph(other_name(some_op))\n\n """\n\n def __new__(\n cls,\n name: str,\n alias: Optional[str] = None,\n tags: Optional[Mapping[str, str]] = None,\n hook_defs: Optional[AbstractSet[HookDefinition]] = None,\n retry_policy: Optional[RetryPolicy] = None,\n ):\n return super().__new__(\n cls,\n name=check.str_param(name, "name"),\n alias=check.opt_str_param(alias, "alias"),\n tags=check.opt_mapping_param(tags, "tags", value_type=str, key_type=str),\n hook_defs=check.opt_set_param(hook_defs, "hook_defs", of_type=HookDefinition),\n retry_policy=check.opt_inst_param(retry_policy, "retry_policy", RetryPolicy),\n )\n\n # Needs to be hashable because this class is used as a key in dependencies dicts\n def __hash__(self) -> int:\n if not hasattr(self, "_hash"):\n self._hash = hash_collection(self)\n return self._hash
\n\n\nclass Node(ABC):\n """Node invocation within a graph. Identified by its name inside the graph."""\n\n name: str\n definition: "NodeDefinition"\n graph_definition: "GraphDefinition"\n _additional_tags: Mapping[str, str]\n _hook_defs: AbstractSet[HookDefinition]\n _retry_policy: Optional[RetryPolicy]\n _inputs: Mapping[str, "NodeInput"]\n _outputs: Mapping[str, "NodeOutput"]\n\n def __init__(\n self,\n name: str,\n definition: "NodeDefinition",\n graph_definition: "GraphDefinition",\n tags: Optional[Mapping[str, str]] = None,\n hook_defs: Optional[AbstractSet[HookDefinition]] = None,\n retry_policy: Optional[RetryPolicy] = None,\n ):\n from .graph_definition import GraphDefinition\n from .node_definition import NodeDefinition\n\n self.name = check.str_param(name, "name")\n self.definition = check.inst_param(definition, "definition", NodeDefinition)\n self.graph_definition = check.inst_param(\n graph_definition,\n "graph_definition",\n GraphDefinition,\n )\n self._additional_tags = validate_tags(tags)\n self._hook_defs = check.opt_set_param(hook_defs, "hook_defs", of_type=HookDefinition)\n self._retry_policy = check.opt_inst_param(retry_policy, "retry_policy", RetryPolicy)\n\n self._inputs = {\n name: NodeInput(self, input_def)\n for name, input_def in self.definition.input_dict.items()\n }\n self._outputs = {\n name: NodeOutput(self, output_def)\n for name, output_def in self.definition.output_dict.items()\n }\n\n def inputs(self) -> Iterable["NodeInput"]:\n return self._inputs.values()\n\n def outputs(self) -> Iterable["NodeOutput"]:\n return self._outputs.values()\n\n def get_input(self, name: str) -> "NodeInput":\n check.str_param(name, "name")\n return self._inputs[name]\n\n def get_output(self, name: str) -> "NodeOutput":\n check.str_param(name, "name")\n return self._outputs[name]\n\n def has_input(self, name: str) -> bool:\n return self.definition.has_input(name)\n\n def input_def_named(self, name: str) -> InputDefinition:\n return self.definition.input_def_named(name)\n\n def has_output(self, name: str) -> bool:\n return self.definition.has_output(name)\n\n def output_def_named(self, name: str) -> OutputDefinition:\n return self.definition.output_def_named(name)\n\n @property\n def input_dict(self) -> Mapping[str, InputDefinition]:\n return self.definition.input_dict\n\n @property\n def output_dict(self) -> Mapping[str, OutputDefinition]:\n return self.definition.output_dict\n\n @property\n def tags(self) -> Mapping[str, str]:\n return {**self.definition.tags, **self._additional_tags}\n\n def container_maps_input(self, input_name: str) -> bool:\n return (\n self.graph_definition.input_mapping_for_pointer(InputPointer(self.name, input_name))\n is not None\n )\n\n def container_mapped_input(self, input_name: str) -> InputMapping:\n mapping = self.graph_definition.input_mapping_for_pointer(\n InputPointer(self.name, input_name)\n )\n if mapping is None:\n check.failed(\n f"container does not map input {input_name}, check container_maps_input first"\n )\n return mapping\n\n def container_maps_fan_in_input(self, input_name: str, fan_in_index: int) -> bool:\n return (\n self.graph_definition.input_mapping_for_pointer(\n FanInInputPointer(self.name, input_name, fan_in_index)\n )\n is not None\n )\n\n def container_mapped_fan_in_input(self, input_name: str, fan_in_index: int) -> InputMapping:\n mapping = self.graph_definition.input_mapping_for_pointer(\n FanInInputPointer(self.name, input_name, fan_in_index)\n )\n if mapping is None:\n check.failed(\n f"container does not map fan-in 
{input_name} idx {fan_in_index}, check "\n "container_maps_fan_in_input first"\n )\n\n return mapping\n\n @property\n def hook_defs(self) -> AbstractSet[HookDefinition]:\n return self._hook_defs\n\n @property\n def retry_policy(self) -> Optional[RetryPolicy]:\n return self._retry_policy\n\n @abstractmethod\n def describe_node(self) -> str: ...\n\n @abstractmethod\n def get_resource_requirements(\n self,\n outer_container: "GraphDefinition",\n parent_handle: Optional["NodeHandle"] = None,\n asset_layer: Optional["AssetLayer"] = None,\n ) -> Iterator["ResourceRequirement"]: ...\n\n\nclass GraphNode(Node):\n definition: "GraphDefinition"\n\n def __init__(\n self,\n name: str,\n definition: "GraphDefinition",\n graph_definition: "GraphDefinition",\n tags: Optional[Mapping[str, str]] = None,\n hook_defs: Optional[AbstractSet[HookDefinition]] = None,\n retry_policy: Optional[RetryPolicy] = None,\n ):\n from .graph_definition import GraphDefinition\n\n check.inst_param(definition, "definition", GraphDefinition)\n super().__init__(name, definition, graph_definition, tags, hook_defs, retry_policy)\n\n def get_resource_requirements(\n self,\n outer_container: "GraphDefinition",\n parent_handle: Optional["NodeHandle"] = None,\n asset_layer: Optional["AssetLayer"] = None,\n ) -> Iterator["ResourceRequirement"]:\n cur_node_handle = NodeHandle(self.name, parent_handle)\n\n for node in self.definition.node_dict.values():\n yield from node.get_resource_requirements(\n asset_layer=asset_layer,\n outer_container=self.definition,\n parent_handle=cur_node_handle,\n )\n\n def describe_node(self) -> str:\n return f"graph '{self.name}'"\n\n\nclass OpNode(Node):\n definition: "OpDefinition"\n\n def __init__(\n self,\n name: str,\n definition: "OpDefinition",\n graph_definition: "GraphDefinition",\n tags: Optional[Mapping[str, str]] = None,\n hook_defs: Optional[AbstractSet[HookDefinition]] = None,\n retry_policy: Optional[RetryPolicy] = None,\n ):\n from .op_definition import OpDefinition\n\n check.inst_param(definition, "definition", OpDefinition)\n super().__init__(name, definition, graph_definition, tags, hook_defs, retry_policy)\n\n def get_resource_requirements(\n self,\n outer_container: "GraphDefinition",\n parent_handle: Optional["NodeHandle"] = None,\n asset_layer: Optional["AssetLayer"] = None,\n ) -> Iterator["ResourceRequirement"]:\n from .resource_requirement import InputManagerRequirement\n\n cur_node_handle = NodeHandle(self.name, parent_handle)\n\n for requirement in self.definition.get_resource_requirements(\n (cur_node_handle, asset_layer)\n ):\n # If requirement is a root input manager requirement, but the corresponding node has an upstream output, then ignore the requirement.\n if (\n isinstance(requirement, InputManagerRequirement)\n and outer_container.dependency_structure.has_deps(\n NodeInput(self, self.definition.input_def_named(requirement.input_name))\n )\n and requirement.root_input\n ):\n continue\n yield requirement\n for hook_def in self.hook_defs:\n yield from hook_def.get_resource_requirements(self.describe_node())\n\n def describe_node(self) -> str:\n return f"op '{self.name}'"\n\n\n@whitelist_for_serdes(storage_name="SolidHandle")\nclass NodeHandle(NamedTuple("_NodeHandle", [("name", str), ("parent", Optional["NodeHandle"])])):\n """A structured object to identify nodes in the potentially recursive graph structure."""\n\n def __new__(cls, name: str, parent: Optional["NodeHandle"]):\n return super(NodeHandle, cls).__new__(\n cls,\n check.str_param(name, "name"),\n 
check.opt_inst_param(parent, "parent", NodeHandle),\n )\n\n def __str__(self):\n return self.to_string()\n\n @property\n def root(self):\n if self.parent:\n return self.parent.root\n else:\n return self\n\n @property\n def path(self) -> Sequence[str]:\n """Return a list representation of the handle.\n\n Inverse of NodeHandle.from_path.\n\n Returns:\n List[str]:\n """\n path: List[str] = []\n cur = self\n while cur:\n path.append(cur.name)\n cur = cur.parent\n path.reverse()\n return path\n\n def to_string(self) -> str:\n """Return a unique string representation of the handle.\n\n Inverse of NodeHandle.from_string.\n """\n return self.parent.to_string() + "." + self.name if self.parent else self.name\n\n def is_or_descends_from(self, handle: "NodeHandle") -> bool:\n """Check if the handle is or descends from another handle.\n\n Args:\n handle (NodeHandle): The handle to check against.\n\n Returns:\n bool:\n """\n check.inst_param(handle, "handle", NodeHandle)\n\n for idx in range(len(handle.path)):\n if idx >= len(self.path):\n return False\n if self.path[idx] != handle.path[idx]:\n return False\n return True\n\n def pop(self, ancestor: "NodeHandle") -> Optional["NodeHandle"]:\n """Return a copy of the handle with some of its ancestors pruned.\n\n Args:\n ancestor (NodeHandle): Handle to an ancestor of the current handle.\n\n Returns:\n NodeHandle:\n\n Example:\n .. code-block:: python\n\n handle = NodeHandle('baz', NodeHandle('bar', NodeHandle('foo', None)))\n ancestor = NodeHandle('bar', NodeHandle('foo', None))\n assert handle.pop(ancestor) == NodeHandle('baz', None)\n """\n check.inst_param(ancestor, "ancestor", NodeHandle)\n check.invariant(\n self.is_or_descends_from(ancestor),\n f"Handle {self.to_string()} does not descend from {ancestor.to_string()}",\n )\n\n return NodeHandle.from_path(self.path[len(ancestor.path) :])\n\n def with_ancestor(self, ancestor: Optional["NodeHandle"]) -> "NodeHandle":\n """Returns a copy of the handle with an ancestor grafted on.\n\n Args:\n ancestor (NodeHandle): Handle to the new ancestor.\n\n Returns:\n NodeHandle:\n\n Example:\n .. 
code-block:: python\n\n handle = NodeHandle('baz', NodeHandle('bar', NodeHandle('foo', None)))\n ancestor = NodeHandle('quux' None)\n assert handle.with_ancestor(ancestor) == NodeHandle(\n 'baz', NodeHandle('bar', NodeHandle('foo', NodeHandle('quux', None)))\n )\n """\n check.opt_inst_param(ancestor, "ancestor", NodeHandle)\n\n return NodeHandle.from_path([*(ancestor.path if ancestor else []), *self.path])\n\n @staticmethod\n def from_path(path: Sequence[str]) -> "NodeHandle":\n check.sequence_param(path, "path", of_type=str)\n\n cur: Optional["NodeHandle"] = None\n _path = list(path)\n while len(_path) > 0:\n cur = NodeHandle(name=_path.pop(0), parent=cur)\n\n if cur is None:\n check.failed(f"Invalid handle path {path}")\n\n return cur\n\n @staticmethod\n def from_string(handle_str: str) -> "NodeHandle":\n check.str_param(handle_str, "handle_str")\n\n path = handle_str.split(".")\n return NodeHandle.from_path(path)\n\n @classmethod\n def from_dict(cls, dict_repr: Mapping[str, Any]) -> "NodeHandle":\n """This method makes it possible to load a potentially nested NodeHandle after a\n roundtrip through json.loads(json.dumps(NodeHandle._asdict())).\n """\n check.dict_param(dict_repr, "dict_repr", key_type=str)\n check.invariant(\n "name" in dict_repr, "Dict representation of NodeHandle must have a 'name' key"\n )\n check.invariant(\n "parent" in dict_repr, "Dict representation of NodeHandle must have a 'parent' key"\n )\n\n if isinstance(dict_repr["parent"], (list, tuple)):\n parent = NodeHandle.from_dict(\n {\n "name": dict_repr["parent"][0],\n "parent": dict_repr["parent"][1],\n }\n )\n else:\n parent = dict_repr["parent"]\n\n return NodeHandle(name=dict_repr["name"], parent=parent)\n\n\nclass NodeInputHandle(\n NamedTuple("_NodeInputHandle", [("node_handle", NodeHandle), ("input_name", str)])\n):\n """A structured object to uniquely identify inputs in the potentially recursive graph structure."""\n\n\nclass NodeOutputHandle(\n NamedTuple("_NodeOutputHandle", [("node_handle", NodeHandle), ("output_name", str)])\n):\n """A structured object to uniquely identify outputs in the potentially recursive graph structure."""\n\n\nclass NodeInput(NamedTuple("_NodeInput", [("node", Node), ("input_def", InputDefinition)])):\n def __new__(cls, node: Node, input_def: InputDefinition):\n return super(NodeInput, cls).__new__(\n cls,\n check.inst_param(node, "node", Node),\n check.inst_param(input_def, "input_def", InputDefinition),\n )\n\n def _inner_str(self) -> str:\n return struct_to_string(\n "NodeInput",\n node_name=self.node.name,\n input_name=self.input_def.name,\n )\n\n def __str__(self):\n return self._inner_str()\n\n def __repr__(self):\n return self._inner_str()\n\n def __hash__(self):\n return hash((self.node.name, self.input_def.name))\n\n def __eq__(self, other: object) -> bool:\n return (\n isinstance(other, NodeInput)\n and self.node.name == other.node.name\n and self.input_def.name == other.input_def.name\n )\n\n @property\n def node_name(self) -> str:\n return self.node.name\n\n @property\n def input_name(self) -> str:\n return self.input_def.name\n\n\nclass NodeOutput(NamedTuple("_NodeOutput", [("node", Node), ("output_def", OutputDefinition)])):\n def __new__(cls, node: Node, output_def: OutputDefinition):\n return super(NodeOutput, cls).__new__(\n cls,\n check.inst_param(node, "node", Node),\n check.inst_param(output_def, "output_def", OutputDefinition),\n )\n\n def _inner_str(self) -> str:\n return struct_to_string(\n "NodeOutput",\n node_name=self.node.name,\n 
output_name=self.output_def.name,\n )\n\n def __str__(self):\n return self._inner_str()\n\n def __repr__(self):\n return self._inner_str()\n\n def __hash__(self) -> int:\n return hash((self.node.name, self.output_def.name))\n\n def __eq__(self, other: Any) -> bool:\n return self.node.name == other.node.name and self.output_def.name == other.output_def.name\n\n def describe(self) -> str:\n return f"{self.node_name}:{self.output_def.name}"\n\n @property\n def node_name(self) -> str:\n return self.node.name\n\n @property\n def is_dynamic(self) -> bool:\n return self.output_def.is_dynamic\n\n @property\n def output_name(self) -> str:\n return self.output_def.name\n\n\nclass DependencyType(Enum):\n DIRECT = "DIRECT"\n FAN_IN = "FAN_IN"\n DYNAMIC_COLLECT = "DYNAMIC_COLLECT"\n\n\nclass IDependencyDefinition(ABC):\n @abstractmethod\n def get_node_dependencies(self) -> Sequence["DependencyDefinition"]:\n pass\n\n @abstractmethod\n def is_fan_in(self) -> bool:\n """The result passed to the corresponding input will be a List made from different node outputs."""\n\n\n
[docs]class DependencyDefinition(\n NamedTuple(\n "_DependencyDefinition", [("node", str), ("output", str), ("description", Optional[str])]\n ),\n IDependencyDefinition,\n):\n """Represents an edge in the DAG of nodes (ops or graphs) forming a job.\n\n This object is used at the leaves of a dictionary structure that represents the complete\n dependency structure of a job whose keys represent the dependent node and dependent\n input, so this object only contains information about the dependee.\n\n Concretely, if the input named 'input' of op_b depends on the output named 'result' of\n op_a, and the input named 'input' of op_c depends on the output named 'other_result' of\n graph_a, the structure will look as follows:\n\n .. code-block:: python\n\n dependency_structure = {\n 'op_b': {\n 'input': DependencyDefinition('op_a', 'result')\n },\n 'op_c': {\n 'input': DependencyDefinition('graph_a', 'other_result')\n }\n }\n\n In general, users should prefer not to construct this class directly or use the\n :py:class:`JobDefinition` API that requires instances of this class. Instead, use the\n :py:func:`@job <job>` API:\n\n .. code-block:: python\n\n @job\n def the_job():\n node_b(node_a())\n\n\n Args:\n node (str): The name of the node (op or graph) that is depended on, that is, from which the value\n passed between the two nodes originates.\n output (Optional[str]): The name of the output that is depended on. (default: "result")\n description (Optional[str]): Human-readable description of this dependency.\n """\n\n def __new__(\n cls,\n node: str,\n output: str = DEFAULT_OUTPUT,\n description: Optional[str] = None,\n ):\n return super(DependencyDefinition, cls).__new__(\n cls,\n check.str_param(node, "node"),\n check.str_param(output, "output"),\n check.opt_str_param(description, "description"),\n )\n\n def get_node_dependencies(self) -> Sequence["DependencyDefinition"]:\n return [self]\n\n
[docs] @public\n def is_fan_in(self) -> bool:\n """Return True if the dependency is fan-in (always False for DependencyDefinition)."""\n return False
\n\n def get_op_dependencies(self) -> Sequence["DependencyDefinition"]:\n return [self]
\n\n\n
[docs]class MultiDependencyDefinition(\n NamedTuple(\n "_MultiDependencyDefinition",\n [\n (\n "dependencies",\n PublicAttr[Sequence[Union[DependencyDefinition, Type["MappedInputPlaceholder"]]]],\n )\n ],\n ),\n IDependencyDefinition,\n):\n """Represents a fan-in edge in the DAG of op instances forming a job.\n\n This object is used only when an input of type ``List[T]`` is assembled by fanning-in multiple\n upstream outputs of type ``T``.\n\n This object is used at the leaves of a dictionary structure that represents the complete\n dependency structure of a job whose keys represent the dependent ops or graphs and dependent\n input, so this object only contains information about the dependee.\n\n Concretely, if the input named 'input' of op_c depends on the outputs named 'result' of\n op_a and op_b, this structure will look as follows:\n\n .. code-block:: python\n\n dependency_structure = {\n 'op_c': {\n 'input': MultiDependencyDefinition(\n [\n DependencyDefinition('op_a', 'result'),\n DependencyDefinition('op_b', 'result')\n ]\n )\n }\n }\n\n In general, users should prefer not to construct this class directly or use the\n :py:class:`JobDefinition` API that requires instances of this class. Instead, use the\n :py:func:`@job <job>` API:\n\n .. code-block:: python\n\n @job\n def the_job():\n op_c(op_a(), op_b())\n\n Args:\n dependencies (List[Union[DependencyDefinition, Type[MappedInputPlaceHolder]]]): List of\n upstream dependencies fanned in to this input.\n """\n\n def __new__(\n cls,\n dependencies: Sequence[Union[DependencyDefinition, Type["MappedInputPlaceholder"]]],\n ):\n from .composition import MappedInputPlaceholder\n\n deps = check.sequence_param(dependencies, "dependencies")\n seen = {}\n for dep in deps:\n if isinstance(dep, DependencyDefinition):\n key = dep.node + ":" + dep.output\n if key in seen:\n raise DagsterInvalidDefinitionError(\n f'Duplicate dependencies on node "{dep.node}" output "{dep.output}" '\n "used in the same MultiDependencyDefinition."\n )\n seen[key] = True\n elif dep is MappedInputPlaceholder:\n pass\n else:\n check.failed(f"Unexpected dependencies entry {dep}")\n\n return super(MultiDependencyDefinition, cls).__new__(cls, deps)\n\n
[docs] @public\n def get_node_dependencies(self) -> Sequence[DependencyDefinition]:\n """Return the list of :py:class:`DependencyDefinition` contained by this object."""\n return [dep for dep in self.dependencies if isinstance(dep, DependencyDefinition)]
\n\n
[docs] @public\n def is_fan_in(self) -> bool:\n """Return `True` if the dependency is fan-in (always True for MultiDependencyDefinition)."""\n return True
\n\n
[docs] @public\n def get_dependencies_and_mappings(\n self,\n ) -> Sequence[Union[DependencyDefinition, Type["MappedInputPlaceholder"]]]:\n """Return the combined list of dependencies contained by this object, including :py:class:`DependencyDefinition` and :py:class:`MappedInputPlaceholder` objects."""\n return self.dependencies
\n\n\nclass BlockingAssetChecksDependencyDefinition(\n IDependencyDefinition,\n NamedTuple(\n "_BlockingAssetChecksDependencyDefinition",\n [\n (\n "asset_check_dependencies",\n Sequence[DependencyDefinition],\n ),\n ("other_dependency", Optional[DependencyDefinition]),\n ],\n ),\n):\n """An input that depends on a set of outputs that correspond to upstream asset checks, and also\n optionally depends on a single upstream output that does not correspond to an asset check.\n\n We model this with a different kind of DependencyDefinition than MultiDependencyDefinition,\n because we treat the value that's passed to the input parameter differently: we ignore the asset\n check dependencies and only pass a single value, instead of a fanned-in list.\n """\n\n @public\n def get_node_dependencies(self) -> Sequence[DependencyDefinition]:\n """Return the list of :py:class:`DependencyDefinition` contained by this object."""\n if self.other_dependency:\n return [*self.asset_check_dependencies, self.other_dependency]\n else:\n return self.asset_check_dependencies\n\n @public\n def is_fan_in(self) -> bool:\n return False\n\n @public\n def get_dependencies_and_mappings(\n self,\n ) -> Sequence[Union[DependencyDefinition, Type["MappedInputPlaceholder"]]]:\n return self.get_node_dependencies()\n\n\nclass DynamicCollectDependencyDefinition(\n NamedTuple("_DynamicCollectDependencyDefinition", [("node_name", str), ("output_name", str)]),\n IDependencyDefinition,\n):\n def get_node_dependencies(self) -> Sequence[DependencyDefinition]:\n return [DependencyDefinition(self.node_name, self.output_name)]\n\n def is_fan_in(self) -> bool:\n return True\n\n\nDepTypeAndOutputs: TypeAlias = Tuple[\n DependencyType,\n Union[NodeOutput, List[Union[NodeOutput, Type["MappedInputPlaceholder"]]]],\n]\n\nInputToOutputMap: TypeAlias = Dict[NodeInput, DepTypeAndOutputs]\n\n\ndef _create_handle_dict(\n node_dict: Mapping[str, Node],\n dep_dict: DependencyMapping[str],\n) -> InputToOutputMap:\n from .composition import MappedInputPlaceholder\n\n check.mapping_param(node_dict, "node_dict", key_type=str, value_type=Node)\n check.two_dim_mapping_param(dep_dict, "dep_dict", value_type=IDependencyDefinition)\n\n handle_dict: InputToOutputMap = {}\n\n for node_name, input_dict in dep_dict.items():\n from_node = node_dict[node_name]\n for input_name, dep_def in input_dict.items():\n if isinstance(\n dep_def, (MultiDependencyDefinition, BlockingAssetChecksDependencyDefinition)\n ):\n handles: List[Union[NodeOutput, Type[MappedInputPlaceholder]]] = []\n for inner_dep in dep_def.get_dependencies_and_mappings():\n if isinstance(inner_dep, DependencyDefinition):\n handles.append(node_dict[inner_dep.node].get_output(inner_dep.output))\n elif inner_dep is MappedInputPlaceholder:\n handles.append(inner_dep)\n else:\n check.failed(\n f"Unexpected MultiDependencyDefinition dependencies type {inner_dep}"\n )\n\n handle_dict[from_node.get_input(input_name)] = (DependencyType.FAN_IN, handles)\n\n elif isinstance(dep_def, DependencyDefinition):\n handle_dict[from_node.get_input(input_name)] = (\n DependencyType.DIRECT,\n node_dict[dep_def.node].get_output(dep_def.output),\n )\n elif isinstance(dep_def, DynamicCollectDependencyDefinition):\n handle_dict[from_node.get_input(input_name)] = (\n DependencyType.DYNAMIC_COLLECT,\n node_dict[dep_def.node_name].get_output(dep_def.output_name),\n )\n\n else:\n check.failed(f"Unknown dependency type {dep_def}")\n\n return handle_dict\n\n\nclass DependencyStructure:\n @staticmethod\n def from_definitions(\n nodes: 
Mapping[str, Node], dep_dict: DependencyMapping[str]\n ) -> "DependencyStructure":\n return DependencyStructure(\n list(dep_dict.keys()),\n _create_handle_dict(nodes, dep_dict),\n dep_dict,\n )\n\n _node_input_index: DefaultDict[str, Dict[NodeInput, List[NodeOutput]]]\n _node_output_index: Dict[str, DefaultDict[NodeOutput, List[NodeInput]]]\n _dynamic_fan_out_index: Dict[str, NodeOutput]\n _collect_index: Dict[str, Set[NodeOutput]]\n _deps_by_node_name: DependencyMapping[str]\n\n def __init__(\n self,\n node_names: Sequence[str],\n input_to_output_map: InputToOutputMap,\n deps_by_node_name: DependencyMapping[str],\n ):\n self._node_names = node_names\n self._input_to_output_map = input_to_output_map\n self._deps_by_node_name = deps_by_node_name\n\n # Building up a couple indexes here so that one can look up all the upstream output handles\n # or downstream input handles in O(1). Without this, this can become O(N^2) where N is node\n # count during the GraphQL query in particular\n\n # node_name => input_handle => list[output_handle]\n self._node_input_index = defaultdict(dict)\n\n # node_name => output_handle => list[input_handle]\n self._node_output_index = defaultdict(lambda: defaultdict(list))\n\n # node_name => dynamic output_handle that this node will dupe for\n self._dynamic_fan_out_index = {}\n\n # node_name => set of dynamic output_handle this collects over\n self._collect_index = defaultdict(set)\n\n for node_input, (dep_type, node_output_or_list) in self._input_to_output_map.items():\n if dep_type == DependencyType.FAN_IN:\n node_output_list: List[NodeOutput] = []\n for node_output in node_output_or_list:\n if not isinstance(node_output, NodeOutput):\n continue\n\n if node_output.is_dynamic:\n raise DagsterInvalidDefinitionError(\n "Currently, items in a fan-in dependency cannot be downstream of"\n " dynamic outputs. Problematic dependency on dynamic output"\n f' "{node_output.describe()}".'\n )\n if self._dynamic_fan_out_index.get(node_output.node_name):\n raise DagsterInvalidDefinitionError(\n "Currently, items in a fan-in dependency cannot be downstream of"\n " dynamic outputs. 
Problematic dependency on output"\n f' "{node_output.describe()}", downstream of'\n f' "{self._dynamic_fan_out_index[node_output.node_name].describe()}".'\n )\n\n node_output_list.append(node_output)\n elif dep_type == DependencyType.DIRECT:\n node_output = cast(NodeOutput, node_output_or_list)\n\n if node_output.is_dynamic:\n self._validate_and_set_fan_out(node_input, node_output)\n\n if self._dynamic_fan_out_index.get(node_output.node_name):\n self._validate_and_set_fan_out(\n node_input, self._dynamic_fan_out_index[node_output.node_name]\n )\n\n node_output_list = [node_output]\n elif dep_type == DependencyType.DYNAMIC_COLLECT:\n node_output = cast(NodeOutput, node_output_or_list)\n\n if node_output.is_dynamic:\n self._validate_and_set_collect(node_input, node_output)\n\n elif self._dynamic_fan_out_index.get(node_output.node_name):\n self._validate_and_set_collect(\n node_input,\n self._dynamic_fan_out_index[node_output.node_name],\n )\n else:\n check.failed(\n f"Unexpected dynamic fan in dep created {node_output} -> {node_input}"\n )\n\n node_output_list = [node_output]\n else:\n check.failed(f"Unexpected dep type {dep_type}")\n\n self._node_input_index[node_input.node.name][node_input] = node_output_list\n for node_output in node_output_list:\n self._node_output_index[node_output.node.name][node_output].append(node_input)\n\n def _validate_and_set_fan_out(self, node_input: NodeInput, node_output: NodeOutput) -> None:\n """Helper function for populating _dynamic_fan_out_index."""\n if not node_input.node.definition.input_supports_dynamic_output_dep(node_input.input_name):\n raise DagsterInvalidDefinitionError(\n f"{node_input.node.describe_node()} cannot be downstream of dynamic output"\n f' "{node_output.describe()}" since input "{node_input.input_name}" maps to a'\n " node that is already downstream of another dynamic output. Nodes cannot be"\n " downstream of more than one dynamic output"\n )\n\n if self._collect_index.get(node_input.node_name):\n raise DagsterInvalidDefinitionError(\n f"{node_input.node.describe_node()} cannot be both downstream of dynamic output "\n f"{node_output.describe()} and collect over dynamic output "\n f"{next(iter(self._collect_index[node_input.node_name])).describe()}."\n )\n\n if self._dynamic_fan_out_index.get(node_input.node_name) is None:\n self._dynamic_fan_out_index[node_input.node_name] = node_output\n return\n\n if self._dynamic_fan_out_index[node_input.node_name] != node_output:\n raise DagsterInvalidDefinitionError(\n f"{node_input.node.describe_node()} cannot be downstream of more than one dynamic"\n f' output. It is downstream of both "{node_output.describe()}" and'\n f' "{self._dynamic_fan_out_index[node_input.node_name].describe()}"'\n )\n\n def _validate_and_set_collect(\n self,\n node_input: NodeInput,\n node_output: NodeOutput,\n ) -> None:\n if self._dynamic_fan_out_index.get(node_input.node_name):\n raise DagsterInvalidDefinitionError(\n f"{node_input.node.describe_node()} cannot both collect over dynamic output "\n f"{node_output.describe()} and be downstream of the dynamic output "\n f"{self._dynamic_fan_out_index[node_input.node_name].describe()}."\n )\n\n self._collect_index[node_input.node_name].add(node_output)\n\n # if the output is already fanned out\n if self._dynamic_fan_out_index.get(node_output.node_name):\n raise DagsterInvalidDefinitionError(\n f"{node_input.node.describe_node()} cannot be downstream of more than one dynamic"\n f' output. 
It is downstream of both "{node_output.describe()}" and'\n f' "{self._dynamic_fan_out_index[node_output.node_name].describe()}"'\n )\n\n def all_upstream_outputs_from_node(self, node_name: str) -> Sequence[NodeOutput]:\n check.str_param(node_name, "node_name")\n\n # flatten out all outputs that feed into the inputs of this node\n return [\n output_handle\n for output_handle_list in self._node_input_index[node_name].values()\n for output_handle in output_handle_list\n ]\n\n def input_to_upstream_outputs_for_node(\n self, node_name: str\n ) -> Mapping[NodeInput, Sequence[NodeOutput]]:\n """Returns a Dict[NodeInput, List[NodeOutput]] that encodes\n where all the the inputs are sourced from upstream. Usually the\n List[NodeOutput] will be a list of one, except for the\n multi-dependency case.\n """\n check.str_param(node_name, "node_name")\n return self._node_input_index[node_name]\n\n def output_to_downstream_inputs_for_node(\n self, node_name: str\n ) -> Mapping[NodeOutput, Sequence[NodeInput]]:\n """Returns a Dict[NodeOutput, List[NodeInput]] that\n represents all the downstream inputs for each output in the\n dictionary.\n """\n check.str_param(node_name, "node_name")\n return self._node_output_index[node_name]\n\n def has_direct_dep(self, node_input: NodeInput) -> bool:\n check.inst_param(node_input, "node_input", NodeInput)\n if node_input not in self._input_to_output_map:\n return False\n dep_type, _ = self._input_to_output_map[node_input]\n return dep_type == DependencyType.DIRECT\n\n def get_direct_dep(self, node_input: NodeInput) -> NodeOutput:\n check.inst_param(node_input, "node_input", NodeInput)\n dep_type, dep = self._input_to_output_map[node_input]\n check.invariant(\n dep_type == DependencyType.DIRECT,\n f"Cannot call get_direct_dep when dep is not singular, got {dep_type}",\n )\n return cast(NodeOutput, dep)\n\n def get_dependency_definition(self, node_input: NodeInput) -> Optional[IDependencyDefinition]:\n return self._deps_by_node_name[node_input.node_name].get(node_input.input_name)\n\n def has_fan_in_deps(self, node_input: NodeInput) -> bool:\n check.inst_param(node_input, "node_input", NodeInput)\n if node_input not in self._input_to_output_map:\n return False\n dep_type, _ = self._input_to_output_map[node_input]\n return dep_type == DependencyType.FAN_IN\n\n def get_fan_in_deps(\n self, node_input: NodeInput\n ) -> Sequence[Union[NodeOutput, Type["MappedInputPlaceholder"]]]:\n check.inst_param(node_input, "node_input", NodeInput)\n dep_type, deps = self._input_to_output_map[node_input]\n check.invariant(\n dep_type == DependencyType.FAN_IN,\n f"Cannot call get_multi_dep when dep is not fan in, got {dep_type}",\n )\n return cast(List[Union[NodeOutput, Type["MappedInputPlaceholder"]]], deps)\n\n def has_dynamic_fan_in_dep(self, node_input: NodeInput) -> bool:\n check.inst_param(node_input, "node_input", NodeInput)\n if node_input not in self._input_to_output_map:\n return False\n dep_type, _ = self._input_to_output_map[node_input]\n return dep_type == DependencyType.DYNAMIC_COLLECT\n\n def get_dynamic_fan_in_dep(self, node_input: NodeInput) -> NodeOutput:\n check.inst_param(node_input, "node_input", NodeInput)\n dep_type, dep = self._input_to_output_map[node_input]\n check.invariant(\n dep_type == DependencyType.DYNAMIC_COLLECT,\n f"Cannot call get_dynamic_fan_in_dep when dep is not, got {dep_type}",\n )\n return cast(NodeOutput, dep)\n\n def has_deps(self, node_input: NodeInput) -> bool:\n check.inst_param(node_input, "node_input", NodeInput)\n return node_input in 
self._input_to_output_map\n\n def get_deps_list(self, node_input: NodeInput) -> Sequence[NodeOutput]:\n check.inst_param(node_input, "node_input", NodeInput)\n check.invariant(self.has_deps(node_input))\n dep_type, handle_or_list = self._input_to_output_map[node_input]\n if dep_type == DependencyType.DIRECT:\n return [cast(NodeOutput, handle_or_list)]\n elif dep_type == DependencyType.DYNAMIC_COLLECT:\n return [cast(NodeOutput, handle_or_list)]\n elif dep_type == DependencyType.FAN_IN:\n return [handle for handle in handle_or_list if isinstance(handle, NodeOutput)]\n else:\n check.failed(f"Unexpected dep type {dep_type}")\n\n def inputs(self) -> Sequence[NodeInput]:\n return list(self._input_to_output_map.keys())\n\n def get_upstream_dynamic_output_for_node(self, node_name: str) -> Optional[NodeOutput]:\n return self._dynamic_fan_out_index.get(node_name)\n\n def get_dependency_type(self, node_input: NodeInput) -> Optional[DependencyType]:\n result = self._input_to_output_map.get(node_input)\n if result is None:\n return None\n dep_type, _ = result\n return dep_type\n\n def is_dynamic_mapped(self, node_name: str) -> bool:\n return node_name in self._dynamic_fan_out_index\n\n def has_dynamic_downstreams(self, node_name: str) -> bool:\n for node_output in self._dynamic_fan_out_index.values():\n if node_output.node_name == node_name:\n return True\n\n return False\n
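As a minimal sketch of how the dependency mapping documented in this module is consumed (the op and graph names are illustrative), a :py:class:`GraphDefinition` can be built from a dictionary whose leaves are :py:class:`DependencyDefinition` objects:

.. code-block:: python

    from dagster import DependencyDefinition, GraphDefinition, op

    @op
    def return_one():
        return 1

    @op
    def add_one(num):
        return num + 1

    # wire add_one's "num" input to return_one's default "result" output
    one_plus_one = GraphDefinition(
        name="one_plus_one",
        node_defs=[return_one, add_one],
        dependencies={"add_one": {"num": DependencyDefinition("return_one")}},
    )
    one_plus_one_job = one_plus_one.to_job()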
", "current_page_name": "_modules/dagster/_core/definitions/dependency", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.dependency"}, "events": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.events

\nimport re\nfrom enum import Enum\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Callable,\n    Generic,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    TypeVar,\n    Union,\n    cast,\n)\n\nimport dagster._check as check\nimport dagster._seven as seven\nfrom dagster._annotations import PublicAttr, deprecated, experimental_param, public\nfrom dagster._core.definitions.data_version import DATA_VERSION_TAG, DataVersion\nfrom dagster._core.storage.tags import MULTIDIMENSIONAL_PARTITION_PREFIX, SYSTEM_TAG_PREFIX\nfrom dagster._serdes import whitelist_for_serdes\nfrom dagster._serdes.serdes import NamedTupleSerializer\n\nfrom .metadata import (\n    MetadataFieldSerializer,\n    MetadataMapping,\n    MetadataValue,\n    RawMetadataValue,\n    normalize_metadata,\n)\nfrom .utils import DEFAULT_OUTPUT, check_valid_name\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.assets import AssetsDefinition\n    from dagster._core.definitions.source_asset import SourceAsset\n    from dagster._core.execution.context.output import OutputContext\n\n\nASSET_KEY_SPLIT_REGEX = re.compile("[^a-zA-Z0-9_]")\nASSET_KEY_DELIMITER = "/"\n\n\ndef parse_asset_key_string(s: str) -> Sequence[str]:\n    return list(filter(lambda x: x, re.split(ASSET_KEY_SPLIT_REGEX, s)))\n\n\n
[docs]@whitelist_for_serdes\nclass AssetKey(NamedTuple("_AssetKey", [("path", PublicAttr[Sequence[str]])])):\n """Object representing the structure of an asset key. Takes in a sanitized string, list of\n strings, or tuple of strings.\n\n Example usage:\n\n .. code-block:: python\n\n from dagster import op\n\n @op\n def emit_metadata(context, df):\n yield AssetMaterialization(\n asset_key=AssetKey('flat_asset_key'),\n metadata={"text_metadata": "Text-based metadata for this event"},\n )\n\n @op\n def structured_asset_key(context, df):\n yield AssetMaterialization(\n asset_key=AssetKey(['parent', 'child', 'grandchild']),\n metadata={"text_metadata": "Text-based metadata for this event"},\n )\n\n @op\n def structured_asset_key_2(context, df):\n yield AssetMaterialization(\n asset_key=AssetKey(('parent', 'child', 'grandchild')),\n metadata={"text_metadata": "Text-based metadata for this event"},\n )\n\n Args:\n path (Sequence[str]): String, list of strings, or tuple of strings. A list of strings\n represent the hierarchical structure of the asset_key.\n """\n\n def __new__(cls, path: Sequence[str]):\n if isinstance(path, str):\n path = [path]\n else:\n path = list(check.sequence_param(path, "path", of_type=str))\n\n return super(AssetKey, cls).__new__(cls, path=path)\n\n def __str__(self):\n return f"AssetKey({self.path})"\n\n def __repr__(self):\n return f"AssetKey({self.path})"\n\n def __hash__(self):\n return hash(tuple(self.path))\n\n def __eq__(self, other):\n if not isinstance(other, AssetKey):\n return False\n if len(self.path) != len(other.path):\n return False\n for i in range(0, len(self.path)):\n if self.path[i] != other.path[i]:\n return False\n return True\n\n def to_string(self) -> str:\n """E.g. '["first_component", "second_component"]'."""\n return seven.json.dumps(self.path)\n\n def to_user_string(self) -> str:\n """E.g. 
"first_component/second_component"."""\n return ASSET_KEY_DELIMITER.join(self.path)\n\n def to_python_identifier(self, suffix: Optional[str] = None) -> str:\n """Build a valid Python identifier based on the asset key that can be used for\n operation names or I/O manager keys.\n """\n path = list(self.path)\n\n if suffix is not None:\n path.append(suffix)\n\n return "__".join(path).replace("-", "_")\n\n @staticmethod\n def from_user_string(asset_key_string: str) -> "AssetKey":\n return AssetKey(asset_key_string.split(ASSET_KEY_DELIMITER))\n\n @staticmethod\n def from_db_string(asset_key_string: Optional[str]) -> Optional["AssetKey"]:\n if not asset_key_string:\n return None\n if asset_key_string[0] == "[":\n # is a json string\n try:\n path = seven.json.loads(asset_key_string)\n except seven.JSONDecodeError:\n path = parse_asset_key_string(asset_key_string)\n else:\n path = parse_asset_key_string(asset_key_string)\n return AssetKey(path)\n\n @staticmethod\n def get_db_prefix(path: Sequence[str]):\n check.sequence_param(path, "path", of_type=str)\n return seven.json.dumps(path)[:-2] # strip trailing '"]' from json string\n\n @staticmethod\n def from_graphql_input(graphql_input_asset_key: Mapping[str, Sequence[str]]) -> "AssetKey":\n return AssetKey(graphql_input_asset_key["path"])\n\n def to_graphql_input(self) -> Mapping[str, Sequence[str]]:\n return {"path": self.path}\n\n @staticmethod\n def from_coercible(arg: "CoercibleToAssetKey") -> "AssetKey":\n if isinstance(arg, AssetKey):\n return check.inst_param(arg, "arg", AssetKey)\n elif isinstance(arg, str):\n return AssetKey([arg])\n elif isinstance(arg, list):\n check.list_param(arg, "arg", of_type=str)\n return AssetKey(arg)\n elif isinstance(arg, tuple):\n check.tuple_param(arg, "arg", of_type=str)\n return AssetKey(arg)\n else:\n check.failed(f"Unexpected type for AssetKey: {type(arg)}")\n\n @staticmethod\n def from_coercible_or_definition(\n arg: Union["CoercibleToAssetKey", "AssetsDefinition", "SourceAsset"]\n ) -> "AssetKey":\n from dagster._core.definitions.assets import AssetsDefinition\n from dagster._core.definitions.source_asset import SourceAsset\n\n if isinstance(arg, AssetsDefinition):\n return arg.key\n elif isinstance(arg, SourceAsset):\n return arg.key\n else:\n return AssetKey.from_coercible(arg)\n\n # @staticmethod\n # def from_coercible_to_asset_dep(arg: "CoercibleToAssetDep") -> "AssetKey":\n # from dagster._core.definitions.asset_dep import AssetDep\n # from dagster._core.definitions.asset_spec import AssetSpec\n # from dagster._core.definitions.assets import AssetsDefinition\n # from dagster._core.definitions.source_asset import SourceAsset\n\n # if isinstance(arg, AssetsDefinition):\n # if len(arg.keys) > 1:\n # # Only AssetsDefinition with a single asset can be passed\n # raise DagsterInvalidDefinitionError(\n # "Cannot pass a multi_asset AssetsDefinition as an argument to deps."\n # " Instead, specify dependencies on the assets created by the multi_asset"\n # f" via AssetKeys or strings. 
For the multi_asset {arg.node_def.name}, the"\n # f" available keys are: {arg.keys}."\n # )\n # return arg.key\n # elif isinstance(arg, SourceAsset):\n # return arg.key\n # elif isinstance(arg, AssetDep):\n # return arg.asset_key\n # elif isinstance(arg, AssetSpec):\n # return arg.asset_key\n # else:\n # return AssetKey.from_coercible(arg)\n\n def has_prefix(self, prefix: Sequence[str]) -> bool:\n return len(self.path) >= len(prefix) and self.path[: len(prefix)] == prefix\n\n def with_prefix(self, prefix: "CoercibleToAssetKeyPrefix") -> "AssetKey":\n prefix = key_prefix_from_coercible(prefix)\n return AssetKey(list(prefix) + list(self.path))
\n\n\nclass AssetKeyPartitionKey(NamedTuple):\n """An AssetKey with an (optional) partition key. Refers either to a non-partitioned asset or a\n partition of a partitioned asset.\n """\n\n asset_key: AssetKey\n partition_key: Optional[str] = None\n\n\nCoercibleToAssetKey = Union[AssetKey, str, Sequence[str]]\nCoercibleToAssetKeyPrefix = Union[str, Sequence[str]]\n\n\ndef check_opt_coercible_to_asset_key_prefix_param(\n prefix: Optional[CoercibleToAssetKeyPrefix], param_name: str\n) -> Optional[Sequence[str]]:\n try:\n return key_prefix_from_coercible(prefix) if prefix is not None else None\n except check.CheckError:\n raise check.ParameterCheckError(\n f'Param "{param_name}" is not a string or a sequence of strings'\n )\n\n\ndef key_prefix_from_coercible(key_prefix: CoercibleToAssetKeyPrefix) -> Sequence[str]:\n if isinstance(key_prefix, str):\n return [key_prefix]\n elif isinstance(key_prefix, list):\n return key_prefix\n else:\n check.failed(f"Unexpected type for key_prefix: {type(key_prefix)}")\n\n\nDynamicAssetKey = Callable[["OutputContext"], Optional[AssetKey]]\n\n\n@whitelist_for_serdes\nclass AssetLineageInfo(\n NamedTuple("_AssetLineageInfo", [("asset_key", AssetKey), ("partitions", AbstractSet[str])])\n):\n def __new__(cls, asset_key: AssetKey, partitions: Optional[AbstractSet[str]] = None):\n asset_key = check.inst_param(asset_key, "asset_key", AssetKey)\n partitions = check.opt_set_param(partitions, "partitions", str)\n return super(AssetLineageInfo, cls).__new__(cls, asset_key=asset_key, partitions=partitions)\n\n\nT = TypeVar("T")\n\n\n
[docs]@experimental_param(param="data_version")\nclass Output(Generic[T]):\n """Event corresponding to one of a op's outputs.\n\n Op compute functions must explicitly yield events of this type when they have more than\n one output, or when they also yield events of other types, or when defining a op using the\n :py:class:`OpDefinition` API directly.\n\n Outputs are values produced by ops that will be consumed by downstream ops in a job.\n They are type-checked at op boundaries when their corresponding :py:class:`Out`\n or the downstream :py:class:`In` is typed.\n\n Args:\n value (Any): The value returned by the compute function.\n output_name (Optional[str]): Name of the corresponding out. (default:\n "result")\n metadata (Optional[Dict[str, Union[str, float, int, MetadataValue]]]):\n Arbitrary metadata about the failure. Keys are displayed string labels, and values are\n one of the following: string, float, int, JSON-serializable dict, JSON-serializable\n list, and one of the data classes returned by a MetadataValue static method.\n data_version (Optional[DataVersion]): (Experimental) A data version to manually set\n for the asset.\n """\n\n def __init__(\n self,\n value: T,\n output_name: Optional[str] = DEFAULT_OUTPUT,\n metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n data_version: Optional[DataVersion] = None,\n ):\n self._value = value\n self._output_name = check.str_param(output_name, "output_name")\n self._data_version = check.opt_inst_param(data_version, "data_version", DataVersion)\n self._metadata = normalize_metadata(\n check.opt_mapping_param(metadata, "metadata", key_type=str),\n )\n\n @property\n def metadata(self) -> MetadataMapping:\n return self._metadata\n\n @public\n @property\n def value(self) -> Any:\n """Any: The value returned by the compute function."""\n return self._value\n\n @public\n @property\n def output_name(self) -> str:\n """str: Name of the corresponding :py:class:`Out`."""\n return self._output_name\n\n @public\n @property\n def data_version(self) -> Optional[DataVersion]:\n """Optional[DataVersion]: A data version that was manually set on the `Output`."""\n return self._data_version\n\n def __eq__(self, other: object) -> bool:\n return (\n isinstance(other, Output)\n and self.value == other.value\n and self.output_name == other.output_name\n and self.metadata == other.metadata\n )
\n\n\n
[docs]class DynamicOutput(Generic[T]):\n """Variant of :py:class:`Output <dagster.Output>` used to support\n dynamic mapping & collect. Each ``DynamicOutput`` produced by an op represents\n one item in a set that can be processed individually with ``map`` or gathered\n with ``collect``.\n\n Each ``DynamicOutput`` must have a unique ``mapping_key`` to distinguish it with it's set.\n\n Args:\n value (Any):\n The value returned by the compute function.\n mapping_key (str):\n The key that uniquely identifies this dynamic value relative to its peers.\n This key will be used to identify the downstream ops when mapped, ie\n ``mapped_op[example_mapping_key]``\n output_name (Optional[str]):\n Name of the corresponding :py:class:`DynamicOut` defined on the op.\n (default: "result")\n metadata (Optional[Dict[str, Union[str, float, int, MetadataValue]]]):\n Arbitrary metadata about the failure. Keys are displayed string labels, and values are\n one of the following: string, float, int, JSON-serializable dict, JSON-serializable\n list, and one of the data classes returned by a MetadataValue static method.\n """\n\n def __init__(\n self,\n value: T,\n mapping_key: str,\n output_name: Optional[str] = DEFAULT_OUTPUT,\n metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n ):\n self._mapping_key = check_valid_name(check.str_param(mapping_key, "mapping_key"))\n self._output_name = check.str_param(output_name, "output_name")\n self._value = value\n self._metadata = normalize_metadata(\n check.opt_mapping_param(metadata, "metadata", key_type=str),\n )\n\n @property\n def metadata(self) -> Mapping[str, MetadataValue]:\n return self._metadata\n\n @public\n @property\n def mapping_key(self) -> str:\n """The mapping_key that was set for this DynamicOutput at instantiation."""\n return self._mapping_key\n\n @public\n @property\n def value(self) -> T:\n """The value that is returned by the compute function for this DynamicOut."""\n return self._value\n\n @public\n @property\n def output_name(self) -> str:\n """Name of the :py:class:`DynamicOut` defined on the op that this DynamicOut is associated with."""\n return self._output_name\n\n def __eq__(self, other: object) -> bool:\n return (\n isinstance(other, DynamicOutput)\n and self.value == other.value\n and self.output_name == other.output_name\n and self.mapping_key == other.mapping_key\n and self.metadata == other.metadata\n )
\n\n\n@whitelist_for_serdes(\n storage_field_names={"metadata": "metadata_entries"},\n field_serializers={"metadata": MetadataFieldSerializer},\n)\nclass AssetObservation(\n NamedTuple(\n "_AssetObservation",\n [\n ("asset_key", PublicAttr[AssetKey]),\n ("description", PublicAttr[Optional[str]]),\n ("metadata", PublicAttr[Mapping[str, MetadataValue]]),\n ("partition", PublicAttr[Optional[str]]),\n ("tags", PublicAttr[Mapping[str, str]]),\n ],\n )\n):\n """Event that captures metadata about an asset at a point in time.\n\n Args:\n asset_key (Union[str, List[str], AssetKey]): A key to identify the asset.\n partition (Optional[str]): The name of a partition of the asset that the metadata\n corresponds to.\n tags (Optional[Mapping[str, str]]): A mapping containing system-populated tags for the\n observation. Users should not pass values into this argument.\n metadata (Optional[Dict[str, Union[str, float, int, MetadataValue]]]):\n Arbitrary metadata about the asset. Keys are displayed string labels, and values are\n one of the following: string, float, int, JSON-serializable dict, JSON-serializable\n list, and one of the data classes returned by a MetadataValue static method.\n """\n\n def __new__(\n cls,\n asset_key: CoercibleToAssetKey,\n description: Optional[str] = None,\n metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n partition: Optional[str] = None,\n tags: Optional[Mapping[str, str]] = None,\n ):\n if isinstance(asset_key, AssetKey):\n check.inst_param(asset_key, "asset_key", AssetKey)\n elif isinstance(asset_key, str):\n asset_key = AssetKey(parse_asset_key_string(asset_key))\n else:\n check.sequence_param(asset_key, "asset_key", of_type=str)\n asset_key = AssetKey(asset_key)\n\n tags = check.opt_mapping_param(tags, "tags", key_type=str, value_type=str)\n if any([not tag.startswith(SYSTEM_TAG_PREFIX) for tag in tags or {}]):\n check.failed(\n "Users should not pass values into the tags argument for AssetMaterializations. "\n "The tags argument is reserved for system-populated tags."\n )\n\n normed_metadata = normalize_metadata(\n check.opt_mapping_param(metadata, "metadata", key_type=str),\n )\n\n return super(AssetObservation, cls).__new__(\n cls,\n asset_key=asset_key,\n description=check.opt_str_param(description, "description"),\n metadata=normed_metadata,\n tags=tags,\n partition=check.opt_str_param(partition, "partition"),\n )\n\n @property\n def label(self) -> str:\n return " ".join(self.asset_key.path)\n\n @property\n def data_version(self) -> Optional[str]:\n return self.tags.get(DATA_VERSION_TAG)\n\n\nUNDEFINED_ASSET_KEY_PATH = ["__undefined__"]\n\n\nclass AssetMaterializationSerializer(NamedTupleSerializer):\n # There are old `Materialization` objects in storage. We set the default value for asset key to\n # be `AssetKey(["__undefined__"])` to ensure that we can load these objects, without needing to\n # allow for the construction of new `AssetMaterialization` objects with no defined AssetKey.\n def before_unpack(self, context, unpacked_dict: Any) -> Any:\n # cover both the case where "asset_key" is not present at all and where it is None\n if unpacked_dict.get("asset_key") is None:\n unpacked_dict["asset_key"] = AssetKey(UNDEFINED_ASSET_KEY_PATH)\n return unpacked_dict\n\n\n
[docs]@whitelist_for_serdes(\n old_storage_names={"Materialization"},\n serializer=AssetMaterializationSerializer,\n storage_field_names={"metadata": "metadata_entries"},\n field_serializers={"metadata": MetadataFieldSerializer},\n)\nclass AssetMaterialization(\n NamedTuple(\n "_AssetMaterialization",\n [\n ("asset_key", PublicAttr[AssetKey]),\n ("description", PublicAttr[Optional[str]]),\n ("metadata", PublicAttr[Mapping[str, MetadataValue]]),\n ("partition", PublicAttr[Optional[str]]),\n ("tags", Optional[Mapping[str, str]]),\n ],\n )\n):\n """Event indicating that an op has materialized an asset.\n\n Op compute functions may yield events of this type whenever they wish to indicate to the\n Dagster framework (and the end user) that they have produced a materialized value as a\n side effect of computation. Unlike outputs, asset materializations can not be passed to other\n ops, and their persistence is controlled by op logic, rather than by the Dagster\n framework.\n\n Op authors should use these events to organize metadata about the side effects of their\n computations, enabling tooling like the Assets dashboard in the Dagster UI.\n\n Args:\n asset_key (Union[str, List[str], AssetKey]): A key to identify the materialized asset across\n job runs\n description (Optional[str]): A longer human-readable description of the materialized value.\n partition (Optional[str]): The name of the partition\n that was materialized.\n tags (Optional[Mapping[str, str]]): A mapping containing system-populated tags for the\n materialization. Users should not pass values into this argument.\n metadata (Optional[Dict[str, RawMetadataValue]]):\n Arbitrary metadata about the asset. Keys are displayed string labels, and values are\n one of the following: string, float, int, JSON-serializable dict, JSON-serializable\n list, and one of the data classes returned by a MetadataValue static method.\n """\n\n def __new__(\n cls,\n asset_key: CoercibleToAssetKey,\n description: Optional[str] = None,\n metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n partition: Optional[str] = None,\n tags: Optional[Mapping[str, str]] = None,\n ):\n from dagster._core.definitions.multi_dimensional_partitions import MultiPartitionKey\n\n if isinstance(asset_key, AssetKey):\n check.inst_param(asset_key, "asset_key", AssetKey)\n elif isinstance(asset_key, str):\n asset_key = AssetKey(parse_asset_key_string(asset_key))\n else:\n check.sequence_param(asset_key, "asset_key", of_type=str)\n asset_key = AssetKey(asset_key)\n\n check.opt_mapping_param(tags, "tags", key_type=str, value_type=str)\n invalid_tags = [tag for tag in tags or {} if not tag.startswith(SYSTEM_TAG_PREFIX)]\n if len(invalid_tags) > 0:\n check.failed(\n f"Invalid tags: {tags} Users should not pass values into the tags argument for"\n " AssetMaterializations. 
The tags argument is reserved for system-populated tags."\n )\n\n normed_metadata = normalize_metadata(\n check.opt_mapping_param(metadata, "metadata", key_type=str),\n )\n\n partition = check.opt_str_param(partition, "partition")\n\n if not isinstance(partition, MultiPartitionKey):\n # When event log records are unpacked from storage, cast the partition key as a\n # MultiPartitionKey if multi-dimensional partition tags exist\n multi_dimensional_partitions = {\n dimension[len(MULTIDIMENSIONAL_PARTITION_PREFIX) :]: partition_key\n for dimension, partition_key in (tags or {}).items()\n if dimension.startswith(MULTIDIMENSIONAL_PARTITION_PREFIX)\n }\n if multi_dimensional_partitions:\n partition = MultiPartitionKey(multi_dimensional_partitions)\n\n return super(AssetMaterialization, cls).__new__(\n cls,\n asset_key=asset_key,\n description=check.opt_str_param(description, "description"),\n metadata=normed_metadata,\n tags=tags,\n partition=partition,\n )\n\n @property\n def label(self) -> str:\n return " ".join(self.asset_key.path)\n\n
[docs] @public\n @staticmethod\n def file(\n path: str,\n description: Optional[str] = None,\n asset_key: Optional[Union[str, Sequence[str], AssetKey]] = None,\n ) -> "AssetMaterialization":\n """Static constructor for standard materializations corresponding to files on disk.\n\n Args:\n path (str): The path to the file.\n description (Optional[str]): A human-readable description of the materialization.\n """\n if not asset_key:\n asset_key = path\n\n return AssetMaterialization(\n asset_key=cast(Union[str, AssetKey, List[str]], asset_key),\n description=description,\n metadata={"path": MetadataValue.path(path)},\n )
\n\n\n
[docs]@deprecated(\n breaking_version="1.7",\n additional_warn_text="Please use AssetCheckResult and @asset_check instead.",\n)\n@whitelist_for_serdes(\n storage_field_names={"metadata": "metadata_entries"},\n field_serializers={"metadata": MetadataFieldSerializer},\n)\nclass ExpectationResult(\n NamedTuple(\n "_ExpectationResult",\n [\n ("success", PublicAttr[bool]),\n ("label", PublicAttr[Optional[str]]),\n ("description", PublicAttr[Optional[str]]),\n ("metadata", PublicAttr[Mapping[str, MetadataValue]]),\n ],\n )\n):\n """Event corresponding to a data quality test.\n\n Op compute functions may yield events of this type whenever they wish to indicate to the\n Dagster framework (and the end user) that a data quality test has produced a (positive or\n negative) result.\n\n Args:\n success (bool): Whether the expectation passed or not.\n label (Optional[str]): Short display name for expectation. Defaults to "result".\n description (Optional[str]): A longer human-readable description of the expectation.\n metadata (Optional[Dict[str, RawMetadataValue]]):\n Arbitrary metadata about the failure. Keys are displayed string labels, and values are\n one of the following: string, float, int, JSON-serializable dict, JSON-serializable\n list, and one of the data classes returned by a MetadataValue static method.\n """\n\n def __new__(\n cls,\n success: bool,\n label: Optional[str] = None,\n description: Optional[str] = None,\n metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n ):\n normed_metadata = normalize_metadata(\n check.opt_mapping_param(metadata, "metadata", key_type=str),\n )\n\n return super(ExpectationResult, cls).__new__(\n cls,\n success=check.bool_param(success, "success"),\n label=check.opt_str_param(label, "label", "result"),\n description=check.opt_str_param(description, "description"),\n metadata=normed_metadata,\n )
\n\n\n
[docs]@whitelist_for_serdes(\n storage_field_names={"metadata": "metadata_entries"},\n field_serializers={"metadata": MetadataFieldSerializer},\n)\nclass TypeCheck(\n NamedTuple(\n "_TypeCheck",\n [\n ("success", PublicAttr[bool]),\n ("description", PublicAttr[Optional[str]]),\n ("metadata", PublicAttr[Mapping[str, MetadataValue]]),\n ],\n )\n):\n """Event corresponding to a successful typecheck.\n\n Events of this type should be returned by user-defined type checks when they need to encapsulate\n additional metadata about a type check's success or failure. (i.e., when using\n :py:func:`as_dagster_type`, :py:func:`@usable_as_dagster_type <dagster_type>`, or the underlying\n :py:func:`PythonObjectDagsterType` API.)\n\n Op compute functions should generally avoid yielding events of this type to avoid confusion.\n\n Args:\n success (bool): ``True`` if the type check succeeded, ``False`` otherwise.\n description (Optional[str]): A human-readable description of the type check.\n metadata (Optional[Dict[str, RawMetadataValue]]):\n Arbitrary metadata about the failure. Keys are displayed string labels, and values are\n one of the following: string, float, int, JSON-serializable dict, JSON-serializable\n list, and one of the data classes returned by a MetadataValue static method.\n """\n\n def __new__(\n cls,\n success: bool,\n description: Optional[str] = None,\n metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n ):\n normed_metadata = normalize_metadata(\n check.opt_mapping_param(metadata, "metadata", key_type=str),\n )\n\n return super(TypeCheck, cls).__new__(\n cls,\n success=check.bool_param(success, "success"),\n description=check.opt_str_param(description, "description"),\n metadata=normed_metadata,\n )
\n\n\n
[docs]class Failure(Exception):\n """Event indicating op failure.\n\n Raise events of this type from within op compute functions or custom type checks in order to\n indicate an unrecoverable failure in user code to the Dagster machinery and return\n structured metadata about the failure.\n\n Args:\n description (Optional[str]): A human-readable description of the failure.\n metadata (Optional[Dict[str, RawMetadataValue]]):\n Arbitrary metadata about the failure. Keys are displayed string labels, and values are\n one of the following: string, float, int, JSON-serializable dict, JSON-serializable\n list, and one of the data classes returned by a MetadataValue static method.\n allow_retries (Optional[bool]):\n Whether this Failure should respect the retry policy or bypass it and immediately fail.\n Defaults to True, respecting the retry policy and allowing retries.\n """\n\n def __init__(\n self,\n description: Optional[str] = None,\n metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n allow_retries: Optional[bool] = None,\n ):\n super(Failure, self).__init__(description)\n self.description = check.opt_str_param(description, "description")\n self.metadata = normalize_metadata(\n check.opt_mapping_param(metadata, "metadata", key_type=str),\n )\n self.allow_retries = check.opt_bool_param(allow_retries, "allow_retries", True)
\n\n\n
[docs]class RetryRequested(Exception):\n """An exception to raise from an op to indicate that it should be retried.\n\n Args:\n max_retries (Optional[int]):\n The max number of retries this step should attempt before failing\n seconds_to_wait (Optional[Union[float,int]]):\n Seconds to wait before restarting the step after putting the step in\n to the up_for_retry state\n\n Example:\n .. code-block:: python\n\n @op\n def flakes():\n try:\n flakey_operation()\n except Exception as e:\n raise RetryRequested(max_retries=3) from e\n """\n\n def __init__(\n self, max_retries: Optional[int] = 1, seconds_to_wait: Optional[Union[float, int]] = None\n ):\n super(RetryRequested, self).__init__()\n self.max_retries = check.int_param(max_retries, "max_retries")\n self.seconds_to_wait = check.opt_numeric_param(seconds_to_wait, "seconds_to_wait")
\n\n\nclass ObjectStoreOperationType(Enum):\n SET_OBJECT = "SET_OBJECT"\n GET_OBJECT = "GET_OBJECT"\n RM_OBJECT = "RM_OBJECT"\n CP_OBJECT = "CP_OBJECT"\n\n\nclass ObjectStoreOperation(\n NamedTuple(\n "_ObjectStoreOperation",\n [\n ("op", ObjectStoreOperationType),\n ("key", str),\n ("dest_key", Optional[str]),\n ("obj", Any),\n ("serialization_strategy_name", Optional[str]),\n ("object_store_name", Optional[str]),\n ("value_name", Optional[str]),\n ("version", Optional[str]),\n ("mapping_key", Optional[str]),\n ],\n )\n):\n """This event is used internally by Dagster machinery when values are written to and read from\n an ObjectStore.\n\n Users should not import this class or yield events of this type from user code.\n\n Args:\n op (ObjectStoreOperationType): The type of the operation on the object store.\n key (str): The key of the object on which the operation was performed.\n dest_key (Optional[str]): The destination key, if any, to which the object was copied.\n obj (Any): The object, if any, retrieved by the operation.\n serialization_strategy_name (Optional[str]): The name of the serialization strategy, if any,\n employed by the operation\n object_store_name (Optional[str]): The name of the object store that performed the\n operation.\n value_name (Optional[str]): The name of the input/output\n version (Optional[str]): (Experimental) The version of the stored data.\n mapping_key (Optional[str]): The mapping key when a dynamic output is used.\n """\n\n def __new__(\n cls,\n op: ObjectStoreOperationType,\n key: str,\n dest_key: Optional[str] = None,\n obj: Any = None,\n serialization_strategy_name: Optional[str] = None,\n object_store_name: Optional[str] = None,\n value_name: Optional[str] = None,\n version: Optional[str] = None,\n mapping_key: Optional[str] = None,\n ):\n return super(ObjectStoreOperation, cls).__new__(\n cls,\n op=op,\n key=check.str_param(key, "key"),\n dest_key=check.opt_str_param(dest_key, "dest_key"),\n obj=obj,\n serialization_strategy_name=check.opt_str_param(\n serialization_strategy_name, "serialization_strategy_name"\n ),\n object_store_name=check.opt_str_param(object_store_name, "object_store_name"),\n value_name=check.opt_str_param(value_name, "value_name"),\n version=check.opt_str_param(version, "version"),\n mapping_key=check.opt_str_param(mapping_key, "mapping_key"),\n )\n\n @classmethod\n def serializable(cls, inst, **kwargs):\n return cls(\n **dict(\n {\n "op": inst.op.value,\n "key": inst.key,\n "dest_key": inst.dest_key,\n "obj": None,\n "serialization_strategy_name": inst.serialization_strategy_name,\n "object_store_name": inst.object_store_name,\n "value_name": inst.value_name,\n "version": inst.version,\n },\n **kwargs,\n )\n )\n\n\nclass HookExecutionResult(\n NamedTuple("_HookExecutionResult", [("hook_name", str), ("is_skipped", bool)])\n):\n """This event is used internally to indicate the execution result of a hook, e.g. whether the\n user-defined hook function is skipped.\n\n Args:\n hook_name (str): The name of the hook.\n is_skipped (bool): ``False`` if the hook_fn is executed, ``True`` otheriwse.\n """\n\n def __new__(cls, hook_name: str, is_skipped: Optional[bool] = None):\n return super(HookExecutionResult, cls).__new__(\n cls,\n hook_name=check.str_param(hook_name, "hook_name"),\n is_skipped=cast(bool, check.opt_bool_param(is_skipped, "is_skipped", default=False)),\n )\n\n\nUserEvent = Union[AssetMaterialization, AssetObservation, ExpectationResult]\n
", "current_page_name": "_modules/dagster/_core/definitions/events", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.events"}, "executor_definition": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.executor_definition

\nfrom enum import Enum as PyEnum\nfrom functools import update_wrapper\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, Mapping, Optional, Sequence, Union, overload\n\nfrom typing_extensions import Self, TypeAlias\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._builtins import Int\nfrom dagster._config import Field, Noneable, Selector, UserConfigSchema\nfrom dagster._core.definitions.configurable import (\n    ConfiguredDefinitionConfigSchema,\n    NamedConfigurableDefinition,\n)\nfrom dagster._core.definitions.job_base import IJob\nfrom dagster._core.definitions.reconstruct import ReconstructableJob\nfrom dagster._core.errors import DagsterUnmetExecutorRequirementsError\nfrom dagster._core.execution.retries import RetryMode, get_retries_config\nfrom dagster._core.execution.tags import get_tag_concurrency_limits_config\n\nfrom .definition_config_schema import (\n    IDefinitionConfigSchema,\n    convert_user_facing_definition_config_schema,\n)\n\nif TYPE_CHECKING:\n    from dagster._core.executor.base import Executor\n    from dagster._core.executor.in_process import InProcessExecutor\n    from dagster._core.executor.init import InitExecutorContext\n    from dagster._core.executor.multiprocess import MultiprocessExecutor\n    from dagster._core.instance import DagsterInstance\n\n\nclass ExecutorRequirement(PyEnum):\n    """An ExecutorDefinition can include a list of requirements that the system uses to\n    check whether the executor will be able to work for a particular job execution.\n    """\n\n    # The passed in IJob must be reconstructable across process boundaries\n    RECONSTRUCTABLE_PIPELINE = (  # This needs to still exist for folks who may have written their own executor\n        "RECONSTRUCTABLE_PIPELINE"\n    )\n    RECONSTRUCTABLE_JOB = "RECONSTRUCTABLE_PIPELINE"\n\n    # The DagsterInstance must be loadable in a different process\n    NON_EPHEMERAL_INSTANCE = "NON_EPHEMERAL_INSTANCE"\n\n    # Any op outputs on the job must be persisted\n    PERSISTENT_OUTPUTS = "PERSISTENT_OUTPUTS"\n\n\ndef multiple_process_executor_requirements() -> Sequence[ExecutorRequirement]:\n    return [\n        ExecutorRequirement.RECONSTRUCTABLE_JOB,\n        ExecutorRequirement.NON_EPHEMERAL_INSTANCE,\n        ExecutorRequirement.PERSISTENT_OUTPUTS,\n    ]\n\n\nExecutorConfig = Mapping[str, object]\nExecutorCreationFunction: TypeAlias = Callable[["InitExecutorContext"], "Executor"]\nExecutorRequirementsFunction: TypeAlias = Callable[[ExecutorConfig], Sequence[ExecutorRequirement]]\n\n\n
[docs]class ExecutorDefinition(NamedConfigurableDefinition):\n """An executor is responsible for executing the steps of a job.\n\n Args:\n name (str): The name of the executor.\n config_schema (Optional[ConfigSchema]): The schema for the config. Configuration data\n available in `init_context.executor_config`. If not set, Dagster will accept any config\n provided.\n requirements (Optional[List[ExecutorRequirement]]): Any requirements that must\n be met in order for the executor to be usable for a particular job execution.\n executor_creation_fn(Optional[Callable]): Should accept an :py:class:`InitExecutorContext`\n and return an instance of :py:class:`Executor`\n required_resource_keys (Optional[Set[str]]): Keys for the resources required by the\n executor.\n description (Optional[str]): A description of the executor.\n """\n\n def __init__(\n self,\n name: str,\n config_schema: Optional[UserConfigSchema] = None,\n requirements: Union[\n ExecutorRequirementsFunction, Optional[Sequence[ExecutorRequirement]]\n ] = None,\n executor_creation_fn: Optional[ExecutorCreationFunction] = None,\n description: Optional[str] = None,\n ):\n self._name = check.str_param(name, "name")\n self._requirements_fn: ExecutorRequirementsFunction\n if callable(requirements):\n self._requirements_fn = requirements\n else:\n requirements_lst = check.opt_list_param(\n requirements, "requirements", of_type=ExecutorRequirement\n )\n self._requirements_fn = lambda _: requirements_lst\n self._config_schema = convert_user_facing_definition_config_schema(config_schema)\n self._executor_creation_fn = check.opt_callable_param(\n executor_creation_fn, "executor_creation_fn"\n )\n self._description = check.opt_str_param(description, "description")\n\n @public\n @property\n def name(self) -> str:\n """Name of the executor."""\n return self._name\n\n @public\n @property\n def description(self) -> Optional[str]:\n """Description of executor, if provided."""\n return self._description\n\n @property\n def config_schema(self) -> IDefinitionConfigSchema:\n return self._config_schema\n\n def get_requirements(\n self, executor_config: Mapping[str, object]\n ) -> Sequence[ExecutorRequirement]:\n return self._requirements_fn(executor_config)\n\n @public\n @property\n def executor_creation_fn(self) -> Optional[ExecutorCreationFunction]:\n """Callable that takes an :py:class:`InitExecutorContext` and returns an instance of\n :py:class:`Executor`.\n """\n return self._executor_creation_fn\n\n def copy_for_configured(self, name, description, config_schema) -> "ExecutorDefinition":\n return ExecutorDefinition(\n name=name,\n config_schema=config_schema, # type: ignore\n executor_creation_fn=self.executor_creation_fn,\n description=description or self.description,\n requirements=self._requirements_fn,\n )\n\n @staticmethod\n def hardcoded_executor(executor: "Executor"):\n return ExecutorDefinition(\n # Executor name was only relevant in the pipeline/solid/mode world, so we\n # can put a dummy value\n name="__executor__",\n executor_creation_fn=lambda _init_context: executor,\n )\n\n # Backcompat: Overrides configured method to provide name as a keyword argument.\n # If no name is provided, the name is pulled off of this ExecutorDefinition.\n
[docs] @public\n def configured(\n self,\n config_or_config_fn: Any,\n name: Optional[str] = None,\n config_schema: Optional[UserConfigSchema] = None,\n description: Optional[str] = None,\n ) -> Self:\n """Wraps this object in an object of the same type that provides configuration to the inner\n object.\n\n Using ``configured`` may result in config values being displayed in\n the Dagster UI, so it is not recommended to use this API with sensitive values,\n such as secrets.\n\n Args:\n config_or_config_fn (Union[Any, Callable[[Any], Any]]): Either (1) Run configuration\n that fully satisfies this object's config schema or (2) A function that accepts run\n configuration and returns run configuration that fully satisfies this object's\n config schema. In the latter case, config_schema must be specified. When\n passing a function, it's easiest to use :py:func:`configured`.\n name (Optional[str]): Name of the new definition. If not provided, the emitted\n definition will inherit the name of the `ExecutorDefinition` upon which this\n function is called.\n config_schema (Optional[ConfigSchema]): If config_or_config_fn is a function, the config\n schema that its input must satisfy. If not set, Dagster will accept any config\n provided.\n description (Optional[str]): Description of the new definition. If not specified,\n inherits the description of the definition being configured.\n\n Returns (ConfigurableDefinition): A configured version of this object.\n """\n name = check.opt_str_param(name, "name")\n\n new_config_schema = ConfiguredDefinitionConfigSchema(\n self, convert_user_facing_definition_config_schema(config_schema), config_or_config_fn\n )\n\n return self.copy_for_configured(name or self.name, description, new_config_schema)
\n\n\n@overload\ndef executor(name: ExecutorCreationFunction) -> ExecutorDefinition: ...\n\n\n@overload\ndef executor(\n name: Optional[str] = ...,\n config_schema: Optional[UserConfigSchema] = ...,\n requirements: Optional[\n Union[ExecutorRequirementsFunction, Sequence[ExecutorRequirement]]\n ] = ...,\n) -> "_ExecutorDecoratorCallable": ...\n\n\n
[docs]def executor(\n name: Union[ExecutorCreationFunction, Optional[str]] = None,\n config_schema: Optional[UserConfigSchema] = None,\n requirements: Optional[\n Union[ExecutorRequirementsFunction, Sequence[ExecutorRequirement]]\n ] = None,\n) -> Union[ExecutorDefinition, "_ExecutorDecoratorCallable"]:\n """Define an executor.\n\n The decorated function should accept an :py:class:`InitExecutorContext` and return an instance\n of :py:class:`Executor`.\n\n Args:\n name (Optional[str]): The name of the executor.\n config_schema (Optional[ConfigSchema]): The schema for the config. Configuration data available in\n `init_context.executor_config`. If not set, Dagster will accept any config provided for.\n requirements (Optional[List[ExecutorRequirement]]): Any requirements that must\n be met in order for the executor to be usable for a particular job execution.\n """\n if callable(name):\n check.invariant(config_schema is None)\n check.invariant(requirements is None)\n return _ExecutorDecoratorCallable()(name)\n\n return _ExecutorDecoratorCallable(\n name=name, config_schema=config_schema, requirements=requirements\n )
\n\n\nclass _ExecutorDecoratorCallable:\n def __init__(self, name=None, config_schema=None, requirements=None):\n self.name = check.opt_str_param(name, "name")\n self.config_schema = config_schema # type check in definition\n self.requirements = requirements\n\n def __call__(self, fn: ExecutorCreationFunction) -> ExecutorDefinition:\n check.callable_param(fn, "fn")\n\n if not self.name:\n self.name = fn.__name__\n\n executor_def = ExecutorDefinition(\n name=self.name,\n config_schema=self.config_schema,\n executor_creation_fn=fn,\n requirements=self.requirements,\n )\n\n # `update_wrapper` typing cannot currently handle a Union of Callables correctly\n update_wrapper(executor_def, wrapped=fn) # type: ignore\n\n return executor_def\n\n\ndef _core_in_process_executor_creation(config: ExecutorConfig) -> "InProcessExecutor":\n from dagster._core.executor.in_process import InProcessExecutor\n\n return InProcessExecutor(\n # shouldn't need to .get() here - issue with defaults in config setup\n retries=RetryMode.from_config(check.dict_elem(config, "retries")), # type: ignore # (possible none)\n marker_to_close=config.get("marker_to_close"), # type: ignore # (should be str)\n )\n\n\nIN_PROC_CONFIG = Field(\n {\n "retries": get_retries_config(),\n "marker_to_close": Field(\n str,\n is_required=False,\n description="[DEPRECATED]",\n ),\n },\n description="Execute all steps in a single process.",\n)\n\n\n
[docs]@executor(\n name="in_process",\n config_schema=IN_PROC_CONFIG,\n)\ndef in_process_executor(init_context):\n """The in-process executor executes all steps in a single process.\n\n To select it, include the following top-level fragment in config:\n\n .. code-block:: yaml\n\n execution:\n in_process:\n\n Execution priority can be configured using the ``dagster/priority`` tag via op metadata,\n where the higher the number the higher the priority. 0 is the default and both positive\n and negative numbers can be used.\n """\n return _core_in_process_executor_creation(init_context.executor_config)
\n\n\n@executor(name="execute_in_process_executor")\ndef execute_in_process_executor(_) -> "InProcessExecutor":\n """Executor used by execute_in_process.\n\n Use of this executor triggers special behavior in the config system that ignores all incoming\n executor config. This is because someone might set executor config on a job, and when we foist\n this executor onto the job for `execute_in_process`, that config becomes nonsensical.\n """\n from dagster._core.executor.in_process import InProcessExecutor\n\n return InProcessExecutor(\n retries=RetryMode.ENABLED,\n marker_to_close=None,\n )\n\n\ndef _core_multiprocess_executor_creation(config: ExecutorConfig) -> "MultiprocessExecutor":\n from dagster._core.executor.multiprocess import MultiprocessExecutor\n\n # unpack optional selector\n start_method = None\n start_cfg: Dict[str, object] = {}\n start_selector = check.opt_dict_elem(config, "start_method")\n if start_selector:\n start_method, start_cfg = next(iter(start_selector.items()))\n\n return MultiprocessExecutor(\n max_concurrent=check.opt_int_elem(config, "max_concurrent"),\n tag_concurrency_limits=check.opt_list_elem(config, "tag_concurrency_limits"),\n retries=RetryMode.from_config(check.dict_elem(config, "retries")), # type: ignore\n start_method=start_method,\n explicit_forkserver_preload=check.opt_list_elem(start_cfg, "preload_modules", of_type=str),\n )\n\n\nMULTI_PROC_CONFIG = Field(\n {\n "max_concurrent": Field(\n Noneable(Int),\n default_value=None,\n description=(\n "The number of processes that may run concurrently. "\n "By default, this is set to be the return value of `multiprocessing.cpu_count()`."\n ),\n ),\n "tag_concurrency_limits": get_tag_concurrency_limits_config(),\n "start_method": Field(\n Selector(\n fields={\n "spawn": Field(\n {},\n description=(\n "Configure the multiprocess executor to start subprocesses "\n "using `spawn`."\n ),\n ),\n "forkserver": Field(\n {\n "preload_modules": Field(\n [str],\n is_required=False,\n description=(\n "Explicitly specify the modules to preload in the forkserver."\n " Otherwise, there are two cases for default values if modules"\n " are not specified. If the Dagster job was loaded from a"\n " module, the same module will be preloaded. If not, the"\n " `dagster` module is preloaded."\n ),\n ),\n },\n description=(\n "Configure the multiprocess executor to start subprocesses "\n "using `forkserver`."\n ),\n ),\n # fork currently unsupported due to threads usage\n }\n ),\n is_required=False,\n description=(\n "Select how subprocesses are created. By default, `spawn` is selected. See "\n "https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods."\n ),\n ),\n "retries": get_retries_config(),\n },\n description="Execute each step in an individual process.",\n)\n\n\n
[docs]@executor(\n name="multiprocess",\n config_schema=MULTI_PROC_CONFIG,\n requirements=multiple_process_executor_requirements(),\n)\ndef multiprocess_executor(init_context):\n """The multiprocess executor executes each step in an individual process.\n\n Any job that does not specify custom executors will use the multiprocess_executor by default.\n To configure the multiprocess executor, include a fragment such as the following in your run\n config:\n\n .. code-block:: yaml\n\n execution:\n config:\n multiprocess:\n max_concurrent: 4\n\n The ``max_concurrent`` arg is optional and tells the execution engine how many processes may run\n concurrently. By default, or if you set ``max_concurrent`` to be None or 0, this is the return value of\n :py:func:`python:multiprocessing.cpu_count`.\n\n Execution priority can be configured using the ``dagster/priority`` tag via op metadata,\n where the higher the number the higher the priority. 0 is the default and both positive\n and negative numbers can be used.\n """\n return _core_multiprocess_executor_creation(init_context.executor_config)
\n\n\ndef check_cross_process_constraints(init_context: "InitExecutorContext") -> None:\n from dagster._core.executor.init import InitExecutorContext\n\n check.inst_param(init_context, "init_context", InitExecutorContext)\n requirements_lst = init_context.executor_def.get_requirements(init_context.executor_config)\n\n if ExecutorRequirement.RECONSTRUCTABLE_JOB in requirements_lst:\n _check_intra_process_job(init_context.job)\n\n if ExecutorRequirement.NON_EPHEMERAL_INSTANCE in requirements_lst:\n _check_non_ephemeral_instance(init_context.instance)\n\n\ndef _check_intra_process_job(job: IJob) -> None:\n if not isinstance(job, ReconstructableJob):\n raise DagsterUnmetExecutorRequirementsError(\n "You have attempted to use an executor that uses multiple processes with the job"\n f' "{job.get_definition().name}" that is not reconstructable. Job must be loaded in a'\n " way that allows dagster to reconstruct them in a new process. This means: \\n *"\n " using the file, module, or workspace.yaml arguments of"\n " dagster-webserver/dagster-graphql/dagster\\n * loading the job through the"\n " reconstructable() function\\n"\n )\n\n\ndef _check_non_ephemeral_instance(instance: "DagsterInstance") -> None:\n if instance.is_ephemeral:\n raise DagsterUnmetExecutorRequirementsError(\n "You have attempted to use an executor that uses multiple processes with an ephemeral"\n " DagsterInstance. A non-ephemeral instance is needed to coordinate execution between"\n " multiple processes. You can configure your default instance via $DAGSTER_HOME or"\n " ensure a valid one is passed when invoking the python APIs. You can learn more about"\n " setting up a persistent DagsterInstance from the DagsterInstance docs here:"\n " https://docs.dagster.io/deployment/dagster-instance#default-local-behavior"\n )\n\n\ndef _get_default_executor_requirements(\n executor_config: ExecutorConfig,\n) -> Sequence[ExecutorRequirement]:\n return multiple_process_executor_requirements() if "multiprocess" in executor_config else []\n\n\n
[docs]@executor(\n name="multi_or_in_process_executor",\n config_schema=Field(\n Selector(\n {"multiprocess": MULTI_PROC_CONFIG, "in_process": IN_PROC_CONFIG},\n ),\n default_value={"multiprocess": {}},\n ),\n requirements=_get_default_executor_requirements,\n)\ndef multi_or_in_process_executor(init_context: "InitExecutorContext") -> "Executor":\n """The default executor for a job.\n\n This is the executor available by default on a :py:class:`JobDefinition`\n that does not provide custom executors. This executor has a multiprocessing-enabled mode, and a\n single-process mode. By default, multiprocessing mode is enabled. Switching between multiprocess\n mode and in-process mode can be achieved via config.\n\n .. code-block:: yaml\n\n execution:\n config:\n multiprocess:\n\n\n execution:\n config:\n in_process:\n\n When using the multiprocess mode, ``max_concurrent`` and ``retries`` can also be configured.\n\n .. code-block:: yaml\n\n execution:\n config:\n multiprocess:\n max_concurrent: 4\n retries:\n enabled:\n\n The ``max_concurrent`` arg is optional and tells the execution engine how many processes may run\n concurrently. By default, or if you set ``max_concurrent`` to be 0, this is the return value of\n :py:func:`python:multiprocessing.cpu_count`.\n\n When using the in_process mode, then only retries can be configured.\n\n Execution priority can be configured using the ``dagster/priority`` tag via op metadata,\n where the higher the number the higher the priority. 0 is the default and both positive\n and negative numbers can be used.\n """\n if "multiprocess" in init_context.executor_config:\n return _core_multiprocess_executor_creation(\n check.dict_elem(init_context.executor_config, "multiprocess")\n )\n else:\n return _core_in_process_executor_creation(\n check.dict_elem(init_context.executor_config, "in_process")\n )
\n
", "current_page_name": "_modules/dagster/_core/definitions/executor_definition", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.executor_definition"}, "freshness_policy": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.freshness_policy

\nimport datetime\nfrom typing import AbstractSet, NamedTuple, Optional\n\nimport pendulum\n\nimport dagster._check as check\nfrom dagster._annotations import experimental\nfrom dagster._core.errors import DagsterInvalidDefinitionError\nfrom dagster._serdes import whitelist_for_serdes\nfrom dagster._utils.schedules import (\n    is_valid_cron_schedule,\n    reverse_cron_string_iterator,\n)\n\nfrom .events import AssetKey\n\n\nclass FreshnessConstraint(NamedTuple):\n    asset_keys: AbstractSet[AssetKey]\n    required_data_time: datetime.datetime\n    required_by_time: datetime.datetime\n\n\nclass FreshnessMinutes(NamedTuple):\n    overdue_minutes: float\n    lag_minutes: float\n\n\n
[docs]@experimental\n@whitelist_for_serdes\nclass FreshnessPolicy(\n NamedTuple(\n "_FreshnessPolicy",\n [\n ("maximum_lag_minutes", float),\n ("cron_schedule", Optional[str]),\n ("cron_schedule_timezone", Optional[str]),\n ],\n )\n):\n """A FreshnessPolicy specifies how up-to-date you want a given asset to be.\n\n Attaching a FreshnessPolicy to an asset definition encodes an expectation on the upstream data\n that you expect to be incorporated into the current state of that asset at certain points in time.\n How this is calculated differs depending on if the asset is unpartitioned or time-partitioned\n (other partitioning schemes are not supported).\n\n For time-partitioned assets, the current data time for the asset is simple to calculate. The\n upstream data that is incorporated into the asset is exactly the set of materialized partitions\n for that asset. Thus, the current data time for the asset is simply the time up to which all\n partitions have been materialized.\n\n For unpartitioned assets, the current data time is based on the upstream materialization records\n that were read to generate the current state of the asset. More specifically,\n imagine you have two assets, where A depends on B. If `B` has a FreshnessPolicy defined, this\n means that at time T, the most recent materialization of `B` should have come after a\n materialization of `A` which was no more than `maximum_lag_minutes` ago. This calculation is\n recursive: any given asset is expected to incorporate up-to-date data from all of its upstream\n assets.\n\n It is assumed that all asset definitions with no upstream asset definitions consume from some\n always-updating source. That is, if you materialize that asset at time T, it will incorporate\n all data up to time T.\n\n If `cron_schedule` is not defined, the given asset will be expected to incorporate upstream\n data from no more than `maximum_lag_minutes` ago at all points in time. For example, "The events\n table should always have data from at most 1 hour ago".\n\n If `cron_schedule` is defined, the given asset will be expected to incorporate upstream data\n from no more than `maximum_lag_minutes` ago at each cron schedule tick. For example, "By 9AM,\n the signups table should contain all of yesterday's data".\n\n The freshness status of assets with policies defined will be visible in the UI. If you are using\n an asset reconciliation sensor, this sensor will kick off runs to help keep your assets up to\n date with respect to their FreshnessPolicy.\n\n Args:\n maximum_lag_minutes (float): An upper bound for how old the data contained within this\n asset may be.\n cron_schedule (Optional[str]): A cron schedule string (e.g. ``"0 1 * * *"``) specifying a\n series of times by which the `maximum_lag_minutes` constraint must be satisfied. If\n no cron schedule is provided, then this constraint must be satisfied at all times.\n cron_schedule_timezone (Optional[str]): Timezone in which the cron schedule should be evaluated.\n If not specified, defaults to UTC. Supported strings for timezones are the ones provided\n by the `IANA time zone database <https://www.iana.org/time-zones>` - e.g.\n "America/Los_Angeles".\n\n .. 
code-block:: python\n\n # At any point in time, this asset must incorporate all upstream data from at least 30 minutes ago.\n @asset(freshness_policy=FreshnessPolicy(maximum_lag_minutes=30))\n def fresh_asset():\n ...\n\n # At any point in time, this asset must incorporate all upstream data from at least 30 minutes ago.\n @asset(freshness_policy=FreshnessPolicy(maximum_lag_minutes=30))\n def cron_up_to_date_asset():\n ...\n\n """\n\n def __new__(\n cls,\n *,\n maximum_lag_minutes: float,\n cron_schedule: Optional[str] = None,\n cron_schedule_timezone: Optional[str] = None,\n ):\n if cron_schedule is not None:\n if not is_valid_cron_schedule(cron_schedule):\n raise DagsterInvalidDefinitionError(f"Invalid cron schedule '{cron_schedule}'.")\n check.param_invariant(\n is_valid_cron_schedule(cron_schedule),\n "cron_schedule",\n f"Invalid cron schedule '{cron_schedule}'.",\n )\n if cron_schedule_timezone is not None:\n check.param_invariant(\n cron_schedule is not None,\n "cron_schedule_timezone",\n "Cannot specify cron_schedule_timezone without a cron_schedule.",\n )\n try:\n # Verify that the timezone can be loaded\n pendulum.tz.timezone(cron_schedule_timezone) # type: ignore\n except Exception as e:\n raise DagsterInvalidDefinitionError(\n "Invalid cron schedule timezone '{cron_schedule_timezone}'. "\n ) from e\n return super(FreshnessPolicy, cls).__new__(\n cls,\n maximum_lag_minutes=float(\n check.numeric_param(maximum_lag_minutes, "maximum_lag_minutes")\n ),\n cron_schedule=check.opt_str_param(cron_schedule, "cron_schedule"),\n cron_schedule_timezone=check.opt_str_param(\n cron_schedule_timezone, "cron_schedule_timezone"\n ),\n )\n\n @classmethod\n def _create(cls, *args):\n """Pickle requires a method with positional arguments to construct\n instances of a class. Since the constructor for this class has\n keyword arguments only, we define this method to be used by pickle.\n """\n return cls(maximum_lag_minutes=args[0], cron_schedule=args[1])\n\n def __reduce__(self):\n return (self._create, (self.maximum_lag_minutes, self.cron_schedule))\n\n @property\n def maximum_lag_delta(self) -> datetime.timedelta:\n return datetime.timedelta(minutes=self.maximum_lag_minutes)\n\n def get_evaluation_tick(\n self,\n evaluation_time: datetime.datetime,\n ) -> Optional[datetime.datetime]:\n if self.cron_schedule:\n # most recent cron schedule tick\n schedule_ticks = reverse_cron_string_iterator(\n end_timestamp=evaluation_time.timestamp(),\n cron_string=self.cron_schedule,\n execution_timezone=self.cron_schedule_timezone,\n )\n return next(schedule_ticks)\n else:\n return evaluation_time\n\n def minutes_overdue(\n self,\n data_time: Optional[datetime.datetime],\n evaluation_time: datetime.datetime,\n ) -> Optional[FreshnessMinutes]:\n """Returns a number of minutes past the specified freshness policy that this asset currently\n is. If the asset is missing upstream data, or is not materialized at all, then it is unknown\n how overdue it is, and this will return None.\n\n Args:\n data_time (Optional[datetime]): The timestamp of the data that was used to create the\n current version of this asset.\n evaluation_time (datetime): The time at which we're evaluating the overdueness of this\n asset. 
Generally, this is the current time.\n """\n if data_time is None:\n return None\n evaluation_tick = self.get_evaluation_tick(evaluation_time)\n if evaluation_tick is None:\n return None\n required_time = evaluation_tick - self.maximum_lag_delta\n\n return FreshnessMinutes(\n lag_minutes=max(0.0, (evaluation_tick - data_time).total_seconds() / 60),\n overdue_minutes=max(0.0, (required_time - data_time).total_seconds() / 60),\n )
\n
", "current_page_name": "_modules/dagster/_core/definitions/freshness_policy", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.freshness_policy"}, "freshness_policy_sensor_definition": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.freshness_policy_sensor_definition

\nfrom typing import Callable, Dict, Mapping, NamedTuple, Optional, Set, cast\n\nimport pendulum\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, experimental\nfrom dagster._core.definitions.asset_selection import AssetSelection\nfrom dagster._core.definitions.data_time import CachingDataTimeResolver\nfrom dagster._core.definitions.events import AssetKey\nfrom dagster._core.definitions.freshness_policy import FreshnessPolicy\nfrom dagster._core.definitions.resource_annotation import get_resource_args\nfrom dagster._core.definitions.scoped_resources_builder import Resources, ScopedResourcesBuilder\nfrom dagster._core.errors import (\n    DagsterInvalidDefinitionError,\n    DagsterInvalidInvocationError,\n    FreshnessPolicySensorExecutionError,\n    user_code_error_boundary,\n)\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._serdes import (\n    serialize_value,\n    whitelist_for_serdes,\n)\nfrom dagster._serdes.errors import DeserializationError\nfrom dagster._serdes.serdes import deserialize_value\nfrom dagster._seven import JSONDecodeError\n\nfrom .sensor_definition import (\n    DefaultSensorStatus,\n    SensorDefinition,\n    SensorEvaluationContext,\n    SensorType,\n    SkipReason,\n    get_context_param_name,\n    get_sensor_context_from_args_or_kwargs,\n    validate_and_get_resource_dict,\n)\n\n\n@whitelist_for_serdes\nclass FreshnessPolicySensorCursor(\n    NamedTuple(\n        "_FreshnessPolicySensorCursor",\n        [("minutes_late_by_key_str", Mapping[str, Optional[float]])],\n    )\n):\n    def __new__(cls, minutes_late_by_key_str: Mapping[str, Optional[float]]):\n        return super(FreshnessPolicySensorCursor, cls).__new__(\n            cls,\n            minutes_late_by_key_str=check.mapping_param(\n                minutes_late_by_key_str, "minutes_late_by_key_str", key_type=str\n            ),\n        )\n\n    @staticmethod\n    def is_valid(json_str: str) -> bool:\n        try:\n            deserialize_value(json_str, FreshnessPolicySensorCursor)\n            return True\n        except (JSONDecodeError, DeserializationError):\n            return False\n\n    @staticmethod\n    def from_dict(\n        minutes_late_by_key: Mapping[AssetKey, Optional[float]]\n    ) -> "FreshnessPolicySensorCursor":\n        return FreshnessPolicySensorCursor(\n            minutes_late_by_key_str={k.to_user_string(): v for k, v in minutes_late_by_key.items()}\n        )\n\n    @property\n    def minutes_late_by_key(self) -> Mapping[AssetKey, Optional[float]]:\n        return {AssetKey.from_user_string(k): v for k, v in self.minutes_late_by_key_str.items()}\n\n    def to_json(self) -> str:\n        return serialize_value(cast(NamedTuple, self))\n\n    @staticmethod\n    def from_json(json_str: str) -> "FreshnessPolicySensorCursor":\n        return deserialize_value(json_str, FreshnessPolicySensorCursor)\n\n\n
[docs]class FreshnessPolicySensorContext(\n NamedTuple(\n "_FreshnessPolicySensorContext",\n [\n ("sensor_name", PublicAttr[str]),\n ("asset_key", PublicAttr[AssetKey]),\n ("freshness_policy", PublicAttr[FreshnessPolicy]),\n ("minutes_overdue", PublicAttr[Optional[float]]),\n ("previous_minutes_overdue", PublicAttr[Optional[float]]),\n ("instance", PublicAttr[DagsterInstance]),\n ("resources", Resources),\n ],\n )\n):\n """The ``context`` object available to a decorated function of ``freshness_policy_sensor``.\n\n Attributes:\n sensor_name (str): the name of the sensor.\n asset_key (AssetKey): the key of the asset being monitored\n freshness_policy (FreshnessPolicy): the freshness policy of the asset being monitored\n minutes_overdue (Optional[float])\n previous_minutes_overdue (Optional[float]): the minutes_overdue value for this asset on the\n previous sensor tick.\n instance (DagsterInstance): the current instance.\n """\n\n def __new__(\n cls,\n sensor_name: str,\n asset_key: AssetKey,\n freshness_policy: FreshnessPolicy,\n minutes_overdue: Optional[float],\n previous_minutes_overdue: Optional[float],\n instance: DagsterInstance,\n resources: Optional[Resources] = None,\n ):\n minutes_overdue = check.opt_numeric_param(minutes_overdue, "minutes_overdue")\n previous_minutes_overdue = check.opt_numeric_param(\n previous_minutes_overdue, "previous_minutes_overdue"\n )\n return super(FreshnessPolicySensorContext, cls).__new__(\n cls,\n sensor_name=check.str_param(sensor_name, "sensor_name"),\n asset_key=check.inst_param(asset_key, "asset_key", AssetKey),\n freshness_policy=check.inst_param(freshness_policy, "FreshnessPolicy", FreshnessPolicy),\n minutes_overdue=float(minutes_overdue) if minutes_overdue is not None else None,\n previous_minutes_overdue=(\n float(previous_minutes_overdue) if previous_minutes_overdue is not None else None\n ),\n instance=check.inst_param(instance, "instance", DagsterInstance),\n resources=resources or ScopedResourcesBuilder.build_empty(),\n )
\n\n\n
[docs]@experimental\ndef build_freshness_policy_sensor_context(\n    sensor_name: str,\n    asset_key: AssetKey,\n    freshness_policy: FreshnessPolicy,\n    minutes_overdue: Optional[float],\n    previous_minutes_overdue: Optional[float] = None,\n    instance: Optional[DagsterInstance] = None,\n    resources: Optional[Resources] = None,\n) -> FreshnessPolicySensorContext:\n    """Builds freshness policy sensor context from provided parameters.\n\n    This function can be used to provide the context argument when directly invoking a function\n    decorated with `@freshness_policy_sensor`, such as when writing unit tests.\n\n    Args:\n        sensor_name (str): The name of the sensor the context is being constructed for.\n        asset_key (AssetKey): The AssetKey for the monitored asset\n        freshness_policy (FreshnessPolicy): The FreshnessPolicy for the monitored asset\n        minutes_overdue (Optional[float]): How overdue the monitored asset currently is\n        previous_minutes_overdue (Optional[float]): How overdue the monitored asset was on the\n            previous tick.\n        instance (DagsterInstance): The dagster instance configured for the context.\n\n    Examples:\n        .. code-block:: python\n\n            context = build_freshness_policy_sensor_context(\n                sensor_name="freshness_policy_sensor_to_invoke",\n                asset_key=AssetKey("some_asset"),\n                freshness_policy=FreshnessPolicy(maximum_lag_minutes=30),\n                minutes_overdue=10.0,\n            )\n            freshness_policy_sensor_to_invoke(context)\n    """\n    return FreshnessPolicySensorContext(\n        sensor_name=sensor_name,\n        asset_key=asset_key,\n        freshness_policy=freshness_policy,\n        minutes_overdue=minutes_overdue,\n        previous_minutes_overdue=previous_minutes_overdue,\n        instance=instance or DagsterInstance.ephemeral(),\n        resources=resources,\n    )
\n\n\n
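For illustration, a minimal sketch of how ``build_freshness_policy_sensor_context`` could be used to unit-test a freshness policy sensor; the sensor name, asset key, and alerting logic are hypothetical:

.. code-block:: python

    from dagster import (
        AssetKey,
        AssetSelection,
        FreshnessPolicy,
        build_freshness_policy_sensor_context,
        freshness_policy_sensor,
    )

    # Hypothetical sensor used only to illustrate direct invocation in a test.
    @freshness_policy_sensor(asset_selection=AssetSelection.keys("my_asset"))
    def overdue_alert_sensor(context):
        if context.minutes_overdue and context.minutes_overdue > 0:
            ...  # e.g. send an alert about context.asset_key

    def test_overdue_alert_sensor():
        # Build the context by hand instead of running the sensor daemon.
        context = build_freshness_policy_sensor_context(
            sensor_name="overdue_alert_sensor",
            asset_key=AssetKey("my_asset"),
            freshness_policy=FreshnessPolicy(maximum_lag_minutes=30),
            minutes_overdue=10.0,
        )
        overdue_alert_sensor(context)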
[docs]class FreshnessPolicySensorDefinition(SensorDefinition):\n """Define a sensor that reacts to the status of a given set of asset freshness policies,\n where the decorated function will be evaluated on every sensor tick.\n\n Args:\n name (str): The name of the sensor. Defaults to the name of the decorated function.\n freshness_policy_sensor_fn (Callable[[FreshnessPolicySensorContext], None]): The core\n evaluation function for the sensor. Takes a :py:class:`~dagster.FreshnessPolicySensorContext`.\n asset_selection (AssetSelection): The asset selection monitored by the sensor.\n minimum_interval_seconds (Optional[int]): The minimum number of seconds that will elapse\n between sensor evaluations.\n description (Optional[str]): A human-readable description of the sensor.\n default_status (DefaultSensorStatus): Whether the sensor starts as running or not. The default\n status can be overridden from the Dagster UI or via the GraphQL API.\n """\n\n def __init__(\n self,\n name: str,\n asset_selection: AssetSelection,\n freshness_policy_sensor_fn: Callable[..., None],\n minimum_interval_seconds: Optional[int] = None,\n description: Optional[str] = None,\n default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,\n required_resource_keys: Optional[Set[str]] = None,\n ):\n check.str_param(name, "name")\n check.inst_param(asset_selection, "asset_selection", AssetSelection)\n check.opt_int_param(minimum_interval_seconds, "minimum_interval_seconds")\n check.opt_str_param(description, "description")\n check.inst_param(default_status, "default_status", DefaultSensorStatus)\n\n self._freshness_policy_sensor_fn = check.callable_param(\n freshness_policy_sensor_fn, "freshness_policy_sensor_fn"\n )\n\n resource_arg_names: Set[str] = {\n arg.name for arg in get_resource_args(freshness_policy_sensor_fn)\n }\n\n combined_required_resource_keys = (\n check.opt_set_param(required_resource_keys, "required_resource_keys", of_type=str)\n | resource_arg_names\n )\n\n def _wrapped_fn(context: SensorEvaluationContext):\n from dagster._utils.caching_instance_queryer import (\n CachingInstanceQueryer, # expensive import\n )\n\n if context.repository_def is None:\n raise DagsterInvalidInvocationError(\n "The `repository_def` property on the `SensorEvaluationContext` passed into a "\n "`FreshnessPolicySensorDefinition` must not be None."\n )\n\n if context.cursor is None or not FreshnessPolicySensorCursor.is_valid(context.cursor):\n new_cursor = FreshnessPolicySensorCursor({})\n context.update_cursor(new_cursor.to_json())\n yield SkipReason(f"Initializing {name}.")\n return\n\n evaluation_time = pendulum.now("UTC")\n asset_graph = context.repository_def.asset_graph\n instance_queryer = CachingInstanceQueryer(\n context.instance, asset_graph, evaluation_time\n )\n data_time_resolver = CachingDataTimeResolver(instance_queryer=instance_queryer)\n monitored_keys = asset_selection.resolve(asset_graph)\n\n # get the previous status from the cursor\n previous_minutes_late_by_key = FreshnessPolicySensorCursor.from_json(\n context.cursor\n ).minutes_late_by_key\n\n minutes_late_by_key: Dict[AssetKey, Optional[float]] = {}\n for asset_key in monitored_keys:\n freshness_policy = asset_graph.freshness_policies_by_key.get(asset_key)\n if freshness_policy is None:\n continue\n\n # get the current minutes_overdue value for this asset\n result = data_time_resolver.get_minutes_overdue(\n evaluation_time=evaluation_time,\n asset_key=asset_key,\n )\n minutes_late_by_key[asset_key] = result.overdue_minutes if result else 
None\n\n resource_args_populated = validate_and_get_resource_dict(\n context.resources, name, resource_arg_names\n )\n context_param_name = get_context_param_name(freshness_policy_sensor_fn)\n freshness_context = FreshnessPolicySensorContext(\n sensor_name=name,\n asset_key=asset_key,\n freshness_policy=freshness_policy,\n minutes_overdue=minutes_late_by_key[asset_key],\n previous_minutes_overdue=previous_minutes_late_by_key.get(asset_key),\n instance=context.instance,\n resources=context.resources,\n )\n\n with user_code_error_boundary(\n FreshnessPolicySensorExecutionError,\n lambda: f'Error occurred during the execution of sensor "{name}".',\n ):\n context_param = (\n {context_param_name: freshness_context} if context_param_name else {}\n )\n result = freshness_policy_sensor_fn(\n **context_param,\n **resource_args_populated,\n )\n if result is not None:\n raise DagsterInvalidDefinitionError(\n "Functions decorated by `@freshness_policy_sensor` may not return or yield"\n " a value."\n )\n\n context.update_cursor(\n FreshnessPolicySensorCursor.from_dict(minutes_late_by_key).to_json()\n )\n\n super(FreshnessPolicySensorDefinition, self).__init__(\n name=name,\n evaluation_fn=_wrapped_fn,\n minimum_interval_seconds=minimum_interval_seconds,\n description=description,\n default_status=default_status,\n required_resource_keys=combined_required_resource_keys,\n )\n\n def __call__(self, *args, **kwargs) -> None:\n context_param_name = get_context_param_name(self._freshness_policy_sensor_fn)\n\n sensor_context = get_sensor_context_from_args_or_kwargs(\n self._freshness_policy_sensor_fn,\n args,\n kwargs,\n context_type=FreshnessPolicySensorContext,\n )\n context_param = (\n {context_param_name: sensor_context} if context_param_name and sensor_context else {}\n )\n\n resources = validate_and_get_resource_dict(\n sensor_context.resources if sensor_context else ScopedResourcesBuilder.build_empty(),\n self._name,\n self._required_resource_keys,\n )\n\n return self._freshness_policy_sensor_fn(**context_param, **resources)\n\n @property\n def sensor_type(self) -> SensorType:\n return SensorType.FRESHNESS_POLICY
\n\n\n
[docs]@experimental\ndef freshness_policy_sensor(\n asset_selection: AssetSelection,\n *,\n name: Optional[str] = None,\n minimum_interval_seconds: Optional[int] = None,\n description: Optional[str] = None,\n default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,\n) -> Callable[[Callable[..., None]], FreshnessPolicySensorDefinition,]:\n """Define a sensor that reacts to the status of a given set of asset freshness policies, where the\n decorated function will be evaluated on every tick for each asset in the selection that has a\n FreshnessPolicy defined.\n\n Note: returning or yielding a value from the annotated function will result in an error.\n\n Takes a :py:class:`~dagster.FreshnessPolicySensorContext`.\n\n Args:\n asset_selection (AssetSelection): The asset selection monitored by the sensor.\n name (Optional[str]): The name of the sensor. Defaults to the name of the decorated function.\n freshness_policy_sensor_fn (Callable[[FreshnessPolicySensorContext], None]): The core\n evaluation function for the sensor. Takes a :py:class:`~dagster.FreshnessPolicySensorContext`.\n minimum_interval_seconds (Optional[int]): The minimum number of seconds that will elapse\n between sensor evaluations.\n description (Optional[str]): A human-readable description of the sensor.\n default_status (DefaultSensorStatus): Whether the sensor starts as running or not. The default\n status can be overridden from the Dagster UI or via the GraphQL API.\n """\n\n def inner(fn: Callable[..., None]) -> FreshnessPolicySensorDefinition:\n check.callable_param(fn, "fn")\n sensor_name = name or fn.__name__\n\n return FreshnessPolicySensorDefinition(\n name=sensor_name,\n freshness_policy_sensor_fn=fn,\n asset_selection=asset_selection,\n minimum_interval_seconds=minimum_interval_seconds,\n description=description,\n default_status=default_status,\n )\n\n return inner
\n
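As a sketch of the decorator above (names and alerting behavior are invented for the example), a sensor can compare ``minutes_overdue`` against ``previous_minutes_overdue`` to act only on the tick where an asset first becomes overdue:

.. code-block:: python

    from dagster import AssetSelection, freshness_policy_sensor

    # Illustrative only: monitor every asset that has a FreshnessPolicy defined.
    @freshness_policy_sensor(asset_selection=AssetSelection.all())
    def notify_when_newly_overdue(context):
        was_overdue = (context.previous_minutes_overdue or 0.0) > 0.0
        is_overdue = (context.minutes_overdue or 0.0) > 0.0
        # Fire only on the fresh -> overdue transition; the decorated function
        # must not return or yield a value.
        if is_overdue and not was_overdue:
            ...  # notify about context.asset_key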
", "current_page_name": "_modules/dagster/_core/definitions/freshness_policy_sensor_definition", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.freshness_policy_sensor_definition"}, "graph_definition": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.graph_definition

\nfrom collections import OrderedDict, defaultdict\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Dict,\n    Iterable,\n    Iterator,\n    List,\n    Mapping,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n    TypeVar,\n    Union,\n    cast,\n)\n\nfrom toposort import CircularDependencyError, toposort_flatten\nfrom typing_extensions import Self\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.definitions.config import ConfigMapping\nfrom dagster._core.definitions.definition_config_schema import IDefinitionConfigSchema\nfrom dagster._core.definitions.policy import RetryPolicy\nfrom dagster._core.errors import DagsterInvalidDefinitionError, DagsterInvariantViolationError\nfrom dagster._core.selector.subset_selector import AssetSelectionData\nfrom dagster._core.types.dagster_type import (\n    DagsterType,\n    DagsterTypeKind,\n    construct_dagster_type_dictionary,\n)\n\nfrom .dependency import (\n    DependencyMapping,\n    DependencyStructure,\n    GraphNode,\n    Node,\n    NodeHandle,\n    NodeInput,\n    NodeInputHandle,\n    NodeInvocation,\n)\nfrom .hook_definition import HookDefinition\nfrom .input import FanInInputPointer, InputDefinition, InputMapping, InputPointer\nfrom .logger_definition import LoggerDefinition\nfrom .metadata import RawMetadataValue\nfrom .node_container import create_execution_structure, normalize_dependency_dict\nfrom .node_definition import NodeDefinition\nfrom .output import OutputDefinition, OutputMapping\nfrom .resource_requirement import ResourceRequirement\nfrom .version_strategy import VersionStrategy\n\nif TYPE_CHECKING:\n    from dagster._core.execution.execute_in_process_result import ExecuteInProcessResult\n    from dagster._core.instance import DagsterInstance\n\n    from .asset_layer import AssetLayer\n    from .composition import PendingNodeInvocation\n    from .executor_definition import ExecutorDefinition\n    from .job_definition import JobDefinition\n    from .op_definition import OpDefinition\n    from .partition import PartitionedConfig, PartitionsDefinition\n    from .run_config import RunConfig\n    from .source_asset import SourceAsset\n\nT = TypeVar("T")\n\n\ndef _check_node_defs_arg(\n    graph_name: str, node_defs: Optional[Sequence[NodeDefinition]]\n) -> Sequence[NodeDefinition]:\n    node_defs = node_defs or []\n\n    _node_defs = check.opt_sequence_param(node_defs, "node_defs")\n    for node_def in _node_defs:\n        if isinstance(node_def, NodeDefinition):\n            continue\n        elif callable(node_def):\n            raise DagsterInvalidDefinitionError(\n                """You have passed a lambda or function {func} into {name} that is\n                not a node. 
You have likely forgotten to annotate this function with\n                the @op or @graph decorators.\n                """.format(name=graph_name, func=node_def.__name__)\n            )\n        else:\n            raise DagsterInvalidDefinitionError(f"Invalid item in node list: {node_def!r}")\n\n    return node_defs\n\n\ndef create_adjacency_lists(\n    nodes: Sequence[Node],\n    dep_structure: DependencyStructure,\n) -> Tuple[Mapping[str, Set[str]], Mapping[str, Set[str]]]:\n    visit_dict = {s.name: False for s in nodes}\n    forward_edges: Dict[str, Set[str]] = {s.name: set() for s in nodes}\n    backward_edges: Dict[str, Set[str]] = {s.name: set() for s in nodes}\n\n    def visit(node_name: str) -> None:\n        if visit_dict[node_name]:\n            return\n\n        visit_dict[node_name] = True\n\n        for node_output in dep_structure.all_upstream_outputs_from_node(node_name):\n            forward_node = node_output.node.name\n            backward_node = node_name\n            if forward_node in forward_edges:\n                forward_edges[forward_node].add(backward_node)\n                backward_edges[backward_node].add(forward_node)\n                visit(forward_node)\n\n    for s in nodes:\n        visit(s.name)\n\n    return (forward_edges, backward_edges)\n\n\n
[docs]class GraphDefinition(NodeDefinition):\n """Defines a Dagster op graph.\n\n An op graph is made up of\n\n - Nodes, which can either be an op (the functional unit of computation), or another graph.\n - Dependencies, which determine how the values produced by nodes as outputs flow from\n one node to another. This tells Dagster how to arrange nodes into a directed, acyclic graph\n (DAG) of compute.\n\n End users should prefer the :func:`@graph <graph>` decorator. GraphDefinition is generally\n intended to be used by framework authors or for programatically generated graphs.\n\n Args:\n name (str): The name of the graph. Must be unique within any :py:class:`GraphDefinition`\n or :py:class:`JobDefinition` containing the graph.\n description (Optional[str]): A human-readable description of the job.\n node_defs (Optional[Sequence[NodeDefinition]]): The set of ops / graphs used in this graph.\n dependencies (Optional[Dict[Union[str, NodeInvocation], Dict[str, DependencyDefinition]]]):\n A structure that declares the dependencies of each op's inputs on the outputs of other\n ops in the graph. Keys of the top level dict are either the string names of ops in the\n graph or, in the case of aliased ops, :py:class:`NodeInvocations <NodeInvocation>`.\n Values of the top level dict are themselves dicts, which map input names belonging to\n the op or aliased op to :py:class:`DependencyDefinitions <DependencyDefinition>`.\n input_mappings (Optional[Sequence[InputMapping]]): Defines the inputs to the nested graph, and\n how they map to the inputs of its constituent ops.\n output_mappings (Optional[Sequence[OutputMapping]]): Defines the outputs of the nested graph,\n and how they map from the outputs of its constituent ops.\n config (Optional[ConfigMapping]): Defines the config of the graph, and how its schema maps\n to the config of its constituent ops.\n tags (Optional[Dict[str, Any]]): Arbitrary metadata for any execution of the graph.\n Values that are not strings will be json encoded and must meet the criteria that\n `json.loads(json.dumps(value)) == value`. These tag values may be overwritten by tag\n values provided at invocation time.\n\n Examples:\n .. 
code-block:: python\n\n @op\n def return_one():\n return 1\n\n @op\n def add_one(num):\n return num + 1\n\n graph_def = GraphDefinition(\n name='basic',\n node_defs=[return_one, add_one],\n dependencies={'add_one': {'num': DependencyDefinition('return_one')}},\n )\n """\n\n _node_defs: Sequence[NodeDefinition]\n _dagster_type_dict: Mapping[str, DagsterType]\n _dependencies: DependencyMapping[NodeInvocation]\n _dependency_structure: DependencyStructure\n _node_dict: Mapping[str, Node]\n _input_mappings: Sequence[InputMapping]\n _output_mappings: Sequence[OutputMapping]\n _config_mapping: Optional[ConfigMapping]\n _nodes_in_topological_order: Sequence[Node]\n\n # (node name within the graph -> (input name -> SourceAsset to load that input from))\n # Does NOT include keys for:\n # - Inputs to the graph itself\n # - Inputs to nodes within sub-graphs of the graph\n _node_input_source_assets: Mapping[str, Mapping[str, "SourceAsset"]]\n\n def __init__(\n self,\n name: str,\n *,\n description: Optional[str] = None,\n node_defs: Optional[Sequence[NodeDefinition]] = None,\n dependencies: Optional[\n Union[DependencyMapping[str], DependencyMapping[NodeInvocation]]\n ] = None,\n input_mappings: Optional[Sequence[InputMapping]] = None,\n output_mappings: Optional[Sequence[OutputMapping]] = None,\n config: Optional[ConfigMapping] = None,\n tags: Optional[Mapping[str, str]] = None,\n node_input_source_assets: Optional[Mapping[str, Mapping[str, "SourceAsset"]]] = None,\n **kwargs: Any,\n ):\n self._node_defs = _check_node_defs_arg(name, node_defs)\n\n # `dependencies` will be converted to `dependency_structure` and `node_dict`, which may\n # alternatively be passed directly (useful when copying)\n self._dependencies = normalize_dependency_dict(dependencies)\n self._dependency_structure, self._node_dict = create_execution_structure(\n self._node_defs, self._dependencies, graph_definition=self\n )\n\n # Sequence[InputMapping]\n self._input_mappings = check.opt_sequence_param(input_mappings, "input_mappings")\n input_defs = _validate_in_mappings(\n self._input_mappings,\n self._node_dict,\n self._dependency_structure,\n name,\n class_name=type(self).__name__,\n )\n\n # Sequence[OutputMapping]\n self._output_mappings, output_defs = _validate_out_mappings(\n check.opt_sequence_param(output_mappings, "output_mappings"),\n self._node_dict,\n name,\n class_name=type(self).__name__,\n )\n\n self._config_mapping = check.opt_inst_param(config, "config", ConfigMapping)\n\n super(GraphDefinition, self).__init__(\n name=name,\n description=description,\n input_defs=input_defs,\n output_defs=output_defs,\n tags=tags,\n **kwargs,\n )\n\n # must happen after base class construction as properties are assumed to be there\n # eager computation to detect cycles\n self._nodes_in_topological_order = self._get_nodes_in_topological_order()\n self._dagster_type_dict = construct_dagster_type_dictionary([self])\n self._node_input_source_assets = check.opt_mapping_param(\n node_input_source_assets, "node_input_source_assets", key_type=str, value_type=dict\n )\n\n def _get_nodes_in_topological_order(self) -> Sequence[Node]:\n _forward_edges, backward_edges = create_adjacency_lists(\n self.nodes, self.dependency_structure\n )\n\n try:\n order = toposort_flatten(backward_edges)\n except CircularDependencyError as err:\n raise DagsterInvalidDefinitionError(str(err)) from err\n\n return [self.node_named(node_name) for node_name in order]\n\n def get_inputs_must_be_resolved_top_level(\n self, asset_layer: "AssetLayer", handle: 
Optional[NodeHandle] = None\n ) -> Sequence[InputDefinition]:\n unresolveable_input_defs: List[InputDefinition] = []\n for node in self.node_dict.values():\n cur_handle = NodeHandle(node.name, handle)\n for input_def in node.definition.get_inputs_must_be_resolved_top_level(\n asset_layer, cur_handle\n ):\n if self.dependency_structure.has_deps(NodeInput(node, input_def)):\n continue\n elif not node.container_maps_input(input_def.name):\n raise DagsterInvalidDefinitionError(\n f"Input '{input_def.name}' of {node.describe_node()} "\n "has no way of being resolved. Must provide a resolution to this "\n "input via another op/graph, or via a direct input value mapped from the "\n "top-level graph. To "\n "learn more, see the docs for unconnected inputs: "\n "https://docs.dagster.io/concepts/io-management/unconnected-inputs#unconnected-inputs."\n )\n else:\n mapped_input = node.container_mapped_input(input_def.name)\n unresolveable_input_defs.append(mapped_input.get_definition())\n return unresolveable_input_defs\n\n @property\n def node_type_str(self) -> str:\n return "graph"\n\n @property\n def is_graph_job_op_node(self) -> bool:\n return True\n\n @property\n def nodes(self) -> Sequence[Node]:\n return list(set(self._node_dict.values()))\n\n @property\n def node_dict(self) -> Mapping[str, Node]:\n return self._node_dict\n\n @property\n def node_defs(self) -> Sequence[NodeDefinition]:\n return self._node_defs\n\n @property\n def nodes_in_topological_order(self) -> Sequence[Node]:\n return self._nodes_in_topological_order\n\n @property\n def node_input_source_assets(self) -> Mapping[str, Mapping[str, "SourceAsset"]]:\n return self._node_input_source_assets\n\n def has_node_named(self, name: str) -> bool:\n check.str_param(name, "name")\n return name in self._node_dict\n\n def node_named(self, name: str) -> Node:\n check.str_param(name, "name")\n if name not in self._node_dict:\n raise DagsterInvariantViolationError(f"{self._name} has no op named {name}.")\n\n return self._node_dict[name]\n\n def get_node(self, handle: NodeHandle) -> Node:\n check.inst_param(handle, "handle", NodeHandle)\n current = handle\n lineage: List[str] = []\n while current:\n lineage.append(current.name)\n current = current.parent\n\n name = lineage.pop()\n node = self.node_named(name)\n while lineage:\n name = lineage.pop()\n # We know that this is a current node is a graph while ascending lineage\n definition = cast(GraphDefinition, node.definition)\n node = definition.node_named(name)\n\n return node\n\n def iterate_node_defs(self) -> Iterator[NodeDefinition]:\n yield self\n for outer_node_def in self._node_defs:\n yield from outer_node_def.iterate_node_defs()\n\n def iterate_op_defs(self) -> Iterator["OpDefinition"]:\n for outer_node_def in self._node_defs:\n yield from outer_node_def.iterate_op_defs()\n\n def iterate_node_handles(\n self, parent_node_handle: Optional[NodeHandle] = None\n ) -> Iterator[NodeHandle]:\n for node in self.node_dict.values():\n cur_node_handle = NodeHandle(node.name, parent_node_handle)\n if isinstance(node, GraphNode):\n yield from node.definition.iterate_node_handles(cur_node_handle)\n yield cur_node_handle\n\n @public\n @property\n def input_mappings(self) -> Sequence[InputMapping]:\n """Input mappings for the graph.\n\n An input mapping is a mapping from an input of the graph to an input of a child node.\n """\n return self._input_mappings\n\n @public\n @property\n def output_mappings(self) -> Sequence[OutputMapping]:\n """Output mappings for the graph.\n\n An output mapping is a 
mapping from an output of the graph to an output of a child node.\n """\n return self._output_mappings\n\n @public\n @property\n def config_mapping(self) -> Optional[ConfigMapping]:\n """The config mapping for the graph, if present.\n\n By specifying a config mapping function, you can override the configuration for the child nodes contained within a graph.\n """\n return self._config_mapping\n\n @property\n def has_config_mapping(self) -> bool:\n return self._config_mapping is not None\n\n def all_dagster_types(self) -> Iterable[DagsterType]:\n return self._dagster_type_dict.values()\n\n def has_dagster_type(self, name: str) -> bool:\n check.str_param(name, "name")\n return name in self._dagster_type_dict\n\n def dagster_type_named(self, name: str) -> DagsterType:\n check.str_param(name, "name")\n return self._dagster_type_dict[name]\n\n def get_input_mapping(self, input_name: str) -> InputMapping:\n check.str_param(input_name, "input_name")\n for mapping in self._input_mappings:\n if mapping.graph_input_name == input_name:\n return mapping\n check.failed(f"Could not find input mapping {input_name}")\n\n def input_mapping_for_pointer(\n self, pointer: Union[InputPointer, FanInInputPointer]\n ) -> Optional[InputMapping]:\n check.inst_param(pointer, "pointer", (InputPointer, FanInInputPointer))\n\n for mapping in self._input_mappings:\n if mapping.maps_to == pointer:\n return mapping\n return None\n\n def get_output_mapping(self, output_name: str) -> OutputMapping:\n check.str_param(output_name, "output_name")\n for mapping in self._output_mappings:\n if mapping.graph_output_name == output_name:\n return mapping\n check.failed(f"Could not find output mapping {output_name}")\n\n T_Handle = TypeVar("T_Handle", bound=Optional[NodeHandle])\n\n def resolve_output_to_origin(\n self, output_name: str, handle: Optional[NodeHandle]\n ) -> Tuple[OutputDefinition, Optional[NodeHandle]]:\n check.str_param(output_name, "output_name")\n check.opt_inst_param(handle, "handle", NodeHandle)\n\n mapping = self.get_output_mapping(output_name)\n check.invariant(mapping, "Can only resolve outputs for valid output names")\n mapped_node = self.node_named(mapping.maps_from.node_name)\n return mapped_node.definition.resolve_output_to_origin(\n mapping.maps_from.output_name,\n NodeHandle(mapped_node.name, handle),\n )\n\n def resolve_output_to_origin_op_def(self, output_name: str) -> "OpDefinition":\n mapping = self.get_output_mapping(output_name)\n check.invariant(mapping, "Can only resolve outputs for valid output names")\n return self.node_named(\n mapping.maps_from.node_name\n ).definition.resolve_output_to_origin_op_def(output_name)\n\n def default_value_for_input(self, input_name: str) -> object:\n check.str_param(input_name, "input_name")\n\n # base case\n if self.input_def_named(input_name).has_default_value:\n return self.input_def_named(input_name).default_value\n\n mapping = self.get_input_mapping(input_name)\n check.invariant(mapping, "Can only resolve inputs for valid input names")\n mapped_node = self.node_named(mapping.maps_to.node_name)\n\n return mapped_node.definition.default_value_for_input(mapping.maps_to.input_name)\n\n def input_has_default(self, input_name: str) -> bool:\n check.str_param(input_name, "input_name")\n\n # base case\n if self.input_def_named(input_name).has_default_value:\n return True\n\n mapping = self.get_input_mapping(input_name)\n check.invariant(mapping, "Can only resolve inputs for valid input names")\n mapped_node = self.node_named(mapping.maps_to.node_name)\n\n return 
mapped_node.definition.input_has_default(mapping.maps_to.input_name)\n\n @property\n def dependencies(self) -> DependencyMapping[NodeInvocation]:\n return self._dependencies\n\n @property\n def dependency_structure(self) -> DependencyStructure:\n return self._dependency_structure\n\n @property\n def config_schema(self) -> Optional[IDefinitionConfigSchema]:\n return self.config_mapping.config_schema if self.config_mapping is not None else None\n\n def input_supports_dynamic_output_dep(self, input_name: str) -> bool:\n mapping = self.get_input_mapping(input_name)\n target_node = mapping.maps_to.node_name\n # check if input mapped to node which is downstream of another dynamic output within\n if self.dependency_structure.is_dynamic_mapped(target_node):\n return False\n\n # check if input mapped to node which starts new dynamic downstream\n if self.dependency_structure.has_dynamic_downstreams(target_node):\n return False\n\n return self.node_named(target_node).definition.input_supports_dynamic_output_dep(\n mapping.maps_to.input_name\n )\n\n def copy(\n self,\n name: Optional[str] = None,\n description: Optional[str] = None,\n input_mappings: Optional[Sequence[InputMapping]] = None,\n output_mappings: Optional[Sequence[OutputMapping]] = None,\n config: Optional[ConfigMapping] = None,\n tags: Optional[Mapping[str, str]] = None,\n node_input_source_assets: Optional[Mapping[str, Mapping[str, "SourceAsset"]]] = None,\n ) -> Self:\n return GraphDefinition(\n node_defs=self.node_defs,\n dependencies=self.dependencies,\n name=name or self.name,\n description=description or self.description,\n input_mappings=input_mappings or self._input_mappings,\n output_mappings=output_mappings or self._output_mappings,\n config=config or self.config_mapping,\n tags=tags or self.tags,\n node_input_source_assets=node_input_source_assets or self.node_input_source_assets,\n )\n\n def copy_for_configured(\n self,\n name: str,\n description: Optional[str],\n config_schema: Any,\n ) -> "GraphDefinition":\n if not self.has_config_mapping:\n raise DagsterInvalidDefinitionError(\n "Only graphs utilizing config mapping can be pre-configured. The graph "\n f'"{self.name}" does not have a config mapping, and thus has nothing to be '\n "configured."\n )\n config_mapping = cast(ConfigMapping, self.config_mapping)\n return self.copy(\n name=name,\n description=check.opt_str_param(description, "description", default=self.description),\n config=ConfigMapping(\n config_mapping.config_fn,\n config_schema=config_schema,\n receive_processed_config_values=config_mapping.receive_processed_config_values,\n ),\n )\n\n def node_names(self) -> Sequence[str]:\n return list(self._node_dict.keys())\n\n
[docs] @public\n def to_job(\n self,\n name: Optional[str] = None,\n description: Optional[str] = None,\n resource_defs: Optional[Mapping[str, object]] = None,\n config: Optional[\n Union["RunConfig", ConfigMapping, Mapping[str, object], "PartitionedConfig"]\n ] = None,\n tags: Optional[Mapping[str, str]] = None,\n metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n logger_defs: Optional[Mapping[str, LoggerDefinition]] = None,\n executor_def: Optional["ExecutorDefinition"] = None,\n hooks: Optional[AbstractSet[HookDefinition]] = None,\n op_retry_policy: Optional[RetryPolicy] = None,\n version_strategy: Optional[VersionStrategy] = None,\n op_selection: Optional[Sequence[str]] = None,\n partitions_def: Optional["PartitionsDefinition"] = None,\n asset_layer: Optional["AssetLayer"] = None,\n input_values: Optional[Mapping[str, object]] = None,\n _asset_selection_data: Optional[AssetSelectionData] = None,\n ) -> "JobDefinition":\n """Make this graph in to an executable Job by providing remaining components required for execution.\n\n Args:\n name (Optional[str]):\n The name for the Job. Defaults to the name of the this graph.\n resource_defs (Optional[Mapping [str, object]]):\n Resources that are required by this graph for execution.\n If not defined, `io_manager` will default to filesystem.\n config:\n Describes how the job is parameterized at runtime.\n\n If no value is provided, then the schema for the job's run config is a standard\n format based on its ops and resources.\n\n If a dictionary is provided, then it must conform to the standard config schema, and\n it will be used as the job's run config for the job whenever the job is executed.\n The values provided will be viewable and editable in the Dagster UI, so be\n careful with secrets.\n\n If a :py:class:`ConfigMapping` object is provided, then the schema for the job's run config is\n determined by the config mapping, and the ConfigMapping, which should return\n configuration in the standard format to configure the job.\n\n If a :py:class:`PartitionedConfig` object is provided, then it defines a discrete set of config\n values that can parameterize the job, as well as a function for mapping those\n values to the base config. The values provided will be viewable and editable in the\n Dagster UI, so be careful with secrets.\n tags (Optional[Mapping[str, Any]]):\n Arbitrary information that will be attached to the execution of the Job.\n Values that are not strings will be json encoded and must meet the criteria that\n `json.loads(json.dumps(value)) == value`. These tag values may be overwritten by tag\n values provided at invocation time.\n metadata (Optional[Mapping[str, RawMetadataValue]]):\n Arbitrary information that will be attached to the JobDefinition and be viewable in the Dagster UI.\n Keys must be strings, and values must be python primitive types or one of the provided\n MetadataValue types\n logger_defs (Optional[Mapping[str, LoggerDefinition]]):\n A dictionary of string logger identifiers to their implementations.\n executor_def (Optional[ExecutorDefinition]):\n How this Job will be executed. Defaults to :py:class:`multi_or_in_process_executor`,\n which can be switched between multi-process and in-process modes of execution. 
The\n default mode of execution is multi-process.\n op_retry_policy (Optional[RetryPolicy]): The default retry policy for all ops in this job.\n Only used if retry policy is not defined on the op definition or op invocation.\n version_strategy (Optional[VersionStrategy]):\n Defines how each op (and optionally, resource) in the job can be versioned. If\n provided, memoizaton will be enabled for this job.\n partitions_def (Optional[PartitionsDefinition]): Defines a discrete set of partition\n keys that can parameterize the job. If this argument is supplied, the config\n argument can't also be supplied.\n asset_layer (Optional[AssetLayer]): Top level information about the assets this job\n will produce. Generally should not be set manually.\n input_values (Optional[Mapping[str, Any]]):\n A dictionary that maps python objects to the top-level inputs of a job.\n\n Returns:\n JobDefinition\n """\n from dagster._core.execution.build_resources import wrap_resources_for_execution\n\n from .job_definition import JobDefinition\n\n wrapped_resource_defs = wrap_resources_for_execution(resource_defs)\n\n return JobDefinition.dagster_internal_init(\n name=name,\n description=description or self.description,\n graph_def=self,\n resource_defs=wrapped_resource_defs,\n logger_defs=logger_defs,\n executor_def=executor_def,\n config=config,\n partitions_def=partitions_def,\n tags=tags,\n metadata=metadata,\n hook_defs=hooks,\n version_strategy=version_strategy,\n op_retry_policy=op_retry_policy,\n asset_layer=asset_layer,\n input_values=input_values,\n _subset_selection_data=_asset_selection_data,\n _was_explicitly_provided_resources=None, # None means this is determined by whether resource_defs contains any explicitly provided resources\n ).get_subset(op_selection=op_selection)
\n\n    def coerce_to_job(self) -> "JobDefinition":\n        # attempt to coerce a Graph into a Job, raising a useful error if it doesn't work\n        try:\n            return self.to_job()\n        except DagsterInvalidDefinitionError as err:\n            raise DagsterInvalidDefinitionError(\n                f"Failed attempting to coerce Graph {self.name} into a Job. "\n                "Use to_job instead, passing the required information."\n            ) from err\n\n
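Building on the ``GraphDefinition`` example from the class docstring, a possible use of ``to_job`` looks like the sketch below; the job name and tags are illustrative:

.. code-block:: python

    from dagster import DependencyDefinition, GraphDefinition, op

    @op
    def return_one():
        return 1

    @op
    def add_one(num):
        return num + 1

    basic = GraphDefinition(
        name="basic",
        node_defs=[return_one, add_one],
        dependencies={"add_one": {"num": DependencyDefinition("return_one")}},
    )

    # Supply the remaining execution components to turn the graph into a job.
    basic_job = basic.to_job(name="basic_job", tags={"owner": "data-team"})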
[docs] @public\n def execute_in_process(\n self,\n run_config: Any = None,\n instance: Optional["DagsterInstance"] = None,\n resources: Optional[Mapping[str, object]] = None,\n raise_on_error: bool = True,\n op_selection: Optional[Sequence[str]] = None,\n run_id: Optional[str] = None,\n input_values: Optional[Mapping[str, object]] = None,\n ) -> "ExecuteInProcessResult":\n """Execute this graph in-process, collecting results in-memory.\n\n Args:\n run_config (Optional[Mapping[str, Any]]):\n Run config to provide to execution. The configuration for the underlying graph\n should exist under the "ops" key.\n instance (Optional[DagsterInstance]):\n The instance to execute against, an ephemeral one will be used if none provided.\n resources (Optional[Mapping[str, Any]]):\n The resources needed if any are required. Can provide resource instances directly,\n or resource definitions.\n raise_on_error (Optional[bool]): Whether or not to raise exceptions when they occur.\n Defaults to ``True``.\n op_selection (Optional[List[str]]): A list of op selection queries (including single op\n names) to execute. For example:\n * ``['some_op']``: selects ``some_op`` itself.\n * ``['*some_op']``: select ``some_op`` and all its ancestors (upstream dependencies).\n * ``['*some_op+++']``: select ``some_op``, all its ancestors, and its descendants\n (downstream dependencies) within 3 levels down.\n * ``['*some_op', 'other_op_a', 'other_op_b+']``: select ``some_op`` and all its\n ancestors, ``other_op_a`` itself, and ``other_op_b`` and its direct child ops.\n input_values (Optional[Mapping[str, Any]]):\n A dictionary that maps python objects to the top-level inputs of the graph.\n\n Returns:\n :py:class:`~dagster.ExecuteInProcessResult`\n """\n from dagster._core.execution.build_resources import wrap_resources_for_execution\n from dagster._core.instance import DagsterInstance\n\n from .executor_definition import execute_in_process_executor\n from .job_definition import JobDefinition\n\n instance = check.opt_inst_param(instance, "instance", DagsterInstance)\n resources = check.opt_mapping_param(resources, "resources", key_type=str)\n input_values = check.opt_mapping_param(input_values, "input_values")\n\n resource_defs = wrap_resources_for_execution(resources)\n\n ephemeral_job = JobDefinition(\n name=self._name,\n graph_def=self,\n executor_def=execute_in_process_executor,\n resource_defs=resource_defs,\n input_values=input_values,\n ).get_subset(op_selection=op_selection)\n\n run_config = run_config if run_config is not None else {}\n op_selection = check.opt_sequence_param(op_selection, "op_selection", str)\n\n return ephemeral_job.execute_in_process(\n run_config=run_config,\n instance=instance,\n raise_on_error=raise_on_error,\n run_id=run_id,\n )
\n\n @property\n def parent_graph_def(self) -> Optional["GraphDefinition"]:\n return None\n\n @property\n def is_subselected(self) -> bool:\n return False\n\n def get_resource_requirements(\n self, asset_layer: Optional["AssetLayer"] = None\n ) -> Iterator[ResourceRequirement]:\n for node in self.node_dict.values():\n yield from node.get_resource_requirements(outer_container=self, asset_layer=asset_layer)\n\n for dagster_type in self.all_dagster_types():\n yield from dagster_type.get_resource_requirements()\n\n @public\n @property\n def name(self) -> str:\n """The name of the graph."""\n return super(GraphDefinition, self).name\n\n @public\n @property\n def tags(self) -> Mapping[str, str]:\n """The tags associated with the graph."""\n return super(GraphDefinition, self).tags\n\n
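A small sketch of ``execute_in_process`` on a graph defined with the ``@graph`` decorator; the op and graph names are made up for the example:

.. code-block:: python

    from dagster import graph, op

    @op
    def return_one():
        return 1

    @op
    def add_one(num):
        return num + 1

    @graph
    def one_plus_one():
        return add_one(return_one())

    # Executes with the in-process executor against an ephemeral instance.
    result = one_plus_one.execute_in_process()
    assert result.success
    assert result.output_for_node("add_one") == 2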
[docs] @public\n def alias(self, name: str) -> "PendingNodeInvocation":\n """Aliases the graph with a new name.\n\n Can only be used in the context of a :py:func:`@graph <graph>`, :py:func:`@job <job>`, or :py:func:`@asset_graph <asset_graph>` decorated function.\n\n **Examples:**\n .. code-block:: python\n\n @job\n def do_it_all():\n my_graph.alias("my_graph_alias")\n """\n return super(GraphDefinition, self).alias(name)
\n\n
[docs] @public\n def tag(self, tags: Optional[Mapping[str, str]]) -> "PendingNodeInvocation":\n """Attaches the provided tags to the graph immutably.\n\n Can only be used in the context of a :py:func:`@graph <graph>`, :py:func:`@job <job>`, or :py:func:`@asset_graph <asset_graph>` decorated function.\n\n **Examples:**\n .. code-block:: python\n\n @job\n def do_it_all():\n my_graph.tag({"my_tag": "my_value"})\n """\n return super(GraphDefinition, self).tag(tags)
\n\n
[docs] @public\n def with_hooks(self, hook_defs: AbstractSet[HookDefinition]) -> "PendingNodeInvocation":\n """Attaches the provided hooks to the graph immutably.\n\n Can only be used in the context of a :py:func:`@graph <graph>`, :py:func:`@job <job>`, or :py:func:`@asset_graph <asset_graph>` decorated function.\n\n **Examples:**\n .. code-block:: python\n\n @job\n def do_it_all():\n my_graph.with_hooks({my_hook})\n """\n return super(GraphDefinition, self).with_hooks(hook_defs)
\n\n
[docs] @public\n def with_retry_policy(self, retry_policy: RetryPolicy) -> "PendingNodeInvocation":\n """Attaches the provided retry policy to the graph immutably.\n\n Can only be used in the context of a :py:func:`@graph <graph>`, :py:func:`@job <job>`, or :py:func:`@asset_graph <asset_graph>` decorated function.\n\n **Examples:**\n .. code-block:: python\n\n @job\n def do_it_all():\n my_graph.with_retry_policy(RetryPolicy(max_retries=5))\n """\n return super(GraphDefinition, self).with_retry_policy(retry_policy)
\n\n def resolve_input_to_destinations(\n self, input_handle: NodeInputHandle\n ) -> Sequence[NodeInputHandle]:\n all_destinations: List[NodeInputHandle] = []\n for mapping in self.input_mappings:\n if mapping.graph_input_name != input_handle.input_name:\n continue\n # recurse into graph structure\n all_destinations += self.node_named(\n mapping.maps_to.node_name\n ).definition.resolve_input_to_destinations(\n NodeInputHandle(\n NodeHandle(mapping.maps_to.node_name, parent=input_handle.node_handle),\n mapping.maps_to.input_name,\n ),\n )\n\n return all_destinations
\n\n\nclass SubselectedGraphDefinition(GraphDefinition):\n """Defines a subselected graph.\n\n Args:\n parent_graph_def (GraphDefinition): The parent graph that this current graph is subselected\n from. This is used for tracking where the subselected graph originally comes from.\n Note that we allow subselecting a subselected graph, and this field refers to the direct\n parent graph of the current subselection, rather than the original root graph.\n node_defs (Optional[Sequence[NodeDefinition]]): A list of all top level nodes in the graph. A\n node can be an op or a graph that contains other nodes.\n dependencies (Optional[Mapping[Union[str, NodeInvocation], Mapping[str, IDependencyDefinition]]]):\n A structure that declares the dependencies of each op's inputs on the outputs of other\n ops in the subselected graph. Keys of the top level dict are either the string names of\n ops in the graph or, in the case of aliased ops, :py:class:`NodeInvocations <NodeInvocation>`.\n Values of the top level dict are themselves dicts, which map input names belonging to\n the op or aliased op to :py:class:`DependencyDefinitions <DependencyDefinition>`.\n input_mappings (Optional[Sequence[InputMapping]]): Define the inputs to the nested graph, and\n how they map to the inputs of its constituent ops.\n output_mappings (Optional[Sequence[OutputMapping]]): Define the outputs of the nested graph, and\n how they map from the outputs of its constituent ops.\n """\n\n def __init__(\n self,\n parent_graph_def: GraphDefinition,\n node_defs: Optional[Sequence[NodeDefinition]],\n dependencies: Optional[\n Union[\n DependencyMapping[str],\n DependencyMapping[NodeInvocation],\n ]\n ],\n input_mappings: Optional[Sequence[InputMapping]],\n output_mappings: Optional[Sequence[OutputMapping]],\n ):\n self._parent_graph_def = check.inst_param(\n parent_graph_def, "parent_graph_def", GraphDefinition\n )\n super(SubselectedGraphDefinition, self).__init__(\n name=parent_graph_def.name, # should we create special name for subselected graphs\n node_defs=node_defs,\n dependencies=dependencies,\n input_mappings=input_mappings,\n output_mappings=output_mappings,\n config=parent_graph_def.config_mapping,\n tags=parent_graph_def.tags,\n )\n\n @property\n def parent_graph_def(self) -> GraphDefinition:\n return self._parent_graph_def\n\n def get_top_level_omitted_nodes(self) -> Sequence[Node]:\n return [node for node in self.parent_graph_def.nodes if not self.has_node_named(node.name)]\n\n @property\n def is_subselected(self) -> bool:\n return True\n\n\ndef _validate_in_mappings(\n input_mappings: Sequence[InputMapping],\n nodes_by_name: Mapping[str, Node],\n dependency_structure: DependencyStructure,\n name: str,\n class_name: str,\n) -> Sequence[InputDefinition]:\n from .composition import MappedInputPlaceholder\n\n input_defs_by_name: Dict[str, InputDefinition] = OrderedDict()\n mapping_keys: Set[str] = set()\n\n target_input_types_by_graph_input_name: Dict[str, Set[DagsterType]] = defaultdict(set)\n\n for mapping in input_mappings:\n # handle incorrect objects passed in as mappings\n if not isinstance(mapping, InputMapping):\n if isinstance(mapping, InputDefinition):\n raise DagsterInvalidDefinitionError(\n f"In {class_name} '{name}' you passed an InputDefinition "\n f"named '{mapping.name}' directly in to input_mappings. 
Return "\n "an InputMapping by calling mapping_to on the InputDefinition."\n )\n else:\n raise DagsterInvalidDefinitionError(\n f"In {class_name} '{name}' received unexpected type '{type(mapping)}' in"\n " input_mappings. Provide an InputMapping using InputMapping(...)"\n )\n\n input_defs_by_name[mapping.graph_input_name] = mapping.get_definition()\n\n target_node = nodes_by_name.get(mapping.maps_to.node_name)\n if target_node is None:\n raise DagsterInvalidDefinitionError(\n f"In {class_name} '{name}' input mapping references node "\n f"'{mapping.maps_to.node_name}' which it does not contain."\n )\n if not target_node.has_input(mapping.maps_to.input_name):\n raise DagsterInvalidDefinitionError(\n f"In {class_name} '{name}' input mapping to node '{mapping.maps_to.node_name}' "\n f"which contains no input named '{mapping.maps_to.input_name}'"\n )\n\n target_input_def = target_node.input_def_named(mapping.maps_to.input_name)\n node_input = NodeInput(target_node, target_input_def)\n\n if mapping.maps_to_fan_in:\n maps_to = cast(FanInInputPointer, mapping.maps_to)\n if not dependency_structure.has_fan_in_deps(node_input):\n raise DagsterInvalidDefinitionError(\n f"In {class_name} '{name}' input mapping target"\n f' "{maps_to.node_name}.{maps_to.input_name}" (index'\n f" {maps_to.fan_in_index} of fan-in) is not a MultiDependencyDefinition."\n )\n inner_deps = dependency_structure.get_fan_in_deps(node_input)\n if (maps_to.fan_in_index >= len(inner_deps)) or (\n inner_deps[maps_to.fan_in_index] is not MappedInputPlaceholder\n ):\n raise DagsterInvalidDefinitionError(\n f"In {class_name} '{name}' input mapping target "\n f'"{maps_to.node_name}.{maps_to.input_name}" index {maps_to.fan_in_index} in '\n "the MultiDependencyDefinition is not a MappedInputPlaceholder"\n )\n mapping_keys.add(f"{maps_to.node_name}.{maps_to.input_name}.{maps_to.fan_in_index}")\n target_input_types_by_graph_input_name[mapping.graph_input_name].add(\n target_input_def.dagster_type.get_inner_type_for_fan_in()\n )\n else:\n if dependency_structure.has_deps(node_input):\n raise DagsterInvalidDefinitionError(\n f"In {class_name} '{name}' input mapping target "\n f'"{mapping.maps_to.node_name}.{mapping.maps_to.input_name}" '\n "is already satisfied by output"\n )\n\n mapping_keys.add(f"{mapping.maps_to.node_name}.{mapping.maps_to.input_name}")\n target_input_types_by_graph_input_name[mapping.graph_input_name].add(\n target_input_def.dagster_type\n )\n\n for node_input in dependency_structure.inputs():\n if dependency_structure.has_fan_in_deps(node_input):\n for idx, dep in enumerate(dependency_structure.get_fan_in_deps(node_input)):\n if dep is MappedInputPlaceholder:\n mapping_str = f"{node_input.node_name}.{node_input.input_name}.{idx}"\n if mapping_str not in mapping_keys:\n raise DagsterInvalidDefinitionError(\n f"Unsatisfied MappedInputPlaceholder at index {idx} in"\n " MultiDependencyDefinition for"\n f" '{node_input.node_name}.{node_input.input_name}'"\n )\n\n # if the dagster type on a graph input is Any and all its target inputs have the\n # same dagster type, then use that dagster type for the graph input\n for graph_input_name, graph_input_def in input_defs_by_name.items():\n if graph_input_def.dagster_type.kind == DagsterTypeKind.ANY:\n target_input_types = target_input_types_by_graph_input_name[graph_input_name]\n if len(target_input_types) == 1:\n input_defs_by_name[graph_input_name] = graph_input_def.with_dagster_type(\n next(iter(target_input_types))\n )\n\n return list(input_defs_by_name.values())\n\n\ndef 
_validate_out_mappings(\n output_mappings: Sequence[OutputMapping],\n node_dict: Mapping[str, Node],\n name: str,\n class_name: str,\n) -> Tuple[Sequence[OutputMapping], Sequence[OutputDefinition]]:\n output_defs: List[OutputDefinition] = []\n for mapping in output_mappings:\n if isinstance(mapping, OutputMapping):\n target_node = node_dict.get(mapping.maps_from.node_name)\n if target_node is None:\n raise DagsterInvalidDefinitionError(\n f"In {class_name} '{name}' output mapping references node "\n f"'{mapping.maps_from.node_name}' which it does not contain."\n )\n if not target_node.has_output(mapping.maps_from.output_name):\n raise DagsterInvalidDefinitionError(\n f"In {class_name} {name} output mapping from {target_node.describe_node()} "\n f"which contains no output named '{mapping.maps_from.output_name}'"\n )\n\n target_output = target_node.output_def_named(mapping.maps_from.output_name)\n output_def = mapping.get_definition(is_dynamic=target_output.is_dynamic)\n output_defs.append(output_def)\n\n if (\n mapping.dagster_type\n and mapping.dagster_type.kind != DagsterTypeKind.ANY\n and (target_output.dagster_type != mapping.dagster_type)\n and class_name != "GraphDefinition"\n ):\n raise DagsterInvalidDefinitionError(\n f"In {class_name} '{name}' output '{mapping.graph_output_name}' of type"\n f" {mapping.dagster_type.display_name} maps from"\n f" {mapping.maps_from.node_name}.{mapping.maps_from.output_name} of different"\n f" type {target_output.dagster_type.display_name}. OutputMapping source and"\n " destination must have the same type."\n )\n\n elif isinstance(mapping, OutputDefinition):\n raise DagsterInvalidDefinitionError(\n f"You passed an OutputDefinition named '{mapping.name}' directly "\n "in to output_mappings. Return an OutputMapping by calling "\n "mapping_from on the OutputDefinition."\n )\n else:\n raise DagsterInvalidDefinitionError(\n f"Received unexpected type '{type(mapping)}' in output_mappings. "\n "Provide an OutputMapping using OutputDefinition(...).mapping_from(...)"\n )\n return output_mappings, output_defs\n
", "current_page_name": "_modules/dagster/_core/definitions/graph_definition", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.graph_definition"}, "hook_definition": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.hook_definition

\nfrom typing import AbstractSet, Any, Callable, Iterator, NamedTuple, Optional, cast\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr\n\nfrom ..decorator_utils import get_function_params\nfrom ..errors import DagsterInvalidInvocationError\nfrom .resource_requirement import HookResourceRequirement, RequiresResources, ResourceRequirement\nfrom .utils import check_valid_name\n\n\n
[docs]class HookDefinition(\n NamedTuple(\n "_HookDefinition",\n [\n ("name", PublicAttr[str]),\n ("hook_fn", PublicAttr[Callable]),\n ("required_resource_keys", PublicAttr[AbstractSet[str]]),\n ("decorated_fn", PublicAttr[Optional[Callable]]),\n ],\n ),\n RequiresResources,\n):\n """Define a hook which can be triggered during a op execution (e.g. a callback on the step\n execution failure event during a op execution).\n\n Args:\n name (str): The name of this hook.\n hook_fn (Callable): The callback function that will be triggered.\n required_resource_keys (Optional[AbstractSet[str]]): Keys for the resources required by the\n hook.\n """\n\n def __new__(\n cls,\n *,\n name: str,\n hook_fn: Callable[..., Any],\n required_resource_keys: Optional[AbstractSet[str]] = None,\n decorated_fn: Optional[Callable[..., Any]] = None,\n ):\n return super(HookDefinition, cls).__new__(\n cls,\n name=check_valid_name(name),\n hook_fn=check.callable_param(hook_fn, "hook_fn"),\n required_resource_keys=frozenset(\n check.opt_set_param(required_resource_keys, "required_resource_keys", of_type=str)\n ),\n decorated_fn=check.opt_callable_param(decorated_fn, "decorated_fn"),\n )\n\n def __call__(self, *args, **kwargs):\n """This is invoked when the hook is used as a decorator.\n\n We currently support hooks to decorate the following:\n\n - JobDefinition: when the hook decorates a job definition, it will be added to\n all the op invocations within the job.\n\n Example:\n .. code-block:: python\n\n @success_hook\n def slack_message_on_success(_):\n ...\n\n @slack_message_on_success\n @job\n def a_job():\n foo(bar())\n\n """\n from ..execution.context.hook import HookContext\n from .graph_definition import GraphDefinition\n from .hook_invocation import hook_invocation_result\n from .job_definition import JobDefinition\n\n if len(args) > 0 and isinstance(args[0], (JobDefinition, GraphDefinition)):\n # when it decorates a job, we apply this hook to all the op invocations within\n # the job.\n return args[0].with_hooks({self})\n else:\n if not self.decorated_fn:\n raise DagsterInvalidInvocationError(\n "Only hook definitions created using one of the hook decorators can be invoked."\n )\n fxn_args = get_function_params(self.decorated_fn)\n # If decorated fxn has two arguments, then this is an event list hook fxn, and parameter\n # names are always context and event_list\n if len(fxn_args) == 2:\n context_arg_name = fxn_args[0].name\n event_list_arg_name = fxn_args[1].name\n if len(args) + len(kwargs) != 2:\n raise DagsterInvalidInvocationError(\n "Decorated function expects two parameters, context and event_list, but "\n f"{len(args) + len(kwargs)} were provided."\n )\n if args:\n context = check.opt_inst_param(args[0], "context", HookContext)\n event_list = check.opt_list_param(\n args[1] if len(args) > 1 else kwargs[event_list_arg_name],\n event_list_arg_name,\n )\n else:\n if context_arg_name not in kwargs:\n raise DagsterInvalidInvocationError(\n f"Could not find expected argument '{context_arg_name}'. Provided "\n f"kwargs: {list(kwargs.keys())}"\n )\n if event_list_arg_name not in kwargs:\n raise DagsterInvalidInvocationError(\n f"Could not find expected argument '{event_list_arg_name}'. 
Provided "\n f"kwargs: {list(kwargs.keys())}"\n )\n context = check.opt_inst_param(\n kwargs[context_arg_name], context_arg_name, HookContext\n )\n event_list = check.opt_list_param(\n kwargs[event_list_arg_name], event_list_arg_name\n )\n return hook_invocation_result(self, context, event_list)\n else:\n context_arg_name = fxn_args[0].name\n if len(args) + len(kwargs) != 1:\n raise DagsterInvalidInvocationError(\n f"Decorated function expects one parameter, {context_arg_name}, but "\n f"{len(args) + len(kwargs)} were provided."\n )\n if args:\n context = check.opt_inst_param(args[0], context_arg_name, HookContext)\n else:\n if context_arg_name not in kwargs:\n raise DagsterInvalidInvocationError(\n f"Could not find expected argument '{context_arg_name}'. Provided "\n f"kwargs: {list(kwargs.keys())}"\n )\n context = check.opt_inst_param(\n kwargs[context_arg_name], context_arg_name, HookContext\n )\n return hook_invocation_result(self, context)\n\n def get_resource_requirements(\n self, outer_context: Optional[object] = None\n ) -> Iterator[ResourceRequirement]:\n # outer_context in this case is a string of (job, job name) or (node, node name)\n attached_to = cast(Optional[str], outer_context)\n for resource_key in sorted(list(self.required_resource_keys)):\n yield HookResourceRequirement(\n key=resource_key, attached_to=attached_to, hook_name=self.name\n )
\n
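A hedged usage sketch to accompany the `HookDefinition` listing above: a hook created with one of the hook decorators can be attached to a job (the path handled by `HookDefinition.__call__`) or invoked directly with a built `HookContext` for unit testing. The hook name below is illustrative and not part of this module.

.. code-block:: python

    from dagster import build_hook_context, success_hook

    @success_hook
    def log_on_success(context):
        # HookContext exposes a logger; no resources are required here.
        context.log.info("op finished successfully")

    # Direct invocation for a test; attaching the hook to a job
    # (e.g. stacking it above @job) follows the docstring example above.
    log_on_success(build_hook_context())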
", "current_page_name": "_modules/dagster/_core/definitions/hook_definition", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.hook_definition"}, "input": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.input

\nimport inspect\nfrom types import FunctionType\nfrom typing import (\n    TYPE_CHECKING,\n    Any,\n    Callable,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Set,\n    Type,\n    TypeVar,\n    Union,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, deprecated_param, experimental_param\nfrom dagster._core.definitions.events import AssetKey\nfrom dagster._core.definitions.metadata import (\n    ArbitraryMetadataMapping,\n    MetadataValue,\n    RawMetadataValue,\n    normalize_metadata,\n)\nfrom dagster._core.errors import DagsterError, DagsterInvalidDefinitionError\nfrom dagster._core.types.dagster_type import (  # BuiltinScalarDagsterType,\n    DagsterType,\n    resolve_dagster_type,\n)\n\nfrom .inference import InferredInputProps\nfrom .utils import NoValueSentinel, check_valid_name\n\nif TYPE_CHECKING:\n    from dagster._core.execution.context.input import InputContext\n\nT = TypeVar("T")\n\n\n# unfortunately since type_check functions need TypeCheckContext which is only available\n# at runtime, we can only check basic types before runtime\ndef _check_default_value(input_name: str, dagster_type: DagsterType, default_value: T) -> T:\n    from dagster._core.types.dagster_type import BuiltinScalarDagsterType\n\n    if default_value is not NoValueSentinel:\n        if dagster_type.is_nothing:\n            raise DagsterInvalidDefinitionError(\n                "Setting a default_value is invalid on InputDefinitions of type Nothing"\n            )\n\n        if isinstance(dagster_type, BuiltinScalarDagsterType):\n            type_check = dagster_type.type_check_scalar_value(default_value)\n            if not type_check.success:\n                raise DagsterInvalidDefinitionError(\n                    "Type check failed for the default_value of InputDefinition "\n                    f"{input_name} of type {dagster_type.display_name}. "\n                    f"Received value {default_value} of type {type(default_value)}",\n                )\n\n    return default_value\n\n\n@experimental_param(param="asset_key")\n@experimental_param(param="asset_partitions")\nclass InputDefinition:\n    """Defines an argument to an op's compute function.\n\n    Inputs may flow from previous op outputs, or be stubbed using config. They may optionally\n    be typed using the Dagster type system.\n\n    Args:\n        name (str): Name of the input.\n        dagster_type (Optional[Union[Type, DagsterType]]]): The type of this input.\n            Users should provide the Python type of the objects that they expect to be passed for\n            this input, or a :py:class:`DagsterType` that defines a runtime check that they want\n            to be run on this input. Defaults to :py:class:`Any`.\n        description (Optional[str]): Human-readable description of the input.\n        default_value (Optional[Any]): The default value to use if no input is provided.\n        metadata (Optional[Dict[str, Any]]): A dict of metadata for the input.\n        asset_key (Optional[Union[AssetKey, InputContext -> AssetKey]]): (Experimental) An AssetKey\n            (or function that produces an AssetKey from the InputContext) which should be associated\n            with this InputDefinition. 
Used for tracking lineage information through Dagster.\n        asset_partitions (Optional[Union[AbstractSet[str], InputContext -> AbstractSet[str]]]): (Experimental) A\n            set of partitions of the given asset_key (or a function that produces this list of\n            partitions from the InputContext) which should be associated with this InputDefinition.\n        input_manager_key (Optional[str]): (Experimental) The resource key for the\n            :py:class:`InputManager` used for loading this input when it is not connected to an\n            upstream output.\n    """\n\n    _name: str\n    _type_not_set: bool\n    _dagster_type: DagsterType\n    _description: Optional[str]\n    _default_value: Any\n    _input_manager_key: Optional[str]\n    _raw_metadata: ArbitraryMetadataMapping\n    _metadata: Mapping[str, MetadataValue]\n    _asset_key: Optional[Union[AssetKey, Callable[["InputContext"], AssetKey]]]\n    _asset_partitions_fn: Optional[Callable[["InputContext"], Set[str]]]\n\n    def __init__(\n        self,\n        name: str,\n        dagster_type: object = None,\n        description: Optional[str] = None,\n        default_value: object = NoValueSentinel,\n        metadata: Optional[ArbitraryMetadataMapping] = None,\n        asset_key: Optional[Union[AssetKey, Callable[["InputContext"], AssetKey]]] = None,\n        asset_partitions: Optional[Union[Set[str], Callable[["InputContext"], Set[str]]]] = None,\n        input_manager_key: Optional[str] = None,\n        # when adding new params, make sure to update combine_with_inferred and with_dagster_type below\n    ):\n        self._name = check_valid_name(name, allow_list=["config"])\n\n        self._type_not_set = dagster_type is None\n        self._dagster_type = check.inst(resolve_dagster_type(dagster_type), DagsterType)\n\n        self._description = check.opt_str_param(description, "description")\n\n        self._default_value = _check_default_value(self._name, self._dagster_type, default_value)\n\n        self._input_manager_key = check.opt_str_param(input_manager_key, "input_manager_key")\n\n        self._raw_metadata = check.opt_mapping_param(metadata, "metadata", key_type=str)\n        self._metadata = normalize_metadata(self._raw_metadata, allow_invalid=True)\n\n        if not callable(asset_key):\n            check.opt_inst_param(asset_key, "asset_key", AssetKey)\n\n        self._asset_key = asset_key\n\n        if asset_partitions:\n            check.param_invariant(\n                asset_key is not None,\n                "asset_partitions",\n                'Cannot specify "asset_partitions" argument without also specifying "asset_key"',\n            )\n        if callable(asset_partitions):\n            self._asset_partitions_fn = asset_partitions\n        elif asset_partitions is not None:\n            _asset_partitions = check.set_param(asset_partitions, "asset_partitions", of_type=str)\n            self._asset_partitions_fn = lambda _: _asset_partitions\n        else:\n            self._asset_partitions_fn = None\n\n    @property\n    def name(self) -> str:\n        return self._name\n\n    @property\n    def dagster_type(self) -> DagsterType:\n        return self._dagster_type\n\n    @property\n    def description(self) -> Optional[str]:\n        return self._description\n\n    @property\n    def has_default_value(self) -> bool:\n        return self._default_value is not NoValueSentinel\n\n    @property\n    def default_value(self) -> Any:\n        check.invariant(self.has_default_value, "Can only fetch 
default_value if has_default_value")\n        return self._default_value\n\n    @property\n    def input_manager_key(self) -> Optional[str]:\n        return self._input_manager_key\n\n    @property\n    def metadata(self) -> ArbitraryMetadataMapping:\n        return self._raw_metadata\n\n    @property\n    def is_asset(self) -> bool:\n        return self._asset_key is not None\n\n    @property\n    def hardcoded_asset_key(self) -> Optional[AssetKey]:\n        if not callable(self._asset_key):\n            return self._asset_key\n        else:\n            return None\n\n    def get_asset_key(self, context: "InputContext") -> Optional[AssetKey]:\n        """Get the AssetKey associated with this InputDefinition for the given\n        :py:class:`InputContext` (if any).\n\n        Args:\n            context (InputContext): The InputContext that this InputDefinition is being evaluated\n                in\n        """\n        if callable(self._asset_key):\n            return self._asset_key(context)\n        else:\n            return self.hardcoded_asset_key\n\n    def get_asset_partitions(self, context: "InputContext") -> Optional[Set[str]]:\n        """Get the set of partitions that this op will read from this InputDefinition for the given\n        :py:class:`InputContext` (if any).\n\n        Args:\n            context (InputContext): The InputContext that this InputDefinition is being evaluated\n                in\n        """\n        if self._asset_partitions_fn is None:\n            return None\n\n        return self._asset_partitions_fn(context)\n\n    def mapping_to(\n        self, node_name: str, input_name: str, fan_in_index: Optional[int] = None\n    ) -> "InputMapping":\n        """Create an input mapping to an input of a child node.\n\n        In a GraphDefinition, you can use this helper function to construct\n        an :py:class:`InputMapping` to the input of a child node.\n\n        Args:\n            node_name (str): The name of the child node to which to map this input.\n            input_name (str): The name of the child node' input to which to map this input.\n            fan_in_index (Optional[int]): The index in to a fanned in input, else None\n\n        Examples:\n            .. 
code-block:: python\n\n                input_mapping = InputDefinition('composite_input', Int).mapping_to(\n                    'child_node', 'int_input'\n                )\n        """\n        check.str_param(node_name, "node_name")\n        check.str_param(input_name, "input_name")\n        check.opt_int_param(fan_in_index, "fan_in_index")\n\n        return InputMapping(\n            graph_input_name=self.name,\n            mapped_node_name=node_name,\n            mapped_node_input_name=input_name,\n            fan_in_index=fan_in_index,\n            graph_input_description=self.description,\n            dagster_type=self.dagster_type,\n        )\n\n    @staticmethod\n    def create_from_inferred(inferred: InferredInputProps) -> "InputDefinition":\n        return InputDefinition(\n            name=inferred.name,\n            dagster_type=_checked_inferred_type(inferred),\n            description=inferred.description,\n            default_value=inferred.default_value,\n        )\n\n    def combine_with_inferred(self, inferred: InferredInputProps) -> "InputDefinition":\n        """Return a new InputDefinition that merges this ones properties with those inferred from type signature.\n        This can update: dagster_type, description, and default_value if they are not set.\n        """\n        check.invariant(\n            self.name == inferred.name,\n            f"InferredInputProps name {inferred.name} did not align with InputDefinition name"\n            f" {self.name}",\n        )\n\n        dagster_type = self._dagster_type\n        if self._type_not_set:\n            dagster_type = _checked_inferred_type(inferred)\n\n        description = self._description\n        if description is None and inferred.description is not None:\n            description = inferred.description\n\n        default_value = self._default_value\n        if not self.has_default_value:\n            default_value = inferred.default_value\n\n        return InputDefinition(\n            name=self.name,\n            dagster_type=dagster_type,\n            description=description,\n            default_value=default_value,\n            metadata=self.metadata,\n            asset_key=self._asset_key,\n            asset_partitions=self._asset_partitions_fn,\n            input_manager_key=self._input_manager_key,\n        )\n\n    def with_dagster_type(self, dagster_type: DagsterType) -> "InputDefinition":\n        return InputDefinition(\n            name=self.name,\n            dagster_type=dagster_type,\n            description=self.description,\n            default_value=self.default_value if self.has_default_value else NoValueSentinel,\n            metadata=self.metadata,\n            asset_key=self._asset_key,\n            asset_partitions=self._asset_partitions_fn,\n            input_manager_key=self._input_manager_key,\n        )\n\n\ndef _checked_inferred_type(inferred: InferredInputProps) -> DagsterType:\n    try:\n        if inferred.annotation == inspect.Parameter.empty:\n            resolved_type = resolve_dagster_type(None)\n        elif inferred.annotation is None:\n            # When inferred.annotation is None, it means someone explicitly put "None" as the\n            # annotation, so want to map it to a DagsterType that checks for the None type\n            resolved_type = resolve_dagster_type(type(None))\n        else:\n            resolved_type = resolve_dagster_type(inferred.annotation)\n\n    except DagsterError as e:\n        raise DagsterInvalidDefinitionError(\n            f"Problem using type 
'{inferred.annotation}' from type annotation for argument "\n            f"'{inferred.name}', correct the issue or explicitly set the dagster_type "\n            "via In()."\n        ) from e\n\n    return resolved_type\n\n\nclass InputPointer(NamedTuple("_InputPointer", [("node_name", str), ("input_name", str)])):\n    def __new__(cls, node_name: str, input_name: str):\n        return super(InputPointer, cls).__new__(\n            cls,\n            check.str_param(node_name, "node_name"),\n            check.str_param(input_name, "input_name"),\n        )\n\n\nclass FanInInputPointer(\n    NamedTuple(\n        "_FanInInputPointer", [("node_name", str), ("input_name", str), ("fan_in_index", int)]\n    )\n):\n    def __new__(cls, node_name: str, input_name: str, fan_in_index: int):\n        return super(FanInInputPointer, cls).__new__(\n            cls,\n            check.str_param(node_name, "node_name"),\n            check.str_param(input_name, "input_name"),\n            check.int_param(fan_in_index, "fan_in_index"),\n        )\n\n\n
[docs]@deprecated_param(\n param="dagster_type",\n breaking_version="2.0",\n additional_warn_text="Any defined `dagster_type` should come from the upstream op `Output`.",\n # Disabling warning here since we're passing this internally and I'm not sure whether it is\n # actually used or discarded.\n emit_runtime_warning=False,\n)\nclass InputMapping(NamedTuple):\n """Defines an input mapping for a graph.\n\n Args:\n graph_input_name (str): Name of the input in the graph being mapped from.\n mapped_node_name (str): Named of the node (op/graph) that the input is being mapped to.\n mapped_node_input_name (str): Name of the input in the node (op/graph) that is being mapped to.\n fan_in_index (Optional[int]): The index in to a fanned input, otherwise None.\n graph_input_description (Optional[str]): A description of the input in the graph being mapped from.\n dagster_type (Optional[DagsterType]): The dagster type of the graph's input\n being mapped from.\n\n Examples:\n .. code-block:: python\n\n from dagster import InputMapping, GraphDefinition, op, graph\n\n @op\n def needs_input(x):\n return x + 1\n\n # The following two graph definitions are equivalent\n GraphDefinition(\n name="the_graph",\n node_defs=[needs_input],\n input_mappings=[\n InputMapping(\n graph_input_name="maps_x", mapped_node_name="needs_input",\n mapped_node_input_name="x"\n )\n ]\n )\n\n @graph\n def the_graph(maps_x):\n needs_input(maps_x)\n """\n\n graph_input_name: str\n mapped_node_name: str\n mapped_node_input_name: str\n fan_in_index: Optional[int] = None\n graph_input_description: Optional[str] = None\n dagster_type: Optional[DagsterType] = None\n\n @property\n def maps_to(self) -> Union[InputPointer, FanInInputPointer]:\n if self.fan_in_index is not None:\n return FanInInputPointer(\n self.mapped_node_name, self.mapped_node_input_name, self.fan_in_index\n )\n return InputPointer(self.mapped_node_name, self.mapped_node_input_name)\n\n @property\n def maps_to_fan_in(self) -> bool:\n return isinstance(self.maps_to, FanInInputPointer)\n\n def describe(self) -> str:\n idx = self.maps_to.fan_in_index if isinstance(self.maps_to, FanInInputPointer) else ""\n return f"{self.graph_input_name} -> {self.maps_to.node_name}:{self.maps_to.input_name}{idx}"\n\n def get_definition(self) -> "InputDefinition":\n return InputDefinition(\n name=self.graph_input_name,\n description=self.graph_input_description,\n dagster_type=self.dagster_type,\n )
\n\n\n
[docs]class In(\n NamedTuple(\n "_In",\n [\n ("dagster_type", PublicAttr[Union[DagsterType, Type[NoValueSentinel]]]),\n ("description", PublicAttr[Optional[str]]),\n ("default_value", PublicAttr[Any]),\n ("metadata", PublicAttr[Optional[Mapping[str, Any]]]),\n (\n "asset_key",\n PublicAttr[Optional[Union[AssetKey, Callable[["InputContext"], AssetKey]]]],\n ),\n (\n "asset_partitions",\n PublicAttr[Optional[Union[Set[str], Callable[["InputContext"], Set[str]]]]],\n ),\n ("input_manager_key", PublicAttr[Optional[str]]),\n ],\n )\n):\n """Defines an argument to an op's compute function.\n\n Inputs may flow from previous op's outputs, or be stubbed using config. They may optionally\n be typed using the Dagster type system.\n\n Args:\n dagster_type (Optional[Union[Type, DagsterType]]]):\n The type of this input. Should only be set if the correct type can not\n be inferred directly from the type signature of the decorated function.\n description (Optional[str]): Human-readable description of the input.\n default_value (Optional[Any]): The default value to use if no input is provided.\n metadata (Optional[Dict[str, RawMetadataValue]]): A dict of metadata for the input.\n asset_key (Optional[Union[AssetKey, InputContext -> AssetKey]]): (Experimental) An AssetKey\n (or function that produces an AssetKey from the InputContext) which should be associated\n with this In. Used for tracking lineage information through Dagster.\n asset_partitions (Optional[Union[Set[str], InputContext -> Set[str]]]): (Experimental) A\n set of partitions of the given asset_key (or a function that produces this list of\n partitions from the InputContext) which should be associated with this In.\n input_manager_key (Optional[str]): (Experimental) The resource key for the\n :py:class:`InputManager` used for loading this input when it is not connected to an\n upstream output.\n """\n\n def __new__(\n cls,\n dagster_type: Union[Type, DagsterType] = NoValueSentinel,\n description: Optional[str] = None,\n default_value: Any = NoValueSentinel,\n metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n asset_key: Optional[Union[AssetKey, Callable[["InputContext"], AssetKey]]] = None,\n asset_partitions: Optional[Union[Set[str], Callable[["InputContext"], Set[str]]]] = None,\n input_manager_key: Optional[str] = None,\n ):\n return super(In, cls).__new__(\n cls,\n dagster_type=(\n NoValueSentinel\n if dagster_type is NoValueSentinel\n else resolve_dagster_type(dagster_type)\n ),\n description=check.opt_str_param(description, "description"),\n default_value=default_value,\n metadata=check.opt_mapping_param(metadata, "metadata", key_type=str),\n asset_key=check.opt_inst_param(asset_key, "asset_key", (AssetKey, FunctionType)),\n asset_partitions=asset_partitions,\n input_manager_key=check.opt_str_param(input_manager_key, "input_manager_key"),\n )\n\n @staticmethod\n def from_definition(input_def: InputDefinition) -> "In":\n return In(\n dagster_type=input_def.dagster_type,\n description=input_def.description,\n default_value=input_def._default_value, # noqa: SLF001\n metadata=input_def.metadata,\n asset_key=input_def._asset_key, # noqa: SLF001\n asset_partitions=input_def._asset_partitions_fn, # noqa: SLF001\n input_manager_key=input_def.input_manager_key,\n )\n\n def to_definition(self, name: str) -> InputDefinition:\n dagster_type = self.dagster_type if self.dagster_type is not NoValueSentinel else None\n return InputDefinition(\n name=name,\n dagster_type=dagster_type,\n description=self.description,\n 
default_value=self.default_value,\n metadata=self.metadata,\n asset_key=self.asset_key,\n asset_partitions=self.asset_partitions,\n input_manager_key=self.input_manager_key,\n )
\n\n\n
[docs]class GraphIn(NamedTuple("_GraphIn", [("description", PublicAttr[Optional[str]])])):\n """Represents information about an input that a graph maps.\n\n Args:\n description (Optional[str]): Human-readable description of the input.\n """\n\n def __new__(cls, description: Optional[str] = None):\n return super(GraphIn, cls).__new__(cls, description=description)\n\n def to_definition(self, name: str) -> InputDefinition:\n return InputDefinition(name=name, description=self.description)
\n
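A brief, hedged illustration of the `In` and `GraphIn` definitions above; the op and graph names are made up for the example.

.. code-block:: python

    from dagster import GraphIn, In, graph, op

    @op(ins={"n": In(int, description="A number to double.")})
    def double(n):
        return n * 2

    # GraphIn carries only a description; the inner op's In supplies the type.
    @graph(ins={"n": GraphIn(description="Top-level number input.")})
    def double_graph(n):
        return double(n)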
", "current_page_name": "_modules/dagster/_core/definitions/input", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.input"}, "job_definition": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.job_definition

\nimport importlib\nimport os\nimport warnings\nfrom datetime import datetime\nfrom functools import update_wrapper\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Dict,\n    Iterable,\n    List,\n    Mapping,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n    Union,\n    cast,\n)\n\nfrom typing_extensions import Self\n\nimport dagster._check as check\nfrom dagster._annotations import deprecated, experimental_param, public\nfrom dagster._config import Field, Shape, StringSource\nfrom dagster._config.config_type import ConfigType\nfrom dagster._config.validate import validate_config\nfrom dagster._core.definitions.asset_check_spec import AssetCheckKey\nfrom dagster._core.definitions.dependency import (\n    Node,\n    NodeHandle,\n    NodeInputHandle,\n    NodeInvocation,\n)\nfrom dagster._core.definitions.events import AssetKey\nfrom dagster._core.definitions.node_definition import NodeDefinition\nfrom dagster._core.definitions.op_definition import OpDefinition\nfrom dagster._core.definitions.op_selection import OpSelection, get_graph_subset\nfrom dagster._core.definitions.partition import DynamicPartitionsDefinition\nfrom dagster._core.definitions.policy import RetryPolicy\nfrom dagster._core.definitions.resource_requirement import (\n    ResourceRequirement,\n    ensure_requirements_satisfied,\n)\nfrom dagster._core.definitions.utils import check_valid_name\nfrom dagster._core.errors import (\n    DagsterInvalidConfigError,\n    DagsterInvalidDefinitionError,\n    DagsterInvalidInvocationError,\n    DagsterInvalidSubsetError,\n    DagsterInvariantViolationError,\n)\nfrom dagster._core.selector.subset_selector import (\n    AssetSelectionData,\n    OpSelectionData,\n)\nfrom dagster._core.storage.io_manager import (\n    IOManagerDefinition,\n    dagster_maintained_io_manager,\n    io_manager,\n)\nfrom dagster._core.storage.tags import MEMOIZED_RUN_TAG\nfrom dagster._core.types.dagster_type import DagsterType\nfrom dagster._core.utils import str_format_set\nfrom dagster._utils import IHasInternalInit\nfrom dagster._utils.merger import merge_dicts\n\nfrom .asset_layer import AssetLayer, build_asset_selection_job\nfrom .config import ConfigMapping\nfrom .dependency import (\n    DependencyMapping,\n    DependencyStructure,\n    OpNode,\n)\nfrom .executor_definition import ExecutorDefinition, multi_or_in_process_executor\nfrom .graph_definition import GraphDefinition, SubselectedGraphDefinition\nfrom .hook_definition import HookDefinition\nfrom .logger_definition import LoggerDefinition\nfrom .metadata import MetadataValue, RawMetadataValue, normalize_metadata\nfrom .partition import PartitionedConfig, PartitionsDefinition\nfrom .resource_definition import ResourceDefinition\nfrom .run_request import RunRequest\nfrom .utils import DEFAULT_IO_MANAGER_KEY, validate_tags\nfrom .version_strategy import VersionStrategy\n\nif TYPE_CHECKING:\n    from dagster._config.snap import ConfigSchemaSnapshot\n    from dagster._core.definitions.run_config import RunConfig\n    from dagster._core.execution.execute_in_process_result import ExecuteInProcessResult\n    from dagster._core.execution.resources_init import InitResourceContext\n    from dagster._core.host_representation.job_index import JobIndex\n    from dagster._core.instance import DagsterInstance, DynamicPartitionsStore\n    from dagster._core.snap import JobSnapshot\n\n    from .run_config_schema import RunConfigSchema\n\nDEFAULT_EXECUTOR_DEF = multi_or_in_process_executor\n\n\n
[docs]@experimental_param(param="version_strategy")\nclass JobDefinition(IHasInternalInit):\n """Defines a Dagster job."""\n\n _name: str\n _graph_def: GraphDefinition\n _description: Optional[str]\n _tags: Mapping[str, str]\n _metadata: Mapping[str, MetadataValue]\n _current_level_node_defs: Sequence[NodeDefinition]\n _hook_defs: AbstractSet[HookDefinition]\n _op_retry_policy: Optional[RetryPolicy]\n _asset_layer: AssetLayer\n _resource_requirements: Mapping[str, AbstractSet[str]]\n _all_node_defs: Mapping[str, NodeDefinition]\n _cached_run_config_schemas: Dict[str, "RunConfigSchema"]\n _version_strategy: VersionStrategy\n _subset_selection_data: Optional[Union[OpSelectionData, AssetSelectionData]]\n input_values: Mapping[str, object]\n\n def __init__(\n self,\n *,\n graph_def: GraphDefinition,\n resource_defs: Optional[Mapping[str, ResourceDefinition]] = None,\n executor_def: Optional[ExecutorDefinition] = None,\n logger_defs: Optional[Mapping[str, LoggerDefinition]] = None,\n name: Optional[str] = None,\n config: Optional[\n Union[ConfigMapping, Mapping[str, object], PartitionedConfig, "RunConfig"]\n ] = None,\n description: Optional[str] = None,\n partitions_def: Optional[PartitionsDefinition] = None,\n tags: Optional[Mapping[str, Any]] = None,\n metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n hook_defs: Optional[AbstractSet[HookDefinition]] = None,\n op_retry_policy: Optional[RetryPolicy] = None,\n version_strategy: Optional[VersionStrategy] = None,\n _subset_selection_data: Optional[Union[OpSelectionData, AssetSelectionData]] = None,\n asset_layer: Optional[AssetLayer] = None,\n input_values: Optional[Mapping[str, object]] = None,\n _was_explicitly_provided_resources: Optional[bool] = None,\n ):\n from dagster._core.definitions.run_config import RunConfig, convert_config_input\n\n self._graph_def = graph_def\n self._current_level_node_defs = self._graph_def.node_defs\n # Recursively explore all nodes in the this job\n self._all_node_defs = _build_all_node_defs(self._current_level_node_defs)\n self._asset_layer = check.opt_inst_param(\n asset_layer, "asset_layer", AssetLayer\n ) or _infer_asset_layer_from_source_asset_deps(graph_def)\n\n # validates\n self._graph_def.get_inputs_must_be_resolved_top_level(self._asset_layer)\n\n self._name = check_valid_name(check.str_param(name, "name")) if name else graph_def.name\n self._executor_def = check.opt_inst_param(executor_def, "executor_def", ExecutorDefinition)\n self._loggers = check.opt_nullable_mapping_param(\n logger_defs,\n "logger_defs",\n key_type=str,\n value_type=LoggerDefinition,\n )\n\n config = check.opt_inst_param(\n config, "config", (Mapping, ConfigMapping, PartitionedConfig, RunConfig)\n )\n config = convert_config_input(config)\n\n partitions_def = check.opt_inst_param(\n partitions_def, "partitions_def", PartitionsDefinition\n )\n # tags and description can exist on graph as well, but since\n # same graph may be in multiple jobs, keep separate layer\n self._description = check.opt_str_param(description, "description")\n self._tags = validate_tags(tags)\n self._metadata = normalize_metadata(\n check.opt_mapping_param(metadata, "metadata", key_type=str)\n )\n self._hook_defs = check.opt_set_param(hook_defs, "hook_defs")\n self._op_retry_policy = check.opt_inst_param(\n op_retry_policy, "op_retry_policy", RetryPolicy\n )\n self.version_strategy = check.opt_inst_param(\n version_strategy, "version_strategy", VersionStrategy\n )\n\n _subset_selection_data = check.opt_inst_param(\n _subset_selection_data, 
"_subset_selection_data", (OpSelectionData, AssetSelectionData)\n )\n input_values = check.opt_mapping_param(input_values, "input_values", key_type=str)\n\n resource_defs = check.opt_mapping_param(\n resource_defs, "resource_defs", key_type=str, value_type=ResourceDefinition\n )\n for key in resource_defs.keys():\n if not key.isidentifier():\n check.failed(f"Resource key '{key}' must be a valid Python identifier.")\n was_provided_resources = (\n bool(resource_defs)\n if _was_explicitly_provided_resources is None\n else _was_explicitly_provided_resources\n )\n self._resource_defs = {\n DEFAULT_IO_MANAGER_KEY: default_job_io_manager,\n **resource_defs,\n }\n self._required_resource_keys = self._get_required_resource_keys(was_provided_resources)\n\n self._config_mapping = None\n self._partitioned_config = None\n self._run_config = None\n self._run_config_schema = None\n self._original_config_argument = config\n\n if partitions_def:\n self._partitioned_config = PartitionedConfig.from_flexible_config(\n config, partitions_def\n )\n else:\n if isinstance(config, ConfigMapping):\n self._config_mapping = config\n elif isinstance(config, PartitionedConfig):\n self._partitioned_config = config\n elif isinstance(config, dict):\n self._run_config = config\n # Using config mapping here is a trick to make it so that the preset will be used even\n # when no config is supplied for the job.\n self._config_mapping = _config_mapping_with_default_value(\n get_run_config_schema_for_job(\n graph_def,\n self.resource_defs,\n self.executor_def,\n self.loggers,\n asset_layer,\n was_explicitly_provided_resources=was_provided_resources,\n ),\n config,\n self.name,\n )\n elif config is not None:\n check.failed(\n "config param must be a ConfigMapping, a PartitionedConfig, or a dictionary,"\n f" but is an object of type {type(config)}"\n )\n\n self._subset_selection_data = _subset_selection_data\n self.input_values = input_values\n for input_name in sorted(list(self.input_values.keys())):\n if not graph_def.has_input(input_name):\n raise DagsterInvalidDefinitionError(\n f"Error when constructing JobDefinition '{self.name}': Input value provided for"\n f" key '{input_name}', but job has no top-level input with that name."\n )\n\n def dagster_internal_init(\n *,\n graph_def: GraphDefinition,\n resource_defs: Optional[Mapping[str, ResourceDefinition]],\n executor_def: Optional[ExecutorDefinition],\n logger_defs: Optional[Mapping[str, LoggerDefinition]],\n name: Optional[str],\n config: Optional[\n Union[ConfigMapping, Mapping[str, object], PartitionedConfig, "RunConfig"]\n ],\n description: Optional[str],\n partitions_def: Optional[PartitionsDefinition],\n tags: Optional[Mapping[str, Any]],\n metadata: Optional[Mapping[str, RawMetadataValue]],\n hook_defs: Optional[AbstractSet[HookDefinition]],\n op_retry_policy: Optional[RetryPolicy],\n version_strategy: Optional[VersionStrategy],\n _subset_selection_data: Optional[Union[OpSelectionData, AssetSelectionData]],\n asset_layer: Optional[AssetLayer],\n input_values: Optional[Mapping[str, object]],\n _was_explicitly_provided_resources: Optional[bool],\n ) -> "JobDefinition":\n return JobDefinition(\n graph_def=graph_def,\n resource_defs=resource_defs,\n executor_def=executor_def,\n logger_defs=logger_defs,\n name=name,\n config=config,\n description=description,\n partitions_def=partitions_def,\n tags=tags,\n metadata=metadata,\n hook_defs=hook_defs,\n op_retry_policy=op_retry_policy,\n version_strategy=version_strategy,\n _subset_selection_data=_subset_selection_data,\n 
asset_layer=asset_layer,\n input_values=input_values,\n _was_explicitly_provided_resources=_was_explicitly_provided_resources,\n )\n\n @property\n def name(self) -> str:\n return self._name\n\n @property\n def tags(self) -> Mapping[str, str]:\n return merge_dicts(self._graph_def.tags, self._tags)\n\n @property\n def metadata(self) -> Mapping[str, MetadataValue]:\n return self._metadata\n\n @property\n def description(self) -> Optional[str]:\n return self._description\n\n @property\n def graph(self) -> GraphDefinition:\n return self._graph_def\n\n @property\n def dependency_structure(self) -> DependencyStructure:\n return self._graph_def.dependency_structure\n\n @property\n def dependencies(self) -> DependencyMapping[NodeInvocation]:\n return self._graph_def.dependencies\n\n @public\n @property\n def executor_def(self) -> ExecutorDefinition:\n """Returns the default :py:class:`ExecutorDefinition` for the job.\n\n If the user has not specified an executor definition, then this will default to the :py:func:`multi_or_in_process_executor`. If a default is specified on the :py:class:`Definitions` object the job was provided to, then that will be used instead.\n """\n return self._executor_def or DEFAULT_EXECUTOR_DEF\n\n @public\n @property\n def has_specified_executor(self) -> bool:\n """Returns True if this job has explicitly specified an executor, and False if the executor was inherited through defaults or the :py:class:`Definitions` object the job was provided to."""\n return self._executor_def is not None\n\n @public\n @property\n def resource_defs(self) -> Mapping[str, ResourceDefinition]:\n """Returns the set of ResourceDefinition objects specified on the job.\n\n This may not be the complete set of resources required by the job, since those can also be provided on the :py:class:`Definitions` object the job may be provided to.\n """\n return self._resource_defs\n\n @public\n @property\n def partitioned_config(self) -> Optional[PartitionedConfig]:\n """The partitioned config for the job, if it has one.\n\n A partitioned config defines a way to map partition keys to run config for the job.\n """\n return self._partitioned_config\n\n @public\n @property\n def config_mapping(self) -> Optional[ConfigMapping]:\n """The config mapping for the job, if it has one.\n\n A config mapping defines a way to map a top-level config schema to run config for the job.\n """\n return self._config_mapping\n\n @public\n @property\n def loggers(self) -> Mapping[str, LoggerDefinition]:\n """Returns the set of LoggerDefinition objects specified on the job.\n\n If the user has not specified a mapping of :py:class:`LoggerDefinition` objects, then this will default to the :py:func:`colored_console_logger` under the key `console`. 
If a default is specified on the :py:class:`Definitions` object the job was provided to, then that will be used instead.\n """\n from dagster._loggers import default_loggers\n\n return self._loggers or default_loggers()\n\n @public\n @property\n def has_specified_loggers(self) -> bool:\n """Returns true if the job explicitly set loggers, and False if loggers were inherited through defaults or the :py:class:`Definitions` object the job was provided to."""\n return self._loggers is not None\n\n @property\n def required_resource_keys(self) -> AbstractSet[str]:\n return self._required_resource_keys\n\n @property\n def run_config(self) -> Optional[Mapping[str, Any]]:\n return self._run_config\n\n @property\n def run_config_schema(self) -> "RunConfigSchema":\n if self._run_config_schema is None:\n self._run_config_schema = _create_run_config_schema(self, self.required_resource_keys)\n return self._run_config_schema\n\n @public\n @property\n def partitions_def(self) -> Optional[PartitionsDefinition]:\n """Returns the :py:class:`PartitionsDefinition` for the job, if it has one.\n\n A partitions definition defines the set of partition keys the job operates on.\n """\n return None if not self.partitioned_config else self.partitioned_config.partitions_def\n\n @property\n def hook_defs(self) -> AbstractSet[HookDefinition]:\n return self._hook_defs\n\n @property\n def asset_layer(self) -> AssetLayer:\n return self._asset_layer\n\n @property\n def all_node_defs(self) -> Sequence[NodeDefinition]:\n return list(self._all_node_defs.values())\n\n @property\n def top_level_node_defs(self) -> Sequence[NodeDefinition]:\n return self._current_level_node_defs\n\n def node_def_named(self, name: str) -> NodeDefinition:\n check.str_param(name, "name")\n\n check.invariant(name in self._all_node_defs, f"{name} not found")\n return self._all_node_defs[name]\n\n def has_node(self, name: str) -> bool:\n check.str_param(name, "name")\n return name in self._all_node_defs\n\n def get_node(self, handle: NodeHandle) -> Node:\n return self._graph_def.get_node(handle)\n\n def get_op(self, handle: NodeHandle) -> OpNode:\n node = self.get_node(handle)\n assert isinstance(\n node, OpNode\n ), f"Tried to retrieve node {handle} as op, but it represents a nested graph."\n return node\n\n def has_node_named(self, name: str) -> bool:\n return self._graph_def.has_node_named(name)\n\n def get_node_named(self, name: str) -> Node:\n return self._graph_def.node_named(name)\n\n @property\n def nodes(self) -> Sequence[Node]:\n return self._graph_def.nodes\n\n @property\n def nodes_in_topological_order(self) -> Sequence[Node]:\n return self._graph_def.nodes_in_topological_order\n\n def all_dagster_types(self) -> Iterable[DagsterType]:\n return self._graph_def.all_dagster_types()\n\n def has_dagster_type(self, name: str) -> bool:\n return self._graph_def.has_dagster_type(name)\n\n def dagster_type_named(self, name: str) -> DagsterType:\n return self._graph_def.dagster_type_named(name)\n\n def describe_target(self) -> str:\n return f"job '{self.name}'"\n\n def is_using_memoization(self, run_tags: Mapping[str, str]) -> bool:\n tags = merge_dicts(self.tags, run_tags)\n # If someone provides a false value for memoized run tag, then they are intentionally\n # switching off memoization.\n if tags.get(MEMOIZED_RUN_TAG) == "false":\n return False\n return (\n MEMOIZED_RUN_TAG in tags and tags.get(MEMOIZED_RUN_TAG) == "true"\n ) or self.version_strategy is not None\n\n def get_required_resource_defs(self) -> Mapping[str, ResourceDefinition]:\n return 
{\n resource_key: resource\n for resource_key, resource in self.resource_defs.items()\n if resource_key in self.required_resource_keys\n }\n\n def _get_required_resource_keys(self, validate_requirements: bool = False) -> AbstractSet[str]:\n from ..execution.resources_init import get_transitive_required_resource_keys\n\n requirements = self._get_resource_requirements()\n if validate_requirements:\n ensure_requirements_satisfied(self.resource_defs, requirements)\n required_keys = {req.key for req in requirements}\n if validate_requirements:\n return required_keys.union(\n get_transitive_required_resource_keys(required_keys, self.resource_defs)\n )\n else:\n return required_keys\n\n def _get_resource_requirements(self) -> Sequence[ResourceRequirement]:\n return [\n *self._graph_def.get_resource_requirements(self.asset_layer),\n *[\n req\n for hook_def in self._hook_defs\n for req in hook_def.get_resource_requirements(outer_context=f"job '{self._name}'")\n ],\n ]\n\n def validate_resource_requirements_satisfied(self) -> None:\n resource_requirements = self._get_resource_requirements()\n ensure_requirements_satisfied(self.resource_defs, resource_requirements)\n\n def is_missing_required_resources(self) -> bool:\n requirements = self._get_resource_requirements()\n for requirement in requirements:\n if not requirement.resources_contain_key(self.resource_defs):\n return True\n return False\n\n def get_all_hooks_for_handle(self, handle: NodeHandle) -> AbstractSet[HookDefinition]:\n """Gather all the hooks for the given node from all places possibly attached with a hook.\n\n A hook can be attached to any of the following objects\n * Node (node invocation)\n * JobDefinition\n\n Args:\n handle (NodeHandle): The node's handle\n\n Returns:\n FrozenSet[HookDefinition]\n """\n check.inst_param(handle, "handle", NodeHandle)\n hook_defs: Set[HookDefinition] = set()\n\n current = handle\n lineage = []\n while current:\n lineage.append(current.name)\n current = current.parent\n\n # hooks on top-level node\n name = lineage.pop()\n node = self._graph_def.node_named(name)\n hook_defs = hook_defs.union(node.hook_defs)\n\n # hooks on non-top-level nodes\n while lineage:\n name = lineage.pop()\n # While lineage is non-empty, definition is guaranteed to be a graph\n definition = cast(GraphDefinition, node.definition)\n node = definition.node_named(name)\n hook_defs = hook_defs.union(node.hook_defs)\n\n # hooks applied to a job definition will run on every node\n hook_defs = hook_defs.union(self.hook_defs)\n\n return frozenset(hook_defs)\n\n def get_retry_policy_for_handle(self, handle: NodeHandle) -> Optional[RetryPolicy]:\n node = self.get_node(handle)\n definition = node.definition\n\n if node.retry_policy:\n return node.retry_policy\n elif isinstance(definition, OpDefinition) and definition.retry_policy:\n return definition.retry_policy\n\n # could be expanded to look in graph containers\n else:\n return self._op_retry_policy\n\n # make Callable for decorator reference updates\n def __call__(self, *args, **kwargs):\n raise DagsterInvariantViolationError(\n f"Attempted to call job '{self.name}' directly. Jobs should be invoked by "\n "using an execution API function (e.g. `job.execute_in_process`)."\n )\n\n
[docs] @public\n def execute_in_process(\n self,\n run_config: Optional[Union[Mapping[str, Any], "RunConfig"]] = None,\n instance: Optional["DagsterInstance"] = None,\n partition_key: Optional[str] = None,\n raise_on_error: bool = True,\n op_selection: Optional[Sequence[str]] = None,\n asset_selection: Optional[Sequence[AssetKey]] = None,\n run_id: Optional[str] = None,\n input_values: Optional[Mapping[str, object]] = None,\n tags: Optional[Mapping[str, str]] = None,\n resources: Optional[Mapping[str, object]] = None,\n ) -> "ExecuteInProcessResult":\n """Execute the Job in-process, gathering results in-memory.\n\n The `executor_def` on the Job will be ignored, and replaced with the in-process executor.\n If using the default `io_manager`, it will switch from filesystem to in-memory.\n\n\n Args:\n run_config (Optional[Mapping[str, Any]]:\n The configuration for the run\n instance (Optional[DagsterInstance]):\n The instance to execute against, an ephemeral one will be used if none provided.\n partition_key: (Optional[str])\n The string partition key that specifies the run config to execute. Can only be used\n to select run config for jobs with partitioned config.\n raise_on_error (Optional[bool]): Whether or not to raise exceptions when they occur.\n Defaults to ``True``.\n op_selection (Optional[Sequence[str]]): A list of op selection queries (including single op\n names) to execute. For example:\n * ``['some_op']``: selects ``some_op`` itself.\n * ``['*some_op']``: select ``some_op`` and all its ancestors (upstream dependencies).\n * ``['*some_op+++']``: select ``some_op``, all its ancestors, and its descendants\n (downstream dependencies) within 3 levels down.\n * ``['*some_op', 'other_op_a', 'other_op_b+']``: select ``some_op`` and all its\n ancestors, ``other_op_a`` itself, and ``other_op_b`` and its direct child ops.\n input_values (Optional[Mapping[str, Any]]):\n A dictionary that maps python objects to the top-level inputs of the job. Input values provided here will override input values that have been provided to the job directly.\n resources (Optional[Mapping[str, Any]]):\n The resources needed if any are required. Can provide resource instances directly,\n or resource definitions.\n\n Returns:\n :py:class:`~dagster.ExecuteInProcessResult`\n\n """\n from dagster._core.definitions.executor_definition import execute_in_process_executor\n from dagster._core.definitions.run_config import convert_config_input\n from dagster._core.execution.build_resources import wrap_resources_for_execution\n from dagster._core.execution.execute_in_process import core_execute_in_process\n\n run_config = check.opt_mapping_param(convert_config_input(run_config), "run_config")\n op_selection = check.opt_sequence_param(op_selection, "op_selection", str)\n asset_selection = check.opt_sequence_param(asset_selection, "asset_selection", AssetKey)\n resources = check.opt_mapping_param(resources, "resources", key_type=str)\n\n resource_defs = wrap_resources_for_execution(resources)\n\n check.invariant(\n not (op_selection and asset_selection),\n "op_selection and asset_selection cannot both be provided as args to"\n " execute_in_process",\n )\n\n partition_key = check.opt_str_param(partition_key, "partition_key")\n input_values = check.opt_mapping_param(input_values, "input_values")\n\n # Combine provided input values at execute_in_process with input values\n # provided to the definition. 
Input values provided at\n # execute_in_process will override those provided on the definition.\n input_values = merge_dicts(self.input_values, input_values)\n\n bound_resource_defs = dict(self.resource_defs)\n ephemeral_job = JobDefinition.dagster_internal_init(\n name=self._name,\n graph_def=self._graph_def,\n resource_defs={**_swap_default_io_man(bound_resource_defs, self), **resource_defs},\n executor_def=execute_in_process_executor,\n logger_defs=self._loggers,\n hook_defs=self.hook_defs,\n config=self.config_mapping or self.partitioned_config or self.run_config,\n tags=self.tags,\n op_retry_policy=self._op_retry_policy,\n version_strategy=self.version_strategy,\n asset_layer=self.asset_layer,\n input_values=input_values,\n description=self.description,\n partitions_def=self.partitions_def,\n metadata=self.metadata,\n _subset_selection_data=None, # this is added below\n _was_explicitly_provided_resources=True,\n )\n\n ephemeral_job = ephemeral_job.get_subset(\n op_selection=op_selection,\n asset_selection=frozenset(asset_selection) if asset_selection else None,\n )\n\n merged_tags = merge_dicts(self.tags, tags or {})\n if partition_key:\n if not (self.partitions_def and self.partitioned_config):\n check.failed("Attempted to execute a partitioned run for a non-partitioned job")\n self.partitions_def.validate_partition_key(\n partition_key, dynamic_partitions_store=instance\n )\n\n run_config = (\n run_config\n if run_config\n else self.partitioned_config.get_run_config_for_partition_key(partition_key)\n )\n merged_tags.update(\n self.partitioned_config.get_tags_for_partition_key(\n partition_key, job_name=self.name\n )\n )\n\n return core_execute_in_process(\n ephemeral_job=ephemeral_job,\n run_config=run_config,\n instance=instance,\n output_capturing_enabled=True,\n raise_on_error=raise_on_error,\n run_tags=merged_tags,\n run_id=run_id,\n asset_selection=frozenset(asset_selection),\n )
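    # -----------------------------------------------------------------------
    # Hedged usage sketch (not part of this module): the docstring above
    # explains that execute_in_process swaps in the in-process executor and,
    # for the default io_manager, an in-memory store. Assuming a job `my_job`
    # with an op `some_op` defined elsewhere (both names are illustrative),
    # a test might look like:
    #
    #     result = my_job.execute_in_process(
    #         run_config={"ops": {"some_op": {"config": {"limit": 10}}}},
    #         raise_on_error=False,
    #     )
    #     assert result.success
    # -----------------------------------------------------------------------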
\n\n @property\n def op_selection_data(self) -> Optional[OpSelectionData]:\n return (\n self._subset_selection_data\n if isinstance(self._subset_selection_data, OpSelectionData)\n else None\n )\n\n @property\n def asset_selection_data(self) -> Optional[AssetSelectionData]:\n return (\n self._subset_selection_data\n if isinstance(self._subset_selection_data, AssetSelectionData)\n else None\n )\n\n @property\n def is_subset(self) -> bool:\n return bool(self._subset_selection_data)\n\n def get_subset(\n self,\n *,\n op_selection: Optional[Iterable[str]] = None,\n asset_selection: Optional[AbstractSet[AssetKey]] = None,\n asset_check_selection: Optional[AbstractSet[AssetCheckKey]] = None,\n ) -> Self:\n check.invariant(\n not (op_selection and (asset_selection or asset_check_selection)),\n "op_selection cannot be provided with asset_selection or asset_check_selection to"\n " execute_in_process",\n )\n if op_selection:\n return self._get_job_def_for_op_selection(op_selection)\n if asset_selection or asset_check_selection:\n return self._get_job_def_for_asset_selection(\n asset_selection=asset_selection, asset_check_selection=asset_check_selection\n )\n else:\n return self\n\n def _get_job_def_for_asset_selection(\n self,\n asset_selection: Optional[AbstractSet[AssetKey]] = None,\n asset_check_selection: Optional[AbstractSet[AssetCheckKey]] = None,\n ) -> Self:\n asset_selection = check.opt_set_param(asset_selection, "asset_selection", AssetKey)\n check.opt_set_param(asset_check_selection, "asset_check_selection", AssetCheckKey)\n\n nonexistent_assets = [\n asset\n for asset in asset_selection\n if asset not in self.asset_layer.asset_keys\n and asset not in self.asset_layer.source_assets_by_key\n ]\n nonexistent_asset_strings = [\n asset_str\n for asset_str in (asset.to_string() for asset in nonexistent_assets)\n if asset_str\n ]\n if nonexistent_assets:\n raise DagsterInvalidSubsetError(\n "Assets provided in asset_selection argument "\n f"{', '.join(nonexistent_asset_strings)} do not exist in parent asset group or job."\n )\n\n # Test that selected asset checks exist\n all_check_keys = self.asset_layer.node_output_handles_by_asset_check_key.keys()\n\n nonexistent_asset_checks = [\n asset_check\n for asset_check in asset_check_selection or set()\n if asset_check not in all_check_keys\n ]\n nonexistent_asset_check_strings = [\n str(asset_check) for asset_check in nonexistent_asset_checks\n ]\n if nonexistent_asset_checks:\n raise DagsterInvalidSubsetError(\n "Asset checks provided in asset_check_selection argument"\n f" {', '.join(nonexistent_asset_check_strings)} do not exist in parent asset group"\n " or job."\n )\n\n asset_selection_data = AssetSelectionData(\n asset_selection=asset_selection,\n asset_check_selection=asset_check_selection,\n parent_job_def=self,\n )\n\n check.invariant(\n self.asset_layer.assets_defs_by_key is not None,\n "Asset layer must have _asset_defs argument defined",\n )\n\n new_job = build_asset_selection_job(\n name=self.name,\n assets=set(self.asset_layer.assets_defs_by_key.values()),\n source_assets=self.asset_layer.source_assets_by_key.values(),\n executor_def=self.executor_def,\n resource_defs=self.resource_defs,\n description=self.description,\n tags=self.tags,\n asset_selection=asset_selection,\n asset_check_selection=asset_check_selection,\n asset_selection_data=asset_selection_data,\n config=self.config_mapping or self.partitioned_config,\n asset_checks=self.asset_layer.asset_checks_defs,\n )\n return new_job\n\n def _get_job_def_for_op_selection(self, 
op_selection: Iterable[str]) -> Self:\n try:\n sub_graph = get_graph_subset(self.graph, op_selection)\n\n # if explicit config was passed the config_mapping that resolves the defaults implicitly is\n # very unlikely to work. The job will still present the default config in the Dagster UI.\n config = (\n None\n if self.run_config is not None\n else self.config_mapping or self.partitioned_config\n )\n\n return self._copy(\n config=config,\n graph_def=sub_graph,\n _subset_selection_data=OpSelectionData(\n op_selection=list(op_selection),\n resolved_op_selection=OpSelection(op_selection).resolve(self.graph),\n parent_job_def=self, # used by job snapshot lineage\n ),\n # TODO: subset this structure.\n # https://github.com/dagster-io/dagster/issues/7541\n asset_layer=self.asset_layer,\n )\n except DagsterInvalidDefinitionError as exc:\n # This handles the case when you construct a subset such that an unsatisfied\n # input cannot be loaded from config. Instead of throwing a DagsterInvalidDefinitionError,\n # we re-raise a DagsterInvalidSubsetError.\n node_paths = OpSelection(op_selection).resolve(self.graph)\n raise DagsterInvalidSubsetError(\n f"The attempted subset {str_format_set(node_paths)} for graph "\n f"{self.graph.name} results in an invalid graph."\n ) from exc\n\n
[docs] @public\n @deprecated(\n breaking_version="2.0.0",\n additional_warn_text="Directly instantiate `RunRequest(partition_key=...)` instead.",\n )\n def run_request_for_partition(\n self,\n partition_key: str,\n run_key: Optional[str] = None,\n tags: Optional[Mapping[str, str]] = None,\n asset_selection: Optional[Sequence[AssetKey]] = None,\n run_config: Optional[Mapping[str, Any]] = None,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional["DynamicPartitionsStore"] = None,\n ) -> RunRequest:\n """Creates a RunRequest object for a run that processes the given partition.\n\n Args:\n partition_key: The key of the partition to request a run for.\n run_key (Optional[str]): A string key to identify this launched run. For sensors, ensures that\n only one run is created per run key across all sensor evaluations. For schedules,\n ensures that one run is created per tick, across failure recoveries. Passing in a `None`\n value means that a run will always be launched per evaluation.\n tags (Optional[Dict[str, str]]): A dictionary of tags (string key-value pairs) to attach\n to the launched run.\n run_config (Optional[Mapping[str, Any]]: Configuration for the run. If the job has\n a :py:class:`PartitionedConfig`, this value will override replace the config\n provided by it.\n current_time (Optional[datetime]): Used to determine which time-partitions exist.\n Defaults to now.\n dynamic_partitions_store (Optional[DynamicPartitionsStore]): The DynamicPartitionsStore\n object that is responsible for fetching dynamic partitions. Required when the\n partitions definition is a DynamicPartitionsDefinition with a name defined. Users\n can pass the DagsterInstance fetched via `context.instance` to this argument.\n\n\n Returns:\n RunRequest: an object that requests a run to process the given partition.\n """\n if not (self.partitions_def and self.partitioned_config):\n check.failed("Called run_request_for_partition on a non-partitioned job")\n\n if (\n isinstance(self.partitions_def, DynamicPartitionsDefinition)\n and self.partitions_def.name\n ):\n # Do not support using run_request_for_partition with dynamic partitions,\n # since this requires querying the instance once per run request for the\n # existent dynamic partitions\n check.failed(\n "run_request_for_partition is not supported for dynamic partitions. Instead, use"\n " RunRequest(partition_key=...)"\n )\n\n self.partitions_def.validate_partition_key(\n partition_key,\n current_time=current_time,\n dynamic_partitions_store=dynamic_partitions_store,\n )\n\n run_config = (\n run_config\n if run_config is not None\n else self.partitioned_config.get_run_config_for_partition_key(partition_key)\n )\n run_request_tags = {\n **(tags or {}),\n **self.partitioned_config.get_tags_for_partition_key(\n partition_key,\n job_name=self.name,\n ),\n }\n\n return RunRequest(\n run_key=run_key,\n run_config=run_config,\n tags=run_request_tags,\n job_name=self.name,\n asset_selection=asset_selection,\n partition_key=partition_key,\n )
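    # -----------------------------------------------------------------------
    # Hedged usage sketch (not part of this module): per the deprecation note
    # above, prefer constructing the RunRequest directly inside a schedule or
    # sensor evaluation function rather than calling run_request_for_partition.
    # The partition key and tag values below are illustrative placeholders:
    #
    #     from dagster import RunRequest
    #
    #     yield RunRequest(
    #         run_key=None,
    #         partition_key="2023-10-12",
    #         tags={"source": "nightly_schedule"},
    #     )
    # -----------------------------------------------------------------------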
\n\n def get_config_schema_snapshot(self) -> "ConfigSchemaSnapshot":\n return self.get_job_snapshot().config_schema_snapshot\n\n def get_job_snapshot(self) -> "JobSnapshot":\n return self.get_job_index().job_snapshot\n\n def get_job_index(self) -> "JobIndex":\n from dagster._core.host_representation import JobIndex\n from dagster._core.snap import JobSnapshot\n\n return JobIndex(JobSnapshot.from_job_def(self), self.get_parent_job_snapshot())\n\n def get_job_snapshot_id(self) -> str:\n return self.get_job_index().job_snapshot_id\n\n def get_parent_job_snapshot(self) -> Optional["JobSnapshot"]:\n if self.op_selection_data:\n return self.op_selection_data.parent_job_def.get_job_snapshot()\n elif self.asset_selection_data:\n return self.asset_selection_data.parent_job_def.get_job_snapshot()\n else:\n return None\n\n def has_direct_input_value(self, input_name: str) -> bool:\n return input_name in self.input_values\n\n def get_direct_input_value(self, input_name: str) -> object:\n if input_name not in self.input_values:\n raise DagsterInvalidInvocationError(\n f"On job '{self.name}', attempted to retrieve input value for input named"\n f" '{input_name}', but no value was provided. Provided input values:"\n f" {sorted(list(self.input_values.keys()))}"\n )\n return self.input_values[input_name]\n\n def _copy(self, **kwargs: Any) -> "JobDefinition":\n # dict() calls copy dict props\n base_kwargs = dict(\n graph_def=self.graph,\n resource_defs=dict(self.resource_defs),\n executor_def=self._executor_def,\n logger_defs=self._loggers,\n config=self._original_config_argument,\n name=self._name,\n description=self.description,\n tags=self.tags,\n metadata=self._metadata,\n hook_defs=self.hook_defs,\n op_retry_policy=self._op_retry_policy,\n version_strategy=self.version_strategy,\n _subset_selection_data=self._subset_selection_data,\n asset_layer=self.asset_layer,\n input_values=self.input_values,\n partitions_def=self.partitions_def,\n _was_explicitly_provided_resources=None,\n )\n resolved_kwargs = {**base_kwargs, **kwargs} # base kwargs overwritten for conflicts\n job_def = JobDefinition.dagster_internal_init(**resolved_kwargs)\n update_wrapper(job_def, self, updated=())\n return job_def\n\n
[docs] @public\n def with_top_level_resources(\n self, resource_defs: Mapping[str, ResourceDefinition]\n ) -> "JobDefinition":\n """Apply a set of resources to all op instances within the job."""\n resource_defs = check.mapping_param(resource_defs, "resource_defs", key_type=str)\n return self._copy(resource_defs=resource_defs)
\n\n
[docs] @public\n def with_hooks(self, hook_defs: AbstractSet[HookDefinition]) -> "JobDefinition":\n """Apply a set of hooks to all op instances within the job."""\n hook_defs = check.set_param(hook_defs, "hook_defs", of_type=HookDefinition)\n return self._copy(hook_defs=(hook_defs | self.hook_defs))
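# --- Hedged usage sketch (editor addition) ---
# Both with_top_level_resources and with_hooks return a copy of the job (via _copy);
# the original JobDefinition is left unchanged. The op, job, hook, and resource
# names below are hypothetical.
from dagster import HookContext, ResourceDefinition, failure_hook, job, op


@op
def do_work():
    return 1


@job
def my_job():
    do_work()


@failure_hook
def notify_on_failure(context: HookContext):
    context.log.info(f"op {context.op.name} failed")


hooked_job = my_job.with_hooks({notify_on_failure})
resourced_job = my_job.with_top_level_resources(
    {"my_resource": ResourceDefinition.hardcoded_resource("some_value")}
)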
\n\n def with_executor_def(self, executor_def: ExecutorDefinition) -> "JobDefinition":\n return self._copy(executor_def=executor_def)\n\n def with_logger_defs(self, logger_defs: Mapping[str, LoggerDefinition]) -> "JobDefinition":\n return self._copy(logger_defs=logger_defs)\n\n @property\n def op_selection(self) -> Optional[AbstractSet[str]]:\n return set(self.op_selection_data.op_selection) if self.op_selection_data else None\n\n @property\n def asset_selection(self) -> Optional[AbstractSet[AssetKey]]:\n return self.asset_selection_data.asset_selection if self.asset_selection_data else None\n\n @property\n def asset_check_selection(self) -> Optional[AbstractSet[AssetCheckKey]]:\n return (\n self.asset_selection_data.asset_check_selection if self.asset_selection_data else None\n )\n\n @property\n def resolved_op_selection(self) -> Optional[AbstractSet[str]]:\n return self.op_selection_data.resolved_op_selection if self.op_selection_data else None
\n\n\ndef _swap_default_io_man(resources: Mapping[str, ResourceDefinition], job: JobDefinition):\n """Used to create the user facing experience of the default io_manager\n switching to in-memory when using execute_in_process.\n """\n from dagster._core.storage.mem_io_manager import mem_io_manager\n\n if (\n resources.get(DEFAULT_IO_MANAGER_KEY) in [default_job_io_manager]\n and job.version_strategy is None\n ):\n updated_resources = dict(resources)\n updated_resources[DEFAULT_IO_MANAGER_KEY] = mem_io_manager\n return updated_resources\n\n return resources\n\n\n@dagster_maintained_io_manager\n@io_manager(\n description="Built-in filesystem IO manager that stores and retrieves values using pickling."\n)\ndef default_job_io_manager(init_context: "InitResourceContext"):\n # support overriding the default io manager via environment variables\n module_name = os.getenv("DAGSTER_DEFAULT_IO_MANAGER_MODULE")\n attribute_name = os.getenv("DAGSTER_DEFAULT_IO_MANAGER_ATTRIBUTE")\n silence_failures = os.getenv("DAGSTER_DEFAULT_IO_MANAGER_SILENCE_FAILURES")\n\n if module_name and attribute_name:\n from dagster._core.execution.build_resources import build_resources\n\n try:\n module = importlib.import_module(module_name)\n attr = getattr(module, attribute_name)\n check.invariant(\n isinstance(attr, IOManagerDefinition),\n "DAGSTER_DEFAULT_IO_MANAGER_MODULE and DAGSTER_DEFAULT_IO_MANAGER_ATTRIBUTE"\n " must specify an IOManagerDefinition",\n )\n with build_resources({"io_manager": attr}, instance=init_context.instance) as resources:\n return resources.io_manager\n except Exception as e:\n if not silence_failures:\n raise\n else:\n warnings.warn(\n f"Failed to load io manager override with module: {module_name} attribute:"\n f" {attribute_name}: {e}\\nFalling back to default io manager."\n )\n\n # normally, default to the fs_io_manager\n from dagster._core.storage.fs_io_manager import PickledObjectFilesystemIOManager\n\n instance = check.not_none(init_context.instance)\n return PickledObjectFilesystemIOManager(base_dir=instance.storage_directory())\n\n\n@dagster_maintained_io_manager\n@io_manager(\n description="Built-in filesystem IO manager that stores and retrieves values using pickling.",\n config_schema={"base_dir": Field(StringSource, is_required=False)},\n)\ndef default_job_io_manager_with_fs_io_manager_schema(init_context: "InitResourceContext"):\n # support overriding the default io manager via environment variables\n module_name = os.getenv("DAGSTER_DEFAULT_IO_MANAGER_MODULE")\n attribute_name = os.getenv("DAGSTER_DEFAULT_IO_MANAGER_ATTRIBUTE")\n silence_failures = os.getenv("DAGSTER_DEFAULT_IO_MANAGER_SILENCE_FAILURES")\n\n if module_name and attribute_name:\n from dagster._core.execution.build_resources import build_resources\n\n try:\n module = importlib.import_module(module_name)\n attr = getattr(module, attribute_name)\n check.invariant(\n isinstance(attr, IOManagerDefinition),\n "DAGSTER_DEFAULT_IO_MANAGER_MODULE and DAGSTER_DEFAULT_IO_MANAGER_ATTRIBUTE"\n " must specify an IOManagerDefinition",\n )\n with build_resources({"io_manager": attr}, instance=init_context.instance) as resources:\n return resources.io_manager\n except Exception as e:\n if not silence_failures:\n raise\n else:\n warnings.warn(\n f"Failed to load io manager override with module: {module_name} attribute:"\n f" {attribute_name}: {e}\\nFalling back to default io manager."\n )\n from dagster._core.storage.fs_io_manager import PickledObjectFilesystemIOManager\n\n # normally, default to the fs_io_manager\n base_dir = 
init_context.resource_config.get(\n "base_dir", init_context.instance.storage_directory() if init_context.instance else None\n )\n\n return PickledObjectFilesystemIOManager(base_dir=base_dir)\n\n\ndef _config_mapping_with_default_value(\n inner_schema: ConfigType,\n default_config: Mapping[str, Any],\n job_name: str,\n) -> ConfigMapping:\n if not isinstance(inner_schema, Shape):\n check.failed("Only Shape (dictionary) config_schema allowed on Job ConfigMapping")\n\n def config_fn(x):\n return x\n\n updated_fields = {}\n field_aliases = inner_schema.field_aliases\n for name, field in inner_schema.fields.items():\n if name in default_config:\n updated_fields[name] = Field(\n config=field.config_type,\n default_value=default_config[name],\n description=field.description,\n )\n elif name in field_aliases and field_aliases[name] in default_config:\n updated_fields[name] = Field(\n config=field.config_type,\n default_value=default_config[field_aliases[name]],\n description=field.description,\n )\n else:\n updated_fields[name] = field\n\n config_schema = Shape(\n fields=updated_fields,\n description=(\n "This run config schema was automatically populated with default values "\n "from `default_config`."\n ),\n field_aliases=inner_schema.field_aliases,\n )\n\n config_evr = validate_config(config_schema, default_config)\n if not config_evr.success:\n raise DagsterInvalidConfigError(\n f"Error in config when building job '{job_name}' ",\n config_evr.errors,\n default_config,\n )\n\n return ConfigMapping(\n config_fn=config_fn, config_schema=config_schema, receive_processed_config_values=False\n )\n\n\ndef get_run_config_schema_for_job(\n graph_def: GraphDefinition,\n resource_defs: Mapping[str, ResourceDefinition],\n executor_def: "ExecutorDefinition",\n logger_defs: Mapping[str, LoggerDefinition],\n asset_layer: Optional[AssetLayer],\n was_explicitly_provided_resources: bool = False,\n) -> ConfigType:\n return JobDefinition(\n name=graph_def.name,\n graph_def=graph_def,\n resource_defs=resource_defs,\n executor_def=executor_def,\n logger_defs=logger_defs,\n asset_layer=asset_layer,\n _was_explicitly_provided_resources=was_explicitly_provided_resources,\n ).run_config_schema.run_config_schema_type\n\n\ndef _infer_asset_layer_from_source_asset_deps(job_graph_def: GraphDefinition) -> AssetLayer:\n """For non-asset jobs that have some inputs that are fed from SourceAssets, constructs an\n AssetLayer that includes those SourceAssets.\n """\n asset_keys_by_node_input_handle: Dict[NodeInputHandle, AssetKey] = {}\n source_assets_list = []\n source_asset_keys_set = set()\n io_manager_keys_by_asset_key: Mapping[AssetKey, str] = {}\n\n # each entry is a graph definition and its handle relative to the job root\n stack: List[Tuple[GraphDefinition, Optional[NodeHandle]]] = [(job_graph_def, None)]\n\n while stack:\n graph_def, parent_node_handle = stack.pop()\n\n for node_name, input_source_assets in graph_def.node_input_source_assets.items():\n node_handle = NodeHandle(node_name, parent_node_handle)\n for input_name, source_asset in input_source_assets.items():\n if source_asset.key not in source_asset_keys_set:\n source_asset_keys_set.add(source_asset.key)\n source_assets_list.append(source_asset)\n\n input_handle = NodeInputHandle(node_handle, input_name)\n asset_keys_by_node_input_handle[input_handle] = source_asset.key\n for resolved_input_handle in graph_def.node_dict[\n node_name\n ].definition.resolve_input_to_destinations(input_handle):\n asset_keys_by_node_input_handle[resolved_input_handle] = 
source_asset.key\n\n if source_asset.io_manager_key:\n io_manager_keys_by_asset_key[source_asset.key] = source_asset.io_manager_key\n\n for node_name, node in graph_def.node_dict.items():\n if isinstance(node.definition, GraphDefinition):\n stack.append((node.definition, NodeHandle(node_name, parent_node_handle)))\n\n return AssetLayer(\n assets_defs_by_node_handle={},\n asset_keys_by_node_input_handle=asset_keys_by_node_input_handle,\n asset_info_by_node_output_handle={},\n asset_deps={},\n dependency_node_handles_by_asset_key={},\n assets_defs_by_key={},\n source_assets_by_key={\n source_asset.key: source_asset for source_asset in source_assets_list\n },\n io_manager_keys_by_asset_key=io_manager_keys_by_asset_key,\n dep_asset_keys_by_node_output_handle={},\n partition_mappings_by_asset_dep={},\n asset_checks_defs_by_node_handle={},\n node_output_handles_by_asset_check_key={},\n check_names_by_asset_key_by_node_handle={},\n check_key_by_node_output_handle={},\n )\n\n\ndef _build_all_node_defs(node_defs: Sequence[NodeDefinition]) -> Mapping[str, NodeDefinition]:\n all_defs: Dict[str, NodeDefinition] = {}\n for current_level_node_def in node_defs:\n for node_def in current_level_node_def.iterate_node_defs():\n if node_def.name in all_defs:\n if all_defs[node_def.name] != node_def:\n raise DagsterInvalidDefinitionError(\n 'Detected conflicting node definitions with the same name "{name}"'.format(\n name=node_def.name\n )\n )\n else:\n all_defs[node_def.name] = node_def\n\n return all_defs\n\n\ndef _create_run_config_schema(\n job_def: JobDefinition,\n required_resources: AbstractSet[str],\n) -> "RunConfigSchema":\n from .run_config import (\n RunConfigSchemaCreationData,\n construct_config_type_dictionary,\n define_run_config_schema_type,\n )\n from .run_config_schema import RunConfigSchema\n\n # When executing with a subset job, include the missing nodes\n # from the original job as ignored to allow execution with\n # run config that is valid for the original\n ignored_nodes: Sequence[Node] = []\n if job_def.is_subset:\n if isinstance(job_def.graph, SubselectedGraphDefinition): # op selection provided\n ignored_nodes = job_def.graph.get_top_level_omitted_nodes()\n elif job_def.asset_selection_data:\n parent_job = job_def\n while parent_job.asset_selection_data:\n parent_job = parent_job.asset_selection_data.parent_job_def\n\n ignored_nodes = [\n node for node in parent_job.graph.nodes if not job_def.has_node_named(node.name)\n ]\n else:\n ignored_nodes = []\n\n run_config_schema_type = define_run_config_schema_type(\n RunConfigSchemaCreationData(\n job_name=job_def.name,\n nodes=job_def.graph.nodes,\n graph_def=job_def.graph,\n dependency_structure=job_def.graph.dependency_structure,\n executor_def=job_def.executor_def,\n resource_defs=job_def.resource_defs,\n logger_defs=job_def.loggers,\n ignored_nodes=ignored_nodes,\n required_resources=required_resources,\n direct_inputs=job_def.input_values,\n asset_layer=job_def.asset_layer,\n )\n )\n\n if job_def.config_mapping:\n outer_config_type = job_def.config_mapping.config_schema.config_type\n else:\n outer_config_type = run_config_schema_type\n\n if outer_config_type is None:\n check.failed("Unexpected outer_config_type value of None")\n\n config_type_dict_by_name, config_type_dict_by_key = construct_config_type_dictionary(\n job_def.all_node_defs,\n outer_config_type,\n )\n\n return RunConfigSchema(\n run_config_schema_type=run_config_schema_type,\n config_type_dict_by_name=config_type_dict_by_name,\n 
config_type_dict_by_key=config_type_dict_by_key,\n config_mapping=job_def.config_mapping,\n )\n
", "current_page_name": "_modules/dagster/_core/definitions/job_definition", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.job_definition"}, "load_assets_from_modules": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.load_assets_from_modules

\nimport inspect\nimport os\nimport pkgutil\nfrom importlib import import_module\nfrom types import ModuleType\nfrom typing import Dict, Generator, Iterable, List, Optional, Sequence, Set, Tuple, Union\n\nimport dagster._check as check\nfrom dagster._core.definitions.auto_materialize_policy import AutoMaterializePolicy\nfrom dagster._core.definitions.backfill_policy import BackfillPolicy\nfrom dagster._core.definitions.freshness_policy import FreshnessPolicy\nfrom dagster._core.errors import DagsterInvalidDefinitionError\n\nfrom .assets import AssetsDefinition\nfrom .cacheable_assets import CacheableAssetsDefinition\nfrom .events import (\n    AssetKey,\n    CoercibleToAssetKeyPrefix,\n    check_opt_coercible_to_asset_key_prefix_param,\n)\nfrom .source_asset import SourceAsset\n\n\ndef _find_assets_in_module(\n    module: ModuleType,\n) -> Generator[Union[AssetsDefinition, SourceAsset, CacheableAssetsDefinition], None, None]:\n    """Finds assets in the given module and adds them to the given sets of assets and source assets."""\n    for attr in dir(module):\n        value = getattr(module, attr)\n        if isinstance(value, (AssetsDefinition, SourceAsset, CacheableAssetsDefinition)):\n            yield value\n        elif isinstance(value, list) and all(\n            isinstance(el, (AssetsDefinition, SourceAsset, CacheableAssetsDefinition))\n            for el in value\n        ):\n            yield from value\n\n\ndef assets_from_modules(\n    modules: Iterable[ModuleType], extra_source_assets: Optional[Sequence[SourceAsset]] = None\n) -> Tuple[Sequence[AssetsDefinition], Sequence[SourceAsset], Sequence[CacheableAssetsDefinition]]:\n    """Constructs three lists, a list of assets, a list of source assets, and a list of cacheable\n    assets from the given modules.\n\n    Args:\n        modules (Iterable[ModuleType]): The Python modules to look for assets inside.\n        extra_source_assets (Optional[Sequence[SourceAsset]]): Source assets to include in the\n            group in addition to the source assets found in the modules.\n\n    Returns:\n        Tuple[Sequence[AssetsDefinition], Sequence[SourceAsset], Sequence[CacheableAssetsDefinition]]]:\n            A tuple containing a list of assets, a list of source assets, and a list of\n            cacheable assets defined in the given modules.\n    """\n    asset_ids: Set[int] = set()\n    asset_keys: Dict[AssetKey, ModuleType] = dict()\n    source_assets: List[SourceAsset] = list(\n        check.opt_sequence_param(extra_source_assets, "extra_source_assets", of_type=SourceAsset)\n    )\n    cacheable_assets: List[CacheableAssetsDefinition] = []\n    assets: Dict[AssetKey, AssetsDefinition] = {}\n    for module in modules:\n        for asset in _find_assets_in_module(module):\n            if id(asset) not in asset_ids:\n                asset_ids.add(id(asset))\n                if isinstance(asset, CacheableAssetsDefinition):\n                    cacheable_assets.append(asset)\n                else:\n                    keys = asset.keys if isinstance(asset, AssetsDefinition) else [asset.key]\n                    for key in keys:\n                        if key in asset_keys:\n                            modules_str = ", ".join(\n                                set([asset_keys[key].__name__, module.__name__])\n                            )\n                            error_str = (\n                                f"Asset key {key} is defined multiple times. Definitions found in"\n                                f" modules: {modules_str}. 
"\n                            )\n\n                            if key in assets and isinstance(asset, AssetsDefinition):\n                                if assets[key].node_def == asset.node_def:\n                                    error_str += (\n                                        "One possible cause of this bug is a call to with_resources"\n                                        " outside of a repository definition, causing a duplicate"\n                                        " asset definition."\n                                    )\n\n                            raise DagsterInvalidDefinitionError(error_str)\n                        else:\n                            asset_keys[key] = module\n                            if isinstance(asset, AssetsDefinition):\n                                assets[key] = asset\n                    if isinstance(asset, SourceAsset):\n                        source_assets.append(asset)\n    return list(set(assets.values())), source_assets, cacheable_assets\n\n\n
[docs]def load_assets_from_modules(\n modules: Iterable[ModuleType],\n group_name: Optional[str] = None,\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n *,\n freshness_policy: Optional[FreshnessPolicy] = None,\n auto_materialize_policy: Optional[AutoMaterializePolicy] = None,\n backfill_policy: Optional[BackfillPolicy] = None,\n source_key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n) -> Sequence[Union[AssetsDefinition, SourceAsset, CacheableAssetsDefinition]]:\n """Constructs a list of assets and source assets from the given modules.\n\n Args:\n modules (Iterable[ModuleType]): The Python modules to look for assets inside.\n group_name (Optional[str]):\n Group name to apply to the loaded assets. The returned assets will be copies of the\n loaded objects, with the group name added.\n key_prefix (Optional[Union[str, Sequence[str]]]):\n Prefix to prepend to the keys of the loaded assets. The returned assets will be copies\n of the loaded objects, with the prefix prepended.\n freshness_policy (Optional[FreshnessPolicy]): FreshnessPolicy to apply to all the loaded\n assets.\n auto_materialize_policy (Optional[AutoMaterializePolicy]): AutoMaterializePolicy to apply\n to all the loaded assets.\n backfill_policy (Optional[AutoMaterializePolicy]): BackfillPolicy to apply to all the loaded assets.\n source_key_prefix (bool): Prefix to prepend to the keys of loaded SourceAssets. The returned\n assets will be copies of the loaded objects, with the prefix prepended.\n\n Returns:\n Sequence[Union[AssetsDefinition, SourceAsset]]:\n A list containing assets and source assets defined in the given modules.\n """\n group_name = check.opt_str_param(group_name, "group_name")\n key_prefix = check_opt_coercible_to_asset_key_prefix_param(key_prefix, "key_prefix")\n freshness_policy = check.opt_inst_param(freshness_policy, "freshness_policy", FreshnessPolicy)\n auto_materialize_policy = check.opt_inst_param(\n auto_materialize_policy, "auto_materialize_policy", AutoMaterializePolicy\n )\n backfill_policy = check.opt_inst_param(backfill_policy, "backfill_policy", BackfillPolicy)\n\n (\n assets,\n source_assets,\n cacheable_assets,\n ) = assets_from_modules(modules)\n\n return assets_with_attributes(\n assets,\n source_assets,\n cacheable_assets,\n key_prefix=key_prefix,\n group_name=group_name,\n freshness_policy=freshness_policy,\n auto_materialize_policy=auto_materialize_policy,\n backfill_policy=backfill_policy,\n source_key_prefix=source_key_prefix,\n )
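# --- Hedged usage sketch (editor addition) ---
# Typical use of load_assets_from_modules: load every asset defined in one or more
# modules and hand the result to Definitions. "my_package.assets" is a hypothetical
# module containing @asset definitions.
from dagster import Definitions, load_assets_from_modules

from my_package import assets  # hypothetical module of @asset definitions

all_assets = load_assets_from_modules(
    [assets],
    group_name="analytics",  # applied to every loaded asset
    key_prefix="warehouse",  # prepended to every loaded asset key
)

defs = Definitions(assets=all_assets)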
\n\n\n
[docs]def load_assets_from_current_module(\n group_name: Optional[str] = None,\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n *,\n freshness_policy: Optional[FreshnessPolicy] = None,\n auto_materialize_policy: Optional[AutoMaterializePolicy] = None,\n backfill_policy: Optional[BackfillPolicy] = None,\n source_key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n) -> Sequence[Union[AssetsDefinition, SourceAsset, CacheableAssetsDefinition]]:\n """Constructs a list of assets, source assets, and cacheable assets from the module where\n this function is called.\n\n Args:\n group_name (Optional[str]):\n Group name to apply to the loaded assets. The returned assets will be copies of the\n loaded objects, with the group name added.\n key_prefix (Optional[Union[str, Sequence[str]]]):\n Prefix to prepend to the keys of the loaded assets. The returned assets will be copies\n of the loaded objects, with the prefix prepended.\n freshness_policy (Optional[FreshnessPolicy]): FreshnessPolicy to apply to all the loaded\n assets.\n auto_materialize_policy (Optional[AutoMaterializePolicy]): AutoMaterializePolicy to apply\n to all the loaded assets.\n backfill_policy (Optional[AutoMaterializePolicy]): BackfillPolicy to apply to all the loaded assets.\n source_key_prefix (bool): Prefix to prepend to the keys of loaded SourceAssets. The returned\n assets will be copies of the loaded objects, with the prefix prepended.\n\n Returns:\n Sequence[Union[AssetsDefinition, SourceAsset, CachableAssetsDefinition]]:\n A list containing assets, source assets, and cacheable assets defined in the module.\n """\n caller = inspect.stack()[1]\n module = inspect.getmodule(caller[0])\n if module is None:\n check.failed("Could not find a module for the caller")\n\n return load_assets_from_modules(\n [module],\n group_name=group_name,\n key_prefix=key_prefix,\n freshness_policy=freshness_policy,\n auto_materialize_policy=auto_materialize_policy,\n backfill_policy=backfill_policy,\n )
\n\n\ndef assets_from_package_module(\n package_module: ModuleType,\n extra_source_assets: Optional[Sequence[SourceAsset]] = None,\n) -> Tuple[Sequence[AssetsDefinition], Sequence[SourceAsset], Sequence[CacheableAssetsDefinition]]:\n """Constructs three lists, a list of assets, a list of source assets, and a list of cacheable assets\n from the given package module.\n\n Args:\n package_module (ModuleType): The package module to look for assets inside.\n extra_source_assets (Optional[Sequence[SourceAsset]]): Source assets to include in the\n group in addition to the source assets found in the modules.\n\n Returns:\n Tuple[Sequence[AssetsDefinition], Sequence[SourceAsset], Sequence[CacheableAssetsDefinition]]:\n A tuple containing a list of assets, a list of source assets, and a list of cacheable assets\n defined in the given modules.\n """\n return assets_from_modules(\n _find_modules_in_package(package_module), extra_source_assets=extra_source_assets\n )\n\n\n
[docs]def load_assets_from_package_module(\n package_module: ModuleType,\n group_name: Optional[str] = None,\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n *,\n freshness_policy: Optional[FreshnessPolicy] = None,\n auto_materialize_policy: Optional[AutoMaterializePolicy] = None,\n backfill_policy: Optional[BackfillPolicy] = None,\n source_key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n) -> Sequence[Union[AssetsDefinition, SourceAsset, CacheableAssetsDefinition]]:\n """Constructs a list of assets and source assets that includes all asset\n definitions, source assets, and cacheable assets in all sub-modules of the given package module.\n\n A package module is the result of importing a package.\n\n Args:\n package_module (ModuleType): The package module to looks for assets inside.\n group_name (Optional[str]):\n Group name to apply to the loaded assets. The returned assets will be copies of the\n loaded objects, with the group name added.\n key_prefix (Optional[Union[str, Sequence[str]]]):\n Prefix to prepend to the keys of the loaded assets. The returned assets will be copies\n of the loaded objects, with the prefix prepended.\n freshness_policy (Optional[FreshnessPolicy]): FreshnessPolicy to apply to all the loaded\n assets.\n auto_materialize_policy (Optional[AutoMaterializePolicy]): AutoMaterializePolicy to apply\n to all the loaded assets.\n backfill_policy (Optional[AutoMaterializePolicy]): BackfillPolicy to apply to all the loaded assets.\n source_key_prefix (bool): Prefix to prepend to the keys of loaded SourceAssets. The returned\n assets will be copies of the loaded objects, with the prefix prepended.\n\n Returns:\n Sequence[Union[AssetsDefinition, SourceAsset, CacheableAssetsDefinition]]:\n A list containing assets, source assets, and cacheable assets defined in the module.\n """\n group_name = check.opt_str_param(group_name, "group_name")\n key_prefix = check_opt_coercible_to_asset_key_prefix_param(key_prefix, "key_prefix")\n freshness_policy = check.opt_inst_param(freshness_policy, "freshness_policy", FreshnessPolicy)\n auto_materialize_policy = check.opt_inst_param(\n auto_materialize_policy, "auto_materialize_policy", AutoMaterializePolicy\n )\n backfill_policy = check.opt_inst_param(backfill_policy, "backfill_policy", BackfillPolicy)\n\n (\n assets,\n source_assets,\n cacheable_assets,\n ) = assets_from_package_module(package_module)\n return assets_with_attributes(\n assets,\n source_assets,\n cacheable_assets,\n key_prefix=key_prefix,\n group_name=group_name,\n freshness_policy=freshness_policy,\n auto_materialize_policy=auto_materialize_policy,\n backfill_policy=backfill_policy,\n source_key_prefix=source_key_prefix,\n )
\n\n\n
[docs]def load_assets_from_package_name(\n package_name: str,\n group_name: Optional[str] = None,\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n *,\n freshness_policy: Optional[FreshnessPolicy] = None,\n auto_materialize_policy: Optional[AutoMaterializePolicy] = None,\n backfill_policy: Optional[BackfillPolicy] = None,\n source_key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n) -> Sequence[Union[AssetsDefinition, SourceAsset, CacheableAssetsDefinition]]:\n """Constructs a list of assets, source assets, and cacheable assets that includes all asset\n definitions and source assets in all sub-modules of the given package.\n\n Args:\n package_name (str): The name of a Python package to look for assets inside.\n group_name (Optional[str]):\n Group name to apply to the loaded assets. The returned assets will be copies of the\n loaded objects, with the group name added.\n key_prefix (Optional[Union[str, Sequence[str]]]):\n Prefix to prepend to the keys of the loaded assets. The returned assets will be copies\n of the loaded objects, with the prefix prepended.\n freshness_policy (Optional[FreshnessPolicy]): FreshnessPolicy to apply to all the loaded\n assets.\n auto_materialize_policy (Optional[AutoMaterializePolicy]): AutoMaterializePolicy to apply\n to all the loaded assets.\n backfill_policy (Optional[AutoMaterializePolicy]): BackfillPolicy to apply to all the loaded assets.\n source_key_prefix (bool): Prefix to prepend to the keys of loaded SourceAssets. The returned\n assets will be copies of the loaded objects, with the prefix prepended.\n\n Returns:\n Sequence[Union[AssetsDefinition, SourceAsset, CacheableAssetsDefinition]]:\n A list containing assets, source assets, and cacheable assets defined in the module.\n """\n package_module = import_module(package_name)\n return load_assets_from_package_module(\n package_module,\n group_name=group_name,\n key_prefix=key_prefix,\n freshness_policy=freshness_policy,\n auto_materialize_policy=auto_materialize_policy,\n backfill_policy=backfill_policy,\n )
\n\n\ndef _find_modules_in_package(package_module: ModuleType) -> Iterable[ModuleType]:\n yield package_module\n package_path = package_module.__file__\n if package_path:\n for _, modname, is_pkg in pkgutil.walk_packages([os.path.dirname(package_path)]):\n submodule = import_module(f"{package_module.__name__}.{modname}")\n if is_pkg:\n yield from _find_modules_in_package(submodule)\n else:\n yield submodule\n else:\n raise ValueError(\n f"Tried to find modules in package {package_module}, but its __file__ is None"\n )\n\n\ndef prefix_assets(\n assets_defs: Sequence[AssetsDefinition],\n key_prefix: CoercibleToAssetKeyPrefix,\n source_assets: Sequence[SourceAsset],\n source_key_prefix: Optional[CoercibleToAssetKeyPrefix],\n) -> Tuple[Sequence[AssetsDefinition], Sequence[SourceAsset]]:\n """Given a list of assets, prefix the input and output asset keys with key_prefix.\n The prefix is not added to source assets.\n\n Input asset keys that reference other assets within assets_defs are "brought along" -\n i.e. prefixed as well.\n\n Example with a single asset:\n\n .. code-block:: python\n\n @asset\n def asset1():\n ...\n\n result = prefixed_asset_key_replacements([asset_1], "my_prefix")\n assert result.assets[0].asset_key == AssetKey(["my_prefix", "asset1"])\n\n Example with dependencies within the list of assets:\n\n .. code-block:: python\n\n @asset\n def asset1():\n ...\n\n @asset\n def asset2(asset1):\n ...\n\n result = prefixed_asset_key_replacements([asset1, asset2], "my_prefix")\n assert result.assets[0].asset_key == AssetKey(["my_prefix", "asset1"])\n assert result.assets[1].asset_key == AssetKey(["my_prefix", "asset2"])\n assert result.assets[1].dependency_keys == {AssetKey(["my_prefix", "asset1"])}\n\n """\n asset_keys = {asset_key for assets_def in assets_defs for asset_key in assets_def.keys}\n source_asset_keys = {source_asset.key for source_asset in source_assets}\n\n if isinstance(key_prefix, str):\n key_prefix = [key_prefix]\n key_prefix = check.is_list(key_prefix, of_type=str)\n\n result_assets: List[AssetsDefinition] = []\n for assets_def in assets_defs:\n output_asset_key_replacements = {\n asset_key: AssetKey([*key_prefix, *asset_key.path]) for asset_key in assets_def.keys\n }\n input_asset_key_replacements = {}\n for dep_asset_key in assets_def.dependency_keys:\n if dep_asset_key in asset_keys:\n input_asset_key_replacements[dep_asset_key] = AssetKey(\n [*key_prefix, *dep_asset_key.path]\n )\n elif source_key_prefix and dep_asset_key in source_asset_keys:\n input_asset_key_replacements[dep_asset_key] = AssetKey(\n [*source_key_prefix, *dep_asset_key.path]\n )\n\n result_assets.append(\n assets_def.with_attributes(\n output_asset_key_replacements=output_asset_key_replacements,\n input_asset_key_replacements=input_asset_key_replacements,\n )\n )\n\n if source_key_prefix:\n result_source_assets = [\n source_asset.with_attributes(key=AssetKey([*source_key_prefix, *source_asset.key.path]))\n for source_asset in source_assets\n ]\n else:\n result_source_assets = source_assets\n\n return result_assets, result_source_assets\n\n\ndef assets_with_attributes(\n assets_defs: Sequence[AssetsDefinition],\n source_assets: Sequence[SourceAsset],\n cacheable_assets: Sequence[CacheableAssetsDefinition],\n key_prefix: Optional[Sequence[str]],\n group_name: Optional[str],\n freshness_policy: Optional[FreshnessPolicy],\n auto_materialize_policy: Optional[AutoMaterializePolicy],\n backfill_policy: Optional[BackfillPolicy],\n source_key_prefix: Optional[Sequence[str]],\n) -> 
Sequence[Union[AssetsDefinition, SourceAsset, CacheableAssetsDefinition]]:\n # There is a tricky edge case here where if a non-cacheable asset depends on a cacheable asset,\n # and the assets are prefixed, the non-cacheable asset's dependency will not be prefixed since\n # at prefix-time it is not known that its dependency is one of the cacheable assets.\n # https://github.com/dagster-io/dagster/pull/10389#pullrequestreview-1170913271\n if key_prefix:\n assets_defs, source_assets = prefix_assets(\n assets_defs, key_prefix, source_assets, source_key_prefix\n )\n cacheable_assets = [\n cached_asset.with_prefix_for_all(key_prefix) for cached_asset in cacheable_assets\n ]\n\n if group_name or freshness_policy or auto_materialize_policy or backfill_policy:\n assets_defs = [\n asset.with_attributes(\n group_names_by_key=(\n {asset_key: group_name for asset_key in asset.keys} if group_name else None\n ),\n freshness_policy=freshness_policy,\n auto_materialize_policy=auto_materialize_policy,\n backfill_policy=backfill_policy,\n )\n for asset in assets_defs\n ]\n if group_name:\n source_assets = [\n source_asset.with_attributes(group_name=group_name)\n for source_asset in source_assets\n ]\n cacheable_assets = [\n cached_asset.with_attributes_for_all(\n group_name,\n freshness_policy=freshness_policy,\n auto_materialize_policy=auto_materialize_policy,\n backfill_policy=backfill_policy,\n )\n for cached_asset in cacheable_assets\n ]\n\n return [*assets_defs, *source_assets, *cacheable_assets]\n
", "current_page_name": "_modules/dagster/_core/definitions/load_assets_from_modules", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.load_assets_from_modules"}, "logger_definition": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.logger_definition

\nfrom typing import TYPE_CHECKING, Any, Callable, Optional, Union, cast, overload\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.errors import DagsterInvalidInvocationError\n\nfrom ..decorator_utils import get_function_params\nfrom .config import is_callable_valid_config_arg\nfrom .configurable import AnonymousConfigurableDefinition\nfrom .definition_config_schema import (\n    CoercableToConfigSchema,\n    convert_user_facing_definition_config_schema,\n)\n\nif TYPE_CHECKING:\n    import logging\n\n    from dagster._core.definitions import JobDefinition\n    from dagster._core.execution.context.logger import InitLoggerContext, UnboundInitLoggerContext\n\n    InitLoggerFunction = Callable[[InitLoggerContext], logging.Logger]\n\n\n
[docs]class LoggerDefinition(AnonymousConfigurableDefinition):\n """Core class for defining loggers.\n\n Loggers are job-scoped logging handlers, which will be automatically invoked whenever\n dagster messages are logged from within a job.\n\n Args:\n logger_fn (Callable[[InitLoggerContext], logging.Logger]): User-provided function to\n instantiate the logger. This logger will be automatically invoked whenever the methods\n on ``context.log`` are called from within job compute logic.\n config_schema (Optional[ConfigSchema]): The schema for the config. Configuration data available in\n `init_context.logger_config`. If not set, Dagster will accept any config provided.\n description (Optional[str]): A human-readable description of this logger.\n """\n\n def __init__(\n self,\n logger_fn: "InitLoggerFunction",\n config_schema: Any = None,\n description: Optional[str] = None,\n ):\n self._logger_fn = check.callable_param(logger_fn, "logger_fn")\n self._config_schema = convert_user_facing_definition_config_schema(config_schema)\n self._description = check.opt_str_param(description, "description")\n\n def __call__(self, *args, **kwargs):\n from dagster._core.execution.context.logger import UnboundInitLoggerContext\n\n from .logger_invocation import logger_invocation_result\n\n if len(args) == 0 and len(kwargs) == 0:\n raise DagsterInvalidInvocationError(\n "Logger initialization function has context argument, but no context argument was "\n "provided when invoking."\n )\n if len(args) + len(kwargs) > 1:\n raise DagsterInvalidInvocationError(\n "Initialization of logger received multiple arguments. Only a first "\n "positional context parameter should be provided when invoking."\n )\n\n context_param_name = get_function_params(self.logger_fn)[0].name\n\n if args:\n context = check.opt_inst_param(\n args[0],\n context_param_name,\n UnboundInitLoggerContext,\n default=UnboundInitLoggerContext(logger_config=None, job_def=None),\n )\n return logger_invocation_result(self, context)\n else:\n if context_param_name not in kwargs:\n raise DagsterInvalidInvocationError(\n f"Logger initialization expected argument '{context_param_name}'."\n )\n context = check.opt_inst_param(\n kwargs[context_param_name],\n context_param_name,\n UnboundInitLoggerContext,\n default=UnboundInitLoggerContext(logger_config=None, job_def=None),\n )\n\n return logger_invocation_result(self, context)\n\n @public\n @property\n def logger_fn(self) -> "InitLoggerFunction":\n """Callable[[InitLoggerContext], logging.Logger]: The function that will be invoked to\n instantiate the logger.\n """\n return self._logger_fn\n\n @public\n @property\n def config_schema(self) -> Any:\n """Any: The schema for the logger's config. Configuration data available in `init_context.logger_config`."""\n return self._config_schema\n\n @public\n @property\n def description(self) -> Optional[str]:\n """Optional[str]: A human-readable description of the logger."""\n return self._description\n\n def copy_for_configured(\n self,\n description: Optional[str],\n config_schema: Any,\n ) -> "LoggerDefinition":\n return LoggerDefinition(\n config_schema=config_schema,\n description=description or self.description,\n logger_fn=self.logger_fn,\n )
\n\n\n@overload\ndef logger(\n config_schema: CoercableToConfigSchema, description: Optional[str] = ...\n) -> Callable[["InitLoggerFunction"], "LoggerDefinition"]: ...\n\n\n@overload\ndef logger(\n config_schema: "InitLoggerFunction", description: Optional[str] = ...\n) -> "LoggerDefinition": ...\n\n\n
[docs]def logger(\n config_schema: Union[CoercableToConfigSchema, "InitLoggerFunction"] = None,\n description: Optional[str] = None,\n) -> Union["LoggerDefinition", Callable[["InitLoggerFunction"], "LoggerDefinition"]]:\n """Define a logger.\n\n The decorated function should accept an :py:class:`InitLoggerContext` and return an instance of\n :py:class:`python:logging.Logger`. This function will become the ``logger_fn`` of an underlying\n :py:class:`LoggerDefinition`.\n\n Args:\n config_schema (Optional[ConfigSchema]): The schema for the config. Configuration data available in\n `init_context.logger_config`. If not set, Dagster will accept any config provided.\n description (Optional[str]): A human-readable description of the logger.\n """\n # This case is for when decorator is used bare, without arguments.\n # E.g. @logger versus @logger()\n if callable(config_schema) and not is_callable_valid_config_arg(config_schema):\n return LoggerDefinition(logger_fn=cast("InitLoggerFunction", config_schema))\n\n def _wrap(logger_fn: "InitLoggerFunction") -> "LoggerDefinition":\n return LoggerDefinition(\n logger_fn=logger_fn,\n config_schema=config_schema,\n description=description,\n )\n\n return _wrap
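# --- Hedged usage sketch (editor addition) ---
# A minimal custom logger built with the @logger decorator above. The config schema,
# formatter, and logger name are illustrative choices, not fixed API.
import logging

from dagster import Field, logger


@logger(
    config_schema={"log_level": Field(str, is_required=False, default_value="INFO")},
    description="A console logger with a configurable level.",
)
def console_logger(init_context) -> logging.Logger:
    level = init_context.logger_config["log_level"]
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(name)s - %(levelname)s - %(message)s"))
    custom_logger = logging.getLogger("console_logger")
    custom_logger.setLevel(level)
    custom_logger.addHandler(handler)
    return custom_logger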
\n\n\n
[docs]def build_init_logger_context(\n logger_config: Any = None,\n job_def: Optional["JobDefinition"] = None,\n) -> "UnboundInitLoggerContext":\n """Builds logger initialization context from provided parameters.\n\n This function can be used to provide the context argument to the invocation of a logger\n definition.\n\n Args:\n logger_config (Any): The config to provide during initialization of logger.\n job_def (Optional[JobDefinition]): The job definition that the logger will be used with.\n\n Examples:\n .. code-block:: python\n\n context = build_init_logger_context()\n logger_to_init(context)\n """\n from dagster._core.definitions import JobDefinition\n from dagster._core.execution.context.logger import UnboundInitLoggerContext\n\n check.opt_inst_param(job_def, "job_def", JobDefinition)\n\n return UnboundInitLoggerContext(logger_config=logger_config, job_def=job_def)
\n
", "current_page_name": "_modules/dagster/_core/definitions/logger_definition", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.logger_definition"}, "materialize": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.materialize

\nfrom typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence, Set, Union\n\nimport dagster._check as check\nfrom dagster._core.definitions.unresolved_asset_job_definition import define_asset_job\nfrom dagster._utils.merger import merge_dicts\n\nfrom ..errors import DagsterInvariantViolationError\nfrom ..instance import DagsterInstance\nfrom ..storage.io_manager import IOManagerDefinition\nfrom ..storage.mem_io_manager import mem_io_manager\nfrom .assets import AssetsDefinition\nfrom .source_asset import SourceAsset\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.asset_selection import CoercibleToAssetSelection\n    from dagster._core.definitions.events import AssetKey\n\n    from ..execution.execute_in_process_result import ExecuteInProcessResult\n\nEPHEMERAL_JOB_NAME = "__ephemeral_asset_job__"\n\n\n
[docs]def materialize(\n assets: Sequence[Union[AssetsDefinition, SourceAsset]],\n run_config: Any = None,\n instance: Optional[DagsterInstance] = None,\n resources: Optional[Mapping[str, object]] = None,\n partition_key: Optional[str] = None,\n raise_on_error: bool = True,\n tags: Optional[Mapping[str, str]] = None,\n selection: Optional["CoercibleToAssetSelection"] = None,\n) -> "ExecuteInProcessResult":\n """Executes a single-threaded, in-process run which materializes provided assets.\n\n By default, will materialize assets to the local filesystem.\n\n Args:\n assets (Sequence[Union[AssetsDefinition, SourceAsset]]):\n The assets to materialize.\n\n Unless you're using `deps` or `non_argument_deps`, you must also include all assets that are\n upstream of the assets that you want to materialize. This is because those upstream\n asset definitions have information that is needed to load their contents while\n materializing the downstream assets.\n\n You can use the `selection` argument to distinguish between assets that you want to\n materialize and assets that are just present for loading.\n resources (Optional[Mapping[str, object]]):\n The resources needed for execution. Can provide resource instances\n directly, or resource definitions. Note that if provided resources\n conflict with resources directly on assets, an error will be thrown.\n run_config (Optional[Any]): The run config to use for the run that materializes the assets.\n partition_key: (Optional[str])\n The string partition key that specifies the run config to execute. Can only be used\n to select run config for assets with partitioned config.\n tags (Optional[Mapping[str, str]]): Tags for the run.\n selection (Optional[Union[str, Sequence[str], Sequence[AssetKey], Sequence[Union[AssetsDefinition, SourceAsset]], AssetSelection]]):\n A sub-selection of assets to materialize.\n\n If not provided, then all assets will be materialized.\n\n If providing a string or sequence of strings,\n https://docs.dagster.io/concepts/assets/asset-selection-syntax describes the accepted\n syntax.\n\n Returns:\n ExecuteInProcessResult: The result of the execution.\n\n Examples:\n .. code-block:: python\n\n @asset\n def asset1():\n ...\n\n @asset\n def asset2(asset1):\n ...\n\n # executes a run that materializes asset1 and then asset2\n materialize([asset1, asset2])\n\n # executes a run that materializes just asset2, loading its input from asset1\n materialize([asset1, asset2], selection=[asset2])\n """\n from dagster._core.definitions.definitions_class import Definitions\n\n assets = check.sequence_param(assets, "assets", of_type=(AssetsDefinition, SourceAsset))\n instance = check.opt_inst_param(instance, "instance", DagsterInstance)\n partition_key = check.opt_str_param(partition_key, "partition_key")\n resources = check.opt_mapping_param(resources, "resources", key_type=str)\n\n all_executable_keys: Set[AssetKey] = set()\n for asset in assets:\n if isinstance(asset, AssetsDefinition):\n all_executable_keys = all_executable_keys.union(set(asset.keys))\n\n defs = Definitions(\n jobs=[define_asset_job(name=EPHEMERAL_JOB_NAME, selection=selection)],\n assets=assets,\n resources=resources,\n )\n return check.not_none(\n defs.get_job_def(EPHEMERAL_JOB_NAME),\n "This should always return a job",\n ).execute_in_process(\n run_config=run_config,\n instance=instance,\n partition_key=partition_key,\n raise_on_error=raise_on_error,\n tags=tags,\n )
\n\n\n
[docs]def materialize_to_memory(\n assets: Sequence[Union[AssetsDefinition, SourceAsset]],\n run_config: Any = None,\n instance: Optional[DagsterInstance] = None,\n resources: Optional[Mapping[str, object]] = None,\n partition_key: Optional[str] = None,\n raise_on_error: bool = True,\n tags: Optional[Mapping[str, str]] = None,\n selection: Optional["CoercibleToAssetSelection"] = None,\n) -> "ExecuteInProcessResult":\n """Executes a single-threaded, in-process run which materializes provided assets in memory.\n\n Will explicitly use :py:func:`mem_io_manager` for all required io manager\n keys. If any io managers are directly provided using the `resources`\n argument, a :py:class:`DagsterInvariantViolationError` will be thrown.\n\n Args:\n assets (Sequence[Union[AssetsDefinition, SourceAsset]]):\n The assets to materialize. Can also provide :py:class:`SourceAsset` objects to fill dependencies for asset defs.\n run_config (Optional[Any]): The run config to use for the run that materializes the assets.\n resources (Optional[Mapping[str, object]]):\n The resources needed for execution. Can provide resource instances\n directly, or resource definitions. If provided resources\n conflict with resources directly on assets, an error will be thrown.\n partition_key: (Optional[str])\n The string partition key that specifies the run config to execute. Can only be used\n to select run config for assets with partitioned config.\n tags (Optional[Mapping[str, str]]): Tags for the run.\n selection (Optional[Union[str, Sequence[str], Sequence[AssetKey], Sequence[Union[AssetsDefinition, SourceAsset]], AssetSelection]]):\n A sub-selection of assets to materialize.\n\n If not provided, then all assets will be materialized.\n\n If providing a string or sequence of strings,\n https://docs.dagster.io/concepts/assets/asset-selection-syntax describes the accepted\n syntax.\n\n Returns:\n ExecuteInProcessResult: The result of the execution.\n\n Examples:\n .. code-block:: python\n\n @asset\n def asset1():\n ...\n\n @asset\n def asset2(asset1):\n ...\n\n # executes a run that materializes asset1 and then asset2\n materialize([asset1, asset2])\n\n # executes a run that materializes just asset1\n materialize([asset1, asset2], selection=[asset1])\n """\n assets = check.sequence_param(assets, "assets", of_type=(AssetsDefinition, SourceAsset))\n\n # Gather all resource defs for the purpose of checking io managers.\n resources_dict = resources or {}\n all_resource_keys = set(resources_dict.keys())\n for asset in assets:\n all_resource_keys = all_resource_keys.union(asset.resource_defs.keys())\n\n io_manager_keys = _get_required_io_manager_keys(assets)\n for io_manager_key in io_manager_keys:\n if io_manager_key in all_resource_keys:\n raise DagsterInvariantViolationError(\n "Attempted to call `materialize_to_memory` with a resource "\n f"provided for io manager key '{io_manager_key}'. Do not "\n "provide resources for io manager keys when calling "\n "`materialize_to_memory`, as it will override io management "\n "behavior for all keys."\n )\n\n resource_defs = merge_dicts({key: mem_io_manager for key in io_manager_keys}, resources_dict)\n\n return materialize(\n assets=assets,\n run_config=run_config,\n resources=resource_defs,\n instance=instance,\n partition_key=partition_key,\n raise_on_error=raise_on_error,\n tags=tags,\n selection=selection,\n )
\n\n\ndef _get_required_io_manager_keys(\n assets: Sequence[Union[AssetsDefinition, SourceAsset]]\n) -> Set[str]:\n io_manager_keys = set()\n for asset in assets:\n for requirement in asset.get_resource_requirements():\n if requirement.expected_type == IOManagerDefinition:\n io_manager_keys.add(requirement.key)\n return io_manager_keys\n
", "current_page_name": "_modules/dagster/_core/definitions/materialize", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.materialize"}, "metadata": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.metadata

\nimport os\nfrom abc import ABC, abstractmethod\nfrom typing import (\n    TYPE_CHECKING,\n    Any,\n    Callable,\n    Dict,\n    Generic,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Union,\n    cast,\n)\n\nfrom typing_extensions import Self, TypeAlias, TypeVar\n\nimport dagster._check as check\nimport dagster._seven as seven\nfrom dagster._annotations import PublicAttr, deprecated, deprecated_param, experimental, public\nfrom dagster._core.errors import DagsterInvalidMetadata\nfrom dagster._serdes import whitelist_for_serdes\nfrom dagster._serdes.serdes import (\n    FieldSerializer,\n    PackableValue,\n    UnpackContext,\n    WhitelistMap,\n    pack_value,\n)\nfrom dagster._utils.warnings import (\n    deprecation_warning,\n    normalize_renamed_param,\n)\n\nfrom .table import (  # re-exported\n    TableColumn as TableColumn,\n    TableColumnConstraints as TableColumnConstraints,\n    TableConstraints as TableConstraints,\n    TableRecord as TableRecord,\n    TableSchema as TableSchema,\n)\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.events import AssetKey\n\nArbitraryMetadataMapping: TypeAlias = Mapping[str, Any]\n\nRawMetadataValue = Union[\n    "MetadataValue",\n    TableSchema,\n    "AssetKey",\n    os.PathLike,\n    Dict[Any, Any],\n    float,\n    int,\n    List[Any],\n    str,\n    None,\n]\n\nMetadataMapping: TypeAlias = Mapping[str, "MetadataValue"]\nMetadataUserInput: TypeAlias = Mapping[str, RawMetadataValue]\n\nT_Packable = TypeVar("T_Packable", bound=PackableValue, default=PackableValue, covariant=True)\n\n# ########################\n# ##### NORMALIZATION\n# ########################\n\n\ndef normalize_metadata(\n    metadata: Mapping[str, RawMetadataValue],\n    allow_invalid: bool = False,\n) -> Mapping[str, "MetadataValue"]:\n    # This is a stopgap measure to deal with unsupported metadata values, which occur when we try\n    # to convert arbitrary metadata (on e.g. OutputDefinition) to a MetadataValue, which is required\n    # for serialization. This will cause unsupported values to be silently replaced with a\n    # string placeholder.\n    normalized_metadata: Dict[str, MetadataValue] = {}\n    for k, v in metadata.items():\n        try:\n            normalized_value = normalize_metadata_value(v)\n        except DagsterInvalidMetadata as e:\n            if allow_invalid:\n                deprecation_warning(\n                    "Support for arbitrary metadata values",\n                    "2.0.0",\n                    additional_warn_text=(\n                        "In the future, all user-supplied metadata values must be one of"\n                        f" {RawMetadataValue}"\n                    ),\n                    stacklevel=4,  # to get the caller of `normalize_metadata`\n                )\n                normalized_value = TextMetadataValue(f"[{v.__class__.__name__}] (unserializable)")\n            else:\n                raise DagsterInvalidMetadata(\n                    f'Could not resolve the metadata value for "{k}" to a known type. 
{e}'\n                ) from None\n        normalized_metadata[k] = normalized_value\n\n    return normalized_metadata\n\n\ndef normalize_metadata_value(raw_value: RawMetadataValue) -> "MetadataValue[Any]":\n    from dagster._core.definitions.events import AssetKey\n\n    if isinstance(raw_value, MetadataValue):\n        return raw_value\n    elif isinstance(raw_value, str):\n        return MetadataValue.text(raw_value)\n    elif isinstance(raw_value, float):\n        return MetadataValue.float(raw_value)\n    elif isinstance(raw_value, bool):\n        return MetadataValue.bool(raw_value)\n    elif isinstance(raw_value, int):\n        return MetadataValue.int(raw_value)\n    elif isinstance(raw_value, (list, dict)):\n        return MetadataValue.json(raw_value)\n    elif isinstance(raw_value, os.PathLike):\n        return MetadataValue.path(raw_value)\n    elif isinstance(raw_value, AssetKey):\n        return MetadataValue.asset(raw_value)\n    elif isinstance(raw_value, TableSchema):\n        return MetadataValue.table_schema(raw_value)\n    elif raw_value is None:\n        return MetadataValue.null()\n\n    raise DagsterInvalidMetadata(\n        f"Its type was {type(raw_value)}. Consider wrapping the value with the appropriate "\n        "MetadataValue type."\n    )\n\n\n# ########################\n# ##### METADATA VALUE\n# ########################\n\n\n
[docs]class MetadataValue(ABC, Generic[T_Packable]):\n """Utility class to wrap metadata values passed into Dagster events so that they can be\n displayed in the Dagster UI and other tooling.\n\n .. code-block:: python\n\n @op\n def emit_metadata(context, df):\n yield AssetMaterialization(\n asset_key="my_dataset",\n metadata={\n "my_text_label": "hello",\n "dashboard_url": MetadataValue.url("http://mycoolsite.com/my_dashboard"),\n "num_rows": 0,\n },\n )\n """\n\n @public\n @property\n @abstractmethod\n def value(self) -> T_Packable:\n """The wrapped value."""\n raise NotImplementedError()\n\n
[docs] @public\n @staticmethod\n def text(text: str) -> "TextMetadataValue":\n """Static constructor for a metadata value wrapping text as\n :py:class:`TextMetadataValue`. Can be used as the value type for the `metadata`\n parameter for supported events.\n\n Example:\n .. code-block:: python\n\n @op\n def emit_metadata(context, df):\n yield AssetMaterialization(\n asset_key="my_dataset",\n metadata={\n "my_text_label": MetadataValue.text("hello")\n },\n )\n\n Args:\n text (str): The text string for a metadata entry.\n """\n return TextMetadataValue(text)
\n\n
[docs] @public\n @staticmethod\n def url(url: str) -> "UrlMetadataValue":\n """Static constructor for a metadata value wrapping a URL as\n :py:class:`UrlMetadataValue`. Can be used as the value type for the `metadata`\n parameter for supported events.\n\n Example:\n .. code-block:: python\n\n @op\n def emit_metadata(context):\n yield AssetMaterialization(\n asset_key="my_dashboard",\n metadata={\n "dashboard_url": MetadataValue.url("http://mycoolsite.com/my_dashboard"),\n }\n )\n\n Args:\n url (str): The URL for a metadata entry.\n """\n return UrlMetadataValue(url)
\n\n
[docs] @public\n @staticmethod\n def path(path: Union[str, os.PathLike]) -> "PathMetadataValue":\n """Static constructor for a metadata value wrapping a path as\n :py:class:`PathMetadataValue`.\n\n Example:\n .. code-block:: python\n\n @op\n def emit_metadata(context):\n yield AssetMaterialization(\n asset_key="my_dataset",\n metadata={\n "filepath": MetadataValue.path("path/to/file"),\n }\n )\n\n Args:\n path (str): The path for a metadata entry.\n """\n return PathMetadataValue(path)
\n\n
[docs] @public\n @staticmethod\n def notebook(path: Union[str, os.PathLike]) -> "NotebookMetadataValue":\n """Static constructor for a metadata value wrapping a notebook path as\n :py:class:`NotebookMetadataValue`.\n\n Example:\n .. code-block:: python\n\n @op\n def emit_metadata(context):\n yield AssetMaterialization(\n asset_key="my_dataset",\n metadata={\n "notebook_path": MetadataValue.notebook("path/to/notebook.ipynb"),\n }\n )\n\n Args:\n path (str): The path to a notebook for a metadata entry.\n """\n return NotebookMetadataValue(path)
\n\n
[docs] @public\n @staticmethod\n def json(data: Union[Sequence[Any], Mapping[str, Any]]) -> "JsonMetadataValue":\n """Static constructor for a metadata value wrapping a json-serializable list or dict\n as :py:class:`JsonMetadataValue`. Can be used as the value type for the `metadata`\n parameter for supported events.\n\n Example:\n .. code-block:: python\n\n @op\n def emit_metadata(context):\n yield ExpectationResult(\n success=not missing_things,\n label="is_present",\n metadata={\n "about my dataset": MetadataValue.json({"missing_columns": missing_things})\n },\n )\n\n Args:\n data (Union[Sequence[Any], Mapping[str, Any]]): The JSON data for a metadata entry.\n """\n return JsonMetadataValue(data)
\n\n
[docs] @public\n @staticmethod\n def md(data: str) -> "MarkdownMetadataValue":\n """Static constructor for a metadata value wrapping markdown data as\n :py:class:`MarkdownMetadataValue`. Can be used as the value type for the `metadata`\n parameter for supported events.\n\n\n Example:\n .. code-block:: python\n\n @op\n def emit_metadata(context, md_str):\n yield AssetMaterialization(\n asset_key="info",\n metadata={\n 'Details': MetadataValue.md(md_str)\n },\n )\n\n Args:\n data (str): The markdown for a metadata entry.\n """\n return MarkdownMetadataValue(data)
\n\n
[docs] @public\n @staticmethod\n def python_artifact(python_artifact: Callable) -> "PythonArtifactMetadataValue":\n """Static constructor for a metadata value wrapping a python artifact as\n :py:class:`PythonArtifactMetadataValue`. Can be used as the value type for the\n `metadata` parameter for supported events.\n\n Example:\n .. code-block:: python\n\n @op\n def emit_metadata(context, df):\n yield AssetMaterialization(\n asset_key="my_dataset",\n metadata={\n "class": MetadataValue.python_artifact(MyClass),\n "function": MetadataValue.python_artifact(my_function),\n }\n )\n\n Args:\n python_artifact (Callable): The python class or function for a metadata entry.\n """\n check.callable_param(python_artifact, "python_artifact")\n return PythonArtifactMetadataValue(python_artifact.__module__, python_artifact.__name__)
\n\n
[docs] @public\n @staticmethod\n def float(value: float) -> "FloatMetadataValue":\n """Static constructor for a metadata value wrapping a float as\n :py:class:`FloatMetadataValue`. Can be used as the value type for the `metadata`\n parameter for supported events.\n\n Example:\n .. code-block:: python\n\n @op\n def emit_metadata(context, df):\n yield AssetMaterialization(\n asset_key="my_dataset",\n metadata={\n "size (bytes)": MetadataValue.float(calculate_bytes(df)),\n }\n )\n\n Args:\n value (float): The float value for a metadata entry.\n """\n return FloatMetadataValue(value)
\n\n
[docs] @public\n @staticmethod\n def int(value: int) -> "IntMetadataValue":\n """Static constructor for a metadata value wrapping an int as\n :py:class:`IntMetadataValue`. Can be used as the value type for the `metadata`\n parameter for supported events.\n\n Example:\n .. code-block:: python\n\n @op\n def emit_metadata(context, df):\n yield AssetMaterialization(\n asset_key="my_dataset",\n metadata={\n "number of rows": MetadataValue.int(len(df)),\n },\n )\n\n Args:\n value (int): The int value for a metadata entry.\n """\n return IntMetadataValue(value)
\n\n
[docs] @public\n @staticmethod\n def bool(value: bool) -> "BoolMetadataValue":\n """Static constructor for a metadata value wrapping a bool as\n :py:class:`BoolMetadataValue`. Can be used as the value type for the `metadata`\n parameter for supported events.\n\n Example:\n .. code-block:: python\n\n @op\n def emit_metadata(context, df):\n yield AssetMaterialization(\n asset_key="my_dataset",\n metadata={\n "num rows > 1000": MetadataValue.bool(len(df) > 1000),\n },\n )\n\n Args:\n value (bool): The bool value for a metadata entry.\n """\n return BoolMetadataValue(value)
\n\n
[docs] @public\n @staticmethod\n def dagster_run(run_id: str) -> "DagsterRunMetadataValue":\n """Static constructor for a metadata value wrapping a reference to a Dagster run.\n\n Args:\n run_id (str): The ID of the run.\n """\n return DagsterRunMetadataValue(run_id)
\n\n
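# --- Added usage sketch (not part of the original module) --------------------
# A minimal, hedged example of MetadataValue.dagster_run: tagging a
# materialization with the ID of the run that produced it. The op name
# `emit_run_link` and the asset key "my_dataset" are hypothetical.
from dagster import AssetMaterialization, MetadataValue, Output, op


@op
def emit_run_link(context):
    yield AssetMaterialization(
        asset_key="my_dataset",
        metadata={
            # context.run_id is the ID of the run currently executing this op
            "producing_run": MetadataValue.dagster_run(context.run_id),
        },
    )
    yield Output(None)  # satisfy the op's default output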
[docs] @public\n @staticmethod\n def asset(asset_key: "AssetKey") -> "DagsterAssetMetadataValue":\n """Static constructor for a metadata value referencing a Dagster asset, by key.\n\n For example:\n\n .. code-block:: python\n\n @op\n def validate_table(context, df):\n yield AssetMaterialization(\n asset_key=AssetKey("my_table"),\n metadata={\n "Related asset": MetadataValue.asset(AssetKey('my_other_table')),\n },\n )\n\n Args:\n asset_key (AssetKey): The asset key referencing the asset.\n """\n from dagster._core.definitions.events import AssetKey\n\n check.inst_param(asset_key, "asset_key", AssetKey)\n return DagsterAssetMetadataValue(asset_key)
\n\n
[docs] @public\n @staticmethod\n @experimental\n def table(\n records: Sequence[TableRecord], schema: Optional[TableSchema] = None\n ) -> "TableMetadataValue":\n """Static constructor for a metadata value wrapping arbitrary tabular data as\n :py:class:`TableMetadataValue`. Can be used as the value type for the `metadata`\n parameter for supported events.\n\n Example:\n .. code-block:: python\n\n @op\n def emit_metadata(context):\n yield ExpectationResult(\n success=not has_errors,\n label="is_valid",\n metadata={\n "errors": MetadataValue.table(\n records=[\n TableRecord(code="invalid-data-type", row=2, col="name"),\n ],\n schema=TableSchema(\n columns=[\n TableColumn(name="code", type="string"),\n TableColumn(name="row", type="int"),\n TableColumn(name="col", type="string"),\n ]\n )\n ),\n },\n )\n """\n return TableMetadataValue(records, schema)
\n\n
[docs] @public\n @staticmethod\n def table_schema(\n schema: TableSchema,\n ) -> "TableSchemaMetadataValue":\n """Static constructor for a metadata value wrapping a table schema as\n :py:class:`TableSchemaMetadataValue`. Can be used as the value type\n for the `metadata` parameter for supported events.\n\n Example:\n .. code-block:: python\n\n schema = TableSchema(\n columns = [\n TableColumn(name="id", type="int"),\n TableColumn(name="status", type="bool"),\n ]\n )\n\n DagsterType(\n type_check_fn=some_validation_fn,\n name='MyTable',\n metadata={\n 'my_table_schema': MetadataValue.table_schema(schema),\n }\n )\n\n Args:\n schema (TableSchema): The table schema for a metadata entry.\n """\n return TableSchemaMetadataValue(schema)
\n\n
[docs] @public\n @staticmethod\n def null() -> "NullMetadataValue":\n """Static constructor for a metadata value representing null. Can be used as the value type\n for the `metadata` parameter for supported events.\n """\n return NullMetadataValue()
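# --- Added usage sketch (not part of the original module) --------------------
# A hedged example combining several of the static constructors above in one
# metadata dict. The op name `emit_dataset_metadata`, the asset key, and the
# metadata labels are hypothetical.
from dagster import AssetMaterialization, MetadataValue, Output, op


@op
def emit_dataset_metadata(context):
    yield AssetMaterialization(
        asset_key="my_dataset",
        metadata={
            "row count": MetadataValue.int(1234),
            "size (MB)": MetadataValue.float(5.7),
            "sampled": MetadataValue.bool(False),
            "storage path": MetadataValue.path("/tmp/my_dataset.parquet"),
            "docs": MetadataValue.url("https://example.com/my_dataset"),
            "summary": MetadataValue.md("**my_dataset** refreshed successfully"),
            "profile": MetadataValue.json({"null_fraction": 0.01}),
        },
    )
    yield Output(None)  # satisfy the op's default output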
\n\n\n# ########################\n# ##### METADATA VALUE TYPES\n# ########################\n\n# NOTE: We have `type: ignore` in a few places below because mypy complains about an instance method\n# (e.g. `text`) overriding a static method on the superclass of the same name. This is not a concern\n# for us because these static methods should never be called on instances.\n\n# NOTE: `XMetadataValue` classes are serialized with a storage name of `XMetadataEntryData` to\n# maintain backward compatibility. See docstring of `whitelist_for_serdes` for more info.\n\n\n
[docs]@whitelist_for_serdes(storage_name="TextMetadataEntryData")\nclass TextMetadataValue(\n NamedTuple(\n "_TextMetadataValue",\n [\n ("text", PublicAttr[Optional[str]]),\n ],\n ),\n MetadataValue[str],\n):\n """Container class for text metadata entry data.\n\n Args:\n text (Optional[str]): The text data.\n """\n\n def __new__(cls, text: Optional[str]):\n return super(TextMetadataValue, cls).__new__(\n cls, check.opt_str_param(text, "text", default="")\n )\n\n @public\n @property\n def value(self) -> Optional[str]:\n """Optional[str]: The wrapped text data."""\n return self.text
\n\n\n
[docs]@whitelist_for_serdes(storage_name="UrlMetadataEntryData")\nclass UrlMetadataValue(\n NamedTuple(\n "_UrlMetadataValue",\n [\n ("url", PublicAttr[Optional[str]]),\n ],\n ),\n MetadataValue[str],\n):\n """Container class for URL metadata entry data.\n\n Args:\n url (Optional[str]): The URL as a string.\n """\n\n def __new__(cls, url: Optional[str]):\n return super(UrlMetadataValue, cls).__new__(\n cls, check.opt_str_param(url, "url", default="")\n )\n\n @public\n @property\n def value(self) -> Optional[str]:\n """Optional[str]: The wrapped URL."""\n return self.url
\n\n\n
[docs]@whitelist_for_serdes(storage_name="PathMetadataEntryData")\nclass PathMetadataValue(\n NamedTuple("_PathMetadataValue", [("path", PublicAttr[Optional[str]])]), MetadataValue[str]\n):\n """Container class for path metadata entry data.\n\n Args:\n path (Optional[str]): The path as a string or conforming to os.PathLike.\n """\n\n def __new__(cls, path: Optional[Union[str, os.PathLike]]):\n return super(PathMetadataValue, cls).__new__(\n cls, check.opt_path_param(path, "path", default="")\n )\n\n @public\n @property\n def value(self) -> Optional[str]:\n """Optional[str]: The wrapped path."""\n return self.path
\n\n\n
[docs]@whitelist_for_serdes(storage_name="NotebookMetadataEntryData")\nclass NotebookMetadataValue(\n NamedTuple("_NotebookMetadataValue", [("path", PublicAttr[Optional[str]])]), MetadataValue[str]\n):\n """Container class for notebook metadata entry data.\n\n Args:\n path (Optional[str]): The path to the notebook as a string or conforming to os.PathLike.\n """\n\n def __new__(cls, path: Optional[Union[str, os.PathLike]]):\n return super(NotebookMetadataValue, cls).__new__(\n cls, check.opt_path_param(path, "path", default="")\n )\n\n @public\n @property\n def value(self) -> Optional[str]:\n """Optional[str]: The wrapped path to the notebook as a string."""\n return self.path
\n\n\n
[docs]@whitelist_for_serdes(storage_name="JsonMetadataEntryData")\nclass JsonMetadataValue(\n NamedTuple(\n "_JsonMetadataValue",\n [\n ("data", PublicAttr[Optional[Union[Sequence[Any], Mapping[str, Any]]]]),\n ],\n ),\n MetadataValue[Union[Sequence[Any], Mapping[str, Any]]],\n):\n """Container class for JSON metadata entry data.\n\n Args:\n data (Union[Sequence[Any], Dict[str, Any]]): The JSON data.\n """\n\n def __new__(cls, data: Optional[Union[Sequence[Any], Mapping[str, Any]]]):\n data = check.opt_inst_param(data, "data", (Sequence, Mapping))\n try:\n # check that the value is JSON serializable\n seven.dumps(data)\n except TypeError:\n raise DagsterInvalidMetadata("Value is not JSON serializable.")\n return super(JsonMetadataValue, cls).__new__(cls, data)\n\n @public\n @property\n def value(self) -> Optional[Union[Sequence[Any], Mapping[str, Any]]]:\n """Optional[Union[Sequence[Any], Dict[str, Any]]]: The wrapped JSON data."""\n return self.data
\n\n\n
[docs]@whitelist_for_serdes(storage_name="MarkdownMetadataEntryData")\nclass MarkdownMetadataValue(\n NamedTuple(\n "_MarkdownMetadataValue",\n [\n ("md_str", PublicAttr[Optional[str]]),\n ],\n ),\n MetadataValue[str],\n):\n """Container class for markdown metadata entry data.\n\n Args:\n md_str (Optional[str]): The markdown as a string.\n """\n\n def __new__(cls, md_str: Optional[str]):\n return super(MarkdownMetadataValue, cls).__new__(\n cls, check.opt_str_param(md_str, "md_str", default="")\n )\n\n @public\n @property\n def value(self) -> Optional[str]:\n """Optional[str]: The wrapped markdown as a string."""\n return self.md_str
\n\n\n# This should be deprecated or fixed so that `value` does not return itself.\n
[docs]@whitelist_for_serdes(storage_name="PythonArtifactMetadataEntryData")\nclass PythonArtifactMetadataValue(\n NamedTuple(\n "_PythonArtifactMetadataValue",\n [\n ("module", PublicAttr[str]),\n ("name", PublicAttr[str]),\n ],\n ),\n MetadataValue["PythonArtifactMetadataValue"],\n):\n """Container class for python artifact metadata entry data.\n\n Args:\n module (str): The module where the python artifact can be found\n name (str): The name of the python artifact\n """\n\n def __new__(cls, module: str, name: str):\n return super(PythonArtifactMetadataValue, cls).__new__(\n cls, check.str_param(module, "module"), check.str_param(name, "name")\n )\n\n @public\n @property\n def value(self) -> Self:\n """PythonArtifactMetadataValue: Identity function."""\n return self
\n\n\n
[docs]@whitelist_for_serdes(storage_name="FloatMetadataEntryData")\nclass FloatMetadataValue(\n NamedTuple(\n "_FloatMetadataValue",\n [\n ("value", PublicAttr[Optional[float]]),\n ],\n ),\n MetadataValue[float],\n):\n """Container class for float metadata entry data.\n\n Args:\n value (Optional[float]): The float value.\n """\n\n def __new__(cls, value: Optional[float]):\n return super(FloatMetadataValue, cls).__new__(cls, check.opt_float_param(value, "value"))
\n\n\n
[docs]@whitelist_for_serdes(storage_name="IntMetadataEntryData")\nclass IntMetadataValue(\n NamedTuple(\n "_IntMetadataValue",\n [\n ("value", PublicAttr[Optional[int]]),\n ],\n ),\n MetadataValue[int],\n):\n """Container class for int metadata entry data.\n\n Args:\n value (Optional[int]): The int value.\n """\n\n def __new__(cls, value: Optional[int]):\n return super(IntMetadataValue, cls).__new__(cls, check.opt_int_param(value, "value"))
\n\n\n@whitelist_for_serdes(storage_name="BoolMetadataEntryData")\nclass BoolMetadataValue(\n NamedTuple("_BoolMetadataValue", [("value", PublicAttr[Optional[bool]])]),\n MetadataValue[bool],\n):\n """Container class for bool metadata entry data.\n\n Args:\n value (Optional[bool]): The bool value.\n """\n\n def __new__(cls, value: Optional[bool]):\n return super(BoolMetadataValue, cls).__new__(cls, check.opt_bool_param(value, "value"))\n\n\n
[docs]@whitelist_for_serdes(storage_name="DagsterPipelineRunMetadataEntryData")\nclass DagsterRunMetadataValue(\n NamedTuple(\n "_DagsterRunMetadataValue",\n [\n ("run_id", PublicAttr[str]),\n ],\n ),\n MetadataValue[str],\n):\n """Representation of a dagster run.\n\n Args:\n run_id (str): The run id\n """\n\n def __new__(cls, run_id: str):\n return super(DagsterRunMetadataValue, cls).__new__(cls, check.str_param(run_id, "run_id"))\n\n @public\n @property\n def value(self) -> str:\n """str: The wrapped run id."""\n return self.run_id
\n\n\n
[docs]@whitelist_for_serdes(storage_name="DagsterAssetMetadataEntryData")\nclass DagsterAssetMetadataValue(\n NamedTuple("_DagsterAssetMetadataValue", [("asset_key", PublicAttr["AssetKey"])]),\n MetadataValue["AssetKey"],\n):\n """Representation of a dagster asset.\n\n Args:\n asset_key (AssetKey): The dagster asset key\n """\n\n def __new__(cls, asset_key: "AssetKey"):\n from dagster._core.definitions.events import AssetKey\n\n return super(DagsterAssetMetadataValue, cls).__new__(\n cls, check.inst_param(asset_key, "asset_key", AssetKey)\n )\n\n @public\n @property\n def value(self) -> "AssetKey":\n """AssetKey: The wrapped :py:class:`AssetKey`."""\n return self.asset_key
\n\n\n# This should be deprecated or fixed so that `value` does not return itself.\n
[docs]@experimental\n@whitelist_for_serdes(storage_name="TableMetadataEntryData")\nclass TableMetadataValue(\n NamedTuple(\n "_TableMetadataValue",\n [\n ("records", PublicAttr[Sequence[TableRecord]]),\n ("schema", PublicAttr[TableSchema]),\n ],\n ),\n MetadataValue["TableMetadataValue"],\n):\n """Container class for table metadata entry data.\n\n Args:\n records (TableRecord): The data as a list of records (i.e. rows).\n schema (Optional[TableSchema]): A schema for the table.\n """\n\n
[docs] @public\n @staticmethod\n def infer_column_type(value: object) -> str:\n """str: Infer the :py:class:`TableSchema` column type that will be used for a value."""\n if isinstance(value, bool):\n return "bool"\n elif isinstance(value, int):\n return "int"\n elif isinstance(value, float):\n return "float"\n else:\n return "string"
\n\n def __new__(cls, records: Sequence[TableRecord], schema: Optional[TableSchema]):\n check.sequence_param(records, "records", of_type=TableRecord)\n check.opt_inst_param(schema, "schema", TableSchema)\n\n if len(records) == 0:\n schema = check.not_none(schema, "schema must be provided if records is empty")\n else:\n columns = set(records[0].data.keys())\n for record in records[1:]:\n check.invariant(\n set(record.data.keys()) == columns, "All records must have the same fields"\n )\n schema = schema or TableSchema(\n columns=[\n TableColumn(name=k, type=TableMetadataValue.infer_column_type(v))\n for k, v in records[0].data.items()\n ]\n )\n\n return super(TableMetadataValue, cls).__new__(\n cls,\n records,\n schema,\n )\n\n @public\n @property\n def value(self) -> Self:\n """TableMetadataValue: Identity function."""\n return self
\n\n\n
[docs]@whitelist_for_serdes(storage_name="TableSchemaMetadataEntryData")\nclass TableSchemaMetadataValue(\n NamedTuple("_TableSchemaMetadataValue", [("schema", PublicAttr[TableSchema])]),\n MetadataValue[TableSchema],\n):\n """Representation of a schema for arbitrary tabular data.\n\n Args:\n schema (TableSchema): The dictionary containing the schema representation.\n """\n\n def __new__(cls, schema: TableSchema):\n return super(TableSchemaMetadataValue, cls).__new__(\n cls, check.inst_param(schema, "schema", TableSchema)\n )\n\n @public\n @property\n def value(self) -> TableSchema:\n """TableSchema: The wrapped :py:class:`TableSchema`."""\n return self.schema
\n\n\n@whitelist_for_serdes(storage_name="NullMetadataEntryData")\nclass NullMetadataValue(NamedTuple("_NullMetadataValue", []), MetadataValue[None]):\n """Representation of null."""\n\n @public\n @property\n def value(self) -> None:\n """None: The wrapped null value."""\n return None\n\n\n# ########################\n# ##### METADATA BACKCOMPAT\n# ########################\n\n# Metadata used to be represented as a `List[MetadataEntry]`, but that class has been deleted. But\n# we still serialize metadata dicts to the serialized representation of `List[MetadataEntry]` for\n# backcompat purposes.\n\n\nclass MetadataFieldSerializer(FieldSerializer):\n """Converts between metadata dict (new) and metadata entries list (old)."""\n\n storage_name = "metadata_entries"\n loaded_name = "metadata"\n\n def pack(\n self,\n metadata_dict: Mapping[str, MetadataValue],\n whitelist_map: WhitelistMap,\n descent_path: str,\n ) -> Sequence[Mapping[str, Any]]:\n return [\n {\n "__class__": "EventMetadataEntry",\n "label": k,\n # MetadataValue itself can't inherit from NamedTuple and so isn't a PackableValue,\n # but one of its subclasses will always be returned here.\n "entry_data": pack_value(v, whitelist_map, descent_path), # type: ignore\n "description": None,\n }\n for k, v in metadata_dict.items()\n ]\n\n def unpack(\n self,\n metadata_entries: List["MetadataEntry"],\n whitelist_map: WhitelistMap,\n context: UnpackContext,\n ) -> Mapping[str, MetadataValue]:\n return {e.label: e.entry_data for e in metadata_entries}\n\n\nT_MetadataValue = TypeVar("T_MetadataValue", bound=MetadataValue, covariant=True)\n\n\n# NOTE: MetadataEntry is no longer accessible via the public API-- all metadata APIs use metadata\n# dicts. This clas shas only been preserved to adhere strictly to our backcompat guarantees. It is\n# still instantiated in the above `MetadataFieldSerializer` but that can easily be changed.\n
[docs]@deprecated(\n breaking_version="2.0",\n additional_warn_text="Please use a dict with `MetadataValue` values instead.",\n)\n@deprecated_param(\n param="entry_data", breaking_version="2.0", additional_warn_text="Use `value` instead."\n)\n@whitelist_for_serdes(storage_name="EventMetadataEntry")\nclass MetadataEntry(\n NamedTuple(\n "_MetadataEntry",\n [\n ("label", PublicAttr[str]),\n ("description", PublicAttr[Optional[str]]),\n ("entry_data", PublicAttr[MetadataValue]),\n ],\n ),\n Generic[T_MetadataValue],\n):\n """A structure for describing metadata for Dagster events.\n\n .. note:: This class is no longer usable in any Dagster API, and will be completely removed in 2.0.\n\n Lists of objects of this type can be passed as arguments to Dagster events and will be displayed\n in the Dagster UI and other tooling.\n\n Should be yielded from within an IO manager to append metadata for a given input/output event.\n For other event types, passing a dict with `MetadataValue` values to the `metadata` argument\n is preferred.\n\n Args:\n label (str): Short display label for this metadata entry.\n description (Optional[str]): A human-readable description of this metadata entry.\n value (MetadataValue): Typed metadata entry data. The different types allow\n for customized display in tools like the Dagster UI.\n """\n\n def __new__(\n cls,\n label: str,\n description: Optional[str] = None,\n entry_data: Optional["RawMetadataValue"] = None,\n value: Optional["RawMetadataValue"] = None,\n ):\n value = cast(\n RawMetadataValue,\n normalize_renamed_param(\n new_val=value,\n new_arg="value",\n old_val=entry_data,\n old_arg="entry_data",\n ),\n )\n value = normalize_metadata_value(value)\n\n return super(MetadataEntry, cls).__new__(\n cls,\n check.str_param(label, "label"),\n check.opt_str_param(description, "description"),\n check.inst_param(value, "value", MetadataValue),\n )\n\n @property\n def value(self):\n """Alias of `entry_data`."""\n return self.entry_data
\n
", "current_page_name": "_modules/dagster/_core/definitions/metadata", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "table": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.metadata.table

\nfrom typing import Mapping, NamedTuple, Optional, Sequence, Union, cast\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, experimental, public\nfrom dagster._serdes.serdes import (\n    whitelist_for_serdes,\n)\n\n# ########################\n# ##### TABLE RECORD\n# ########################\n\n\n
[docs]@experimental\n@whitelist_for_serdes\nclass TableRecord(\n NamedTuple("TableRecord", [("data", PublicAttr[Mapping[str, Union[str, int, float, bool]]])])\n):\n """Represents one record in a table. Field keys are arbitrary strings-- field values must be\n strings, integers, floats, or bools.\n """\n\n def __new__(cls, data: Mapping[str, Union[str, int, float, bool]]):\n check.dict_param(\n data,\n "data",\n value_type=(str, float, int, bool, type(None)),\n additional_message="Record fields must be one of types: (str, float, int, bool)",\n )\n return super(TableRecord, cls).__new__(cls, data=data)
\n\n\n# ########################\n# ##### TABLE SCHEMA\n# ########################\n\n\n
[docs]@whitelist_for_serdes\nclass TableSchema(\n NamedTuple(\n "TableSchema",\n [\n ("columns", PublicAttr[Sequence["TableColumn"]]),\n ("constraints", PublicAttr["TableConstraints"]),\n ],\n )\n):\n """Representation of a schema for tabular data.\n\n Schema is composed of two parts:\n\n - A required list of columns (`TableColumn`). Each column specifies a\n `name`, `type`, set of `constraints`, and (optional) `description`. `type`\n defaults to `string` if unspecified. Column constraints\n (`TableColumnConstraints`) consist of boolean properties `unique` and\n `nullable`, as well as a list of strings `other` containing string\n descriptions of all additional constraints (e.g. `"<= 5"`).\n - An optional list of table-level constraints (`TableConstraints`). A\n table-level constraint cannot be expressed in terms of a single column,\n e.g. col a > col b. Presently, all table-level constraints must be\n expressed as strings under the `other` attribute of a `TableConstraints`\n object.\n\n .. code-block:: python\n\n # example schema\n TableSchema(\n constraints = TableConstraints(\n other = [\n "foo > bar",\n ],\n ),\n columns = [\n TableColumn(\n name = "foo",\n type = "string",\n description = "Foo description",\n constraints = TableColumnConstraints(\n required = True,\n other = [\n "starts with the letter 'a'",\n ],\n ),\n ),\n TableColumn(\n name = "bar",\n type = "string",\n ),\n TableColumn(\n name = "baz",\n type = "custom_type",\n constraints = TableColumnConstraints(\n unique = True,\n )\n ),\n ],\n )\n\n Args:\n columns (List[TableColumn]): The columns of the table.\n constraints (Optional[TableConstraints]): The constraints of the table.\n """\n\n def __new__(\n cls,\n columns: Sequence["TableColumn"],\n constraints: Optional["TableConstraints"] = None,\n ):\n return super(TableSchema, cls).__new__(\n cls,\n columns=check.sequence_param(columns, "columns", of_type=TableColumn),\n constraints=check.opt_inst_param(\n constraints, "constraints", TableConstraints, default=_DEFAULT_TABLE_CONSTRAINTS\n ),\n )\n\n
[docs] @public\n @staticmethod\n def from_name_type_dict(name_type_dict: Mapping[str, str]):\n """Constructs a TableSchema from a dictionary whose keys are column names and values are the\n names of data types of those columns.\n """\n return TableSchema(\n columns=[\n TableColumn(name=name, type=type_str) for name, type_str in name_type_dict.items()\n ]\n )
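# --- Added usage sketch (not part of the original module) --------------------
# A hedged illustration that the dict-based shorthand above is equivalent to
# spelling out TableColumn objects by hand; the column names and types here are
# hypothetical.
from dagster import TableColumn, TableSchema

short_form = TableSchema.from_name_type_dict({"id": "int", "status": "bool"})
long_form = TableSchema(
    columns=[
        TableColumn(name="id", type="int"),
        TableColumn(name="status", type="bool"),
    ]
)
assert short_form == long_form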
\n\n\n# ########################\n# ##### TABLE CONSTRAINTS\n# ########################\n\n\n
[docs]@whitelist_for_serdes\nclass TableConstraints(\n NamedTuple(\n "TableConstraints",\n [\n ("other", PublicAttr[Sequence[str]]),\n ],\n )\n):\n """Descriptor for "table-level" constraints. Presently only one property,\n `other` is supported. This contains strings describing arbitrary\n table-level constraints. A table-level constraint is a constraint defined\n in terms of multiple columns (e.g. col_A > col_B) or in terms of rows.\n\n Args:\n other (List[str]): Descriptions of arbitrary table-level constraints.\n """\n\n def __new__(\n cls,\n other: Sequence[str],\n ):\n return super(TableConstraints, cls).__new__(\n cls,\n other=check.sequence_param(other, "other", of_type=str),\n )
\n\n\n_DEFAULT_TABLE_CONSTRAINTS = TableConstraints(other=[])\n\n# ########################\n# ##### TABLE COLUMN\n# ########################\n\n\n
[docs]@whitelist_for_serdes\nclass TableColumn(\n NamedTuple(\n "TableColumn",\n [\n ("name", PublicAttr[str]),\n ("type", PublicAttr[str]),\n ("description", PublicAttr[Optional[str]]),\n ("constraints", PublicAttr["TableColumnConstraints"]),\n ],\n )\n):\n """Descriptor for a table column. The only property that must be specified\n by the user is `name`. If no `type` is specified, `string` is assumed. If\n no `constraints` are specified, the column is assumed to be nullable\n (i.e. `required = False`) and have no other constraints beyond the data type.\n\n Args:\n name (str): The name of the column.\n type (Optional[str]): The type of the column. Can be an arbitrary\n string. Defaults to `"string"`.\n description (Optional[str]): Description of this column. Defaults to `None`.\n constraints (Optional[TableColumnConstraints]): Column-level constraints.\n If unspecified, column is nullable with no constraints.\n """\n\n def __new__(\n cls,\n name: str,\n type: str = "string", # noqa: A002\n description: Optional[str] = None,\n constraints: Optional["TableColumnConstraints"] = None,\n ):\n return super(TableColumn, cls).__new__(\n cls,\n name=check.str_param(name, "name"),\n type=check.str_param(type, "type"),\n description=check.opt_str_param(description, "description"),\n constraints=cast(\n "TableColumnConstraints",\n check.opt_inst_param(\n constraints,\n "constraints",\n TableColumnConstraints,\n default=_DEFAULT_TABLE_COLUMN_CONSTRAINTS,\n ),\n ),\n )
\n\n\n# ########################\n# ##### TABLE COLUMN CONSTRAINTS\n# ########################\n\n\n
[docs]@whitelist_for_serdes\nclass TableColumnConstraints(\n NamedTuple(\n "TableColumnConstraints",\n [\n ("nullable", PublicAttr[bool]),\n ("unique", PublicAttr[bool]),\n ("other", PublicAttr[Optional[Sequence[str]]]),\n ],\n )\n):\n """Descriptor for a table column's constraints. Nullability and uniqueness are specified with\n boolean properties. All other constraints are described using arbitrary strings under the\n `other` property.\n\n Args:\n nullable (Optional[bool]): If true, this column can hold null values.\n unique (Optional[bool]): If true, all values in this column must be unique.\n other (List[str]): Descriptions of arbitrary column-level constraints\n not expressible by the predefined properties.\n """\n\n def __new__(\n cls,\n nullable: bool = True,\n unique: bool = False,\n other: Optional[Sequence[str]] = None,\n ):\n return super(TableColumnConstraints, cls).__new__(\n cls,\n nullable=check.bool_param(nullable, "nullable"),\n unique=check.bool_param(unique, "unique"),\n other=check.opt_sequence_param(other, "other"),\n )
\n\n\n_DEFAULT_TABLE_COLUMN_CONSTRAINTS = TableColumnConstraints()\n
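# --- Added usage sketch (not part of the original module) --------------------
# A hedged example combining TableColumn and TableColumnConstraints into a
# schema; the column names and constraint strings are hypothetical.
from dagster import TableColumn, TableColumnConstraints, TableSchema

user_schema = TableSchema(
    columns=[
        TableColumn(
            name="user_id",
            type="int",
            description="Primary key",
            constraints=TableColumnConstraints(nullable=False, unique=True),
        ),
        TableColumn(
            name="signup_date",  # defaults to type "string"; constraint documented below
            constraints=TableColumnConstraints(other=["ISO 8601 date"]),
        ),
    ],
)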
", "current_page_name": "_modules/dagster/_core/definitions/metadata/table", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}, {"link": "../", "title": "dagster._core.definitions.metadata"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.metadata.table"}, "title": "dagster._core.definitions.metadata"}, "multi_asset_sensor_definition": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.multi_asset_sensor_definition

\nimport inspect\nimport json\nfrom collections import OrderedDict, defaultdict\nfrom typing import (\n    TYPE_CHECKING,\n    Callable,\n    Dict,\n    Iterable,\n    Iterator,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Union,\n    cast,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import experimental, public\nfrom dagster._core.definitions.asset_selection import AssetSelection\nfrom dagster._core.definitions.assets import AssetsDefinition\nfrom dagster._core.definitions.partition import PartitionsDefinition\nfrom dagster._core.definitions.resource_annotation import get_resource_args\nfrom dagster._core.definitions.resource_definition import ResourceDefinition\nfrom dagster._core.definitions.scoped_resources_builder import ScopedResourcesBuilder\nfrom dagster._core.errors import (\n    DagsterInvalidDefinitionError,\n    DagsterInvalidInvocationError,\n    DagsterInvariantViolationError,\n)\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._core.instance.ref import InstanceRef\nfrom dagster._utils import normalize_to_repository\n\nfrom .events import AssetKey\nfrom .run_request import RunRequest, SensorResult, SkipReason\nfrom .sensor_definition import (\n    DefaultSensorStatus,\n    SensorDefinition,\n    SensorEvaluationContext,\n    SensorType,\n    get_context_param_name,\n    get_sensor_context_from_args_or_kwargs,\n    validate_and_get_resource_dict,\n)\nfrom .target import ExecutableDefinition\nfrom .utils import check_valid_name\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.definitions_class import Definitions\n    from dagster._core.definitions.repository_definition import RepositoryDefinition\n    from dagster._core.storage.event_log.base import EventLogRecord\n\nMAX_NUM_UNCONSUMED_EVENTS = 25\n\n\nclass MultiAssetSensorAssetCursorComponent(\n    NamedTuple(\n        "_MultiAssetSensorAssetCursorComponent",\n        [\n            ("latest_consumed_event_partition", Optional[str]),\n            ("latest_consumed_event_id", Optional[int]),\n            ("trailing_unconsumed_partitioned_event_ids", Dict[str, int]),\n        ],\n    )\n):\n    """A cursor component that is used to track the cursor for a particular asset in a multi-asset\n    sensor.\n\n    Here's an illustration to help explain how this representation works:\n\n    partition_1  ---|----------a----\n    partition_2  -t-----|-x---------\n    partition_3  ----t------|---a---\n\n\n    The "|", "a", "t", and "x" characters represent materialization events.\n    The x-axis is storage_id, which is basically time. The cursor has been advanced to the "|" event\n    for each partition. latest_evaluated_event_partition would be "partition_3", and\n    "latest_evaluated_event_id" would be the storage_id of the "|" event for partition_3.\n\n    The "t" events aren't directly represented in the cursor, because they trail the event that the\n    the cursor for their partition has advanced to. The "a" events aren't directly represented\n    in the cursor, because they occurred after the "latest_evaluated_event_id".  
The "x" event is\n    included in "unevaluated_partitioned_event_ids", because it's after the event that the cursor\n    for its partition has advanced to, but trails "latest_evaluated_event_id".\n\n    Attributes:\n        latest_consumed_event_partition (Optional[str]): The partition of the latest consumed event\n            for this asset.\n        latest_consumed_event_id (Optional[int]): The event ID of the latest consumed event for\n            this asset.\n        trailing_unconsumed_partitioned_event_ids (Dict[str, int]): A mapping containing\n            the partition key mapped to the latest unconsumed materialization event for this\n            partition with an ID less than latest_consumed_event_id.\n    """\n\n    def __new__(\n        cls,\n        latest_consumed_event_partition,\n        latest_consumed_event_id,\n        trailing_unconsumed_partitioned_event_ids,\n    ):\n        return super(MultiAssetSensorAssetCursorComponent, cls).__new__(\n            cls,\n            latest_consumed_event_partition=check.opt_str_param(\n                latest_consumed_event_partition, "latest_consumed_event_partition"\n            ),\n            latest_consumed_event_id=check.opt_int_param(\n                latest_consumed_event_id, "latest_consumed_event_id"\n            ),\n            trailing_unconsumed_partitioned_event_ids=check.dict_param(\n                trailing_unconsumed_partitioned_event_ids,\n                "trailing_unconsumed_partitioned_event_ids",\n                key_type=str,\n                value_type=int,\n            ),\n        )\n\n\nclass MultiAssetSensorContextCursor:\n    # Tracks the state of the cursor within the tick, created for utility purposes.\n    # Must call MultiAssetSensorEvaluationContext._update_cursor_after_evaluation at end of tick\n    # to serialize the cursor.\n    def __init__(self, cursor: Optional[str], context: "MultiAssetSensorEvaluationContext"):\n        loaded_cursor = json.loads(cursor) if cursor else {}\n        self._cursor_component_by_asset_key: Dict[str, MultiAssetSensorAssetCursorComponent] = {}\n\n        # The initial latest consumed event ID at the beginning of the tick\n        self.initial_latest_consumed_event_ids_by_asset_key: Dict[str, Optional[int]] = {}\n\n        for str_asset_key, cursor_list in loaded_cursor.items():\n            if len(cursor_list) != 3:\n                # In this case, the cursor object is not a multi asset sensor asset cursor\n                # component. 
This cursor is maintained by the asset reconciliation sensor.\n                break\n            else:\n                partition_key, event_id, trailing_unconsumed_partitioned_event_ids = cursor_list\n                self._cursor_component_by_asset_key[str_asset_key] = (\n                    MultiAssetSensorAssetCursorComponent(\n                        latest_consumed_event_partition=partition_key,\n                        latest_consumed_event_id=event_id,\n                        trailing_unconsumed_partitioned_event_ids=trailing_unconsumed_partitioned_event_ids,\n                    )\n                )\n\n                self.initial_latest_consumed_event_ids_by_asset_key[str_asset_key] = event_id\n\n        check.dict_param(self._cursor_component_by_asset_key, "unpacked_cursor", key_type=str)\n        self._context = context\n\n    def get_cursor_for_asset(self, asset_key: AssetKey) -> MultiAssetSensorAssetCursorComponent:\n        return self._cursor_component_by_asset_key.get(\n            str(asset_key), MultiAssetSensorAssetCursorComponent(None, None, {})\n        )\n\n    def get_stringified_cursor(self) -> str:\n        return json.dumps(self._cursor_component_by_asset_key)\n\n\n
[docs]@experimental\nclass MultiAssetSensorEvaluationContext(SensorEvaluationContext):\n """The context object available as the argument to the evaluation function of a\n :py:class:`dagster.MultiAssetSensorDefinition`.\n\n Users should not instantiate this object directly. To construct a\n `MultiAssetSensorEvaluationContext` for testing purposes, use :py:func:`dagster.\n build_multi_asset_sensor_context`.\n\n The `MultiAssetSensorEvaluationContext` contains a cursor object that tracks the state of\n consumed event logs for each monitored asset. For each asset, the cursor stores the storage ID\n of the latest materialization that has been marked as "consumed" (via a call to `advance_cursor`)\n in a `latest_consumed_event_id` field.\n\n For each monitored asset, the cursor will store the latest unconsumed event ID for up to 25\n partitions. Each event ID must be before the `latest_consumed_event_id` field for the asset.\n\n Events that have not been marked as consumed via `advance_cursor` will be returned in future\n ticks until they are marked as consumed.\n\n To update the cursor to the latest materialization and clear the unconsumed events, call\n `advance_all_cursors`.\n\n Attributes:\n monitored_assets (Union[Sequence[AssetKey], AssetSelection]): The assets monitored\n by the sensor. If an AssetSelection object is provided, it will only apply to assets\n within the Definitions that this sensor is part of.\n repository_def (Optional[RepositoryDefinition]): The repository that the sensor belongs to.\n If needed by the sensor, top-level resource definitions will be pulled from this repository.\n You can provide either this or `definitions`.\n instance_ref (Optional[InstanceRef]): The serialized instance configured to run the sensor.\n cursor (Optional[str]): The cursor, passed back from the last sensor evaluation via\n the cursor attribute of SkipReason and RunRequest. Must be a dictionary of asset key\n strings to a stringified tuple of (latest_event_partition, latest_event_storage_id,\n trailing_unconsumed_partitioned_event_ids).\n last_completion_time (float): DEPRECATED The last time that the sensor was evaluated (UTC).\n last_run_key (str): DEPRECATED The run key of the RunRequest most recently created by this\n sensor. Use the preferred `cursor` attribute instead.\n repository_name (Optional[str]): The name of the repository that the sensor belongs to.\n instance (Optional[DagsterInstance]): The deserialized instance can also be passed in\n directly (primarily useful in testing contexts).\n definitions (Optional[Definitions]): `Definitions` object that the sensor is defined in.\n If needed by the sensor, top-level resource definitions will be pulled from these\n definitions. You can provide either this or `repository_def`.\n\n Example:\n .. 
code-block:: python\n\n from dagster import AssetKey, multi_asset_sensor, MultiAssetSensorEvaluationContext\n\n @multi_asset_sensor(monitored_assets=[AssetKey("asset_1"), AssetKey("asset_2")])\n def the_sensor(context: MultiAssetSensorEvaluationContext):\n ...\n """\n\n def __init__(\n self,\n instance_ref: Optional[InstanceRef],\n last_completion_time: Optional[float],\n last_run_key: Optional[str],\n cursor: Optional[str],\n repository_name: Optional[str],\n repository_def: Optional["RepositoryDefinition"],\n monitored_assets: Union[Sequence[AssetKey], AssetSelection],\n instance: Optional[DagsterInstance] = None,\n resource_defs: Optional[Mapping[str, ResourceDefinition]] = None,\n definitions: Optional["Definitions"] = None,\n ):\n from dagster._core.definitions.definitions_class import Definitions\n from dagster._core.definitions.repository_definition import RepositoryDefinition\n\n self._repository_def = normalize_to_repository(\n check.opt_inst_param(definitions, "definitions", Definitions),\n check.opt_inst_param(repository_def, "repository_def", RepositoryDefinition),\n )\n self._monitored_asset_keys: Sequence[AssetKey]\n if isinstance(monitored_assets, AssetSelection):\n repo_assets = self._repository_def.assets_defs_by_key.values()\n repo_source_assets = self._repository_def.source_assets_by_key.values()\n self._monitored_asset_keys = list(\n monitored_assets.resolve([*repo_assets, *repo_source_assets])\n )\n else:\n self._monitored_asset_keys = monitored_assets\n\n self._assets_by_key: Dict[AssetKey, Optional[AssetsDefinition]] = {}\n self._partitions_def_by_asset_key: Dict[AssetKey, Optional[PartitionsDefinition]] = {}\n for asset_key in self._monitored_asset_keys:\n assets_def = self._repository_def.assets_defs_by_key.get(asset_key)\n self._assets_by_key[asset_key] = assets_def\n\n source_asset_def = self._repository_def.source_assets_by_key.get(asset_key)\n self._partitions_def_by_asset_key[asset_key] = (\n assets_def.partitions_def\n if assets_def\n else source_asset_def.partitions_def if source_asset_def else None\n )\n\n # Cursor object with utility methods for updating and retrieving cursor information.\n # At the end of each tick, must call update_cursor_after_evaluation to update the serialized\n # cursor.\n self._unpacked_cursor = MultiAssetSensorContextCursor(cursor, self)\n self._cursor_advance_state_mutation = MultiAssetSensorCursorAdvances()\n\n self._initial_unconsumed_events_by_id: Dict[int, EventLogRecord] = {}\n self._fetched_initial_unconsumed_events = False\n\n super(MultiAssetSensorEvaluationContext, self).__init__(\n instance_ref=instance_ref,\n last_completion_time=last_completion_time,\n last_run_key=last_run_key,\n cursor=cursor,\n repository_name=repository_name,\n instance=instance,\n repository_def=repository_def,\n resources=resource_defs,\n )\n\n def _cache_initial_unconsumed_events(self) -> None:\n from dagster._core.events import DagsterEventType\n from dagster._core.storage.event_log.base import EventRecordsFilter\n\n # This method caches the initial unconsumed events for each asset key. 
To generate the\n # current unconsumed events, call get_trailing_unconsumed_events instead.\n if self._fetched_initial_unconsumed_events:\n return\n\n for asset_key in self._monitored_asset_keys:\n unconsumed_event_ids = list(\n self._get_cursor(asset_key).trailing_unconsumed_partitioned_event_ids.values()\n )\n if unconsumed_event_ids:\n event_records = self.instance.get_event_records(\n EventRecordsFilter(\n event_type=DagsterEventType.ASSET_MATERIALIZATION,\n storage_ids=unconsumed_event_ids,\n )\n )\n self._initial_unconsumed_events_by_id.update(\n {event_record.storage_id: event_record for event_record in event_records}\n )\n\n self._fetched_initial_unconsumed_events = True\n\n def _get_unconsumed_events_with_ids(\n self, event_ids: Sequence[int]\n ) -> Sequence["EventLogRecord"]:\n self._cache_initial_unconsumed_events()\n unconsumed_events = []\n for event_id in sorted(event_ids):\n event = self._initial_unconsumed_events_by_id.get(event_id)\n unconsumed_events.extend([event] if event else [])\n\n return unconsumed_events\n\n
[docs] @public\n def get_trailing_unconsumed_events(self, asset_key: AssetKey) -> Sequence["EventLogRecord"]:\n """Fetches the unconsumed events for a given asset key. Returns only events\n before the latest consumed event ID for the given asset. To mark an event as consumed,\n pass the event to `advance_cursor`. Returns events in ascending order by storage ID.\n\n Args:\n asset_key (AssetKey): The asset key to get unconsumed events for.\n\n Returns:\n Sequence[EventLogRecord]: The unconsumed events for the given asset key.\n """\n check.inst_param(asset_key, "asset_key", AssetKey)\n\n return self._get_unconsumed_events_with_ids(\n list(self._get_cursor(asset_key).trailing_unconsumed_partitioned_event_ids.values())\n )
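# --- Added usage sketch (not part of the original module) --------------------
# A hedged example of get_trailing_unconsumed_events: revisit older
# materializations that were previously left unconsumed, request runs for them,
# and then consume them. The sensor name, asset key, and job name are
# hypothetical.
from dagster import AssetKey, RunRequest, multi_asset_sensor


@multi_asset_sensor(monitored_assets=[AssetKey("asset_a")], job_name="downstream_job")
def catch_up_sensor(context):
    for record in context.get_trailing_unconsumed_events(AssetKey("asset_a")):
        yield RunRequest(run_key=str(record.storage_id))
        # Mark the event as consumed so it is not returned again next tick.
        context.advance_cursor({AssetKey("asset_a"): record})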
\n\n def _get_partitions_after_cursor(self, asset_key: AssetKey) -> Sequence[str]:\n asset_key = check.inst_param(asset_key, "asset_key", AssetKey)\n partition_key = self._get_cursor(asset_key).latest_consumed_event_partition\n\n partitions_def = self._partitions_def_by_asset_key.get(asset_key)\n\n if not isinstance(partitions_def, PartitionsDefinition):\n raise DagsterInvalidInvocationError(f"No partitions defined for asset key {asset_key}")\n\n partitions_to_fetch = list(\n partitions_def.get_partition_keys(dynamic_partitions_store=self.instance)\n )\n\n if partition_key is not None:\n # Return partitions after the cursor partition, not including the cursor partition\n partitions_to_fetch = partitions_to_fetch[\n partitions_to_fetch.index(partition_key) + 1 :\n ]\n return partitions_to_fetch\n\n def update_cursor_after_evaluation(self) -> None:\n """Updates the cursor after the sensor evaluation function has been called. This method\n should be called at most once per evaluation.\n """\n new_cursor = self._cursor_advance_state_mutation.get_cursor_with_advances(\n self, self._unpacked_cursor\n )\n\n if new_cursor is not None:\n # Cursor was not updated by this context object, so we do not need to update it\n self._cursor = new_cursor\n self._unpacked_cursor = MultiAssetSensorContextCursor(new_cursor, self)\n self._cursor_advance_state_mutation = MultiAssetSensorCursorAdvances()\n self._fetched_initial_unconsumed_events = False\n\n
[docs] @public\n def latest_materialization_records_by_key(\n self,\n asset_keys: Optional[Sequence[AssetKey]] = None,\n ) -> Mapping[AssetKey, Optional["EventLogRecord"]]:\n """Fetches the most recent materialization event record for each asset in asset_keys.\n Only fetches events after the latest consumed event ID for the given asset key.\n\n Args:\n asset_keys (Optional[Sequence[AssetKey]]): list of asset keys to fetch events for. If\n not specified, the latest materialization will be fetched for all assets the\n multi_asset_sensor monitors.\n\n Returns: Mapping of AssetKey to EventLogRecord where the EventLogRecord is the latest\n materialization event for the asset. If there is no materialization event for the asset,\n the value in the mapping will be None.\n """\n # Do not evaluate unconsumed events, only events newer than the cursor\n # if there are no new events after the cursor, the cursor points to the most\n # recent event.\n\n if asset_keys is None:\n asset_keys = self._monitored_asset_keys\n else:\n asset_keys = check.opt_sequence_param(asset_keys, "asset_keys", of_type=AssetKey)\n\n asset_records = self.instance.get_asset_records(asset_keys)\n\n asset_event_records: Dict[AssetKey, Optional[EventLogRecord]] = {\n asset_key: None for asset_key in asset_keys\n }\n for record in asset_records:\n if (\n record.asset_entry.last_materialization_record\n and record.asset_entry.last_materialization_record.storage_id\n > (self._get_cursor(record.asset_entry.asset_key).latest_consumed_event_id or 0)\n ):\n asset_event_records[record.asset_entry.asset_key] = (\n record.asset_entry.last_materialization_record\n )\n\n return asset_event_records
\n\n
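# --- Added usage sketch (not part of the original module) --------------------
# A hedged example of latest_materialization_records_by_key together with
# advance_cursor: request a run only when "asset_a" has a new, unconsumed
# materialization. The sensor name, asset keys, and job name are hypothetical.
from dagster import AssetKey, RunRequest, multi_asset_sensor


@multi_asset_sensor(
    monitored_assets=[AssetKey("asset_a"), AssetKey("asset_b")],
    job_name="downstream_job",
)
def asset_a_updated_sensor(context):
    records = context.latest_materialization_records_by_key()
    record_for_a = records.get(AssetKey("asset_a"))
    if record_for_a is not None:
        yield RunRequest(run_key=str(record_for_a.storage_id))
        # Only asset_a's cursor advances; any new event for asset_b stays unconsumed.
        context.advance_cursor({AssetKey("asset_a"): record_for_a})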
[docs] @public\n def materialization_records_for_key(\n self, asset_key: AssetKey, limit: Optional[int] = None\n ) -> Iterable["EventLogRecord"]:\n """Fetches asset materialization event records for asset_key, with the earliest event first.\n\n Only fetches events after the latest consumed event ID for the given asset key.\n\n Args:\n asset_key (AssetKey): The asset to fetch materialization events for\n limit (Optional[int]): The number of events to fetch\n """\n from dagster._core.events import DagsterEventType\n from dagster._core.storage.event_log.base import EventRecordsFilter\n\n asset_key = check.inst_param(asset_key, "asset_key", AssetKey)\n if asset_key not in self._assets_by_key:\n raise DagsterInvalidInvocationError(f"Asset key {asset_key} not monitored by sensor.")\n\n events = list(\n self.instance.get_event_records(\n EventRecordsFilter(\n event_type=DagsterEventType.ASSET_MATERIALIZATION,\n asset_key=asset_key,\n after_cursor=self._get_cursor(asset_key).latest_consumed_event_id,\n ),\n ascending=True,\n limit=limit,\n )\n )\n\n return events
\n\n def _get_cursor(self, asset_key: AssetKey) -> MultiAssetSensorAssetCursorComponent:\n """Returns the MultiAssetSensorAssetCursorComponent for the asset key.\n\n For more information, view the docstring for the MultiAssetSensorAssetCursorComponent class.\n """\n check.inst_param(asset_key, "asset_key", AssetKey)\n\n return self._unpacked_cursor.get_cursor_for_asset(asset_key)\n\n
[docs] @public\n def latest_materialization_records_by_partition(\n self,\n asset_key: AssetKey,\n after_cursor_partition: Optional[bool] = False,\n ) -> Mapping[str, "EventLogRecord"]:\n """Given an asset, returns a mapping of partition key to the latest materialization event\n for that partition. Fetches only materializations that have not been marked as "consumed"\n via a call to `advance_cursor`.\n\n Args:\n asset_key (AssetKey): The asset to fetch events for.\n after_cursor_partition (Optional[bool]): If True, only materializations with partitions\n after the cursor's current partition will be returned. By default, set to False.\n\n Returns:\n Mapping[str, EventLogRecord]:\n Mapping of AssetKey to a mapping of partitions to EventLogRecords where the\n EventLogRecord is the most recent materialization event for the partition.\n The mapping preserves the order that the materializations occurred.\n\n Example:\n .. code-block:: python\n\n @asset(partitions_def=DailyPartitionsDefinition("2022-07-01"))\n def july_asset():\n return 1\n\n @multi_asset_sensor(asset_keys=[july_asset.key])\n def my_sensor(context):\n context.latest_materialization_records_by_partition(july_asset.key)\n\n # After materializing july_asset for 2022-07-05, latest_materialization_by_partition\n # returns {"2022-07-05": EventLogRecord(...)}\n\n """\n from dagster._core.events import DagsterEventType\n from dagster._core.storage.event_log.base import EventLogRecord, EventRecordsFilter\n\n asset_key = check.inst_param(asset_key, "asset_key", AssetKey)\n\n if asset_key not in self._assets_by_key:\n raise DagsterInvalidInvocationError(\n f"Asset key {asset_key} not monitored in sensor definition"\n )\n\n partitions_def = self._partitions_def_by_asset_key.get(asset_key)\n if not isinstance(partitions_def, PartitionsDefinition):\n raise DagsterInvariantViolationError(\n "Cannot get latest materialization by partition for assets with no partitions"\n )\n\n partitions_to_fetch = (\n self._get_partitions_after_cursor(asset_key)\n if after_cursor_partition\n else list(partitions_def.get_partition_keys(dynamic_partitions_store=self.instance))\n )\n\n # Retain ordering of materializations\n materialization_by_partition: Dict[str, EventLogRecord] = OrderedDict()\n\n # Add unconsumed events to the materialization by partition dictionary\n # These events came before the cursor, so should be inserted in storage ID ascending order\n for unconsumed_event in sorted(\n self._get_unconsumed_events_with_ids(\n list(self._get_cursor(asset_key).trailing_unconsumed_partitioned_event_ids.values())\n )\n ):\n partition = unconsumed_event.partition_key\n if isinstance(partition, str) and partition in partitions_to_fetch:\n if partition in materialization_by_partition:\n # Remove partition to ensure materialization_by_partition preserves\n # the order of materializations\n materialization_by_partition.pop(partition)\n # Add partition and materialization to the end of the OrderedDict\n materialization_by_partition[partition] = unconsumed_event\n\n partition_materializations = self.instance.get_event_records(\n EventRecordsFilter(\n event_type=DagsterEventType.ASSET_MATERIALIZATION,\n asset_key=asset_key,\n asset_partitions=partitions_to_fetch,\n after_cursor=self._get_cursor(asset_key).latest_consumed_event_id,\n ),\n ascending=True,\n )\n for materialization in partition_materializations:\n partition = materialization.partition_key\n\n if isinstance(partition, str):\n if partition in materialization_by_partition:\n # Remove partition to ensure 
materialization_by_partition preserves\n # the order of materializations\n materialization_by_partition.pop(partition)\n # Add partition and materialization to the end of the OrderedDict\n materialization_by_partition[partition] = materialization\n\n return materialization_by_partition
\n\n
[docs] @public\n def latest_materialization_records_by_partition_and_asset(\n self,\n ) -> Mapping[str, Mapping[AssetKey, "EventLogRecord"]]:\n """Finds the most recent unconsumed materialization for each partition for each asset\n monitored by the sensor. Aggregates all materializations into a mapping of partition key\n to a mapping of asset key to the materialization event for that partition.\n\n For example, if the sensor monitors two partitioned assets A and B that are materialized\n for partition_x after the cursor, this function returns:\n\n .. code-block:: python\n\n {\n "partition_x": {asset_a.key: EventLogRecord(...), asset_b.key: EventLogRecord(...)}\n }\n\n This method can only be called when all monitored assets are partitioned and share\n the same partition definition.\n """\n partitions_defs = list(self._partitions_def_by_asset_key.values())\n if not partitions_defs or not all(x == partitions_defs[0] for x in partitions_defs):\n raise DagsterInvalidInvocationError(\n "All assets must be partitioned and share the same partitions definition"\n )\n\n asset_and_materialization_tuple_by_partition: Dict[\n str, Dict[AssetKey, "EventLogRecord"]\n ] = defaultdict(dict)\n\n for asset_key in self._monitored_asset_keys:\n materialization_by_partition = self.latest_materialization_records_by_partition(\n asset_key\n )\n for partition, materialization in materialization_by_partition.items():\n asset_and_materialization_tuple_by_partition[partition][asset_key] = materialization\n\n return asset_and_materialization_tuple_by_partition
\n\n
[docs] @public\n def get_cursor_partition(self, asset_key: Optional[AssetKey]) -> Optional[str]:\n """A utility method to get the current partition the cursor is on."""\n asset_key = check.opt_inst_param(asset_key, "asset_key", AssetKey)\n if asset_key not in self._monitored_asset_keys:\n raise DagsterInvalidInvocationError(\n "Provided asset key must correspond to a provided asset"\n )\n if asset_key:\n partition_key = self._get_cursor(asset_key).latest_consumed_event_partition\n elif self._monitored_asset_keys is not None and len(self._monitored_asset_keys) == 1:\n partition_key = self._get_cursor(\n self._monitored_asset_keys[0]\n ).latest_consumed_event_partition\n else:\n raise DagsterInvalidInvocationError(\n "Asset key must be provided when multiple assets are defined"\n )\n\n return partition_key
\n\n
[docs] @public\n def all_partitions_materialized(\n self, asset_key: AssetKey, partitions: Optional[Sequence[str]] = None\n ) -> bool:\n """A utility method to check if a provided list of partitions have been materialized\n for a particular asset. This method ignores the cursor and checks all materializations\n for the asset.\n\n Args:\n asset_key (AssetKey): The asset to check partitions for.\n partitions (Optional[Sequence[str]]): A list of partitions to check. If not provided,\n all partitions for the asset will be checked.\n\n Returns:\n bool: True if all selected partitions have been materialized, False otherwise.\n """\n check.inst_param(asset_key, "asset_key", AssetKey)\n\n if partitions is not None:\n check.sequence_param(partitions, "partitions", of_type=str)\n if len(partitions) == 0:\n raise DagsterInvalidInvocationError("Must provide at least one partition in list")\n\n materialized_partitions = self.instance.get_materialized_partitions(asset_key)\n if not partitions:\n if asset_key not in self._monitored_asset_keys:\n raise DagsterInvariantViolationError(\n f"Asset key {asset_key} not monitored by sensor"\n )\n\n partitions_def = self._partitions_def_by_asset_key.get(asset_key)\n if not partitions_def:\n raise DagsterInvariantViolationError(\n f"Asset key {asset_key} is not partitioned. Cannot check if partitions have"\n " been materialized."\n )\n partitions = partitions_def.get_partition_keys(dynamic_partitions_store=self.instance)\n\n return all([partition in materialized_partitions for partition in partitions])
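# --- Added usage sketch (not part of the original module) --------------------
# A hedged example of all_partitions_materialized: only request a roll-up run
# once every partition of "daily_asset" has been materialized at least once.
# The sensor name, asset key, and job name are hypothetical.
from dagster import AssetKey, RunRequest, SkipReason, multi_asset_sensor


@multi_asset_sensor(monitored_assets=[AssetKey("daily_asset")], job_name="rollup_job")
def rollup_when_complete_sensor(context):
    if context.all_partitions_materialized(AssetKey("daily_asset")):
        # run_key=None allows a run to be requested on each tick once complete
        return RunRequest(run_key=None)
    return SkipReason("daily_asset still has unmaterialized partitions")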
\n\n def _get_asset(self, asset_key: AssetKey, fn_name: str) -> AssetsDefinition:\n from dagster._core.definitions.repository_definition import RepositoryDefinition\n\n repo_def = cast(RepositoryDefinition, self._repository_def)\n repository_assets = repo_def.assets_defs_by_key\n if asset_key in self._assets_by_key:\n asset_def = self._assets_by_key[asset_key]\n if asset_def is None:\n raise DagsterInvalidInvocationError(\n f"Asset key {asset_key} does not have an AssetDefinition in this repository"\n f" (likely because it is a SourceAsset). fn context.{fn_name} can only be"\n " called for assets with AssetDefinitions in the repository."\n )\n else:\n return asset_def\n elif asset_key in repository_assets:\n return repository_assets[asset_key]\n else:\n raise DagsterInvalidInvocationError(\n f"Asset key {asset_key} not monitored in sensor and does not exist in target jobs"\n )\n\n
[docs] @public\n def get_downstream_partition_keys(\n self, partition_key: str, from_asset_key: AssetKey, to_asset_key: AssetKey\n ) -> Sequence[str]:\n """Converts a partition key from one asset to the corresponding partition key in a downstream\n asset. Uses the existing partition mapping between the upstream asset and the downstream\n asset if it exists, otherwise, uses the default partition mapping.\n\n Args:\n partition_key (str): The partition key to convert.\n from_asset_key (AssetKey): The asset key of the upstream asset, which the provided\n partition key belongs to.\n to_asset_key (AssetKey): The asset key of the downstream asset. The provided partition\n key will be mapped to partitions within this asset.\n\n Returns:\n Sequence[str]: A list of the corresponding downstream partitions in to_asset_key that\n partition_key maps to.\n """\n partition_key = check.str_param(partition_key, "partition_key")\n\n to_asset = self._get_asset(to_asset_key, fn_name="get_downstream_partition_keys")\n from_asset = self._get_asset(from_asset_key, fn_name="get_downstream_partition_keys")\n\n to_partitions_def = to_asset.partitions_def\n\n if not isinstance(to_partitions_def, PartitionsDefinition):\n raise DagsterInvalidInvocationError(\n f"Asset key {to_asset_key} is not partitioned. Cannot get partition keys."\n )\n if not isinstance(from_asset.partitions_def, PartitionsDefinition):\n raise DagsterInvalidInvocationError(\n f"Asset key {from_asset_key} is not partitioned. Cannot get partition keys."\n )\n\n partition_mapping = to_asset.infer_partition_mapping(\n from_asset_key, from_asset.partitions_def\n )\n downstream_partition_key_subset = (\n partition_mapping.get_downstream_partitions_for_partitions(\n from_asset.partitions_def.empty_subset().with_partition_keys([partition_key]),\n downstream_partitions_def=to_partitions_def,\n dynamic_partitions_store=self.instance,\n )\n )\n\n return list(downstream_partition_key_subset.get_partition_keys())
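# --- Added usage sketch (not part of the original module) --------------------
# A hedged example of get_downstream_partition_keys: map each newly
# materialized partition of an upstream daily asset onto the partition(s) of a
# downstream weekly asset and request partitioned runs. The asset keys, job
# name, and partition cadence are hypothetical.
from dagster import AssetKey, RunRequest, multi_asset_sensor


@multi_asset_sensor(monitored_assets=[AssetKey("daily_asset")], job_name="weekly_job")
def fan_out_to_weekly_sensor(context):
    by_partition = context.latest_materialization_records_by_partition(AssetKey("daily_asset"))
    for partition_key, record in by_partition.items():
        for downstream_key in context.get_downstream_partition_keys(
            partition_key,
            from_asset_key=AssetKey("daily_asset"),
            to_asset_key=AssetKey("weekly_asset"),
        ):
            yield RunRequest(
                run_key=f"{downstream_key}:{record.storage_id}",
                partition_key=downstream_key,
            )
        context.advance_cursor({AssetKey("daily_asset"): record})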
\n\n
[docs] @public\n def advance_cursor(\n self, materialization_records_by_key: Mapping[AssetKey, Optional["EventLogRecord"]]\n ):\n """Marks the provided materialization records as having been consumed by the sensor.\n\n At the end of the tick, the cursor will be updated to advance past all materializations\n records provided via `advance_cursor`. In the next tick, records that have been consumed\n will no longer be returned.\n\n Passing a partitioned materialization record into this function will mark prior materializations\n with the same asset key and partition as having been consumed.\n\n Args:\n materialization_records_by_key (Mapping[AssetKey, Optional[EventLogRecord]]): Mapping of\n AssetKeys to EventLogRecord or None. If an EventLogRecord is provided, the cursor\n for the AssetKey will be updated and future calls to fetch asset materialization events\n will not fetch this event again. If None is provided, the cursor for the AssetKey\n will not be updated.\n """\n self._cursor_advance_state_mutation.add_advanced_records(materialization_records_by_key)\n self._cursor_updated = True
\n\n
[docs] @public\n def advance_all_cursors(self):\n """Updates the cursor to the most recent materialization event for all assets monitored by\n the multi_asset_sensor.\n\n Marks all materialization events as consumed by the sensor, including unconsumed events.\n """\n materializations_by_key = self.latest_materialization_records_by_key()\n\n self._cursor_advance_state_mutation.add_advanced_records(materializations_by_key)\n self._cursor_advance_state_mutation.advance_all_cursors_called = True\n self._cursor_updated = True
\n\n @public\n @property\n def assets_defs_by_key(self) -> Mapping[AssetKey, Optional[AssetsDefinition]]:\n """Mapping[AssetKey, Optional[AssetsDefinition]]: A mapping from AssetKey to the\n AssetsDefinition object which produces it. If a given asset is monitored by this sensor, but\n is not produced within the same code location as this sensor, then the value will be None.\n """\n return self._assets_by_key\n\n @public\n @property\n def asset_keys(self) -> Sequence[AssetKey]:\n """Sequence[AssetKey]: The asset keys which are monitored by this sensor."""\n return self._monitored_asset_keys
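# --- Illustrative usage sketch (not part of the module source) ----------------
# A minimal multi_asset_sensor that exercises the context methods documented
# above: it reads the latest materialization record per monitored asset and
# advances the cursor for the records it consumes. The asset keys and job name
# are hypothetical placeholders for this sketch.
from dagster import AssetKey, RunRequest, define_asset_job, multi_asset_sensor

downstream_job = define_asset_job("downstream_job", selection="*")

@multi_asset_sensor(
    monitored_assets=[AssetKey("asset_a"), AssetKey("asset_b")],
    job=downstream_job,
)
def my_multi_asset_sensor(context):
    # Latest unconsumed materialization record (or None) for each monitored asset.
    records = context.latest_materialization_records_by_key()
    if all(records.values()):
        # Mark the consumed records so the next tick does not return them again.
        context.advance_cursor(records)
        return RunRequest()
# -------------------------------------------------------------------------------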
\n\n\nclass MultiAssetSensorCursorAdvances:\n _advanced_record_ids_by_key: Dict[AssetKey, Set[int]]\n _partition_key_by_record_id: Dict[int, Optional[str]]\n advance_all_cursors_called: bool\n\n def __init__(self):\n self._advanced_record_ids_by_key = defaultdict(set)\n self._partition_key_by_record_id = {}\n self.advance_all_cursors_called = False\n\n def add_advanced_records(\n self, materialization_records_by_key: Mapping[AssetKey, Optional["EventLogRecord"]]\n ):\n for asset_key, materialization in materialization_records_by_key.items():\n if materialization:\n self._advanced_record_ids_by_key[asset_key].add(materialization.storage_id)\n\n self._partition_key_by_record_id[materialization.storage_id] = (\n materialization.partition_key\n )\n\n def get_cursor_with_advances(\n self,\n context: MultiAssetSensorEvaluationContext,\n initial_cursor: MultiAssetSensorContextCursor,\n ) -> Optional[str]:\n """Given the multi asset sensor context and the cursor at the start of the tick,\n returns the cursor that should be used in the next tick.\n\n If the cursor has not been updated, returns None\n """\n if len(self._advanced_record_ids_by_key) == 0:\n # No events marked as advanced\n return None\n\n return json.dumps(\n {\n str(asset_key): self.get_asset_cursor_with_advances(\n asset_key, context, initial_cursor\n )\n for asset_key in context.asset_keys\n }\n )\n\n def get_asset_cursor_with_advances(\n self,\n asset_key: AssetKey,\n context: MultiAssetSensorEvaluationContext,\n initial_cursor: MultiAssetSensorContextCursor,\n ) -> MultiAssetSensorAssetCursorComponent:\n from dagster._core.events import DagsterEventType\n from dagster._core.storage.event_log.base import EventRecordsFilter\n\n advanced_records: Set[int] = self._advanced_record_ids_by_key.get(asset_key, set())\n if len(advanced_records) == 0:\n # No events marked as advanced for this asset key\n return initial_cursor.get_cursor_for_asset(asset_key)\n\n initial_asset_cursor = initial_cursor.get_cursor_for_asset(asset_key)\n\n latest_consumed_event_id_at_tick_start = initial_asset_cursor.latest_consumed_event_id\n\n greatest_consumed_event_id_in_tick = max(advanced_records)\n latest_consumed_partition_in_tick = self._partition_key_by_record_id[\n greatest_consumed_event_id_in_tick\n ]\n latest_unconsumed_record_by_partition: Dict[str, int] = {}\n\n if not self.advance_all_cursors_called:\n latest_unconsumed_record_by_partition = (\n initial_asset_cursor.trailing_unconsumed_partitioned_event_ids\n )\n unconsumed_events = list(context.get_trailing_unconsumed_events(asset_key)) + list(\n context.instance.get_event_records(\n EventRecordsFilter(\n event_type=DagsterEventType.ASSET_MATERIALIZATION,\n asset_key=asset_key,\n after_cursor=latest_consumed_event_id_at_tick_start,\n before_cursor=greatest_consumed_event_id_in_tick,\n ),\n ascending=True,\n )\n if greatest_consumed_event_id_in_tick\n > (latest_consumed_event_id_at_tick_start or 0)\n else []\n )\n\n # Iterate through events in ascending order, storing the latest unconsumed\n # event for each partition. 
If an advanced event exists for a partition, clear\n # the prior unconsumed event for that partition.\n for event in unconsumed_events:\n partition = event.partition_key\n if partition is not None: # Ignore unpartitioned events\n if event.storage_id not in advanced_records:\n latest_unconsumed_record_by_partition[partition] = event.storage_id\n elif partition in latest_unconsumed_record_by_partition:\n latest_unconsumed_record_by_partition.pop(partition)\n\n if (\n latest_consumed_partition_in_tick is not None\n and latest_consumed_partition_in_tick in latest_unconsumed_record_by_partition\n ):\n latest_unconsumed_record_by_partition.pop(latest_consumed_partition_in_tick)\n\n if len(latest_unconsumed_record_by_partition.keys()) >= MAX_NUM_UNCONSUMED_EVENTS:\n raise DagsterInvariantViolationError(f"""\n You have reached the maximum number of trailing unconsumed events\n ({MAX_NUM_UNCONSUMED_EVENTS}) for asset {asset_key} and no more events can be\n added. You can access the unconsumed events by calling the\n `get_trailing_unconsumed_events` method on the sensor context, and\n mark events as consumed by passing them to `advance_cursor`.\n\n Otherwise, you can clear all unconsumed events and reset the cursor to the latest\n materialization for each asset by calling `advance_all_cursors`.\n """)\n\n return MultiAssetSensorAssetCursorComponent(\n latest_consumed_event_partition=(\n latest_consumed_partition_in_tick\n if greatest_consumed_event_id_in_tick\n > (latest_consumed_event_id_at_tick_start or 0)\n else initial_asset_cursor.latest_consumed_event_partition\n ),\n latest_consumed_event_id=(\n greatest_consumed_event_id_in_tick\n if greatest_consumed_event_id_in_tick\n > (latest_consumed_event_id_at_tick_start or 0)\n else latest_consumed_event_id_at_tick_start\n ),\n trailing_unconsumed_partitioned_event_ids=latest_unconsumed_record_by_partition,\n )\n\n\ndef get_cursor_from_latest_materializations(\n asset_keys: Sequence[AssetKey], instance: DagsterInstance\n) -> str:\n from dagster._core.events import DagsterEventType\n from dagster._core.storage.event_log.base import EventRecordsFilter\n\n cursor_dict: Dict[str, MultiAssetSensorAssetCursorComponent] = {}\n\n for asset_key in asset_keys:\n materializations = instance.get_event_records(\n EventRecordsFilter(\n DagsterEventType.ASSET_MATERIALIZATION,\n asset_key=asset_key,\n ),\n limit=1,\n )\n if materializations:\n last_materialization = list(materializations)[-1]\n\n cursor_dict[str(asset_key)] = MultiAssetSensorAssetCursorComponent(\n last_materialization.partition_key,\n last_materialization.storage_id,\n {},\n )\n\n cursor_str = json.dumps(cursor_dict)\n return cursor_str\n\n\n
[docs]@experimental\ndef build_multi_asset_sensor_context(\n *,\n monitored_assets: Union[Sequence[AssetKey], AssetSelection],\n repository_def: Optional["RepositoryDefinition"] = None,\n instance: Optional[DagsterInstance] = None,\n cursor: Optional[str] = None,\n repository_name: Optional[str] = None,\n cursor_from_latest_materializations: bool = False,\n resources: Optional[Mapping[str, object]] = None,\n definitions: Optional["Definitions"] = None,\n) -> MultiAssetSensorEvaluationContext:\n """Builds multi asset sensor execution context for testing purposes using the provided parameters.\n\n This function can be used to provide a context to the invocation of a multi asset sensor definition. If\n provided, the dagster instance must be persistent; DagsterInstance.ephemeral() will result in an\n error.\n\n Args:\n monitored_assets (Union[Sequence[AssetKey], AssetSelection]): The assets monitored\n by the sensor. If an AssetSelection object is provided, it will only apply to assets\n within the Definitions that this sensor is part of.\n repository_def (RepositoryDefinition): `RepositoryDefinition` object that\n the sensor is defined in. Must provide `definitions` if this is not provided.\n instance (Optional[DagsterInstance]): The dagster instance configured to run the sensor.\n cursor (Optional[str]): A string cursor to provide to the evaluation of the sensor. Must be\n a dictionary of asset key strings to ints that has been converted to a json string\n repository_name (Optional[str]): The name of the repository that the sensor belongs to.\n cursor_from_latest_materializations (bool): If True, the cursor will be set to the latest\n materialization for each monitored asset. By default, set to False.\n resources (Optional[Mapping[str, object]]): The resource definitions\n to provide to the sensor.\n definitions (Optional[Definitions]): `Definitions` object that the sensor is defined in.\n Must provide `repository_def` if this is not provided.\n\n Examples:\n .. 
code-block:: python\n\n with instance_for_test() as instance:\n context = build_multi_asset_sensor_context(\n monitored_assets=[AssetKey("asset_1"), AssetKey("asset_2")],\n instance=instance,\n )\n my_asset_sensor(context)\n\n """\n from dagster._core.definitions import RepositoryDefinition\n from dagster._core.definitions.definitions_class import Definitions\n from dagster._core.execution.build_resources import wrap_resources_for_execution\n\n check.opt_inst_param(instance, "instance", DagsterInstance)\n check.opt_str_param(cursor, "cursor")\n check.opt_str_param(repository_name, "repository_name")\n repository_def = normalize_to_repository(\n check.opt_inst_param(definitions, "definitions", Definitions),\n check.opt_inst_param(repository_def, "repository_def", RepositoryDefinition),\n )\n\n check.bool_param(cursor_from_latest_materializations, "cursor_from_latest_materializations")\n\n if cursor_from_latest_materializations:\n if cursor:\n raise DagsterInvalidInvocationError(\n "Cannot provide both cursor and cursor_from_latest_materializations objects."\n " Dagster will override the provided cursor based on the"\n " cursor_from_latest_materializations object."\n )\n if not instance:\n raise DagsterInvalidInvocationError(\n "Cannot provide cursor_from_latest_materializations object without a Dagster"\n " instance."\n )\n\n asset_keys: Sequence[AssetKey]\n if isinstance(monitored_assets, AssetSelection):\n asset_keys = cast(\n List[AssetKey],\n list(\n monitored_assets.resolve(list(set(repository_def.assets_defs_by_key.values())))\n ),\n )\n else:\n asset_keys = monitored_assets\n\n cursor = get_cursor_from_latest_materializations(asset_keys, instance)\n\n return MultiAssetSensorEvaluationContext(\n instance_ref=None,\n last_completion_time=None,\n last_run_key=None,\n cursor=cursor,\n repository_name=repository_name,\n instance=instance,\n monitored_assets=monitored_assets,\n repository_def=repository_def,\n resource_defs=wrap_resources_for_execution(resources),\n )
\n\n\nAssetMaterializationFunctionReturn = Union[\n Iterator[Union[RunRequest, SkipReason, SensorResult]],\n Sequence[RunRequest],\n RunRequest,\n SkipReason,\n None,\n SensorResult,\n]\nAssetMaterializationFunction = Callable[\n ...,\n AssetMaterializationFunctionReturn,\n]\n\nMultiAssetMaterializationFunction = Callable[\n ...,\n AssetMaterializationFunctionReturn,\n]\n\n\n
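# --- Illustrative test sketch (not part of the module source) -----------------
# Shows build_multi_asset_sensor_context driving a direct sensor invocation in a
# test, mirroring the docstring example above. The asset, job, and sensor names
# are hypothetical, and the instance_for_test import path is an assumption made
# for this sketch.
from dagster import (
    AssetKey,
    Definitions,
    RunRequest,
    asset,
    build_multi_asset_sensor_context,
    define_asset_job,
    multi_asset_sensor,
)
from dagster._core.test_utils import instance_for_test  # assumed helper location

@asset
def asset_a(): ...

@asset
def asset_b(): ...

downstream_job = define_asset_job("downstream_job", selection="asset_b")

@multi_asset_sensor(monitored_assets=[AssetKey("asset_a")], job=downstream_job)
def my_multi_asset_sensor(context):
    records = context.latest_materialization_records_by_key()
    if all(records.values()):
        context.advance_all_cursors()
        return RunRequest()

def test_my_multi_asset_sensor() -> None:
    with instance_for_test() as instance:
        context = build_multi_asset_sensor_context(
            monitored_assets=[AssetKey("asset_a")],
            instance=instance,
            definitions=Definitions(
                assets=[asset_a, asset_b],
                jobs=[downstream_job],
                sensors=[my_multi_asset_sensor],
            ),
        )
        # No materializations of asset_a have been recorded, so no run is requested.
        assert my_multi_asset_sensor(context) is None
# -------------------------------------------------------------------------------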
[docs]@experimental\nclass MultiAssetSensorDefinition(SensorDefinition):\n """Define an asset sensor that initiates a set of runs based on the materialization of a list of\n assets.\n\n Users should not instantiate this object directly. To construct a\n `MultiAssetSensorDefinition`, use :py:func:`dagster.\n multi_asset_sensor`.\n\n Args:\n name (str): The name of the sensor to create.\n asset_keys (Sequence[AssetKey]): The asset_keys this sensor monitors.\n asset_materialization_fn (Callable[[MultiAssetSensorEvaluationContext], Union[Iterator[Union[RunRequest, SkipReason]], RunRequest, SkipReason]]): The core\n evaluation function for the sensor, which is run at an interval to determine whether a\n run should be launched or not. Takes a :py:class:`~dagster.MultiAssetSensorEvaluationContext`.\n\n This function must return a generator, which must yield either a single SkipReason\n or one or more RunRequest objects.\n minimum_interval_seconds (Optional[int]): The minimum number of seconds that will elapse\n between sensor evaluations.\n description (Optional[str]): A human-readable description of the sensor.\n job (Optional[Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]]): The job\n object to target with this sensor.\n jobs (Optional[Sequence[Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]]]):\n (experimental) A list of jobs to be executed when the sensor fires.\n default_status (DefaultSensorStatus): Whether the sensor starts as running or not. The default\n status can be overridden from the Dagster UI or via the GraphQL API.\n request_assets (Optional[AssetSelection]): (Experimental) an asset selection to launch a run\n for if the sensor condition is met. This can be provided instead of specifying a job.\n """\n\n def __init__(\n self,\n name: str,\n monitored_assets: Union[Sequence[AssetKey], AssetSelection],\n job_name: Optional[str],\n asset_materialization_fn: MultiAssetMaterializationFunction,\n minimum_interval_seconds: Optional[int] = None,\n description: Optional[str] = None,\n job: Optional[ExecutableDefinition] = None,\n jobs: Optional[Sequence[ExecutableDefinition]] = None,\n default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,\n request_assets: Optional[AssetSelection] = None,\n required_resource_keys: Optional[Set[str]] = None,\n ):\n resource_arg_names: Set[str] = {\n arg.name for arg in get_resource_args(asset_materialization_fn)\n }\n\n combined_required_resource_keys = (\n check.opt_set_param(required_resource_keys, "required_resource_keys", of_type=str)\n | resource_arg_names\n )\n\n def _wrap_asset_fn(materialization_fn):\n def _fn(context):\n def _check_cursor_not_set(sensor_result: SensorResult):\n if sensor_result.cursor:\n raise DagsterInvariantViolationError(\n "Cannot set cursor in a multi_asset_sensor. 
Cursor is set automatically"\n " based on the latest materialization for each monitored asset."\n )\n\n resource_args_populated = validate_and_get_resource_dict(\n context.resources, name, resource_arg_names\n )\n\n with MultiAssetSensorEvaluationContext(\n instance_ref=context.instance_ref,\n last_completion_time=context.last_completion_time,\n last_run_key=context.last_run_key,\n cursor=context.cursor,\n repository_name=context.repository_def.name,\n repository_def=context.repository_def,\n monitored_assets=monitored_assets,\n instance=context.instance,\n resource_defs=context.resource_defs,\n ) as multi_asset_sensor_context:\n context_param_name = get_context_param_name(materialization_fn)\n context_param = (\n {context_param_name: multi_asset_sensor_context}\n if context_param_name\n else {}\n )\n result = materialization_fn(\n **context_param,\n **resource_args_populated,\n )\n if result is None:\n return\n\n # because the materialization_fn can yield results (see _wrapped_fn in multi_asset_sensor decorator),\n # even if you return None in a sensor, it will still cause in inspect.isgenerator(result) to be True.\n # So keep track to see if we actually return any values and should update the cursor\n runs_yielded = False\n if inspect.isgenerator(result) or isinstance(result, list):\n for item in result:\n if isinstance(item, RunRequest):\n runs_yielded = True\n if isinstance(item, SensorResult):\n raise DagsterInvariantViolationError(\n "Cannot yield a SensorResult from a multi_asset_sensor. Instead"\n " return the SensorResult."\n )\n yield item\n elif isinstance(result, RunRequest):\n runs_yielded = True\n yield result\n elif isinstance(result, SkipReason):\n # if result is a SkipReason, we don't update the cursor, so don't set runs_yielded = True\n yield result\n elif isinstance(result, SensorResult):\n _check_cursor_not_set(result)\n if result.run_requests:\n runs_yielded = True\n yield result\n\n if runs_yielded and not multi_asset_sensor_context.cursor_updated:\n raise DagsterInvalidDefinitionError(\n "Asset materializations have been handled in this sensor, but the cursor"\n " was not updated. This means the same materialization events will be"\n " handled in the next sensor tick. 
Use context.advance_cursor or"\n " context.advance_all_cursors to update the cursor."\n )\n\n multi_asset_sensor_context.update_cursor_after_evaluation()\n context.update_cursor(multi_asset_sensor_context.cursor)\n\n return _fn\n\n self._raw_asset_materialization_fn = asset_materialization_fn\n\n super(MultiAssetSensorDefinition, self).__init__(\n name=check_valid_name(name),\n job_name=job_name,\n evaluation_fn=_wrap_asset_fn(\n check.callable_param(asset_materialization_fn, "asset_materialization_fn")\n ),\n minimum_interval_seconds=minimum_interval_seconds,\n description=description,\n job=job,\n jobs=jobs,\n default_status=default_status,\n asset_selection=request_assets,\n required_resource_keys=combined_required_resource_keys,\n )\n\n def __call__(self, *args, **kwargs) -> AssetMaterializationFunctionReturn:\n context_param_name = get_context_param_name(self._raw_asset_materialization_fn)\n context = get_sensor_context_from_args_or_kwargs(\n self._raw_asset_materialization_fn,\n args,\n kwargs,\n context_type=MultiAssetSensorEvaluationContext,\n )\n\n resources = validate_and_get_resource_dict(\n context.resources if context else ScopedResourcesBuilder.build_empty(),\n self._name,\n self._required_resource_keys,\n )\n\n context_param = {context_param_name: context} if context_param_name and context else {}\n result = self._raw_asset_materialization_fn(**context_param, **resources)\n\n if context:\n context.update_cursor_after_evaluation()\n return result\n\n @property\n def sensor_type(self) -> SensorType:\n return SensorType.MULTI_ASSET
\n
", "current_page_name": "_modules/dagster/_core/definitions/multi_asset_sensor_definition", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.multi_asset_sensor_definition"}, "multi_dimensional_partitions": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.multi_dimensional_partitions

\nimport hashlib\nimport itertools\nfrom datetime import datetime\nfrom functools import lru_cache, reduce\nfrom typing import (\n    Dict,\n    Iterable,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n    Type,\n    Union,\n    cast,\n)\n\nimport pendulum\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.errors import (\n    DagsterInvalidDefinitionError,\n    DagsterInvalidInvocationError,\n    DagsterUnknownPartitionError,\n)\nfrom dagster._core.instance import DynamicPartitionsStore\nfrom dagster._core.storage.tags import (\n    MULTIDIMENSIONAL_PARTITION_PREFIX,\n    get_multidimensional_partition_tag,\n)\n\nfrom .partition import (\n    DefaultPartitionsSubset,\n    DynamicPartitionsDefinition,\n    PartitionsDefinition,\n    PartitionsSubset,\n    StaticPartitionsDefinition,\n)\nfrom .time_window_partitions import TimeWindow, TimeWindowPartitionsDefinition\n\nINVALID_STATIC_PARTITIONS_KEY_CHARACTERS = set(["|", ",", "[", "]"])\n\nMULTIPARTITION_KEY_DELIMITER = "|"\n\n\nclass PartitionDimensionKey(\n    NamedTuple("_PartitionDimensionKey", [("dimension_name", str), ("partition_key", str)])\n):\n    """Representation of a single dimension of a multi-dimensional partition key."""\n\n    def __new__(cls, dimension_name: str, partition_key: str):\n        return super(PartitionDimensionKey, cls).__new__(\n            cls,\n            dimension_name=check.str_param(dimension_name, "dimension_name"),\n            partition_key=check.str_param(partition_key, "partition_key"),\n        )\n\n\n
[docs]class MultiPartitionKey(str):\n """A multi-dimensional partition key stores the partition key for each dimension.\n Subclasses the string class to keep partition key type as a string.\n\n Contains additional methods to access the partition key for each dimension.\n Creates a string representation of the partition key for each dimension, separated by a pipe (|).\n Orders the dimensions by name, to ensure consistent string representation.\n """\n\n dimension_keys: List[PartitionDimensionKey] = []\n\n def __new__(cls, keys_by_dimension: Mapping[str, str]):\n check.mapping_param(\n keys_by_dimension, "partitions_by_dimension", key_type=str, value_type=str\n )\n\n dimension_keys: List[PartitionDimensionKey] = [\n PartitionDimensionKey(dimension, keys_by_dimension[dimension])\n for dimension in sorted(list(keys_by_dimension.keys()))\n ]\n\n str_key = super(MultiPartitionKey, cls).__new__(\n cls,\n MULTIPARTITION_KEY_DELIMITER.join(\n [dim_key.partition_key for dim_key in dimension_keys]\n ),\n )\n\n str_key.dimension_keys = dimension_keys\n\n return str_key\n\n def __getnewargs__(self):\n # When this instance is pickled, replace the argument to __new__ with the\n # dimension key mapping instead of the string representation.\n return ({dim_key.dimension_name: dim_key.partition_key for dim_key in self.dimension_keys},)\n\n @property\n def keys_by_dimension(self) -> Mapping[str, str]:\n return {dim_key.dimension_name: dim_key.partition_key for dim_key in self.dimension_keys}
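# --- Illustrative usage sketch (not part of the module source) ----------------
# MultiPartitionKey subclasses str: dimensions are sorted by name and the
# per-dimension keys are joined with the "|" delimiter. The dimension names
# below are hypothetical examples; the import is shown for standalone use.
from dagster import MultiPartitionKey

key = MultiPartitionKey({"date": "2020-01-01", "abc": "a"})
assert key == "a|2020-01-01"  # "abc" sorts before "date"
assert key.keys_by_dimension == {"abc": "a", "date": "2020-01-01"}
# -------------------------------------------------------------------------------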
\n\n\nclass PartitionDimensionDefinition(\n NamedTuple(\n "_PartitionDimensionDefinition",\n [\n ("name", str),\n ("partitions_def", PartitionsDefinition),\n ],\n )\n):\n def __new__(\n cls,\n name: str,\n partitions_def: PartitionsDefinition,\n ):\n return super().__new__(\n cls,\n name=check.str_param(name, "name"),\n partitions_def=check.inst_param(partitions_def, "partitions_def", PartitionsDefinition),\n )\n\n def __eq__(self, other: object) -> bool:\n return (\n isinstance(other, PartitionDimensionDefinition)\n and self.name == other.name\n and self.partitions_def == other.partitions_def\n )\n\n\nALLOWED_PARTITION_DIMENSION_TYPES = (\n StaticPartitionsDefinition,\n TimeWindowPartitionsDefinition,\n DynamicPartitionsDefinition,\n)\n\n\ndef _check_valid_partitions_dimensions(\n partitions_dimensions: Mapping[str, PartitionsDefinition]\n) -> None:\n for dim_name, partitions_def in partitions_dimensions.items():\n if not any(isinstance(partitions_def, t) for t in ALLOWED_PARTITION_DIMENSION_TYPES):\n raise DagsterInvalidDefinitionError(\n f"Invalid partitions definition type {type(partitions_def)}. "\n "Only the following partitions definition types are supported: "\n f"{ALLOWED_PARTITION_DIMENSION_TYPES}."\n )\n if isinstance(partitions_def, DynamicPartitionsDefinition) and partitions_def.name is None:\n raise DagsterInvalidDefinitionError(\n "DynamicPartitionsDefinition must have a name to be used in a"\n " MultiPartitionsDefinition."\n )\n\n if isinstance(partitions_def, StaticPartitionsDefinition):\n if any(\n [\n INVALID_STATIC_PARTITIONS_KEY_CHARACTERS & set(key)\n for key in partitions_def.get_partition_keys()\n ]\n ):\n raise DagsterInvalidDefinitionError(\n f"Invalid character in partition key for dimension {dim_name}. "\n "A multi-partitions definition cannot contain partition keys with "\n "the following characters: |, [, ], ,"\n )\n\n\n
[docs]class MultiPartitionsDefinition(PartitionsDefinition[MultiPartitionKey]):\n """Takes the cross-product of partitions from two partitions definitions.\n\n For example, with a static partitions definition where the partitions are ["a", "b", "c"]\n and a daily partitions definition, this partitions definition will have the following\n partitions:\n\n 2020-01-01|a\n 2020-01-01|b\n 2020-01-01|c\n 2020-01-02|a\n 2020-01-02|b\n ...\n\n Args:\n partitions_defs (Mapping[str, PartitionsDefinition]):\n A mapping of dimension name to partitions definition. The total set of partitions will\n be the cross-product of the partitions from each PartitionsDefinition.\n\n Attributes:\n partitions_defs (Sequence[PartitionDimensionDefinition]):\n A sequence of PartitionDimensionDefinition objects, each of which contains a dimension\n name and a PartitionsDefinition. The total set of partitions will be the cross-product\n of the partitions from each PartitionsDefinition. This sequence is ordered by\n dimension name, to ensure consistent ordering of the partitions.\n """\n\n def __init__(self, partitions_defs: Mapping[str, PartitionsDefinition]):\n if not len(partitions_defs.keys()) == 2:\n raise DagsterInvalidInvocationError(\n "Dagster currently only supports multi-partitions definitions with 2 partitions"\n " definitions. Your multi-partitions definition has"\n f" {len(partitions_defs.keys())} partitions definitions."\n )\n check.mapping_param(\n partitions_defs, "partitions_defs", key_type=str, value_type=PartitionsDefinition\n )\n\n _check_valid_partitions_dimensions(partitions_defs)\n\n self._partitions_defs: List[PartitionDimensionDefinition] = sorted(\n [\n PartitionDimensionDefinition(name, partitions_def)\n for name, partitions_def in partitions_defs.items()\n ],\n key=lambda x: x.name,\n )\n\n @property\n def partitions_subset_class(self) -> Type["PartitionsSubset"]:\n return MultiPartitionsSubset\n\n def get_serializable_unique_identifier(\n self, dynamic_partitions_store: Optional[DynamicPartitionsStore] = None\n ) -> str:\n return hashlib.sha1(\n str(\n {\n dim_def.name: dim_def.partitions_def.get_serializable_unique_identifier(\n dynamic_partitions_store\n )\n for dim_def in self.partitions_defs\n }\n ).encode("utf-8")\n ).hexdigest()\n\n @property\n def partition_dimension_names(self) -> List[str]:\n return [dim_def.name for dim_def in self._partitions_defs]\n\n @property\n def partitions_defs(self) -> Sequence[PartitionDimensionDefinition]:\n return self._partitions_defs\n\n def get_partitions_def_for_dimension(self, dimension_name: str) -> PartitionsDefinition:\n for dim_def in self._partitions_defs:\n if dim_def.name == dimension_name:\n return dim_def.partitions_def\n check.failed(f"Invalid dimension name {dimension_name}")\n\n # We override the default implementation of `has_partition_key` for performance.\n def has_partition_key(\n self,\n partition_key: Union[MultiPartitionKey, str],\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> bool:\n partition_key = (\n partition_key\n if isinstance(partition_key, MultiPartitionKey)\n else self.get_partition_key_from_str(partition_key)\n )\n if partition_key.keys_by_dimension.keys() != set(self.partition_dimension_names):\n raise DagsterUnknownPartitionError(\n f"Invalid partition key {partition_key}. 
The dimensions of the partition key are"\n " not the dimensions of the partitions definition."\n )\n\n for dimension in self.partitions_defs:\n if not dimension.partitions_def.has_partition_key(\n partition_key.keys_by_dimension[dimension.name],\n current_time=current_time,\n dynamic_partitions_store=dynamic_partitions_store,\n ):\n return False\n return True\n\n # store results for repeated calls with the same current_time\n @lru_cache(maxsize=1)\n def _get_partition_keys(\n self, current_time: datetime, dynamic_partitions_store: Optional[DynamicPartitionsStore]\n ) -> Sequence[MultiPartitionKey]:\n partition_key_sequences = [\n partition_dim.partitions_def.get_partition_keys(\n current_time=current_time, dynamic_partitions_store=dynamic_partitions_store\n )\n for partition_dim in self._partitions_defs\n ]\n\n return [\n MultiPartitionKey(\n {self._partitions_defs[i].name: key for i, key in enumerate(partition_key_tuple)}\n )\n for partition_key_tuple in itertools.product(*partition_key_sequences)\n ]\n\n
[docs] @public\n def get_partition_keys(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Sequence[MultiPartitionKey]:\n """Returns a list of MultiPartitionKeys representing the partition keys of the\n PartitionsDefinition.\n\n Args:\n current_time (Optional[datetime]): A datetime object representing the current time, only\n applicable to time-based partition dimensions.\n dynamic_partitions_store (Optional[DynamicPartitionsStore]): The DynamicPartitionsStore\n object that is responsible for fetching dynamic partitions. Required when a\n dimension is a DynamicPartitionsDefinition with a name defined. Users can pass the\n DagsterInstance fetched via `context.instance` to this argument.\n\n Returns:\n Sequence[MultiPartitionKey]\n """\n return self._get_partition_keys(\n current_time or pendulum.now("UTC"), dynamic_partitions_store\n )
\n\n def filter_valid_partition_keys(\n self, partition_keys: Set[str], dynamic_partitions_store: DynamicPartitionsStore\n ) -> Set[MultiPartitionKey]:\n partition_keys_by_dimension = {\n dim.name: dim.partitions_def.get_partition_keys(\n dynamic_partitions_store=dynamic_partitions_store\n )\n for dim in self.partitions_defs\n }\n validated_partitions = set()\n for partition_key in partition_keys:\n partition_key_strs = partition_key.split(MULTIPARTITION_KEY_DELIMITER)\n if len(partition_key_strs) != len(self.partitions_defs):\n continue\n\n multipartition_key = MultiPartitionKey(\n {dim.name: partition_key_strs[i] for i, dim in enumerate(self._partitions_defs)}\n )\n\n if all(\n key in partition_keys_by_dimension.get(dim, [])\n for dim, key in multipartition_key.keys_by_dimension.items()\n ):\n validated_partitions.add(partition_key)\n\n return validated_partitions\n\n def __eq__(self, other):\n return (\n isinstance(other, MultiPartitionsDefinition)\n and self.partitions_defs == other.partitions_defs\n )\n\n def __hash__(self):\n return hash(\n tuple(\n [\n (partitions_def.name, partitions_def.__repr__())\n for partitions_def in self.partitions_defs\n ]\n )\n )\n\n def __str__(self) -> str:\n dimension_1 = self._partitions_defs[0]\n dimension_2 = self._partitions_defs[1]\n partition_str = (\n "Multi-partitioned, with dimensions: \\n"\n f"{dimension_1.name.capitalize()}: {dimension_1.partitions_def} \\n"\n f"{dimension_2.name.capitalize()}: {dimension_2.partitions_def}"\n )\n return partition_str\n\n def __repr__(self) -> str:\n return f"{type(self).__name__}(dimensions={[str(dim) for dim in self.partitions_defs]}"\n\n def get_partition_key_from_str(self, partition_key_str: str) -> MultiPartitionKey:\n """Given a string representation of a partition key, returns a MultiPartitionKey object."""\n check.str_param(partition_key_str, "partition_key_str")\n\n partition_key_strs = partition_key_str.split(MULTIPARTITION_KEY_DELIMITER)\n check.invariant(\n len(partition_key_strs) == len(self.partitions_defs),\n f"Expected {len(self.partitions_defs)} partition keys in partition key string"\n f" {partition_key_str}, but got {len(partition_key_strs)}",\n )\n\n return MultiPartitionKey(\n {dim.name: partition_key_strs[i] for i, dim in enumerate(self._partitions_defs)}\n )\n\n def _get_primary_and_secondary_dimension(\n self,\n ) -> Tuple[PartitionDimensionDefinition, PartitionDimensionDefinition]:\n # Multipartitions subsets are serialized by primary dimension. 
If changing\n # the selection of primary/secondary dimension, will need to also update the\n # serialization of MultiPartitionsSubsets\n\n time_dimensions = [\n dim\n for dim in self.partitions_defs\n if isinstance(dim.partitions_def, TimeWindowPartitionsDefinition)\n ]\n if len(time_dimensions) == 1:\n primary_dimension, secondary_dimension = time_dimensions[0], next(\n iter([dim for dim in self.partitions_defs if dim != time_dimensions[0]])\n )\n else:\n primary_dimension, secondary_dimension = (\n self.partitions_defs[0],\n self.partitions_defs[1],\n )\n\n return primary_dimension, secondary_dimension\n\n @property\n def primary_dimension(self) -> PartitionDimensionDefinition:\n return self._get_primary_and_secondary_dimension()[0]\n\n @property\n def secondary_dimension(self) -> PartitionDimensionDefinition:\n return self._get_primary_and_secondary_dimension()[1]\n\n def get_tags_for_partition_key(self, partition_key: str) -> Mapping[str, str]:\n partition_key = cast(MultiPartitionKey, self.get_partition_key_from_str(partition_key))\n tags = {**super().get_tags_for_partition_key(partition_key)}\n tags.update(get_tags_from_multi_partition_key(partition_key))\n return tags\n\n @property\n def time_window_dimension(self) -> PartitionDimensionDefinition:\n time_window_dims = [\n dim\n for dim in self.partitions_defs\n if isinstance(dim.partitions_def, TimeWindowPartitionsDefinition)\n ]\n check.invariant(\n len(time_window_dims) == 1, "Expected exactly one time window partitioned dimension"\n )\n return next(iter(time_window_dims))\n\n def time_window_for_partition_key(self, partition_key: str) -> TimeWindow:\n if not isinstance(partition_key, MultiPartitionKey):\n partition_key = self.get_partition_key_from_str(partition_key)\n\n time_window_dimension = self.time_window_dimension\n return cast(\n TimeWindowPartitionsDefinition, time_window_dimension.partitions_def\n ).time_window_for_partition_key(\n cast(MultiPartitionKey, partition_key).keys_by_dimension[time_window_dimension.name]\n )\n\n def get_multipartition_keys_with_dimension_value(\n self,\n dimension_name: str,\n dimension_partition_key: str,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n current_time: Optional[datetime] = None,\n ) -> Sequence[MultiPartitionKey]:\n check.str_param(dimension_name, "dimension_name")\n check.str_param(dimension_partition_key, "dimension_partition_key")\n\n matching_dimensions = [\n dimension for dimension in self.partitions_defs if dimension.name == dimension_name\n ]\n other_dimensions = [\n dimension for dimension in self.partitions_defs if dimension.name != dimension_name\n ]\n\n check.invariant(\n len(matching_dimensions) == 1,\n f"Dimension {dimension_name} not found in MultiPartitionsDefinition with dimensions"\n f" {[dim.name for dim in self.partitions_defs]}",\n )\n\n partition_sequences = [\n partition_dim.partitions_def.get_partition_keys(\n current_time=current_time, dynamic_partitions_store=dynamic_partitions_store\n )\n for partition_dim in other_dimensions\n ] + [[dimension_partition_key]]\n\n # Names of partitions dimensions in the same order as partition_sequences\n partition_dim_names = [dim.name for dim in other_dimensions] + [dimension_name]\n\n return [\n MultiPartitionKey(\n {\n partition_dim_names[i]: partition_key\n for i, partition_key in enumerate(partitions_tuple)\n }\n )\n for partitions_tuple in itertools.product(*partition_sequences)\n ]\n\n def get_num_partitions(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: 
Optional[DynamicPartitionsStore] = None,\n ) -> int:\n # Static partitions definitions can contain duplicate keys (will throw error in 1.3.0)\n # In the meantime, relying on get_num_partitions to handle duplicates to display\n # correct counts in the Dagster UI.\n dimension_counts = [\n dim.partitions_def.get_num_partitions(\n current_time=current_time, dynamic_partitions_store=dynamic_partitions_store\n )\n for dim in self.partitions_defs\n ]\n return reduce(lambda x, y: x * y, dimension_counts, 1)
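# --- Illustrative usage sketch (not part of the module source) ----------------
# Builds the cross product of a static dimension and a daily time dimension, as
# described in the MultiPartitionsDefinition docstring. The dimension names are
# hypothetical examples.
from dagster import (
    DailyPartitionsDefinition,
    MultiPartitionsDefinition,
    StaticPartitionsDefinition,
)

composite = MultiPartitionsDefinition(
    {
        "abc": StaticPartitionsDefinition(["a", "b", "c"]),
        "date": DailyPartitionsDefinition(start_date="2020-01-01"),
    }
)

# Each key is a MultiPartitionKey whose string form joins the per-dimension keys
# with "|", ordered by dimension name (e.g. "a|2020-01-01").
keys = composite.get_partition_keys()
first = composite.get_partition_key_from_str(keys[0])
assert set(first.keys_by_dimension) == {"abc", "date"}
# -------------------------------------------------------------------------------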
\n\n\nclass MultiPartitionsSubset(DefaultPartitionsSubset):\n def __init__(\n self,\n partitions_def: MultiPartitionsDefinition,\n subset: Optional[Set[str]] = None,\n ):\n check.inst_param(partitions_def, "partitions_def", MultiPartitionsDefinition)\n subset = (\n set(\n [\n partitions_def.get_partition_key_from_str(key)\n for key in subset\n if MULTIPARTITION_KEY_DELIMITER in key\n ]\n )\n if subset\n else set()\n )\n super(MultiPartitionsSubset, self).__init__(partitions_def, subset)\n\n def with_partition_keys(self, partition_keys: Iterable[str]) -> "MultiPartitionsSubset":\n return MultiPartitionsSubset(\n cast(MultiPartitionsDefinition, self._partitions_def),\n self._subset | set(partition_keys),\n )\n\n\ndef get_tags_from_multi_partition_key(multi_partition_key: MultiPartitionKey) -> Mapping[str, str]:\n check.inst_param(multi_partition_key, "multi_partition_key", MultiPartitionKey)\n\n return {\n get_multidimensional_partition_tag(dimension.dimension_name): dimension.partition_key\n for dimension in multi_partition_key.dimension_keys\n }\n\n\ndef get_multipartition_key_from_tags(tags: Mapping[str, str]) -> str:\n partitions_by_dimension: Dict[str, str] = {}\n for tag in tags:\n if tag.startswith(MULTIDIMENSIONAL_PARTITION_PREFIX):\n dimension = tag[len(MULTIDIMENSIONAL_PARTITION_PREFIX) :]\n partitions_by_dimension[dimension] = tags[tag]\n\n return MultiPartitionKey(partitions_by_dimension)\n
", "current_page_name": "_modules/dagster/_core/definitions/multi_dimensional_partitions", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.multi_dimensional_partitions"}, "op_definition": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.op_definition

\nimport inspect\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Callable,\n    Iterator,\n    List,\n    Mapping,\n    Optional,\n    Sequence,\n    Tuple,\n    TypeVar,\n    Union,\n    cast,\n)\n\nfrom typing_extensions import TypeAlias, get_args, get_origin\n\nimport dagster._check as check\nfrom dagster._annotations import deprecated, deprecated_param, public\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._core.definitions.dependency import NodeHandle, NodeInputHandle\nfrom dagster._core.definitions.node_definition import NodeDefinition\nfrom dagster._core.definitions.op_invocation import direct_invocation_result\nfrom dagster._core.definitions.policy import RetryPolicy\nfrom dagster._core.definitions.resource_requirement import (\n    InputManagerRequirement,\n    OpDefinitionResourceRequirement,\n    OutputManagerRequirement,\n    ResourceRequirement,\n)\nfrom dagster._core.errors import (\n    DagsterInvalidDefinitionError,\n    DagsterInvalidInvocationError,\n    DagsterInvariantViolationError,\n)\nfrom dagster._core.types.dagster_type import DagsterType, DagsterTypeKind\nfrom dagster._utils import IHasInternalInit\nfrom dagster._utils.warnings import normalize_renamed_param\n\nfrom .definition_config_schema import (\n    IDefinitionConfigSchema,\n    convert_user_facing_definition_config_schema,\n)\nfrom .hook_definition import HookDefinition\nfrom .inference import infer_output_props\nfrom .input import In, InputDefinition\nfrom .output import Out, OutputDefinition\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.asset_layer import AssetLayer\n\n    from .composition import PendingNodeInvocation\n    from .decorators.op_decorator import DecoratedOpFunction\n\nOpComputeFunction: TypeAlias = Callable[..., Any]\n\n\n
[docs]@deprecated_param(\n param="version", breaking_version="2.0", additional_warn_text="Use `code_version` instead."\n)\nclass OpDefinition(NodeDefinition, IHasInternalInit):\n """Defines an op, the functional unit of user-defined computation.\n\n For more details on what a op is, refer to the\n `Ops Overview <../../concepts/ops-jobs-graphs/ops>`_ .\n\n End users should prefer the :func:`@op <op>` decorator. OpDefinition is generally intended to be\n used by framework authors or for programatically generated ops.\n\n Args:\n name (str): Name of the op. Must be unique within any :py:class:`GraphDefinition` or\n :py:class:`JobDefinition` that contains the op.\n input_defs (List[InputDefinition]): Inputs of the op.\n compute_fn (Callable): The core of the op, the function that performs the actual\n computation. The signature of this function is determined by ``input_defs``, and\n optionally, an injected first argument, ``context``, a collection of information\n provided by the system.\n\n This function will be coerced into a generator or an async generator, which must yield\n one :py:class:`Output` for each of the op's ``output_defs``, and additionally may\n yield other types of Dagster events, including :py:class:`AssetMaterialization` and\n :py:class:`ExpectationResult`.\n output_defs (List[OutputDefinition]): Outputs of the op.\n config_schema (Optional[ConfigSchema): The schema for the config. If set, Dagster will check\n that the config provided for the op matches this schema and will fail if it does not. If\n not set, Dagster will accept any config provided for the op.\n description (Optional[str]): Human-readable description of the op.\n tags (Optional[Dict[str, Any]]): Arbitrary metadata for the op. Frameworks may\n expect and require certain metadata to be attached to a op. Users should generally\n not set metadata directly. Values that are not strings will be json encoded and must meet\n the criteria that `json.loads(json.dumps(value)) == value`.\n required_resource_keys (Optional[Set[str]]): Set of resources handles required by this op.\n code_version (Optional[str]): (Experimental) Version of the code encapsulated by the op. If set,\n this is used as a default code version for all outputs.\n retry_policy (Optional[RetryPolicy]): The retry policy for this op.\n\n\n Examples:\n .. 
code-block:: python\n\n def _add_one(_context, inputs):\n yield Output(inputs["num"] + 1)\n\n OpDefinition(\n name="add_one",\n ins={"num": In(int)},\n outs={"result": Out(int)},\n compute_fn=_add_one,\n )\n """\n\n _compute_fn: Union[Callable[..., Any], "DecoratedOpFunction"]\n _config_schema: IDefinitionConfigSchema\n _required_resource_keys: AbstractSet[str]\n _version: Optional[str]\n _retry_policy: Optional[RetryPolicy]\n\n def __init__(\n self,\n compute_fn: Union[Callable[..., Any], "DecoratedOpFunction"],\n name: str,\n ins: Optional[Mapping[str, In]] = None,\n outs: Optional[Mapping[str, Out]] = None,\n description: Optional[str] = None,\n config_schema: Optional[Union[UserConfigSchema, IDefinitionConfigSchema]] = None,\n required_resource_keys: Optional[AbstractSet[str]] = None,\n tags: Optional[Mapping[str, Any]] = None,\n version: Optional[str] = None,\n retry_policy: Optional[RetryPolicy] = None,\n code_version: Optional[str] = None,\n ):\n from .decorators.op_decorator import DecoratedOpFunction, resolve_checked_op_fn_inputs\n\n ins = check.opt_mapping_param(ins, "ins")\n input_defs = [\n inp.to_definition(name) for name, inp in sorted(ins.items(), key=lambda inp: inp[0])\n ] # sort so that input definition order is deterministic\n\n if isinstance(compute_fn, DecoratedOpFunction):\n resolved_input_defs: Sequence[InputDefinition] = resolve_checked_op_fn_inputs(\n decorator_name="@op",\n fn_name=name,\n compute_fn=cast(DecoratedOpFunction, compute_fn),\n explicit_input_defs=input_defs,\n exclude_nothing=True,\n )\n self._compute_fn = compute_fn\n _validate_context_type_hint(self._compute_fn.decorated_fn)\n else:\n resolved_input_defs = input_defs\n self._compute_fn = check.callable_param(compute_fn, "compute_fn")\n _validate_context_type_hint(self._compute_fn)\n\n code_version = normalize_renamed_param(\n code_version,\n "code_version",\n version,\n "version",\n )\n self._version = code_version\n\n check.opt_mapping_param(outs, "outs")\n output_defs = _resolve_output_defs_from_outs(\n compute_fn=compute_fn, outs=outs, default_code_version=code_version\n )\n\n self._config_schema = convert_user_facing_definition_config_schema(config_schema)\n self._required_resource_keys = frozenset(\n check.opt_set_param(required_resource_keys, "required_resource_keys", of_type=str)\n )\n self._retry_policy = check.opt_inst_param(retry_policy, "retry_policy", RetryPolicy)\n\n positional_inputs = (\n self._compute_fn.positional_inputs()\n if isinstance(self._compute_fn, DecoratedOpFunction)\n else None\n )\n\n super(OpDefinition, self).__init__(\n name=name,\n input_defs=check.sequence_param(resolved_input_defs, "input_defs", InputDefinition),\n output_defs=check.sequence_param(output_defs, "output_defs", OutputDefinition),\n description=description,\n tags=check.opt_mapping_param(tags, "tags", key_type=str),\n positional_inputs=positional_inputs,\n )\n\n def dagster_internal_init(\n *,\n compute_fn: Union[Callable[..., Any], "DecoratedOpFunction"],\n name: str,\n ins: Optional[Mapping[str, In]],\n outs: Optional[Mapping[str, Out]],\n description: Optional[str],\n config_schema: Optional[Union[UserConfigSchema, IDefinitionConfigSchema]],\n required_resource_keys: Optional[AbstractSet[str]],\n tags: Optional[Mapping[str, Any]],\n version: Optional[str],\n retry_policy: Optional[RetryPolicy],\n code_version: Optional[str],\n ) -> "OpDefinition":\n return OpDefinition(\n compute_fn=compute_fn,\n name=name,\n ins=ins,\n outs=outs,\n description=description,\n config_schema=config_schema,\n 
required_resource_keys=required_resource_keys,\n tags=tags,\n version=version,\n retry_policy=retry_policy,\n code_version=code_version,\n )\n\n @property\n def node_type_str(self) -> str:\n return "op"\n\n @property\n def is_graph_job_op_node(self) -> bool:\n return True\n\n @public\n @property\n def name(self) -> str:\n """str: The name of this op."""\n return super(OpDefinition, self).name\n\n @public\n @property\n def ins(self) -> Mapping[str, In]:\n """Mapping[str, In]: A mapping from input name to the In object that represents that input."""\n return {input_def.name: In.from_definition(input_def) for input_def in self.input_defs}\n\n @public\n @property\n def outs(self) -> Mapping[str, Out]:\n """Mapping[str, Out]: A mapping from output name to the Out object that represents that output."""\n return {output_def.name: Out.from_definition(output_def) for output_def in self.output_defs}\n\n @property\n def compute_fn(self) -> Union[Callable[..., Any], "DecoratedOpFunction"]:\n return self._compute_fn\n\n @public\n @property\n def config_schema(self) -> IDefinitionConfigSchema:\n """IDefinitionConfigSchema: The config schema for this op."""\n return self._config_schema\n\n @public\n @property\n def required_resource_keys(self) -> AbstractSet[str]:\n """AbstractSet[str]: A set of keys for resources that must be provided to this OpDefinition."""\n return frozenset(self._required_resource_keys)\n\n @public\n @deprecated(breaking_version="2.0", additional_warn_text="Use `code_version` instead.")\n @property\n def version(self) -> Optional[str]:\n """str: Version of the code encapsulated by the op. If set, this is used as a\n default code version for all outputs.\n """\n return self._version\n\n @public\n @property\n def retry_policy(self) -> Optional[RetryPolicy]:\n """Optional[RetryPolicy]: The RetryPolicy for this op."""\n return self._retry_policy\n\n @public\n @property\n def tags(self) -> Mapping[str, str]:\n """Mapping[str, str]: The tags for this op."""\n return super(OpDefinition, self).tags\n\n
[docs] @public\n def alias(self, name: str) -> "PendingNodeInvocation":\n """Creates a copy of this op with the given name."""\n return super(OpDefinition, self).alias(name)
\n\n
[docs] @public\n def tag(self, tags: Optional[Mapping[str, str]]) -> "PendingNodeInvocation":\n """Creates a copy of this op with the given tags."""\n return super(OpDefinition, self).tag(tags)
\n\n
[docs] @public\n def with_hooks(self, hook_defs: AbstractSet[HookDefinition]) -> "PendingNodeInvocation":\n """Creates a copy of this op with the given hook definitions."""\n return super(OpDefinition, self).with_hooks(hook_defs)
\n\n
[docs] @public\n def with_retry_policy(self, retry_policy: RetryPolicy) -> "PendingNodeInvocation":\n """Creates a copy of this op with the given retry policy."""\n return super(OpDefinition, self).with_retry_policy(retry_policy)
\n\n def is_from_decorator(self) -> bool:\n from .decorators.op_decorator import DecoratedOpFunction\n\n return isinstance(self._compute_fn, DecoratedOpFunction)\n\n def get_output_annotation(self) -> Any:\n if not self.is_from_decorator():\n raise DagsterInvalidInvocationError(\n f"Attempted to get output annotation for {self.node_type_str} '{self.name}', "\n "which was not constructed from a decorated function."\n )\n return cast("DecoratedOpFunction", self.compute_fn).get_output_annotation()\n\n def all_dagster_types(self) -> Iterator[DagsterType]:\n yield from self.all_input_output_types()\n\n def iterate_node_defs(self) -> Iterator[NodeDefinition]:\n yield self\n\n def iterate_op_defs(self) -> Iterator["OpDefinition"]:\n yield self\n\n T_Handle = TypeVar("T_Handle", bound=Optional[NodeHandle])\n\n def resolve_output_to_origin(\n self, output_name: str, handle: T_Handle\n ) -> Tuple[OutputDefinition, T_Handle]:\n return self.output_def_named(output_name), handle\n\n def resolve_output_to_origin_op_def(self, output_name: str) -> "OpDefinition":\n return self\n\n def get_inputs_must_be_resolved_top_level(\n self, asset_layer: "AssetLayer", handle: Optional[NodeHandle] = None\n ) -> Sequence[InputDefinition]:\n handle = cast(NodeHandle, check.inst_param(handle, "handle", NodeHandle))\n unresolveable_input_defs = []\n for input_def in self.input_defs:\n if (\n not input_def.dagster_type.loader\n and not input_def.dagster_type.kind == DagsterTypeKind.NOTHING\n and not input_def.has_default_value\n and not input_def.input_manager_key\n ):\n input_asset_key = asset_layer.asset_key_for_input(handle, input_def.name)\n # If input_asset_key is present, this input can be resolved\n # by a source asset, so input does not need to be resolved\n # at the top level.\n if input_asset_key:\n continue\n unresolveable_input_defs.append(input_def)\n return unresolveable_input_defs\n\n def input_has_default(self, input_name: str) -> bool:\n return self.input_def_named(input_name).has_default_value\n\n def default_value_for_input(self, input_name: str) -> InputDefinition:\n return self.input_def_named(input_name).default_value\n\n def input_supports_dynamic_output_dep(self, input_name: str) -> bool:\n return True\n\n def with_replaced_properties(\n self,\n name: str,\n ins: Optional[Mapping[str, In]] = None,\n outs: Optional[Mapping[str, Out]] = None,\n config_schema: Optional[IDefinitionConfigSchema] = None,\n description: Optional[str] = None,\n ) -> "OpDefinition":\n return OpDefinition.dagster_internal_init(\n name=name,\n ins=ins\n or {input_def.name: In.from_definition(input_def) for input_def in self.input_defs},\n outs=outs\n or {\n output_def.name: Out.from_definition(output_def) for output_def in self.output_defs\n },\n compute_fn=self.compute_fn,\n config_schema=config_schema or self.config_schema,\n description=description or self.description,\n tags=self.tags,\n required_resource_keys=self.required_resource_keys,\n code_version=self._version,\n retry_policy=self.retry_policy,\n version=None, # code_version replaces version\n )\n\n def copy_for_configured(\n self,\n name: str,\n description: Optional[str],\n config_schema: IDefinitionConfigSchema,\n ) -> "OpDefinition":\n return self.with_replaced_properties(\n name=name,\n description=description,\n config_schema=config_schema,\n )\n\n def get_resource_requirements(\n self,\n outer_context: Optional[object] = None,\n ) -> Iterator[ResourceRequirement]:\n # Outer requiree in this context is the outer-calling node handle. 
If not provided, then\n # just use the op name.\n outer_context = cast(Optional[Tuple[NodeHandle, Optional["AssetLayer"]]], outer_context)\n if not outer_context:\n handle = None\n asset_layer = None\n else:\n handle, asset_layer = outer_context\n node_description = f"{self.node_type_str} '{handle or self.name}'"\n for resource_key in sorted(list(self.required_resource_keys)):\n yield OpDefinitionResourceRequirement(\n key=resource_key, node_description=node_description\n )\n for input_def in self.input_defs:\n if input_def.input_manager_key:\n yield InputManagerRequirement(\n key=input_def.input_manager_key,\n node_description=node_description,\n input_name=input_def.name,\n root_input=False,\n )\n elif asset_layer and handle:\n input_asset_key = asset_layer.asset_key_for_input(handle, input_def.name)\n if input_asset_key:\n io_manager_key = asset_layer.io_manager_key_for_asset(input_asset_key)\n yield InputManagerRequirement(\n key=io_manager_key,\n node_description=node_description,\n input_name=input_def.name,\n root_input=False,\n )\n\n for output_def in self.output_defs:\n yield OutputManagerRequirement(\n key=output_def.io_manager_key,\n node_description=node_description,\n output_name=output_def.name,\n )\n\n def resolve_input_to_destinations(\n self, input_handle: NodeInputHandle\n ) -> Sequence[NodeInputHandle]:\n return [input_handle]\n\n def __call__(self, *args, **kwargs) -> Any:\n from .composition import is_in_composition\n\n if is_in_composition():\n return super(OpDefinition, self).__call__(*args, **kwargs)\n\n return direct_invocation_result(self, *args, **kwargs)
\n\n\ndef _resolve_output_defs_from_outs(\n compute_fn: Union[Callable[..., Any], "DecoratedOpFunction"],\n outs: Optional[Mapping[str, Out]],\n default_code_version: Optional[str],\n) -> Sequence[OutputDefinition]:\n from .decorators.op_decorator import DecoratedOpFunction\n\n if isinstance(compute_fn, DecoratedOpFunction):\n inferred_output_props = infer_output_props(compute_fn.decorated_fn)\n annotation = inferred_output_props.annotation\n description = inferred_output_props.description\n else:\n inferred_output_props = None\n annotation = inspect.Parameter.empty\n description = None\n\n if outs is None:\n return [OutputDefinition.create_from_inferred(inferred_output_props, default_code_version)]\n\n # If only a single entry has been provided to the out dict, then slurp the\n # annotation into the entry.\n if len(outs) == 1:\n name = next(iter(outs.keys()))\n only_out = outs[name]\n return [only_out.to_definition(annotation, name, description, default_code_version)]\n\n output_defs: List[OutputDefinition] = []\n\n # Introspection on type annotations is experimental, so checking\n # metaclass is the best we can do.\n if annotation != inspect.Parameter.empty and not get_origin(annotation) == tuple:\n raise DagsterInvariantViolationError(\n "Expected Tuple annotation for multiple outputs, but received non-tuple annotation."\n )\n if annotation != inspect.Parameter.empty and not len(get_args(annotation)) == len(outs):\n raise DagsterInvariantViolationError(\n "Expected Tuple annotation to have number of entries matching the "\n f"number of outputs for more than one output. Expected {len(outs)} "\n f"outputs but annotation has {len(get_args(annotation))}."\n )\n for idx, (name, cur_out) in enumerate(outs.items()):\n annotation_type = (\n get_args(annotation)[idx]\n if annotation != inspect.Parameter.empty\n else inspect.Parameter.empty\n )\n # Don't provide description when using multiple outputs. Introspection\n # is challenging when faced with multiple inputs.\n output_defs.append(\n cur_out.to_definition(\n annotation_type, name=name, description=None, code_version=default_code_version\n )\n )\n\n return output_defs\n\n\ndef _validate_context_type_hint(fn):\n from inspect import _empty as EmptyAnnotation\n\n from dagster._core.decorator_utils import get_function_params\n from dagster._core.definitions.decorators.op_decorator import is_context_provided\n from dagster._core.execution.context.compute import AssetExecutionContext, OpExecutionContext\n\n params = get_function_params(fn)\n if is_context_provided(params):\n if (\n params[0].annotation is not AssetExecutionContext\n and params[0].annotation is not OpExecutionContext\n and params[0].annotation is not EmptyAnnotation\n ):\n raise DagsterInvalidDefinitionError(\n f"Cannot annotate `context` parameter with type {params[0].annotation}. `context`"\n " must be annotated with AssetExecutionContext, OpExecutionContext, or left blank."\n )\n
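# --- Illustrative usage sketch (not part of the module source) ----------------
# Shows the copy-producing helpers documented on OpDefinition (alias,
# with_retry_policy) used inside a job composition. The op and job names are
# hypothetical examples.
from dagster import RetryPolicy, job, op

@op
def emit_one() -> int:
    return 1

@op
def add_one(num: int) -> int:
    return num + 1

@job
def arithmetic_job():
    # alias() renames this particular invocation; with_retry_policy() attaches a
    # retry policy to the aliased copy without modifying the original op.
    add_one.alias("add_one_with_retries").with_retry_policy(
        RetryPolicy(max_retries=3)
    )(emit_one())
# -------------------------------------------------------------------------------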
", "current_page_name": "_modules/dagster/_core/definitions/op_definition", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.op_definition"}, "output": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.output

\nimport inspect\nfrom typing import (\n    Any,\n    NamedTuple,\n    Optional,\n    Type,\n    TypeVar,\n    Union,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, deprecated_param\nfrom dagster._core.definitions.metadata import (\n    ArbitraryMetadataMapping,\n    MetadataUserInput,\n    normalize_metadata,\n)\nfrom dagster._core.errors import DagsterError, DagsterInvalidDefinitionError\nfrom dagster._core.types.dagster_type import (\n    DagsterType,\n    is_dynamic_output_annotation,\n    resolve_dagster_type,\n)\n\nfrom .inference import InferredOutputProps\nfrom .input import NoValueSentinel\nfrom .utils import DEFAULT_IO_MANAGER_KEY, DEFAULT_OUTPUT, check_valid_name\n\nTOutputDefinition = TypeVar("TOutputDefinition", bound="OutputDefinition")\nTOut = TypeVar("TOut", bound="Out")\n\n\nclass OutputDefinition:\n    """Defines an output from an op's compute function.\n\n    Ops can have multiple outputs, in which case outputs cannot be anonymous.\n\n    Many ops have only one output, in which case the user can provide a single output definition\n    that will be given the default name, "result".\n\n    Output definitions may be typed using the Dagster type system.\n\n    Args:\n        dagster_type (Optional[Union[Type, DagsterType]]]): The type of this output.\n            Users should provide the Python type of the objects that they expect the op to yield\n            for this output, or a :py:class:`DagsterType` that defines a runtime check that they\n            want to be run on this output. Defaults to :py:class:`Any`.\n        name (Optional[str]): Name of the output. (default: "result")\n        description (Optional[str]): Human-readable description of the output.\n        is_required (Optional[bool]): Whether the presence of this field is required. (default: True)\n        io_manager_key (Optional[str]): The resource key of the IOManager used for storing this\n            output and loading it in downstream steps (default: "io_manager").\n        metadata (Optional[Dict[str, Any]]): A dict of the metadata for the output.\n            For example, users can provide a file path if the data object will be stored in a\n            filesystem, or provide information of a database table when it is going to load the data\n            into the table.\n        code_version (Optional[str]): (Experimental) Version of the code that generates this output. 
In\n            general, versions should be set only for code that deterministically produces the same\n            output when given the same inputs.\n\n    """\n\n    def __init__(\n        self,\n        dagster_type=None,\n        name: Optional[str] = None,\n        description: Optional[str] = None,\n        is_required: bool = True,\n        io_manager_key: Optional[str] = None,\n        metadata: Optional[ArbitraryMetadataMapping] = None,\n        code_version: Optional[str] = None,\n        # make sure new parameters are updated in combine_with_inferred below\n    ):\n        self._name = check_valid_name(check.opt_str_param(name, "name", DEFAULT_OUTPUT))\n        self._type_not_set = dagster_type is None\n        self._dagster_type = resolve_dagster_type(dagster_type)\n        self._description = check.opt_str_param(description, "description")\n        self._is_required = check.bool_param(is_required, "is_required")\n        self._io_manager_key = check.opt_str_param(\n            io_manager_key,\n            "io_manager_key",\n            default=DEFAULT_IO_MANAGER_KEY,\n        )\n        self._code_version = check.opt_str_param(code_version, "code_version")\n        self._raw_metadata = check.opt_mapping_param(metadata, "metadata", key_type=str)\n        self._metadata = normalize_metadata(self._raw_metadata, allow_invalid=True)\n\n    @property\n    def name(self) -> str:\n        return self._name\n\n    @property\n    def dagster_type(self) -> DagsterType:\n        return self._dagster_type\n\n    @property\n    def description(self) -> Optional[str]:\n        return self._description\n\n    @property\n    def is_required(self) -> bool:\n        return self._is_required\n\n    @property\n    def io_manager_key(self) -> str:\n        return self._io_manager_key\n\n    @property\n    def code_version(self) -> Optional[str]:\n        return self._code_version\n\n    @property\n    def optional(self) -> bool:\n        return not self.is_required\n\n    @property\n    def metadata(self) -> ArbitraryMetadataMapping:\n        return self._raw_metadata\n\n    @property\n    def is_dynamic(self) -> bool:\n        return False\n\n    def mapping_from(\n        self, node_name: str, output_name: Optional[str] = None, from_dynamic_mapping: bool = False\n    ) -> "OutputMapping":\n        """Create an output mapping from an output of a child node.\n\n        In a GraphDefinition, you can use this helper function to construct\n        an :py:class:`OutputMapping` from the output of a child node.\n\n        Args:\n            node_name (str): The name of the child node from which to map this output.\n            output_name (str): The name of the child node's output from which to map this output.\n\n        Examples:\n            .. 
code-block:: python\n\n                output_mapping = OutputDefinition(Int).mapping_from('child_node')\n        """\n        return OutputMapping(\n            graph_output_name=self.name,\n            mapped_node_name=node_name,\n            mapped_node_output_name=output_name or DEFAULT_OUTPUT,\n            graph_output_description=self.description,\n            dagster_type=self.dagster_type,\n            from_dynamic_mapping=from_dynamic_mapping or self.is_dynamic,\n        )\n\n    @staticmethod\n    def create_from_inferred(\n        inferred: Optional[InferredOutputProps], code_version: Optional[str] = None\n    ) -> "OutputDefinition":\n        if not inferred:\n            return OutputDefinition(code_version=code_version)\n        if is_dynamic_output_annotation(inferred.annotation):\n            return DynamicOutputDefinition(\n                dagster_type=_checked_inferred_type(inferred.annotation),\n                description=inferred.description,\n                code_version=code_version,\n            )\n        else:\n            return OutputDefinition(\n                dagster_type=_checked_inferred_type(inferred.annotation),\n                description=inferred.description,\n                code_version=code_version,\n            )\n\n    def combine_with_inferred(\n        self: TOutputDefinition, inferred: InferredOutputProps\n    ) -> TOutputDefinition:\n        dagster_type = self.dagster_type\n        if self._type_not_set:\n            dagster_type = _checked_inferred_type(inferred.annotation)\n        if self.description is None:\n            description = inferred.description\n        else:\n            description = self.description\n\n        return self.__class__(\n            name=self.name,\n            dagster_type=dagster_type,\n            description=description,\n            is_required=self.is_required,\n            io_manager_key=self.io_manager_key,\n            metadata=self._metadata,\n        )\n\n\ndef _checked_inferred_type(inferred: Any) -> DagsterType:\n    try:\n        if inferred == inspect.Parameter.empty:\n            return resolve_dagster_type(None)\n        elif inferred is None:\n            # When inferred.annotation is None, it means someone explicitly put "None" as the\n            # annotation, so want to map it to a DagsterType that checks for the None type\n            return resolve_dagster_type(type(None))\n        else:\n            return resolve_dagster_type(inferred)\n\n    except DagsterError as e:\n        raise DagsterInvalidDefinitionError(\n            f"Problem using type '{inferred}' from return type annotation, correct the issue "\n            "or explicitly set the dagster_type via Out()."\n        ) from e\n\n\nclass DynamicOutputDefinition(OutputDefinition):\n    """Variant of :py:class:`OutputDefinition <dagster.OutputDefinition>` for an\n    output that will dynamically alter the graph at runtime.\n\n    When using in a composition function such as :py:func:`@job <dagster.job>`,\n    dynamic outputs must be used with either:\n\n    * ``map`` - clone downstream nodes for each separate :py:class:`DynamicOutput`\n    * ``collect`` - gather across all :py:class:`DynamicOutput` in to a list\n\n    Uses the same constructor as :py:class:`OutputDefinition <dagster.OutputDefinition>`\n\n        .. 
code-block:: python\n\n            @op(\n                config_schema={\n                    "path": Field(str, default_value=file_relative_path(__file__, "sample"))\n                },\n                output_defs=[DynamicOutputDefinition(str)],\n            )\n            def files_in_directory(context):\n                path = context.op_config["path"]\n                dirname, _, filenames = next(os.walk(path))\n                for file in filenames:\n                    yield DynamicOutput(os.path.join(dirname, file), mapping_key=_clean(file))\n\n            @job\n            def process_directory():\n                files = files_in_directory()\n\n                # use map to invoke an op on each dynamic output\n                file_results = files.map(process_file)\n\n                # use collect to gather the results in to a list\n                summarize_directory(file_results.collect())\n    """\n\n    @property\n    def is_dynamic(self) -> bool:\n        return True\n\n\nclass OutputPointer(NamedTuple("_OutputPointer", [("node_name", str), ("output_name", str)])):\n    def __new__(cls, node_name: str, output_name: Optional[str] = None):\n        return super(OutputPointer, cls).__new__(\n            cls,\n            check.str_param(node_name, "node_name"),\n            check.opt_str_param(output_name, "output_name", DEFAULT_OUTPUT),\n        )\n\n\n
[docs]@deprecated_param(\n param="dagster_type",\n breaking_version="2.0",\n additional_warn_text="Any defined `dagster_type` should come from the underlying op `Output`.",\n # Disabling warning here since we're passing this internally and I'm not sure whether it is\n # actually used or discarded.\n emit_runtime_warning=False,\n)\nclass OutputMapping(NamedTuple):\n """Defines an output mapping for a graph.\n\n Args:\n graph_output_name (str): Name of the output in the graph being mapped to.\n mapped_node_name (str): Name of the node (op/graph) that the output is being mapped from.\n mapped_node_output_name (str): Name of the output in the node (op/graph) that is being mapped from.\n graph_output_description (Optional[str]): A description of the output in the graph being mapped from.\n from_dynamic_mapping (bool): Set to true if the node being mapped to is a mapped dynamic node.\n dagster_type (Optional[DagsterType]): The dagster type of the graph's output being mapped to.\n\n Examples:\n .. code-block:: python\n\n from dagster import OutputMapping, GraphDefinition, op, graph, GraphOut\n\n @op\n def emit_five():\n return 5\n\n # The following two graph definitions are equivalent\n GraphDefinition(\n name="the_graph",\n node_defs=[emit_five],\n output_mappings=[\n OutputMapping(\n graph_output_name="result", # Default output name\n mapped_node_name="emit_five",\n mapped_node_output_name="result"\n )\n ]\n )\n\n @graph(out=GraphOut())\n def the_graph():\n return emit_five()\n """\n\n graph_output_name: str\n mapped_node_name: str\n mapped_node_output_name: str\n graph_output_description: Optional[str] = None\n dagster_type: Optional[DagsterType] = None\n from_dynamic_mapping: bool = False\n\n @property\n def maps_from(self) -> OutputPointer:\n return OutputPointer(self.mapped_node_name, self.mapped_node_output_name)\n\n def get_definition(self, is_dynamic: bool) -> "OutputDefinition":\n check.invariant(not is_dynamic or self.from_dynamic_mapping)\n is_dynamic = is_dynamic or self.from_dynamic_mapping\n klass = DynamicOutputDefinition if is_dynamic else OutputDefinition\n return klass(\n name=self.graph_output_name,\n description=self.graph_output_description,\n dagster_type=self.dagster_type,\n )
\n\n\n
[docs]class Out(\n NamedTuple(\n "_Out",\n [\n ("dagster_type", PublicAttr[Union[DagsterType, Type[NoValueSentinel]]]),\n ("description", PublicAttr[Optional[str]]),\n ("is_required", PublicAttr[bool]),\n ("io_manager_key", PublicAttr[str]),\n ("metadata", PublicAttr[Optional[MetadataUserInput]]),\n ("code_version", PublicAttr[Optional[str]]),\n ],\n )\n):\n """Defines an output from an op's compute function.\n\n Ops can have multiple outputs, in which case outputs cannot be anonymous.\n\n Many ops have only one output, in which case the user can provide a single output definition\n that will be given the default name, "result".\n\n Outs may be typed using the Dagster type system.\n\n Args:\n dagster_type (Optional[Union[Type, DagsterType]]]):\n The type of this output. Should only be set if the correct type can not\n be inferred directly from the type signature of the decorated function.\n description (Optional[str]): Human-readable description of the output.\n is_required (bool): Whether the presence of this field is required. (default: True)\n io_manager_key (Optional[str]): The resource key of the output manager used for this output.\n (default: "io_manager").\n metadata (Optional[Dict[str, Any]]): A dict of the metadata for the output.\n For example, users can provide a file path if the data object will be stored in a\n filesystem, or provide information of a database table when it is going to load the data\n into the table.\n code_version (Optional[str]): (Experimental) Version of the code that generates this output. In\n general, versions should be set only for code that deterministically produces the same\n output when given the same inputs.\n """\n\n def __new__(\n cls,\n dagster_type: Union[Type, DagsterType] = NoValueSentinel,\n description: Optional[str] = None,\n is_required: bool = True,\n io_manager_key: Optional[str] = None,\n metadata: Optional[ArbitraryMetadataMapping] = None,\n code_version: Optional[str] = None,\n # make sure new parameters are updated in combine_with_inferred below\n ):\n return super(Out, cls).__new__(\n cls,\n dagster_type=(\n NoValueSentinel\n if dagster_type is NoValueSentinel\n else resolve_dagster_type(dagster_type)\n ),\n description=description,\n is_required=check.bool_param(is_required, "is_required"),\n io_manager_key=check.opt_str_param(\n io_manager_key, "io_manager_key", default=DEFAULT_IO_MANAGER_KEY\n ),\n metadata=metadata,\n code_version=code_version,\n )\n\n @classmethod\n def from_definition(cls, output_def: "OutputDefinition"):\n klass = Out if not output_def.is_dynamic else DynamicOut\n return klass(\n dagster_type=output_def.dagster_type,\n description=output_def.description,\n is_required=output_def.is_required,\n io_manager_key=output_def.io_manager_key,\n metadata=output_def.metadata,\n code_version=output_def.code_version,\n )\n\n def to_definition(\n self,\n annotation_type: type,\n name: Optional[str],\n description: Optional[str],\n code_version: Optional[str],\n ) -> "OutputDefinition":\n dagster_type = (\n self.dagster_type\n if self.dagster_type is not NoValueSentinel\n else _checked_inferred_type(annotation_type)\n )\n\n klass = OutputDefinition if not self.is_dynamic else DynamicOutputDefinition\n\n return klass(\n dagster_type=dagster_type,\n name=name,\n description=self.description or description,\n is_required=self.is_required,\n io_manager_key=self.io_manager_key,\n metadata=self.metadata,\n code_version=self.code_version or code_version,\n )\n\n @property\n def is_dynamic(self) -> bool:\n return False
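# Usage sketch (not part of the module above): `Out` attaches a description,
# metadata, and an IO manager key to a single op output. Assumes only the public
# `op` API; the "warehouse_io_manager" resource key and table name are illustrative.
from dagster import Out, op


@op(
    out=Out(
        description="Number of rows written to the destination table",
        metadata={"table": "analytics.orders"},
        io_manager_key="warehouse_io_manager",
    )
)
def load_orders() -> int:
    return 123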
\n\n\n
[docs]class DynamicOut(Out):\n """Variant of :py:class:`Out <dagster.Out>` for an output that will dynamically alter the graph at\n runtime.\n\n When using in a composition function such as :py:func:`@graph <dagster.graph>`,\n dynamic outputs must be used with either\n\n * ``map`` - clone downstream ops for each separate :py:class:`DynamicOut`\n * ``collect`` - gather across all :py:class:`DynamicOut` in to a list\n\n Uses the same constructor as :py:class:`Out <dagster.Out>`\n\n .. code-block:: python\n\n @op(\n config_schema={\n "path": Field(str, default_value=file_relative_path(__file__, "sample"))\n },\n out=DynamicOut(str),\n )\n def files_in_directory(context):\n path = context.op_config["path"]\n dirname, _, filenames = next(os.walk(path))\n for file in filenames:\n yield DynamicOutput(os.path.join(dirname, file), mapping_key=_clean(file))\n\n @job\n def process_directory():\n files = files_in_directory()\n\n # use map to invoke an op on each dynamic output\n file_results = files.map(process_file)\n\n # use collect to gather the results in to a list\n summarize_directory(file_results.collect())\n """\n\n def to_definition(\n self,\n annotation_type: type,\n name: Optional[str],\n description: Optional[str],\n code_version: Optional[str],\n ) -> "OutputDefinition":\n dagster_type = (\n self.dagster_type\n if self.dagster_type is not NoValueSentinel\n else _checked_inferred_type(annotation_type)\n )\n\n return DynamicOutputDefinition(\n dagster_type=dagster_type,\n name=name,\n description=self.description or description,\n is_required=self.is_required,\n io_manager_key=self.io_manager_key,\n metadata=self.metadata,\n code_version=self.code_version or code_version,\n )\n\n @property\n def is_dynamic(self) -> bool:\n return True
\n\n\n
[docs]class GraphOut(NamedTuple("_GraphOut", [("description", PublicAttr[Optional[str]])])):\n """Represents information about the outputs that a graph maps.\n\n Args:\n description (Optional[str]): Human-readable description of the output.\n """\n\n def __new__(cls, description: Optional[str] = None):\n return super(GraphOut, cls).__new__(cls, description=description)\n\n def to_definition(self, name: Optional[str]) -> "OutputDefinition":\n return OutputDefinition(name=name, description=self.description)
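# Usage sketch (not part of the module above): `GraphOut` names the outputs a graph
# exposes; returning a dict keyed by those names maps inner op outputs to them.
# Assumes the public `graph`/`op` APIs; names are illustrative.
from dagster import GraphOut, graph, op


@op
def fetch_rows() -> int:
    return 10


@op
def fetch_errors() -> int:
    return 0


@graph(out={"rows": GraphOut(), "errors": GraphOut()})
def fetch_metrics():
    return {"rows": fetch_rows(), "errors": fetch_errors()}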
\n
", "current_page_name": "_modules/dagster/_core/definitions/output", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.output"}, "partition": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.partition

\nimport copy\nimport hashlib\nimport json\nfrom abc import ABC, abstractmethod\nfrom collections import defaultdict\nfrom datetime import (\n    datetime,\n    timedelta,\n)\nfrom enum import Enum\nfrom typing import (\n    Any,\n    Callable,\n    Dict,\n    Generic,\n    Iterable,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Type,\n    Union,\n    cast,\n)\n\nfrom dateutil.relativedelta import relativedelta\nfrom typing_extensions import TypeVar\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, deprecated, deprecated_param, public\nfrom dagster._core.definitions.partition_key_range import PartitionKeyRange\nfrom dagster._core.definitions.run_request import (\n    AddDynamicPartitionsRequest,\n    DeleteDynamicPartitionsRequest,\n)\nfrom dagster._core.instance import DagsterInstance, DynamicPartitionsStore\nfrom dagster._core.storage.tags import PARTITION_NAME_TAG, PARTITION_SET_TAG\nfrom dagster._serdes import whitelist_for_serdes\nfrom dagster._utils import xor\nfrom dagster._utils.cached_method import cached_method\nfrom dagster._utils.warnings import (\n    normalize_renamed_param,\n)\n\nfrom ..errors import (\n    DagsterInvalidDefinitionError,\n    DagsterInvalidDeserializationVersionError,\n    DagsterInvalidInvocationError,\n    DagsterUnknownPartitionError,\n)\nfrom .config import ConfigMapping\nfrom .utils import validate_tags\n\nDEFAULT_DATE_FORMAT = "%Y-%m-%d"\n\nT_cov = TypeVar("T_cov", default=Any, covariant=True)\nT_str = TypeVar("T_str", bound=str, default=str, covariant=True)\nT_PartitionsDefinition = TypeVar(\n    "T_PartitionsDefinition",\n    bound="PartitionsDefinition",\n    default="PartitionsDefinition",\n    covariant=True,\n)\n\n# In the Dagster UI users can select partition ranges following the format '2022-01-13...2022-01-14'\n# "..." is an invalid substring in partition keys\n# The other escape characters are characters that may not display in the Dagster UI.\nINVALID_PARTITION_SUBSTRINGS = ["...", "\\a", "\\b", "\\f", "\\n", "\\r", "\\t", "\\v", "\\0"]\n\n\n@deprecated(breaking_version="2.0", additional_warn_text="Use string partition keys instead.")\nclass Partition(Generic[T_cov]):\n    """A Partition represents a single slice of the entire set of a job's possible work. 
It consists\n    of a value, which is an object that represents that partition, and an optional name, which is\n    used to label the partition in a human-readable way.\n\n    Args:\n        value (Any): The object for this partition\n        name (str): Name for this partition\n    """\n\n    def __init__(self, value: Any, name: Optional[str] = None):\n        self._value = value\n        self._name = check.str_param(name or str(value), "name")\n\n    @property\n    def value(self) -> T_cov:\n        return self._value\n\n    @property\n    def name(self) -> str:\n        return self._name\n\n    def __eq__(self, other: object) -> bool:\n        if not isinstance(other, Partition):\n            return False\n        else:\n            return self.value == other.value and self.name == other.name\n\n\n@whitelist_for_serdes\nclass ScheduleType(Enum):\n    HOURLY = "HOURLY"\n    DAILY = "DAILY"\n    WEEKLY = "WEEKLY"\n    MONTHLY = "MONTHLY"\n\n    @property\n    def ordinal(self):\n        return {"HOURLY": 1, "DAILY": 2, "WEEKLY": 3, "MONTHLY": 4}[self.value]\n\n    @property\n    def delta(self):\n        if self == ScheduleType.HOURLY:\n            return timedelta(hours=1)\n        elif self == ScheduleType.DAILY:\n            return timedelta(days=1)\n        elif self == ScheduleType.WEEKLY:\n            return timedelta(weeks=1)\n        elif self == ScheduleType.MONTHLY:\n            return relativedelta(months=1)\n        else:\n            check.failed(f"Unexpected ScheduleType {self}")\n\n    def __gt__(self, other: "ScheduleType") -> bool:\n        check.inst(other, ScheduleType, "Cannot compare ScheduleType with non-ScheduleType")\n        return self.ordinal > other.ordinal\n\n    def __lt__(self, other: "ScheduleType") -> bool:\n        check.inst(other, ScheduleType, "Cannot compare ScheduleType with non-ScheduleType")\n        return self.ordinal < other.ordinal\n\n\n
[docs]class PartitionsDefinition(ABC, Generic[T_str]):\n """Defines a set of partitions, which can be attached to a software-defined asset or job.\n\n Abstract class with implementations for different kinds of partitions.\n """\n\n @property\n def partitions_subset_class(self) -> Type["PartitionsSubset[T_str]"]:\n return DefaultPartitionsSubset[T_str]\n\n
[docs] @abstractmethod\n @public\n def get_partition_keys(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Sequence[T_str]:\n """Returns a list of strings representing the partition keys of the PartitionsDefinition.\n\n Args:\n current_time (Optional[datetime]): A datetime object representing the current time, only\n applicable to time-based partitions definitions.\n dynamic_partitions_store (Optional[DynamicPartitionsStore]): The DynamicPartitionsStore\n object that is responsible for fetching dynamic partitions. Required when the\n partitions definition is a DynamicPartitionsDefinition with a name defined. Users\n can pass the DagsterInstance fetched via `context.instance` to this argument.\n\n Returns:\n Sequence[str]\n """\n ...
\n\n def __str__(self) -> str:\n joined_keys = ", ".join([f"'{key}'" for key in self.get_partition_keys()])\n return joined_keys\n\n def get_last_partition_key(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Optional[T_str]:\n partition_keys = self.get_partition_keys(current_time, dynamic_partitions_store)\n return partition_keys[-1] if partition_keys else None\n\n def get_first_partition_key(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Optional[T_str]:\n partition_keys = self.get_partition_keys(current_time, dynamic_partitions_store)\n return partition_keys[0] if partition_keys else None\n\n def get_partition_keys_in_range(\n self,\n partition_key_range: PartitionKeyRange,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Sequence[T_str]:\n keys_exist = {\n partition_key_range.start: self.has_partition_key(\n partition_key_range.start, dynamic_partitions_store=dynamic_partitions_store\n ),\n partition_key_range.end: self.has_partition_key(\n partition_key_range.end, dynamic_partitions_store=dynamic_partitions_store\n ),\n }\n if not all(keys_exist.values()):\n raise DagsterInvalidInvocationError(\n f"""Partition range {partition_key_range.start} to {partition_key_range.end} is\n not a valid range. Nonexistent partition keys:\n {list(key for key in keys_exist if keys_exist[key] is False)}"""\n )\n\n # in the simple case, simply return the single key in the range\n if partition_key_range.start == partition_key_range.end:\n return [cast(T_str, partition_key_range.start)]\n\n # defer this call as it is potentially expensive\n partition_keys = self.get_partition_keys(dynamic_partitions_store=dynamic_partitions_store)\n return partition_keys[\n partition_keys.index(partition_key_range.start) : partition_keys.index(\n partition_key_range.end\n )\n + 1\n ]\n\n def empty_subset(self) -> "PartitionsSubset[T_str]":\n return self.partitions_subset_class.empty_subset(self)\n\n def subset_with_partition_keys(\n self, partition_keys: Iterable[str]\n ) -> "PartitionsSubset[T_str]":\n return self.empty_subset().with_partition_keys(partition_keys)\n\n def subset_with_all_partitions(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> "PartitionsSubset[T_str]":\n return self.subset_with_partition_keys(\n self.get_partition_keys(\n current_time=current_time, dynamic_partitions_store=dynamic_partitions_store\n )\n )\n\n def deserialize_subset(self, serialized: str) -> "PartitionsSubset[T_str]":\n return self.partitions_subset_class.from_serialized(self, serialized)\n\n def can_deserialize_subset(\n self,\n serialized: str,\n serialized_partitions_def_unique_id: Optional[str],\n serialized_partitions_def_class_name: Optional[str],\n ) -> bool:\n return self.partitions_subset_class.can_deserialize(\n self,\n serialized,\n serialized_partitions_def_unique_id,\n serialized_partitions_def_class_name,\n )\n\n def get_serializable_unique_identifier(\n self, dynamic_partitions_store: Optional[DynamicPartitionsStore] = None\n ) -> str:\n return hashlib.sha1(\n json.dumps(\n self.get_partition_keys(dynamic_partitions_store=dynamic_partitions_store)\n ).encode("utf-8")\n ).hexdigest()\n\n def get_tags_for_partition_key(self, partition_key: str) -> Mapping[str, str]:\n tags = {PARTITION_NAME_TAG: partition_key}\n return tags\n\n def get_num_partitions(\n self,\n 
current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> int:\n return len(self.get_partition_keys(current_time, dynamic_partitions_store))\n\n def has_partition_key(\n self,\n partition_key: str,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> bool:\n return partition_key in self.get_partition_keys(\n current_time=current_time,\n dynamic_partitions_store=dynamic_partitions_store,\n )\n\n def validate_partition_key(\n self,\n partition_key: str,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> None:\n if not self.has_partition_key(partition_key, current_time, dynamic_partitions_store):\n raise DagsterUnknownPartitionError(\n f"Could not find a partition with key `{partition_key}`."\n )
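# Usage sketch (not part of the module above): the helpers defined on
# PartitionsDefinition can be exercised directly against a static definition.
# PartitionKeyRange is imported from the module path shown later on this page.
from dagster import StaticPartitionsDefinition
from dagster._core.definitions.partition_key_range import PartitionKeyRange

letters = StaticPartitionsDefinition(["a", "b", "c", "d"])

assert letters.get_num_partitions() == 4
assert letters.get_first_partition_key() == "a"
assert letters.get_partition_keys_in_range(PartitionKeyRange("b", "d")) == ["b", "c", "d"]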
\n\n\ndef raise_error_on_invalid_partition_key_substring(partition_keys: Sequence[str]) -> None:\n for partition_key in partition_keys:\n found_invalid_substrs = [\n invalid_substr\n for invalid_substr in INVALID_PARTITION_SUBSTRINGS\n if invalid_substr in partition_key\n ]\n if found_invalid_substrs:\n raise DagsterInvalidDefinitionError(\n f"{found_invalid_substrs} are invalid substrings in a partition key"\n )\n\n\ndef raise_error_on_duplicate_partition_keys(partition_keys: Sequence[str]) -> None:\n counts: Dict[str, int] = defaultdict(lambda: 0)\n for partition_key in partition_keys:\n counts[partition_key] += 1\n found_duplicates = [key for key in counts.keys() if counts[key] > 1]\n if found_duplicates:\n raise DagsterInvalidDefinitionError(\n "Partition keys must be unique. Duplicate instances of partition keys:"\n f" {found_duplicates}."\n )\n\n\n
[docs]class StaticPartitionsDefinition(PartitionsDefinition[str]):\n """A statically-defined set of partitions.\n\n Example:\n .. code-block:: python\n\n from dagster import StaticPartitionsDefinition, asset\n\n oceans_partitions_def = StaticPartitionsDefinition(\n ["arctic", "atlantic", "indian", "pacific", "southern"]\n )\n\n @asset(partitions_def=oceans_partitions_def)\n def ml_model_for_each_ocean():\n ...\n """\n\n def __init__(self, partition_keys: Sequence[str]):\n check.sequence_param(partition_keys, "partition_keys", of_type=str)\n\n raise_error_on_invalid_partition_key_substring(partition_keys)\n raise_error_on_duplicate_partition_keys(partition_keys)\n\n self._partition_keys = partition_keys\n\n
[docs] @public\n def get_partition_keys(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Sequence[str]:\n """Returns a list of strings representing the partition keys of the PartitionsDefinition.\n\n Args:\n current_time (Optional[datetime]): A datetime object representing the current time, only\n applicable to time-based partitions definitions.\n dynamic_partitions_store (Optional[DynamicPartitionsStore]): The DynamicPartitionsStore\n object that is responsible for fetching dynamic partitions. Only applicable to\n DynamicPartitionsDefinitions.\n\n Returns:\n Sequence[str]\n\n """\n return self._partition_keys
\n\n def __hash__(self):\n return hash(self.__repr__())\n\n def __eq__(self, other) -> bool:\n return isinstance(other, StaticPartitionsDefinition) and (\n self is other or self._partition_keys == other.get_partition_keys()\n )\n\n def __repr__(self) -> str:\n return f"{type(self).__name__}(partition_keys={self._partition_keys})"\n\n def get_num_partitions(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> int:\n # We don't currently throw an error when a duplicate partition key is defined\n # in a static partitions definition, though we will at 1.3.0.\n # This ensures that partition counts are correct in the Dagster UI.\n return len(set(self.get_partition_keys(current_time, dynamic_partitions_store)))
\n\n\nclass CachingDynamicPartitionsLoader(DynamicPartitionsStore):\n """A batch loader that caches the partition keys for a given dynamic partitions definition,\n to avoid repeated calls to the database for the same partitions definition.\n """\n\n def __init__(self, instance: DagsterInstance):\n self._instance = instance\n\n @cached_method\n def get_dynamic_partitions(self, partitions_def_name: str) -> Sequence[str]:\n return self._instance.get_dynamic_partitions(partitions_def_name)\n\n @cached_method\n def has_dynamic_partition(self, partitions_def_name: str, partition_key: str) -> bool:\n return self._instance.has_dynamic_partition(partitions_def_name, partition_key)\n\n\n
[docs]@deprecated_param(\n param="partition_fn",\n breaking_version="2.0",\n additional_warn_text="Provide partition definition name instead.",\n)\nclass DynamicPartitionsDefinition(\n PartitionsDefinition,\n NamedTuple(\n "_DynamicPartitionsDefinition",\n [\n (\n "partition_fn",\n PublicAttr[\n Optional[\n Callable[[Optional[datetime]], Union[Sequence[Partition], Sequence[str]]]\n ]\n ],\n ),\n ("name", PublicAttr[Optional[str]]),\n ],\n ),\n):\n """A partitions definition whose partition keys can be dynamically added and removed.\n\n This is useful for cases where the set of partitions is not known at definition time,\n but is instead determined at runtime.\n\n Partitions can be added and removed using `instance.add_dynamic_partitions` and\n `instance.delete_dynamic_partition` methods.\n\n Args:\n name (Optional[str]): The name of the partitions definition.\n partition_fn (Optional[Callable[[Optional[datetime]], Union[Sequence[Partition], Sequence[str]]]]):\n A function that returns the current set of partitions. This argument is deprecated and\n will be removed in 2.0.0.\n\n Examples:\n .. code-block:: python\n\n fruits = DynamicPartitionsDefinition(name="fruits")\n\n @sensor(job=my_job)\n def my_sensor(context):\n return SensorResult(\n run_requests=[RunRequest(partition_key="apple")],\n dynamic_partitions_requests=[fruits.build_add_request(["apple"])]\n )\n """\n\n def __new__(\n cls,\n partition_fn: Optional[\n Callable[[Optional[datetime]], Union[Sequence[Partition], Sequence[str]]]\n ] = None,\n name: Optional[str] = None,\n ):\n partition_fn = check.opt_callable_param(partition_fn, "partition_fn")\n name = check.opt_str_param(name, "name")\n\n if partition_fn is None and name is None:\n raise DagsterInvalidDefinitionError(\n "Must provide either partition_fn or name to DynamicPartitionsDefinition."\n )\n\n if partition_fn and name:\n raise DagsterInvalidDefinitionError(\n "Cannot provide both partition_fn and name to DynamicPartitionsDefinition."\n )\n\n return super(DynamicPartitionsDefinition, cls).__new__(\n cls,\n partition_fn=check.opt_callable_param(partition_fn, "partition_fn"),\n name=check.opt_str_param(name, "name"),\n )\n\n def _validated_name(self) -> str:\n if self.name is None:\n check.failed(\n "Dynamic partitions definition must have a name to fetch dynamic partitions"\n )\n return self.name\n\n def __eq__(self, other):\n return (\n isinstance(other, DynamicPartitionsDefinition)\n and self.name == other.name\n and self.partition_fn == other.partition_fn\n )\n\n def __hash__(self):\n return hash(tuple(self.__repr__()))\n\n def __str__(self) -> str:\n if self.name:\n return f'Dynamic partitions: "{self._validated_name()}"'\n else:\n return super().__str__()\n\n
[docs] @public\n def get_partition_keys(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Sequence[str]:\n """Returns a list of strings representing the partition keys of the\n PartitionsDefinition.\n\n Args:\n current_time (Optional[datetime]): A datetime object representing the current time, only\n applicable to time-based partitions definitions.\n dynamic_partitions_store (Optional[DynamicPartitionsStore]): The DynamicPartitionsStore\n object that is responsible for fetching dynamic partitions. Required when the\n partitions definition is a DynamicPartitionsDefinition with a name defined. Users\n can pass the DagsterInstance fetched via `context.instance` to this argument.\n\n Returns:\n Sequence[str]\n """\n if self.partition_fn:\n partitions = self.partition_fn(current_time)\n if all(isinstance(partition, Partition) for partition in partitions):\n return [partition.name for partition in partitions] # type: ignore # (illegible conditional)\n else:\n return partitions # type: ignore # (illegible conditional)\n else:\n check.opt_inst_param(\n dynamic_partitions_store, "dynamic_partitions_store", DynamicPartitionsStore\n )\n\n if dynamic_partitions_store is None:\n check.failed(\n "The instance is not available to load partitions. You may be seeing this error"\n " when using dynamic partitions with a version of dagster-webserver or"\n " dagster-cloud that is older than 1.1.18."\n )\n\n return dynamic_partitions_store.get_dynamic_partitions(\n partitions_def_name=self._validated_name()\n )
\n\n def has_partition_key(\n self,\n partition_key: str,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> bool:\n if self.partition_fn:\n return partition_key in self.get_partition_keys(current_time)\n else:\n if dynamic_partitions_store is None:\n check.failed(\n "The instance is not available to load partitions. You may be seeing this error"\n " when using dynamic partitions with a version of dagster-webserver or"\n " dagster-cloud that is older than 1.1.18."\n )\n\n return dynamic_partitions_store.has_dynamic_partition(\n partitions_def_name=self._validated_name(), partition_key=partition_key\n )\n\n def build_add_request(self, partition_keys: Sequence[str]) -> AddDynamicPartitionsRequest:\n check.sequence_param(partition_keys, "partition_keys", of_type=str)\n validated_name = self._validated_name()\n return AddDynamicPartitionsRequest(validated_name, partition_keys)\n\n def build_delete_request(self, partition_keys: Sequence[str]) -> DeleteDynamicPartitionsRequest:\n check.sequence_param(partition_keys, "partition_keys", of_type=str)\n validated_name = self._validated_name()\n return DeleteDynamicPartitionsRequest(validated_name, partition_keys)
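# Usage sketch (not part of the module above): outside of sensors, dynamic partition
# keys live on the instance, which doubles as the DynamicPartitionsStore passed to
# get_partition_keys. Assumes an ephemeral DagsterInstance whose storage supports
# dynamic partitions; in a real deployment this would be the configured instance.
from dagster import DagsterInstance, DynamicPartitionsDefinition

customers = DynamicPartitionsDefinition(name="customers")
instance = DagsterInstance.ephemeral()

instance.add_dynamic_partitions("customers", ["acme", "globex"])
assert set(customers.get_partition_keys(dynamic_partitions_store=instance)) == {"acme", "globex"}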
\n\n\n
[docs]@deprecated_param(\n param="run_config_for_partition_fn",\n breaking_version="2.0",\n additional_warn_text="Use `run_config_for_partition_key_fn` instead.",\n)\n@deprecated_param(\n param="tags_for_partition_fn",\n breaking_version="2.0",\n additional_warn_text="Use `tags_for_partition_key_fn` instead.",\n)\nclass PartitionedConfig(Generic[T_PartitionsDefinition]):\n """Defines a way of configuring a job where the job can be run on one of a discrete set of\n partitions, and each partition corresponds to run configuration for the job.\n\n Setting PartitionedConfig as the config for a job allows you to launch backfills for that job\n and view the run history across partitions.\n """\n\n def __init__(\n self,\n partitions_def: T_PartitionsDefinition,\n run_config_for_partition_fn: Optional[Callable[[Partition], Mapping[str, Any]]] = None,\n decorated_fn: Optional[Callable[..., Mapping[str, Any]]] = None,\n tags_for_partition_fn: Optional[Callable[[Partition[Any]], Mapping[str, str]]] = None,\n run_config_for_partition_key_fn: Optional[Callable[[str], Mapping[str, Any]]] = None,\n tags_for_partition_key_fn: Optional[Callable[[str], Mapping[str, str]]] = None,\n ):\n self._partitions = check.inst_param(partitions_def, "partitions_def", PartitionsDefinition)\n self._decorated_fn = decorated_fn\n\n check.invariant(\n xor(run_config_for_partition_fn, run_config_for_partition_key_fn),\n "Must provide exactly one of run_config_for_partition_fn or"\n " run_config_for_partition_key_fn",\n )\n check.invariant(\n not (tags_for_partition_fn and tags_for_partition_key_fn),\n "Cannot provide both of tags_for_partition_fn or tags_for_partition_key_fn",\n )\n\n self._run_config_for_partition_fn = check.opt_callable_param(\n run_config_for_partition_fn, "run_config_for_partition_fn"\n )\n self._run_config_for_partition_key_fn = check.opt_callable_param(\n run_config_for_partition_key_fn, "run_config_for_partition_key_fn"\n )\n self._tags_for_partition_fn = check.opt_callable_param(\n tags_for_partition_fn, "tags_for_partition_fn"\n )\n self._tags_for_partition_key_fn = check.opt_callable_param(\n tags_for_partition_key_fn, "tags_for_partition_key_fn"\n )\n\n @public\n @property\n def partitions_def(\n self,\n ) -> T_PartitionsDefinition:\n """T_PartitionsDefinition: The partitions definition associated with this PartitionedConfig."""\n return self._partitions\n\n @deprecated(\n breaking_version="2.0",\n additional_warn_text="Use `run_config_for_partition_key_fn` instead.",\n )\n @public\n @property\n def run_config_for_partition_fn(\n self,\n ) -> Optional[Callable[[Partition], Mapping[str, Any]]]:\n """Optional[Callable[[Partition], Mapping[str, Any]]]: A function that accepts a partition\n and returns a dictionary representing the config to attach to runs for that partition.\n Deprecated as of 1.3.3.\n """\n return self._run_config_for_partition_fn\n\n @public\n @property\n def run_config_for_partition_key_fn(\n self,\n ) -> Optional[Callable[[str], Mapping[str, Any]]]:\n """Optional[Callable[[str], Mapping[str, Any]]]: A function that accepts a partition key\n and returns a dictionary representing the config to attach to runs for that partition.\n """\n\n @deprecated(\n breaking_version="2.0", additional_warn_text="Use `tags_for_partition_key_fn` instead."\n )\n @public\n @property\n def tags_for_partition_fn(self) -> Optional[Callable[[Partition], Mapping[str, str]]]:\n """Optional[Callable[[Partition], Mapping[str, str]]]: A function that\n accepts a partition and returns a dictionary of tags to 
attach to runs for\n that partition. Deprecated as of 1.3.3.\n """\n return self._tags_for_partition_fn\n\n @public\n @property\n def tags_for_partition_key_fn(\n self,\n ) -> Optional[Callable[[str], Mapping[str, str]]]:\n """Optional[Callable[[str], Mapping[str, str]]]: A function that\n accepts a partition key and returns a dictionary of tags to attach to runs for\n that partition.\n """\n return self._tags_for_partition_key_fn\n\n
[docs] @public\n def get_partition_keys(self, current_time: Optional[datetime] = None) -> Sequence[str]:\n """Returns a list of partition keys, representing the full set of partitions that\n config can be applied to.\n\n Args:\n current_time (Optional[datetime]): A datetime object representing the current time. Only\n applicable to time-based partitions definitions.\n\n Returns:\n Sequence[str]\n """\n return self.partitions_def.get_partition_keys(current_time)
\n\n # Assumes partition key already validated\n def get_run_config_for_partition_key(\n self,\n partition_key: str,\n ) -> Mapping[str, Any]:\n """Generates the run config corresponding to a partition key.\n\n Args:\n partition_key (str): the key for a partition that should be used to generate a run config.\n """\n # _run_config_for_partition_fn is deprecated, we can remove this branching logic in 2.0\n if self._run_config_for_partition_fn:\n run_config = self._run_config_for_partition_fn(Partition(partition_key))\n elif self._run_config_for_partition_key_fn:\n run_config = self._run_config_for_partition_key_fn(partition_key)\n else:\n check.failed("Unreachable.") # one of the above funcs always defined\n return copy.deepcopy(run_config)\n\n # Assumes partition key already validated\n def get_tags_for_partition_key(\n self,\n partition_key: str,\n job_name: Optional[str] = None,\n ) -> Mapping[str, str]:\n from dagster._core.host_representation.external_data import (\n external_partition_set_name_for_job_name,\n )\n\n # _tags_for_partition_fn is deprecated, we can remove this branching logic in 2.0\n if self._tags_for_partition_fn:\n user_tags = self._tags_for_partition_fn(Partition(partition_key))\n elif self._tags_for_partition_key_fn:\n user_tags = self._tags_for_partition_key_fn(partition_key)\n else:\n user_tags = {}\n user_tags = validate_tags(user_tags, allow_reserved_tags=False)\n\n system_tags = {\n **self.partitions_def.get_tags_for_partition_key(partition_key),\n **(\n # `PartitionSetDefinition` has been deleted but we still need to attach this special tag in\n # order for reexecution against partitions to work properly.\n {PARTITION_SET_TAG: external_partition_set_name_for_job_name(job_name)}\n if job_name\n else {}\n ),\n }\n\n return {**user_tags, **system_tags}\n\n @classmethod\n def from_flexible_config(\n cls,\n config: Optional[Union[ConfigMapping, Mapping[str, object], "PartitionedConfig"]],\n partitions_def: PartitionsDefinition,\n ) -> "PartitionedConfig":\n check.invariant(\n not isinstance(config, ConfigMapping),\n "Can't supply a ConfigMapping for 'config' when 'partitions_def' is supplied.",\n )\n\n if isinstance(config, PartitionedConfig):\n check.invariant(\n config.partitions_def == partitions_def,\n "Can't supply a PartitionedConfig for 'config' with a different "\n "PartitionsDefinition than supplied for 'partitions_def'.",\n )\n return config\n else:\n hardcoded_config = config if config else {}\n return cls(\n partitions_def,\n run_config_for_partition_key_fn=lambda _: cast(Mapping, hardcoded_config),\n )\n\n def __call__(self, *args, **kwargs):\n if self._decorated_fn is None:\n raise DagsterInvalidInvocationError(\n "Only PartitionedConfig objects created using one of the partitioned config "\n "decorators can be directly invoked."\n )\n else:\n return self._decorated_fn(*args, **kwargs)
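# Usage sketch (not part of the module above): constructing a PartitionedConfig
# directly and resolving the run config for a single partition key. The "ingest" op
# name inside the run config is illustrative.
from dagster import PartitionedConfig, StaticPartitionsDefinition

regions_config = PartitionedConfig(
    partitions_def=StaticPartitionsDefinition(["us", "eu"]),
    run_config_for_partition_key_fn=lambda key: {
        "ops": {"ingest": {"config": {"region": key}}}
    },
)

assert regions_config.get_partition_keys() == ["us", "eu"]
assert regions_config.get_run_config_for_partition_key("eu") == {
    "ops": {"ingest": {"config": {"region": "eu"}}}
}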
\n\n\n
[docs]@deprecated_param(\n param="tags_for_partition_fn",\n breaking_version="2.0",\n additional_warn_text="Use tags_for_partition_key_fn instead.",\n)\ndef static_partitioned_config(\n partition_keys: Sequence[str],\n tags_for_partition_fn: Optional[Callable[[str], Mapping[str, str]]] = None,\n tags_for_partition_key_fn: Optional[Callable[[str], Mapping[str, str]]] = None,\n) -> Callable[[Callable[[str], Mapping[str, Any]]], PartitionedConfig[StaticPartitionsDefinition]]:\n """Creates a static partitioned config for a job.\n\n The provided partition_keys is a static list of strings identifying the set of partitions. The\n list of partitions is static, so while the run config returned by the decorated function may\n change over time, the list of valid partition keys does not.\n\n This has performance advantages over `dynamic_partitioned_config` in terms of loading different\n partition views in the Dagster UI.\n\n The decorated function takes in a partition key and returns a valid run config for a particular\n target job.\n\n Args:\n partition_keys (Sequence[str]): A list of valid partition keys, which serve as the range of\n values that can be provided to the decorated run config function.\n tags_for_partition_fn (Optional[Callable[[str], Mapping[str, str]]]): A function that\n accepts a partition key and returns a dictionary of tags to attach to runs for that\n partition.\n tags_for_partition_key_fn (Optional[Callable[[str], Mapping[str, str]]]): A function that\n accepts a partition key and returns a dictionary of tags to attach to runs for that\n partition.\n\n Returns:\n PartitionedConfig\n """\n check.sequence_param(partition_keys, "partition_keys", str)\n\n tags_for_partition_key_fn = normalize_renamed_param(\n tags_for_partition_key_fn,\n "tags_for_partition_key_fn",\n tags_for_partition_fn,\n "tags_for_partition_fn",\n )\n\n def inner(\n fn: Callable[[str], Mapping[str, Any]]\n ) -> PartitionedConfig[StaticPartitionsDefinition]:\n return PartitionedConfig(\n partitions_def=StaticPartitionsDefinition(partition_keys),\n run_config_for_partition_key_fn=fn,\n decorated_fn=fn,\n tags_for_partition_key_fn=tags_for_partition_key_fn,\n )\n\n return inner
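# Usage sketch (not part of the module above): the decorated function maps a
# partition key to run config, and the resulting PartitionedConfig is attached to a
# job via its `config` argument. Names are illustrative.
from dagster import job, op, static_partitioned_config


@static_partitioned_config(partition_keys=["us", "eu", "apac"])
def region_config(partition_key: str):
    return {"ops": {"ingest": {"config": {"region": partition_key}}}}


@op(config_schema={"region": str})
def ingest(context):
    context.log.info(f"ingesting region {context.op_config['region']}")


@job(config=region_config)
def ingest_job():
    ingest()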
\n\n\ndef partitioned_config(\n partitions_def: PartitionsDefinition,\n tags_for_partition_key_fn: Optional[Callable[[str], Mapping[str, str]]] = None,\n) -> Callable[[Callable[[str], Mapping[str, Any]]], PartitionedConfig]:\n """Creates a partitioned config for a job given a PartitionsDefinition.\n\n The partitions_def provides the set of partitions, which may change over time\n (for example, when using a DynamicPartitionsDefinition).\n\n The decorated function takes in a partition key and returns a valid run config for a particular\n target job.\n\n Args:\n partitions_def: (Optional[DynamicPartitionsDefinition]): PartitionsDefinition for the job\n tags_for_partition_key_fn (Optional[Callable[[str], Mapping[str, str]]]): A function that\n accepts a partition key and returns a dictionary of tags to attach to runs for that\n partition.\n\n Returns:\n PartitionedConfig\n """\n check.opt_callable_param(tags_for_partition_key_fn, "tags_for_partition_key_fn")\n\n def inner(fn: Callable[[str], Mapping[str, Any]]) -> PartitionedConfig:\n return PartitionedConfig(\n partitions_def=partitions_def,\n run_config_for_partition_key_fn=fn,\n decorated_fn=fn,\n tags_for_partition_key_fn=tags_for_partition_key_fn,\n )\n\n return inner\n\n\n
[docs]@deprecated_param(\n param="tags_for_partition_fn",\n breaking_version="2.0",\n additional_warn_text="Use tags_for_partition_key_fn instead.",\n)\ndef dynamic_partitioned_config(\n partition_fn: Callable[[Optional[datetime]], Sequence[str]],\n tags_for_partition_fn: Optional[Callable[[str], Mapping[str, str]]] = None,\n tags_for_partition_key_fn: Optional[Callable[[str], Mapping[str, str]]] = None,\n) -> Callable[[Callable[[str], Mapping[str, Any]]], PartitionedConfig]:\n """Creates a dynamic partitioned config for a job.\n\n The provided partition_fn returns a list of strings identifying the set of partitions, given\n an optional datetime argument (representing the current time). The list of partitions returned\n may change over time.\n\n The decorated function takes in a partition key and returns a valid run config for a particular\n target job.\n\n Args:\n partition_fn (Callable[[datetime.datetime], Sequence[str]]): A function that generates a\n list of valid partition keys, which serve as the range of values that can be provided\n to the decorated run config function.\n tags_for_partition_fn (Optional[Callable[[str], Mapping[str, str]]]): A function that\n accepts a partition key and returns a dictionary of tags to attach to runs for that\n partition.\n\n Returns:\n PartitionedConfig\n """\n check.callable_param(partition_fn, "partition_fn")\n\n tags_for_partition_key_fn = normalize_renamed_param(\n tags_for_partition_key_fn,\n "tags_for_partition_key_fn",\n tags_for_partition_fn,\n "tags_for_partition_fn",\n )\n\n def inner(fn: Callable[[str], Mapping[str, Any]]) -> PartitionedConfig:\n return PartitionedConfig(\n partitions_def=DynamicPartitionsDefinition(partition_fn),\n run_config_for_partition_key_fn=fn,\n decorated_fn=fn,\n tags_for_partition_key_fn=tags_for_partition_key_fn,\n )\n\n return inner
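# Usage sketch (not part of the module above): the key set is produced by a function
# at evaluation time, so it can change between calls. Names are illustrative.
from datetime import datetime, timedelta
from typing import Optional, Sequence

from dagster import dynamic_partitioned_config


def last_three_days(current_time: Optional[datetime]) -> Sequence[str]:
    end = current_time or datetime.now()
    return [(end - timedelta(days=n)).strftime("%Y-%m-%d") for n in range(3)]


@dynamic_partitioned_config(partition_fn=last_three_days)
def date_config(partition_key: str):
    return {"ops": {"process_date": {"config": {"date": partition_key}}}}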
\n\n\ndef cron_schedule_from_schedule_type_and_offsets(\n schedule_type: ScheduleType,\n minute_offset: int,\n hour_offset: int,\n day_offset: Optional[int],\n) -> str:\n if schedule_type is ScheduleType.HOURLY:\n return f"{minute_offset} * * * *"\n elif schedule_type is ScheduleType.DAILY:\n return f"{minute_offset} {hour_offset} * * *"\n elif schedule_type is ScheduleType.WEEKLY:\n return f"{minute_offset} {hour_offset} * * {day_offset if day_offset is not None else 0}"\n elif schedule_type is ScheduleType.MONTHLY:\n return f"{minute_offset} {hour_offset} {day_offset if day_offset is not None else 1} * *"\n else:\n check.assert_never(schedule_type)\n\n\nclass PartitionsSubset(ABC, Generic[T_str]):\n """Represents a subset of the partitions within a PartitionsDefinition."""\n\n @abstractmethod\n def get_partition_keys_not_in_subset(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Iterable[T_str]: ...\n\n @abstractmethod\n @public\n def get_partition_keys(self, current_time: Optional[datetime] = None) -> Iterable[T_str]: ...\n\n @abstractmethod\n def get_partition_key_ranges(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Sequence[PartitionKeyRange]: ...\n\n @abstractmethod\n def with_partition_keys(self, partition_keys: Iterable[str]) -> "PartitionsSubset[T_str]": ...\n\n def with_partition_key_range(\n self,\n partition_key_range: PartitionKeyRange,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> "PartitionsSubset[T_str]":\n return self.with_partition_keys(\n self.partitions_def.get_partition_keys_in_range(\n partition_key_range, dynamic_partitions_store=dynamic_partitions_store\n )\n )\n\n def __or__(self, other: "PartitionsSubset") -> "PartitionsSubset[T_str]":\n if self is other:\n return self\n return self.with_partition_keys(other.get_partition_keys())\n\n def __sub__(self, other: "PartitionsSubset") -> "PartitionsSubset[T_str]":\n if self is other:\n return self.partitions_def.empty_subset()\n return self.partitions_def.empty_subset().with_partition_keys(\n set(self.get_partition_keys()).difference(set(other.get_partition_keys()))\n )\n\n def __and__(self, other: "PartitionsSubset") -> "PartitionsSubset[T_str]":\n if self is other:\n return self\n return self.partitions_def.empty_subset().with_partition_keys(\n set(self.get_partition_keys()) & set(other.get_partition_keys())\n )\n\n @abstractmethod\n def serialize(self) -> str: ...\n\n @classmethod\n @abstractmethod\n def from_serialized(\n cls, partitions_def: PartitionsDefinition[T_str], serialized: str\n ) -> "PartitionsSubset[T_str]": ...\n\n @classmethod\n @abstractmethod\n def can_deserialize(\n cls,\n partitions_def: PartitionsDefinition,\n serialized: str,\n serialized_partitions_def_unique_id: Optional[str],\n serialized_partitions_def_class_name: Optional[str],\n ) -> bool: ...\n\n @property\n @abstractmethod\n def partitions_def(self) -> PartitionsDefinition[T_str]: ...\n\n @abstractmethod\n def __len__(self) -> int: ...\n\n @abstractmethod\n def __contains__(self, value) -> bool: ...\n\n @classmethod\n @abstractmethod\n def empty_subset(\n cls, partitions_def: PartitionsDefinition[T_str]\n ) -> "PartitionsSubset[T_str]": ...\n\n\n@whitelist_for_serdes\nclass SerializedPartitionsSubset(NamedTuple):\n serialized_subset: str\n serialized_partitions_def_unique_id: str\n serialized_partitions_def_class_name: str\n\n @classmethod\n def 
from_subset(\n cls,\n subset: PartitionsSubset,\n partitions_def: PartitionsDefinition,\n dynamic_partitions_store: DynamicPartitionsStore,\n ):\n return cls(\n serialized_subset=subset.serialize(),\n serialized_partitions_def_unique_id=partitions_def.get_serializable_unique_identifier(\n dynamic_partitions_store\n ),\n serialized_partitions_def_class_name=partitions_def.__class__.__name__,\n )\n\n def can_deserialize(self, partitions_def: Optional[PartitionsDefinition]) -> bool:\n if not partitions_def:\n # Asset had a partitions definition at storage time, but no longer does\n return False\n\n return partitions_def.can_deserialize_subset(\n self.serialized_subset,\n serialized_partitions_def_unique_id=self.serialized_partitions_def_unique_id,\n serialized_partitions_def_class_name=self.serialized_partitions_def_class_name,\n )\n\n def deserialize(self, partitions_def: PartitionsDefinition) -> PartitionsSubset:\n return partitions_def.deserialize_subset(self.serialized_subset)\n\n\nclass DefaultPartitionsSubset(PartitionsSubset[T_str]):\n # Every time we change the serialization format, we should increment the version number.\n # This will ensure that we can gracefully degrade when deserializing old data.\n SERIALIZATION_VERSION = 1\n\n def __init__(\n self, partitions_def: PartitionsDefinition[T_str], subset: Optional[Set[T_str]] = None\n ):\n check.opt_set_param(subset, "subset")\n self._partitions_def = partitions_def\n self._subset = subset or set()\n\n def get_partition_keys_not_in_subset(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Iterable[str]:\n return (\n set(\n self._partitions_def.get_partition_keys(\n current_time=current_time, dynamic_partitions_store=dynamic_partitions_store\n )\n )\n - self._subset\n )\n\n def get_partition_keys(self, current_time: Optional[datetime] = None) -> Iterable[str]:\n return self._subset\n\n def get_partition_key_ranges(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Sequence[PartitionKeyRange]:\n partition_keys = self._partitions_def.get_partition_keys(\n current_time, dynamic_partitions_store=dynamic_partitions_store\n )\n cur_range_start = None\n cur_range_end = None\n result = []\n for partition_key in partition_keys:\n if partition_key in self._subset:\n if cur_range_start is None:\n cur_range_start = partition_key\n cur_range_end = partition_key\n else:\n if cur_range_start is not None and cur_range_end is not None:\n result.append(PartitionKeyRange(cur_range_start, cur_range_end))\n cur_range_start = cur_range_end = None\n\n if cur_range_start is not None and cur_range_end is not None:\n result.append(PartitionKeyRange(cur_range_start, cur_range_end))\n\n return result\n\n def with_partition_keys(\n self, partition_keys: Iterable[T_str]\n ) -> "DefaultPartitionsSubset[T_str]":\n return DefaultPartitionsSubset(\n self._partitions_def,\n self._subset | set(partition_keys),\n )\n\n def serialize(self) -> str:\n # Serialize version number, so attempting to deserialize old versions can be handled gracefully.\n # Any time the serialization format changes, we should increment the version number.\n return json.dumps({"version": self.SERIALIZATION_VERSION, "subset": list(self._subset)})\n\n @classmethod\n def from_serialized(\n cls, partitions_def: PartitionsDefinition[T_str], serialized: str\n ) -> "PartitionsSubset[T_str]":\n # Check the version number, so only valid versions can be 
deserialized.\n data = json.loads(serialized)\n\n if isinstance(data, list):\n # backwards compatibility\n return cls(subset=set(data), partitions_def=partitions_def)\n else:\n if data.get("version") != cls.SERIALIZATION_VERSION:\n raise DagsterInvalidDeserializationVersionError(\n f"Attempted to deserialize partition subset with version {data.get('version')},"\n f" but only version {cls.SERIALIZATION_VERSION} is supported."\n )\n return cls(subset=set(data.get("subset")), partitions_def=partitions_def)\n\n @classmethod\n def can_deserialize(\n cls,\n partitions_def: PartitionsDefinition[T_str],\n serialized: str,\n serialized_partitions_def_unique_id: Optional[str],\n serialized_partitions_def_class_name: Optional[str],\n ) -> bool:\n if serialized_partitions_def_class_name is not None:\n return serialized_partitions_def_class_name == partitions_def.__class__.__name__\n\n data = json.loads(serialized)\n return isinstance(data, list) or (\n data.get("subset") is not None and data.get("version") == cls.SERIALIZATION_VERSION\n )\n\n @property\n def partitions_def(self) -> PartitionsDefinition[T_str]:\n return self._partitions_def\n\n def __eq__(self, other: object) -> bool:\n return (\n isinstance(other, DefaultPartitionsSubset)\n and self._partitions_def == other._partitions_def\n and self._subset == other._subset\n )\n\n def __len__(self) -> int:\n return len(self._subset)\n\n def __contains__(self, value) -> bool:\n return value in self._subset\n\n def __repr__(self) -> str:\n return (\n f"DefaultPartitionsSubset(subset={self._subset}, partitions_def={self._partitions_def})"\n )\n\n @classmethod\n def empty_subset(cls, partitions_def: PartitionsDefinition[T_str]) -> "PartitionsSubset[T_str]":\n return cls(partitions_def=partitions_def)\n
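# Usage sketch (not part of the module above): subsets behave like sets over a
# partitions definition and round-trip through serialize/deserialize_subset, using
# only methods defined in this module.
from dagster import StaticPartitionsDefinition

letters = StaticPartitionsDefinition(["a", "b", "c", "d"])
ab = letters.subset_with_partition_keys(["a", "b"])
bc = letters.subset_with_partition_keys(["b", "c"])

assert set((ab | bc).get_partition_keys()) == {"a", "b", "c"}
assert set((ab - bc).get_partition_keys()) == {"a"}
assert set(letters.deserialize_subset(ab.serialize()).get_partition_keys()) == {"a", "b"}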
", "current_page_name": "_modules/dagster/_core/definitions/partition", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.partition"}, "partition_key_range": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.partition_key_range

\nfrom typing import NamedTuple\n\nfrom dagster._annotations import PublicAttr\n\n\n
[docs]class PartitionKeyRange(NamedTuple):\n """Defines a range of partitions.\n\n Attributes:\n start (str): The starting partition key in the range (inclusive).\n end (str): The ending partition key in the range (inclusive).\n\n Examples:\n .. code-block:: python\n\n partitions_def = StaticPartitionsDefinition(["a", "b", "c", "d"])\n partition_key_range = PartitionKeyRange(start="a", end="c") # Represents ["a", "b", "c"]\n """\n\n # Inclusive on both sides\n start: PublicAttr[str]\n end: PublicAttr[str]
\n
", "current_page_name": "_modules/dagster/_core/definitions/partition_key_range", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.partition_key_range"}, "partition_mapping": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.partition_mapping

\nimport collections.abc\nimport itertools\nfrom abc import ABC, abstractmethod\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom typing import (\n    Collection,\n    Dict,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Tuple,\n    Type,\n    Union,\n    cast,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, experimental, public\nfrom dagster._core.definitions.multi_dimensional_partitions import (\n    MultiPartitionKey,\n    MultiPartitionsDefinition,\n)\nfrom dagster._core.definitions.partition import (\n    PartitionsDefinition,\n    PartitionsSubset,\n    StaticPartitionsDefinition,\n)\nfrom dagster._core.definitions.time_window_partitions import TimeWindowPartitionsDefinition\nfrom dagster._core.instance import DynamicPartitionsStore\nfrom dagster._serdes import whitelist_for_serdes\nfrom dagster._utils.cached_method import cached_method\nfrom dagster._utils.warnings import disable_dagster_warnings\n\n\nclass UpstreamPartitionsResult(NamedTuple):\n    """Represents the result of mapping a PartitionsSubset to the corresponding\n    partitions in another PartitionsDefinition.\n\n    partitions_subset (PartitionsSubset): The resulting partitions subset that was\n        mapped to. Only contains partitions for existent partitions, filtering out nonexistent partitions.\n    required_but_nonexistent_partition_keys (Sequence[str]): A list containing invalid partition keys in to_partitions_def\n        that partitions in from_partitions_subset were mapped to.\n    """\n\n    partitions_subset: PartitionsSubset\n    required_but_nonexistent_partition_keys: Sequence[str]\n\n\n
[docs]class PartitionMapping(ABC):\n """Defines a correspondence between the partitions in an asset and the partitions in an asset\n that it depends on.\n\n Overriding PartitionMapping outside of Dagster is not supported. The abstract methods of this\n class may change at any time.\n """\n\n
[docs] @public\n @abstractmethod\n def get_downstream_partitions_for_partitions(\n self,\n upstream_partitions_subset: PartitionsSubset,\n downstream_partitions_def: PartitionsDefinition,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> PartitionsSubset:\n """Returns the subset of partition keys in the downstream asset that use the data in the given\n partition key subset of the upstream asset.\n\n Args:\n upstream_partitions_subset (Union[PartitionKeyRange, PartitionsSubset]): The\n subset of partition keys in the upstream asset.\n downstream_partitions_def (PartitionsDefinition): The partitions definition for the\n downstream asset.\n """
\n\n
[docs] @public\n @abstractmethod\n def get_upstream_mapped_partitions_result_for_partitions(\n self,\n downstream_partitions_subset: Optional[PartitionsSubset],\n upstream_partitions_def: PartitionsDefinition,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> UpstreamPartitionsResult:\n """Returns a UpstreamPartitionsResult object containing the partition keys the downstream\n partitions subset was mapped to in the upstream partitions definition.\n\n Valid upstream partitions will be included in UpstreamPartitionsResult.partitions_subset.\n Invalid upstream partitions will be included in UpstreamPartitionsResult.required_but_nonexistent_partition_keys.\n\n For example, if an upstream asset is time-partitioned and starts in June 2023, and the\n downstream asset is time-partitioned and starts in May 2023, this function would return a\n UpstreamPartitionsResult(PartitionsSubset("2023-06-01"), required_but_nonexistent_partition_keys=["2023-05-01"])\n when downstream_partitions_subset contains 2023-05-01 and 2023-06-01.\n """
\n\n\n
[docs]@whitelist_for_serdes\nclass IdentityPartitionMapping(PartitionMapping, NamedTuple("_IdentityPartitionMapping", [])):\n """Expects that the upstream and downstream assets are partitioned in the same way, and maps\n partitions in the downstream asset to the same partition in the upstream asset.\n """\n\n def get_upstream_mapped_partitions_result_for_partitions(\n self,\n downstream_partitions_subset: Optional[PartitionsSubset],\n upstream_partitions_def: PartitionsDefinition,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> UpstreamPartitionsResult:\n if downstream_partitions_subset is None:\n check.failed("downstream asset is not partitioned")\n\n if downstream_partitions_subset.partitions_def == upstream_partitions_def:\n return UpstreamPartitionsResult(downstream_partitions_subset, [])\n\n upstream_partition_keys = set(\n upstream_partitions_def.get_partition_keys(\n dynamic_partitions_store=dynamic_partitions_store\n )\n )\n downstream_partition_keys = set(downstream_partitions_subset.get_partition_keys())\n\n return UpstreamPartitionsResult(\n upstream_partitions_def.subset_with_partition_keys(\n list(upstream_partition_keys & downstream_partition_keys)\n ),\n list(downstream_partition_keys - upstream_partition_keys),\n )\n\n def get_downstream_partitions_for_partitions(\n self,\n upstream_partitions_subset: PartitionsSubset,\n downstream_partitions_def: PartitionsDefinition,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> PartitionsSubset:\n if upstream_partitions_subset is None:\n check.failed("upstream asset is not partitioned")\n\n if upstream_partitions_subset.partitions_def == downstream_partitions_def:\n return upstream_partitions_subset\n\n upstream_partition_keys = set(upstream_partitions_subset.get_partition_keys())\n downstream_partition_keys = set(\n downstream_partitions_def.get_partition_keys(\n dynamic_partitions_store=dynamic_partitions_store\n )\n )\n\n return downstream_partitions_def.empty_subset().with_partition_keys(\n list(downstream_partition_keys & upstream_partition_keys)\n )
\n\n\n
[docs]@whitelist_for_serdes\nclass AllPartitionMapping(PartitionMapping, NamedTuple("_AllPartitionMapping", [])):\n """Maps every partition in the downstream asset to every partition in the upstream asset.\n\n Commonly used in the case when the downstream asset is not partitioned, in which the entire\n downstream asset depends on all partitions of the usptream asset.\n """\n\n def get_upstream_mapped_partitions_result_for_partitions(\n self,\n downstream_partitions_subset: Optional[PartitionsSubset],\n upstream_partitions_def: PartitionsDefinition,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> UpstreamPartitionsResult:\n upstream_subset = upstream_partitions_def.subset_with_all_partitions(\n current_time=current_time, dynamic_partitions_store=dynamic_partitions_store\n )\n return UpstreamPartitionsResult(upstream_subset, [])\n\n def get_downstream_partitions_for_partitions(\n self,\n upstream_partitions_subset: PartitionsSubset,\n downstream_partitions_def: PartitionsDefinition,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> PartitionsSubset:\n raise NotImplementedError()
\n\n\n
[docs]@whitelist_for_serdes\nclass LastPartitionMapping(PartitionMapping, NamedTuple("_LastPartitionMapping", [])):\n """Maps all dependencies to the last partition in the upstream asset.\n\n Commonly used in the case when the downstream asset is not partitioned, in which the entire\n downstream asset depends on the last partition of the upstream asset.\n """\n\n def get_upstream_mapped_partitions_result_for_partitions(\n self,\n downstream_partitions_subset: Optional[PartitionsSubset],\n upstream_partitions_def: PartitionsDefinition,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> UpstreamPartitionsResult:\n last = upstream_partitions_def.get_last_partition_key(\n current_time=None, dynamic_partitions_store=dynamic_partitions_store\n )\n\n upstream_subset = upstream_partitions_def.empty_subset()\n if last is not None:\n upstream_subset = upstream_subset.with_partition_keys([last])\n\n return UpstreamPartitionsResult(upstream_subset, [])\n\n def get_downstream_partitions_for_partitions(\n self,\n upstream_partitions_subset: PartitionsSubset,\n downstream_partitions_def: PartitionsDefinition,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> PartitionsSubset:\n raise NotImplementedError()
\n\n\n
[docs]@whitelist_for_serdes\nclass SpecificPartitionsPartitionMapping(\n PartitionMapping,\n NamedTuple(\n "_SpecificPartitionsPartitionMapping", [("partition_keys", PublicAttr[Sequence[str]])]\n ),\n):\n """Maps to a specific subset of partitions in the upstream asset.\n\n Example:\n .. code-block:: python\n\n from dagster import SpecificPartitionsPartitionMapping, StaticPartitionsDefinition, asset\n\n @asset(partitions_def=StaticPartitionsDefinition(["a", "b", "c"]))\n def upstream():\n ...\n\n @asset(\n ins={\n "upstream": AssetIn(partition_mapping=SpecificPartitionsPartitionMapping(["a"]))\n }\n )\n def a_downstream(upstream):\n ...\n """\n\n def get_upstream_mapped_partitions_result_for_partitions(\n self,\n downstream_partitions_subset: Optional[PartitionsSubset],\n upstream_partitions_def: PartitionsDefinition,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> UpstreamPartitionsResult:\n return UpstreamPartitionsResult(\n upstream_partitions_def.subset_with_partition_keys(self.partition_keys), []\n )\n\n def get_downstream_partitions_for_partitions(\n self,\n upstream_partitions_subset: PartitionsSubset,\n downstream_partitions_def: PartitionsDefinition,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> PartitionsSubset:\n # if any of the partition keys in this partition mapping are contained within the upstream\n # partitions subset, then all partitions of the downstream asset are dependencies\n if any(key in upstream_partitions_subset for key in self.partition_keys):\n return downstream_partitions_def.subset_with_all_partitions(\n dynamic_partitions_store=dynamic_partitions_store\n )\n return downstream_partitions_def.empty_subset()
\n\n\nclass DimensionDependency(NamedTuple):\n partition_mapping: PartitionMapping\n upstream_dimension_name: Optional[str] = None\n downstream_dimension_name: Optional[str] = None\n\n\nclass BaseMultiPartitionMapping(ABC):\n @abstractmethod\n def get_dimension_dependencies(\n self,\n upstream_partitions_def: PartitionsDefinition,\n downstream_partitions_def: PartitionsDefinition,\n ) -> Sequence[DimensionDependency]: ...\n\n def get_partitions_def(\n self, partitions_def: PartitionsDefinition, dimension_name: Optional[str]\n ) -> PartitionsDefinition:\n if isinstance(partitions_def, MultiPartitionsDefinition):\n if not isinstance(dimension_name, str):\n check.failed("Expected dimension_name to be a string")\n return partitions_def.get_partitions_def_for_dimension(dimension_name)\n return partitions_def\n\n def _get_dependency_partitions_subset(\n self,\n a_partitions_def: PartitionsDefinition,\n a_partitions_subset: PartitionsSubset,\n b_partitions_def: PartitionsDefinition,\n a_upstream_of_b: bool,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n current_time: Optional[datetime] = None,\n ) -> Union[UpstreamPartitionsResult, PartitionsSubset]:\n """Given two partitions definitions a_partitions_def and b_partitions_def that have a dependency\n relationship (a_upstream_of_b is True if a_partitions_def is upstream of b_partitions_def),\n and a_partition_keys, a list of partition keys in a_partitions_def, returns a list of\n partition keys in the partitions definition b_partitions_def that are\n dependencies of the partition keys in a_partition_keys.\n """\n a_partition_keys_by_dimension = defaultdict(set)\n if isinstance(a_partitions_def, MultiPartitionsDefinition):\n for partition_key in a_partitions_subset.get_partition_keys():\n for dimension_name, key in cast(\n MultiPartitionKey, partition_key\n ).keys_by_dimension.items():\n a_partition_keys_by_dimension[dimension_name].add(key)\n else:\n for partition_key in a_partitions_subset.get_partition_keys():\n a_partition_keys_by_dimension[None].add(partition_key)\n\n # Maps the dimension name and key of a partition in a_partitions_def to the list of\n # partition keys in b_partitions_def that are dependencies of that partition\n dep_b_keys_by_a_dim_and_key: Dict[Optional[str], Dict[Optional[str], List[str]]] = (\n defaultdict(lambda: defaultdict(list))\n )\n required_but_nonexistent_upstream_partitions = set()\n\n b_dimension_partitions_def_by_name: Dict[Optional[str], PartitionsDefinition] = (\n {\n dimension.name: dimension.partitions_def\n for dimension in b_partitions_def.partitions_defs\n }\n if isinstance(b_partitions_def, MultiPartitionsDefinition)\n else {None: b_partitions_def}\n )\n\n if a_upstream_of_b:\n # a_partitions_def is upstream of b_partitions_def, so we need to map the\n # dimension names of a_partitions_def to the corresponding dependent dimensions of\n # b_partitions_def\n a_dim_to_dependency_b_dim = {\n dimension_mapping.upstream_dimension_name: (\n dimension_mapping.downstream_dimension_name,\n dimension_mapping.partition_mapping,\n )\n for dimension_mapping in self.get_dimension_dependencies(\n a_partitions_def, b_partitions_def\n )\n }\n\n for a_dim_name, keys in a_partition_keys_by_dimension.items():\n if a_dim_name in a_dim_to_dependency_b_dim:\n (\n b_dim_name,\n dimension_mapping,\n ) = a_dim_to_dependency_b_dim[a_dim_name]\n a_dimension_partitions_def = self.get_partitions_def(\n a_partitions_def, a_dim_name\n )\n b_dimension_partitions_def = self.get_partitions_def(\n b_partitions_def, 
b_dim_name\n )\n for key in keys:\n # if downstream dimension mapping exists, for a given key, get the list of\n # downstream partition keys that are dependencies of that key\n dep_b_keys_by_a_dim_and_key[a_dim_name][key] = list(\n dimension_mapping.get_downstream_partitions_for_partitions(\n a_dimension_partitions_def.empty_subset().with_partition_keys(\n [key]\n ),\n b_dimension_partitions_def,\n current_time=current_time,\n dynamic_partitions_store=dynamic_partitions_store,\n ).get_partition_keys()\n )\n\n else:\n # a_partitions_def is downstream of b_partitions_def, so we need to map the\n # dimension names of a_partitions_def to the corresponding dependency dimensions of\n # b_partitions_def\n a_dim_to_dependency_b_dim = {\n dimension_mapping.downstream_dimension_name: (\n dimension_mapping.upstream_dimension_name,\n dimension_mapping.partition_mapping,\n )\n for dimension_mapping in self.get_dimension_dependencies(\n b_partitions_def, a_partitions_def\n )\n }\n\n for a_dim_name, keys in a_partition_keys_by_dimension.items():\n if a_dim_name in a_dim_to_dependency_b_dim:\n (\n b_dim_name,\n partition_mapping,\n ) = a_dim_to_dependency_b_dim[a_dim_name]\n a_dimension_partitions_def = self.get_partitions_def(\n a_partitions_def, a_dim_name\n )\n b_dimension_partitions_def = self.get_partitions_def(\n b_partitions_def, b_dim_name\n )\n for key in keys:\n mapped_partitions_result = (\n partition_mapping.get_upstream_mapped_partitions_result_for_partitions(\n a_dimension_partitions_def.empty_subset().with_partition_keys(\n [key]\n ),\n b_dimension_partitions_def,\n current_time=current_time,\n dynamic_partitions_store=dynamic_partitions_store,\n )\n )\n dep_b_keys_by_a_dim_and_key[a_dim_name][key] = list(\n mapped_partitions_result.partitions_subset.get_partition_keys()\n )\n required_but_nonexistent_upstream_partitions.update(\n set(mapped_partitions_result.required_but_nonexistent_partition_keys)\n )\n\n b_partition_keys = set()\n\n mapped_a_dim_names = a_dim_to_dependency_b_dim.keys()\n mapped_b_dim_names = [mapping[0] for mapping in a_dim_to_dependency_b_dim.values()]\n unmapped_b_dim_names = list(\n set(b_dimension_partitions_def_by_name.keys()) - set(mapped_b_dim_names)\n )\n\n for key in a_partitions_subset.get_partition_keys():\n for b_key_values in itertools.product(\n *(\n [\n dep_b_keys_by_a_dim_and_key[dim_name][\n (\n cast(MultiPartitionKey, key).keys_by_dimension[dim_name]\n if dim_name\n else key\n )\n ]\n for dim_name in mapped_a_dim_names\n ]\n ),\n *[\n b_dimension_partitions_def_by_name[dim_name].get_partition_keys()\n for dim_name in unmapped_b_dim_names\n ],\n ):\n b_partition_keys.add(\n MultiPartitionKey(\n {\n cast(str, (mapped_b_dim_names + unmapped_b_dim_names)[i]): key\n for i, key in enumerate(b_key_values)\n }\n )\n if len(b_key_values) > 1\n else b_key_values[0]\n )\n\n mapped_subset = b_partitions_def.empty_subset().with_partition_keys(b_partition_keys)\n if a_upstream_of_b:\n return mapped_subset\n else:\n return UpstreamPartitionsResult(\n mapped_subset,\n required_but_nonexistent_partition_keys=list(\n required_but_nonexistent_upstream_partitions\n ),\n )\n\n def get_upstream_mapped_partitions_result_for_partitions(\n self,\n downstream_partitions_subset: Optional[PartitionsSubset],\n upstream_partitions_def: PartitionsDefinition,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> UpstreamPartitionsResult:\n if downstream_partitions_subset is None:\n check.failed("downstream asset is not 
partitioned")\n\n result = self._get_dependency_partitions_subset(\n cast(MultiPartitionsDefinition, downstream_partitions_subset.partitions_def),\n downstream_partitions_subset,\n cast(MultiPartitionsDefinition, upstream_partitions_def),\n a_upstream_of_b=False,\n dynamic_partitions_store=dynamic_partitions_store,\n current_time=current_time,\n )\n\n if not isinstance(result, UpstreamPartitionsResult):\n check.failed("Expected UpstreamPartitionsResult")\n\n return result\n\n def get_downstream_partitions_for_partitions(\n self,\n upstream_partitions_subset: PartitionsSubset,\n downstream_partitions_def: PartitionsDefinition,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> PartitionsSubset:\n if upstream_partitions_subset is None:\n check.failed("upstream asset is not partitioned")\n\n result = self._get_dependency_partitions_subset(\n cast(MultiPartitionsDefinition, upstream_partitions_subset.partitions_def),\n upstream_partitions_subset,\n cast(MultiPartitionsDefinition, downstream_partitions_def),\n a_upstream_of_b=True,\n dynamic_partitions_store=dynamic_partitions_store,\n )\n\n if isinstance(result, UpstreamPartitionsResult):\n check.failed("Expected PartitionsSubset")\n\n return result\n\n\n
[docs]@experimental\n@whitelist_for_serdes\nclass MultiToSingleDimensionPartitionMapping(\n BaseMultiPartitionMapping,\n PartitionMapping,\n NamedTuple(\n "_MultiToSingleDimensionPartitionMapping", [("partition_dimension_name", Optional[str])]\n ),\n):\n """Defines a correspondence between an single-dimensional partitions definition\n and a MultiPartitionsDefinition. The single-dimensional partitions definition must be\n a dimension of the MultiPartitionsDefinition.\n\n This class handles the case where the upstream asset is multipartitioned and the\n downstream asset is single dimensional, and vice versa.\n\n For a partition key X, this partition mapping assumes that any multi-partition key with\n X in the selected dimension is a dependency.\n\n Args:\n partition_dimension_name (Optional[str]): The name of the partition dimension in the\n MultiPartitionsDefinition that matches the single-dimension partitions definition.\n """\n\n def __new__(cls, partition_dimension_name: Optional[str] = None):\n return super(MultiToSingleDimensionPartitionMapping, cls).__new__(\n cls,\n partition_dimension_name=check.opt_str_param(\n partition_dimension_name, "partition_dimension_name"\n ),\n )\n\n def get_dimension_dependencies(\n self,\n upstream_partitions_def: PartitionsDefinition,\n downstream_partitions_def: PartitionsDefinition,\n ) -> Sequence[DimensionDependency]:\n infer_mapping_result = _get_infer_single_to_multi_dimension_deps_result(\n upstream_partitions_def, downstream_partitions_def\n )\n\n if not infer_mapping_result.can_infer:\n check.invariant(isinstance(infer_mapping_result.inference_failure_reason, str))\n check.failed(cast(str, infer_mapping_result.inference_failure_reason))\n\n return [cast(DimensionDependency, infer_mapping_result.dimension_dependency)]
\n\n\n@whitelist_for_serdes\nclass DimensionPartitionMapping(\n NamedTuple(\n "_DimensionPartitionMapping",\n [\n ("dimension_name", str),\n ("partition_mapping", PartitionMapping),\n ],\n )\n):\n """A helper class for MultiPartitionMapping that defines a partition mapping used to calculate\n the dependent partition keys in the selected downstream MultiPartitions definition dimension.\n\n Args:\n dimension_name (str): The name of the dimension in the downstream MultiPartitionsDefinition.\n partition_mapping (PartitionMapping): The partition mapping object used to calculate\n the downstream dimension partitions from the upstream dimension partitions and vice versa.\n """\n\n def __new__(\n cls,\n dimension_name: str,\n partition_mapping: PartitionMapping,\n ):\n return super(DimensionPartitionMapping, cls).__new__(\n cls,\n dimension_name=check.str_param(dimension_name, "dimension_name"),\n partition_mapping=check.inst_param(\n partition_mapping, "partition_mapping", PartitionMapping\n ),\n )\n\n\n
[docs]@experimental\n@whitelist_for_serdes\nclass MultiPartitionMapping(\n BaseMultiPartitionMapping,\n PartitionMapping,\n NamedTuple(\n "_MultiPartitionMapping",\n [("downstream_mappings_by_upstream_dimension", Mapping[str, DimensionPartitionMapping])],\n ),\n):\n """Defines a correspondence between two MultiPartitionsDefinitions.\n\n Accepts a mapping of upstream dimension name to downstream DimensionPartitionMapping, representing\n the explicit correspondence between the upstream and downstream MultiPartitions dimensions\n and the partition mapping used to calculate the downstream partitions.\n\n Examples:\n .. code-block:: python\n\n weekly_abc = MultiPartitionsDefinition(\n {\n "abc": StaticPartitionsDefinition(["a", "b", "c"]),\n "weekly": WeeklyPartitionsDefinition("2023-01-01"),\n }\n )\n daily_123 = MultiPartitionsDefinition(\n {\n "123": StaticPartitionsDefinition(["1", "2", "3"]),\n "daily": DailyPartitionsDefinition("2023-01-01"),\n }\n )\n\n MultiPartitionsMapping(\n {\n "abc": DimensionPartitionMapping(\n dimension_name="123",\n partition_mapping=StaticPartitionMapping({"a": "1", "b": "2", "c": "3"}),\n ),\n "weekly": DimensionPartitionMapping(\n dimension_name="daily",\n partition_mapping=TimeWindowPartitionMapping(),\n )\n }\n )\n\n For upstream or downstream dimensions not explicitly defined in the mapping, Dagster will\n assume an `AllPartitionsMapping`, meaning that all upstream partitions in those dimensions\n will be mapped to all downstream partitions in those dimensions.\n\n Examples:\n .. code-block:: python\n\n weekly_abc = MultiPartitionsDefinition(\n {\n "abc": StaticPartitionsDefinition(["a", "b", "c"]),\n "daily": DailyPartitionsDefinition("2023-01-01"),\n }\n )\n daily_123 = MultiPartitionsDefinition(\n {\n "123": StaticPartitionsDefinition(["1", "2", "3"]),\n "daily": DailyPartitionsDefinition("2023-01-01"),\n }\n )\n\n MultiPartitionsMapping(\n {\n "daily": DimensionPartitionMapping(\n dimension_name="daily",\n partition_mapping=IdentityPartitionMapping(),\n )\n }\n )\n\n # Will map `daily_123` partition key {"123": "1", "daily": "2023-01-01"} to the upstream:\n # {"abc": "a", "daily": "2023-01-01"}\n # {"abc": "b", "daily": "2023-01-01"}\n # {"abc": "c", "daily": "2023-01-01"}\n\n Args:\n downstream_mappings_by_upstream_dimension (Mapping[str, DimensionPartitionMapping]): A\n mapping that defines an explicit correspondence between one dimension of the upstream\n MultiPartitionsDefinition and one dimension of the downstream MultiPartitionsDefinition.\n Maps a string representing upstream dimension name to downstream DimensionPartitionMapping,\n containing the downstream dimension name and partition mapping.\n """\n\n def __new__(\n cls, downstream_mappings_by_upstream_dimension: Mapping[str, DimensionPartitionMapping]\n ):\n return super(MultiPartitionMapping, cls).__new__(\n cls,\n downstream_mappings_by_upstream_dimension=check.mapping_param(\n downstream_mappings_by_upstream_dimension,\n "downstream_mappings_by_upstream_dimension",\n key_type=str,\n value_type=DimensionPartitionMapping,\n ),\n )\n\n def get_dimension_dependencies(\n self,\n upstream_partitions_def: PartitionsDefinition,\n downstream_partitions_def: PartitionsDefinition,\n ) -> Sequence[DimensionDependency]:\n self._check_all_dimensions_accounted_for(\n upstream_partitions_def,\n downstream_partitions_def,\n )\n\n return [\n DimensionDependency(\n mapping.partition_mapping,\n upstream_dimension_name=upstream_dimension,\n downstream_dimension_name=mapping.dimension_name,\n )\n for 
upstream_dimension, mapping in self.downstream_mappings_by_upstream_dimension.items()\n ]\n\n def _check_all_dimensions_accounted_for(\n self,\n upstream_partitions_def: PartitionsDefinition,\n downstream_partitions_def: PartitionsDefinition,\n ) -> None:\n if any(\n not isinstance(partitions_def, MultiPartitionsDefinition)\n for partitions_def in (upstream_partitions_def, downstream_partitions_def)\n ):\n check.failed(\n "Both partitions defs provided to a MultiPartitionMapping must be multi-partitioned"\n )\n\n upstream_dimension_names = {\n dim.name\n for dim in cast(MultiPartitionsDefinition, upstream_partitions_def).partitions_defs\n }\n dimension_names = {\n dim.name\n for dim in cast(MultiPartitionsDefinition, downstream_partitions_def).partitions_defs\n }\n\n for (\n upstream_dimension_name,\n dimension_mapping,\n ) in self.downstream_mappings_by_upstream_dimension.items():\n if upstream_dimension_name not in upstream_dimension_names:\n check.failed(\n "Dimension mapping has an upstream dimension name that is not in the upstream "\n "partitions def"\n )\n if dimension_mapping.dimension_name not in dimension_names:\n check.failed(\n "Dimension mapping has a downstream dimension name that is not in the"\n " downstream partitions def"\n )\n\n upstream_dimension_names.remove(upstream_dimension_name)\n dimension_names.remove(dimension_mapping.dimension_name)
\n\n\n
[docs]@whitelist_for_serdes\nclass StaticPartitionMapping(\n PartitionMapping,\n NamedTuple(\n "_StaticPartitionMapping",\n [\n (\n "downstream_partition_keys_by_upstream_partition_key",\n PublicAttr[Mapping[str, Union[str, Collection[str]]]],\n )\n ],\n ),\n):\n """Define an explicit correspondence between two StaticPartitionsDefinitions.\n\n Args:\n downstream_partition_keys_by_upstream_partition_key (Dict[str, str | Collection[str]]):\n The single or multi-valued correspondence from upstream keys to downstream keys.\n """\n\n def __init__(\n self,\n downstream_partition_keys_by_upstream_partition_key: Mapping[\n str, Union[str, Collection[str]]\n ],\n ):\n check.mapping_param(\n downstream_partition_keys_by_upstream_partition_key,\n "downstream_partition_keys_by_upstream_partition_key",\n key_type=str,\n value_type=(str, collections.abc.Collection),\n )\n\n # cache forward and reverse mappings\n self._mapping = defaultdict(set)\n for (\n upstream_key,\n downstream_keys,\n ) in downstream_partition_keys_by_upstream_partition_key.items():\n self._mapping[upstream_key] = (\n {downstream_keys} if isinstance(downstream_keys, str) else set(downstream_keys)\n )\n\n self._inverse_mapping = defaultdict(set)\n for upstream_key, downstream_keys in self._mapping.items():\n for downstream_key in downstream_keys:\n self._inverse_mapping[downstream_key].add(upstream_key)\n\n @cached_method\n def _check_upstream(self, *, upstream_partitions_def: PartitionsDefinition):\n """Validate that the mapping from upstream to downstream is only defined on upstream keys."""\n check.inst(\n upstream_partitions_def,\n StaticPartitionsDefinition,\n "StaticPartitionMapping can only be defined between two StaticPartitionsDefinitions",\n )\n upstream_keys = upstream_partitions_def.get_partition_keys()\n extra_keys = set(self._mapping.keys()).difference(upstream_keys)\n if extra_keys:\n raise ValueError(\n f"mapping source partitions not in the upstream partitions definition: {extra_keys}"\n )\n\n @cached_method\n def _check_downstream(self, *, downstream_partitions_def: PartitionsDefinition):\n """Validate that the mapping from upstream to downstream only maps to downstream keys."""\n check.inst(\n downstream_partitions_def,\n StaticPartitionsDefinition,\n "StaticPartitionMapping can only be defined between two StaticPartitionsDefinitions",\n )\n downstream_keys = downstream_partitions_def.get_partition_keys()\n extra_keys = set(self._inverse_mapping.keys()).difference(downstream_keys)\n if extra_keys:\n raise ValueError(\n "mapping target partitions not in the downstream partitions definition:"\n f" {extra_keys}"\n )\n\n def get_downstream_partitions_for_partitions(\n self,\n upstream_partitions_subset: PartitionsSubset,\n downstream_partitions_def: PartitionsDefinition,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> PartitionsSubset:\n self._check_downstream(downstream_partitions_def=downstream_partitions_def)\n\n downstream_subset = downstream_partitions_def.empty_subset()\n downstream_keys = set()\n for key in upstream_partitions_subset.get_partition_keys():\n downstream_keys.update(self._mapping[key])\n return downstream_subset.with_partition_keys(downstream_keys)\n\n def get_upstream_mapped_partitions_result_for_partitions(\n self,\n downstream_partitions_subset: Optional[PartitionsSubset],\n upstream_partitions_def: PartitionsDefinition,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = 
None,\n ) -> UpstreamPartitionsResult:\n self._check_upstream(upstream_partitions_def=upstream_partitions_def)\n\n upstream_subset = upstream_partitions_def.empty_subset()\n if downstream_partitions_subset is None:\n return UpstreamPartitionsResult(upstream_subset, [])\n\n upstream_keys = set()\n for key in downstream_partitions_subset.get_partition_keys():\n upstream_keys.update(self._inverse_mapping[key])\n\n return UpstreamPartitionsResult(upstream_subset.with_partition_keys(upstream_keys), [])
\n\n\nclass InferSingleToMultiDimensionDepsResult(\n NamedTuple(\n "_InferSingleToMultiDimensionDepsResult",\n [\n ("can_infer", bool),\n ("inference_failure_reason", Optional[str]),\n ("dimension_dependency", Optional[DimensionDependency]),\n ],\n )\n):\n def __new__(\n cls,\n can_infer: bool,\n inference_failure_reason: Optional[str] = None,\n dimension_dependency: Optional[DimensionDependency] = None,\n ):\n if can_infer and dimension_dependency is None:\n check.failed("dimension_dependency must be provided if can_infer is True")\n if not can_infer and inference_failure_reason is None:\n check.failed("inference_failure_reason must be provided if can_infer is False")\n\n return super(InferSingleToMultiDimensionDepsResult, cls).__new__(\n cls,\n can_infer,\n inference_failure_reason,\n dimension_dependency,\n )\n\n\ndef _get_infer_single_to_multi_dimension_deps_result(\n upstream_partitions_def: PartitionsDefinition,\n downstream_partitions_def: PartitionsDefinition,\n partition_dimension_name: Optional[str] = None,\n) -> InferSingleToMultiDimensionDepsResult:\n from dagster._core.definitions.time_window_partition_mapping import TimeWindowPartitionMapping\n\n upstream_is_multipartitioned = isinstance(upstream_partitions_def, MultiPartitionsDefinition)\n\n multipartitions_defs = [\n partitions_def\n for partitions_def in [upstream_partitions_def, downstream_partitions_def]\n if isinstance(partitions_def, MultiPartitionsDefinition)\n ]\n if len(multipartitions_defs) != 1:\n return InferSingleToMultiDimensionDepsResult(\n False,\n "Can only use MultiToSingleDimensionPartitionMapping when upstream asset is"\n " multipartitioned and the downstream asset is single dimensional, or vice versa."\n f" Instead received {len(multipartitions_defs)} multi-partitioned assets.",\n )\n\n multipartitions_def = cast(MultiPartitionsDefinition, next(iter(multipartitions_defs)))\n\n single_dimension_partitions_def = next(\n iter(\n {\n upstream_partitions_def,\n downstream_partitions_def,\n }\n - set(multipartitions_defs)\n )\n )\n\n filtered_multipartition_dims = (\n multipartitions_def.partitions_defs\n if partition_dimension_name is None\n else [\n dim\n for dim in multipartitions_def.partitions_defs\n if dim.name == partition_dimension_name\n ]\n )\n\n if partition_dimension_name:\n if len(filtered_multipartition_dims) != 1:\n return InferSingleToMultiDimensionDepsResult(\n False,\n f"Provided partition dimension name {partition_dimension_name} not found in"\n f" multipartitions definition {multipartitions_def}.",\n )\n\n matching_dimension_defs = [\n dimension_def\n for dimension_def in filtered_multipartition_dims\n if dimension_def.partitions_def == single_dimension_partitions_def\n ]\n\n if len(matching_dimension_defs) == 1:\n return InferSingleToMultiDimensionDepsResult(\n True,\n dimension_dependency=DimensionDependency(\n IdentityPartitionMapping(),\n upstream_dimension_name=(\n matching_dimension_defs[0].name if upstream_is_multipartitioned else None\n ),\n downstream_dimension_name=(\n matching_dimension_defs[0].name if not upstream_is_multipartitioned else None\n ),\n ),\n )\n elif len(matching_dimension_defs) > 1:\n return InferSingleToMultiDimensionDepsResult(\n False,\n "partition dimension name must be specified when multiple dimensions of the"\n " MultiPartitionsDefinition match the single dimension partitions def",\n )\n\n time_dimensions = [\n dimension_def\n for dimension_def in filtered_multipartition_dims\n if isinstance(dimension_def.partitions_def, TimeWindowPartitionsDefinition)\n 
]\n\n if len(time_dimensions) == 1 and isinstance(\n single_dimension_partitions_def, TimeWindowPartitionsDefinition\n ):\n return InferSingleToMultiDimensionDepsResult(\n True,\n dimension_dependency=DimensionDependency(\n TimeWindowPartitionMapping(),\n upstream_dimension_name=(\n time_dimensions[0].name if upstream_is_multipartitioned else None\n ),\n downstream_dimension_name=(\n time_dimensions[0].name if not upstream_is_multipartitioned else None\n ),\n ),\n )\n\n return InferSingleToMultiDimensionDepsResult(\n False,\n "MultiToSingleDimensionPartitionMapping can only be used when: \\n(a) The single dimensional"\n " partitions definition is a dimension of the MultiPartitionsDefinition.\\n(b) The single"\n " dimensional partitions definition is a TimeWindowPartitionsDefinition and the"\n " MultiPartitionsDefinition has a single time dimension.",\n )\n\n\ndef infer_partition_mapping(\n partition_mapping: Optional[PartitionMapping],\n downstream_partitions_def: Optional[PartitionsDefinition],\n upstream_partitions_def: Optional[PartitionsDefinition],\n) -> PartitionMapping:\n from .time_window_partition_mapping import TimeWindowPartitionMapping\n\n if partition_mapping is not None:\n return partition_mapping\n elif upstream_partitions_def and downstream_partitions_def:\n if _get_infer_single_to_multi_dimension_deps_result(\n upstream_partitions_def, downstream_partitions_def\n ).can_infer:\n with disable_dagster_warnings():\n return MultiToSingleDimensionPartitionMapping()\n elif isinstance(upstream_partitions_def, TimeWindowPartitionsDefinition) and isinstance(\n downstream_partitions_def, TimeWindowPartitionsDefinition\n ):\n return TimeWindowPartitionMapping()\n else:\n return IdentityPartitionMapping()\n else:\n return AllPartitionMapping()\n\n\ndef get_builtin_partition_mapping_types() -> Tuple[Type[PartitionMapping], ...]:\n from dagster._core.definitions.time_window_partition_mapping import TimeWindowPartitionMapping\n\n return (\n AllPartitionMapping,\n IdentityPartitionMapping,\n LastPartitionMapping,\n SpecificPartitionsPartitionMapping,\n StaticPartitionMapping,\n TimeWindowPartitionMapping,\n MultiToSingleDimensionPartitionMapping,\n MultiPartitionMapping,\n )\n
", "current_page_name": "_modules/dagster/_core/definitions/partition_mapping", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.partition_mapping"}, "partitioned_schedule": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.partitioned_schedule

\nfrom typing import Callable, Mapping, NamedTuple, Optional, Union, cast\n\nimport dagster._check as check\nfrom dagster._core.errors import DagsterInvalidDefinitionError\n\nfrom .decorators.schedule_decorator import schedule\nfrom .job_definition import JobDefinition\nfrom .multi_dimensional_partitions import MultiPartitionsDefinition\nfrom .partition import PartitionsDefinition\nfrom .run_request import RunRequest, SkipReason\nfrom .schedule_definition import (\n    DefaultScheduleStatus,\n    RunRequestIterator,\n    ScheduleDefinition,\n    ScheduleEvaluationContext,\n)\nfrom .time_window_partitions import (\n    TimeWindowPartitionsDefinition,\n    get_time_partitions_def,\n    has_one_dimension_time_window_partitioning,\n)\nfrom .unresolved_asset_job_definition import UnresolvedAssetJobDefinition\n\n\nclass UnresolvedPartitionedAssetScheduleDefinition(NamedTuple):\n    """Points to an unresolved asset job. The asset selection isn't resolved yet, so we can't resolve\n    the PartitionsDefinition, so we can't resolve the schedule cadence.\n    """\n\n    name: str\n    job: UnresolvedAssetJobDefinition\n    description: Optional[str]\n    default_status: DefaultScheduleStatus\n    minute_of_hour: Optional[int]\n    hour_of_day: Optional[int]\n    day_of_week: Optional[int]\n    day_of_month: Optional[int]\n    tags: Optional[Mapping[str, str]]\n\n    def resolve(self, resolved_job: JobDefinition) -> ScheduleDefinition:\n        partitions_def = resolved_job.partitions_def\n        if partitions_def is None:\n            check.failed(\n                f"Job '{resolved_job.name}' provided to build_schedule_from_partitioned_job must"\n                " contain partitioned assets or a partitions definition."\n            )\n\n        partitions_def = _check_valid_schedule_partitions_def(partitions_def)\n        time_partitions_def = check.not_none(get_time_partitions_def(partitions_def))\n\n        return ScheduleDefinition(\n            job=resolved_job,\n            name=self.name,\n            execution_fn=_get_schedule_evaluation_fn(partitions_def, resolved_job, self.tags),\n            execution_timezone=time_partitions_def.timezone,\n            cron_schedule=time_partitions_def.get_cron_schedule(\n                self.minute_of_hour, self.hour_of_day, self.day_of_week, self.day_of_month\n            ),\n        )\n\n\n
[docs]def build_schedule_from_partitioned_job(\n job: Union[JobDefinition, UnresolvedAssetJobDefinition],\n description: Optional[str] = None,\n name: Optional[str] = None,\n minute_of_hour: Optional[int] = None,\n hour_of_day: Optional[int] = None,\n day_of_week: Optional[int] = None,\n day_of_month: Optional[int] = None,\n default_status: DefaultScheduleStatus = DefaultScheduleStatus.STOPPED,\n tags: Optional[Mapping[str, str]] = None,\n) -> Union[UnresolvedPartitionedAssetScheduleDefinition, ScheduleDefinition]:\n """Creates a schedule from a time window-partitioned job or a job that targets\n time window-partitioned assets. The job can also be multipartitioned, as long as one\n of the partitions dimensions is time-partitioned.\n\n The schedule executes at the cadence specified by the time partitioning of the job or assets.\n\n Examples:\n .. code-block:: python\n\n ######################################\n # Job that targets partitioned assets\n ######################################\n\n from dagster import (\n DailyPartitionsDefinition,\n asset,\n build_schedule_from_partitioned_job,\n define_asset_job,\n )\n\n @asset(partitions_def=DailyPartitionsDefinition(start_date="2020-01-01"))\n def asset1():\n ...\n\n asset1_job = define_asset_job("asset1_job", selection=[asset1])\n\n # The created schedule will fire daily\n asset1_job_schedule = build_schedule_from_partitioned_job(asset1_job)\n\n defs = Definitions(assets=[asset1], schedules=[asset1_job_schedule])\n\n ################\n # Non-asset job\n ################\n\n from dagster import DailyPartitionsDefinition, build_schedule_from_partitioned_job, jog\n\n\n @job(partitions_def=DailyPartitionsDefinition(start_date="2020-01-01"))\n def do_stuff_partitioned():\n ...\n\n # The created schedule will fire daily\n do_stuff_partitioned_schedule = build_schedule_from_partitioned_job(\n do_stuff_partitioned,\n )\n\n defs = Definitions(schedules=[do_stuff_partitioned_schedule])\n """\n check.invariant(\n not (day_of_week and day_of_month),\n "Cannot provide both day_of_month and day_of_week parameter to"\n " build_schedule_from_partitioned_job.",\n )\n\n if isinstance(job, UnresolvedAssetJobDefinition) and job.partitions_def is None:\n return UnresolvedPartitionedAssetScheduleDefinition(\n job=job,\n default_status=default_status,\n name=check.opt_str_param(name, "name", f"{job.name}_schedule"),\n description=check.opt_str_param(description, "description"),\n minute_of_hour=minute_of_hour,\n hour_of_day=hour_of_day,\n day_of_week=day_of_week,\n day_of_month=day_of_month,\n tags=tags,\n )\n else:\n partitions_def = job.partitions_def\n if partitions_def is None:\n check.failed("The provided job is not partitioned")\n\n partitions_def = _check_valid_schedule_partitions_def(partitions_def)\n time_partitions_def = check.not_none(get_time_partitions_def(partitions_def))\n\n return schedule(\n cron_schedule=time_partitions_def.get_cron_schedule(\n minute_of_hour, hour_of_day, day_of_week, day_of_month\n ),\n job=job,\n default_status=default_status,\n execution_timezone=time_partitions_def.timezone,\n name=check.opt_str_param(name, "name", f"{job.name}_schedule"),\n description=check.opt_str_param(description, "description"),\n )(_get_schedule_evaluation_fn(partitions_def, job, tags))
\n\n\ndef _get_schedule_evaluation_fn(\n partitions_def: PartitionsDefinition,\n job: Union[JobDefinition, UnresolvedAssetJobDefinition],\n tags: Optional[Mapping[str, str]] = None,\n) -> Callable[[ScheduleEvaluationContext], Union[SkipReason, RunRequest, RunRequestIterator]]:\n def schedule_fn(context):\n # Run for the latest partition. Prior partitions will have been handled by prior ticks.\n if isinstance(partitions_def, TimeWindowPartitionsDefinition):\n partition_key = partitions_def.get_last_partition_key(context.scheduled_execution_time)\n if partition_key is None:\n return SkipReason("The job's PartitionsDefinition has no partitions")\n\n return job.run_request_for_partition(\n partition_key=partition_key,\n run_key=partition_key,\n tags=tags,\n current_time=context.scheduled_execution_time,\n )\n else:\n check.invariant(isinstance(partitions_def, MultiPartitionsDefinition))\n time_window_dimension = partitions_def.time_window_dimension\n partition_key = time_window_dimension.partitions_def.get_last_partition_key(\n context.scheduled_execution_time\n )\n if partition_key is None:\n return SkipReason("The job's PartitionsDefinition has no partitions")\n\n return [\n job.run_request_for_partition(\n partition_key=key,\n run_key=key,\n tags=tags,\n current_time=context.scheduled_execution_time,\n dynamic_partitions_store=context.instance if context.instance_ref else None,\n )\n for key in partitions_def.get_multipartition_keys_with_dimension_value(\n time_window_dimension.name,\n partition_key,\n dynamic_partitions_store=context.instance if context.instance_ref else None,\n )\n ]\n\n return schedule_fn\n\n\ndef _check_valid_schedule_partitions_def(\n partitions_def: PartitionsDefinition,\n) -> Union[TimeWindowPartitionsDefinition, MultiPartitionsDefinition]:\n if not has_one_dimension_time_window_partitioning(partitions_def):\n raise DagsterInvalidDefinitionError(\n "Tried to build a partitioned schedule from an asset job, but received an invalid"\n " partitions definition. The permitted partitions definitions are: \\n1."\n " TimeWindowPartitionsDefinition\\n2. MultiPartitionsDefinition with a single"\n " TimeWindowPartitionsDefinition dimension"\n )\n\n return cast(Union[TimeWindowPartitionsDefinition, MultiPartitionsDefinition], partitions_def)\n\n\nschedule_from_partitions = build_schedule_from_partitioned_job\n
", "current_page_name": "_modules/dagster/_core/definitions/partitioned_schedule", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.partitioned_schedule"}, "policy": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.policy

\nfrom enum import Enum\nfrom random import random\nfrom typing import NamedTuple, Optional\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr\nfrom dagster._core.errors import DagsterInvalidDefinitionError\n\n\n
[docs]class Backoff(Enum):\n """A modifier for delay as a function of attempt number.\n\n LINEAR: `attempt_num * delay`\n EXPONENTIAL: `((2 ^ attempt_num) - 1) * delay`\n """\n\n LINEAR = "LINEAR"\n EXPONENTIAL = "EXPONENTIAL"
\n\n\n
[docs]class Jitter(Enum):\n """A randomizing modifier for delay, applied after backoff calculation.\n\n FULL: between 0 and the calculated delay based on backoff: `random() * backoff_delay`\n PLUS_MINUS: +/- the delay: `backoff_delay + ((2 * (random() * delay)) - delay)`\n """\n\n FULL = "FULL"\n PLUS_MINUS = "PLUS_MINUS"
\n\n\n
[docs]class RetryPolicy(\n NamedTuple(\n "_RetryPolicy",\n [\n ("max_retries", PublicAttr[int]),\n ("delay", PublicAttr[Optional[check.Numeric]]),\n # declarative time modulation to allow calc witout running user function\n ("backoff", PublicAttr[Optional[Backoff]]),\n ("jitter", PublicAttr[Optional[Jitter]]),\n ],\n ),\n):\n """A declarative policy for when to request retries when an exception occurs during op execution.\n\n Args:\n max_retries (int):\n The maximum number of retries to attempt. Defaults to 1.\n delay (Optional[Union[int,float]]):\n The time in seconds to wait between the retry being requested and the next attempt\n being started. This unit of time can be modulated as a function of attempt number\n with backoff and randomly with jitter.\n backoff (Optional[Backoff]):\n A modifier for delay as a function of retry attempt number.\n jitter (Optional[Jitter]):\n A randomizing modifier for delay, applied after backoff calculation.\n """\n\n def __new__(\n cls,\n max_retries: int = 1,\n delay: Optional[check.Numeric] = None,\n backoff: Optional[Backoff] = None,\n jitter: Optional[Jitter] = None,\n ):\n if backoff is not None and delay is None:\n raise DagsterInvalidDefinitionError(\n "Can not set jitter on RetryPolicy without also setting delay"\n )\n\n if jitter is not None and delay is None:\n raise DagsterInvalidDefinitionError(\n "Can not set backoff on RetryPolicy without also setting delay"\n )\n\n return super().__new__(\n cls,\n max_retries=check.int_param(max_retries, "max_retries"),\n delay=check.opt_numeric_param(delay, "delay"),\n backoff=check.opt_inst_param(backoff, "backoff", Backoff),\n jitter=check.opt_inst_param(jitter, "jitter", Jitter),\n )\n\n def calculate_delay(self, attempt_num: int) -> check.Numeric:\n return calculate_delay(\n attempt_num=attempt_num,\n backoff=self.backoff,\n jitter=self.jitter,\n base_delay=self.delay or 0,\n )
\n\n\ndef calculate_delay(\n attempt_num: int, backoff: Optional[Backoff], jitter: Optional[Jitter], base_delay: float\n) -> float:\n if backoff is Backoff.EXPONENTIAL:\n calc_delay = ((2**attempt_num) - 1) * base_delay\n elif backoff is Backoff.LINEAR:\n calc_delay = base_delay * attempt_num\n elif backoff is None:\n calc_delay = base_delay\n else:\n check.assert_never(backoff)\n\n if jitter is Jitter.FULL:\n calc_delay = random() * calc_delay\n elif jitter is Jitter.PLUS_MINUS:\n calc_delay = calc_delay + ((2 * (random() * base_delay)) - base_delay)\n elif jitter is None:\n pass\n else:\n check.assert_never(jitter)\n\n return calc_delay\n
", "current_page_name": "_modules/dagster/_core/definitions/policy", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.policy"}, "reconstruct": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.reconstruct

\nimport inspect\nimport json\nimport os\nimport sys\nfrom functools import lru_cache\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Callable,\n    Dict,\n    Iterable,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Tuple,\n    TypeVar,\n    Union,\n    overload,\n)\n\nfrom typing_extensions import Self, TypeAlias\n\nimport dagster._check as check\nimport dagster._seven as seven\nfrom dagster._annotations import experimental\nfrom dagster._core.code_pointer import (\n    CodePointer,\n    CustomPointer,\n    FileCodePointer,\n    ModuleCodePointer,\n    get_python_file_from_target,\n)\nfrom dagster._core.definitions.asset_check_spec import AssetCheckKey\nfrom dagster._core.errors import DagsterInvariantViolationError\nfrom dagster._core.origin import (\n    DEFAULT_DAGSTER_ENTRY_POINT,\n    JobPythonOrigin,\n    RepositoryPythonOrigin,\n)\nfrom dagster._serdes import pack_value, unpack_value, whitelist_for_serdes\nfrom dagster._serdes.serdes import NamedTupleSerializer\nfrom dagster._utils import hash_collection\n\nfrom .events import AssetKey\nfrom .job_base import IJob\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.assets import AssetsDefinition\n    from dagster._core.definitions.job_definition import JobDefinition\n    from dagster._core.definitions.repository_definition import (\n        PendingRepositoryDefinition,\n        RepositoryLoadData,\n    )\n    from dagster._core.definitions.source_asset import SourceAsset\n\n    from .graph_definition import GraphDefinition\n    from .repository_definition import RepositoryDefinition\n\n\ndef get_ephemeral_repository_name(job_name: str) -> str:\n    check.str_param(job_name, "job_name")\n    return f"__repository__{job_name}"\n\n\n@whitelist_for_serdes\nclass ReconstructableRepository(\n    NamedTuple(\n        "_ReconstructableRepository",\n        [\n            ("pointer", CodePointer),\n            ("container_image", Optional[str]),\n            ("executable_path", Optional[str]),\n            ("entry_point", Sequence[str]),\n            ("container_context", Optional[Mapping[str, Any]]),\n            ("repository_load_data", Optional["RepositoryLoadData"]),\n        ],\n    )\n):\n    def __new__(\n        cls,\n        pointer: CodePointer,\n        container_image: Optional[str] = None,\n        executable_path: Optional[str] = None,\n        entry_point: Optional[Sequence[str]] = None,\n        container_context: Optional[Mapping[str, Any]] = None,\n        repository_load_data: Optional["RepositoryLoadData"] = None,\n    ):\n        from dagster._core.definitions.repository_definition import RepositoryLoadData\n\n        return super(ReconstructableRepository, cls).__new__(\n            cls,\n            pointer=check.inst_param(pointer, "pointer", CodePointer),\n            container_image=check.opt_str_param(container_image, "container_image"),\n            executable_path=check.opt_str_param(executable_path, "executable_path"),\n            entry_point=(\n                check.sequence_param(entry_point, "entry_point", of_type=str)\n                if entry_point is not None\n                else DEFAULT_DAGSTER_ENTRY_POINT\n            ),\n            container_context=(\n                check.mapping_param(container_context, "container_context")\n                if container_context is not None\n                else None\n            ),\n            repository_load_data=check.opt_inst_param(\n                repository_load_data, "repository_load_data", 
RepositoryLoadData\n            ),\n        )\n\n    def with_repository_load_data(\n        self, metadata: Optional["RepositoryLoadData"]\n    ) -> "ReconstructableRepository":\n        return self._replace(repository_load_data=metadata)\n\n    def get_definition(self) -> "RepositoryDefinition":\n        return repository_def_from_pointer(self.pointer, self.repository_load_data)\n\n    def get_reconstructable_job(self, name: str) -> "ReconstructableJob":\n        return ReconstructableJob(self, name)\n\n    @classmethod\n    def for_file(\n        cls,\n        file: str,\n        fn_name: str,\n        working_directory: Optional[str] = None,\n        container_image: Optional[str] = None,\n        container_context: Optional[Mapping[str, Any]] = None,\n    ) -> "ReconstructableRepository":\n        if not working_directory:\n            working_directory = os.getcwd()\n        return cls(\n            FileCodePointer(file, fn_name, working_directory),\n            container_image=container_image,\n            container_context=container_context,\n        )\n\n    @classmethod\n    def for_module(\n        cls,\n        module: str,\n        fn_name: str,\n        working_directory: Optional[str] = None,\n        container_image: Optional[str] = None,\n        container_context: Optional[Mapping[str, Any]] = None,\n    ) -> "ReconstructableRepository":\n        return cls(\n            ModuleCodePointer(module, fn_name, working_directory),\n            container_image=container_image,\n            container_context=container_context,\n        )\n\n    def get_python_origin(self) -> RepositoryPythonOrigin:\n        return RepositoryPythonOrigin(\n            executable_path=self.executable_path if self.executable_path else sys.executable,\n            code_pointer=self.pointer,\n            container_image=self.container_image,\n            entry_point=self.entry_point,\n            container_context=self.container_context,\n        )\n\n    def get_python_origin_id(self) -> str:\n        return self.get_python_origin().get_id()\n\n    # Allow this to be hashed for use in `lru_cache`. 
This is needed because:\n    # - `ReconstructableJob` uses `lru_cache`\n    # - `ReconstructableJob` has a `ReconstructableRepository` attribute\n    # - `ReconstructableRepository` has `Sequence` attributes that are unhashable by default\n    def __hash__(self) -> int:\n        if not hasattr(self, "_hash"):\n            self._hash = hash_collection(self)\n        return self._hash\n\n\nclass ReconstructableJobSerializer(NamedTupleSerializer):\n    def before_unpack(self, _, unpacked_dict: Dict[str, Any]) -> Dict[str, Any]:\n        solid_selection_str = unpacked_dict.get("solid_selection_str")\n        solids_to_execute = unpacked_dict.get("solids_to_execute")\n        if solid_selection_str:\n            unpacked_dict["op_selection"] = json.loads(solid_selection_str)\n        elif solids_to_execute:\n            unpacked_dict["op_selection"] = solids_to_execute\n        return unpacked_dict\n\n    def after_pack(self, **packed_dict: Any) -> Dict[str, Any]:\n        if packed_dict["op_selection"]:\n            packed_dict["solid_selection_str"] = json.dumps(packed_dict["op_selection"]["__set__"])\n        else:\n            packed_dict["solid_selection_str"] = None\n        del packed_dict["op_selection"]\n        return packed_dict\n\n\n@whitelist_for_serdes(\n    serializer=ReconstructableJobSerializer,\n    storage_name="ReconstructablePipeline",\n    storage_field_names={\n        "job_name": "pipeline_name",\n    },\n)\nclass ReconstructableJob(\n    NamedTuple(\n        "_ReconstructableJob",\n        [\n            ("repository", ReconstructableRepository),\n            ("job_name", str),\n            ("op_selection", Optional[AbstractSet[str]]),\n            ("asset_selection", Optional[AbstractSet[AssetKey]]),\n            ("asset_check_selection", Optional[AbstractSet[AssetCheckKey]]),\n        ],\n    ),\n    IJob,\n):\n    """Defines a reconstructable job. When your job must cross process boundaries, Dagster must know\n    how to reconstruct the job on the other side of the process boundary.\n\n    Args:\n        repository (ReconstructableRepository): The reconstructable representation of the repository\n            the job belongs to.\n        job_name (str): The name of the job.\n        op_selection (Optional[AbstractSet[str]]): A set of op query strings. Ops matching any of\n            these queries will be selected. None if no selection is specified.\n        asset_selection (Optional[AbstractSet[AssetKey]]) A set of assets to execute. None if no selection\n            is specified, i.e. 
the entire job will be run.\n    """\n\n    def __new__(\n        cls,\n        repository: ReconstructableRepository,\n        job_name: str,\n        op_selection: Optional[Iterable[str]] = None,\n        asset_selection: Optional[AbstractSet[AssetKey]] = None,\n        asset_check_selection: Optional[AbstractSet[AssetCheckKey]] = None,\n    ):\n        op_selection = set(op_selection) if op_selection else None\n        return super(ReconstructableJob, cls).__new__(\n            cls,\n            repository=check.inst_param(repository, "repository", ReconstructableRepository),\n            job_name=check.str_param(job_name, "job_name"),\n            op_selection=check.opt_nullable_set_param(op_selection, "op_selection", of_type=str),\n            asset_selection=check.opt_nullable_set_param(\n                asset_selection, "asset_selection", AssetKey\n            ),\n            asset_check_selection=check.opt_nullable_set_param(\n                asset_check_selection, "asset_check_selection", AssetCheckKey\n            ),\n        )\n\n    def with_repository_load_data(\n        self, metadata: Optional["RepositoryLoadData"]\n    ) -> "ReconstructableJob":\n        return self._replace(repository=self.repository.with_repository_load_data(metadata))\n\n    # Keep the most recent 1 definition (globally since this is a NamedTuple method)\n    # This allows repeated calls to get_definition in execution paths to not reload the job\n    @lru_cache(maxsize=1)\n    def get_definition(self) -> "JobDefinition":\n        return self.repository.get_definition().get_maybe_subset_job_def(\n            self.job_name,\n            self.op_selection,\n            self.asset_selection,\n            self.asset_check_selection,\n        )\n\n    def get_reconstructable_repository(self) -> ReconstructableRepository:\n        return self.repository\n\n    def get_subset(\n        self,\n        *,\n        op_selection: Optional[Iterable[str]] = None,\n        asset_selection: Optional[AbstractSet[AssetKey]] = None,\n        asset_check_selection: Optional[AbstractSet[AssetCheckKey]] = None,\n    ) -> Self:\n        if op_selection and (asset_selection or asset_check_selection):\n            check.failed(\n                "op_selection and asset_selection or asset_check_selection cannot both be provided"\n                " as arguments",\n            )\n        op_selection = set(op_selection) if op_selection else None\n        return ReconstructableJob(\n            repository=self.repository,\n            job_name=self.job_name,\n            op_selection=op_selection,\n            asset_selection=asset_selection,\n            asset_check_selection=asset_check_selection,\n        )\n\n    def describe(self) -> str:\n        return f'"{self.job_name}" in repository ({self.repository.pointer.describe})'\n\n    @staticmethod\n    def for_file(python_file: str, fn_name: str) -> "ReconstructableJob":\n        return bootstrap_standalone_recon_job(FileCodePointer(python_file, fn_name, os.getcwd()))\n\n    @staticmethod\n    def for_module(module: str, fn_name: str) -> "ReconstructableJob":\n        return bootstrap_standalone_recon_job(ModuleCodePointer(module, fn_name, os.getcwd()))\n\n    def to_dict(self) -> Mapping[str, object]:\n        return pack_value(self)\n\n    @staticmethod\n    def from_dict(val: Mapping[str, Any]) -> "ReconstructableJob":\n        check.mapping_param(val, "val")\n\n        inst = unpack_value(val)\n        check.invariant(\n            isinstance(inst, ReconstructableJob),\n       
     f"Deserialized object is not instance of ReconstructableJob, got {type(inst)}",\n        )\n        return inst  # type: ignore  # (illegible runtime check)\n\n    def get_python_origin(self) -> JobPythonOrigin:\n        return JobPythonOrigin(self.job_name, self.repository.get_python_origin())\n\n    def get_python_origin_id(self) -> str:\n        return self.get_python_origin().get_id()\n\n    def get_module(self) -> Optional[str]:\n        """Return the module the job is found in, the origin is a module code pointer."""\n        pointer = self.get_python_origin().get_repo_pointer()\n        if isinstance(pointer, ModuleCodePointer):\n            return pointer.module\n\n        return None\n\n    # Allow this to be hashed for `lru_cache` in `get_definition`\n    def __hash__(self) -> int:\n        if not hasattr(self, "_hash"):\n            self._hash = hash_collection(self)\n        return self._hash\n\n\n
[docs]def reconstructable(target: Callable[..., "JobDefinition"]) -> ReconstructableJob:\n """Create a :py:class:`~dagster._core.definitions.reconstructable.ReconstructableJob` from a\n function that returns a :py:class:`~dagster.JobDefinition`/:py:class:`~dagster.JobDefinition`,\n or a function decorated with :py:func:`@job <dagster.job>`.\n\n When your job must cross process boundaries, e.g., for execution on multiple nodes or\n in different systems (like ``dagstermill``), Dagster must know how to reconstruct the job\n on the other side of the process boundary.\n\n Passing a job created with ``~dagster.GraphDefinition.to_job`` to ``reconstructable()``,\n requires you to wrap that job's definition in a module-scoped function, and pass that function\n instead:\n\n .. code-block:: python\n\n from dagster import graph, reconstructable\n\n @graph\n def my_graph():\n ...\n\n def define_my_job():\n return my_graph.to_job()\n\n reconstructable(define_my_job)\n\n This function implements a very conservative strategy for reconstruction, so that its behavior\n is easy to predict, but as a consequence it is not able to reconstruct certain kinds of jobs\n or jobs, such as those defined by lambdas, in nested scopes (e.g., dynamically within a method\n call), or in interactive environments such as the Python REPL or Jupyter notebooks.\n\n If you need to reconstruct objects constructed in these ways, you should use\n :py:func:`~dagster.reconstructable.build_reconstructable_job` instead, which allows you to\n specify your own reconstruction strategy.\n\n Examples:\n .. code-block:: python\n\n from dagster import job, reconstructable\n\n @job\n def foo_job():\n ...\n\n reconstructable_foo_job = reconstructable(foo_job)\n\n\n @graph\n def foo():\n ...\n\n def make_bar_job():\n return foo.to_job()\n\n reconstructable_bar_job = reconstructable(make_bar_job)\n """\n from dagster._core.definitions import JobDefinition\n\n if not seven.is_function_or_decorator_instance_of(target, JobDefinition):\n if isinstance(target, JobDefinition):\n raise DagsterInvariantViolationError(\n "Reconstructable target was not a function returning a job definition, or a job "\n "definition produced by a decorated function. If your job was constructed using "\n "``GraphDefinition.to_job``, you must wrap the ``to_job`` call in a function at "\n "module scope, ie not within any other functions. "\n "To learn more, check out the docs on ``reconstructable``: "\n "https://docs.dagster.io/_apidocs/execution#dagster.reconstructable"\n )\n raise DagsterInvariantViolationError(\n "Reconstructable target should be a function or definition produced "\n f"by a decorated function, got {type(target)}.",\n )\n\n if seven.is_lambda(target):\n raise DagsterInvariantViolationError(\n "Reconstructable target can not be a lambda. Use a function or "\n "decorated function defined at module scope instead, or use "\n "build_reconstructable_job."\n )\n\n if seven.qualname_differs(target):\n raise DagsterInvariantViolationError(\n f'Reconstructable target "{target.__name__}" has a different '\n f'__qualname__ "{target.__qualname__}" indicating it is not '\n "defined at module scope. 
Use a function or decorated function "\n "defined at module scope instead, or use build_reconstructable_job."\n )\n\n try:\n if (\n hasattr(target, "__module__")\n and hasattr(target, "__name__")\n and getattr(inspect.getmodule(target), "__name__", None) != "__main__"\n ):\n return ReconstructableJob.for_module(target.__module__, target.__name__)\n except:\n pass\n\n python_file = get_python_file_from_target(target)\n if not python_file:\n raise DagsterInvariantViolationError(\n "reconstructable() can not reconstruct jobs defined in interactive "\n "environments like <stdin>, IPython, or Jupyter notebooks. "\n "Use a job defined in a module or file instead, or use build_reconstructable_job."\n )\n\n pointer = FileCodePointer(\n python_file=python_file, fn_name=target.__name__, working_directory=os.getcwd()\n )\n\n return bootstrap_standalone_recon_job(pointer)
\n\n\n
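# --- Added example (illustrative, not part of the original module source) -------
# A minimal sketch of the most common use of reconstructable(): execute_job takes a
# ReconstructableJob so that worker processes (e.g. under the default multiprocess
# executor) can rebuild the job on their side of the process boundary. Assumes a
# user module with DAGSTER_HOME configured so that DagsterInstance.get() resolves.

from dagster import DagsterInstance, execute_job, job, op, reconstructable


@op
def _example_op():
    pass


@job
def _example_job():
    _example_op()


def _run_example_job():
    # reconstructable() requires _example_job to be defined at module scope,
    # as it is here.
    result = execute_job(reconstructable(_example_job), instance=DagsterInstance.get())
    assert result.success
# ---------------------------------------------------------------------------------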
[docs]@experimental\ndef build_reconstructable_job(\n reconstructor_module_name: str,\n reconstructor_function_name: str,\n reconstructable_args: Optional[Tuple[object]] = None,\n reconstructable_kwargs: Optional[Mapping[str, object]] = None,\n reconstructor_working_directory: Optional[str] = None,\n) -> ReconstructableJob:\n """Create a :py:class:`dagster._core.definitions.reconstructable.ReconstructableJob`.\n\n When your job must cross process boundaries, e.g., for execution on multiple nodes or in\n different systems (like ``dagstermill``), Dagster must know how to reconstruct the job\n on the other side of the process boundary.\n\n This function allows you to use the strategy of your choice for reconstructing jobs, so\n that you can reconstruct certain kinds of jobs that are not supported by\n :py:func:`~dagster.reconstructable`, such as those defined by lambdas, in nested scopes (e.g.,\n dynamically within a method call), or in interactive environments such as the Python REPL or\n Jupyter notebooks.\n\n If you need to reconstruct jobs constructed in these ways, use this function instead of\n :py:func:`~dagster.reconstructable`.\n\n Args:\n reconstructor_module_name (str): The name of the module containing the function to use to\n reconstruct the job.\n reconstructor_function_name (str): The name of the function to use to reconstruct the\n job.\n reconstructable_args (Tuple): Args to the function to use to reconstruct the job.\n Values of the tuple must be JSON serializable.\n reconstructable_kwargs (Dict[str, Any]): Kwargs to the function to use to reconstruct the\n job. Values of the dict must be JSON serializable.\n\n Examples:\n .. code-block:: python\n\n # module: mymodule\n\n from dagster import JobDefinition, job, build_reconstructable_job\n\n class JobFactory:\n def make_job(*args, **kwargs):\n\n @job\n def _job(...):\n ...\n\n return _job\n\n def reconstruct_job(*args):\n factory = JobFactory()\n return factory.make_job(*args)\n\n factory = JobFactory()\n\n foo_job_args = (...,...)\n\n foo_job_kwargs = {...:...}\n\n foo_job = factory.make_job(*foo_job_args, **foo_job_kwargs)\n\n reconstructable_foo_job = build_reconstructable_job(\n 'mymodule',\n 'reconstruct_job',\n foo_job_args,\n foo_job_kwargs,\n )\n """\n check.str_param(reconstructor_module_name, "reconstructor_module_name")\n check.str_param(reconstructor_function_name, "reconstructor_function_name")\n check.opt_str_param(\n reconstructor_working_directory, "reconstructor_working_directory", os.getcwd()\n )\n\n _reconstructable_args: List[object] = list(\n check.opt_tuple_param(reconstructable_args, "reconstructable_args")\n )\n _reconstructable_kwargs: List[List[Union[str, object]]] = list(\n (\n [key, value]\n for key, value in check.opt_mapping_param(\n reconstructable_kwargs, "reconstructable_kwargs", key_type=str\n ).items()\n )\n )\n\n reconstructor_pointer = ModuleCodePointer(\n reconstructor_module_name,\n reconstructor_function_name,\n working_directory=reconstructor_working_directory,\n )\n\n pointer = CustomPointer(reconstructor_pointer, _reconstructable_args, _reconstructable_kwargs)\n\n job_def = job_def_from_pointer(pointer)\n\n return ReconstructableJob(\n repository=ReconstructableRepository(pointer), # creates ephemeral repo\n job_name=job_def.name,\n )
\n\n\ndef bootstrap_standalone_recon_job(pointer: CodePointer) -> ReconstructableJob:\n # So this actually straps the the job for the sole\n # purpose of getting the job name. If we changed ReconstructableJob\n # to get the job on demand in order to get name, we could avoid this.\n job_def = job_def_from_pointer(pointer)\n return ReconstructableJob(\n repository=ReconstructableRepository(pointer), # creates ephemeral repo\n job_name=job_def.name,\n )\n\n\nLoadableDefinition: TypeAlias = Union[\n "JobDefinition",\n "RepositoryDefinition",\n "PendingRepositoryDefinition",\n "GraphDefinition",\n "Sequence[Union[AssetsDefinition, SourceAsset]]",\n]\n\nT_LoadableDefinition = TypeVar("T_LoadableDefinition", bound=LoadableDefinition)\n\n\ndef _is_list_of_assets(\n definition: LoadableDefinition,\n) -> bool:\n from dagster._core.definitions.assets import AssetsDefinition\n from dagster._core.definitions.source_asset import SourceAsset\n\n return isinstance(definition, list) and all(\n isinstance(item, (AssetsDefinition, SourceAsset)) for item in definition\n )\n\n\ndef _check_is_loadable(definition: T_LoadableDefinition) -> T_LoadableDefinition:\n from .definitions_class import Definitions\n from .graph_definition import GraphDefinition\n from .job_definition import JobDefinition\n from .repository_definition import PendingRepositoryDefinition, RepositoryDefinition\n\n if not (\n isinstance(\n definition,\n (\n JobDefinition,\n RepositoryDefinition,\n PendingRepositoryDefinition,\n GraphDefinition,\n Definitions,\n ),\n )\n or _is_list_of_assets(definition)\n ):\n raise DagsterInvariantViolationError(\n "Loadable attributes must be either a JobDefinition, GraphDefinition, "\n f"or RepositoryDefinition. Got {definition!r}."\n )\n return definition\n\n\ndef load_def_in_module(\n module_name: str, attribute: str, working_directory: Optional[str]\n) -> LoadableDefinition:\n return def_from_pointer(CodePointer.from_module(module_name, attribute, working_directory))\n\n\ndef load_def_in_package(\n package_name: str, attribute: str, working_directory: Optional[str]\n) -> LoadableDefinition:\n return def_from_pointer(\n CodePointer.from_python_package(package_name, attribute, working_directory)\n )\n\n\ndef load_def_in_python_file(\n python_file: str, attribute: str, working_directory: Optional[str]\n) -> LoadableDefinition:\n return def_from_pointer(CodePointer.from_python_file(python_file, attribute, working_directory))\n\n\ndef def_from_pointer(\n pointer: CodePointer,\n) -> LoadableDefinition:\n target = pointer.load_target()\n\n from .graph_definition import GraphDefinition\n from .job_definition import JobDefinition\n from .repository_definition import PendingRepositoryDefinition, RepositoryDefinition\n\n if isinstance(\n target,\n (\n GraphDefinition,\n JobDefinition,\n PendingRepositoryDefinition,\n RepositoryDefinition,\n ),\n ) or not callable(target):\n return _check_is_loadable(target) # type: ignore\n\n # if its a function invoke it - otherwise we are pointing to a\n # artifact in module scope, likely decorator output\n\n if seven.get_arg_names(target):\n raise DagsterInvariantViolationError(\n f"Error invoking function at {pointer.describe()} with no arguments. 
"\n "Reconstructable target must be callable with no arguments"\n )\n\n return _check_is_loadable(target())\n\n\ndef job_def_from_pointer(pointer: CodePointer) -> "JobDefinition":\n from .job_definition import JobDefinition\n\n target = def_from_pointer(pointer)\n\n if isinstance(target, JobDefinition):\n return target\n\n raise DagsterInvariantViolationError(\n "CodePointer ({str}) must resolve to a JobDefinition (or JobDefinition for legacy"\n " code). Received a {type}".format(str=pointer.describe(), type=type(target))\n )\n\n\n@overload\ndef repository_def_from_target_def(\n target: Union["RepositoryDefinition", "JobDefinition", "GraphDefinition"],\n repository_load_data: Optional["RepositoryLoadData"] = None,\n) -> "RepositoryDefinition": ...\n\n\n@overload\ndef repository_def_from_target_def(\n target: object, repository_load_data: Optional["RepositoryLoadData"] = None\n) -> None: ...\n\n\ndef repository_def_from_target_def(\n target: object, repository_load_data: Optional["RepositoryLoadData"] = None\n) -> Optional["RepositoryDefinition"]:\n from .assets import AssetsDefinition\n from .definitions_class import Definitions\n from .graph_definition import GraphDefinition\n from .job_definition import JobDefinition\n from .repository_definition import (\n SINGLETON_REPOSITORY_NAME,\n CachingRepositoryData,\n PendingRepositoryDefinition,\n RepositoryDefinition,\n )\n from .source_asset import SourceAsset\n\n if isinstance(target, Definitions):\n # reassign to handle both repository and pending repo case\n target = target.get_inner_repository_for_loading_process()\n\n # special case - we can wrap a single job in a repository\n if isinstance(target, (JobDefinition, GraphDefinition)):\n # consider including job name in generated repo name\n return RepositoryDefinition(\n name=get_ephemeral_repository_name(target.name),\n repository_data=CachingRepositoryData.from_list([target]),\n )\n elif isinstance(target, list) and all(\n isinstance(item, (AssetsDefinition, SourceAsset)) for item in target\n ):\n return RepositoryDefinition(\n name=SINGLETON_REPOSITORY_NAME,\n repository_data=CachingRepositoryData.from_list(target),\n )\n elif isinstance(target, RepositoryDefinition):\n return target\n elif isinstance(target, PendingRepositoryDefinition):\n # must load repository from scratch\n if repository_load_data is None:\n return target.compute_repository_definition()\n # can use the cached data to more efficiently load data\n return target.reconstruct_repository_definition(repository_load_data)\n else:\n return None\n\n\ndef repository_def_from_pointer(\n pointer: CodePointer, repository_load_data: Optional["RepositoryLoadData"] = None\n) -> "RepositoryDefinition":\n target = def_from_pointer(pointer)\n repo_def = repository_def_from_target_def(target, repository_load_data)\n if not repo_def:\n raise DagsterInvariantViolationError(\n f"CodePointer ({pointer.describe()}) must resolve to a "\n "RepositoryDefinition, JobDefinition, or JobDefinition. "\n f"Received a {type(target)}"\n )\n return repo_def\n
", "current_page_name": "_modules/dagster/_core/definitions/reconstruct", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.reconstruct"}, "repository_definition": {"repository_data": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.repository_definition.repository_data

\nfrom abc import ABC, abstractmethod\nfrom types import FunctionType\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Callable,\n    Dict,\n    Mapping,\n    Optional,\n    Sequence,\n    TypeVar,\n    Union,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.definitions.events import AssetKey\nfrom dagster._core.definitions.executor_definition import ExecutorDefinition\nfrom dagster._core.definitions.graph_definition import SubselectedGraphDefinition\nfrom dagster._core.definitions.job_definition import JobDefinition\nfrom dagster._core.definitions.logger_definition import LoggerDefinition\nfrom dagster._core.definitions.resource_definition import ResourceDefinition\nfrom dagster._core.definitions.schedule_definition import ScheduleDefinition\nfrom dagster._core.definitions.sensor_definition import SensorDefinition\nfrom dagster._core.definitions.source_asset import SourceAsset\nfrom dagster._core.errors import DagsterInvalidDefinitionError, DagsterInvariantViolationError\n\nfrom .caching_index import CacheingDefinitionIndex\nfrom .valid_definitions import RepositoryListDefinition\n\nif TYPE_CHECKING:\n    from dagster._core.definitions import AssetsDefinition\n\n\nT = TypeVar("T")\nResolvable = Callable[[], T]\n\n\n
[docs]class RepositoryData(ABC):\n """Users should usually rely on the :py:func:`@repository <repository>` decorator to create new\n repositories, which will in turn call the static constructors on this class. However, users may\n subclass :py:class:`RepositoryData` for fine-grained control over access to and lazy creation\n of repository members.\n """\n\n @abstractmethod\n def get_resource_key_mapping(self) -> Mapping[int, str]:\n pass\n\n @abstractmethod\n def get_top_level_resources(self) -> Mapping[str, ResourceDefinition]:\n """Return all top-level resources in the repository as a list,\n such as those provided to the Definitions constructor.\n\n Returns:\n List[ResourceDefinition]: All top-level resources in the repository.\n """\n\n @abstractmethod\n def get_env_vars_by_top_level_resource(self) -> Mapping[str, AbstractSet[str]]:\n pass\n\n
[docs] @abstractmethod\n @public\n def get_all_jobs(self) -> Sequence[JobDefinition]:\n """Return all jobs in the repository as a list.\n\n Returns:\n List[JobDefinition]: All jobs in the repository.\n """
\n\n
[docs] @public\n def get_job_names(self) -> Sequence[str]:\n """Get the names of all jobs in the repository.\n\n Returns:\n List[str]\n """\n return [job_def.name for job_def in self.get_all_jobs()]
\n\n
[docs] @public\n def has_job(self, job_name: str) -> bool:\n """Check if a job with a given name is present in the repository.\n\n Args:\n job_name (str): The name of the job.\n\n Returns:\n bool\n """\n return job_name in self.get_job_names()
\n\n
[docs] @public\n def get_job(self, job_name: str) -> JobDefinition:\n """Get a job by name.\n\n Args:\n job_name (str): Name of the job to retrieve.\n\n Returns:\n JobDefinition: The job definition corresponding to the given name.\n """\n match = next((job for job in self.get_all_jobs() if job.name == job_name), None)\n if match is None:\n raise DagsterInvariantViolationError(f"Could not find job {job_name} in repository")\n return match
\n\n
[docs] @public\n def get_schedule_names(self) -> Sequence[str]:\n """Get the names of all schedules in the repository.\n\n Returns:\n List[str]\n """\n return [schedule.name for schedule in self.get_all_schedules()]
\n\n
[docs] @public\n def get_all_schedules(self) -> Sequence[ScheduleDefinition]:\n """Return all schedules in the repository as a list.\n\n Returns:\n List[ScheduleDefinition]: All schedules in the repository.\n """\n return []
\n\n
[docs] @public\n def get_schedule(self, schedule_name: str) -> ScheduleDefinition:\n """Get a schedule by name.\n\n Args:\n schedule_name (str): name of the schedule to retrieve.\n\n Returns:\n ScheduleDefinition: The schedule definition corresponding to the given name.\n """\n schedules_with_name = [\n schedule for schedule in self.get_all_schedules() if schedule.name == schedule_name\n ]\n if not schedules_with_name:\n raise DagsterInvariantViolationError(\n f"Could not find schedule {schedule_name} in repository"\n )\n return schedules_with_name[0]
\n\n
[docs] @public\n def has_schedule(self, schedule_name: str) -> bool:\n """Check if a schedule with a given name is present in the repository."""\n return schedule_name in self.get_schedule_names()
\n\n
[docs] @public\n def get_all_sensors(self) -> Sequence[SensorDefinition]:\n """Sequence[SensorDefinition]: Return all sensors in the repository as a list."""\n return []
\n\n
[docs] @public\n def get_sensor_names(self) -> Sequence[str]:\n """Sequence[str]: Get the names of all sensors in the repository."""\n return [sensor.name for sensor in self.get_all_sensors()]
\n\n
[docs] @public\n def get_sensor(self, sensor_name: str) -> SensorDefinition:\n """Get a sensor by name.\n\n Args:\n sensor_name (str): name of the sensor to retrieve.\n\n Returns:\n SensorDefinition: The sensor definition corresponding to the given name.\n """\n sensors_with_name = [\n sensor for sensor in self.get_all_sensors() if sensor.name == sensor_name\n ]\n if not sensors_with_name:\n raise DagsterInvariantViolationError(\n f"Could not find sensor {sensor_name} in repository"\n )\n return sensors_with_name[0]
\n\n
[docs] @public\n def has_sensor(self, sensor_name: str) -> bool:\n """Check if a sensor with a given name is present in the repository."""\n return sensor_name in self.get_sensor_names()
\n\n
[docs] @public\n def get_source_assets_by_key(self) -> Mapping[AssetKey, SourceAsset]:\n """Mapping[AssetKey, SourceAsset]: Get the source assets for the repository."""\n return {}
\n\n
[docs] @public\n def get_assets_defs_by_key(self) -> Mapping[AssetKey, "AssetsDefinition"]:\n """Mapping[AssetKey, AssetsDefinition]: Get the asset definitions for the repository."""\n return {}
\n\n def load_all_definitions(self):\n # force load of all lazy constructed code artifacts\n self.get_all_jobs()\n self.get_all_schedules()\n self.get_all_sensors()\n self.get_source_assets_by_key()
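# --- Added example (illustrative, not part of the original module source) -------
# A minimal sketch of subclassing RepositoryData directly, as described in the
# class docstring above: only the abstract methods are implemented, and the jobs
# are served from a fixed list. An instance like this can typically be returned
# from a function decorated with @repository for fine-grained or lazy control.

from typing import AbstractSet, Mapping, Sequence

from dagster import JobDefinition, ResourceDefinition, job, op
from dagster._core.definitions.repository_definition import RepositoryData


@op
def _noop():
    pass


@job
def my_fixed_job():
    _noop()


class FixedRepositoryData(RepositoryData):
    """Serves a hard-coded list of jobs; everything else is empty."""

    def get_all_jobs(self) -> Sequence[JobDefinition]:
        return [my_fixed_job]

    def get_top_level_resources(self) -> Mapping[str, ResourceDefinition]:
        return {}

    def get_env_vars_by_top_level_resource(self) -> Mapping[str, AbstractSet[str]]:
        return {}

    def get_resource_key_mapping(self) -> Mapping[int, str]:
        return {}
# ---------------------------------------------------------------------------------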
\n\n\nclass CachingRepositoryData(RepositoryData):\n """Default implementation of RepositoryData used by the :py:func:`@repository <repository>` decorator."""\n\n _all_jobs: Optional[Sequence[JobDefinition]]\n _all_pipelines: Optional[Sequence[JobDefinition]]\n\n def __init__(\n self,\n jobs: Mapping[str, Union[JobDefinition, Resolvable[JobDefinition]]],\n schedules: Mapping[str, Union[ScheduleDefinition, Resolvable[ScheduleDefinition]]],\n sensors: Mapping[str, Union[SensorDefinition, Resolvable[SensorDefinition]]],\n source_assets_by_key: Mapping[AssetKey, SourceAsset],\n assets_defs_by_key: Mapping[AssetKey, "AssetsDefinition"],\n top_level_resources: Mapping[str, ResourceDefinition],\n utilized_env_vars: Mapping[str, AbstractSet[str]],\n resource_key_mapping: Mapping[int, str],\n ):\n """Constructs a new CachingRepositoryData object.\n\n You may pass pipeline, job, and schedule definitions directly, or you may pass callables\n with no arguments that will be invoked to lazily construct definitions when accessed by\n name. This can be helpful for performance when there are many definitions in a repository,\n or when constructing the definitions is costly.\n\n Note that when lazily constructing a definition, the name of the definition must match its\n key in its dictionary index, or a :py:class:`DagsterInvariantViolationError` will be thrown\n at retrieval time.\n\n Args:\n jobs (Mapping[str, Union[JobDefinition, Callable[[], JobDefinition]]]):\n The job definitions belonging to the repository.\n schedules (Mapping[str, Union[ScheduleDefinition, Callable[[], ScheduleDefinition]]]):\n The schedules belonging to the repository.\n sensors (Mapping[str, Union[SensorDefinition, Callable[[], SensorDefinition]]]):\n The sensors belonging to a repository.\n source_assets_by_key (Mapping[AssetKey, SourceAsset]): The source assets belonging to a repository.\n assets_defs_by_key (Mapping[AssetKey, AssetsDefinition]): The assets definitions\n belonging to a repository.\n top_level_resources (Mapping[str, ResourceDefinition]): A dict of top-level\n resource keys to defintions, for resources which should be displayed in the UI.\n """\n from dagster._core.definitions import AssetsDefinition\n\n check.mapping_param(jobs, "jobs", key_type=str, value_type=(JobDefinition, FunctionType))\n check.mapping_param(\n schedules, "schedules", key_type=str, value_type=(ScheduleDefinition, FunctionType)\n )\n check.mapping_param(\n sensors, "sensors", key_type=str, value_type=(SensorDefinition, FunctionType)\n )\n check.mapping_param(\n source_assets_by_key, "source_assets_by_key", key_type=AssetKey, value_type=SourceAsset\n )\n check.mapping_param(\n assets_defs_by_key, "assets_defs_by_key", key_type=AssetKey, value_type=AssetsDefinition\n )\n check.mapping_param(\n top_level_resources, "top_level_resources", key_type=str, value_type=ResourceDefinition\n )\n check.mapping_param(\n utilized_env_vars,\n "utilized_resources",\n key_type=str,\n )\n check.mapping_param(\n resource_key_mapping, "resource_key_mapping", key_type=int, value_type=str\n )\n\n self._jobs = CacheingDefinitionIndex(\n JobDefinition,\n "JobDefinition",\n "job",\n jobs,\n self._validate_job,\n )\n\n self._schedules = CacheingDefinitionIndex(\n ScheduleDefinition,\n "ScheduleDefinition",\n "schedule",\n schedules,\n self._validate_schedule,\n )\n # load all schedules to force validation\n self._schedules.get_all_definitions()\n\n self._source_assets_by_key = source_assets_by_key\n self._assets_defs_by_key = assets_defs_by_key\n 
self._top_level_resources = top_level_resources\n self._utilized_env_vars = utilized_env_vars\n self._resource_key_mapping = resource_key_mapping\n\n self._sensors = CacheingDefinitionIndex(\n SensorDefinition,\n "SensorDefinition",\n "sensor",\n sensors,\n self._validate_sensor,\n )\n # load all sensors to force validation\n self._sensors.get_all_definitions()\n\n self._all_jobs = None\n\n @staticmethod\n def from_dict(repository_definitions: Dict[str, Dict[str, Any]]) -> "CachingRepositoryData":\n """Static constructor.\n\n Args:\n repository_definition (Dict[str, Dict[str, ...]]): A dict of the form:\n\n {\n 'jobs': Dict[str, Callable[[], JobDefinition]],\n 'schedules': Dict[str, Callable[[], ScheduleDefinition]]\n }\n\n This form is intended to allow definitions to be created lazily when accessed by name,\n which can be helpful for performance when there are many definitions in a repository, or\n when constructing the definitions is costly.\n """\n from .repository_data_builder import build_caching_repository_data_from_dict\n\n return build_caching_repository_data_from_dict(repository_definitions)\n\n @classmethod\n def from_list(\n cls,\n repository_definitions: Sequence[RepositoryListDefinition],\n default_executor_def: Optional[ExecutorDefinition] = None,\n default_logger_defs: Optional[Mapping[str, LoggerDefinition]] = None,\n top_level_resources: Optional[Mapping[str, ResourceDefinition]] = None,\n resource_key_mapping: Optional[Mapping[int, str]] = None,\n ) -> "CachingRepositoryData":\n """Static constructor.\n\n Args:\n repository_definitions (List[Union[JobDefinition, ScheduleDefinition, SensorDefinition, GraphDefinition]]):\n Use this constructor when you have no need to lazy load jobs or other definitions.\n top_level_resources (Optional[Mapping[str, ResourceDefinition]]): A dict of top-level\n resource keys to defintions, for resources which should be displayed in the UI.\n """\n from .repository_data_builder import build_caching_repository_data_from_list\n\n return build_caching_repository_data_from_list(\n repository_definitions=repository_definitions,\n default_executor_def=default_executor_def,\n default_logger_defs=default_logger_defs,\n top_level_resources=top_level_resources,\n resource_key_mapping=resource_key_mapping,\n )\n\n def get_env_vars_by_top_level_resource(self) -> Mapping[str, AbstractSet[str]]:\n return self._utilized_env_vars\n\n def get_resource_key_mapping(self) -> Mapping[int, str]:\n return self._resource_key_mapping\n\n def get_job_names(self) -> Sequence[str]:\n """Get the names of all jobs in the repository.\n\n Returns:\n List[str]\n """\n return self._jobs.get_definition_names()\n\n def has_job(self, job_name: str) -> bool:\n """Check if a job with a given name is present in the repository.\n\n Args:\n job_name (str): The name of the job.\n\n Returns:\n bool\n """\n check.str_param(job_name, "job_name")\n return self._jobs.has_definition(job_name)\n\n def get_top_level_resources(self) -> Mapping[str, ResourceDefinition]:\n return self._top_level_resources\n\n def get_all_jobs(self) -> Sequence[JobDefinition]:\n """Return all jobs in the repository as a list.\n\n Note that this will construct any job that has not yet been constructed.\n\n Returns:\n List[JobDefinition]: All jobs in the repository.\n """\n if self._all_jobs is not None:\n return self._all_jobs\n\n self._all_jobs = self._jobs.get_all_definitions()\n self._check_node_defs(self._all_jobs)\n return self._all_jobs\n\n def get_job(self, job_name: str) -> JobDefinition:\n """Get a job by 
name.\n\n If this job has not yet been constructed, only this job is constructed, and will\n be cached for future calls.\n\n Args:\n job_name (str): Name of the job to retrieve.\n\n Returns:\n JobDefinition: The job definition corresponding to the given name.\n """\n check.str_param(job_name, "job_name")\n return self._jobs.get_definition(job_name)\n\n def get_schedule_names(self) -> Sequence[str]:\n """Get the names of all schedules in the repository.\n\n Returns:\n List[str]\n """\n return self._schedules.get_definition_names()\n\n def get_all_schedules(self) -> Sequence[ScheduleDefinition]:\n """Return all schedules in the repository as a list.\n\n Note that this will construct any schedule that has not yet been constructed.\n\n Returns:\n List[ScheduleDefinition]: All schedules in the repository.\n """\n return self._schedules.get_all_definitions()\n\n def get_schedule(self, schedule_name: str) -> ScheduleDefinition:\n """Get a schedule by name.\n\n if this schedule has not yet been constructed, only this schedule is constructed, and will\n be cached for future calls.\n\n Args:\n schedule_name (str): name of the schedule to retrieve.\n\n Returns:\n ScheduleDefinition: The schedule definition corresponding to the given name.\n """\n check.str_param(schedule_name, "schedule_name")\n\n return self._schedules.get_definition(schedule_name)\n\n def has_schedule(self, schedule_name: str) -> bool:\n check.str_param(schedule_name, "schedule_name")\n\n return self._schedules.has_definition(schedule_name)\n\n def get_all_sensors(self) -> Sequence[SensorDefinition]:\n return self._sensors.get_all_definitions()\n\n def get_sensor_names(self) -> Sequence[str]:\n return self._sensors.get_definition_names()\n\n def get_sensor(self, sensor_name: str) -> SensorDefinition:\n return self._sensors.get_definition(sensor_name)\n\n def has_sensor(self, sensor_name: str) -> bool:\n return self._sensors.has_definition(sensor_name)\n\n def get_source_assets_by_key(self) -> Mapping[AssetKey, SourceAsset]:\n return self._source_assets_by_key\n\n def get_assets_defs_by_key(self) -> Mapping[AssetKey, "AssetsDefinition"]:\n return self._assets_defs_by_key\n\n def _check_node_defs(self, job_defs: Sequence[JobDefinition]) -> None:\n node_defs = {}\n node_to_job = {}\n for job_def in job_defs:\n for node_def in [*job_def.all_node_defs, job_def.graph]:\n # skip checks for subselected graphs because they don't have their own names\n if isinstance(node_def, SubselectedGraphDefinition):\n break\n\n if node_def.name not in node_defs:\n node_defs[node_def.name] = node_def\n node_to_job[node_def.name] = job_def.name\n\n if node_defs[node_def.name] is not node_def:\n first_name, second_name = sorted([node_to_job[node_def.name], job_def.name])\n raise DagsterInvalidDefinitionError(\n f"Conflicting definitions found in repository with name '{node_def.name}'."\n " Op/Graph definition names must be unique within a repository."\n f" {node_def.__class__.__name__} is defined in"\n f" job '{first_name}' and in"\n f" job '{second_name}'."\n )\n\n def _validate_job(self, job: JobDefinition) -> JobDefinition:\n return job\n\n def _validate_schedule(self, schedule: ScheduleDefinition) -> ScheduleDefinition:\n job_names = self.get_job_names()\n\n if schedule.job_name not in job_names:\n raise DagsterInvalidDefinitionError(\n f'ScheduleDefinition "{schedule.name}" targets job "{schedule.job_name}" '\n "which was not found in this repository."\n )\n\n return schedule\n\n def _validate_sensor(self, sensor: SensorDefinition) -> 
SensorDefinition:\n job_names = self.get_job_names()\n if len(sensor.targets) == 0:\n # skip validation when the sensor does not target a job\n return sensor\n\n for target in sensor.targets:\n if target.job_name not in job_names:\n raise DagsterInvalidDefinitionError(\n f'SensorDefinition "{sensor.name}" targets job "{target.job_name}" '\n "which was not found in this repository."\n )\n\n return sensor\n
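# --- Added example (illustrative, not part of the original module source) -------
# A minimal sketch of the lazy, dict-based construction path that
# CachingRepositoryData.from_dict documents above. With the @repository decorator,
# the same dict shape can be returned directly: the zero-argument callables are
# only invoked when a definition is first requested by name, and each key must
# match the name of the definition it produces.

from dagster import job, op, repository


@op
def _ping():
    pass


def _make_expensive_job():
    # Constructed only when "expensive_job" is first looked up by name.
    @job
    def expensive_job():
        _ping()

    return expensive_job


@repository
def lazy_repo():
    return {
        "jobs": {"expensive_job": _make_expensive_job},
    }
# ---------------------------------------------------------------------------------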
", "current_page_name": "_modules/dagster/_core/definitions/repository_definition/repository_data", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.repository_definition.repository_data"}, "repository_definition": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.repository_definition.repository_definition

\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Dict,\n    Iterable,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Type,\n    Union,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.definitions.asset_check_spec import AssetCheckKey\nfrom dagster._core.definitions.asset_graph import AssetGraph, InternalAssetGraph\nfrom dagster._core.definitions.assets_job import (\n    ASSET_BASE_JOB_PREFIX,\n)\nfrom dagster._core.definitions.cacheable_assets import AssetsDefinitionCacheableData\nfrom dagster._core.definitions.events import AssetKey, CoercibleToAssetKey\nfrom dagster._core.definitions.executor_definition import ExecutorDefinition\nfrom dagster._core.definitions.job_definition import JobDefinition\nfrom dagster._core.definitions.logger_definition import LoggerDefinition\nfrom dagster._core.definitions.metadata import MetadataMapping\nfrom dagster._core.definitions.resource_definition import ResourceDefinition\nfrom dagster._core.definitions.schedule_definition import ScheduleDefinition\nfrom dagster._core.definitions.sensor_definition import SensorDefinition\nfrom dagster._core.definitions.source_asset import SourceAsset\nfrom dagster._core.definitions.utils import check_valid_name\nfrom dagster._core.errors import DagsterInvariantViolationError\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._serdes import whitelist_for_serdes\nfrom dagster._utils import hash_collection\n\nfrom .repository_data import CachingRepositoryData, RepositoryData\nfrom .valid_definitions import (\n    RepositoryListDefinition as RepositoryListDefinition,\n)\n\nif TYPE_CHECKING:\n    from dagster._core.definitions import AssetsDefinition\n    from dagster._core.definitions.cacheable_assets import CacheableAssetsDefinition\n    from dagster._core.storage.asset_value_loader import AssetValueLoader\n\n\n@whitelist_for_serdes\nclass RepositoryLoadData(\n    NamedTuple(\n        "_RepositoryLoadData",\n        [\n            ("cached_data_by_key", Mapping[str, Sequence[AssetsDefinitionCacheableData]]),\n        ],\n    )\n):\n    def __new__(cls, cached_data_by_key: Mapping[str, Sequence[AssetsDefinitionCacheableData]]):\n        return super(RepositoryLoadData, cls).__new__(\n            cls,\n            cached_data_by_key=(\n                check.mapping_param(\n                    cached_data_by_key,\n                    "cached_data_by_key",\n                    key_type=str,\n                    value_type=list,\n                )\n            ),\n        )\n\n    # Allow this to be hashed for use in `lru_cache`. This is needed because:\n    # - `ReconstructableJob` uses `lru_cache`\n    # - `ReconstructableJob` has a `ReconstructableRepository` attribute\n    # - `ReconstructableRepository` has a `RepositoryLoadData` attribute\n    # - `RepositoryLoadData` has collection attributes that are unhashable by default\n    def __hash__(self) -> int:\n        if not hasattr(self, "_hash"):\n            self._hash = hash_collection(self)\n        return self._hash\n\n\n
[docs]class RepositoryDefinition:\n """Define a repository that contains a group of definitions.\n\n Users should typically not create objects of this class directly. Instead, use the\n :py:func:`@repository` decorator.\n\n Args:\n name (str): The name of the repository.\n repository_data (RepositoryData): Contains the definitions making up the repository.\n description (Optional[str]): A string description of the repository.\n metadata (Optional[MetadataMapping]): A map of arbitrary metadata for the repository.\n """\n\n def __init__(\n self,\n name,\n *,\n repository_data,\n description=None,\n metadata=None,\n repository_load_data=None,\n ):\n self._name = check_valid_name(name)\n self._description = check.opt_str_param(description, "description")\n self._repository_data: RepositoryData = check.inst_param(\n repository_data, "repository_data", RepositoryData\n )\n self._metadata = check.opt_mapping_param(metadata, "metadata", key_type=str)\n self._repository_load_data = check.opt_inst_param(\n repository_load_data, "repository_load_data", RepositoryLoadData\n )\n\n @property\n def repository_load_data(self) -> Optional[RepositoryLoadData]:\n return self._repository_load_data\n\n @public\n @property\n def name(self) -> str:\n """str: The name of the repository."""\n return self._name\n\n @public\n @property\n def description(self) -> Optional[str]:\n """Optional[str]: A human-readable description of the repository."""\n return self._description\n\n @public\n @property\n def metadata(self) -> Optional[MetadataMapping]:\n """Optional[MetadataMapping]: Arbitrary metadata for the repository."""\n return self._metadata\n\n def load_all_definitions(self):\n # force load of all lazy constructed code artifacts\n self._repository_data.load_all_definitions()\n\n @public\n @property\n def job_names(self) -> Sequence[str]:\n """List[str]: Names of all jobs in the repository."""\n return self._repository_data.get_job_names()\n\n def get_top_level_resources(self) -> Mapping[str, ResourceDefinition]:\n return self._repository_data.get_top_level_resources()\n\n def get_env_vars_by_top_level_resource(self) -> Mapping[str, AbstractSet[str]]:\n return self._repository_data.get_env_vars_by_top_level_resource()\n\n def get_resource_key_mapping(self) -> Mapping[int, str]:\n return self._repository_data.get_resource_key_mapping()\n\n
[docs] @public\n def has_job(self, name: str) -> bool:\n """Check if a job with a given name is present in the repository.\n\n Args:\n name (str): The name of the job.\n\n Returns:\n bool\n """\n return self._repository_data.has_job(name)
\n\n
[docs] @public\n def get_job(self, name: str) -> JobDefinition:\n """Get a job by name.\n\n If this job is present in the lazily evaluated dictionary passed to the\n constructor, but has not yet been constructed, only this job is constructed, and\n will be cached for future calls.\n\n Args:\n name (str): Name of the job to retrieve.\n\n Returns:\n JobDefinition: The job definition corresponding to\n the given name.\n """\n return self._repository_data.get_job(name)
\n\n
[docs] @public\n def get_all_jobs(self) -> Sequence[JobDefinition]:\n """Return all jobs in the repository as a list.\n\n Note that this will construct any job in the lazily evaluated dictionary that has\n not yet been constructed.\n\n Returns:\n List[JobDefinition]: All jobs in the repository.\n """\n return self._repository_data.get_all_jobs()
\n\n @public\n @property\n def schedule_defs(self) -> Sequence[ScheduleDefinition]:\n """List[ScheduleDefinition]: All schedules in the repository."""\n return self._repository_data.get_all_schedules()\n\n
[docs] @public\n def get_schedule_def(self, name: str) -> ScheduleDefinition:\n """Get a schedule definition by name.\n\n Args:\n name (str): The name of the schedule.\n\n Returns:\n ScheduleDefinition: The schedule definition.\n """\n return self._repository_data.get_schedule(name)
\n\n
[docs] @public\n def has_schedule_def(self, name: str) -> bool:\n """bool: Check if a schedule with a given name is present in the repository."""\n return self._repository_data.has_schedule(name)
\n\n @public\n @property\n def sensor_defs(self) -> Sequence[SensorDefinition]:\n """Sequence[SensorDefinition]: All sensors in the repository."""\n return self._repository_data.get_all_sensors()\n\n
[docs] @public\n def get_sensor_def(self, name: str) -> SensorDefinition:\n """Get a sensor definition by name.\n\n Args:\n name (str): The name of the sensor.\n\n Returns:\n SensorDefinition: The sensor definition.\n """\n return self._repository_data.get_sensor(name)
\n\n
[docs] @public\n def has_sensor_def(self, name: str) -> bool:\n """bool: Check if a sensor with a given name is present in the repository."""\n return self._repository_data.has_sensor(name)
\n\n @property\n def source_assets_by_key(self) -> Mapping[AssetKey, SourceAsset]:\n return self._repository_data.get_source_assets_by_key()\n\n @property\n def assets_defs_by_key(self) -> Mapping[AssetKey, "AssetsDefinition"]:\n return self._repository_data.get_assets_defs_by_key()\n\n def has_implicit_global_asset_job_def(self) -> bool:\n """Returns true if there is a single implicit asset job for all asset keys in a repository."""\n return self.has_job(ASSET_BASE_JOB_PREFIX)\n\n def get_implicit_global_asset_job_def(self) -> JobDefinition:\n """A useful convenience method for repositories where there is a set of assets with\n the same partitioning schema and one wants to access their corresponding implicit job\n easily.\n """\n if not self.has_job(ASSET_BASE_JOB_PREFIX):\n raise DagsterInvariantViolationError(\n "There is no single global asset job, likely due to assets using "\n "different partitioning schemes via their partitions_def parameter. You must "\n "use get_implicit_job_def_for_assets in order to access the correct implicit job."\n )\n\n return self.get_job(ASSET_BASE_JOB_PREFIX)\n\n def get_implicit_asset_job_names(self) -> Sequence[str]:\n return [\n job_name for job_name in self.job_names if job_name.startswith(ASSET_BASE_JOB_PREFIX)\n ]\n\n def get_implicit_job_def_for_assets(\n self, asset_keys: Iterable[AssetKey]\n ) -> Optional[JobDefinition]:\n """Returns the asset base job that contains all the given assets, or None if there is no such\n job.\n """\n if self.has_job(ASSET_BASE_JOB_PREFIX):\n base_job = self.get_job(ASSET_BASE_JOB_PREFIX)\n if all(\n key in base_job.asset_layer.assets_defs_by_key\n or base_job.asset_layer.is_observable_for_asset(key)\n for key in asset_keys\n ):\n return base_job\n else:\n i = 0\n while self.has_job(f"{ASSET_BASE_JOB_PREFIX}_{i}"):\n base_job = self.get_job(f"{ASSET_BASE_JOB_PREFIX}_{i}")\n\n if all(\n key in base_job.asset_layer.assets_defs_by_key\n or base_job.asset_layer.is_observable_for_asset(key)\n for key in asset_keys\n ):\n return base_job\n\n i += 1\n\n return None\n\n def get_maybe_subset_job_def(\n self,\n job_name: str,\n op_selection: Optional[Iterable[str]] = None,\n asset_selection: Optional[AbstractSet[AssetKey]] = None,\n asset_check_selection: Optional[AbstractSet[AssetCheckKey]] = None,\n ):\n defn = self.get_job(job_name)\n return defn.get_subset(\n op_selection=op_selection,\n asset_selection=asset_selection,\n asset_check_selection=asset_check_selection,\n )\n\n
[docs] @public\n def load_asset_value(\n self,\n asset_key: CoercibleToAssetKey,\n *,\n python_type: Optional[Type] = None,\n instance: Optional[DagsterInstance] = None,\n partition_key: Optional[str] = None,\n metadata: Optional[Dict[str, Any]] = None,\n resource_config: Optional[Any] = None,\n ) -> object:\n """Load the contents of an asset as a Python object.\n\n Invokes `load_input` on the :py:class:`IOManager` associated with the asset.\n\n If you want to load the values of multiple assets, it's more efficient to use\n :py:meth:`~dagster.RepositoryDefinition.get_asset_value_loader`, which avoids spinning up\n resources separately for each asset.\n\n Args:\n asset_key (Union[AssetKey, Sequence[str], str]): The key of the asset to load.\n python_type (Optional[Type]): The python type to load the asset as. This is what will\n be returned inside `load_input` by `context.dagster_type.typing_type`.\n partition_key (Optional[str]): The partition of the asset to load.\n metadata (Optional[Dict[str, Any]]): Input metadata to pass to the :py:class:`IOManager`\n (is equivalent to setting the metadata argument in `In` or `AssetIn`).\n resource_config (Optional[Any]): A dictionary of resource configurations to be passed\n to the :py:class:`IOManager`.\n\n Returns:\n The contents of an asset as a Python object.\n """\n from dagster._core.storage.asset_value_loader import AssetValueLoader\n\n with AssetValueLoader(\n self.assets_defs_by_key, self.source_assets_by_key, instance=instance\n ) as loader:\n return loader.load_asset_value(\n asset_key,\n python_type=python_type,\n partition_key=partition_key,\n metadata=metadata,\n resource_config=resource_config,\n )
\n\n
[docs] @public\n def get_asset_value_loader(\n self, instance: Optional[DagsterInstance] = None\n ) -> "AssetValueLoader":\n """Returns an object that can load the contents of assets as Python objects.\n\n Invokes `load_input` on the :py:class:`IOManager` associated with the assets. Avoids\n spinning up resources separately for each asset.\n\n Usage:\n\n .. code-block:: python\n\n with my_repo.get_asset_value_loader() as loader:\n asset1 = loader.load_asset_value("asset1")\n asset2 = loader.load_asset_value("asset2")\n\n """\n from dagster._core.storage.asset_value_loader import AssetValueLoader\n\n return AssetValueLoader(\n self.assets_defs_by_key, self.source_assets_by_key, instance=instance\n )
\n\n @property\n def asset_graph(self) -> InternalAssetGraph:\n return AssetGraph.from_assets(\n [*set(self.assets_defs_by_key.values()), *self.source_assets_by_key.values()]\n )\n\n # If definition comes from the @repository decorator, then the __call__ method will be\n # overwritten. Therefore, we want to maintain the call-ability of repository definitions.\n def __call__(self, *args, **kwargs):\n return self
\n\n\nclass PendingRepositoryDefinition:\n def __init__(\n self,\n name: str,\n repository_definitions: Sequence[\n Union[RepositoryListDefinition, "CacheableAssetsDefinition"]\n ],\n description: Optional[str] = None,\n metadata: Optional[MetadataMapping] = None,\n default_logger_defs: Optional[Mapping[str, LoggerDefinition]] = None,\n default_executor_def: Optional[ExecutorDefinition] = None,\n _top_level_resources: Optional[Mapping[str, ResourceDefinition]] = None,\n _resource_key_mapping: Optional[Mapping[int, str]] = None,\n ):\n self._repository_definitions = check.list_param(\n repository_definitions,\n "repository_definition",\n additional_message=(\n "PendingRepositoryDefinition supports only list-based repository data at this time."\n ),\n )\n self._name = name\n self._description = description\n self._metadata = metadata\n self._default_logger_defs = default_logger_defs\n self._default_executor_def = default_executor_def\n self._top_level_resources = _top_level_resources\n self._resource_key_mapping = _resource_key_mapping\n\n @property\n def name(self) -> str:\n return self._name\n\n def _compute_repository_load_data(self) -> RepositoryLoadData:\n from dagster._core.definitions.cacheable_assets import CacheableAssetsDefinition\n\n return RepositoryLoadData(\n cached_data_by_key={\n defn.unique_id: defn.compute_cacheable_data()\n for defn in self._repository_definitions\n if isinstance(defn, CacheableAssetsDefinition)\n }\n )\n\n def _get_repository_definition(\n self, repository_load_data: RepositoryLoadData\n ) -> RepositoryDefinition:\n from dagster._core.definitions.cacheable_assets import CacheableAssetsDefinition\n\n resolved_definitions: List[RepositoryListDefinition] = []\n for defn in self._repository_definitions:\n if isinstance(defn, CacheableAssetsDefinition):\n # should always have metadata for each cached defn at this point\n check.invariant(\n defn.unique_id in repository_load_data.cached_data_by_key,\n "No metadata found for CacheableAssetsDefinition with unique_id"\n f" {defn.unique_id}.",\n )\n # use the emtadata to generate definitions\n resolved_definitions.extend(\n defn.build_definitions(\n data=repository_load_data.cached_data_by_key[defn.unique_id]\n )\n )\n else:\n resolved_definitions.append(defn)\n\n repository_data = CachingRepositoryData.from_list(\n resolved_definitions,\n default_executor_def=self._default_executor_def,\n default_logger_defs=self._default_logger_defs,\n top_level_resources=self._top_level_resources,\n resource_key_mapping=self._resource_key_mapping,\n )\n\n return RepositoryDefinition(\n self._name,\n repository_data=repository_data,\n description=self._description,\n metadata=self._metadata,\n repository_load_data=repository_load_data,\n )\n\n def reconstruct_repository_definition(\n self, repository_load_data: RepositoryLoadData\n ) -> RepositoryDefinition:\n """Use the provided RepositoryLoadData to construct and return a RepositoryDefinition."""\n check.inst_param(repository_load_data, "repository_load_data", RepositoryLoadData)\n return self._get_repository_definition(repository_load_data)\n\n def compute_repository_definition(self) -> RepositoryDefinition:\n """Compute the required RepositoryLoadData and use it to construct and return a RepositoryDefinition."""\n repository_load_data = self._compute_repository_load_data()\n return self._get_repository_definition(repository_load_data)\n
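# --- Added example (illustrative, not part of the original module source) -------
# A minimal sketch of interacting with a RepositoryDefinition produced by the
# @repository decorator: jobs are looked up by name through has_job / get_job /
# job_names, mirroring the members defined above.

from dagster import job, op, repository


@op
def _hello():
    pass


@job
def hello_job():
    _hello()


@repository
def example_repo():
    return [hello_job]


def _example_repository_access():
    assert example_repo.has_job("hello_job")
    assert "hello_job" in example_repo.job_names
    return example_repo.get_job("hello_job")
# ---------------------------------------------------------------------------------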
", "current_page_name": "_modules/dagster/_core/definitions/repository_definition/repository_definition", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.repository_definition.repository_definition"}}, "resource_definition": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.resource_definition

\nfrom functools import update_wrapper\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Callable,\n    Dict,\n    Iterator,\n    Mapping,\n    Optional,\n    Union,\n    cast,\n    overload,\n)\n\nfrom typing_extensions import TypeAlias\n\nimport dagster._check as check\nfrom dagster._annotations import experimental_param, public\nfrom dagster._core.decorator_utils import format_docstring_for_description\nfrom dagster._core.definitions.config import is_callable_valid_config_arg\nfrom dagster._core.definitions.configurable import AnonymousConfigurableDefinition\nfrom dagster._core.errors import DagsterInvalidDefinitionError, DagsterInvalidInvocationError\nfrom dagster._utils import IHasInternalInit\n\nfrom ..decorator_utils import (\n    get_function_params,\n    has_at_least_one_parameter,\n    is_required_param,\n    positional_arg_name_list,\n    validate_expected_params,\n)\nfrom .definition_config_schema import (\n    CoercableToConfigSchema,\n    IDefinitionConfigSchema,\n    convert_user_facing_definition_config_schema,\n)\nfrom .resource_invocation import resource_invocation_result\nfrom .resource_requirement import (\n    RequiresResources,\n    ResourceDependencyRequirement,\n    ResourceRequirement,\n)\nfrom .scoped_resources_builder import (  # re-exported\n    IContainsGenerator as IContainsGenerator,\n    Resources as Resources,\n    ScopedResourcesBuilder as ScopedResourcesBuilder,\n)\n\nif TYPE_CHECKING:\n    from dagster._core.execution.resources_init import InitResourceContext\n\nResourceFunctionWithContext: TypeAlias = Callable[["InitResourceContext"], Any]\nResourceFunctionWithoutContext: TypeAlias = Callable[[], Any]\nResourceFunction: TypeAlias = Union[\n    ResourceFunctionWithContext,\n    ResourceFunctionWithoutContext,\n]\n\n\n
[docs]@experimental_param(param="version")\nclass ResourceDefinition(AnonymousConfigurableDefinition, RequiresResources, IHasInternalInit):\n """Core class for defining resources.\n\n Resources are scoped ways to make external resources (like database connections) available to\n ops and assets during job execution and to clean up after execution resolves.\n\n If resource_fn yields once rather than returning (in the manner of functions decorable with\n :py:func:`@contextlib.contextmanager <python:contextlib.contextmanager>`) then the body of the\n function after the yield will be run after execution resolves, allowing users to write their\n own teardown/cleanup logic.\n\n Depending on your executor, resources may be instantiated and cleaned up more than once in a\n job execution.\n\n Args:\n resource_fn (Callable[[InitResourceContext], Any]): User-provided function to instantiate\n the resource, which will be made available to executions keyed on the\n ``context.resources`` object.\n config_schema (Optional[ConfigSchema): The schema for the config. If set, Dagster will check\n that config provided for the resource matches this schema and fail if it does not. If\n not set, Dagster will accept any config provided for the resource.\n description (Optional[str]): A human-readable description of the resource.\n required_resource_keys: (Optional[Set[str]]) Keys for the resources required by this\n resource. A DagsterInvariantViolationError will be raised during initialization if\n dependencies are cyclic.\n version (Optional[str]): (Experimental) The version of the resource's definition fn. Two\n wrapped resource functions should only have the same version if they produce the same\n resource definition when provided with the same inputs.\n """\n\n def __init__(\n self,\n resource_fn: ResourceFunction,\n config_schema: CoercableToConfigSchema = None,\n description: Optional[str] = None,\n required_resource_keys: Optional[AbstractSet[str]] = None,\n version: Optional[str] = None,\n ):\n self._resource_fn = check.callable_param(resource_fn, "resource_fn")\n self._config_schema = convert_user_facing_definition_config_schema(config_schema)\n self._description = check.opt_str_param(description, "description")\n self._required_resource_keys = check.opt_set_param(\n required_resource_keys, "required_resource_keys"\n )\n self._version = check.opt_str_param(version, "version")\n\n # this attribute will be updated by the @dagster_maintained_resource and @dagster_maintained_io_manager decorators\n self._dagster_maintained = False\n self._hardcoded_resource_type = None\n\n @staticmethod\n def dagster_internal_init(\n *,\n resource_fn: ResourceFunction,\n config_schema: CoercableToConfigSchema,\n description: Optional[str],\n required_resource_keys: Optional[AbstractSet[str]],\n version: Optional[str],\n ) -> "ResourceDefinition":\n return ResourceDefinition(\n resource_fn=resource_fn,\n config_schema=config_schema,\n description=description,\n required_resource_keys=required_resource_keys,\n version=version,\n )\n\n @property\n def resource_fn(self) -> ResourceFunction:\n return self._resource_fn\n\n @property\n def config_schema(self) -> IDefinitionConfigSchema:\n return self._config_schema\n\n @public\n @property\n def description(self) -> Optional[str]:\n """A human-readable description of the resource."""\n return self._description\n\n @public\n @property\n def version(self) -> Optional[str]:\n """A string which can be used to identify a particular code version of a resource definition."""\n return 
self._version\n\n @public\n @property\n def required_resource_keys(self) -> AbstractSet[str]:\n """A set of the resource keys that this resource depends on. These keys will be made available\n to the resource's init context during execution, and the resource will not be instantiated\n until all required resources are available.\n """\n return self._required_resource_keys\n\n def _is_dagster_maintained(self) -> bool:\n return self._dagster_maintained\n\n
[docs] @public\n @staticmethod\n def none_resource(description: Optional[str] = None) -> "ResourceDefinition":\n """A helper function that returns a none resource.\n\n Args:\n description ([Optional[str]]): The description of the resource. Defaults to None.\n\n Returns:\n [ResourceDefinition]: A resource that does nothing.\n """\n return ResourceDefinition.hardcoded_resource(value=None, description=description)
\n\n
[docs] @public\n @staticmethod\n def hardcoded_resource(value: Any, description: Optional[str] = None) -> "ResourceDefinition":\n """A helper function that creates a ``ResourceDefinition`` with a hardcoded object.\n\n Args:\n value (Any): The value that will be accessible via context.resources.resource_name.\n description ([Optional[str]]): The description of the resource. Defaults to None.\n\n Returns:\n [ResourceDefinition]: A hardcoded resource.\n """\n resource_def = ResourceDefinition(\n resource_fn=lambda _init_context: value, description=description\n )\n # Make sure telemetry info gets passed in to hardcoded resources\n if hasattr(value, "_is_dagster_maintained"):\n resource_def._dagster_maintained = value._is_dagster_maintained() # noqa: SLF001\n resource_def._hardcoded_resource_type = type(value) # noqa: SLF001\n\n return resource_def
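A minimal sketch of using ``hardcoded_resource`` to back a resource key on a job (the job, op, and connection values below are made up for illustration):

.. code-block:: python

    from dagster import ResourceDefinition, job, op

    # Static, made-up connection info for local runs.
    database_conn = ResourceDefinition.hardcoded_resource(
        value={"host": "localhost", "port": 5432},
        description="Static connection info for local development.",
    )

    @op(required_resource_keys={"db"})
    def read_host(context):
        return context.resources.db["host"]

    @job(resource_defs={"db": database_conn})
    def local_job():
        read_host()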
\n\n
[docs] @public\n @staticmethod\n def mock_resource(description: Optional[str] = None) -> "ResourceDefinition":\n """A helper function that creates a ``ResourceDefinition`` which wraps a ``mock.MagicMock``.\n\n Args:\n description ([Optional[str]]): The description of the resource. Defaults to None.\n\n Returns:\n [ResourceDefinition]: A resource that creates the magic methods automatically and helps\n you mock existing resources.\n """\n from unittest import mock\n\n return ResourceDefinition(\n resource_fn=lambda _init_context: mock.MagicMock(), description=description\n )
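A minimal sketch of using ``mock_resource`` to stub out a dependency in a test (the op, job, and ``client`` key are hypothetical):

.. code-block:: python

    from dagster import ResourceDefinition, job, op

    @op(required_resource_keys={"client"})
    def send_ping(context):
        # The MagicMock accepts any attribute access and call.
        context.resources.client.send("ping")

    @job(resource_defs={"client": ResourceDefinition.mock_resource()})
    def ping_job():
        send_ping()

    # The job runs without a real client implementation.
    assert ping_job.execute_in_process().success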
\n\n
[docs] @public\n @staticmethod\n def string_resource(description: Optional[str] = None) -> "ResourceDefinition":\n """Creates a ``ResourceDefinition`` which takes in a single string as configuration\n and returns this configured string to any ops or assets which depend on it.\n\n Args:\n description ([Optional[str]]): The description of the string resource. Defaults to None.\n\n Returns:\n [ResourceDefinition]: A resource that takes in a single string as configuration and\n returns that string.\n """\n return ResourceDefinition(\n resource_fn=lambda init_context: init_context.resource_config,\n config_schema=str,\n description=description,\n )
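A minimal sketch of ``string_resource``, where the string value is supplied as run config at launch time (the op and job names are made up):

.. code-block:: python

    from dagster import ResourceDefinition, job, op

    @op(required_resource_keys={"prefix"})
    def greet(context) -> str:
        return context.resources.prefix + "world"

    @job(resource_defs={"prefix": ResourceDefinition.string_resource()})
    def greet_job():
        greet()

    # The configured string is handed back to the op via context.resources.
    greet_job.execute_in_process(
        run_config={"resources": {"prefix": {"config": "hello, "}}}
    )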
\n\n def copy_for_configured(\n self,\n description: Optional[str],\n config_schema: CoercableToConfigSchema,\n ) -> "ResourceDefinition":\n resource_def = ResourceDefinition.dagster_internal_init(\n config_schema=config_schema,\n description=description or self.description,\n resource_fn=self.resource_fn,\n required_resource_keys=self.required_resource_keys,\n version=self.version,\n )\n\n resource_def._dagster_maintained = self._is_dagster_maintained() # noqa: SLF001\n\n return resource_def\n\n def __call__(self, *args, **kwargs):\n from dagster._core.execution.context.init import UnboundInitResourceContext\n\n if has_at_least_one_parameter(self.resource_fn):\n if len(args) + len(kwargs) == 0:\n raise DagsterInvalidInvocationError(\n "Resource initialization function has context argument, but no context was"\n " provided when invoking."\n )\n if len(args) + len(kwargs) > 1:\n raise DagsterInvalidInvocationError(\n "Initialization of resource received multiple arguments. Only a first "\n "positional context parameter should be provided when invoking."\n )\n\n context_param_name = get_function_params(self.resource_fn)[0].name\n\n if args:\n check.opt_inst_param(args[0], context_param_name, UnboundInitResourceContext)\n return resource_invocation_result(\n self, cast(Optional[UnboundInitResourceContext], args[0])\n )\n else:\n if context_param_name not in kwargs:\n raise DagsterInvalidInvocationError(\n f"Resource initialization expected argument '{context_param_name}'."\n )\n check.opt_inst_param(\n kwargs[context_param_name], context_param_name, UnboundInitResourceContext\n )\n\n return resource_invocation_result(\n self, cast(Optional[UnboundInitResourceContext], kwargs[context_param_name])\n )\n elif len(args) + len(kwargs) > 0:\n raise DagsterInvalidInvocationError(\n "Attempted to invoke resource with argument, but underlying function has no context"\n " argument. Either specify a context argument on the resource function, or remove"\n " the passed-in argument."\n )\n else:\n return resource_invocation_result(self, None)\n\n def get_resource_requirements(\n self, outer_context: Optional[object] = None\n ) -> Iterator[ResourceRequirement]:\n source_key = cast(str, outer_context)\n for resource_key in sorted(list(self.required_resource_keys)):\n yield ResourceDependencyRequirement(key=resource_key, source_key=source_key)
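The ``__call__`` implementation above allows a ``ResourceDefinition`` to be invoked directly, which is mainly useful in tests. A minimal sketch using ``build_init_resource_context`` (the ``greeting`` resource and its config are hypothetical):

.. code-block:: python

    from dagster import build_init_resource_context, resource

    @resource(config_schema={"prefix": str})
    def greeting(init_context):
        return init_context.resource_config["prefix"] + "world"

    # Calling the definition directly goes through ResourceDefinition.__call__.
    context = build_init_resource_context(config={"prefix": "hello, "})
    assert greeting(context) == "hello, world"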
\n\n\ndef dagster_maintained_resource(\n resource_def: ResourceDefinition,\n) -> ResourceDefinition:\n resource_def._dagster_maintained = True # noqa: SLF001\n return resource_def\n\n\nclass _ResourceDecoratorCallable:\n def __init__(\n self,\n config_schema: Optional[Mapping[str, Any]] = None,\n description: Optional[str] = None,\n required_resource_keys: Optional[AbstractSet[str]] = None,\n version: Optional[str] = None,\n ):\n self.config_schema = config_schema # checked by underlying definition\n self.description = check.opt_str_param(description, "description")\n self.version = check.opt_str_param(version, "version")\n self.required_resource_keys = check.opt_set_param(\n required_resource_keys, "required_resource_keys"\n )\n\n def __call__(self, resource_fn: ResourceFunction) -> ResourceDefinition:\n check.callable_param(resource_fn, "resource_fn")\n\n any_name = ["*"] if has_at_least_one_parameter(resource_fn) else []\n\n params = get_function_params(resource_fn)\n\n missing_positional = validate_expected_params(params, any_name)\n if missing_positional:\n raise DagsterInvalidDefinitionError(\n f"@resource decorated function '{resource_fn.__name__}' expects a single "\n "positional argument."\n )\n\n extras = params[len(any_name) :]\n\n required_extras = list(filter(is_required_param, extras))\n if required_extras:\n raise DagsterInvalidDefinitionError(\n f"@resource decorated function '{resource_fn.__name__}' expects only a single"\n " positional required argument. Got required extra params"\n f" {', '.join(positional_arg_name_list(required_extras))}"\n )\n\n resource_def = ResourceDefinition.dagster_internal_init(\n resource_fn=resource_fn,\n config_schema=self.config_schema,\n description=self.description or format_docstring_for_description(resource_fn),\n version=self.version,\n required_resource_keys=self.required_resource_keys,\n )\n\n # `update_wrapper` typing cannot currently handle a Union of Callables correctly\n update_wrapper(resource_def, wrapped=resource_fn) # type: ignore\n\n return resource_def\n\n\n@overload\ndef resource(config_schema: ResourceFunction) -> ResourceDefinition: ...\n\n\n@overload\ndef resource(\n config_schema: CoercableToConfigSchema = ...,\n description: Optional[str] = ...,\n required_resource_keys: Optional[AbstractSet[str]] = ...,\n version: Optional[str] = ...,\n) -> Callable[[ResourceFunction], "ResourceDefinition"]: ...\n\n\n
[docs]def resource(\n config_schema: Union[ResourceFunction, CoercableToConfigSchema] = None,\n description: Optional[str] = None,\n required_resource_keys: Optional[AbstractSet[str]] = None,\n version: Optional[str] = None,\n) -> Union[Callable[[ResourceFunction], "ResourceDefinition"], "ResourceDefinition"]:\n """Define a resource.\n\n The decorated function should accept an :py:class:`InitResourceContext` and return an instance of\n the resource. This function will become the ``resource_fn`` of an underlying\n :py:class:`ResourceDefinition`.\n\n If the decorated function yields once rather than returning (in the manner of functions\n decorable with :py:func:`@contextlib.contextmanager <python:contextlib.contextmanager>`) then\n the body of the function after the yield will be run after execution resolves, allowing users\n to write their own teardown/cleanup logic.\n\n Args:\n config_schema (Optional[ConfigSchema]): The schema for the config. Configuration data available in\n `init_context.resource_config`. If not set, Dagster will accept any config provided.\n description(Optional[str]): A human-readable description of the resource.\n version (Optional[str]): (Experimental) The version of a resource function. Two wrapped\n resource functions should only have the same version if they produce the same resource\n definition when provided with the same inputs.\n required_resource_keys (Optional[Set[str]]): Keys for the resources required by this resource.\n """\n # This case is for when decorator is used bare, without arguments.\n # E.g. @resource versus @resource()\n if callable(config_schema) and not is_callable_valid_config_arg(config_schema):\n return _ResourceDecoratorCallable()(config_schema)\n\n def _wrap(resource_fn: ResourceFunction) -> "ResourceDefinition":\n return _ResourceDecoratorCallable(\n config_schema=cast(Optional[Dict[str, Any]], config_schema),\n description=description,\n required_resource_keys=required_resource_keys,\n version=version,\n )(resource_fn)\n\n return _wrap
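A minimal sketch of the yield-based teardown behavior described in the docstring above (the ``sqlite_conn`` resource and its config key are illustrative):

.. code-block:: python

    import sqlite3

    from dagster import resource

    @resource(config_schema={"path": str})
    def sqlite_conn(init_context):
        # Yielding instead of returning lets the code after the yield run
        # as teardown once execution resolves.
        conn = sqlite3.connect(init_context.resource_config["path"])
        try:
            yield conn
        finally:
            conn.close()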
\n\n\n
[docs]def make_values_resource(**kwargs: Any) -> ResourceDefinition:\n """A helper function that creates a ``ResourceDefinition`` to take in user-defined values.\n\n This is useful for sharing values between ops.\n\n Args:\n **kwargs: Arbitrary keyword arguments that will be passed to the config schema of the\n returned resource definition. If not set, Dagster will accept any config provided for\n the resource.\n\n For example:\n\n .. code-block:: python\n\n @op(required_resource_keys={"globals"})\n def my_op(context):\n print(context.resources.globals["my_str_var"])\n\n @job(resource_defs={"globals": make_values_resource(my_str_var=str, my_int_var=int)})\n def my_job():\n my_op()\n\n Returns:\n ResourceDefinition: A resource that passes in user-defined values.\n """\n return ResourceDefinition(\n resource_fn=lambda init_context: init_context.resource_config,\n config_schema=kwargs or Any,\n )
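Complementing the docstring example, a sketch of how the values are supplied at launch time through run config (the op, job, and variable names are made up):

.. code-block:: python

    from dagster import job, make_values_resource, op

    @op(required_resource_keys={"globals"})
    def log_globals(context):
        context.log.info(context.resources.globals["my_str_var"])

    @job(resource_defs={"globals": make_values_resource(my_str_var=str, my_int_var=int)})
    def globals_job():
        log_globals()

    # Values are provided under the resource's "config" key.
    globals_job.execute_in_process(
        run_config={
            "resources": {"globals": {"config": {"my_str_var": "hello", "my_int_var": 1}}}
        }
    )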
\n
", "current_page_name": "_modules/dagster/_core/definitions/resource_definition", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.resource_definition"}, "result": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.result

\nfrom typing import NamedTuple, Optional, Sequence\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, experimental\nfrom dagster._core.definitions.asset_check_result import AssetCheckResult\nfrom dagster._core.definitions.data_version import DataVersion\n\nfrom .events import (\n    AssetKey,\n    CoercibleToAssetKey,\n)\nfrom .metadata import MetadataUserInput\n\n\n
[docs]@experimental\nclass MaterializeResult(\n NamedTuple(\n "_MaterializeResult",\n [\n ("asset_key", PublicAttr[Optional[AssetKey]]),\n ("metadata", PublicAttr[Optional[MetadataUserInput]]),\n ("check_results", PublicAttr[Sequence[AssetCheckResult]]),\n ("data_version", PublicAttr[Optional[DataVersion]]),\n ],\n )\n):\n """An object representing a successful materialization of an asset. These can be returned from\n @asset and @multi_asset decorated functions to pass metadata or specify specific assets were\n materialized.\n\n Attributes:\n asset_key (Optional[AssetKey]): Optional in @asset, required in @multi_asset to discern which asset this refers to.\n metadata (Optional[MetadataUserInput]): Metadata to record with the corresponding AssetMaterialization event.\n """\n\n def __new__(\n cls,\n *, # enforce kwargs\n asset_key: Optional[CoercibleToAssetKey] = None,\n metadata: Optional[MetadataUserInput] = None,\n check_results: Optional[Sequence[AssetCheckResult]] = None,\n data_version: Optional[DataVersion] = None,\n ):\n asset_key = AssetKey.from_coercible(asset_key) if asset_key else None\n\n return super().__new__(\n cls,\n asset_key=asset_key,\n metadata=check.opt_nullable_mapping_param(\n metadata,\n "metadata",\n key_type=str,\n ),\n check_results=check.opt_sequence_param(\n check_results, "check_results", of_type=AssetCheckResult\n ),\n data_version=check.opt_inst_param(data_version, "data_version", DataVersion),\n )\n\n def check_result_named(self, check_name: str) -> AssetCheckResult:\n for check_result in self.check_results:\n if check_result.check_name == check_name:\n return check_result\n\n check.failed(f"Could not find check result named {check_name}")
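A minimal sketch of returning ``MaterializeResult`` from an ``@asset``-decorated function to attach metadata to the materialization event (the asset name and metadata values are made up):

.. code-block:: python

    from dagster import MaterializeResult, asset

    @asset
    def logins_table() -> MaterializeResult:
        # ...write the table to storage here, then report metadata on the event.
        return MaterializeResult(metadata={"num_rows": 4096})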
\n
", "current_page_name": "_modules/dagster/_core/definitions/result", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.result"}, "run_config": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.run_config

\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Dict,\n    Iterator,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Tuple,\n    TypeVar,\n    Union,\n    cast,\n)\n\nfrom typing_extensions import TypeAlias\n\nfrom dagster._config import (\n    ALL_CONFIG_BUILTINS,\n    ConfigType,\n    Field,\n    Permissive,\n    Selector,\n    Shape,\n)\nfrom dagster._config.pythonic_config import Config\nfrom dagster._core.definitions.asset_layer import AssetLayer\nfrom dagster._core.definitions.executor_definition import (\n    ExecutorDefinition,\n    execute_in_process_executor,\n    in_process_executor,\n)\nfrom dagster._core.definitions.input import InputDefinition\nfrom dagster._core.definitions.output import OutputDefinition\nfrom dagster._core.errors import DagsterInvalidDefinitionError\nfrom dagster._core.storage.input_manager import IInputManagerDefinition\nfrom dagster._core.storage.output_manager import IOutputManagerDefinition\nfrom dagster._core.types.dagster_type import ALL_RUNTIME_BUILTINS, construct_dagster_type_dictionary\nfrom dagster._utils import check\n\nfrom .configurable import ConfigurableDefinition\nfrom .definition_config_schema import IDefinitionConfigSchema\nfrom .dependency import DependencyStructure, GraphNode, Node, NodeHandle, NodeInput, OpNode\nfrom .graph_definition import GraphDefinition\nfrom .logger_definition import LoggerDefinition\nfrom .op_definition import NodeDefinition, OpDefinition\nfrom .resource_definition import ResourceDefinition\n\nif TYPE_CHECKING:\n    from .source_asset import SourceAsset\n\n\ndef define_resource_dictionary_cls(\n    resource_defs: Mapping[str, ResourceDefinition],\n    required_resources: AbstractSet[str],\n) -> Shape:\n    fields = {}\n    for resource_name, resource_def in resource_defs.items():\n        if resource_def.config_schema:\n            is_required = None\n            if resource_name not in required_resources:\n                # explicitly make section not required if resource is not required\n                # for the current mode\n                is_required = False\n\n            fields[resource_name] = def_config_field(\n                resource_def,\n                is_required=is_required,\n                description=resource_def.description,\n            )\n\n    return Shape(fields=fields)\n\n\ndef remove_none_entries(ddict: Mapping[Any, Any]) -> dict:\n    return {k: v for k, v in ddict.items() if v is not None}\n\n\ndef def_config_field(\n    configurable_def: ConfigurableDefinition,\n    is_required: Optional[bool] = None,\n    description: Optional[str] = None,\n) -> Field:\n    return Field(\n        Shape(\n            {"config": configurable_def.config_field} if configurable_def.has_config_field else {}\n        ),\n        is_required=is_required,\n        description=description,\n    )\n\n\nclass RunConfigSchemaCreationData(NamedTuple):\n    job_name: str\n    nodes: Sequence[Node]\n    graph_def: GraphDefinition\n    dependency_structure: DependencyStructure\n    executor_def: ExecutorDefinition\n    resource_defs: Mapping[str, ResourceDefinition]\n    logger_defs: Mapping[str, LoggerDefinition]\n    ignored_nodes: Sequence[Node]\n    required_resources: AbstractSet[str]\n    direct_inputs: Mapping[str, Any]\n    asset_layer: AssetLayer\n\n\ndef define_logger_dictionary_cls(creation_data: RunConfigSchemaCreationData) -> Shape:\n    return Shape(\n        {\n            logger_name: def_config_field(logger_definition, is_required=False)\n            for 
logger_name, logger_definition in creation_data.logger_defs.items()\n        }\n    )\n\n\ndef define_execution_field(executor_defs: Sequence[ExecutorDefinition], description: str) -> Field:\n    default_in_process = False\n    for executor_def in executor_defs:\n        if executor_def == in_process_executor:\n            default_in_process = True\n\n    selector = selector_for_named_defs(executor_defs)\n\n    if default_in_process:\n        return Field(\n            selector, default_value={in_process_executor.name: {}}, description=description\n        )\n\n    # If we are using the execute_in_process executor, then ignore all executor config.\n    if len(executor_defs) == 1 and executor_defs[0] == execute_in_process_executor:\n        return Field(Permissive(), is_required=False, default_value={}, description=description)\n\n    return Field(selector, description=description)\n\n\ndef define_single_execution_field(executor_def: ExecutorDefinition, description: str) -> Field:\n    return def_config_field(executor_def, description=description)\n\n\ndef define_run_config_schema_type(creation_data: RunConfigSchemaCreationData) -> ConfigType:\n    execution_field = define_single_execution_field(\n        creation_data.executor_def,\n        "Configure how steps are executed within a run.",\n    )\n\n    top_level_node = GraphNode(\n        name=creation_data.graph_def.name,\n        definition=creation_data.graph_def,\n        graph_definition=creation_data.graph_def,\n    )\n\n    fields = {\n        "execution": execution_field,\n        "loggers": Field(\n            define_logger_dictionary_cls(creation_data),\n            description="Configure how loggers emit messages within a run.",\n        ),\n        "resources": Field(\n            define_resource_dictionary_cls(\n                creation_data.resource_defs,\n                creation_data.required_resources,\n            ),\n            description="Configure how shared resources are implemented within a run.",\n        ),\n        "inputs": get_inputs_field(\n            node=top_level_node,\n            handle=NodeHandle(top_level_node.name, parent=None),\n            dependency_structure=creation_data.dependency_structure,\n            resource_defs=creation_data.resource_defs,\n            node_ignored=False,\n            direct_inputs=creation_data.direct_inputs,\n            input_source_assets={},\n            asset_layer=creation_data.asset_layer,\n        ),\n    }\n\n    if creation_data.graph_def.has_config_mapping:\n        config_schema = cast(IDefinitionConfigSchema, creation_data.graph_def.config_schema)\n        nodes_field = Field(\n            {"config": config_schema.as_field()},\n            description="Configure runtime parameters for ops or assets.",\n        )\n    else:\n        nodes_field = Field(\n            define_node_shape(\n                nodes=creation_data.nodes,\n                ignored_nodes=creation_data.ignored_nodes,\n                dependency_structure=creation_data.dependency_structure,\n                resource_defs=creation_data.resource_defs,\n                asset_layer=creation_data.asset_layer,\n                node_input_source_assets=creation_data.graph_def.node_input_source_assets,\n            ),\n            description="Configure runtime parameters for ops or assets.",\n        )\n\n    fields["ops"] = nodes_field\n\n    return Shape(\n        fields=remove_none_entries(fields),\n    )\n\n\n# Common pattern for a set of named definitions (e.g. 
executors)\n# to build a selector so that one of them is selected\ndef selector_for_named_defs(named_defs) -> Selector:\n    return Selector({named_def.name: def_config_field(named_def) for named_def in named_defs})\n\n\ndef get_inputs_field(\n    node: Node,\n    handle: NodeHandle,\n    dependency_structure: DependencyStructure,\n    resource_defs: Mapping[str, ResourceDefinition],\n    node_ignored: bool,\n    asset_layer: AssetLayer,\n    input_source_assets: Mapping[str, "SourceAsset"],\n    direct_inputs: Optional[Mapping[str, Any]] = None,\n) -> Optional[Field]:\n    direct_inputs = check.opt_mapping_param(direct_inputs, "direct_inputs")\n    inputs_field_fields = {}\n    for name, inp in node.definition.input_dict.items():\n        inp_handle = NodeInput(node, inp)\n        has_upstream = input_has_upstream(dependency_structure, inp_handle, node, name)\n        if inp.input_manager_key:\n            input_field = get_input_manager_input_field(node, inp, resource_defs)\n        elif (\n            # if you have asset definitions, input will be loaded from the source asset\n            asset_layer.has_assets_defs\n            or asset_layer.has_asset_check_defs\n            and asset_layer.asset_key_for_input(handle, name)\n            and not has_upstream\n        ):\n            input_field = None\n        elif name in direct_inputs and not has_upstream:\n            input_field = None\n        elif name in input_source_assets and not has_upstream:\n            input_field = None\n        elif inp.dagster_type.loader and not has_upstream:\n            input_field = get_type_loader_input_field(node, name, inp)\n        else:\n            input_field = None\n\n        if input_field:\n            inputs_field_fields[name] = input_field\n\n    if not inputs_field_fields:\n        return None\n    if node_ignored:\n        return Field(\n            Shape(inputs_field_fields),\n            is_required=False,\n            description=(\n                "This op is not present in the current op selection, "\n                "the input config values are allowed but ignored."\n            ),\n        )\n    else:\n        return Field(Shape(inputs_field_fields))\n\n\ndef input_has_upstream(\n    dependency_structure: DependencyStructure,\n    input_handle: NodeInput,\n    node: Node,\n    input_name: str,\n) -> bool:\n    return dependency_structure.has_deps(input_handle) or node.container_maps_input(input_name)\n\n\ndef get_input_manager_input_field(\n    node: Node,\n    input_def: InputDefinition,\n    resource_defs: Mapping[str, ResourceDefinition],\n) -> Optional[Field]:\n    if input_def.input_manager_key:\n        if input_def.input_manager_key not in resource_defs:\n            raise DagsterInvalidDefinitionError(\n                f"Input '{input_def.name}' for {node.describe_node()} requires input_manager_key"\n                f" '{input_def.input_manager_key}', but no resource has been provided. 
Please"\n                " include a resource definition for that key in the provided resource_defs."\n            )\n\n        input_manager = resource_defs[input_def.input_manager_key]\n        if not isinstance(input_manager, IInputManagerDefinition):\n            raise DagsterInvalidDefinitionError(\n                f"Input '{input_def.name}' for {node.describe_node()} requires input_manager_key "\n                f"'{input_def.input_manager_key}', but the resource definition provided is not an "\n                "IInputManagerDefinition"\n            )\n\n        input_config_schema = input_manager.input_config_schema\n        if input_config_schema:\n            return input_config_schema.as_field()\n        return None\n\n    return None\n\n\ndef get_type_loader_input_field(node: Node, input_name: str, input_def: InputDefinition) -> Field:\n    loader = check.not_none(input_def.dagster_type.loader)\n    return Field(\n        loader.schema_type,\n        is_required=(not node.definition.input_has_default(input_name)),\n    )\n\n\ndef get_outputs_field(\n    node: Node,\n    resource_defs: Mapping[str, ResourceDefinition],\n) -> Optional[Field]:\n    output_manager_fields = {}\n    for name, output_def in node.definition.output_dict.items():\n        output_manager_output_field = get_output_manager_output_field(\n            node, output_def, resource_defs\n        )\n        if output_manager_output_field:\n            output_manager_fields[name] = output_manager_output_field\n\n    return Field(Shape(output_manager_fields)) if output_manager_fields else None\n\n\ndef get_output_manager_output_field(\n    node: Node, output_def: OutputDefinition, resource_defs: Mapping[str, ResourceDefinition]\n) -> Optional[ConfigType]:\n    if output_def.io_manager_key not in resource_defs:\n        raise DagsterInvalidDefinitionError(\n            f'Output "{output_def.name}" for {node.describe_node()} requires io_manager_key '\n            f'"{output_def.io_manager_key}", but no resource has been provided. 
Please include a '\n            "resource definition for that key in the provided resource_defs."\n        )\n    if not isinstance(resource_defs[output_def.io_manager_key], IOutputManagerDefinition):\n        raise DagsterInvalidDefinitionError(\n            f'Output "{output_def.name}" for {node.describe_node()} requires io_manager_key '\n            f'"{output_def.io_manager_key}", but the resource definition provided is not an '\n            "IOutputManagerDefinition"\n        )\n    output_manager_def = resource_defs[output_def.io_manager_key]\n    if (\n        output_manager_def\n        and isinstance(output_manager_def, IOutputManagerDefinition)\n        and output_manager_def.output_config_schema\n    ):\n        return output_manager_def.output_config_schema.as_field()\n\n    return None\n\n\ndef node_config_field(fields: Mapping[str, Optional[Field]], ignored: bool) -> Optional[Field]:\n    trimmed_fields = remove_none_entries(fields)\n    if trimmed_fields:\n        if ignored:\n            return Field(\n                Shape(trimmed_fields),\n                is_required=False,\n                description=(\n                    "This op is not present in the current op selection, "\n                    "the config values are allowed but ignored."\n                ),\n            )\n        else:\n            return Field(Shape(trimmed_fields))\n    else:\n        return None\n\n\ndef construct_leaf_node_config(\n    node: Node,\n    handle: NodeHandle,\n    dependency_structure: DependencyStructure,\n    config_schema: Optional[IDefinitionConfigSchema],\n    resource_defs: Mapping[str, ResourceDefinition],\n    ignored: bool,\n    asset_layer: AssetLayer,\n    input_source_assets: Mapping[str, "SourceAsset"],\n) -> Optional[Field]:\n    return node_config_field(\n        {\n            "inputs": get_inputs_field(\n                node,\n                handle,\n                dependency_structure,\n                resource_defs,\n                ignored,\n                asset_layer,\n                input_source_assets,\n            ),\n            "outputs": get_outputs_field(node, resource_defs),\n            "config": config_schema.as_field() if config_schema else None,\n        },\n        ignored=ignored,\n    )\n\n\ndef define_node_field(\n    node: Node,\n    handle: NodeHandle,\n    dependency_structure: DependencyStructure,\n    resource_defs: Mapping[str, ResourceDefinition],\n    ignored: bool,\n    asset_layer: AssetLayer,\n    input_source_assets: Mapping[str, "SourceAsset"],\n) -> Optional[Field]:\n    # All nodes regardless of compositing status get the same inputs and outputs\n    # config. 
The only thing the varies is on extra element of configuration\n    # 1) Vanilla op definition: a 'config' key with the config_schema as the value\n    # 2) Graph with field mapping: a 'config' key with the config_schema of\n    #    the config mapping (via GraphDefinition#config_schema)\n    # 3) Graph without field mapping: an 'ops' key with recursively defined\n    #    ops dictionary\n    # 4) `configured` graph with field mapping: a 'config' key with the config_schema that was\n    #    provided when `configured` was called (via GraphDefinition#config_schema)\n\n    assert isinstance(node, (OpNode, GraphNode)), f"Invalid node type: {type(node)}"\n\n    if isinstance(node, OpNode):\n        return construct_leaf_node_config(\n            node,\n            handle,\n            dependency_structure,\n            node.definition.config_schema,\n            resource_defs,\n            ignored,\n            asset_layer,\n            input_source_assets,\n        )\n\n    graph_def = node.definition\n\n    if graph_def.has_config_mapping:\n        # has_config_mapping covers cases 2 & 4 from above (only config mapped graphs can\n        # be `configured`)...\n        return construct_leaf_node_config(\n            node,\n            handle,\n            dependency_structure,\n            # ...and in both cases, the correct schema for 'config' key is exposed by this property:\n            graph_def.config_schema,\n            resource_defs,\n            ignored,\n            asset_layer,\n            input_source_assets,\n        )\n        # This case omits an 'ops' key, thus if a graph is `configured` or has a field\n        # mapping, the user cannot stub any config, inputs, or outputs for inner (child) nodes.\n    else:\n        fields = {\n            "inputs": get_inputs_field(\n                node,\n                handle,\n                dependency_structure,\n                resource_defs,\n                ignored,\n                asset_layer,\n                input_source_assets,\n            ),\n            "outputs": get_outputs_field(node, resource_defs),\n            "ops": Field(\n                define_node_shape(\n                    nodes=graph_def.nodes,\n                    ignored_nodes=None,\n                    dependency_structure=graph_def.dependency_structure,\n                    parent_handle=handle,\n                    resource_defs=resource_defs,\n                    asset_layer=asset_layer,\n                    node_input_source_assets=graph_def.node_input_source_assets,\n                )\n            ),\n        }\n\n        return node_config_field(fields, ignored=ignored)\n\n\ndef define_node_shape(\n    nodes: Sequence[Node],\n    ignored_nodes: Optional[Sequence[Node]],\n    dependency_structure: DependencyStructure,\n    resource_defs: Mapping[str, ResourceDefinition],\n    asset_layer: AssetLayer,\n    node_input_source_assets: Mapping[str, Mapping[str, "SourceAsset"]],\n    parent_handle: Optional[NodeHandle] = None,\n) -> Shape:\n    """Examples of what this method is used to generate the schema for:\n    1.\n        inputs: ...\n        ops:\n      >    op1: ...\n      >    op2: ...\n\n    2.\n        inputs:\n        ops:\n          graph1: ...\n            inputs: ...\n            ops:\n      >       op1: ...\n      >       inner_graph: ...\n\n\n    """\n    ignored_nodes = check.opt_sequence_param(ignored_nodes, "ignored_nodes", of_type=Node)\n\n    fields = {}\n    for node in nodes:\n        node_field = define_node_field(\n            
node,\n            NodeHandle(node.name, parent_handle),\n            dependency_structure,\n            resource_defs,\n            ignored=False,\n            asset_layer=asset_layer,\n            input_source_assets=node_input_source_assets.get(node.name, {}),\n        )\n\n        if node_field:\n            fields[node.name] = node_field\n\n    for node in ignored_nodes:\n        node_field = define_node_field(\n            node,\n            NodeHandle(node.name, parent_handle),\n            dependency_structure,\n            resource_defs,\n            ignored=True,\n            asset_layer=asset_layer,\n            input_source_assets=node_input_source_assets.get(node.name, {}),\n        )\n        if node_field:\n            fields[node.name] = node_field\n\n    return Shape(fields)\n\n\ndef iterate_node_def_config_types(node_def: NodeDefinition) -> Iterator[ConfigType]:\n    if isinstance(node_def, OpDefinition):\n        if node_def.has_config_field:\n            yield from node_def.get_config_field().config_type.type_iterator()\n    elif isinstance(node_def, GraphDefinition):\n        for node in node_def.nodes:\n            yield from iterate_node_def_config_types(node.definition)\n\n    else:\n        check.invariant(f"Unexpected NodeDefinition type {type(node_def)}")\n\n\ndef _gather_all_schemas(node_defs: Sequence[NodeDefinition]) -> Iterator[ConfigType]:\n    dagster_types = construct_dagster_type_dictionary(node_defs)\n    for dagster_type in list(dagster_types.values()) + list(ALL_RUNTIME_BUILTINS):\n        if dagster_type.loader:\n            yield from dagster_type.loader.schema_type.type_iterator()\n\n\ndef _gather_all_config_types(\n    node_defs: Sequence[NodeDefinition], run_config_schema_type: ConfigType\n) -> Iterator[ConfigType]:\n    for node_def in node_defs:\n        yield from iterate_node_def_config_types(node_def)\n\n    yield from run_config_schema_type.type_iterator()\n\n\ndef construct_config_type_dictionary(\n    node_defs: Sequence[NodeDefinition],\n    run_config_schema_type: ConfigType,\n) -> Tuple[Mapping[str, ConfigType], Mapping[str, ConfigType]]:\n    type_dict_by_name = {t.given_name: t for t in ALL_CONFIG_BUILTINS if t.given_name}\n    type_dict_by_key = {t.key: t for t in ALL_CONFIG_BUILTINS}\n    all_types = list(_gather_all_config_types(node_defs, run_config_schema_type)) + list(\n        _gather_all_schemas(node_defs)\n    )\n\n    for config_type in all_types:\n        name = config_type.given_name\n        if name and name in type_dict_by_name:\n            if type(config_type) is not type(type_dict_by_name[name]):\n                raise DagsterInvalidDefinitionError(\n                    "Type names must be unique. You have constructed two different "\n                    f'instances of types with the same name "{name}".'\n                )\n        elif name:\n            type_dict_by_name[name] = config_type\n\n        type_dict_by_key[config_type.key] = config_type\n\n    return type_dict_by_name, type_dict_by_key\n\n\ndef _convert_config_classes_inner(configs: Any) -> Any:\n    if not isinstance(configs, dict):\n        return configs\n\n    return {\n        k: (\n            {"config": v._convert_to_config_dictionary()}  # noqa: SLF001\n            if isinstance(v, Config)\n            else _convert_config_classes_inner(v)\n        )\n        for k, v in configs.items()\n    }\n\n\ndef _convert_config_classes(configs: Dict[str, Any]) -> Dict[str, Any]:\n    return _convert_config_classes_inner(configs)\n\n\n
[docs]class RunConfig:\n """Container for all the configuration that can be passed to a run. Accepts Pythonic definitions\n for op and asset config and resources and converts them under the hood to the appropriate config dictionaries.\n\n Example usage:\n\n .. code-block:: python\n\n class MyAssetConfig(Config):\n a_str: str\n\n @asset\n def my_asset(config: MyAssetConfig):\n assert config.a_str == "foo"\n\n materialize(\n [my_asset],\n run_config=RunConfig(\n ops={"my_asset": MyAssetConfig(a_str="foo")}\n )\n )\n\n """\n\n def __init__(\n self,\n ops: Optional[Dict[str, Any]] = None,\n resources: Optional[Dict[str, Any]] = None,\n loggers: Optional[Dict[str, Any]] = None,\n execution: Optional[Dict[str, Any]] = None,\n ):\n self.ops = check.opt_dict_param(ops, "ops")\n self.resources = check.opt_dict_param(resources, "resources")\n self.loggers = check.opt_dict_param(loggers, "loggers")\n self.execution = check.opt_dict_param(execution, "execution")\n\n def to_config_dict(self):\n return {\n "loggers": self.loggers,\n "resources": _convert_config_classes(self.resources),\n "ops": _convert_config_classes(self.ops),\n "execution": self.execution,\n }
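In addition to the docstring example, a sketch showing that a ``RunConfig`` can be passed wherever a raw run-config dict is accepted, such as ``execute_in_process`` (the op and config class are hypothetical):

.. code-block:: python

    from dagster import Config, OpExecutionContext, RunConfig, job, op

    class GreetConfig(Config):
        name: str

    @op
    def greet(context: OpExecutionContext, config: GreetConfig):
        context.log.info(f"hello {config.name}")

    @job
    def greet_job():
        greet()

    # The RunConfig is converted to a config dict under the hood.
    greet_job.execute_in_process(
        run_config=RunConfig(ops={"greet": GreetConfig(name="dagster")})
    )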
\n\n\nCoercibleToRunConfig: TypeAlias = Union[Dict[str, Any], RunConfig]\n\nT = TypeVar("T")\n\n\ndef convert_config_input(inp: Union[CoercibleToRunConfig, T]) -> Union[T, Mapping[str, Any]]:\n if isinstance(inp, RunConfig):\n return inp.to_config_dict()\n else:\n return inp\n
", "current_page_name": "_modules/dagster/_core/definitions/run_config", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.run_config"}, "run_request": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.run_request

\nfrom datetime import datetime\nfrom enum import Enum\nfrom typing import (\n    TYPE_CHECKING,\n    Any,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Union,\n    cast,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, experimental_param\nfrom dagster._core.definitions.asset_check_evaluation import AssetCheckEvaluation\nfrom dagster._core.definitions.events import AssetKey, AssetMaterialization, AssetObservation\nfrom dagster._core.definitions.utils import validate_tags\nfrom dagster._core.instance import DynamicPartitionsStore\nfrom dagster._core.storage.dagster_run import DagsterRun, DagsterRunStatus\nfrom dagster._core.storage.tags import PARTITION_NAME_TAG\nfrom dagster._serdes.serdes import whitelist_for_serdes\nfrom dagster._utils.error import SerializableErrorInfo\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.job_definition import JobDefinition\n    from dagster._core.definitions.partition import PartitionsDefinition\n    from dagster._core.definitions.run_config import RunConfig\n    from dagster._core.definitions.unresolved_asset_job_definition import (\n        UnresolvedAssetJobDefinition,\n    )\n\n\n@whitelist_for_serdes(old_storage_names={"JobType"})\nclass InstigatorType(Enum):\n    SCHEDULE = "SCHEDULE"\n    SENSOR = "SENSOR"\n    AUTO_MATERIALIZE = "AUTO_MATERIALIZE"\n\n\n
[docs]@whitelist_for_serdes\nclass SkipReason(NamedTuple("_SkipReason", [("skip_message", PublicAttr[Optional[str]])])):\n """Represents a skipped evaluation, where no runs are requested. May contain a message to indicate\n why no runs were requested.\n\n Attributes:\n skip_message (Optional[str]): A message displayed in the Dagster UI for why this evaluation resulted\n in no requested runs.\n """\n\n def __new__(cls, skip_message: Optional[str] = None):\n return super(SkipReason, cls).__new__(\n cls,\n skip_message=check.opt_str_param(skip_message, "skip_message"),\n )
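A minimal sketch of a sensor that returns a ``SkipReason`` when there is nothing to do (the target job name and file-scanning logic are assumptions):

.. code-block:: python

    from dagster import RunRequest, SkipReason, sensor

    @sensor(job_name="sync_job")  # "sync_job" is a hypothetical target job
    def directory_sensor(context):
        new_files = []  # e.g. the result of scanning a directory since the last cursor
        if not new_files:
            return SkipReason("No new files since the last evaluation.")
        return [RunRequest(run_key=filename) for filename in new_files]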
\n\n\n
[docs]@whitelist_for_serdes\nclass AddDynamicPartitionsRequest(\n NamedTuple(\n "_AddDynamicPartitionsRequest",\n [\n ("partitions_def_name", str),\n ("partition_keys", Sequence[str]),\n ],\n )\n):\n """A request to add partitions to a dynamic partitions definition, to be evaluated by a sensor or schedule."""\n\n def __new__(\n cls,\n partitions_def_name: str,\n partition_keys: Sequence[str],\n ):\n return super(AddDynamicPartitionsRequest, cls).__new__(\n cls,\n partitions_def_name=check.str_param(partitions_def_name, "partitions_def_name"),\n partition_keys=check.list_param(partition_keys, "partition_keys", of_type=str),\n )
\n\n\n
[docs]@whitelist_for_serdes\nclass DeleteDynamicPartitionsRequest(\n NamedTuple(\n "_DeleteDynamicPartitionsRequest",\n [\n ("partitions_def_name", str),\n ("partition_keys", Sequence[str]),\n ],\n )\n):\n """A request to delete partitions from a dynamic partitions definition, to be evaluated by a sensor or schedule."""\n\n def __new__(\n cls,\n partitions_def_name: str,\n partition_keys: Sequence[str],\n ):\n return super(DeleteDynamicPartitionsRequest, cls).__new__(\n cls,\n partitions_def_name=check.str_param(partitions_def_name, "partitions_def_name"),\n partition_keys=check.list_param(partition_keys, "partition_keys", of_type=str),\n )
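A sketch of requesting dynamic-partition changes from a sensor via ``SensorResult``; the job name and the ``regions`` dynamic partitions definition are assumptions, and the requested run is evaluated against the partition set with these changes applied:

.. code-block:: python

    from dagster import (
        AddDynamicPartitionsRequest,
        DeleteDynamicPartitionsRequest,
        RunRequest,
        SensorResult,
        sensor,
    )

    @sensor(job_name="regional_job")  # hypothetical job partitioned on "regions"
    def region_sensor(context):
        return SensorResult(
            run_requests=[RunRequest(partition_key="emea")],
            dynamic_partitions_requests=[
                AddDynamicPartitionsRequest("regions", ["emea"]),
                DeleteDynamicPartitionsRequest("regions", ["retired_region"]),
            ],
        )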
\n\n\n
[docs]@whitelist_for_serdes\nclass RunRequest(\n NamedTuple(\n "_RunRequest",\n [\n ("run_key", PublicAttr[Optional[str]]),\n ("run_config", PublicAttr[Mapping[str, Any]]),\n ("tags", PublicAttr[Mapping[str, str]]),\n ("job_name", PublicAttr[Optional[str]]),\n ("asset_selection", PublicAttr[Optional[Sequence[AssetKey]]]),\n ("stale_assets_only", PublicAttr[bool]),\n ("partition_key", PublicAttr[Optional[str]]),\n ],\n )\n):\n """Represents all the information required to launch a single run. Must be returned by a\n SensorDefinition or ScheduleDefinition's evaluation function for a run to be launched.\n\n Attributes:\n run_key (Optional[str]): A string key to identify this launched run. For sensors, ensures that\n only one run is created per run key across all sensor evaluations. For schedules,\n ensures that one run is created per tick, across failure recoveries. Passing in a `None`\n value means that a run will always be launched per evaluation.\n run_config (Optional[Mapping[str, Any]]: Configuration for the run. If the job has\n a :py:class:`PartitionedConfig`, this value will override replace the config\n provided by it.\n tags (Optional[Dict[str, Any]]): A dictionary of tags (string key-value pairs) to attach\n to the launched run.\n job_name (Optional[str]): (Experimental) The name of the job this run request will launch.\n Required for sensors that target multiple jobs.\n asset_selection (Optional[Sequence[AssetKey]]): A sequence of AssetKeys that should be\n launched with this run.\n stale_assets_only (bool): Set to true to further narrow the asset\n selection to stale assets. If passed without an asset selection, all stale assets in the\n job will be materialized. If the job does not materialize assets, this flag is ignored.\n partition_key (Optional[str]): The partition key for this run request.\n """\n\n def __new__(\n cls,\n run_key: Optional[str] = None,\n run_config: Optional[Union["RunConfig", Mapping[str, Any]]] = None,\n tags: Optional[Mapping[str, Any]] = None,\n job_name: Optional[str] = None,\n asset_selection: Optional[Sequence[AssetKey]] = None,\n stale_assets_only: bool = False,\n partition_key: Optional[str] = None,\n ):\n from dagster._core.definitions.run_config import convert_config_input\n\n return super(RunRequest, cls).__new__(\n cls,\n run_key=check.opt_str_param(run_key, "run_key"),\n run_config=check.opt_mapping_param(\n convert_config_input(run_config), "run_config", key_type=str\n ),\n tags=validate_tags(check.opt_mapping_param(tags, "tags", key_type=str)),\n job_name=check.opt_str_param(job_name, "job_name"),\n asset_selection=check.opt_nullable_sequence_param(\n asset_selection, "asset_selection", of_type=AssetKey\n ),\n stale_assets_only=check.bool_param(stale_assets_only, "stale_assets_only"),\n partition_key=check.opt_str_param(partition_key, "partition_key"),\n )\n\n def with_replaced_attrs(self, **kwargs: Any) -> "RunRequest":\n fields = self._asdict()\n for k in fields.keys():\n if k in kwargs:\n fields[k] = kwargs[k]\n return RunRequest(**fields)\n\n def with_resolved_tags_and_config(\n self,\n target_definition: Union["JobDefinition", "UnresolvedAssetJobDefinition"],\n dynamic_partitions_requests: Sequence[\n Union[AddDynamicPartitionsRequest, DeleteDynamicPartitionsRequest]\n ],\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> "RunRequest":\n from dagster._core.definitions.job_definition import JobDefinition\n from dagster._core.definitions.partition import (\n 
PartitionedConfig,\n PartitionsDefinition,\n )\n\n if self.partition_key is None:\n check.failed(\n "Cannot resolve partition for run request without partition key",\n )\n\n partitions_def = target_definition.partitions_def\n if partitions_def is None:\n check.failed(\n "Cannot resolve partition for run request when target job"\n f" '{target_definition.name}' is unpartitioned.",\n )\n partitions_def = cast(PartitionsDefinition, partitions_def)\n\n partitioned_config = (\n target_definition.partitioned_config\n if isinstance(target_definition, JobDefinition)\n else PartitionedConfig.from_flexible_config(target_definition.config, partitions_def)\n )\n if partitioned_config is None:\n check.failed(\n "Cannot resolve partition for run request on unpartitioned job",\n )\n\n _check_valid_partition_key_after_dynamic_partitions_requests(\n self.partition_key,\n partitions_def,\n dynamic_partitions_requests,\n current_time,\n dynamic_partitions_store,\n )\n\n tags = {\n **(self.tags or {}),\n **partitioned_config.get_tags_for_partition_key(\n self.partition_key,\n job_name=target_definition.name,\n ),\n }\n\n return self.with_replaced_attrs(\n run_config=(\n self.run_config\n if self.run_config\n else partitioned_config.get_run_config_for_partition_key(self.partition_key)\n ),\n tags=tags,\n )\n\n def has_resolved_partition(self) -> bool:\n # Backcompat run requests yielded via `run_request_for_partition` already have resolved\n # partitioning\n return self.tags.get(PARTITION_NAME_TAG) is not None if self.partition_key else True
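A minimal sketch of yielding ``RunRequest`` objects from a sensor targeting a partitioned job (the job name, partition keys, and tag are made up):

.. code-block:: python

    from dagster import RunRequest, sensor

    @sensor(job_name="daily_report_job")  # hypothetical daily-partitioned job
    def report_sensor(context):
        for key in ["2023-10-01", "2023-10-02"]:
            # run_key de-duplicates across ticks; partition_key resolves the
            # partitioned config and tags for the launched run.
            yield RunRequest(run_key=key, partition_key=key, tags={"source": "report_sensor"})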
\n\n\ndef _check_valid_partition_key_after_dynamic_partitions_requests(\n partition_key: str,\n partitions_def: "PartitionsDefinition",\n dynamic_partitions_requests: Sequence[\n Union[AddDynamicPartitionsRequest, DeleteDynamicPartitionsRequest]\n ],\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n):\n from dagster._core.definitions.multi_dimensional_partitions import MultiPartitionsDefinition\n from dagster._core.definitions.partition import (\n DynamicPartitionsDefinition,\n )\n\n if isinstance(partitions_def, MultiPartitionsDefinition):\n multipartition_key = partitions_def.get_partition_key_from_str(partition_key)\n\n for dimension in partitions_def.partitions_defs:\n _check_valid_partition_key_after_dynamic_partitions_requests(\n multipartition_key.keys_by_dimension[dimension.name],\n dimension.partitions_def,\n dynamic_partitions_requests,\n current_time,\n dynamic_partitions_store,\n )\n\n elif isinstance(partitions_def, DynamicPartitionsDefinition) and partitions_def.name:\n if not dynamic_partitions_store:\n check.failed(\n "Cannot resolve partition for run request on dynamic partitions without"\n " dynamic_partitions_store"\n )\n\n add_partition_keys: Set[str] = set()\n delete_partition_keys: Set[str] = set()\n for req in dynamic_partitions_requests:\n if isinstance(req, AddDynamicPartitionsRequest):\n if req.partitions_def_name == partitions_def.name:\n add_partition_keys.update(set(req.partition_keys))\n elif isinstance(req, DeleteDynamicPartitionsRequest):\n if req.partitions_def_name == partitions_def.name:\n delete_partition_keys.update(set(req.partition_keys))\n\n partition_keys_after_requests_resolved = (\n set(\n dynamic_partitions_store.get_dynamic_partitions(\n partitions_def_name=partitions_def.name\n )\n )\n | add_partition_keys\n ) - delete_partition_keys\n\n if partition_key not in partition_keys_after_requests_resolved:\n check.failed(\n f"Dynamic partition key {partition_key} for partitions def"\n f" '{partitions_def.name}' is invalid. After dynamic partitions requests are"\n " applied, it does not exist in the set of valid partition keys."\n )\n\n else:\n partitions_def.validate_partition_key(\n partition_key,\n dynamic_partitions_store=dynamic_partitions_store,\n current_time=current_time,\n )\n\n\n@whitelist_for_serdes(\n storage_name="PipelineRunReaction",\n storage_field_names={\n "dagster_run": "pipeline_run",\n },\n)\nclass DagsterRunReaction(\n NamedTuple(\n "_DagsterRunReaction",\n [\n ("dagster_run", Optional[DagsterRun]),\n ("error", Optional[SerializableErrorInfo]),\n ("run_status", Optional[DagsterRunStatus]),\n ],\n )\n):\n """Represents a request that reacts to an existing dagster run. If success, it will report logs\n back to the run.\n\n Attributes:\n dagster_run (Optional[DagsterRun]): The dagster run that originates this reaction.\n error (Optional[SerializableErrorInfo]): user code execution error.\n run_status: (Optional[DagsterRunStatus]): The run status that triggered the reaction.\n """\n\n def __new__(\n cls,\n dagster_run: Optional[DagsterRun],\n error: Optional[SerializableErrorInfo] = None,\n run_status: Optional[DagsterRunStatus] = None,\n ):\n return super(DagsterRunReaction, cls).__new__(\n cls,\n dagster_run=check.opt_inst_param(dagster_run, "dagster_run", DagsterRun),\n error=check.opt_inst_param(error, "error", SerializableErrorInfo),\n run_status=check.opt_inst_param(run_status, "run_status", DagsterRunStatus),\n )\n\n\n
[docs]@experimental_param(\n param="asset_events", additional_warn_text="Runless asset events are experimental"\n)\nclass SensorResult(\n NamedTuple(\n "_SensorResult",\n [\n ("run_requests", Optional[Sequence[RunRequest]]),\n ("skip_reason", Optional[SkipReason]),\n ("cursor", Optional[str]),\n (\n "dynamic_partitions_requests",\n Optional[\n Sequence[Union[DeleteDynamicPartitionsRequest, AddDynamicPartitionsRequest]]\n ],\n ),\n (\n "asset_events",\n List[Union[AssetObservation, AssetMaterialization, AssetCheckEvaluation]],\n ),\n ],\n )\n):\n """The result of a sensor evaluation.\n\n Attributes:\n run_requests (Optional[Sequence[RunRequest]]): A list\n of run requests to be executed.\n skip_reason (Optional[Union[str, SkipReason]]): A skip message indicating why sensor\n evaluation was skipped.\n cursor (Optional[str]): The cursor value for this sensor, which will be provided on the\n context for the next sensor evaluation.\n dynamic_partitions_requests (Optional[Sequence[Union[DeleteDynamicPartitionsRequest,\n AddDynamicPartitionsRequest]]]): A list of dynamic partition requests to request dynamic\n partition addition and deletion. Run requests will be evaluated using the state of the\n partitions with these changes applied.\n asset_events (Optional[Sequence[Union[AssetObservation, AssetMaterialization, AssetCheckEvaluation]]]): (Experimental) A\n list of materializations, observations, and asset check evaluations that the system\n will persist on your behalf at the end of sensor evaluation. These events will be not\n be associated with any particular run, but will be queryable and viewable in the asset catalog.\n\n\n """\n\n def __new__(\n cls,\n run_requests: Optional[Sequence[RunRequest]] = None,\n skip_reason: Optional[Union[str, SkipReason]] = None,\n cursor: Optional[str] = None,\n dynamic_partitions_requests: Optional[\n Sequence[Union[DeleteDynamicPartitionsRequest, AddDynamicPartitionsRequest]]\n ] = None,\n asset_events: Optional[\n Sequence[Union[AssetObservation, AssetMaterialization, AssetCheckEvaluation]]\n ] = None,\n ):\n if skip_reason and len(run_requests if run_requests else []) > 0:\n check.failed(\n "Expected a single skip reason or one or more run requests: received values for "\n "both run_requests and skip_reason"\n )\n\n skip_reason = check.opt_inst_param(skip_reason, "skip_reason", (SkipReason, str))\n if isinstance(skip_reason, str):\n skip_reason = SkipReason(skip_reason)\n\n return super(SensorResult, cls).__new__(\n cls,\n run_requests=check.opt_sequence_param(run_requests, "run_requests", RunRequest),\n skip_reason=skip_reason,\n cursor=check.opt_str_param(cursor, "cursor"),\n dynamic_partitions_requests=check.opt_sequence_param(\n dynamic_partitions_requests,\n "dynamic_partitions_requests",\n (AddDynamicPartitionsRequest, DeleteDynamicPartitionsRequest),\n ),\n asset_events=list(\n check.opt_sequence_param(\n asset_events,\n "asset_check_evaluations",\n (AssetObservation, AssetMaterialization, AssetCheckEvaluation),\n )\n ),\n )
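A sketch of the experimental ``asset_events`` field: a sensor reporting a runless materialization for an asset updated outside Dagster. The asset key and metadata are made up, and the sketch assumes a sensor may omit a target when it only reports events; attach a ``job_name`` if your version requires one:

.. code-block:: python

    from dagster import AssetMaterialization, SensorResult, sensor

    @sensor()
    def external_table_sensor(context):
        # Persist a materialization event without launching a run.
        return SensorResult(
            asset_events=[
                AssetMaterialization(asset_key="external_table", metadata={"rows": 1000})
            ]
        )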
\n
", "current_page_name": "_modules/dagster/_core/definitions/run_request", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.run_request"}, "run_status_sensor_definition": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.run_status_sensor_definition

\nimport functools\nimport logging\nfrom contextlib import ExitStack\nfrom datetime import datetime\nfrom typing import (\n    TYPE_CHECKING,\n    Any,\n    Callable,\n    Iterator,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Union,\n    cast,\n    overload,\n)\n\nimport pendulum\nfrom typing_extensions import TypeAlias\n\nimport dagster._check as check\nfrom dagster._annotations import deprecated_param, public\nfrom dagster._core.definitions.instigation_logger import InstigationLogger\nfrom dagster._core.definitions.resource_annotation import get_resource_args\nfrom dagster._core.definitions.scoped_resources_builder import Resources, ScopedResourcesBuilder\nfrom dagster._core.errors import (\n    DagsterInvalidDefinitionError,\n    DagsterInvariantViolationError,\n    RunStatusSensorExecutionError,\n    user_code_error_boundary,\n)\nfrom dagster._core.events import PIPELINE_RUN_STATUS_TO_EVENT_TYPE, DagsterEvent, DagsterEventType\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._core.storage.dagster_run import DagsterRun, DagsterRunStatus, RunsFilter\nfrom dagster._serdes import (\n    serialize_value,\n    whitelist_for_serdes,\n)\nfrom dagster._serdes.errors import DeserializationError\nfrom dagster._serdes.serdes import deserialize_value\nfrom dagster._seven import JSONDecodeError\nfrom dagster._utils import utc_datetime_from_timestamp\nfrom dagster._utils.error import serializable_error_info_from_exc_info\n\nfrom .graph_definition import GraphDefinition\nfrom .job_definition import JobDefinition\nfrom .sensor_definition import (\n    DagsterRunReaction,\n    DefaultSensorStatus,\n    RawSensorEvaluationFunctionReturn,\n    RunRequest,\n    SensorDefinition,\n    SensorEvaluationContext,\n    SensorResult,\n    SensorType,\n    SkipReason,\n    get_context_param_name,\n    get_sensor_context_from_args_or_kwargs,\n    validate_and_get_resource_dict,\n)\nfrom .target import ExecutableDefinition\nfrom .unresolved_asset_job_definition import UnresolvedAssetJobDefinition\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.resource_definition import ResourceDefinition\n    from dagster._core.definitions.selector import (\n        CodeLocationSelector,\n        JobSelector,\n        RepositorySelector,\n    )\n\nRunStatusSensorEvaluationFunction: TypeAlias = Union[\n    Callable[..., RawSensorEvaluationFunctionReturn],\n    Callable[..., RawSensorEvaluationFunctionReturn],\n]\nRunFailureSensorEvaluationFn: TypeAlias = Union[\n    Callable[..., RawSensorEvaluationFunctionReturn],\n    Callable[..., RawSensorEvaluationFunctionReturn],\n]\n\n\n@whitelist_for_serdes(old_storage_names={"PipelineSensorCursor"})\nclass RunStatusSensorCursor(\n    NamedTuple(\n        "_RunStatusSensorCursor",\n        [("record_id", int), ("update_timestamp", str)],\n    )\n):\n    def __new__(cls, record_id, update_timestamp):\n        return super(RunStatusSensorCursor, cls).__new__(\n            cls,\n            record_id=check.int_param(record_id, "record_id"),\n            update_timestamp=check.str_param(update_timestamp, "update_timestamp"),\n        )\n\n    @staticmethod\n    def is_valid(json_str: str) -> bool:\n        try:\n            obj = deserialize_value(json_str, RunStatusSensorCursor)\n            return isinstance(obj, RunStatusSensorCursor)\n        except (JSONDecodeError, DeserializationError):\n            return False\n\n    def to_json(self) -> str:\n        return serialize_value(cast(NamedTuple, self))\n\n    @staticmethod\n    def 
from_json(json_str: str) -> "RunStatusSensorCursor":\n        return deserialize_value(json_str, RunStatusSensorCursor)\n\n\n
[docs]class RunStatusSensorContext:\n """The ``context`` object available to a decorated function of ``run_status_sensor``."""\n\n def __init__(\n self,\n sensor_name,\n dagster_run,\n dagster_event,\n instance,\n context: Optional[\n SensorEvaluationContext\n ] = None, # deprecated arg, but we need to keep it for backcompat\n resource_defs: Optional[Mapping[str, "ResourceDefinition"]] = None,\n logger: Optional[logging.Logger] = None,\n partition_key: Optional[str] = None,\n _resources: Optional[Resources] = None,\n _cm_scope_entered: bool = False,\n ) -> None:\n self._exit_stack = ExitStack()\n self._sensor_name = check.str_param(sensor_name, "sensor_name")\n self._dagster_run = check.inst_param(dagster_run, "dagster_run", DagsterRun)\n self._dagster_event = check.inst_param(dagster_event, "dagster_event", DagsterEvent)\n self._instance = check.inst_param(instance, "instance", DagsterInstance)\n self._logger: Optional[logging.Logger] = logger or (context.log if context else None)\n self._partition_key = check.opt_str_param(partition_key, "partition_key")\n\n # Wait to set resources unless they're accessed\n self._resource_defs = resource_defs\n self._resources = _resources\n self._cm_scope_entered = _cm_scope_entered\n\n def for_run_failure(self) -> "RunFailureSensorContext":\n """Converts RunStatusSensorContext to RunFailureSensorContext."""\n return RunFailureSensorContext(\n sensor_name=self._sensor_name,\n dagster_run=self._dagster_run,\n dagster_event=self._dagster_event,\n instance=self._instance,\n logger=self._logger,\n partition_key=self._partition_key,\n resource_defs=self._resource_defs,\n _resources=self._resources,\n _cm_scope_entered=self._cm_scope_entered,\n )\n\n @property\n def resource_defs(self) -> Optional[Mapping[str, "ResourceDefinition"]]:\n return self._resource_defs\n\n @property\n def resources(self) -> Resources:\n from dagster._core.definitions.scoped_resources_builder import (\n IContainsGenerator,\n )\n from dagster._core.execution.build_resources import build_resources\n\n if not self._resources:\n """\n This is similar to what we do in e.g. the op context - we set up a resource\n building context manager, and immediately enter it. This is so that in cases\n where a user is not using any context-manager based resources, they don't\n need to enter this SensorEvaluationContext themselves.\n\n For example:\n\n my_sensor(build_sensor_context(resources={"my_resource": my_non_cm_resource})\n\n will work ok, but for a CM resource we must do\n\n with build_sensor_context(resources={"my_resource": my_cm_resource}) as context:\n my_sensor(context)\n """\n\n instance = self.instance if self._instance else None\n\n resources_cm = build_resources(resources=self._resource_defs or {}, instance=instance)\n self._resources = self._exit_stack.enter_context(resources_cm)\n\n if isinstance(self._resources, IContainsGenerator) and not self._cm_scope_entered:\n self._exit_stack.close()\n raise DagsterInvariantViolationError(\n "At least one provided resource is a generator, but attempting to access"\n " resources outside of context manager scope. You can use the following syntax"\n " to open a context manager: `with build_schedule_context(...) 
as context:`"\n )\n\n return self._resources\n\n @public\n @property\n def sensor_name(self) -> str:\n """The name of the sensor."""\n return self._sensor_name\n\n @public\n @property\n def dagster_run(self) -> DagsterRun:\n """The run of the job."""\n return self._dagster_run\n\n @public\n @property\n def dagster_event(self) -> DagsterEvent:\n """The event associated with the job run status."""\n return self._dagster_event\n\n @public\n @property\n def instance(self) -> DagsterInstance:\n """The current instance."""\n return self._instance\n\n @public\n @property\n def log(self) -> logging.Logger:\n """The logger for the current sensor evaluation."""\n if not self._logger:\n self._logger = InstigationLogger()\n\n return self._logger\n\n @public\n @property\n def partition_key(self) -> Optional[str]:\n """Optional[str]: The partition key of the relevant run."""\n return self._partition_key\n\n def __enter__(self) -> "RunStatusSensorContext":\n self._cm_scope_entered = True\n return self\n\n def __exit__(self, *exc) -> None:\n self._exit_stack.close()\n self._logger = None
\n\n\n
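The properties documented above are normally consumed inside a function decorated with ``run_status_sensor``. A minimal sketch of such a consumer (the sensor name is illustrative, not part of this module):

.. code-block:: python

    from dagster import DagsterRunStatus, RunStatusSensorContext, run_status_sensor

    @run_status_sensor(run_status=DagsterRunStatus.SUCCESS)
    def log_successful_runs(context: RunStatusSensorContext):
        # dagster_run and dagster_event are the run and status event that
        # triggered this evaluation; log writes to the sensor tick logs.
        context.log.info(
            f"{context.sensor_name}: run {context.dagster_run.run_id} of "
            f"{context.dagster_run.job_name} reached SUCCESS "
            f"(partition={context.partition_key})"
        )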
[docs]class RunFailureSensorContext(RunStatusSensorContext):\n """The ``context`` object available to a decorated function of ``run_failure_sensor``.\n\n Attributes:\n sensor_name (str): the name of the sensor.\n dagster_run (DagsterRun): the failed run.\n """\n\n @public\n @property\n def failure_event(self) -> DagsterEvent:\n """The run failure event.\n\n If the run failed because of an error inside a step, get_step_failure_events will have more\n details on the step failure.\n """\n return self.dagster_event\n\n
[docs] @public\n def get_step_failure_events(self) -> Sequence[DagsterEvent]:\n """The step failure event for each step in the run that failed.\n\n Examples:\n .. code-block:: python\n\n error_strings_by_step_key = {\n # includes the stack trace\n event.step_key: event.event_specific_data.error.to_string()\n for event in context.get_step_failure_events()\n }\n """\n records = self.instance.get_records_for_run(\n run_id=self.dagster_run.run_id, of_type=DagsterEventType.STEP_FAILURE\n ).records\n return [cast(DagsterEvent, record.event_log_entry.dagster_event) for record in records]
\n\n\n
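A sketch of how ``failure_event`` and ``get_step_failure_events`` might be used together from a ``run_failure_sensor`` (the sensor name is illustrative):

.. code-block:: python

    from dagster import RunFailureSensorContext, run_failure_sensor

    @run_failure_sensor
    def summarize_step_failures(context: RunFailureSensorContext):
        # failure_event is the run-level failure; per-step detail, including
        # stack traces, comes from get_step_failure_events().
        errors_by_step = {
            event.step_key: event.event_specific_data.error.to_string()
            for event in context.get_step_failure_events()
        }
        context.log.error(
            f"Run {context.dagster_run.run_id} failed: {errors_by_step}"
        )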
[docs]def build_run_status_sensor_context(\n sensor_name: str,\n dagster_event: DagsterEvent,\n dagster_instance: DagsterInstance,\n dagster_run: DagsterRun,\n context: Optional[SensorEvaluationContext] = None,\n resources: Optional[Mapping[str, object]] = None,\n partition_key: Optional[str] = None,\n) -> RunStatusSensorContext:\n """Builds run status sensor context from provided parameters.\n\n This function can be used to provide the context argument when directly invoking a function\n decorated with `@run_status_sensor` or `@run_failure_sensor`, such as when writing unit tests.\n\n Args:\n sensor_name (str): The name of the sensor the context is being constructed for.\n dagster_event (DagsterEvent): A DagsterEvent with the same event type as the one that\n triggers the run_status_sensor\n dagster_instance (DagsterInstance): The dagster instance configured for the context.\n dagster_run (DagsterRun): DagsterRun object from running a job\n resources (Optional[Mapping[str, object]]): A dictionary of resources to be made available\n to the sensor.\n\n Examples:\n .. code-block:: python\n\n instance = DagsterInstance.ephemeral()\n result = my_job.execute_in_process(instance=instance)\n\n dagster_run = result.dagster_run\n dagster_event = result.get_job_success_event() # or get_job_failure_event()\n\n context = build_run_status_sensor_context(\n sensor_name="run_status_sensor_to_invoke",\n dagster_instance=instance,\n dagster_run=dagster_run,\n dagster_event=dagster_event,\n )\n run_status_sensor_to_invoke(context)\n """\n from dagster._core.execution.build_resources import wrap_resources_for_execution\n\n return RunStatusSensorContext(\n sensor_name=sensor_name,\n instance=dagster_instance,\n dagster_run=dagster_run,\n dagster_event=dagster_event,\n resource_defs=wrap_resources_for_execution(resources),\n logger=context.log if context else None,\n partition_key=partition_key,\n )
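For failure sensors, the same builder can be combined with ``for_run_failure()`` (defined above) to produce a ``RunFailureSensorContext``. A sketch under the assumption that ``my_job`` is a job expected to fail and ``summarize_step_failures`` is a failure sensor like the one sketched earlier:

.. code-block:: python

    from dagster import DagsterInstance, build_run_status_sensor_context

    instance = DagsterInstance.ephemeral()
    result = my_job.execute_in_process(instance=instance, raise_on_error=False)

    context = build_run_status_sensor_context(
        sensor_name="summarize_step_failures",
        dagster_instance=instance,
        dagster_run=result.dagster_run,
        dagster_event=result.get_job_failure_event(),
    ).for_run_failure()
    summarize_step_failures(context)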
\n\n\n@overload\ndef run_failure_sensor(\n name: RunFailureSensorEvaluationFn,\n) -> SensorDefinition: ...\n\n\n@overload\ndef run_failure_sensor(\n name: Optional[str] = None,\n minimum_interval_seconds: Optional[int] = None,\n description: Optional[str] = None,\n monitored_jobs: Optional[\n Sequence[\n Union[\n JobDefinition,\n GraphDefinition,\n UnresolvedAssetJobDefinition,\n "RepositorySelector",\n "JobSelector",\n "CodeLocationSelector",\n ]\n ]\n ] = None,\n job_selection: Optional[\n Sequence[\n Union[\n JobDefinition,\n GraphDefinition,\n UnresolvedAssetJobDefinition,\n "RepositorySelector",\n "JobSelector",\n "CodeLocationSelector",\n ]\n ]\n ] = None,\n monitor_all_repositories: bool = False,\n default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,\n request_job: Optional[ExecutableDefinition] = None,\n request_jobs: Optional[Sequence[ExecutableDefinition]] = None,\n) -> Callable[[RunFailureSensorEvaluationFn], SensorDefinition,]: ...\n\n\n
[docs]@deprecated_param(\n param="job_selection",\n breaking_version="2.0",\n additional_warn_text="Use `monitored_jobs` instead.",\n)\ndef run_failure_sensor(\n name: Optional[Union[RunFailureSensorEvaluationFn, str]] = None,\n minimum_interval_seconds: Optional[int] = None,\n description: Optional[str] = None,\n monitored_jobs: Optional[\n Sequence[\n Union[\n JobDefinition,\n GraphDefinition,\n UnresolvedAssetJobDefinition,\n "RepositorySelector",\n "JobSelector",\n "CodeLocationSelector",\n ]\n ]\n ] = None,\n job_selection: Optional[\n Sequence[\n Union[\n JobDefinition,\n GraphDefinition,\n UnresolvedAssetJobDefinition,\n "RepositorySelector",\n "JobSelector",\n "CodeLocationSelector",\n ]\n ]\n ] = None,\n monitor_all_repositories: bool = False,\n default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,\n request_job: Optional[ExecutableDefinition] = None,\n request_jobs: Optional[Sequence[ExecutableDefinition]] = None,\n) -> Union[SensorDefinition, Callable[[RunFailureSensorEvaluationFn], SensorDefinition,]]:\n """Creates a sensor that reacts to job failure events, where the decorated function will be\n run when a run fails.\n\n Takes a :py:class:`~dagster.RunFailureSensorContext`.\n\n Args:\n name (Optional[str]): The name of the job failure sensor. Defaults to the name of the\n decorated function.\n minimum_interval_seconds (Optional[int]): The minimum number of seconds that will elapse\n between sensor evaluations.\n description (Optional[str]): A human-readable description of the sensor.\n monitored_jobs (Optional[List[Union[JobDefinition, GraphDefinition, UnresolvedAssetJobDefinition, RepositorySelector, JobSelector, CodeLocationSelector]]]):\n The jobs in the current repository that will be monitored by this failure sensor.\n Defaults to None, which means the alert will be sent when any job in the current\n repository fails.\n monitor_all_repositories (bool): If set to True, the sensor will monitor all runs in the\n Dagster instance. If set to True, an error will be raised if you also specify\n monitored_jobs or job_selection. Defaults to False.\n job_selection (Optional[List[Union[JobDefinition, GraphDefinition, RepositorySelector, JobSelector, CodeLocationSelector]]]):\n (deprecated in favor of monitored_jobs) The jobs in the current repository that will be\n monitored by this failure sensor. Defaults to None, which means the alert will be sent\n when any job in the repository fails.\n default_status (DefaultSensorStatus): Whether the sensor starts as running or not. 
The default\n status can be overridden from the Dagster UI or via the GraphQL API.\n request_job (Optional[Union[GraphDefinition, JobDefinition, UnresolvedAssetJob]]): The job a RunRequest should\n execute if yielded from the sensor.\n request_jobs (Optional[Sequence[Union[GraphDefinition, JobDefinition, UnresolvedAssetJob]]]): (experimental)\n A list of jobs to be executed if RunRequests are yielded from the sensor.\n """\n\n def inner(\n fn: RunFailureSensorEvaluationFn,\n ) -> SensorDefinition:\n check.callable_param(fn, "fn")\n if name is None or callable(name):\n sensor_name = fn.__name__\n else:\n sensor_name = name\n\n jobs = monitored_jobs if monitored_jobs else job_selection\n\n @run_status_sensor(\n run_status=DagsterRunStatus.FAILURE,\n name=sensor_name,\n minimum_interval_seconds=minimum_interval_seconds,\n description=description,\n monitored_jobs=jobs,\n monitor_all_repositories=monitor_all_repositories,\n default_status=default_status,\n request_job=request_job,\n request_jobs=request_jobs,\n )\n @functools.wraps(fn)\n def _run_failure_sensor(*args, **kwargs) -> Any:\n args_modified = [\n arg.for_run_failure() if isinstance(arg, RunStatusSensorContext) else arg\n for arg in args\n ]\n kwargs_modified = {\n k: v.for_run_failure() if isinstance(v, RunStatusSensorContext) else v\n for k, v in kwargs.items()\n }\n return fn(*args_modified, **kwargs_modified)\n\n return _run_failure_sensor\n\n # This case is for when decorator is used bare, without arguments\n if callable(name):\n return inner(name)\n\n return inner
\n\n\n
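Used bare, the decorator wraps the function directly; with arguments it can scope monitoring and change defaults. A sketch in which ``payments_job`` is a hypothetical job defined elsewhere:

.. code-block:: python

    from dagster import DefaultSensorStatus, RunFailureSensorContext, run_failure_sensor

    @run_failure_sensor(
        monitored_jobs=[payments_job],               # hypothetical job
        minimum_interval_seconds=120,
        default_status=DefaultSensorStatus.RUNNING,  # start enabled
    )
    def alert_on_payments_failure(context: RunFailureSensorContext):
        context.log.error(f"{context.dagster_run.job_name} failed")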
[docs]class RunStatusSensorDefinition(SensorDefinition):\n """Define a sensor that reacts to a given status of job execution, where the decorated\n function will be evaluated when a run is at the given status.\n\n Args:\n name (str): The name of the sensor. Defaults to the name of the decorated function.\n run_status (DagsterRunStatus): The status of a run which will be\n monitored by the sensor.\n run_status_sensor_fn (Callable[[RunStatusSensorContext], Union[SkipReason, DagsterRunReaction]]): The core\n evaluation function for the sensor. Takes a :py:class:`~dagster.RunStatusSensorContext`.\n minimum_interval_seconds (Optional[int]): The minimum number of seconds that will elapse\n between sensor evaluations.\n description (Optional[str]): A human-readable description of the sensor.\n monitored_jobs (Optional[List[Union[JobDefinition, GraphDefinition, UnresolvedAssetJobDefinition, JobSelector, RepositorySelector, CodeLocationSelector]]]):\n The jobs in the current repository that will be monitored by this sensor. Defaults to\n None, which means the alert will be sent when any job in the repository fails.\n monitor_all_repositories (bool): If set to True, the sensor will monitor all runs in the\n Dagster instance. If set to True, an error will be raised if you also specify\n monitored_jobs or job_selection. Defaults to False.\n default_status (DefaultSensorStatus): Whether the sensor starts as running or not. The default\n status can be overridden from the Dagster UI or via the GraphQL API.\n request_job (Optional[Union[GraphDefinition, JobDefinition]]): The job a RunRequest should\n execute if yielded from the sensor.\n request_jobs (Optional[Sequence[Union[GraphDefinition, JobDefinition]]]): (experimental)\n A list of jobs to be executed if RunRequests are yielded from the sensor.\n """\n\n def __init__(\n self,\n name: str,\n run_status: DagsterRunStatus,\n run_status_sensor_fn: RunStatusSensorEvaluationFunction,\n minimum_interval_seconds: Optional[int] = None,\n description: Optional[str] = None,\n monitored_jobs: Optional[\n Sequence[\n Union[\n JobDefinition,\n GraphDefinition,\n UnresolvedAssetJobDefinition,\n "RepositorySelector",\n "JobSelector",\n "CodeLocationSelector",\n ]\n ]\n ] = None,\n monitor_all_repositories: bool = False,\n default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,\n request_job: Optional[ExecutableDefinition] = None,\n request_jobs: Optional[Sequence[ExecutableDefinition]] = None,\n required_resource_keys: Optional[Set[str]] = None,\n ):\n from dagster._core.definitions.selector import (\n CodeLocationSelector,\n JobSelector,\n RepositorySelector,\n )\n from dagster._core.event_api import RunShardedEventsCursor\n from dagster._core.storage.event_log.base import EventRecordsFilter\n\n check.str_param(name, "name")\n check.inst_param(run_status, "run_status", DagsterRunStatus)\n check.callable_param(run_status_sensor_fn, "run_status_sensor_fn")\n check.opt_int_param(minimum_interval_seconds, "minimum_interval_seconds")\n check.opt_str_param(description, "description")\n check.opt_list_param(\n monitored_jobs,\n "monitored_jobs",\n (\n JobDefinition,\n GraphDefinition,\n UnresolvedAssetJobDefinition,\n RepositorySelector,\n JobSelector,\n CodeLocationSelector,\n ),\n )\n check.inst_param(default_status, "default_status", DefaultSensorStatus)\n\n resource_arg_names: Set[str] = {arg.name for arg in get_resource_args(run_status_sensor_fn)}\n\n combined_required_resource_keys = (\n check.opt_set_param(required_resource_keys, 
"required_resource_keys", of_type=str)\n | resource_arg_names\n )\n\n # coerce CodeLocationSelectors to RepositorySelectors with repo name "__repository__"\n monitored_jobs = [\n job.to_repository_selector() if isinstance(job, CodeLocationSelector) else job\n for job in (monitored_jobs or [])\n ]\n\n self._run_status_sensor_fn = check.callable_param(\n run_status_sensor_fn, "run_status_sensor_fn"\n )\n event_type = PIPELINE_RUN_STATUS_TO_EVENT_TYPE[run_status]\n\n # split monitored_jobs into external repos, external jobs, and jobs in the current repo\n other_repos = (\n [x for x in monitored_jobs if isinstance(x, RepositorySelector)]\n if monitored_jobs\n else []\n )\n\n other_repo_jobs = (\n [x for x in monitored_jobs if isinstance(x, JobSelector)] if monitored_jobs else []\n )\n\n current_repo_jobs = (\n [x for x in monitored_jobs if not isinstance(x, (JobSelector, RepositorySelector))]\n if monitored_jobs\n else []\n )\n\n def _wrapped_fn(\n context: SensorEvaluationContext,\n ) -> Iterator[Union[RunRequest, SkipReason, DagsterRunReaction, SensorResult]]:\n # initiate the cursor to (most recent event id, current timestamp) when:\n # * it's the first time starting the sensor\n # * or, the cursor isn't in valid format (backcompt)\n if context.cursor is None or not RunStatusSensorCursor.is_valid(context.cursor):\n most_recent_event_records = list(\n context.instance.get_event_records(\n EventRecordsFilter(event_type=event_type), ascending=False, limit=1\n )\n )\n most_recent_event_id = (\n most_recent_event_records[0].storage_id\n if len(most_recent_event_records) == 1\n else -1\n )\n\n new_cursor = RunStatusSensorCursor(\n update_timestamp=pendulum.now("UTC").isoformat(),\n record_id=most_recent_event_id,\n )\n context.update_cursor(new_cursor.to_json())\n yield SkipReason(f"Initiating {name}. 
Set cursor to {new_cursor}")\n return\n\n record_id, update_timestamp = RunStatusSensorCursor.from_json(context.cursor)\n\n # Fetch events after the cursor id\n # * we move the cursor forward to the latest visited event's id to avoid revisits\n # * when the daemon is down, bc we persist the cursor info, we can go back to where we\n # left and backfill alerts for the qualified events (up to 5 at a time) during the downtime\n # Note: this is a cross-run query which requires extra handling in sqlite, see details in SqliteEventLogStorage.\n event_records = context.instance.get_event_records(\n EventRecordsFilter(\n after_cursor=RunShardedEventsCursor(\n id=record_id,\n run_updated_after=cast(datetime, pendulum.parse(update_timestamp)),\n ),\n event_type=event_type,\n ),\n ascending=True,\n limit=5,\n )\n\n for event_record in event_records:\n event_log_entry = event_record.event_log_entry\n storage_id = event_record.storage_id\n\n # get run info\n run_records = context.instance.get_run_records(\n filters=RunsFilter(run_ids=[event_log_entry.run_id])\n )\n\n # skip if we couldn't find the right run\n if len(run_records) != 1:\n # bc we couldn't find the run, we use the event timestamp as the approximate\n # run update timestamp\n approximate_update_timestamp = utc_datetime_from_timestamp(\n event_log_entry.timestamp\n )\n context.update_cursor(\n RunStatusSensorCursor(\n record_id=storage_id,\n update_timestamp=approximate_update_timestamp.isoformat(),\n ).to_json()\n )\n continue\n\n dagster_run = run_records[0].dagster_run\n update_timestamp = run_records[0].update_timestamp\n\n job_match = False\n\n # if monitor_all_repositories is provided, then we want to run the sensor for all jobs in all repositories\n if monitor_all_repositories:\n job_match = True\n\n # check if the run is in the current repository and (if provided) one of jobs specified in monitored_jobs\n if (\n not job_match\n and\n # the job has a repository (not manually executed)\n dagster_run.external_job_origin\n and\n # the job belongs to the current repository\n dagster_run.external_job_origin.external_repository_origin.repository_name\n == context.repository_name\n ):\n if monitored_jobs:\n if dagster_run.job_name in map(lambda x: x.name, current_repo_jobs):\n job_match = True\n else:\n job_match = True\n\n if not job_match:\n # check if the run is one of the jobs specified by JobSelector or RepositorySelector (ie in another repo)\n # make a JobSelector for the run in question\n external_repository_origin = check.not_none(\n dagster_run.external_job_origin\n ).external_repository_origin\n run_job_selector = JobSelector(\n location_name=external_repository_origin.code_location_origin.location_name,\n repository_name=external_repository_origin.repository_name,\n job_name=dagster_run.job_name,\n )\n if run_job_selector in other_repo_jobs:\n job_match = True\n\n # make a RepositorySelector for the run in question\n run_repo_selector = RepositorySelector(\n location_name=external_repository_origin.code_location_origin.location_name,\n repository_name=external_repository_origin.repository_name,\n )\n if run_repo_selector in other_repos:\n job_match = True\n\n if not job_match:\n # the run in question doesn't match any of the criteria for we advance the cursor and move on\n context.update_cursor(\n RunStatusSensorCursor(\n record_id=storage_id, update_timestamp=update_timestamp.isoformat()\n ).to_json()\n )\n continue\n\n serializable_error = None\n\n resource_args_populated = validate_and_get_resource_dict(\n context.resources, 
name, resource_arg_names\n )\n\n try:\n with RunStatusSensorContext(\n sensor_name=name,\n dagster_run=dagster_run,\n dagster_event=event_log_entry.dagster_event,\n instance=context.instance,\n resource_defs=context.resource_defs,\n logger=context.log,\n partition_key=dagster_run.tags.get("dagster/partition"),\n ) as sensor_context, user_code_error_boundary(\n RunStatusSensorExecutionError,\n lambda: f'Error occurred during the execution sensor "{name}".',\n ):\n context_param_name = get_context_param_name(run_status_sensor_fn)\n context_param = (\n {context_param_name: sensor_context} if context_param_name else {}\n )\n\n sensor_return = run_status_sensor_fn(\n **context_param,\n **resource_args_populated,\n )\n\n if sensor_return is not None:\n context.update_cursor(\n RunStatusSensorCursor(\n record_id=storage_id,\n update_timestamp=update_timestamp.isoformat(),\n ).to_json()\n )\n\n if isinstance(sensor_return, SensorResult):\n if sensor_return.cursor:\n raise DagsterInvariantViolationError(\n f"Error in run status sensor {name}: Sensor returned a"\n " SensorResult with a cursor value. The cursor is managed"\n " by the sensor and should not be modified by a user."\n )\n yield sensor_return\n elif isinstance(\n sensor_return,\n (RunRequest, SkipReason, DagsterRunReaction),\n ):\n yield sensor_return\n else:\n yield from sensor_return\n return\n except RunStatusSensorExecutionError as run_status_sensor_execution_error:\n # When the user code errors, we report error to the sensor tick not the original run.\n serializable_error = serializable_error_info_from_exc_info(\n run_status_sensor_execution_error.original_exc_info\n )\n\n context.update_cursor(\n RunStatusSensorCursor(\n record_id=storage_id, update_timestamp=update_timestamp.isoformat()\n ).to_json()\n )\n\n # Yield DagsterRunReaction to indicate the execution success/failure.\n # The sensor machinery would\n # * report back to the original run if success\n # * update cursor and job state\n yield DagsterRunReaction(\n dagster_run=dagster_run,\n run_status=run_status,\n error=serializable_error,\n )\n\n super(RunStatusSensorDefinition, self).__init__(\n name=name,\n evaluation_fn=_wrapped_fn,\n minimum_interval_seconds=minimum_interval_seconds,\n description=description,\n default_status=default_status,\n job=request_job,\n jobs=request_jobs,\n required_resource_keys=combined_required_resource_keys,\n )\n\n def __call__(self, *args, **kwargs) -> RawSensorEvaluationFunctionReturn:\n context_param_name = get_context_param_name(self._run_status_sensor_fn)\n context = get_sensor_context_from_args_or_kwargs(\n self._run_status_sensor_fn,\n args,\n kwargs,\n context_type=RunStatusSensorContext,\n )\n context_param = {context_param_name: context} if context_param_name and context else {}\n\n resources = validate_and_get_resource_dict(\n context.resources if context else ScopedResourcesBuilder.build_empty(),\n self._name,\n self._required_resource_keys,\n )\n return self._run_status_sensor_fn(**context_param, **resources)\n\n @property\n def sensor_type(self) -> SensorType:\n return SensorType.RUN_STATUS
\n\n\n
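Most users reach this class through the ``run_status_sensor`` decorator below, but a direct construction would look roughly like the following sketch (names are illustrative):

.. code-block:: python

    from dagster import DagsterRunStatus, RunStatusSensorDefinition

    def _log_canceled(context):
        context.log.info(f"Run {context.dagster_run.run_id} was canceled")

    canceled_run_sensor = RunStatusSensorDefinition(
        name="canceled_run_sensor",
        run_status=DagsterRunStatus.CANCELED,
        run_status_sensor_fn=_log_canceled,
        minimum_interval_seconds=60,
    )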
[docs]@deprecated_param(\n param="job_selection",\n breaking_version="2.0",\n additional_warn_text="Use `monitored_jobs` instead.",\n)\ndef run_status_sensor(\n run_status: DagsterRunStatus,\n name: Optional[str] = None,\n minimum_interval_seconds: Optional[int] = None,\n description: Optional[str] = None,\n monitored_jobs: Optional[\n Sequence[\n Union[\n JobDefinition,\n GraphDefinition,\n UnresolvedAssetJobDefinition,\n "RepositorySelector",\n "JobSelector",\n "CodeLocationSelector",\n ]\n ]\n ] = None,\n job_selection: Optional[\n Sequence[\n Union[\n JobDefinition,\n GraphDefinition,\n UnresolvedAssetJobDefinition,\n "RepositorySelector",\n "JobSelector",\n "CodeLocationSelector",\n ]\n ]\n ] = None,\n monitor_all_repositories: bool = False,\n default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,\n request_job: Optional[ExecutableDefinition] = None,\n request_jobs: Optional[Sequence[ExecutableDefinition]] = None,\n) -> Callable[[RunStatusSensorEvaluationFunction], RunStatusSensorDefinition,]:\n """Creates a sensor that reacts to a given status of job execution, where the decorated\n function will be run when a job is at the given status.\n\n Takes a :py:class:`~dagster.RunStatusSensorContext`.\n\n Args:\n run_status (DagsterRunStatus): The status of run execution which will be\n monitored by the sensor.\n name (Optional[str]): The name of the sensor. Defaults to the name of the decorated function.\n minimum_interval_seconds (Optional[int]): The minimum number of seconds that will elapse\n between sensor evaluations.\n description (Optional[str]): A human-readable description of the sensor.\n monitored_jobs (Optional[List[Union[JobDefinition, GraphDefinition, UnresolvedAssetJobDefinition, RepositorySelector, JobSelector, CodeLocationSelector]]]):\n Jobs in the current repository that will be monitored by this sensor. Defaults to None, which means the alert will\n be sent when any job in the repository matches the requested run_status. Jobs in external repositories can be monitored by using\n RepositorySelector or JobSelector.\n monitor_all_repositories (bool): If set to True, the sensor will monitor all runs in the Dagster instance.\n If set to True, an error will be raised if you also specify monitored_jobs or job_selection.\n Defaults to False.\n job_selection (Optional[List[Union[JobDefinition, GraphDefinition, RepositorySelector, JobSelector, CodeLocationSelector]]]):\n (deprecated in favor of monitored_jobs) Jobs in the current repository that will be\n monitored by this sensor. Defaults to None, which means the alert will be sent when\n any job in the repository matches the requested run_status.\n default_status (DefaultSensorStatus): Whether the sensor starts as running or not. 
The default\n status can be overridden from the Dagster UI or via the GraphQL API.\n request_job (Optional[Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]]): The job that should be\n executed if a RunRequest is yielded from the sensor.\n request_jobs (Optional[Sequence[Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]]]): (experimental)\n A list of jobs to be executed if RunRequests are yielded from the sensor.\n """\n\n def inner(\n fn: RunStatusSensorEvaluationFunction,\n ) -> RunStatusSensorDefinition:\n check.callable_param(fn, "fn")\n sensor_name = name or fn.__name__\n\n jobs = monitored_jobs if monitored_jobs else job_selection\n\n if jobs and monitor_all_repositories:\n raise DagsterInvalidDefinitionError(\n "Cannot specify both monitor_all_repositories and"\n f" {'monitored_jobs' if monitored_jobs else 'job_selection'}."\n )\n\n return RunStatusSensorDefinition(\n name=sensor_name,\n run_status=run_status,\n run_status_sensor_fn=fn,\n minimum_interval_seconds=minimum_interval_seconds,\n description=description,\n monitored_jobs=jobs,\n monitor_all_repositories=monitor_all_repositories,\n default_status=default_status,\n request_job=request_job,\n request_jobs=request_jobs,\n )\n\n return inner
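A sketch of the decorator launching a downstream job when a monitored job succeeds; ``upstream_job`` and ``report_job`` are hypothetical jobs defined elsewhere:

.. code-block:: python

    from dagster import DagsterRunStatus, RunRequest, run_status_sensor

    @run_status_sensor(
        run_status=DagsterRunStatus.SUCCESS,
        monitored_jobs=[upstream_job],  # hypothetical monitored job
        request_job=report_job,         # hypothetical job to launch
    )
    def launch_report_on_success(context):
        # Returning a RunRequest launches request_job.
        return RunRequest(run_key=context.dagster_run.run_id)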
\n
", "current_page_name": "_modules/dagster/_core/definitions/run_status_sensor_definition", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.run_status_sensor_definition"}, "schedule_definition": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.schedule_definition

\nimport copy\nimport logging\nfrom contextlib import ExitStack\nfrom datetime import datetime\nfrom enum import Enum\nfrom typing import (\n    TYPE_CHECKING,\n    Any,\n    Callable,\n    Dict,\n    Iterator,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    TypeVar,\n    Union,\n    cast,\n)\n\nimport pendulum\nfrom typing_extensions import TypeAlias\n\nimport dagster._check as check\nfrom dagster._annotations import deprecated, deprecated_param, public\nfrom dagster._core.definitions.instigation_logger import InstigationLogger\nfrom dagster._core.definitions.resource_annotation import get_resource_args\nfrom dagster._core.definitions.scoped_resources_builder import Resources, ScopedResourcesBuilder\nfrom dagster._serdes import whitelist_for_serdes\nfrom dagster._utils import IHasInternalInit, ensure_gen\nfrom dagster._utils.merger import merge_dicts\nfrom dagster._utils.schedules import is_valid_cron_schedule\n\nfrom ..decorator_utils import has_at_least_one_parameter\nfrom ..errors import (\n    DagsterInvalidDefinitionError,\n    DagsterInvalidInvocationError,\n    DagsterInvariantViolationError,\n    ScheduleExecutionError,\n    user_code_error_boundary,\n)\nfrom ..instance import DagsterInstance\nfrom ..instance.ref import InstanceRef\nfrom ..storage.dagster_run import DagsterRun\nfrom .graph_definition import GraphDefinition\nfrom .job_definition import JobDefinition\nfrom .run_request import RunRequest, SkipReason\nfrom .target import DirectTarget, ExecutableDefinition, RepoRelativeTarget\nfrom .unresolved_asset_job_definition import UnresolvedAssetJobDefinition\nfrom .utils import check_valid_name, validate_tags\n\nif TYPE_CHECKING:\n    from dagster import ResourceDefinition\n    from dagster._core.definitions.repository_definition import RepositoryDefinition\nT = TypeVar("T")\n\nRunConfig: TypeAlias = Mapping[str, Any]\nRunRequestIterator: TypeAlias = Iterator[Union[RunRequest, SkipReason]]\n\nScheduleEvaluationFunctionReturn: TypeAlias = Union[\n    RunRequest, SkipReason, RunConfig, RunRequestIterator, Sequence[RunRequest]\n]\nRawScheduleEvaluationFunction: TypeAlias = Callable[..., ScheduleEvaluationFunctionReturn]\n\nScheduleRunConfigFunction: TypeAlias = Union[\n    Callable[["ScheduleEvaluationContext"], RunConfig],\n    Callable[[], RunConfig],\n]\n\nScheduleTagsFunction: TypeAlias = Callable[["ScheduleEvaluationContext"], Mapping[str, str]]\nScheduleShouldExecuteFunction: TypeAlias = Callable[["ScheduleEvaluationContext"], bool]\nScheduleExecutionFunction: TypeAlias = Union[\n    Callable[["ScheduleEvaluationContext"], Any],\n    "DecoratedScheduleFunction",\n]\n\n\n@whitelist_for_serdes\nclass DefaultScheduleStatus(Enum):\n    RUNNING = "RUNNING"\n    STOPPED = "STOPPED"\n\n\ndef get_or_create_schedule_context(\n    fn: Callable, *args: Any, **kwargs: Any\n) -> "ScheduleEvaluationContext":\n    """Based on the passed resource function and the arguments passed to it, returns the\n    user-passed ScheduleEvaluationContext or creates one if it is not passed.\n\n    Raises an exception if the user passes more than one argument or if the user-provided\n    function requires a context parameter but none is passed.\n    """\n    from dagster._config.pythonic_config import is_coercible_to_resource\n    from dagster._core.definitions.sensor_definition import get_context_param_name\n\n    context_param_name = get_context_param_name(fn)\n\n    kwarg_keys_non_resource = set(kwargs.keys()) - {param.name for param in get_resource_args(fn)}\n    
if len(args) + len(kwarg_keys_non_resource) > 1:\n        raise DagsterInvalidInvocationError(\n            "Schedule invocation received multiple non-resource arguments. Only a first "\n            "positional context parameter should be provided when invoking."\n        )\n\n    if any(is_coercible_to_resource(arg) for arg in args):\n        raise DagsterInvalidInvocationError(\n            "If directly invoking a schedule, you may not provide resources as"\n            " positional arguments, only as keyword arguments."\n        )\n\n    context: Optional[ScheduleEvaluationContext] = None\n\n    if len(args) > 0:\n        context = check.opt_inst(args[0], ScheduleEvaluationContext)\n    elif len(kwargs) > 0:\n        if context_param_name and context_param_name not in kwargs:\n            raise DagsterInvalidInvocationError(\n                f"Schedule invocation expected argument '{context_param_name}'."\n            )\n        context = check.opt_inst(\n            kwargs.get(context_param_name or "context"), ScheduleEvaluationContext\n        )\n    elif context_param_name:\n        # If the context parameter is present but no value was provided, we error\n        raise DagsterInvalidInvocationError(\n            "Schedule evaluation function expected context argument, but no context argument "\n            "was provided when invoking."\n        )\n\n    context = context or build_schedule_context()\n    resource_args_from_kwargs = {}\n\n    resource_args = {param.name for param in get_resource_args(fn)}\n    for resource_arg in resource_args:\n        if resource_arg in kwargs:\n            resource_args_from_kwargs[resource_arg] = kwargs[resource_arg]\n\n    if resource_args_from_kwargs:\n        return context.merge_resources(resource_args_from_kwargs)\n\n    return context\n\n\n
[docs]class ScheduleEvaluationContext:\n """The context object available as the first argument various functions defined on a :py:class:`dagster.ScheduleDefinition`.\n\n A `ScheduleEvaluationContext` object is passed as the first argument to ``run_config_fn``, ``tags_fn``,\n and ``should_execute``.\n\n Users should not instantiate this object directly. To construct a `ScheduleEvaluationContext` for testing purposes, use :py:func:`dagster.build_schedule_context`.\n\n Example:\n .. code-block:: python\n\n from dagster import schedule, ScheduleEvaluationContext\n\n @schedule\n def the_schedule(context: ScheduleEvaluationContext):\n ...\n\n """\n\n __slots__ = [\n "_instance_ref",\n "_scheduled_execution_time",\n "_exit_stack",\n "_instance",\n "_log_key",\n "_logger",\n "_repository_name",\n "_resource_defs",\n "_schedule_name",\n "_resources_cm",\n "_resources",\n "_cm_scope_entered",\n "_repository_def",\n ]\n\n def __init__(\n self,\n instance_ref: Optional[InstanceRef],\n scheduled_execution_time: Optional[datetime],\n repository_name: Optional[str] = None,\n schedule_name: Optional[str] = None,\n resources: Optional[Mapping[str, "ResourceDefinition"]] = None,\n repository_def: Optional["RepositoryDefinition"] = None,\n ):\n from dagster._core.definitions.repository_definition import RepositoryDefinition\n\n self._exit_stack = ExitStack()\n self._instance = None\n\n self._instance_ref = check.opt_inst_param(instance_ref, "instance_ref", InstanceRef)\n self._scheduled_execution_time = check.opt_inst_param(\n scheduled_execution_time, "scheduled_execution_time", datetime\n )\n self._log_key = (\n [\n repository_name,\n schedule_name,\n scheduled_execution_time.strftime("%Y%m%d_%H%M%S"),\n ]\n if repository_name and schedule_name and scheduled_execution_time\n else None\n )\n self._logger = None\n self._repository_name = repository_name\n self._schedule_name = schedule_name\n\n # Wait to set resources unless they're accessed\n self._resource_defs = resources\n self._resources = None\n self._cm_scope_entered = False\n self._repository_def = check.opt_inst_param(\n repository_def, "repository_def", RepositoryDefinition\n )\n\n def __enter__(self) -> "ScheduleEvaluationContext":\n self._cm_scope_entered = True\n return self\n\n def __exit__(self, *exc) -> None:\n self._exit_stack.close()\n self._logger = None\n\n @property\n def resource_defs(self) -> Optional[Mapping[str, "ResourceDefinition"]]:\n return self._resource_defs\n\n @public\n @property\n def resources(self) -> Resources:\n """Mapping of resource key to resource definition to be made available\n during schedule execution.\n """\n from dagster._core.definitions.scoped_resources_builder import (\n IContainsGenerator,\n )\n from dagster._core.execution.build_resources import build_resources\n\n if not self._resources:\n # Early exit if no resources are defined. This skips unnecessary initialization\n # entirely. This allows users to run user code servers in cases where they\n # do not have access to the instance if they use a subset of features do\n # that do not require instance access. 
In this case, if they do not use\n # resources on schedules they do not require the instance, so we do not\n # instantiate it\n #\n # Tracking at https://github.com/dagster-io/dagster/issues/14345\n if not self._resource_defs:\n self._resources = ScopedResourcesBuilder.build_empty()\n return self._resources\n\n instance = self.instance if self._instance or self._instance_ref else None\n\n resources_cm = build_resources(resources=self._resource_defs, instance=instance)\n self._resources = self._exit_stack.enter_context(resources_cm)\n\n if isinstance(self._resources, IContainsGenerator) and not self._cm_scope_entered:\n self._exit_stack.close()\n raise DagsterInvariantViolationError(\n "At least one provided resource is a generator, but attempting to access"\n " resources outside of context manager scope. You can use the following syntax"\n " to open a context manager: `with build_sensor_context(...) as context:`"\n )\n\n return self._resources\n\n def merge_resources(self, resources_dict: Mapping[str, Any]) -> "ScheduleEvaluationContext":\n """Merge the specified resources into this context.\n This method is intended to be used by the Dagster framework, and should not be called by user code.\n\n Args:\n resources_dict (Mapping[str, Any]): The resources to replace in the context.\n """\n check.invariant(\n self._resources is None, "Cannot merge resources in context that has been initialized."\n )\n from dagster._core.execution.build_resources import wrap_resources_for_execution\n\n return ScheduleEvaluationContext(\n instance_ref=self._instance_ref,\n scheduled_execution_time=self._scheduled_execution_time,\n repository_name=self._repository_name,\n schedule_name=self._schedule_name,\n resources={\n **(self._resource_defs or {}),\n **wrap_resources_for_execution(resources_dict),\n },\n repository_def=self._repository_def,\n )\n\n @public\n @property\n def instance(self) -> "DagsterInstance":\n """DagsterInstance: The current DagsterInstance."""\n # self._instance_ref should only ever be None when this ScheduleEvaluationContext was\n # constructed under test.\n if not self._instance_ref:\n raise DagsterInvariantViolationError(\n "Attempted to initialize dagster instance, but no instance reference was provided."\n )\n if not self._instance:\n self._instance = self._exit_stack.enter_context(\n DagsterInstance.from_ref(self._instance_ref)\n )\n return cast(DagsterInstance, self._instance)\n\n @property\n def instance_ref(self) -> Optional[InstanceRef]:\n """The serialized instance configured to run the schedule."""\n return self._instance_ref\n\n @public\n @property\n def scheduled_execution_time(self) -> datetime:\n """The time in which the execution was scheduled to happen. 
May differ slightly\n from both the actual execution time and the time at which the run config is computed.\n """\n if self._scheduled_execution_time is None:\n check.failed(\n "Attempting to access scheduled_execution_time, but no scheduled_execution_time was"\n " set on this context"\n )\n\n return self._scheduled_execution_time\n\n @property\n def log(self) -> logging.Logger:\n if self._logger:\n return self._logger\n\n if not self._instance_ref:\n self._logger = self._exit_stack.enter_context(\n InstigationLogger(\n self._log_key,\n repository_name=self._repository_name,\n name=self._schedule_name,\n )\n )\n\n self._logger = self._exit_stack.enter_context(\n InstigationLogger(\n self._log_key,\n self.instance,\n repository_name=self._repository_name,\n name=self._schedule_name,\n )\n )\n return cast(InstigationLogger, self._logger)\n\n def has_captured_logs(self):\n return self._logger and self._logger.has_captured_logs()\n\n @property\n def log_key(self) -> Optional[List[str]]:\n return self._log_key\n\n @property\n def repository_def(self) -> "RepositoryDefinition":\n if not self._repository_def:\n raise DagsterInvariantViolationError(\n "Attempted to access repository_def, but no repository_def was provided."\n )\n return self._repository_def
\n\n\nclass DecoratedScheduleFunction(NamedTuple):\n """Wrapper around the decorated schedule function. Keeps track of both to better support the\n optimal return value for direct invocation of the evaluation function.\n """\n\n decorated_fn: RawScheduleEvaluationFunction\n wrapped_fn: Callable[[ScheduleEvaluationContext], RunRequestIterator]\n has_context_arg: bool\n\n\n
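A minimal sketch of a decorated schedule consuming this context; ``my_job`` is a hypothetical job:

.. code-block:: python

    from dagster import RunRequest, ScheduleEvaluationContext, schedule

    @schedule(job=my_job, cron_schedule="0 6 * * *")  # my_job is hypothetical
    def daily_schedule(context: ScheduleEvaluationContext):
        # scheduled_execution_time is the tick time, which can differ slightly
        # from when the run actually starts.
        day = context.scheduled_execution_time.strftime("%Y-%m-%d")
        return RunRequest(run_key=day, tags={"date": day})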
[docs]def build_schedule_context(\n instance: Optional[DagsterInstance] = None,\n scheduled_execution_time: Optional[datetime] = None,\n resources: Optional[Mapping[str, object]] = None,\n repository_def: Optional["RepositoryDefinition"] = None,\n instance_ref: Optional["InstanceRef"] = None,\n) -> ScheduleEvaluationContext:\n """Builds schedule execution context using the provided parameters.\n\n The instance provided to ``build_schedule_context`` must be persistent;\n DagsterInstance.ephemeral() will result in an error.\n\n Args:\n instance (Optional[DagsterInstance]): The dagster instance configured to run the schedule.\n scheduled_execution_time (datetime): The time in which the execution was scheduled to\n happen. May differ slightly from both the actual execution time and the time at which\n the run config is computed.\n\n Examples:\n .. code-block:: python\n\n context = build_schedule_context(instance)\n\n """\n from dagster._core.execution.build_resources import wrap_resources_for_execution\n\n check.opt_inst_param(instance, "instance", DagsterInstance)\n\n return ScheduleEvaluationContext(\n instance_ref=(\n instance_ref\n if instance_ref\n else instance.get_ref() if instance and instance.is_persistent else None\n ),\n scheduled_execution_time=check.opt_inst_param(\n scheduled_execution_time, "scheduled_execution_time", datetime\n ),\n resources=wrap_resources_for_execution(resources),\n repository_def=repository_def,\n )
\n\n\n@whitelist_for_serdes\nclass ScheduleExecutionData(\n NamedTuple(\n "_ScheduleExecutionData",\n [\n ("run_requests", Optional[Sequence[RunRequest]]),\n ("skip_message", Optional[str]),\n ("captured_log_key", Optional[Sequence[str]]),\n ],\n )\n):\n def __new__(\n cls,\n run_requests: Optional[Sequence[RunRequest]] = None,\n skip_message: Optional[str] = None,\n captured_log_key: Optional[Sequence[str]] = None,\n ):\n check.opt_sequence_param(run_requests, "run_requests", RunRequest)\n check.opt_str_param(skip_message, "skip_message")\n check.opt_list_param(captured_log_key, "captured_log_key", str)\n check.invariant(\n not (run_requests and skip_message), "Found both skip data and run request data"\n )\n return super(ScheduleExecutionData, cls).__new__(\n cls,\n run_requests=run_requests,\n skip_message=skip_message,\n captured_log_key=captured_log_key,\n )\n\n\ndef validate_and_get_schedule_resource_dict(\n resources: Resources, schedule_name: str, required_resource_keys: Set[str]\n) -> Dict[str, Any]:\n """Validates that the context has all the required resources and returns a dictionary of\n resource key to resource object.\n """\n for k in required_resource_keys:\n if not hasattr(resources, k):\n raise DagsterInvalidDefinitionError(\n f"Resource with key '{k}' required by schedule '{schedule_name}' was not provided."\n )\n\n return {k: getattr(resources, k) for k in required_resource_keys}\n\n\n
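A sketch of direct invocation in a test, reusing the hypothetical ``daily_schedule`` from the sketch above; no instance is passed because that schedule never accesses one:

.. code-block:: python

    from datetime import datetime

    from dagster import build_schedule_context

    context = build_schedule_context(
        scheduled_execution_time=datetime(2023, 10, 12)
    )
    run_request = daily_schedule(context)
    assert run_request.run_key == "2023-10-12"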
[docs]@deprecated_param(\n param="environment_vars",\n breaking_version="2.0",\n additional_warn_text=(\n "It is no longer necessary. Schedules will have access to all environment variables set in"\n " the containing environment, and can safely be deleted."\n ),\n)\nclass ScheduleDefinition(IHasInternalInit):\n """Define a schedule that targets a job.\n\n Args:\n name (Optional[str]): The name of the schedule to create. Defaults to the job name plus\n "_schedule".\n cron_schedule (Union[str, Sequence[str]]): A valid cron string or sequence of cron strings\n specifying when the schedule will run, e.g., ``'45 23 * * 6'`` for a schedule that runs\n at 11:45 PM every Saturday. If a sequence is provided, then the schedule will run for\n the union of all execution times for the provided cron strings, e.g.,\n ``['45 23 * * 6', '30 9 * * 0]`` for a schedule that runs at 11:45 PM every Saturday and\n 9:30 AM every Sunday.\n execution_fn (Callable[ScheduleEvaluationContext]): The core evaluation function for the\n schedule, which is run at an interval to determine whether a run should be launched or\n not. Takes a :py:class:`~dagster.ScheduleEvaluationContext`.\n\n This function must return a generator, which must yield either a single SkipReason\n or one or more RunRequest objects.\n run_config (Optional[Mapping]): The config that parameterizes this execution,\n as a dict.\n run_config_fn (Optional[Callable[[ScheduleEvaluationContext], [Mapping]]]): A function that\n takes a ScheduleEvaluationContext object and returns the run configuration that\n parameterizes this execution, as a dict. You may set only one of ``run_config``,\n ``run_config_fn``, and ``execution_fn``.\n tags (Optional[Mapping[str, str]]): A dictionary of tags (string key-value pairs) to attach\n to the scheduled runs.\n tags_fn (Optional[Callable[[ScheduleEvaluationContext], Optional[Mapping[str, str]]]]): A\n function that generates tags to attach to the schedules runs. Takes a\n :py:class:`~dagster.ScheduleEvaluationContext` and returns a dictionary of tags (string\n key-value pairs). You may set only one of ``tags``, ``tags_fn``, and ``execution_fn``.\n should_execute (Optional[Callable[[ScheduleEvaluationContext], bool]]): A function that runs\n at schedule execution time to determine whether a schedule should execute or skip. Takes\n a :py:class:`~dagster.ScheduleEvaluationContext` and returns a boolean (``True`` if the\n schedule should execute). Defaults to a function that always returns ``True``.\n execution_timezone (Optional[str]): Timezone in which the schedule should run.\n Supported strings for timezones are the ones provided by the\n `IANA time zone database <https://www.iana.org/time-zones>` - e.g. "America/Los_Angeles".\n description (Optional[str]): A human-readable description of the schedule.\n job (Optional[Union[GraphDefinition, JobDefinition]]): The job that should execute when this\n schedule runs.\n default_status (DefaultScheduleStatus): Whether the schedule starts as running or not. 
The default\n status can be overridden from the Dagster UI or via the GraphQL API.\n required_resource_keys (Optional[Set[str]]): The set of resource keys required by the schedule.\n """\n\n def with_updated_job(self, new_job: ExecutableDefinition) -> "ScheduleDefinition":\n """Returns a copy of this schedule with the job replaced.\n\n Args:\n job (ExecutableDefinition): The job that should execute when this\n schedule runs.\n """\n return ScheduleDefinition.dagster_internal_init(\n name=self.name,\n cron_schedule=self._cron_schedule,\n job_name=self.job_name,\n execution_timezone=self.execution_timezone,\n execution_fn=self._execution_fn,\n description=self.description,\n job=new_job,\n default_status=self.default_status,\n environment_vars=self._environment_vars,\n required_resource_keys=self._raw_required_resource_keys,\n run_config=None, # run_config, tags, should_execute encapsulated in execution_fn\n run_config_fn=None,\n tags=None,\n tags_fn=None,\n should_execute=None,\n )\n\n def __init__(\n self,\n name: Optional[str] = None,\n *,\n cron_schedule: Optional[Union[str, Sequence[str]]] = None,\n job_name: Optional[str] = None,\n run_config: Optional[Any] = None,\n run_config_fn: Optional[ScheduleRunConfigFunction] = None,\n tags: Optional[Mapping[str, str]] = None,\n tags_fn: Optional[ScheduleTagsFunction] = None,\n should_execute: Optional[ScheduleShouldExecuteFunction] = None,\n environment_vars: Optional[Mapping[str, str]] = None,\n execution_timezone: Optional[str] = None,\n execution_fn: Optional[ScheduleExecutionFunction] = None,\n description: Optional[str] = None,\n job: Optional[ExecutableDefinition] = None,\n default_status: DefaultScheduleStatus = DefaultScheduleStatus.STOPPED,\n required_resource_keys: Optional[Set[str]] = None,\n ):\n self._cron_schedule = check.inst_param(cron_schedule, "cron_schedule", (str, Sequence))\n if not isinstance(self._cron_schedule, str):\n check.sequence_param(self._cron_schedule, "cron_schedule", of_type=str) # type: ignore\n\n if not is_valid_cron_schedule(self._cron_schedule): # type: ignore\n raise DagsterInvalidDefinitionError(\n f"Found invalid cron schedule '{self._cron_schedule}' for schedule '{name}''. "\n "Dagster recognizes standard cron expressions consisting of 5 fields."\n )\n\n if job is not None:\n self._target: Union[DirectTarget, RepoRelativeTarget] = DirectTarget(job)\n else:\n self._target = RepoRelativeTarget(\n job_name=check.str_param(job_name, "job_name"),\n op_selection=None,\n )\n\n if name:\n self._name = check_valid_name(name)\n elif job_name:\n self._name = job_name + "_schedule"\n elif job:\n self._name = job.name + "_schedule"\n\n self._description = check.opt_str_param(description, "description")\n\n self._environment_vars = check.opt_mapping_param(\n environment_vars, "environment_vars", key_type=str, value_type=str\n )\n\n self._execution_timezone = check.opt_str_param(execution_timezone, "execution_timezone")\n\n if execution_fn and (run_config_fn or tags_fn or should_execute or tags or run_config):\n raise DagsterInvalidDefinitionError(\n "Attempted to provide both execution_fn and individual run_config/tags arguments "\n "to ScheduleDefinition. 
Must provide only one of the two."\n )\n elif execution_fn:\n self._execution_fn: Optional[Union[Callable[..., Any], DecoratedScheduleFunction]] = (\n None\n )\n if isinstance(execution_fn, DecoratedScheduleFunction):\n self._execution_fn = execution_fn\n else:\n self._execution_fn = check.opt_callable_param(execution_fn, "execution_fn")\n self._run_config_fn = None\n else:\n if run_config_fn and run_config:\n raise DagsterInvalidDefinitionError(\n "Attempted to provide both run_config_fn and run_config as arguments"\n " to ScheduleDefinition. Must provide only one of the two."\n )\n\n def _default_run_config_fn(context: ScheduleEvaluationContext) -> RunConfig:\n return check.opt_dict_param(run_config, "run_config")\n\n self._run_config_fn = check.opt_callable_param(\n run_config_fn, "run_config_fn", default=_default_run_config_fn\n )\n\n if tags_fn and tags:\n raise DagsterInvalidDefinitionError(\n "Attempted to provide both tags_fn and tags as arguments"\n " to ScheduleDefinition. Must provide only one of the two."\n )\n elif tags:\n tags = validate_tags(tags, allow_reserved_tags=False)\n tags_fn = lambda _context: tags\n else:\n tags_fn = check.opt_callable_param(\n tags_fn, "tags_fn", default=lambda _context: cast(Mapping[str, str], {})\n )\n self._tags_fn = tags_fn\n self._tags = tags\n\n self._should_execute: ScheduleShouldExecuteFunction = check.opt_callable_param(\n should_execute, "should_execute", default=lambda _context: True\n )\n\n # Several type-ignores are present in this function to work around bugs in mypy\n # inference.\n def _execution_fn(context: ScheduleEvaluationContext) -> RunRequestIterator:\n with user_code_error_boundary(\n ScheduleExecutionError,\n lambda: (\n f"Error occurred during the execution of should_execute for schedule {name}"\n ),\n ):\n if not self._should_execute(context):\n yield SkipReason(f"should_execute function for {name} returned false.")\n return\n\n with user_code_error_boundary(\n ScheduleExecutionError,\n lambda: (\n f"Error occurred during the execution of run_config_fn for schedule {name}"\n ),\n ):\n _run_config_fn = check.not_none(self._run_config_fn)\n evaluated_run_config = copy.deepcopy(\n _run_config_fn(context)\n if has_at_least_one_parameter(_run_config_fn)\n else _run_config_fn() # type: ignore # (strict type guard)\n )\n\n with user_code_error_boundary(\n ScheduleExecutionError,\n lambda: f"Error occurred during the execution of tags_fn for schedule {name}",\n ):\n evaluated_tags = validate_tags(tags_fn(context), allow_reserved_tags=False)\n\n yield RunRequest(\n run_key=None,\n run_config=evaluated_run_config,\n tags=evaluated_tags,\n )\n\n self._execution_fn = _execution_fn\n\n if self._execution_timezone:\n try:\n # Verify that the timezone can be loaded\n pendulum.tz.timezone(self._execution_timezone) # type: ignore\n except Exception as e:\n raise DagsterInvalidDefinitionError(\n f"Invalid execution timezone {self._execution_timezone} for {name}"\n ) from e\n\n self._default_status = check.inst_param(\n default_status, "default_status", DefaultScheduleStatus\n )\n\n resource_arg_names: Set[str] = (\n {arg.name for arg in get_resource_args(self._execution_fn.decorated_fn)}\n if isinstance(self._execution_fn, DecoratedScheduleFunction)\n else set()\n )\n\n check.param_invariant(\n len(required_resource_keys or []) == 0 or len(resource_arg_names) == 0,\n "Cannot specify resource requirements in both @schedule decorator and as arguments to"\n " the decorated function",\n )\n\n self._raw_required_resource_keys = 
check.opt_set_param(\n required_resource_keys, "required_resource_keys", of_type=str\n )\n self._required_resource_keys = self._raw_required_resource_keys or resource_arg_names\n\n @staticmethod\n def dagster_internal_init(\n *,\n name: Optional[str],\n cron_schedule: Optional[Union[str, Sequence[str]]],\n job_name: Optional[str],\n run_config: Optional[Any],\n run_config_fn: Optional[ScheduleRunConfigFunction],\n tags: Optional[Mapping[str, str]],\n tags_fn: Optional[ScheduleTagsFunction],\n should_execute: Optional[ScheduleShouldExecuteFunction],\n environment_vars: Optional[Mapping[str, str]],\n execution_timezone: Optional[str],\n execution_fn: Optional[ScheduleExecutionFunction],\n description: Optional[str],\n job: Optional[ExecutableDefinition],\n default_status: DefaultScheduleStatus,\n required_resource_keys: Optional[Set[str]],\n ) -> "ScheduleDefinition":\n return ScheduleDefinition(\n name=name,\n cron_schedule=cron_schedule,\n job_name=job_name,\n run_config=run_config,\n run_config_fn=run_config_fn,\n tags=tags,\n tags_fn=tags_fn,\n should_execute=should_execute,\n environment_vars=environment_vars,\n execution_timezone=execution_timezone,\n execution_fn=execution_fn,\n description=description,\n job=job,\n default_status=default_status,\n required_resource_keys=required_resource_keys,\n )\n\n def __call__(self, *args, **kwargs) -> ScheduleEvaluationFunctionReturn:\n from dagster._core.definitions.sensor_definition import get_context_param_name\n\n from .decorators.schedule_decorator import DecoratedScheduleFunction\n\n if not isinstance(self._execution_fn, DecoratedScheduleFunction):\n raise DagsterInvalidInvocationError(\n "Schedule invocation is only supported for schedules created via the schedule "\n "decorators."\n )\n\n context_param_name = get_context_param_name(self._execution_fn.decorated_fn)\n context = get_or_create_schedule_context(self._execution_fn.decorated_fn, *args, **kwargs)\n context_param = {context_param_name: context} if context_param_name else {}\n\n resources = validate_and_get_schedule_resource_dict(\n context.resources, self._name, self._required_resource_keys\n )\n result = self._execution_fn.decorated_fn(**context_param, **resources)\n\n if isinstance(result, dict):\n return copy.deepcopy(result)\n else:\n return result\n\n @public\n @property\n def name(self) -> str:\n """str: The name of the schedule."""\n return self._name\n\n @public\n @property\n def job_name(self) -> str:\n """str: The name of the job targeted by this schedule."""\n return self._target.job_name\n\n @public\n @property\n def description(self) -> Optional[str]:\n """Optional[str]: A description for this schedule."""\n return self._description\n\n @public\n @property\n def cron_schedule(self) -> Union[str, Sequence[str]]:\n """Union[str, Sequence[str]]: The cron schedule representing when this schedule will be evaluated."""\n return self._cron_schedule # type: ignore\n\n @public\n @deprecated(\n breaking_version="2.0",\n additional_warn_text="Setting this property no longer has any effect.",\n )\n @property\n def environment_vars(self) -> Mapping[str, str]:\n """Mapping[str, str]: Environment variables to export to the cron schedule."""\n return self._environment_vars\n\n @public\n @property\n def required_resource_keys(self) -> Set[str]:\n """Set[str]: The set of keys for resources that must be provided to this schedule."""\n return self._required_resource_keys\n\n @public\n @property\n def execution_timezone(self) -> Optional[str]:\n """Optional[str]: The timezone in which 
this schedule will be evaluated."""\n return self._execution_timezone\n\n @public\n @property\n def job(self) -> Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]:\n """Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]: The job that is\n targeted by this schedule.\n """\n if isinstance(self._target, DirectTarget):\n return self._target.target\n raise DagsterInvalidDefinitionError("No job was provided to ScheduleDefinition.")\n\n def evaluate_tick(self, context: "ScheduleEvaluationContext") -> ScheduleExecutionData:\n """Evaluate schedule using the provided context.\n\n Args:\n context (ScheduleEvaluationContext): The context with which to evaluate this schedule.\n\n Returns:\n ScheduleExecutionData: Contains list of run requests, or skip message if present.\n\n """\n from dagster._core.definitions.partition import CachingDynamicPartitionsLoader\n\n check.inst_param(context, "context", ScheduleEvaluationContext)\n execution_fn: Callable[..., "ScheduleEvaluationFunctionReturn"]\n if isinstance(self._execution_fn, DecoratedScheduleFunction):\n execution_fn = self._execution_fn.wrapped_fn\n else:\n execution_fn = cast(\n Callable[..., "ScheduleEvaluationFunctionReturn"],\n self._execution_fn,\n )\n\n result = list(ensure_gen(execution_fn(context)))\n\n skip_message: Optional[str] = None\n\n run_requests: List[RunRequest] = []\n if not result or result == [None]:\n run_requests = []\n skip_message = "Schedule function returned an empty result"\n elif len(result) == 1:\n item = check.inst(result[0], (SkipReason, RunRequest))\n if isinstance(item, RunRequest):\n run_requests = [item]\n skip_message = None\n elif isinstance(item, SkipReason):\n run_requests = []\n skip_message = item.skip_message\n else:\n # NOTE: mypy is not correctly reading this cast-- not sure why\n # (pyright reads it fine). 
Hence the type-ignores below.\n result = cast(List[RunRequest], check.is_list(result, of_type=RunRequest))\n check.invariant(\n not any(not request.run_key for request in result),\n "Schedules that return multiple RunRequests must specify a run_key in each"\n " RunRequest",\n )\n run_requests = result\n skip_message = None\n\n dynamic_partitions_store = (\n CachingDynamicPartitionsLoader(context.instance) if context.instance_ref else None\n )\n\n # clone all the run requests with resolved tags and config\n resolved_run_requests = []\n for run_request in run_requests:\n if run_request.partition_key and not run_request.has_resolved_partition():\n if context.repository_def is None:\n raise DagsterInvariantViolationError(\n "Must provide repository def to build_schedule_context when yielding"\n " partitioned run requests"\n )\n\n scheduled_target = context.repository_def.get_job(self._target.job_name)\n resolved_request = run_request.with_resolved_tags_and_config(\n target_definition=scheduled_target,\n dynamic_partitions_requests=[],\n current_time=context.scheduled_execution_time,\n dynamic_partitions_store=dynamic_partitions_store,\n )\n else:\n resolved_request = run_request\n\n resolved_run_requests.append(\n resolved_request.with_replaced_attrs(\n tags=merge_dicts(resolved_request.tags, DagsterRun.tags_for_schedule(self))\n )\n )\n\n return ScheduleExecutionData(\n run_requests=resolved_run_requests,\n skip_message=skip_message,\n captured_log_key=context.log_key if context.has_captured_logs() else None,\n )\n\n def has_loadable_target(self):\n return isinstance(self._target, DirectTarget)\n\n @property\n def targets_unresolved_asset_job(self) -> bool:\n return self.has_loadable_target() and isinstance(\n self.load_target(), UnresolvedAssetJobDefinition\n )\n\n def load_target(\n self,\n ) -> Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]:\n if isinstance(self._target, DirectTarget):\n return self._target.load()\n\n check.failed("Target is not loadable")\n\n @public\n @property\n def default_status(self) -> DefaultScheduleStatus:\n """DefaultScheduleStatus: The default status for this schedule when it is first loaded in\n a code location.\n """\n return self._default_status
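A sketch of constructing the class directly, without a custom evaluation function, for a hypothetical ``my_job``; the tag value is assumed:

.. code-block:: python

    from dagster import DefaultScheduleStatus, ScheduleDefinition

    nightly_schedule = ScheduleDefinition(
        job=my_job,                                    # hypothetical job
        cron_schedule="45 23 * * 6",
        execution_timezone="America/Los_Angeles",
        default_status=DefaultScheduleStatus.RUNNING,
        tags={"team": "data-eng"},                     # assumed tag
    )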
\n
", "current_page_name": "_modules/dagster/_core/definitions/schedule_definition", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.schedule_definition"}, "selector": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.selector

\nfrom typing import AbstractSet, Iterable, NamedTuple, Optional, Sequence\n\nfrom typing_extensions import Self\n\nimport dagster._check as check\nfrom dagster._core.definitions.asset_check_spec import AssetCheckKey\nfrom dagster._core.definitions.events import AssetKey\nfrom dagster._core.definitions.repository_definition import SINGLETON_REPOSITORY_NAME\nfrom dagster._serdes import create_snapshot_id, whitelist_for_serdes\n\n\nclass JobSubsetSelector(\n    NamedTuple(\n        "_JobSubsetSelector",\n        [\n            ("location_name", str),\n            ("repository_name", str),\n            ("job_name", str),\n            ("op_selection", Optional[Sequence[str]]),\n            ("asset_selection", Optional[AbstractSet[AssetKey]]),\n            ("asset_check_selection", Optional[AbstractSet[AssetCheckKey]]),\n        ],\n    )\n):\n    """The information needed to resolve a job within a host process."""\n\n    def __new__(\n        cls,\n        location_name: str,\n        repository_name: str,\n        job_name: str,\n        op_selection: Optional[Sequence[str]],\n        asset_selection: Optional[Iterable[AssetKey]] = None,\n        asset_check_selection: Optional[Iterable[AssetCheckKey]] = None,\n    ):\n        asset_selection = set(asset_selection) if asset_selection else None\n        asset_check_selection = (\n            set(asset_check_selection) if asset_check_selection is not None else None\n        )\n        return super(JobSubsetSelector, cls).__new__(\n            cls,\n            location_name=check.str_param(location_name, "location_name"),\n            repository_name=check.str_param(repository_name, "repository_name"),\n            job_name=check.str_param(job_name, "job_name"),\n            op_selection=check.opt_nullable_sequence_param(op_selection, "op_selection", str),\n            asset_selection=check.opt_nullable_set_param(\n                asset_selection, "asset_selection", AssetKey\n            ),\n            asset_check_selection=check.opt_nullable_set_param(\n                asset_check_selection, "asset_check_selection", AssetCheckKey\n            ),\n        )\n\n    def to_graphql_input(self):\n        return {\n            "repositoryLocationName": self.location_name,\n            "repositoryName": self.repository_name,\n            "pipelineName": self.job_name,\n            "solidSelection": self.op_selection,\n        }\n\n    def with_op_selection(self, op_selection: Optional[Sequence[str]]) -> Self:\n        check.invariant(\n            self.op_selection is None,\n            f"Can not invoke with_op_selection when op_selection={self.op_selection} is"\n            " already set",\n        )\n        return JobSubsetSelector(\n            self.location_name, self.repository_name, self.job_name, op_selection\n        )\n\n\n
[docs]@whitelist_for_serdes\nclass JobSelector(\n NamedTuple(\n "_JobSelector", [("location_name", str), ("repository_name", str), ("job_name", str)]\n )\n):\n def __new__(\n cls,\n location_name: str,\n repository_name: Optional[str] = None,\n job_name: Optional[str] = None,\n ):\n return super(JobSelector, cls).__new__(\n cls,\n location_name=check.str_param(location_name, "location_name"),\n repository_name=check.opt_str_param(\n repository_name,\n "repository_name",\n default=SINGLETON_REPOSITORY_NAME,\n ),\n job_name=check.str_param(\n job_name,\n "job_name",\n "Must provide job_name argument even though it is marked as optional in the "\n "function signature. repository_name, a truly optional parameter, is before "\n "that argument and actually optional. Use of keyword arguments is "\n "recommended to avoid confusion.",\n ),\n )\n\n def to_graphql_input(self):\n return {\n "repositoryLocationName": self.location_name,\n "repositoryName": self.repository_name,\n "jobName": self.job_name,\n }\n\n @property\n def selector_id(self):\n return create_snapshot_id(self)\n\n @staticmethod\n def from_graphql_input(graphql_data):\n return JobSelector(\n location_name=graphql_data["repositoryLocationName"],\n repository_name=graphql_data["repositoryName"],\n job_name=graphql_data["jobName"],\n )
\n\n\n
[docs]@whitelist_for_serdes\nclass RepositorySelector(\n NamedTuple("_RepositorySelector", [("location_name", str), ("repository_name", str)])\n):\n def __new__(cls, location_name: str, repository_name: str):\n return super(RepositorySelector, cls).__new__(\n cls,\n location_name=check.str_param(location_name, "location_name"),\n repository_name=check.str_param(repository_name, "repository_name"),\n )\n\n def to_graphql_input(self):\n return {\n "repositoryLocationName": self.location_name,\n "repositoryName": self.repository_name,\n }\n\n @property\n def selector_id(self):\n return create_snapshot_id(self)\n\n @staticmethod\n def from_graphql_input(graphql_data):\n return RepositorySelector(\n location_name=graphql_data["repositoryLocationName"],\n repository_name=graphql_data["repositoryName"],\n )
\n\n\nclass CodeLocationSelector(NamedTuple("_CodeLocationSelector", [("location_name", str)])):\n def __new__(cls, location_name: str):\n return super(CodeLocationSelector, cls).__new__(\n cls,\n location_name=check.str_param(location_name, "location_name"),\n )\n\n def to_repository_selector(self) -> RepositorySelector:\n return RepositorySelector(\n location_name=self.location_name, repository_name=SINGLETON_REPOSITORY_NAME\n )\n\n\nclass ScheduleSelector(\n NamedTuple(\n "_ScheduleSelector",\n [("location_name", str), ("repository_name", str), ("schedule_name", str)],\n )\n):\n def __new__(cls, location_name: str, repository_name: str, schedule_name: str):\n return super(ScheduleSelector, cls).__new__(\n cls,\n location_name=check.str_param(location_name, "location_name"),\n repository_name=check.str_param(repository_name, "repository_name"),\n schedule_name=check.str_param(schedule_name, "schedule_name"),\n )\n\n def to_graphql_input(self):\n return {\n "repositoryLocationName": self.location_name,\n "repositoryName": self.repository_name,\n "scheduleName": self.schedule_name,\n }\n\n @staticmethod\n def from_graphql_input(graphql_data):\n return ScheduleSelector(\n location_name=graphql_data["repositoryLocationName"],\n repository_name=graphql_data["repositoryName"],\n schedule_name=graphql_data["scheduleName"],\n )\n\n\nclass ResourceSelector(NamedTuple):\n location_name: str\n repository_name: str\n resource_name: str\n\n def to_graphql_input(self):\n return {\n "repositoryLocationName": self.location_name,\n "repositoryName": self.repository_name,\n "resourceName": self.resource_name,\n }\n\n @staticmethod\n def from_graphql_input(graphql_data):\n return ResourceSelector(\n location_name=graphql_data["repositoryLocationName"],\n repository_name=graphql_data["repositoryName"],\n resource_name=graphql_data["resourceName"],\n )\n\n\nclass SensorSelector(\n NamedTuple(\n "_SensorSelector", [("location_name", str), ("repository_name", str), ("sensor_name", str)]\n )\n):\n def __new__(cls, location_name: str, repository_name: str, sensor_name: str):\n return super(SensorSelector, cls).__new__(\n cls,\n location_name=check.str_param(location_name, "location_name"),\n repository_name=check.str_param(repository_name, "repository_name"),\n sensor_name=check.str_param(sensor_name, "sensor_name"),\n )\n\n def to_graphql_input(self):\n return {\n "repositoryLocationName": self.location_name,\n "repositoryName": self.repository_name,\n "sensorName": self.sensor_name,\n }\n\n @staticmethod\n def from_graphql_input(graphql_data):\n return SensorSelector(\n location_name=graphql_data["repositoryLocationName"],\n repository_name=graphql_data["repositoryName"],\n sensor_name=graphql_data["sensorName"],\n )\n\n\n@whitelist_for_serdes\nclass InstigatorSelector(\n NamedTuple(\n "_InstigatorSelector", [("location_name", str), ("repository_name", str), ("name", str)]\n )\n):\n def __new__(cls, location_name: str, repository_name: str, name: str):\n return super(InstigatorSelector, cls).__new__(\n cls,\n location_name=check.str_param(location_name, "location_name"),\n repository_name=check.str_param(repository_name, "repository_name"),\n name=check.str_param(name, "name"),\n )\n\n def to_graphql_input(self):\n return {\n "repositoryLocationName": self.location_name,\n "repositoryName": self.repository_name,\n "name": self.name,\n }\n\n @staticmethod\n def from_graphql_input(graphql_data):\n return InstigatorSelector(\n location_name=graphql_data["repositoryLocationName"],\n 
repository_name=graphql_data["repositoryName"],\n name=graphql_data["name"],\n )\n\n\nclass GraphSelector(\n NamedTuple(\n "_GraphSelector", [("location_name", str), ("repository_name", str), ("graph_name", str)]\n )\n):\n """The information needed to resolve a graph within a host process."""\n\n def __new__(cls, location_name: str, repository_name: str, graph_name: str):\n return super(GraphSelector, cls).__new__(\n cls,\n location_name=check.str_param(location_name, "location_name"),\n repository_name=check.str_param(repository_name, "repository_name"),\n graph_name=check.str_param(graph_name, "graph_name"),\n )\n\n def to_graphql_input(self):\n return {\n "repositoryLocationName": self.location_name,\n "repositoryName": self.repository_name,\n "graphName": self.graph_name,\n }\n\n\n@whitelist_for_serdes\nclass PartitionSetSelector(\n NamedTuple(\n "_PartitionSetSelector",\n [("location_name", str), ("repository_name", str), ("partition_set_name", str)],\n )\n):\n """The information needed to resolve a partition set within a host process."""\n\n def __new__(cls, location_name: str, repository_name: str, partition_set_name: str):\n return super(PartitionSetSelector, cls).__new__(\n cls,\n location_name=check.str_param(location_name, "location_name"),\n repository_name=check.str_param(repository_name, "repository_name"),\n partition_set_name=check.str_param(partition_set_name, "partition_set_name"),\n )\n\n def to_graphql_input(self):\n return {\n "repositoryLocationName": self.location_name,\n "repositoryName": self.repository_name,\n "partitionSetName": self.partition_set_name,\n }\n\n\nclass PartitionRangeSelector(\n NamedTuple(\n "_PartitionRangeSelector",\n [("start", str), ("end", str)],\n )\n):\n """The information needed to resolve a partition range."""\n\n def __new__(cls, start: str, end: str):\n return super(PartitionRangeSelector, cls).__new__(\n cls,\n start=check.inst_param(start, "start", str),\n end=check.inst_param(end, "end", str),\n )\n\n def to_graphql_input(self):\n return {\n "start": self.start,\n "end": self.end,\n }\n\n @staticmethod\n def from_graphql_input(graphql_data):\n return PartitionRangeSelector(\n start=graphql_data["start"],\n end=graphql_data["end"],\n )\n\n\nclass PartitionsSelector(\n NamedTuple(\n "_PartitionsSelector",\n [("partition_range", PartitionRangeSelector)],\n )\n):\n """The information needed to define selection partitions.\n Using partition_range as property name to avoid shadowing Python 'range' builtin .\n """\n\n def __new__(cls, partition_range: PartitionRangeSelector):\n return super(PartitionsSelector, cls).__new__(\n cls,\n partition_range=check.inst_param(partition_range, "range", PartitionRangeSelector),\n )\n\n def to_graphql_input(self):\n return {\n "range": self.partition_range.to_graphql_input(),\n }\n\n @staticmethod\n def from_graphql_input(graphql_data):\n return PartitionsSelector(\n partition_range=PartitionRangeSelector.from_graphql_input(graphql_data["range"])\n )\n\n\nclass PartitionsByAssetSelector(\n NamedTuple(\n "PartitionsByAssetSelector",\n [\n ("asset_key", AssetKey),\n ("partitions", Optional[PartitionsSelector]),\n ],\n )\n):\n """The information needed to define partitions selection for a given asset key."""\n\n def __new__(cls, asset_key: AssetKey, partitions: Optional[PartitionsSelector] = None):\n return super(PartitionsByAssetSelector, cls).__new__(\n cls,\n asset_key=check.inst_param(asset_key, "asset_key", AssetKey),\n partitions=check.opt_inst_param(partitions, "partitions", PartitionsSelector),\n 
)\n\n def to_graphql_input(self):\n return {\n "assetKey": self.asset_key.to_graphql_input(),\n "partitions": self.partitions.to_graphql_input() if self.partitions else None,\n }\n\n @staticmethod\n def from_graphql_input(graphql_data):\n asset_key = graphql_data["assetKey"]\n partitions = graphql_data.get("partitions")\n return PartitionsByAssetSelector(\n asset_key=AssetKey.from_graphql_input(asset_key),\n partitions=PartitionsSelector.from_graphql_input(partitions) if partitions else None,\n )\n
", "current_page_name": "_modules/dagster/_core/definitions/selector", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.selector"}, "sensor_definition": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.sensor_definition

\nimport inspect\nimport logging\nfrom collections import defaultdict\nfrom contextlib import ExitStack\nfrom enum import Enum\nfrom typing import (\n    TYPE_CHECKING,\n    Any,\n    Callable,\n    Dict,\n    Iterable,\n    Iterator,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n    Type,\n    TypeVar,\n    Union,\n    cast,\n)\n\nimport pendulum\nfrom typing_extensions import TypeAlias\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.definitions.asset_check_evaluation import AssetCheckEvaluation\nfrom dagster._core.definitions.events import (\n    AssetMaterialization,\n    AssetObservation,\n)\nfrom dagster._core.definitions.instigation_logger import InstigationLogger\nfrom dagster._core.definitions.job_definition import JobDefinition\nfrom dagster._core.definitions.partition import (\n    CachingDynamicPartitionsLoader,\n)\nfrom dagster._core.definitions.resource_annotation import (\n    get_resource_args,\n)\nfrom dagster._core.definitions.resource_definition import (\n    Resources,\n)\nfrom dagster._core.definitions.scoped_resources_builder import ScopedResourcesBuilder\nfrom dagster._core.errors import (\n    DagsterInvalidDefinitionError,\n    DagsterInvalidInvocationError,\n    DagsterInvalidSubsetError,\n    DagsterInvariantViolationError,\n)\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._core.instance.ref import InstanceRef\nfrom dagster._serdes import whitelist_for_serdes\nfrom dagster._utils import IHasInternalInit, normalize_to_repository\n\nfrom ..decorator_utils import (\n    get_function_params,\n)\nfrom .asset_selection import AssetSelection\nfrom .graph_definition import GraphDefinition\nfrom .run_request import (\n    AddDynamicPartitionsRequest,\n    DagsterRunReaction,\n    DeleteDynamicPartitionsRequest,\n    RunRequest,\n    SensorResult,\n    SkipReason,\n)\nfrom .target import DirectTarget, ExecutableDefinition, RepoRelativeTarget\nfrom .unresolved_asset_job_definition import UnresolvedAssetJobDefinition\nfrom .utils import check_valid_name\n\nif TYPE_CHECKING:\n    from dagster import ResourceDefinition\n    from dagster._core.definitions.definitions_class import Definitions\n    from dagster._core.definitions.repository_definition import RepositoryDefinition\n\n\n@whitelist_for_serdes\nclass DefaultSensorStatus(Enum):\n    RUNNING = "RUNNING"\n    STOPPED = "STOPPED"\n\n\n@whitelist_for_serdes\nclass SensorType(Enum):\n    STANDARD = "STANDARD"\n    RUN_STATUS = "RUN_STATUS"\n    ASSET = "ASSET"\n    MULTI_ASSET = "MULTI_ASSET"\n    FRESHNESS_POLICY = "FRESHNESS_POLICY"\n    UNKNOWN = "UNKNOWN"\n\n\nDEFAULT_SENSOR_DAEMON_INTERVAL = 30\n\n\n
[docs]class SensorEvaluationContext:\n """The context object available as the argument to the evaluation function of a :py:class:`dagster.SensorDefinition`.\n\n Users should not instantiate this object directly. To construct a\n `SensorEvaluationContext` for testing purposes, use :py:func:`dagster.\n build_sensor_context`.\n\n Attributes:\n instance_ref (Optional[InstanceRef]): The serialized instance configured to run the schedule\n cursor (Optional[str]): The cursor, passed back from the last sensor evaluation via\n the cursor attribute of SkipReason and RunRequest\n last_completion_time (float): DEPRECATED The last time that the sensor was evaluated (UTC).\n last_run_key (str): DEPRECATED The run key of the RunRequest most recently created by this\n sensor. Use the preferred `cursor` attribute instead.\n repository_name (Optional[str]): The name of the repository that the sensor belongs to.\n repository_def (Optional[RepositoryDefinition]): The repository or that\n the sensor belongs to. If needed by the sensor top-level resource definitions will be\n pulled from this repository. You can provide either this or `definitions`.\n instance (Optional[DagsterInstance]): The deserialized instance can also be passed in\n directly (primarily useful in testing contexts).\n definitions (Optional[Definitions]): `Definitions` object that the sensor is defined in.\n If needed by the sensor, top-level resource definitions will be pulled from these\n definitions. You can provide either this or `repository_def`.\n resources (Optional[Dict[str, Any]]): A dict of resource keys to resource\n definitions to be made available during sensor execution.\n\n Example:\n .. code-block:: python\n\n from dagster import sensor, SensorEvaluationContext\n\n @sensor\n def the_sensor(context: SensorEvaluationContext):\n ...\n\n """\n\n def __init__(\n self,\n instance_ref: Optional[InstanceRef],\n last_completion_time: Optional[float],\n last_run_key: Optional[str],\n cursor: Optional[str],\n repository_name: Optional[str],\n repository_def: Optional["RepositoryDefinition"] = None,\n instance: Optional[DagsterInstance] = None,\n sensor_name: Optional[str] = None,\n resources: Optional[Mapping[str, "ResourceDefinition"]] = None,\n definitions: Optional["Definitions"] = None,\n ):\n from dagster._core.definitions.definitions_class import Definitions\n from dagster._core.definitions.repository_definition import RepositoryDefinition\n\n self._exit_stack = ExitStack()\n self._instance_ref = check.opt_inst_param(instance_ref, "instance_ref", InstanceRef)\n self._last_completion_time = check.opt_float_param(\n last_completion_time, "last_completion_time"\n )\n self._last_run_key = check.opt_str_param(last_run_key, "last_run_key")\n self._cursor = check.opt_str_param(cursor, "cursor")\n self._repository_name = check.opt_str_param(repository_name, "repository_name")\n self._repository_def = normalize_to_repository(\n check.opt_inst_param(definitions, "definitions", Definitions),\n check.opt_inst_param(repository_def, "repository_def", RepositoryDefinition),\n error_on_none=False,\n )\n self._instance = check.opt_inst_param(instance, "instance", DagsterInstance)\n self._sensor_name = sensor_name\n\n # Wait to set resources unless they're accessed\n self._resource_defs = resources\n self._resources = None\n self._cm_scope_entered = False\n\n self._log_key = (\n [\n repository_name,\n sensor_name,\n pendulum.now("UTC").strftime("%Y%m%d_%H%M%S"),\n ]\n if repository_name and sensor_name\n else None\n )\n self._logger: 
Optional[InstigationLogger] = None\n self._cursor_updated = False\n\n def __enter__(self) -> "SensorEvaluationContext":\n self._cm_scope_entered = True\n return self\n\n def __exit__(self, *exc) -> None:\n self._exit_stack.close()\n self._logger = None\n\n @property\n def resource_defs(self) -> Optional[Mapping[str, "ResourceDefinition"]]:\n return self._resource_defs\n\n @property\n def sensor_name(self) -> str:\n return check.not_none(self._sensor_name, "Only valid when sensor name provided")\n\n def merge_resources(self, resources_dict: Mapping[str, Any]) -> "SensorEvaluationContext":\n """Merge the specified resources into this context.\n\n This method is intended to be used by the Dagster framework, and should not be called by user code.\n\n Args:\n resources_dict (Mapping[str, Any]): The resources to replace in the context.\n """\n check.invariant(\n self._resources is None, "Cannot merge resources in context that has been initialized."\n )\n from dagster._core.execution.build_resources import wrap_resources_for_execution\n\n return SensorEvaluationContext(\n instance_ref=self._instance_ref,\n last_completion_time=self._last_completion_time,\n last_run_key=self._last_run_key,\n cursor=self._cursor,\n repository_name=self._repository_name,\n repository_def=self._repository_def,\n instance=self._instance,\n sensor_name=self._sensor_name,\n resources={\n **(self._resource_defs or {}),\n **wrap_resources_for_execution(resources_dict),\n },\n )\n\n @public\n @property\n def resources(self) -> Resources:\n """Resources: A mapping from resource key to instantiated resources for this sensor."""\n from dagster._core.definitions.scoped_resources_builder import (\n IContainsGenerator,\n )\n from dagster._core.execution.build_resources import build_resources\n\n if not self._resources:\n """\n This is similar to what we do in e.g. the op context - we set up a resource\n building context manager, and immediately enter it. This is so that in cases\n where a user is not using any context-manager based resources, they don't\n need to enter this SensorEvaluationContext themselves.\n\n For example:\n\n my_sensor(build_sensor_context(resources={"my_resource": my_non_cm_resource})\n\n will work ok, but for a CM resource we must do\n\n with build_sensor_context(resources={"my_resource": my_cm_resource}) as context:\n my_sensor(context)\n """\n\n # Early exit if no resources are defined. This skips unnecessary initialization\n # entirely. This allows users to run user code servers in cases where they\n # do not have access to the instance if they use a subset of features do\n # that do not require instance access. In this case, if they do not use\n # resources on sensors they do not require the instance, so we do not\n # instantiate it\n #\n # Tracking at https://github.com/dagster-io/dagster/issues/14345\n if not self._resource_defs:\n self._resources = ScopedResourcesBuilder.build_empty()\n return self._resources\n\n instance = self.instance if self._instance or self._instance_ref else None\n\n resources_cm = build_resources(resources=self._resource_defs or {}, instance=instance)\n self._resources = self._exit_stack.enter_context(resources_cm)\n\n if isinstance(self._resources, IContainsGenerator) and not self._cm_scope_entered:\n self._exit_stack.close()\n raise DagsterInvariantViolationError(\n "At least one provided resource is a generator, but attempting to access"\n " resources outside of context manager scope. 
You can use the following syntax"\n " to open a context manager: `with build_sensor_context(...) as context:`"\n )\n\n return self._resources\n\n @public\n @property\n def instance(self) -> DagsterInstance:\n """DagsterInstance: The current DagsterInstance."""\n # self._instance_ref should only ever be None when this SensorEvaluationContext was\n # constructed under test.\n if not self._instance:\n if not self._instance_ref:\n raise DagsterInvariantViolationError(\n "Attempted to initialize dagster instance, but no instance reference was"\n " provided."\n )\n self._instance = self._exit_stack.enter_context(\n DagsterInstance.from_ref(self._instance_ref)\n )\n return cast(DagsterInstance, self._instance)\n\n @property\n def instance_ref(self) -> Optional[InstanceRef]:\n return self._instance_ref\n\n @public\n @property\n def last_completion_time(self) -> Optional[float]:\n """Optional[float]: Timestamp representing the last time this sensor completed an evaluation."""\n return self._last_completion_time\n\n @public\n @property\n def last_run_key(self) -> Optional[str]:\n """Optional[str]: The run key supplied to the most recent RunRequest produced by this sensor."""\n return self._last_run_key\n\n @public\n @property\n def cursor(self) -> Optional[str]:\n """The cursor value for this sensor, which was set in an earlier sensor evaluation."""\n return self._cursor\n\n
[docs] @public\n def update_cursor(self, cursor: Optional[str]) -> None:\n """Updates the cursor value for this sensor, which will be provided on the context for the\n next sensor evaluation.\n\n This can be used to keep track of progress and avoid duplicate work across sensor\n evaluations.\n\n Args:\n cursor (Optional[str]): The new cursor value to store for the next sensor evaluation.\n """\n self._cursor = check.opt_str_param(cursor, "cursor")\n self._cursor_updated = True
\n\n @property\n def cursor_updated(self) -> bool:\n return self._cursor_updated\n\n @public\n @property\n def repository_name(self) -> Optional[str]:\n """Optional[str]: The name of the repository that this sensor resides in."""\n return self._repository_name\n\n @public\n @property\n def repository_def(self) -> Optional["RepositoryDefinition"]:\n """Optional[RepositoryDefinition]: The RepositoryDefinition that this sensor resides in."""\n return self._repository_def\n\n @property\n def log(self) -> logging.Logger:\n if self._logger:\n return self._logger\n\n if not self._instance_ref:\n self._logger = self._exit_stack.enter_context(\n InstigationLogger(\n self._log_key,\n repository_name=self._repository_name,\n name=self._sensor_name,\n )\n )\n return cast(logging.Logger, self._logger)\n\n self._logger = self._exit_stack.enter_context(\n InstigationLogger(\n self._log_key,\n self.instance,\n repository_name=self._repository_name,\n name=self._sensor_name,\n )\n )\n return cast(logging.Logger, self._logger)\n\n def has_captured_logs(self):\n return self._logger and self._logger.has_captured_logs()\n\n @property\n def log_key(self) -> Optional[List[str]]:\n return self._log_key
\n\n\nRawSensorEvaluationFunctionReturn = Union[\n Iterator[Union[SkipReason, RunRequest, DagsterRunReaction, SensorResult]],\n Sequence[RunRequest],\n SkipReason,\n RunRequest,\n DagsterRunReaction,\n SensorResult,\n]\nRawSensorEvaluationFunction: TypeAlias = Callable[..., RawSensorEvaluationFunctionReturn]\n\nSensorEvaluationFunction: TypeAlias = Callable[..., Sequence[Union[SkipReason, RunRequest]]]\n\n\ndef get_context_param_name(fn: Callable) -> Optional[str]:\n """Determines the sensor's context parameter name by excluding all resource parameters."""\n resource_params = {param.name for param in get_resource_args(fn)}\n\n return next(\n (param.name for param in get_function_params(fn) if param.name not in resource_params), None\n )\n\n\ndef validate_and_get_resource_dict(\n resources: Resources, sensor_name: str, required_resource_keys: Set[str]\n) -> Dict[str, Any]:\n """Validates that the context has all the required resources and returns a dictionary of\n resource key to resource object.\n """\n for k in required_resource_keys:\n if not hasattr(resources, k):\n raise DagsterInvalidDefinitionError(\n f"Resource with key '{k}' required by sensor '{sensor_name}' was not provided."\n )\n\n return {k: getattr(resources, k) for k in required_resource_keys}\n\n\ndef _check_dynamic_partitions_requests(\n dynamic_partitions_requests: Sequence[\n Union[AddDynamicPartitionsRequest, DeleteDynamicPartitionsRequest]\n ],\n) -> None:\n req_keys_to_add_by_partitions_def_name = defaultdict(set)\n req_keys_to_delete_by_partitions_def_name = defaultdict(set)\n\n for req in dynamic_partitions_requests:\n duplicate_req_keys_to_delete = req_keys_to_delete_by_partitions_def_name.get(\n req.partitions_def_name, set()\n ).intersection(req.partition_keys)\n duplicate_req_keys_to_add = req_keys_to_add_by_partitions_def_name.get(\n req.partitions_def_name, set()\n ).intersection(req.partition_keys)\n if isinstance(req, AddDynamicPartitionsRequest):\n if duplicate_req_keys_to_delete:\n raise DagsterInvariantViolationError(\n "Dynamic partition requests cannot contain both add and delete requests for"\n " the same partition keys.Invalid request: partitions_def_name"\n f" '{req.partitions_def_name}', partition_keys: {duplicate_req_keys_to_delete}"\n )\n elif duplicate_req_keys_to_add:\n raise DagsterInvariantViolationError(\n "Cannot request to add duplicate dynamic partition keys: \\npartitions_def_name"\n f" '{req.partitions_def_name}', partition_keys: {duplicate_req_keys_to_add}"\n )\n req_keys_to_add_by_partitions_def_name[req.partitions_def_name].update(\n req.partition_keys\n )\n elif isinstance(req, DeleteDynamicPartitionsRequest):\n if duplicate_req_keys_to_delete:\n raise DagsterInvariantViolationError(\n "Cannot request to add duplicate dynamic partition keys: \\npartitions_def_name"\n f" '{req.partitions_def_name}', partition_keys:"\n f" {req_keys_to_add_by_partitions_def_name}"\n )\n elif duplicate_req_keys_to_add:\n raise DagsterInvariantViolationError(\n "Dynamic partition requests cannot contain both add and delete requests for"\n " the same partition keys.Invalid request: partitions_def_name"\n f" '{req.partitions_def_name}', partition_keys: {duplicate_req_keys_to_add}"\n )\n req_keys_to_delete_by_partitions_def_name[req.partitions_def_name].update(\n req.partition_keys\n )\n else:\n check.failed(f"Unexpected dynamic partition request type: {req}")\n\n\n
[docs]class SensorDefinition(IHasInternalInit):\n """Define a sensor that initiates a set of runs based on some external state.\n\n Args:\n evaluation_fn (Callable[[SensorEvaluationContext]]): The core evaluation function for the\n sensor, which is run at an interval to determine whether a run should be launched or\n not. Takes a :py:class:`~dagster.SensorEvaluationContext`.\n\n This function must return a generator, which must yield either a single SkipReason\n or one or more RunRequest objects.\n name (Optional[str]): The name of the sensor to create. Defaults to name of evaluation_fn\n minimum_interval_seconds (Optional[int]): The minimum number of seconds that will elapse\n between sensor evaluations.\n description (Optional[str]): A human-readable description of the sensor.\n job (Optional[GraphDefinition, JobDefinition, UnresolvedAssetJob]): The job to execute when this sensor fires.\n jobs (Optional[Sequence[GraphDefinition, JobDefinition, UnresolvedAssetJob]]): (experimental) A list of jobs to execute when this sensor fires.\n default_status (DefaultSensorStatus): Whether the sensor starts as running or not. The default\n status can be overridden from the Dagster UI or via the GraphQL API.\n asset_selection (AssetSelection): (Experimental) an asset selection to launch a run for if\n the sensor condition is met. This can be provided instead of specifying a job.\n """\n\n def with_updated_jobs(self, new_jobs: Sequence[ExecutableDefinition]) -> "SensorDefinition":\n """Returns a copy of this sensor with the jobs replaced.\n\n Args:\n job (ExecutableDefinition): The job that should execute when this\n schedule runs.\n """\n return SensorDefinition.dagster_internal_init(\n name=self.name,\n evaluation_fn=self._raw_fn,\n minimum_interval_seconds=self.minimum_interval_seconds,\n description=self.description,\n job_name=None, # if original init was passed job name, was resolved to a job\n jobs=new_jobs if len(new_jobs) > 1 else None,\n job=new_jobs[0] if len(new_jobs) == 1 else None,\n default_status=self.default_status,\n asset_selection=self.asset_selection,\n required_resource_keys=self._raw_required_resource_keys,\n )\n\n def with_updated_job(self, new_job: ExecutableDefinition) -> "SensorDefinition":\n """Returns a copy of this sensor with the job replaced.\n\n Args:\n job (ExecutableDefinition): The job that should execute when this\n schedule runs.\n """\n return self.with_updated_jobs([new_job])\n\n def __init__(\n self,\n name: Optional[str] = None,\n *,\n evaluation_fn: Optional[RawSensorEvaluationFunction] = None,\n job_name: Optional[str] = None,\n minimum_interval_seconds: Optional[int] = None,\n description: Optional[str] = None,\n job: Optional[ExecutableDefinition] = None,\n jobs: Optional[Sequence[ExecutableDefinition]] = None,\n default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,\n asset_selection: Optional[AssetSelection] = None,\n required_resource_keys: Optional[Set[str]] = None,\n ):\n from dagster._config.pythonic_config import validate_resource_annotated_function\n\n if evaluation_fn is None:\n raise DagsterInvalidDefinitionError("Must provide evaluation_fn to SensorDefinition.")\n\n if (\n sum(\n [\n int(job is not None),\n int(jobs is not None),\n int(job_name is not None),\n int(asset_selection is not None),\n ]\n )\n > 1\n ):\n raise DagsterInvalidDefinitionError(\n "Attempted to provide more than one of 'job', 'jobs', 'job_name', and "\n "'asset_selection' params to SensorDefinition. 
Must provide only one."\n )\n\n jobs = jobs if jobs else [job] if job else None\n\n targets: Optional[List[Union[RepoRelativeTarget, DirectTarget]]] = None\n if job_name:\n targets = [\n RepoRelativeTarget(\n job_name=check.str_param(job_name, "job_name"),\n op_selection=None,\n )\n ]\n elif job:\n targets = [DirectTarget(job)]\n elif jobs:\n targets = [DirectTarget(job) for job in jobs]\n elif asset_selection:\n targets = []\n\n if name:\n self._name = check_valid_name(name)\n else:\n self._name = evaluation_fn.__name__\n\n self._raw_fn: RawSensorEvaluationFunction = check.callable_param(\n evaluation_fn, "evaluation_fn"\n )\n self._evaluation_fn: Union[\n SensorEvaluationFunction,\n Callable[\n [SensorEvaluationContext],\n List[Union[SkipReason, RunRequest, DagsterRunReaction]],\n ],\n ] = wrap_sensor_evaluation(self._name, evaluation_fn)\n self._min_interval = check.opt_int_param(\n minimum_interval_seconds, "minimum_interval_seconds", DEFAULT_SENSOR_DAEMON_INTERVAL\n )\n self._description = check.opt_str_param(description, "description")\n self._targets: Sequence[Union[RepoRelativeTarget, DirectTarget]] = check.opt_list_param(\n targets, "targets", (DirectTarget, RepoRelativeTarget)\n )\n self._default_status = check.inst_param(\n default_status, "default_status", DefaultSensorStatus\n )\n self._asset_selection = check.opt_inst_param(\n asset_selection, "asset_selection", AssetSelection\n )\n validate_resource_annotated_function(self._raw_fn)\n resource_arg_names: Set[str] = {arg.name for arg in get_resource_args(self._raw_fn)}\n\n check.param_invariant(\n len(required_resource_keys or []) == 0 or len(resource_arg_names) == 0,\n "Cannot specify resource requirements in both @sensor decorator and as arguments to"\n " the decorated function",\n )\n self._raw_required_resource_keys = check.opt_set_param(\n required_resource_keys, "required_resource_keys", of_type=str\n )\n self._required_resource_keys = self._raw_required_resource_keys or resource_arg_names\n\n @staticmethod\n def dagster_internal_init(\n *,\n name: Optional[str],\n evaluation_fn: Optional[RawSensorEvaluationFunction],\n job_name: Optional[str],\n minimum_interval_seconds: Optional[int],\n description: Optional[str],\n job: Optional[ExecutableDefinition],\n jobs: Optional[Sequence[ExecutableDefinition]],\n default_status: DefaultSensorStatus,\n asset_selection: Optional[AssetSelection],\n required_resource_keys: Optional[Set[str]],\n ) -> "SensorDefinition":\n return SensorDefinition(\n name=name,\n evaluation_fn=evaluation_fn,\n job_name=job_name,\n minimum_interval_seconds=minimum_interval_seconds,\n description=description,\n job=job,\n jobs=jobs,\n default_status=default_status,\n asset_selection=asset_selection,\n required_resource_keys=required_resource_keys,\n )\n\n def __call__(self, *args, **kwargs) -> RawSensorEvaluationFunctionReturn:\n context_param_name_if_present = get_context_param_name(self._raw_fn)\n context = get_or_create_sensor_context(self._raw_fn, *args, **kwargs)\n\n context_param = (\n {context_param_name_if_present: context} if context_param_name_if_present else {}\n )\n\n resources = validate_and_get_resource_dict(\n context.resources, self.name, self._required_resource_keys\n )\n return self._raw_fn(**context_param, **resources)\n\n @public\n @property\n def required_resource_keys(self) -> Set[str]:\n """Set[str]: The set of keys for resources that must be provided to this sensor."""\n return self._required_resource_keys\n\n @public\n @property\n def name(self) -> str:\n """str: The name of this 
sensor."""\n return self._name\n\n @public\n @property\n def description(self) -> Optional[str]:\n """Optional[str]: A description for this sensor."""\n return self._description\n\n @public\n @property\n def minimum_interval_seconds(self) -> Optional[int]:\n """Optional[int]: The minimum number of seconds between sequential evaluations of this sensor."""\n return self._min_interval\n\n @property\n def targets(self) -> Sequence[Union[DirectTarget, RepoRelativeTarget]]:\n return self._targets\n\n @public\n @property\n def job(self) -> Union[JobDefinition, GraphDefinition, UnresolvedAssetJobDefinition]:\n """Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]: The job that is\n targeted by this schedule.\n """\n if self._targets:\n if len(self._targets) == 1 and isinstance(self._targets[0], DirectTarget):\n return self._targets[0].target\n elif len(self._targets) > 1:\n raise DagsterInvalidDefinitionError(\n "Job property not available when SensorDefinition has multiple jobs."\n )\n raise DagsterInvalidDefinitionError("No job was provided to SensorDefinition.")\n\n @public\n @property\n def jobs(self) -> List[Union[JobDefinition, GraphDefinition, UnresolvedAssetJobDefinition]]:\n """List[Union[GraphDefinition, JobDefinition, UnresolvedAssetJobDefinition]]: A list of jobs\n that are targeted by this schedule.\n """\n if self._targets and all(isinstance(target, DirectTarget) for target in self._targets):\n return [target.target for target in self._targets] # type: ignore # (illegible conditional)\n raise DagsterInvalidDefinitionError("No job was provided to SensorDefinition.")\n\n @property\n def sensor_type(self) -> SensorType:\n return SensorType.STANDARD\n\n def evaluate_tick(self, context: "SensorEvaluationContext") -> "SensorExecutionData":\n """Evaluate sensor using the provided context.\n\n Args:\n context (SensorEvaluationContext): The context with which to evaluate this sensor.\n\n Returns:\n SensorExecutionData: Contains list of run requests, or skip message if present.\n\n """\n context = check.inst_param(context, "context", SensorEvaluationContext)\n\n result = self._evaluation_fn(context)\n\n skip_message: Optional[str] = None\n run_requests: List[RunRequest] = []\n dagster_run_reactions: List[DagsterRunReaction] = []\n dynamic_partitions_requests: Optional[\n Sequence[Union[AddDynamicPartitionsRequest, DeleteDynamicPartitionsRequest]]\n ] = []\n updated_cursor = context.cursor\n asset_events = []\n\n if not result or result == [None]:\n skip_message = "Sensor function returned an empty result"\n elif len(result) == 1:\n item = result[0]\n check.inst(item, (SkipReason, RunRequest, DagsterRunReaction, SensorResult))\n\n if isinstance(item, SensorResult):\n run_requests = list(item.run_requests) if item.run_requests else []\n skip_message = (\n item.skip_reason.skip_message\n if item.skip_reason\n else (None if run_requests else "Sensor function returned an empty result")\n )\n\n _check_dynamic_partitions_requests(\n item.dynamic_partitions_requests or [],\n )\n dynamic_partitions_requests = item.dynamic_partitions_requests or []\n\n if item.cursor and context.cursor_updated:\n raise DagsterInvariantViolationError(\n "SensorResult.cursor cannot be set if context.update_cursor() was called."\n )\n updated_cursor = item.cursor\n asset_events = item.asset_events\n\n elif isinstance(item, RunRequest):\n run_requests = [item]\n elif isinstance(item, SkipReason):\n skip_message = item.skip_message if isinstance(item, SkipReason) else None\n elif isinstance(item, 
DagsterRunReaction):\n dagster_run_reactions = (\n [cast(DagsterRunReaction, item)] if isinstance(item, DagsterRunReaction) else []\n )\n else:\n check.failed(f"Unexpected type {type(item)} in sensor result")\n else:\n if any(isinstance(item, SensorResult) for item in result):\n check.failed(\n "When a SensorResult is returned from a sensor, it must be the only object"\n " returned."\n )\n\n check.is_list(result, (SkipReason, RunRequest, DagsterRunReaction))\n has_skip = any(map(lambda x: isinstance(x, SkipReason), result))\n run_requests = [item for item in result if isinstance(item, RunRequest)]\n dagster_run_reactions = [\n item for item in result if isinstance(item, DagsterRunReaction)\n ]\n\n if has_skip:\n if len(run_requests) > 0:\n check.failed(\n "Expected a single SkipReason or one or more RunRequests: received both "\n "RunRequest and SkipReason"\n )\n elif len(dagster_run_reactions) > 0:\n check.failed(\n "Expected a single SkipReason or one or more DagsterRunReaction: "\n "received both DagsterRunReaction and SkipReason"\n )\n else:\n check.failed("Expected a single SkipReason: received multiple SkipReasons")\n\n _check_dynamic_partitions_requests(dynamic_partitions_requests)\n resolved_run_requests = self.resolve_run_requests(\n run_requests, context, self._asset_selection, dynamic_partitions_requests\n )\n\n return SensorExecutionData(\n resolved_run_requests,\n skip_message,\n updated_cursor,\n dagster_run_reactions,\n captured_log_key=context.log_key if context.has_captured_logs() else None,\n dynamic_partitions_requests=dynamic_partitions_requests,\n asset_events=asset_events,\n )\n\n def has_loadable_targets(self) -> bool:\n for target in self._targets:\n if isinstance(target, DirectTarget):\n return True\n return False\n\n def load_targets(\n self,\n ) -> Sequence[Union[JobDefinition, GraphDefinition, UnresolvedAssetJobDefinition]]:\n """Returns job/graph definitions that have been directly passed into the sensor definition.\n Any jobs or graphs that are referenced by name will not be loaded.\n """\n targets = []\n for target in self._targets:\n if isinstance(target, DirectTarget):\n targets.append(target.load())\n return targets\n\n def resolve_run_requests(\n self,\n run_requests: Sequence[RunRequest],\n context: SensorEvaluationContext,\n asset_selection: Optional[AssetSelection],\n dynamic_partitions_requests: Sequence[\n Union[AddDynamicPartitionsRequest, DeleteDynamicPartitionsRequest]\n ],\n ) -> Sequence[RunRequest]:\n def _get_repo_job_by_name(context: SensorEvaluationContext, job_name: str) -> JobDefinition:\n if context.repository_def is None:\n raise DagsterInvariantViolationError(\n "Must provide repository def to build_sensor_context when yielding partitioned"\n " run requests"\n )\n return context.repository_def.get_job(job_name)\n\n has_multiple_targets = len(self._targets) > 1\n target_names = [target.job_name for target in self._targets]\n\n if run_requests and len(self._targets) == 0 and not self._asset_selection:\n raise Exception(\n f"Error in sensor {self._name}: Sensor evaluation function returned a RunRequest "\n "for a sensor lacking a specified target (job_name, job, or jobs). 
Targets "\n "can be specified by providing job, jobs, or job_name to the @sensor "\n "decorator."\n )\n\n if asset_selection:\n run_requests = [\n *_run_requests_with_base_asset_jobs(run_requests, context, asset_selection)\n ]\n\n dynamic_partitions_store = (\n CachingDynamicPartitionsLoader(context.instance) if context.instance_ref else None\n )\n\n # Run requests may contain an invalid target, or a partition key that does not exist.\n # We will resolve these run requests, applying the target and partition config/tags.\n resolved_run_requests = []\n for run_request in run_requests:\n if run_request.job_name is None and has_multiple_targets:\n raise Exception(\n f"Error in sensor {self._name}: Sensor returned a RunRequest that did not"\n " specify job_name for the requested run. Expected one of:"\n f" {target_names}"\n )\n elif (\n run_request.job_name\n and run_request.job_name not in target_names\n and not asset_selection\n ):\n raise Exception(\n f"Error in sensor {self._name}: Sensor returned a RunRequest with job_name "\n f"{run_request.job_name}. Expected one of: {target_names}"\n )\n\n if run_request.partition_key and not run_request.has_resolved_partition():\n selected_job = _get_repo_job_by_name(\n context, run_request.job_name if run_request.job_name else target_names[0]\n )\n resolved_run_requests.append(\n run_request.with_resolved_tags_and_config(\n target_definition=selected_job,\n current_time=None,\n dynamic_partitions_store=dynamic_partitions_store,\n dynamic_partitions_requests=dynamic_partitions_requests,\n )\n )\n else:\n resolved_run_requests.append(run_request)\n\n return resolved_run_requests\n\n @property\n def _target(self) -> Optional[Union[DirectTarget, RepoRelativeTarget]]:\n return self._targets[0] if self._targets else None\n\n @public\n @property\n def job_name(self) -> Optional[str]:\n """Optional[str]: The name of the job that is targeted by this sensor."""\n if len(self._targets) > 1:\n raise DagsterInvalidInvocationError(\n f"Cannot use `job_name` property for sensor {self.name}, which targets multiple"\n " jobs."\n )\n return self._targets[0].job_name\n\n @public\n @property\n def default_status(self) -> DefaultSensorStatus:\n """DefaultSensorStatus: The default status for this sensor when it is first loaded in\n a code location.\n """\n return self._default_status\n\n @property\n def asset_selection(self) -> Optional[AssetSelection]:\n return self._asset_selection
\n\n\n@whitelist_for_serdes(\n storage_field_names={"dagster_run_reactions": "pipeline_run_reactions"},\n)\nclass SensorExecutionData(\n NamedTuple(\n "_SensorExecutionData",\n [\n ("run_requests", Optional[Sequence[RunRequest]]),\n ("skip_message", Optional[str]),\n ("cursor", Optional[str]),\n ("dagster_run_reactions", Optional[Sequence[DagsterRunReaction]]),\n ("captured_log_key", Optional[Sequence[str]]),\n (\n "dynamic_partitions_requests",\n Optional[\n Sequence[Union[AddDynamicPartitionsRequest, DeleteDynamicPartitionsRequest]]\n ],\n ),\n (\n "asset_events",\n Sequence[Union[AssetMaterialization, AssetObservation, AssetCheckEvaluation]],\n ),\n ],\n )\n):\n dagster_run_reactions: Optional[Sequence[DagsterRunReaction]]\n\n def __new__(\n cls,\n run_requests: Optional[Sequence[RunRequest]] = None,\n skip_message: Optional[str] = None,\n cursor: Optional[str] = None,\n dagster_run_reactions: Optional[Sequence[DagsterRunReaction]] = None,\n captured_log_key: Optional[Sequence[str]] = None,\n dynamic_partitions_requests: Optional[\n Sequence[Union[AddDynamicPartitionsRequest, DeleteDynamicPartitionsRequest]]\n ] = None,\n asset_events: Optional[\n Sequence[Union[AssetMaterialization, AssetObservation, AssetCheckEvaluation]]\n ] = None,\n ):\n check.opt_sequence_param(run_requests, "run_requests", RunRequest)\n check.opt_str_param(skip_message, "skip_message")\n check.opt_str_param(cursor, "cursor")\n check.opt_sequence_param(dagster_run_reactions, "dagster_run_reactions", DagsterRunReaction)\n check.opt_list_param(captured_log_key, "captured_log_key", str)\n check.opt_sequence_param(\n dynamic_partitions_requests,\n "dynamic_partitions_requests",\n (AddDynamicPartitionsRequest, DeleteDynamicPartitionsRequest),\n )\n check.opt_sequence_param(\n asset_events,\n "asset_events",\n (AssetMaterialization, AssetObservation, AssetCheckEvaluation),\n )\n check.invariant(\n not (run_requests and skip_message), "Found both skip data and run request data"\n )\n return super(SensorExecutionData, cls).__new__(\n cls,\n run_requests=run_requests,\n skip_message=skip_message,\n cursor=cursor,\n dagster_run_reactions=dagster_run_reactions,\n captured_log_key=captured_log_key,\n dynamic_partitions_requests=dynamic_partitions_requests,\n asset_events=asset_events or [],\n )\n\n\ndef wrap_sensor_evaluation(\n sensor_name: str,\n fn: RawSensorEvaluationFunction,\n) -> SensorEvaluationFunction:\n resource_arg_names: Set[str] = {arg.name for arg in get_resource_args(fn)}\n\n def _wrapped_fn(context: SensorEvaluationContext):\n resource_args_populated = validate_and_get_resource_dict(\n context.resources, sensor_name, resource_arg_names\n )\n\n context_param_name_if_present = get_context_param_name(fn)\n context_param = (\n {context_param_name_if_present: context} if context_param_name_if_present else {}\n )\n raw_evaluation_result = fn(**context_param, **resource_args_populated)\n\n def check_returned_scalar(scalar):\n if isinstance(scalar, (SkipReason, RunRequest, SensorResult)):\n return scalar\n elif scalar is not None:\n raise Exception(\n f"Error in sensor {sensor_name}: Sensor unexpectedly returned output "\n f"{scalar} of type {type(scalar)}. 
Should only return SkipReason, "\n "RunRequest, or SensorResult objects."\n )\n\n if inspect.isgenerator(raw_evaluation_result):\n result = []\n try:\n while True:\n result.append(next(raw_evaluation_result))\n except StopIteration as e:\n # captures the case where the evaluation function has a yield and also returns a\n # value\n if e.value is not None:\n result.append(check_returned_scalar(e.value))\n\n return result\n elif isinstance(raw_evaluation_result, list):\n return raw_evaluation_result\n else:\n return [check_returned_scalar(raw_evaluation_result)]\n\n return _wrapped_fn\n\n\n
[docs]def build_sensor_context(\n instance: Optional[DagsterInstance] = None,\n cursor: Optional[str] = None,\n repository_name: Optional[str] = None,\n repository_def: Optional["RepositoryDefinition"] = None,\n sensor_name: Optional[str] = None,\n resources: Optional[Mapping[str, object]] = None,\n definitions: Optional["Definitions"] = None,\n instance_ref: Optional["InstanceRef"] = None,\n) -> SensorEvaluationContext:\n """Builds sensor execution context using the provided parameters.\n\n This function can be used to provide a context to the invocation of a sensor definition.If\n provided, the dagster instance must be persistent; DagsterInstance.ephemeral() will result in an\n error.\n\n Args:\n instance (Optional[DagsterInstance]): The dagster instance configured to run the sensor.\n cursor (Optional[str]): A cursor value to provide to the evaluation of the sensor.\n repository_name (Optional[str]): The name of the repository that the sensor belongs to.\n repository_def (Optional[RepositoryDefinition]): The repository that the sensor belongs to.\n If needed by the sensor top-level resource definitions will be pulled from this repository.\n You can provide either this or `definitions`.\n resources (Optional[Mapping[str, ResourceDefinition]]): A set of resource definitions\n to provide to the sensor. If passed, these will override any resource definitions\n provided by the repository.\n definitions (Optional[Definitions]): `Definitions` object that the sensor is defined in.\n If needed by the sensor, top-level resource definitions will be pulled from these\n definitions. You can provide either this or `repository_def`.\n\n Examples:\n .. code-block:: python\n\n context = build_sensor_context()\n my_sensor(context)\n\n """\n from dagster._core.definitions.definitions_class import Definitions\n from dagster._core.definitions.repository_definition import RepositoryDefinition\n from dagster._core.execution.build_resources import wrap_resources_for_execution\n\n check.opt_inst_param(instance, "instance", DagsterInstance)\n check.opt_str_param(cursor, "cursor")\n check.opt_str_param(repository_name, "repository_name")\n repository_def = normalize_to_repository(\n check.opt_inst_param(definitions, "definitions", Definitions),\n check.opt_inst_param(repository_def, "repository_def", RepositoryDefinition),\n error_on_none=False,\n )\n\n return SensorEvaluationContext(\n instance_ref=instance_ref,\n last_completion_time=None,\n last_run_key=None,\n cursor=cursor,\n repository_name=repository_name,\n instance=instance,\n repository_def=repository_def,\n sensor_name=sensor_name,\n resources=wrap_resources_for_execution(resources),\n )
\n\n\nT = TypeVar("T")\n\n\ndef get_sensor_context_from_args_or_kwargs(\n fn: Callable,\n args: Tuple[Any, ...],\n kwargs: Dict[str, Any],\n context_type: Type[T],\n) -> Optional[T]:\n from dagster._config.pythonic_config import is_coercible_to_resource\n\n context_param_name = get_context_param_name(fn)\n\n kwarg_keys_non_resource = set(kwargs.keys()) - {param.name for param in get_resource_args(fn)}\n if len(args) + len(kwarg_keys_non_resource) > 1:\n raise DagsterInvalidInvocationError(\n "Sensor invocation received multiple non-resource arguments. Only a first "\n "positional context parameter should be provided when invoking."\n )\n\n if any(is_coercible_to_resource(arg) for arg in args):\n raise DagsterInvalidInvocationError(\n "If directly invoking a sensor, you may not provide resources as"\n " positional"\n " arguments, only as keyword arguments."\n )\n\n context: Optional[T] = None\n\n if len(args) > 0:\n context = check.opt_inst(args[0], context_type)\n elif len(kwargs) > 0:\n if context_param_name and context_param_name not in kwargs:\n raise DagsterInvalidInvocationError(\n f"Sensor invocation expected argument '{context_param_name}'."\n )\n context = check.opt_inst(kwargs.get(context_param_name or "context"), context_type)\n elif context_param_name:\n # If the context parameter is present but no value was provided, we error\n raise DagsterInvalidInvocationError(\n "Sensor evaluation function expected context argument, but no context argument "\n "was provided when invoking."\n )\n\n return context\n\n\ndef get_or_create_sensor_context(\n fn: Callable,\n *args: Any,\n **kwargs: Any,\n) -> SensorEvaluationContext:\n """Based on the passed resource function and the arguments passed to it, returns the\n user-passed SensorEvaluationContext or creates one if it is not passed.\n\n Raises an exception if the user passes more than one argument or if the user-provided\n function requires a context parameter but none is passed.\n """\n context = (\n get_sensor_context_from_args_or_kwargs(\n fn,\n args,\n kwargs,\n context_type=SensorEvaluationContext,\n )\n or build_sensor_context()\n )\n resource_args_from_kwargs = {}\n\n resource_args = {param.name for param in get_resource_args(fn)}\n for resource_arg in resource_args:\n if resource_arg in kwargs:\n resource_args_from_kwargs[resource_arg] = kwargs[resource_arg]\n\n if resource_args_from_kwargs:\n return context.merge_resources(resource_args_from_kwargs)\n\n return context\n\n\ndef _run_requests_with_base_asset_jobs(\n run_requests: Iterable[RunRequest],\n context: SensorEvaluationContext,\n outer_asset_selection: AssetSelection,\n) -> Sequence[RunRequest]:\n """For sensors that target asset selections instead of jobs, finds the corresponding base asset\n for a selected set of assets.\n """\n asset_graph = context.repository_def.asset_graph # type: ignore # (possible none)\n result = []\n for run_request in run_requests:\n if run_request.asset_selection:\n asset_keys = run_request.asset_selection\n\n unexpected_asset_keys = (\n AssetSelection.keys(*asset_keys) - outer_asset_selection\n ).resolve(asset_graph)\n if unexpected_asset_keys:\n raise DagsterInvalidSubsetError(\n "RunRequest includes asset keys that are not part of sensor's asset_selection:"\n f" {unexpected_asset_keys}"\n )\n else:\n asset_keys = outer_asset_selection.resolve(asset_graph)\n\n base_job = context.repository_def.get_implicit_job_def_for_assets(asset_keys) # type: ignore # (possible none)\n result.append(\n run_request.with_replaced_attrs(\n 
job_name=base_job.name, asset_selection=list(asset_keys) # type: ignore # (possible none)\n )\n )\n\n return result\n
", "current_page_name": "_modules/dagster/_core/definitions/sensor_definition", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.sensor_definition"}, "source_asset": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.source_asset

\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Callable,\n    Dict,\n    Iterator,\n    Mapping,\n    Optional,\n    cast,\n)\n\nfrom typing_extensions import TypeAlias\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, experimental_param, public\nfrom dagster._core.decorator_utils import get_function_params\nfrom dagster._core.definitions.data_version import (\n    DATA_VERSION_TAG,\n    DataVersion,\n    DataVersionsByPartition,\n)\nfrom dagster._core.definitions.events import AssetKey, AssetObservation, CoercibleToAssetKey\nfrom dagster._core.definitions.metadata import (\n    ArbitraryMetadataMapping,\n    MetadataMapping,\n    normalize_metadata,\n)\nfrom dagster._core.definitions.op_definition import OpDefinition\nfrom dagster._core.definitions.partition import PartitionsDefinition\nfrom dagster._core.definitions.resource_annotation import get_resource_args\nfrom dagster._core.definitions.resource_definition import ResourceDefinition\nfrom dagster._core.definitions.resource_requirement import (\n    ResourceAddable,\n    ResourceRequirement,\n    SourceAssetIOManagerRequirement,\n    ensure_requirements_satisfied,\n    get_resource_key_conflicts,\n)\nfrom dagster._core.definitions.utils import (\n    DEFAULT_GROUP_NAME,\n    DEFAULT_IO_MANAGER_KEY,\n    validate_group_name,\n)\nfrom dagster._core.errors import (\n    DagsterInvalidDefinitionError,\n    DagsterInvalidInvocationError,\n    DagsterInvalidObservationError,\n)\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.decorators.op_decorator import (\n        DecoratedOpFunction,\n    )\nfrom dagster._core.storage.io_manager import IOManagerDefinition\nfrom dagster._utils.merger import merge_dicts\nfrom dagster._utils.warnings import disable_dagster_warnings\n\n# Going with this catch-all for the time-being to permit pythonic resources\nSourceAssetObserveFunction: TypeAlias = Callable[..., Any]\n\n\ndef wrap_source_asset_observe_fn_in_op_compute_fn(\n    source_asset: "SourceAsset",\n) -> "DecoratedOpFunction":\n    from dagster._core.definitions.decorators.op_decorator import (\n        DecoratedOpFunction,\n        is_context_provided,\n    )\n    from dagster._core.execution.context.compute import (\n        OpExecutionContext,\n    )\n\n    check.not_none(source_asset.observe_fn, "Must be an observable source asset")\n    assert source_asset.observe_fn  # for type checker\n\n    observe_fn = source_asset.observe_fn\n\n    observe_fn_has_context = is_context_provided(get_function_params(observe_fn))\n\n    def fn(context: OpExecutionContext) -> None:\n        resource_kwarg_keys = [param.name for param in get_resource_args(observe_fn)]\n        resource_kwargs = {key: getattr(context.resources, key) for key in resource_kwarg_keys}\n        observe_fn_return_value = (\n            observe_fn(context, **resource_kwargs)\n            if observe_fn_has_context\n            else observe_fn(**resource_kwargs)\n        )\n\n        if isinstance(observe_fn_return_value, DataVersion):\n            if source_asset.partitions_def is not None:\n                raise DagsterInvalidObservationError(\n                    f"{source_asset.key} is partitioned, so its observe function should return a"\n                    " DataVersionsByPartition, not a DataVersion"\n                )\n\n            context.log_event(\n                AssetObservation(\n                    asset_key=source_asset.key,\n                    tags={DATA_VERSION_TAG: observe_fn_return_value.value},\n    
            )\n            )\n        elif isinstance(observe_fn_return_value, DataVersionsByPartition):\n            if source_asset.partitions_def is None:\n                raise DagsterInvalidObservationError(\n                    f"{source_asset.key} is not partitioned, so its observe function should return"\n                    " a DataVersion, not a DataVersionsByPartition"\n                )\n\n            for (\n                partition_key,\n                data_version,\n            ) in observe_fn_return_value.data_versions_by_partition.items():\n                context.log_event(\n                    AssetObservation(\n                        asset_key=source_asset.key,\n                        tags={DATA_VERSION_TAG: data_version.value},\n                        partition=partition_key,\n                    )\n                )\n        else:\n            raise DagsterInvalidObservationError(\n                f"Observe function for {source_asset.key} must return a DataVersion or"\n                " DataVersionsByPartition, but returned a value of type"\n                f" {type(observe_fn_return_value)}"\n            )\n\n    return DecoratedOpFunction(fn)\n\n\n
[docs]@experimental_param(param="resource_defs")\n@experimental_param(param="io_manager_def")\nclass SourceAsset(ResourceAddable):\n """A SourceAsset represents an asset that will be loaded by (but not updated by) Dagster.\n\n Attributes:\n key (Union[AssetKey, Sequence[str], str]): The key of the asset.\n metadata (Mapping[str, MetadataValue]): Metadata associated with the asset.\n io_manager_key (Optional[str]): The key for the IOManager that will be used to load the contents of\n the asset when it's used as an input to other assets inside a job.\n io_manager_def (Optional[IOManagerDefinition]): (Experimental) The definition of the IOManager that will be used to load the contents of\n the asset when it's used as an input to other assets inside a job.\n resource_defs (Optional[Mapping[str, ResourceDefinition]]): (Experimental) resource definitions that may be required by the :py:class:`dagster.IOManagerDefinition` provided in the `io_manager_def` argument.\n description (Optional[str]): The description of the asset.\n partitions_def (Optional[PartitionsDefinition]): Defines the set of partition keys that\n compose the asset.\n observe_fn (Optional[SourceAssetObserveFunction]) Observation function for the source asset.\n """\n\n key: PublicAttr[AssetKey]\n metadata: PublicAttr[MetadataMapping]\n raw_metadata: PublicAttr[ArbitraryMetadataMapping]\n io_manager_key: PublicAttr[Optional[str]]\n _io_manager_def: PublicAttr[Optional[IOManagerDefinition]]\n description: PublicAttr[Optional[str]]\n partitions_def: PublicAttr[Optional[PartitionsDefinition]]\n group_name: PublicAttr[str]\n resource_defs: PublicAttr[Dict[str, ResourceDefinition]]\n observe_fn: PublicAttr[Optional[SourceAssetObserveFunction]]\n _node_def: Optional[OpDefinition] # computed lazily\n auto_observe_interval_minutes: Optional[float]\n\n def __init__(\n self,\n key: CoercibleToAssetKey,\n metadata: Optional[ArbitraryMetadataMapping] = None,\n io_manager_key: Optional[str] = None,\n io_manager_def: Optional[object] = None,\n description: Optional[str] = None,\n partitions_def: Optional[PartitionsDefinition] = None,\n group_name: Optional[str] = None,\n resource_defs: Optional[Mapping[str, object]] = None,\n observe_fn: Optional[SourceAssetObserveFunction] = None,\n *,\n auto_observe_interval_minutes: Optional[float] = None,\n # This is currently private because it is necessary for source asset observation functions,\n # but we have not yet decided on a final API for associated one or more ops with a source\n # asset. 
If we were to make this public, then we would have a canonical public\n # `required_resource_keys` used for observation that might end up conflicting with a set of\n # required resource keys for a different operation.\n _required_resource_keys: Optional[AbstractSet[str]] = None,\n # Add additional fields to with_resources and with_group below\n ):\n from dagster._core.execution.build_resources import (\n wrap_resources_for_execution,\n )\n\n self.key = AssetKey.from_coercible(key)\n metadata = check.opt_mapping_param(metadata, "metadata", key_type=str)\n self.raw_metadata = metadata\n self.metadata = normalize_metadata(metadata, allow_invalid=True)\n\n resource_defs_dict = dict(check.opt_mapping_param(resource_defs, "resource_defs"))\n if io_manager_def:\n if not io_manager_key:\n io_manager_key = self.key.to_python_identifier("io_manager")\n\n if (\n io_manager_key in resource_defs_dict\n and resource_defs_dict[io_manager_key] != io_manager_def\n ):\n raise DagsterInvalidDefinitionError(\n f"Provided conflicting definitions for io manager key '{io_manager_key}'."\n " Please provide only one definition per key."\n )\n\n resource_defs_dict[io_manager_key] = io_manager_def\n\n self.resource_defs = wrap_resources_for_execution(resource_defs_dict)\n\n self.io_manager_key = check.opt_str_param(io_manager_key, "io_manager_key")\n self.partitions_def = check.opt_inst_param(\n partitions_def, "partitions_def", PartitionsDefinition\n )\n self.group_name = validate_group_name(group_name)\n self.description = check.opt_str_param(description, "description")\n self.observe_fn = check.opt_callable_param(observe_fn, "observe_fn")\n self._required_resource_keys = check.opt_set_param(\n _required_resource_keys, "_required_resource_keys", of_type=str\n )\n self._node_def = None\n self.auto_observe_interval_minutes = check.opt_numeric_param(\n auto_observe_interval_minutes, "auto_observe_interval_minutes"\n )\n\n def get_io_manager_key(self) -> str:\n return self.io_manager_key or DEFAULT_IO_MANAGER_KEY\n\n @property\n def io_manager_def(self) -> Optional[IOManagerDefinition]:\n io_manager_key = self.get_io_manager_key()\n return cast(\n Optional[IOManagerDefinition],\n self.resource_defs.get(io_manager_key) if io_manager_key else None,\n )\n\n @public\n @property\n def op(self) -> OpDefinition:\n """OpDefinition: The OpDefinition associated with the observation function of an observable\n source asset.\n\n Throws an error if the asset is not observable.\n """\n check.invariant(\n isinstance(self.node_def, OpDefinition),\n "The NodeDefinition for this AssetsDefinition is not of type OpDefinition.",\n )\n return cast(OpDefinition, self.node_def)\n\n @public\n @property\n def is_observable(self) -> bool:\n """bool: Whether the asset is observable."""\n return self.node_def is not None\n\n @property\n def required_resource_keys(self) -> AbstractSet[str]:\n return {requirement.key for requirement in self.get_resource_requirements()}\n\n @property\n def node_def(self) -> Optional[OpDefinition]:\n """Op that generates observation metadata for a source asset."""\n if self.observe_fn is None:\n return None\n\n if self._node_def is None:\n self._node_def = OpDefinition(\n compute_fn=wrap_source_asset_observe_fn_in_op_compute_fn(self),\n name=self.key.to_python_identifier(),\n description=self.description,\n required_resource_keys=self._required_resource_keys,\n )\n return self._node_def\n\n def with_resources(self, resource_defs) -> "SourceAsset":\n from dagster._core.execution.resources_init import 
get_transitive_required_resource_keys\n\n overlapping_keys = get_resource_key_conflicts(self.resource_defs, resource_defs)\n if overlapping_keys:\n raise DagsterInvalidInvocationError(\n f"SourceAsset with key {self.key} has conflicting resource "\n "definitions with provided resources for the following keys: "\n f"{sorted(list(overlapping_keys))}. Either remove the existing "\n "resources from the asset or change the resource keys so that "\n "they don't overlap."\n )\n\n merged_resource_defs = merge_dicts(resource_defs, self.resource_defs)\n\n # Ensure top-level resource requirements are met - except for\n # io_manager, since that is a default it can be resolved later.\n ensure_requirements_satisfied(merged_resource_defs, list(self.get_resource_requirements()))\n\n io_manager_def = merged_resource_defs.get(self.get_io_manager_key())\n if not io_manager_def and self.get_io_manager_key() != DEFAULT_IO_MANAGER_KEY:\n raise DagsterInvalidDefinitionError(\n f"SourceAsset with asset key {self.key} requires IO manager with key"\n f" '{self.get_io_manager_key()}', but none was provided."\n )\n relevant_keys = get_transitive_required_resource_keys(\n {*self._required_resource_keys, self.get_io_manager_key()}, merged_resource_defs\n )\n\n relevant_resource_defs = {\n key: resource_def\n for key, resource_def in merged_resource_defs.items()\n if key in relevant_keys\n }\n\n io_manager_key = (\n self.get_io_manager_key()\n if self.get_io_manager_key() != DEFAULT_IO_MANAGER_KEY\n else None\n )\n with disable_dagster_warnings():\n return SourceAsset(\n key=self.key,\n io_manager_key=io_manager_key,\n description=self.description,\n partitions_def=self.partitions_def,\n metadata=self.raw_metadata,\n resource_defs=relevant_resource_defs,\n group_name=self.group_name,\n observe_fn=self.observe_fn,\n auto_observe_interval_minutes=self.auto_observe_interval_minutes,\n _required_resource_keys=self._required_resource_keys,\n )\n\n def with_attributes(\n self, group_name: Optional[str] = None, key: Optional[AssetKey] = None\n ) -> "SourceAsset":\n if group_name is not None and self.group_name != DEFAULT_GROUP_NAME:\n raise DagsterInvalidDefinitionError(\n "A group name has already been provided to source asset"\n f" {self.key.to_user_string()}"\n )\n\n with disable_dagster_warnings():\n return SourceAsset(\n key=key or self.key,\n metadata=self.raw_metadata,\n io_manager_key=self.io_manager_key,\n io_manager_def=self.io_manager_def,\n description=self.description,\n partitions_def=self.partitions_def,\n group_name=group_name,\n resource_defs=self.resource_defs,\n observe_fn=self.observe_fn,\n auto_observe_interval_minutes=self.auto_observe_interval_minutes,\n _required_resource_keys=self._required_resource_keys,\n )\n\n def get_resource_requirements(self) -> Iterator[ResourceRequirement]:\n if self.node_def is not None:\n yield from self.node_def.get_resource_requirements()\n yield SourceAssetIOManagerRequirement(\n key=self.get_io_manager_key(), asset_key=self.key.to_string()\n )\n for source_key, resource_def in self.resource_defs.items():\n yield from resource_def.get_resource_requirements(outer_context=source_key)\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, SourceAsset):\n return False\n else:\n return (\n self.key == other.key\n and self.raw_metadata == other.raw_metadata\n and self.io_manager_key == other.io_manager_key\n and self.description == other.description\n and self.group_name == other.group_name\n and self.resource_defs == other.resource_defs\n and self.observe_fn == 
other.observe_fn\n )
\n
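The SourceAsset constructor above also accepts an optional observe_fn; wrap_source_asset_observe_fn_in_op_compute_fn turns that function into an op that logs an AssetObservation tagged with the returned DataVersion (or one observation per partition for a DataVersionsByPartition). A minimal sketch of both flavors, with hypothetical asset names and versioning scheme:

.. code-block:: python

    from dagster import DataVersion, SourceAsset

    # Plain source asset: loadable as an input to other assets, but never materialized by Dagster.
    raw_logs = SourceAsset(key="raw_logs", description="Log files landed by an external system")


    def observe_raw_logs() -> DataVersion:
        # Hypothetical versioning scheme: a hash of the latest file manifest.
        return DataVersion("manifest-hash-abc123")


    # Observable source asset: the observe_fn is wrapped into an op that emits an
    # AssetObservation tagged with the DataVersion it returns.
    observable_raw_logs = SourceAsset(key="observable_raw_logs", observe_fn=observe_raw_logs)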
", "current_page_name": "_modules/dagster/_core/definitions/source_asset", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.source_asset"}, "step_launcher": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.step_launcher

\nfrom abc import ABC, abstractmethod\nfrom typing import TYPE_CHECKING, Iterator, Mapping, NamedTuple, Optional\n\nimport dagster._check as check\nfrom dagster._core.definitions.reconstruct import ReconstructableJob\nfrom dagster._core.execution.retries import RetryMode\nfrom dagster._core.storage.dagster_run import DagsterRun\n\nif TYPE_CHECKING:\n    from dagster._core.events import DagsterEvent\n    from dagster._core.execution.context.system import StepExecutionContext\n    from dagster._core.execution.plan.state import KnownExecutionState\n\n\n
[docs]class StepRunRef(\n NamedTuple(\n "_StepRunRef",\n [\n ("run_config", Mapping[str, object]),\n ("dagster_run", DagsterRun),\n ("run_id", str),\n ("retry_mode", RetryMode),\n ("step_key", str),\n ("recon_job", ReconstructableJob),\n ("known_state", Optional["KnownExecutionState"]),\n ],\n )\n):\n """A serializable object that specifies what's needed to hydrate a step so\n that it can be executed in a process outside the plan process.\n\n Users should not instantiate this class directly.\n """\n\n def __new__(\n cls,\n run_config: Mapping[str, object],\n dagster_run: DagsterRun,\n run_id: str,\n retry_mode: RetryMode,\n step_key: str,\n recon_job: ReconstructableJob,\n known_state: Optional["KnownExecutionState"],\n ):\n from dagster._core.execution.plan.state import KnownExecutionState\n\n return super(StepRunRef, cls).__new__(\n cls,\n check.mapping_param(run_config, "run_config", key_type=str),\n check.inst_param(dagster_run, "dagster_run", DagsterRun),\n check.str_param(run_id, "run_id"),\n check.inst_param(retry_mode, "retry_mode", RetryMode),\n check.str_param(step_key, "step_key"),\n check.inst_param(recon_job, "recon_job", ReconstructableJob),\n check.opt_inst_param(known_state, "known_state", KnownExecutionState),\n )
\n\n\n
[docs]class StepLauncher(ABC):\n """A StepLauncher is responsible for executing steps, either in-process or in an external process."""\n\n @abstractmethod\n def launch_step(self, step_context: "StepExecutionContext") -> Iterator["DagsterEvent"]:\n """Args:\n step_context (StepExecutionContext): The context that we're executing the step in.\n\n Returns:\n Iterator[DagsterEvent]: The events for the step.\n """
\n
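StepLauncher is an abstract base whose only contract is launch_step: it receives the StepExecutionContext and must yield the DagsterEvents produced by the step. A bare-bones sketch of a custom launcher (the class name and transport are hypothetical; a real implementation would typically build a StepRunRef, ship it to the external environment, and replay the events it produces):

.. code-block:: python

    from typing import TYPE_CHECKING, Iterator

    from dagster import StepLauncher

    if TYPE_CHECKING:
        from dagster._core.events import DagsterEvent
        from dagster._core.execution.context.system import StepExecutionContext


    class MyRemoteStepLauncher(StepLauncher):
        """Hypothetical launcher that runs each step in an external environment."""

        def launch_step(self, step_context: "StepExecutionContext") -> Iterator["DagsterEvent"]:
            # A real implementation would serialize the step (e.g. as a StepRunRef),
            # submit it to the external environment, and yield the DagsterEvents
            # streamed back from that process.
            raise NotImplementedError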
", "current_page_name": "_modules/dagster/_core/definitions/step_launcher", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.step_launcher"}, "time_window_partition_mapping": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.time_window_partition_mapping

\nfrom datetime import datetime\nfrom typing import NamedTuple, Optional, cast\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, experimental_param\nfrom dagster._core.definitions.partition import PartitionsDefinition, PartitionsSubset\nfrom dagster._core.definitions.partition_mapping import PartitionMapping, UpstreamPartitionsResult\nfrom dagster._core.definitions.time_window_partitions import (\n    TimeWindow,\n    TimeWindowPartitionsDefinition,\n    TimeWindowPartitionsSubset,\n)\nfrom dagster._core.errors import DagsterInvalidDefinitionError\nfrom dagster._core.instance import DynamicPartitionsStore\nfrom dagster._serdes import whitelist_for_serdes\n\n\n
[docs]@whitelist_for_serdes\n@experimental_param(param="allow_nonexistent_upstream_partitions")\nclass TimeWindowPartitionMapping(\n PartitionMapping,\n NamedTuple(\n "_TimeWindowPartitionMapping",\n [\n ("start_offset", PublicAttr[int]),\n ("end_offset", PublicAttr[int]),\n ("allow_nonexistent_upstream_partitions", PublicAttr[bool]),\n ],\n ),\n):\n """The default mapping between two TimeWindowPartitionsDefinitions.\n\n A partition in the downstream partitions definition is mapped to all partitions in the upstream\n asset whose time windows overlap it.\n\n This means that, if the upstream and downstream partitions definitions share the same time\n period, then this mapping is essentially the identity partition mapping - plus conversion of\n datetime formats.\n\n If the upstream time period is coarser than the downstream time period, then each partition in\n the downstream asset will map to a single (larger) upstream partition. E.g. if the downstream is\n hourly and the upstream is daily, then each hourly partition in the downstream will map to the\n daily partition in the upstream that contains that hour.\n\n If the upstream time period is finer than the downstream time period, then each partition in the\n downstream asset will map to multiple upstream partitions. E.g. if the downstream is daily and\n the upstream is hourly, then each daily partition in the downstream asset will map to the 24\n hourly partitions in the upstream that occur on that day.\n\n Attributes:\n start_offset (int): If not 0, then the starts of the upstream windows are shifted by this\n offset relative to the starts of the downstream windows. For example, if start_offset=-1\n and end_offset=0, then the downstream partition "2022-07-04" would map to the upstream\n partitions "2022-07-03" and "2022-07-04". Only permitted to be non-zero when the\n upstream and downstream PartitionsDefinitions are the same. Defaults to 0.\n end_offset (int): If not 0, then the ends of the upstream windows are shifted by this\n offset relative to the ends of the downstream windows. For example, if start_offset=0\n and end_offset=1, then the downstream partition "2022-07-04" would map to the upstream\n partitions "2022-07-04" and "2022-07-05". Only permitted to be non-zero when the\n upstream and downstream PartitionsDefinitions are the same. Defaults to 0.\n allow_nonexistent_upstream_partitions (bool): Defaults to false. If true, does not\n raise an error when mapped upstream partitions fall outside the start-end time window of the\n partitions def. For example, if the upstream partitions def starts on "2023-01-01" but\n the downstream starts on "2022-01-01", setting this bool to true would return no\n partition keys when get_upstream_partitions_for_partitions is called with "2022-06-01".\n When set to false, would raise an error.\n\n Examples:\n .. 
code-block:: python\n\n from dagster import DailyPartitionsDefinition, TimeWindowPartitionMapping, AssetIn, asset\n\n partitions_def = DailyPartitionsDefinition(start_date="2020-01-01")\n\n @asset(partitions_def=partitions_def)\n def asset1():\n ...\n\n @asset(\n partitions_def=partitions_def,\n ins={\n "asset1": AssetIn(\n partition_mapping=TimeWindowPartitionMapping(start_offset=-1)\n )\n }\n )\n def asset2(asset1):\n ...\n """\n\n def __new__(\n cls,\n start_offset: int = 0,\n end_offset: int = 0,\n allow_nonexistent_upstream_partitions: bool = False,\n ):\n return super(TimeWindowPartitionMapping, cls).__new__(\n cls,\n start_offset=check.int_param(start_offset, "start_offset"),\n end_offset=check.int_param(end_offset, "end_offset"),\n allow_nonexistent_upstream_partitions=check.bool_param(\n allow_nonexistent_upstream_partitions,\n "allow_nonexistent_upstream_partitions",\n ),\n )\n\n def get_upstream_mapped_partitions_result_for_partitions(\n self,\n downstream_partitions_subset: Optional[PartitionsSubset],\n upstream_partitions_def: PartitionsDefinition,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> UpstreamPartitionsResult:\n if not isinstance(downstream_partitions_subset, TimeWindowPartitionsSubset):\n check.failed("downstream_partitions_subset must be a TimeWindowPartitionsSubset")\n\n return self._map_partitions(\n downstream_partitions_subset.partitions_def,\n upstream_partitions_def,\n downstream_partitions_subset,\n start_offset=self.start_offset,\n end_offset=self.end_offset,\n current_time=current_time,\n )\n\n def get_downstream_partitions_for_partitions(\n self,\n upstream_partitions_subset: PartitionsSubset,\n downstream_partitions_def: Optional[PartitionsDefinition],\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> PartitionsSubset:\n """Returns the partitions in the downstream asset that map to the given upstream partitions.\n\n Filters for partitions that exist at the given current_time, fetching the current time\n if not provided.\n """\n return self._map_partitions(\n upstream_partitions_subset.partitions_def,\n downstream_partitions_def,\n upstream_partitions_subset,\n end_offset=-self.start_offset,\n start_offset=-self.end_offset,\n current_time=current_time,\n ).partitions_subset\n\n def _map_partitions(\n self,\n from_partitions_def: PartitionsDefinition,\n to_partitions_def: Optional[PartitionsDefinition],\n from_partitions_subset: PartitionsSubset,\n start_offset: int,\n end_offset: int,\n current_time: Optional[datetime] = None,\n ) -> UpstreamPartitionsResult:\n """Maps the partitions in from_partitions_subset to partitions in to_partitions_def.\n\n If partitions in from_partitions_subset represent time windows that do not exist in\n to_partitions_def, raises an error if raise_error_on_invalid_mapped_partition is True.\n Otherwise, filters out the partitions that do not exist in to_partitions_def and returns\n the filtered subset, also returning a bool indicating whether there were mapped time windows\n that did not exist in to_partitions_def.\n """\n if not isinstance(from_partitions_subset, TimeWindowPartitionsSubset):\n check.failed("from_partitions_subset must be a TimeWindowPartitionsSubset")\n\n if not isinstance(from_partitions_def, TimeWindowPartitionsDefinition):\n check.failed("from_partitions_def must be a TimeWindowPartitionsDefinition")\n\n if not isinstance(to_partitions_def, 
TimeWindowPartitionsDefinition):\n check.failed("to_partitions_def must be a TimeWindowPartitionsDefinition")\n\n if (start_offset != 0 or end_offset != 0) and (\n from_partitions_def.cron_schedule != to_partitions_def.cron_schedule\n ):\n raise DagsterInvalidDefinitionError(\n "Can't use the start_offset or end_offset parameters of"\n " TimeWindowPartitionMapping when the cron schedule of the upstream"\n " PartitionsDefinition is different than the cron schedule of the downstream"\n f" one. Attempted to map from cron schedule '{from_partitions_def.cron_schedule}' "\n f"to cron schedule '{to_partitions_def.cron_schedule}'."\n )\n\n if to_partitions_def.timezone != from_partitions_def.timezone:\n raise DagsterInvalidDefinitionError("Timezones don't match")\n\n # skip fancy mapping logic in the simple case\n if from_partitions_def == to_partitions_def and start_offset == 0 and end_offset == 0:\n return UpstreamPartitionsResult(from_partitions_subset, [])\n\n time_windows = []\n for from_partition_time_window in from_partitions_subset.included_time_windows:\n from_start_dt, from_end_dt = from_partition_time_window\n offsetted_start_dt = _offsetted_datetime(\n from_partitions_def, from_start_dt, start_offset\n )\n offsetted_end_dt = _offsetted_datetime(from_partitions_def, from_end_dt, end_offset)\n\n to_start_partition_key = (\n to_partitions_def.get_partition_key_for_timestamp(\n offsetted_start_dt.timestamp(), end_closed=False\n )\n if offsetted_start_dt is not None\n else None\n )\n to_end_partition_key = (\n to_partitions_def.get_partition_key_for_timestamp(\n offsetted_end_dt.timestamp(), end_closed=True\n )\n if offsetted_end_dt is not None\n else None\n )\n\n if to_start_partition_key is not None or to_end_partition_key is not None:\n window_start = (\n to_partitions_def.start_time_for_partition_key(to_start_partition_key)\n if to_start_partition_key\n else cast(TimeWindow, to_partitions_def.get_first_partition_window()).start\n )\n window_end = (\n to_partitions_def.end_time_for_partition_key(to_end_partition_key)\n if to_end_partition_key\n else cast(TimeWindow, to_partitions_def.get_last_partition_window()).end\n )\n\n if window_start < window_end:\n time_windows.append(TimeWindow(window_start, window_end))\n\n first_window = to_partitions_def.get_first_partition_window(current_time=current_time)\n last_window = to_partitions_def.get_last_partition_window(current_time=current_time)\n\n filtered_time_windows = []\n required_but_nonexistent_partition_keys = set()\n\n for time_window in time_windows:\n if (\n first_window\n and last_window\n and time_window.start <= last_window.start\n and time_window.end >= first_window.end\n ):\n window_start = max(time_window.start, first_window.start)\n window_end = min(time_window.end, last_window.end)\n filtered_time_windows.append(TimeWindow(window_start, window_end))\n\n if self.allow_nonexistent_upstream_partitions:\n # If allowed to have nonexistent upstream partitions, do not consider\n # out of range partitions to be invalid\n continue\n else:\n invalid_time_window = None\n if not (first_window and last_window) or (\n time_window.start < first_window.start and time_window.end > last_window.end\n ):\n invalid_time_window = time_window\n elif time_window.start < first_window.start:\n invalid_time_window = TimeWindow(\n time_window.start, min(time_window.end, first_window.start)\n )\n elif time_window.end > last_window.end:\n invalid_time_window = TimeWindow(\n max(time_window.start, last_window.end), time_window.end\n )\n\n if 
invalid_time_window:\n required_but_nonexistent_partition_keys.update(\n set(\n to_partitions_def.get_partition_keys_in_time_window(\n time_window=invalid_time_window\n )\n )\n )\n\n return UpstreamPartitionsResult(\n TimeWindowPartitionsSubset(\n to_partitions_def,\n num_partitions=sum(\n len(to_partitions_def.get_partition_keys_in_time_window(time_window))\n for time_window in filtered_time_windows\n ),\n included_time_windows=filtered_time_windows,\n ),\n sorted(list(required_but_nonexistent_partition_keys)),\n )
\n\n\ndef _offsetted_datetime(\n partitions_def: TimeWindowPartitionsDefinition, dt: datetime, offset: int\n) -> Optional[datetime]:\n for _ in range(abs(offset)):\n if offset < 0:\n prev_window = partitions_def.get_prev_partition_window(dt)\n if prev_window is None:\n return None\n\n dt = prev_window.start\n else:\n # TODO: what if we're at the end of the line?\n next_window = partitions_def.get_next_partition_window(dt)\n if next_window is None:\n return None\n\n dt = next_window.end\n\n return dt\n
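Building on the docstring example above, the sketch below combines start_offset with allow_nonexistent_upstream_partitions: each daily downstream partition also depends on the previous day's upstream partition, and mapped windows that fall before the upstream's start date are dropped rather than raising. Asset names are hypothetical.

.. code-block:: python

    from dagster import AssetIn, DailyPartitionsDefinition, TimeWindowPartitionMapping, asset

    daily = DailyPartitionsDefinition(start_date="2023-01-01")


    @asset(partitions_def=daily)
    def events():
        ...


    @asset(
        partitions_def=daily,
        ins={
            "events": AssetIn(
                partition_mapping=TimeWindowPartitionMapping(
                    # each downstream day also reads the previous upstream day
                    start_offset=-1,
                    # the 2023-01-01 partition would otherwise require a nonexistent
                    # 2022-12-31 upstream partition; drop it instead of erroring
                    allow_nonexistent_upstream_partitions=True,
                )
            )
        },
    )
    def rolling_summary(events):
        ...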
", "current_page_name": "_modules/dagster/_core/definitions/time_window_partition_mapping", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.time_window_partition_mapping"}, "time_window_partitions": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.time_window_partitions

\nimport functools\nimport hashlib\nimport json\nimport re\nfrom datetime import datetime\nfrom enum import Enum\nfrom typing import (\n    AbstractSet,\n    Any,\n    Callable,\n    FrozenSet,\n    Iterable,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Tuple,\n    Type,\n    Union,\n    cast,\n)\n\nimport pendulum\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, public\nfrom dagster._core.instance import DynamicPartitionsStore\nfrom dagster._utils.partitions import DEFAULT_HOURLY_FORMAT_WITHOUT_TIMEZONE\nfrom dagster._utils.schedules import (\n    cron_string_iterator,\n    is_valid_cron_schedule,\n    reverse_cron_string_iterator,\n)\n\nfrom ..errors import (\n    DagsterInvalidDefinitionError,\n    DagsterInvalidDeserializationVersionError,\n)\nfrom .partition import (\n    DEFAULT_DATE_FORMAT,\n    PartitionedConfig,\n    PartitionsDefinition,\n    PartitionsSubset,\n    ScheduleType,\n    cron_schedule_from_schedule_type_and_offsets,\n)\nfrom .partition_key_range import PartitionKeyRange\n\n\n
[docs]class TimeWindow(NamedTuple):\n """An interval that is closed at the start and open at the end.\n\n Attributes:\n start (datetime): A pendulum datetime that marks the start of the window.\n end (datetime): A pendulum datetime that marks the end of the window.\n """\n\n start: PublicAttr[datetime]\n end: PublicAttr[datetime]
\n\n\n
[docs]class TimeWindowPartitionsDefinition(\n PartitionsDefinition,\n NamedTuple(\n "_TimeWindowPartitionsDefinition",\n [\n ("start", PublicAttr[datetime]),\n ("timezone", PublicAttr[str]),\n ("end", PublicAttr[Optional[datetime]]),\n ("fmt", PublicAttr[str]),\n ("end_offset", PublicAttr[int]),\n ("cron_schedule", PublicAttr[str]),\n ],\n ),\n):\n r"""A set of partitions where each partitions corresponds to a time window.\n\n The provided cron_schedule determines the bounds of the time windows. E.g. a cron_schedule of\n "0 0 \\\\* \\\\* \\\\*" will result in daily partitions that start at midnight and end at midnight of the\n following day.\n\n The string partition_key associated with each partition corresponds to the start of the\n partition's time window.\n\n The first partition in the set will start on at the first cron_schedule tick that is equal to\n or after the given start datetime. The last partition in the set will end before the current\n time, unless the end_offset argument is set to a positive number.\n\n Args:\n cron_schedule (str): Determines the bounds of the time windows.\n start (datetime): The first partition in the set will start on at the first cron_schedule\n tick that is equal to or after this value.\n timezone (Optional[str]): The timezone in which each time should exist.\n Supported strings for timezones are the ones provided by the\n `IANA time zone database <https://www.iana.org/time-zones>` - e.g. "America/Los_Angeles".\n end (datetime): The last partition (excluding) in the set.\n fmt (str): The date format to use for partition_keys.\n end_offset (int): Extends the partition set by a number of partitions equal to the value\n passed. If end_offset is 0 (the default), the last partition ends before the current\n time. If end_offset is 1, the second-to-last partition ends before the current time,\n and so on.\n """\n\n def __new__(\n cls,\n start: Union[datetime, str],\n fmt: str,\n end: Union[datetime, str, None] = None,\n schedule_type: Optional[ScheduleType] = None,\n timezone: Optional[str] = None,\n end_offset: int = 0,\n minute_offset: Optional[int] = None,\n hour_offset: Optional[int] = None,\n day_offset: Optional[int] = None,\n cron_schedule: Optional[str] = None,\n ):\n check.opt_str_param(timezone, "timezone")\n timezone = timezone or "UTC"\n\n if isinstance(start, datetime):\n start_dt = pendulum.instance(start, tz=timezone)\n else:\n start_dt = pendulum.instance(datetime.strptime(start, fmt), tz=timezone)\n\n if not end:\n end_dt = None\n elif isinstance(end, datetime):\n end_dt = pendulum.instance(end, tz=timezone)\n else:\n end_dt = pendulum.instance(datetime.strptime(end, fmt), tz=timezone)\n\n if cron_schedule is not None:\n check.invariant(\n schedule_type is None and not minute_offset and not hour_offset and not day_offset,\n "If cron_schedule argument is provided, then schedule_type, minute_offset, "\n "hour_offset, and day_offset can't also be provided",\n )\n else:\n if schedule_type is None:\n check.failed("One of schedule_type and cron_schedule must be provided")\n\n cron_schedule = cron_schedule_from_schedule_type_and_offsets(\n schedule_type=schedule_type,\n minute_offset=minute_offset or 0,\n hour_offset=hour_offset or 0,\n day_offset=day_offset or 0,\n )\n\n if not is_valid_cron_schedule(cron_schedule):\n raise DagsterInvalidDefinitionError(\n f"Found invalid cron schedule '{cron_schedule}' for a"\n " TimeWindowPartitionsDefinition."\n )\n\n return super(TimeWindowPartitionsDefinition, cls).__new__(\n cls, start_dt, timezone, end_dt, 
fmt, end_offset, cron_schedule\n )\n\n def get_current_timestamp(self, current_time: Optional[datetime] = None) -> float:\n return (\n pendulum.instance(current_time, tz=self.timezone)\n if current_time\n else pendulum.now(self.timezone)\n ).timestamp()\n\n def get_num_partitions(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> int:\n # Method added for performance reasons.\n # Fetching partition keys requires significantly more compute time to\n # string format datetimes.\n current_timestamp = self.get_current_timestamp(current_time=current_time)\n\n partitions_past_current_time = 0\n\n num_partitions = 0\n for time_window in self._iterate_time_windows(self.start):\n if self.end and time_window.end.timestamp() > self.end.timestamp():\n break\n if (\n time_window.end.timestamp() <= current_timestamp\n or partitions_past_current_time < self.end_offset\n ):\n num_partitions += 1\n\n if time_window.end.timestamp() > current_timestamp:\n partitions_past_current_time += 1\n else:\n break\n\n if self.end_offset < 0:\n num_partitions += self.end_offset\n\n return num_partitions\n\n def get_partition_keys_between_indexes(\n self, start_idx: int, end_idx: int, current_time: Optional[datetime] = None\n ) -> List[str]:\n # Fetches the partition keys between the given start and end indices.\n # Start index is inclusive, end index is exclusive.\n # Method added for performance reasons, to only string format\n # partition keys included within the indices.\n current_timestamp = self.get_current_timestamp(current_time=current_time)\n\n partitions_past_current_time = 0\n partition_keys = []\n reached_end = False\n\n for idx, time_window in enumerate(self._iterate_time_windows(self.start)):\n if time_window.end.timestamp() >= current_timestamp:\n reached_end = True\n if self.end and time_window.end.timestamp() > self.end.timestamp():\n reached_end = True\n if (\n time_window.end.timestamp() <= current_timestamp\n or partitions_past_current_time < self.end_offset\n ):\n if idx >= start_idx and idx < end_idx:\n partition_keys.append(time_window.start.strftime(self.fmt))\n if time_window.end.timestamp() > current_timestamp:\n partitions_past_current_time += 1\n else:\n break\n if len(partition_keys) >= end_idx - start_idx:\n break\n\n if reached_end and self.end_offset < 0:\n partition_keys = partition_keys[: self.end_offset]\n\n return partition_keys\n\n def get_partition_keys(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Sequence[str]:\n current_timestamp = self.get_current_timestamp(current_time=current_time)\n\n partitions_past_current_time = 0\n partition_keys: List[str] = []\n for time_window in self._iterate_time_windows(self.start):\n if self.end and time_window.end.timestamp() > self.end.timestamp():\n break\n if (\n time_window.end.timestamp() <= current_timestamp\n or partitions_past_current_time < self.end_offset\n ):\n partition_keys.append(time_window.start.strftime(self.fmt))\n\n if time_window.end.timestamp() > current_timestamp:\n partitions_past_current_time += 1\n else:\n break\n\n if self.end_offset < 0:\n partition_keys = partition_keys[: self.end_offset]\n\n return partition_keys\n\n def _get_validated_time_window_for_partition_key(\n self, partition_key: str, current_time: Optional[datetime] = None\n ) -> Optional[TimeWindow]:\n """Returns a TimeWindow for the given partition key if it is valid, otherwise returns None."""\n 
try:\n time_window = self.time_window_for_partition_key(partition_key)\n except ValueError:\n return None\n\n first_partition_window = self.get_first_partition_window(current_time=current_time)\n last_partition_window = self.get_last_partition_window(current_time=current_time)\n if (\n first_partition_window is None\n or last_partition_window is None\n or time_window.start < first_partition_window.start\n or time_window.start > last_partition_window.start\n or time_window.start.strftime(self.fmt) != partition_key\n ):\n return None\n\n return time_window\n\n def __str__(self) -> str:\n schedule_str = (\n self.schedule_type.value.capitalize() if self.schedule_type else self.cron_schedule\n )\n partition_def_str = (\n f"{schedule_str}, starting {self.start.strftime(self.fmt)} {self.timezone}."\n )\n if self.end_offset != 0:\n partition_def_str += (\n " End offsetted by"\n f" {self.end_offset} partition{'' if self.end_offset == 1 else 's'}."\n )\n return partition_def_str\n\n def __repr__(self):\n # Between python 3.8 and 3.9 the repr of a datetime object changed.\n # Replaces start time with timestamp as a workaround to make sure the repr is consistent across versions.\n return (\n f"TimeWindowPartitionsDefinition(start={self.start.timestamp()},"\n f" timezone='{self.timezone}', fmt='{self.fmt}', end_offset={self.end_offset},"\n f" cron_schedule='{self.cron_schedule}')"\n )\n\n def __hash__(self):\n return hash(tuple(self.__repr__()))\n\n @functools.lru_cache(maxsize=100)\n def _time_window_for_partition_key(self, *, partition_key: str) -> TimeWindow:\n partition_key_dt = pendulum.instance(\n datetime.strptime(partition_key, self.fmt), tz=self.timezone\n )\n return next(iter(self._iterate_time_windows(partition_key_dt)))\n\n def time_window_for_partition_key(self, partition_key: str) -> TimeWindow:\n return self._time_window_for_partition_key(partition_key=partition_key)\n\n @functools.lru_cache(maxsize=5)\n def time_windows_for_partition_keys(\n self,\n partition_keys: FrozenSet[str],\n validate: bool = True,\n ) -> Sequence[TimeWindow]:\n if len(partition_keys) == 0:\n return []\n\n sorted_pks = sorted(partition_keys, key=lambda pk: datetime.strptime(pk, self.fmt))\n cur_windows_iterator = iter(\n self._iterate_time_windows(\n pendulum.instance(datetime.strptime(sorted_pks[0], self.fmt), tz=self.timezone)\n )\n )\n partition_key_time_windows: List[TimeWindow] = []\n for partition_key in sorted_pks:\n next_window = next(cur_windows_iterator)\n if next_window.start.strftime(self.fmt) == partition_key:\n partition_key_time_windows.append(next_window)\n else:\n cur_windows_iterator = iter(\n self._iterate_time_windows(\n pendulum.instance(\n datetime.strptime(partition_key, self.fmt), tz=self.timezone\n )\n )\n )\n partition_key_time_windows.append(next(cur_windows_iterator))\n\n if validate:\n start_time_window = self.get_first_partition_window()\n end_time_window = self.get_last_partition_window()\n\n if start_time_window is None or end_time_window is None:\n check.failed("No partitions in the PartitionsDefinition")\n\n start_timestamp = start_time_window.start.timestamp()\n end_timestamp = end_time_window.end.timestamp()\n\n partition_key_time_windows = [\n tw\n for tw in partition_key_time_windows\n if tw.start.timestamp() >= start_timestamp and tw.end.timestamp() <= end_timestamp\n ]\n return partition_key_time_windows\n\n def start_time_for_partition_key(self, partition_key: str) -> datetime:\n partition_key_dt = pendulum.instance(\n datetime.strptime(partition_key, self.fmt), 
tz=self.timezone\n )\n # the datetime format might not include granular components, so we need to recover them\n # we make the assumption that the parsed partition key is <= the start datetime\n return next(iter(self._iterate_time_windows(partition_key_dt))).start\n\n def get_next_partition_key(\n self, partition_key: str, current_time: Optional[datetime] = None\n ) -> Optional[str]:\n last_partition_window = self.get_last_partition_window(current_time)\n if last_partition_window is None:\n return None\n\n partition_key_dt = pendulum.instance(\n datetime.strptime(partition_key, self.fmt), tz=self.timezone\n )\n windows_iter = iter(self._iterate_time_windows(partition_key_dt))\n next(windows_iter)\n start_time = next(windows_iter).start\n if start_time >= last_partition_window.end:\n return None\n else:\n return start_time.strftime(self.fmt)\n\n def get_next_partition_window(\n self, end_dt: datetime, current_time: Optional[datetime] = None\n ) -> Optional[TimeWindow]:\n last_partition_window = self.get_last_partition_window(current_time)\n if last_partition_window is None:\n return None\n\n windows_iter = iter(self._iterate_time_windows(end_dt))\n next_window = next(windows_iter)\n if next_window.start >= last_partition_window.end:\n return None\n else:\n return next_window\n\n def get_prev_partition_window(self, start_dt: datetime) -> Optional[TimeWindow]:\n windows_iter = iter(self._reverse_iterate_time_windows(start_dt))\n prev_window = next(windows_iter)\n first_partition_window = self.get_first_partition_window()\n if first_partition_window is None or prev_window.start < first_partition_window.start:\n return None\n else:\n return prev_window\n\n @functools.lru_cache(maxsize=5)\n def _get_first_partition_window(self, *, current_time: datetime) -> Optional[TimeWindow]:\n current_timestamp = current_time.timestamp()\n\n time_window = next(iter(self._iterate_time_windows(self.start)))\n\n if self.end_offset == 0:\n return time_window if time_window.end.timestamp() <= current_timestamp else None\n elif self.end_offset > 0:\n iterator = iter(self._iterate_time_windows(current_time))\n # first returned time window is time window of current time\n curr_window_plus_offset = next(iterator)\n for _ in range(self.end_offset):\n curr_window_plus_offset = next(iterator)\n return (\n time_window\n if time_window.end.timestamp() <= curr_window_plus_offset.start.timestamp()\n else None\n )\n else:\n # end offset < 0\n end_window = None\n iterator = iter(self._reverse_iterate_time_windows(current_time))\n for _ in range(abs(self.end_offset)):\n end_window = next(iterator)\n\n if end_window is None:\n check.failed("end_window should not be None")\n\n return (\n time_window if time_window.end.timestamp() <= end_window.start.timestamp() else None\n )\n\n def get_first_partition_window(\n self, current_time: Optional[datetime] = None\n ) -> Optional[TimeWindow]:\n current_time = cast(\n datetime,\n (\n pendulum.instance(current_time, tz=self.timezone)\n if current_time\n else pendulum.now(self.timezone)\n ),\n )\n return self._get_first_partition_window(current_time=current_time)\n\n @functools.lru_cache(maxsize=5)\n def _get_last_partition_window(self, *, current_time: datetime) -> Optional[TimeWindow]:\n if self.get_first_partition_window(current_time) is None:\n return None\n\n current_time = (\n pendulum.instance(current_time, tz=self.timezone)\n if current_time\n else pendulum.now(self.timezone)\n )\n\n if self.end and self.end < current_time:\n current_time = self.end\n\n if self.end_offset == 0:\n 
return next(iter(self._reverse_iterate_time_windows(current_time)))\n else:\n # TODO: make this efficient\n last_partition_key = super().get_last_partition_key(current_time)\n return (\n self.time_window_for_partition_key(last_partition_key)\n if last_partition_key\n else None\n )\n\n def get_last_partition_window(\n self, current_time: Optional[datetime] = None\n ) -> Optional[TimeWindow]:\n current_time = cast(\n datetime,\n (\n pendulum.instance(current_time, tz=self.timezone)\n if current_time\n else pendulum.now(self.timezone)\n ),\n )\n return self._get_last_partition_window(current_time=current_time)\n\n def get_first_partition_key(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Optional[str]:\n first_window = self.get_first_partition_window(current_time)\n if first_window is None:\n return None\n\n return first_window.start.strftime(self.fmt)\n\n def get_last_partition_key(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Optional[str]:\n last_window = self.get_last_partition_window(current_time)\n if last_window is None:\n return None\n\n return last_window.start.strftime(self.fmt)\n\n def end_time_for_partition_key(self, partition_key: str) -> datetime:\n return self.time_window_for_partition_key(partition_key).end\n\n @functools.lru_cache(maxsize=5)\n def get_partition_keys_in_time_window(self, time_window: TimeWindow) -> Sequence[str]:\n result: List[str] = []\n for partition_time_window in self._iterate_time_windows(time_window.start):\n if partition_time_window.start < time_window.end:\n result.append(partition_time_window.start.strftime(self.fmt))\n else:\n break\n return result\n\n def get_partition_key_range_for_time_window(self, time_window: TimeWindow) -> PartitionKeyRange:\n start_partition_key = self.get_partition_key_for_timestamp(time_window.start.timestamp())\n end_partition_key = self.get_partition_key_for_timestamp(\n cast(TimeWindow, self.get_prev_partition_window(time_window.end)).start.timestamp()\n )\n\n return PartitionKeyRange(start_partition_key, end_partition_key)\n\n def get_partition_keys_in_range(\n self,\n partition_key_range: PartitionKeyRange,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Sequence[str]:\n start_time = self.start_time_for_partition_key(partition_key_range.start)\n end_time = self.end_time_for_partition_key(partition_key_range.end)\n\n return self.get_partition_keys_in_time_window(TimeWindow(start_time, end_time))\n\n @public\n @property\n def schedule_type(self) -> Optional[ScheduleType]:\n """Optional[ScheduleType]: An enum representing the partition cadence (hourly, daily,\n weekly, or monthly).\n """\n if re.fullmatch(r"\\d+ \\* \\* \\* \\*", self.cron_schedule):\n return ScheduleType.HOURLY\n elif re.fullmatch(r"\\d+ \\d+ \\* \\* \\*", self.cron_schedule):\n return ScheduleType.DAILY\n elif re.fullmatch(r"\\d+ \\d+ \\* \\* \\d+", self.cron_schedule):\n return ScheduleType.WEEKLY\n elif re.fullmatch(r"\\d+ \\d+ \\d+ \\* \\*", self.cron_schedule):\n return ScheduleType.MONTHLY\n else:\n return None\n\n @public\n @property\n def minute_offset(self) -> int:\n """int: Number of minutes past the hour to "split" partitions. 
Defaults to 0.\n\n For example, returns 15 if each partition starts at 15 minutes past the hour.\n """\n match = re.fullmatch(r"(\\d+) (\\d+|\\*) (\\d+|\\*) (\\d+|\\*) (\\d+|\\*)", self.cron_schedule)\n if match is None:\n check.failed(f"{self.cron_schedule} has no minute offset")\n return int(match.groups()[0])\n\n @public\n @property\n def hour_offset(self) -> int:\n """int: Number of hours past 00:00 to "split" partitions. Defaults to 0.\n\n For example, returns 1 if each partition starts at 01:00.\n """\n match = re.fullmatch(r"(\\d+|\\*) (\\d+) (\\d+|\\*) (\\d+|\\*) (\\d+|\\*)", self.cron_schedule)\n if match is None:\n check.failed(f"{self.cron_schedule} has no hour offset")\n return int(match.groups()[1])\n\n @public\n @property\n def day_offset(self) -> int:\n """int: For a weekly or monthly partitions definition, returns the day to "split" partitions\n by. Each partition will start on this day, and end before this day in the following\n week/month. Returns 0 if the day_offset parameter is unset in the\n WeeklyPartitionsDefinition, MonthlyPartitionsDefinition, or the provided cron schedule.\n\n For weekly partitions, returns a value between 0 (representing Sunday) and 6 (representing\n Saturday). Providing a value of 1 means that a partition will exist weekly from Monday to\n the following Sunday.\n\n For monthly partitions, returns a value between 0 (the first day of the month) and 31 (the\n last possible day of the month).\n """\n schedule_type = self.schedule_type\n if schedule_type == ScheduleType.WEEKLY:\n match = re.fullmatch(r"(\\d+|\\*) (\\d+|\\*) (\\d+|\\*) (\\d+|\\*) (\\d+)", self.cron_schedule)\n if match is None:\n check.failed(f"{self.cron_schedule} has no day offset")\n return int(match.groups()[4])\n elif schedule_type == ScheduleType.MONTHLY:\n match = re.fullmatch(r"(\\d+|\\*) (\\d+|\\*) (\\d+) (\\d+|\\*) (\\d+|\\*)", self.cron_schedule)\n if match is None:\n check.failed(f"{self.cron_schedule} has no day offset")\n return int(match.groups()[2])\n else:\n check.failed(f"Unsupported schedule type for day_offset: {schedule_type}")\n\n
[docs] @public\n def get_cron_schedule(\n self,\n minute_of_hour: Optional[int] = None,\n hour_of_day: Optional[int] = None,\n day_of_week: Optional[int] = None,\n day_of_month: Optional[int] = None,\n ) -> str:\n """The schedule executes at the cadence specified by the partitioning, but may overwrite\n the minute/hour/day offset of the partitioning.\n\n This is useful e.g. if you have partitions that span midnight to midnight but you want to\n schedule a job that runs at 2 am.\n """\n if (\n minute_of_hour is None\n and hour_of_day is None\n and day_of_week is None\n and day_of_month is None\n ):\n return self.cron_schedule\n\n schedule_type = self.schedule_type\n if schedule_type is None:\n check.failed(\n f"{self.cron_schedule} does not support"\n " minute_of_hour/hour_of_day/day_of_week/day_of_month arguments"\n )\n\n minute_of_hour = cast(\n int,\n check.opt_int_param(minute_of_hour, "minute_of_hour", default=self.minute_offset),\n )\n\n if schedule_type == ScheduleType.HOURLY:\n check.invariant(\n hour_of_day is None, "Cannot set hour parameter with hourly partitions."\n )\n else:\n hour_of_day = cast(\n int, check.opt_int_param(hour_of_day, "hour_of_day", default=self.hour_offset)\n )\n\n if schedule_type == ScheduleType.DAILY:\n check.invariant(\n day_of_week is None, "Cannot set day of week parameter with daily partitions."\n )\n check.invariant(\n day_of_month is None, "Cannot set day of month parameter with daily partitions."\n )\n\n if schedule_type == ScheduleType.MONTHLY:\n default = self.day_offset or 1\n day_offset = check.opt_int_param(day_of_month, "day_of_month", default=default)\n elif schedule_type == ScheduleType.WEEKLY:\n default = self.day_offset or 0\n day_offset = check.opt_int_param(day_of_week, "day_of_week", default=default)\n else:\n day_offset = 0\n\n return cron_schedule_from_schedule_type_and_offsets(\n schedule_type,\n minute_offset=minute_of_hour,\n hour_offset=hour_of_day or 0,\n day_offset=day_offset,\n )
\n\n def _iterate_time_windows(self, start: datetime) -> Iterable[TimeWindow]:\n """Returns an infinite generator of time windows that start after the given start time."""\n start_timestamp = pendulum.instance(start, tz=self.timezone).timestamp()\n iterator = cron_string_iterator(\n start_timestamp=start_timestamp,\n cron_string=self.cron_schedule,\n execution_timezone=self.timezone,\n )\n prev_time = next(iterator)\n while prev_time.timestamp() < start_timestamp:\n prev_time = next(iterator)\n\n while True:\n next_time = next(iterator)\n yield TimeWindow(prev_time, next_time)\n prev_time = next_time\n\n def _reverse_iterate_time_windows(self, end: datetime) -> Iterable[TimeWindow]:\n """Returns an infinite generator of time windows that end before the given end time."""\n end_timestamp = pendulum.instance(end, tz=self.timezone).timestamp()\n iterator = reverse_cron_string_iterator(\n end_timestamp=end_timestamp,\n cron_string=self.cron_schedule,\n execution_timezone=self.timezone,\n )\n\n prev_time = next(iterator)\n while prev_time.timestamp() > end_timestamp:\n prev_time = next(iterator)\n\n while True:\n next_time = next(iterator)\n yield TimeWindow(next_time, prev_time)\n prev_time = next_time\n\n def get_partition_key_for_timestamp(self, timestamp: float, end_closed: bool = False) -> str:\n """Args:\n timestamp (float): Timestamp from the unix epoch, UTC.\n end_closed (bool): Whether the interval is closed at the end or at the beginning.\n """\n iterator = cron_string_iterator(\n timestamp, self.cron_schedule, self.timezone, start_offset=-1\n )\n # prev will be < timestamp\n prev = next(iterator)\n # prev_next will be >= timestamp\n prev_next = next(iterator)\n\n if end_closed or prev_next.timestamp() > timestamp:\n return prev.strftime(self.fmt)\n else:\n return prev_next.strftime(self.fmt)\n\n def less_than(self, partition_key1: str, partition_key2: str) -> bool:\n """Returns true if the partition_key1 is earlier than partition_key2."""\n return self.start_time_for_partition_key(\n partition_key1\n ) < self.start_time_for_partition_key(partition_key2)\n\n @property\n def partitions_subset_class(self) -> Type["PartitionsSubset"]:\n return TimeWindowPartitionsSubset\n\n def empty_subset(self) -> "PartitionsSubset":\n return self.partitions_subset_class.empty_subset(self)\n\n def is_valid_partition_key(self, partition_key: str) -> bool:\n try:\n partition_time = pendulum.instance(\n datetime.strptime(partition_key, self.fmt), tz=self.timezone\n )\n return partition_time >= self.start\n except ValueError:\n return False\n\n def get_serializable_unique_identifier(\n self, dynamic_partitions_store: Optional[DynamicPartitionsStore] = None\n ) -> str:\n return hashlib.sha1(self.__repr__().encode("utf-8")).hexdigest()\n\n def has_partition_key(\n self,\n partition_key: str,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> bool:\n return bool(self._get_validated_time_window_for_partition_key(partition_key, current_time))
\n\n\n
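For cadences that the schedule_type shorthand cannot express, TimeWindowPartitionsDefinition can be constructed directly from a cron schedule plus a partition-key format. A sketch with an illustrative six-hourly cadence:

.. code-block:: python

    from datetime import datetime

    from dagster import TimeWindowPartitionsDefinition

    # Partitions every six hours, keyed by the start of each window.
    six_hourly = TimeWindowPartitionsDefinition(
        start="2023-01-01-00:00",
        fmt="%Y-%m-%d-%H:%M",
        cron_schedule="0 */6 * * *",
    )

    keys = six_hourly.get_partition_keys(current_time=datetime(2023, 1, 2, 3, 0))
    # ['2023-01-01-00:00', '2023-01-01-06:00', '2023-01-01-12:00', '2023-01-01-18:00']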
[docs]class DailyPartitionsDefinition(TimeWindowPartitionsDefinition):\n """A set of daily partitions.\n\n The first partition in the set will start at the start_date at midnight. The last partition\n in the set will end before the current time, unless the end_offset argument is set to a\n positive number. If minute_offset and/or hour_offset are used, the start and end times of\n each partition will be hour_offset:minute_offset of each day.\n\n Args:\n start_date (Union[datetime.datetime, str]): The first date in the set of partitions. Can\n provide in either a datetime or string format.\n end_date (Union[datetime.datetime, str, None]): The last date(excluding) in the set of partitions.\n Default is None. Can provide in either a datetime or string format.\n minute_offset (int): Number of minutes past the hour to "split" the partition. Defaults\n to 0.\n hour_offset (int): Number of hours past 00:00 to "split" the partition. Defaults to 0.\n timezone (Optional[str]): The timezone in which each date should exist.\n Supported strings for timezones are the ones provided by the\n `IANA time zone database <https://www.iana.org/time-zones>` - e.g. "America/Los_Angeles".\n fmt (Optional[str]): The date format to use. Defaults to `%Y-%m-%d`.\n end_offset (int): Extends the partition set by a number of partitions equal to the value\n passed. If end_offset is 0 (the default), the last partition ends before the current\n time. If end_offset is 1, the second-to-last partition ends before the current time,\n and so on.\n\n .. code-block:: python\n\n DailyPartitionsDefinition(start_date="2022-03-12")\n # creates partitions (2022-03-12-00:00, 2022-03-13-00:00), (2022-03-13-00:00, 2022-03-14-00:00), ...\n\n DailyPartitionsDefinition(start_date="2022-03-12", minute_offset=15, hour_offset=16)\n # creates partitions (2022-03-12-16:15, 2022-03-13-16:15), (2022-03-13-16:15, 2022-03-14-16:15), ...\n """\n\n def __new__(\n cls,\n start_date: Union[datetime, str],\n end_date: Union[datetime, str, None] = None,\n minute_offset: int = 0,\n hour_offset: int = 0,\n timezone: Optional[str] = None,\n fmt: Optional[str] = None,\n end_offset: int = 0,\n ):\n _fmt = fmt or DEFAULT_DATE_FORMAT\n\n return super(DailyPartitionsDefinition, cls).__new__(\n cls,\n schedule_type=ScheduleType.DAILY,\n start=start_date,\n end=end_date,\n minute_offset=minute_offset,\n hour_offset=hour_offset,\n timezone=timezone,\n fmt=_fmt,\n end_offset=end_offset,\n )
\n\n\ndef wrap_time_window_run_config_fn(\n run_config_fn: Optional[Callable[[datetime, datetime], Mapping[str, Any]]],\n partitions_def: TimeWindowPartitionsDefinition,\n) -> Callable[[str], Mapping[str, Any]]:\n def _run_config_wrapper(key: str) -> Mapping[str, Any]:\n if not run_config_fn:\n return {}\n time_window = partitions_def.time_window_for_partition_key(key)\n return run_config_fn(time_window.start, time_window.end)\n\n return _run_config_wrapper\n\n\ndef wrap_time_window_tags_fn(\n tags_fn: Optional[Callable[[datetime, datetime], Mapping[str, str]]],\n partitions_def: TimeWindowPartitionsDefinition,\n) -> Callable[[str], Mapping[str, str]]:\n def _tag_wrapper(key: str) -> Mapping[str, str]:\n if not tags_fn:\n return {}\n time_window = partitions_def.time_window_for_partition_key(key)\n return tags_fn(time_window.start, time_window.end)\n\n return _tag_wrapper\n\n\n
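wrap_time_window_run_config_fn (and its tags counterpart) is the internal glue used by the partitioned-config decorators below: it adapts a user function written against (start, end) time windows into the partition_key -> run_config form that PartitionedConfig expects. A rough illustration, with a hypothetical op name and config shape:

.. code-block:: python

    from datetime import datetime
    from typing import Any, Mapping

    from dagster import DailyPartitionsDefinition
    from dagster._core.definitions.time_window_partitions import wrap_time_window_run_config_fn

    daily = DailyPartitionsDefinition(start_date="2023-01-01")


    def config_for_window(start: datetime, end: datetime) -> Mapping[str, Any]:
        return {"ops": {"ingest": {"config": {"start": start.isoformat(), "end": end.isoformat()}}}}


    key_based_fn = wrap_time_window_run_config_fn(config_for_window, daily)

    # Resolves "2023-01-05" to its [2023-01-05, 2023-01-06) window, then calls config_for_window.
    run_config = key_based_fn("2023-01-05")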
[docs]def daily_partitioned_config(\n start_date: Union[datetime, str],\n minute_offset: int = 0,\n hour_offset: int = 0,\n timezone: Optional[str] = None,\n fmt: Optional[str] = None,\n end_offset: int = 0,\n tags_for_partition_fn: Optional[Callable[[datetime, datetime], Mapping[str, str]]] = None,\n) -> Callable[\n [Callable[[datetime, datetime], Mapping[str, Any]]],\n PartitionedConfig[DailyPartitionsDefinition],\n]:\n """Defines run config over a set of daily partitions.\n\n The decorated function should accept a start datetime and end datetime, which represent the bounds\n of the date partition the config should delineate.\n\n The decorated function should return a run config dictionary.\n\n The resulting object created by this decorator can be provided to the config argument of a Job.\n The first partition in the set will start at the start_date at midnight. The last partition in\n the set will end before the current time, unless the end_offset argument is set to a positive\n number. If minute_offset and/or hour_offset are used, the start and end times of each partition\n will be hour_offset:minute_offset of each day.\n\n Args:\n start_date (Union[datetime.datetime, str]): The first date in the set of partitions. Can\n provide in either a datetime or string format.\n minute_offset (int): Number of minutes past the hour to "split" the partition. Defaults\n to 0.\n hour_offset (int): Number of hours past 00:00 to "split" the partition. Defaults to 0.\n timezone (Optional[str]): The timezone in which each date should exist.\n Supported strings for timezones are the ones provided by the\n `IANA time zone database <https://www.iana.org/time-zones>` - e.g. "America/Los_Angeles".\n fmt (Optional[str]): The date format to use. Defaults to `%Y-%m-%d`.\n end_offset (int): Extends the partition set by a number of partitions equal to the value\n passed. If end_offset is 0 (the default), the last partition ends before the current\n time. If end_offset is 1, the second-to-last partition ends before the current time,\n and so on.\n tags_for_partition_fn (Optional[Callable[[str], Mapping[str, str]]]): A function that\n accepts a partition time window and returns a dictionary of tags to attach to runs for\n that partition.\n\n .. code-block:: python\n\n @daily_partitioned_config(start_date="2022-03-12")\n # creates partitions (2022-03-12-00:00, 2022-03-13-00:00), (2022-03-13-00:00, 2022-03-14-00:00), ...\n\n @daily_partitioned_config(start_date="2022-03-12", minute_offset=15, hour_offset=16)\n # creates partitions (2022-03-12-16:15, 2022-03-13-16:15), (2022-03-13-16:15, 2022-03-14-16:15), ...\n """\n\n def inner(\n fn: Callable[[datetime, datetime], Mapping[str, Any]]\n ) -> PartitionedConfig[DailyPartitionsDefinition]:\n check.callable_param(fn, "fn")\n\n partitions_def = DailyPartitionsDefinition(\n start_date=start_date,\n minute_offset=minute_offset,\n hour_offset=hour_offset,\n timezone=timezone,\n fmt=fmt,\n end_offset=end_offset,\n )\n\n return PartitionedConfig(\n run_config_for_partition_key_fn=wrap_time_window_run_config_fn(fn, partitions_def),\n partitions_def=partitions_def,\n decorated_fn=fn,\n tags_for_partition_key_fn=wrap_time_window_tags_fn(\n tags_for_partition_fn, partitions_def\n ),\n )\n\n return inner
\n\n\n
[docs]class HourlyPartitionsDefinition(TimeWindowPartitionsDefinition):\n """A set of hourly partitions.\n\n The first partition in the set will start on the start_date at midnight. The last partition\n in the set will end before the current time, unless the end_offset argument is set to a\n positive number. If minute_offset is provided, the start and end times of each partition\n will be minute_offset past the hour.\n\n Args:\n start_date (Union[datetime.datetime, str]): The first date in the set of partitions. Can\n provide in either a datetime or string format.\n end_date (Union[datetime.datetime, str, None]): The last date (exclusive) in the set of partitions.\n Default is None. Can provide in either a datetime or string format.\n minute_offset (int): Number of minutes past the hour to "split" the partition. Defaults\n to 0.\n fmt (Optional[str]): The date format to use. Defaults to `%Y-%m-%d-%H:%M`.\n timezone (Optional[str]): The timezone in which each date should exist.\n Supported strings for timezones are the ones provided by the\n `IANA time zone database <https://www.iana.org/time-zones>` - e.g. "America/Los_Angeles".\n end_offset (int): Extends the partition set by a number of partitions equal to the value\n passed. If end_offset is 0 (the default), the last partition ends before the current\n time. If end_offset is 1, the second-to-last partition ends before the current time,\n and so on.\n\n .. code-block:: python\n\n HourlyPartitionsDefinition(start_date=datetime(2022, 3, 12))\n # creates partitions (2022-03-12-00:00, 2022-03-12-01:00), (2022-03-12-01:00, 2022-03-12-02:00), ...\n\n HourlyPartitionsDefinition(start_date=datetime(2022, 3, 12), minute_offset=15)\n # creates partitions (2022-03-12-00:15, 2022-03-12-01:15), (2022-03-12-01:15, 2022-03-12-02:15), ...\n """\n\n def __new__(\n cls,\n start_date: Union[datetime, str],\n end_date: Union[datetime, str, None] = None,\n minute_offset: int = 0,\n timezone: Optional[str] = None,\n fmt: Optional[str] = None,\n end_offset: int = 0,\n ):\n _fmt = fmt or DEFAULT_HOURLY_FORMAT_WITHOUT_TIMEZONE\n\n return super(HourlyPartitionsDefinition, cls).__new__(\n cls,\n schedule_type=ScheduleType.HOURLY,\n start=start_date,\n end=end_date,\n minute_offset=minute_offset,\n timezone=timezone,\n fmt=_fmt,\n end_offset=end_offset,\n )
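As a quick illustration of how this definition is consumed, the sketch below attaches it to a partitioned asset; the asset name and log message are illustrative only.

.. code-block:: python

    from datetime import datetime

    from dagster import HourlyPartitionsDefinition, asset

    hourly_partitions = HourlyPartitionsDefinition(start_date=datetime(2023, 1, 1))


    @asset(partitions_def=hourly_partitions)
    def hourly_events(context):
        # partition keys use the hourly default format, e.g. "2023-01-01-00:00"
        context.log.info(f"materializing partition {context.partition_key}")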
\n\n\n
[docs]def hourly_partitioned_config(\n start_date: Union[datetime, str],\n minute_offset: int = 0,\n timezone: Optional[str] = None,\n fmt: Optional[str] = None,\n end_offset: int = 0,\n tags_for_partition_fn: Optional[Callable[[datetime, datetime], Mapping[str, str]]] = None,\n) -> Callable[\n [Callable[[datetime, datetime], Mapping[str, Any]]],\n PartitionedConfig[HourlyPartitionsDefinition],\n]:\n """Defines run config over a set of hourly partitions.\n\n The decorated function should accept a start datetime and end datetime, which represent the date\n partition the config should delineate.\n\n The decorated function should return a run config dictionary.\n\n The resulting object created by this decorator can be provided to the config argument of a Job.\n The first partition in the set will start at the start_date at midnight. The last partition in\n the set will end before the current time, unless the end_offset argument is set to a positive\n number. If minute_offset is provided, the start and end times of each partition will be\n minute_offset past the hour.\n\n Args:\n start_date (Union[datetime.datetime, str]): The first date in the set of partitions. Can\n provide in either a datetime or string format.\n minute_offset (int): Number of minutes past the hour to "split" the partition. Defaults\n to 0.\n fmt (Optional[str]): The date format to use. Defaults to `%Y-%m-%d-%H:%M`.\n timezone (Optional[str]): The timezone in which each date should exist.\n Supported strings for timezones are the ones provided by the\n `IANA time zone database <https://www.iana.org/time-zones>` - e.g. "America/Los_Angeles".\n end_offset (int): Extends the partition set by a number of partitions equal to the value\n passed. If end_offset is 0 (the default), the last partition ends before the current\n time. If end_offset is 1, the second-to-last partition ends before the current time,\n and so on.\n tags_for_partition_fn (Optional[Callable[[datetime, datetime], Mapping[str, str]]]): A function that\n accepts a partition time window and returns a dictionary of tags to attach to runs for\n that partition.\n\n .. code-block:: python\n\n @hourly_partitioned_config(start_date=datetime(2022, 3, 12))\n # creates partitions (2022-03-12-00:00, 2022-03-12-01:00), (2022-03-12-01:00, 2022-03-12-02:00), ...\n\n @hourly_partitioned_config(start_date=datetime(2022, 3, 12), minute_offset=15)\n # creates partitions (2022-03-12-00:15, 2022-03-12-01:15), (2022-03-12-01:15, 2022-03-12-02:15), ...\n """\n\n def inner(\n fn: Callable[[datetime, datetime], Mapping[str, Any]]\n ) -> PartitionedConfig[HourlyPartitionsDefinition]:\n check.callable_param(fn, "fn")\n\n partitions_def = HourlyPartitionsDefinition(\n start_date=start_date,\n minute_offset=minute_offset,\n timezone=timezone,\n fmt=fmt,\n end_offset=end_offset,\n )\n return PartitionedConfig(\n run_config_for_partition_key_fn=wrap_time_window_run_config_fn(fn, partitions_def),\n partitions_def=partitions_def,\n decorated_fn=fn,\n tags_for_partition_key_fn=wrap_time_window_tags_fn(\n tags_for_partition_fn, partitions_def\n ),\n )\n\n return inner
\n\n\n
[docs]class MonthlyPartitionsDefinition(TimeWindowPartitionsDefinition):\n """A set of monthly partitions.\n\n The first partition in the set will start at the soonest first of the month after start_date\n at midnight. The last partition in the set will end before the current time, unless the\n end_offset argument is set to a positive number. If day_offset is provided, the start and\n end date of each partition will be day_offset. If minute_offset and/or hour_offset are used,\n the start and end times of each partition will be hour_offset:minute_offset of each day.\n\n Args:\n start_date (Union[datetime.datetime, str]): The first date in the set of partitions will be\n midnight on the soonest first of the month following start_date. Can provide in either a\n datetime or string format.\n end_date (Union[datetime.datetime, str, None]): The last date (exclusive) in the set of partitions.\n Default is None. Can provide in either a datetime or string format.\n minute_offset (int): Number of minutes past the hour to "split" the partition. Defaults\n to 0.\n hour_offset (int): Number of hours past 00:00 to "split" the partition. Defaults to 0.\n day_offset (int): Day of the month to "split" the partition. Defaults to 1.\n timezone (Optional[str]): The timezone in which each date should exist.\n Supported strings for timezones are the ones provided by the\n `IANA time zone database <https://www.iana.org/time-zones>` - e.g. "America/Los_Angeles".\n fmt (Optional[str]): The date format to use. Defaults to `%Y-%m-%d`.\n end_offset (int): Extends the partition set by a number of partitions equal to the value\n passed. If end_offset is 0 (the default), the last partition ends before the current\n time. If end_offset is 1, the second-to-last partition ends before the current time,\n and so on.\n\n .. code-block:: python\n\n MonthlyPartitionsDefinition(start_date="2022-03-12")\n # creates partitions (2022-04-01-00:00, 2022-05-01-00:00), (2022-05-01-00:00, 2022-06-01-00:00), ...\n\n MonthlyPartitionsDefinition(start_date="2022-03-12", minute_offset=15, hour_offset=3, day_offset=5)\n # creates partitions (2022-04-05-03:15, 2022-05-05-03:15), (2022-05-05-03:15, 2022-06-05-03:15), ...\n """\n\n def __new__(\n cls,\n start_date: Union[datetime, str],\n end_date: Union[datetime, str, None] = None,\n minute_offset: int = 0,\n hour_offset: int = 0,\n day_offset: int = 1,\n timezone: Optional[str] = None,\n fmt: Optional[str] = None,\n end_offset: int = 0,\n ):\n _fmt = fmt or DEFAULT_DATE_FORMAT\n\n return super(MonthlyPartitionsDefinition, cls).__new__(\n cls,\n schedule_type=ScheduleType.MONTHLY,\n start=start_date,\n end=end_date,\n minute_offset=minute_offset,\n hour_offset=hour_offset,\n day_offset=day_offset,\n timezone=timezone,\n fmt=_fmt,\n end_offset=end_offset,\n )
\n\n\n
[docs]def monthly_partitioned_config(\n start_date: Union[datetime, str],\n minute_offset: int = 0,\n hour_offset: int = 0,\n day_offset: int = 1,\n timezone: Optional[str] = None,\n fmt: Optional[str] = None,\n end_offset: int = 0,\n tags_for_partition_fn: Optional[Callable[[datetime, datetime], Mapping[str, str]]] = None,\n) -> Callable[\n [Callable[[datetime, datetime], Mapping[str, Any]]],\n PartitionedConfig[MonthlyPartitionsDefinition],\n]:\n """Defines run config over a set of monthly partitions.\n\n The decorated function should accept a start datetime and end datetime, which represent the date\n partition the config should delineate.\n\n The decorated function should return a run config dictionary.\n\n The resulting object created by this decorator can be provided to the config argument of a Job.\n The first partition in the set will start at midnight on the soonest first of the month after\n start_date. The last partition in the set will end before the current time, unless the\n end_offset argument is set to a positive number. If day_offset is provided, the start and end\n date of each partition will be day_offset. If minute_offset and/or hour_offset are used, the\n start and end times of each partition will be hour_offset:minute_offset of each day.\n\n Args:\n start_date (Union[datetime.datetime, str]): The first date in the set of partitions will be\n midnight on the soonest first of the month following start_date. Can provide in either a\n datetime or string format.\n minute_offset (int): Number of minutes past the hour to "split" the partition. Defaults\n to 0.\n hour_offset (int): Number of hours past 00:00 to "split" the partition. Defaults to 0.\n day_offset (int): Day of the month to "split" the partition. Defaults to 1.\n timezone (Optional[str]): The timezone in which each date should exist.\n Supported strings for timezones are the ones provided by the\n `IANA time zone database <https://www.iana.org/time-zones>` - e.g. "America/Los_Angeles".\n fmt (Optional[str]): The date format to use. Defaults to `%Y-%m-%d`.\n end_offset (int): Extends the partition set by a number of partitions equal to the value\n passed. If end_offset is 0 (the default), the last partition ends before the current\n time. If end_offset is 1, the second-to-last partition ends before the current time,\n and so on.\n tags_for_partition_fn (Optional[Callable[[datetime, datetime], Mapping[str, str]]]): A function that\n accepts a partition time window and returns a dictionary of tags to attach to runs for\n that partition.\n\n .. 
code-block:: python\n\n @monthly_partitioned_config(start_date="2022-03-12")\n # creates partitions (2022-04-01-00:00, 2022-05-01-00:00), (2022-05-01-00:00, 2022-06-01-00:00), ...\n\n @monthly_partitioned_config(start_date="2022-03-12", minute_offset=15, hour_offset=3, day_offset=5)\n # creates partitions (2022-04-05-03:15, 2022-05-05-03:15), (2022-05-05-03:15, 2022-06-05-03:15), ...\n """\n\n def inner(\n fn: Callable[[datetime, datetime], Mapping[str, Any]]\n ) -> PartitionedConfig[MonthlyPartitionsDefinition]:\n check.callable_param(fn, "fn")\n\n partitions_def = MonthlyPartitionsDefinition(\n start_date=start_date,\n minute_offset=minute_offset,\n hour_offset=hour_offset,\n day_offset=day_offset,\n timezone=timezone,\n fmt=fmt,\n end_offset=end_offset,\n )\n\n return PartitionedConfig(\n run_config_for_partition_key_fn=wrap_time_window_run_config_fn(fn, partitions_def),\n partitions_def=partitions_def,\n decorated_fn=fn,\n tags_for_partition_key_fn=wrap_time_window_tags_fn(\n tags_for_partition_fn, partitions_def\n ),\n )\n\n return inner
\n\n\n
[docs]class WeeklyPartitionsDefinition(TimeWindowPartitionsDefinition):\n """Defines a set of weekly partitions.\n\n The first partition in the set will start at the start_date. The last partition in the set will\n end before the current time, unless the end_offset argument is set to a positive number. If\n day_offset is provided, the start and end date of each partition will be day of the week\n corresponding to day_offset (0 indexed with Sunday as the start of the week). If\n minute_offset and/or hour_offset are used, the start and end times of each partition will be\n hour_offset:minute_offset of each day.\n\n Args:\n start_date (Union[datetime.datetime, str]): The first date in the set of partitions will be\n the Sunday at midnight following start_date. Can provide in either a datetime or string\n format.\n end_date (Union[datetime.datetime, str, None]): The last date (exclusive) in the set of partitions.\n Default is None. Can provide in either a datetime or string format.\n minute_offset (int): Number of minutes past the hour to "split" the partition. Defaults\n to 0.\n hour_offset (int): Number of hours past 00:00 to "split" the partition. Defaults to 0.\n day_offset (int): Day of the week to "split" the partition. Defaults to 0 (Sunday).\n timezone (Optional[str]): The timezone in which each date should exist.\n Supported strings for timezones are the ones provided by the\n `IANA time zone database <https://www.iana.org/time-zones>` - e.g. "America/Los_Angeles".\n fmt (Optional[str]): The date format to use. Defaults to `%Y-%m-%d`.\n end_offset (int): Extends the partition set by a number of partitions equal to the value\n passed. If end_offset is 0 (the default), the last partition ends before the current\n time. If end_offset is 1, the second-to-last partition ends before the current time,\n and so on.\n\n .. code-block:: python\n\n WeeklyPartitionsDefinition(start_date="2022-03-12")\n # creates partitions (2022-03-13-00:00, 2022-03-20-00:00), (2022-03-20-00:00, 2022-03-27-00:00), ...\n\n WeeklyPartitionsDefinition(start_date="2022-03-12", minute_offset=15, hour_offset=3, day_offset=6)\n # creates partitions (2022-03-12-03:15, 2022-03-19-03:15), (2022-03-19-03:15, 2022-03-26-03:15), ...\n """\n\n def __new__(\n cls,\n start_date: Union[datetime, str],\n end_date: Union[datetime, str, None] = None,\n minute_offset: int = 0,\n hour_offset: int = 0,\n day_offset: int = 0,\n timezone: Optional[str] = None,\n fmt: Optional[str] = None,\n end_offset: int = 0,\n ):\n _fmt = fmt or DEFAULT_DATE_FORMAT\n\n return super(WeeklyPartitionsDefinition, cls).__new__(\n cls,\n schedule_type=ScheduleType.WEEKLY,\n start=start_date,\n end=end_date,\n minute_offset=minute_offset,\n hour_offset=hour_offset,\n day_offset=day_offset,\n timezone=timezone,\n fmt=_fmt,\n end_offset=end_offset,\n )
\n\n\n
[docs]def weekly_partitioned_config(\n start_date: Union[datetime, str],\n minute_offset: int = 0,\n hour_offset: int = 0,\n day_offset: int = 0,\n timezone: Optional[str] = None,\n fmt: Optional[str] = None,\n end_offset: int = 0,\n tags_for_partition_fn: Optional[Callable[[datetime, datetime], Mapping[str, str]]] = None,\n) -> Callable[\n [Callable[[datetime, datetime], Mapping[str, Any]]],\n PartitionedConfig[WeeklyPartitionsDefinition],\n]:\n """Defines run config over a set of weekly partitions.\n\n The decorated function should accept a start datetime and end datetime, which represent the date\n partition the config should delineate.\n\n The decorated function should return a run config dictionary.\n\n The resulting object created by this decorator can be provided to the config argument of a Job.\n The first partition in the set will start at the start_date. The last partition in the set will\n end before the current time, unless the end_offset argument is set to a positive number. If\n day_offset is provided, the start and end date of each partition will be day of the week\n corresponding to day_offset (0 indexed with Sunday as the start of the week). If\n minute_offset and/or hour_offset are used, the start and end times of each partition will be\n hour_offset:minute_offset of each day.\n\n Args:\n start_date (Union[datetime.datetime, str]): The first date in the set of partitions will\n Sunday at midnight following start_date. Can provide in either a datetime or string\n format.\n minute_offset (int): Number of minutes past the hour to "split" the partition. Defaults\n to 0.\n hour_offset (int): Number of hours past 00:00 to "split" the partition. Defaults to 0.\n day_offset (int): Day of the week to "split" the partition. Defaults to 0 (Sunday).\n timezone (Optional[str]): The timezone in which each date should exist.\n Supported strings for timezones are the ones provided by the\n `IANA time zone database <https://www.iana.org/time-zones>` - e.g. "America/Los_Angeles".\n fmt (Optional[str]): The date format to use. Defaults to `%Y-%m-%d`.\n end_offset (int): Extends the partition set by a number of partitions equal to the value\n passed. If end_offset is 0 (the default), the last partition ends before the current\n time. If end_offset is 1, the second-to-last partition ends before the current time,\n and so on.\n tags_for_partition_fn (Optional[Callable[[str], Mapping[str, str]]]): A function that\n accepts a partition time window and returns a dictionary of tags to attach to runs for\n that partition.\n\n .. 
code-block:: python\n\n @weekly_partitioned_config(start_date="2022-03-12")\n # creates partitions (2022-03-13-00:00, 2022-03-20-00:00), (2022-03-20-00:00, 2022-03-27-00:00), ...\n\n @weekly_partitioned_config(start_date="2022-03-12", minute_offset=15, hour_offset=3, day_offset=6)\n # creates partitions (2022-03-12-03:15, 2022-03-19-03:15), (2022-03-19-03:15, 2022-03-26-03:15), ...\n """\n\n def inner(\n fn: Callable[[datetime, datetime], Mapping[str, Any]]\n ) -> PartitionedConfig[WeeklyPartitionsDefinition]:\n check.callable_param(fn, "fn")\n\n partitions_def = WeeklyPartitionsDefinition(\n start_date=start_date,\n minute_offset=minute_offset,\n hour_offset=hour_offset,\n day_offset=day_offset,\n timezone=timezone,\n fmt=fmt,\n end_offset=end_offset,\n )\n return PartitionedConfig(\n run_config_for_partition_key_fn=wrap_time_window_run_config_fn(fn, partitions_def),\n partitions_def=partitions_def,\n decorated_fn=fn,\n tags_for_partition_key_fn=wrap_time_window_tags_fn(\n tags_for_partition_fn, partitions_def\n ),\n )\n\n return inner
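The ``tags_for_partition_fn`` argument is easy to miss in the signature above; here is a hedged sketch of pairing it with a weekly config (the tag key and the empty run config are illustrative).

.. code-block:: python

    from datetime import datetime

    from dagster import weekly_partitioned_config


    def tags_for_week(start: datetime, end: datetime):
        # attached to every run launched for the partition, via wrap_time_window_tags_fn above
        return {"week_of": start.strftime("%Y-%m-%d")}


    @weekly_partitioned_config(start_date="2022-03-12", tags_for_partition_fn=tags_for_week)
    def weekly_config(start: datetime, end: datetime):
        return {}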
\n\n\nclass TimeWindowPartitionsSubset(PartitionsSubset):\n # Every time we change the serialization format, we should increment the version number.\n # This will ensure that we can gracefully degrade when deserializing old data.\n SERIALIZATION_VERSION = 1\n\n def __init__(\n self,\n partitions_def: TimeWindowPartitionsDefinition,\n num_partitions: int,\n included_time_windows: Optional[Sequence[TimeWindow]] = None,\n included_partition_keys: Optional[AbstractSet[str]] = None,\n ):\n self._partitions_def = check.inst_param(\n partitions_def, "partitions_def", TimeWindowPartitionsDefinition\n )\n self._included_time_windows = included_time_windows\n self._num_partitions = num_partitions\n\n check.param_invariant(\n not (included_partition_keys and included_time_windows),\n "Cannot specify both included_partition_keys and included_time_windows",\n )\n self._included_time_windows = check.opt_nullable_sequence_param(\n included_time_windows, "included_time_windows", of_type=TimeWindow\n )\n\n self._included_partition_keys = check.opt_nullable_set_param(\n included_partition_keys, "included_partition_keys", of_type=str\n )\n\n @property\n def included_time_windows(self) -> Sequence[TimeWindow]:\n if self._included_time_windows is None:\n result_time_windows, _ = self._add_partitions_to_time_windows(\n initial_windows=[],\n partition_keys=list(check.not_none(self._included_partition_keys)),\n validate=False,\n )\n self._included_time_windows = result_time_windows\n return self._included_time_windows\n\n def _get_partition_time_windows_not_in_subset(\n self,\n current_time: Optional[datetime] = None,\n ) -> Sequence[TimeWindow]:\n """Returns a list of partition time windows that are not in the subset.\n Each time window is a single partition.\n """\n first_tw = self._partitions_def.get_first_partition_window(current_time=current_time)\n last_tw = self._partitions_def.get_last_partition_window(current_time=current_time)\n\n if not first_tw or not last_tw:\n check.failed("No partitions found")\n\n if len(self.included_time_windows) == 0:\n return [TimeWindow(first_tw.start, last_tw.end)]\n\n time_windows = []\n if first_tw.start < self.included_time_windows[0].start:\n time_windows.append(TimeWindow(first_tw.start, self.included_time_windows[0].start))\n\n for i in range(len(self.included_time_windows) - 1):\n if self.included_time_windows[i].start >= last_tw.end:\n break\n if self.included_time_windows[i].end < last_tw.end:\n if self.included_time_windows[i + 1].start <= last_tw.end:\n time_windows.append(\n TimeWindow(\n self.included_time_windows[i].end,\n self.included_time_windows[i + 1].start,\n )\n )\n else:\n time_windows.append(\n TimeWindow(\n self.included_time_windows[i].end,\n last_tw.end,\n )\n )\n\n if last_tw.end > self.included_time_windows[-1].end:\n time_windows.append(TimeWindow(self.included_time_windows[-1].end, last_tw.end))\n\n return time_windows\n\n def get_partition_keys_not_in_subset(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Iterable[str]:\n partition_keys: List[str] = []\n for tw in self._get_partition_time_windows_not_in_subset(current_time):\n partition_keys.extend(self._partitions_def.get_partition_keys_in_time_window(tw))\n return partition_keys\n\n @public\n def get_partition_keys(self, current_time: Optional[datetime] = None) -> Iterable[str]:\n if self._included_partition_keys is None:\n return [\n pk\n for time_window in self.included_time_windows\n for pk in 
self._partitions_def.get_partition_keys_in_time_window(time_window)\n ]\n return list(self._included_partition_keys) if self._included_partition_keys else []\n\n def get_partition_key_ranges(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Sequence[PartitionKeyRange]:\n return [\n self._partitions_def.get_partition_key_range_for_time_window(window)\n for window in self.included_time_windows\n ]\n\n def _add_partitions_to_time_windows(\n self,\n initial_windows: Sequence[TimeWindow],\n partition_keys: Sequence[str],\n validate: bool = True,\n ) -> Tuple[Sequence[TimeWindow], int]:\n """Merges a set of partition keys into an existing set of time windows, returning the\n minimized set of time windows and the number of partitions added.\n """\n result_windows = [*initial_windows]\n time_windows = self._partitions_def.time_windows_for_partition_keys(\n frozenset(partition_keys), validate=validate\n )\n num_added_partitions = 0\n for window in sorted(time_windows):\n # go in reverse order because it's more common to add partitions at the end than the\n # beginning\n for i in reversed(range(len(result_windows))):\n included_window = result_windows[i]\n lt_end_of_range = window.start < included_window.end\n gte_start_of_range = window.start >= included_window.start\n\n if lt_end_of_range and gte_start_of_range:\n break\n\n if not lt_end_of_range:\n merge_with_range = included_window.end == window.start\n merge_with_later_range = i + 1 < len(result_windows) and (\n window.end == result_windows[i + 1].start\n )\n\n if merge_with_range and merge_with_later_range:\n result_windows[i] = TimeWindow(\n included_window.start, result_windows[i + 1].end\n )\n del result_windows[i + 1]\n elif merge_with_range:\n result_windows[i] = TimeWindow(included_window.start, window.end)\n elif merge_with_later_range:\n result_windows[i + 1] = TimeWindow(window.start, result_windows[i + 1].end)\n else:\n result_windows.insert(i + 1, window)\n\n num_added_partitions += 1\n break\n else:\n if result_windows and window.start == result_windows[0].start:\n result_windows[0] = TimeWindow(window.start, included_window.end) # type: ignore\n else:\n result_windows.insert(0, window)\n\n num_added_partitions += 1\n\n return result_windows, num_added_partitions\n\n def with_partition_keys(self, partition_keys: Iterable[str]) -> "TimeWindowPartitionsSubset":\n # if we are representing things as a static set of keys, continue doing so\n if self._included_partition_keys is not None:\n new_partitions = {*self._included_partition_keys, *partition_keys}\n return TimeWindowPartitionsSubset(\n self._partitions_def,\n num_partitions=len(new_partitions),\n included_partition_keys=new_partitions,\n )\n\n result_windows, added_partitions = self._add_partitions_to_time_windows(\n self.included_time_windows, list(partition_keys)\n )\n\n return TimeWindowPartitionsSubset(\n self._partitions_def,\n num_partitions=self._num_partitions + added_partitions,\n included_time_windows=result_windows,\n )\n\n @classmethod\n def from_serialized(\n cls, partitions_def: PartitionsDefinition, serialized: str\n ) -> "PartitionsSubset":\n if not isinstance(partitions_def, TimeWindowPartitionsDefinition):\n check.failed("Partitions definition must be a TimeWindowPartitionsDefinition")\n partitions_def = cast(TimeWindowPartitionsDefinition, partitions_def)\n\n loaded = json.loads(serialized)\n\n def tuples_to_time_windows(tuples):\n return [\n TimeWindow(\n 
pendulum.from_timestamp(tup[0], tz=partitions_def.timezone),\n pendulum.from_timestamp(tup[1], tz=partitions_def.timezone),\n )\n for tup in tuples\n ]\n\n if isinstance(loaded, list):\n # backwards compatibility\n time_windows = tuples_to_time_windows(loaded)\n num_partitions = sum(\n len(partitions_def.get_partition_keys_in_time_window(time_window))\n for time_window in time_windows\n )\n elif isinstance(loaded, dict) and (\n "version" not in loaded or loaded["version"] == cls.SERIALIZATION_VERSION\n ): # version 1\n time_windows = tuples_to_time_windows(loaded["time_windows"])\n num_partitions = loaded["num_partitions"]\n else:\n raise DagsterInvalidDeserializationVersionError(\n f"Attempted to deserialize partition subset with version {loaded.get('version')},"\n f" but only version {cls.SERIALIZATION_VERSION} is supported."\n )\n\n return TimeWindowPartitionsSubset(\n partitions_def, num_partitions=num_partitions, included_time_windows=time_windows\n )\n\n @classmethod\n def can_deserialize(\n cls,\n partitions_def: PartitionsDefinition,\n serialized: str,\n serialized_partitions_def_unique_id: Optional[str],\n serialized_partitions_def_class_name: Optional[str],\n ) -> bool:\n if serialized_partitions_def_unique_id:\n return (\n partitions_def.get_serializable_unique_identifier()\n == serialized_partitions_def_unique_id\n )\n\n if (\n serialized_partitions_def_class_name\n # note: all TimeWindowPartitionsDefinition subclasses will get serialized as raw\n # TimeWindowPartitionsDefinitions, so this class name check will not always pass,\n # hence the unique id check above\n and serialized_partitions_def_class_name != partitions_def.__class__.__name__\n ):\n return False\n\n data = json.loads(serialized)\n return isinstance(data, list) or (\n isinstance(data, dict)\n and data.get("time_windows") is not None\n and data.get("num_partitions") is not None\n )\n\n @classmethod\n def empty_subset(cls, partitions_def: PartitionsDefinition) -> "PartitionsSubset":\n if not isinstance(partitions_def, TimeWindowPartitionsDefinition):\n check.failed("Partitions definition must be a TimeWindowPartitionsDefinition")\n partitions_def = cast(TimeWindowPartitionsDefinition, partitions_def)\n return cls(partitions_def, 0, [], set())\n\n def serialize(self) -> str:\n return json.dumps(\n {\n "version": self.SERIALIZATION_VERSION,\n "time_windows": [\n (window.start.timestamp(), window.end.timestamp())\n for window in self.included_time_windows\n ],\n "num_partitions": self._num_partitions,\n }\n )\n\n @property\n def partitions_def(self) -> PartitionsDefinition:\n return self._partitions_def\n\n def __eq__(self, other):\n return (\n isinstance(other, TimeWindowPartitionsSubset)\n and self._partitions_def == other._partitions_def\n and (\n # faster comparison, but will not catch all cases\n (\n self._included_time_windows == other._included_time_windows\n and self._included_partition_keys == other._included_partition_keys\n )\n # slower comparison, catches all cases\n or self.included_time_windows == other.included_time_windows\n )\n )\n\n def __len__(self) -> int:\n return self._num_partitions\n\n def __contains__(self, partition_key: str) -> bool:\n if self._included_partition_keys is not None:\n return partition_key in self._included_partition_keys\n\n time_window = self._partitions_def.time_window_for_partition_key(partition_key)\n\n return any(\n time_window.start >= included_time_window.start\n and time_window.start < included_time_window.end\n for included_time_window in self.included_time_windows\n 
)\n\n def __repr__(self) -> str:\n return f"TimeWindowPartitionsSubset({self.get_partition_key_ranges()})"\n\n\nclass PartitionRangeStatus(Enum):\n MATERIALIZING = "MATERIALIZING"\n MATERIALIZED = "MATERIALIZED"\n FAILED = "FAILED"\n\n\nPARTITION_RANGE_STATUS_PRIORITY = [\n PartitionRangeStatus.MATERIALIZING,\n PartitionRangeStatus.FAILED,\n PartitionRangeStatus.MATERIALIZED,\n]\n\n\nclass PartitionTimeWindowStatus:\n def __init__(self, time_window: TimeWindow, status: PartitionRangeStatus):\n self.time_window = time_window\n self.status = status\n\n def __repr__(self):\n return f"({self.time_window.start} - {self.time_window.end}): {self.status.value}"\n\n def __eq__(self, other):\n return (\n isinstance(other, PartitionTimeWindowStatus)\n and self.time_window == other.time_window\n and self.status == other.status\n )\n\n\ndef _flatten(\n high_pri_time_windows: List[PartitionTimeWindowStatus],\n low_pri_time_windows: List[PartitionTimeWindowStatus],\n) -> List[PartitionTimeWindowStatus]:\n high_pri_time_windows = sorted(high_pri_time_windows, key=lambda t: t.time_window.start)\n low_pri_time_windows = sorted(low_pri_time_windows, key=lambda t: t.time_window.start)\n\n high_pri_idx = 0\n low_pri_idx = 0\n\n filtered_low_pri: List[PartitionTimeWindowStatus] = []\n\n # slice and dice the low pri time windows so there's no overlap with high pri\n while True:\n if low_pri_idx >= len(low_pri_time_windows):\n # reached end of materialized\n break\n if high_pri_idx >= len(high_pri_time_windows):\n # reached end of failed, add all remaining materialized bc there's no overlap\n filtered_low_pri.extend(low_pri_time_windows[low_pri_idx:])\n break\n\n low_pri_tw = low_pri_time_windows[low_pri_idx]\n high_pri_tw = high_pri_time_windows[high_pri_idx]\n\n if low_pri_tw.time_window.start < high_pri_tw.time_window.start:\n if low_pri_tw.time_window.end <= high_pri_tw.time_window.start:\n # low_pri_tw is entirely before high pri\n filtered_low_pri.append(low_pri_tw)\n low_pri_idx += 1\n else:\n # high pri cuts the low pri short\n filtered_low_pri.append(\n PartitionTimeWindowStatus(\n TimeWindow(\n low_pri_tw.time_window.start,\n high_pri_tw.time_window.start,\n ),\n low_pri_tw.status,\n )\n )\n\n if low_pri_tw.time_window.end > high_pri_tw.time_window.end:\n # the low pri time window will continue on the other end of the high pri\n # and get split in two. Modify low_pri[low_pri_idx] to be\n # the second half of the low pri time window. It will be added in the next iteration.\n # (don't add it now, because we need to check if it overlaps with the next high pri)\n low_pri_time_windows[low_pri_idx] = PartitionTimeWindowStatus(\n TimeWindow(high_pri_tw.time_window.end, low_pri_tw.time_window.end),\n low_pri_tw.status,\n )\n high_pri_idx += 1\n else:\n # the rest of the low pri time window is inside the high pri time window\n low_pri_idx += 1\n else:\n if low_pri_tw.time_window.start >= high_pri_tw.time_window.end:\n # high pri is entirely before low pri. The next high pri may overlap\n high_pri_idx += 1\n elif low_pri_tw.time_window.end <= high_pri_tw.time_window.end:\n # low pri is entirely within high pri, skip it\n low_pri_idx += 1\n else:\n # high pri cuts out the start of the low pri. It will continue on the other end.\n # Modify low_pri[low_pri_idx] to shorten the start. It will be added\n # in the next iteration. 
(don't add it now, because we need to check if it overlaps with the next high pri)\n low_pri_time_windows[low_pri_idx] = PartitionTimeWindowStatus(\n TimeWindow(high_pri_tw.time_window.end, low_pri_tw.time_window.end),\n low_pri_tw.status,\n )\n high_pri_idx += 1\n\n # combine the high pri windwos with the filtered low pri windows\n flattened_time_windows = high_pri_time_windows\n flattened_time_windows.extend(filtered_low_pri)\n flattened_time_windows.sort(key=lambda t: t.time_window.start)\n return flattened_time_windows\n\n\ndef fetch_flattened_time_window_ranges(\n subsets: Mapping[PartitionRangeStatus, TimeWindowPartitionsSubset]\n) -> Sequence[PartitionTimeWindowStatus]:\n """Given potentially overlapping subsets, return a flattened list of timewindows where the highest priority status wins\n on overlaps.\n """\n prioritized_subsets = sorted(\n [(status, subset) for status, subset in subsets.items()],\n key=lambda t: PARTITION_RANGE_STATUS_PRIORITY.index(t[0]),\n )\n\n # progressively add lower priority time windows to the list of higher priority time windows\n flattened_time_window_statuses = []\n for status, subset in prioritized_subsets:\n subset_time_window_statuses = [\n PartitionTimeWindowStatus(tw, status) for tw in subset.included_time_windows\n ]\n flattened_time_window_statuses = _flatten(\n flattened_time_window_statuses, subset_time_window_statuses\n )\n\n return flattened_time_window_statuses\n\n\ndef has_one_dimension_time_window_partitioning(\n partitions_def: Optional[PartitionsDefinition],\n) -> bool:\n from .multi_dimensional_partitions import MultiPartitionsDefinition\n\n if isinstance(partitions_def, TimeWindowPartitionsDefinition):\n return True\n elif isinstance(partitions_def, MultiPartitionsDefinition):\n time_window_dims = [\n dim\n for dim in partitions_def.partitions_defs\n if isinstance(dim.partitions_def, TimeWindowPartitionsDefinition)\n ]\n if len(time_window_dims) == 1:\n return True\n\n return False\n\n\ndef get_time_partitions_def(\n partitions_def: Optional[PartitionsDefinition],\n) -> Optional[TimeWindowPartitionsDefinition]:\n """For a given PartitionsDefinition, return the associated TimeWindowPartitionsDefinition if it\n exists.\n """\n from .multi_dimensional_partitions import MultiPartitionsDefinition\n\n if partitions_def is None:\n return None\n elif isinstance(partitions_def, TimeWindowPartitionsDefinition):\n return partitions_def\n elif isinstance(\n partitions_def, MultiPartitionsDefinition\n ) and has_one_dimension_time_window_partitioning(partitions_def):\n return cast(\n TimeWindowPartitionsDefinition, partitions_def.time_window_dimension.partitions_def\n )\n else:\n return None\n\n\ndef get_time_partition_key(\n partitions_def: Optional[PartitionsDefinition], partition_key: Optional[str]\n) -> str:\n from .multi_dimensional_partitions import MultiPartitionsDefinition\n\n if partitions_def is None or partition_key is None:\n check.failed(\n "Cannot get time partitions key from when partitions def is None or partition key is"\n " None"\n )\n elif isinstance(partitions_def, TimeWindowPartitionsDefinition):\n return partition_key\n elif isinstance(partitions_def, MultiPartitionsDefinition):\n return partitions_def.get_partition_key_from_str(partition_key).keys_by_dimension[\n partitions_def.time_window_dimension.name\n ]\n else:\n check.failed(f"Cannot get time partition from non-time partitions def {partitions_def}")\n
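To make the multi-dimensional branch of ``get_time_partitions_def`` concrete, here is a sketch (importing from the private module path, since these helpers are internal) of extracting the time dimension from a ``MultiPartitionsDefinition``; the dimension names are illustrative.

.. code-block:: python

    from dagster import (
        DailyPartitionsDefinition,
        MultiPartitionsDefinition,
        StaticPartitionsDefinition,
    )
    from dagster._core.definitions.time_window_partitions import get_time_partitions_def

    multi = MultiPartitionsDefinition(
        {
            "date": DailyPartitionsDefinition(start_date="2022-03-12"),
            "region": StaticPartitionsDefinition(["us", "eu"]),
        }
    )

    # exactly one dimension is time-windowed, so the daily definition is returned
    time_dim_def = get_time_partitions_def(multi)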
", "current_page_name": "_modules/dagster/_core/definitions/time_window_partitions", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.time_window_partitions"}, "unresolved_asset_job_definition": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.unresolved_asset_job_definition

\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom typing import TYPE_CHECKING, AbstractSet, Any, Mapping, NamedTuple, Optional, Sequence, Union\n\nimport dagster._check as check\nfrom dagster._annotations import deprecated\nfrom dagster._core.definitions import AssetKey\nfrom dagster._core.definitions.run_request import RunRequest\nfrom dagster._core.errors import DagsterInvalidDefinitionError\nfrom dagster._core.instance import DynamicPartitionsStore\n\nfrom .asset_layer import build_asset_selection_job\nfrom .config import ConfigMapping\nfrom .metadata import RawMetadataValue\n\nif TYPE_CHECKING:\n    from dagster._core.definitions import (\n        AssetSelection,\n        ExecutorDefinition,\n        HookDefinition,\n        JobDefinition,\n        PartitionedConfig,\n        PartitionsDefinition,\n        ResourceDefinition,\n    )\n    from dagster._core.definitions.asset_graph import InternalAssetGraph\n    from dagster._core.definitions.asset_selection import CoercibleToAssetSelection\n    from dagster._core.definitions.run_config import RunConfig\n\n\nclass UnresolvedAssetJobDefinition(\n    NamedTuple(\n        "_UnresolvedAssetJobDefinition",\n        [\n            ("name", str),\n            ("selection", "AssetSelection"),\n            (\n                "config",\n                Optional[Union[ConfigMapping, Mapping[str, Any], "PartitionedConfig"]],\n            ),\n            ("description", Optional[str]),\n            ("tags", Optional[Mapping[str, Any]]),\n            ("metadata", Optional[Mapping[str, RawMetadataValue]]),\n            ("partitions_def", Optional["PartitionsDefinition"]),\n            ("executor_def", Optional["ExecutorDefinition"]),\n            ("hooks", Optional[AbstractSet["HookDefinition"]]),\n        ],\n    )\n):\n    def __new__(\n        cls,\n        name: str,\n        selection: "AssetSelection",\n        config: Optional[\n            Union[ConfigMapping, Mapping[str, Any], "PartitionedConfig", "RunConfig"]\n        ] = None,\n        description: Optional[str] = None,\n        tags: Optional[Mapping[str, Any]] = None,\n        metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n        partitions_def: Optional["PartitionsDefinition"] = None,\n        executor_def: Optional["ExecutorDefinition"] = None,\n        hooks: Optional[AbstractSet["HookDefinition"]] = None,\n    ):\n        from dagster._core.definitions import (\n            AssetSelection,\n            ExecutorDefinition,\n            HookDefinition,\n            PartitionsDefinition,\n        )\n        from dagster._core.definitions.run_config import convert_config_input\n\n        return super(UnresolvedAssetJobDefinition, cls).__new__(\n            cls,\n            name=check.str_param(name, "name"),\n            selection=check.inst_param(selection, "selection", AssetSelection),\n            config=convert_config_input(config),\n            description=check.opt_str_param(description, "description"),\n            tags=check.opt_mapping_param(tags, "tags"),\n            metadata=check.opt_mapping_param(metadata, "metadata"),\n            partitions_def=check.opt_inst_param(\n                partitions_def, "partitions_def", PartitionsDefinition\n            ),\n            executor_def=check.opt_inst_param(executor_def, "executor_def", ExecutorDefinition),\n            hooks=check.opt_nullable_set_param(hooks, "hooks", of_type=HookDefinition),\n        )\n\n    @deprecated(\n        breaking_version="2.0.0",\n        
additional_warn_text="Directly instantiate `RunRequest(partition_key=...)` instead.",\n    )\n    def run_request_for_partition(\n        self,\n        partition_key: str,\n        run_key: Optional[str] = None,\n        tags: Optional[Mapping[str, str]] = None,\n        asset_selection: Optional[Sequence[AssetKey]] = None,\n        run_config: Optional[Mapping[str, Any]] = None,\n        current_time: Optional[datetime] = None,\n        dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n    ) -> RunRequest:\n        """Creates a RunRequest object for a run that processes the given partition.\n\n        Args:\n            partition_key: The key of the partition to request a run for.\n            run_key (Optional[str]): A string key to identify this launched run. For sensors, ensures that\n                only one run is created per run key across all sensor evaluations.  For schedules,\n                ensures that one run is created per tick, across failure recoveries. Passing in a `None`\n                value means that a run will always be launched per evaluation.\n            tags (Optional[Dict[str, str]]): A dictionary of tags (string key-value pairs) to attach\n                to the launched run.\n            run_config (Optional[Mapping[str, Any]]: Configuration for the run. If the job has\n                a :py:class:`PartitionedConfig`, this value will override replace the config\n                provided by it.\n            current_time (Optional[datetime]): Used to determine which time-partitions exist.\n                Defaults to now.\n            dynamic_partitions_store (Optional[DynamicPartitionsStore]): The DynamicPartitionsStore\n                object that is responsible for fetching dynamic partitions. Required when the\n                partitions definition is a DynamicPartitionsDefinition with a name defined. Users\n                can pass the DagsterInstance fetched via `context.instance` to this argument.\n\n        Returns:\n            RunRequest: an object that requests a run to process the given partition.\n        """\n        from dagster._core.definitions.partition import (\n            DynamicPartitionsDefinition,\n            PartitionedConfig,\n        )\n\n        if not self.partitions_def:\n            check.failed("Called run_request_for_partition on a non-partitioned job")\n\n        partitioned_config = PartitionedConfig.from_flexible_config(\n            self.config, self.partitions_def\n        )\n\n        if (\n            isinstance(self.partitions_def, DynamicPartitionsDefinition)\n            and self.partitions_def.name\n        ):\n            # Do not support using run_request_for_partition with dynamic partitions,\n            # since this requires querying the instance once per run request for the\n            # existent dynamic partitions\n            check.failed(\n                "run_request_for_partition is not supported for dynamic partitions. 
Instead, use"\n                " RunRequest(partition_key=...)"\n            )\n\n        self.partitions_def.validate_partition_key(\n            partition_key,\n            current_time=current_time,\n            dynamic_partitions_store=dynamic_partitions_store,\n        )\n\n        run_config = (\n            run_config\n            if run_config is not None\n            else partitioned_config.get_run_config_for_partition_key(partition_key)\n        )\n        run_request_tags = {\n            **(tags or {}),\n            **partitioned_config.get_tags_for_partition_key(partition_key),\n        }\n\n        return RunRequest(\n            job_name=self.name,\n            run_key=run_key,\n            run_config=run_config,\n            tags=run_request_tags,\n            asset_selection=asset_selection,\n            partition_key=partition_key,\n        )\n\n    def resolve(\n        self,\n        asset_graph: "InternalAssetGraph",\n        default_executor_def: Optional["ExecutorDefinition"] = None,\n        resource_defs: Optional[Mapping[str, "ResourceDefinition"]] = None,\n    ) -> "JobDefinition":\n        """Resolve this UnresolvedAssetJobDefinition into a JobDefinition."""\n        assets = asset_graph.assets\n        source_assets = asset_graph.source_assets\n        selected_asset_keys = self.selection.resolve(asset_graph)\n        selected_asset_checks = self.selection.resolve_checks(asset_graph)\n\n        asset_keys_by_partitions_def = defaultdict(set)\n        for asset_key in selected_asset_keys:\n            partitions_def = asset_graph.get_partitions_def(asset_key)\n            if partitions_def is not None:\n                asset_keys_by_partitions_def[partitions_def].add(asset_key)\n\n        if len(asset_keys_by_partitions_def) > 1:\n            keys_by_partitions_def_str = "\\n".join(\n                f"{partitions_def}: {asset_keys}"\n                for partitions_def, asset_keys in asset_keys_by_partitions_def.items()\n            )\n            raise DagsterInvalidDefinitionError(\n                f"Multiple partitioned assets exist in assets job '{self.name}'. 
Selected assets"\n                " must have the same partitions definitions, but the selected assets have"\n                f" different partitions definitions: \\n{keys_by_partitions_def_str}"\n            )\n\n        inferred_partitions_def = (\n            next(iter(asset_keys_by_partitions_def.keys()))\n            if asset_keys_by_partitions_def\n            else None\n        )\n        if (\n            inferred_partitions_def\n            and self.partitions_def != inferred_partitions_def\n            and self.partitions_def is not None\n        ):\n            raise DagsterInvalidDefinitionError(\n                f"Job '{self.name}' received a partitions_def of {self.partitions_def}, but the"\n                f" selected assets {next(iter(asset_keys_by_partitions_def.values()))} have a"\n                f" non-matching partitions_def of {inferred_partitions_def}"\n            )\n\n        return build_asset_selection_job(\n            name=self.name,\n            assets=assets,\n            asset_checks=asset_graph.asset_checks,\n            config=self.config,\n            source_assets=source_assets,\n            description=self.description,\n            tags=self.tags,\n            metadata=self.metadata,\n            asset_selection=selected_asset_keys,\n            asset_check_selection=selected_asset_checks,\n            partitions_def=self.partitions_def if self.partitions_def else inferred_partitions_def,\n            executor_def=self.executor_def or default_executor_def,\n            hooks=self.hooks,\n            resource_defs=resource_defs,\n        )\n\n\n
[docs]def define_asset_job(\n name: str,\n selection: Optional["CoercibleToAssetSelection"] = None,\n config: Optional[\n Union[ConfigMapping, Mapping[str, Any], "PartitionedConfig", "RunConfig"]\n ] = None,\n description: Optional[str] = None,\n tags: Optional[Mapping[str, Any]] = None,\n metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n partitions_def: Optional["PartitionsDefinition"] = None,\n executor_def: Optional["ExecutorDefinition"] = None,\n hooks: Optional[AbstractSet["HookDefinition"]] = None,\n) -> UnresolvedAssetJobDefinition:\n """Creates a definition of a job which will either materialize a selection of assets or observe\n a selection of source assets. This will only be resolved to a JobDefinition once placed in a\n code location.\n\n Args:\n name (str):\n The name for the job.\n selection (Union[str, Sequence[str], Sequence[AssetKey], Sequence[Union[AssetsDefinition, SourceAsset]], AssetSelection]):\n The assets that will be materialized or observed when the job is run.\n\n The selected assets must all be included in the assets that are passed to the assets\n argument of the Definitions object that this job is included on.\n\n The string "my_asset*" selects my_asset and all downstream assets within the code\n location. A list of strings represents the union of all assets selected by strings\n within the list.\n\n The selection will be resolved to a set of assets when the location is loaded. If the\n selection resolves to all source assets, the created job will perform source asset\n observations. If the selection resolves to all regular assets, the created job will\n materialize assets. If the selection resolves to a mixed set of source assets and\n regular assets, an error will be thrown.\n\n config:\n Describes how the Job is parameterized at runtime.\n\n If no value is provided, then the schema for the job's run config is a standard\n format based on its ops and resources.\n\n If a dictionary is provided, then it must conform to the standard config schema, and\n it will be used as the job's run config for the job whenever the job is executed.\n The values provided will be viewable and editable in the Dagster UI, so be\n careful with secrets.\n\n If a :py:class:`ConfigMapping` object is provided, then the schema for the job's run config is\n determined by the config mapping, and the ConfigMapping, which should return\n configuration in the standard format to configure the job.\n tags (Optional[Mapping[str, Any]]):\n Arbitrary information that will be attached to the execution of the Job.\n Values that are not strings will be json encoded and must meet the criteria that\n `json.loads(json.dumps(value)) == value`. These tag values may be overwritten by tag\n values provided at invocation time.\n metadata (Optional[Mapping[str, RawMetadataValue]]): Arbitrary metadata about the job.\n Keys are displayed string labels, and values are one of the following: string, float,\n int, JSON-serializable dict, JSON-serializable list, and one of the data classes\n returned by a MetadataValue static method.\n description (Optional[str]):\n A description for the Job.\n partitions_def (Optional[PartitionsDefinition]):\n Defines the set of partitions for this job. All AssetDefinitions selected for this job\n must have a matching PartitionsDefinition. If no PartitionsDefinition is provided, the\n PartitionsDefinition will be inferred from the selected AssetDefinitions.\n executor_def (Optional[ExecutorDefinition]):\n How this Job will be executed. 
Defaults to :py:class:`multi_or_in_process_executor`,\n which can be switched between multi-process and in-process modes of execution. The\n default mode of execution is multi-process.\n\n\n Returns:\n UnresolvedAssetJobDefinition: The job, which can be placed inside a code location.\n\n Examples:\n .. code-block:: python\n\n # A job that targets all assets in the code location:\n @asset\n def asset1():\n ...\n\n defs = Definitions(\n assets=[asset1],\n jobs=[define_asset_job("all_assets")],\n )\n\n # A job that targets a single asset\n @asset\n def asset1():\n ...\n\n defs = Definitions(\n assets=[asset1],\n jobs=[define_asset_job("all_assets", selection=[asset1])],\n )\n\n # A job that targets all the assets in a group:\n defs = Definitions(\n assets=assets,\n jobs=[define_asset_job("marketing_job", selection=AssetSelection.groups("marketing"))],\n )\n\n @observable_source_asset\n def source_asset():\n ...\n\n # A job that observes a source asset:\n defs = Definitions(\n assets=assets,\n jobs=[define_asset_job("observation_job", selection=[source_asset])],\n )\n\n # Resources are supplied to the assets, not the job:\n @asset(required_resource_keys={"slack_client"})\n def asset1():\n ...\n\n defs = Definitions(\n assets=[asset1],\n jobs=[define_asset_job("all_assets")],\n resources={"slack_client": prod_slack_client},\n )\n\n """\n from dagster._core.definitions import AssetSelection\n\n # convert string-based selections to AssetSelection objects\n if selection is None:\n resolved_selection = AssetSelection.all()\n else:\n resolved_selection = AssetSelection.from_coercible(selection)\n\n return UnresolvedAssetJobDefinition(\n name=name,\n selection=resolved_selection,\n config=config,\n description=description,\n tags=tags,\n metadata=metadata,\n partitions_def=partitions_def,\n executor_def=executor_def,\n hooks=hooks,\n )
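Complementing the examples in the docstring above, here is a sketch of the partition-inference behavior implemented in ``resolve``; the asset and job names are illustrative.

.. code-block:: python

    from dagster import DailyPartitionsDefinition, Definitions, asset, define_asset_job

    daily = DailyPartitionsDefinition(start_date="2022-03-12")


    @asset(partitions_def=daily)
    def events():
        ...


    # no partitions_def is passed here; it is inferred from the selected assets
    events_job = define_asset_job("events_job", selection=[events])

    defs = Definitions(assets=[events], jobs=[events_job])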
\n
", "current_page_name": "_modules/dagster/_core/definitions/unresolved_asset_job_definition", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.unresolved_asset_job_definition"}, "utils": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.utils

\nimport keyword\nimport os\nimport re\nfrom glob import glob\nfrom typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, cast\n\nimport yaml\n\nimport dagster._check as check\nimport dagster._seven as seven\nfrom dagster._core.errors import DagsterInvalidDefinitionError, DagsterInvariantViolationError\nfrom dagster._core.storage.tags import check_reserved_tags\nfrom dagster._utils.yaml_utils import merge_yaml_strings, merge_yamls\n\nDEFAULT_OUTPUT = "result"\nDEFAULT_GROUP_NAME = "default"  # asset group_name used when none is provided\nDEFAULT_IO_MANAGER_KEY = "io_manager"\n\nDISALLOWED_NAMES = set(\n    [\n        "context",\n        "conf",\n        "config",\n        "meta",\n        "arg_dict",\n        "dict",\n        "input_arg_dict",\n        "output_arg_dict",\n        "int",\n        "str",\n        "float",\n        "bool",\n        "input",\n        "output",\n        "type",\n    ]\n    + list(keyword.kwlist)  # just disallow all python keywords\n)\n\nVALID_NAME_REGEX_STR = r"^[A-Za-z0-9_]+$"\nVALID_NAME_REGEX = re.compile(VALID_NAME_REGEX_STR)\n\n\nclass NoValueSentinel:\n    """Sentinel value to distinguish unset from None."""\n\n\ndef has_valid_name_chars(name: str) -> bool:\n    return bool(VALID_NAME_REGEX.match(name))\n\n\ndef check_valid_name(name: str, allow_list: Optional[List[str]] = None) -> str:\n    check.str_param(name, "name")\n\n    if allow_list and name in allow_list:\n        return name\n\n    if name in DISALLOWED_NAMES:\n        raise DagsterInvalidDefinitionError(\n            f'"{name}" is not a valid name in Dagster. It conflicts with a Dagster or python'\n            " reserved keyword."\n        )\n\n    check_valid_chars(name)\n\n    check.invariant(is_valid_name(name))\n    return name\n\n\ndef check_valid_chars(name: str):\n    if not has_valid_name_chars(name):\n        raise DagsterInvalidDefinitionError(\n            f'"{name}" is not a valid name in Dagster. Names must be in regex'\n            f" {VALID_NAME_REGEX_STR}."\n        )\n\n\ndef is_valid_name(name: str) -> bool:\n    check.str_param(name, "name")\n\n    return name not in DISALLOWED_NAMES and has_valid_name_chars(name)\n\n\ndef _kv_str(key: object, value: object) -> str:\n    return f'{key}="{value!r}"'\n\n\ndef struct_to_string(name: str, **kwargs: object) -> str:\n    # Sort the kwargs to ensure consistent representations across Python versions\n    props_str = ", ".join([_kv_str(key, value) for key, value in sorted(kwargs.items())])\n    return f"{name}({props_str})"\n\n\ndef validate_tags(\n    tags: Optional[Mapping[str, Any]], allow_reserved_tags: bool = True\n) -> Mapping[str, str]:\n    valid_tags: Dict[str, str] = {}\n    for key, value in check.opt_mapping_param(tags, "tags", key_type=str).items():\n        if not isinstance(value, str):\n            valid = False\n            err_reason = f'Could not JSON encode value "{value}"'\n            str_val = None\n            try:\n                str_val = seven.json.dumps(value)\n                err_reason = (\n                    'JSON encoding "{json}" of value "{val}" is not equivalent to original value'\n                    .format(json=str_val, val=value)\n                )\n\n                valid = seven.json.loads(str_val) == value\n            except Exception:\n                pass\n\n            if not valid:\n                raise DagsterInvalidDefinitionError(\n                    f'Invalid value for tag "{key}", {err_reason}. 
Tag values must be strings '\n                    "or meet the constraint that json.loads(json.dumps(value)) == value."\n                )\n\n            valid_tags[key] = str_val  # type: ignore  # (possible none)\n        else:\n            valid_tags[key] = value\n\n    if not allow_reserved_tags:\n        check_reserved_tags(valid_tags)\n\n    return valid_tags\n\n\ndef validate_group_name(group_name: Optional[str]) -> str:\n    """Ensures a string name is valid and returns a default if no name provided."""\n    if group_name:\n        check_valid_chars(group_name)\n        return group_name\n    return DEFAULT_GROUP_NAME\n\n\n
[docs]def config_from_files(config_files: Sequence[str]) -> Mapping[str, Any]:\n """Constructs run config from YAML files.\n\n Args:\n config_files (List[str]): List of paths or glob patterns for yaml files\n to load and parse as the run config.\n\n Returns:\n Dict[str, Any]: A run config dictionary constructed from provided YAML files.\n\n Raises:\n FileNotFoundError: When a config file produces no results\n DagsterInvariantViolationError: When one of the YAML files is invalid and has a parse\n error.\n """\n config_files = check.opt_sequence_param(config_files, "config_files")\n\n filenames = []\n for file_glob in config_files or []:\n globbed_files = glob(file_glob)\n if not globbed_files:\n raise DagsterInvariantViolationError(\n f'File or glob pattern "{file_glob}" for "config_files" produced no results.'\n )\n\n filenames += [os.path.realpath(globbed_file) for globbed_file in globbed_files]\n\n try:\n run_config = merge_yamls(filenames)\n except yaml.YAMLError as err:\n raise DagsterInvariantViolationError(\n f"Encountered error attempting to parse yaml. Parsing files {filenames} "\n f"loaded by file/patterns {config_files}."\n ) from err\n\n return check.is_dict(cast(Dict[str, object], run_config), key_type=str)
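A small usage sketch, assuming hypothetical YAML paths: globs are expanded and the documents merged before being handed to the job as its default run config.

.. code-block:: python

    from dagster import config_from_files, job, op


    @op(config_schema={"limit": int})
    def fetch(context):
        context.log.info(f"limit={context.op_config['limit']}")


    # "run_config/base.yaml" and the override glob are illustrative paths
    @job(config=config_from_files(["run_config/base.yaml", "run_config/overrides/*.yaml"]))
    def configured_job():
        fetch()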
\n\n\n
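# Hedged usage sketch for config_from_files (the op, job, and YAML path below are
# hypothetical placeholders): the merged dict it returns can be passed straight to
# an execution API such as execute_in_process.
from dagster import config_from_files, job, op


@op(config_schema={"value": str})
def echo_value(context):
    context.log.info(context.op_config["value"])


@job
def echo_job():
    echo_value()


if __name__ == "__main__":
    # run_config.yaml is assumed to contain an `ops: echo_value: config: value: ...` block.
    echo_job.execute_in_process(run_config=config_from_files(["run_config.yaml"]))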
[docs]def config_from_yaml_strings(yaml_strings: Sequence[str]) -> Mapping[str, Any]:\n """Static constructor for run configs from YAML strings.\n\n Args:\n yaml_strings (List[str]): List of yaml strings to parse as the run config.\n\n Returns:\n Dict[Str, Any]: A run config dictionary constructed from the provided yaml strings\n\n Raises:\n DagsterInvariantViolationError: When one of the YAML documents is invalid and has a\n parse error.\n """\n yaml_strings = check.sequence_param(yaml_strings, "yaml_strings", of_type=str)\n\n try:\n run_config = merge_yaml_strings(yaml_strings)\n except yaml.YAMLError as err:\n raise DagsterInvariantViolationError(\n f"Encountered error attempting to parse yaml. Parsing YAMLs {yaml_strings} "\n ) from err\n\n return check.is_dict(cast(Dict[str, object], run_config), key_type=str)
\n\n\n
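# Hedged sketch for config_from_yaml_strings: the YAML documents are parsed and
# merged into a single run-config dict. The keys shown are illustrative only, and
# nothing is validated against a particular job at this point.
from dagster import config_from_yaml_strings

run_config = config_from_yaml_strings(
    [
        "ops:\n  echo_value:\n    config:\n      value: hello\n",
        "loggers:\n  console:\n    config:\n      log_level: DEBUG\n",
    ]
)
# run_config is a plain dict, e.g. run_config["ops"]["echo_value"]["config"]["value"] == "hello"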
[docs]def config_from_pkg_resources(pkg_resource_defs: Sequence[Tuple[str, str]]) -> Mapping[str, Any]:\n """Load a run config from a package resource, using :py:func:`pkg_resources.resource_string`.\n\n Example:\n .. code-block:: python\n\n config_from_pkg_resources(\n pkg_resource_defs=[\n ('dagster_examples.airline_demo.environments', 'local_base.yaml'),\n ('dagster_examples.airline_demo.environments', 'local_warehouse.yaml'),\n ],\n )\n\n\n Args:\n pkg_resource_defs (List[(str, str)]): List of pkg_resource modules/files to\n load as the run config.\n\n Returns:\n Dict[Str, Any]: A run config dictionary constructed from the provided yaml strings\n\n Raises:\n DagsterInvariantViolationError: When one of the YAML documents is invalid and has a\n parse error.\n """\n import pkg_resources # expensive, import only on use\n\n pkg_resource_defs = check.sequence_param(pkg_resource_defs, "pkg_resource_defs", of_type=tuple)\n\n try:\n yaml_strings = [\n pkg_resources.resource_string(*pkg_resource_def).decode("utf-8")\n for pkg_resource_def in pkg_resource_defs\n ]\n except (ModuleNotFoundError, FileNotFoundError, UnicodeDecodeError) as err:\n raise DagsterInvariantViolationError(\n "Encountered error attempting to parse yaml. Loading YAMLs from "\n f"package resources {pkg_resource_defs}."\n ) from err\n\n return config_from_yaml_strings(yaml_strings=yaml_strings)
\n
", "current_page_name": "_modules/dagster/_core/definitions/utils", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.utils"}, "version_strategy": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.definitions.version_strategy

\nimport hashlib\nimport inspect\nfrom abc import ABC, abstractmethod\nfrom typing import TYPE_CHECKING, Any, NamedTuple, Optional\n\nfrom dagster._annotations import public\n\nif TYPE_CHECKING:\n    from .op_definition import OpDefinition\n    from .resource_definition import ResourceDefinition\n\n\n
[docs]class OpVersionContext(NamedTuple):\n """Provides execution-time information for computing the version for an op.\n\n Attributes:\n op_def (OpDefinition): The definition of the op to compute a version for.\n op_config (Any): The parsed config to be passed to the op during execution.\n """\n\n op_def: "OpDefinition"\n op_config: Any
\n\n\n
[docs]class ResourceVersionContext(NamedTuple):\n """Provides execution-time information for computing the version for a resource.\n\n Attributes:\n resource_def (ResourceDefinition): The definition of the resource whose version will be computed.\n resource_config (Any): The parsed config to be passed to the resource during execution.\n """\n\n resource_def: "ResourceDefinition"\n resource_config: Any
\n\n\n
[docs]class VersionStrategy(ABC):\n    """Abstract class for defining a strategy to version ops and resources.\n\n    When subclassing, `get_op_version` must be implemented, and\n    `get_resource_version` can be optionally implemented.\n\n    `get_op_version` should ingest an OpVersionContext, and `get_resource_version` should ingest a\n    ResourceVersionContext. From these, each synthesizes a unique string called a `version`, which\n    will be tagged to outputs of that op in the job. Providing a `VersionStrategy` instance to a\n    job will enable memoization on that job, such that only steps whose outputs do not have an\n    up-to-date version will run.\n    """\n\n
[docs] @public\n @abstractmethod\n def get_op_version(self, context: OpVersionContext) -> str:\n """Computes a version for an op.\n\n Args:\n context (OpVersionContext): The context for computing the version.\n\n Returns:\n str: The version for the op.\n """\n raise NotImplementedError()
\n\n
[docs] @public\n def get_resource_version(self, context: ResourceVersionContext) -> Optional[str]:\n """Computes a version for a resource.\n\n Args:\n context (ResourceVersionContext): The context for computing the version.\n\n Returns:\n Optional[str]: The version for the resource. If None, the resource will not be\n memoized.\n """\n return None
\n\n\n
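# Hedged sketch of a minimal VersionStrategy subclass: only get_op_version is
# required, and get_resource_version falls back to the default above (returning
# None, so resources are not memoized). The version scheme here is illustrative.
from dagster import VersionStrategy


class ReleaseTagVersionStrategy(VersionStrategy):
    """Stamps every op output with the op name plus a hand-maintained release tag."""

    RELEASE_TAG = "2023.10"

    def get_op_version(self, context) -> str:
        return f"{context.op_def.name}-{self.RELEASE_TAG}"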
[docs]class SourceHashVersionStrategy(VersionStrategy):\n """VersionStrategy that checks for changes to the source code of ops and resources.\n\n Only checks for changes within the immediate body of the op/resource's\n decorated function (or compute function, if the op/resource was\n constructed directly from a definition).\n """\n\n def _get_source_hash(self, fn):\n code_as_str = inspect.getsource(fn)\n return hashlib.sha1(code_as_str.encode("utf-8")).hexdigest()\n\n
[docs] @public\n def get_op_version(self, context: OpVersionContext) -> str:\n """Computes a version for an op by hashing its source code.\n\n Args:\n context (OpVersionContext): The context for computing the version.\n\n Returns:\n str: The version for the op.\n """\n compute_fn = context.op_def.compute_fn\n if callable(compute_fn):\n return self._get_source_hash(compute_fn)\n else:\n return self._get_source_hash(compute_fn.decorated_fn)
\n\n
[docs] @public\n def get_resource_version(self, context: ResourceVersionContext) -> Optional[str]:\n """Computes a version for a resource by hashing its source code.\n\n Args:\n context (ResourceVersionContext): The context for computing the version.\n\n Returns:\n Optional[str]: The version for the resource. If None, the resource will not be\n memoized.\n """\n return self._get_source_hash(context.resource_def.resource_fn)
\n
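# Hedged usage sketch (assuming the job decorator's version_strategy argument):
# attaching SourceHashVersionStrategy lets re-executions skip steps whose computed
# version, here a hash of the op's source, is already up to date; memoization also
# depends on an IO manager that can report existing versioned outputs. The op is a
# placeholder.
from dagster import SourceHashVersionStrategy, job, op


@op
def build_report():
    return "report"


@job(version_strategy=SourceHashVersionStrategy())
def reporting_job():
    build_report()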
", "current_page_name": "_modules/dagster/_core/definitions/version_strategy", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.definitions.version_strategy"}}, "errors": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.errors

\n"""Core Dagster error classes.\n\nAll errors thrown by the Dagster framework inherit from :py:class:`~dagster.DagsterError`. Users\nshould not subclass this base class for their own exceptions.\n\nThere is another exception base class, :py:class:`~dagster.DagsterUserCodeExecutionError`, which is\nused by the framework in concert with the :py:func:`~dagster._core.errors.user_code_error_boundary`.\n\nDagster uses this construct to wrap user code into which it calls. User code can perform arbitrary\ncomputations and may itself throw exceptions. The error boundary catches these user code-generated\nexceptions, and then reraises them wrapped in a subclass of\n:py:class:`~dagster.DagsterUserCodeExecutionError`.\n\nThe wrapped exceptions include additional context for the original exceptions, injected by the\nDagster runtime.\n"""\n\nimport sys\nfrom contextlib import contextmanager\nfrom typing import TYPE_CHECKING, Any, Callable, Iterator, Optional, Type\n\nimport dagster._check as check\nfrom dagster._utils.interrupts import raise_interrupts_as\n\nif TYPE_CHECKING:\n    from dagster._core.log_manager import DagsterLogManager\n\n\nclass DagsterExecutionInterruptedError(BaseException):\n    """Pipeline execution was interrupted during the execution process.\n\n    Just like KeyboardInterrupt this inherits from BaseException\n    as to not be accidentally caught by code that catches Exception\n    and thus prevent the interpreter from exiting.\n    """\n\n\n
[docs]class DagsterError(Exception):\n """Base class for all errors thrown by the Dagster framework.\n\n Users should not subclass this base class for their own exceptions.\n """\n\n @property\n def is_user_code_error(self):\n """Returns true if this error is attributable to user code."""\n return False
\n\n\n
[docs]class DagsterInvalidDefinitionError(DagsterError):\n """Indicates that the rules for a definition have been violated by the user."""
\n\n\nclass DagsterInvalidObservationError(DagsterError):\n """Indicates that an invalid value was returned from a source asset observation function."""\n\n\n
[docs]class DagsterInvalidSubsetError(DagsterError):\n    """Indicates that a subset of a pipeline is invalid because either:\n    - One or more ops in the specified subset do not exist on the job.\n    - The subset produces an invalid job.\n    """
\n\n\nclass DagsterInvalidDeserializationVersionError(DagsterError):\n """Indicates that a serialized value has an unsupported version and cannot be deserialized."""\n\n\nPYTHONIC_CONFIG_ERROR_VERBIAGE = """\nThis config type can be a:\n - Python primitive type\n - int, float, bool, str, list\n - A Python Dict or List type containing other valid types\n - Custom data classes extending dagster.Config\n - A Pydantic discriminated union type (https://docs.pydantic.dev/usage/types/#discriminated-unions-aka-tagged-unions)\n"""\n\nPYTHONIC_RESOURCE_ADDITIONAL_TYPES = """\n\nIf this config type represents a resource dependency, its annotation must either:\n - Extend dagster.ConfigurableResource, dagster.ConfigurableIOManager, or\n - Be wrapped in a ResourceDependency annotation, e.g. ResourceDependency[{invalid_type_str}]\n"""\n\n\ndef _generate_pythonic_config_error_message(\n config_class: Optional[Type],\n field_name: Optional[str],\n invalid_type: Any,\n is_resource: bool = False,\n) -> str:\n invalid_type_name = getattr(invalid_type, "__name__", "<my type>")\n pythonic_config_error_verbiage = (\n PYTHONIC_CONFIG_ERROR_VERBIAGE + (PYTHONIC_RESOURCE_ADDITIONAL_TYPES if is_resource else "")\n ).format(invalid_type_str=invalid_type_name)\n\n return ("""\nError defining Dagster config class{config_class}{field_name}.\nUnable to resolve config type {invalid_type} to a supported Dagster config type.\n\n{PYTHONIC_CONFIG_ERROR_VERBIAGE}""").format(\n config_class=f" {config_class!r}" if config_class else "",\n field_name=f" on field '{field_name}'" if field_name else "",\n invalid_type=repr(invalid_type),\n PYTHONIC_CONFIG_ERROR_VERBIAGE=pythonic_config_error_verbiage,\n )\n\n\nclass DagsterInvalidPythonicConfigDefinitionError(DagsterError):\n """Indicates that you have attempted to construct a Pythonic config or resource class with an invalid value."""\n\n def __init__(\n self,\n config_class: Optional[Type],\n field_name: Optional[str],\n invalid_type: Any,\n is_resource: bool = False,\n **kwargs,\n ):\n self.invalid_type = invalid_type\n self.field_name = field_name\n self.config_class = config_class\n super(DagsterInvalidPythonicConfigDefinitionError, self).__init__(\n _generate_pythonic_config_error_message(\n config_class=config_class,\n field_name=field_name,\n invalid_type=invalid_type,\n is_resource=is_resource,\n ),\n **kwargs,\n )\n\n\nclass DagsterInvalidDagsterTypeInPythonicConfigDefinitionError(DagsterError):\n """Indicates that you have attempted to construct a Pythonic config or resource class with a DagsterType\n annotated field.\n """\n\n def __init__(\n self,\n config_class_name: str,\n field_name: Optional[str],\n **kwargs,\n ):\n self.field_name = field_name\n super(DagsterInvalidDagsterTypeInPythonicConfigDefinitionError, self).__init__(\n f"""Error defining Dagster config class '{config_class_name}' on field '{field_name}'. DagsterTypes cannot be used to annotate a config type. DagsterType is meant only for type checking and coercion in op and asset inputs and outputs.\n{PYTHONIC_CONFIG_ERROR_VERBIAGE}""",\n **kwargs,\n )\n\n\nCONFIG_ERROR_VERBIAGE = """\nThis value can be a:\n - Field\n - Python primitive types that resolve to dagster config types\n - int, float, bool, str, list.\n - A dagster config type: Int, Float, Bool, Array, Optional, Selector, Shape, Permissive, Map\n - A bare python dictionary, which is wrapped in Field(Shape(...)). 
Any values\n in the dictionary get resolved by the same rules, recursively.\n - A python list with a single entry that can resolve to a type, e.g. [int]\n"""\n\n\n
[docs]class DagsterInvalidConfigDefinitionError(DagsterError):\n """Indicates that you have attempted to construct a config with an invalid value.\n\n Acceptable values for config types are any of:\n 1. A Python primitive type that resolves to a Dagster config type\n (:py:class:`~python:int`, :py:class:`~python:float`, :py:class:`~python:bool`,\n :py:class:`~python:str`, or :py:class:`~python:list`).\n\n 2. A Dagster config type: :py:data:`~dagster.Int`, :py:data:`~dagster.Float`,\n :py:data:`~dagster.Bool`, :py:data:`~dagster.String`,\n :py:data:`~dagster.StringSource`, :py:data:`~dagster.Any`,\n :py:class:`~dagster.Array`, :py:data:`~dagster.Noneable`, :py:data:`~dagster.Enum`,\n :py:class:`~dagster.Selector`, :py:class:`~dagster.Shape`, or\n :py:class:`~dagster.Permissive`.\n\n 3. A bare python dictionary, which will be automatically wrapped in\n :py:class:`~dagster.Shape`. Values of the dictionary are resolved recursively\n according to the same rules.\n\n 4. A bare python list of length one which itself is config type.\n Becomes :py:class:`Array` with list element as an argument.\n\n 5. An instance of :py:class:`~dagster.Field`.\n """\n\n def __init__(self, original_root, current_value, stack, reason=None, **kwargs):\n self.original_root = original_root\n self.current_value = current_value\n self.stack = stack\n super(DagsterInvalidConfigDefinitionError, self).__init__(\n (\n "Error defining config. Original value passed: {original_root}. "\n "{stack_str}{current_value} "\n "cannot be resolved.{reason_str}"\n + CONFIG_ERROR_VERBIAGE\n ).format(\n original_root=repr(original_root),\n stack_str="Error at stack path :" + ":".join(stack) + ". " if stack else "",\n current_value=repr(current_value),\n reason_str=f" Reason: {reason}." if reason else "",\n ),\n **kwargs,\n )
\n\n\n
[docs]class DagsterInvariantViolationError(DagsterError):\n """Indicates the user has violated a well-defined invariant that can only be enforced\n at runtime.\n """
\n\n\n
[docs]class DagsterExecutionStepNotFoundError(DagsterError):\n """Thrown when the user specifies execution step keys that do not exist."""\n\n def __init__(self, *args, **kwargs):\n self.step_keys = check.list_param(kwargs.pop("step_keys"), "step_keys", str)\n super(DagsterExecutionStepNotFoundError, self).__init__(*args, **kwargs)
\n\n\nclass DagsterExecutionPlanSnapshotNotFoundError(DagsterError):\n """Thrown when an expected execution plan snapshot could not be found on a PipelineRun."""\n\n\n
[docs]class DagsterRunNotFoundError(DagsterError):\n """Thrown when a run cannot be found in run storage."""\n\n def __init__(self, *args, **kwargs):\n self.invalid_run_id = check.str_param(kwargs.pop("invalid_run_id"), "invalid_run_id")\n super(DagsterRunNotFoundError, self).__init__(*args, **kwargs)
\n\n\n
[docs]class DagsterStepOutputNotFoundError(DagsterError):\n """Indicates that previous step outputs required for an execution step to proceed are not\n available.\n """\n\n def __init__(self, *args, **kwargs):\n self.step_key = check.str_param(kwargs.pop("step_key"), "step_key")\n self.output_name = check.str_param(kwargs.pop("output_name"), "output_name")\n super(DagsterStepOutputNotFoundError, self).__init__(*args, **kwargs)
\n\n\n@contextmanager\ndef raise_execution_interrupts() -> Iterator[None]:\n with raise_interrupts_as(DagsterExecutionInterruptedError):\n yield\n\n\n
[docs]@contextmanager\ndef user_code_error_boundary(\n error_cls: Type["DagsterUserCodeExecutionError"],\n msg_fn: Callable[[], str],\n log_manager: Optional["DagsterLogManager"] = None,\n **kwargs: object,\n) -> Iterator[None]:\n """Wraps the execution of user-space code in an error boundary. This places a uniform\n policy around any user code invoked by the framework. This ensures that all user\n errors are wrapped in an exception derived from DagsterUserCodeExecutionError,\n and that the original stack trace of the user error is preserved, so that it\n can be reported without confusing framework code in the stack trace, if a\n tool author wishes to do so.\n\n Examples:\n .. code-block:: python\n\n with user_code_error_boundary(\n # Pass a class that inherits from DagsterUserCodeExecutionError\n DagsterExecutionStepExecutionError,\n # Pass a function that produces a message\n "Error occurred during step execution"\n ):\n call_user_provided_function()\n\n """\n check.callable_param(msg_fn, "msg_fn")\n check.class_param(error_cls, "error_cls", superclass=DagsterUserCodeExecutionError)\n\n with raise_execution_interrupts():\n if log_manager:\n log_manager.begin_python_log_capture()\n try:\n yield\n except DagsterError as de:\n # The system has thrown an error that is part of the user-framework contract\n raise de\n except Exception as e:\n # An exception has been thrown by user code and computation should cease\n # with the error reported further up the stack\n raise error_cls(\n msg_fn(), user_exception=e, original_exc_info=sys.exc_info(), **kwargs\n ) from e\n finally:\n if log_manager:\n log_manager.end_python_log_capture()
\n\n\n
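# Hedged sketch of how the boundary above is used: msg_fn must be a zero-argument
# callable (check.callable_param enforces this), and any extra kwargs are forwarded
# to the error class constructor. The step/op names below are hypothetical.
from dagster._core.errors import (
    DagsterExecutionStepExecutionError,
    user_code_error_boundary,
)


def run_user_step(user_fn):
    with user_code_error_boundary(
        DagsterExecutionStepExecutionError,
        lambda: "Error occurred while executing user step code",
        step_key="my_step",
        op_name="my_op",
        op_def_name="my_op",
    ):
        user_fn()


try:
    run_user_step(lambda: 1 / 0)
except DagsterExecutionStepExecutionError as err:
    original = err.user_exception  # the ZeroDivisionError raised by user code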
[docs]class DagsterUserCodeExecutionError(DagsterError):\n """This is the base class for any exception that is meant to wrap an\n :py:class:`~python:Exception` thrown by user code. It wraps that existing user code.\n The ``original_exc_info`` argument to the constructor is meant to be a tuple of the type\n returned by :py:func:`sys.exc_info <python:sys.exc_info>` at the call site of the constructor.\n\n Users should not subclass this base class for their own exceptions and should instead throw\n freely from user code. User exceptions will be automatically wrapped and rethrown.\n """\n\n def __init__(self, *args, **kwargs):\n # original_exc_info should be gotten from a sys.exc_info() call at the\n # callsite inside of the exception handler. this will allow consuming\n # code to *re-raise* the user error in it's original format\n # for cleaner error reporting that does not have framework code in it\n user_exception = check.inst_param(kwargs.pop("user_exception"), "user_exception", Exception)\n original_exc_info = check.tuple_param(kwargs.pop("original_exc_info"), "original_exc_info")\n\n check.invariant(original_exc_info[0] is not None)\n\n super(DagsterUserCodeExecutionError, self).__init__(args[0], *args[1:], **kwargs)\n\n self.user_exception = check.opt_inst_param(user_exception, "user_exception", Exception)\n self.original_exc_info = original_exc_info\n\n @property\n def is_user_code_error(self) -> bool:\n return True
\n\n\n
[docs]class DagsterTypeCheckError(DagsterUserCodeExecutionError):\n    """Indicates an error in the op type system at runtime. E.g. an op receives an\n    unexpected input, or produces an output that does not match the type of the output definition.\n    """
\n\n\nclass DagsterExecutionLoadInputError(DagsterUserCodeExecutionError):\n """Indicates an error occurred while loading an input for a step."""\n\n def __init__(self, *args, **kwargs):\n self.step_key = check.str_param(kwargs.pop("step_key"), "step_key")\n self.input_name = check.str_param(kwargs.pop("input_name"), "input_name")\n super(DagsterExecutionLoadInputError, self).__init__(*args, **kwargs)\n\n\nclass DagsterExecutionHandleOutputError(DagsterUserCodeExecutionError):\n """Indicates an error occurred while handling an output for a step."""\n\n def __init__(self, *args, **kwargs):\n self.step_key = check.str_param(kwargs.pop("step_key"), "step_key")\n self.output_name = check.str_param(kwargs.pop("output_name"), "output_name")\n super(DagsterExecutionHandleOutputError, self).__init__(*args, **kwargs)\n\n\n
[docs]class DagsterExecutionStepExecutionError(DagsterUserCodeExecutionError):\n """Indicates an error occurred while executing the body of an execution step."""\n\n def __init__(self, *args, **kwargs):\n self.step_key = check.str_param(kwargs.pop("step_key"), "step_key")\n self.op_name = check.str_param(kwargs.pop("op_name"), "op_name")\n self.op_def_name = check.str_param(kwargs.pop("op_def_name"), "op_def_name")\n super(DagsterExecutionStepExecutionError, self).__init__(*args, **kwargs)
\n\n\n
[docs]class DagsterResourceFunctionError(DagsterUserCodeExecutionError):\n """Indicates an error occurred while executing the body of the ``resource_fn`` in a\n :py:class:`~dagster.ResourceDefinition` during resource initialization.\n """
\n\n\n
[docs]class DagsterConfigMappingFunctionError(DagsterUserCodeExecutionError):\n """Indicates that an unexpected error occurred while executing the body of a config mapping\n function defined in a :py:class:`~dagster.JobDefinition` or `~dagster.GraphDefinition` during\n config parsing.\n """
\n\n\nclass DagsterTypeLoadingError(DagsterUserCodeExecutionError):\n    """Indicates that an unexpected error occurred while executing the body of a type load\n    function defined in a :py:class:`~dagster.DagsterTypeLoader` during loading of a custom type.\n    """\n\n\n
[docs]class DagsterUnknownResourceError(DagsterError, AttributeError):\n # inherits from AttributeError as it is raised within a __getattr__ call... used to support\n # object hasattr method\n """Indicates that an unknown resource was accessed in the body of an execution step. May often\n happen by accessing a resource in the compute function of an op without first supplying the\n op with the correct `required_resource_keys` argument.\n """\n\n def __init__(self, resource_name, *args, **kwargs):\n self.resource_name = check.str_param(resource_name, "resource_name")\n msg = (\n f"Unknown resource `{resource_name}`. Specify `{resource_name}` as a required resource "\n "on the compute / config function that accessed it."\n )\n super(DagsterUnknownResourceError, self).__init__(msg, *args, **kwargs)
\n\n\nclass DagsterInvalidInvocationError(DagsterError):\n """Indicates that an error has occurred when an op has been invoked, but before the actual\n core compute has been reached.\n """\n\n\n
[docs]class DagsterInvalidConfigError(DagsterError):\n """Thrown when provided config is invalid (does not type check against the relevant config\n schema).\n """\n\n def __init__(self, preamble, errors, config_value, *args, **kwargs):\n from dagster._config import EvaluationError\n\n check.str_param(preamble, "preamble")\n self.errors = check.list_param(errors, "errors", of_type=EvaluationError)\n self.config_value = config_value\n\n error_msg = preamble\n error_messages = []\n\n for i_error, error in enumerate(self.errors):\n error_messages.append(error.message)\n error_msg += f"\\n Error {i_error + 1}: {error.message}"\n\n self.message = error_msg\n self.error_messages = error_messages\n\n super(DagsterInvalidConfigError, self).__init__(error_msg, *args, **kwargs)
\n\n\n
[docs]class DagsterUnmetExecutorRequirementsError(DagsterError):\n """Indicates the resolved executor is incompatible with the state of other systems\n such as the :py:class:`~dagster._core.instance.DagsterInstance` or system storage configuration.\n """
\n\n\n
[docs]class DagsterSubprocessError(DagsterError):\n """An exception has occurred in one or more of the child processes dagster manages.\n This error forwards the message and stack trace for all of the collected errors.\n """\n\n def __init__(self, *args, **kwargs):\n from dagster._utils.error import SerializableErrorInfo\n\n self.subprocess_error_infos = check.list_param(\n kwargs.pop("subprocess_error_infos"), "subprocess_error_infos", SerializableErrorInfo\n )\n super(DagsterSubprocessError, self).__init__(*args, **kwargs)
\n\n\nclass DagsterUserCodeUnreachableError(DagsterError):\n """Dagster was unable to reach a user code server to fetch information about user code."""\n\n\nclass DagsterUserCodeProcessError(DagsterError):\n """An exception has occurred in a user code process that the host process raising this error\n was communicating with.\n """\n\n @staticmethod\n def from_error_info(error_info):\n from dagster._utils.error import SerializableErrorInfo\n\n check.inst_param(error_info, "error_info", SerializableErrorInfo)\n return DagsterUserCodeProcessError(\n error_info.to_string(), user_code_process_error_infos=[error_info]\n )\n\n def __init__(self, *args, **kwargs):\n from dagster._utils.error import SerializableErrorInfo\n\n self.user_code_process_error_infos = check.list_param(\n kwargs.pop("user_code_process_error_infos"),\n "user_code_process_error_infos",\n SerializableErrorInfo,\n )\n super(DagsterUserCodeProcessError, self).__init__(*args, **kwargs)\n\n\nclass DagsterMaxRetriesExceededError(DagsterError):\n """Raised when raise_on_error is true, and retries were exceeded, this error should be raised."""\n\n def __init__(self, *args, **kwargs):\n from dagster._utils.error import SerializableErrorInfo\n\n self.user_code_process_error_infos = check.list_param(\n kwargs.pop("user_code_process_error_infos"),\n "user_code_process_error_infos",\n SerializableErrorInfo,\n )\n super(DagsterMaxRetriesExceededError, self).__init__(*args, **kwargs)\n\n @staticmethod\n def from_error_info(error_info):\n from dagster._utils.error import SerializableErrorInfo\n\n check.inst_param(error_info, "error_info", SerializableErrorInfo)\n return DagsterMaxRetriesExceededError(\n error_info.to_string(), user_code_process_error_infos=[error_info]\n )\n\n\nclass DagsterCodeLocationNotFoundError(DagsterError):\n pass\n\n\nclass DagsterCodeLocationLoadError(DagsterError):\n def __init__(self, *args, **kwargs):\n from dagster._utils.error import SerializableErrorInfo\n\n self.load_error_infos = check.list_param(\n kwargs.pop("load_error_infos"),\n "load_error_infos",\n SerializableErrorInfo,\n )\n super(DagsterCodeLocationLoadError, self).__init__(*args, **kwargs)\n\n\nclass DagsterLaunchFailedError(DagsterError):\n """Indicates an error while attempting to launch a pipeline run."""\n\n def __init__(self, *args, **kwargs):\n from dagster._utils.error import SerializableErrorInfo\n\n self.serializable_error_info = check.opt_inst_param(\n kwargs.pop("serializable_error_info", None),\n "serializable_error_info",\n SerializableErrorInfo,\n )\n super(DagsterLaunchFailedError, self).__init__(*args, **kwargs)\n\n\nclass DagsterBackfillFailedError(DagsterError):\n """Indicates an error while attempting to launch a backfill."""\n\n def __init__(self, *args, **kwargs):\n from dagster._utils.error import SerializableErrorInfo\n\n self.serializable_error_info = check.opt_inst_param(\n kwargs.pop("serializable_error_info", None),\n "serializable_error_info",\n SerializableErrorInfo,\n )\n super(DagsterBackfillFailedError, self).__init__(*args, **kwargs)\n\n\nclass DagsterRunAlreadyExists(DagsterError):\n """Indicates that a pipeline run already exists in a run storage."""\n\n\nclass DagsterSnapshotDoesNotExist(DagsterError):\n """Indicates you attempted to create a pipeline run with a nonexistent snapshot id."""\n\n\nclass DagsterRunConflict(DagsterError):\n """Indicates that a conflicting pipeline run exists in a run storage."""\n\n\n
[docs]class DagsterTypeCheckDidNotPass(DagsterError):\n """Indicates that a type check failed.\n\n This is raised when ``raise_on_error`` is ``True`` in calls to the synchronous job and\n graph execution APIs (e.g. `graph.execute_in_process()`, `job.execute_in_process()` -- typically\n within a test), and a :py:class:`~dagster.DagsterType`'s type check fails by returning either\n ``False`` or an instance of :py:class:`~dagster.TypeCheck` whose ``success`` member is ``False``.\n """\n\n def __init__(self, description=None, metadata=None, dagster_type=None):\n from dagster import DagsterType\n from dagster._core.definitions.metadata import normalize_metadata\n\n super(DagsterTypeCheckDidNotPass, self).__init__(description)\n self.description = check.opt_str_param(description, "description")\n self.metadata = normalize_metadata(\n check.opt_mapping_param(metadata, "metadata", key_type=str)\n )\n self.dagster_type = check.opt_inst_param(dagster_type, "dagster_type", DagsterType)
\n\n\nclass DagsterAssetCheckFailedError(DagsterError):\n    """Indicates that an asset check failed."""\n\n\n
[docs]class DagsterEventLogInvalidForRun(DagsterError):\n """Raised when the event logs for a historical run are malformed or invalid."""\n\n def __init__(self, run_id):\n self.run_id = check.str_param(run_id, "run_id")\n super(DagsterEventLogInvalidForRun, self).__init__(\n f"Event logs invalid for run id {run_id}"\n )
\n\n\nclass ScheduleExecutionError(DagsterUserCodeExecutionError):\n """Errors raised in a user process during the execution of schedule."""\n\n\nclass SensorExecutionError(DagsterUserCodeExecutionError):\n """Errors raised in a user process during the execution of a sensor (or its job)."""\n\n\nclass PartitionExecutionError(DagsterUserCodeExecutionError):\n """Errors raised during the execution of user-provided functions of a partition set schedule."""\n\n\nclass DagsterInvalidAssetKey(DagsterError):\n """Error raised by invalid asset key."""\n\n\nclass DagsterInvalidMetadata(DagsterError):\n """Error raised by invalid metadata parameters."""\n\n\nclass HookExecutionError(DagsterUserCodeExecutionError):\n """Error raised during the execution of a user-defined hook."""\n\n\nclass RunStatusSensorExecutionError(DagsterUserCodeExecutionError):\n """Error raised during the execution of a user-defined run status sensor."""\n\n\nclass FreshnessPolicySensorExecutionError(DagsterUserCodeExecutionError):\n """Error raised during the execution of a user-defined freshness policy sensor."""\n\n\nclass DagsterImportError(DagsterError):\n """Import error raised while importing user-code."""\n\n\nclass JobError(DagsterUserCodeExecutionError):\n """Errors raised during the execution of user-provided functions for a defined Job."""\n\n\nclass DagsterUnknownStepStateError(DagsterError):\n """When job execution completes with steps in an unknown state."""\n\n\nclass DagsterObjectStoreError(DagsterError):\n """Errors during an object store operation."""\n\n\nclass DagsterInvalidPropertyError(DagsterError):\n """Indicates that an invalid property was accessed. May often happen by accessing a property\n that no longer exists after breaking changes.\n """\n\n\nclass DagsterHomeNotSetError(DagsterError):\n """The user has tried to use a command that requires an instance or invoke DagsterInstance.get()\n without setting DAGSTER_HOME env var.\n """\n\n\nclass DagsterUnknownPartitionError(DagsterError):\n """The user has tried to access run config for a partition name that does not exist."""\n\n\nclass DagsterUndefinedDataVersionError(DagsterError):\n """The user attempted to retrieve the most recent logical version for an asset, but no logical version is defined."""\n\n\nclass DagsterAssetBackfillDataLoadError(DagsterError):\n """Indicates that an asset backfill is now unloadable. May happen when (1) a code location containing\n targeted assets is unloadable or (2) and asset or an asset's partitions definition has been removed.\n """\n\n\nclass DagsterDefinitionChangedDeserializationError(DagsterError):\n """Indicates that a stored value can't be deserialized because the definition needed to interpret\n it has changed.\n """\n\n\nclass DagsterPipesExecutionError(DagsterError):\n """Indicates that an error occurred during the execution of an external process."""\n
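# Hedged illustration of the error taxonomy above: framework errors and wrapped
# user-code errors can be told apart via is_user_code_error, which is False on
# DagsterError itself and True on DagsterUserCodeExecutionError subclasses.
from dagster import DagsterError, DagsterInvariantViolationError

err = DagsterInvariantViolationError("example invariant violation")
assert isinstance(err, DagsterError)
assert not err.is_user_code_error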
", "current_page_name": "_modules/dagster/_core/errors", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.errors"}, "event_api": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.event_api

\nfrom datetime import datetime\nfrom typing import Callable, Mapping, NamedTuple, Optional, Sequence, Union\n\nfrom typing_extensions import TypeAlias\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr\nfrom dagster._core.definitions.events import AssetKey, AssetMaterialization, AssetObservation\nfrom dagster._core.errors import DagsterInvalidInvocationError\nfrom dagster._core.events import DagsterEventType\nfrom dagster._core.events.log import EventLogEntry\nfrom dagster._serdes import whitelist_for_serdes\n\nEventHandlerFn: TypeAlias = Callable[[EventLogEntry, str], None]\n\n\n
[docs]class RunShardedEventsCursor(NamedTuple):\n """Pairs an id-based event log cursor with a timestamp-based run cursor, for improved\n performance on run-sharded event log storages (e.g. the default SqliteEventLogStorage). For\n run-sharded storages, the id field is ignored, since they may not be unique across shards.\n """\n\n id: int\n run_updated_after: datetime
\n\n\n
[docs]@whitelist_for_serdes\nclass EventLogRecord(NamedTuple):\n """Internal representation of an event record, as stored in a\n :py:class:`~dagster._core.storage.event_log.EventLogStorage`.\n\n Users should not instantiate this class directly.\n """\n\n storage_id: PublicAttr[int]\n event_log_entry: PublicAttr[EventLogEntry]\n\n @property\n def run_id(self) -> str:\n return self.event_log_entry.run_id\n\n @property\n def timestamp(self) -> float:\n return self.event_log_entry.timestamp\n\n @property\n def asset_key(self) -> Optional[AssetKey]:\n dagster_event = self.event_log_entry.dagster_event\n if dagster_event:\n return dagster_event.asset_key\n\n return None\n\n @property\n def partition_key(self) -> Optional[str]:\n dagster_event = self.event_log_entry.dagster_event\n if dagster_event:\n return dagster_event.partition\n\n return None\n\n @property\n def asset_materialization(self) -> Optional[AssetMaterialization]:\n return self.event_log_entry.asset_materialization\n\n @property\n def asset_observation(self) -> Optional[AssetObservation]:\n return self.event_log_entry.asset_observation
\n\n\n
[docs]@whitelist_for_serdes\nclass EventRecordsFilter(\n NamedTuple(\n "_EventRecordsFilter",\n [\n ("event_type", DagsterEventType),\n ("asset_key", Optional[AssetKey]),\n ("asset_partitions", Optional[Sequence[str]]),\n ("after_cursor", Optional[Union[int, RunShardedEventsCursor]]),\n ("before_cursor", Optional[Union[int, RunShardedEventsCursor]]),\n ("after_timestamp", Optional[float]),\n ("before_timestamp", Optional[float]),\n ("storage_ids", Optional[Sequence[int]]),\n ("tags", Optional[Mapping[str, Union[str, Sequence[str]]]]),\n ],\n )\n):\n """Defines a set of filter fields for fetching a set of event log entries or event log records.\n\n Args:\n event_type (DagsterEventType): Filter argument for dagster event type\n asset_key (Optional[AssetKey]): Asset key for which to get asset materialization event\n entries / records.\n asset_partitions (Optional[List[str]]): Filter parameter such that only asset\n events with a partition value matching one of the provided values. Only\n valid when the `asset_key` parameter is provided.\n after_cursor (Optional[Union[int, RunShardedEventsCursor]]): Filter parameter such that only\n records with storage_id greater than the provided value are returned. Using a\n run-sharded events cursor will result in a significant performance gain when run against\n a SqliteEventLogStorage implementation (which is run-sharded)\n before_cursor (Optional[Union[int, RunShardedEventsCursor]]): Filter parameter such that\n records with storage_id less than the provided value are returned. Using a run-sharded\n events cursor will result in a significant performance gain when run against\n a SqliteEventLogStorage implementation (which is run-sharded)\n after_timestamp (Optional[float]): Filter parameter such that only event records for\n events with timestamp greater than the provided value are returned.\n before_timestamp (Optional[float]): Filter parameter such that only event records for\n events with timestamp less than the provided value are returned.\n """\n\n def __new__(\n cls,\n event_type: DagsterEventType,\n asset_key: Optional[AssetKey] = None,\n asset_partitions: Optional[Sequence[str]] = None,\n after_cursor: Optional[Union[int, RunShardedEventsCursor]] = None,\n before_cursor: Optional[Union[int, RunShardedEventsCursor]] = None,\n after_timestamp: Optional[float] = None,\n before_timestamp: Optional[float] = None,\n storage_ids: Optional[Sequence[int]] = None,\n tags: Optional[Mapping[str, Union[str, Sequence[str]]]] = None,\n ):\n check.opt_sequence_param(asset_partitions, "asset_partitions", of_type=str)\n check.inst_param(event_type, "event_type", DagsterEventType)\n\n tags = check.opt_mapping_param(tags, "tags", key_type=str)\n if tags and event_type is not DagsterEventType.ASSET_MATERIALIZATION:\n raise DagsterInvalidInvocationError(\n "Can only filter by tags for asset materialization events"\n )\n\n # type-ignores work around mypy type inference bug\n return super(EventRecordsFilter, cls).__new__(\n cls,\n event_type=event_type,\n asset_key=check.opt_inst_param(asset_key, "asset_key", AssetKey),\n asset_partitions=asset_partitions,\n after_cursor=check.opt_inst_param(\n after_cursor, "after_cursor", (int, RunShardedEventsCursor)\n ),\n before_cursor=check.opt_inst_param(\n before_cursor, "before_cursor", (int, RunShardedEventsCursor)\n ),\n after_timestamp=check.opt_float_param(after_timestamp, "after_timestamp"),\n before_timestamp=check.opt_float_param(before_timestamp, "before_timestamp"),\n 
storage_ids=check.opt_nullable_sequence_param(storage_ids, "storage_ids", of_type=int),\n tags=check.opt_mapping_param(tags, "tags", key_type=str),\n )
\n
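# Hedged usage sketch for EventRecordsFilter (the asset key and limit are
# illustrative): DagsterInstance.get_event_records accepts a filter like this to
# page through materialization records, optionally resuming from a storage-id
# cursor (or a RunShardedEventsCursor on run-sharded storages).
from dagster import AssetKey, DagsterEventType, DagsterInstance, EventRecordsFilter

instance = DagsterInstance.get()
records = instance.get_event_records(
    EventRecordsFilter(
        event_type=DagsterEventType.ASSET_MATERIALIZATION,
        asset_key=AssetKey("my_asset"),
    ),
    limit=10,
)
for record in records:
    # Each EventLogRecord pairs a storage_id with the underlying event log entry.
    print(record.storage_id, record.asset_key, record.timestamp)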
", "current_page_name": "_modules/dagster/_core/event_api", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.event_api"}, "events": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.events

\n"""Structured representations of system events."""\nimport logging\nimport os\nimport sys\nfrom enum import Enum\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Dict,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Tuple,\n    Union,\n    cast,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.definitions import (\n    AssetKey,\n    AssetMaterialization,\n    AssetObservation,\n    ExpectationResult,\n    HookDefinition,\n    NodeHandle,\n)\nfrom dagster._core.definitions.asset_check_evaluation import (\n    AssetCheckEvaluation,\n    AssetCheckEvaluationPlanned,\n)\nfrom dagster._core.definitions.events import AssetLineageInfo, ObjectStoreOperationType\nfrom dagster._core.definitions.metadata import (\n    MetadataFieldSerializer,\n    MetadataValue,\n    RawMetadataValue,\n    normalize_metadata,\n)\nfrom dagster._core.errors import HookExecutionError\nfrom dagster._core.execution.context.system import IPlanContext, IStepContext, StepExecutionContext\nfrom dagster._core.execution.plan.handle import ResolvedFromDynamicStepHandle, StepHandle\nfrom dagster._core.execution.plan.inputs import StepInputData\nfrom dagster._core.execution.plan.objects import StepFailureData, StepRetryData, StepSuccessData\nfrom dagster._core.execution.plan.outputs import StepOutputData\nfrom dagster._core.log_manager import DagsterLogManager\nfrom dagster._core.storage.captured_log_manager import CapturedLogContext\nfrom dagster._core.storage.dagster_run import DagsterRunStatus\nfrom dagster._serdes import (\n    NamedTupleSerializer,\n    whitelist_for_serdes,\n)\nfrom dagster._serdes.serdes import UnpackContext\nfrom dagster._utils.error import SerializableErrorInfo, serializable_error_info_from_exc_info\nfrom dagster._utils.timing import format_duration\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.events import ObjectStoreOperation\n    from dagster._core.execution.plan.plan import ExecutionPlan\n    from dagster._core.execution.plan.step import StepKind\n\n\nEventSpecificData = Union[\n    StepOutputData,\n    StepFailureData,\n    StepSuccessData,\n    "StepMaterializationData",\n    "StepExpectationResultData",\n    StepInputData,\n    "EngineEventData",\n    "HookErroredData",\n    StepRetryData,\n    "JobFailureData",\n    "JobCanceledData",\n    "ObjectStoreOperationResultData",\n    "HandledOutputData",\n    "LoadedInputData",\n    "ComputeLogsCaptureData",\n    "AssetObservationData",\n    "AssetMaterializationPlannedData",\n    "AssetCheckEvaluation",\n    "AssetCheckEvaluationPlanned",\n]\n\n\n
[docs]class DagsterEventType(str, Enum):\n """The types of events that may be yielded by op and job execution."""\n\n STEP_OUTPUT = "STEP_OUTPUT"\n STEP_INPUT = "STEP_INPUT"\n STEP_FAILURE = "STEP_FAILURE"\n STEP_START = "STEP_START"\n STEP_SUCCESS = "STEP_SUCCESS"\n STEP_SKIPPED = "STEP_SKIPPED"\n\n # The process carrying out step execution is starting/started. Shown as a\n # marker start/end in the Dagster UI.\n STEP_WORKER_STARTING = "STEP_WORKER_STARTING"\n STEP_WORKER_STARTED = "STEP_WORKER_STARTED"\n\n # Resource initialization for execution has started/succeede/failed. Shown\n # as a marker start/end in the Dagster UI.\n RESOURCE_INIT_STARTED = "RESOURCE_INIT_STARTED"\n RESOURCE_INIT_SUCCESS = "RESOURCE_INIT_SUCCESS"\n RESOURCE_INIT_FAILURE = "RESOURCE_INIT_FAILURE"\n\n STEP_UP_FOR_RETRY = "STEP_UP_FOR_RETRY" # "failed" but want to retry\n STEP_RESTARTED = "STEP_RESTARTED"\n\n ASSET_MATERIALIZATION = "ASSET_MATERIALIZATION"\n ASSET_MATERIALIZATION_PLANNED = "ASSET_MATERIALIZATION_PLANNED"\n ASSET_OBSERVATION = "ASSET_OBSERVATION"\n STEP_EXPECTATION_RESULT = "STEP_EXPECTATION_RESULT"\n ASSET_CHECK_EVALUATION_PLANNED = "ASSET_CHECK_EVALUATION_PLANNED"\n ASSET_CHECK_EVALUATION = "ASSET_CHECK_EVALUATION"\n\n # We want to display RUN_* events in the Dagster UI and in our LogManager output, but in order to\n # support backcompat for our storage layer, we need to keep the persisted value to be strings\n # of the form "PIPELINE_*". We may have user code that pass in the DagsterEventType\n # enum values into storage APIs (like get_event_records, which takes in an EventRecordsFilter).\n RUN_ENQUEUED = "PIPELINE_ENQUEUED"\n RUN_DEQUEUED = "PIPELINE_DEQUEUED"\n RUN_STARTING = "PIPELINE_STARTING" # Launch is happening, execution hasn't started yet\n RUN_START = "PIPELINE_START" # Execution has started\n RUN_SUCCESS = "PIPELINE_SUCCESS"\n RUN_FAILURE = "PIPELINE_FAILURE"\n RUN_CANCELING = "PIPELINE_CANCELING"\n RUN_CANCELED = "PIPELINE_CANCELED"\n\n # Keep these legacy enum values around, to keep back-compatability for user code that might be\n # using these constants to filter event records\n PIPELINE_ENQUEUED = RUN_ENQUEUED\n PIPELINE_DEQUEUED = RUN_DEQUEUED\n PIPELINE_STARTING = RUN_STARTING\n PIPELINE_START = RUN_START\n PIPELINE_SUCCESS = RUN_SUCCESS\n PIPELINE_FAILURE = RUN_FAILURE\n PIPELINE_CANCELING = RUN_CANCELING\n PIPELINE_CANCELED = RUN_CANCELED\n\n OBJECT_STORE_OPERATION = "OBJECT_STORE_OPERATION"\n ASSET_STORE_OPERATION = "ASSET_STORE_OPERATION"\n LOADED_INPUT = "LOADED_INPUT"\n HANDLED_OUTPUT = "HANDLED_OUTPUT"\n\n ENGINE_EVENT = "ENGINE_EVENT"\n\n HOOK_COMPLETED = "HOOK_COMPLETED"\n HOOK_ERRORED = "HOOK_ERRORED"\n HOOK_SKIPPED = "HOOK_SKIPPED"\n\n ALERT_START = "ALERT_START"\n ALERT_SUCCESS = "ALERT_SUCCESS"\n ALERT_FAILURE = "ALERT_FAILURE"\n\n LOGS_CAPTURED = "LOGS_CAPTURED"
\n\n\nEVENT_TYPE_VALUE_TO_DISPLAY_STRING = {\n "PIPELINE_ENQUEUED": "RUN_ENQUEUED",\n "PIPELINE_DEQUEUED": "RUN_DEQUEUED",\n "PIPELINE_STARTING": "RUN_STARTING",\n "PIPELINE_START": "RUN_START",\n "PIPELINE_SUCCESS": "RUN_SUCCESS",\n "PIPELINE_FAILURE": "RUN_FAILURE",\n "PIPELINE_CANCELING": "RUN_CANCELING",\n "PIPELINE_CANCELED": "RUN_CANCELED",\n}\n\nSTEP_EVENTS = {\n DagsterEventType.STEP_INPUT,\n DagsterEventType.STEP_START,\n DagsterEventType.STEP_OUTPUT,\n DagsterEventType.STEP_FAILURE,\n DagsterEventType.STEP_SUCCESS,\n DagsterEventType.STEP_SKIPPED,\n DagsterEventType.ASSET_MATERIALIZATION,\n DagsterEventType.ASSET_OBSERVATION,\n DagsterEventType.STEP_EXPECTATION_RESULT,\n DagsterEventType.ASSET_CHECK_EVALUATION,\n DagsterEventType.OBJECT_STORE_OPERATION,\n DagsterEventType.HANDLED_OUTPUT,\n DagsterEventType.LOADED_INPUT,\n DagsterEventType.STEP_RESTARTED,\n DagsterEventType.STEP_UP_FOR_RETRY,\n}\n\nFAILURE_EVENTS = {\n DagsterEventType.RUN_FAILURE,\n DagsterEventType.STEP_FAILURE,\n DagsterEventType.RUN_CANCELED,\n}\n\nPIPELINE_EVENTS = {\n DagsterEventType.RUN_ENQUEUED,\n DagsterEventType.RUN_DEQUEUED,\n DagsterEventType.RUN_STARTING,\n DagsterEventType.RUN_START,\n DagsterEventType.RUN_SUCCESS,\n DagsterEventType.RUN_FAILURE,\n DagsterEventType.RUN_CANCELING,\n DagsterEventType.RUN_CANCELED,\n}\n\nHOOK_EVENTS = {\n DagsterEventType.HOOK_COMPLETED,\n DagsterEventType.HOOK_ERRORED,\n DagsterEventType.HOOK_SKIPPED,\n}\n\nALERT_EVENTS = {\n DagsterEventType.ALERT_START,\n DagsterEventType.ALERT_SUCCESS,\n DagsterEventType.ALERT_FAILURE,\n}\n\nMARKER_EVENTS = {\n DagsterEventType.ENGINE_EVENT,\n DagsterEventType.STEP_WORKER_STARTING,\n DagsterEventType.STEP_WORKER_STARTED,\n DagsterEventType.RESOURCE_INIT_STARTED,\n DagsterEventType.RESOURCE_INIT_SUCCESS,\n DagsterEventType.RESOURCE_INIT_FAILURE,\n}\n\n\nEVENT_TYPE_TO_PIPELINE_RUN_STATUS = {\n DagsterEventType.RUN_START: DagsterRunStatus.STARTED,\n DagsterEventType.RUN_SUCCESS: DagsterRunStatus.SUCCESS,\n DagsterEventType.RUN_FAILURE: DagsterRunStatus.FAILURE,\n DagsterEventType.RUN_ENQUEUED: DagsterRunStatus.QUEUED,\n DagsterEventType.RUN_STARTING: DagsterRunStatus.STARTING,\n DagsterEventType.RUN_CANCELING: DagsterRunStatus.CANCELING,\n DagsterEventType.RUN_CANCELED: DagsterRunStatus.CANCELED,\n}\n\nPIPELINE_RUN_STATUS_TO_EVENT_TYPE = {v: k for k, v in EVENT_TYPE_TO_PIPELINE_RUN_STATUS.items()}\n\nASSET_EVENTS = {\n DagsterEventType.ASSET_MATERIALIZATION,\n DagsterEventType.ASSET_OBSERVATION,\n DagsterEventType.ASSET_MATERIALIZATION_PLANNED,\n}\n\nASSET_CHECK_EVENTS = {\n DagsterEventType.ASSET_CHECK_EVALUATION,\n DagsterEventType.ASSET_CHECK_EVALUATION_PLANNED,\n}\n\n\ndef _assert_type(\n method: str,\n expected_type: Union[DagsterEventType, Sequence[DagsterEventType]],\n actual_type: DagsterEventType,\n) -> None:\n _expected_type = (\n [expected_type] if isinstance(expected_type, DagsterEventType) else expected_type\n )\n check.invariant(\n actual_type in _expected_type,\n f"{method} only callable when event_type is"\n f" {','.join([t.value for t in _expected_type])}, called on {actual_type}",\n )\n\n\ndef _validate_event_specific_data(\n event_type: DagsterEventType, event_specific_data: Optional["EventSpecificData"]\n) -> Optional["EventSpecificData"]:\n if event_type == DagsterEventType.STEP_OUTPUT:\n check.inst_param(event_specific_data, "event_specific_data", StepOutputData)\n elif event_type == DagsterEventType.STEP_FAILURE:\n check.inst_param(event_specific_data, "event_specific_data", StepFailureData)\n elif event_type == 
DagsterEventType.STEP_SUCCESS:\n check.inst_param(event_specific_data, "event_specific_data", StepSuccessData)\n elif event_type == DagsterEventType.ASSET_MATERIALIZATION:\n check.inst_param(event_specific_data, "event_specific_data", StepMaterializationData)\n elif event_type == DagsterEventType.STEP_EXPECTATION_RESULT:\n check.inst_param(event_specific_data, "event_specific_data", StepExpectationResultData)\n elif event_type == DagsterEventType.STEP_INPUT:\n check.inst_param(event_specific_data, "event_specific_data", StepInputData)\n elif event_type in (\n DagsterEventType.ENGINE_EVENT,\n DagsterEventType.STEP_WORKER_STARTING,\n DagsterEventType.STEP_WORKER_STARTED,\n DagsterEventType.RESOURCE_INIT_STARTED,\n DagsterEventType.RESOURCE_INIT_SUCCESS,\n DagsterEventType.RESOURCE_INIT_FAILURE,\n ):\n check.inst_param(event_specific_data, "event_specific_data", EngineEventData)\n elif event_type == DagsterEventType.HOOK_ERRORED:\n check.inst_param(event_specific_data, "event_specific_data", HookErroredData)\n elif event_type == DagsterEventType.ASSET_MATERIALIZATION_PLANNED:\n check.inst_param(\n event_specific_data, "event_specific_data", AssetMaterializationPlannedData\n )\n elif event_type == DagsterEventType.ASSET_CHECK_EVALUATION_PLANNED:\n check.inst_param(event_specific_data, "event_specific_data", AssetCheckEvaluationPlanned)\n elif event_type == DagsterEventType.ASSET_CHECK_EVALUATION:\n check.inst_param(event_specific_data, "event_specific_data", AssetCheckEvaluation)\n\n return event_specific_data\n\n\ndef log_step_event(step_context: IStepContext, event: "DagsterEvent") -> None:\n event_type = DagsterEventType(event.event_type_value)\n log_level = logging.ERROR if event_type in FAILURE_EVENTS else logging.DEBUG\n\n step_context.log.log_dagster_event(\n level=log_level,\n msg=event.message or f"{event_type} for step {step_context.step.key}",\n dagster_event=event,\n )\n\n\ndef log_job_event(job_context: IPlanContext, event: "DagsterEvent") -> None:\n event_type = DagsterEventType(event.event_type_value)\n log_level = logging.ERROR if event_type in FAILURE_EVENTS else logging.DEBUG\n\n job_context.log.log_dagster_event(\n level=log_level,\n msg=event.message or f"{event_type} for pipeline {job_context.job_name}",\n dagster_event=event,\n )\n\n\ndef log_resource_event(log_manager: DagsterLogManager, event: "DagsterEvent") -> None:\n event_specific_data = cast(EngineEventData, event.event_specific_data)\n\n log_level = logging.ERROR if event_specific_data.error else logging.DEBUG\n log_manager.log_dagster_event(level=log_level, msg=event.message or "", dagster_event=event)\n\n\nclass DagsterEventSerializer(NamedTupleSerializer["DagsterEvent"]):\n def before_unpack(self, context, unpacked_dict: Any) -> Dict[str, Any]:\n event_type_value, event_specific_data = _handle_back_compat(\n unpacked_dict["event_type_value"], unpacked_dict.get("event_specific_data")\n )\n unpacked_dict["event_type_value"] = event_type_value\n unpacked_dict["event_specific_data"] = event_specific_data\n\n return unpacked_dict\n\n def handle_unpack_error(\n self,\n exc: Exception,\n context: UnpackContext,\n storage_dict: Dict[str, Any],\n ) -> "DagsterEvent":\n event_type_value, _ = _handle_back_compat(\n storage_dict["event_type_value"], storage_dict.get("event_specific_data")\n )\n step_key = storage_dict.get("step_key")\n orig_message = storage_dict.get("message")\n new_message = (\n f"Could not deserialize event of type {event_type_value}. 
This event may have been"\n " written by a newer version of Dagster."\n + (f' Original message: "{orig_message}"' if orig_message else "")\n )\n return DagsterEvent(\n event_type_value=DagsterEventType.ENGINE_EVENT.value,\n job_name=storage_dict["pipeline_name"],\n message=new_message,\n step_key=step_key,\n event_specific_data=EngineEventData(\n error=serializable_error_info_from_exc_info(sys.exc_info())\n ),\n )\n\n\n
[docs]@whitelist_for_serdes(\n serializer=DagsterEventSerializer,\n storage_field_names={\n "node_handle": "solid_handle",\n "job_name": "pipeline_name",\n },\n)\nclass DagsterEvent(\n NamedTuple(\n "_DagsterEvent",\n [\n ("event_type_value", str),\n ("job_name", str),\n ("step_handle", Optional[Union[StepHandle, ResolvedFromDynamicStepHandle]]),\n ("node_handle", Optional[NodeHandle]),\n ("step_kind_value", Optional[str]),\n ("logging_tags", Optional[Mapping[str, str]]),\n ("event_specific_data", Optional["EventSpecificData"]),\n ("message", Optional[str]),\n ("pid", Optional[int]),\n ("step_key", Optional[str]),\n ],\n )\n):\n """Events yielded by op and job execution.\n\n Users should not instantiate this class.\n\n Attributes:\n event_type_value (str): Value for a DagsterEventType.\n job_name (str)\n node_handle (NodeHandle)\n step_kind_value (str): Value for a StepKind.\n logging_tags (Dict[str, str])\n event_specific_data (Any): Type must correspond to event_type_value.\n message (str)\n pid (int)\n step_key (Optional[str]): DEPRECATED\n """\n\n @staticmethod\n def from_step(\n event_type: "DagsterEventType",\n step_context: IStepContext,\n event_specific_data: Optional["EventSpecificData"] = None,\n message: Optional[str] = None,\n ) -> "DagsterEvent":\n event = DagsterEvent(\n event_type_value=check.inst_param(event_type, "event_type", DagsterEventType).value,\n job_name=step_context.job_name,\n step_handle=step_context.step.handle,\n node_handle=step_context.step.node_handle,\n step_kind_value=step_context.step.kind.value,\n logging_tags=step_context.event_tags,\n event_specific_data=_validate_event_specific_data(event_type, event_specific_data),\n message=check.opt_str_param(message, "message"),\n pid=os.getpid(),\n )\n\n log_step_event(step_context, event)\n\n return event\n\n @staticmethod\n def from_job(\n event_type: DagsterEventType,\n job_context: IPlanContext,\n message: Optional[str] = None,\n event_specific_data: Optional["EventSpecificData"] = None,\n step_handle: Optional[Union[StepHandle, ResolvedFromDynamicStepHandle]] = None,\n ) -> "DagsterEvent":\n check.opt_inst_param(\n step_handle, "step_handle", (StepHandle, ResolvedFromDynamicStepHandle)\n )\n\n event = DagsterEvent(\n event_type_value=check.inst_param(event_type, "event_type", DagsterEventType).value,\n job_name=job_context.job_name,\n message=check.opt_str_param(message, "message"),\n event_specific_data=_validate_event_specific_data(event_type, event_specific_data),\n step_handle=step_handle,\n pid=os.getpid(),\n )\n\n log_job_event(job_context, event)\n\n return event\n\n @staticmethod\n def from_resource(\n event_type: DagsterEventType,\n job_name: str,\n execution_plan: "ExecutionPlan",\n log_manager: DagsterLogManager,\n message: Optional[str] = None,\n event_specific_data: Optional["EngineEventData"] = None,\n ) -> "DagsterEvent":\n event = DagsterEvent(\n event_type_value=check.inst_param(event_type, "event_type", DagsterEventType).value,\n job_name=job_name,\n message=check.opt_str_param(message, "message"),\n event_specific_data=_validate_event_specific_data(\n DagsterEventType.ENGINE_EVENT, event_specific_data\n ),\n step_handle=execution_plan.step_handle_for_single_step_plans(),\n pid=os.getpid(),\n )\n log_resource_event(log_manager, event)\n return event\n\n def __new__(\n cls,\n event_type_value: str,\n job_name: str,\n step_handle: Optional[Union[StepHandle, ResolvedFromDynamicStepHandle]] = None,\n node_handle: Optional[NodeHandle] = None,\n step_kind_value: Optional[str] = None,\n 
logging_tags: Optional[Mapping[str, str]] = None,\n event_specific_data: Optional["EventSpecificData"] = None,\n message: Optional[str] = None,\n pid: Optional[int] = None,\n # legacy\n step_key: Optional[str] = None,\n ):\n # old events may contain node_handle but not step_handle\n if node_handle is not None and step_handle is None:\n step_handle = StepHandle(node_handle)\n\n # Legacy events may have step_key set directly, preserve those to stay in sync\n # with legacy execution plan snapshots.\n if step_handle is not None and step_key is None:\n step_key = step_handle.to_key()\n\n return super(DagsterEvent, cls).__new__(\n cls,\n check.str_param(event_type_value, "event_type_value"),\n check.str_param(job_name, "job_name"),\n check.opt_inst_param(\n step_handle, "step_handle", (StepHandle, ResolvedFromDynamicStepHandle)\n ),\n check.opt_inst_param(node_handle, "node_handle", NodeHandle),\n check.opt_str_param(step_kind_value, "step_kind_value"),\n check.opt_mapping_param(logging_tags, "logging_tags"),\n _validate_event_specific_data(DagsterEventType(event_type_value), event_specific_data),\n check.opt_str_param(message, "message"),\n check.opt_int_param(pid, "pid"),\n check.opt_str_param(step_key, "step_key"),\n )\n\n @property\n def node_name(self) -> str:\n check.invariant(self.node_handle is not None)\n node_handle = cast(NodeHandle, self.node_handle)\n return node_handle.name\n\n @public\n @property\n def event_type(self) -> DagsterEventType:\n """DagsterEventType: The type of this event."""\n return DagsterEventType(self.event_type_value)\n\n @public\n @property\n def is_step_event(self) -> bool:\n """bool: If this event relates to a specific step."""\n return self.event_type in STEP_EVENTS\n\n @public\n @property\n def is_hook_event(self) -> bool:\n """bool: If this event relates to the execution of a hook."""\n return self.event_type in HOOK_EVENTS\n\n @property\n def is_alert_event(self) -> bool:\n return self.event_type in ALERT_EVENTS\n\n @property\n def step_kind(self) -> "StepKind":\n from dagster._core.execution.plan.step import StepKind\n\n return StepKind(self.step_kind_value)\n\n @public\n @property\n def is_step_success(self) -> bool:\n """bool: If this event is of type STEP_SUCCESS."""\n return self.event_type == DagsterEventType.STEP_SUCCESS\n\n @public\n @property\n def is_successful_output(self) -> bool:\n """bool: If this event is of type STEP_OUTPUT."""\n return self.event_type == DagsterEventType.STEP_OUTPUT\n\n @public\n @property\n def is_step_start(self) -> bool:\n """bool: If this event is of type STEP_START."""\n return self.event_type == DagsterEventType.STEP_START\n\n @public\n @property\n def is_step_failure(self) -> bool:\n """bool: If this event is of type STEP_FAILURE."""\n return self.event_type == DagsterEventType.STEP_FAILURE\n\n @public\n @property\n def is_resource_init_failure(self) -> bool:\n """bool: If this event is of type RESOURCE_INIT_FAILURE."""\n return self.event_type == DagsterEventType.RESOURCE_INIT_FAILURE\n\n @public\n @property\n def is_step_skipped(self) -> bool:\n """bool: If this event is of type STEP_SKIPPED."""\n return self.event_type == DagsterEventType.STEP_SKIPPED\n\n @public\n @property\n def is_step_up_for_retry(self) -> bool:\n """bool: If this event is of type STEP_UP_FOR_RETRY."""\n return self.event_type == DagsterEventType.STEP_UP_FOR_RETRY\n\n @public\n @property\n def is_step_restarted(self) -> bool:\n """bool: If this event is of type STEP_RESTARTED."""\n return self.event_type == 
DagsterEventType.STEP_RESTARTED\n\n    @property\n    def is_job_success(self) -> bool:\n        return self.event_type == DagsterEventType.RUN_SUCCESS\n\n    @property\n    def is_job_failure(self) -> bool:\n        return self.event_type == DagsterEventType.RUN_FAILURE\n\n    @property\n    def is_run_failure(self) -> bool:\n        return self.event_type == DagsterEventType.RUN_FAILURE\n\n    @public\n    @property\n    def is_failure(self) -> bool:\n        """bool: If this event represents the failure of a run or step."""\n        return self.event_type in FAILURE_EVENTS\n\n    @property\n    def is_job_event(self) -> bool:\n        return self.event_type in PIPELINE_EVENTS\n\n    @public\n    @property\n    def is_engine_event(self) -> bool:\n        """bool: If this event is of type ENGINE_EVENT."""\n        return self.event_type == DagsterEventType.ENGINE_EVENT\n\n    @public\n    @property\n    def is_handled_output(self) -> bool:\n        """bool: If this event is of type HANDLED_OUTPUT."""\n        return self.event_type == DagsterEventType.HANDLED_OUTPUT\n\n    @public\n    @property\n    def is_loaded_input(self) -> bool:\n        """bool: If this event is of type LOADED_INPUT."""\n        return self.event_type == DagsterEventType.LOADED_INPUT\n\n    @public\n    @property\n    def is_step_materialization(self) -> bool:\n        """bool: If this event is of type ASSET_MATERIALIZATION."""\n        return self.event_type == DagsterEventType.ASSET_MATERIALIZATION\n\n    @public\n    @property\n    def is_expectation_result(self) -> bool:\n        """bool: If this event is of type STEP_EXPECTATION_RESULT."""\n        return self.event_type == DagsterEventType.STEP_EXPECTATION_RESULT\n\n    @public\n    @property\n    def is_asset_observation(self) -> bool:\n        """bool: If this event is of type ASSET_OBSERVATION."""\n        return self.event_type == DagsterEventType.ASSET_OBSERVATION\n\n    @public\n    @property\n    def is_asset_materialization_planned(self) -> bool:\n        """bool: If this event is of type ASSET_MATERIALIZATION_PLANNED."""\n        return self.event_type == DagsterEventType.ASSET_MATERIALIZATION_PLANNED\n\n    @public\n    @property\n    def asset_key(self) -> Optional[AssetKey]:\n        """Optional[AssetKey]: For events that correspond to a specific asset_key / partition\n        (ASSET_MATERIALIZATION, ASSET_OBSERVATION, ASSET_MATERIALIZATION_PLANNED), returns that\n        asset key. Otherwise, returns None.\n        """\n        if self.event_type == DagsterEventType.ASSET_MATERIALIZATION:\n            return self.step_materialization_data.materialization.asset_key\n        elif self.event_type == DagsterEventType.ASSET_OBSERVATION:\n            return self.asset_observation_data.asset_observation.asset_key\n        elif self.event_type == DagsterEventType.ASSET_MATERIALIZATION_PLANNED:\n            return self.asset_materialization_planned_data.asset_key\n        else:\n            return None\n\n    @public\n    @property\n    def partition(self) -> Optional[str]:\n        """Optional[str]: For events that correspond to a specific asset_key / partition\n        (ASSET_MATERIALIZATION, ASSET_OBSERVATION, ASSET_MATERIALIZATION_PLANNED), returns that\n        partition. 
Otherwise, returns None.\n """\n if self.event_type == DagsterEventType.ASSET_MATERIALIZATION:\n return self.step_materialization_data.materialization.partition\n elif self.event_type == DagsterEventType.ASSET_OBSERVATION:\n return self.asset_observation_data.asset_observation.partition\n elif self.event_type == DagsterEventType.ASSET_MATERIALIZATION_PLANNED:\n return self.asset_materialization_planned_data.partition\n else:\n return None\n\n @property\n def step_input_data(self) -> "StepInputData":\n _assert_type("step_input_data", DagsterEventType.STEP_INPUT, self.event_type)\n return cast(StepInputData, self.event_specific_data)\n\n @property\n def step_output_data(self) -> StepOutputData:\n _assert_type("step_output_data", DagsterEventType.STEP_OUTPUT, self.event_type)\n return cast(StepOutputData, self.event_specific_data)\n\n @property\n def step_success_data(self) -> "StepSuccessData":\n _assert_type("step_success_data", DagsterEventType.STEP_SUCCESS, self.event_type)\n return cast(StepSuccessData, self.event_specific_data)\n\n @property\n def step_failure_data(self) -> "StepFailureData":\n _assert_type("step_failure_data", DagsterEventType.STEP_FAILURE, self.event_type)\n return cast(StepFailureData, self.event_specific_data)\n\n @property\n def step_retry_data(self) -> "StepRetryData":\n _assert_type("step_retry_data", DagsterEventType.STEP_UP_FOR_RETRY, self.event_type)\n return cast(StepRetryData, self.event_specific_data)\n\n @property\n def step_materialization_data(self) -> "StepMaterializationData":\n _assert_type(\n "step_materialization_data", DagsterEventType.ASSET_MATERIALIZATION, self.event_type\n )\n return cast(StepMaterializationData, self.event_specific_data)\n\n @property\n def asset_observation_data(self) -> "AssetObservationData":\n _assert_type("asset_observation_data", DagsterEventType.ASSET_OBSERVATION, self.event_type)\n return cast(AssetObservationData, self.event_specific_data)\n\n @property\n def asset_materialization_planned_data(self) -> "AssetMaterializationPlannedData":\n _assert_type(\n "asset_materialization_planned",\n DagsterEventType.ASSET_MATERIALIZATION_PLANNED,\n self.event_type,\n )\n return cast(AssetMaterializationPlannedData, self.event_specific_data)\n\n @property\n def asset_check_planned_data(self) -> "AssetCheckEvaluationPlanned":\n _assert_type(\n "asset_check_planned",\n DagsterEventType.ASSET_CHECK_EVALUATION_PLANNED,\n self.event_type,\n )\n return cast(AssetCheckEvaluationPlanned, self.event_specific_data)\n\n @property\n def step_expectation_result_data(self) -> "StepExpectationResultData":\n _assert_type(\n "step_expectation_result_data",\n DagsterEventType.STEP_EXPECTATION_RESULT,\n self.event_type,\n )\n return cast(StepExpectationResultData, self.event_specific_data)\n\n @property\n def materialization(self) -> AssetMaterialization:\n _assert_type(\n "step_materialization_data", DagsterEventType.ASSET_MATERIALIZATION, self.event_type\n )\n return cast(StepMaterializationData, self.event_specific_data).materialization\n\n @property\n def asset_check_evaluation_data(self) -> AssetCheckEvaluation:\n _assert_type(\n "asset_check_evaluation", DagsterEventType.ASSET_CHECK_EVALUATION, self.event_type\n )\n return cast(AssetCheckEvaluation, self.event_specific_data)\n\n @property\n def job_failure_data(self) -> "JobFailureData":\n _assert_type("job_failure_data", DagsterEventType.RUN_FAILURE, self.event_type)\n return cast(JobFailureData, self.event_specific_data)\n\n @property\n def engine_event_data(self) -> "EngineEventData":\n 
_assert_type(\n "engine_event_data",\n [\n DagsterEventType.ENGINE_EVENT,\n DagsterEventType.RESOURCE_INIT_STARTED,\n DagsterEventType.RESOURCE_INIT_SUCCESS,\n DagsterEventType.RESOURCE_INIT_FAILURE,\n DagsterEventType.STEP_WORKER_STARTED,\n DagsterEventType.STEP_WORKER_STARTING,\n ],\n self.event_type,\n )\n return cast(EngineEventData, self.event_specific_data)\n\n @property\n def hook_completed_data(self) -> Optional["EventSpecificData"]:\n _assert_type("hook_completed_data", DagsterEventType.HOOK_COMPLETED, self.event_type)\n return self.event_specific_data\n\n @property\n def hook_errored_data(self) -> "HookErroredData":\n _assert_type("hook_errored_data", DagsterEventType.HOOK_ERRORED, self.event_type)\n return cast(HookErroredData, self.event_specific_data)\n\n @property\n def hook_skipped_data(self) -> Optional["EventSpecificData"]:\n _assert_type("hook_skipped_data", DagsterEventType.HOOK_SKIPPED, self.event_type)\n return self.event_specific_data\n\n @property\n def logs_captured_data(self) -> "ComputeLogsCaptureData":\n _assert_type("logs_captured_data", DagsterEventType.LOGS_CAPTURED, self.event_type)\n return cast(ComputeLogsCaptureData, self.event_specific_data)\n\n @staticmethod\n def step_output_event(\n step_context: StepExecutionContext, step_output_data: StepOutputData\n ) -> "DagsterEvent":\n output_def = step_context.op.output_def_named(\n step_output_data.step_output_handle.output_name\n )\n\n return DagsterEvent.from_step(\n event_type=DagsterEventType.STEP_OUTPUT,\n step_context=step_context,\n event_specific_data=step_output_data,\n message=(\n 'Yielded output "{output_name}"{mapping_clause} of type'\n ' "{output_type}".{type_check_clause}'.format(\n output_name=step_output_data.step_output_handle.output_name,\n output_type=output_def.dagster_type.display_name,\n type_check_clause=(\n (\n " Warning! 
Type check failed."\n if not step_output_data.type_check_data.success\n else " (Type check passed)."\n )\n if step_output_data.type_check_data\n else " (No type check)."\n ),\n mapping_clause=(\n f' mapping key "{step_output_data.step_output_handle.mapping_key}"'\n if step_output_data.step_output_handle.mapping_key\n else ""\n ),\n )\n ),\n )\n\n @staticmethod\n def step_failure_event(\n step_context: IStepContext,\n step_failure_data: "StepFailureData",\n message=None,\n ) -> "DagsterEvent":\n return DagsterEvent.from_step(\n event_type=DagsterEventType.STEP_FAILURE,\n step_context=step_context,\n event_specific_data=step_failure_data,\n message=(message or f'Execution of step "{step_context.step.key}" failed.'),\n )\n\n @staticmethod\n def step_retry_event(\n step_context: IStepContext, step_retry_data: "StepRetryData"\n ) -> "DagsterEvent":\n return DagsterEvent.from_step(\n event_type=DagsterEventType.STEP_UP_FOR_RETRY,\n step_context=step_context,\n event_specific_data=step_retry_data,\n message=(\n 'Execution of step "{step_key}" failed and has requested a retry{wait_str}.'.format(\n step_key=step_context.step.key,\n wait_str=(\n f" in {step_retry_data.seconds_to_wait} seconds"\n if step_retry_data.seconds_to_wait\n else ""\n ),\n )\n ),\n )\n\n @staticmethod\n def step_input_event(\n step_context: StepExecutionContext, step_input_data: "StepInputData"\n ) -> "DagsterEvent":\n input_def = step_context.op_def.input_def_named(step_input_data.input_name)\n\n return DagsterEvent.from_step(\n event_type=DagsterEventType.STEP_INPUT,\n step_context=step_context,\n event_specific_data=step_input_data,\n message='Got input "{input_name}" of type "{input_type}".{type_check_clause}'.format(\n input_name=step_input_data.input_name,\n input_type=input_def.dagster_type.display_name,\n type_check_clause=(\n (\n " Warning! 
Type check failed."\n if not step_input_data.type_check_data.success\n else " (Type check passed)."\n )\n if step_input_data.type_check_data\n else " (No type check)."\n ),\n ),\n )\n\n @staticmethod\n def step_start_event(step_context: IStepContext) -> "DagsterEvent":\n return DagsterEvent.from_step(\n event_type=DagsterEventType.STEP_START,\n step_context=step_context,\n message=f'Started execution of step "{step_context.step.key}".',\n )\n\n @staticmethod\n def step_restarted_event(step_context: IStepContext, previous_attempts: int) -> "DagsterEvent":\n return DagsterEvent.from_step(\n event_type=DagsterEventType.STEP_RESTARTED,\n step_context=step_context,\n message='Started re-execution (attempt # {n}) of step "{step_key}".'.format(\n step_key=step_context.step.key, n=previous_attempts + 1\n ),\n )\n\n @staticmethod\n def step_success_event(\n step_context: IStepContext, success: "StepSuccessData"\n ) -> "DagsterEvent":\n return DagsterEvent.from_step(\n event_type=DagsterEventType.STEP_SUCCESS,\n step_context=step_context,\n event_specific_data=success,\n message='Finished execution of step "{step_key}" in {duration}.'.format(\n step_key=step_context.step.key,\n duration=format_duration(success.duration_ms),\n ),\n )\n\n @staticmethod\n def step_skipped_event(step_context: IStepContext) -> "DagsterEvent":\n return DagsterEvent.from_step(\n event_type=DagsterEventType.STEP_SKIPPED,\n step_context=step_context,\n message=f'Skipped execution of step "{step_context.step.key}".',\n )\n\n @staticmethod\n def asset_materialization(\n step_context: IStepContext,\n materialization: AssetMaterialization,\n ) -> "DagsterEvent":\n return DagsterEvent.from_step(\n event_type=DagsterEventType.ASSET_MATERIALIZATION,\n step_context=step_context,\n event_specific_data=StepMaterializationData(materialization),\n message=(\n materialization.description\n if materialization.description\n else "Materialized value{label_clause}.".format(\n label_clause=f" {materialization.label}" if materialization.label else ""\n )\n ),\n )\n\n @staticmethod\n def asset_observation(\n step_context: IStepContext, observation: AssetObservation\n ) -> "DagsterEvent":\n return DagsterEvent.from_step(\n event_type=DagsterEventType.ASSET_OBSERVATION,\n step_context=step_context,\n event_specific_data=AssetObservationData(observation),\n )\n\n @staticmethod\n def asset_check_evaluation(\n step_context: IStepContext, asset_check_evaluation: AssetCheckEvaluation\n ) -> "DagsterEvent":\n return DagsterEvent.from_step(\n event_type=DagsterEventType.ASSET_CHECK_EVALUATION,\n step_context=step_context,\n event_specific_data=asset_check_evaluation,\n )\n\n @staticmethod\n def step_expectation_result(\n step_context: IStepContext, expectation_result: ExpectationResult\n ) -> "DagsterEvent":\n def _msg():\n if expectation_result.description:\n return expectation_result.description\n\n return "Expectation{label_clause} {result_verb}".format(\n label_clause=" " + expectation_result.label if expectation_result.label else "",\n result_verb="passed" if expectation_result.success else "failed",\n )\n\n return DagsterEvent.from_step(\n event_type=DagsterEventType.STEP_EXPECTATION_RESULT,\n step_context=step_context,\n event_specific_data=StepExpectationResultData(expectation_result),\n message=_msg(),\n )\n\n @staticmethod\n def job_start(job_context: IPlanContext) -> "DagsterEvent":\n return DagsterEvent.from_job(\n DagsterEventType.RUN_START,\n job_context,\n message=f'Started execution of run for "{job_context.job_name}".',\n )\n\n 
@staticmethod\n def job_success(job_context: IPlanContext) -> "DagsterEvent":\n return DagsterEvent.from_job(\n DagsterEventType.RUN_SUCCESS,\n job_context,\n message=f'Finished execution of run for "{job_context.job_name}".',\n )\n\n @staticmethod\n def job_failure(\n job_context_or_name: Union[IPlanContext, str],\n context_msg: str,\n error_info: Optional[SerializableErrorInfo] = None,\n ) -> "DagsterEvent":\n check.str_param(context_msg, "context_msg")\n if isinstance(job_context_or_name, IPlanContext):\n return DagsterEvent.from_job(\n DagsterEventType.RUN_FAILURE,\n job_context_or_name,\n message=(\n f'Execution of run for "{job_context_or_name.job_name}" failed. {context_msg}'\n ),\n event_specific_data=JobFailureData(error_info),\n )\n else:\n # when the failure happens trying to bring up context, the job_context hasn't been\n # built and so can't use from_pipeline\n check.str_param(job_context_or_name, "pipeline_name")\n event = DagsterEvent(\n event_type_value=DagsterEventType.RUN_FAILURE.value,\n job_name=job_context_or_name,\n event_specific_data=JobFailureData(error_info),\n message=f'Execution of run for "{job_context_or_name}" failed. {context_msg}',\n pid=os.getpid(),\n )\n return event\n\n @staticmethod\n def job_canceled(\n job_context: IPlanContext, error_info: Optional[SerializableErrorInfo] = None\n ) -> "DagsterEvent":\n return DagsterEvent.from_job(\n DagsterEventType.RUN_CANCELED,\n job_context,\n message=f'Execution of run for "{job_context.job_name}" canceled.',\n event_specific_data=JobCanceledData(\n check.opt_inst_param(error_info, "error_info", SerializableErrorInfo)\n ),\n )\n\n @staticmethod\n def step_worker_starting(\n step_context: IStepContext,\n message: str,\n metadata: Mapping[str, MetadataValue],\n ) -> "DagsterEvent":\n return DagsterEvent.from_step(\n DagsterEventType.STEP_WORKER_STARTING,\n step_context,\n message=message,\n event_specific_data=EngineEventData(\n metadata=metadata, marker_start="step_process_start"\n ),\n )\n\n @staticmethod\n def step_worker_started(\n log_manager: DagsterLogManager,\n job_name: str,\n message: str,\n metadata: Mapping[str, MetadataValue],\n step_key: Optional[str],\n ) -> "DagsterEvent":\n event = DagsterEvent(\n DagsterEventType.STEP_WORKER_STARTED.value,\n job_name=job_name,\n message=message,\n event_specific_data=EngineEventData(metadata=metadata, marker_end="step_process_start"),\n pid=os.getpid(),\n step_key=step_key,\n )\n log_manager.log_dagster_event(\n level=logging.DEBUG,\n msg=message,\n dagster_event=event,\n )\n return event\n\n @staticmethod\n def resource_init_start(\n job_name: str,\n execution_plan: "ExecutionPlan",\n log_manager: DagsterLogManager,\n resource_keys: AbstractSet[str],\n ) -> "DagsterEvent":\n return DagsterEvent.from_resource(\n DagsterEventType.RESOURCE_INIT_STARTED,\n job_name=job_name,\n execution_plan=execution_plan,\n log_manager=log_manager,\n message="Starting initialization of resources [{}].".format(\n ", ".join(sorted(resource_keys))\n ),\n event_specific_data=EngineEventData(metadata={}, marker_start="resources"),\n )\n\n @staticmethod\n def resource_init_success(\n job_name: str,\n execution_plan: "ExecutionPlan",\n log_manager: DagsterLogManager,\n resource_instances: Mapping[str, Any],\n resource_init_times: Mapping[str, str],\n ) -> "DagsterEvent":\n metadata = {}\n for key in resource_instances.keys():\n metadata[key] = MetadataValue.python_artifact(resource_instances[key].__class__)\n metadata[f"{key}:init_time"] = resource_init_times[key]\n\n return 
DagsterEvent.from_resource(\n DagsterEventType.RESOURCE_INIT_SUCCESS,\n job_name=job_name,\n execution_plan=execution_plan,\n log_manager=log_manager,\n message="Finished initialization of resources [{}].".format(\n ", ".join(sorted(resource_init_times.keys()))\n ),\n event_specific_data=EngineEventData(\n metadata=metadata,\n marker_end="resources",\n ),\n )\n\n @staticmethod\n def resource_init_failure(\n job_name: str,\n execution_plan: "ExecutionPlan",\n log_manager: DagsterLogManager,\n resource_keys: AbstractSet[str],\n error: SerializableErrorInfo,\n ) -> "DagsterEvent":\n return DagsterEvent.from_resource(\n DagsterEventType.RESOURCE_INIT_FAILURE,\n job_name=job_name,\n execution_plan=execution_plan,\n log_manager=log_manager,\n message="Initialization of resources [{}] failed.".format(", ".join(resource_keys)),\n event_specific_data=EngineEventData(\n metadata={},\n marker_end="resources",\n error=error,\n ),\n )\n\n @staticmethod\n def resource_teardown_failure(\n job_name: str,\n execution_plan: "ExecutionPlan",\n log_manager: DagsterLogManager,\n resource_keys: AbstractSet[str],\n error: SerializableErrorInfo,\n ) -> "DagsterEvent":\n return DagsterEvent.from_resource(\n DagsterEventType.ENGINE_EVENT,\n job_name=job_name,\n execution_plan=execution_plan,\n log_manager=log_manager,\n message="Teardown of resources [{}] failed.".format(", ".join(resource_keys)),\n event_specific_data=EngineEventData(\n metadata={},\n marker_start=None,\n marker_end=None,\n error=error,\n ),\n )\n\n @staticmethod\n def engine_event(\n plan_context: IPlanContext,\n message: str,\n event_specific_data: Optional["EngineEventData"] = None,\n ) -> "DagsterEvent":\n if isinstance(plan_context, IStepContext):\n return DagsterEvent.from_step(\n DagsterEventType.ENGINE_EVENT,\n step_context=plan_context,\n event_specific_data=event_specific_data,\n message=message,\n )\n else:\n return DagsterEvent.from_job(\n DagsterEventType.ENGINE_EVENT,\n plan_context,\n message,\n event_specific_data=event_specific_data,\n )\n\n @staticmethod\n def object_store_operation(\n step_context: IStepContext, object_store_operation_result: "ObjectStoreOperation"\n ) -> "DagsterEvent":\n object_store_name = (\n f"{object_store_operation_result.object_store_name} "\n if object_store_operation_result.object_store_name\n else ""\n )\n\n serialization_strategy_modifier = (\n f" using {object_store_operation_result.serialization_strategy_name}"\n if object_store_operation_result.serialization_strategy_name\n else ""\n )\n\n value_name = object_store_operation_result.value_name\n\n if (\n ObjectStoreOperationType(object_store_operation_result.op)\n == ObjectStoreOperationType.SET_OBJECT\n ):\n message = (\n f"Stored intermediate object for output {value_name} in "\n f"{object_store_name}object store{serialization_strategy_modifier}."\n )\n elif (\n ObjectStoreOperationType(object_store_operation_result.op)\n == ObjectStoreOperationType.GET_OBJECT\n ):\n message = (\n f"Retrieved intermediate object for input {value_name} in "\n f"{object_store_name}object store{serialization_strategy_modifier}."\n )\n elif (\n ObjectStoreOperationType(object_store_operation_result.op)\n == ObjectStoreOperationType.CP_OBJECT\n ):\n message = (\n "Copied intermediate object for input {value_name} from {key} to {dest_key}"\n ).format(\n value_name=value_name,\n key=object_store_operation_result.key,\n dest_key=object_store_operation_result.dest_key,\n )\n else:\n message = ""\n\n return DagsterEvent.from_step(\n 
DagsterEventType.OBJECT_STORE_OPERATION,\n step_context,\n event_specific_data=ObjectStoreOperationResultData(\n op=object_store_operation_result.op,\n value_name=value_name,\n address=object_store_operation_result.key,\n metadata={"key": MetadataValue.path(object_store_operation_result.key)},\n version=object_store_operation_result.version,\n mapping_key=object_store_operation_result.mapping_key,\n ),\n message=message,\n )\n\n @staticmethod\n def handled_output(\n step_context: IStepContext,\n output_name: str,\n manager_key: str,\n message_override: Optional[str] = None,\n metadata: Optional[Mapping[str, MetadataValue]] = None,\n ) -> "DagsterEvent":\n message = f'Handled output "{output_name}" using IO manager "{manager_key}"'\n return DagsterEvent.from_step(\n event_type=DagsterEventType.HANDLED_OUTPUT,\n step_context=step_context,\n event_specific_data=HandledOutputData(\n output_name=output_name,\n manager_key=manager_key,\n metadata=metadata if metadata else {},\n ),\n message=message_override or message,\n )\n\n @staticmethod\n def loaded_input(\n step_context: IStepContext,\n input_name: str,\n manager_key: str,\n upstream_output_name: Optional[str] = None,\n upstream_step_key: Optional[str] = None,\n message_override: Optional[str] = None,\n metadata: Optional[Mapping[str, MetadataValue]] = None,\n ) -> "DagsterEvent":\n message = f'Loaded input "{input_name}" using input manager "{manager_key}"'\n if upstream_output_name:\n message += f', from output "{upstream_output_name}" of step "{upstream_step_key}"'\n\n return DagsterEvent.from_step(\n event_type=DagsterEventType.LOADED_INPUT,\n step_context=step_context,\n event_specific_data=LoadedInputData(\n input_name=input_name,\n manager_key=manager_key,\n upstream_output_name=upstream_output_name,\n upstream_step_key=upstream_step_key,\n metadata=metadata if metadata else {},\n ),\n message=message_override or message,\n )\n\n @staticmethod\n def hook_completed(\n step_context: StepExecutionContext, hook_def: HookDefinition\n ) -> "DagsterEvent":\n event_type = DagsterEventType.HOOK_COMPLETED\n\n event = DagsterEvent(\n event_type_value=event_type.value,\n job_name=step_context.job_name,\n step_handle=step_context.step.handle,\n node_handle=step_context.step.node_handle,\n step_kind_value=step_context.step.kind.value,\n logging_tags=step_context.event_tags,\n message=(\n f'Finished the execution of hook "{hook_def.name}" triggered for'\n f' "{step_context.op.name}".'\n ),\n )\n\n step_context.log.log_dagster_event(\n level=logging.DEBUG, msg=event.message or "", dagster_event=event\n )\n\n return event\n\n @staticmethod\n def hook_errored(\n step_context: StepExecutionContext, error: HookExecutionError\n ) -> "DagsterEvent":\n event_type = DagsterEventType.HOOK_ERRORED\n\n event = DagsterEvent(\n event_type_value=event_type.value,\n job_name=step_context.job_name,\n step_handle=step_context.step.handle,\n node_handle=step_context.step.node_handle,\n step_kind_value=step_context.step.kind.value,\n logging_tags=step_context.event_tags,\n event_specific_data=_validate_event_specific_data(\n event_type,\n HookErroredData(\n error=serializable_error_info_from_exc_info(error.original_exc_info)\n ),\n ),\n )\n\n step_context.log.log_dagster_event(level=logging.ERROR, msg=str(error), dagster_event=event)\n\n return event\n\n @staticmethod\n def hook_skipped(\n step_context: StepExecutionContext, hook_def: HookDefinition\n ) -> "DagsterEvent":\n event_type = DagsterEventType.HOOK_SKIPPED\n\n event = DagsterEvent(\n 
event_type_value=event_type.value,\n job_name=step_context.job_name,\n step_handle=step_context.step.handle,\n node_handle=step_context.step.node_handle,\n step_kind_value=step_context.step.kind.value,\n logging_tags=step_context.event_tags,\n message=(\n f'Skipped the execution of hook "{hook_def.name}". It did not meet its triggering '\n f'condition during the execution of "{step_context.op.name}".'\n ),\n )\n\n step_context.log.log_dagster_event(\n level=logging.DEBUG, msg=event.message or "", dagster_event=event\n )\n\n return event\n\n @staticmethod\n def legacy_compute_log_step_event(step_context: StepExecutionContext):\n step_key = step_context.step.key\n return DagsterEvent.from_step(\n DagsterEventType.LOGS_CAPTURED,\n step_context,\n message=f"Started capturing logs for step: {step_key}.",\n event_specific_data=ComputeLogsCaptureData(\n step_keys=[step_key],\n file_key=step_key,\n ),\n )\n\n @staticmethod\n def capture_logs(\n job_context: IPlanContext,\n step_keys: Sequence[str],\n log_key: Sequence[str],\n log_context: CapturedLogContext,\n ):\n file_key = log_key[-1]\n return DagsterEvent.from_job(\n DagsterEventType.LOGS_CAPTURED,\n job_context,\n message=f"Started capturing logs in process (pid: {os.getpid()}).",\n event_specific_data=ComputeLogsCaptureData(\n step_keys=step_keys,\n file_key=file_key,\n external_stdout_url=log_context.external_stdout_url,\n external_stderr_url=log_context.external_stderr_url,\n external_url=log_context.external_url,\n ),\n )
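The is_* helpers and the event_type property above are what callers typically use to filter an event stream. A minimal sketch of that pattern, assuming a made-up emit_number op and events_demo_job job, and that the in-process execution result exposes its events via all_events:

from dagster import DagsterEventType, job, op


@op
def emit_number() -> int:
    # Trivial op so the run emits STEP_START / STEP_OUTPUT / STEP_SUCCESS events.
    return 1


@job
def events_demo_job():
    emit_number()


result = events_demo_job.execute_in_process()
# DagsterEvent exposes is_step_event / event_type / step_key, as shown above.
step_events = [e for e in result.all_events if e.is_step_event]
assert any(e.event_type == DagsterEventType.STEP_SUCCESS for e in step_events)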
\n\n\ndef get_step_output_event(\n events: Sequence[DagsterEvent], step_key: str, output_name: Optional[str] = "result"\n) -> Optional["DagsterEvent"]:\n check.sequence_param(events, "events", of_type=DagsterEvent)\n check.str_param(step_key, "step_key")\n check.str_param(output_name, "output_name")\n for event in events:\n if (\n event.event_type == DagsterEventType.STEP_OUTPUT\n and event.step_key == step_key\n and event.step_output_data.output_name == output_name\n ):\n return event\n return None\n\n\n@whitelist_for_serdes\nclass AssetObservationData(\n NamedTuple("_AssetObservation", [("asset_observation", AssetObservation)])\n):\n def __new__(cls, asset_observation: AssetObservation):\n return super(AssetObservationData, cls).__new__(\n cls,\n asset_observation=check.inst_param(\n asset_observation, "asset_observation", AssetObservation\n ),\n )\n\n\n@whitelist_for_serdes\nclass StepMaterializationData(\n NamedTuple(\n "_StepMaterializationData",\n [\n ("materialization", AssetMaterialization),\n ("asset_lineage", Sequence[AssetLineageInfo]),\n ],\n )\n):\n def __new__(\n cls,\n materialization: AssetMaterialization,\n asset_lineage: Optional[Sequence[AssetLineageInfo]] = None,\n ):\n return super(StepMaterializationData, cls).__new__(\n cls,\n materialization=check.inst_param(\n materialization, "materialization", AssetMaterialization\n ),\n asset_lineage=check.opt_sequence_param(\n asset_lineage, "asset_lineage", of_type=AssetLineageInfo\n ),\n )\n\n\n@whitelist_for_serdes\nclass AssetMaterializationPlannedData(\n NamedTuple(\n "_AssetMaterializationPlannedData",\n [("asset_key", AssetKey), ("partition", Optional[str])],\n )\n):\n def __new__(cls, asset_key: AssetKey, partition: Optional[str] = None):\n return super(AssetMaterializationPlannedData, cls).__new__(\n cls,\n asset_key=check.inst_param(asset_key, "asset_key", AssetKey),\n partition=check.opt_str_param(partition, "partition"),\n )\n\n\n@whitelist_for_serdes\nclass StepExpectationResultData(\n NamedTuple(\n "_StepExpectationResultData",\n [\n ("expectation_result", ExpectationResult),\n ],\n )\n):\n def __new__(cls, expectation_result: ExpectationResult):\n return super(StepExpectationResultData, cls).__new__(\n cls,\n expectation_result=check.inst_param(\n expectation_result, "expectation_result", ExpectationResult\n ),\n )\n\n\n@whitelist_for_serdes(\n storage_field_names={"metadata": "metadata_entries"},\n field_serializers={"metadata": MetadataFieldSerializer},\n)\nclass ObjectStoreOperationResultData(\n NamedTuple(\n "_ObjectStoreOperationResultData",\n [\n ("op", ObjectStoreOperationType),\n ("value_name", Optional[str]),\n ("metadata", Mapping[str, MetadataValue]),\n ("address", Optional[str]),\n ("version", Optional[str]),\n ("mapping_key", Optional[str]),\n ],\n )\n):\n def __new__(\n cls,\n op: ObjectStoreOperationType,\n value_name: Optional[str] = None,\n metadata: Optional[Mapping[str, MetadataValue]] = None,\n address: Optional[str] = None,\n version: Optional[str] = None,\n mapping_key: Optional[str] = None,\n ):\n return super(ObjectStoreOperationResultData, cls).__new__(\n cls,\n op=cast(ObjectStoreOperationType, check.str_param(op, "op")),\n value_name=check.opt_str_param(value_name, "value_name"),\n metadata=normalize_metadata(\n check.opt_mapping_param(metadata, "metadata", key_type=str)\n ),\n address=check.opt_str_param(address, "address"),\n version=check.opt_str_param(version, "version"),\n mapping_key=check.opt_str_param(mapping_key, "mapping_key"),\n )\n\n\n@whitelist_for_serdes(\n 
storage_field_names={"metadata": "metadata_entries"},\n field_serializers={"metadata": MetadataFieldSerializer},\n)\nclass EngineEventData(\n NamedTuple(\n "_EngineEventData",\n [\n ("metadata", Mapping[str, MetadataValue]),\n ("error", Optional[SerializableErrorInfo]),\n ("marker_start", Optional[str]),\n ("marker_end", Optional[str]),\n ],\n )\n):\n # serdes log\n # * added optional error\n # * added marker_start / marker_end\n #\n def __new__(\n cls,\n metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n error: Optional[SerializableErrorInfo] = None,\n marker_start: Optional[str] = None,\n marker_end: Optional[str] = None,\n ):\n return super(EngineEventData, cls).__new__(\n cls,\n metadata=normalize_metadata(\n check.opt_mapping_param(metadata, "metadata", key_type=str)\n ),\n error=check.opt_inst_param(error, "error", SerializableErrorInfo),\n marker_start=check.opt_str_param(marker_start, "marker_start"),\n marker_end=check.opt_str_param(marker_end, "marker_end"),\n )\n\n @staticmethod\n def in_process(\n pid: int, step_keys_to_execute: Optional[Sequence[str]] = None\n ) -> "EngineEventData":\n return EngineEventData(\n metadata={\n "pid": MetadataValue.text(str(pid)),\n **(\n {"step_keys": MetadataValue.text(str(step_keys_to_execute))}\n if step_keys_to_execute\n else {}\n ),\n }\n )\n\n @staticmethod\n def multiprocess(\n pid: int, step_keys_to_execute: Optional[Sequence[str]] = None\n ) -> "EngineEventData":\n return EngineEventData(\n metadata={\n "pid": MetadataValue.text(str(pid)),\n **(\n {"step_keys": MetadataValue.text(str(step_keys_to_execute))}\n if step_keys_to_execute\n else {}\n ),\n }\n )\n\n @staticmethod\n def interrupted(steps_interrupted: Sequence[str]) -> "EngineEventData":\n return EngineEventData(\n metadata={"steps_interrupted": MetadataValue.text(str(steps_interrupted))}\n )\n\n @staticmethod\n def engine_error(error: SerializableErrorInfo) -> "EngineEventData":\n return EngineEventData(metadata={}, error=error)\n\n\n@whitelist_for_serdes(storage_name="PipelineFailureData")\nclass JobFailureData(\n NamedTuple(\n "_JobFailureData",\n [\n ("error", Optional[SerializableErrorInfo]),\n ],\n )\n):\n def __new__(cls, error: Optional[SerializableErrorInfo]):\n return super(JobFailureData, cls).__new__(\n cls, error=check.opt_inst_param(error, "error", SerializableErrorInfo)\n )\n\n\n@whitelist_for_serdes(storage_name="PipelineCanceledData")\nclass JobCanceledData(\n NamedTuple(\n "_JobCanceledData",\n [\n ("error", Optional[SerializableErrorInfo]),\n ],\n )\n):\n def __new__(cls, error: Optional[SerializableErrorInfo]):\n return super(JobCanceledData, cls).__new__(\n cls, error=check.opt_inst_param(error, "error", SerializableErrorInfo)\n )\n\n\n@whitelist_for_serdes\nclass HookErroredData(\n NamedTuple(\n "_HookErroredData",\n [\n ("error", SerializableErrorInfo),\n ],\n )\n):\n def __new__(cls, error: SerializableErrorInfo):\n return super(HookErroredData, cls).__new__(\n cls, error=check.inst_param(error, "error", SerializableErrorInfo)\n )\n\n\n@whitelist_for_serdes(\n storage_field_names={"metadata": "metadata_entries"},\n field_serializers={"metadata": MetadataFieldSerializer},\n)\nclass HandledOutputData(\n NamedTuple(\n "_HandledOutputData",\n [\n ("output_name", str),\n ("manager_key", str),\n ("metadata", Mapping[str, MetadataValue]),\n ],\n )\n):\n def __new__(\n cls,\n output_name: str,\n manager_key: str,\n metadata: Optional[Mapping[str, MetadataValue]] = None,\n ):\n return super(HandledOutputData, cls).__new__(\n cls,\n 
output_name=check.str_param(output_name, "output_name"),\n manager_key=check.str_param(manager_key, "manager_key"),\n metadata=normalize_metadata(\n check.opt_mapping_param(metadata, "metadata", key_type=str)\n ),\n )\n\n\n@whitelist_for_serdes(\n storage_field_names={"metadata": "metadata_entries"},\n field_serializers={"metadata": MetadataFieldSerializer},\n)\nclass LoadedInputData(\n NamedTuple(\n "_LoadedInputData",\n [\n ("input_name", str),\n ("manager_key", str),\n ("upstream_output_name", Optional[str]),\n ("upstream_step_key", Optional[str]),\n ("metadata", Mapping[str, MetadataValue]),\n ],\n )\n):\n def __new__(\n cls,\n input_name: str,\n manager_key: str,\n upstream_output_name: Optional[str] = None,\n upstream_step_key: Optional[str] = None,\n metadata: Optional[Mapping[str, MetadataValue]] = None,\n ):\n return super(LoadedInputData, cls).__new__(\n cls,\n input_name=check.str_param(input_name, "input_name"),\n manager_key=check.str_param(manager_key, "manager_key"),\n upstream_output_name=check.opt_str_param(upstream_output_name, "upstream_output_name"),\n upstream_step_key=check.opt_str_param(upstream_step_key, "upstream_step_key"),\n metadata=normalize_metadata(\n check.opt_mapping_param(metadata, "metadata", key_type=str)\n ),\n )\n\n\n@whitelist_for_serdes(storage_field_names={"file_key": "log_key"})\nclass ComputeLogsCaptureData(\n NamedTuple(\n "_ComputeLogsCaptureData",\n [\n ("file_key", str), # renamed log_key => file_key to avoid confusion\n ("step_keys", Sequence[str]),\n ("external_url", Optional[str]),\n ("external_stdout_url", Optional[str]),\n ("external_stderr_url", Optional[str]),\n ],\n )\n):\n def __new__(\n cls,\n file_key: str,\n step_keys: Sequence[str],\n external_url: Optional[str] = None,\n external_stdout_url: Optional[str] = None,\n external_stderr_url: Optional[str] = None,\n ):\n return super(ComputeLogsCaptureData, cls).__new__(\n cls,\n file_key=check.str_param(file_key, "file_key"),\n step_keys=check.opt_list_param(step_keys, "step_keys", of_type=str),\n external_url=check.opt_str_param(external_url, "external_url"),\n external_stdout_url=check.opt_str_param(external_stdout_url, "external_stdout_url"),\n external_stderr_url=check.opt_str_param(external_stderr_url, "external_stderr_url"),\n )\n\n\n###################################################################################################\n# THE GRAVEYARD\n#\n# -|- -|- -|-\n# | | |\n# _-'~~~~~`-_ . _-'~~~~~`-_ _-'~~~~~`-_\n# .' '. .' '. .' 
'.\n# | R I P | | R I P | | R I P |\n# | | | | | |\n# | Synthetic | | Asset | | Pipeline |\n# | Process | | Store | | Init |\n# | Events | | Operations | | Failures |\n# | | | | | |\n###################################################################################################\n\n\n# Old data structures referenced below\n# class AssetStoreOperationData(NamedTuple):\n# op: str\n# step_key: str\n# output_name: str\n# asset_store_key: str\n#\n#\n# class AssetStoreOperationType(Enum):\n# SET_ASSET = "SET_ASSET"\n# GET_ASSET = "GET_ASSET"\n#\n#\n# class PipelineInitFailureData(NamedTuple):\n# error: SerializableErrorInfo\n\n\ndef _handle_back_compat(\n event_type_value: str,\n event_specific_data: Optional[Dict[str, Any]],\n) -> Tuple[str, Optional[Dict[str, Any]]]:\n # transform old specific process events in to engine events\n if event_type_value in [\n "PIPELINE_PROCESS_START",\n "PIPELINE_PROCESS_STARTED",\n "PIPELINE_PROCESS_EXITED",\n ]:\n return "ENGINE_EVENT", {"__class__": "EngineEventData"}\n\n # changes asset store ops in to get/set asset\n elif event_type_value == "ASSET_STORE_OPERATION":\n assert (\n event_specific_data is not None\n ), "ASSET_STORE_OPERATION event must have specific data"\n if event_specific_data["op"] in (\n "GET_ASSET",\n '{"__enum__": "AssetStoreOperationType.GET_ASSET"}',\n ):\n return (\n "LOADED_INPUT",\n {\n "__class__": "LoadedInputData",\n "input_name": event_specific_data["output_name"],\n "manager_key": event_specific_data["asset_store_key"],\n },\n )\n if event_specific_data["op"] in (\n "SET_ASSET",\n '{"__enum__": "AssetStoreOperationType.SET_ASSET"}',\n ):\n return (\n "HANDLED_OUTPUT",\n {\n "__class__": "HandledOutputData",\n "output_name": event_specific_data["output_name"],\n "manager_key": event_specific_data["asset_store_key"],\n },\n )\n\n # previous name for ASSET_MATERIALIZATION was STEP_MATERIALIZATION\n if event_type_value == "STEP_MATERIALIZATION":\n assert event_specific_data is not None, "STEP_MATERIALIZATION event must have specific data"\n return "ASSET_MATERIALIZATION", event_specific_data\n\n # transform PIPELINE_INIT_FAILURE to PIPELINE_FAILURE\n if event_type_value == "PIPELINE_INIT_FAILURE":\n assert (\n event_specific_data is not None\n ), "PIPELINE_INIT_FAILURE event must have specific data"\n return "PIPELINE_FAILURE", {\n "__class__": "PipelineFailureData",\n "error": event_specific_data.get("error"),\n }\n\n return event_type_value, event_specific_data\n
", "current_page_name": "_modules/dagster/_core/events", "customsidebar": null, "favicon_url": null, "log": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.events.log

\nfrom typing import Mapping, NamedTuple, Optional, Union\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, public\nfrom dagster._core.definitions.events import AssetMaterialization, AssetObservation\nfrom dagster._core.events import DagsterEvent, DagsterEventType\nfrom dagster._core.utils import coerce_valid_log_level\nfrom dagster._serdes.serdes import (\n    deserialize_value,\n    serialize_value,\n    whitelist_for_serdes,\n)\nfrom dagster._utils.error import SerializableErrorInfo\nfrom dagster._utils.log import (\n    JsonEventLoggerHandler,\n    StructuredLoggerHandler,\n    StructuredLoggerMessage,\n    construct_single_handler_logger,\n)\n\n\n
[docs]@whitelist_for_serdes(\n # These were originally distinguished from each other but ended up being empty subclasses\n # of EventLogEntry -- instead of using the subclasses we were relying on\n # EventLogEntry.is_dagster_event to distinguish events that originate in the logging\n # machinery from events that are yielded by user code\n old_storage_names={"DagsterEventRecord", "LogMessageRecord", "EventRecord"},\n old_fields={"message": ""},\n storage_field_names={"job_name": "pipeline_name"},\n)\nclass EventLogEntry(\n NamedTuple(\n "_EventLogEntry",\n [\n ("error_info", PublicAttr[Optional[SerializableErrorInfo]]),\n ("level", PublicAttr[Union[str, int]]),\n ("user_message", PublicAttr[str]),\n ("run_id", PublicAttr[str]),\n ("timestamp", PublicAttr[float]),\n ("step_key", PublicAttr[Optional[str]]),\n ("job_name", PublicAttr[Optional[str]]),\n ("dagster_event", PublicAttr[Optional[DagsterEvent]]),\n ],\n )\n):\n """Entries in the event log.\n\n Users should not instantiate this object directly. These entries may originate from the logging machinery (DagsterLogManager/context.log), from\n framework events (e.g. EngineEvent), or they may correspond to events yielded by user code\n (e.g. Output).\n\n Args:\n error_info (Optional[SerializableErrorInfo]): Error info for an associated exception, if\n any, as generated by serializable_error_info_from_exc_info and friends.\n level (Union[str, int]): The Python log level at which to log this event. Note that\n framework and user code events are also logged to Python logging. This value may be an\n integer or a (case-insensitive) string member of PYTHON_LOGGING_LEVELS_NAMES.\n user_message (str): For log messages, this is the user-generated message.\n run_id (str): The id of the run which generated this event.\n timestamp (float): The Unix timestamp of this event.\n step_key (Optional[str]): The step key for the step which generated this event. Some events\n are generated outside of a step context.\n job_name (Optional[str]): The job which generated this event. Some events are\n generated outside of a job context.\n dagster_event (Optional[DagsterEvent]): For framework and user events, the associated\n structured event.\n """\n\n def __new__(\n cls,\n error_info,\n level,\n user_message,\n run_id,\n timestamp,\n step_key=None,\n job_name=None,\n dagster_event=None,\n ):\n return super(EventLogEntry, cls).__new__(\n cls,\n check.opt_inst_param(error_info, "error_info", SerializableErrorInfo),\n coerce_valid_log_level(level),\n check.str_param(user_message, "user_message"),\n check.str_param(run_id, "run_id"),\n check.float_param(timestamp, "timestamp"),\n check.opt_str_param(step_key, "step_key"),\n check.opt_str_param(job_name, "job_name"),\n check.opt_inst_param(dagster_event, "dagster_event", DagsterEvent),\n )\n\n @public\n @property\n def is_dagster_event(self) -> bool:\n """bool: If this entry contains a DagsterEvent."""\n return bool(self.dagster_event)\n\n
[docs] @public\n def get_dagster_event(self) -> DagsterEvent:\n """DagsterEvent: Returns the DagsterEvent contained within this entry. If this entry does not\n contain a DagsterEvent, an error will be raised.\n """\n if not isinstance(self.dagster_event, DagsterEvent):\n check.failed(\n "Not a dagster event, check is_dagster_event before calling get_dagster_event",\n )\n\n return self.dagster_event
\n\n def to_json(self):\n return serialize_value(self)\n\n @staticmethod\n def from_json(json_str: str):\n return deserialize_value(json_str, EventLogEntry)\n\n @public\n @property\n def dagster_event_type(self) -> Optional[DagsterEventType]:\n """Optional[DagsterEventType]: The type of the DagsterEvent contained by this entry, if any."""\n return self.dagster_event.event_type if self.dagster_event else None\n\n @public\n @property\n def message(self) -> str:\n """Return the message from the structured DagsterEvent if present, fallback to user_message."""\n if self.is_dagster_event:\n msg = self.get_dagster_event().message\n if msg is not None:\n return msg\n\n return self.user_message\n\n @property\n def asset_materialization(self) -> Optional[AssetMaterialization]:\n if (\n self.dagster_event\n and self.dagster_event.event_type_value == DagsterEventType.ASSET_MATERIALIZATION\n ):\n materialization = self.dagster_event.step_materialization_data.materialization\n if isinstance(materialization, AssetMaterialization):\n return materialization\n\n return None\n\n @property\n def asset_observation(self) -> Optional[AssetObservation]:\n if (\n self.dagster_event\n and self.dagster_event.event_type_value == DagsterEventType.ASSET_OBSERVATION\n ):\n observation = self.dagster_event.asset_observation_data.asset_observation\n if isinstance(observation, AssetObservation):\n return observation\n\n return None\n\n @property\n def tags(self) -> Optional[Mapping[str, str]]:\n materialization = self.asset_materialization\n if materialization:\n return materialization.tags\n\n observation = self.asset_observation\n if observation:\n return observation.tags\n\n return None
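A rough usage sketch of these accessors when reading a run's event log back from an instance (logging_demo_job and do_nothing are hypothetical, and this assumes DagsterInstance.all_logs(run_id) yields these EventLogEntry objects):

from dagster import DagsterInstance, job, op


@op
def do_nothing() -> None:
    pass


@job
def logging_demo_job():
    do_nothing()


with DagsterInstance.ephemeral() as instance:
    run_id = logging_demo_job.execute_in_process(instance=instance).run_id
    for entry in instance.all_logs(run_id):
        # Framework and user events carry a structured DagsterEvent; plain log lines do not.
        kind = entry.dagster_event_type.value if entry.is_dagster_event else "log"
        print(kind, entry.message)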
\n\n\ndef construct_event_record(logger_message: StructuredLoggerMessage) -> EventLogEntry:\n check.inst_param(logger_message, "logger_message", StructuredLoggerMessage)\n\n return EventLogEntry(\n level=logger_message.level,\n user_message=logger_message.meta["orig_message"],\n run_id=logger_message.meta["run_id"],\n timestamp=logger_message.record.created,\n step_key=logger_message.meta.get("step_key"),\n job_name=logger_message.meta.get("job_name"),\n dagster_event=logger_message.meta.get("dagster_event"),\n error_info=None,\n )\n\n\ndef construct_event_logger(event_record_callback):\n """Callback receives a stream of event_records. Piggybacks on the logging machinery."""\n check.callable_param(event_record_callback, "event_record_callback")\n\n return construct_single_handler_logger(\n "event-logger",\n "debug",\n StructuredLoggerHandler(\n lambda logger_message: event_record_callback(construct_event_record(logger_message))\n ),\n )\n\n\ndef construct_json_event_logger(json_path):\n """Record a stream of event records to json."""\n check.str_param(json_path, "json_path")\n return construct_single_handler_logger(\n "json-event-record-logger",\n "debug",\n JsonEventLoggerHandler(\n json_path,\n lambda record: construct_event_record(\n StructuredLoggerMessage(\n name=record.name,\n message=record.msg,\n level=record.levelno,\n meta=record.dagster_meta,\n record=record,\n )\n ),\n ),\n )\n
", "current_page_name": "_modules/dagster/_core/events/log", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}, {"link": "../", "title": "dagster._core.events"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.events.log"}, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.events"}, "execution": {"api": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.execution.api

\nimport sys\nfrom contextlib import contextmanager\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Callable,\n    Dict,\n    Iterator,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Tuple,\n    Union,\n    cast,\n)\n\nimport dagster._check as check\nfrom dagster._core.definitions import IJob, JobDefinition\nfrom dagster._core.definitions.events import AssetKey\nfrom dagster._core.definitions.job_base import InMemoryJob\nfrom dagster._core.definitions.reconstruct import ReconstructableJob\nfrom dagster._core.definitions.repository_definition import RepositoryLoadData\nfrom dagster._core.errors import DagsterExecutionInterruptedError, DagsterInvariantViolationError\nfrom dagster._core.events import DagsterEvent, EngineEventData\nfrom dagster._core.execution.context.system import PlanOrchestrationContext\nfrom dagster._core.execution.plan.execute_plan import inner_plan_execution_iterator\nfrom dagster._core.execution.plan.plan import ExecutionPlan\nfrom dagster._core.execution.plan.state import KnownExecutionState\nfrom dagster._core.execution.retries import RetryMode\nfrom dagster._core.instance import DagsterInstance, InstanceRef\nfrom dagster._core.selector import parse_step_selection\nfrom dagster._core.storage.dagster_run import DagsterRun, DagsterRunStatus\nfrom dagster._core.system_config.objects import ResolvedRunConfig\nfrom dagster._core.telemetry import log_dagster_event, log_repo_stats, telemetry_wrapper\nfrom dagster._utils.error import serializable_error_info_from_exc_info\nfrom dagster._utils.interrupts import capture_interrupts\nfrom dagster._utils.merger import merge_dicts\n\nfrom .context_creation_job import (\n    ExecutionContextManager,\n    PlanExecutionContextManager,\n    PlanOrchestrationContextManager,\n    orchestration_context_event_generator,\n    scoped_job_context,\n)\nfrom .job_execution_result import JobExecutionResult\n\nif TYPE_CHECKING:\n    from dagster._core.execution.plan.outputs import StepOutputHandle\n\n## Brief guide to the execution APIs\n# | function name               | operates over      | sync  | supports    | creates new DagsterRun  |\n# |                             |                    |       | reexecution | in instance             |\n# | --------------------------- | ------------------ | ----- | ----------- | ----------------------- |\n# | execute_job                 | ReconstructableJob | sync  | yes         | yes                     |\n# | execute_run_iterator        | DagsterRun         | async | (1)         | no                      |\n# | execute_run                 | DagsterRun         | sync  | (1)         | no                      |\n# | execute_plan_iterator       | ExecutionPlan      | async | (2)         | no                      |\n# | execute_plan                | ExecutionPlan      | sync  | (2)         | no                      |\n#\n# Notes on reexecution support:\n# (1) The appropriate bits must be set on the DagsterRun passed to this function. 
Specifically,\n#     parent_run_id and root_run_id must be set and consistent, and if a resolved_op_selection or\n#     step_keys_to_execute are set they must be consistent with the parent and root runs.\n# (2) As for (1), but the ExecutionPlan passed must also agree in all relevant bits.\n\n\ndef execute_run_iterator(\n    job: IJob,\n    dagster_run: DagsterRun,\n    instance: DagsterInstance,\n    resume_from_failure: bool = False,\n) -> Iterator[DagsterEvent]:\n    check.inst_param(job, "job", IJob)\n    check.inst_param(dagster_run, "dagster_run", DagsterRun)\n    check.inst_param(instance, "instance", DagsterInstance)\n\n    if dagster_run.status == DagsterRunStatus.CANCELED:\n        # This can happen if the run was force-terminated while it was starting\n        def gen_execute_on_cancel():\n            yield instance.report_engine_event(\n                "Not starting execution since the run was canceled before execution could start",\n                dagster_run,\n            )\n\n        return gen_execute_on_cancel()\n\n    if not resume_from_failure:\n        if dagster_run.status not in (DagsterRunStatus.NOT_STARTED, DagsterRunStatus.STARTING):\n            if dagster_run.is_finished:\n\n                def gen_ignore_duplicate_run_worker():\n                    yield instance.report_engine_event(\n                        "Ignoring a run worker that started after the run had already finished.",\n                        dagster_run,\n                    )\n\n                return gen_ignore_duplicate_run_worker()\n            elif instance.run_monitoring_enabled:\n                # This can happen if the pod was unexpectedly restarted by the cluster - ignore it since\n                # the run monitoring daemon will also spin up a new pod\n                def gen_ignore_duplicate_run_worker():\n                    yield instance.report_engine_event(\n                        "Ignoring a duplicate run that was started from somewhere other than"\n                        " the run monitor daemon",\n                        dagster_run,\n                    )\n\n                return gen_ignore_duplicate_run_worker()\n            else:\n\n                def gen_fail_restarted_run_worker():\n                    yield instance.report_engine_event(\n                        f"{dagster_run.job_name} ({dagster_run.run_id}) started a new"\n                        f" run worker while the run was already in state {dagster_run.status}."\n                        " This most frequently happens when the run worker unexpectedly stops"\n                        " and is restarted by the cluster. 
Marking the run as failed.",\n                        dagster_run,\n                    )\n                    yield instance.report_run_failed(dagster_run)\n\n                return gen_fail_restarted_run_worker()\n\n    else:\n        check.invariant(\n            dagster_run.status == DagsterRunStatus.STARTED\n            or dagster_run.status == DagsterRunStatus.STARTING,\n            desc=(\n                "Run of {} ({}) in state {}, expected STARTED or STARTING because it's "\n                "resuming from a run worker failure".format(\n                    dagster_run.job_name, dagster_run.run_id, dagster_run.status\n                )\n            ),\n        )\n\n    if (\n        dagster_run.resolved_op_selection\n        or dagster_run.asset_selection\n        or dagster_run.asset_check_selection\n    ):\n        # when `execute_run_iterator` is directly called, the sub pipeline hasn't been created\n        # note that when we receive the solids to execute via DagsterRun, it won't support\n        # solid selection query syntax\n        job = job.get_subset(\n            op_selection=(\n                list(dagster_run.resolved_op_selection)\n                if dagster_run.resolved_op_selection\n                else None\n            ),\n            asset_selection=dagster_run.asset_selection,\n            asset_check_selection=dagster_run.asset_check_selection,\n        )\n\n    execution_plan = _get_execution_plan_from_run(job, dagster_run, instance)\n    if isinstance(job, ReconstructableJob):\n        job = job.with_repository_load_data(execution_plan.repository_load_data)\n\n    return iter(\n        ExecuteRunWithPlanIterable(\n            execution_plan=execution_plan,\n            iterator=job_execution_iterator,\n            execution_context_manager=PlanOrchestrationContextManager(\n                context_event_generator=orchestration_context_event_generator,\n                job=job,\n                execution_plan=execution_plan,\n                dagster_run=dagster_run,\n                instance=instance,\n                run_config=dagster_run.run_config,\n                raise_on_error=False,\n                executor_defs=None,\n                output_capture=None,\n                resume_from_failure=resume_from_failure,\n            ),\n        )\n    )\n\n\ndef execute_run(\n    job: IJob,\n    dagster_run: DagsterRun,\n    instance: DagsterInstance,\n    raise_on_error: bool = False,\n) -> JobExecutionResult:\n    """Executes an existing job run synchronously.\n\n    Synchronous version of execute_run_iterator.\n\n    Args:\n        job (IJob): The pipeline to execute.\n        dagster_run (DagsterRun): The run to execute\n        instance (DagsterInstance): The instance in which the run has been created.\n        raise_on_error (Optional[bool]): Whether or not to raise exceptions when they occur.\n            Defaults to ``False``.\n\n    Returns:\n        JobExecutionResult: The result of the execution.\n    """\n    if isinstance(job, JobDefinition):\n        raise DagsterInvariantViolationError(\n            "execute_run requires a reconstructable job but received job definition directly"\n            " instead. To support hand-off to other processes please wrap your definition in a call"\n            " to reconstructable(). 
Learn more about reconstructable here:"\n            " https://docs.dagster.io/_apidocs/execution#dagster.reconstructable"\n        )\n\n    check.inst_param(job, "job", IJob)\n    check.inst_param(dagster_run, "dagster_run", DagsterRun)\n    check.inst_param(instance, "instance", DagsterInstance)\n\n    if dagster_run.status == DagsterRunStatus.CANCELED:\n        message = "Not starting execution since the run was canceled before execution could start"\n        instance.report_engine_event(\n            message,\n            dagster_run,\n        )\n        raise DagsterInvariantViolationError(message)\n\n    check.invariant(\n        dagster_run.status == DagsterRunStatus.NOT_STARTED\n        or dagster_run.status == DagsterRunStatus.STARTING,\n        desc="Run {} ({}) in state {}, expected NOT_STARTED or STARTING".format(\n            dagster_run.job_name, dagster_run.run_id, dagster_run.status\n        ),\n    )\n    if (\n        dagster_run.resolved_op_selection\n        or dagster_run.asset_selection\n        or dagster_run.asset_check_selection\n    ):\n        # when `execute_run` is directly called, the sub job hasn't been created\n        # note that when we receive the solids to execute via DagsterRun, it won't support\n        # solid selection query syntax\n        job = job.get_subset(\n            op_selection=(\n                list(dagster_run.resolved_op_selection)\n                if dagster_run.resolved_op_selection\n                else None\n            ),\n            asset_selection=dagster_run.asset_selection,\n            asset_check_selection=dagster_run.asset_check_selection,\n        )\n\n    execution_plan = _get_execution_plan_from_run(job, dagster_run, instance)\n    if isinstance(job, ReconstructableJob):\n        job = job.with_repository_load_data(execution_plan.repository_load_data)\n\n    output_capture: Optional[Dict[StepOutputHandle, Any]] = {}\n\n    _execute_run_iterable = ExecuteRunWithPlanIterable(\n        execution_plan=execution_plan,\n        iterator=job_execution_iterator,\n        execution_context_manager=PlanOrchestrationContextManager(\n            context_event_generator=orchestration_context_event_generator,\n            job=job,\n            execution_plan=execution_plan,\n            dagster_run=dagster_run,\n            instance=instance,\n            run_config=dagster_run.run_config,\n            raise_on_error=raise_on_error,\n            executor_defs=None,\n            output_capture=output_capture,\n        ),\n    )\n    event_list = list(_execute_run_iterable)\n\n    # We need to reload the run object after execution for it to be accurate\n    reloaded_dagster_run = check.not_none(instance.get_run_by_id(dagster_run.run_id))\n\n    return JobExecutionResult(\n        job.get_definition(),\n        scoped_job_context(\n            execution_plan,\n            job,\n            reloaded_dagster_run.run_config,\n            reloaded_dagster_run,\n            instance,\n        ),\n        event_list,\n        reloaded_dagster_run,\n    )\n\n\n@contextmanager\ndef ephemeral_instance_if_missing(\n    instance: Optional[DagsterInstance],\n) -> Iterator[DagsterInstance]:\n    if instance:\n        yield instance\n    else:\n        with DagsterInstance.ephemeral() as ephemeral_instance:\n            yield ephemeral_instance\n\n\n
[docs]class ReexecutionOptions(NamedTuple):\n """Reexecution options for python-based execution in Dagster.\n\n Args:\n parent_run_id (str): The run_id of the run to reexecute.\n step_selection (Sequence[str]):\n The list of step selections to reexecute. Must be a subset or match of the\n set of steps executed in the original run. For example:\n\n - ``['some_op']``: selects ``some_op`` itself.\n - ``['*some_op']``: select ``some_op`` and all its ancestors (upstream dependencies).\n - ``['*some_op+++']``: select ``some_op``, all its ancestors, and its descendants\n (downstream dependencies) within 3 levels down.\n - ``['*some_op', 'other_op_a', 'other_op_b+']``: select ``some_op`` and all its\n ancestors, ``other_op_a`` itself, and ``other_op_b`` and its direct child ops.\n """\n\n parent_run_id: str\n step_selection: Sequence[str] = []\n\n @staticmethod\n def from_failure(run_id: str, instance: DagsterInstance) -> "ReexecutionOptions":\n """Creates reexecution options from a failed run.\n\n Args:\n run_id (str): The run_id of the failed run. Run must fail in order to be reexecuted.\n instance (DagsterInstance): The DagsterInstance that the original run occurred in.\n\n Returns:\n ReexecutionOptions: Reexecution options to pass to a python execution.\n """\n from dagster._core.execution.plan.state import KnownExecutionState\n\n parent_run = check.not_none(instance.get_run_by_id(run_id))\n check.invariant(\n parent_run.status == DagsterRunStatus.FAILURE,\n "Cannot reexecute from failure a run that is not failed",\n )\n # Tried to thread through KnownExecutionState to execution plan creation, but little benefit.\n # It is recalculated later by the re-execution machinery.\n step_keys_to_execute, _ = KnownExecutionState.build_resume_retry_reexecution(\n instance, parent_run=cast(DagsterRun, instance.get_run_by_id(run_id))\n )\n return ReexecutionOptions(parent_run_id=run_id, step_selection=step_keys_to_execute)
\n\n\n
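To make the step-selection syntax above concrete, here is a small hedged sketch that builds ``ReexecutionOptions`` both from a failed run and from an explicit step selection, then passes them to ``execute_job`` (defined next). The run id and ``the_job`` are placeholders, and the instance is assumed to be persistent.

.. code-block:: python

    from dagster import DagsterInstance, ReexecutionOptions, execute_job, reconstructable

    instance = DagsterInstance.get()

    # Placeholder run id of a failed run; from_failure re-runs only the failed steps.
    options = ReexecutionOptions.from_failure(run_id="<failed_run_id>", instance=instance)

    # Alternatively, pick steps explicitly using the selection syntax documented above.
    options = ReexecutionOptions(parent_run_id="<failed_run_id>", step_selection=["*some_op"])

    result = execute_job(reconstructable(the_job), instance, reexecution_options=options)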
[docs]def execute_job(\n job: ReconstructableJob,\n instance: "DagsterInstance",\n run_config: Any = None,\n tags: Optional[Mapping[str, Any]] = None,\n raise_on_error: bool = False,\n op_selection: Optional[Sequence[str]] = None,\n reexecution_options: Optional[ReexecutionOptions] = None,\n asset_selection: Optional[Sequence[AssetKey]] = None,\n) -> JobExecutionResult:\n """Execute a job synchronously.\n\n This API represents dagster's python entrypoint for out-of-process\n execution. For most testing purposes, :py:meth:`~dagster.JobDefinition.\n execute_in_process` will be more suitable, but when wanting to run\n execution using an out-of-process executor (such as :py:class:`dagster.\n multiprocess_executor`), then `execute_job` is suitable.\n\n `execute_job` expects a persistent :py:class:`DagsterInstance` for\n execution, meaning the `$DAGSTER_HOME` environment variable must be set.\n It also expects a reconstructable pointer to a :py:class:`JobDefinition` so\n that it can be reconstructed in separate processes. This can be done by\n wrapping the ``JobDefinition`` in a call to :py:func:`dagster.\n reconstructable`.\n\n .. code-block:: python\n\n from dagster import DagsterInstance, execute_job, job, reconstructable\n\n @job\n def the_job():\n ...\n\n instance = DagsterInstance.get()\n result = execute_job(reconstructable(the_job), instance=instance)\n assert result.success\n\n\n If using the :py:meth:`~dagster.GraphDefinition.to_job` method to\n construct the ``JobDefinition``, then the invocation must be wrapped in a\n module-scope function, which can be passed to ``reconstructable``.\n\n .. code-block:: python\n\n from dagster import graph, reconstructable\n\n @graph\n def the_graph():\n ...\n\n def define_job():\n return the_graph.to_job(...)\n\n result = execute_job(reconstructable(define_job), ...)\n\n Since `execute_job` is potentially executing outside of the current\n process, output objects need to be retrieved by use of the provided job's\n io managers. Output objects can be retrieved by opening the result of\n `execute_job` as a context manager.\n\n .. code-block:: python\n\n from dagster import execute_job\n\n with execute_job(...) as result:\n output_obj = result.output_for_node("some_op")\n\n ``execute_job`` can also be used to reexecute a run, by providing a :py:class:`ReexecutionOptions` object.\n\n .. code-block:: python\n\n from dagster import ReexecutionOptions, execute_job\n\n instance = DagsterInstance.get()\n\n options = ReexecutionOptions.from_failure(run_id=failed_run_id, instance)\n execute_job(reconstructable(job), instance, reexecution_options=options)\n\n Parameters:\n job (ReconstructableJob): A reconstructable pointer to a :py:class:`JobDefinition`.\n instance (DagsterInstance): The instance to execute against.\n run_config (Optional[dict]): The configuration that parametrizes this run, as a dict.\n tags (Optional[Dict[str, Any]]): Arbitrary key-value pairs that will be added to run logs.\n raise_on_error (Optional[bool]): Whether or not to raise exceptions when they occur.\n Defaults to ``False``.\n op_selection (Optional[List[str]]): A list of op selection queries (including single\n op names) to execute. 
For example:\n\n - ``['some_op']``: selects ``some_op`` itself.\n - ``['*some_op']``: select ``some_op`` and all its ancestors (upstream dependencies).\n - ``['*some_op+++']``: select ``some_op``, all its ancestors, and its descendants\n (downstream dependencies) within 3 levels down.\n - ``['*some_op', 'other_op_a', 'other_op_b+']``: select ``some_op`` and all its\n ancestors, ``other_op_a`` itself, and ``other_op_b`` and its direct child ops.\n reexecution_options (Optional[ReexecutionOptions]):\n Reexecution options to provide to the run, if this run is\n intended to be a reexecution of a previous run. Cannot be used in\n tandem with the ``op_selection`` argument.\n\n Returns:\n :py:class:`JobExecutionResult`: The result of job execution.\n """\n check.inst_param(job, "job", ReconstructableJob)\n check.inst_param(instance, "instance", DagsterInstance)\n check.opt_sequence_param(asset_selection, "asset_selection", of_type=AssetKey)\n\n # get the repository load data here because we call job.get_definition() later in this fn\n job_def, _ = _job_with_repository_load_data(job)\n\n if reexecution_options is not None and op_selection is not None:\n raise DagsterInvariantViolationError(\n "re-execution and op selection cannot be used together at this time."\n )\n\n if reexecution_options:\n if run_config is None:\n run = check.not_none(instance.get_run_by_id(reexecution_options.parent_run_id))\n run_config = run.run_config\n return _reexecute_job(\n job_arg=job_def,\n parent_run_id=reexecution_options.parent_run_id,\n run_config=run_config,\n step_selection=list(reexecution_options.step_selection),\n tags=tags,\n instance=instance,\n raise_on_error=raise_on_error,\n )\n else:\n return _logged_execute_job(\n job_arg=job_def,\n instance=instance,\n run_config=run_config,\n tags=tags,\n op_selection=op_selection,\n raise_on_error=raise_on_error,\n asset_selection=asset_selection,\n )
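The docstring above covers reexecution and output retrieval; as a further hedged sketch, the call below combines ``op_selection``, ``tags``, and ``run_config`` in one invocation. ``the_job``, the op name, the tag, and the config values are all hypothetical.

.. code-block:: python

    from dagster import DagsterInstance, execute_job, reconstructable

    result = execute_job(
        reconstructable(the_job),          # the_job is a placeholder module-scope @job
        instance=DagsterInstance.get(),
        op_selection=["some_op+"],         # some_op and its direct children
        tags={"team": "data-platform"},    # arbitrary run tags
        run_config={"ops": {"some_op": {"config": {"limit": 10}}}},  # hypothetical op config
    )
    if not result.success:
        raise Exception("execute_job run failed")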
\n\n\n@telemetry_wrapper\ndef _logged_execute_job(\n job_arg: Union[IJob, JobDefinition],\n instance: DagsterInstance,\n run_config: Optional[Mapping[str, object]] = None,\n tags: Optional[Mapping[str, str]] = None,\n op_selection: Optional[Sequence[str]] = None,\n raise_on_error: bool = True,\n asset_selection: Optional[Sequence[AssetKey]] = None,\n) -> JobExecutionResult:\n check.inst_param(instance, "instance", DagsterInstance)\n\n job_arg, repository_load_data = _job_with_repository_load_data(job_arg)\n\n (\n job_arg,\n run_config,\n tags,\n resolved_op_selection,\n op_selection,\n ) = _check_execute_job_args(\n job_arg=job_arg,\n run_config=run_config,\n tags=tags,\n op_selection=op_selection,\n )\n\n log_repo_stats(instance=instance, job=job_arg, source="execute_pipeline")\n\n dagster_run = instance.create_run_for_job(\n job_def=job_arg.get_definition(),\n run_config=run_config,\n op_selection=op_selection,\n resolved_op_selection=resolved_op_selection,\n tags=tags,\n job_code_origin=(\n job_arg.get_python_origin() if isinstance(job_arg, ReconstructableJob) else None\n ),\n repository_load_data=repository_load_data,\n asset_selection=frozenset(asset_selection) if asset_selection else None,\n )\n\n return execute_run(\n job_arg,\n dagster_run,\n instance,\n raise_on_error=raise_on_error,\n )\n\n\ndef _reexecute_job(\n job_arg: Union[IJob, JobDefinition],\n parent_run_id: str,\n run_config: Optional[Mapping[str, object]] = None,\n step_selection: Optional[Sequence[str]] = None,\n tags: Optional[Mapping[str, str]] = None,\n instance: Optional[DagsterInstance] = None,\n raise_on_error: bool = True,\n) -> JobExecutionResult:\n """Reexecute an existing job run."""\n check.opt_sequence_param(step_selection, "step_selection", of_type=str)\n\n check.str_param(parent_run_id, "parent_run_id")\n\n with ephemeral_instance_if_missing(instance) as execute_instance:\n job_arg, repository_load_data = _job_with_repository_load_data(job_arg)\n\n (job_arg, run_config, tags, _, _) = _check_execute_job_args(\n job_arg=job_arg,\n run_config=run_config,\n tags=tags,\n )\n\n parent_dagster_run = execute_instance.get_run_by_id(parent_run_id)\n if parent_dagster_run is None:\n check.failed(\n f"No parent run with id {parent_run_id} found in instance.",\n )\n\n execution_plan: Optional[ExecutionPlan] = None\n # resolve step selection DSL queries using parent execution information\n if step_selection:\n execution_plan = _resolve_reexecute_step_selection(\n execute_instance,\n job_arg,\n run_config,\n cast(DagsterRun, parent_dagster_run),\n step_selection,\n )\n\n if parent_dagster_run.asset_selection:\n job_arg = job_arg.get_subset(\n op_selection=None, asset_selection=parent_dagster_run.asset_selection\n )\n\n dagster_run = execute_instance.create_run_for_job(\n job_def=job_arg.get_definition(),\n execution_plan=execution_plan,\n run_config=run_config,\n tags=tags,\n op_selection=parent_dagster_run.op_selection,\n asset_selection=parent_dagster_run.asset_selection,\n resolved_op_selection=parent_dagster_run.resolved_op_selection,\n root_run_id=parent_dagster_run.root_run_id or parent_dagster_run.run_id,\n parent_run_id=parent_dagster_run.run_id,\n job_code_origin=(\n job_arg.get_python_origin() if isinstance(job_arg, ReconstructableJob) else None\n ),\n repository_load_data=repository_load_data,\n )\n\n return execute_run(\n job_arg,\n dagster_run,\n execute_instance,\n raise_on_error=raise_on_error,\n )\n check.failed("Should not reach here.")\n\n\ndef execute_plan_iterator(\n execution_plan: ExecutionPlan,\n 
job: IJob,\n dagster_run: DagsterRun,\n instance: DagsterInstance,\n retry_mode: Optional[RetryMode] = None,\n run_config: Optional[Mapping[str, object]] = None,\n) -> Iterator[DagsterEvent]:\n check.inst_param(execution_plan, "execution_plan", ExecutionPlan)\n check.inst_param(job, "job", IJob)\n check.inst_param(dagster_run, "dagster_run", DagsterRun)\n check.inst_param(instance, "instance", DagsterInstance)\n retry_mode = check.opt_inst_param(retry_mode, "retry_mode", RetryMode, RetryMode.DISABLED)\n run_config = check.opt_mapping_param(run_config, "run_config")\n\n if isinstance(job, ReconstructableJob):\n job = job.with_repository_load_data(execution_plan.repository_load_data)\n\n return iter(\n ExecuteRunWithPlanIterable(\n execution_plan=execution_plan,\n iterator=inner_plan_execution_iterator,\n execution_context_manager=PlanExecutionContextManager(\n job=job,\n retry_mode=retry_mode,\n execution_plan=execution_plan,\n run_config=run_config,\n dagster_run=dagster_run,\n instance=instance,\n ),\n )\n )\n\n\ndef execute_plan(\n execution_plan: ExecutionPlan,\n job: IJob,\n instance: DagsterInstance,\n dagster_run: DagsterRun,\n run_config: Optional[Mapping[str, object]] = None,\n retry_mode: Optional[RetryMode] = None,\n) -> Sequence[DagsterEvent]:\n """This is the entry point of dagster-graphql executions. For the dagster CLI entry point, see\n execute_job() above.\n """\n check.inst_param(execution_plan, "execution_plan", ExecutionPlan)\n check.inst_param(job, "job", IJob)\n check.inst_param(instance, "instance", DagsterInstance)\n check.inst_param(dagster_run, "dagster_run", DagsterRun)\n run_config = check.opt_mapping_param(run_config, "run_config")\n check.opt_inst_param(retry_mode, "retry_mode", RetryMode)\n\n return list(\n execute_plan_iterator(\n execution_plan=execution_plan,\n job=job,\n run_config=run_config,\n dagster_run=dagster_run,\n instance=instance,\n retry_mode=retry_mode,\n )\n )\n\n\ndef _get_execution_plan_from_run(\n job: IJob,\n dagster_run: DagsterRun,\n instance: DagsterInstance,\n) -> ExecutionPlan:\n execution_plan_snapshot = (\n instance.get_execution_plan_snapshot(dagster_run.execution_plan_snapshot_id)\n if dagster_run.execution_plan_snapshot_id\n else None\n )\n\n # Rebuild from snapshot if able and selection has not changed\n if (\n execution_plan_snapshot is not None\n and execution_plan_snapshot.can_reconstruct_plan\n and job.resolved_op_selection == dagster_run.resolved_op_selection\n and job.asset_selection == dagster_run.asset_selection\n and job.asset_check_selection == dagster_run.asset_check_selection\n ):\n return ExecutionPlan.rebuild_from_snapshot(\n dagster_run.job_name,\n execution_plan_snapshot,\n )\n\n return create_execution_plan(\n job,\n run_config=dagster_run.run_config,\n step_keys_to_execute=dagster_run.step_keys_to_execute,\n instance_ref=instance.get_ref() if instance.is_persistent else None,\n repository_load_data=(\n execution_plan_snapshot.repository_load_data if execution_plan_snapshot else None\n ),\n known_state=(\n execution_plan_snapshot.initial_known_state if execution_plan_snapshot else None\n ),\n )\n\n\ndef create_execution_plan(\n job: Union[IJob, JobDefinition],\n run_config: Optional[Mapping[str, object]] = None,\n step_keys_to_execute: Optional[Sequence[str]] = None,\n known_state: Optional[KnownExecutionState] = None,\n instance_ref: Optional[InstanceRef] = None,\n tags: Optional[Mapping[str, str]] = None,\n repository_load_data: Optional[RepositoryLoadData] = None,\n) -> ExecutionPlan:\n if isinstance(job, 
IJob):\n # If you have repository_load_data, make sure to use it when building plan\n if isinstance(job, ReconstructableJob) and repository_load_data is not None:\n job = job.with_repository_load_data(repository_load_data)\n job_def = job.get_definition()\n else:\n job_def = job\n\n run_config = check.opt_mapping_param(run_config, "run_config", key_type=str)\n check.opt_nullable_sequence_param(step_keys_to_execute, "step_keys_to_execute", of_type=str)\n check.opt_inst_param(instance_ref, "instance_ref", InstanceRef)\n tags = check.opt_mapping_param(tags, "tags", key_type=str, value_type=str)\n known_state = check.opt_inst_param(\n known_state,\n "known_state",\n KnownExecutionState,\n default=KnownExecutionState(),\n )\n repository_load_data = check.opt_inst_param(\n repository_load_data, "repository_load_data", RepositoryLoadData\n )\n\n resolved_run_config = ResolvedRunConfig.build(job_def, run_config)\n\n return ExecutionPlan.build(\n job_def,\n resolved_run_config,\n step_keys_to_execute=step_keys_to_execute,\n known_state=known_state,\n instance_ref=instance_ref,\n tags=tags,\n repository_load_data=repository_load_data,\n )\n\n\ndef job_execution_iterator(\n job_context: PlanOrchestrationContext, execution_plan: ExecutionPlan\n) -> Iterator[DagsterEvent]:\n """A complete execution of a pipeline. Yields pipeline start, success,\n and failure events.\n\n Args:\n pipeline_context (PlanOrchestrationContext):\n execution_plan (ExecutionPlan):\n """\n # TODO: restart event?\n if not job_context.resume_from_failure:\n yield DagsterEvent.job_start(job_context)\n\n job_exception_info = None\n job_canceled_info = None\n failed_steps = []\n generator_closed = False\n try:\n for event in job_context.executor.execute(job_context, execution_plan):\n if event.is_step_failure:\n failed_steps.append(event.step_key)\n elif event.is_resource_init_failure and event.step_key:\n failed_steps.append(event.step_key)\n\n # Telemetry\n log_dagster_event(event, job_context)\n\n yield event\n except GeneratorExit:\n # Shouldn't happen, but avoid runtime-exception in case this generator gets GC-ed\n # (see https://amir.rachum.com/blog/2017/03/03/generator-cleanup/).\n generator_closed = True\n job_exception_info = serializable_error_info_from_exc_info(sys.exc_info())\n if job_context.raise_on_error:\n raise\n except (KeyboardInterrupt, DagsterExecutionInterruptedError):\n job_canceled_info = serializable_error_info_from_exc_info(sys.exc_info())\n if job_context.raise_on_error:\n raise\n except BaseException:\n job_exception_info = serializable_error_info_from_exc_info(sys.exc_info())\n if job_context.raise_on_error:\n raise # finally block will run before this is re-raised\n finally:\n if job_canceled_info:\n reloaded_run = job_context.instance.get_run_by_id(job_context.run_id)\n if reloaded_run and reloaded_run.status == DagsterRunStatus.CANCELING:\n event = DagsterEvent.job_canceled(job_context, job_canceled_info)\n elif reloaded_run and reloaded_run.status == DagsterRunStatus.CANCELED:\n # This happens if the run was force-terminated but was still able to send\n # a cancellation request\n event = DagsterEvent.engine_event(\n job_context,\n "Computational resources were cleaned up after the run was forcibly marked"\n " as canceled.",\n EngineEventData(),\n )\n elif job_context.instance.run_will_resume(job_context.run_id):\n event = DagsterEvent.engine_event(\n job_context,\n "Execution was interrupted unexpectedly. 
No user initiated termination"\n " request was found, not treating as failure because run will be resumed.",\n EngineEventData(),\n )\n elif reloaded_run and reloaded_run.status == DagsterRunStatus.FAILURE:\n event = DagsterEvent.engine_event(\n job_context,\n "Execution was interrupted for a run that was already in a failure state.",\n EngineEventData(),\n )\n else:\n event = DagsterEvent.job_failure(\n job_context,\n "Execution was interrupted unexpectedly. "\n "No user initiated termination request was found, treating as failure.",\n job_canceled_info,\n )\n elif job_exception_info:\n event = DagsterEvent.job_failure(\n job_context,\n "An exception was thrown during execution.",\n job_exception_info,\n )\n elif failed_steps:\n event = DagsterEvent.job_failure(\n job_context,\n f"Steps failed: {failed_steps}.",\n )\n else:\n event = DagsterEvent.job_success(job_context)\n if not generator_closed:\n yield event\n\n\nclass ExecuteRunWithPlanIterable:\n """Utility class to consolidate execution logic.\n\n This is a class and not a function because, e.g., in constructing a `scoped_pipeline_context`\n for `JobExecutionResult`, we need to pull out the `pipeline_context` after we're done\n yielding events. This broadly follows a pattern we make use of in other places,\n cf. `dagster._utils.EventGenerationManager`.\n """\n\n def __init__(\n self,\n execution_plan: ExecutionPlan,\n iterator: Callable[..., Iterator[DagsterEvent]],\n execution_context_manager: ExecutionContextManager[Any],\n ):\n self.execution_plan = check.inst_param(execution_plan, "execution_plan", ExecutionPlan)\n self.iterator = check.callable_param(iterator, "iterator")\n self.execution_context_manager = check.inst_param(\n execution_context_manager, "execution_context_manager", ExecutionContextManager\n )\n\n self.job_context = None\n\n def __iter__(self) -> Iterator[DagsterEvent]:\n # Since interrupts can't be raised at arbitrary points safely, delay them until designated\n # checkpoints during the execution.\n # To be maximally certain that interrupts are always caught during an execution process,\n # you can safely add an additional `with capture_interrupts()` at the very beginning of the\n # process that performs the execution.\n with capture_interrupts():\n yield from self.execution_context_manager.prepare_context()\n self.job_context = self.execution_context_manager.get_context()\n generator_closed = False\n try:\n if self.job_context: # False if we had a pipeline init failure\n yield from self.iterator(\n execution_plan=self.execution_plan,\n job_context=self.job_context,\n )\n except GeneratorExit:\n # Shouldn't happen, but avoid runtime-exception in case this generator gets GC-ed\n # (see https://amir.rachum.com/blog/2017/03/03/generator-cleanup/).\n generator_closed = True\n raise\n finally:\n for event in self.execution_context_manager.shutdown_context():\n if not generator_closed:\n yield event\n\n\ndef _check_execute_job_args(\n job_arg: Union[JobDefinition, IJob],\n run_config: Optional[Mapping[str, object]],\n tags: Optional[Mapping[str, str]],\n op_selection: Optional[Sequence[str]] = None,\n) -> Tuple[\n IJob,\n Optional[Mapping],\n Mapping[str, str],\n Optional[AbstractSet[str]],\n Optional[Sequence[str]],\n]:\n ijob = InMemoryJob(job_arg) if isinstance(job_arg, JobDefinition) else job_arg\n job_def = job_arg if isinstance(job_arg, JobDefinition) else job_arg.get_definition()\n\n run_config = check.opt_mapping_param(run_config, "run_config")\n\n tags = check.opt_mapping_param(tags, "tags", key_type=str)\n 
check.opt_sequence_param(op_selection, "op_selection", of_type=str)\n\n tags = merge_dicts(job_def.tags, tags)\n\n # generate job subset from the given op_selection\n if op_selection:\n ijob = ijob.get_subset(op_selection=op_selection)\n\n return (\n ijob,\n run_config,\n tags,\n ijob.resolved_op_selection,\n op_selection,\n )\n\n\ndef _resolve_reexecute_step_selection(\n instance: DagsterInstance,\n job: IJob,\n run_config: Optional[Mapping],\n parent_dagster_run: DagsterRun,\n step_selection: Sequence[str],\n) -> ExecutionPlan:\n if parent_dagster_run.op_selection:\n job = job.get_subset(op_selection=parent_dagster_run.op_selection)\n\n state = KnownExecutionState.build_for_reexecution(instance, parent_dagster_run)\n\n parent_plan = create_execution_plan(\n job,\n parent_dagster_run.run_config,\n known_state=state,\n )\n step_keys_to_execute = parse_step_selection(parent_plan.get_all_step_deps(), step_selection)\n execution_plan = create_execution_plan(\n job,\n run_config,\n step_keys_to_execute=list(step_keys_to_execute),\n known_state=state.update_for_step_selection(step_keys_to_execute),\n tags=parent_dagster_run.tags,\n )\n return execution_plan\n\n\ndef _job_with_repository_load_data(\n job_arg: Union[JobDefinition, IJob],\n) -> Tuple[Union[JobDefinition, IJob], Optional[RepositoryLoadData]]:\n """For ReconstructableJob, generate and return any required RepositoryLoadData, alongside\n a ReconstructableJob with this repository load data baked in.\n """\n if isinstance(job_arg, ReconstructableJob):\n # Unless this ReconstructableJob alread has repository_load_data attached, this will\n # force the repository_load_data to be computed from scratch.\n repository_load_data = job_arg.repository.get_definition().repository_load_data\n return job_arg.with_repository_load_data(repository_load_data), repository_load_data\n return job_arg, None\n
", "current_page_name": "_modules/dagster/_core/execution/api", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.execution.api"}, "build_resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.execution.build_resources

\nfrom contextlib import contextmanager\nfrom typing import Any, Dict, Generator, Mapping, Optional, cast\n\nimport dagster._check as check\nfrom dagster._config import process_config\nfrom dagster._core.definitions.resource_definition import (\n    ResourceDefinition,\n    Resources,\n    ScopedResourcesBuilder,\n)\nfrom dagster._core.definitions.run_config import define_resource_dictionary_cls\nfrom dagster._core.errors import DagsterInvalidConfigError\nfrom dagster._core.execution.resources_init import resource_initialization_manager\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._core.log_manager import DagsterLogManager\nfrom dagster._core.storage.dagster_run import DagsterRun\nfrom dagster._core.storage.io_manager import IOManager, IOManagerDefinition\nfrom dagster._core.system_config.objects import ResourceConfig, config_map_resources\n\nfrom .api import ephemeral_instance_if_missing\nfrom .context_creation_job import initialize_console_manager\n\n\ndef get_mapped_resource_config(\n    resource_defs: Mapping[str, ResourceDefinition], resource_config: Mapping[str, Any]\n) -> Mapping[str, ResourceConfig]:\n    resource_config_schema = define_resource_dictionary_cls(\n        resource_defs, set(resource_defs.keys())\n    )\n    config_evr = process_config(resource_config_schema, resource_config)\n    if not config_evr.success:\n        raise DagsterInvalidConfigError(\n            "Error in config for resources ",\n            config_evr.errors,\n            resource_config,\n        )\n    config_value = cast(Dict[str, Any], config_evr.value)\n    return config_map_resources(resource_defs, config_value)\n\n\n
[docs]@contextmanager\ndef build_resources(\n resources: Mapping[str, Any],\n instance: Optional[DagsterInstance] = None,\n resource_config: Optional[Mapping[str, Any]] = None,\n dagster_run: Optional[DagsterRun] = None,\n log_manager: Optional[DagsterLogManager] = None,\n) -> Generator[Resources, None, None]:\n """Context manager that yields resources using provided resource definitions and run config.\n\n This API allows for using resources in an independent context. Resources will be initialized\n with the provided run config, and optionally, dagster_run. The resulting resources will be\n yielded on a dictionary keyed identically to that provided for `resource_defs`. Upon exiting the\n context, resources will also be torn down safely.\n\n Args:\n resources (Mapping[str, Any]): Resource instances or definitions to build. All\n required resource dependencies to a given resource must be contained within this\n dictionary, or the resource build will fail.\n instance (Optional[DagsterInstance]): The dagster instance configured to instantiate\n resources on.\n resource_config (Optional[Mapping[str, Any]]): A dict representing the config to be\n provided to each resource during initialization and teardown.\n dagster_run (Optional[PipelineRun]): The pipeline run to provide during resource\n initialization and teardown. If the provided resources require either the `dagster_run`\n or `run_id` attributes of the provided context during resource initialization and/or\n teardown, this must be provided, or initialization will fail.\n log_manager (Optional[DagsterLogManager]): Log Manager to use during resource\n initialization. Defaults to system log manager.\n\n Examples:\n .. code-block:: python\n\n from dagster import resource, build_resources\n\n @resource\n def the_resource():\n return "foo"\n\n with build_resources(resources={"from_def": the_resource, "from_val": "bar"}) as resources:\n assert resources.from_def == "foo"\n assert resources.from_val == "bar"\n\n """\n resources = check.mapping_param(resources, "resource_defs", key_type=str)\n instance = check.opt_inst_param(instance, "instance", DagsterInstance)\n resource_config = check.opt_mapping_param(resource_config, "resource_config", key_type=str)\n log_manager = check.opt_inst_param(log_manager, "log_manager", DagsterLogManager)\n resource_defs = wrap_resources_for_execution(resources)\n mapped_resource_config = get_mapped_resource_config(resource_defs, resource_config)\n\n with ephemeral_instance_if_missing(instance) as dagster_instance:\n resources_manager = resource_initialization_manager(\n resource_defs=resource_defs,\n resource_configs=mapped_resource_config,\n log_manager=log_manager if log_manager else initialize_console_manager(dagster_run),\n execution_plan=None,\n dagster_run=dagster_run,\n resource_keys_to_init=set(resource_defs.keys()),\n instance=dagster_instance,\n emit_persistent_events=False,\n )\n try:\n list(resources_manager.generate_setup_events())\n instantiated_resources = check.inst(\n resources_manager.get_object(), ScopedResourcesBuilder\n )\n yield instantiated_resources.build(\n set(instantiated_resources.resource_instance_dict.keys())\n )\n finally:\n list(resources_manager.generate_teardown_events())
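The example in the docstring covers unconfigured resources; the hedged sketch below additionally passes ``resource_config``, which is processed through the same ``{"<resource_key>": {"config": {...}}}`` shape used for resources in run config. The resource name and config values are made up for illustration.

.. code-block:: python

    from dagster import build_resources, resource

    @resource(config_schema={"api_key": str})
    def api_client(init_context):
        # Return a stand-in client object; a real resource would construct a client here.
        return {"api_key": init_context.resource_config["api_key"]}

    with build_resources(
        resources={"client": api_client},
        resource_config={"client": {"config": {"api_key": "hypothetical-key"}}},
    ) as resources:
        assert resources.client["api_key"] == "hypothetical-key"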
\n\n\ndef wrap_resources_for_execution(\n resources: Optional[Mapping[str, Any]] = None\n) -> Dict[str, ResourceDefinition]:\n return (\n {\n resource_key: wrap_resource_for_execution(resource)\n for resource_key, resource in resources.items()\n }\n if resources\n else {}\n )\n\n\ndef wrap_resource_for_execution(resource: Any) -> ResourceDefinition:\n from dagster._config.pythonic_config import ConfigurableResourceFactory, PartialResource\n\n # Wrap instantiated resource values in a resource definition.\n # If an instantiated IO manager is provided, wrap it in an IO manager definition.\n if isinstance(resource, (ConfigurableResourceFactory, PartialResource)):\n return resource.get_resource_definition()\n elif isinstance(resource, ResourceDefinition):\n return resource\n elif isinstance(resource, IOManager):\n return IOManagerDefinition.hardcoded_io_manager(resource)\n else:\n return ResourceDefinition.hardcoded_resource(resource)\n
", "current_page_name": "_modules/dagster/_core/execution/build_resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.execution.build_resources"}, "context": {"compute": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.execution.context.compute

\nfrom abc import ABC, ABCMeta, abstractmethod\nfrom inspect import _empty as EmptyAnnotation\nfrom typing import (\n    AbstractSet,\n    Any,\n    Dict,\n    Iterator,\n    List,\n    Mapping,\n    Optional,\n    Sequence,\n    Set,\n    Union,\n    cast,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import deprecated, experimental, public\nfrom dagster._core.definitions.asset_check_spec import AssetCheckKey, AssetCheckSpec\nfrom dagster._core.definitions.asset_checks import AssetChecksDefinition\nfrom dagster._core.definitions.assets import AssetsDefinition\nfrom dagster._core.definitions.data_version import (\n    DataProvenance,\n    DataVersion,\n    extract_data_provenance_from_entry,\n)\nfrom dagster._core.definitions.decorators.op_decorator import DecoratedOpFunction\nfrom dagster._core.definitions.dependency import Node, NodeHandle\nfrom dagster._core.definitions.events import (\n    AssetKey,\n    AssetMaterialization,\n    AssetObservation,\n    ExpectationResult,\n    UserEvent,\n)\nfrom dagster._core.definitions.job_definition import JobDefinition\nfrom dagster._core.definitions.op_definition import OpDefinition\nfrom dagster._core.definitions.partition import PartitionsDefinition\nfrom dagster._core.definitions.partition_key_range import PartitionKeyRange\nfrom dagster._core.definitions.step_launcher import StepLauncher\nfrom dagster._core.definitions.time_window_partitions import TimeWindow\nfrom dagster._core.errors import (\n    DagsterInvalidDefinitionError,\n    DagsterInvalidPropertyError,\n    DagsterInvariantViolationError,\n)\nfrom dagster._core.events import DagsterEvent\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._core.log_manager import DagsterLogManager\nfrom dagster._core.storage.dagster_run import DagsterRun\nfrom dagster._utils.forked_pdb import ForkedPdb\nfrom dagster._utils.warnings import (\n    deprecation_warning,\n)\n\nfrom .system import StepExecutionContext\n\n\n# This metaclass has to exist for OpExecutionContext to have a metaclass\nclass AbstractComputeMetaclass(ABCMeta):\n    pass\n\n\nclass AbstractComputeExecutionContext(ABC, metaclass=AbstractComputeMetaclass):\n    """Base class for op context implemented by OpExecutionContext and DagstermillExecutionContext."""\n\n    @abstractmethod\n    def has_tag(self, key: str) -> bool:\n        """Implement this method to check if a logging tag is set."""\n\n    @abstractmethod\n    def get_tag(self, key: str) -> Optional[str]:\n        """Implement this method to get a logging tag."""\n\n    @property\n    @abstractmethod\n    def run_id(self) -> str:\n        """The run id for the context."""\n\n    @property\n    @abstractmethod\n    def op_def(self) -> OpDefinition:\n        """The op definition corresponding to the execution step being executed."""\n\n    @property\n    @abstractmethod\n    def job_def(self) -> JobDefinition:\n        """The job being executed."""\n\n    @property\n    @abstractmethod\n    def run(self) -> DagsterRun:\n        """The DagsterRun object corresponding to the execution."""\n\n    @property\n    @abstractmethod\n    def resources(self) -> Any:\n        """Resources available in the execution context."""\n\n    @property\n    @abstractmethod\n    def log(self) -> DagsterLogManager:\n        """The log manager available in the execution context."""\n\n    @property\n    @abstractmethod\n    def op_config(self) -> Any:\n        """The parsed config specific to this op."""\n\n\nclass 
OpExecutionContextMetaClass(AbstractComputeMetaclass):\n    def __instancecheck__(cls, instance) -> bool:\n        # This makes isinstance(context, OpExecutionContext) throw a deprecation warning when\n        # context is an AssetExecutionContext. This metaclass can be deleted once AssetExecutionContext\n        # has been split into its own class in 1.7.0\n        if type(instance) is AssetExecutionContext and cls is not AssetExecutionContext:\n            deprecation_warning(\n                subject="AssetExecutionContext",\n                additional_warn_text=(\n                    "Starting in version 1.7.0 AssetExecutionContext will no longer be a subclass"\n                    " of OpExecutionContext."\n                ),\n                breaking_version="1.7.0",\n                stacklevel=1,\n            )\n        return super().__instancecheck__(instance)\n\n\n
[docs]class OpExecutionContext(AbstractComputeExecutionContext, metaclass=OpExecutionContextMetaClass):\n """The ``context`` object that can be made available as the first argument to the function\n used for computing an op or asset.\n\n This context object provides system information such as resources, config, and logging.\n\n To construct an execution context for testing purposes, use :py:func:`dagster.build_op_context`.\n\n Example:\n .. code-block:: python\n\n from dagster import op, OpExecutionContext\n\n @op\n def hello_world(context: OpExecutionContext):\n context.log.info("Hello, world!")\n """\n\n __slots__ = ["_step_execution_context"]\n\n def __init__(self, step_execution_context: StepExecutionContext):\n self._step_execution_context = check.inst_param(\n step_execution_context,\n "step_execution_context",\n StepExecutionContext,\n )\n self._pdb: Optional[ForkedPdb] = None\n self._events: List[DagsterEvent] = []\n self._output_metadata: Dict[str, Any] = {}\n\n @public\n @property\n def op_config(self) -> Any:\n """Any: The parsed config specific to this op."""\n return self._step_execution_context.op_config\n\n @property\n def dagster_run(self) -> DagsterRun:\n """PipelineRun: The current pipeline run."""\n return self._step_execution_context.dagster_run\n\n @property\n def run(self) -> DagsterRun:\n """DagsterRun: The current run."""\n return self.dagster_run\n\n @public\n @property\n def instance(self) -> DagsterInstance:\n """DagsterInstance: The current Dagster instance."""\n return self._step_execution_context.instance\n\n @public\n @property\n def pdb(self) -> ForkedPdb:\n """dagster.utils.forked_pdb.ForkedPdb: Gives access to pdb debugging from within the op.\n\n Example:\n .. code-block:: python\n\n @op\n def debug(context):\n context.pdb.set_trace()\n """\n if self._pdb is None:\n self._pdb = ForkedPdb()\n\n return self._pdb\n\n @property\n def file_manager(self):\n """Deprecated access to the file manager.\n\n :meta private:\n """\n raise DagsterInvalidPropertyError(\n "You have attempted to access the file manager which has been moved to resources in"\n " 0.10.0. 
Please access it via `context.resources.file_manager` instead."\n )\n\n @public\n @property\n def resources(self) -> Any:\n """Resources: The currently available resources."""\n return self._step_execution_context.resources\n\n @property\n def step_launcher(self) -> Optional[StepLauncher]:\n """Optional[StepLauncher]: The current step launcher, if any."""\n return self._step_execution_context.step_launcher\n\n @public\n @property\n def run_id(self) -> str:\n """str: The id of the current execution's run."""\n return self._step_execution_context.run_id\n\n @public\n @property\n def run_config(self) -> Mapping[str, object]:\n """dict: The run config for the current execution."""\n return self._step_execution_context.run_config\n\n @public\n @property\n def job_def(self) -> JobDefinition:\n """JobDefinition: The currently executing pipeline."""\n return self._step_execution_context.job_def\n\n @public\n @property\n def job_name(self) -> str:\n """str: The name of the currently executing pipeline."""\n return self._step_execution_context.job_name\n\n @public\n @property\n def log(self) -> DagsterLogManager:\n """DagsterLogManager: The log manager available in the execution context."""\n return self._step_execution_context.log\n\n @property\n def node_handle(self) -> NodeHandle:\n """NodeHandle: The current op's handle.\n\n :meta private:\n """\n return self._step_execution_context.node_handle\n\n @property\n def op_handle(self) -> NodeHandle:\n """NodeHandle: The current op's handle.\n\n :meta private:\n """\n return self.node_handle\n\n @property\n def op(self) -> Node:\n """Node: The object representing the invoked op within the graph.\n\n :meta private:\n\n """\n return self._step_execution_context.job_def.get_node(self.node_handle)\n\n @public\n @property\n def op_def(self) -> OpDefinition:\n """OpDefinition: The current op definition."""\n return cast(OpDefinition, self.op.definition)\n\n @public\n @property\n def has_partition_key(self) -> bool:\n """Whether the current run is a partitioned run."""\n return self._step_execution_context.has_partition_key\n\n @public\n @property\n def partition_key(self) -> str:\n """The partition key for the current run.\n\n Raises an error if the current run is not a partitioned run. Or if the current run is operating\n over a range of partitions (ie. a backfill of several partitions executed in a single run).\n\n Examples:\n .. code-block:: python\n\n partitions_def = DailyPartitionsDefinition("2023-08-20")\n\n @asset(\n partitions_def=partitions_def\n )\n def my_asset(context: AssetExecutionContext):\n context.log.info(context.partition_key)\n\n # materializing the 2023-08-21 partition of this asset will log:\n # "2023-08-21"\n """\n return self._step_execution_context.partition_key\n\n @deprecated(breaking_version="2.0", additional_warn_text="Use `partition_key_range` instead.")\n @public\n @property\n def asset_partition_key_range(self) -> PartitionKeyRange:\n """The range of partition keys for the current run.\n\n If run is for a single partition key, return a `PartitionKeyRange` with the same start and\n end. Raises an error if the current run is not a partitioned run.\n """\n return self.partition_key_range\n\n @public\n @property\n def partition_key_range(self) -> PartitionKeyRange:\n """The range of partition keys for the current run.\n\n If run is for a single partition key, returns a `PartitionKeyRange` with the same start and\n end. Raises an error if the current run is not a partitioned run.\n\n Examples:\n .. 
code-block:: python\n\n partitions_def = DailyPartitionsDefinition("2023-08-20")\n\n @asset(\n partitions_def=partitions_def\n )\n def my_asset(context: AssetExecutionContext):\n context.log.info(context.partition_key_range)\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # PartitionKeyRange(start="2023-08-21", end="2023-08-25")\n """\n return self._step_execution_context.asset_partition_key_range\n\n @public\n @property\n def partition_time_window(self) -> TimeWindow:\n """The partition time window for the current run.\n\n Raises an error if the current run is not a partitioned run, or if the job's partition\n definition is not a TimeWindowPartitionsDefinition.\n\n Examples:\n .. code-block:: python\n\n partitions_def = DailyPartitionsDefinition("2023-08-20")\n\n @asset(\n partitions_def=partitions_def\n )\n def my_asset(context: AssetExecutionContext):\n context.log.info(context.partition_time_window)\n\n # materializing the 2023-08-21 partition of this asset will log:\n # TimeWindow("2023-08-21", "2023-08-22")\n """\n return self._step_execution_context.partition_time_window\n\n
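Since ``partition_key`` raises when the run does not resolve to a single partition, a common pattern is to guard it with ``has_partition_key`` and fall back to ``partition_key_range``. A minimal sketch, assuming a daily-partitioned asset and that a ranged (single-run backfill) execution may not expose a single key:

.. code-block:: python

    from dagster import DailyPartitionsDefinition, asset

    @asset(partitions_def=DailyPartitionsDefinition(start_date="2023-08-20"))
    def guarded_asset(context):
        if context.has_partition_key:
            # Single-partition run: safe to read the key directly.
            context.log.info(f"Materializing {context.partition_key}")
        else:
            # Fall back to the range (e.g. a backfill executed as one run may not set a single key).
            key_range = context.partition_key_range
            context.log.info(f"Materializing {key_range.start} through {key_range.end}")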
[docs] @public\n def has_tag(self, key: str) -> bool:\n """Check if a logging tag is set.\n\n Args:\n key (str): The tag to check.\n\n Returns:\n bool: Whether the tag is set.\n """\n return self._step_execution_context.has_tag(key)
\n\n
[docs]    @public\n    def get_tag(self, key: str) -> Optional[str]:\n        """Get a logging tag.\n\n        Args:\n            key (str): The tag to get.\n\n        Returns:\n            Optional[str]: The value of the tag, if present.\n        """\n        return self._step_execution_context.get_tag(key)
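A hedged sketch of the two tag helpers above, assuming the run was launched with a custom ``team`` tag (for example via ``execute_job(..., tags={"team": "data-platform"})``); the tag name is illustrative.

.. code-block:: python

    from dagster import op

    @op
    def tag_aware_op(context):
        # Read an arbitrary run tag if it was provided at launch time.
        if context.has_tag("team"):
            context.log.info(f"Launched for team: {context.get_tag('team')}")
        else:
            context.log.info("No team tag set on this run")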
\n\n @property\n def run_tags(self) -> Mapping[str, str]:\n """Mapping[str, str]: The tags for the current run."""\n return self._step_execution_context.run_tags\n\n def has_events(self) -> bool:\n return bool(self._events)\n\n def consume_events(self) -> Iterator[DagsterEvent]:\n """Pops and yields all user-generated events that have been recorded from this context.\n\n If consume_events has not yet been called, this will yield all logged events since the beginning of the op's computation. If consume_events has been called, it will yield all events since the last time consume_events was called. Designed for internal use. Users should never need to invoke this method.\n """\n events = self._events\n self._events = []\n yield from events\n\n
[docs] @public\n def log_event(self, event: UserEvent) -> None:\n """Log an AssetMaterialization, AssetObservation, or ExpectationResult from within the body of an op.\n\n Events logged with this method will appear in the list of DagsterEvents, as well as the event log.\n\n Args:\n event (Union[AssetMaterialization, AssetObservation, ExpectationResult]): The event to log.\n\n **Examples:**\n\n .. code-block:: python\n\n from dagster import op, AssetMaterialization\n\n @op\n def log_materialization(context):\n context.log_event(AssetMaterialization("foo"))\n """\n if isinstance(event, AssetMaterialization):\n self._events.append(\n DagsterEvent.asset_materialization(self._step_execution_context, event)\n )\n elif isinstance(event, AssetObservation):\n self._events.append(DagsterEvent.asset_observation(self._step_execution_context, event))\n elif isinstance(event, ExpectationResult):\n self._events.append(\n DagsterEvent.step_expectation_result(self._step_execution_context, event)\n )\n else:\n check.failed(f"Unexpected event {event}")
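The docstring shows an ``AssetMaterialization``; the sketch below logs the other two supported event types. The asset key, metadata, and check logic are illustrative only.

.. code-block:: python

    from dagster import AssetObservation, ExpectationResult, op

    @op
    def audit_table(context):
        row_count = 1024  # stand-in for a real query
        context.log_event(
            AssetObservation(asset_key="my_table", metadata={"row_count": row_count})
        )
        context.log_event(
            ExpectationResult(success=row_count > 0, label="non_empty", description="Table has rows")
        )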
\n\n
[docs]    @public\n    def add_output_metadata(\n        self,\n        metadata: Mapping[str, Any],\n        output_name: Optional[str] = None,\n        mapping_key: Optional[str] = None,\n    ) -> None:\n        """Add metadata to one of the outputs of an op.\n\n        This can be invoked multiple times per output in the body of an op. If the same key is\n        passed multiple times, the value associated with the last call will be used.\n\n        Args:\n            metadata (Mapping[str, Any]): The metadata to attach to the output\n            output_name (Optional[str]): The name of the output to attach metadata to. If there is only one output on the op, then this argument does not need to be provided. The metadata will automatically be attached to the only output.\n            mapping_key (Optional[str]): The mapping key of the output to attach metadata to. If the\n                output is not dynamic, this argument does not need to be provided.\n\n        **Examples:**\n\n        .. code-block:: python\n\n            from dagster import Out, op\n            from typing import Tuple\n\n            @op\n            def add_metadata(context):\n                context.add_output_metadata({"foo": "bar"})\n                return 5 # Since the default output is called "result", metadata will be attached to the output "result".\n\n            @op(out={"a": Out(), "b": Out()})\n            def add_metadata_two_outputs(context) -> Tuple[str, int]:\n                context.add_output_metadata({"foo": "bar"}, output_name="b")\n                context.add_output_metadata({"baz": "bat"}, output_name="a")\n\n                return ("dog", 5)\n\n        """\n        metadata = check.mapping_param(metadata, "metadata", key_type=str)\n        output_name = check.opt_str_param(output_name, "output_name")\n        mapping_key = check.opt_str_param(mapping_key, "mapping_key")\n\n        self._step_execution_context.add_output_metadata(\n            metadata=metadata, output_name=output_name, mapping_key=mapping_key\n        )
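The ``mapping_key`` argument is only mentioned in passing above; here is a hedged sketch of attaching per-chunk metadata to dynamic outputs. The op name, chunk count, and metadata key are illustrative.

.. code-block:: python

    from dagster import DynamicOut, DynamicOutput, op

    @op(out=DynamicOut())
    def emit_chunks(context):
        for idx in range(3):
            # Attach metadata to each dynamic output via its mapping key.
            context.add_output_metadata({"chunk_index": idx}, mapping_key=str(idx))
            yield DynamicOutput(idx, mapping_key=str(idx))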
\n\n def get_output_metadata(\n self, output_name: str, mapping_key: Optional[str] = None\n ) -> Optional[Mapping[str, Any]]:\n return self._step_execution_context.get_output_metadata(\n output_name=output_name, mapping_key=mapping_key\n )\n\n def get_step_execution_context(self) -> StepExecutionContext:\n """Allows advanced users (e.g. framework authors) to punch through to the underlying\n step execution context.\n\n :meta private:\n\n Returns:\n StepExecutionContext: The underlying system context.\n """\n return self._step_execution_context\n\n @public\n @property\n def retry_number(self) -> int:\n """Which retry attempt is currently executing i.e. 0 for initial attempt, 1 for first retry, etc."""\n return self._step_execution_context.previous_attempt_count\n\n def describe_op(self):\n return self._step_execution_context.describe_op()\n\n
[docs] @public\n def get_mapping_key(self) -> Optional[str]:\n """Which mapping_key this execution is for if downstream of a DynamicOutput, otherwise None."""\n return self._step_execution_context.step.get_mapping_key()
\n\n #############################################################################################\n # asset related methods\n #############################################################################################\n\n @public\n @property\n def asset_key(self) -> AssetKey:\n """The AssetKey for the current asset. In a multi_asset, use asset_key_for_output instead."""\n if self.has_assets_def and len(self.assets_def.keys_by_output_name.keys()) > 1:\n raise DagsterInvariantViolationError(\n "Cannot call `context.asset_key` in a multi_asset with more than one asset. Use"\n " `context.asset_key_for_output` instead."\n )\n return next(iter(self.assets_def.keys_by_output_name.values()))\n\n @public\n @property\n def has_assets_def(self) -> bool:\n """If there is a backing AssetsDefinition for what is currently executing."""\n assets_def = self.job_def.asset_layer.assets_def_for_node(self.node_handle)\n return assets_def is not None\n\n @public\n @property\n def assets_def(self) -> AssetsDefinition:\n """The backing AssetsDefinition for what is currently executing, errors if not available."""\n assets_def = self.job_def.asset_layer.assets_def_for_node(self.node_handle)\n if assets_def is None:\n raise DagsterInvalidPropertyError(\n f"Op '{self.op.name}' does not have an assets definition."\n )\n return assets_def\n\n @public\n @property\n def selected_asset_keys(self) -> AbstractSet[AssetKey]:\n """Get the set of AssetKeys this execution is expected to materialize."""\n if not self.has_assets_def:\n return set()\n return self.assets_def.keys\n\n @public\n @property\n def has_asset_checks_def(self) -> bool:\n """Return a boolean indicating the presence of a backing AssetChecksDefinition\n for the current execution.\n\n Returns:\n bool: True if there is a backing AssetChecksDefinition for the current execution, otherwise False.\n """\n return self.job_def.asset_layer.asset_checks_def_for_node(self.node_handle) is not None\n\n @public\n @property\n def asset_checks_def(self) -> AssetChecksDefinition:\n """The backing AssetChecksDefinition for what is currently executing, errors if not\n available.\n\n Returns:\n AssetChecksDefinition.\n """\n asset_checks_def = self.job_def.asset_layer.asset_checks_def_for_node(self.node_handle)\n if asset_checks_def is None:\n raise DagsterInvalidPropertyError(\n f"Op '{self.op.name}' does not have an asset checks definition."\n )\n\n return asset_checks_def\n\n @public\n @property\n def selected_asset_check_keys(self) -> AbstractSet[AssetCheckKey]:\n if self.has_assets_def:\n return self.assets_def.check_keys\n\n if self.has_asset_checks_def:\n check.failed("Subset selection is not yet supported within an AssetChecksDefinition")\n\n return set()\n\n @public\n @property\n def selected_output_names(self) -> AbstractSet[str]:\n """Get the output names that correspond to the current selection of assets this execution is expected to materialize."""\n # map selected asset keys to the output names they correspond to\n selected_asset_keys = self.selected_asset_keys\n selected_outputs: Set[str] = set()\n for output_name in self.op.output_dict.keys():\n asset_info = self.job_def.asset_layer.asset_info_for_output(\n self.node_handle, output_name\n )\n if any( # For graph-backed assets, check if a downstream asset is selected\n [\n asset_key in selected_asset_keys\n for asset_key in self.job_def.asset_layer.downstream_dep_assets(\n self.node_handle, output_name\n )\n ]\n ) or (asset_info and asset_info.key in selected_asset_keys):\n selected_outputs.add(output_name)\n\n 
return selected_outputs\n\n
[docs] @public\n def asset_key_for_output(self, output_name: str = "result") -> AssetKey:\n """Return the AssetKey for the corresponding output."""\n asset_output_info = self.job_def.asset_layer.asset_info_for_output(\n node_handle=self.op_handle, output_name=output_name\n )\n if asset_output_info is None:\n check.failed(f"Output '{output_name}' has no asset")\n else:\n return asset_output_info.key
\n\n
[docs] @public\n def output_for_asset_key(self, asset_key: AssetKey) -> str:\n """Return the output name for the corresponding asset key."""\n node_output_handle = self.job_def.asset_layer.node_output_handle_for_asset(asset_key)\n if node_output_handle is None:\n check.failed(f"Asset key '{asset_key}' has no output")\n else:\n return node_output_handle.output_name
\n\n
[docs] @public\n def asset_key_for_input(self, input_name: str) -> AssetKey:\n """Return the AssetKey for the corresponding input."""\n key = self.job_def.asset_layer.asset_key_for_input(\n node_handle=self.op_handle, input_name=input_name\n )\n if key is None:\n check.failed(f"Input '{input_name}' has no asset")\n else:\n return key
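A hedged sketch tying together the three key-mapping helpers above (``asset_key_for_output``, ``output_for_asset_key``, ``asset_key_for_input``) on a ``@multi_asset`` with one upstream input; the asset and output names are arbitrary.

.. code-block:: python

    from dagster import AssetIn, AssetKey, AssetOut, asset, multi_asset

    @asset
    def upstream():
        return 1

    @multi_asset(
        outs={"a": AssetOut(), "b": AssetOut()},
        ins={"upstream": AssetIn()},
    )
    def my_assets(context, upstream):
        # Outputs map to asset keys (and back), inputs map to their upstream asset keys.
        assert context.asset_key_for_output("a") == AssetKey("a")
        assert context.output_for_asset_key(AssetKey("b")) == "b"
        assert context.asset_key_for_input("upstream") == AssetKey("upstream")
        return upstream, upstream + 1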
\n\n
[docs] @public\n def asset_partition_key_for_output(self, output_name: str = "result") -> str:\n """Returns the asset partition key for the given output.\n\n Args:\n output_name (str): For assets defined with the ``@asset`` decorator, the name of the output\n will be automatically provided. For assets defined with ``@multi_asset``, ``output_name``\n should be the op output associated with the asset key (as determined by AssetOut)\n to get the partition key for.\n\n Examples:\n .. code-block:: python\n\n partitions_def = DailyPartitionsDefinition("2023-08-20")\n\n @asset(\n partitions_def=partitions_def\n )\n def an_asset(context: AssetExecutionContext):\n context.log.info(context.asset_partition_key_for_output())\n\n\n # materializing the 2023-08-21 partition of this asset will log:\n # "2023-08-21"\n\n @multi_asset(\n outs={\n "first_asset": AssetOut(key=["my_assets", "first_asset"]),\n "second_asset": AssetOut(key=["my_assets", "second_asset"])\n }\n partitions_def=partitions_def,\n )\n def a_multi_asset(context: AssetExecutionContext):\n context.log.info(context.asset_partition_key_for_output("first_asset"))\n context.log.info(context.asset_partition_key_for_output("second_asset"))\n\n\n # materializing the 2023-08-21 partition of this asset will log:\n # "2023-08-21"\n # "2023-08-21"\n\n\n @asset(\n partitions_def=partitions_def,\n ins={\n "self_dependent_asset": AssetIn(partition_mapping=TimeWindowPartitionMapping(start_offset=-1, end_offset=-1))\n }\n )\n def self_dependent_asset(context: AssetExecutionContext, self_dependent_asset):\n context.log.info(context.asset_partition_key_for_output())\n\n # materializing the 2023-08-21 partition of this asset will log:\n # "2023-08-21"\n\n """\n return self._step_execution_context.asset_partition_key_for_output(output_name)
\n\n
[docs] @public\n def asset_partitions_time_window_for_output(self, output_name: str = "result") -> TimeWindow:\n """The time window for the partitions of the output asset.\n\n If you want to write your asset to support running a backfill of several partitions in a single run,\n you can use ``asset_partitions_time_window_for_output`` to get the TimeWindow of all of the partitions\n being materialized by the backfill.\n\n Raises an error if either of the following are true:\n - The output asset has no partitioning.\n - The output asset is not partitioned with a TimeWindowPartitionsDefinition or a\n MultiPartitionsDefinition with one time-partitioned dimension.\n\n Args:\n output_name (str): For assets defined with the ``@asset`` decorator, the name of the output\n will be automatically provided. For assets defined with ``@multi_asset``, ``output_name``\n should be the op output associated with the asset key (as determined by AssetOut)\n to get the time window for.\n\n Examples:\n .. code-block:: python\n\n partitions_def = DailyPartitionsDefinition("2023-08-20")\n\n @asset(\n partitions_def=partitions_def\n )\n def an_asset(context: AssetExecutionContext):\n context.log.info(context.asset_partitions_time_window_for_output())\n\n\n # materializing the 2023-08-21 partition of this asset will log:\n # TimeWindow("2023-08-21", "2023-08-22")\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # TimeWindow("2023-08-21", "2023-08-26")\n\n @multi_asset(\n outs={\n "first_asset": AssetOut(key=["my_assets", "first_asset"]),\n "second_asset": AssetOut(key=["my_assets", "second_asset"])\n }\n partitions_def=partitions_def,\n )\n def a_multi_asset(context: AssetExecutionContext):\n context.log.info(context.asset_partitions_time_window_for_output("first_asset"))\n context.log.info(context.asset_partitions_time_window_for_output("second_asset"))\n\n # materializing the 2023-08-21 partition of this asset will log:\n # TimeWindow("2023-08-21", "2023-08-22")\n # TimeWindow("2023-08-21", "2023-08-22")\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # TimeWindow("2023-08-21", "2023-08-26")\n # TimeWindow("2023-08-21", "2023-08-26")\n\n\n @asset(\n partitions_def=partitions_def,\n ins={\n "self_dependent_asset": AssetIn(partition_mapping=TimeWindowPartitionMapping(start_offset=-1, end_offset=-1))\n }\n )\n def self_dependent_asset(context: AssetExecutionContext, self_dependent_asset):\n context.log.info(context.asset_partitions_time_window_for_output())\n\n # materializing the 2023-08-21 partition of this asset will log:\n # TimeWindow("2023-08-21", "2023-08-22")\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # TimeWindow("2023-08-21", "2023-08-26")\n\n """\n return self._step_execution_context.asset_partitions_time_window_for_output(output_name)
\n\n
[docs] @public\n def asset_partition_key_range_for_output(\n self, output_name: str = "result"\n ) -> PartitionKeyRange:\n """Return the PartitionKeyRange for the corresponding output. Errors if the run is not partitioned.\n\n If you want to write your asset to support running a backfill of several partitions in a single run,\n you can use ``asset_partition_key_range_for_output`` to get all of the partitions being materialized\n by the backfill.\n\n Args:\n output_name (str): For assets defined with the ``@asset`` decorator, the name of the output\n will be automatically provided. For assets defined with ``@multi_asset``, ``output_name``\n should be the op output associated with the asset key (as determined by AssetOut)\n to get the partition key range for.\n\n Examples:\n .. code-block:: python\n\n partitions_def = DailyPartitionsDefinition("2023-08-20")\n\n @asset(\n partitions_def=partitions_def\n )\n def an_asset(context: AssetExecutionContext):\n context.log.info(context.asset_partition_key_range_for_output())\n\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # PartitionKeyRange(start="2023-08-21", end="2023-08-25")\n\n @multi_asset(\n outs={\n "first_asset": AssetOut(key=["my_assets", "first_asset"]),\n "second_asset": AssetOut(key=["my_assets", "second_asset"])\n }\n partitions_def=partitions_def,\n )\n def a_multi_asset(context: AssetExecutionContext):\n context.log.info(context.asset_partition_key_range_for_output("first_asset"))\n context.log.info(context.asset_partition_key_range_for_output("second_asset"))\n\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # PartitionKeyRange(start="2023-08-21", end="2023-08-25")\n # PartitionKeyRange(start="2023-08-21", end="2023-08-25")\n\n\n @asset(\n partitions_def=partitions_def,\n ins={\n "self_dependent_asset": AssetIn(partition_mapping=TimeWindowPartitionMapping(start_offset=-1, end_offset=-1))\n }\n )\n def self_dependent_asset(context: AssetExecutionContext, self_dependent_asset):\n context.log.info(context.asset_partition_key_range_for_output())\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # PartitionKeyRange(start="2023-08-21", end="2023-08-25")\n\n """\n return self._step_execution_context.asset_partition_key_range_for_output(output_name)
\n\n
[docs] @public\n def asset_partition_key_range_for_input(self, input_name: str) -> PartitionKeyRange:\n """Return the PartitionKeyRange for the corresponding input. Errors if the asset depends on a\n non-contiguous chunk of the input.\n\n If you want to write your asset to support running a backfill of several partitions in a single run,\n you can use ``asset_partition_key_range_for_input`` to get the range of partitions keys of the input that\n are relevant to that backfill.\n\n Args:\n input_name (str): The name of the input to get the time window for.\n\n Examples:\n .. code-block:: python\n\n partitions_def = DailyPartitionsDefinition("2023-08-20")\n\n @asset(\n partitions_def=partitions_def\n )\n def upstream_asset():\n ...\n\n @asset(\n partitions_def=partitions_def\n )\n def an_asset(context: AssetExecutionContext, upstream_asset):\n context.log.info(context.asset_partition_key_range_for_input("upstream_asset"))\n\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # PartitionKeyRange(start="2023-08-21", end="2023-08-25")\n\n @asset(\n ins={\n "upstream_asset": AssetIn(partition_mapping=TimeWindowPartitionMapping(start_offset=-1, end_offset=-1))\n }\n partitions_def=partitions_def,\n )\n def another_asset(context: AssetExecutionContext, upstream_asset):\n context.log.info(context.asset_partition_key_range_for_input("upstream_asset"))\n\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # PartitionKeyRange(start="2023-08-20", end="2023-08-24")\n\n\n @asset(\n partitions_def=partitions_def,\n ins={\n "self_dependent_asset": AssetIn(partition_mapping=TimeWindowPartitionMapping(start_offset=-1, end_offset=-1))\n }\n )\n def self_dependent_asset(context: AssetExecutionContext, self_dependent_asset):\n context.log.info(context.asset_partition_key_range_for_input("self_dependent_asset"))\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # PartitionKeyRange(start="2023-08-20", end="2023-08-24")\n\n\n """\n return self._step_execution_context.asset_partition_key_range_for_input(input_name)
\n\n
[docs] @public\n def asset_partition_key_for_input(self, input_name: str) -> str:\n """Returns the partition key of the upstream asset corresponding to the given input.\n\n Args:\n input_name (str): The name of the input to get the partition key for.\n\n Examples:\n .. code-block:: python\n\n partitions_def = DailyPartitionsDefinition("2023-08-20")\n\n @asset(\n partitions_def=partitions_def\n )\n def upstream_asset():\n ...\n\n @asset(\n partitions_def=partitions_def\n )\n def an_asset(context: AssetExecutionContext, upstream_asset):\n context.log.info(context.asset_partition_key_for_input("upstream_asset"))\n\n # materializing the 2023-08-21 partition of this asset will log:\n # "2023-08-21"\n\n\n @asset(\n partitions_def=partitions_def,\n ins={\n "self_dependent_asset": AssetIn(partition_mapping=TimeWindowPartitionMapping(start_offset=-1, end_offset=-1))\n }\n )\n def self_dependent_asset(context: AssetExecutionContext, self_dependent_asset):\n context.log.info(context.asset_partition_key_for_input("self_dependent_asset"))\n\n # materializing the 2023-08-21 partition of this asset will log:\n # "2023-08-20"\n\n """\n return self._step_execution_context.asset_partition_key_for_input(input_name)
\n\n
[docs] @public\n def asset_partitions_def_for_output(self, output_name: str = "result") -> PartitionsDefinition:\n """The PartitionsDefinition on the asset corresponding to this output.\n\n Args:\n output_name (str): For assets defined with the ``@asset`` decorator, the name of the output\n will be automatically provided. For assets defined with ``@multi_asset``, ``output_name``\n should be the op output associated with the asset key (as determined by AssetOut)\n to get the PartitionsDefinition for.\n\n Examples:\n .. code-block:: python\n\n partitions_def = DailyPartitionsDefinition("2023-08-20")\n\n @asset(\n partitions_def=partitions_def\n )\n def upstream_asset(context: AssetExecutionContext):\n context.log.info(context.asset_partitions_def_for_output())\n\n # materializing the 2023-08-21 partition of this asset will log:\n # DailyPartitionsDefinition("2023-08-20")\n\n @multi_asset(\n outs={\n "first_asset": AssetOut(key=["my_assets", "first_asset"]),\n "second_asset": AssetOut(key=["my_assets", "second_asset"])\n }\n partitions_def=partitions_def,\n )\n def a_multi_asset(context: AssetExecutionContext):\n context.log.info(context.asset_partitions_def_for_output("first_asset"))\n context.log.info(context.asset_partitions_def_for_output("second_asset"))\n\n # materializing the 2023-08-21 partition of this asset will log:\n # DailyPartitionsDefinition("2023-08-20")\n # DailyPartitionsDefinition("2023-08-20")\n\n """\n asset_key = self.asset_key_for_output(output_name)\n result = self._step_execution_context.job_def.asset_layer.partitions_def_for_asset(\n asset_key\n )\n if result is None:\n raise DagsterInvariantViolationError(\n f"Attempting to access partitions def for asset {asset_key}, but it is not"\n " partitioned"\n )\n\n return result
\n\n
[docs] @public\n def asset_partitions_def_for_input(self, input_name: str) -> PartitionsDefinition:\n """The PartitionsDefinition on the upstream asset corresponding to this input.\n\n Args:\n input_name (str): The name of the input to get the PartitionsDefinition for.\n\n Examples:\n .. code-block:: python\n\n partitions_def = DailyPartitionsDefinition("2023-08-20")\n\n @asset(\n partitions_def=partitions_def\n )\n def upstream_asset():\n ...\n\n @asset(\n partitions_def=partitions_def\n )\n def an_asset(context: AssetExecutionContext, upstream_asset):\n context.log.info(context.asset_partitions_def_for_input("upstream_asset"))\n\n # materializing the 2023-08-21 partition of this asset will log:\n # DailyPartitionsDefinition("2023-08-20")\n\n """\n asset_key = self.asset_key_for_input(input_name)\n result = self._step_execution_context.job_def.asset_layer.partitions_def_for_asset(\n asset_key\n )\n if result is None:\n raise DagsterInvariantViolationError(\n f"Attempting to access partitions def for asset {asset_key}, but it is not"\n " partitioned"\n )\n\n return result
\n\n
[docs] @public\n def asset_partition_keys_for_output(self, output_name: str = "result") -> Sequence[str]:\n """Returns a list of the partition keys for the given output.\n\n If you want to write your asset to support running a backfill of several partitions in a single run,\n you can use ``asset_partition_keys_for_output`` to get all of the partitions being materialized\n by the backfill.\n\n Args:\n output_name (str): For assets defined with the ``@asset`` decorator, the name of the output\n will be automatically provided. For assets defined with ``@multi_asset``, ``output_name``\n should be the op output associated with the asset key (as determined by AssetOut)\n to get the partition keys for.\n\n Examples:\n .. code-block:: python\n\n partitions_def = DailyPartitionsDefinition("2023-08-20")\n\n @asset(\n partitions_def=partitions_def\n )\n def an_asset(context: AssetExecutionContext):\n context.log.info(context.asset_partition_keys_for_output())\n\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # ["2023-08-21", "2023-08-22", "2023-08-23", "2023-08-24", "2023-08-25"]\n\n @multi_asset(\n outs={\n "first_asset": AssetOut(key=["my_assets", "first_asset"]),\n "second_asset": AssetOut(key=["my_assets", "second_asset"])\n }\n partitions_def=partitions_def,\n )\n def a_multi_asset(context: AssetExecutionContext):\n context.log.info(context.asset_partition_keys_for_output("first_asset"))\n context.log.info(context.asset_partition_keys_for_output("second_asset"))\n\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # ["2023-08-21", "2023-08-22", "2023-08-23", "2023-08-24", "2023-08-25"]\n # ["2023-08-21", "2023-08-22", "2023-08-23", "2023-08-24", "2023-08-25"]\n\n\n @asset(\n partitions_def=partitions_def,\n ins={\n "self_dependent_asset": AssetIn(partition_mapping=TimeWindowPartitionMapping(start_offset=-1, end_offset=-1))\n }\n )\n def self_dependent_asset(context: AssetExecutionContext, self_dependent_asset):\n context.log.info(context.asset_partition_keys_for_output())\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # ["2023-08-21", "2023-08-22", "2023-08-23", "2023-08-24", "2023-08-25"]\n """\n return self.asset_partitions_def_for_output(output_name).get_partition_keys_in_range(\n self._step_execution_context.asset_partition_key_range_for_output(output_name),\n dynamic_partitions_store=self.instance,\n )
\n\n
[docs] @public\n def asset_partition_keys_for_input(self, input_name: str) -> Sequence[str]:\n """Returns a list of the partition keys of the upstream asset corresponding to the\n given input.\n\n If you want to write your asset to support running a backfill of several partitions in a single run,\n you can use ``asset_partition_keys_for_input`` to get all of the partition keys of the input that\n are relevant to that backfill.\n\n Args:\n input_name (str): The name of the input to get the time window for.\n\n Examples:\n .. code-block:: python\n\n partitions_def = DailyPartitionsDefinition("2023-08-20")\n\n @asset(\n partitions_def=partitions_def\n )\n def upstream_asset():\n ...\n\n @asset(\n partitions_def=partitions_def\n )\n def an_asset(context: AssetExecutionContext, upstream_asset):\n context.log.info(context.asset_partition_keys_for_input("upstream_asset"))\n\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # ["2023-08-21", "2023-08-22", "2023-08-23", "2023-08-24", "2023-08-25"]\n\n @asset(\n ins={\n "upstream_asset": AssetIn(partition_mapping=TimeWindowPartitionMapping(start_offset=-1, end_offset=-1))\n }\n partitions_def=partitions_def,\n )\n def another_asset(context: AssetExecutionContext, upstream_asset):\n context.log.info(context.asset_partition_keys_for_input("upstream_asset"))\n\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # ["2023-08-20", "2023-08-21", "2023-08-22", "2023-08-23", "2023-08-24"]\n\n\n @asset(\n partitions_def=partitions_def,\n ins={\n "self_dependent_asset": AssetIn(partition_mapping=TimeWindowPartitionMapping(start_offset=-1, end_offset=-1))\n }\n )\n def self_dependent_asset(context: AssetExecutionContext, self_dependent_asset):\n context.log.info(context.asset_partition_keys_for_input("self_dependent_asset"))\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # ["2023-08-20", "2023-08-21", "2023-08-22", "2023-08-23", "2023-08-24"]\n """\n return list(\n self._step_execution_context.asset_partitions_subset_for_input(\n input_name\n ).get_partition_keys()\n )
\n\n
[docs] @public\n def asset_partitions_time_window_for_input(self, input_name: str = "result") -> TimeWindow:\n """The time window for the partitions of the input asset.\n\n If you want to write your asset to support running a backfill of several partitions in a single run,\n you can use ``asset_partitions_time_window_for_input`` to get the time window of the input that\n are relevant to that backfill.\n\n Raises an error if either of the following are true:\n - The input asset has no partitioning.\n - The input asset is not partitioned with a TimeWindowPartitionsDefinition or a\n MultiPartitionsDefinition with one time-partitioned dimension.\n\n Args:\n input_name (str): The name of the input to get the partition key for.\n\n Examples:\n .. code-block:: python\n\n partitions_def = DailyPartitionsDefinition("2023-08-20")\n\n @asset(\n partitions_def=partitions_def\n )\n def upstream_asset():\n ...\n\n @asset(\n partitions_def=partitions_def\n )\n def an_asset(context: AssetExecutionContext, upstream_asset):\n context.log.info(context.asset_partitions_time_window_for_input("upstream_asset"))\n\n\n # materializing the 2023-08-21 partition of this asset will log:\n # TimeWindow("2023-08-21", "2023-08-22")\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # TimeWindow("2023-08-21", "2023-08-26")\n\n\n @asset(\n ins={\n "upstream_asset": AssetIn(partition_mapping=TimeWindowPartitionMapping(start_offset=-1, end_offset=-1))\n }\n partitions_def=partitions_def,\n )\n def another_asset(context: AssetExecutionContext, upstream_asset):\n context.log.info(context.asset_partitions_time_window_for_input("upstream_asset"))\n\n\n # materializing the 2023-08-21 partition of this asset will log:\n # TimeWindow("2023-08-20", "2023-08-21")\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # TimeWindow("2023-08-21", "2023-08-26")\n\n\n @asset(\n partitions_def=partitions_def,\n ins={\n "self_dependent_asset": AssetIn(partition_mapping=TimeWindowPartitionMapping(start_offset=-1, end_offset=-1))\n }\n )\n def self_dependent_asset(context: AssetExecutionContext, self_dependent_asset):\n context.log.info(context.asset_partitions_time_window_for_input("self_dependent_asset"))\n\n # materializing the 2023-08-21 partition of this asset will log:\n # TimeWindow("2023-08-20", "2023-08-21")\n\n # running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:\n # TimeWindow("2023-08-20", "2023-08-25")\n\n """\n return self._step_execution_context.asset_partitions_time_window_for_input(input_name)
\n\n
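# Hedged sketch (not part of the generated module source above): how the partition-window
# helpers support writing an asset once per backfill instead of once per partition.
# Assumes BackfillPolicy.single_run() is available in this Dagster version; the query body
# is illustrative only.
from dagster import AssetExecutionContext, BackfillPolicy, DailyPartitionsDefinition, asset

daily = DailyPartitionsDefinition(start_date="2023-08-20")

@asset(partitions_def=daily, backfill_policy=BackfillPolicy.single_run())
def events(context: AssetExecutionContext) -> None:
    # One contiguous time window covering every partition in this run, so a backfill can
    # issue a single query instead of one query per partition.
    window = context.asset_partitions_time_window_for_output()
    context.log.info(f"Loading rows with event_time in [{window.start}, {window.end})")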
[docs] @public\n @experimental\n def get_asset_provenance(self, asset_key: AssetKey) -> Optional[DataProvenance]:\n """Return the provenance information for the most recent materialization of an asset.\n\n Args:\n asset_key (AssetKey): Key of the asset for which to retrieve provenance.\n\n Returns:\n Optional[DataProvenance]: Provenance information for the most recent\n materialization of the asset. Returns `None` if the asset was never materialized or\n the materialization record is too old to contain provenance information.\n """\n record = self.instance.get_latest_data_version_record(asset_key)\n\n return (\n None if record is None else extract_data_provenance_from_entry(record.event_log_entry)\n )
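# Hedged sketch: reading provenance for an upstream asset from a downstream asset body via
# the experimental get_asset_provenance documented above. "upstream_asset" is a hypothetical key.
from dagster import AssetExecutionContext, AssetKey, asset

@asset(deps=[AssetKey("upstream_asset")])
def downstream_asset(context: AssetExecutionContext) -> None:
    provenance = context.get_asset_provenance(AssetKey("upstream_asset"))
    if provenance is None:
        context.log.info("No provenance recorded yet for upstream_asset")
    else:
        context.log.info(f"Latest upstream_asset materialization provenance: {provenance}")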
\n\n def set_data_version(self, asset_key: AssetKey, data_version: DataVersion) -> None:\n """Set the data version for an asset being materialized by the currently executing step.\n This is useful for external execution situations where it is not possible to return\n an `Output`.\n\n Args:\n asset_key (AssetKey): Key of the asset for which to set the data version.\n data_version (DataVersion): The data version to set.\n """\n self._step_execution_context.set_data_version(asset_key, data_version)\n\n @property\n def asset_check_spec(self) -> AssetCheckSpec:\n asset_checks_def = check.not_none(\n self.job_def.asset_layer.asset_checks_def_for_node(self.node_handle),\n "This context does not correspond to an AssetChecksDefinition",\n )\n return asset_checks_def.spec\n\n # In this mode no conversion is done on returned values and missing but expected outputs are not\n # allowed.\n @property\n def requires_typed_event_stream(self) -> bool:\n return self._step_execution_context.requires_typed_event_stream\n\n @property\n def typed_event_stream_error_message(self) -> Optional[str]:\n return self._step_execution_context.typed_event_stream_error_message\n\n def set_requires_typed_event_stream(self, *, error_message: Optional[str] = None) -> None:\n self._step_execution_context.set_requires_typed_event_stream(error_message=error_message)
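# Hedged sketch: recording a data version with set_data_version (documented above) when
# returning an Output is not convenient, e.g. the bytes are written to external storage.
# The hashing scheme is an assumption, not part of the API.
from hashlib import sha256
from dagster import AssetExecutionContext, AssetKey, DataVersion, asset

@asset
def externally_written_asset(context: AssetExecutionContext) -> None:
    payload = b"bytes written to some external store"
    context.set_data_version(
        AssetKey("externally_written_asset"), DataVersion(sha256(payload).hexdigest())
    )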
\n\n\n
[docs]class AssetExecutionContext(OpExecutionContext):\n def __init__(self, step_execution_context: StepExecutionContext):\n super().__init__(step_execution_context=step_execution_context)
\n\n\ndef build_execution_context(\n step_context: StepExecutionContext,\n) -> Union[OpExecutionContext, AssetExecutionContext]:\n """Get the correct context based on the type of step (op or asset) and the user provided context\n type annotation. Follows these rules.\n\n step type annotation result\n asset AssetExecutionContext AssetExecutionContext\n asset OpExecutionContext OpExecutionContext\n asset None AssetExecutionContext\n op AssetExecutionContext Error - we cannot init an AssetExecutionContext w/o an AssetsDefinition\n op OpExecutionContext OpExecutionContext\n op None OpExecutionContext\n For ops in graph-backed assets\n step type annotation result\n op AssetExecutionContext AssetExecutionContext\n op OpExecutionContext OpExecutionContext\n op None OpExecutionContext\n """\n is_sda_step = step_context.is_sda_step\n is_op_in_graph_asset = is_sda_step and step_context.is_op_in_graph\n context_annotation = EmptyAnnotation\n compute_fn = step_context.op_def._compute_fn # noqa: SLF001\n compute_fn = (\n compute_fn\n if isinstance(compute_fn, DecoratedOpFunction)\n else DecoratedOpFunction(compute_fn)\n )\n if compute_fn.has_context_arg():\n context_param = compute_fn.get_context_arg()\n context_annotation = context_param.annotation\n\n # It would be nice to do this check at definition time, rather than at run time, but we don't\n # know if the op is part of an op job or a graph-backed asset until we have the step execution context\n if context_annotation is AssetExecutionContext and not is_sda_step:\n # AssetExecutionContext requires an AssetsDefinition during init, so an op in an op job\n # cannot be annotated with AssetExecutionContext\n raise DagsterInvalidDefinitionError(\n "Cannot annotate @op `context` parameter with type AssetExecutionContext unless the"\n " op is part of a graph-backed asset. `context` must be annotated with"\n " OpExecutionContext, or left blank."\n )\n\n if context_annotation is EmptyAnnotation:\n # if no type hint has been given, default to:\n # * AssetExecutionContext for sda steps, not in graph-backed assets\n # * OpExecutionContext for non sda steps\n # * OpExecutionContext for ops in graph-backed assets\n if is_op_in_graph_asset or not is_sda_step:\n return OpExecutionContext(step_context)\n return AssetExecutionContext(step_context)\n if context_annotation is AssetExecutionContext:\n return AssetExecutionContext(step_context)\n return OpExecutionContext(step_context)\n
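# Hedged sketch illustrating the dispatch table in build_execution_context above: the
# context class a body receives depends on the step type and the `context` annotation.
# Names are illustrative only.
from dagster import OpExecutionContext, asset, op

@asset
def unannotated_asset(context) -> None:
    context.log.info(type(context).__name__)  # AssetExecutionContext: asset step, no annotation

@asset
def op_annotated_asset(context: OpExecutionContext) -> None:
    context.log.info(type(context).__name__)  # OpExecutionContext: the annotation is honored

@op
def plain_op(context: OpExecutionContext) -> None:
    # OpExecutionContext; annotating a plain op's context with AssetExecutionContext would
    # raise DagsterInvalidDefinitionError per the rules above
    context.log.info(type(context).__name__)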
", "current_page_name": "_modules/dagster/_core/execution/context/compute", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.execution.context.compute"}, "hook": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.execution.context.hook

\nimport warnings\nfrom typing import TYPE_CHECKING, AbstractSet, Any, Dict, Mapping, Optional, Set, Union\n\nimport dagster._check as check\nfrom dagster._annotations import public\n\nfrom ...definitions.composition import PendingNodeInvocation\nfrom ...definitions.decorators.graph_decorator import graph\nfrom ...definitions.dependency import Node\nfrom ...definitions.hook_definition import HookDefinition\nfrom ...definitions.op_definition import OpDefinition\nfrom ...definitions.resource_definition import IContainsGenerator, Resources\nfrom ...errors import DagsterInvalidPropertyError, DagsterInvariantViolationError\nfrom ...log_manager import DagsterLogManager\nfrom ..plan.step import ExecutionStep\nfrom ..plan.utils import RetryRequestedFromPolicy\nfrom .system import StepExecutionContext\n\nif TYPE_CHECKING:\n    from dagster._core.instance import DagsterInstance\n\n\ndef _property_msg(prop_name: str, method_name: str) -> str:\n    return (\n        f"The {prop_name} {method_name} is not set when a `HookContext` is constructed from "\n        "`build_hook_context`."\n    )\n\n\ndef _check_property_on_test_context(\n    context: "HookContext", attr_str: str, user_facing_name: str, param_on_builder: str\n):\n    """Check if attribute is not None on context. If none, error, and point user in direction of\n    how to specify the parameter on the context object.\n    """\n    value = getattr(context, attr_str)\n    if value is None:\n        raise DagsterInvalidPropertyError(\n            f"Attribute '{user_facing_name}' was not provided when "\n            f"constructing context. Provide a value for the '{param_on_builder}' parameter on "\n            "'build_hook_context'. To learn more, check out the testing hooks section of Dagster's "\n            "concepts docs: https://docs.dagster.io/concepts/ops-jobs-graphs/op-hooks#testing-hooks"\n        )\n    else:\n        return value\n\n\n
[docs]class HookContext:\n """The ``context`` object available to a hook function on an DagsterEvent."""\n\n def __init__(\n self,\n step_execution_context: StepExecutionContext,\n hook_def: HookDefinition,\n ):\n self._step_execution_context = step_execution_context\n self._hook_def = check.inst_param(hook_def, "hook_def", HookDefinition)\n self._required_resource_keys = hook_def.required_resource_keys\n self._resources = step_execution_context.scoped_resources_builder.build(\n self._required_resource_keys\n )\n\n @public\n @property\n def job_name(self) -> str:\n """The name of the job where this hook is being triggered."""\n return self._step_execution_context.job_name\n\n @public\n @property\n def run_id(self) -> str:\n """The id of the run where this hook is being triggered."""\n return self._step_execution_context.run_id\n\n @public\n @property\n def hook_def(self) -> HookDefinition:\n """The hook that the context object belongs to."""\n return self._hook_def\n\n @public\n @property\n def instance(self) -> "DagsterInstance":\n """The instance configured to run the current job."""\n return self._step_execution_context.instance\n\n @property\n def op(self) -> Node:\n """The op instance associated with the hook."""\n return self._step_execution_context.op\n\n @property\n def step(self) -> ExecutionStep:\n warnings.warn(\n "The step property of HookContext has been deprecated, and will be removed "\n "in a future release."\n )\n return self._step_execution_context.step\n\n @public\n @property\n def step_key(self) -> str:\n """The key for the step where this hook is being triggered."""\n return self._step_execution_context.step.key\n\n @public\n @property\n def required_resource_keys(self) -> AbstractSet[str]:\n """Resources required by this hook."""\n return self._required_resource_keys\n\n @public\n @property\n def resources(self) -> "Resources":\n """Resources available in the hook context."""\n return self._resources\n\n @property\n def solid_config(self) -> Any:\n solid_config = self._step_execution_context.resolved_run_config.ops.get(\n str(self._step_execution_context.step.node_handle)\n )\n return solid_config.config if solid_config else None\n\n @public\n @property\n def op_config(self) -> Any:\n """The parsed config specific to this op."""\n return self.solid_config\n\n # Because of the fact that we directly use the log manager of the step, if a user calls\n # hook_context.log.with_tags, then they will end up mutating the step's logging tags as well.\n # This is not problematic because the hook only runs after the step has been completed.\n @public\n @property\n def log(self) -> DagsterLogManager:\n """Centralized log dispatch from user code."""\n return self._step_execution_context.log\n\n @property\n def solid_exception(self) -> Optional[BaseException]:\n """The thrown exception in a failed solid.\n\n Returns:\n Optional[BaseException]: the exception object, None if the solid execution succeeds.\n """\n return self.op_exception\n\n @public\n @property\n def op_exception(self) -> Optional[BaseException]:\n """The thrown exception in a failed op."""\n exc = self._step_execution_context.step_exception\n\n if isinstance(exc, RetryRequestedFromPolicy):\n return exc.__cause__\n\n return exc\n\n @property\n def solid_output_values(self) -> Mapping[str, Union[Any, Mapping[str, Any]]]:\n """The computed output values.\n\n Returns a dictionary where keys are output names and the values are:\n * the output values in the normal case\n * a dictionary from mapping key to corresponding value 
in the mapped case\n """\n results: Dict[str, Union[Any, Dict[str, Any]]] = {}\n captured = self._step_execution_context.step_output_capture\n\n if captured is None:\n check.failed("Outputs were unexpectedly not captured for hook")\n\n # make the returned values more user-friendly\n for step_output_handle, value in captured.items():\n if step_output_handle.mapping_key:\n if results.get(step_output_handle.output_name) is None:\n results[step_output_handle.output_name] = {\n step_output_handle.mapping_key: value\n }\n else:\n results[step_output_handle.output_name][step_output_handle.mapping_key] = value\n else:\n results[step_output_handle.output_name] = value\n\n return results\n\n @public\n @property\n def op_output_values(self):\n """Computed output values in an op."""\n return self.solid_output_values
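# Hedged sketch: a failure hook that reads the public HookContext properties documented
# above. Attach it with, e.g., @job(hooks={alert_on_failure}).
from dagster import HookContext, failure_hook

@failure_hook
def alert_on_failure(context: HookContext) -> None:
    context.log.error(
        f"Failure in job {context.job_name} (run {context.run_id}): {context.op_exception}"
    )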
\n\n\nclass UnboundHookContext(HookContext):\n def __init__(\n self,\n resources: Mapping[str, Any],\n op: Optional[Union[OpDefinition, PendingNodeInvocation]],\n run_id: Optional[str],\n job_name: Optional[str],\n op_exception: Optional[Exception],\n instance: Optional["DagsterInstance"],\n ):\n from ..build_resources import build_resources, wrap_resources_for_execution\n from ..context_creation_job import initialize_console_manager\n\n self._op = None\n if op is not None:\n\n @graph(name="hook_context_container")\n def temp_graph():\n op()\n\n self._op = temp_graph.nodes[0]\n\n # Open resource context manager\n self._resource_defs = wrap_resources_for_execution(resources)\n self._resources_cm = build_resources(self._resource_defs)\n self._resources = self._resources_cm.__enter__()\n self._resources_contain_cm = isinstance(self._resources, IContainsGenerator)\n\n self._run_id = run_id\n self._job_name = job_name\n self._op_exception = op_exception\n self._instance = instance\n\n self._log = initialize_console_manager(None)\n\n self._cm_scope_entered = False\n\n def __enter__(self):\n self._cm_scope_entered = True\n return self\n\n def __exit__(self, *exc: Any):\n self._resources_cm.__exit__(*exc)\n\n def __del__(self):\n if self._resources_contain_cm and not self._cm_scope_entered:\n self._resources_cm.__exit__(None, None, None)\n\n @property\n def job_name(self) -> str:\n return _check_property_on_test_context(\n self, attr_str="_job_name", user_facing_name="job_name", param_on_builder="job_name"\n )\n\n @property\n def run_id(self) -> str:\n return _check_property_on_test_context(\n self, attr_str="_run_id", user_facing_name="run_id", param_on_builder="run_id"\n )\n\n @property\n def hook_def(self) -> HookDefinition:\n raise DagsterInvalidPropertyError(_property_msg("hook_def", "property"))\n\n @property\n def op(self) -> Node:\n return _check_property_on_test_context(\n self, attr_str="_op", user_facing_name="op", param_on_builder="op"\n )\n\n @property\n def step(self) -> ExecutionStep:\n raise DagsterInvalidPropertyError(_property_msg("step", "property"))\n\n @property\n def step_key(self) -> str:\n raise DagsterInvalidPropertyError(_property_msg("step_key", "property"))\n\n @property\n def required_resource_keys(self) -> Set[str]:\n raise DagsterInvalidPropertyError(_property_msg("hook_def", "property"))\n\n @property\n def resources(self) -> "Resources":\n if self._resources_contain_cm and not self._cm_scope_entered:\n raise DagsterInvariantViolationError(\n "At least one provided resource is a generator, but attempting to access "\n "resources outside of context manager scope. You can use the following syntax to "\n "open a context manager: `with build_hook_context(...) 
as context:`"\n )\n return self._resources\n\n @property\n def solid_config(self) -> Any:\n raise DagsterInvalidPropertyError(_property_msg("solid_config", "property"))\n\n @property\n def log(self) -> DagsterLogManager:\n return self._log\n\n @property\n def op_exception(self) -> Optional[BaseException]:\n return self._op_exception\n\n @property\n def solid_output_values(self) -> Mapping[str, Union[Any, Mapping[str, Any]]]:\n """The computed output values.\n\n Returns a dictionary where keys are output names and the values are:\n * the output values in the normal case\n * a dictionary from mapping key to corresponding value in the mapped case\n """\n raise DagsterInvalidPropertyError(_property_msg("solid_output_values", "method"))\n\n @property\n def instance(self) -> "DagsterInstance":\n if not self._instance:\n raise DagsterInvariantViolationError(\n "Tried to access the HookContext instance, but no instance was provided to"\n " `build_hook_context`."\n )\n\n return self._instance\n\n\nclass BoundHookContext(HookContext):\n def __init__(\n self,\n hook_def: HookDefinition,\n resources: Resources,\n op: Optional[Node],\n log_manager: DagsterLogManager,\n run_id: Optional[str],\n job_name: Optional[str],\n op_exception: Optional[Exception],\n instance: Optional["DagsterInstance"],\n ):\n self._hook_def = hook_def\n self._resources = resources\n self._op = op\n self._log_manager = log_manager\n self._run_id = run_id\n self._job_name = job_name\n self._op_exception = op_exception\n self._instance = instance\n\n @property\n def job_name(self) -> str:\n return _check_property_on_test_context(\n self, attr_str="_job_name", user_facing_name="job_name", param_on_builder="job_name"\n )\n\n @property\n def run_id(self) -> str:\n return _check_property_on_test_context(\n self, attr_str="_run_id", user_facing_name="run_id", param_on_builder="run_id"\n )\n\n @property\n def hook_def(self) -> HookDefinition:\n return self._hook_def\n\n @property\n def op(self) -> Node:\n return _check_property_on_test_context(\n self, attr_str="_op", user_facing_name="op", param_on_builder="op"\n )\n\n @property\n def step(self) -> ExecutionStep:\n raise DagsterInvalidPropertyError(_property_msg("step", "property"))\n\n @property\n def step_key(self) -> str:\n raise DagsterInvalidPropertyError(_property_msg("step_key", "property"))\n\n @property\n def required_resource_keys(self) -> AbstractSet[str]:\n return self._hook_def.required_resource_keys\n\n @property\n def resources(self) -> "Resources":\n return self._resources\n\n @property\n def solid_config(self) -> Any:\n raise DagsterInvalidPropertyError(_property_msg("solid_config", "property"))\n\n @property\n def log(self) -> DagsterLogManager:\n return self._log_manager\n\n @property\n def op_exception(self):\n return self._op_exception\n\n @property\n def solid_output_values(self) -> Mapping[str, Union[Any, Mapping[str, Any]]]:\n """The computed output values.\n\n Returns a dictionary where keys are output names and the values are:\n * the output values in the normal case\n * a dictionary from mapping key to corresponding value in the mapped case\n """\n raise DagsterInvalidPropertyError(_property_msg("solid_output_values", "method"))\n\n @property\n def instance(self) -> "DagsterInstance":\n if not self._instance:\n raise DagsterInvariantViolationError(\n "Tried to access the HookContext instance, but no instance was provided to"\n " `build_hook_context`."\n )\n\n return self._instance\n\n\n
[docs]def build_hook_context(\n resources: Optional[Mapping[str, Any]] = None,\n op: Optional[Union[OpDefinition, PendingNodeInvocation]] = None,\n run_id: Optional[str] = None,\n job_name: Optional[str] = None,\n op_exception: Optional[Exception] = None,\n instance: Optional["DagsterInstance"] = None,\n) -> UnboundHookContext:\n """Builds hook context from provided parameters.\n\n ``build_hook_context`` can be used as either a function or a context manager. If there is a\n provided resource to ``build_hook_context`` that is a context manager, then it must be used as a\n context manager. This function can be used to provide the context argument to the invocation of\n a hook definition.\n\n Args:\n resources (Optional[Dict[str, Any]]): The resources to provide to the context. These can\n either be values or resource definitions.\n op (Optional[OpDefinition, PendingNodeInvocation]): The op definition which the\n hook may be associated with.\n run_id (Optional[str]): The id of the run in which the hook is invoked (provided for mocking purposes).\n job_name (Optional[str]): The name of the job in which the hook is used (provided for mocking purposes).\n op_exception (Optional[Exception]): The exception that caused the hook to be triggered.\n instance (Optional[DagsterInstance]): The Dagster instance configured to run the hook.\n\n Examples:\n .. code-block:: python\n\n context = build_hook_context()\n hook_to_invoke(context)\n\n with build_hook_context(resources={"foo": context_manager_resource}) as context:\n hook_to_invoke(context)\n """\n op = check.opt_inst_param(op, "op", (OpDefinition, PendingNodeInvocation))\n\n from dagster._core.instance import DagsterInstance\n\n return UnboundHookContext(\n resources=check.opt_mapping_param(resources, "resources", key_type=str),\n op=op,\n run_id=check.opt_str_param(run_id, "run_id"),\n job_name=check.opt_str_param(job_name, "job_name"),\n op_exception=check.opt_inst_param(op_exception, "op_exception", Exception),\n instance=check.opt_inst_param(instance, "instance", DagsterInstance),\n )
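# Hedged sketch: unit-testing the alert_on_failure hook sketched earlier by invoking it
# with a context from build_hook_context; only the fields the hook reads are supplied.
from dagster import build_hook_context

def test_alert_on_failure() -> None:
    context = build_hook_context(
        job_name="my_job", run_id="test_run", op_exception=Exception("boom")
    )
    alert_on_failure(context)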
\n
", "current_page_name": "_modules/dagster/_core/execution/context/hook", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.execution.context.hook"}, "init": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.execution.context.init

\nfrom typing import Any, Mapping, Optional, Union\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.definitions.resource_definition import (\n    IContainsGenerator,\n    ResourceDefinition,\n    Resources,\n)\nfrom dagster._core.errors import DagsterInvariantViolationError\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._core.log_manager import DagsterLogManager\nfrom dagster._core.storage.dagster_run import DagsterRun\n\n\n
[docs]class InitResourceContext:\n """The context object available as the argument to the initialization function of a :py:class:`dagster.ResourceDefinition`.\n\n Users should not instantiate this object directly. To construct an `InitResourceContext` for testing purposes, use :py:func:`dagster.build_init_resource_context`.\n\n Example:\n .. code-block:: python\n\n from dagster import resource, InitResourceContext\n\n @resource\n def the_resource(init_context: InitResourceContext):\n init_context.log.info("Hello, world!")\n """\n\n def __init__(\n self,\n resource_config: Any,\n resources: Resources,\n resource_def: Optional[ResourceDefinition] = None,\n instance: Optional[DagsterInstance] = None,\n dagster_run: Optional[DagsterRun] = None,\n log_manager: Optional[DagsterLogManager] = None,\n ):\n self._resource_config = resource_config\n self._resource_def = resource_def\n self._log_manager = log_manager\n self._instance = instance\n self._resources = resources\n self._dagster_run = dagster_run\n\n @public\n @property\n def resource_config(self) -> Any:\n """The configuration data provided by the run config. The schema\n for this data is defined by the ``config_field`` argument to\n :py:class:`ResourceDefinition`.\n """\n return self._resource_config\n\n @public\n @property\n def resource_def(self) -> Optional[ResourceDefinition]:\n """The definition of the resource currently being constructed."""\n return self._resource_def\n\n @public\n @property\n def resources(self) -> Resources:\n """The resources that are available to the resource that we are initalizing."""\n return self._resources\n\n @public\n @property\n def instance(self) -> Optional[DagsterInstance]:\n """The Dagster instance configured for the current execution context."""\n return self._instance\n\n @property\n def dagster_run(self) -> Optional[DagsterRun]:\n """The dagster run to use. When initializing resources outside of execution context, this will be None."""\n return self._dagster_run\n\n @public\n @property\n def log(self) -> Optional[DagsterLogManager]:\n """The Dagster log manager configured for the current execution context."""\n return self._log_manager\n\n # backcompat: keep around this property from when InitResourceContext used to be a NamedTuple\n @public\n @property\n def log_manager(self) -> Optional[DagsterLogManager]:\n """The log manager for this run of the job."""\n return self._log_manager\n\n @public\n @property\n def run_id(self) -> Optional[str]:\n """The id for this run of the job or pipeline. When initializing resources outside of\n execution context, this will be None.\n """\n return self.dagster_run.run_id if self.dagster_run else None\n\n def replace_config(self, config: Any) -> "InitResourceContext":\n return InitResourceContext(\n resource_config=config,\n resources=self.resources,\n instance=self.instance,\n resource_def=self.resource_def,\n dagster_run=self.dagster_run,\n log_manager=self.log,\n )
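# Hedged sketch: a resource that reads its validated config and a required nested resource
# off the InitResourceContext documented above. "credentials" is a hypothetical resource key.
from dagster import InitResourceContext, resource

@resource(config_schema={"bucket": str}, required_resource_keys={"credentials"})
def bucket_client(init_context: InitResourceContext) -> str:
    creds = init_context.resources.credentials
    return f"client(bucket={init_context.resource_config['bucket']}, creds={creds})"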
\n\n\nclass UnboundInitResourceContext(InitResourceContext):\n """Resource initialization context outputted by ``build_init_resource_context``.\n\n Represents a context whose config has not yet been validated against a resource definition,\n hence the inability to access the `resource_def` attribute. When an instance of\n ``UnboundInitResourceContext`` is passed to a resource invocation, config is validated,\n and it is subsumed into an `InitResourceContext`, which contains the resource_def validated\n against.\n """\n\n def __init__(\n self,\n resource_config: Any,\n resources: Optional[Union[Resources, Mapping[str, Any]]],\n instance: Optional[DagsterInstance],\n ):\n from dagster._core.execution.api import ephemeral_instance_if_missing\n from dagster._core.execution.build_resources import (\n build_resources,\n wrap_resources_for_execution,\n )\n from dagster._core.execution.context_creation_job import initialize_console_manager\n\n self._instance_provided = (\n check.opt_inst_param(instance, "instance", DagsterInstance) is not None\n )\n # Construct ephemeral instance if missing\n self._instance_cm = ephemeral_instance_if_missing(instance)\n # Pylint can't infer that the ephemeral_instance context manager has an __enter__ method,\n # so ignore lint error\n instance = self._instance_cm.__enter__()\n\n if isinstance(resources, Resources):\n check.failed("Should not have a Resources object directly from this initialization")\n\n self._resource_defs = wrap_resources_for_execution(\n check.opt_mapping_param(resources, "resources")\n )\n\n self._resources_cm = build_resources(self._resource_defs, instance=instance)\n resources = self._resources_cm.__enter__()\n self._resources_contain_cm = isinstance(resources, IContainsGenerator)\n\n self._cm_scope_entered = False\n super(UnboundInitResourceContext, self).__init__(\n resource_config=resource_config,\n resources=resources,\n resource_def=None,\n instance=instance,\n dagster_run=None,\n log_manager=initialize_console_manager(None),\n )\n\n def __enter__(self):\n self._cm_scope_entered = True\n return self\n\n def __exit__(self, *exc):\n self._resources_cm.__exit__(*exc)\n if self._instance_provided:\n self._instance_cm.__exit__(*exc)\n\n def __del__(self):\n if self._resources_cm and self._resources_contain_cm and not self._cm_scope_entered:\n self._resources_cm.__exit__(None, None, None)\n if self._instance_provided and not self._cm_scope_entered:\n self._instance_cm.__exit__(None, None, None)\n\n @property\n def resource_config(self) -> Any:\n return self._resource_config\n\n @property\n def resource_def(self) -> Optional[ResourceDefinition]:\n raise DagsterInvariantViolationError(\n "UnboundInitLoggerContext has not been validated against a logger definition."\n )\n\n @property\n def resources(self) -> Resources:\n """The resources that are available to the resource that we are initalizing."""\n if self._resources_cm and self._resources_contain_cm and not self._cm_scope_entered:\n raise DagsterInvariantViolationError(\n "At least one provided resource is a generator, but attempting to access "\n "resources outside of context manager scope. You can use the following syntax to "\n "open a context manager: `with build_init_resource_context(...) 
as context:`"\n )\n return self._resources\n\n @property\n def instance(self) -> Optional[DagsterInstance]:\n return self._instance\n\n @property\n def log(self) -> Optional[DagsterLogManager]:\n return self._log_manager\n\n # backcompat: keep around this property from when InitResourceContext used to be a NamedTuple\n @property\n def log_manager(self) -> Optional[DagsterLogManager]:\n return self._log_manager\n\n @property\n def run_id(self) -> Optional[str]:\n return None\n\n\n
[docs]def build_init_resource_context(\n config: Optional[Mapping[str, Any]] = None,\n resources: Optional[Mapping[str, Any]] = None,\n instance: Optional[DagsterInstance] = None,\n) -> InitResourceContext:\n """Builds resource initialization context from provided parameters.\n\n ``build_init_resource_context`` can be used as either a function or context manager. If there is a\n provided resource to ``build_init_resource_context`` that is a context manager, then it must be\n used as a context manager. This function can be used to provide the context argument to the\n invocation of a resource.\n\n Args:\n resources (Optional[Dict[str, Any]]): The resources to provide to the context. These can be\n either values or resource definitions.\n config (Optional[Any]): The resource config to provide to the context.\n instance (Optional[DagsterInstance]): The dagster instance configured for the context.\n Defaults to DagsterInstance.ephemeral().\n\n Examples:\n .. code-block:: python\n\n context = build_init_resource_context()\n resource_to_init(context)\n\n with build_init_resource_context(\n resources={"foo": context_manager_resource}\n ) as context:\n resource_to_init(context)\n\n """\n return UnboundInitResourceContext(\n resource_config=check.opt_mapping_param(config, "config", key_type=str),\n instance=check.opt_inst_param(instance, "instance", DagsterInstance),\n resources=check.opt_mapping_param(resources, "resources", key_type=str),\n )
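# Hedged sketch: exercising the bucket_client resource sketched earlier in a test by
# invoking it with a context from build_init_resource_context.
from dagster import build_init_resource_context

def test_bucket_client() -> None:
    context = build_init_resource_context(
        config={"bucket": "my-bucket"},
        resources={"credentials": "fake-creds"},
    )
    assert "my-bucket" in bucket_client(context)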
\n
", "current_page_name": "_modules/dagster/_core/execution/context/init", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.execution.context.init"}, "input": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.execution.context.input

\nfrom datetime import datetime\nfrom typing import (\n    TYPE_CHECKING,\n    Any,\n    Iterable,\n    Iterator,\n    List,\n    Mapping,\n    Optional,\n    Sequence,\n    Union,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.definitions.events import AssetKey, AssetObservation, CoercibleToAssetKey\nfrom dagster._core.definitions.metadata import (\n    ArbitraryMetadataMapping,\n    MetadataValue,\n)\nfrom dagster._core.definitions.partition import PartitionsSubset\nfrom dagster._core.definitions.partition_key_range import PartitionKeyRange\nfrom dagster._core.definitions.time_window_partitions import TimeWindow, TimeWindowPartitionsSubset\nfrom dagster._core.errors import DagsterInvariantViolationError\nfrom dagster._core.instance import DagsterInstance, DynamicPartitionsStore\n\nif TYPE_CHECKING:\n    from dagster._core.definitions import PartitionsDefinition\n    from dagster._core.definitions.op_definition import OpDefinition\n    from dagster._core.definitions.resource_definition import Resources\n    from dagster._core.events import DagsterEvent\n    from dagster._core.execution.context.system import StepExecutionContext\n    from dagster._core.log_manager import DagsterLogManager\n    from dagster._core.types.dagster_type import DagsterType\n\n    from .output import OutputContext\n\n\n
[docs]class InputContext:\n """The ``context`` object available to the load_input method of :py:class:`InputManager`.\n\n Users should not instantiate this object directly. In order to construct\n an `InputContext` for testing an IO Manager's `load_input` method, use\n :py:func:`dagster.build_input_context`.\n\n Example:\n .. code-block:: python\n\n from dagster import IOManager, InputContext\n\n class MyIOManager(IOManager):\n def load_input(self, context: InputContext):\n ...\n """\n\n def __init__(\n self,\n *,\n name: Optional[str] = None,\n job_name: Optional[str] = None,\n op_def: Optional["OpDefinition"] = None,\n config: Optional[Any] = None,\n metadata: Optional[ArbitraryMetadataMapping] = None,\n upstream_output: Optional["OutputContext"] = None,\n dagster_type: Optional["DagsterType"] = None,\n log_manager: Optional["DagsterLogManager"] = None,\n resource_config: Optional[Mapping[str, Any]] = None,\n resources: Optional[Union["Resources", Mapping[str, Any]]] = None,\n step_context: Optional["StepExecutionContext"] = None,\n asset_key: Optional[AssetKey] = None,\n partition_key: Optional[str] = None,\n asset_partitions_subset: Optional[PartitionsSubset] = None,\n asset_partitions_def: Optional["PartitionsDefinition"] = None,\n instance: Optional[DagsterInstance] = None,\n ):\n from dagster._core.definitions.resource_definition import IContainsGenerator, Resources\n from dagster._core.execution.build_resources import build_resources\n\n self._name = name\n self._job_name = job_name\n self._op_def = op_def\n self._config = config\n self._metadata = metadata or {}\n self._upstream_output = upstream_output\n self._dagster_type = dagster_type\n self._log = log_manager\n self._resource_config = resource_config\n self._step_context = step_context\n self._asset_key = asset_key\n if self._step_context and self._step_context.has_partition_key:\n self._partition_key: Optional[str] = self._step_context.partition_key\n else:\n self._partition_key = partition_key\n\n self._asset_partitions_subset = asset_partitions_subset\n self._asset_partitions_def = asset_partitions_def\n\n if isinstance(resources, Resources):\n self._resources_cm = None\n self._resources = resources\n else:\n self._resources_cm = build_resources(\n check.opt_mapping_param(resources, "resources", key_type=str)\n )\n self._resources = self._resources_cm.__enter__()\n self._resources_contain_cm = isinstance(self._resources, IContainsGenerator)\n self._cm_scope_entered = False\n\n self._events: List["DagsterEvent"] = []\n self._observations: List[AssetObservation] = []\n self._instance = instance\n\n def __enter__(self):\n if self._resources_cm:\n self._cm_scope_entered = True\n return self\n\n def __exit__(self, *exc):\n if self._resources_cm:\n self._resources_cm.__exit__(*exc)\n\n def __del__(self):\n if self._resources_cm and self._resources_contain_cm and not self._cm_scope_entered:\n self._resources_cm.__exit__(None, None, None)\n\n @property\n def instance(self) -> DagsterInstance:\n if self._instance is None:\n raise DagsterInvariantViolationError(\n "Attempting to access instance, "\n "but it was not provided when constructing the InputContext"\n )\n return self._instance\n\n @public\n @property\n def has_input_name(self) -> bool:\n """If we're the InputContext is being used to load the result of a run from outside the run,\n then it won't have an input name.\n """\n return self._name is not None\n\n @public\n @property\n def name(self) -> str:\n """The name of the input that we're loading."""\n if self._name is 
None:\n raise DagsterInvariantViolationError(\n "Attempting to access name, "\n "but it was not provided when constructing the InputContext"\n )\n\n return self._name\n\n @property\n def job_name(self) -> str:\n if self._job_name is None:\n raise DagsterInvariantViolationError(\n "Attempting to access job_name, "\n "but it was not provided when constructing the InputContext"\n )\n return self._job_name\n\n @public\n @property\n def op_def(self) -> "OpDefinition":\n """The definition of the op that's loading the input."""\n if self._op_def is None:\n raise DagsterInvariantViolationError(\n "Attempting to access op_def, "\n "but it was not provided when constructing the InputContext"\n )\n\n return self._op_def\n\n @public\n @property\n def config(self) -> Any:\n """The config attached to the input that we're loading."""\n return self._config\n\n @public\n @property\n def metadata(self) -> Optional[ArbitraryMetadataMapping]:\n """A dict of metadata that is assigned to the InputDefinition that we're loading for.\n This property only contains metadata passed in explicitly with :py:class:`AssetIn`\n or :py:class:`In`. To access metadata of an upstream asset or operation definition,\n use the metadata in :py:attr:`.InputContext.upstream_output`.\n """\n return self._metadata\n\n @public\n @property\n def upstream_output(self) -> Optional["OutputContext"]:\n """Info about the output that produced the object we're loading."""\n return self._upstream_output\n\n @public\n @property\n def dagster_type(self) -> "DagsterType":\n """The type of this input.\n Dagster types do not propagate from an upstream output to downstream inputs,\n and this property only captures type information for the input that is either\n passed in explicitly with :py:class:`AssetIn` or :py:class:`In`, or can be\n infered from type hints. For an asset input, the Dagster type from the upstream\n asset definition is ignored.\n """\n if self._dagster_type is None:\n raise DagsterInvariantViolationError(\n "Attempting to access dagster_type, "\n "but it was not provided when constructing the InputContext"\n )\n\n return self._dagster_type\n\n @public\n @property\n def log(self) -> "DagsterLogManager":\n """The log manager to use for this input."""\n if self._log is None:\n raise DagsterInvariantViolationError(\n "Attempting to access log, "\n "but it was not provided when constructing the InputContext"\n )\n\n return self._log\n\n @public\n @property\n def resource_config(self) -> Optional[Mapping[str, Any]]:\n """The config associated with the resource that initializes the InputManager."""\n return self._resource_config\n\n @public\n @property\n def resources(self) -> Any:\n """The resources required by the resource that initializes the\n input manager. If using the :py:func:`@input_manager` decorator, these resources\n correspond to those requested with the `required_resource_keys` parameter.\n """\n if self._resources is None:\n raise DagsterInvariantViolationError(\n "Attempting to access resources, "\n "but it was not provided when constructing the InputContext"\n )\n\n if self._resources_cm and self._resources_contain_cm and not self._cm_scope_entered:\n raise DagsterInvariantViolationError(\n "At least one provided resource is a generator, but attempting to access "\n "resources outside of context manager scope. You can use the following syntax to "\n "open a context manager: `with build_input_context(...) 
as context:`"\n )\n return self._resources\n\n @public\n @property\n def has_asset_key(self) -> bool:\n """Returns True if an asset is being loaded as input, otherwise returns False. A return value of False\n indicates that an output from an op is being loaded as the input.\n """\n return self._asset_key is not None\n\n @public\n @property\n def asset_key(self) -> AssetKey:\n """The ``AssetKey`` of the asset that is being loaded as an input."""\n if self._asset_key is None:\n raise DagsterInvariantViolationError(\n "Attempting to access asset_key, but no asset is associated with this input"\n )\n\n return self._asset_key\n\n @public\n @property\n def asset_partitions_def(self) -> "PartitionsDefinition":\n """The PartitionsDefinition on the upstream asset corresponding to this input."""\n if self._asset_partitions_def is None:\n if self.asset_key:\n raise DagsterInvariantViolationError(\n f"Attempting to access partitions def for asset {self.asset_key}, but it is not"\n " partitioned"\n )\n else:\n raise DagsterInvariantViolationError(\n "Attempting to access partitions def for asset, but input does not correspond"\n " to an asset"\n )\n\n return self._asset_partitions_def\n\n @property\n def step_context(self) -> "StepExecutionContext":\n if self._step_context is None:\n raise DagsterInvariantViolationError(\n "Attempting to access step_context, "\n "but it was not provided when constructing the InputContext"\n )\n\n return self._step_context\n\n @public\n @property\n def has_partition_key(self) -> bool:\n """Whether the current run is a partitioned run."""\n return self._partition_key is not None\n\n @public\n @property\n def partition_key(self) -> str:\n """The partition key for the current run.\n\n Raises an error if the current run is not a partitioned run.\n """\n if self._partition_key is None:\n check.failed(\n "Tried to access partition_key on a non-partitioned run.",\n )\n\n return self._partition_key\n\n @public\n @property\n def has_asset_partitions(self) -> bool:\n """Returns True if the asset being loaded as input is partitioned."""\n return self._asset_partitions_subset is not None\n\n @public\n @property\n def asset_partition_key(self) -> str:\n """The partition key for input asset.\n\n Raises an error if the input asset has no partitioning, or if the run covers a partition\n range for the input asset.\n """\n subset = self._asset_partitions_subset\n\n if subset is None:\n check.failed("The input does not correspond to a partitioned asset.")\n\n partition_keys = list(subset.get_partition_keys())\n if len(partition_keys) == 1:\n return partition_keys[0]\n else:\n check.failed(\n f"Tried to access partition key for asset '{self.asset_key}', "\n f"but the number of input partitions != 1: '{subset}'."\n )\n\n @public\n @property\n def asset_partition_key_range(self) -> PartitionKeyRange:\n """The partition key range for input asset.\n\n Raises an error if the input asset has no partitioning.\n """\n subset = self._asset_partitions_subset\n\n if subset is None:\n check.failed(\n "Tried to access asset_partition_key_range, but the asset is not partitioned.",\n )\n\n partition_key_ranges = subset.get_partition_key_ranges(\n dynamic_partitions_store=self.instance\n )\n if len(partition_key_ranges) != 1:\n check.failed(\n "Tried to access asset_partition_key_range, but there are "\n f"({len(partition_key_ranges)}) key ranges associated with this input.",\n )\n\n return partition_key_ranges[0]\n\n @public\n @property\n def asset_partition_keys(self) -> Sequence[str]:\n """The 
partition keys for input asset.\n\n Raises an error if the input asset has no partitioning.\n """\n if self._asset_partitions_subset is None:\n check.failed(\n "Tried to access asset_partition_keys, but the asset is not partitioned.",\n )\n\n return list(self._asset_partitions_subset.get_partition_keys())\n\n @public\n @property\n def asset_partitions_time_window(self) -> TimeWindow:\n """The time window for the partitions of the input asset.\n\n Raises an error if either of the following are true:\n - The input asset has no partitioning.\n - The input asset is not partitioned with a TimeWindowPartitionsDefinition.\n """\n subset = self._asset_partitions_subset\n\n if subset is None:\n check.failed(\n "Tried to access asset_partitions_time_window, but the asset is not partitioned.",\n )\n\n if not isinstance(subset, TimeWindowPartitionsSubset):\n check.failed(\n "Tried to access asset_partitions_time_window, but the asset is not partitioned"\n " with time windows.",\n )\n\n time_windows = subset.included_time_windows\n if len(time_windows) != 1:\n check.failed(\n "Tried to access asset_partitions_time_window, but there are "\n f"({len(time_windows)}) time windows associated with this input.",\n )\n\n return time_windows[0]\n\n
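# Hedged sketch: a load_input that branches on the partition properties documented above.
# The path-based "storage" helper is a stand-in for a real system.
from typing import Any, Optional, Sequence
from dagster import InputContext, IOManager, OutputContext

def _fake_read(path: Sequence[str], partition_key: Optional[str] = None) -> str:
    # hypothetical storage read; returns the path it would read from
    return "/".join([*path, partition_key]) if partition_key else "/".join(path)

class PartitionedIOManager(IOManager):
    def handle_output(self, context: OutputContext, obj: Any) -> None:
        ...  # writing is out of scope for this sketch

    def load_input(self, context: InputContext) -> Any:
        if not context.has_asset_partitions:
            return _fake_read(context.asset_key.path)
        # one entry per upstream partition relevant to this run: a single key for an
        # ordinary partitioned run, several for a backfill or a ranged partition mapping
        return [_fake_read(context.asset_key.path, key) for key in context.asset_partition_keys]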
[docs] @public\n def get_identifier(self) -> Sequence[str]:\n """Utility method to get a collection of identifiers that as a whole represent a unique\n step input.\n\n If not using memoization, the unique identifier collection consists of\n\n - ``run_id``: the id of the run which generates the input.\n Note: This method also handles the re-execution memoization logic. If the step that\n generates the input is skipped in the re-execution, the ``run_id`` will be the id\n of its parent run.\n - ``step_key``: the key for a compute step.\n - ``name``: the name of the output. (default: 'result').\n\n If using memoization, the ``version`` corresponding to the step output is used in place of\n the ``run_id``.\n\n Returns:\n List[str, ...]: A list of identifiers, i.e. (run_id or version), step_key, and output_name\n """\n if self.upstream_output is None:\n raise DagsterInvariantViolationError(\n "InputContext.upstream_output not defined. Cannot compute an identifier"\n )\n\n return self.upstream_output.get_identifier()
\n\n
[docs] @public\n def get_asset_identifier(self) -> Sequence[str]:\n """The sequence of strings making up the AssetKey for the asset being loaded as an input.\n If the asset is partitioned, the identifier contains the partition key as the final element in the\n sequence. For example, for the asset key ``AssetKey(["foo", "bar", "baz"])``, materialized with\n partition key "2023-06-01", ``get_asset_identifier`` will return ``["foo", "bar", "baz", "2023-06-01"]``.\n """\n # Use has_asset_key instead of reading asset_key directly: the asset_key property raises\n # when no asset is associated with the input, which made the error branch below unreachable.\n if self.has_asset_key:\n if self.has_asset_partitions:\n return [*self.asset_key.path, self.asset_partition_key]\n else:\n return self.asset_key.path\n else:\n check.failed("Can't get asset identifier for an input with no asset key")
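# --- Illustrative usage sketch (not part of the module source above) ---
# Exercising get_asset_identifier() against a test context created with
# build_input_context (defined later in this module). With no partition
# information supplied, the identifier is just the asset key path.
from dagster import build_input_context


def test_get_asset_identifier():
    context = build_input_context(asset_key=["foo", "bar", "baz"])
    assert list(context.get_asset_identifier()) == ["foo", "bar", "baz"]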
\n\n def consume_events(self) -> Iterator["DagsterEvent"]:\n """Pops and yields all user-generated events that have been recorded from this context.\n\n If consume_events has not yet been called, this will yield all logged events since the call to `handle_input`. If consume_events has been called, it will yield all events since the last time consume_events was called. Designed for internal use. Users should never need to invoke this method.\n """\n events = self._events\n self._events = []\n yield from events\n\n def add_input_metadata(\n self,\n metadata: Mapping[str, Any],\n description: Optional[str] = None,\n ) -> None:\n """Accepts a dictionary of metadata. Metadata entries will appear on the LOADED_INPUT event.\n If the input is an asset, metadata will be attached to an asset observation.\n\n The asset observation will be yielded from the run and appear in the event log.\n Only valid if the context has an asset key.\n """\n from dagster._core.definitions.metadata import normalize_metadata\n from dagster._core.events import DagsterEvent\n\n metadata = check.mapping_param(metadata, "metadata", key_type=str)\n self._metadata = {**self._metadata, **normalize_metadata(metadata)}\n if self.has_asset_key:\n check.opt_str_param(description, "description")\n\n observation = AssetObservation(\n asset_key=self.asset_key,\n description=description,\n partition=self.asset_partition_key if self.has_asset_partitions else None,\n metadata=metadata,\n )\n self._observations.append(observation)\n if self._step_context:\n self._events.append(DagsterEvent.asset_observation(self._step_context, observation))\n\n def get_observations(\n self,\n ) -> Sequence[AssetObservation]:\n """Retrieve the list of user-generated asset observations that were observed via the context.\n\n User-generated events that were yielded will not appear in this list.\n\n **Examples:**\n\n .. code-block:: python\n\n from dagster import IOManager, build_input_context, AssetObservation\n\n class MyIOManager(IOManager):\n def load_input(self, context, obj):\n ...\n\n def test_load_input():\n mgr = MyIOManager()\n context = build_input_context()\n mgr.load_input(context)\n observations = context.get_observations()\n ...\n """\n return self._observations\n\n def consume_metadata(self) -> Mapping[str, MetadataValue]:\n result = self._metadata\n self._metadata = {}\n return result
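# --- Illustrative usage sketch (not part of the module source above) ---
# Combining add_input_metadata with get_observations in a test: when the input
# corresponds to an asset, the logged metadata is recorded as an AssetObservation
# that the test can inspect. The manager and metadata key names are illustrative
# assumptions.
from dagster import AssetKey, IOManager, build_input_context


class MetadataLoggingIOManager(IOManager):
    def handle_output(self, context, obj):
        ...

    def load_input(self, context):
        context.add_input_metadata({"rows": 100})
        return None


def test_load_input_records_observation():
    context = build_input_context(asset_key=["my_table"])
    MetadataLoggingIOManager().load_input(context)
    [observation] = context.get_observations()
    assert observation.asset_key == AssetKey(["my_table"])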
\n\n\n
[docs]def build_input_context(\n name: Optional[str] = None,\n config: Optional[Any] = None,\n metadata: Optional[ArbitraryMetadataMapping] = None,\n upstream_output: Optional["OutputContext"] = None,\n dagster_type: Optional["DagsterType"] = None,\n resource_config: Optional[Mapping[str, Any]] = None,\n resources: Optional[Mapping[str, Any]] = None,\n op_def: Optional["OpDefinition"] = None,\n step_context: Optional["StepExecutionContext"] = None,\n asset_key: Optional[CoercibleToAssetKey] = None,\n partition_key: Optional[str] = None,\n asset_partition_key_range: Optional[PartitionKeyRange] = None,\n asset_partitions_def: Optional["PartitionsDefinition"] = None,\n instance: Optional[DagsterInstance] = None,\n) -> "InputContext":\n """Builds input context from provided parameters.\n\n ``build_input_context`` can be used as either a function, or a context manager. If resources\n that are also context managers are provided, then ``build_input_context`` must be used as a\n context manager.\n\n Args:\n name (Optional[str]): The name of the input that we're loading.\n config (Optional[Any]): The config attached to the input that we're loading.\n metadata (Optional[Dict[str, Any]]): A dict of metadata that is assigned to the\n InputDefinition that we're loading for.\n upstream_output (Optional[OutputContext]): Info about the output that produced the object\n we're loading.\n dagster_type (Optional[DagsterType]): The type of this input.\n resource_config (Optional[Dict[str, Any]]): The resource config to make available from the\n input context. This usually corresponds to the config provided to the resource that\n loads the input manager.\n resources (Optional[Dict[str, Any]]): The resources to make available from the context.\n For a given key, you can provide either an actual instance of an object, or a resource\n definition.\n asset_key (Optional[Union[AssetKey, Sequence[str], str]]): The asset key attached to the InputDefinition.\n op_def (Optional[OpDefinition]): The definition of the op that's loading the input.\n step_context (Optional[StepExecutionContext]): For internal use.\n partition_key (Optional[str]): String value representing partition key to execute with.\n asset_partition_key_range (Optional[str]): The range of asset partition keys to load.\n asset_partitions_def: Optional[PartitionsDefinition]: The PartitionsDefinition of the asset\n being loaded.\n\n Examples:\n .. 
code-block:: python\n\n build_input_context()\n\n with build_input_context(resources={"foo": context_manager_resource}) as context:\n do_something\n """\n from dagster._core.definitions import OpDefinition, PartitionsDefinition\n from dagster._core.execution.context.output import OutputContext\n from dagster._core.execution.context.system import StepExecutionContext\n from dagster._core.execution.context_creation_job import initialize_console_manager\n from dagster._core.types.dagster_type import DagsterType\n\n name = check.opt_str_param(name, "name")\n metadata = check.opt_mapping_param(metadata, "metadata", key_type=str)\n upstream_output = check.opt_inst_param(upstream_output, "upstream_output", OutputContext)\n dagster_type = check.opt_inst_param(dagster_type, "dagster_type", DagsterType)\n resource_config = check.opt_mapping_param(resource_config, "resource_config", key_type=str)\n resources = check.opt_mapping_param(resources, "resources", key_type=str)\n op_def = check.opt_inst_param(op_def, "op_def", OpDefinition)\n step_context = check.opt_inst_param(step_context, "step_context", StepExecutionContext)\n asset_key = AssetKey.from_coercible(asset_key) if asset_key else None\n partition_key = check.opt_str_param(partition_key, "partition_key")\n asset_partition_key_range = check.opt_inst_param(\n asset_partition_key_range, "asset_partition_key_range", PartitionKeyRange\n )\n asset_partitions_def = check.opt_inst_param(\n asset_partitions_def, "asset_partitions_def", PartitionsDefinition\n )\n if asset_partitions_def and asset_partition_key_range:\n asset_partitions_subset = asset_partitions_def.empty_subset().with_partition_key_range(\n asset_partition_key_range, dynamic_partitions_store=instance\n )\n elif asset_partition_key_range:\n asset_partitions_subset = KeyRangeNoPartitionsDefPartitionsSubset(asset_partition_key_range)\n else:\n asset_partitions_subset = None\n\n return InputContext(\n name=name,\n job_name=None,\n config=config,\n metadata=metadata,\n upstream_output=upstream_output,\n dagster_type=dagster_type,\n log_manager=initialize_console_manager(None),\n resource_config=resource_config,\n resources=resources,\n step_context=step_context,\n op_def=op_def,\n asset_key=asset_key,\n partition_key=partition_key,\n asset_partitions_subset=asset_partitions_subset,\n asset_partitions_def=asset_partitions_def,\n instance=instance,\n )
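# --- Illustrative usage sketch (not part of the module source above) ---
# Building a test InputContext that covers a partition key range, as described
# in the build_input_context docstring above. The daily partitioning and the
# specific key range are illustrative assumptions.
from dagster import DailyPartitionsDefinition, build_input_context
from dagster._core.definitions.partition_key_range import PartitionKeyRange


def test_partition_range_input_context():
    context = build_input_context(
        asset_key=["events"],
        asset_partitions_def=DailyPartitionsDefinition(start_date="2023-06-01"),
        asset_partition_key_range=PartitionKeyRange("2023-06-01", "2023-06-03"),
    )
    assert context.asset_partition_keys == ["2023-06-01", "2023-06-02", "2023-06-03"]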
\n\n\nclass KeyRangeNoPartitionsDefPartitionsSubset(PartitionsSubset):\n """For build_input_context when no PartitionsDefinition has been provided."""\n\n def __init__(self, key_range: PartitionKeyRange):\n self._key_range = key_range\n\n def get_partition_keys_not_in_subset(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Iterable[str]:\n raise NotImplementedError()\n\n def get_partition_keys(self, current_time: Optional[datetime] = None) -> Iterable[str]:\n if self._key_range.start == self._key_range.end:\n return self._key_range.start\n else:\n raise NotImplementedError()\n\n def get_partition_key_ranges(\n self,\n current_time: Optional[datetime] = None,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> Sequence[PartitionKeyRange]:\n return [self._key_range]\n\n def with_partition_keys(self, partition_keys: Iterable[str]) -> "PartitionsSubset":\n raise NotImplementedError()\n\n def with_partition_key_range(\n self,\n partition_key_range: PartitionKeyRange,\n dynamic_partitions_store: Optional[DynamicPartitionsStore] = None,\n ) -> "PartitionsSubset":\n raise NotImplementedError()\n\n def serialize(self) -> str:\n raise NotImplementedError()\n\n @property\n def partitions_def(self) -> "PartitionsDefinition":\n raise NotImplementedError()\n\n def __len__(self) -> int:\n raise NotImplementedError()\n\n def __contains__(self, value) -> bool:\n raise NotImplementedError()\n\n @classmethod\n def from_serialized(\n cls, partitions_def: "PartitionsDefinition", serialized: str\n ) -> "PartitionsSubset":\n raise NotImplementedError()\n\n @classmethod\n def can_deserialize(\n cls,\n partitions_def: "PartitionsDefinition",\n serialized: str,\n serialized_partitions_def_unique_id: Optional[str],\n serialized_partitions_def_class_name: Optional[str],\n ) -> bool:\n raise NotImplementedError()\n\n @classmethod\n def empty_subset(cls, partitions_def: "PartitionsDefinition") -> "PartitionsSubset":\n raise NotImplementedError()\n
", "current_page_name": "_modules/dagster/_core/execution/context/input", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.execution.context.input"}, "invocation": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.execution.context.invocation

\nfrom contextlib import ExitStack\nfrom typing import (\n    AbstractSet,\n    Any,\n    Dict,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Union,\n    cast,\n)\n\nimport dagster._check as check\nfrom dagster._core.definitions.assets import AssetsDefinition\nfrom dagster._core.definitions.composition import PendingNodeInvocation\nfrom dagster._core.definitions.decorators.op_decorator import DecoratedOpFunction\nfrom dagster._core.definitions.dependency import Node, NodeHandle\nfrom dagster._core.definitions.events import (\n    AssetMaterialization,\n    AssetObservation,\n    ExpectationResult,\n    UserEvent,\n)\nfrom dagster._core.definitions.hook_definition import HookDefinition\nfrom dagster._core.definitions.job_definition import JobDefinition\nfrom dagster._core.definitions.multi_dimensional_partitions import MultiPartitionsDefinition\nfrom dagster._core.definitions.op_definition import OpDefinition\nfrom dagster._core.definitions.partition_key_range import PartitionKeyRange\nfrom dagster._core.definitions.resource_definition import (\n    IContainsGenerator,\n    ResourceDefinition,\n    Resources,\n    ScopedResourcesBuilder,\n)\nfrom dagster._core.definitions.resource_requirement import ensure_requirements_satisfied\nfrom dagster._core.definitions.step_launcher import StepLauncher\nfrom dagster._core.definitions.time_window_partitions import (\n    TimeWindow,\n    TimeWindowPartitionsDefinition,\n    has_one_dimension_time_window_partitioning,\n)\nfrom dagster._core.errors import (\n    DagsterInvalidInvocationError,\n    DagsterInvalidPropertyError,\n    DagsterInvariantViolationError,\n)\nfrom dagster._core.execution.build_resources import build_resources, wrap_resources_for_execution\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._core.log_manager import DagsterLogManager\nfrom dagster._core.storage.dagster_run import DagsterRun\nfrom dagster._core.types.dagster_type import DagsterType\nfrom dagster._utils.forked_pdb import ForkedPdb\nfrom dagster._utils.merger import merge_dicts\n\nfrom .compute import OpExecutionContext\nfrom .system import StepExecutionContext, TypeCheckContext\n\n\ndef _property_msg(prop_name: str, method_name: str) -> str:\n    return (\n        f"The {prop_name} {method_name} is not set on the context when a solid is directly invoked."\n    )\n\n\nclass UnboundOpExecutionContext(OpExecutionContext):\n    """The ``context`` object available as the first argument to a solid's compute function when\n    being invoked directly. 
Can also be used as a context manager.\n    """\n\n    def __init__(\n        self,\n        op_config: Any,\n        resources_dict: Mapping[str, Any],\n        resources_config: Mapping[str, Any],\n        instance: Optional[DagsterInstance],\n        partition_key: Optional[str],\n        partition_key_range: Optional[PartitionKeyRange],\n        mapping_key: Optional[str],\n        assets_def: Optional[AssetsDefinition],\n    ):\n        from dagster._core.execution.api import ephemeral_instance_if_missing\n        from dagster._core.execution.context_creation_job import initialize_console_manager\n\n        self._op_config = op_config\n        self._mapping_key = mapping_key\n\n        self._exit_stack = ExitStack()\n\n        # Construct ephemeral instance if missing\n        self._instance = self._exit_stack.enter_context(ephemeral_instance_if_missing(instance))\n\n        self._resources_config = resources_config\n        # Open resource context manager\n        self._resources_contain_cm = False\n        self._resource_defs = wrap_resources_for_execution(resources_dict)\n        self._resources = self._exit_stack.enter_context(\n            build_resources(\n                resources=self._resource_defs,\n                instance=self._instance,\n                resource_config=resources_config,\n            )\n        )\n        self._resources_contain_cm = isinstance(self._resources, IContainsGenerator)\n\n        self._log = initialize_console_manager(None)\n        self._pdb: Optional[ForkedPdb] = None\n        self._cm_scope_entered = False\n        check.invariant(\n            not (partition_key and partition_key_range),\n            "Must supply at most one of partition_key or partition_key_range",\n        )\n        self._partition_key = partition_key\n        self._partition_key_range = partition_key_range\n        self._user_events: List[UserEvent] = []\n        self._output_metadata: Dict[str, Any] = {}\n\n        self._assets_def = check.opt_inst_param(assets_def, "assets_def", AssetsDefinition)\n\n    def __enter__(self):\n        self._cm_scope_entered = True\n        return self\n\n    def __exit__(self, *exc):\n        self._exit_stack.close()\n\n    def __del__(self):\n        self._exit_stack.close()\n\n    @property\n    def op_config(self) -> Any:\n        return self._op_config\n\n    @property\n    def resource_keys(self) -> AbstractSet[str]:\n        return self._resource_defs.keys()\n\n    @property\n    def resources(self) -> Resources:\n        if self._resources_contain_cm and not self._cm_scope_entered:\n            raise DagsterInvariantViolationError(\n                "At least one provided resource is a generator, but attempting to access "\n                "resources outside of context manager scope. You can use the following syntax to "\n                "open a context manager: `with build_op_context(...) as context:`"\n            )\n        return self._resources\n\n    @property\n    def dagster_run(self) -> DagsterRun:\n        raise DagsterInvalidPropertyError(_property_msg("pipeline_run", "property"))\n\n    @property\n    def instance(self) -> DagsterInstance:\n        return self._instance\n\n    @property\n    def pdb(self) -> ForkedPdb:\n        """dagster.utils.forked_pdb.ForkedPdb: Gives access to pdb debugging from within the solid.\n\n        Example:\n        .. 
code-block:: python\n\n            @solid\n            def debug_solid(context):\n                context.pdb.set_trace()\n\n        """\n        if self._pdb is None:\n            self._pdb = ForkedPdb()\n\n        return self._pdb\n\n    @property\n    def step_launcher(self) -> Optional[StepLauncher]:\n        raise DagsterInvalidPropertyError(_property_msg("step_launcher", "property"))\n\n    @property\n    def run_id(self) -> str:\n        """str: Hard-coded value to indicate that we are directly invoking solid."""\n        return "EPHEMERAL"\n\n    @property\n    def run_config(self) -> dict:\n        raise DagsterInvalidPropertyError(_property_msg("run_config", "property"))\n\n    @property\n    def job_def(self) -> JobDefinition:\n        raise DagsterInvalidPropertyError(_property_msg("job_def", "property"))\n\n    @property\n    def job_name(self) -> str:\n        raise DagsterInvalidPropertyError(_property_msg("job_name", "property"))\n\n    @property\n    def log(self) -> DagsterLogManager:\n        """DagsterLogManager: A console manager constructed for this context."""\n        return self._log\n\n    @property\n    def node_handle(self) -> NodeHandle:\n        raise DagsterInvalidPropertyError(_property_msg("solid_handle", "property"))\n\n    @property\n    def op(self) -> JobDefinition:\n        raise DagsterInvalidPropertyError(_property_msg("op", "property"))\n\n    @property\n    def solid(self) -> Node:\n        raise DagsterInvalidPropertyError(_property_msg("solid", "property"))\n\n    @property\n    def op_def(self) -> OpDefinition:\n        raise DagsterInvalidPropertyError(_property_msg("op_def", "property"))\n\n    @property\n    def assets_def(self) -> AssetsDefinition:\n        raise DagsterInvalidPropertyError(_property_msg("assets_def", "property"))\n\n    @property\n    def has_partition_key(self) -> bool:\n        return self._partition_key is not None\n\n    @property\n    def partition_key(self) -> str:\n        if self._partition_key:\n            return self._partition_key\n        check.failed("Tried to access partition_key for a non-partitioned run")\n\n    @property\n    def partition_key_range(self) -> PartitionKeyRange:\n        """The range of partition keys for the current run.\n\n        If run is for a single partition key, return a `PartitionKeyRange` with the same start and\n        end. 
Raises an error if the current run is not a partitioned run.\n        """\n        if self._partition_key_range:\n            return self._partition_key_range\n        elif self._partition_key:\n            return PartitionKeyRange(self._partition_key, self._partition_key)\n        else:\n            check.failed("Tried to access partition_key range for a non-partitioned run")\n\n    def asset_partition_key_for_output(self, output_name: str = "result") -> str:\n        return self.partition_key\n\n    def has_tag(self, key: str) -> bool:\n        raise DagsterInvalidPropertyError(_property_msg("has_tag", "method"))\n\n    def get_tag(self, key: str) -> str:\n        raise DagsterInvalidPropertyError(_property_msg("get_tag", "method"))\n\n    def get_step_execution_context(self) -> StepExecutionContext:\n        raise DagsterInvalidPropertyError(_property_msg("get_step_execution_context", "methods"))\n\n    def bind(\n        self,\n        op_def: OpDefinition,\n        pending_invocation: Optional[PendingNodeInvocation[OpDefinition]],\n        assets_def: Optional[AssetsDefinition],\n        config_from_args: Optional[Mapping[str, Any]],\n        resources_from_args: Optional[Mapping[str, Any]],\n    ) -> "BoundOpExecutionContext":\n        from dagster._core.definitions.resource_invocation import resolve_bound_config\n\n        if resources_from_args:\n            if self._resource_defs:\n                raise DagsterInvalidInvocationError(\n                    "Cannot provide resources in both context and kwargs"\n                )\n            resource_defs = wrap_resources_for_execution(resources_from_args)\n            # add new resources context to the stack to be cleared on exit\n            resources = self._exit_stack.enter_context(\n                build_resources(resource_defs, self.instance)\n            )\n        elif assets_def and assets_def.resource_defs:\n            for key in sorted(list(assets_def.resource_defs.keys())):\n                if key in self._resource_defs:\n                    raise DagsterInvalidInvocationError(\n                        f"Error when invoking {assets_def!s} resource '{key}' "\n                        "provided on both the definition and invocation context. 
Please "\n                        "provide on only one or the other."\n                    )\n            resource_defs = wrap_resources_for_execution(\n                {**self._resource_defs, **assets_def.resource_defs}\n            )\n            # add new resources context to the stack to be cleared on exit\n            resources = self._exit_stack.enter_context(\n                build_resources(resource_defs, self.instance, self._resources_config)\n            )\n        else:\n            resources = self.resources\n            resource_defs = self._resource_defs\n\n        _validate_resource_requirements(resource_defs, op_def)\n\n        if self.op_config and config_from_args:\n            raise DagsterInvalidInvocationError("Cannot provide config in both context and kwargs")\n        op_config = resolve_bound_config(config_from_args or self.op_config, op_def)\n\n        return BoundOpExecutionContext(\n            op_def=op_def,\n            op_config=op_config,\n            resources=resources,\n            resources_config=self._resources_config,\n            instance=self.instance,\n            log_manager=self.log,\n            pdb=self.pdb,\n            tags=(\n                pending_invocation.tags\n                if isinstance(pending_invocation, PendingNodeInvocation)\n                else None\n            ),\n            hook_defs=(\n                pending_invocation.hook_defs\n                if isinstance(pending_invocation, PendingNodeInvocation)\n                else None\n            ),\n            alias=(\n                pending_invocation.given_alias\n                if isinstance(pending_invocation, PendingNodeInvocation)\n                else None\n            ),\n            user_events=self._user_events,\n            output_metadata=self._output_metadata,\n            mapping_key=self._mapping_key,\n            partition_key=self._partition_key,\n            partition_key_range=self._partition_key_range,\n            assets_def=assets_def,\n        )\n\n    def get_events(self) -> Sequence[UserEvent]:\n        """Retrieve the list of user-generated events that were logged via the context.\n\n        **Examples:**\n\n        .. 
code-block:: python\n\n            from dagster import op, build_op_context, AssetMaterialization, ExpectationResult\n\n            @op\n            def my_op(context):\n                ...\n\n            def test_my_op():\n                context = build_op_context()\n                my_op(context)\n                all_user_events = context.get_events()\n                materializations = [event for event in all_user_events if isinstance(event, AssetMaterialization)]\n                expectation_results = [event for event in all_user_events if isinstance(event, ExpectationResult)]\n                ...\n        """\n        return self._user_events\n\n    def get_output_metadata(\n        self, output_name: str, mapping_key: Optional[str] = None\n    ) -> Optional[Mapping[str, Any]]:\n        """Retrieve metadata that was logged for an output and mapping_key, if it exists.\n\n        If metadata cannot be found for the particular output_name/mapping_key combination, None will be returned.\n\n        Args:\n            output_name (str): The name of the output to retrieve logged metadata for.\n            mapping_key (Optional[str]): The mapping key to retrieve metadata for (only applies when using dynamic outputs).\n\n        Returns:\n            Optional[Mapping[str, Any]]: The metadata values present for the output_name/mapping_key combination, if present.\n        """\n        metadata = self._output_metadata.get(output_name)\n        if mapping_key and metadata:\n            return metadata.get(mapping_key)\n        return metadata\n\n    def get_mapping_key(self) -> Optional[str]:\n        return self._mapping_key\n\n\ndef _validate_resource_requirements(\n    resource_defs: Mapping[str, ResourceDefinition], op_def: OpDefinition\n) -> None:\n    """Validate correctness of resources against required resource keys."""\n    if cast(DecoratedOpFunction, op_def.compute_fn).has_context_arg():\n        for requirement in op_def.get_resource_requirements():\n            if not requirement.is_io_manager_requirement:\n                ensure_requirements_satisfied(resource_defs, [requirement])\n\n\nclass BoundOpExecutionContext(OpExecutionContext):\n    """The op execution context that is passed to the compute function during invocation.\n\n    This context is bound to a specific op definition, for which the resources and config have\n    been validated.\n    """\n\n    _op_def: OpDefinition\n    _op_config: Any\n    _resources: "Resources"\n    _resources_config: Mapping[str, Any]\n    _instance: DagsterInstance\n    _log_manager: DagsterLogManager\n    _pdb: Optional[ForkedPdb]\n    _tags: Mapping[str, str]\n    _hook_defs: Optional[AbstractSet[HookDefinition]]\n    _alias: str\n    _user_events: List[UserEvent]\n    _seen_outputs: Dict[str, Union[str, Set[str]]]\n    _output_metadata: Dict[str, Any]\n    _mapping_key: Optional[str]\n    _partition_key: Optional[str]\n    _partition_key_range: Optional[PartitionKeyRange]\n    _assets_def: Optional[AssetsDefinition]\n\n    def __init__(\n        self,\n        op_def: OpDefinition,\n        op_config: Any,\n        resources: "Resources",\n        resources_config: Mapping[str, Any],\n        instance: DagsterInstance,\n        log_manager: DagsterLogManager,\n        pdb: Optional[ForkedPdb],\n        tags: Optional[Mapping[str, str]],\n        hook_defs: Optional[AbstractSet[HookDefinition]],\n        alias: Optional[str],\n        user_events: List[UserEvent],\n        output_metadata: Dict[str, Any],\n        mapping_key: Optional[str],\n  
      partition_key: Optional[str],\n        partition_key_range: Optional[PartitionKeyRange],\n        assets_def: Optional[AssetsDefinition],\n    ):\n        self._op_def = op_def\n        self._op_config = op_config\n        self._resources = resources\n        self._instance = instance\n        self._log = log_manager\n        self._pdb = pdb\n        self._tags = merge_dicts(self._op_def.tags, tags) if tags else self._op_def.tags\n        self._hook_defs = hook_defs\n        self._alias = alias if alias else self._op_def.name\n        self._resources_config = resources_config\n        self._user_events = user_events\n        self._seen_outputs = {}\n        self._output_metadata = output_metadata\n        self._mapping_key = mapping_key\n        self._partition_key = partition_key\n        self._partition_key_range = partition_key_range\n        self._assets_def = assets_def\n        self._requires_typed_event_stream = False\n        self._typed_event_stream_error_message = None\n\n    @property\n    def op_config(self) -> Any:\n        return self._op_config\n\n    @property\n    def resources(self) -> Resources:\n        return self._resources\n\n    @property\n    def dagster_run(self) -> DagsterRun:\n        raise DagsterInvalidPropertyError(_property_msg("pipeline_run", "property"))\n\n    @property\n    def instance(self) -> DagsterInstance:\n        return self._instance\n\n    @property\n    def pdb(self) -> ForkedPdb:\n        """dagster.utils.forked_pdb.ForkedPdb: Gives access to pdb debugging from within the solid.\n\n        Example:\n        .. code-block:: python\n\n            @solid\n            def debug_solid(context):\n                context.pdb.set_trace()\n\n        """\n        if self._pdb is None:\n            self._pdb = ForkedPdb()\n\n        return self._pdb\n\n    @property\n    def step_launcher(self) -> Optional[StepLauncher]:\n        raise DagsterInvalidPropertyError(_property_msg("step_launcher", "property"))\n\n    @property\n    def run_id(self) -> str:\n        """str: Hard-coded value to indicate that we are directly invoking solid."""\n        return "EPHEMERAL"\n\n    @property\n    def run_config(self) -> Mapping[str, object]:\n        run_config: Dict[str, object] = {}\n        if self._op_config:\n            run_config["ops"] = {self._op_def.name: {"config": self._op_config}}\n        run_config["resources"] = self._resources_config\n        return run_config\n\n    @property\n    def job_def(self) -> JobDefinition:\n        raise DagsterInvalidPropertyError(_property_msg("job_def", "property"))\n\n    @property\n    def job_name(self) -> str:\n        raise DagsterInvalidPropertyError(_property_msg("job_name", "property"))\n\n    @property\n    def log(self) -> DagsterLogManager:\n        """DagsterLogManager: A console manager constructed for this context."""\n        return self._log\n\n    @property\n    def node_handle(self) -> NodeHandle:\n        raise DagsterInvalidPropertyError(_property_msg("node_handle", "property"))\n\n    @property\n    def op(self) -> Node:\n        raise DagsterInvalidPropertyError(_property_msg("op", "property"))\n\n    @property\n    def op_def(self) -> OpDefinition:\n        return self._op_def\n\n    @property\n    def has_assets_def(self) -> bool:\n        return self._assets_def is not None\n\n    @property\n    def assets_def(self) -> AssetsDefinition:\n        if self._assets_def is None:\n            raise DagsterInvalidPropertyError(\n                f"Op {self.op_def.name} does not have an assets 
definition."\n            )\n        return self._assets_def\n\n    @property\n    def has_partition_key(self) -> bool:\n        return self._partition_key is not None\n\n    def has_tag(self, key: str) -> bool:\n        return key in self._tags\n\n    def get_tag(self, key: str) -> Optional[str]:\n        return self._tags.get(key)\n\n    @property\n    def alias(self) -> str:\n        return self._alias\n\n    def get_step_execution_context(self) -> StepExecutionContext:\n        raise DagsterInvalidPropertyError(_property_msg("get_step_execution_context", "methods"))\n\n    def for_type(self, dagster_type: DagsterType) -> TypeCheckContext:\n        resources = cast(NamedTuple, self.resources)\n        return TypeCheckContext(\n            self.run_id,\n            self.log,\n            ScopedResourcesBuilder(resources._asdict()),\n            dagster_type,\n        )\n\n    def get_mapping_key(self) -> Optional[str]:\n        return self._mapping_key\n\n    def describe_op(self) -> str:\n        if isinstance(self.op_def, OpDefinition):\n            return f'op "{self.op_def.name}"'\n\n        return f'solid "{self.op_def.name}"'\n\n    def log_event(self, event: UserEvent) -> None:\n        check.inst_param(\n            event,\n            "event",\n            (AssetMaterialization, AssetObservation, ExpectationResult),\n        )\n        self._user_events.append(event)\n\n    def observe_output(self, output_name: str, mapping_key: Optional[str] = None) -> None:\n        if mapping_key:\n            if output_name not in self._seen_outputs:\n                self._seen_outputs[output_name] = set()\n            cast(Set[str], self._seen_outputs[output_name]).add(mapping_key)\n        else:\n            self._seen_outputs[output_name] = "seen"\n\n    def has_seen_output(self, output_name: str, mapping_key: Optional[str] = None) -> bool:\n        if mapping_key:\n            return (\n                output_name in self._seen_outputs and mapping_key in self._seen_outputs[output_name]\n            )\n        return output_name in self._seen_outputs\n\n    @property\n    def partition_key(self) -> str:\n        if self._partition_key is not None:\n            return self._partition_key\n        check.failed("Tried to access partition_key for a non-partitioned asset")\n\n    @property\n    def partition_key_range(self) -> PartitionKeyRange:\n        """The range of partition keys for the current run.\n\n        If run is for a single partition key, return a `PartitionKeyRange` with the same start and\n        end. 
Raises an error if the current run is not a partitioned run.\n        """\n        if self._partition_key_range:\n            return self._partition_key_range\n        elif self._partition_key:\n            return PartitionKeyRange(self._partition_key, self._partition_key)\n        else:\n            check.failed("Tried to access partition_key range for a non-partitioned run")\n\n    def asset_partition_key_for_output(self, output_name: str = "result") -> str:\n        return self.partition_key\n\n    def asset_partitions_time_window_for_output(self, output_name: str = "result") -> TimeWindow:\n        partitions_def = self.assets_def.partitions_def\n        if partitions_def is None:\n            check.failed("Tried to access partition_key for a non-partitioned asset")\n\n        if not has_one_dimension_time_window_partitioning(partitions_def=partitions_def):\n            raise DagsterInvariantViolationError(\n                "Expected a TimeWindowPartitionsDefinition or MultiPartitionsDefinition with a"\n                f" single time dimension, but instead found {type(partitions_def)}"\n            )\n\n        return cast(\n            Union[MultiPartitionsDefinition, TimeWindowPartitionsDefinition], partitions_def\n        ).time_window_for_partition_key(self.partition_key)\n\n    def add_output_metadata(\n        self,\n        metadata: Mapping[str, Any],\n        output_name: Optional[str] = None,\n        mapping_key: Optional[str] = None,\n    ) -> None:\n        """Add metadata to one of the outputs of an op.\n\n        This can only be used once per output in the body of an op. Using this method with the same output_name more than once within an op will result in an error.\n\n        Args:\n            metadata (Mapping[str, Any]): The metadata to attach to the output\n            output_name (Optional[str]): The name of the output to attach metadata to. If there is only one output on the op, then this argument does not need to be provided. The metadata will automatically be attached to the only output.\n\n        **Examples:**\n\n        .. code-block:: python\n\n            from dagster import Out, op\n            from typing import Tuple\n\n            @op\n            def add_metadata(context):\n                context.add_output_metadata({"foo", "bar"})\n                return 5 # Since the default output is called "result", metadata will be attached to the output "result".\n\n            @op(out={"a": Out(), "b": Out()})\n            def add_metadata_two_outputs(context) -> Tuple[str, int]:\n                context.add_output_metadata({"foo": "bar"}, output_name="b")\n                context.add_output_metadata({"baz": "bat"}, output_name="a")\n\n                return ("dog", 5)\n\n        """\n        metadata = check.mapping_param(metadata, "metadata", key_type=str)\n        output_name = check.opt_str_param(output_name, "output_name")\n        mapping_key = check.opt_str_param(mapping_key, "mapping_key")\n\n        if output_name is None and len(self.op_def.output_defs) == 1:\n            output_def = self.op_def.output_defs[0]\n            output_name = output_def.name\n        elif output_name is None:\n            raise DagsterInvariantViolationError(\n                "Attempted to log metadata without providing output_name, but multiple outputs"\n                " exist. 
Please provide an output_name to the invocation of"\n                " `context.add_output_metadata`."\n            )\n        else:\n            output_def = self.op_def.output_def_named(output_name)\n\n        if self.has_seen_output(output_name, mapping_key):\n            output_desc = (\n                f"output '{output_def.name}'"\n                if not mapping_key\n                else f"output '{output_def.name}' with mapping_key '{mapping_key}'"\n            )\n            raise DagsterInvariantViolationError(\n                f"In {self.op_def.node_type_str} '{self.op_def.name}', attempted to log output"\n                f" metadata for {output_desc} which has already been yielded. Metadata must be"\n                " logged before the output is yielded."\n            )\n        if output_def.is_dynamic and not mapping_key:\n            raise DagsterInvariantViolationError(\n                f"In {self.op_def.node_type_str} '{self.op_def.name}', attempted to log metadata"\n                f" for dynamic output '{output_def.name}' without providing a mapping key. When"\n                " logging metadata for a dynamic output, it is necessary to provide a mapping key."\n            )\n\n        output_name = output_def.name\n        if output_name in self._output_metadata:\n            if not mapping_key or mapping_key in self._output_metadata[output_name]:\n                raise DagsterInvariantViolationError(\n                    f"In {self.op_def.node_type_str} '{self.op_def.name}', attempted to log"\n                    f" metadata for output '{output_name}' more than once."\n                )\n        if mapping_key:\n            if output_name not in self._output_metadata:\n                self._output_metadata[output_name] = {}\n            self._output_metadata[output_name][mapping_key] = metadata\n\n        else:\n            self._output_metadata[output_name] = metadata\n\n    # In this mode no conversion is done on returned values and missing but expected outputs are not\n    # allowed.\n    @property\n    def requires_typed_event_stream(self) -> bool:\n        return self._requires_typed_event_stream\n\n    @property\n    def typed_event_stream_error_message(self) -> Optional[str]:\n        return self._typed_event_stream_error_message\n\n    def set_requires_typed_event_stream(self, *, error_message: Optional[str]) -> None:\n        self._requires_typed_event_stream = True\n        self._typed_event_stream_error_message = error_message\n\n\n
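# --- Illustrative usage sketch (not part of the module source above) ---
# Retrieving metadata that an op logged via add_output_metadata (documented
# above) after a direct invocation, using get_output_metadata on the context
# built with build_op_context (defined just below). The op name and metadata
# keys are illustrative assumptions.
from dagster import build_op_context, op


@op
def emits_metadata(context) -> int:
    context.add_output_metadata({"row_count": 5})
    return 5


def test_emits_metadata():
    context = build_op_context()
    assert emits_metadata(context) == 5
    assert context.get_output_metadata("result") == {"row_count": 5}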
[docs]def build_op_context(\n resources: Optional[Mapping[str, Any]] = None,\n op_config: Any = None,\n resources_config: Optional[Mapping[str, Any]] = None,\n instance: Optional[DagsterInstance] = None,\n config: Any = None,\n partition_key: Optional[str] = None,\n partition_key_range: Optional[PartitionKeyRange] = None,\n mapping_key: Optional[str] = None,\n _assets_def: Optional[AssetsDefinition] = None,\n) -> UnboundOpExecutionContext:\n """Builds op execution context from provided parameters.\n\n ``build_op_context`` can be used as either a function or context manager. If there is a\n provided resource that is a context manager, then ``build_op_context`` must be used as a\n context manager. This function can be used to provide the context argument when directly\n invoking a op.\n\n Args:\n resources (Optional[Dict[str, Any]]): The resources to provide to the context. These can be\n either values or resource definitions.\n op_config (Optional[Mapping[str, Any]]): The config to provide to the op.\n resources_config (Optional[Mapping[str, Any]]): The config to provide to the resources.\n instance (Optional[DagsterInstance]): The dagster instance configured for the context.\n Defaults to DagsterInstance.ephemeral().\n mapping_key (Optional[str]): A key representing the mapping key from an upstream dynamic\n output. Can be accessed using ``context.get_mapping_key()``.\n partition_key (Optional[str]): String value representing partition key to execute with.\n partition_key_range (Optional[PartitionKeyRange]): Partition key range to execute with.\n _assets_def (Optional[AssetsDefinition]): Internal argument that populates the op's assets\n definition, not meant to be populated by users.\n\n Examples:\n .. code-block:: python\n\n context = build_op_context()\n op_to_invoke(context)\n\n with build_op_context(resources={"foo": context_manager_resource}) as context:\n op_to_invoke(context)\n """\n if op_config and config:\n raise DagsterInvalidInvocationError(\n "Attempted to invoke ``build_op_context`` with both ``op_config``, and its "\n "legacy version, ``config``. Please provide one or the other."\n )\n\n op_config = op_config if op_config else config\n return UnboundOpExecutionContext(\n resources_dict=check.opt_mapping_param(resources, "resources", key_type=str),\n resources_config=check.opt_mapping_param(\n resources_config, "resources_config", key_type=str\n ),\n op_config=op_config,\n instance=check.opt_inst_param(instance, "instance", DagsterInstance),\n partition_key=check.opt_str_param(partition_key, "partition_key"),\n partition_key_range=check.opt_inst_param(\n partition_key_range, "partition_key_range", PartitionKeyRange\n ),\n mapping_key=check.opt_str_param(mapping_key, "mapping_key"),\n assets_def=check.opt_inst_param(_assets_def, "_assets_def", AssetsDefinition),\n )
\n\n\n
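# --- Illustrative usage sketch (not part of the module source above) ---
# Direct op invocation with a context from build_op_context, per the docstring
# above. The op and its config schema are illustrative assumptions.
from dagster import build_op_context, op


@op(config_schema={"name": str})
def greet(context) -> str:
    return "hello, " + context.op_config["name"]


def test_greet():
    context = build_op_context(op_config={"name": "world"})
    assert greet(context) == "hello, world"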
[docs]def build_asset_context(\n resources: Optional[Mapping[str, Any]] = None,\n resources_config: Optional[Mapping[str, Any]] = None,\n asset_config: Optional[Mapping[str, Any]] = None,\n instance: Optional[DagsterInstance] = None,\n partition_key: Optional[str] = None,\n partition_key_range: Optional[PartitionKeyRange] = None,\n):\n """Builds asset execution context from provided parameters.\n\n ``build_asset_context`` can be used as either a function or context manager. If there is a\n provided resource that is a context manager, then ``build_asset_context`` must be used as a\n context manager. This function can be used to provide the context argument when directly\n invoking an asset.\n\n Args:\n resources (Optional[Dict[str, Any]]): The resources to provide to the context. These can be\n either values or resource definitions.\n resources_config (Optional[Mapping[str, Any]]): The config to provide to the resources.\n asset_config (Optional[Mapping[str, Any]]): The config to provide to the asset.\n instance (Optional[DagsterInstance]): The dagster instance configured for the context.\n Defaults to DagsterInstance.ephemeral().\n partition_key (Optional[str]): String value representing partition key to execute with.\n partition_key_range (Optional[PartitionKeyRange]): Partition key range to execute with.\n\n Examples:\n .. code-block:: python\n\n context = build_asset_context()\n asset_to_invoke(context)\n\n with build_asset_context(resources={"foo": context_manager_resource}) as context:\n asset_to_invoke(context)\n """\n return build_op_context(\n op_config=asset_config,\n resources=resources,\n resources_config=resources_config,\n partition_key=partition_key,\n partition_key_range=partition_key_range,\n instance=instance,\n )
\n
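# --- Illustrative usage sketch (not part of the module source above) ---
# Direct invocation of a partitioned asset with build_asset_context, per the
# docstring above. The asset and its daily partitioning are illustrative
# assumptions.
from dagster import DailyPartitionsDefinition, asset, build_asset_context


@asset(partitions_def=DailyPartitionsDefinition(start_date="2023-01-01"))
def events(context) -> str:
    return f"events for {context.partition_key}"


def test_events():
    context = build_asset_context(partition_key="2023-06-01")
    assert events(context) == "events for 2023-06-01"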
", "current_page_name": "_modules/dagster/_core/execution/context/invocation", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.execution.context.invocation"}, "logger": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.execution.context.logger

\nfrom typing import Any, Optional\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.definitions.job_definition import JobDefinition\nfrom dagster._core.definitions.logger_definition import LoggerDefinition\nfrom dagster._core.errors import DagsterInvariantViolationError\n\nfrom .output import RUN_ID_PLACEHOLDER\n\n\n
[docs]class InitLoggerContext:\n """The context object available as the argument to the initialization function of a :py:class:`dagster.LoggerDefinition`.\n\n Users should not instantiate this object directly. To construct an\n `InitLoggerContext` for testing purposes, use :py:func:`dagster.\n build_init_logger_context`.\n\n Example:\n .. code-block:: python\n\n from dagster import logger, InitLoggerContext\n\n @logger\n def hello_world(init_context: InitLoggerContext):\n ...\n\n """\n\n def __init__(\n self,\n logger_config: Any,\n logger_def: Optional[LoggerDefinition] = None,\n job_def: Optional[JobDefinition] = None,\n run_id: Optional[str] = None,\n ):\n self._logger_config = logger_config\n self._job_def = check.opt_inst_param(job_def, "job_def", JobDefinition)\n self._logger_def = check.opt_inst_param(logger_def, "logger_def", LoggerDefinition)\n self._run_id = check.opt_str_param(run_id, "run_id")\n\n @public\n @property\n def logger_config(self) -> Any:\n """The configuration data provided by the run config. The\n schema for this data is defined by ``config_schema`` on the :py:class:`LoggerDefinition`.\n """\n return self._logger_config\n\n @property\n def job_def(self) -> Optional[JobDefinition]:\n """The job definition currently being executed."""\n return self._job_def\n\n @public\n @property\n def logger_def(self) -> Optional[LoggerDefinition]:\n """The logger definition for the logger being constructed."""\n return self._logger_def\n\n @public\n @property\n def run_id(self) -> Optional[str]:\n """The ID for this run of the job."""\n return self._run_id
\n\n\nclass UnboundInitLoggerContext(InitLoggerContext):\n """Logger initialization context returned by ``build_init_logger_context``.\n\n Represents a context whose config has not yet been validated against a logger definition, so\n the ``logger_def`` attribute cannot be accessed. When an instance of\n ``UnboundInitLoggerContext`` is passed to ``LoggerDefinition.initialize``, the config is validated\n and the context is subsumed into an ``InitLoggerContext`` that carries the validated ``logger_def``.\n """\n\n def __init__(self, logger_config: Any, job_def: Optional[JobDefinition]):\n super(UnboundInitLoggerContext, self).__init__(\n logger_config, logger_def=None, job_def=job_def, run_id=None\n )\n\n @property\n def logger_def(self) -> LoggerDefinition:\n raise DagsterInvariantViolationError(\n "UnboundInitLoggerContext has not been validated against a logger definition."\n )\n\n @property\n def run_id(self) -> Optional[str]:\n return RUN_ID_PLACEHOLDER\n
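# --- Illustrative usage sketch (not part of the module source above) ---
# Constructing an init logger context for tests with build_init_logger_context,
# as recommended in the InitLoggerContext docstring above. The config values
# are illustrative assumptions.
from dagster import build_init_logger_context


def test_logger_config_access():
    init_context = build_init_logger_context(logger_config={"log_level": "DEBUG"})
    assert init_context.logger_config == {"log_level": "DEBUG"}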
", "current_page_name": "_modules/dagster/_core/execution/context/logger", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.execution.context.logger"}, "output": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.execution.context.output

\nimport warnings\nfrom typing import (\n    TYPE_CHECKING,\n    Any,\n    ContextManager,\n    Iterator,\n    List,\n    Mapping,\n    Optional,\n    Sequence,\n    Union,\n    cast,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.definitions.asset_layer import AssetOutputInfo\nfrom dagster._core.definitions.events import (\n    AssetKey,\n    AssetMaterialization,\n    AssetObservation,\n    CoercibleToAssetKey,\n)\nfrom dagster._core.definitions.metadata import (\n    ArbitraryMetadataMapping,\n    MetadataValue,\n    RawMetadataValue,\n)\nfrom dagster._core.definitions.partition_key_range import PartitionKeyRange\nfrom dagster._core.definitions.time_window_partitions import TimeWindow\nfrom dagster._core.errors import DagsterInvalidMetadata, DagsterInvariantViolationError\nfrom dagster._core.execution.plan.utils import build_resources_for_manager\n\nif TYPE_CHECKING:\n    from dagster._core.definitions import JobDefinition, PartitionsDefinition\n    from dagster._core.definitions.op_definition import OpDefinition\n    from dagster._core.definitions.resource_definition import Resources\n    from dagster._core.events import DagsterEvent\n    from dagster._core.execution.context.system import StepExecutionContext\n    from dagster._core.execution.plan.outputs import StepOutputHandle\n    from dagster._core.execution.plan.plan import ExecutionPlan\n    from dagster._core.log_manager import DagsterLogManager\n    from dagster._core.system_config.objects import ResolvedRunConfig\n    from dagster._core.types.dagster_type import DagsterType\n\nRUN_ID_PLACEHOLDER = "__EPHEMERAL_RUN_ID"\n\n\n
[docs]class OutputContext:\n """The context object that is available to the `handle_output` method of an :py:class:`IOManager`.\n\n Users should not instantiate this object directly. To construct an\n `OutputContext` for testing an IO Manager's `handle_output` method, use\n :py:func:`dagster.build_output_context`.\n\n Example:\n .. code-block:: python\n\n from dagster import IOManager, OutputContext\n\n class MyIOManager(IOManager):\n def handle_output(self, context: OutputContext, obj):\n ...\n """\n\n _step_key: Optional[str]\n _name: Optional[str]\n _job_name: Optional[str]\n _run_id: Optional[str]\n _metadata: ArbitraryMetadataMapping\n _user_generated_metadata: Mapping[str, MetadataValue]\n _mapping_key: Optional[str]\n _config: object\n _op_def: Optional["OpDefinition"]\n _dagster_type: Optional["DagsterType"]\n _log: Optional["DagsterLogManager"]\n _version: Optional[str]\n _resource_config: Optional[Mapping[str, object]]\n _step_context: Optional["StepExecutionContext"]\n _asset_info: Optional[AssetOutputInfo]\n _warn_on_step_context_use: bool\n _resources: Optional["Resources"]\n _resources_cm: Optional[ContextManager["Resources"]]\n _resources_contain_cm: Optional[bool]\n _cm_scope_entered: Optional[bool]\n _events: List["DagsterEvent"]\n _user_events: List[Union[AssetMaterialization, AssetObservation]]\n\n def __init__(\n self,\n step_key: Optional[str] = None,\n name: Optional[str] = None,\n job_name: Optional[str] = None,\n run_id: Optional[str] = None,\n metadata: Optional[ArbitraryMetadataMapping] = None,\n mapping_key: Optional[str] = None,\n config: object = None,\n dagster_type: Optional["DagsterType"] = None,\n log_manager: Optional["DagsterLogManager"] = None,\n version: Optional[str] = None,\n resource_config: Optional[Mapping[str, object]] = None,\n resources: Optional[Union["Resources", Mapping[str, object]]] = None,\n step_context: Optional["StepExecutionContext"] = None,\n op_def: Optional["OpDefinition"] = None,\n asset_info: Optional[AssetOutputInfo] = None,\n warn_on_step_context_use: bool = False,\n partition_key: Optional[str] = None,\n ):\n from dagster._core.definitions.resource_definition import IContainsGenerator, Resources\n from dagster._core.execution.build_resources import build_resources\n\n self._step_key = step_key\n self._name = name\n self._job_name = job_name\n self._run_id = run_id\n self._metadata = metadata or {}\n self._mapping_key = mapping_key\n self._config = config\n self._op_def = op_def\n self._dagster_type = dagster_type\n self._log = log_manager\n self._version = version\n self._resource_config = resource_config\n self._step_context = step_context\n self._asset_info = asset_info\n self._warn_on_step_context_use = warn_on_step_context_use\n if self._step_context and self._step_context.has_partition_key:\n self._partition_key: Optional[str] = self._step_context.partition_key\n else:\n self._partition_key = partition_key\n\n if isinstance(resources, Resources):\n self._resources_cm = None\n self._resources = resources\n else:\n self._resources_cm = build_resources(\n check.opt_mapping_param(resources, "resources", key_type=str)\n )\n self._resources = self._resources_cm.__enter__()\n self._resources_contain_cm = isinstance(self._resources, IContainsGenerator)\n self._cm_scope_entered = False\n\n self._events = []\n self._user_events = []\n self._user_generated_metadata = {}\n\n def __enter__(self):\n if self._resources_cm:\n self._cm_scope_entered = True\n return self\n\n def __exit__(self, *exc):\n if self._resources_cm:\n 
self._resources_cm.__exit__(*exc)\n\n def __del__(self):\n if (\n hasattr(self, "_resources_cm")\n and self._resources_cm\n and self._resources_contain_cm\n and not self._cm_scope_entered\n ):\n self._resources_cm.__exit__(None, None, None)\n\n @public\n @property\n def step_key(self) -> str:\n """The step_key for the compute step that produced the output."""\n if self._step_key is None:\n raise DagsterInvariantViolationError(\n "Attempting to access step_key, "\n "but it was not provided when constructing the OutputContext"\n )\n\n return self._step_key\n\n @public\n @property\n def name(self) -> str:\n """The name of the output that produced the output."""\n if self._name is None:\n raise DagsterInvariantViolationError(\n "Attempting to access name, "\n "but it was not provided when constructing the OutputContext"\n )\n\n return self._name\n\n @property\n def job_name(self) -> str:\n if self._job_name is None:\n raise DagsterInvariantViolationError(\n "Attempting to access pipeline_name, "\n "but it was not provided when constructing the OutputContext"\n )\n\n return self._job_name\n\n @public\n @property\n def run_id(self) -> str:\n """The id of the run that produced the output."""\n if self._run_id is None:\n raise DagsterInvariantViolationError(\n "Attempting to access run_id, "\n "but it was not provided when constructing the OutputContext"\n )\n\n return self._run_id\n\n @public\n @property\n def metadata(self) -> Optional[ArbitraryMetadataMapping]:\n """A dict of the metadata that is assigned to the OutputDefinition that produced\n the output.\n """\n return self._metadata\n\n @public\n @property\n def mapping_key(self) -> Optional[str]:\n """The key that identifies a unique mapped output. None for regular outputs."""\n return self._mapping_key\n\n @public\n @property\n def config(self) -> Any:\n """The configuration for the output."""\n return self._config\n\n @public\n @property\n def op_def(self) -> "OpDefinition":\n """The definition of the op that produced the output."""\n from dagster._core.definitions import OpDefinition\n\n if self._op_def is None:\n raise DagsterInvariantViolationError(\n "Attempting to access op_def, "\n "but it was not provided when constructing the OutputContext"\n )\n\n return cast(OpDefinition, self._op_def)\n\n @public\n @property\n def dagster_type(self) -> "DagsterType":\n """The type of this output."""\n if self._dagster_type is None:\n raise DagsterInvariantViolationError(\n "Attempting to access dagster_type, "\n "but it was not provided when constructing the OutputContext"\n )\n\n return self._dagster_type\n\n @public\n @property\n def log(self) -> "DagsterLogManager":\n """The log manager to use for this output."""\n if self._log is None:\n raise DagsterInvariantViolationError(\n "Attempting to access log, "\n "but it was not provided when constructing the OutputContext"\n )\n\n return self._log\n\n @public\n @property\n def version(self) -> Optional[str]:\n """(Experimental) The version of the output."""\n return self._version\n\n @public\n @property\n def resource_config(self) -> Optional[Mapping[str, object]]:\n """The config associated with the resource that initializes the InputManager."""\n return self._resource_config\n\n @public\n @property\n def resources(self) -> Any:\n """The resources required by the output manager, specified by the `required_resource_keys`\n parameter.\n """\n if self._resources is None:\n raise DagsterInvariantViolationError(\n "Attempting to access resources, "\n "but it was not provided when constructing the 
OutputContext"\n )\n\n if self._resources_cm and self._resources_contain_cm and not self._cm_scope_entered:\n raise DagsterInvariantViolationError(\n "At least one provided resource is a generator, but attempting to access "\n "resources outside of context manager scope. You can use the following syntax to "\n "open a context manager: `with build_output_context(...) as context:`"\n )\n return self._resources\n\n @property\n def asset_info(self) -> Optional[AssetOutputInfo]:\n """(Experimental) Asset info corresponding to the output."""\n return self._asset_info\n\n @public\n @property\n def has_asset_key(self) -> bool:\n """Returns True if an asset is being stored, otherwise returns False. A return value of False\n indicates that an output from an op is being stored.\n """\n return self._asset_info is not None\n\n @public\n @property\n def asset_key(self) -> AssetKey:\n """The ``AssetKey`` of the asset that is being stored as an output."""\n if self._asset_info is None:\n raise DagsterInvariantViolationError(\n "Attempting to access asset_key, "\n "but it was not provided when constructing the OutputContext"\n )\n\n return self._asset_info.key\n\n @public\n @property\n def asset_partitions_def(self) -> "PartitionsDefinition":\n """The PartitionsDefinition on the asset corresponding to this output."""\n asset_key = self.asset_key\n result = self.step_context.job_def.asset_layer.partitions_def_for_asset(asset_key)\n if result is None:\n raise DagsterInvariantViolationError(\n f"Attempting to access partitions def for asset {asset_key}, but it is not"\n " partitioned"\n )\n\n return result\n\n @property\n def step_context(self) -> "StepExecutionContext":\n if self._warn_on_step_context_use:\n warnings.warn(\n "You are using InputContext.upstream_output.step_context"\n "This use on upstream_output is deprecated and will fail in the future"\n "Try to obtain what you need directly from InputContext"\n "For more details: https://github.com/dagster-io/dagster/issues/7900"\n )\n\n if self._step_context is None:\n raise DagsterInvariantViolationError(\n "Attempting to access step_context, "\n "but it was not provided when constructing the OutputContext"\n )\n\n return self._step_context\n\n @public\n @property\n def has_partition_key(self) -> bool:\n """Whether the current run is a partitioned run."""\n if self._warn_on_step_context_use:\n warnings.warn(\n "You are using InputContext.upstream_output.has_partition_key"\n "This use on upstream_output is deprecated and will fail in the future"\n "Try to obtain what you need directly from InputContext"\n "For more details: https://github.com/dagster-io/dagster/issues/7900"\n )\n\n return self._partition_key is not None\n\n @public\n @property\n def partition_key(self) -> str:\n """The partition key for the current run.\n\n Raises an error if the current run is not a partitioned run.\n """\n if self._warn_on_step_context_use:\n warnings.warn(\n "You are using InputContext.upstream_output.partition_key"\n "This use on upstream_output is deprecated and will fail in the future"\n "Try to obtain what you need directly from InputContext"\n "For more details: https://github.com/dagster-io/dagster/issues/7900"\n )\n\n if self._partition_key is None:\n check.failed(\n "Tried to access partition_key on a non-partitioned run.",\n )\n\n return self._partition_key\n\n @public\n @property\n def has_asset_partitions(self) -> bool:\n """Returns True if the asset being stored is partitioned."""\n if self._warn_on_step_context_use:\n warnings.warn(\n "You are using 
InputContext.upstream_output.has_asset_partitions"\n "This use on upstream_output is deprecated and will fail in the future"\n "Try to obtain what you need directly from InputContext"\n "For more details: https://github.com/dagster-io/dagster/issues/7900"\n )\n\n if self._step_context is not None:\n return self._step_context.has_asset_partitions_for_output(self.name)\n else:\n return False\n\n @public\n @property\n def asset_partition_key(self) -> str:\n """The partition key for output asset.\n\n Raises an error if the output asset has no partitioning, or if the run covers a partition\n range for the output asset.\n """\n if self._warn_on_step_context_use:\n warnings.warn(\n "You are using InputContext.upstream_output.asset_partition_key"\n "This use on upstream_output is deprecated and will fail in the future"\n "Try to obtain what you need directly from InputContext"\n "For more details: https://github.com/dagster-io/dagster/issues/7900"\n )\n\n return self.step_context.asset_partition_key_for_output(self.name)\n\n @public\n @property\n def asset_partition_key_range(self) -> PartitionKeyRange:\n """The partition key range for output asset.\n\n Raises an error if the output asset has no partitioning.\n """\n if self._warn_on_step_context_use:\n warnings.warn(\n "You are using InputContext.upstream_output.asset_partition_key_range"\n "This use on upstream_output is deprecated and will fail in the future"\n "Try to obtain what you need directly from InputContext"\n "For more details: https://github.com/dagster-io/dagster/issues/7900"\n )\n\n return self.step_context.asset_partition_key_range_for_output(self.name)\n\n @public\n @property\n def asset_partition_keys(self) -> Sequence[str]:\n """The partition keys for the output asset.\n\n Raises an error if the output asset has no partitioning.\n """\n if self._warn_on_step_context_use:\n warnings.warn(\n "You are using InputContext.upstream_output.asset_partition_keys"\n "This use on upstream_output is deprecated and will fail in the future"\n "Try to obtain what you need directly from InputContext"\n "For more details: https://github.com/dagster-io/dagster/issues/7900"\n )\n\n return self.asset_partitions_def.get_partition_keys_in_range(\n self.step_context.asset_partition_key_range_for_output(self.name),\n dynamic_partitions_store=self.step_context.instance,\n )\n\n @public\n @property\n def asset_partitions_time_window(self) -> TimeWindow:\n """The time window for the partitions of the output asset.\n\n Raises an error if either of the following are true:\n - The output asset has no partitioning.\n - The output asset is not partitioned with a TimeWindowPartitionsDefinition or a\n MultiPartitionsDefinition with one time-partitioned dimension.\n """\n if self._warn_on_step_context_use:\n warnings.warn(\n "You are using InputContext.upstream_output.asset_partitions_time_window"\n "This use on upstream_output is deprecated and will fail in the future"\n "Try to obtain what you need directly from InputContext"\n "For more details: https://github.com/dagster-io/dagster/issues/7900"\n )\n\n return self.step_context.asset_partitions_time_window_for_output(self.name)\n\n def get_run_scoped_output_identifier(self) -> Sequence[str]:\n """Utility method to get a collection of identifiers that as a whole represent a unique\n step output.\n\n The unique identifier collection consists of\n\n - ``run_id``: the id of the run which generates the output.\n Note: This method also handles the re-execution memoization logic. 
If the step that\n generates the output is skipped in the re-execution, the ``run_id`` will be the id\n of its parent run.\n - ``step_key``: the key for a compute step.\n - ``name``: the name of the output. (default: 'result').\n\n Returns:\n Sequence[str, ...]: A list of identifiers, i.e. run id, step key, and output name\n """\n warnings.warn(\n "`OutputContext.get_run_scoped_output_identifier` is deprecated. Use "\n "`OutputContext.get_identifier` instead."\n )\n # if run_id is None and this is a re-execution, it means we failed to find its source run id\n check.invariant(\n self.run_id is not None,\n "Unable to find the run scoped output identifier: run_id is None on OutputContext.",\n )\n check.invariant(\n self.step_key is not None,\n "Unable to find the run scoped output identifier: step_key is None on OutputContext.",\n )\n check.invariant(\n self.name is not None,\n "Unable to find the run scoped output identifier: name is None on OutputContext.",\n )\n run_id = cast(str, self.run_id)\n step_key = cast(str, self.step_key)\n name = cast(str, self.name)\n\n if self.mapping_key:\n return [run_id, step_key, name, self.mapping_key]\n\n return [run_id, step_key, name]\n\n
[docs] @public\n def get_identifier(self) -> Sequence[str]:\n """Utility method to get a collection of identifiers that as a whole represent a unique\n step output.\n\n If not using memoization, the unique identifier collection consists of\n\n - ``run_id``: the id of the run which generates the output.\n Note: This method also handles the re-execution memoization logic. If the step that\n generates the output is skipped in the re-execution, the ``run_id`` will be the id\n of its parent run.\n - ``step_key``: the key for a compute step.\n - ``name``: the name of the output. (default: 'result').\n\n If using memoization, the ``version`` corresponding to the step output is used in place of\n the ``run_id``.\n\n Returns:\n Sequence[str, ...]: A list of identifiers, i.e. (run_id or version), step_key, and output_name\n """\n version = self.version\n step_key = self.step_key\n name = self.name\n if version is not None:\n check.invariant(\n self.mapping_key is None,\n f"Mapping key and version both provided for output '{name}' of step"\n f" '{step_key}'. Dynamic mapping is not supported when using versioning.",\n )\n identifier = ["versioned_outputs", version, step_key, name]\n else:\n run_id = self.run_id\n identifier = [run_id, step_key, name]\n if self.mapping_key:\n identifier.append(self.mapping_key)\n\n return identifier
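A minimal sketch of how `get_identifier` is typically consumed, assuming a hypothetical `LocalPathIOManager` and `base_dir` (neither appears above): the identifier becomes a storage path in `handle_output`, and `InputContext.get_identifier` is assumed to resolve to the same identifier on the load side.

.. code-block:: python

    import os
    import pickle

    from dagster import InputContext, IOManager, OutputContext


    class LocalPathIOManager(IOManager):
        """Illustrative IO manager that keys storage paths off of get_identifier()."""

        def __init__(self, base_dir: str):
            self._base_dir = base_dir

        def _path_for(self, identifier) -> str:
            return os.path.join(self._base_dir, *identifier)

        def handle_output(self, context: OutputContext, obj) -> None:
            path = self._path_for(context.get_identifier())
            os.makedirs(os.path.dirname(path), exist_ok=True)
            with open(path, "wb") as f:
                pickle.dump(obj, f)

        def load_input(self, context: InputContext):
            # Assumes InputContext.get_identifier() mirrors the upstream output's
            # identifier, so the read path matches the path written above.
            with open(self._path_for(context.get_identifier()), "rb") as f:
                return pickle.load(f)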
\n\n def get_output_identifier(self) -> Sequence[str]:\n warnings.warn(\n "`OutputContext.get_output_identifier` is deprecated. Use "\n "`OutputContext.get_identifier` instead."\n )\n\n return self.get_identifier()\n\n
[docs] @public\n def get_asset_identifier(self) -> Sequence[str]:\n """The sequence of strings making up the AssetKey for the asset being stored as an output.\n If the asset is partitioned, the identifier contains the partition key as the final element in the\n sequence. For example, for the asset key ``AssetKey(["foo", "bar", "baz"])`` materialized with\n partition key "2023-06-01", ``get_asset_identifier`` will return ``["foo", "bar", "baz", "2023-06-01"]``.\n """\n if self.asset_key is not None:\n if self.has_asset_partitions:\n return [*self.asset_key.path, self.asset_partition_key]\n else:\n return self.asset_key.path\n else:\n check.failed("Can't get asset output identifier for an output with no asset key")
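To make the identifier shape concrete, here is a small sketch using `build_output_context` (the asset key is arbitrary). Because no step context is attached, `has_asset_partitions` is False and the identifier is just the asset key path; per the docstring above, a partitioned materialization would append the partition key as the final element.

.. code-block:: python

    from dagster import AssetKey, build_output_context

    context = build_output_context(asset_key=AssetKey(["foo", "bar", "baz"]))
    assert context.has_asset_key
    # Unpartitioned case: the identifier is the asset key path.
    assert list(context.get_asset_identifier()) == ["foo", "bar", "baz"]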
\n\n def get_asset_output_identifier(self) -> Sequence[str]:\n warnings.warn(\n "`OutputContext.get_asset_output_identifier` is deprecated. Use "\n "`OutputContext.get_asset_identifier` instead."\n )\n\n return self.get_asset_identifier()\n\n
[docs] @public\n def log_event(self, event: Union[AssetObservation, AssetMaterialization]) -> None:\n """Log an AssetMaterialization or AssetObservation from within the body of an io manager's `handle_output` method.\n\n Events logged with this method will appear in the event log.\n\n Args:\n event (Union[AssetMaterialization, AssetObservation]): The event to log.\n\n Examples:\n .. code-block:: python\n\n from dagster import IOManager, AssetMaterialization\n\n class MyIOManager(IOManager):\n def handle_output(self, context, obj):\n context.log_event(AssetMaterialization("foo"))\n """\n from dagster._core.events import DagsterEvent\n\n if isinstance(event, (AssetMaterialization)):\n if self._step_context:\n self._events.append(DagsterEvent.asset_materialization(self._step_context, event))\n self._user_events.append(event)\n elif isinstance(event, AssetObservation):\n if self._step_context:\n self._events.append(DagsterEvent.asset_observation(self._step_context, event))\n self._user_events.append(event)\n else:\n check.failed(f"Unexpected event {event}")
\n\n def consume_events(self) -> Iterator["DagsterEvent"]:\n """Pops and yields all user-generated events that have been recorded from this context.\n\n If consume_events has not yet been called, this will yield all logged events since the call to `handle_output`. If consume_events has been called, it will yield all events since the last time consume_events was called. Designed for internal use. Users should never need to invoke this method.\n """\n events = self._events\n self._events = []\n yield from events\n\n def get_logged_events(\n self,\n ) -> Sequence[Union[AssetMaterialization, AssetObservation]]:\n """Retrieve the list of user-generated events that were logged via the context.\n\n\n User-generated events that were yielded will not appear in this list.\n\n **Examples:**\n\n .. code-block:: python\n\n from dagster import IOManager, build_output_context, AssetMaterialization\n\n class MyIOManager(IOManager):\n def handle_output(self, context, obj):\n ...\n\n def test_handle_output():\n mgr = MyIOManager()\n context = build_output_context()\n mgr.handle_output(context)\n all_user_events = context.get_logged_events()\n materializations = [event for event in all_user_events if isinstance(event, AssetMaterialization)]\n ...\n """\n return self._user_events\n\n
[docs] @public\n def add_output_metadata(self, metadata: Mapping[str, RawMetadataValue]) -> None:\n """Add a dictionary of metadata to the handled output.\n\n Metadata entries added will show up in the HANDLED_OUTPUT and ASSET_MATERIALIZATION events for the run.\n\n Args:\n metadata (Mapping[str, RawMetadataValue]): A metadata dictionary to log\n\n Examples:\n .. code-block:: python\n\n from dagster import IOManager\n\n class MyIOManager(IOManager):\n def handle_output(self, context, obj):\n context.add_output_metadata({"foo": "bar"})\n """\n from dagster._core.definitions.metadata import normalize_metadata\n\n overlapping_labels = set(self._user_generated_metadata.keys()) & metadata.keys()\n if overlapping_labels:\n raise DagsterInvalidMetadata(\n f"Tried to add metadata for key(s) that already have metadata: {overlapping_labels}"\n )\n\n self._user_generated_metadata = {\n **self._user_generated_metadata,\n **normalize_metadata(metadata),\n }
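Since `add_output_metadata` rejects keys that already have metadata, a test built on `build_output_context` can exercise both the happy path and the overlapping-key error; the IO manager and key names below are invented for illustration.

.. code-block:: python

    import pytest

    from dagster import IOManager, build_output_context


    class MetadataIOManager(IOManager):
        def handle_output(self, context, obj):
            context.add_output_metadata({"row_count": 3})

        def load_input(self, context):
            return None


    def test_duplicate_metadata_key_rejected():
        context = build_output_context(step_key="my_step", name="result")
        MetadataIOManager().handle_output(context, obj=object())
        assert "row_count" in context.get_logged_metadata()
        # Logging the same key a second time trips the overlapping-key check above.
        with pytest.raises(Exception, match="already have metadata"):
            context.add_output_metadata({"row_count": 4})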
\n\n def get_logged_metadata(\n self,\n ) -> Mapping[str, MetadataValue]:\n """Get the mapping of metadata entries that have been logged for use with this output."""\n return self._user_generated_metadata\n\n def consume_logged_metadata(\n self,\n ) -> Mapping[str, MetadataValue]:\n """Pops and returns all user-generated metadata entries that have been recorded from this context.\n\n If consume_logged_metadata has not yet been called, this will return all metadata entries logged since\n the call to `handle_output`. If consume_logged_metadata has been called, it will return all\n entries logged since the last time consume_logged_metadata was called. Designed for internal\n use. Users should never need to invoke this method.\n """\n result = self._user_generated_metadata\n self._user_generated_metadata = {}\n return result or {}
\n\n\ndef get_output_context(\n execution_plan: "ExecutionPlan",\n job_def: "JobDefinition",\n resolved_run_config: "ResolvedRunConfig",\n step_output_handle: "StepOutputHandle",\n run_id: Optional[str],\n log_manager: Optional["DagsterLogManager"],\n step_context: Optional["StepExecutionContext"],\n resources: Optional["Resources"],\n version: Optional[str],\n warn_on_step_context_use: bool = False,\n) -> "OutputContext":\n """Args:\n run_id (str): The run ID of the run that produced the output, not necessarily the run that\n the context will be used in.\n """\n step = execution_plan.get_step_by_key(step_output_handle.step_key)\n # get config\n op_config = resolved_run_config.ops[step.node_handle.to_string()]\n outputs_config = op_config.outputs\n\n if outputs_config:\n output_config = outputs_config.get_output_manager_config(step_output_handle.output_name)\n else:\n output_config = None\n\n step_output = execution_plan.get_step_output(step_output_handle)\n output_def = job_def.get_node(step_output.node_handle).output_def_named(step_output.name)\n\n io_manager_key = output_def.io_manager_key\n resource_config = resolved_run_config.resources[io_manager_key].config\n\n node_handle = execution_plan.get_step_by_key(step.key).node_handle\n asset_info = job_def.asset_layer.asset_info_for_output(\n node_handle=node_handle, output_name=step_output.name\n )\n if asset_info is not None:\n metadata = job_def.asset_layer.metadata_for_asset(asset_info.key) or output_def.metadata\n else:\n metadata = output_def.metadata\n\n if step_context:\n check.invariant(\n not resources,\n "Expected either resources or step context to be set, but "\n "received both. If step context is provided, resources for IO manager will be "\n "retrieved off of that.",\n )\n resources = build_resources_for_manager(io_manager_key, step_context)\n\n return OutputContext(\n step_key=step_output_handle.step_key,\n name=step_output_handle.output_name,\n job_name=job_def.name,\n run_id=run_id,\n metadata=metadata,\n mapping_key=step_output_handle.mapping_key,\n config=output_config,\n op_def=job_def.get_node(step.node_handle).definition, # type: ignore # (should be OpDefinition not NodeDefinition)\n dagster_type=output_def.dagster_type,\n log_manager=log_manager,\n version=version,\n step_context=step_context,\n resource_config=resource_config,\n resources=resources,\n asset_info=asset_info,\n warn_on_step_context_use=warn_on_step_context_use,\n )\n\n\ndef step_output_version(\n job_def: "JobDefinition",\n execution_plan: "ExecutionPlan",\n resolved_run_config: "ResolvedRunConfig",\n step_output_handle: "StepOutputHandle",\n) -> Optional[str]:\n from dagster._core.execution.resolve_versions import resolve_step_output_versions\n\n step_output_versions = resolve_step_output_versions(\n job_def, execution_plan, resolved_run_config\n )\n return (\n step_output_versions[step_output_handle]\n if step_output_handle in step_output_versions\n else None\n )\n\n\n
[docs]def build_output_context(\n step_key: Optional[str] = None,\n name: Optional[str] = None,\n metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n run_id: Optional[str] = None,\n mapping_key: Optional[str] = None,\n config: Optional[Any] = None,\n dagster_type: Optional["DagsterType"] = None,\n version: Optional[str] = None,\n resource_config: Optional[Mapping[str, object]] = None,\n resources: Optional[Mapping[str, object]] = None,\n op_def: Optional["OpDefinition"] = None,\n asset_key: Optional[CoercibleToAssetKey] = None,\n partition_key: Optional[str] = None,\n) -> "OutputContext":\n """Builds output context from provided parameters.\n\n ``build_output_context`` can be used as either a function, or a context manager. If resources\n that are also context managers are provided, then ``build_output_context`` must be used as a\n context manager.\n\n Args:\n step_key (Optional[str]): The step_key for the compute step that produced the output.\n name (Optional[str]): The name of the output that produced the output.\n metadata (Optional[Mapping[str, Any]]): A dict of the metadata that is assigned to the\n OutputDefinition that produced the output.\n mapping_key (Optional[str]): The key that identifies a unique mapped output. None for regular outputs.\n config (Optional[Any]): The configuration for the output.\n dagster_type (Optional[DagsterType]): The type of this output.\n version (Optional[str]): (Experimental) The version of the output.\n resource_config (Optional[Mapping[str, Any]]): The resource config to make available from the\n input context. This usually corresponds to the config provided to the resource that\n loads the output manager.\n resources (Optional[Resources]): The resources to make available from the context.\n For a given key, you can provide either an actual instance of an object, or a resource\n definition.\n op_def (Optional[OpDefinition]): The definition of the op that produced the output.\n asset_key: Optional[Union[AssetKey, Sequence[str], str]]: The asset key corresponding to the\n output.\n partition_key: Optional[str]: String value representing partition key to execute with.\n\n Examples:\n .. 
code-block:: python\n\n build_output_context()\n\n with build_output_context(resources={"foo": context_manager_resource}) as context:\n do_something\n\n """\n from dagster._core.definitions import OpDefinition\n from dagster._core.execution.context_creation_job import initialize_console_manager\n from dagster._core.types.dagster_type import DagsterType\n\n step_key = check.opt_str_param(step_key, "step_key")\n name = check.opt_str_param(name, "name")\n metadata = check.opt_mapping_param(metadata, "metadata", key_type=str)\n run_id = check.opt_str_param(run_id, "run_id", default=RUN_ID_PLACEHOLDER)\n mapping_key = check.opt_str_param(mapping_key, "mapping_key")\n dagster_type = check.opt_inst_param(dagster_type, "dagster_type", DagsterType)\n version = check.opt_str_param(version, "version")\n resource_config = check.opt_mapping_param(resource_config, "resource_config", key_type=str)\n resources = check.opt_mapping_param(resources, "resources", key_type=str)\n op_def = check.opt_inst_param(op_def, "op_def", OpDefinition)\n asset_key = AssetKey.from_coercible(asset_key) if asset_key else None\n partition_key = check.opt_str_param(partition_key, "partition_key")\n\n return OutputContext(\n step_key=step_key,\n name=name,\n job_name=None,\n run_id=run_id,\n metadata=metadata,\n mapping_key=mapping_key,\n config=config,\n dagster_type=dagster_type,\n log_manager=initialize_console_manager(None),\n version=version,\n resource_config=resource_config,\n resources=resources,\n step_context=None,\n op_def=op_def,\n asset_info=AssetOutputInfo(key=asset_key) if asset_key else None,\n partition_key=partition_key,\n )
\n
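Because the `resources` property raises when a generator-backed resource is accessed outside of a context manager scope, tests that pass such a resource should enter `build_output_context` with a `with` block. A sketch, assuming an invented `conn_resource` and `ConnIOManager`:

.. code-block:: python

    from dagster import IOManager, build_output_context, resource


    @resource
    def conn_resource(_init_context):
        # A yield-based resource has setup and teardown, so it behaves like a
        # context manager when built.
        conn = {"open": True}
        try:
            yield conn
        finally:
            conn["open"] = False


    class ConnIOManager(IOManager):
        def handle_output(self, context, obj):
            assert context.resources.conn["open"]

        def load_input(self, context):
            return None


    def test_handle_output_with_cm_resource():
        with build_output_context(resources={"conn": conn_resource}) as context:
            ConnIOManager().handle_output(context, obj=1)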
", "current_page_name": "_modules/dagster/_core/execution/context/output", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.execution.context.output"}, "system": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.execution.context.system

\n"""This module contains the execution context objects that are internal to the system.\nNot every property on these should be exposed to random Jane or Joe dagster user\nso we have a different layer of objects that encode the explicit public API\nin the user_context module.\n"""\nfrom abc import ABC, abstractmethod\nfrom dataclasses import dataclass\nfrom hashlib import sha256\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Dict,\n    Iterable,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Union,\n    cast,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.definitions.data_version import (\n    DATA_VERSION_TAG,\n    SKIP_PARTITION_DATA_VERSION_DEPENDENCY_THRESHOLD,\n    extract_data_version_from_entry,\n)\nfrom dagster._core.definitions.dependency import OpNode\nfrom dagster._core.definitions.events import AssetKey, AssetLineageInfo\nfrom dagster._core.definitions.hook_definition import HookDefinition\nfrom dagster._core.definitions.job_base import IJob\nfrom dagster._core.definitions.job_definition import JobDefinition\nfrom dagster._core.definitions.multi_dimensional_partitions import MultiPartitionsDefinition\nfrom dagster._core.definitions.op_definition import OpDefinition\nfrom dagster._core.definitions.partition import PartitionsDefinition, PartitionsSubset\nfrom dagster._core.definitions.partition_key_range import PartitionKeyRange\nfrom dagster._core.definitions.partition_mapping import (\n    PartitionMapping,\n    infer_partition_mapping,\n)\nfrom dagster._core.definitions.policy import RetryPolicy\nfrom dagster._core.definitions.reconstruct import ReconstructableJob\nfrom dagster._core.definitions.resource_definition import ScopedResourcesBuilder\nfrom dagster._core.definitions.step_launcher import StepLauncher\nfrom dagster._core.definitions.time_window_partitions import (\n    TimeWindow,\n    TimeWindowPartitionsDefinition,\n    has_one_dimension_time_window_partitioning,\n)\nfrom dagster._core.errors import DagsterInvariantViolationError\nfrom dagster._core.execution.plan.handle import ResolvedFromDynamicStepHandle, StepHandle\nfrom dagster._core.execution.plan.outputs import StepOutputHandle\nfrom dagster._core.execution.plan.step import ExecutionStep\nfrom dagster._core.execution.retries import RetryMode\nfrom dagster._core.executor.base import Executor\nfrom dagster._core.log_manager import DagsterLogManager\nfrom dagster._core.storage.dagster_run import DagsterRun\nfrom dagster._core.storage.io_manager import IOManager\nfrom dagster._core.storage.tags import (\n    ASSET_PARTITION_RANGE_END_TAG,\n    ASSET_PARTITION_RANGE_START_TAG,\n    MULTIDIMENSIONAL_PARTITION_PREFIX,\n    PARTITION_NAME_TAG,\n)\nfrom dagster._core.system_config.objects import ResolvedRunConfig\nfrom dagster._core.types.dagster_type import DagsterType\n\nfrom .input import InputContext\nfrom .output import OutputContext, get_output_context\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.data_version import (\n        DataVersion,\n    )\n    from dagster._core.definitions.dependency import NodeHandle\n    from dagster._core.definitions.resource_definition import Resources\n    from dagster._core.event_api import EventLogRecord\n    from dagster._core.execution.plan.plan import ExecutionPlan\n    from dagster._core.execution.plan.state import KnownExecutionState\n    from dagster._core.instance import DagsterInstance\n\n    from .hook import HookContext\n\n\ndef 
is_iterable(obj: Any) -> bool:\n    try:\n        iter(obj)\n    except:\n        return False\n    return True\n\n\nclass IPlanContext(ABC):\n    """Context interface to represent run information that does not require access to user code.\n\n    The information available via this interface is accessible to the system throughout a run.\n    """\n\n    @property\n    @abstractmethod\n    def plan_data(self) -> "PlanData":\n        raise NotImplementedError()\n\n    @property\n    def job(self) -> IJob:\n        return self.plan_data.job\n\n    @property\n    def dagster_run(self) -> DagsterRun:\n        return self.plan_data.dagster_run\n\n    @property\n    def run_id(self) -> str:\n        return self.dagster_run.run_id\n\n    @property\n    def run_config(self) -> Mapping[str, object]:\n        return self.dagster_run.run_config\n\n    @property\n    def job_name(self) -> str:\n        return self.dagster_run.job_name\n\n    @property\n    def instance(self) -> "DagsterInstance":\n        return self.plan_data.instance\n\n    @property\n    def raise_on_error(self) -> bool:\n        return self.plan_data.raise_on_error\n\n    @property\n    def retry_mode(self) -> RetryMode:\n        return self.plan_data.retry_mode\n\n    @property\n    def execution_plan(self) -> "ExecutionPlan":\n        return self.plan_data.execution_plan\n\n    @property\n    @abstractmethod\n    def output_capture(self) -> Optional[Mapping[StepOutputHandle, Any]]:\n        raise NotImplementedError()\n\n    @property\n    def log(self) -> DagsterLogManager:\n        raise NotImplementedError()\n\n    @property\n    def logging_tags(self) -> Mapping[str, str]:\n        return self.log.logging_metadata.all_tags()\n\n    @property\n    def event_tags(self) -> Mapping[str, str]:\n        return self.log.logging_metadata.event_tags()\n\n    def has_tag(self, key: str) -> bool:\n        check.str_param(key, "key")\n        return key in self.dagster_run.tags\n\n    def get_tag(self, key: str) -> Optional[str]:\n        check.str_param(key, "key")\n        return self.dagster_run.tags.get(key)\n\n    @property\n    def run_tags(self) -> Mapping[str, str]:\n        return self.dagster_run.tags\n\n\nclass PlanData(NamedTuple):\n    """The data about a run that is available during both orchestration and execution.\n\n    This object does not contain any information that requires access to user code, such as the\n    pipeline definition and resources.\n    """\n\n    job: IJob\n    dagster_run: DagsterRun\n    instance: "DagsterInstance"\n    execution_plan: "ExecutionPlan"\n    raise_on_error: bool = False\n    retry_mode: RetryMode = RetryMode.DISABLED\n\n\nclass ExecutionData(NamedTuple):\n    """The data that is available to the system during execution.\n\n    This object contains information that requires access to user code, such as the pipeline\n    definition and resources.\n    """\n\n    scoped_resources_builder: ScopedResourcesBuilder\n    resolved_run_config: ResolvedRunConfig\n    job_def: JobDefinition\n\n\nclass IStepContext(IPlanContext):\n    """Interface to represent data to be available during either step orchestration or execution."""\n\n    @property\n    @abstractmethod\n    def step(self) -> ExecutionStep:\n        raise NotImplementedError()\n\n    @property\n    @abstractmethod\n    def node_handle(self) -> "NodeHandle":\n        raise NotImplementedError()\n\n\nclass PlanOrchestrationContext(IPlanContext):\n    """Context for the orchestration of a run.\n\n    This context assumes inability to run 
user code directly.\n    """\n\n    def __init__(\n        self,\n        plan_data: PlanData,\n        log_manager: DagsterLogManager,\n        executor: Executor,\n        output_capture: Optional[Dict[StepOutputHandle, Any]],\n        resume_from_failure: bool = False,\n    ):\n        self._plan_data = plan_data\n        self._log_manager = log_manager\n        self._executor = executor\n        self._output_capture = output_capture\n        self._resume_from_failure = resume_from_failure\n\n    @property\n    def plan_data(self) -> PlanData:\n        return self._plan_data\n\n    @property\n    def reconstructable_job(self) -> ReconstructableJob:\n        if not isinstance(self.job, ReconstructableJob):\n            raise DagsterInvariantViolationError(\n                "reconstructable_pipeline property must be a ReconstructableJob"\n            )\n        return self.job\n\n    @property\n    def log(self) -> DagsterLogManager:\n        return self._log_manager\n\n    @property\n    def executor(self) -> Executor:\n        return self._executor\n\n    @property\n    def output_capture(self) -> Optional[Dict[StepOutputHandle, Any]]:\n        return self._output_capture\n\n    def for_step(self, step: ExecutionStep) -> "IStepContext":\n        return StepOrchestrationContext(\n            plan_data=self.plan_data,\n            log_manager=self._log_manager.with_tags(**step.logging_tags),\n            executor=self.executor,\n            step=step,\n            output_capture=self.output_capture,\n        )\n\n    @property\n    def resume_from_failure(self) -> bool:\n        return self._resume_from_failure\n\n\nclass StepOrchestrationContext(PlanOrchestrationContext, IStepContext):\n    """Context for the orchestration of a step.\n\n    This context assumes inability to run user code directly. 
Thus, it does not include any resource\n    information.\n    """\n\n    def __init__(\n        self,\n        plan_data: PlanData,\n        log_manager: DagsterLogManager,\n        executor: Executor,\n        step: ExecutionStep,\n        output_capture: Optional[Dict[StepOutputHandle, Any]],\n    ):\n        super(StepOrchestrationContext, self).__init__(\n            plan_data, log_manager, executor, output_capture\n        )\n        self._step = step\n\n    @property\n    def step(self) -> ExecutionStep:\n        return self._step\n\n    @property\n    def node_handle(self) -> "NodeHandle":\n        return self.step.node_handle\n\n\nclass PlanExecutionContext(IPlanContext):\n    """Context for the execution of a plan.\n\n    This context assumes that user code can be run directly, and thus includes resource and\n    information.\n    """\n\n    def __init__(\n        self,\n        plan_data: PlanData,\n        execution_data: ExecutionData,\n        log_manager: DagsterLogManager,\n        output_capture: Optional[Dict[StepOutputHandle, Any]] = None,\n    ):\n        self._plan_data = plan_data\n        self._execution_data = execution_data\n        self._log_manager = log_manager\n        self._output_capture = output_capture\n\n    @property\n    def plan_data(self) -> PlanData:\n        return self._plan_data\n\n    @property\n    def output_capture(self) -> Optional[Dict[StepOutputHandle, Any]]:\n        return self._output_capture\n\n    def for_step(\n        self,\n        step: ExecutionStep,\n        known_state: Optional["KnownExecutionState"] = None,\n    ) -> IStepContext:\n        return StepExecutionContext(\n            plan_data=self.plan_data,\n            execution_data=self._execution_data,\n            log_manager=self._log_manager.with_tags(**step.logging_tags),\n            step=step,\n            output_capture=self.output_capture,\n            known_state=known_state,\n        )\n\n    @property\n    def job_def(self) -> JobDefinition:\n        return self._execution_data.job_def\n\n    @property\n    def resolved_run_config(self) -> ResolvedRunConfig:\n        return self._execution_data.resolved_run_config\n\n    @property\n    def scoped_resources_builder(self) -> ScopedResourcesBuilder:\n        return self._execution_data.scoped_resources_builder\n\n    @property\n    def log(self) -> DagsterLogManager:\n        return self._log_manager\n\n    @property\n    def partitions_def(self) -> Optional[PartitionsDefinition]:\n        from dagster._core.definitions.job_definition import JobDefinition\n\n        job_def = self._execution_data.job_def\n        if not isinstance(job_def, JobDefinition):\n            check.failed(\n                "Can only call 'partitions_def', when using jobs, not legacy pipelines",\n            )\n        partitions_def = job_def.partitions_def\n        return partitions_def\n\n    @property\n    def has_partitions(self) -> bool:\n        tags = self._plan_data.dagster_run.tags\n        return bool(\n            PARTITION_NAME_TAG in tags\n            or any([tag.startswith(MULTIDIMENSIONAL_PARTITION_PREFIX) for tag in tags.keys()])\n            or (\n                tags.get(ASSET_PARTITION_RANGE_START_TAG)\n                and tags.get(ASSET_PARTITION_RANGE_END_TAG)\n            )\n        )\n\n    @property\n    def partition_key(self) -> str:\n        from dagster._core.definitions.multi_dimensional_partitions import (\n            MultiPartitionsDefinition,\n            get_multipartition_key_from_tags,\n        )\n\n        
if not self.has_partitions:\n            raise DagsterInvariantViolationError(\n                "Cannot access partition_key for a non-partitioned run"\n            )\n\n        tags = self._plan_data.dagster_run.tags\n        if any([tag.startswith(MULTIDIMENSIONAL_PARTITION_PREFIX) for tag in tags.keys()]):\n            return get_multipartition_key_from_tags(tags)\n        elif PARTITION_NAME_TAG in tags:\n            return tags[PARTITION_NAME_TAG]\n        else:\n            range_start = tags[ASSET_PARTITION_RANGE_START_TAG]\n            range_end = tags[ASSET_PARTITION_RANGE_END_TAG]\n\n            if range_start != range_end:\n                raise DagsterInvariantViolationError(\n                    "Cannot access partition_key for a partitioned run with a range of partitions."\n                    " Call partition_key_range instead."\n                )\n            else:\n                if isinstance(self.partitions_def, MultiPartitionsDefinition):\n                    return self.partitions_def.get_partition_key_from_str(cast(str, range_start))\n                return cast(str, range_start)\n\n    @property\n    def asset_partition_key_range(self) -> PartitionKeyRange:\n        from dagster._core.definitions.multi_dimensional_partitions import (\n            MultiPartitionsDefinition,\n            get_multipartition_key_from_tags,\n        )\n\n        if not self.has_partitions:\n            raise DagsterInvariantViolationError(\n                "Cannot access partition_key for a non-partitioned run"\n            )\n\n        tags = self._plan_data.dagster_run.tags\n        if any([tag.startswith(MULTIDIMENSIONAL_PARTITION_PREFIX) for tag in tags.keys()]):\n            multipartition_key = get_multipartition_key_from_tags(tags)\n            return PartitionKeyRange(multipartition_key, multipartition_key)\n        elif PARTITION_NAME_TAG in tags:\n            partition_key = tags[PARTITION_NAME_TAG]\n            return PartitionKeyRange(partition_key, partition_key)\n        else:\n            partition_key_range_start = tags[ASSET_PARTITION_RANGE_START_TAG]\n            if partition_key_range_start is not None:\n                if isinstance(self.partitions_def, MultiPartitionsDefinition):\n                    return PartitionKeyRange(\n                        self.partitions_def.get_partition_key_from_str(partition_key_range_start),\n                        self.partitions_def.get_partition_key_from_str(\n                            tags[ASSET_PARTITION_RANGE_END_TAG]\n                        ),\n                    )\n            return PartitionKeyRange(partition_key_range_start, tags[ASSET_PARTITION_RANGE_END_TAG])\n\n    @property\n    def partition_time_window(self) -> TimeWindow:\n        partitions_def = self.partitions_def\n\n        if partitions_def is None:\n            raise DagsterInvariantViolationError("Partitions definition is not defined")\n\n        if not has_one_dimension_time_window_partitioning(partitions_def=partitions_def):\n            raise DagsterInvariantViolationError(\n                "Expected a TimeWindowPartitionsDefinition or MultiPartitionsDefinition with a"\n                f" single time dimension, but instead found {type(partitions_def)}"\n            )\n\n        if self.has_partition_key:\n            return cast(\n                Union[MultiPartitionsDefinition, TimeWindowPartitionsDefinition], partitions_def\n            ).time_window_for_partition_key(self.partition_key)\n        elif self.has_partition_key_range:\n            
partition_key_range = self.asset_partition_key_range\n            partitions_def = cast(\n                Union[TimeWindowPartitionsDefinition, MultiPartitionsDefinition], partitions_def\n            )\n            return TimeWindow(\n                partitions_def.time_window_for_partition_key(partition_key_range.start).start,\n                partitions_def.time_window_for_partition_key(partition_key_range.end).end,\n            )\n\n        else:\n            check.failed(\n                "Has a PartitionsDefinition, so should either have a partition key or a partition"\n                " key range"\n            )\n\n    @property\n    def has_partition_key(self) -> bool:\n        return PARTITION_NAME_TAG in self._plan_data.dagster_run.tags\n\n    @property\n    def has_partition_key_range(self) -> bool:\n        return ASSET_PARTITION_RANGE_START_TAG in self._plan_data.dagster_run.tags\n\n    def for_type(self, dagster_type: DagsterType) -> "TypeCheckContext":\n        return TypeCheckContext(\n            self.run_id, self.log, self._execution_data.scoped_resources_builder, dagster_type\n        )\n\n\n@dataclass\nclass InputAssetVersionInfo:\n    # This is the storage id of the last materialization of any partition of an asset. Thus it is\n    # computed the same way for both partitioned and non-partitioned assets.\n    storage_id: int\n\n    # If the input asset is partitioned, this is a hash of the sorted data versions of each dependency\n    # partition. If the input asset is not partitioned, this is the data version of the asset. It\n    # can be none if we are sourcing a materialization from before data versions.\n    data_version: Optional["DataVersion"]\n\n    # This is the run_id on the event that the storage_id references\n    run_id: str\n\n    # This is the timestamp on the event that the storage_id references\n    timestamp: float\n\n\n
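For orientation, the tag-based resolution in `partition_key` above is what a user-facing single-partition run relies on: `materialize(..., partition_key=...)` tags the run, and the execution context resolves that tag back into `partition_key`. A minimal sketch with an arbitrary asset and date range:

.. code-block:: python

    from dagster import DailyPartitionsDefinition, asset, materialize

    daily = DailyPartitionsDefinition(start_date="2023-01-01")


    @asset(partitions_def=daily)
    def daily_numbers(context):
        # Resolved from the partition tag set on the run.
        assert context.partition_key == "2023-01-02"
        return 1


    def test_single_partition_run():
        assert materialize([daily_numbers], partition_key="2023-01-02").success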
[docs]class StepExecutionContext(PlanExecutionContext, IStepContext):\n """Context for the execution of a step. Users should not instantiate this class directly.\n\n This context assumes that user code can be run directly, and thus includes resource and information.\n """\n\n def __init__(\n self,\n plan_data: PlanData,\n execution_data: ExecutionData,\n log_manager: DagsterLogManager,\n step: ExecutionStep,\n output_capture: Optional[Dict[StepOutputHandle, Any]],\n known_state: Optional["KnownExecutionState"],\n ):\n from dagster._core.execution.resources_init import get_required_resource_keys_for_step\n\n super(StepExecutionContext, self).__init__(\n plan_data=plan_data,\n execution_data=execution_data,\n log_manager=log_manager,\n output_capture=output_capture,\n )\n self._step = step\n self._required_resource_keys = get_required_resource_keys_for_step(\n plan_data.job.get_definition(),\n step,\n plan_data.execution_plan,\n )\n self._resources = execution_data.scoped_resources_builder.build(\n self._required_resource_keys\n )\n self._known_state = known_state\n self._input_lineage: List[AssetLineageInfo] = []\n\n resources_iter = cast(Iterable, self._resources)\n\n step_launcher_resources = [\n resource for resource in resources_iter if isinstance(resource, StepLauncher)\n ]\n\n self._step_launcher: Optional[StepLauncher] = None\n if len(step_launcher_resources) > 1:\n raise DagsterInvariantViolationError(\n "Multiple required resources for {described_op} have inherited StepLauncher"\n "There should be at most one step launcher resource per {node_type}.".format(\n described_op=self.describe_op(), node_type=self.op_def.node_type_str\n )\n )\n elif len(step_launcher_resources) == 1:\n self._step_launcher = step_launcher_resources[0]\n\n self._step_exception: Optional[BaseException] = None\n\n self._step_output_capture: Optional[Dict[StepOutputHandle, Any]] = None\n # Enable step output capture if there are any hooks which will receive them.\n # Expect in the future that hooks may control whether or not they get outputs,\n # but for now presence of any will cause output capture.\n if self.job_def.get_all_hooks_for_handle(self.node_handle):\n self._step_output_capture = {}\n\n self._output_metadata: Dict[str, Any] = {}\n self._seen_outputs: Dict[str, Union[str, Set[str]]] = {}\n\n self._input_asset_version_info: Dict[AssetKey, Optional["InputAssetVersionInfo"]] = {}\n self._is_external_input_asset_version_info_loaded = False\n self._data_version_cache: Dict[AssetKey, "DataVersion"] = {}\n\n self._requires_typed_event_stream = False\n self._typed_event_stream_error_message = None\n\n # In this mode no conversion is done on returned values and missing but expected outputs are not\n # allowed.\n @property\n def requires_typed_event_stream(self) -> bool:\n return self._requires_typed_event_stream\n\n @property\n def typed_event_stream_error_message(self) -> Optional[str]:\n return self._typed_event_stream_error_message\n\n # Error message will be appended to the default error message.\n def set_requires_typed_event_stream(self, *, error_message: Optional[str] = None):\n self._requires_typed_event_stream = True\n self._typed_event_stream_error_message = error_message\n\n @property\n def step(self) -> ExecutionStep:\n return self._step\n\n @property\n def node_handle(self) -> "NodeHandle":\n return self.step.node_handle\n\n @property\n def required_resource_keys(self) -> AbstractSet[str]:\n return self._required_resource_keys\n\n @property\n def resources(self) -> "Resources":\n return 
self._resources\n\n @property\n def step_launcher(self) -> Optional[StepLauncher]:\n return self._step_launcher\n\n @property\n def op_def(self) -> OpDefinition:\n return self.op.definition\n\n @property\n def job_def(self) -> "JobDefinition":\n return self._execution_data.job_def\n\n @property\n def op(self) -> OpNode:\n return self.job_def.get_op(self._step.node_handle)\n\n @property\n def op_retry_policy(self) -> Optional[RetryPolicy]:\n return self.job_def.get_retry_policy_for_handle(self.node_handle)\n\n def describe_op(self) -> str:\n return f'op "{self.node_handle}"'\n\n def get_io_manager(self, step_output_handle: StepOutputHandle) -> IOManager:\n step_output = self.execution_plan.get_step_output(step_output_handle)\n io_manager_key = (\n self.job_def.get_node(step_output.node_handle)\n .output_def_named(step_output.name)\n .io_manager_key\n )\n\n output_manager = getattr(self.resources, io_manager_key)\n return check.inst(output_manager, IOManager)\n\n def get_output_context(self, step_output_handle: StepOutputHandle) -> OutputContext:\n return get_output_context(\n self.execution_plan,\n self.job_def,\n self.resolved_run_config,\n step_output_handle,\n self._get_source_run_id(step_output_handle),\n log_manager=self.log,\n step_context=self,\n resources=None,\n version=self.execution_plan.get_version_for_step_output_handle(step_output_handle),\n )\n\n def for_input_manager(\n self,\n name: str,\n config: Any,\n metadata: Any,\n dagster_type: DagsterType,\n source_handle: Optional[StepOutputHandle] = None,\n resource_config: Any = None,\n resources: Optional["Resources"] = None,\n artificial_output_context: Optional["OutputContext"] = None,\n ) -> InputContext:\n if source_handle and artificial_output_context:\n check.failed("Cannot specify both source_handle and artificial_output_context.")\n\n upstream_output: Optional[OutputContext] = None\n\n if source_handle is not None:\n version = self.execution_plan.get_version_for_step_output_handle(source_handle)\n\n # NOTE: this is using downstream step_context for upstream OutputContext. 
step_context\n # will be set to None for 0.15 release.\n upstream_output = get_output_context(\n self.execution_plan,\n self.job_def,\n self.resolved_run_config,\n source_handle,\n self._get_source_run_id(source_handle),\n log_manager=self.log,\n step_context=self,\n resources=None,\n version=version,\n warn_on_step_context_use=True,\n )\n else:\n upstream_output = artificial_output_context\n\n asset_key = self.job_def.asset_layer.asset_key_for_input(\n node_handle=self.node_handle, input_name=name\n )\n asset_partitions_subset = (\n self.asset_partitions_subset_for_input(name)\n if self.has_asset_partitions_for_input(name)\n else None\n )\n\n asset_partitions_def = (\n self.job_def.asset_layer.partitions_def_for_asset(asset_key) if asset_key else None\n )\n return InputContext(\n job_name=self.job_def.name,\n name=name,\n op_def=self.op_def,\n config=config,\n metadata=metadata,\n upstream_output=upstream_output,\n dagster_type=dagster_type,\n log_manager=self.log,\n step_context=self,\n resource_config=resource_config,\n resources=resources,\n asset_key=asset_key,\n asset_partitions_subset=asset_partitions_subset,\n asset_partitions_def=asset_partitions_def,\n instance=self.instance,\n )\n\n def for_hook(self, hook_def: HookDefinition) -> "HookContext":\n from .hook import HookContext\n\n return HookContext(self, hook_def)\n\n def get_known_state(self) -> "KnownExecutionState":\n if not self._known_state:\n check.failed(\n "Attempted to access KnownExecutionState but it was not provided at context"\n " creation"\n )\n return self._known_state\n\n def can_load(\n self,\n step_output_handle: StepOutputHandle,\n ) -> bool:\n # can load from upstream in the same run\n if step_output_handle in self.get_known_state().ready_outputs:\n return True\n\n if (\n self._should_load_from_previous_runs(step_output_handle)\n # should and can load from a previous run\n and self._get_source_run_id_from_logs(step_output_handle)\n ):\n return True\n\n return False\n\n def observe_output(self, output_name: str, mapping_key: Optional[str] = None) -> None:\n if mapping_key:\n if output_name not in self._seen_outputs:\n self._seen_outputs[output_name] = set()\n cast(Set[str], self._seen_outputs[output_name]).add(mapping_key)\n else:\n self._seen_outputs[output_name] = "seen"\n\n def has_seen_output(self, output_name: str, mapping_key: Optional[str] = None) -> bool:\n if mapping_key:\n return (\n output_name in self._seen_outputs and mapping_key in self._seen_outputs[output_name]\n )\n return output_name in self._seen_outputs\n\n def add_output_metadata(\n self,\n metadata: Mapping[str, Any],\n output_name: Optional[str] = None,\n mapping_key: Optional[str] = None,\n ) -> None:\n if output_name is None and len(self.op_def.output_defs) == 1:\n output_def = self.op_def.output_defs[0]\n output_name = output_def.name\n elif output_name is None:\n raise DagsterInvariantViolationError(\n "Attempted to log metadata without providing output_name, but multiple outputs"\n " exist. Please provide an output_name to the invocation of"\n " `context.add_output_metadata`."\n )\n else:\n output_def = self.op_def.output_def_named(output_name)\n\n if self.has_seen_output(output_name, mapping_key):\n output_desc = (\n f"output '{output_def.name}'"\n if not mapping_key\n else f"output '{output_def.name}' with mapping_key '{mapping_key}'"\n )\n raise DagsterInvariantViolationError(\n f"In {self.op_def.node_type_str} '{self.op.name}', attempted to log output"\n f" metadata for {output_desc} which has already been yielded. 
Metadata must be"\n " logged before the output is yielded."\n )\n if output_def.is_dynamic and not mapping_key:\n raise DagsterInvariantViolationError(\n f"In {self.op_def.node_type_str} '{self.op.name}', attempted to log metadata"\n f" for dynamic output '{output_def.name}' without providing a mapping key. When"\n " logging metadata for a dynamic output, it is necessary to provide a mapping key."\n )\n\n if mapping_key:\n if output_name not in self._output_metadata:\n self._output_metadata[output_name] = {}\n if mapping_key in self._output_metadata[output_name]:\n self._output_metadata[output_name][mapping_key].update(metadata)\n else:\n self._output_metadata[output_name][mapping_key] = metadata\n else:\n if output_name in self._output_metadata:\n self._output_metadata[output_name].update(metadata)\n else:\n self._output_metadata[output_name] = metadata\n\n def get_output_metadata(\n self, output_name: str, mapping_key: Optional[str] = None\n ) -> Optional[Mapping[str, Any]]:\n metadata = self._output_metadata.get(output_name)\n if mapping_key and metadata:\n return metadata.get(mapping_key)\n return metadata\n\n def _get_source_run_id_from_logs(self, step_output_handle: StepOutputHandle) -> Optional[str]:\n # walk through event logs to find the right run_id based on the run lineage\n\n parent_state = self.get_known_state().parent_state\n while parent_state:\n # if the parent run has yielded an StepOutput event for the given step output,\n # we find the source run id\n if step_output_handle in parent_state.produced_outputs:\n return parent_state.run_id\n\n # else, keep looking backwards\n parent_state = parent_state.get_parent_state()\n\n # When a fixed path is provided via io manager, it's able to run step subset using an execution\n # plan when the ascendant outputs were not previously created by dagster-controlled\n # computations. for example, in backfills, with fixed path io manager, we allow users to\n # "re-execute" runs with steps where the outputs weren't previously stored by dagster.\n\n # Warn about this special case because it will also reach here when all previous runs have\n # skipped yielding this output. From the logs, we have no easy way to differentiate the fixed\n # path case and the skipping case, until we record the skipping info in KnownExecutionState,\n # i.e. resolve https://github.com/dagster-io/dagster/issues/3511\n self.log.warning(\n f"No previously stored outputs found for source {step_output_handle}. 
"\n "This is either because you are using an IO Manager that does not depend on run ID, "\n "or because all the previous runs have skipped the output in conditional execution."\n )\n return None\n\n def _should_load_from_previous_runs(self, step_output_handle: StepOutputHandle) -> bool:\n # should not load if not a re-execution\n if self.dagster_run.parent_run_id is None:\n return False\n # should not load if re-executing the entire pipeline\n if self.dagster_run.step_keys_to_execute is None:\n return False\n\n # should not load if the entire dynamic step is being executed in the current run\n handle = StepHandle.parse_from_key(step_output_handle.step_key)\n if (\n isinstance(handle, ResolvedFromDynamicStepHandle)\n and handle.unresolved_form.to_key() in self.dagster_run.step_keys_to_execute\n ):\n return False\n\n # should not load if this step is being executed in the current run\n return step_output_handle.step_key not in self.dagster_run.step_keys_to_execute\n\n def _get_source_run_id(self, step_output_handle: StepOutputHandle) -> Optional[str]:\n if self._should_load_from_previous_runs(step_output_handle):\n return self._get_source_run_id_from_logs(step_output_handle)\n else:\n return self.dagster_run.run_id\n\n def capture_step_exception(self, exception: BaseException):\n self._step_exception = check.inst_param(exception, "exception", BaseException)\n\n @property\n def step_exception(self) -> Optional[BaseException]:\n return self._step_exception\n\n @property\n def step_output_capture(self) -> Optional[Dict[StepOutputHandle, Any]]:\n return self._step_output_capture\n\n @property\n def previous_attempt_count(self) -> int:\n return self.get_known_state().get_retry_state().get_attempt_count(self._step.key)\n\n @property\n def op_config(self) -> Any:\n op_config = self.resolved_run_config.ops.get(str(self.node_handle))\n return op_config.config if op_config else None\n\n @property\n def is_op_in_graph(self) -> bool:\n """Whether this step corresponds to an op within a graph (either @graph, or @graph_asset)."""\n return self.step.node_handle.parent is not None\n\n @property\n def is_sda_step(self) -> bool:\n """Whether this step corresponds to a software define asset, inferred by presence of asset info on outputs.\n\n note: ops can materialize assets as well.\n """\n for output in self.step.step_outputs:\n asset_info = self.job_def.asset_layer.asset_info_for_output(\n self.node_handle, output.name\n )\n if asset_info is not None:\n return True\n return False\n\n def set_data_version(self, asset_key: AssetKey, data_version: "DataVersion") -> None:\n self._data_version_cache[asset_key] = data_version\n\n def has_data_version(self, asset_key: AssetKey) -> bool:\n return asset_key in self._data_version_cache\n\n def get_data_version(self, asset_key: AssetKey) -> "DataVersion":\n return self._data_version_cache[asset_key]\n\n @property\n def input_asset_records(self) -> Optional[Mapping[AssetKey, Optional["InputAssetVersionInfo"]]]:\n return self._input_asset_version_info\n\n @property\n def is_external_input_asset_version_info_loaded(self) -> bool:\n return self._is_external_input_asset_version_info_loaded\n\n def get_input_asset_version_info(self, key: AssetKey) -> Optional["InputAssetVersionInfo"]:\n if key not in self._input_asset_version_info:\n self._fetch_input_asset_version_info(key)\n return self._input_asset_version_info[key]\n\n # "external" refers to records for inputs generated outside of this step\n def fetch_external_input_asset_version_info(self) -> None:\n output_keys = 
self.get_output_asset_keys()\n\n all_dep_keys: List[AssetKey] = []\n for output_key in output_keys:\n if output_key not in self.job_def.asset_layer.asset_deps:\n continue\n dep_keys = self.job_def.asset_layer.upstream_assets_for_asset(output_key)\n for key in dep_keys:\n if key not in all_dep_keys and key not in output_keys:\n all_dep_keys.append(key)\n\n self._input_asset_version_info = {}\n for key in all_dep_keys:\n self._fetch_input_asset_version_info(key)\n self._is_external_input_asset_version_info_loaded = True\n\n def _fetch_input_asset_version_info(self, key: AssetKey) -> None:\n from dagster._core.definitions.data_version import (\n extract_data_version_from_entry,\n )\n\n event = self._get_input_asset_event(key)\n if event is None:\n self._input_asset_version_info[key] = None\n else:\n storage_id = event.storage_id\n # Input name will be none if this is an internal dep\n input_name = self.job_def.asset_layer.input_for_asset_key(self.node_handle, key)\n # Exclude AllPartitionMapping for now to avoid huge queries\n if input_name and self.has_asset_partitions_for_input(input_name):\n subset = self.asset_partitions_subset_for_input(\n input_name, require_valid_partitions=False\n )\n input_keys = list(subset.get_partition_keys())\n\n # This check represents a temporary constraint that prevents huge query results for upstream\n # partition data versions from timing out runs. If a partitioned dependency (a) uses an\n # AllPartitionMapping; and (b) has greater than or equal to\n # SKIP_PARTITION_DATA_VERSION_DEPENDENCY_THRESHOLD dependency partitions, then we\n # process it as a non-partitioned dependency (note that this was the behavior for\n # all partition dependencies prior to 2023-08). This means that stale status\n # results cannot be accurately computed for the dependency, and there is thus\n # corresponding logic in the CachingStaleStatusResolver to account for this. 
This\n # constraint should be removed when we have thoroughly examined the performance of\n # the data version retrieval query and can guarantee decent performance.\n if len(input_keys) < SKIP_PARTITION_DATA_VERSION_DEPENDENCY_THRESHOLD:\n data_version = self._get_partitions_data_version_from_keys(key, input_keys)\n else:\n data_version = extract_data_version_from_entry(event.event_log_entry)\n else:\n data_version = extract_data_version_from_entry(event.event_log_entry)\n self._input_asset_version_info[key] = InputAssetVersionInfo(\n storage_id, data_version, event.run_id, event.timestamp\n )\n\n def partition_mapping_for_input(self, input_name: str) -> Optional[PartitionMapping]:\n asset_layer = self.job_def.asset_layer\n upstream_asset_key = asset_layer.asset_key_for_input(self.node_handle, input_name)\n if upstream_asset_key:\n upstream_asset_partitions_def = asset_layer.partitions_def_for_asset(upstream_asset_key)\n assets_def = asset_layer.assets_def_for_node(self.node_handle)\n partitions_def = assets_def.partitions_def if assets_def else None\n explicit_partition_mapping = self.job_def.asset_layer.partition_mapping_for_node_input(\n self.node_handle, upstream_asset_key\n )\n return infer_partition_mapping(\n explicit_partition_mapping,\n partitions_def,\n upstream_asset_partitions_def,\n )\n else:\n return None\n\n def _get_input_asset_event(self, key: AssetKey) -> Optional["EventLogRecord"]:\n event = self.instance.get_latest_data_version_record(key)\n if event:\n self._check_input_asset_event(key, event)\n return event\n\n def _check_input_asset_event(self, key: AssetKey, event: "EventLogRecord") -> None:\n assert event.event_log_entry\n event_data_version = extract_data_version_from_entry(event.event_log_entry)\n if key in self._data_version_cache and self._data_version_cache[key] != event_data_version:\n self.log.warning(\n f"Data version mismatch for asset {key}. Data version from materialization within"\n f" current step is `{self._data_version_cache[key]}`. Data version from most recent"\n f" materialization is `{event_data_version}`. Most recent materialization will be"\n " used for provenance tracking."\n )\n\n def _get_partitions_data_version_from_keys(\n self, key: AssetKey, partition_keys: Sequence[str]\n ) -> "DataVersion":\n from dagster._core.definitions.data_version import (\n DataVersion,\n )\n from dagster._core.events import DagsterEventType\n\n # TODO: this needs to account for observations also\n event_type = DagsterEventType.ASSET_MATERIALIZATION\n tags_by_partition = (\n self.instance._event_storage.get_latest_tags_by_partition( # noqa: SLF001\n key, event_type, [DATA_VERSION_TAG], asset_partitions=list(partition_keys)\n )\n )\n partition_data_versions = [\n pair[1][DATA_VERSION_TAG]\n for pair in sorted(tags_by_partition.items(), key=lambda x: x[0])\n ]\n hash_sig = sha256()\n hash_sig.update(bytearray("".join(partition_data_versions), "utf8"))\n return DataVersion(hash_sig.hexdigest())\n\n # Call this to clear the cache for an input asset record. This is necessary when an old\n # materialization for an asset was loaded during `fetch_external_input_asset_records` because an\n # intrastep asset is not required, but then that asset is materialized during the step. 
If we\n # don't clear the cache for this asset, then we won't use the most up-to-date asset record.\n def wipe_input_asset_version_info(self, key: AssetKey) -> None:\n if key in self._input_asset_version_info:\n del self._input_asset_version_info[key]\n\n def get_output_asset_keys(self) -> AbstractSet[AssetKey]:\n output_keys: Set[AssetKey] = set()\n for step_output in self.step.step_outputs:\n asset_info = self.job_def.asset_layer.asset_info_for_output(\n self.node_handle, step_output.name\n )\n if asset_info is None or not asset_info.is_required:\n continue\n output_keys.add(asset_info.key)\n return output_keys\n\n def has_asset_partitions_for_input(self, input_name: str) -> bool:\n asset_layer = self.job_def.asset_layer\n upstream_asset_key = asset_layer.asset_key_for_input(self.node_handle, input_name)\n\n return (\n upstream_asset_key is not None\n and asset_layer.partitions_def_for_asset(upstream_asset_key) is not None\n )\n\n def asset_partition_key_range_for_input(self, input_name: str) -> PartitionKeyRange:\n subset = self.asset_partitions_subset_for_input(input_name)\n partition_key_ranges = subset.get_partition_key_ranges(\n dynamic_partitions_store=self.instance\n )\n\n if len(partition_key_ranges) != 1:\n check.failed(\n "Tried to access asset partition key range, but there are "\n f"({len(partition_key_ranges)}) key ranges associated with this input.",\n )\n\n return partition_key_ranges[0]\n\n def asset_partitions_subset_for_input(\n self, input_name: str, *, require_valid_partitions: bool = True\n ) -> PartitionsSubset:\n asset_layer = self.job_def.asset_layer\n assets_def = asset_layer.assets_def_for_node(self.node_handle)\n upstream_asset_key = asset_layer.asset_key_for_input(self.node_handle, input_name)\n\n if upstream_asset_key is not None:\n upstream_asset_partitions_def = asset_layer.partitions_def_for_asset(upstream_asset_key)\n\n if upstream_asset_partitions_def is not None:\n partitions_def = assets_def.partitions_def if assets_def else None\n partitions_subset = (\n partitions_def.empty_subset().with_partition_key_range(\n self.asset_partition_key_range, dynamic_partitions_store=self.instance\n )\n if partitions_def\n else None\n )\n partition_mapping = infer_partition_mapping(\n asset_layer.partition_mapping_for_node_input(\n self.node_handle, upstream_asset_key\n ),\n partitions_def,\n upstream_asset_partitions_def,\n )\n mapped_partitions_result = (\n partition_mapping.get_upstream_mapped_partitions_result_for_partitions(\n partitions_subset,\n upstream_asset_partitions_def,\n dynamic_partitions_store=self.instance,\n )\n )\n\n if (\n require_valid_partitions\n and mapped_partitions_result.required_but_nonexistent_partition_keys\n ):\n raise DagsterInvariantViolationError(\n f"Partition key range {self.asset_partition_key_range} in"\n f" {self.node_handle.name} depends on invalid partition keys"\n f" {mapped_partitions_result.required_but_nonexistent_partition_keys} in"\n f" upstream asset {upstream_asset_key}"\n )\n\n return mapped_partitions_result.partitions_subset\n\n check.failed("The input has no asset partitions")\n\n def asset_partition_key_for_input(self, input_name: str) -> str:\n start, end = self.asset_partition_key_range_for_input(input_name)\n if start == end:\n return start\n else:\n check.failed(\n f"Tried to access partition key for input '{input_name}' of step '{self.step.key}',"\n f" but the step input has a partition range: '{start}' to '{end}'."\n )\n\n def _partitions_def_for_output(self, output_name: str) -> 
Optional[PartitionsDefinition]:\n asset_info = self.job_def.asset_layer.asset_info_for_output(\n node_handle=self.node_handle, output_name=output_name\n )\n if asset_info:\n return asset_info.partitions_def\n else:\n return None\n\n def partitions_def_for_output(self, output_name: str) -> Optional[PartitionsDefinition]:\n return self._partitions_def_for_output(output_name)\n\n def has_asset_partitions_for_output(self, output_name: str) -> bool:\n return self._partitions_def_for_output(output_name) is not None\n\n def asset_partition_key_range_for_output(self, output_name: str) -> PartitionKeyRange:\n if self._partitions_def_for_output(output_name) is not None:\n return self.asset_partition_key_range\n\n check.failed("The output has no asset partitions")\n\n def asset_partition_key_for_output(self, output_name: str) -> str:\n start, end = self.asset_partition_key_range_for_output(output_name)\n if start == end:\n return start\n else:\n check.failed(\n f"Tried to access partition key for output '{output_name}' of step"\n f" '{self.step.key}', but the step output has a partition range: '{start}' to"\n f" '{end}'."\n )\n\n def asset_partitions_time_window_for_output(self, output_name: str) -> TimeWindow:\n """The time window for the partitions of the asset correponding to the given output.\n\n Raises an error if either of the following are true:\n - The output asset has no partitioning.\n - The output asset is not partitioned with a TimeWindowPartitionsDefinition or a\n MultiPartitionsDefinition with one time-partitioned dimension.\n """\n partitions_def = self._partitions_def_for_output(output_name)\n\n if not partitions_def:\n raise ValueError(\n "Tried to get asset partitions for an output that does not correspond to a "\n "partitioned asset."\n )\n\n if not has_one_dimension_time_window_partitioning(partitions_def):\n raise ValueError(\n "Tried to get asset partitions for an output that correponds to a partitioned "\n "asset that is not time-partitioned."\n )\n\n partitions_def = cast(\n Union[TimeWindowPartitionsDefinition, MultiPartitionsDefinition], partitions_def\n )\n partition_key_range = self.asset_partition_key_range_for_output(output_name)\n return TimeWindow(\n # mypy thinks partitions_def is <nothing> here because ????\n partitions_def.time_window_for_partition_key(partition_key_range.start).start,\n partitions_def.time_window_for_partition_key(partition_key_range.end).end,\n )\n\n def asset_partitions_time_window_for_input(self, input_name: str) -> TimeWindow:\n """The time window for the partitions of the asset correponding to the given input.\n\n Raises an error if either of the following are true:\n - The input asset has no partitioning.\n - The input asset is not partitioned with a TimeWindowPartitionsDefinition or a\n MultiPartitionsDefinition with one time-partitioned dimension.\n """\n asset_layer = self.job_def.asset_layer\n upstream_asset_key = asset_layer.asset_key_for_input(self.node_handle, input_name)\n\n if upstream_asset_key is None:\n raise ValueError("The input has no corresponding asset")\n\n upstream_asset_partitions_def = asset_layer.partitions_def_for_asset(upstream_asset_key)\n\n if not upstream_asset_partitions_def:\n raise ValueError(\n "Tried to get asset partitions for an input that does not correspond to a "\n "partitioned asset."\n )\n\n if not has_one_dimension_time_window_partitioning(upstream_asset_partitions_def):\n raise ValueError(\n "Tried to get asset partitions for an input that correponds to a partitioned "\n "asset that is not 
time-partitioned."\n )\n\n upstream_asset_partitions_def = cast(\n Union[TimeWindowPartitionsDefinition, MultiPartitionsDefinition],\n upstream_asset_partitions_def,\n )\n partition_key_range = self.asset_partition_key_range_for_input(input_name)\n\n return TimeWindow(\n upstream_asset_partitions_def.time_window_for_partition_key(\n partition_key_range.start\n ).start,\n upstream_asset_partitions_def.time_window_for_partition_key(\n partition_key_range.end\n ).end,\n )\n\n def get_type_loader_context(self) -> "DagsterTypeLoaderContext":\n return DagsterTypeLoaderContext(\n plan_data=self.plan_data,\n execution_data=self._execution_data,\n log_manager=self._log_manager,\n step=self.step,\n output_capture=self._output_capture,\n known_state=self._known_state,\n )\n\n def output_observes_source_asset(self, output_name: str) -> bool:\n """Returns True if this step observes a source asset."""\n asset_layer = self.job_def.asset_layer\n if asset_layer is None:\n return False\n asset_key = asset_layer.asset_key_for_output(self.node_handle, output_name)\n if asset_key is None:\n return False\n return asset_layer.is_observable_for_asset(asset_key)
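The partition time-window helpers above surface on the execution context handed to user compute functions. Below is a hedged sketch, not taken from this module, of how a daily-partitioned asset might read its output window; the asset name and log message are illustrative assumptions.

.. code-block:: python

    # Sketch: reading the output time window from inside a daily-partitioned asset.
    from dagster import DailyPartitionsDefinition, asset


    @asset(partitions_def=DailyPartitionsDefinition(start_date="2023-01-01"))
    def daily_events(context):
        # Spans the start of the first partition key in this run's range to the end
        # of the last one, per asset_partitions_time_window_for_output above.
        window = context.asset_partitions_time_window_for_output("result")
        context.log.info(f"processing {window.start} to {window.end}")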
\n\n\n
[docs]class TypeCheckContext:\n """The ``context`` object available to a type check function on a DagsterType."""\n\n def __init__(\n self,\n run_id: str,\n log_manager: DagsterLogManager,\n scoped_resources_builder: ScopedResourcesBuilder,\n dagster_type: DagsterType,\n ):\n self._run_id = run_id\n self._log = log_manager\n self._resources = scoped_resources_builder.build(dagster_type.required_resource_keys)\n\n @public\n @property\n def resources(self) -> "Resources":\n """An object whose attributes contain the resources available to this op."""\n return self._resources\n\n @public\n @property\n def run_id(self) -> str:\n """The id of this job run."""\n return self._run_id\n\n @public\n @property\n def log(self) -> DagsterLogManager:\n """Centralized log dispatch from user code."""\n return self._log
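For orientation, a minimal sketch of a type check function that receives this context; the `NonEmptyList` type and the check logic are hypothetical, not part of this module.

.. code-block:: python

    # Sketch: a DagsterType whose type_check_fn receives a TypeCheckContext.
    from dagster import DagsterType, TypeCheck


    def _non_empty(context, value) -> TypeCheck:
        # `context` exposes .log, .resources, and .run_id as documented above.
        context.log.info(f"type-checking value in run {context.run_id}")
        return TypeCheck(success=len(value) > 0)


    NonEmptyList = DagsterType(name="NonEmptyList", type_check_fn=_non_empty)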
\n\n\n
[docs]class DagsterTypeLoaderContext(StepExecutionContext):\n """The context object provided to a :py:class:`@dagster_type_loader <dagster_type_loader>`-decorated function during execution.\n\n Users should not construct this object directly.\n """\n\n @public\n @property\n def resources(self) -> "Resources":\n """The resources available to the type loader, specified by the `required_resource_keys` argument of the decorator."""\n return super(DagsterTypeLoaderContext, self).resources\n\n @public\n @property\n def job_def(self) -> "JobDefinition":\n """The underlying job definition being executed."""\n return super(DagsterTypeLoaderContext, self).job_def\n\n @public\n @property\n def op_def(self) -> "OpDefinition":\n """The op for which type loading is occurring."""\n return super(DagsterTypeLoaderContext, self).op_def
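As a rough sketch of where this context appears, a `@dagster_type_loader`-decorated function receives it and can reach the resources named in `required_resource_keys`; the `csv_io` resource key and `load_frame` loader below are made-up illustrations.

.. code-block:: python

    # Sketch: a type loader that reads a value using a required resource.
    from dagster import DagsterType, dagster_type_loader


    @dagster_type_loader(config_schema={"path": str}, required_resource_keys={"csv_io"})
    def load_frame(context, config_value):
        # `context` is the DagsterTypeLoaderContext documented above.
        return context.resources.csv_io.read(config_value["path"])


    Frame = DagsterType(
        name="Frame",
        type_check_fn=lambda _context, value: value is not None,
        loader=load_frame,
    )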
\n
", "current_page_name": "_modules/dagster/_core/execution/context/system", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.execution.context.system"}}, "execute_in_process_result": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.execution.execute_in_process_result

\nfrom typing import Any, Mapping, Optional, Sequence\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.definitions import JobDefinition, NodeHandle\nfrom dagster._core.definitions.events import AssetKey, CoercibleToAssetKey\nfrom dagster._core.definitions.utils import DEFAULT_OUTPUT\nfrom dagster._core.errors import DagsterInvariantViolationError\nfrom dagster._core.events import DagsterEvent\nfrom dagster._core.execution.plan.outputs import StepOutputHandle\nfrom dagster._core.storage.dagster_run import DagsterRun\n\nfrom .execution_result import ExecutionResult\n\n\n
[docs]class ExecuteInProcessResult(ExecutionResult):\n """Result object returned by in-process testing APIs.\n\n Users should not instantiate this object directly. Used for retrieving run success, events, and outputs from execution methods that return this object.\n\n This object is returned by:\n - :py:meth:`dagster.GraphDefinition.execute_in_process`\n - :py:meth:`dagster.JobDefinition.execute_in_process`\n - :py:meth:`dagster.materialize_to_memory`\n - :py:meth:`dagster.materialize`\n """\n\n _handle: NodeHandle\n _event_list: Sequence[DagsterEvent]\n _dagster_run: DagsterRun\n _output_capture: Mapping[StepOutputHandle, Any]\n _job_def: JobDefinition\n\n def __init__(\n self,\n event_list: Sequence[DagsterEvent],\n dagster_run: DagsterRun,\n output_capture: Optional[Mapping[StepOutputHandle, Any]],\n job_def: JobDefinition,\n ):\n self._job_def = job_def\n\n self._event_list = event_list\n self._dagster_run = dagster_run\n\n self._output_capture = check.opt_mapping_param(\n output_capture, "output_capture", key_type=StepOutputHandle\n )\n\n @public\n @property\n def job_def(self) -> JobDefinition:\n """JobDefinition: The job definition that was executed."""\n return self._job_def\n\n @public\n @property\n def dagster_run(self) -> DagsterRun:\n """DagsterRun: The Dagster run that was executed."""\n return self._dagster_run\n\n @public\n @property\n def all_events(self) -> Sequence[DagsterEvent]:\n """List[DagsterEvent]: All dagster events emitted during execution."""\n return self._event_list\n\n @public\n @property\n def run_id(self) -> str:\n """str: The run ID of the executed :py:class:`DagsterRun`."""\n return self.dagster_run.run_id\n\n def _get_output_for_handle(self, handle: NodeHandle, output_name: str) -> Any:\n mapped_outputs = {}\n step_key = str(handle)\n output_found = False\n for step_output_handle, value in self._output_capture.items():\n # For the mapped output case, where step keys are in the format\n # "step_key[upstream_mapped_output_name]" within the step output handle.\n if (\n step_output_handle.step_key.startswith(f"{step_key}[")\n and step_output_handle.output_name == output_name\n ):\n output_found = True\n key_start = step_output_handle.step_key.find("[")\n key_end = step_output_handle.step_key.find("]")\n upstream_mapped_output_name = step_output_handle.step_key[key_start + 1 : key_end]\n mapped_outputs[upstream_mapped_output_name] = value\n\n # For all other cases, search for exact match.\n elif (\n step_key == step_output_handle.step_key\n and step_output_handle.output_name == output_name\n ):\n output_found = True\n if not step_output_handle.mapping_key:\n return self._output_capture[step_output_handle]\n mapped_outputs[step_output_handle.mapping_key] = value\n\n if not output_found:\n raise DagsterInvariantViolationError(\n f"No outputs found for output '{output_name}' from node '{handle}'."\n )\n return mapped_outputs\n\n
[docs] @public\n def output_for_node(self, node_str: str, output_name: str = DEFAULT_OUTPUT) -> Any:\n """Retrieves output value with a particular name from the in-process run of the job.\n\n Args:\n node_str (str): Name of the op/graph whose output should be retrieved. If the intended\n graph/op is nested within another graph, the syntax is `outer_graph.inner_node`.\n output_name (Optional[str]): Name of the output on the op/graph to retrieve. Defaults to\n `result`, the default output name in dagster.\n\n Returns:\n Any: The value of the retrieved output.\n """\n return super(ExecuteInProcessResult, self).output_for_node(\n node_str, output_name=output_name\n )
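A small usage sketch, assuming a job with a single op named `add_one` (the names are illustrative).

.. code-block:: python

    from dagster import job, op


    @op
    def add_one():
        return 1


    @job
    def my_job():
        add_one()


    result = my_job.execute_in_process()
    assert result.output_for_node("add_one") == 1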
\n\n
[docs] @public\n def asset_value(self, asset_key: CoercibleToAssetKey) -> Any:\n """Retrieves the value of an asset that was materialized during the execution of the job.\n\n Args:\n asset_key (CoercibleToAssetKey): The key of the asset to retrieve.\n\n Returns:\n Any: The value of the retrieved asset.\n """\n node_output_handle = self._job_def.asset_layer.node_output_handle_for_asset(\n AssetKey.from_coercible(asset_key)\n )\n return self.output_for_node(\n node_str=str(node_output_handle.node_handle), output_name=node_output_handle.output_name\n )
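A usage sketch with `dagster.materialize`, which returns this result type; the asset name is illustrative.

.. code-block:: python

    from dagster import asset, materialize


    @asset
    def my_asset():
        return 5


    result = materialize([my_asset])
    assert result.success
    assert result.asset_value("my_asset") == 5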
\n\n
[docs] @public\n def output_value(self, output_name: str = DEFAULT_OUTPUT) -> Any:\n """Retrieves output of top-level job, if an output is returned.\n\n Args:\n output_name (Optional[str]): The name of the output to retrieve. Defaults to `result`,\n the default output name in dagster.\n\n Returns:\n Any: The value of the retrieved output.\n """\n return super(ExecuteInProcessResult, self).output_value(output_name=output_name)
\n
", "current_page_name": "_modules/dagster/_core/execution/execute_in_process_result", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.execution.execute_in_process_result"}, "job_execution_result": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.execution.job_execution_result

\nfrom typing import Any, Sequence\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.definitions import JobDefinition, NodeHandle\nfrom dagster._core.definitions.utils import DEFAULT_OUTPUT\nfrom dagster._core.errors import DagsterInvariantViolationError\nfrom dagster._core.events import DagsterEvent\nfrom dagster._core.execution.plan.utils import build_resources_for_manager\nfrom dagster._core.storage.dagster_run import DagsterRun\n\nfrom .execution_result import ExecutionResult\n\n\n
[docs]class JobExecutionResult(ExecutionResult):\n """Result object returned by :py:func:`dagster.execute_job`.\n\n Used for retrieving run success, events, and outputs from `execute_job`.\n Users should not directly instantiate this class.\n\n Events and run information can be retrieved off of the object directly. In\n order to access outputs, the `ExecuteJobResult` object needs to be opened\n as a context manager, which will re-initialize the resources from\n execution.\n """\n\n def __init__(self, job_def, reconstruct_context, event_list, dagster_run):\n self._job_def = job_def\n self._reconstruct_context = reconstruct_context\n self._context = None\n self._event_list = event_list\n self._dagster_run = dagster_run\n\n def __enter__(self) -> "JobExecutionResult":\n context = self._reconstruct_context.__enter__()\n self._context = context\n return self\n\n def __exit__(self, *exc):\n exit_result = self._reconstruct_context.__exit__(*exc)\n self._context = None\n return exit_result\n\n @public\n @property\n def job_def(self) -> JobDefinition:\n """JobDefinition: The job definition that was executed."""\n return self._job_def\n\n @public\n @property\n def dagster_run(self) -> DagsterRun:\n """DagsterRun: The Dagster run that was executed."""\n return self._dagster_run\n\n @public\n @property\n def all_events(self) -> Sequence[DagsterEvent]:\n """Sequence[DagsterEvent]: List of all events yielded by the job execution."""\n return self._event_list\n\n @public\n @property\n def run_id(self) -> str:\n """str: The id of the Dagster run that was executed."""\n return self.dagster_run.run_id\n\n
[docs] @public\n def output_value(self, output_name: str = DEFAULT_OUTPUT) -> Any:\n """Retrieves output of top-level job, if an output is returned.\n\n In order to use this method, the `JobExecutionResult` object must be opened as a context manager. If this method is used without opening the context manager, it will result in a :py:class:`DagsterInvariantViolationError`. If the top-level job has no output, calling this method will also result in a :py:class:`DagsterInvariantViolationError`.\n\n Args:\n output_name (Optional[str]): The name of the output to retrieve. Defaults to `result`,\n the default output name in dagster.\n\n Returns:\n Any: The value of the retrieved output.\n """\n return super(JobExecutionResult, self).output_value(output_name=output_name)
\n\n
[docs] @public\n def output_for_node(self, node_str: str, output_name: str = DEFAULT_OUTPUT) -> Any:\n """Retrieves output value with a particular name from the run of the job.\n\n In order to use this method, the `JobExecutionResult` object must be opened as a context manager. If this method is used without opening the context manager, it will result in a :py:class:`DagsterInvariantViolationError`.\n\n Args:\n node_str (str): Name of the op/graph whose output should be retrieved. If the intended\n graph/op is nested within another graph, the syntax is `outer_graph.inner_node`.\n output_name (Optional[str]): Name of the output on the op/graph to retrieve. Defaults to\n `result`, the default output name in dagster.\n\n Returns:\n Any: The value of the retrieved output.\n """\n return super(JobExecutionResult, self).output_for_node(node_str, output_name=output_name)
\n\n def _get_output_for_handle(self, handle: NodeHandle, output_name: str) -> Any:\n if not self._context:\n raise DagsterInvariantViolationError(\n "In order to access output objects, the result of `execute_job` must be opened as a"\n " context manager: 'with execute_job(...) as result:"\n )\n found = False\n result = None\n for compute_step_event in self.compute_events_for_handle(handle):\n if (\n compute_step_event.is_successful_output\n and compute_step_event.step_output_data.output_name == output_name\n ):\n found = True\n output = compute_step_event.step_output_data\n step = self._context.execution_plan.get_step_by_key(compute_step_event.step_key)\n dagster_type = (\n self.job_def.get_node(handle).output_def_named(output_name).dagster_type\n )\n value = self._get_value(self._context.for_step(step), output, dagster_type)\n check.invariant(\n not (output.mapping_key and step.get_mapping_key()),\n "Not set up to handle mapped outputs downstream of mapped steps",\n )\n mapping_key = output.mapping_key or step.get_mapping_key()\n if mapping_key:\n if result is None:\n result = {mapping_key: value}\n else:\n result[mapping_key] = (\n value # pylint:disable=unsupported-assignment-operation\n )\n else:\n result = value\n\n if found:\n return result\n\n node = self.job_def.get_node(handle)\n raise DagsterInvariantViolationError(\n f"Did not find result {output_name} in {node.describe_node()}"\n )\n\n def _get_value(self, context, step_output_data, dagster_type):\n step_output_handle = step_output_data.step_output_handle\n manager = context.get_io_manager(step_output_handle)\n manager_key = context.execution_plan.get_manager_key(step_output_handle, self.job_def)\n res = manager.load_input(\n context.for_input_manager(\n name=None,\n config=None,\n metadata=None,\n dagster_type=dagster_type,\n source_handle=step_output_handle,\n resource_config=context.resolved_run_config.resources[manager_key].config,\n resources=build_resources_for_manager(manager_key, context),\n )\n )\n return res
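A sketch of the context-manager usage the docstrings above describe, assuming a module-scope job that `reconstructable` can locate and a `DAGSTER_HOME`-backed instance; the op and job names are illustrative.

.. code-block:: python

    from dagster import DagsterInstance, execute_job, job, op, reconstructable


    @op
    def add_one():
        return 1


    @job
    def my_job():
        add_one()


    # Outputs are only loadable while the result is open as a context manager,
    # since resources are re-initialized from the run. Assumes DAGSTER_HOME is set.
    with execute_job(reconstructable(my_job), instance=DagsterInstance.get()) as result:
        assert result.success
        value = result.output_for_node("add_one")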
\n
", "current_page_name": "_modules/dagster/_core/execution/job_execution_result", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.execution.job_execution_result"}, "validate_run_config": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.execution.validate_run_config

\nfrom typing import Any, Mapping, Optional, Union\n\nimport dagster._check as check\nfrom dagster._core.definitions import JobDefinition\nfrom dagster._core.definitions.run_config import RunConfig, convert_config_input\nfrom dagster._core.system_config.objects import ResolvedRunConfig\n\n\n
[docs]def validate_run_config(\n job_def: JobDefinition,\n run_config: Optional[Union[Mapping[str, Any], RunConfig]] = None,\n) -> Mapping[str, Any]:\n """Function to validate a provided run config blob against a given job.\n\n If validation is successful, this function will return a dictionary representation of the\n validated config actually used during execution.\n\n Args:\n job_def (JobDefinition): The job definition to validate run\n config against\n run_config (Optional[Dict[str, Any]]): The run config to validate\n\n Returns:\n Dict[str, Any]: A dictionary representation of the validated config.\n """\n check.inst_param(job_def, "job_def", JobDefinition)\n run_config = check.opt_mapping_param(\n convert_config_input(run_config), "run_config", key_type=str\n )\n\n return ResolvedRunConfig.build(job_def, run_config).to_dict()
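A usage sketch validating config for a one-op job; the op and config names are illustrative.

.. code-block:: python

    from dagster import job, op, validate_run_config


    @op(config_schema={"limit": int})
    def filter_rows(context):
        return context.op_config["limit"]


    @job
    def my_job():
        filter_rows()


    # Returns the validated, fully resolved config dict; raises DagsterInvalidConfigError
    # if the blob does not satisfy the job's config schema.
    validated = validate_run_config(my_job, {"ops": {"filter_rows": {"config": {"limit": 10}}}})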
\n
", "current_page_name": "_modules/dagster/_core/execution/validate_run_config", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.execution.validate_run_config"}, "with_resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.execution.with_resources

\nfrom typing import Any, Iterable, List, Mapping, Optional, Sequence, TypeVar, cast\n\nfrom dagster import _check as check\nfrom dagster._core.execution.build_resources import wrap_resources_for_execution\nfrom dagster._utils.merger import merge_dicts\n\nfrom ..._config import Shape\nfrom ..definitions.resource_requirement import ResourceAddable\nfrom ..definitions.utils import DEFAULT_IO_MANAGER_KEY\nfrom ..errors import DagsterInvalidConfigError, DagsterInvalidInvocationError\n\nT = TypeVar("T", bound=ResourceAddable)\n\n\n
[docs]def with_resources(\n definitions: Iterable[T],\n resource_defs: Mapping[str, object],\n resource_config_by_key: Optional[Mapping[str, Any]] = None,\n) -> Sequence[T]:\n """Adds dagster resources to copies of resource-requiring dagster definitions.\n\n An error will be thrown if any provided definitions have a conflicting\n resource definition provided for a key provided to resource_defs. Resource\n config can be provided, with keys in the config dictionary corresponding to\n the keys for each resource definition. If any definition has unsatisfied\n resource keys after applying with_resources, an error will be thrown.\n\n Args:\n definitions (Iterable[ResourceAddable]): Dagster definitions to provide resources to.\n resource_defs (Mapping[str, object]):\n Mapping of resource keys to objects to satisfy\n resource requirements of provided dagster definitions.\n resource_config_by_key (Optional[Mapping[str, Any]]):\n Specifies config for provided resources. The key in this dictionary\n corresponds to configuring the same key in the resource_defs\n dictionary.\n\n Examples:\n .. code-block:: python\n\n from dagster import asset, resource, with_resources\n\n @resource(config_schema={"bar": str})\n def foo_resource():\n ...\n\n @asset(required_resource_keys={"foo"})\n def asset1(context):\n foo = context.resources.foo\n ...\n\n @asset(required_resource_keys={"foo"})\n def asset2(context):\n foo = context.resources.foo\n ...\n\n asset1_with_foo, asset2_with_foo = with_resources(\n [the_asset, other_asset],\n resource_config_by_key={\n "foo": {\n "config": {"bar": ...}\n }\n }\n )\n """\n from dagster._config import validate_config\n from dagster._core.definitions.job_definition import (\n default_job_io_manager_with_fs_io_manager_schema,\n )\n\n check.mapping_param(resource_defs, "resource_defs")\n resource_config_by_key = check.opt_mapping_param(\n resource_config_by_key, "resource_config_by_key"\n )\n\n resource_defs = wrap_resources_for_execution(\n merge_dicts(\n {DEFAULT_IO_MANAGER_KEY: default_job_io_manager_with_fs_io_manager_schema},\n resource_defs,\n )\n )\n\n for key, resource_def in resource_defs.items():\n if key in resource_config_by_key:\n resource_config = resource_config_by_key[key]\n if not isinstance(resource_config, dict) or "config" not in resource_config:\n raise DagsterInvalidInvocationError(\n f"Error with config for resource key '{key}': Expected a "\n "dictionary of the form {'config': ...}, but received "\n f"{resource_config}"\n )\n\n outer_config_shape = Shape({"config": resource_def.get_config_field()})\n config_evr = validate_config(outer_config_shape, resource_config)\n if not config_evr.success:\n raise DagsterInvalidConfigError(\n f"Error when applying config for resource with key '{key}' ",\n config_evr.errors,\n resource_config,\n )\n resource_defs[key] = resource_defs[key].configured(resource_config["config"])\n\n transformed_defs: List[T] = []\n for definition in definitions:\n transformed_defs.append(cast(T, definition.with_resources(resource_defs)))\n\n return transformed_defs
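Since the docstring example above elides the `resource_defs` argument, here is a fuller sketch under the same setup; the resource and asset names are illustrative.

.. code-block:: python

    from dagster import asset, resource, with_resources


    @resource(config_schema={"bar": str})
    def foo_resource(init_context):
        return init_context.resource_config["bar"]


    @asset(required_resource_keys={"foo"})
    def asset1(context):
        return context.resources.foo


    (asset1_with_foo,) = with_resources(
        [asset1],
        resource_defs={"foo": foo_resource},
        resource_config_by_key={"foo": {"config": {"bar": "baz"}}},
    )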
\n
", "current_page_name": "_modules/dagster/_core/execution/with_resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.execution.with_resources"}}, "executor": {"base": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.executor.base

\nfrom abc import ABC, abstractmethod\nfrom typing import TYPE_CHECKING, Iterator\n\nfrom dagster._annotations import public\nfrom dagster._core.execution.retries import RetryMode\n\nif TYPE_CHECKING:\n    from dagster._core.events import DagsterEvent\n    from dagster._core.execution.context.system import PlanOrchestrationContext\n    from dagster._core.execution.plan.plan import ExecutionPlan\n\n\n
[docs]class Executor(ABC):\n
[docs] @public\n @abstractmethod\n def execute(\n self, plan_context: "PlanOrchestrationContext", execution_plan: "ExecutionPlan"\n ) -> Iterator["DagsterEvent"]:\n """For the given context and execution plan, orchestrate a series of sub plan executions in a way that satisfies the whole plan being executed.\n\n Args:\n plan_context (PlanOrchestrationContext): The plan's orchestration context.\n execution_plan (ExecutionPlan): The plan to execute.\n\n Returns:\n A stream of dagster events.\n """
\n\n @public\n @property\n @abstractmethod\n def retries(self) -> RetryMode:\n """Whether retries are enabled or disabled for this instance of the executor.\n\n Executors should allow this to be controlled via configuration if possible.\n\n Returns: RetryMode\n """
\n
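A minimal structural sketch of a subclass satisfying the two abstract members above; it is not a working executor, just the shape, and the class name is made up.

.. code-block:: python

    from typing import TYPE_CHECKING, Iterator

    from dagster._core.execution.retries import RetryMode
    from dagster._core.executor.base import Executor

    if TYPE_CHECKING:
        from dagster._core.events import DagsterEvent
        from dagster._core.execution.context.system import PlanOrchestrationContext
        from dagster._core.execution.plan.plan import ExecutionPlan


    class NoOpExecutor(Executor):
        def execute(
            self, plan_context: "PlanOrchestrationContext", execution_plan: "ExecutionPlan"
        ) -> Iterator["DagsterEvent"]:
            # A real executor would orchestrate step execution here and yield DagsterEvents.
            yield from ()

        @property
        def retries(self) -> RetryMode:
            return RetryMode.DISABLED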
", "current_page_name": "_modules/dagster/_core/executor/base", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.executor.base"}, "init": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.executor.init

\nfrom typing import Mapping, NamedTuple\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr\nfrom dagster._core.definitions import ExecutorDefinition, IJob\nfrom dagster._core.instance import DagsterInstance\n\n\n
[docs]class InitExecutorContext(\n NamedTuple(\n "InitExecutorContext",\n [\n ("job", PublicAttr[IJob]),\n ("executor_def", PublicAttr[ExecutorDefinition]),\n ("executor_config", PublicAttr[Mapping[str, object]]),\n ("instance", PublicAttr[DagsterInstance]),\n ],\n )\n):\n """Executor-specific initialization context.\n\n Attributes:\n job (IJob): The job to be executed.\n executor_def (ExecutorDefinition): The definition of the executor currently being\n constructed.\n executor_config (dict): The parsed config passed to the executor.\n instance (DagsterInstance): The current instance.\n """\n\n def __new__(\n cls,\n job: IJob,\n executor_def: ExecutorDefinition,\n executor_config: Mapping[str, object],\n instance: DagsterInstance,\n ):\n return super(InitExecutorContext, cls).__new__(\n cls,\n job=check.inst_param(job, "job", IJob),\n executor_def=check.inst_param(executor_def, "executor_def", ExecutorDefinition),\n executor_config=check.mapping_param(executor_config, "executor_config", key_type=str),\n instance=check.inst_param(instance, "instance", DagsterInstance),\n )
\n
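For orientation, a sketch of where this context is received: an `@executor`-decorated definition gets an `InitExecutorContext` at construction time. The definition below is a stub that raises rather than returning a real Executor; its name is illustrative.

.. code-block:: python

    from dagster import executor
    from dagster._core.executor.init import InitExecutorContext


    @executor(name="my_stub_executor")
    def my_stub_executor(init_context: InitExecutorContext):
        # init_context.job, .executor_def, .executor_config, and .instance are the
        # attributes documented above; a real definition would build and return an Executor.
        raise NotImplementedError("stub for illustration only")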
", "current_page_name": "_modules/dagster/_core/executor/init", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.executor.init"}}, "instance": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.instance

\nimport logging\nimport logging.config\nimport os\nimport sys\nimport time\nimport weakref\nfrom abc import abstractmethod\nfrom collections import defaultdict\nfrom enum import Enum\nfrom tempfile import TemporaryDirectory\nfrom types import TracebackType\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Callable,\n    Dict,\n    Generic,\n    Iterable,\n    List,\n    Mapping,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n    Type,\n    Union,\n    cast,\n)\n\nimport yaml\nfrom typing_extensions import Protocol, Self, TypeAlias, TypeVar, runtime_checkable\n\nimport dagster._check as check\nfrom dagster._annotations import experimental, public\nfrom dagster._core.definitions.asset_check_evaluation import (\n    AssetCheckEvaluation,\n    AssetCheckEvaluationPlanned,\n)\nfrom dagster._core.definitions.data_version import extract_data_provenance_from_entry\nfrom dagster._core.definitions.events import AssetKey, AssetObservation\nfrom dagster._core.errors import (\n    DagsterHomeNotSetError,\n    DagsterInvalidInvocationError,\n    DagsterInvariantViolationError,\n    DagsterRunAlreadyExists,\n    DagsterRunConflict,\n)\nfrom dagster._core.log_manager import DagsterLogRecord\nfrom dagster._core.origin import JobPythonOrigin\nfrom dagster._core.storage.dagster_run import (\n    IN_PROGRESS_RUN_STATUSES,\n    DagsterRun,\n    DagsterRunStatsSnapshot,\n    DagsterRunStatus,\n    JobBucket,\n    RunPartitionData,\n    RunRecord,\n    RunsFilter,\n    TagBucket,\n)\nfrom dagster._core.storage.tags import (\n    ASSET_PARTITION_RANGE_END_TAG,\n    ASSET_PARTITION_RANGE_START_TAG,\n    PARENT_RUN_ID_TAG,\n    PARTITION_NAME_TAG,\n    RESUME_RETRY_TAG,\n    ROOT_RUN_ID_TAG,\n)\nfrom dagster._serdes import ConfigurableClass\nfrom dagster._seven import get_current_datetime_in_utc\nfrom dagster._utils import PrintFn, traced\nfrom dagster._utils.error import serializable_error_info_from_exc_info\nfrom dagster._utils.merger import merge_dicts\nfrom dagster._utils.warnings import (\n    deprecation_warning,\n    experimental_warning,\n)\n\nfrom .config import (\n    DAGSTER_CONFIG_YAML_FILENAME,\n    DEFAULT_LOCAL_CODE_SERVER_STARTUP_TIMEOUT,\n    get_default_tick_retention_settings,\n    get_tick_retention_settings,\n)\nfrom .ref import InstanceRef\n\n# 'airflow_execution_date' and 'is_airflow_ingest_pipeline' are hardcoded tags used in the\n# airflow ingestion logic (see: dagster_pipeline_factory.py). 
'airflow_execution_date' stores the\n# 'execution_date' used in Airflow operator execution and 'is_airflow_ingest_pipeline' determines\n# whether 'airflow_execution_date' is needed.\n# https://github.com/dagster-io/dagster/issues/2403\nAIRFLOW_EXECUTION_DATE_STR = "airflow_execution_date"\nIS_AIRFLOW_INGEST_PIPELINE_STR = "is_airflow_ingest_pipeline"\n\n# Our internal guts can handle empty strings for job name and run id\n# However making these named constants for documentation, to encode where we are making the assumption,\n# and to allow us to change this more easily in the future, provided we are disciplined about\n# actually using this constants.\nRUNLESS_RUN_ID = ""\nRUNLESS_JOB_NAME = ""\n\nif TYPE_CHECKING:\n    from dagster._core.debug import DebugRunPayload\n    from dagster._core.definitions.asset_check_spec import AssetCheckKey\n    from dagster._core.definitions.job_definition import (\n        JobDefinition,\n    )\n    from dagster._core.definitions.partition import PartitionsDefinition\n    from dagster._core.definitions.repository_definition.repository_definition import (\n        RepositoryLoadData,\n    )\n    from dagster._core.definitions.run_request import InstigatorType\n    from dagster._core.event_api import EventHandlerFn\n    from dagster._core.events import (\n        AssetMaterialization,\n        DagsterEvent,\n        DagsterEventType,\n        EngineEventData,\n    )\n    from dagster._core.events.log import EventLogEntry\n    from dagster._core.execution.backfill import BulkActionStatus, PartitionBackfill\n    from dagster._core.execution.plan.plan import ExecutionPlan\n    from dagster._core.execution.plan.resume_retry import ReexecutionStrategy\n    from dagster._core.execution.stats import RunStepKeyStatsSnapshot\n    from dagster._core.host_representation import (\n        CodeLocation,\n        ExternalJob,\n        ExternalJobOrigin,\n        ExternalSensor,\n        HistoricalJob,\n    )\n    from dagster._core.host_representation.external import ExternalSchedule\n    from dagster._core.launcher import RunLauncher\n    from dagster._core.run_coordinator import RunCoordinator\n    from dagster._core.scheduler import Scheduler, SchedulerDebugInfo\n    from dagster._core.scheduler.instigation import (\n        InstigatorState,\n        InstigatorStatus,\n        InstigatorTick,\n        TickData,\n        TickStatus,\n    )\n    from dagster._core.secrets import SecretsLoader\n    from dagster._core.snap import ExecutionPlanSnapshot, JobSnapshot\n    from dagster._core.storage.asset_check_execution_record import AssetCheckInstanceSupport\n    from dagster._core.storage.compute_log_manager import ComputeLogManager\n    from dagster._core.storage.daemon_cursor import DaemonCursorStorage\n    from dagster._core.storage.event_log import EventLogStorage\n    from dagster._core.storage.event_log.base import (\n        AssetRecord,\n        EventLogConnection,\n        EventLogRecord,\n        EventRecordsFilter,\n    )\n    from dagster._core.storage.partition_status_cache import (\n        AssetPartitionStatus,\n        AssetStatusCacheValue,\n    )\n    from dagster._core.storage.root import LocalArtifactStorage\n    from dagster._core.storage.runs import RunStorage\n    from dagster._core.storage.schedules import ScheduleStorage\n    from dagster._core.storage.sql import AlembicVersion\n    from dagster._core.workspace.workspace import IWorkspace\n    from dagster._daemon.types import DaemonHeartbeat, DaemonStatus\n\n\nDagsterInstanceOverrides: TypeAlias = 
Mapping[str, Any]\n\n\ndef _check_run_equality(\n    pipeline_run: DagsterRun, candidate_run: DagsterRun\n) -> Mapping[str, Tuple[Any, Any]]:\n    field_diff: Dict[str, Tuple[Any, Any]] = {}\n    for field in pipeline_run._fields:\n        expected_value = getattr(pipeline_run, field)\n        candidate_value = getattr(candidate_run, field)\n        if expected_value != candidate_value:\n            field_diff[field] = (expected_value, candidate_value)\n\n    return field_diff\n\n\ndef _format_field_diff(field_diff: Mapping[str, Tuple[Any, Any]]) -> str:\n    return "\\n".join(\n        [\n            (\n                "    {field_name}:\\n"\n                + "        Expected: {expected_value}\\n"\n                + "        Received: {candidate_value}"\n            ).format(\n                field_name=field_name,\n                expected_value=expected_value,\n                candidate_value=candidate_value,\n            )\n            for field_name, (\n                expected_value,\n                candidate_value,\n            ) in field_diff.items()\n        ]\n    )\n\n\nclass _EventListenerLogHandler(logging.Handler):\n    def __init__(self, instance: "DagsterInstance"):\n        self._instance = instance\n        super(_EventListenerLogHandler, self).__init__()\n\n    def emit(self, record: DagsterLogRecord) -> None:\n        from dagster._core.events import EngineEventData\n        from dagster._core.events.log import StructuredLoggerMessage, construct_event_record\n\n        event = construct_event_record(\n            StructuredLoggerMessage(\n                name=record.name,\n                message=record.msg,\n                level=record.levelno,\n                meta=record.dagster_meta,  # type: ignore\n                record=record,\n            )\n        )\n\n        try:\n            self._instance.handle_new_event(event)\n        except Exception as e:\n            sys.stderr.write(f"Exception while writing logger call to event log: {e}\\n")\n            if event.dagster_event:\n                # Swallow user-generated log failures so that the entire step/run doesn't fail, but\n                # raise failures writing system-generated log events since they are the source of\n                # truth for the state of the run\n                raise\n            elif event.run_id:\n                self._instance.report_engine_event(\n                    "Exception while writing logger call to event log",\n                    job_name=event.job_name,\n                    run_id=event.run_id,\n                    step_key=event.step_key,\n                    engine_event_data=EngineEventData(\n                        error=serializable_error_info_from_exc_info(sys.exc_info()),\n                    ),\n                )\n\n\nclass InstanceType(Enum):\n    PERSISTENT = "PERSISTENT"\n    EPHEMERAL = "EPHEMERAL"\n\n\nT_DagsterInstance = TypeVar("T_DagsterInstance", bound="DagsterInstance", default="DagsterInstance")\n\n\nclass MayHaveInstanceWeakref(Generic[T_DagsterInstance]):\n    """Mixin for classes that can have a weakref back to a Dagster instance."""\n\n    _instance_weakref: "Optional[weakref.ReferenceType[T_DagsterInstance]]"\n\n    def __init__(self):\n        self._instance_weakref = None\n\n    @property\n    def has_instance(self) -> bool:\n        return hasattr(self, "_instance_weakref") and (self._instance_weakref is not None)\n\n    @property\n    def _instance(self) -> T_DagsterInstance:\n        instance = (\n            self._instance_weakref()\n     
       # Backcompat with custom subclasses that don't call super().__init__()\n            # in their own __init__ implementations\n            if (hasattr(self, "_instance_weakref") and self._instance_weakref is not None)\n            else None\n        )\n        if instance is None:\n            raise DagsterInvariantViolationError(\n                "Attempted to resolve undefined DagsterInstance weakref."\n            )\n        else:\n            return instance\n\n    def register_instance(self, instance: T_DagsterInstance) -> None:\n        check.invariant(\n            # Backcompat with custom subclasses that don't call super().__init__()\n            # in their own __init__ implementations\n            (not hasattr(self, "_instance_weakref") or self._instance_weakref is None),\n            "Must only call initialize once",\n        )\n\n        # Store a weakref to avoid a circular reference / enable GC\n        self._instance_weakref = weakref.ref(instance)\n\n\n@runtime_checkable\nclass DynamicPartitionsStore(Protocol):\n    @abstractmethod\n    def get_dynamic_partitions(self, partitions_def_name: str) -> Sequence[str]: ...\n\n    @abstractmethod\n    def has_dynamic_partition(self, partitions_def_name: str, partition_key: str) -> bool: ...\n\n\n
[docs]class DagsterInstance(DynamicPartitionsStore):\n """Core abstraction for managing Dagster's access to storage and other resources.\n\n Use DagsterInstance.get() to grab the current DagsterInstance which will load based on\n the values in the ``dagster.yaml`` file in ``$DAGSTER_HOME``.\n\n Alternatively, DagsterInstance.ephemeral() can use used which provides a set of\n transient in-memory components.\n\n Configuration of this class should be done by setting values in ``$DAGSTER_HOME/dagster.yaml``.\n For example, to use Postgres for dagster storage, you can write a ``dagster.yaml`` such as the\n following:\n\n .. literalinclude:: ../../../../../examples/docs_snippets/docs_snippets/deploying/dagster-pg.yaml\n :caption: dagster.yaml\n :language: YAML\n\n Args:\n instance_type (InstanceType): Indicates whether the instance is ephemeral or persistent.\n Users should not attempt to set this value directly or in their ``dagster.yaml`` files.\n local_artifact_storage (LocalArtifactStorage): The local artifact storage is used to\n configure storage for any artifacts that require a local disk, such as schedules, or\n when using the filesystem system storage to manage files and intermediates. By default,\n this will be a :py:class:`dagster._core.storage.root.LocalArtifactStorage`. Configurable\n in ``dagster.yaml`` using the :py:class:`~dagster.serdes.ConfigurableClass`\n machinery.\n run_storage (RunStorage): The run storage is used to store metadata about ongoing and past\n pipeline runs. By default, this will be a\n :py:class:`dagster._core.storage.runs.SqliteRunStorage`. Configurable in ``dagster.yaml``\n using the :py:class:`~dagster.serdes.ConfigurableClass` machinery.\n event_storage (EventLogStorage): Used to store the structured event logs generated by\n pipeline runs. By default, this will be a\n :py:class:`dagster._core.storage.event_log.SqliteEventLogStorage`. Configurable in\n ``dagster.yaml`` using the :py:class:`~dagster.serdes.ConfigurableClass` machinery.\n compute_log_manager (Optional[ComputeLogManager]): The compute log manager handles stdout\n and stderr logging for op compute functions. By default, this will be a\n :py:class:`dagster._core.storage.local_compute_log_manager.LocalComputeLogManager`.\n Configurable in ``dagster.yaml`` using the\n :py:class:`~dagster.serdes.ConfigurableClass` machinery.\n run_coordinator (Optional[RunCoordinator]): A runs coordinator may be used to manage the execution\n of pipeline runs.\n run_launcher (Optional[RunLauncher]): Optionally, a run launcher may be used to enable\n a Dagster instance to launch pipeline runs, e.g. on a remote Kubernetes cluster, in\n addition to running them locally.\n settings (Optional[Dict]): Specifies certain per-instance settings,\n such as feature flags. 
These are set in the ``dagster.yaml`` under a set of whitelisted\n keys.\n ref (Optional[InstanceRef]): Used by internal machinery to pass instances across process\n boundaries.\n """\n\n # Stores TemporaryDirectory instances that were created for DagsterInstance.local_temp() calls\n # to be removed once the instance is garbage collected.\n _TEMP_DIRS: "weakref.WeakKeyDictionary[DagsterInstance, TemporaryDirectory]" = (\n weakref.WeakKeyDictionary()\n )\n\n def __init__(\n self,\n instance_type: InstanceType,\n local_artifact_storage: "LocalArtifactStorage",\n run_storage: "RunStorage",\n event_storage: "EventLogStorage",\n run_coordinator: Optional["RunCoordinator"],\n compute_log_manager: Optional["ComputeLogManager"],\n run_launcher: Optional["RunLauncher"],\n scheduler: Optional["Scheduler"] = None,\n schedule_storage: Optional["ScheduleStorage"] = None,\n settings: Optional[Mapping[str, Any]] = None,\n secrets_loader: Optional["SecretsLoader"] = None,\n ref: Optional[InstanceRef] = None,\n **_kwargs: Any, # we accept kwargs for forward-compat of custom instances\n ):\n from dagster._core.launcher import RunLauncher\n from dagster._core.run_coordinator import RunCoordinator\n from dagster._core.scheduler import Scheduler\n from dagster._core.secrets import SecretsLoader\n from dagster._core.storage.captured_log_manager import CapturedLogManager\n from dagster._core.storage.compute_log_manager import ComputeLogManager\n from dagster._core.storage.event_log import EventLogStorage\n from dagster._core.storage.root import LocalArtifactStorage\n from dagster._core.storage.runs import RunStorage\n from dagster._core.storage.schedules import ScheduleStorage\n\n self._instance_type = check.inst_param(instance_type, "instance_type", InstanceType)\n self._local_artifact_storage = check.inst_param(\n local_artifact_storage, "local_artifact_storage", LocalArtifactStorage\n )\n self._event_storage = check.inst_param(event_storage, "event_storage", EventLogStorage)\n self._event_storage.register_instance(self)\n\n self._run_storage = check.inst_param(run_storage, "run_storage", RunStorage)\n self._run_storage.register_instance(self)\n\n if compute_log_manager:\n self._compute_log_manager = check.inst_param(\n compute_log_manager, "compute_log_manager", ComputeLogManager\n )\n if not isinstance(self._compute_log_manager, CapturedLogManager):\n deprecation_warning(\n "ComputeLogManager",\n "1.2.0",\n "Implement the CapturedLogManager interface instead.",\n )\n self._compute_log_manager.register_instance(self)\n else:\n check.invariant(\n ref, "Compute log manager must be provided if instance is not from a ref"\n )\n self._compute_log_manager = None\n\n self._scheduler = check.opt_inst_param(scheduler, "scheduler", Scheduler)\n\n self._schedule_storage = check.opt_inst_param(\n schedule_storage, "schedule_storage", ScheduleStorage\n )\n if self._schedule_storage:\n self._schedule_storage.register_instance(self)\n\n if run_coordinator:\n self._run_coordinator = check.inst_param(\n run_coordinator, "run_coordinator", RunCoordinator\n )\n self._run_coordinator.register_instance(self)\n else:\n check.invariant(ref, "Run coordinator must be provided if instance is not from a ref")\n self._run_coordinator = None\n\n if run_launcher:\n self._run_launcher: Optional[RunLauncher] = check.inst_param(\n run_launcher, "run_launcher", RunLauncher\n )\n run_launcher.register_instance(self)\n else:\n check.invariant(ref, "Run launcher must be provided if instance is not from a ref")\n self._run_launcher = None\n\n 
self._settings = check.opt_mapping_param(settings, "settings")\n\n self._secrets_loader = check.opt_inst_param(secrets_loader, "secrets_loader", SecretsLoader)\n\n if self._secrets_loader:\n self._secrets_loader.register_instance(self)\n\n self._ref = check.opt_inst_param(ref, "ref", InstanceRef)\n\n self._subscribers: Dict[str, List[Callable]] = defaultdict(list)\n\n run_monitoring_enabled = self.run_monitoring_settings.get("enabled", False)\n self._run_monitoring_enabled = run_monitoring_enabled\n if self.run_monitoring_enabled and self.run_monitoring_max_resume_run_attempts:\n check.invariant(\n self.run_launcher.supports_resume_run,\n "The configured run launcher does not support resuming runs. Set"\n " max_resume_run_attempts to 0 to use run monitoring. Any runs with a failed"\n " run worker will be marked as failed, but will not be resumed.",\n )\n\n if self.run_retries_enabled:\n check.invariant(\n self.event_log_storage.supports_event_consumer_queries(),\n "Run retries are enabled, but the configured event log storage does not support"\n " them. Consider switching to Postgres or Mysql.",\n )\n\n # ctors\n\n
[docs] @public\n @staticmethod\n def ephemeral(\n tempdir: Optional[str] = None,\n preload: Optional[Sequence["DebugRunPayload"]] = None,\n settings: Optional[Dict] = None,\n ) -> "DagsterInstance":\n """Create a `DagsterInstance` suitable for ephemeral execution, useful in test contexts. An\n ephemeral instance uses mostly in-memory components. Use `local_temp` to create a test\n instance that is fully persistent.\n\n Args:\n tempdir (Optional[str]): The path of a directory to be used for local artifact storage.\n preload (Optional[Sequence[DebugRunPayload]]): A sequence of payloads to load into the\n instance's run storage. Useful for debugging.\n settings (Optional[Dict]): Settings for the instance.\n\n Returns:\n DagsterInstance: An ephemeral DagsterInstance.\n """\n from dagster._core.launcher.sync_in_memory_run_launcher import SyncInMemoryRunLauncher\n from dagster._core.run_coordinator import DefaultRunCoordinator\n from dagster._core.storage.event_log import InMemoryEventLogStorage\n from dagster._core.storage.noop_compute_log_manager import NoOpComputeLogManager\n from dagster._core.storage.root import LocalArtifactStorage, TemporaryLocalArtifactStorage\n from dagster._core.storage.runs import InMemoryRunStorage\n\n if tempdir is not None:\n local_storage = LocalArtifactStorage(tempdir)\n else:\n local_storage = TemporaryLocalArtifactStorage()\n\n return DagsterInstance(\n instance_type=InstanceType.EPHEMERAL,\n local_artifact_storage=local_storage,\n run_storage=InMemoryRunStorage(preload=preload),\n event_storage=InMemoryEventLogStorage(preload=preload),\n compute_log_manager=NoOpComputeLogManager(),\n run_coordinator=DefaultRunCoordinator(),\n run_launcher=SyncInMemoryRunLauncher(),\n settings=settings,\n )
\n\n
[docs] @public\n @staticmethod\n def get() -> "DagsterInstance":\n """Get the current `DagsterInstance` as specified by the ``DAGSTER_HOME`` environment variable.\n\n Returns:\n DagsterInstance: The current DagsterInstance.\n """\n dagster_home_path = os.getenv("DAGSTER_HOME")\n\n if not dagster_home_path:\n raise DagsterHomeNotSetError(\n "The environment variable $DAGSTER_HOME is not set. \\nDagster requires this"\n " environment variable to be set to an existing directory in your filesystem. This"\n " directory is used to store metadata across sessions, or load the dagster.yaml"\n " file which can configure storing metadata in an external database.\\nYou can"\n " resolve this error by exporting the environment variable. For example, you can"\n " run the following command in your shell or include it in your shell configuration"\n ' file:\\n\\texport DAGSTER_HOME=~"/dagster_home"\\nor PowerShell\\n$env:DAGSTER_HOME'\n " = ($home + '\\\\dagster_home')or batchset"\n " DAGSTER_HOME=%UserProfile%/dagster_homeAlternatively, DagsterInstance.ephemeral()"\n " can be used for a transient instance.\\n"\n )\n\n dagster_home_path = os.path.expanduser(dagster_home_path)\n\n if not os.path.isabs(dagster_home_path):\n raise DagsterInvariantViolationError(\n (\n '$DAGSTER_HOME "{}" must be an absolute path. Dagster requires this '\n "environment variable to be set to an existing directory in your filesystem."\n ).format(dagster_home_path)\n )\n\n if not (os.path.exists(dagster_home_path) and os.path.isdir(dagster_home_path)):\n raise DagsterInvariantViolationError(\n (\n '$DAGSTER_HOME "{}" is not a directory or does not exist. Dagster requires this'\n " environment variable to be set to an existing directory in your filesystem"\n ).format(dagster_home_path)\n )\n\n return DagsterInstance.from_config(dagster_home_path)
\n\n
[docs] @public\n @staticmethod\n def local_temp(\n tempdir: Optional[str] = None,\n overrides: Optional[DagsterInstanceOverrides] = None,\n ) -> "DagsterInstance":\n """Create a DagsterInstance that uses a temporary directory for local storage. This is a\n regular, fully persistent instance. Use `ephemeral` to get an ephemeral instance with\n in-memory components.\n\n Args:\n tempdir (Optional[str]): The path of a directory to be used for local artifact storage.\n overrides (Optional[DagsterInstanceOverrides]): Override settings for the instance.\n\n Returns:\n DagsterInstance\n """\n if tempdir is None:\n created_dir = TemporaryDirectory()\n i = DagsterInstance.from_ref(\n InstanceRef.from_dir(created_dir.name, overrides=overrides)\n )\n DagsterInstance._TEMP_DIRS[i] = created_dir\n return i\n\n return DagsterInstance.from_ref(InstanceRef.from_dir(tempdir, overrides=overrides))
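A test-style usage sketch contrasting the two constructors above; the op and job are illustrative.

.. code-block:: python

    from dagster import DagsterInstance, job, op


    @op
    def ping():
        return "pong"


    @job
    def ping_job():
        ping()


    # Mostly in-memory instance, suitable for quick in-process test runs.
    instance = DagsterInstance.ephemeral()
    assert ping_job.execute_in_process(instance=instance).success

    # Fully persistent instance backed by a temporary directory.
    temp_instance = DagsterInstance.local_temp()
    assert ping_job.execute_in_process(instance=temp_instance).success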
\n\n @staticmethod\n def from_config(\n config_dir: str,\n config_filename: str = DAGSTER_CONFIG_YAML_FILENAME,\n ) -> "DagsterInstance":\n instance_ref = InstanceRef.from_dir(config_dir, config_filename=config_filename)\n return DagsterInstance.from_ref(instance_ref)\n\n @staticmethod\n def from_ref(instance_ref: InstanceRef) -> "DagsterInstance":\n check.inst_param(instance_ref, "instance_ref", InstanceRef)\n\n # DagsterInstance doesn't implement ConfigurableClass, but we may still sometimes want to\n # have custom subclasses of DagsterInstance. This machinery allows for those custom\n # subclasses to receive additional keyword arguments passed through the config YAML.\n klass = instance_ref.custom_instance_class or DagsterInstance\n kwargs = instance_ref.custom_instance_class_config\n\n unified_storage = instance_ref.storage\n run_storage = unified_storage.run_storage if unified_storage else instance_ref.run_storage\n event_storage = (\n unified_storage.event_log_storage if unified_storage else instance_ref.event_storage\n )\n schedule_storage = (\n unified_storage.schedule_storage if unified_storage else instance_ref.schedule_storage\n )\n\n return klass(\n instance_type=InstanceType.PERSISTENT,\n local_artifact_storage=instance_ref.local_artifact_storage,\n run_storage=run_storage, # type: ignore # (possible none)\n event_storage=event_storage, # type: ignore # (possible none)\n schedule_storage=schedule_storage,\n compute_log_manager=None, # lazy load\n scheduler=instance_ref.scheduler,\n run_coordinator=None, # lazy load\n run_launcher=None, # lazy load\n settings=instance_ref.settings,\n secrets_loader=instance_ref.secrets_loader,\n ref=instance_ref,\n **kwargs,\n )\n\n # flags\n\n @property\n def is_persistent(self) -> bool:\n return self._instance_type == InstanceType.PERSISTENT\n\n @property\n def is_ephemeral(self) -> bool:\n return self._instance_type == InstanceType.EPHEMERAL\n\n def get_ref(self) -> InstanceRef:\n if self._ref:\n return self._ref\n\n check.failed(\n "Attempted to prepare an ineligible DagsterInstance ({inst_type}) for cross "\n "process communication.{dagster_home_msg}".format(\n inst_type=self._instance_type,\n dagster_home_msg=(\n "\\nDAGSTER_HOME environment variable is not set, set it to "\n "a directory on the filesystem for dagster to use for storage and cross "\n "process coordination."\n if os.getenv("DAGSTER_HOME") is None\n else ""\n ),\n )\n )\n\n @property\n def root_directory(self) -> str:\n return self._local_artifact_storage.base_dir\n\n def _info(self, component: object) -> Union[str, Mapping[Any, Any]]:\n # ConfigurableClass may not have inst_data if it's a direct instantiation\n # which happens for ephemeral instances\n if isinstance(component, ConfigurableClass) and component.inst_data:\n return component.inst_data.info_dict()\n if type(component) is dict:\n return component\n return component.__class__.__name__\n\n def _info_str_for_component(self, component_name: str, component: object) -> str:\n return yaml.dump(\n {component_name: self._info(component)}, default_flow_style=False, sort_keys=False\n )\n\n def info_dict(self) -> Mapping[str, object]:\n settings: Mapping[str, object] = self._settings if self._settings else {}\n\n ret = {\n "local_artifact_storage": self._info(self._local_artifact_storage),\n "run_storage": self._info(self._run_storage),\n "event_log_storage": self._info(self._event_storage),\n "compute_logs": self._info(self._compute_log_manager),\n "schedule_storage": self._info(self._schedule_storage),\n "scheduler": 
self._info(self._scheduler),\n "run_coordinator": self._info(self._run_coordinator),\n "run_launcher": self._info(self.run_launcher),\n }\n ret.update(\n {\n settings_key: self._info(settings_value)\n for settings_key, settings_value in settings.items()\n }\n )\n\n return ret\n\n def info_str(self) -> str:\n return yaml.dump(self.info_dict(), default_flow_style=False, sort_keys=False)\n\n def schema_str(self) -> str:\n def _schema_dict(alembic_version: "AlembicVersion") -> Optional[Mapping[str, object]]:\n if not alembic_version:\n return None\n db_revision, head_revision = alembic_version\n return {\n "current": db_revision,\n "latest": head_revision,\n }\n\n return yaml.dump(\n {\n "schema": {\n "event_log_storage": _schema_dict(self._event_storage.alembic_version()), # type: ignore # (possible none)\n "run_storage": _schema_dict(self._event_storage.alembic_version()), # type: ignore # (possible none)\n "schedule_storage": _schema_dict(self._event_storage.alembic_version()), # type: ignore # (possible none)\n }\n },\n default_flow_style=False,\n sort_keys=False,\n )\n\n @property\n def run_storage(self) -> "RunStorage":\n return self._run_storage\n\n @property\n def event_log_storage(self) -> "EventLogStorage":\n return self._event_storage\n\n @property\n def daemon_cursor_storage(self) -> "DaemonCursorStorage":\n return self._run_storage\n\n # schedule storage\n\n @property\n def schedule_storage(self) -> Optional["ScheduleStorage"]:\n return self._schedule_storage\n\n @property\n def scheduler(self) -> Optional["Scheduler"]:\n return self._scheduler\n\n @property\n def scheduler_class(self) -> Optional[str]:\n return self.scheduler.__class__.__name__ if self.scheduler else None\n\n # run coordinator\n\n @property\n def run_coordinator(self) -> "RunCoordinator":\n # Lazily load in case the run coordinator requires dependencies that are not available\n # everywhere that loads the instance\n if not self._run_coordinator:\n check.invariant(\n self._ref, "Run coordinator not provided, and no instance ref available"\n )\n run_coordinator = cast(InstanceRef, self._ref).run_coordinator\n check.invariant(run_coordinator, "Run coordinator not configured in instance ref")\n self._run_coordinator = cast("RunCoordinator", run_coordinator)\n self._run_coordinator.register_instance(self)\n return self._run_coordinator\n\n # run launcher\n\n @property\n def run_launcher(self) -> "RunLauncher":\n # Lazily load in case the launcher requires dependencies that are not available everywhere\n # that loads the instance (e.g. 
The EcsRunLauncher requires boto3)\n if not self._run_launcher:\n check.invariant(self._ref, "Run launcher not provided, and no instance ref available")\n launcher = cast(InstanceRef, self._ref).run_launcher\n check.invariant(launcher, "Run launcher not configured in instance ref")\n self._run_launcher = cast("RunLauncher", launcher)\n self._run_launcher.register_instance(self)\n return self._run_launcher\n\n # compute logs\n\n @property\n def compute_log_manager(self) -> "ComputeLogManager":\n if not self._compute_log_manager:\n check.invariant(\n self._ref, "Compute log manager not provided, and no instance ref available"\n )\n compute_log_manager = cast(InstanceRef, self._ref).compute_log_manager\n check.invariant(\n compute_log_manager, "Compute log manager not configured in instance ref"\n )\n self._compute_log_manager = cast("ComputeLogManager", compute_log_manager)\n self._compute_log_manager.register_instance(self)\n return self._compute_log_manager\n\n def get_settings(self, settings_key: str) -> Any:\n check.str_param(settings_key, "settings_key")\n if self._settings and settings_key in self._settings:\n return self._settings.get(settings_key)\n return {}\n\n @property\n def telemetry_enabled(self) -> bool:\n if self.is_ephemeral:\n return False\n\n dagster_telemetry_enabled_default = True\n\n telemetry_settings = self.get_settings("telemetry")\n\n if not telemetry_settings:\n return dagster_telemetry_enabled_default\n\n if "enabled" in telemetry_settings:\n return telemetry_settings["enabled"]\n else:\n return dagster_telemetry_enabled_default\n\n @property\n def nux_enabled(self) -> bool:\n if self.is_ephemeral:\n return False\n\n nux_enabled_by_default = True\n\n nux_settings = self.get_settings("nux")\n if not nux_settings:\n return nux_enabled_by_default\n\n if "enabled" in nux_settings:\n return nux_settings["enabled"]\n else:\n return nux_enabled_by_default\n\n # run monitoring\n\n @property\n def run_monitoring_enabled(self) -> bool:\n return self._run_monitoring_enabled\n\n @property\n def run_monitoring_settings(self) -> Any:\n return self.get_settings("run_monitoring")\n\n @property\n def run_monitoring_start_timeout_seconds(self) -> int:\n return self.run_monitoring_settings.get("start_timeout_seconds", 180)\n\n @property\n def run_monitoring_cancel_timeout_seconds(self) -> int:\n return self.run_monitoring_settings.get("cancel_timeout_seconds", 180)\n\n @property\n def code_server_settings(self) -> Any:\n return self.get_settings("code_servers")\n\n @property\n def code_server_process_startup_timeout(self) -> int:\n return self.code_server_settings.get(\n "local_startup_timeout", DEFAULT_LOCAL_CODE_SERVER_STARTUP_TIMEOUT\n )\n\n @property\n def code_server_reload_timeout(self) -> int:\n return self.code_server_settings.get(\n "reload_timeout", DEFAULT_LOCAL_CODE_SERVER_STARTUP_TIMEOUT\n )\n\n @property\n def wait_for_local_code_server_processes_on_shutdown(self) -> bool:\n return self.code_server_settings.get("wait_for_local_processes_on_shutdown", False)\n\n @property\n def run_monitoring_max_resume_run_attempts(self) -> int:\n return self.run_monitoring_settings.get("max_resume_run_attempts", 0)\n\n @property\n def run_monitoring_poll_interval_seconds(self) -> int:\n return self.run_monitoring_settings.get("poll_interval_seconds", 120)\n\n @property\n def cancellation_thread_poll_interval_seconds(self) -> int:\n return self.get_settings("run_monitoring").get(\n "cancellation_thread_poll_interval_seconds", 10\n )\n\n @property\n def run_retries_enabled(self) -> 
bool:\n return self.get_settings("run_retries").get("enabled", False)\n\n @property\n def run_retries_max_retries(self) -> int:\n return self.get_settings("run_retries").get("max_retries")\n\n @property\n def auto_materialize_enabled(self) -> bool:\n return self.get_settings("auto_materialize").get("enabled", True)\n\n @property\n def auto_materialize_minimum_interval_seconds(self) -> int:\n return self.get_settings("auto_materialize").get("minimum_interval_seconds")\n\n @property\n def auto_materialize_run_tags(self) -> Dict[str, str]:\n return self.get_settings("auto_materialize").get("run_tags", {})\n\n @property\n def auto_materialize_respect_materialization_data_versions(self) -> bool:\n return self.get_settings("auto_materialize").get(\n "respect_materialization_data_versions", False\n )\n\n # python logs\n\n @property\n def managed_python_loggers(self) -> Sequence[str]:\n python_log_settings = self.get_settings("python_logs") or {}\n loggers: Sequence[str] = python_log_settings.get("managed_python_loggers", [])\n return loggers\n\n @property\n def python_log_level(self) -> Optional[str]:\n python_log_settings = self.get_settings("python_logs") or {}\n return python_log_settings.get("python_log_level")\n\n def upgrade(self, print_fn: Optional[PrintFn] = None) -> None:\n from dagster._core.storage.migration.utils import upgrading_instance\n\n with upgrading_instance(self):\n if print_fn:\n print_fn("Updating run storage...")\n self._run_storage.upgrade() # type: ignore # (unknown method on run storage)\n self._run_storage.migrate(print_fn)\n\n if print_fn:\n print_fn("Updating event storage...")\n self._event_storage.upgrade()\n self._event_storage.reindex_assets(print_fn=print_fn)\n\n if print_fn:\n print_fn("Updating schedule storage...")\n self._schedule_storage.upgrade() # type: ignore # (possible none)\n self._schedule_storage.migrate(print_fn) # type: ignore # (possible none)\n\n def optimize_for_webserver(self, statement_timeout: int, pool_recycle: int) -> None:\n if self._schedule_storage:\n self._schedule_storage.optimize_for_webserver(\n statement_timeout=statement_timeout, pool_recycle=pool_recycle\n )\n self._run_storage.optimize_for_webserver(\n statement_timeout=statement_timeout, pool_recycle=pool_recycle\n )\n self._event_storage.optimize_for_webserver(\n statement_timeout=statement_timeout, pool_recycle=pool_recycle\n )\n\n def reindex(self, print_fn: PrintFn = lambda _: None) -> None:\n print_fn("Checking for reindexing...")\n self._event_storage.reindex_events(print_fn)\n self._event_storage.reindex_assets(print_fn)\n self._run_storage.optimize(print_fn)\n self._schedule_storage.optimize(print_fn) # type: ignore # (possible none)\n print_fn("Done.")\n\n def dispose(self) -> None:\n self._local_artifact_storage.dispose()\n self._run_storage.dispose()\n if self._run_coordinator:\n self._run_coordinator.dispose()\n if self._run_launcher:\n self._run_launcher.dispose()\n self._event_storage.dispose()\n if self._compute_log_manager:\n self._compute_log_manager.dispose()\n if self._secrets_loader:\n self._secrets_loader.dispose()\n\n if self in DagsterInstance._TEMP_DIRS:\n DagsterInstance._TEMP_DIRS[self].cleanup()\n del DagsterInstance._TEMP_DIRS[self]\n\n # run storage\n
[docs] @public\n def get_run_by_id(self, run_id: str) -> Optional[DagsterRun]:\n """Get a :py:class:`DagsterRun` matching the provided `run_id`.\n\n Args:\n run_id (str): The id of the run to retrieve.\n\n Returns:\n Optional[DagsterRun]: The run corresponding to the given id. If no run matching the id\n is found, return `None`.\n """\n record = self.get_run_record_by_id(run_id)\n if record is None:\n return None\n return record.dagster_run
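# Illustrative usage only: fetching a run by id and handling the `None` case.
# "some-run-id" is a placeholder; a fresh ephemeral instance holds no runs.
from dagster import DagsterInstance

instance = DagsterInstance.ephemeral()
run = instance.get_run_by_id("some-run-id")
if run is None:
    print("no run with that id")
else:
    print(run.status)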
\n\n
[docs] @public\n @traced\n def get_run_record_by_id(self, run_id: str) -> Optional[RunRecord]:\n """Get a :py:class:`RunRecord` matching the provided `run_id`.\n\n Args:\n run_id (str): The id of the run record to retrieve.\n\n Returns:\n Optional[RunRecord]: The run record corresponding to the given id. If no run matching\n the id is found, return `None`.\n """\n records = self._run_storage.get_run_records(RunsFilter(run_ids=[run_id]))\n if not records:\n return None\n return records[0]
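# Illustrative usage only: a RunRecord wraps the stored DagsterRun plus storage metadata;
# `record.dagster_run` is the same object that `get_run_by_id` returns.
from dagster import DagsterInstance

instance = DagsterInstance.ephemeral()
record = instance.get_run_record_by_id("some-run-id")  # placeholder id
if record is not None:
    print(record.dagster_run.run_id)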
\n\n @traced\n def get_job_snapshot(self, snapshot_id: str) -> "JobSnapshot":\n return self._run_storage.get_job_snapshot(snapshot_id)\n\n @traced\n def has_job_snapshot(self, snapshot_id: str) -> bool:\n return self._run_storage.has_job_snapshot(snapshot_id)\n\n @traced\n def has_snapshot(self, snapshot_id: str) -> bool:\n return self._run_storage.has_snapshot(snapshot_id)\n\n @traced\n def get_historical_job(self, snapshot_id: str) -> "HistoricalJob":\n from dagster._core.host_representation import HistoricalJob\n\n snapshot = self._run_storage.get_job_snapshot(snapshot_id)\n parent_snapshot = (\n self._run_storage.get_job_snapshot(snapshot.lineage_snapshot.parent_snapshot_id)\n if snapshot.lineage_snapshot\n else None\n )\n return HistoricalJob(snapshot, snapshot_id, parent_snapshot)\n\n @traced\n def has_historical_job(self, snapshot_id: str) -> bool:\n return self._run_storage.has_job_snapshot(snapshot_id)\n\n @traced\n def get_execution_plan_snapshot(self, snapshot_id: str) -> "ExecutionPlanSnapshot":\n return self._run_storage.get_execution_plan_snapshot(snapshot_id)\n\n @traced\n def get_run_stats(self, run_id: str) -> DagsterRunStatsSnapshot:\n return self._event_storage.get_stats_for_run(run_id)\n\n @traced\n def get_run_step_stats(\n self, run_id: str, step_keys: Optional[Sequence[str]] = None\n ) -> Sequence["RunStepKeyStatsSnapshot"]:\n return self._event_storage.get_step_stats_for_run(run_id, step_keys)\n\n @traced\n def get_run_tags(\n self,\n tag_keys: Optional[Sequence[str]] = None,\n value_prefix: Optional[str] = None,\n limit: Optional[int] = None,\n ) -> Sequence[Tuple[str, Set[str]]]:\n return self._run_storage.get_run_tags(\n tag_keys=tag_keys, value_prefix=value_prefix, limit=limit\n )\n\n @traced\n def get_run_tag_keys(self) -> Sequence[str]:\n return self._run_storage.get_run_tag_keys()\n\n @traced\n def get_run_group(self, run_id: str) -> Optional[Tuple[str, Sequence[DagsterRun]]]:\n return self._run_storage.get_run_group(run_id)\n\n def create_run_for_job(\n self,\n job_def: "JobDefinition",\n execution_plan: Optional["ExecutionPlan"] = None,\n run_id: Optional[str] = None,\n run_config: Optional[Mapping[str, object]] = None,\n resolved_op_selection: Optional[AbstractSet[str]] = None,\n status: Optional[Union[DagsterRunStatus, str]] = None,\n tags: Optional[Mapping[str, str]] = None,\n root_run_id: Optional[str] = None,\n parent_run_id: Optional[str] = None,\n op_selection: Optional[Sequence[str]] = None,\n asset_selection: Optional[AbstractSet[AssetKey]] = None,\n external_job_origin: Optional["ExternalJobOrigin"] = None,\n job_code_origin: Optional[JobPythonOrigin] = None,\n repository_load_data: Optional["RepositoryLoadData"] = None,\n ) -> DagsterRun:\n from dagster._core.definitions.job_definition import JobDefinition\n from dagster._core.execution.api import create_execution_plan\n from dagster._core.execution.plan.plan import ExecutionPlan\n from dagster._core.snap import snapshot_from_execution_plan\n\n check.inst_param(job_def, "pipeline_def", JobDefinition)\n check.opt_inst_param(execution_plan, "execution_plan", ExecutionPlan)\n\n # note that op_selection is required to execute the solid subset, which is the\n # frozenset version of the previous solid_subset.\n # op_selection is not required and will not be converted to op_selection here.\n # i.e. 
this function doesn't handle solid queries.\n # op_selection is only used to pass the user queries further down.\n check.opt_set_param(resolved_op_selection, "resolved_op_selection", of_type=str)\n check.opt_list_param(op_selection, "op_selection", of_type=str)\n check.opt_set_param(asset_selection, "asset_selection", of_type=AssetKey)\n\n # op_selection never provided\n if asset_selection or op_selection:\n # for cases when `create_run_for_pipeline` is directly called\n job_def = job_def.get_subset(\n asset_selection=asset_selection,\n op_selection=op_selection,\n )\n step_keys_to_execute = None\n\n if execution_plan:\n step_keys_to_execute = execution_plan.step_keys_to_execute\n\n else:\n execution_plan = create_execution_plan(\n job=job_def,\n run_config=run_config,\n instance_ref=self.get_ref() if self.is_persistent else None,\n tags=tags,\n repository_load_data=repository_load_data,\n )\n\n return self.create_run(\n job_name=job_def.name,\n run_id=run_id,\n run_config=run_config,\n op_selection=op_selection,\n asset_selection=asset_selection,\n asset_check_selection=None,\n resolved_op_selection=resolved_op_selection,\n step_keys_to_execute=step_keys_to_execute,\n status=DagsterRunStatus(status) if status else None,\n tags=tags,\n root_run_id=root_run_id,\n parent_run_id=parent_run_id,\n job_snapshot=job_def.get_job_snapshot(),\n execution_plan_snapshot=snapshot_from_execution_plan(\n execution_plan,\n job_def.get_job_snapshot_id(),\n ),\n parent_job_snapshot=job_def.get_parent_job_snapshot(),\n external_job_origin=external_job_origin,\n job_code_origin=job_code_origin,\n )\n\n def _construct_run_with_snapshots(\n self,\n job_name: str,\n run_id: str,\n run_config: Optional[Mapping[str, object]],\n resolved_op_selection: Optional[AbstractSet[str]],\n step_keys_to_execute: Optional[Sequence[str]],\n status: Optional[DagsterRunStatus],\n tags: Mapping[str, str],\n root_run_id: Optional[str],\n parent_run_id: Optional[str],\n job_snapshot: Optional["JobSnapshot"],\n execution_plan_snapshot: Optional["ExecutionPlanSnapshot"],\n parent_job_snapshot: Optional["JobSnapshot"],\n asset_selection: Optional[AbstractSet[AssetKey]] = None,\n asset_check_selection: Optional[AbstractSet["AssetCheckKey"]] = None,\n op_selection: Optional[Sequence[str]] = None,\n external_job_origin: Optional["ExternalJobOrigin"] = None,\n job_code_origin: Optional[JobPythonOrigin] = None,\n ) -> DagsterRun:\n # https://github.com/dagster-io/dagster/issues/2403\n if tags and IS_AIRFLOW_INGEST_PIPELINE_STR in tags:\n if AIRFLOW_EXECUTION_DATE_STR not in tags:\n tags = {\n **tags,\n AIRFLOW_EXECUTION_DATE_STR: get_current_datetime_in_utc().isoformat(),\n }\n\n check.invariant(\n not (not job_snapshot and execution_plan_snapshot),\n "It is illegal to have an execution plan snapshot and not have a pipeline snapshot."\n " It is possible to have no execution plan snapshot since we persist runs that do"\n " not successfully compile execution plans in the scheduled case.",\n )\n\n job_snapshot_id = (\n self._ensure_persisted_job_snapshot(job_snapshot, parent_job_snapshot)\n if job_snapshot\n else None\n )\n\n execution_plan_snapshot_id = (\n self._ensure_persisted_execution_plan_snapshot(\n execution_plan_snapshot, job_snapshot_id, step_keys_to_execute\n )\n if execution_plan_snapshot and job_snapshot_id\n else None\n )\n\n return DagsterRun(\n job_name=job_name,\n run_id=run_id,\n run_config=run_config,\n asset_selection=asset_selection,\n asset_check_selection=asset_check_selection,\n op_selection=op_selection,\n 
resolved_op_selection=resolved_op_selection,\n step_keys_to_execute=step_keys_to_execute,\n status=status,\n tags=tags,\n root_run_id=root_run_id,\n parent_run_id=parent_run_id,\n job_snapshot_id=job_snapshot_id,\n execution_plan_snapshot_id=execution_plan_snapshot_id,\n external_job_origin=external_job_origin,\n job_code_origin=job_code_origin,\n has_repository_load_data=execution_plan_snapshot is not None\n and execution_plan_snapshot.repository_load_data is not None,\n )\n\n def _ensure_persisted_job_snapshot(\n self,\n job_snapshot: "JobSnapshot",\n parent_job_snapshot: "Optional[JobSnapshot]",\n ) -> str:\n from dagster._core.snap import JobSnapshot, create_job_snapshot_id\n\n check.inst_param(job_snapshot, "job_snapshot", JobSnapshot)\n check.opt_inst_param(parent_job_snapshot, "parent_job_snapshot", JobSnapshot)\n\n if job_snapshot.lineage_snapshot:\n if not self._run_storage.has_job_snapshot(\n job_snapshot.lineage_snapshot.parent_snapshot_id\n ):\n check.invariant(\n create_job_snapshot_id(parent_job_snapshot) # type: ignore # (possible none)\n == job_snapshot.lineage_snapshot.parent_snapshot_id,\n "Parent pipeline snapshot id out of sync with passed parent pipeline snapshot",\n )\n\n returned_job_snapshot_id = self._run_storage.add_job_snapshot(\n parent_job_snapshot # type: ignore # (possible none)\n )\n check.invariant(\n job_snapshot.lineage_snapshot.parent_snapshot_id == returned_job_snapshot_id\n )\n\n job_snapshot_id = create_job_snapshot_id(job_snapshot)\n if not self._run_storage.has_job_snapshot(job_snapshot_id):\n returned_job_snapshot_id = self._run_storage.add_job_snapshot(job_snapshot)\n check.invariant(job_snapshot_id == returned_job_snapshot_id)\n\n return job_snapshot_id\n\n def _ensure_persisted_execution_plan_snapshot(\n self,\n execution_plan_snapshot: "ExecutionPlanSnapshot",\n job_snapshot_id: str,\n step_keys_to_execute: Optional[Sequence[str]],\n ) -> str:\n from dagster._core.snap.execution_plan_snapshot import (\n ExecutionPlanSnapshot,\n create_execution_plan_snapshot_id,\n )\n\n check.inst_param(execution_plan_snapshot, "execution_plan_snapshot", ExecutionPlanSnapshot)\n check.str_param(job_snapshot_id, "job_snapshot_id")\n check.opt_nullable_sequence_param(step_keys_to_execute, "step_keys_to_execute", of_type=str)\n\n check.invariant(\n execution_plan_snapshot.job_snapshot_id == job_snapshot_id,\n "Snapshot mismatch: Snapshot ID in execution plan snapshot is "\n f'"{execution_plan_snapshot.job_snapshot_id}" and snapshot_id created in memory is '\n f'"{job_snapshot_id}"',\n )\n\n execution_plan_snapshot_id = create_execution_plan_snapshot_id(execution_plan_snapshot)\n\n if not self._run_storage.has_execution_plan_snapshot(execution_plan_snapshot_id):\n returned_execution_plan_snapshot_id = self._run_storage.add_execution_plan_snapshot(\n execution_plan_snapshot\n )\n\n check.invariant(execution_plan_snapshot_id == returned_execution_plan_snapshot_id)\n\n return execution_plan_snapshot_id\n\n def _log_asset_planned_events(\n self, dagster_run: DagsterRun, execution_plan_snapshot: "ExecutionPlanSnapshot"\n ) -> None:\n from dagster._core.events import (\n AssetMaterializationPlannedData,\n DagsterEvent,\n DagsterEventType,\n )\n\n job_name = dagster_run.job_name\n\n for step in execution_plan_snapshot.steps:\n if step.key in execution_plan_snapshot.step_keys_to_execute:\n for output in step.outputs:\n asset_key = check.not_none(output.properties).asset_key\n if asset_key:\n # Logs and stores asset_materialization_planned event\n partition_tag = 
dagster_run.tags.get(PARTITION_NAME_TAG)\n partition_range_start, partition_range_end = dagster_run.tags.get(\n ASSET_PARTITION_RANGE_START_TAG\n ), dagster_run.tags.get(ASSET_PARTITION_RANGE_END_TAG)\n\n if partition_tag and (partition_range_start or partition_range_end):\n raise DagsterInvariantViolationError(\n f"Cannot have {ASSET_PARTITION_RANGE_START_TAG} or"\n f" {ASSET_PARTITION_RANGE_END_TAG} set along with"\n f" {PARTITION_NAME_TAG}"\n )\n\n if partition_range_start or partition_range_end:\n if not partition_range_start or not partition_range_end:\n raise DagsterInvariantViolationError(\n f"Cannot have {ASSET_PARTITION_RANGE_START_TAG} or"\n f" {ASSET_PARTITION_RANGE_END_TAG} set without the other"\n )\n\n # TODO: resolve which partitions are in the range, and emit an event for each\n\n partition = (\n partition_tag\n if check.not_none(output.properties).is_asset_partitioned\n else None\n )\n\n event = DagsterEvent(\n event_type_value=DagsterEventType.ASSET_MATERIALIZATION_PLANNED.value,\n job_name=job_name,\n message=(\n f"{job_name} intends to materialize asset {asset_key.to_string()}"\n ),\n event_specific_data=AssetMaterializationPlannedData(\n asset_key, partition=partition\n ),\n step_key=step.key,\n )\n self.report_dagster_event(event, dagster_run.run_id, logging.DEBUG)\n\n if check.not_none(output.properties).asset_check_key:\n asset_check_key = check.not_none(\n check.not_none(output.properties).asset_check_key\n )\n target_asset_key = asset_check_key.asset_key\n check_name = asset_check_key.name\n\n event = DagsterEvent(\n event_type_value=DagsterEventType.ASSET_CHECK_EVALUATION_PLANNED.value,\n job_name=job_name,\n message=(\n f"{job_name} intends to execute asset check {check_name} on"\n f" asset {target_asset_key.to_string()}"\n ),\n event_specific_data=AssetCheckEvaluationPlanned(\n target_asset_key,\n check_name=check_name,\n ),\n step_key=step.key,\n )\n self.report_dagster_event(event, dagster_run.run_id, logging.DEBUG)\n\n def create_run(\n self,\n *,\n job_name: str,\n run_id: Optional[str],\n run_config: Optional[Mapping[str, object]],\n status: Optional[DagsterRunStatus],\n tags: Optional[Mapping[str, Any]],\n root_run_id: Optional[str],\n parent_run_id: Optional[str],\n step_keys_to_execute: Optional[Sequence[str]],\n execution_plan_snapshot: Optional["ExecutionPlanSnapshot"],\n job_snapshot: Optional["JobSnapshot"],\n parent_job_snapshot: Optional["JobSnapshot"],\n asset_selection: Optional[AbstractSet[AssetKey]],\n asset_check_selection: Optional[AbstractSet["AssetCheckKey"]],\n resolved_op_selection: Optional[AbstractSet[str]],\n op_selection: Optional[Sequence[str]],\n external_job_origin: Optional["ExternalJobOrigin"],\n job_code_origin: Optional[JobPythonOrigin],\n ) -> DagsterRun:\n from dagster._core.definitions.asset_check_spec import AssetCheckKey\n from dagster._core.definitions.utils import validate_tags\n from dagster._core.host_representation.origin import ExternalJobOrigin\n from dagster._core.snap import ExecutionPlanSnapshot, JobSnapshot\n\n check.str_param(job_name, "job_name")\n check.opt_str_param(\n run_id, "run_id"\n ) # will be assigned to make_new_run_id() lower in callstack\n check.opt_mapping_param(run_config, "run_config", key_type=str)\n\n check.opt_inst_param(status, "status", DagsterRunStatus)\n check.opt_mapping_param(tags, "tags", key_type=str)\n\n validated_tags = validate_tags(tags)\n\n check.opt_str_param(root_run_id, "root_run_id")\n check.opt_str_param(parent_run_id, "parent_run_id")\n\n # If step_keys_to_execute is None, 
then everything is executed. In some cases callers\n # are still exploding and sending the full list of step keys even though that is\n # unnecessary.\n\n check.opt_sequence_param(step_keys_to_execute, "step_keys_to_execute")\n check.opt_inst_param(\n execution_plan_snapshot, "execution_plan_snapshot", ExecutionPlanSnapshot\n )\n\n if root_run_id or parent_run_id:\n check.invariant(\n root_run_id and parent_run_id,\n "If root_run_id or parent_run_id is passed, this is a re-execution scenario and"\n " root_run_id and parent_run_id must both be passed.",\n )\n\n # The job_snapshot should always be set in production scenarios. In tests\n # we have sometimes omitted it out of convenience.\n\n check.opt_inst_param(job_snapshot, "job_snapshot", JobSnapshot)\n check.opt_inst_param(parent_job_snapshot, "parent_job_snapshot", JobSnapshot)\n\n if parent_job_snapshot:\n check.invariant(\n job_snapshot,\n "If parent_job_snapshot is set, job_snapshot should also be.",\n )\n\n # op_selection is a sequence of selection queries assigned by the user.\n # *Most* callers expand the op_selection into an explicit set of\n # resolved_op_selection via accessing external_job.resolved_op_selection\n # but not all do. Some (launch execution mutation in graphql and backfill run\n # creation, for example) actually pass the solid *selection* into the\n # resolved_op_selection parameter, but just as a frozen set, rather than\n # fully resolving the selection, as the daemon launchers do. Given the\n # state of callers we just check to ensure that the arguments are well-formed.\n #\n # asset_selection adds another dimension to this lovely dance. op_selection\n # and asset_selection are mutually exclusive and should never both be set.\n # This is invariant is checked in a sporadic fashion around\n # the codebase, but is never enforced in a typed fashion.\n #\n # Additionally, the way that callsites currently behave *if* asset selection\n # is set (i.e., not None) then *neither* op_selection *nor*\n # resolved_op_selection is passed. In the asset selection case resolving\n # the set of assets into the canonical resolved_op_selection is done in\n # the user process, and the exact resolution is never persisted in the run.\n # We are asserting that invariant here to maintain that behavior.\n #\n # Finally, asset_check_selection can be passed along with asset_selection. It\n # is mutually exclusive with op_selection and resolved_op_selection. A `None`\n # value will include any asset checks that target selected assets. An empty set\n # will include no asset checks.\n\n check.opt_set_param(resolved_op_selection, "resolved_op_selection", of_type=str)\n check.opt_sequence_param(op_selection, "op_selection", of_type=str)\n check.opt_set_param(asset_selection, "asset_selection", of_type=AssetKey)\n check.opt_set_param(asset_check_selection, "asset_check_selection", of_type=AssetCheckKey)\n\n if asset_selection is not None or asset_check_selection is not None:\n check.invariant(\n op_selection is None,\n "Cannot pass op_selection with either of asset_selection or asset_check_selection",\n )\n\n check.invariant(\n resolved_op_selection is None,\n "Cannot pass resolved_op_selection with either of asset_selection or"\n " asset_check_selection",\n )\n\n # The "python origin" arguments exist so a job can be reconstructed in memory\n # after a DagsterRun has been fetched from the database.\n #\n # There are cases (notably in _logged_execute_job with Reconstructable jobs)\n # where job_code_origin and is not. 
In some cloud test cases only\n # external_job_origin is passed But they are almost always passed together.\n # If these are not set the created run will never be able to be relaunched from\n # the information just in the run or in another process.\n\n check.opt_inst_param(external_job_origin, "external_job_origin", ExternalJobOrigin)\n check.opt_inst_param(job_code_origin, "job_code_origin", JobPythonOrigin)\n\n dagster_run = self._construct_run_with_snapshots(\n job_name=job_name,\n run_id=run_id, # type: ignore # (possible none)\n run_config=run_config,\n asset_selection=asset_selection,\n asset_check_selection=asset_check_selection,\n op_selection=op_selection,\n resolved_op_selection=resolved_op_selection,\n step_keys_to_execute=step_keys_to_execute,\n status=status,\n tags=validated_tags,\n root_run_id=root_run_id,\n parent_run_id=parent_run_id,\n job_snapshot=job_snapshot,\n execution_plan_snapshot=execution_plan_snapshot,\n parent_job_snapshot=parent_job_snapshot,\n external_job_origin=external_job_origin,\n job_code_origin=job_code_origin,\n )\n\n dagster_run = self._run_storage.add_run(dagster_run)\n\n if execution_plan_snapshot:\n self._log_asset_planned_events(dagster_run, execution_plan_snapshot)\n\n return dagster_run\n\n def create_reexecuted_run(\n self,\n *,\n parent_run: DagsterRun,\n code_location: "CodeLocation",\n external_job: "ExternalJob",\n strategy: "ReexecutionStrategy",\n extra_tags: Optional[Mapping[str, Any]] = None,\n run_config: Optional[Mapping[str, Any]] = None,\n use_parent_run_tags: bool = False,\n ) -> DagsterRun:\n from dagster._core.execution.plan.resume_retry import (\n ReexecutionStrategy,\n )\n from dagster._core.execution.plan.state import KnownExecutionState\n from dagster._core.host_representation import CodeLocation, ExternalJob\n\n check.inst_param(parent_run, "parent_run", DagsterRun)\n check.inst_param(code_location, "code_location", CodeLocation)\n check.inst_param(external_job, "external_job", ExternalJob)\n check.inst_param(strategy, "strategy", ReexecutionStrategy)\n check.opt_mapping_param(extra_tags, "extra_tags", key_type=str)\n check.opt_mapping_param(run_config, "run_config", key_type=str)\n\n check.bool_param(use_parent_run_tags, "use_parent_run_tags")\n\n root_run_id = parent_run.root_run_id or parent_run.run_id\n parent_run_id = parent_run.run_id\n\n tags = merge_dicts(\n external_job.tags,\n (\n # these can differ from external_job.tags if tags were added at launch time\n parent_run.tags\n if use_parent_run_tags\n else {}\n ),\n extra_tags or {},\n {\n PARENT_RUN_ID_TAG: parent_run_id,\n ROOT_RUN_ID_TAG: root_run_id,\n },\n )\n\n run_config = run_config if run_config is not None else parent_run.run_config\n\n if strategy == ReexecutionStrategy.FROM_FAILURE:\n check.invariant(\n parent_run.status == DagsterRunStatus.FAILURE,\n "Cannot reexecute from failure a run that is not failed",\n )\n\n (\n step_keys_to_execute,\n known_state,\n ) = KnownExecutionState.build_resume_retry_reexecution(\n self,\n parent_run=parent_run,\n )\n tags[RESUME_RETRY_TAG] = "true"\n elif strategy == ReexecutionStrategy.ALL_STEPS:\n step_keys_to_execute = None\n known_state = None\n else:\n raise DagsterInvariantViolationError(f"Unknown reexecution strategy: {strategy}")\n\n external_execution_plan = code_location.get_external_execution_plan(\n external_job,\n run_config,\n step_keys_to_execute=step_keys_to_execute,\n known_state=known_state,\n instance=self,\n )\n\n return self.create_run(\n job_name=parent_run.job_name,\n run_id=None,\n 
run_config=run_config,\n resolved_op_selection=parent_run.resolved_op_selection,\n step_keys_to_execute=step_keys_to_execute,\n status=DagsterRunStatus.NOT_STARTED,\n tags=tags,\n root_run_id=root_run_id,\n parent_run_id=parent_run_id,\n job_snapshot=external_job.job_snapshot,\n execution_plan_snapshot=external_execution_plan.execution_plan_snapshot,\n parent_job_snapshot=external_job.parent_job_snapshot,\n op_selection=parent_run.op_selection,\n asset_selection=parent_run.asset_selection,\n asset_check_selection=parent_run.asset_check_selection,\n external_job_origin=external_job.get_external_origin(),\n job_code_origin=external_job.get_python_origin(),\n )\n\n def register_managed_run(\n self,\n job_name: str,\n run_id: str,\n run_config: Optional[Mapping[str, object]],\n resolved_op_selection: Optional[AbstractSet[str]],\n step_keys_to_execute: Optional[Sequence[str]],\n tags: Mapping[str, str],\n root_run_id: Optional[str],\n parent_run_id: Optional[str],\n job_snapshot: Optional["JobSnapshot"],\n execution_plan_snapshot: Optional["ExecutionPlanSnapshot"],\n parent_job_snapshot: Optional["JobSnapshot"],\n op_selection: Optional[Sequence[str]] = None,\n job_code_origin: Optional[JobPythonOrigin] = None,\n ) -> DagsterRun:\n # The usage of this method is limited to dagster-airflow, specifically in Dagster\n # Operators that are executed in Airflow. Because a common workflow in Airflow is to\n # retry dags from arbitrary tasks, we need any node to be capable of creating a\n # DagsterRun.\n #\n # The try-except DagsterRunAlreadyExists block handles the race when multiple "root" tasks\n # simultaneously execute self._run_storage.add_run(dagster_run). When this happens, only\n # one task succeeds in creating the run, while the others get DagsterRunAlreadyExists\n # error; at this point, the failed tasks try again to fetch the existing run.\n # https://github.com/dagster-io/dagster/issues/2412\n\n dagster_run = self._construct_run_with_snapshots(\n job_name=job_name,\n run_id=run_id,\n run_config=run_config,\n op_selection=op_selection,\n resolved_op_selection=resolved_op_selection,\n step_keys_to_execute=step_keys_to_execute,\n status=DagsterRunStatus.MANAGED,\n tags=tags,\n root_run_id=root_run_id,\n parent_run_id=parent_run_id,\n job_snapshot=job_snapshot,\n execution_plan_snapshot=execution_plan_snapshot,\n parent_job_snapshot=parent_job_snapshot,\n job_code_origin=job_code_origin,\n )\n\n def get_run() -> DagsterRun:\n candidate_run = self.get_run_by_id(dagster_run.run_id)\n\n field_diff = _check_run_equality(dagster_run, candidate_run) # type: ignore # (possible none)\n\n if field_diff:\n raise DagsterRunConflict(\n "Found conflicting existing run with same id {run_id}. 
Runs differ in:"\n "\\n{field_diff}".format(\n run_id=dagster_run.run_id,\n field_diff=_format_field_diff(field_diff),\n ),\n )\n return candidate_run # type: ignore # (possible none)\n\n if self.has_run(dagster_run.run_id):\n return get_run()\n\n try:\n return self._run_storage.add_run(dagster_run)\n except DagsterRunAlreadyExists:\n return get_run()\n\n @traced\n def add_run(self, dagster_run: DagsterRun) -> DagsterRun:\n return self._run_storage.add_run(dagster_run)\n\n @traced\n def add_snapshot(\n self,\n snapshot: Union["JobSnapshot", "ExecutionPlanSnapshot"],\n snapshot_id: Optional[str] = None,\n ) -> None:\n return self._run_storage.add_snapshot(snapshot, snapshot_id)\n\n @traced\n def handle_run_event(self, run_id: str, event: "DagsterEvent") -> None:\n return self._run_storage.handle_run_event(run_id, event)\n\n @traced\n def add_run_tags(self, run_id: str, new_tags: Mapping[str, str]) -> None:\n return self._run_storage.add_run_tags(run_id, new_tags)\n\n @traced\n def has_run(self, run_id: str) -> bool:\n return self._run_storage.has_run(run_id)\n\n @traced\n def get_runs(\n self,\n filters: Optional[RunsFilter] = None,\n cursor: Optional[str] = None,\n limit: Optional[int] = None,\n bucket_by: Optional[Union[JobBucket, TagBucket]] = None,\n ) -> Sequence[DagsterRun]:\n return self._run_storage.get_runs(filters, cursor, limit, bucket_by)\n\n @traced\n def get_run_ids(\n self,\n filters: Optional[RunsFilter] = None,\n cursor: Optional[str] = None,\n limit: Optional[int] = None,\n ) -> Sequence[str]:\n return self._run_storage.get_run_ids(filters, cursor=cursor, limit=limit)\n\n @traced\n def get_runs_count(self, filters: Optional[RunsFilter] = None) -> int:\n return self._run_storage.get_runs_count(filters)\n\n
[docs] @public\n @traced\n def get_run_records(\n self,\n filters: Optional[RunsFilter] = None,\n limit: Optional[int] = None,\n order_by: Optional[str] = None,\n ascending: bool = False,\n cursor: Optional[str] = None,\n bucket_by: Optional[Union[JobBucket, TagBucket]] = None,\n ) -> Sequence[RunRecord]:\n """Return a list of run records stored in the run storage, sorted by the given column in the given order.\n\n Args:\n filters (Optional[RunsFilter]): The filter by which to filter runs.\n limit (Optional[int]): Number of results to get. Defaults to infinite.\n order_by (Optional[str]): Name of the column to sort by. Defaults to id.\n ascending (Optional[bool]): Sort the result in ascending order if True, descending\n otherwise. Defaults to descending.\n\n Returns:\n List[RunRecord]: List of run records stored in the run storage.\n """\n return self._run_storage.get_run_records(\n filters, limit, order_by, ascending, cursor, bucket_by\n )
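# Illustrative usage only: filtering run records by status. Assumes the public
# `RunsFilter` and `DagsterRunStatus` exports; the filter values are hypothetical.
from dagster import DagsterInstance, DagsterRunStatus, RunsFilter

instance = DagsterInstance.ephemeral()
failed_records = instance.get_run_records(
    filters=RunsFilter(statuses=[DagsterRunStatus.FAILURE]),
    limit=10,
    ascending=False,  # most recent first (the default ordering)
)
for record in failed_records:
    print(record.dagster_run.run_id)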
\n\n @traced\n def get_run_partition_data(self, runs_filter: RunsFilter) -> Sequence[RunPartitionData]:\n """Get run partition data for a given partitioned job."""\n return self._run_storage.get_run_partition_data(runs_filter)\n\n def wipe(self) -> None:\n self._run_storage.wipe()\n self._event_storage.wipe()\n\n
[docs] @public\n @traced\n def delete_run(self, run_id: str) -> None:\n """Delete a run and all events generated by that run from storage.\n\n Args:\n run_id (str): The id of the run to delete.\n """\n self._run_storage.delete_run(run_id)\n self._event_storage.delete_events(run_id)
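# Illustrative usage only: deleting a run removes both the run row and its event log
# entries, matching the two storage calls above. "stale-run-id" is a placeholder.
from dagster import DagsterInstance

instance = DagsterInstance.ephemeral()
stale_run_id = "stale-run-id"
if instance.has_run(stale_run_id):
    instance.delete_run(stale_run_id)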
\n\n # event storage\n @traced\n def logs_after(\n self,\n run_id: str,\n cursor: Optional[int] = None,\n of_type: Optional["DagsterEventType"] = None,\n limit: Optional[int] = None,\n ) -> Sequence["EventLogEntry"]:\n return self._event_storage.get_logs_for_run(\n run_id,\n cursor=cursor,\n of_type=of_type,\n limit=limit,\n )\n\n @traced\n def all_logs(\n self,\n run_id: str,\n of_type: Optional[Union["DagsterEventType", Set["DagsterEventType"]]] = None,\n ) -> Sequence["EventLogEntry"]:\n return self._event_storage.get_logs_for_run(run_id, of_type=of_type)\n\n @traced\n def get_records_for_run(\n self,\n run_id: str,\n cursor: Optional[str] = None,\n of_type: Optional[Union["DagsterEventType", Set["DagsterEventType"]]] = None,\n limit: Optional[int] = None,\n ascending: bool = True,\n ) -> "EventLogConnection":\n return self._event_storage.get_records_for_run(run_id, cursor, of_type, limit, ascending)\n\n def watch_event_logs(self, run_id: str, cursor: Optional[str], cb: "EventHandlerFn") -> None:\n return self._event_storage.watch(run_id, cursor, cb)\n\n def end_watch_event_logs(self, run_id: str, cb: "EventHandlerFn") -> None:\n return self._event_storage.end_watch(run_id, cb)\n\n # asset storage\n\n @traced\n def can_cache_asset_status_data(self) -> bool:\n return self._event_storage.can_cache_asset_status_data()\n\n @traced\n def update_asset_cached_status_data(\n self, asset_key: AssetKey, cache_values: "AssetStatusCacheValue"\n ) -> None:\n self._event_storage.update_asset_cached_status_data(asset_key, cache_values)\n\n @traced\n def wipe_asset_cached_status(self, asset_keys: Sequence[AssetKey]) -> None:\n check.list_param(asset_keys, "asset_keys", of_type=AssetKey)\n for asset_key in asset_keys:\n self._event_storage.wipe_asset_cached_status(asset_key)\n\n @traced\n def all_asset_keys(self) -> Sequence[AssetKey]:\n return self._event_storage.all_asset_keys()\n\n
[docs] @public\n @traced\n def get_asset_keys(\n self,\n prefix: Optional[Sequence[str]] = None,\n limit: Optional[int] = None,\n cursor: Optional[str] = None,\n ) -> Sequence[AssetKey]:\n """Return a filtered subset of asset keys managed by this instance.\n\n Args:\n prefix (Optional[Sequence[str]]): Return only assets having this key prefix.\n limit (Optional[int]): Maximum number of keys to return.\n cursor (Optional[str]): Cursor to use for pagination.\n\n Returns:\n Sequence[AssetKey]: List of asset keys.\n """\n return self._event_storage.get_asset_keys(prefix=prefix, limit=limit, cursor=cursor)
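# Illustrative usage only: listing asset keys under a key prefix. The "warehouse"
# prefix is hypothetical; prefixes match the leading components of multi-part keys.
from dagster import DagsterInstance

instance = DagsterInstance.ephemeral()
for key in instance.get_asset_keys(prefix=["warehouse"], limit=100):
    print(key.to_string())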
\n\n
[docs] @public\n @traced\n def has_asset_key(self, asset_key: AssetKey) -> bool:\n """Return true if this instance manages the given asset key.\n\n Args:\n asset_key (AssetKey): Asset key to check.\n """\n return self._event_storage.has_asset_key(asset_key)
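# Illustrative usage only: checking whether the instance has any record of an asset
# key before querying its history. The asset key is hypothetical.
from dagster import AssetKey, DagsterInstance

instance = DagsterInstance.ephemeral()
if instance.has_asset_key(AssetKey(["warehouse", "users"])):
    print("asset has recorded events")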
\n\n @traced\n def get_latest_materialization_events(\n self, asset_keys: Iterable[AssetKey]\n ) -> Mapping[AssetKey, Optional["EventLogEntry"]]:\n return self._event_storage.get_latest_materialization_events(asset_keys)\n\n
[docs] @public\n @traced\n def get_latest_materialization_event(self, asset_key: AssetKey) -> Optional["EventLogEntry"]:\n """Fetch the latest materialization event for the given asset key.\n\n Args:\n asset_key (AssetKey): Asset key to return materialization for.\n\n Returns:\n Optional[EventLogEntry]: The latest materialization event for the given asset\n key, or `None` if the asset has not been materialized.\n """\n return self._event_storage.get_latest_materialization_events([asset_key]).get(asset_key)
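# Illustrative usage only: the return value is an EventLogEntry (or None), so run id
# and timestamp come from the entry itself. The asset key is hypothetical.
from dagster import AssetKey, DagsterInstance

instance = DagsterInstance.ephemeral()
entry = instance.get_latest_materialization_event(AssetKey(["warehouse", "users"]))
if entry is not None:
    print(entry.run_id, entry.timestamp)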
\n\n
[docs] @public\n @traced\n def get_event_records(\n self,\n event_records_filter: "EventRecordsFilter",\n limit: Optional[int] = None,\n ascending: bool = False,\n ) -> Sequence["EventLogRecord"]:\n """Return a list of event records stored in the event log storage.\n\n Args:\n event_records_filter (Optional[EventRecordsFilter]): the filter by which to filter event\n records.\n limit (Optional[int]): Number of results to get. Defaults to infinite.\n ascending (Optional[bool]): Sort the result in ascending order if True, descending\n otherwise. Defaults to descending.\n\n Returns:\n List[EventLogRecord]: List of event log records stored in the event log storage.\n """\n return self._event_storage.get_event_records(event_records_filter, limit, ascending)
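# Illustrative usage only: pulling the most recent materialization records for one
# asset. Assumes the public `EventRecordsFilter` and `DagsterEventType` exports; the
# asset key is hypothetical.
from dagster import AssetKey, DagsterEventType, DagsterInstance, EventRecordsFilter

instance = DagsterInstance.ephemeral()
records = instance.get_event_records(
    EventRecordsFilter(
        event_type=DagsterEventType.ASSET_MATERIALIZATION,
        asset_key=AssetKey(["warehouse", "users"]),
    ),
    limit=5,
    ascending=False,
)
for record in records:
    print(record.storage_id)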
\n\n
[docs] @public\n @traced\n def get_status_by_partition(\n self,\n asset_key: AssetKey,\n partition_keys: Sequence[str],\n partitions_def: "PartitionsDefinition",\n ) -> Optional[Mapping[str, "AssetPartitionStatus"]]:\n """Get the current status of provided partition_keys for the provided asset.\n\n Args:\n asset_key (AssetKey): The asset to get per-partition status for.\n partition_keys (Sequence[str]): The partitions to get status for.\n partitions_def (PartitionsDefinition): The PartitionsDefinition of the asset to get\n per-partition status for.\n\n Returns:\n Optional[Mapping[str, AssetPartitionStatus]]: status for each partition key\n\n """\n from dagster._core.storage.partition_status_cache import (\n AssetPartitionStatus,\n AssetStatusCacheValue,\n get_and_update_asset_status_cache_value,\n )\n\n cached_value = get_and_update_asset_status_cache_value(self, asset_key, partitions_def)\n\n if isinstance(cached_value, AssetStatusCacheValue):\n materialized_partitions = cached_value.deserialize_materialized_partition_subsets(\n partitions_def\n )\n failed_partitions = cached_value.deserialize_failed_partition_subsets(partitions_def)\n in_progress_partitions = cached_value.deserialize_in_progress_partition_subsets(\n partitions_def\n )\n\n status_by_partition = {}\n\n for partition_key in partition_keys:\n if partition_key in in_progress_partitions:\n status_by_partition[partition_key] = AssetPartitionStatus.IN_PROGRESS\n elif partition_key in failed_partitions:\n status_by_partition[partition_key] = AssetPartitionStatus.FAILED\n elif partition_key in materialized_partitions:\n status_by_partition[partition_key] = AssetPartitionStatus.MATERIALIZED\n else:\n status_by_partition[partition_key] = None\n\n return status_by_partition
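# Illustrative usage only: checking per-partition status for a hypothetical asset
# partitioned with a StaticPartitionsDefinition. Values are AssetPartitionStatus
# members (MATERIALIZED / FAILED / IN_PROGRESS) or None for never-materialized keys.
from dagster import AssetKey, DagsterInstance, StaticPartitionsDefinition

instance = DagsterInstance.ephemeral()
partitions_def = StaticPartitionsDefinition(["us", "eu"])
status_by_partition = instance.get_status_by_partition(
    AssetKey(["warehouse", "users"]),
    partition_keys=["us", "eu"],
    partitions_def=partitions_def,
)
print(status_by_partition)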
\n\n
[docs] @public\n @traced\n def get_asset_records(\n self, asset_keys: Optional[Sequence[AssetKey]] = None\n ) -> Sequence["AssetRecord"]:\n """Return an `AssetRecord` for each of the given asset keys.\n\n Args:\n asset_keys (Optional[Sequence[AssetKey]]): List of asset keys to retrieve records for.\n\n Returns:\n Sequence[AssetRecord]: List of asset records.\n """\n return self._event_storage.get_asset_records(asset_keys)
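# Illustrative usage only: asset records bundle the latest stored state per asset key;
# the `asset_entry.asset_key` access assumes the AssetRecord/AssetEntry storage types.
from dagster import DagsterInstance

instance = DagsterInstance.ephemeral()
for record in instance.get_asset_records():
    print(record.asset_entry.asset_key)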
\n\n @traced\n def get_event_tags_for_asset(\n self,\n asset_key: AssetKey,\n filter_tags: Optional[Mapping[str, str]] = None,\n filter_event_id: Optional[int] = None,\n ) -> Sequence[Mapping[str, str]]:\n """Fetches asset event tags for the given asset key.\n\n If filter_tags is provided, searches for events containing all of the filter tags. Then,\n returns all tags for those events. This enables searching for multipartitioned asset\n partition tags with a fixed dimension value, e.g. all of the tags for events where\n "country" == "US".\n\n If filter_event_id is provided, searches for the event with the provided event_id.\n\n Returns a list of dicts, where each dict is a mapping of tag key to tag value for a\n single event.\n """\n return self._event_storage.get_event_tags_for_asset(asset_key, filter_tags, filter_event_id)\n\n
[docs] @public\n @traced\n def wipe_assets(self, asset_keys: Sequence[AssetKey]) -> None:\n """Wipes asset event history from the event log for the given asset keys.\n\n Args:\n asset_keys (Sequence[AssetKey]): Asset keys to wipe.\n """\n check.list_param(asset_keys, "asset_keys", of_type=AssetKey)\n for asset_key in asset_keys:\n self._event_storage.wipe_asset(asset_key)
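# Illustrative usage only: wiping clears the stored event history for the given asset
# keys; it does not touch anything the assets produced in external systems.
from dagster import AssetKey, DagsterInstance

instance = DagsterInstance.ephemeral()
instance.wipe_assets([AssetKey(["warehouse", "users"]), AssetKey(["warehouse", "orders"])])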
\n\n @traced\n def get_materialization_count_by_partition(\n self, asset_keys: Sequence[AssetKey], after_cursor: Optional[int] = None\n ) -> Mapping[AssetKey, Mapping[str, int]]:\n return self._event_storage.get_materialization_count_by_partition(asset_keys, after_cursor)\n\n @traced\n def get_materialized_partitions(\n self,\n asset_key: AssetKey,\n before_cursor: Optional[int] = None,\n after_cursor: Optional[int] = None,\n ) -> Set[str]:\n return self._event_storage.get_materialized_partitions(\n asset_key, before_cursor=before_cursor, after_cursor=after_cursor\n )\n\n @traced\n def get_latest_storage_id_by_partition(\n self, asset_key: AssetKey, event_type: "DagsterEventType"\n ) -> Mapping[str, int]:\n """Fetch the latest materialization storage id for each partition for a given asset key.\n\n Returns a mapping of partition to storage id.\n """\n return self._event_storage.get_latest_storage_id_by_partition(asset_key, event_type)
[docs] @public\n @traced\n def get_dynamic_partitions(self, partitions_def_name: str) -> Sequence[str]:\n """Get the set of partition keys for the specified :py:class:`DynamicPartitionsDefinition`.\n\n Args:\n partitions_def_name (str): The name of the `DynamicPartitionsDefinition`.\n """\n check.str_param(partitions_def_name, "partitions_def_name")\n return self._event_storage.get_dynamic_partitions(partitions_def_name)
\n\n
[docs] @public\n @traced\n def add_dynamic_partitions(\n self, partitions_def_name: str, partition_keys: Sequence[str]\n ) -> None:\n """Add partitions to the specified :py:class:`DynamicPartitionsDefinition` idempotently.\n Does not add any partitions that already exist.\n\n Args:\n partitions_def_name (str): The name of the `DynamicPartitionsDefinition`.\n partition_keys (Sequence[str]): Partition keys to add.\n """\n from dagster._core.definitions.partition import (\n raise_error_on_invalid_partition_key_substring,\n )\n\n check.str_param(partitions_def_name, "partitions_def_name")\n check.sequence_param(partition_keys, "partition_keys", of_type=str)\n if isinstance(partition_keys, str):\n # Guard against a single string being passed in `partition_keys`\n raise DagsterInvalidInvocationError("partition_keys must be a sequence of strings")\n raise_error_on_invalid_partition_key_substring(partition_keys)\n return self._event_storage.add_dynamic_partitions(partitions_def_name, partition_keys)
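# Illustrative usage only: registering partition keys for a DynamicPartitionsDefinition
# by name and reading them back. "regions" is a hypothetical partitions definition name.
from dagster import DagsterInstance

instance = DagsterInstance.ephemeral()
instance.add_dynamic_partitions("regions", ["us", "eu"])
instance.add_dynamic_partitions("regions", ["us"])  # idempotent: "us" is not added twice
print(instance.get_dynamic_partitions("regions"))  # contains "us" and "eu"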
\n\n
[docs] @public\n @traced\n def delete_dynamic_partition(self, partitions_def_name: str, partition_key: str) -> None:\n """Delete a partition for the specified :py:class:`DynamicPartitionsDefinition`.\n If the partition does not exist, exits silently.\n\n Args:\n partitions_def_name (str): The name of the `DynamicPartitionsDefinition`.\n partition_key (str): Partition key to delete.\n """\n check.str_param(partitions_def_name, "partitions_def_name")\n check.str_param(partition_key, "partition_key")\n self._event_storage.delete_dynamic_partition(partitions_def_name, partition_key)
\n\n
[docs] @public\n @traced\n def has_dynamic_partition(self, partitions_def_name: str, partition_key: str) -> bool:\n """Check if a partition key exists for the :py:class:`DynamicPartitionsDefinition`.\n\n Args:\n partitions_def_name (str): The name of the `DynamicPartitionsDefinition`.\n partition_key (str): Partition key to check.\n """\n check.str_param(partitions_def_name, "partitions_def_name")\n check.str_param(partition_key, "partition_key")\n return self._event_storage.has_dynamic_partition(partitions_def_name, partition_key)
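# Illustrative usage only: membership checks and deletion for dynamic partition keys;
# deleting a key that was never added exits silently, per the docstring above.
from dagster import DagsterInstance

instance = DagsterInstance.ephemeral()
instance.add_dynamic_partitions("regions", ["us", "eu"])
if instance.has_dynamic_partition("regions", "eu"):
    instance.delete_dynamic_partition("regions", "eu")
instance.delete_dynamic_partition("regions", "apac")  # no-op: "apac" was never added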
\n\n # event subscriptions\n\n def _get_yaml_python_handlers(self) -> Sequence[logging.Handler]:\n if self._settings:\n logging_config = self.get_settings("python_logs").get("dagster_handler_config", {})\n\n if logging_config:\n experimental_warning("Handling yaml-defined logging configuration")\n\n # Handlers can only be retrieved from dictConfig configuration if they are attached\n # to a logger. We add a dummy logger to the configuration that allows us to access user\n # defined handlers.\n handler_names = logging_config.get("handlers", {}).keys()\n\n dagster_dummy_logger_name = "dagster_dummy_logger"\n\n processed_dict_conf = {\n "version": 1,\n "disable_existing_loggers": False,\n "loggers": {dagster_dummy_logger_name: {"handlers": handler_names}},\n }\n processed_dict_conf.update(logging_config)\n\n logging.config.dictConfig(processed_dict_conf)\n\n dummy_logger = logging.getLogger(dagster_dummy_logger_name)\n return dummy_logger.handlers\n return []\n\n def _get_event_log_handler(self) -> _EventListenerLogHandler:\n event_log_handler = _EventListenerLogHandler(self)\n event_log_handler.setLevel(10)\n return event_log_handler\n\n def get_handlers(self) -> Sequence[logging.Handler]:\n handlers: List[logging.Handler] = [self._get_event_log_handler()]\n handlers.extend(self._get_yaml_python_handlers())\n return handlers\n\n def store_event(self, event: "EventLogEntry") -> None:\n self._event_storage.store_event(event)\n\n def handle_new_event(self, event: "EventLogEntry") -> None:\n run_id = event.run_id\n\n self._event_storage.store_event(event)\n\n if event.is_dagster_event and event.get_dagster_event().is_job_event:\n self._run_storage.handle_run_event(run_id, event.get_dagster_event())\n\n for sub in self._subscribers[run_id]:\n sub(event)\n\n def add_event_listener(self, run_id: str, cb) -> None:\n self._subscribers[run_id].append(cb)\n\n def report_engine_event(\n self,\n message: str,\n dagster_run: Optional[DagsterRun] = None,\n engine_event_data: Optional["EngineEventData"] = None,\n cls: Optional[Type[object]] = None,\n step_key: Optional[str] = None,\n job_name: Optional[str] = None,\n run_id: Optional[str] = None,\n ) -> "DagsterEvent":\n """Report a EngineEvent that occurred outside of a job execution context."""\n from dagster._core.events import DagsterEvent, DagsterEventType, EngineEventData\n\n check.opt_class_param(cls, "cls")\n check.str_param(message, "message")\n check.opt_inst_param(dagster_run, "dagster_run", DagsterRun)\n check.opt_str_param(run_id, "run_id")\n check.opt_str_param(job_name, "job_name")\n\n check.invariant(\n dagster_run or (job_name and run_id),\n "Must include either dagster_run or job_name and run_id",\n )\n\n run_id = run_id if run_id else dagster_run.run_id # type: ignore\n job_name = job_name if job_name else dagster_run.job_name # type: ignore\n\n engine_event_data = check.opt_inst_param(\n engine_event_data,\n "engine_event_data",\n EngineEventData,\n EngineEventData({}),\n )\n\n if cls:\n message = f"[{cls.__name__}] {message}"\n\n log_level = logging.INFO\n if engine_event_data and engine_event_data.error:\n log_level = logging.ERROR\n\n dagster_event = DagsterEvent(\n event_type_value=DagsterEventType.ENGINE_EVENT.value,\n job_name=job_name,\n message=message,\n event_specific_data=engine_event_data,\n step_key=step_key,\n )\n self.report_dagster_event(dagster_event, run_id=run_id, log_level=log_level)\n return dagster_event\n\n def report_dagster_event(\n self,\n dagster_event: "DagsterEvent",\n run_id: str,\n log_level: Union[str, 
int] = logging.INFO,\n ) -> None:\n """Takes a DagsterEvent and stores it in persistent storage for the corresponding DagsterRun."""\n from dagster._core.events.log import EventLogEntry\n\n event_record = EventLogEntry(\n user_message="",\n level=log_level,\n job_name=dagster_event.job_name,\n run_id=run_id,\n error_info=None,\n timestamp=time.time(),\n step_key=dagster_event.step_key,\n dagster_event=dagster_event,\n )\n self.handle_new_event(event_record)\n\n def report_run_canceling(self, run: DagsterRun, message: Optional[str] = None):\n from dagster._core.events import DagsterEvent, DagsterEventType\n\n check.inst_param(run, "run", DagsterRun)\n message = check.opt_str_param(\n message,\n "message",\n "Sending run termination request.",\n )\n canceling_event = DagsterEvent(\n event_type_value=DagsterEventType.PIPELINE_CANCELING.value,\n job_name=run.job_name,\n message=message,\n )\n self.report_dagster_event(canceling_event, run_id=run.run_id)\n\n def report_run_canceled(\n self,\n dagster_run: DagsterRun,\n message: Optional[str] = None,\n ) -> "DagsterEvent":\n from dagster._core.events import DagsterEvent, DagsterEventType\n\n check.inst_param(dagster_run, "dagster_run", DagsterRun)\n\n message = check.opt_str_param(\n message,\n "mesage",\n "This run has been marked as canceled from outside the execution context.",\n )\n\n dagster_event = DagsterEvent(\n event_type_value=DagsterEventType.PIPELINE_CANCELED.value,\n job_name=dagster_run.job_name,\n message=message,\n )\n self.report_dagster_event(dagster_event, run_id=dagster_run.run_id, log_level=logging.ERROR)\n return dagster_event\n\n def report_run_failed(\n self, dagster_run: DagsterRun, message: Optional[str] = None\n ) -> "DagsterEvent":\n from dagster._core.events import DagsterEvent, DagsterEventType\n\n check.inst_param(dagster_run, "dagster_run", DagsterRun)\n\n message = check.opt_str_param(\n message,\n "message",\n "This run has been marked as failed from outside the execution context.",\n )\n\n dagster_event = DagsterEvent(\n event_type_value=DagsterEventType.PIPELINE_FAILURE.value,\n job_name=dagster_run.job_name,\n message=message,\n )\n self.report_dagster_event(dagster_event, run_id=dagster_run.run_id, log_level=logging.ERROR)\n return dagster_event\n\n # directories\n\n def file_manager_directory(self, run_id: str) -> str:\n return self._local_artifact_storage.file_manager_dir(run_id)\n\n def storage_directory(self) -> str:\n return self._local_artifact_storage.storage_dir\n\n def schedules_directory(self) -> str:\n return self._local_artifact_storage.schedules_dir\n\n # Runs coordinator\n\n def submit_run(self, run_id: str, workspace: "IWorkspace") -> DagsterRun:\n """Submit a pipeline run to the coordinator.\n\n This method delegates to the ``RunCoordinator``, configured on the instance, and will\n call its implementation of ``RunCoordinator.submit_run()`` to send the run to the\n coordinator for execution. Runs should be created in the instance (e.g., by calling\n ``DagsterInstance.create_run()``) *before* this method is called, and\n should be in the ``PipelineRunStatus.NOT_STARTED`` state. 
They also must have a non-null\n ExternalPipelineOrigin.\n\n Args:\n run_id (str): The id of the run.\n """\n from dagster._core.host_representation import ExternalJobOrigin\n from dagster._core.run_coordinator import SubmitRunContext\n\n run = self.get_run_by_id(run_id)\n if run is None:\n raise DagsterInvariantViolationError(\n f"Could not load run {run_id} that was passed to submit_run"\n )\n\n check.inst(\n run.external_job_origin,\n ExternalJobOrigin,\n "External pipeline origin must be set for submitted runs",\n )\n check.inst(\n run.job_code_origin,\n JobPythonOrigin,\n "Python origin must be set for submitted runs",\n )\n\n try:\n submitted_run = self.run_coordinator.submit_run(\n SubmitRunContext(run, workspace=workspace)\n )\n except:\n from dagster._core.events import EngineEventData\n\n error = serializable_error_info_from_exc_info(sys.exc_info())\n self.report_engine_event(\n error.message,\n run,\n EngineEventData.engine_error(error),\n )\n self.report_run_failed(run)\n raise\n\n return submitted_run\n\n # Run launcher\n\n def launch_run(self, run_id: str, workspace: "IWorkspace") -> DagsterRun:\n """Launch a pipeline run.\n\n This method is typically called using `instance.submit_run` rather than being invoked\n directly. This method delegates to the ``RunLauncher``, if any, configured on the instance,\n and will call its implementation of ``RunLauncher.launch_run()`` to begin the execution of\n the specified run. Runs should be created in the instance (e.g., by calling\n ``DagsterInstance.create_run()``) *before* this method is called, and should be in the\n ``PipelineRunStatus.NOT_STARTED`` state.\n\n Args:\n run_id (str): The id of the run the launch.\n """\n from dagster._core.events import DagsterEvent, DagsterEventType, EngineEventData\n from dagster._core.launcher import LaunchRunContext\n\n run = self.get_run_by_id(run_id)\n if run is None:\n raise DagsterInvariantViolationError(\n f"Could not load run {run_id} that was passed to launch_run"\n )\n\n launch_started_event = DagsterEvent(\n event_type_value=DagsterEventType.PIPELINE_STARTING.value,\n job_name=run.job_name,\n )\n self.report_dagster_event(launch_started_event, run_id=run.run_id)\n\n run = self.get_run_by_id(run_id)\n if run is None:\n check.failed(f"Failed to reload run {run_id}")\n\n try:\n self.run_launcher.launch_run(LaunchRunContext(dagster_run=run, workspace=workspace))\n except:\n error = serializable_error_info_from_exc_info(sys.exc_info())\n self.report_engine_event(\n error.message,\n run,\n EngineEventData.engine_error(error),\n )\n self.report_run_failed(run)\n raise\n\n return run\n\n def resume_run(self, run_id: str, workspace: "IWorkspace", attempt_number: int) -> DagsterRun:\n """Resume a pipeline run.\n\n This method should be called on runs which have already been launched, but whose run workers\n have died.\n\n Args:\n run_id (str): The id of the run the launch.\n """\n from dagster._core.events import EngineEventData\n from dagster._core.launcher import ResumeRunContext\n from dagster._daemon.monitoring import RESUME_RUN_LOG_MESSAGE\n\n run = self.get_run_by_id(run_id)\n if run is None:\n raise DagsterInvariantViolationError(\n f"Could not load run {run_id} that was passed to resume_run"\n )\n if run.status not in IN_PROGRESS_RUN_STATUSES:\n raise DagsterInvariantViolationError(\n f"Run {run_id} is not in a state that can be resumed"\n )\n\n self.report_engine_event(\n RESUME_RUN_LOG_MESSAGE,\n run,\n )\n\n try:\n self.run_launcher.resume_run(\n ResumeRunContext(\n dagster_run=run,\n 
workspace=workspace,\n resume_attempt_number=attempt_number,\n )\n )\n except:\n error = serializable_error_info_from_exc_info(sys.exc_info())\n self.report_engine_event(\n error.message,\n run,\n EngineEventData.engine_error(error),\n )\n self.report_run_failed(run)\n raise\n\n return run\n\n def count_resume_run_attempts(self, run_id: str) -> int:\n from dagster._daemon.monitoring import count_resume_run_attempts\n\n return count_resume_run_attempts(self, run_id)\n\n def run_will_resume(self, run_id: str) -> bool:\n if not self.run_monitoring_enabled:\n return False\n return self.count_resume_run_attempts(run_id) < self.run_monitoring_max_resume_run_attempts\n\n # Scheduler\n\n def start_schedule(self, external_schedule: "ExternalSchedule") -> "InstigatorState":\n return self._scheduler.start_schedule(self, external_schedule) # type: ignore\n\n def stop_schedule(\n self,\n schedule_origin_id: str,\n schedule_selector_id: str,\n external_schedule: Optional["ExternalSchedule"],\n ) -> "InstigatorState":\n return self._scheduler.stop_schedule( # type: ignore\n self, schedule_origin_id, schedule_selector_id, external_schedule\n )\n\n def scheduler_debug_info(self) -> "SchedulerDebugInfo":\n from dagster._core.definitions.run_request import InstigatorType\n from dagster._core.scheduler import SchedulerDebugInfo\n\n errors = []\n\n schedules: List[str] = []\n for schedule_state in self.all_instigator_state(instigator_type=InstigatorType.SCHEDULE):\n schedule_info: Mapping[str, Mapping[str, object]] = {\n schedule_state.instigator_name: {\n "status": schedule_state.status.value,\n "cron_schedule": schedule_state.instigator_data.cron_schedule,\n "schedule_origin_id": schedule_state.instigator_origin_id,\n "repository_origin_id": schedule_state.repository_origin_id,\n }\n }\n\n schedules.append(yaml.safe_dump(schedule_info, default_flow_style=False))\n\n return SchedulerDebugInfo(\n scheduler_config_info=self._info_str_for_component("Scheduler", self.scheduler),\n scheduler_info=self.scheduler.debug_info(), # type: ignore\n schedule_storage=schedules,\n errors=errors,\n )\n\n # Schedule / Sensor Storage\n\n def start_sensor(self, external_sensor: "ExternalSensor") -> "InstigatorState":\n from dagster._core.definitions.run_request import InstigatorType\n from dagster._core.scheduler.instigation import (\n InstigatorState,\n InstigatorStatus,\n SensorInstigatorData,\n )\n\n stored_state = self.get_instigator_state(\n external_sensor.get_external_origin_id(), external_sensor.selector_id\n )\n\n computed_state = external_sensor.get_current_instigator_state(stored_state)\n if computed_state.is_running:\n return computed_state\n\n if not stored_state:\n return self.add_instigator_state(\n InstigatorState(\n external_sensor.get_external_origin(),\n InstigatorType.SENSOR,\n InstigatorStatus.RUNNING,\n SensorInstigatorData(min_interval=external_sensor.min_interval_seconds),\n )\n )\n else:\n return self.update_instigator_state(stored_state.with_status(InstigatorStatus.RUNNING))\n\n def stop_sensor(\n self,\n instigator_origin_id: str,\n selector_id: str,\n external_sensor: Optional["ExternalSensor"],\n ) -> "InstigatorState":\n from dagster._core.definitions.run_request import InstigatorType\n from dagster._core.scheduler.instigation import (\n InstigatorState,\n InstigatorStatus,\n SensorInstigatorData,\n )\n\n stored_state = self.get_instigator_state(instigator_origin_id, selector_id)\n computed_state: InstigatorState\n if external_sensor:\n computed_state = 
external_sensor.get_current_instigator_state(stored_state)\n else:\n computed_state = check.not_none(stored_state)\n\n if not computed_state.is_running:\n return computed_state\n\n if not stored_state:\n assert external_sensor\n return self.add_instigator_state(\n InstigatorState(\n external_sensor.get_external_origin(),\n InstigatorType.SENSOR,\n InstigatorStatus.STOPPED,\n SensorInstigatorData(min_interval=external_sensor.min_interval_seconds),\n )\n )\n else:\n return self.update_instigator_state(stored_state.with_status(InstigatorStatus.STOPPED))\n\n @traced\n def all_instigator_state(\n self,\n repository_origin_id: Optional[str] = None,\n repository_selector_id: Optional[str] = None,\n instigator_type: Optional["InstigatorType"] = None,\n instigator_statuses: Optional[Set["InstigatorStatus"]] = None,\n ):\n if not self._schedule_storage:\n check.failed("Schedule storage not available")\n return self._schedule_storage.all_instigator_state(\n repository_origin_id, repository_selector_id, instigator_type, instigator_statuses\n )\n\n @traced\n def get_instigator_state(self, origin_id: str, selector_id: str) -> Optional["InstigatorState"]:\n if not self._schedule_storage:\n check.failed("Schedule storage not available")\n return self._schedule_storage.get_instigator_state(origin_id, selector_id)\n\n def add_instigator_state(self, state: "InstigatorState") -> "InstigatorState":\n if not self._schedule_storage:\n check.failed("Schedule storage not available")\n return self._schedule_storage.add_instigator_state(state)\n\n def update_instigator_state(self, state: "InstigatorState") -> "InstigatorState":\n if not self._schedule_storage:\n check.failed("Schedule storage not available")\n return self._schedule_storage.update_instigator_state(state)\n\n def delete_instigator_state(self, origin_id: str, selector_id: str) -> None:\n return self._schedule_storage.delete_instigator_state(origin_id, selector_id) # type: ignore # (possible none)\n\n @property\n def supports_batch_tick_queries(self) -> bool:\n return self._schedule_storage and self._schedule_storage.supports_batch_queries # type: ignore # (possible none)\n\n @traced\n def get_batch_ticks(\n self,\n selector_ids: Sequence[str],\n limit: Optional[int] = None,\n statuses: Optional[Sequence["TickStatus"]] = None,\n ) -> Mapping[str, Sequence["InstigatorTick"]]:\n if not self._schedule_storage:\n return {}\n return self._schedule_storage.get_batch_ticks(selector_ids, limit, statuses)\n\n @traced\n def get_tick(\n self, origin_id: str, selector_id: str, timestamp: float\n ) -> Optional["InstigatorTick"]:\n matches = self._schedule_storage.get_ticks( # type: ignore # (possible none)\n origin_id, selector_id, before=timestamp + 1, after=timestamp - 1, limit=1\n )\n return matches[0] if len(matches) else None\n\n @traced\n def get_ticks(\n self,\n origin_id: str,\n selector_id: str,\n before: Optional[float] = None,\n after: Optional[float] = None,\n limit: Optional[int] = None,\n statuses: Optional[Sequence["TickStatus"]] = None,\n ) -> Sequence["InstigatorTick"]:\n return self._schedule_storage.get_ticks( # type: ignore # (possible none)\n origin_id, selector_id, before=before, after=after, limit=limit, statuses=statuses\n )\n\n def create_tick(self, tick_data: "TickData") -> "InstigatorTick":\n return check.not_none(self._schedule_storage).create_tick(tick_data)\n\n def update_tick(self, tick: "InstigatorTick"):\n return check.not_none(self._schedule_storage).update_tick(tick)\n\n def purge_ticks(\n self,\n origin_id: str,\n selector_id: 
str,\n before: float,\n tick_statuses: Optional[Sequence["TickStatus"]] = None,\n ) -> None:\n self._schedule_storage.purge_ticks(origin_id, selector_id, before, tick_statuses) # type: ignore # (possible none)\n\n def wipe_all_schedules(self) -> None:\n if self._scheduler:\n self._scheduler.wipe(self) # type: ignore # (possible none)\n\n self._schedule_storage.wipe() # type: ignore # (possible none)\n\n def logs_path_for_schedule(self, schedule_origin_id: str) -> str:\n return self._scheduler.get_logs_path(self, schedule_origin_id) # type: ignore # (possible none)\n\n def __enter__(self) -> Self:\n return self\n\n def __exit__(\n self,\n exception_type: Optional[Type[BaseException]],\n exception_value: Optional[BaseException],\n traceback: Optional[TracebackType],\n ) -> None:\n self.dispose()\n\n # dagster daemon\n def add_daemon_heartbeat(self, daemon_heartbeat: "DaemonHeartbeat") -> None:\n """Called on a regular interval by the daemon."""\n self._run_storage.add_daemon_heartbeat(daemon_heartbeat)\n\n def get_daemon_heartbeats(self) -> Mapping[str, "DaemonHeartbeat"]:\n """Latest heartbeats of all daemon types."""\n return self._run_storage.get_daemon_heartbeats()\n\n def wipe_daemon_heartbeats(self) -> None:\n self._run_storage.wipe_daemon_heartbeats()\n\n def get_required_daemon_types(self) -> Sequence[str]:\n from dagster._core.run_coordinator import QueuedRunCoordinator\n from dagster._core.scheduler import DagsterDaemonScheduler\n from dagster._daemon.asset_daemon import AssetDaemon\n from dagster._daemon.auto_run_reexecution.event_log_consumer import EventLogConsumerDaemon\n from dagster._daemon.daemon import (\n BackfillDaemon,\n MonitoringDaemon,\n SchedulerDaemon,\n SensorDaemon,\n )\n from dagster._daemon.run_coordinator.queued_run_coordinator_daemon import (\n QueuedRunCoordinatorDaemon,\n )\n\n if self.is_ephemeral:\n return []\n\n daemons = [SensorDaemon.daemon_type(), BackfillDaemon.daemon_type()]\n if isinstance(self.scheduler, DagsterDaemonScheduler):\n daemons.append(SchedulerDaemon.daemon_type())\n if isinstance(self.run_coordinator, QueuedRunCoordinator):\n daemons.append(QueuedRunCoordinatorDaemon.daemon_type())\n if self.run_monitoring_enabled:\n daemons.append(MonitoringDaemon.daemon_type())\n if self.run_retries_enabled:\n daemons.append(EventLogConsumerDaemon.daemon_type())\n if self.auto_materialize_enabled:\n daemons.append(AssetDaemon.daemon_type())\n return daemons\n\n def get_daemon_statuses(\n self, daemon_types: Optional[Sequence[str]] = None\n ) -> Mapping[str, "DaemonStatus"]:\n """Get the current status of the daemons. If daemon_types aren't provided, defaults to all\n required types. Returns a dict of daemon type to status.\n """\n from dagster._daemon.controller import get_daemon_statuses\n\n check.opt_sequence_param(daemon_types, "daemon_types", of_type=str)\n return get_daemon_statuses(\n self, daemon_types=daemon_types or self.get_required_daemon_types(), ignore_errors=True\n )\n\n @property\n def daemon_skip_heartbeats_without_errors(self) -> bool:\n # If enabled, daemon threads won't write heartbeats unless they encounter an error. This is\n # enabled in cloud, where we don't need to use heartbeats to check if daemons are running, but\n # do need to surface errors to users. 
This is an optimization to reduce DB writes.\n return False\n\n # backfill\n def get_backfills(\n self,\n status: Optional["BulkActionStatus"] = None,\n cursor: Optional[str] = None,\n limit: Optional[int] = None,\n ) -> Sequence["PartitionBackfill"]:\n return self._run_storage.get_backfills(status=status, cursor=cursor, limit=limit)\n\n def get_backfill(self, backfill_id: str) -> Optional["PartitionBackfill"]:\n return self._run_storage.get_backfill(backfill_id)\n\n def add_backfill(self, partition_backfill: "PartitionBackfill") -> None:\n self._run_storage.add_backfill(partition_backfill)\n\n def update_backfill(self, partition_backfill: "PartitionBackfill") -> None:\n self._run_storage.update_backfill(partition_backfill)\n\n @property\n def should_start_background_run_thread(self) -> bool:\n """Gate on an experimental feature to start a thread that monitors for if the run should be canceled."""\n return False\n\n def get_tick_retention_settings(\n self, instigator_type: "InstigatorType"\n ) -> Mapping["TickStatus", int]:\n from dagster._core.definitions.run_request import InstigatorType\n\n retention_settings = self.get_settings("retention")\n\n if instigator_type == InstigatorType.SCHEDULE:\n tick_settings = retention_settings.get("schedule")\n elif instigator_type == InstigatorType.SENSOR:\n tick_settings = retention_settings.get("sensor")\n elif instigator_type == InstigatorType.AUTO_MATERIALIZE:\n tick_settings = retention_settings.get("auto_materialize")\n else:\n raise Exception(f"Unexpected instigator type {instigator_type}")\n\n default_tick_settings = get_default_tick_retention_settings(instigator_type)\n return get_tick_retention_settings(tick_settings, default_tick_settings)\n\n def inject_env_vars(self, location_name: Optional[str]) -> None:\n if not self._secrets_loader:\n return\n\n new_env = self._secrets_loader.get_secrets_for_environment(location_name)\n for k, v in new_env.items():\n os.environ[k] = v\n\n def get_latest_data_version_record(\n self,\n key: AssetKey,\n is_source: Optional[bool] = None,\n partition_key: Optional[str] = None,\n before_cursor: Optional[int] = None,\n after_cursor: Optional[int] = None,\n ) -> Optional["EventLogRecord"]:\n from dagster._core.event_api import EventRecordsFilter\n from dagster._core.events import DagsterEventType\n\n # When we cant don't know whether the requested key corresponds to a source or regular\n # asset, we need to retrieve both the latest observation and materialization for all assets.\n # If there is a materialization, it's a regular asset and we can ignore the observation.\n\n observation: Optional[EventLogRecord] = None\n if is_source or is_source is None:\n observations = self.get_event_records(\n EventRecordsFilter(\n event_type=DagsterEventType.ASSET_OBSERVATION,\n asset_key=key,\n asset_partitions=[partition_key] if partition_key else None,\n before_cursor=before_cursor,\n after_cursor=after_cursor,\n ),\n limit=1,\n )\n observation = next(iter(observations), None)\n\n materialization: Optional[EventLogRecord] = None\n if not is_source:\n materializations = self.get_event_records(\n EventRecordsFilter(\n event_type=DagsterEventType.ASSET_MATERIALIZATION,\n asset_key=key,\n asset_partitions=[partition_key] if partition_key else None,\n before_cursor=before_cursor,\n after_cursor=after_cursor,\n ),\n limit=1,\n )\n materialization = next(iter(materializations), None)\n\n return materialization or observation\n\n
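# Editor's note (hedged sketch, not part of the generated module source): a minimal
# illustration of querying the latest data-version record for an asset via the method
# above. It assumes a DAGSTER_HOME-backed instance; "raw_logs" is a hypothetical key.
from dagster import AssetKey, DagsterInstance

instance = DagsterInstance.get()
record = instance.get_latest_data_version_record(AssetKey("raw_logs"))
# None is returned when the asset has never been materialized or observed.
print(record.event_log_entry.timestamp if record else None)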
[docs] @public\n def get_latest_materialization_code_versions(\n self, asset_keys: Iterable[AssetKey]\n ) -> Mapping[AssetKey, Optional[str]]:\n """Returns the code version used for the latest materialization of each of the provided\n assets.\n\n Args:\n asset_keys (Iterable[AssetKey]): The asset keys to find latest materialization code\n versions for.\n\n Returns:\n Mapping[AssetKey, Optional[str]]: A dictionary with a key for each of the provided asset\n keys. The values will be None if the asset has no materializations. If an asset does\n not have a code version explicitly assigned to its definitions, but was\n materialized, Dagster assigns the run ID as its code version.\n """\n result: Dict[AssetKey, Optional[str]] = {}\n latest_materialization_events = self.get_latest_materialization_events(asset_keys)\n for asset_key in asset_keys:\n event_log_entry = latest_materialization_events.get(asset_key)\n if event_log_entry is None:\n result[asset_key] = None\n else:\n data_provenance = extract_data_provenance_from_entry(event_log_entry)\n result[asset_key] = data_provenance.code_version if data_provenance else None\n\n return result
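# Editor's note (hedged sketch, not part of the generated module source): one way the
# public method above might be called. Assumes a DAGSTER_HOME-backed instance; the
# asset key "my_asset" is hypothetical.
from dagster import AssetKey, DagsterInstance

instance = DagsterInstance.get()
code_versions = instance.get_latest_materialization_code_versions([AssetKey("my_asset")])
# Values are None for assets that have never been materialized.
print(code_versions)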
\n\n @experimental\n def report_runless_asset_event(\n self,\n asset_event: Union["AssetMaterialization", "AssetObservation", "AssetCheckEvaluation"],\n ):\n """Record an event log entry related to assets that does not belong to a Dagster run."""\n from dagster._core.events import (\n AssetMaterialization,\n AssetObservationData,\n DagsterEvent,\n DagsterEventType,\n StepMaterializationData,\n )\n\n if isinstance(asset_event, AssetMaterialization):\n event_type_value = DagsterEventType.ASSET_MATERIALIZATION.value\n data_payload = StepMaterializationData(asset_event)\n elif isinstance(asset_event, AssetCheckEvaluation):\n event_type_value = DagsterEventType.ASSET_CHECK_EVALUATION.value\n data_payload = asset_event\n elif isinstance(asset_event, AssetObservation):\n event_type_value = DagsterEventType.ASSET_OBSERVATION.value\n data_payload = AssetObservationData(asset_event)\n else:\n raise DagsterInvariantViolationError(\n f"Received unexpected asset event type {asset_event}, expected"\n " AssetMaterialization, AssetObservation or AssetCheckEvaluation"\n )\n\n return self.report_dagster_event(\n run_id=RUNLESS_RUN_ID,\n dagster_event=DagsterEvent(\n event_type_value=event_type_value,\n event_specific_data=data_payload,\n job_name=RUNLESS_JOB_NAME,\n ),\n )\n\n def get_asset_check_support(self) -> "AssetCheckInstanceSupport":\n from dagster._core.storage.asset_check_execution_record import AssetCheckInstanceSupport\n\n return (\n AssetCheckInstanceSupport.SUPPORTED\n if self.event_log_storage.supports_asset_checks\n else AssetCheckInstanceSupport.NEEDS_MIGRATION\n )
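# Editor's note (hedged sketch, not part of the generated module source): reporting a
# runless materialization for an asset, e.g. one updated by systems outside Dagster.
# Assumes a DAGSTER_HOME-backed instance; "raw_logs" is a hypothetical asset key, and
# the method is marked @experimental above.
from dagster import AssetMaterialization, DagsterInstance

instance = DagsterInstance.get()
instance.report_runless_asset_event(AssetMaterialization(asset_key="raw_logs"))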
\n
", "current_page_name": "_modules/dagster/_core/instance", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "ref": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.instance.ref

\nimport os\nfrom typing import TYPE_CHECKING, Any, Mapping, NamedTuple, Optional, Sequence, Type\n\nimport yaml\n\nimport dagster._check as check\nfrom dagster._serdes import ConfigurableClassData, class_from_code_pointer, whitelist_for_serdes\n\nfrom .config import DAGSTER_CONFIG_YAML_FILENAME, dagster_instance_config\n\nif TYPE_CHECKING:\n    from dagster._core.instance import DagsterInstance, DagsterInstanceOverrides\n    from dagster._core.launcher.base import RunLauncher\n    from dagster._core.run_coordinator.base import RunCoordinator\n    from dagster._core.scheduler.scheduler import Scheduler\n    from dagster._core.secrets.loader import SecretsLoader\n    from dagster._core.storage.base_storage import DagsterStorage\n    from dagster._core.storage.compute_log_manager import ComputeLogManager\n    from dagster._core.storage.event_log.base import EventLogStorage\n    from dagster._core.storage.root import LocalArtifactStorage\n    from dagster._core.storage.runs.base import RunStorage\n    from dagster._core.storage.schedules.base import ScheduleStorage\n\n\ndef compute_logs_directory(base: str) -> str:\n    return os.path.join(base, "storage")\n\n\ndef _runs_directory(base: str) -> str:\n    return os.path.join(base, "history", "")\n\n\ndef _event_logs_directory(base: str) -> str:\n    return os.path.join(base, "history", "runs", "")\n\n\ndef _schedule_directory(base: str) -> str:\n    return os.path.join(base, "schedules")\n\n\ndef configurable_class_data(config_field: Mapping[str, Any]) -> ConfigurableClassData:\n    return ConfigurableClassData(\n        check.str_elem(config_field, "module"),\n        check.str_elem(config_field, "class"),\n        yaml.dump(check.opt_dict_elem(config_field, "config"), default_flow_style=False),\n    )\n\n\ndef configurable_class_data_or_default(\n    config_value: Mapping[str, Any], field_name: str, default: Optional[ConfigurableClassData]\n) -> Optional[ConfigurableClassData]:\n    return (\n        configurable_class_data(config_value[field_name])\n        if config_value.get(field_name)\n        else default\n    )\n\n\ndef configurable_secrets_loader_data(\n    config_field: Mapping[str, Any], default: Optional[ConfigurableClassData]\n) -> Optional[ConfigurableClassData]:\n    if not config_field:\n        return default\n    elif "custom" in config_field:\n        return configurable_class_data(config_field["custom"])\n    else:\n        return None\n\n\ndef configurable_storage_data(\n    config_field: Mapping[str, Any], defaults: Mapping[str, Optional[ConfigurableClassData]]\n) -> Sequence[Optional[ConfigurableClassData]]:\n    storage_data: ConfigurableClassData\n    run_storage_data: Optional[ConfigurableClassData]\n    event_storage_data: Optional[ConfigurableClassData]\n    schedule_storage_data: Optional[ConfigurableClassData]\n\n    if not config_field:\n        storage_data = check.not_none(defaults.get("storage"))\n        run_storage_data = check.not_none(defaults.get("run_storage"))\n        event_storage_data = check.not_none(defaults.get("event_log_storage"))\n        schedule_storage_data = check.not_none(defaults.get("schedule_storage"))\n    elif "postgres" in config_field:\n        config_yaml = yaml.dump(config_field["postgres"], default_flow_style=False)\n        storage_data = ConfigurableClassData(\n            module_name="dagster_postgres",\n            class_name="DagsterPostgresStorage",\n            config_yaml=config_yaml,\n        )\n        # for backwards compatibility\n        run_storage_data = 
ConfigurableClassData(\n            module_name="dagster_postgres",\n            class_name="PostgresRunStorage",\n            config_yaml=config_yaml,\n        )\n        event_storage_data = ConfigurableClassData(\n            module_name="dagster_postgres",\n            class_name="PostgresEventLogStorage",\n            config_yaml=config_yaml,\n        )\n        schedule_storage_data = ConfigurableClassData(\n            module_name="dagster_postgres",\n            class_name="PostgresScheduleStorage",\n            config_yaml=config_yaml,\n        )\n\n    elif "mysql" in config_field:\n        config_yaml = yaml.dump(config_field["mysql"], default_flow_style=False)\n        storage_data = ConfigurableClassData(\n            module_name="dagster_mysql",\n            class_name="DagsterMySQLStorage",\n            config_yaml=config_yaml,\n        )\n        # for backwards compatibility\n        run_storage_data = ConfigurableClassData(\n            module_name="dagster_mysql",\n            class_name="MySQLRunStorage",\n            config_yaml=config_yaml,\n        )\n        event_storage_data = ConfigurableClassData(\n            module_name="dagster_mysql",\n            class_name="MySQLEventLogStorage",\n            config_yaml=config_yaml,\n        )\n        schedule_storage_data = ConfigurableClassData(\n            module_name="dagster_mysql",\n            class_name="MySQLScheduleStorage",\n            config_yaml=config_yaml,\n        )\n\n    elif "sqlite" in config_field:\n        base_dir = config_field["sqlite"]["base_dir"]\n        storage_data = ConfigurableClassData(\n            "dagster._core.storage.sqlite_storage",\n            "DagsterSqliteStorage",\n            yaml.dump({"base_dir": base_dir}, default_flow_style=False),\n        )\n\n        # Back-compat fo the legacy storage field only works if the base_dir is a string\n        # (env var doesn't work since each storage has a different value for the base_dir field)\n        if isinstance(base_dir, str):\n            run_storage_data = ConfigurableClassData(\n                "dagster._core.storage.runs",\n                "SqliteRunStorage",\n                yaml.dump({"base_dir": _runs_directory(base_dir)}, default_flow_style=False),\n            )\n\n            event_storage_data = ConfigurableClassData(\n                "dagster._core.storage.event_log",\n                "SqliteEventLogStorage",\n                yaml.dump({"base_dir": _event_logs_directory(base_dir)}, default_flow_style=False),\n            )\n\n            schedule_storage_data = ConfigurableClassData(\n                "dagster._core.storage.schedules",\n                "SqliteScheduleStorage",\n                yaml.dump({"base_dir": _schedule_directory(base_dir)}, default_flow_style=False),\n            )\n        else:\n            run_storage_data = None\n            event_storage_data = None\n            schedule_storage_data = None\n    else:\n        storage_data = configurable_class_data(config_field["custom"])\n        storage_config_yaml = yaml.dump(\n            {\n                "module_name": storage_data.module_name,\n                "class_name": storage_data.class_name,\n                "config_yaml": storage_data.config_yaml,\n            },\n            default_flow_style=False,\n        )\n        run_storage_data = ConfigurableClassData(\n            "dagster._core.storage.legacy_storage", "LegacyRunStorage", storage_config_yaml\n        )\n        event_storage_data = ConfigurableClassData(\n            
"dagster._core.storage.legacy_storage", "LegacyEventLogStorage", storage_config_yaml\n        )\n        schedule_storage_data = ConfigurableClassData(\n            "dagster._core.storage.legacy_storage", "LegacyScheduleStorage", storage_config_yaml\n        )\n\n    return [storage_data, run_storage_data, event_storage_data, schedule_storage_data]\n\n\n
[docs]@whitelist_for_serdes\nclass InstanceRef(\n NamedTuple(\n "_InstanceRef",\n [\n ("local_artifact_storage_data", ConfigurableClassData),\n ("compute_logs_data", ConfigurableClassData),\n ("scheduler_data", Optional[ConfigurableClassData]),\n ("run_coordinator_data", Optional[ConfigurableClassData]),\n ("run_launcher_data", Optional[ConfigurableClassData]),\n ("settings", Mapping[str, object]),\n # Required for backwards compatibility, but going forward will be unused by new versions\n # of DagsterInstance, which instead will instead grab the constituent storages from the\n # unified `storage_data`, if it is populated.\n ("run_storage_data", Optional[ConfigurableClassData]),\n ("event_storage_data", Optional[ConfigurableClassData]),\n ("schedule_storage_data", Optional[ConfigurableClassData]),\n ("custom_instance_class_data", Optional[ConfigurableClassData]),\n # unified storage field\n ("storage_data", Optional[ConfigurableClassData]),\n ("secrets_loader_data", Optional[ConfigurableClassData]),\n ],\n )\n):\n """Serializable representation of a :py:class:`DagsterInstance`.\n\n Users should not instantiate this class directly.\n """\n\n def __new__(\n cls,\n local_artifact_storage_data: ConfigurableClassData,\n compute_logs_data: ConfigurableClassData,\n scheduler_data: Optional[ConfigurableClassData],\n run_coordinator_data: Optional[ConfigurableClassData],\n run_launcher_data: Optional[ConfigurableClassData],\n settings: Mapping[str, object],\n run_storage_data: Optional[ConfigurableClassData],\n event_storage_data: Optional[ConfigurableClassData],\n schedule_storage_data: Optional[ConfigurableClassData],\n custom_instance_class_data: Optional[ConfigurableClassData] = None,\n storage_data: Optional[ConfigurableClassData] = None,\n secrets_loader_data: Optional[ConfigurableClassData] = None,\n ):\n return super(cls, InstanceRef).__new__(\n cls,\n local_artifact_storage_data=check.inst_param(\n local_artifact_storage_data, "local_artifact_storage_data", ConfigurableClassData\n ),\n compute_logs_data=check.inst_param(\n compute_logs_data, "compute_logs_data", ConfigurableClassData\n ),\n scheduler_data=check.opt_inst_param(\n scheduler_data, "scheduler_data", ConfigurableClassData\n ),\n run_coordinator_data=check.opt_inst_param(\n run_coordinator_data, "run_coordinator_data", ConfigurableClassData\n ),\n run_launcher_data=check.opt_inst_param(\n run_launcher_data, "run_launcher_data", ConfigurableClassData\n ),\n settings=check.opt_mapping_param(settings, "settings", key_type=str),\n run_storage_data=check.opt_inst_param(\n run_storage_data, "run_storage_data", ConfigurableClassData\n ),\n event_storage_data=check.opt_inst_param(\n event_storage_data, "event_storage_data", ConfigurableClassData\n ),\n schedule_storage_data=check.opt_inst_param(\n schedule_storage_data, "schedule_storage_data", ConfigurableClassData\n ),\n custom_instance_class_data=check.opt_inst_param(\n custom_instance_class_data,\n "instance_class",\n ConfigurableClassData,\n ),\n storage_data=check.opt_inst_param(storage_data, "storage_data", ConfigurableClassData),\n secrets_loader_data=check.opt_inst_param(\n secrets_loader_data, "secrets_loader_data", ConfigurableClassData\n ),\n )\n\n @staticmethod\n def config_defaults(base_dir: str) -> Mapping[str, Optional[ConfigurableClassData]]:\n default_run_storage_data = ConfigurableClassData(\n "dagster._core.storage.runs",\n "SqliteRunStorage",\n yaml.dump({"base_dir": _runs_directory(base_dir)}, default_flow_style=False),\n )\n default_event_log_storage_data = 
ConfigurableClassData(\n "dagster._core.storage.event_log",\n "SqliteEventLogStorage",\n yaml.dump({"base_dir": _event_logs_directory(base_dir)}, default_flow_style=False),\n )\n default_schedule_storage_data = ConfigurableClassData(\n "dagster._core.storage.schedules",\n "SqliteScheduleStorage",\n yaml.dump({"base_dir": _schedule_directory(base_dir)}, default_flow_style=False),\n )\n\n return {\n "local_artifact_storage": ConfigurableClassData(\n "dagster._core.storage.root",\n "LocalArtifactStorage",\n yaml.dump({"base_dir": base_dir}, default_flow_style=False),\n ),\n "storage": ConfigurableClassData(\n "dagster._core.storage.sqlite_storage",\n "DagsterSqliteStorage",\n yaml.dump({"base_dir": base_dir}, default_flow_style=False),\n ),\n "compute_logs": ConfigurableClassData(\n "dagster._core.storage.local_compute_log_manager",\n "LocalComputeLogManager",\n yaml.dump({"base_dir": compute_logs_directory(base_dir)}, default_flow_style=False),\n ),\n "scheduler": ConfigurableClassData(\n "dagster._core.scheduler",\n "DagsterDaemonScheduler",\n yaml.dump({}),\n ),\n "run_coordinator": ConfigurableClassData(\n "dagster._core.run_coordinator", "DefaultRunCoordinator", yaml.dump({})\n ),\n "run_launcher": ConfigurableClassData(\n "dagster",\n "DefaultRunLauncher",\n yaml.dump({}),\n ),\n # For back-compat, the default is actually set in the secrets_loader property above,\n # so that old clients loading new config don't try to load a class that they\n # don't recognize\n "secrets": None,\n # LEGACY DEFAULTS\n "run_storage": default_run_storage_data,\n "event_log_storage": default_event_log_storage_data,\n "schedule_storage": default_schedule_storage_data,\n }\n\n @staticmethod\n def from_dir(\n base_dir: str,\n *,\n config_dir: Optional[str] = None,\n config_filename: str = DAGSTER_CONFIG_YAML_FILENAME,\n overrides: Optional["DagsterInstanceOverrides"] = None,\n ) -> "InstanceRef":\n if config_dir is None:\n config_dir = base_dir\n\n overrides = check.opt_mapping_param(overrides, "overrides")\n config_value, custom_instance_class = dagster_instance_config(\n config_dir, config_filename=config_filename, overrides=overrides\n )\n\n if custom_instance_class:\n config_keys = set(custom_instance_class.config_schema().keys()) # type: ignore # (undefined method)\n custom_instance_class_config = {\n key: val for key, val in config_value.items() if key in config_keys\n }\n custom_instance_class_data = ConfigurableClassData(\n config_value["instance_class"]["module"],\n config_value["instance_class"]["class"],\n yaml.dump(custom_instance_class_config, default_flow_style=False),\n )\n defaults = custom_instance_class.config_defaults(base_dir) # type: ignore # (undefined method)\n else:\n custom_instance_class_data = None\n defaults = InstanceRef.config_defaults(base_dir)\n\n local_artifact_storage_data = configurable_class_data_or_default(\n config_value, "local_artifact_storage", defaults["local_artifact_storage"]\n )\n\n compute_logs_data = configurable_class_data_or_default(\n config_value,\n "compute_logs",\n defaults["compute_logs"],\n )\n\n if (\n config_value.get("run_storage")\n or config_value.get("event_log_storage")\n or config_value.get("schedule_storage")\n ):\n # using legacy config, specifying config for each of the constituent storages, make sure\n # to create a composite storage\n run_storage_data = configurable_class_data_or_default(\n config_value, "run_storage", defaults["run_storage"]\n )\n event_storage_data = configurable_class_data_or_default(\n config_value, "event_log_storage", 
defaults["event_log_storage"]\n )\n schedule_storage_data = configurable_class_data_or_default(\n config_value, "schedule_storage", defaults["schedule_storage"]\n )\n storage_data = ConfigurableClassData(\n module_name="dagster._core.storage.legacy_storage",\n class_name="CompositeStorage",\n config_yaml=yaml.dump(\n {\n "run_storage": {\n "module_name": run_storage_data.module_name, # type: ignore # (possible none)\n "class_name": run_storage_data.class_name, # type: ignore # (possible none)\n "config_yaml": run_storage_data.config_yaml, # type: ignore # (possible none)\n },\n "event_log_storage": {\n "module_name": event_storage_data.module_name, # type: ignore # (possible none)\n "class_name": event_storage_data.class_name, # type: ignore # (possible none)\n "config_yaml": event_storage_data.config_yaml, # type: ignore # (possible none)\n },\n "schedule_storage": {\n "module_name": schedule_storage_data.module_name, # type: ignore # (possible none)\n "class_name": schedule_storage_data.class_name, # type: ignore # (possible none)\n "config_yaml": schedule_storage_data.config_yaml, # type: ignore # (possible none)\n },\n },\n default_flow_style=False,\n ),\n )\n\n else:\n [\n storage_data,\n run_storage_data,\n event_storage_data,\n schedule_storage_data,\n ] = configurable_storage_data(\n config_value.get("storage"), defaults # type: ignore # (possible none)\n )\n\n scheduler_data = configurable_class_data_or_default(\n config_value, "scheduler", defaults["scheduler"]\n )\n\n if config_value.get("run_queue"):\n run_coordinator_data = configurable_class_data(\n {\n "module": "dagster.core.run_coordinator",\n "class": "QueuedRunCoordinator",\n "config": config_value["run_queue"],\n }\n )\n else:\n run_coordinator_data = configurable_class_data_or_default(\n config_value,\n "run_coordinator",\n defaults["run_coordinator"],\n )\n\n run_launcher_data = configurable_class_data_or_default(\n config_value,\n "run_launcher",\n defaults["run_launcher"],\n )\n\n secrets_loader_data = configurable_secrets_loader_data(\n config_value.get("secrets"), defaults["secrets"] # type: ignore # (possible none)\n )\n\n settings_keys = {\n "telemetry",\n "python_logs",\n "run_monitoring",\n "run_retries",\n "code_servers",\n "retention",\n "sensors",\n "schedules",\n "nux",\n "auto_materialize",\n }\n settings = {key: config_value.get(key) for key in settings_keys if config_value.get(key)}\n\n return InstanceRef(\n local_artifact_storage_data=local_artifact_storage_data, # type: ignore # (possible none)\n run_storage_data=run_storage_data,\n event_storage_data=event_storage_data,\n compute_logs_data=compute_logs_data, # type: ignore # (possible none)\n schedule_storage_data=schedule_storage_data,\n scheduler_data=scheduler_data,\n run_coordinator_data=run_coordinator_data,\n run_launcher_data=run_launcher_data,\n settings=settings,\n custom_instance_class_data=custom_instance_class_data,\n storage_data=storage_data,\n secrets_loader_data=secrets_loader_data,\n )\n\n @staticmethod\n def from_dict(instance_ref_dict):\n def value_for_ref_item(k, v):\n if v is None:\n return None\n if k == "settings":\n return v\n return ConfigurableClassData(*v)\n\n return InstanceRef(**{k: value_for_ref_item(k, v) for k, v in instance_ref_dict.items()})\n\n @property\n def local_artifact_storage(self) -> "LocalArtifactStorage":\n from dagster._core.storage.root import LocalArtifactStorage\n\n return self.local_artifact_storage_data.rehydrate(as_type=LocalArtifactStorage)\n\n @property\n def storage(self) -> 
Optional["DagsterStorage"]:\n from dagster._core.storage.base_storage import DagsterStorage\n\n return self.storage_data.rehydrate(as_type=DagsterStorage) if self.storage_data else None\n\n @property\n def run_storage(self) -> Optional["RunStorage"]:\n from dagster._core.storage.runs.base import RunStorage\n\n return (\n self.run_storage_data.rehydrate(as_type=RunStorage) if self.run_storage_data else None\n )\n\n @property\n def event_storage(self) -> Optional["EventLogStorage"]:\n from dagster._core.storage.event_log.base import EventLogStorage\n\n return (\n self.event_storage_data.rehydrate(as_type=EventLogStorage)\n if self.event_storage_data\n else None\n )\n\n @property\n def schedule_storage(self) -> Optional["ScheduleStorage"]:\n from dagster._core.storage.schedules.base import ScheduleStorage\n\n return (\n self.schedule_storage_data.rehydrate(as_type=ScheduleStorage)\n if self.schedule_storage_data\n else None\n )\n\n @property\n def compute_log_manager(self) -> "ComputeLogManager":\n from dagster._core.storage.compute_log_manager import ComputeLogManager\n\n return self.compute_logs_data.rehydrate(as_type=ComputeLogManager)\n\n @property\n def scheduler(self) -> Optional["Scheduler"]:\n from dagster._core.scheduler.scheduler import Scheduler\n\n return self.scheduler_data.rehydrate(as_type=Scheduler) if self.scheduler_data else None\n\n @property\n def run_coordinator(self) -> Optional["RunCoordinator"]:\n from dagster._core.run_coordinator.base import RunCoordinator\n\n return (\n self.run_coordinator_data.rehydrate(as_type=RunCoordinator)\n if self.run_coordinator_data\n else None\n )\n\n @property\n def run_launcher(self) -> Optional["RunLauncher"]:\n from dagster._core.launcher.base import RunLauncher\n\n return (\n self.run_launcher_data.rehydrate(as_type=RunLauncher)\n if self.run_launcher_data\n else None\n )\n\n @property\n def secrets_loader(self) -> Optional["SecretsLoader"]:\n from dagster._core.secrets.loader import SecretsLoader\n\n # Defining a default here rather than in stored config to avoid\n # back-compat issues when loading the config on older versions where\n # EnvFileLoader was not defined\n return (\n self.secrets_loader_data.rehydrate(as_type=SecretsLoader)\n if self.secrets_loader_data\n else None\n )\n\n @property\n def custom_instance_class(self) -> Type["DagsterInstance"]:\n return ( # type: ignore # (ambiguous return type)\n class_from_code_pointer(\n self.custom_instance_class_data.module_name,\n self.custom_instance_class_data.class_name,\n )\n if self.custom_instance_class_data\n else None\n )\n\n @property\n def custom_instance_class_config(self) -> Mapping[str, Any]:\n return (\n self.custom_instance_class_data.config_dict if self.custom_instance_class_data else {}\n )\n\n def to_dict(self) -> Mapping[str, Any]:\n return self._asdict()
\n
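# Editor's note (hedged sketch, not part of the generated module source): an InstanceRef
# is typically built from a DAGSTER_HOME directory and rehydrated in another process.
# The directory below is hypothetical and would normally contain a dagster.yaml.
from dagster import DagsterInstance
from dagster._core.instance.ref import InstanceRef

ref = InstanceRef.from_dir("/tmp/dagster_home")
instance = DagsterInstance.from_ref(ref)  # reconstructs the instance from the ref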
", "current_page_name": "_modules/dagster/_core/instance/ref", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}, {"link": "../", "title": "dagster._core.instance"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.instance.ref"}, "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.instance"}, "instance_for_test": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.instance_for_test

\nimport os\nimport sys\nimport tempfile\nfrom contextlib import ExitStack, contextmanager\nfrom typing import Any, Iterator, Mapping, Optional\n\nimport yaml\n\nfrom dagster._utils.error import serializable_error_info_from_exc_info\n\nfrom .._utils.env import environ\nfrom .._utils.merger import merge_dicts\nfrom .instance import DagsterInstance\n\n\n
[docs]@contextmanager\ndef instance_for_test(\n overrides: Optional[Mapping[str, Any]] = None,\n set_dagster_home: bool = True,\n temp_dir: Optional[str] = None,\n) -> Iterator[DagsterInstance]:\n """Creates a persistent :py:class:`~dagster.DagsterInstance` available within a context manager.\n\n When a context manager is opened, if no `temp_dir` parameter is set, a new\n temporary directory will be created for the duration of the context\n manager's opening. If the `set_dagster_home` parameter is set to True\n (True by default), the `$DAGSTER_HOME` environment variable will be\n overridden to be this directory (or the directory passed in by `temp_dir`)\n for the duration of the context manager being open.\n\n Args:\n overrides (Optional[Mapping[str, Any]]):\n Config to provide to instance (config format follows that typically found in an `instance.yaml` file).\n set_dagster_home (Optional[bool]):\n If set to True, the `$DAGSTER_HOME` environment variable will be\n overridden to be the directory used by this instance for the\n duration that the context manager is open. Upon the context\n manager closing, the `$DAGSTER_HOME` variable will be re-set to the original value. (Defaults to True).\n temp_dir (Optional[str]):\n The directory to use for storing local artifacts produced by the\n instance. If not set, a temporary directory will be created for\n the duration of the context manager being open, and all artifacts\n will be torn down afterward.\n """\n with ExitStack() as stack:\n if not temp_dir:\n temp_dir = stack.enter_context(tempfile.TemporaryDirectory())\n\n # wait for any grpc processes that created runs during test disposal to finish,\n # since they might also be using this instance's tempdir (and to keep each test\n # isolated / avoid race conditions in newer versions of grpcio when servers are\n # shutting down and spinning up at the same time)\n instance_overrides = merge_dicts(\n {\n "telemetry": {"enabled": False},\n "code_servers": {"wait_for_local_processes_on_shutdown": True},\n },\n (overrides if overrides else {}),\n )\n\n if set_dagster_home:\n stack.enter_context(\n environ({"DAGSTER_HOME": temp_dir, "DAGSTER_DISABLE_TELEMETRY": "yes"})\n )\n\n with open(os.path.join(temp_dir, "dagster.yaml"), "w", encoding="utf8") as fd:\n yaml.dump(instance_overrides, fd, default_flow_style=False)\n\n with DagsterInstance.from_config(temp_dir) as instance:\n try:\n yield instance\n except:\n sys.stderr.write(\n "Test raised an exception, attempting to clean up instance:"\n + serializable_error_info_from_exc_info(sys.exc_info()).to_string()\n + "\\n"\n )\n raise\n finally:\n cleanup_test_instance(instance)
\n\n\ndef cleanup_test_instance(instance: DagsterInstance) -> None:\n # To avoid filesystem contention when we close the temporary directory, wait for\n # all runs to reach a terminal state, and close any subprocesses or threads\n # that might be accessing the run history DB.\n\n # Since the launcher is lazily loaded, we don't need to do anything if it's None\n if instance._run_launcher: # noqa: SLF001\n instance._run_launcher.join() # noqa: SLF001\n
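# Editor's note (hedged usage sketch, not part of the generated module source): the
# context manager above in a test. The override shown is illustrative; any
# dagster.yaml-style settings mapping is accepted.
from dagster._core.instance_for_test import instance_for_test


def test_no_runs_on_fresh_instance():
    with instance_for_test(overrides={"run_retries": {"enabled": True}}) as instance:
        assert not instance.get_runs()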
", "current_page_name": "_modules/dagster/_core/instance_for_test", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.instance_for_test"}, "launcher": {"base": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.launcher.base

\nfrom abc import ABC, abstractmethod\nfrom enum import Enum\nfrom typing import NamedTuple, Optional\n\nfrom dagster._core.instance import MayHaveInstanceWeakref, T_DagsterInstance\nfrom dagster._core.origin import JobPythonOrigin\nfrom dagster._core.storage.dagster_run import DagsterRun\nfrom dagster._core.workspace.workspace import IWorkspace\nfrom dagster._serdes import whitelist_for_serdes\n\n\nclass LaunchRunContext(NamedTuple):\n    """Context available within a run launcher's launch_run call."""\n\n    dagster_run: DagsterRun\n    workspace: Optional[IWorkspace]\n\n    @property\n    def job_code_origin(self) -> Optional[JobPythonOrigin]:\n        return self.dagster_run.job_code_origin\n\n\nclass ResumeRunContext(NamedTuple):\n    """Context available within a run launcher's resume_run call."""\n\n    dagster_run: DagsterRun\n    workspace: Optional[IWorkspace]\n    resume_attempt_number: Optional[int] = None\n\n    @property\n    def job_code_origin(self) -> Optional[JobPythonOrigin]:\n        return self.dagster_run.job_code_origin\n\n\n@whitelist_for_serdes\nclass WorkerStatus(Enum):\n    RUNNING = "RUNNING"\n    NOT_FOUND = "NOT_FOUND"\n    FAILED = "FAILED"\n    SUCCESS = "SUCCESS"\n    UNKNOWN = "UNKNOWN"\n\n\nclass CheckRunHealthResult(NamedTuple):\n    """Result of a check_run_worker_health call."""\n\n    status: WorkerStatus\n    msg: Optional[str] = None\n    transient: Optional[bool] = None\n    run_worker_id: Optional[str] = None  # Identifier for a particular run worker\n\n    def __str__(self) -> str:\n        return f"{self.status.value}: '{self.msg}'"\n\n\n
[docs]class RunLauncher(ABC, MayHaveInstanceWeakref[T_DagsterInstance]):\n @abstractmethod\n def launch_run(self, context: LaunchRunContext) -> None:\n """Launch a run.\n\n This method should begin the execution of the specified run, and may emit engine events.\n Runs should be created in the instance (e.g., by calling\n ``DagsterInstance.create_run()``) *before* this method is called, and\n should be in the ``PipelineRunStatus.STARTING`` state. Typically, this method will\n not be invoked directly, but should be invoked through ``DagsterInstance.launch_run()``.\n\n Args:\n context (LaunchRunContext): information about the launch - every run launcher\n will need the PipelineRun, and some run launchers may need information from the\n IWorkspace from which the run was launched.\n """\n\n @abstractmethod\n def terminate(self, run_id: str) -> bool:\n """Terminates a process.\n\n Returns False is the process was already terminated. Returns true if\n the process was alive and was successfully terminated\n """\n\n def dispose(self) -> None:\n """Do any resource cleanup that should happen when the DagsterInstance is\n cleaning itself up.\n """\n\n def join(self, timeout: int = 30) -> None:\n pass\n\n @property\n def supports_check_run_worker_health(self) -> bool:\n """Whether the run launcher supports check_run_worker_health."""\n return False\n\n def check_run_worker_health(self, run: DagsterRun) -> CheckRunHealthResult:\n raise NotImplementedError(\n "This run launcher does not support run monitoring. Please disable it on your instance."\n )\n\n def get_run_worker_debug_info(self, run: DagsterRun) -> Optional[str]:\n return None\n\n @property\n def supports_resume_run(self) -> bool:\n """Whether the run launcher supports resume_run."""\n return False\n\n def resume_run(self, context: ResumeRunContext) -> None:\n raise NotImplementedError(\n "This run launcher does not support resuming runs. If using "\n "run monitoring, set max_resume_run_attempts to 0."\n )
\n
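# Editor's note (hedged sketch, not part of the generated module source): a minimal,
# hypothetical subclass showing the two abstract methods above. A launcher meant to be
# configured from dagster.yaml would additionally mix in ConfigurableClass.
from dagster._core.launcher.base import LaunchRunContext, RunLauncher


class NoOpRunLauncher(RunLauncher):
    """Records run ids instead of starting run workers (illustration only)."""

    def __init__(self):
        self._launched_run_ids = []
        super().__init__()

    def launch_run(self, context: LaunchRunContext) -> None:
        # A real launcher would start a process or container for context.dagster_run here.
        self._launched_run_ids.append(context.dagster_run.run_id)

    def terminate(self, run_id: str) -> bool:
        # Nothing was started, so there is nothing to terminate.
        return False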
", "current_page_name": "_modules/dagster/_core/launcher/base", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.launcher.base"}, "default_run_launcher": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.launcher.default_run_launcher

\nimport time\nfrom typing import TYPE_CHECKING, Any, Mapping, Optional, cast\n\nfrom typing_extensions import Self\n\nimport dagster._seven as seven\nfrom dagster import (\n    _check as check,\n)\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._core.errors import (\n    DagsterInvariantViolationError,\n    DagsterLaunchFailedError,\n    DagsterUserCodeProcessError,\n)\nfrom dagster._core.storage.dagster_run import DagsterRun\nfrom dagster._core.storage.tags import GRPC_INFO_TAG\nfrom dagster._serdes import (\n    ConfigurableClass,\n    deserialize_value,\n)\nfrom dagster._serdes.config_class import ConfigurableClassData\nfrom dagster._utils.merger import merge_dicts\n\nfrom .base import LaunchRunContext, RunLauncher\n\nif TYPE_CHECKING:\n    from dagster._core.instance import DagsterInstance\n    from dagster._grpc.client import DagsterGrpcClient\n\n\n# note: this class is a top level export, so we defer many imports til use for performance\n
[docs]class DefaultRunLauncher(RunLauncher, ConfigurableClass):\n """Launches runs against running GRPC servers."""\n\n def __init__(\n self,\n inst_data: Optional[ConfigurableClassData] = None,\n ):\n self._inst_data = inst_data\n\n self._run_ids = set()\n\n super().__init__()\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n @classmethod\n def config_type(cls) -> UserConfigSchema:\n return {}\n\n @classmethod\n def from_config_value(\n cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any]\n ) -> Self:\n return DefaultRunLauncher(inst_data=inst_data)\n\n @staticmethod\n def launch_run_from_grpc_client(\n instance: "DagsterInstance", run: DagsterRun, grpc_client: "DagsterGrpcClient"\n ):\n # defer for perf\n from dagster._grpc.types import ExecuteExternalJobArgs, StartRunResult\n\n instance.add_run_tags(\n run.run_id,\n {\n GRPC_INFO_TAG: seven.json.dumps(\n merge_dicts(\n {"host": grpc_client.host},\n (\n {"port": grpc_client.port}\n if grpc_client.port\n else {"socket": grpc_client.socket}\n ),\n ({"use_ssl": True} if grpc_client.use_ssl else {}),\n )\n )\n },\n )\n\n res = deserialize_value(\n grpc_client.start_run(\n ExecuteExternalJobArgs(\n job_origin=run.external_job_origin, # type: ignore # (possible none)\n run_id=run.run_id,\n instance_ref=instance.get_ref(),\n )\n ),\n StartRunResult,\n )\n if not res.success:\n raise (\n DagsterLaunchFailedError(\n res.message, serializable_error_info=res.serializable_error_info\n )\n )\n\n def launch_run(self, context: LaunchRunContext) -> None:\n # defer for perf\n from dagster._core.host_representation.code_location import (\n GrpcServerCodeLocation,\n )\n\n run = context.dagster_run\n\n check.inst_param(run, "run", DagsterRun)\n\n if not context.workspace:\n raise DagsterInvariantViolationError(\n "DefaultRunLauncher requires a workspace to be included in its LaunchRunContext"\n )\n\n external_job_origin = check.not_none(run.external_job_origin)\n code_location = context.workspace.get_code_location(\n external_job_origin.external_repository_origin.code_location_origin.location_name\n )\n\n check.inst(\n code_location,\n GrpcServerCodeLocation,\n "DefaultRunLauncher: Can't launch runs for pipeline not loaded from a GRPC server",\n )\n\n DefaultRunLauncher.launch_run_from_grpc_client(\n self._instance, run, cast(GrpcServerCodeLocation, code_location).client\n )\n\n self._run_ids.add(run.run_id)\n\n def _get_grpc_client_for_termination(self, run_id):\n # defer for perf\n from dagster._grpc.client import DagsterGrpcClient\n\n if not self.has_instance:\n return None\n\n run = self._instance.get_run_by_id(run_id)\n if not run or run.is_finished:\n return None\n\n tags = run.tags\n\n if GRPC_INFO_TAG not in tags:\n return None\n\n grpc_info = seven.json.loads(tags.get(GRPC_INFO_TAG))\n\n return DagsterGrpcClient(\n port=grpc_info.get("port"),\n socket=grpc_info.get("socket"),\n host=grpc_info.get("host"),\n use_ssl=bool(grpc_info.get("use_ssl", False)),\n )\n\n def terminate(self, run_id):\n # defer for perf\n from dagster._grpc.types import CancelExecutionRequest, CancelExecutionResult\n\n check.str_param(run_id, "run_id")\n if not self.has_instance:\n return False\n\n run = self._instance.get_run_by_id(run_id)\n if not run:\n return False\n\n self._instance.report_run_canceling(run)\n\n client = self._get_grpc_client_for_termination(run_id)\n\n if not client:\n self._instance.report_engine_event(\n message="Unable to get grpc client to send termination request to.",\n 
dagster_run=run,\n cls=self.__class__,\n )\n return False\n\n res = deserialize_value(\n client.cancel_execution(CancelExecutionRequest(run_id=run_id)), CancelExecutionResult\n )\n\n if res.serializable_error_info:\n raise DagsterUserCodeProcessError.from_error_info(res.serializable_error_info)\n\n return res.success\n\n def join(self, timeout=30):\n # If this hasn't been initialized at all, we can just do a noop\n if not self.has_instance:\n return\n\n total_time = 0\n interval = 0.01\n\n while True:\n active_run_ids = [\n run_id\n for run_id in self._run_ids\n if (\n self._instance.get_run_by_id(run_id)\n and not self._instance.get_run_by_id(run_id).is_finished\n )\n ]\n\n if len(active_run_ids) == 0:\n return\n\n if total_time >= timeout:\n raise Exception(f"Timed out waiting for these runs to finish: {active_run_ids!r}")\n\n total_time += interval\n time.sleep(interval)\n interval = interval * 2
\n
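# Editor's note (hedged sketch, not part of the generated module source): the instance
# resolves its run launcher by rehydrating a serialized configuration entry; with no
# override in dagster.yaml, that entry points at the DefaultRunLauncher above.
from dagster._core.launcher.default_run_launcher import DefaultRunLauncher
from dagster._serdes import ConfigurableClassData

launcher = ConfigurableClassData("dagster", "DefaultRunLauncher", "{}").rehydrate(
    as_type=DefaultRunLauncher
)
assert isinstance(launcher, DefaultRunLauncher)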
", "current_page_name": "_modules/dagster/_core/launcher/default_run_launcher", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.launcher.default_run_launcher"}}, "log_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.log_manager

\nimport datetime\nimport logging\nfrom typing import TYPE_CHECKING, Any, Mapping, NamedTuple, Optional, Sequence, Union, cast\n\nfrom typing_extensions import Protocol\n\nimport dagster._check as check\nfrom dagster._core.utils import coerce_valid_log_level, make_new_run_id\nfrom dagster._utils.log import get_dagster_logger\n\nif TYPE_CHECKING:\n    from dagster import DagsterInstance\n    from dagster._core.events import DagsterEvent\n    from dagster._core.storage.dagster_run import DagsterRun\n\nDAGSTER_META_KEY = "dagster_meta"\n\n\nclass IDagsterMeta(Protocol):\n    @property\n    def dagster_meta(self) -> "DagsterLoggingMetadata": ...\n\n\n# The type-checker complains here that DagsterLogRecord does not implement the `dagster_meta`\n# property of `IDagsterMeta`. We ignore this error because we don't need to implement this method--\n# `DagsterLogRecord` is a stub class that is never instantiated. We only ever cast\n# `logging.LogRecord` objects to `DagsterLogRecord`, because it gives us typed access to the\n# `dagster_meta` property. `dagster_meta` itself is set on these `logging.LogRecord` objects via the\n# `extra` argument to `logging.Logger.log` (see `DagsterLogManager.log_dagster_event`), but\n# `logging.LogRecord` has no way of exposing to the type-checker the attributes that are dynamically\n# defined via `extra`.\nclass DagsterLogRecord(logging.LogRecord, IDagsterMeta):  # type: ignore\n    pass\n\n\nclass DagsterMessageProps(\n    NamedTuple(\n        "_DagsterMessageProps",\n        [\n            ("orig_message", Optional[str]),\n            ("log_message_id", Optional[str]),\n            ("log_timestamp", Optional[str]),\n            ("dagster_event", Optional[Any]),\n        ],\n    )\n):\n    """Internal class used to represent specific attributes about a logged message."""\n\n    def __new__(\n        cls,\n        orig_message: str,\n        log_message_id: Optional[str] = None,\n        log_timestamp: Optional[str] = None,\n        dagster_event: Optional["DagsterEvent"] = None,\n    ):\n        return super().__new__(\n            cls,\n            orig_message=check.str_param(orig_message, "orig_message"),\n            log_message_id=check.opt_str_param(\n                log_message_id, "log_message_id", default=make_new_run_id()\n            ),\n            log_timestamp=check.opt_str_param(\n                log_timestamp,\n                "log_timestamp",\n                default=datetime.datetime.utcnow().isoformat(),\n            ),\n            dagster_event=dagster_event,\n        )\n\n    @property\n    def error_str(self) -> Optional[str]:\n        if self.dagster_event is None:\n            return None\n\n        event_specific_data = self.dagster_event.event_specific_data\n        if not event_specific_data:\n            return None\n\n        error = getattr(event_specific_data, "error", None)\n        if error:\n            return f'\\n\\n{getattr(event_specific_data, "error_display_string", error.to_string())}'\n        return None\n\n    @property\n    def pid(self) -> Optional[str]:\n        if self.dagster_event is None or self.dagster_event.pid is None:\n            return None\n        return str(self.dagster_event.pid)\n\n    @property\n    def step_key(self) -> Optional[str]:\n        if self.dagster_event is None:\n            return None\n        return self.dagster_event.step_key\n\n    @property\n    def event_type_value(self) -> Optional[str]:\n        if self.dagster_event is None:\n            return None\n        return 
self.dagster_event.event_type_value\n\n\nclass DagsterLoggingMetadata(\n    NamedTuple(\n        "_DagsterLoggingMetadata",\n        [\n            ("run_id", Optional[str]),\n            ("job_name", Optional[str]),\n            ("job_tags", Mapping[str, str]),\n            ("step_key", Optional[str]),\n            ("op_name", Optional[str]),\n            ("resource_name", Optional[str]),\n            ("resource_fn_name", Optional[str]),\n        ],\n    )\n):\n    """Internal class used to represent the context in which a given message was logged (i.e. the\n    step, pipeline run, resource, etc.).\n    """\n\n    def __new__(\n        cls,\n        run_id: Optional[str] = None,\n        job_name: Optional[str] = None,\n        job_tags: Optional[Mapping[str, str]] = None,\n        step_key: Optional[str] = None,\n        op_name: Optional[str] = None,\n        resource_name: Optional[str] = None,\n        resource_fn_name: Optional[str] = None,\n    ):\n        return super().__new__(\n            cls,\n            run_id=run_id,\n            job_name=job_name,\n            job_tags=job_tags or {},\n            step_key=step_key,\n            op_name=op_name,\n            resource_name=resource_name,\n            resource_fn_name=resource_fn_name,\n        )\n\n    @property\n    def log_source(self) -> str:\n        if self.resource_name is None:\n            return self.job_name or "system"\n        return f"resource:{self.resource_name}"\n\n    def all_tags(self) -> Mapping[str, str]:\n        # converts all values into strings\n        return {k: str(v) for k, v in self._asdict().items()}\n\n    def event_tags(self) -> Mapping[str, str]:\n        # Exclude pipeline_tags since it can be quite large and can be found on the run\n        return {k: str(v) for k, v in self._asdict().items() if k != "job_tags"}\n\n\ndef construct_log_string(\n    logging_metadata: DagsterLoggingMetadata, message_props: DagsterMessageProps\n) -> str:\n    from dagster._core.events import EVENT_TYPE_VALUE_TO_DISPLAY_STRING\n\n    event_type_str = (\n        EVENT_TYPE_VALUE_TO_DISPLAY_STRING[message_props.event_type_value]\n        if message_props.event_type_value in EVENT_TYPE_VALUE_TO_DISPLAY_STRING\n        else message_props.event_type_value\n    )\n    return " - ".join(\n        filter(\n            None,\n            (\n                logging_metadata.log_source,\n                logging_metadata.run_id,\n                message_props.pid,\n                logging_metadata.step_key,\n                event_type_str,\n                message_props.orig_message,\n            ),\n        )\n    ) + (message_props.error_str or "")\n\n\ndef get_dagster_meta_dict(\n    logging_metadata: DagsterLoggingMetadata, dagster_message_props: DagsterMessageProps\n) -> Mapping[str, object]:\n    # combine all dagster meta information into a single dictionary\n    meta_dict = {\n        **logging_metadata._asdict(),\n        **dagster_message_props._asdict(),\n    }\n    # step-level events can be logged from a pipeline context. 
for these cases, pull the step\n    # key from the underlying DagsterEvent\n    if meta_dict["step_key"] is None:\n        meta_dict["step_key"] = dagster_message_props.step_key\n\n    return meta_dict\n\n\nclass DagsterLogHandler(logging.Handler):\n    """Internal class used to turn regular logs into Dagster logs by adding Dagster-specific\n    metadata (such as pipeline_name or step_key), as well as reformatting the underlying message.\n\n    Note: The `loggers` argument will be populated with the set of @loggers supplied to the current\n    pipeline run. These essentially work as handlers (they do not create their own log messages,\n    they simply re-log messages that are created from context.log.x() calls), which is why they are\n    referenced from within this handler class.\n    """\n\n    def __init__(\n        self,\n        logging_metadata: DagsterLoggingMetadata,\n        loggers: Sequence[logging.Logger],\n        handlers: Sequence[logging.Handler],\n    ):\n        self._logging_metadata = logging_metadata\n        self._loggers = loggers\n        self._handlers = handlers\n        self._should_capture = True\n        super().__init__()\n\n    @property\n    def logging_metadata(self) -> DagsterLoggingMetadata:\n        return self._logging_metadata\n\n    def with_tags(self, **new_tags: str) -> "DagsterLogHandler":\n        return DagsterLogHandler(\n            logging_metadata=self.logging_metadata._replace(**new_tags),\n            loggers=self._loggers,\n            handlers=self._handlers,\n        )\n\n    def _extract_extra(self, record: logging.LogRecord) -> Mapping[str, Any]:\n        """In the logging.Logger log() implementation, the elements of the `extra` dictionary\n        argument are smashed into the __dict__ of the underlying logging.LogRecord.\n        This function figures out what the original `extra` values of the log call were by\n        comparing the set of attributes in the received record to those of a default record.\n        """\n        ref_attrs = list(logging.makeLogRecord({}).__dict__.keys()) + [\n            "message",\n            "asctime",\n        ]\n        return {k: v for k, v in record.__dict__.items() if k not in ref_attrs}\n\n    def _convert_record(self, record: logging.LogRecord) -> DagsterLogRecord:\n        # we store the originating DagsterEvent in the DAGSTER_META_KEY field, if applicable\n        dagster_meta = getattr(record, DAGSTER_META_KEY, None)\n\n        # generate some properties for this specific record\n        dagster_message_props = DagsterMessageProps(\n            orig_message=record.getMessage(), dagster_event=dagster_meta\n        )\n\n        # set the dagster meta info for the record\n        setattr(\n            record,\n            DAGSTER_META_KEY,\n            get_dagster_meta_dict(self._logging_metadata, dagster_message_props),\n        )\n\n        # update the message to be formatted like other dagster logs\n        record.msg = construct_log_string(self._logging_metadata, dagster_message_props)\n        record.args = ()\n\n        # DagsterLogRecord is a LogRecord with a `dagster_meta` field\n        return cast(DagsterLogRecord, record)\n\n    def filter(self, record: logging.LogRecord) -> bool:\n        """If you list multiple levels of a python logging hierarchy as managed loggers, and do not\n        set the propagate attribute to False, this will result in that record getting logged\n        multiple times, as the DagsterLogHandler will be invoked at each level of the hierarchy as\n        the 
message is propagated. This filter prevents this from happening.\n        """\n        return self._should_capture and not isinstance(\n            getattr(record, DAGSTER_META_KEY, None), dict\n        )\n\n    def emit(self, record: logging.LogRecord) -> None:\n        """For any received record, add Dagster metadata, and have handlers handle it."""\n        try:\n            # to prevent the potential for infinite loops in which a handler produces log messages\n            # which are then captured and then handled by that same handler (etc.), do not capture\n            # any log messages while one is currently being emitted\n            self._should_capture = False\n            dagster_record = self._convert_record(record)\n            # built-in handlers\n            for handler in self._handlers:\n                if dagster_record.levelno >= handler.level:\n                    handler.handle(dagster_record)\n            # user-defined @loggers\n            for logger in self._loggers:\n                logger.log(\n                    dagster_record.levelno,\n                    dagster_record.msg,\n                    exc_info=dagster_record.exc_info,\n                    extra=self._extract_extra(record),\n                )\n        finally:\n            self._should_capture = True\n\n\n
[docs]class DagsterLogManager(logging.Logger):\n """Centralized dispatch for logging from user code.\n\n Handles the construction of uniform structured log messages and passes them through to the\n underlying loggers/handlers.\n\n An instance of the log manager is made available to ops as ``context.log``. Users should not\n initialize instances of the log manager directly. To configure custom loggers, set the\n ``logger_defs`` argument in an `@job` decorator or when calling the `to_job()` method on a\n :py:class:`GraphDefinition`.\n\n The log manager inherits standard convenience methods like those exposed by the Python standard\n library :py:mod:`python:logging` module (i.e., within the body of an op,\n ``context.log.{debug, info, warning, warn, error, critical, fatal}``).\n\n The underlying integer API can also be called directly using, e.g.\n ``context.log.log(5, msg)``, and the log manager will delegate to the ``log`` method\n defined on each of the loggers it manages.\n\n User-defined custom log levels are not supported, and calls to, e.g.,\n ``context.log.trace`` or ``context.log.notice`` will result in hard exceptions **at runtime**.\n """\n\n def __init__(\n self,\n dagster_handler: DagsterLogHandler,\n level: int = logging.NOTSET,\n managed_loggers: Optional[Sequence[logging.Logger]] = None,\n ):\n super().__init__(name="dagster", level=coerce_valid_log_level(level))\n self._managed_loggers = check.opt_sequence_param(\n managed_loggers, "managed_loggers", of_type=logging.Logger\n )\n self._dagster_handler = dagster_handler\n self.addHandler(dagster_handler)\n\n @classmethod\n def create(\n cls,\n loggers: Sequence[logging.Logger],\n handlers: Optional[Sequence[logging.Handler]] = None,\n instance: Optional["DagsterInstance"] = None,\n dagster_run: Optional["DagsterRun"] = None,\n ) -> "DagsterLogManager":\n """Create a DagsterLogManager with a set of subservient loggers."""\n handlers = check.opt_sequence_param(handlers, "handlers", of_type=logging.Handler)\n\n managed_loggers = [get_dagster_logger()]\n python_log_level = logging.NOTSET\n\n if instance:\n handlers = [*handlers, *instance.get_handlers()]\n managed_loggers += [\n logging.getLogger(lname) if lname != "root" else logging.getLogger()\n for lname in instance.managed_python_loggers\n ]\n if instance.python_log_level is not None:\n python_log_level = coerce_valid_log_level(instance.python_log_level)\n\n # set all loggers to the declared logging level\n for logger in managed_loggers:\n logger.setLevel(python_log_level)\n\n if dagster_run:\n logging_metadata = DagsterLoggingMetadata(\n run_id=dagster_run.run_id,\n job_name=dagster_run.job_name,\n job_tags=dagster_run.tags,\n )\n else:\n logging_metadata = DagsterLoggingMetadata()\n\n return cls(\n dagster_handler=DagsterLogHandler(\n logging_metadata=logging_metadata,\n loggers=loggers,\n handlers=handlers,\n ),\n level=python_log_level,\n managed_loggers=managed_loggers,\n )\n\n @property\n def logging_metadata(self) -> DagsterLoggingMetadata:\n return self._dagster_handler.logging_metadata\n\n def begin_python_log_capture(self) -> None:\n for logger in self._managed_loggers:\n logger.addHandler(self._dagster_handler)\n\n def end_python_log_capture(self) -> None:\n for logger in self._managed_loggers:\n logger.removeHandler(self._dagster_handler)\n\n def log_dagster_event(\n self, level: Union[str, int], msg: str, dagster_event: "DagsterEvent"\n ) -> None:\n """Log a DagsterEvent at the given level. 
Attributes about the context it was logged in\n (such as the solid name or pipeline name) will be automatically attached to the created record.\n\n Args:\n level (str, int): either a string representing the desired log level ("INFO", "WARN"),\n or an integer level such as logging.INFO or logging.DEBUG.\n msg (str): message describing the event\n dagster_event (DagsterEvent): DagsterEvent that will be logged\n """\n self.log(level=level, msg=msg, extra={DAGSTER_META_KEY: dagster_event})\n\n def log(self, level: Union[str, int], msg: object, *args: Any, **kwargs: Any) -> None:\n """Log a message at the given level. Attributes about the context it was logged in (such as\n the solid name or pipeline name) will be automatically attached to the created record.\n\n Args:\n level (str, int): either a string representing the desired log level ("INFO", "WARN"),\n or an integer level such as logging.INFO or logging.DEBUG.\n msg (str): the message to be logged\n *args: the logged message will be msg % args\n """\n level = coerce_valid_log_level(level)\n # log DagsterEvents regardless of level\n if self.isEnabledFor(level) or ("extra" in kwargs and DAGSTER_META_KEY in kwargs["extra"]):\n self._log(level, msg, args, **kwargs)\n\n def with_tags(self, **new_tags: str) -> "DagsterLogManager":\n """Add new tags in "new_tags" to the set of tags attached to this log manager instance, and\n return a new DagsterLogManager with the merged set of tags.\n\n Args:\n new_tags (Dict[str,str]): Dictionary of tags\n\n Returns:\n DagsterLogManager: a new DagsterLogManager namedtuple with updated tags for the same\n run ID and loggers.\n """\n return DagsterLogManager(\n dagster_handler=self._dagster_handler.with_tags(**new_tags),\n managed_loggers=self._managed_loggers,\n level=self.level,\n )
\n
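The DagsterLogManager above is what user code receives as ``context.log``. A minimal usage sketch (illustrative op and job names, not part of the module source) showing the stdlib-style convenience methods and the integer-level API:

import logging

from dagster import OpExecutionContext, job, op


@op
def noisy_op(context: OpExecutionContext) -> None:
    # Convenience methods mirror the stdlib logging API.
    context.log.info("starting work")
    context.log.warning("something looks off")
    # The integer-level API delegates to the `log` method of each managed logger.
    context.log.log(logging.DEBUG, "debug-level message via the integer API")


@job
def noisy_job():
    noisy_op()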
", "current_page_name": "_modules/dagster/_core/log_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.log_manager"}, "pipes": {"client": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.pipes.client

\nfrom abc import ABC, abstractmethod\nfrom contextlib import contextmanager\nfrom typing import TYPE_CHECKING, Iterator, List, Optional, Sequence\n\nfrom dagster_pipes import (\n    DagsterPipesError,\n    PipesContextData,\n    PipesExtras,\n    PipesParams,\n)\n\nimport dagster._check as check\nfrom dagster._annotations import experimental, public\nfrom dagster._core.definitions.asset_check_result import AssetCheckResult\nfrom dagster._core.definitions.result import MaterializeResult\nfrom dagster._core.execution.context.compute import OpExecutionContext\n\nfrom .context import PipesExecutionResult\n\nif TYPE_CHECKING:\n    from .context import PipesMessageHandler\n\n\n
[docs]@experimental\nclass PipesClient(ABC):\n """Pipes client base class.\n\n Pipes clients for specific external environments should subclass this.\n """\n\n
[docs] @public\n @abstractmethod\n def run(\n self,\n *,\n context: OpExecutionContext,\n extras: Optional[PipesExtras] = None,\n **kwargs,\n ) -> "PipesClientCompletedInvocation":\n """Synchronously execute an external process with the pipes protocol. Derived\n clients must have `context` and `extras` arguments, but also can add arbitrary\n arguments that are appropriate for their own implementation.\n\n Args:\n context (OpExecutionContext): The context from the executing op/asset.\n extras (Optional[PipesExtras]): Arbitrary data to pass to the external environment.\n\n Returns:\n PipesClientCompletedInvocation: Wrapper containing results reported by the external\n process.\n """
\n\n\n@experimental\nclass PipesClientCompletedInvocation:\n def __init__(self, results: Sequence["PipesExecutionResult"]):\n self._results = results\n\n def get_results(self) -> Sequence["PipesExecutionResult"]:\n """Get the stream of results as a Sequence of a completed pipes\n client invocation. For each "report" call in the external process,\n one result object will be in the list.\n\n Returns: Sequence[PipesExecutionResult]\n """\n return tuple(self._results)\n\n def get_materialize_result(self) -> MaterializeResult:\n """Get a single materialize result for a pipes invocation. This coalesces\n the materialization result and any separately reported asset check results from\n the external process.\n\n This does not work on invocations that materialize multiple assets and will fail\n in that case. For multiple assets use `get_results` instead to get the result stream.\n\n Returns: MaterializeResult\n """\n return materialize_result_from_pipes_results(self.get_results())\n\n def get_asset_check_result(self) -> AssetCheckResult:\n """Get a single asset check result for a pipes invocation.\n\n This does not work on invocations that have anything except a single asset check result.\n Use `get_results` instead to get the result stream in those cases.\n\n Returns: AssetCheckResult\n """\n return _check_result_from_pipes_results(self.get_results())\n\n\n
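For orientation, a hedged sketch of how a concrete client such as ``PipesSubprocessClient`` is typically consumed from an asset. The asset name and script path are assumptions; the external script is expected to open Dagster Pipes and report a materialization:

from dagster import Definitions, OpExecutionContext, PipesSubprocessClient, asset


@asset
def external_script_asset(
    context: OpExecutionContext, pipes_subprocess_client: PipesSubprocessClient
):
    # run() blocks until the subprocess exits; get_materialize_result() collapses the
    # reported results into a single MaterializeResult for this asset.
    return pipes_subprocess_client.run(
        command=["python", "/path/to/external_script.py"],  # hypothetical script
        context=context,
    ).get_materialize_result()


defs = Definitions(
    assets=[external_script_asset],
    resources={"pipes_subprocess_client": PipesSubprocessClient()},
)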
[docs]@experimental\nclass PipesContextInjector(ABC):\n    @abstractmethod\n    @contextmanager\n    def inject_context(self, context_data: "PipesContextData") -> Iterator[PipesParams]:\n        """A `@contextmanager` that injects context data into the external process.\n\n        This method should write the context data to a location accessible to the external\n        process. It should yield parameters that the external process can use to locate and load the\n        context data.\n\n        Args:\n            context_data (PipesContextData): The context data to inject.\n\n        Yields:\n            PipesParams: A JSON-serializable dict of parameters to be used by the external\n            process to locate and load the injected context data.\n        """\n\n    @abstractmethod\n    def no_messages_debug_text(self) -> str:\n        """A message to be displayed when no messages are received from the external process to aid with debugging.\n\n        Example: "Attempted to inject context using a magic portal. Expected PipesMagicPortalContextLoader to be\n        explicitly passed to open_dagster_pipes in the external process."\n        """
\n\n\n
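A sketch of the contract defined above, using an invented parameter key; the external process would need a matching custom context loader that understands it:

from contextlib import contextmanager
from typing import Iterator

from dagster_pipes import PipesContextData, PipesParams

from dagster._core.pipes.client import PipesContextInjector


class InlineContextInjector(PipesContextInjector):
    @contextmanager
    def inject_context(self, context_data: PipesContextData) -> Iterator[PipesParams]:
        # No external storage: embed the payload in the bootstrap params themselves.
        # The external process must load context from this (assumed) key.
        yield {"inline_context": context_data}

    def no_messages_debug_text(self) -> str:
        return "Attempted to inject context inline via bootstrap parameters."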
[docs]@experimental\nclass PipesMessageReader(ABC):\n @abstractmethod\n @contextmanager\n def read_messages(self, handler: "PipesMessageHandler") -> Iterator[PipesParams]:\n """A `@contextmanager` that reads messages reported by an external process.\n\n This method should start a thread to continuously read messages from some location\n accessible to the external process. It should yield parameters that the external process\n can use to direct its message output.\n\n Args:\n handler (PipesMessageHandler): The message handler to use to process messages read from\n the external process.\n\n Yields:\n PipesParams: A dict of parameters that can be used by the external process to determine\n where to write messages.\n """\n\n @abstractmethod\n def no_messages_debug_text(self) -> str:\n """A message to be displayed when no messages are received from the external process to aid with\n debugging.\n\n Example: "Attempted to read messages using a magic portal. Expected PipesMagicPortalMessageWriter\n to be explicitly passed to open_dagster_pipes in the external process."\n """
\n\n\ndef materialize_result_from_pipes_results(\n all_results: Sequence[PipesExecutionResult],\n) -> MaterializeResult:\n mat_results: List[MaterializeResult] = [\n mat_result for mat_result in all_results if isinstance(mat_result, MaterializeResult)\n ]\n check_results: List[AssetCheckResult] = [\n check_result for check_result in all_results if isinstance(check_result, AssetCheckResult)\n ]\n\n check.invariant(len(mat_results) > 0, "No materialization results received. Internal error?")\n if len(mat_results) > 1:\n raise DagsterPipesError(\n "Multiple materialize results returned with asset keys"\n f" {sorted([check.not_none(mr.asset_key).to_user_string() for mr in mat_results])}."\n " If you are materializing multiple assets in a pipes invocation, use"\n " get_results() instead.",\n )\n mat_result = next(iter(mat_results))\n for check_result in check_results:\n if check_result.asset_key:\n check.invariant(\n mat_result.asset_key == check_result.asset_key,\n "Check result specified an asset key that is not part of the returned"\n " materialization. If this was deliberate, use get_results() instead.",\n )\n\n if check_results:\n return mat_result._replace(\n check_results=[*(mat_result.check_results or []), *check_results]\n )\n else:\n return mat_result\n\n\ndef _check_result_from_pipes_results(\n all_results: Sequence[PipesExecutionResult],\n) -> AssetCheckResult:\n mat_results: List[MaterializeResult] = [\n mat_result for mat_result in all_results if isinstance(mat_result, MaterializeResult)\n ]\n check_results: List[AssetCheckResult] = [\n check_result for check_result in all_results if isinstance(check_result, AssetCheckResult)\n ]\n\n # return the single asset check result if thats what we got\n if len(mat_results) == 0 and len(check_results) == 1:\n return next(iter(check_results))\n\n # otherwise error\n raise DagsterPipesError(\n f"Did not find singular AssetCheckResult, got {len(mat_results)} MaterializeResults and"\n f" {len(check_results)} AssetCheckResults. Correct the reported results or use"\n " get_results() instead.",\n )\n
", "current_page_name": "_modules/dagster/_core/pipes/client", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.pipes.client"}, "context": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.pipes.context

\nfrom contextlib import contextmanager\nfrom dataclasses import dataclass\nfrom queue import Queue\nfrom typing import TYPE_CHECKING, Any, Dict, Iterator, Mapping, Optional, Set, Union\n\nfrom dagster_pipes import (\n    DAGSTER_PIPES_CONTEXT_ENV_VAR,\n    DAGSTER_PIPES_MESSAGES_ENV_VAR,\n    PIPES_METADATA_TYPE_INFER,\n    PipesContextData,\n    PipesDataProvenance,\n    PipesExtras,\n    PipesMessage,\n    PipesMetadataType,\n    PipesMetadataValue,\n    PipesParams,\n    PipesTimeWindow,\n    encode_env_var,\n)\nfrom typing_extensions import TypeAlias\n\nimport dagster._check as check\nfrom dagster._annotations import experimental, public\nfrom dagster._core.definitions.asset_check_result import AssetCheckResult\nfrom dagster._core.definitions.asset_check_spec import AssetCheckSeverity\nfrom dagster._core.definitions.data_version import DataProvenance, DataVersion\nfrom dagster._core.definitions.events import AssetKey\nfrom dagster._core.definitions.metadata import MetadataValue, normalize_metadata_value\nfrom dagster._core.definitions.partition_key_range import PartitionKeyRange\nfrom dagster._core.definitions.result import MaterializeResult\nfrom dagster._core.definitions.time_window_partitions import (\n    TimeWindow,\n    has_one_dimension_time_window_partitioning,\n)\nfrom dagster._core.errors import DagsterPipesExecutionError\nfrom dagster._core.execution.context.compute import OpExecutionContext\nfrom dagster._core.execution.context.invocation import BoundOpExecutionContext\n\nif TYPE_CHECKING:\n    from dagster._core.pipes.client import PipesMessageReader\n\nPipesExecutionResult: TypeAlias = Union[MaterializeResult, AssetCheckResult]\n\n\n
[docs]@experimental\nclass PipesMessageHandler:\n """Class to process :py:obj:`PipesMessage` objects received from a pipes process.\n\n Args:\n context (OpExecutionContext): The context for the executing op/asset.\n """\n\n def __init__(self, context: OpExecutionContext) -> None:\n self._context = context\n # Queue is thread-safe\n self._result_queue: Queue[PipesExecutionResult] = Queue()\n # Only read by the main thread after all messages are handled, so no need for a lock\n self._unmaterialized_assets: Set[AssetKey] = set(context.selected_asset_keys)\n self._received_any_msg = False\n self._received_closed_msg = False\n\n @contextmanager\n def handle_messages(self, message_reader: "PipesMessageReader") -> Iterator[PipesParams]:\n with message_reader.read_messages(self) as params:\n yield params\n for key in self._unmaterialized_assets:\n self._result_queue.put(MaterializeResult(asset_key=key))\n\n def clear_result_queue(self) -> Iterator[PipesExecutionResult]:\n while not self._result_queue.empty():\n yield self._result_queue.get()\n\n @property\n def received_any_message(self) -> bool:\n return self._received_any_msg\n\n @property\n def received_closed_message(self) -> bool:\n return self._received_closed_msg\n\n def _resolve_metadata(\n self, metadata: Mapping[str, PipesMetadataValue]\n ) -> Mapping[str, MetadataValue]:\n return {\n k: self._resolve_metadata_value(v["raw_value"], v["type"]) for k, v in metadata.items()\n }\n\n def _resolve_metadata_value(\n self, value: Any, metadata_type: PipesMetadataType\n ) -> MetadataValue:\n if metadata_type == PIPES_METADATA_TYPE_INFER:\n return normalize_metadata_value(value)\n elif metadata_type == "text":\n return MetadataValue.text(value)\n elif metadata_type == "url":\n return MetadataValue.url(value)\n elif metadata_type == "path":\n return MetadataValue.path(value)\n elif metadata_type == "notebook":\n return MetadataValue.notebook(value)\n elif metadata_type == "json":\n return MetadataValue.json(value)\n elif metadata_type == "md":\n return MetadataValue.md(value)\n elif metadata_type == "float":\n return MetadataValue.float(value)\n elif metadata_type == "int":\n return MetadataValue.int(value)\n elif metadata_type == "bool":\n return MetadataValue.bool(value)\n elif metadata_type == "dagster_run":\n return MetadataValue.dagster_run(value)\n elif metadata_type == "asset":\n return MetadataValue.asset(AssetKey.from_user_string(value))\n elif metadata_type == "table":\n return MetadataValue.table(value)\n elif metadata_type == "null":\n return MetadataValue.null()\n else:\n check.failed(f"Unexpected metadata type {metadata_type}")\n\n # Type ignores because we currently validate in individual handlers\n def handle_message(self, message: PipesMessage) -> None:\n if self._received_closed_msg:\n self._context.log.warn(f"[pipes] unexpected message received after closed: `{message}`")\n\n if not self._received_any_msg:\n self._received_any_msg = True\n self._context.log.info("[pipes] external process successfully opened dagster pipes.")\n\n if message["method"] == "opened":\n pass\n elif message["method"] == "closed":\n self._handle_closed()\n elif message["method"] == "report_asset_materialization":\n self._handle_report_asset_materialization(**message["params"]) # type: ignore\n elif message["method"] == "report_asset_check":\n self._handle_report_asset_check(**message["params"]) # type: ignore\n elif message["method"] == "log":\n self._handle_log(**message["params"]) # type: ignore\n else:\n raise DagsterPipesExecutionError(f"Unknown 
message method: {message['method']}")\n\n def _handle_closed(self) -> None:\n self._received_closed_msg = True\n\n def _handle_report_asset_materialization(\n self,\n asset_key: str,\n metadata: Optional[Mapping[str, PipesMetadataValue]],\n data_version: Optional[str],\n ) -> None:\n check.str_param(asset_key, "asset_key")\n check.opt_str_param(data_version, "data_version")\n metadata = check.opt_mapping_param(metadata, "metadata", key_type=str)\n resolved_asset_key = AssetKey.from_user_string(asset_key)\n resolved_metadata = self._resolve_metadata(metadata)\n resolved_data_version = None if data_version is None else DataVersion(data_version)\n result = MaterializeResult(\n asset_key=resolved_asset_key,\n metadata=resolved_metadata,\n data_version=resolved_data_version,\n )\n self._result_queue.put(result)\n self._unmaterialized_assets.remove(resolved_asset_key)\n\n def _handle_report_asset_check(\n self,\n asset_key: str,\n check_name: str,\n passed: bool,\n severity: str,\n metadata: Mapping[str, PipesMetadataValue],\n ) -> None:\n check.str_param(asset_key, "asset_key")\n check.str_param(check_name, "check_name")\n check.bool_param(passed, "passed")\n check.literal_param(severity, "severity", [x.value for x in AssetCheckSeverity])\n metadata = check.opt_mapping_param(metadata, "metadata", key_type=str)\n resolved_asset_key = AssetKey.from_user_string(asset_key)\n resolved_metadata = self._resolve_metadata(metadata)\n resolved_severity = AssetCheckSeverity(severity)\n result = AssetCheckResult(\n asset_key=resolved_asset_key,\n check_name=check_name,\n passed=passed,\n severity=resolved_severity,\n metadata=resolved_metadata,\n )\n self._result_queue.put(result)\n\n def _handle_log(self, message: str, level: str = "info") -> None:\n check.str_param(message, "message")\n self._context.log.log(level, message)
\n\n\n
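On the other side of this handler sits the external process, which emits the ``log``, ``report_asset_materialization``, and ``report_asset_check`` messages handled above. A hedged sketch of that external-process side, assuming the ``dagster_pipes`` package's ``open_dagster_pipes`` entry point (metadata key and values are illustrative):

from dagster_pipes import open_dagster_pipes

with open_dagster_pipes() as pipes:
    # Emits a "log" message, which _handle_log forwards to context.log.
    pipes.log.info("computing rows")
    # Emits a "report_asset_materialization" message; the metadata value shape
    # matches what _resolve_metadata above expects.
    pipes.report_asset_materialization(
        metadata={"row_count": {"raw_value": 42, "type": "int"}},
        data_version="v1",
    )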
[docs]@experimental\n@dataclass\nclass PipesSession:\n """Object representing a pipes session.\n\n A pipes session is defined by a pair of :py:class:`PipesContextInjector` and\n :py:class:`PipesMessageReader` objects. At the opening of the session, the context injector\n writes context data to an externally accessible location, and the message reader starts\n monitoring an externally accessible location. These locations are encoded in parameters stored\n on a `PipesSession` object.\n\n During the session, an external process should be started and the parameters injected into its\n environment. The typical way to do this is to call :py:meth:`PipesSession.get_bootstrap_env_vars`\n and pass the result as environment variables.\n\n During execution, results (e.g. asset materializations) are reported by the external process and\n buffered on the `PipesSession` object. The buffer can periodically be cleared and yielded to\n Dagster machinery by calling `yield from PipesSession.get_results()`.\n\n When the external process exits, the session can be closed. Closing consists of handling any\n unprocessed messages written by the external process and cleaning up any resources used for\n context injection and message reading.\n\n Args:\n context_data (PipesContextData): The context for the executing op/asset.\n message_handler (PipesMessageHandler): The message handler to use for processing messages\n context_injector_params (PipesParams): Parameters yielded by the context injector,\n indicating the location from which the external process should load context data.\n message_reader_params (PipesParams): Parameters yielded by the message reader, indicating\n the location to which the external process should write messages.\n """\n\n context_data: PipesContextData\n message_handler: PipesMessageHandler\n context_injector_params: PipesParams\n message_reader_params: PipesParams\n\n
[docs] @public\n def get_bootstrap_env_vars(self) -> Dict[str, str]:\n """Encode context injector and message reader params as environment variables.\n\n Passing environment variables is the typical way to expose the pipes I/O parameters\n to a pipes process.\n\n Returns:\n Mapping[str, str]: Environment variables to pass to the external process. The values are\n serialized as json, compressed with gzip, and then base-64-encoded.\n """\n return {\n param_name: encode_env_var(param_value)\n for param_name, param_value in self.get_bootstrap_params().items()\n }
\n\n
[docs] @public\n def get_bootstrap_params(self) -> Dict[str, Any]:\n """Get the params necessary to bootstrap a launched pipes process. These parameters are typically\n passed as environment variables. See `get_bootstrap_env_vars`. It is the context injector's\n responsibility to decide how to pass these parameters to the external environment.\n\n Returns:\n Mapping[str, str]: Parameters to pass to the external process and their corresponding\n values that must be passed by the context injector.\n """\n return {\n DAGSTER_PIPES_CONTEXT_ENV_VAR: self.context_injector_params,\n DAGSTER_PIPES_MESSAGES_ENV_VAR: self.message_reader_params,\n }
\n\n
[docs] @public\n def get_results(self) -> Iterator[PipesExecutionResult]:\n """Iterator over buffered :py:class:`PipesExecutionResult` objects received from the\n external process.\n\n Calling this method clears the results buffer.\n\n Yields:\n PipesExecutionResult: Result reported by the external process.\n """\n yield from self.message_handler.clear_result_queue()
\n\n\ndef build_external_execution_context_data(\n context: OpExecutionContext,\n extras: Optional[PipesExtras],\n) -> "PipesContextData":\n asset_keys = (\n [_convert_asset_key(key) for key in sorted(context.selected_asset_keys)]\n if context.has_assets_def\n else None\n )\n code_version_by_asset_key = (\n {\n _convert_asset_key(key): context.assets_def.code_versions_by_key[key]\n for key in context.selected_asset_keys\n }\n if context.has_assets_def\n else None\n )\n provenance_by_asset_key = (\n {\n _convert_asset_key(key): _convert_data_provenance(context.get_asset_provenance(key))\n for key in context.selected_asset_keys\n }\n if context.has_assets_def\n else None\n )\n partition_key = context.partition_key if context.has_partition_key else None\n partition_key_range = context.partition_key_range if context.has_partition_key else None\n partition_time_window = (\n context.partition_time_window\n if context.has_partition_key\n and has_one_dimension_time_window_partitioning(\n context.get_step_execution_context().partitions_def\n )\n else None\n )\n return PipesContextData(\n asset_keys=asset_keys,\n code_version_by_asset_key=code_version_by_asset_key,\n provenance_by_asset_key=provenance_by_asset_key,\n partition_key=partition_key,\n partition_key_range=(\n _convert_partition_key_range(partition_key_range) if partition_key_range else None\n ),\n partition_time_window=(\n _convert_time_window(partition_time_window) if partition_time_window else None\n ),\n run_id=context.run_id,\n job_name=None if isinstance(context, BoundOpExecutionContext) else context.job_name,\n retry_number=0 if isinstance(context, BoundOpExecutionContext) else context.retry_number,\n extras=extras or {},\n )\n\n\ndef _convert_asset_key(asset_key: AssetKey) -> str:\n return asset_key.to_user_string()\n\n\ndef _convert_data_provenance(\n provenance: Optional[DataProvenance],\n) -> Optional["PipesDataProvenance"]:\n return (\n None\n if provenance is None\n else PipesDataProvenance(\n code_version=provenance.code_version,\n input_data_versions={\n _convert_asset_key(k): v.value for k, v in provenance.input_data_versions.items()\n },\n is_user_provided=provenance.is_user_provided,\n )\n )\n\n\ndef _convert_time_window(\n time_window: TimeWindow,\n) -> "PipesTimeWindow":\n return PipesTimeWindow(\n start=time_window.start.isoformat(),\n end=time_window.end.isoformat(),\n )\n\n\ndef _convert_partition_key_range(\n partition_key_range: PartitionKeyRange,\n) -> "PipesTimeWindow":\n return PipesTimeWindow(\n start=partition_key_range.start,\n end=partition_key_range.end,\n )\n
", "current_page_name": "_modules/dagster/_core/pipes/context", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.pipes.context"}, "utils": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.pipes.utils

\nimport datetime\nimport json\nimport os\nimport sys\nimport tempfile\nimport time\nfrom abc import ABC, abstractmethod\nfrom contextlib import contextmanager\nfrom threading import Event, Thread\nfrom typing import Iterator, Optional, TextIO\n\nfrom dagster_pipes import (\n    PIPES_PROTOCOL_VERSION_FIELD,\n    PipesContextData,\n    PipesDefaultContextLoader,\n    PipesDefaultMessageWriter,\n    PipesExtras,\n    PipesParams,\n)\n\nfrom dagster import (\n    OpExecutionContext,\n    _check as check,\n)\nfrom dagster._annotations import experimental\nfrom dagster._core.pipes.client import (\n    PipesContextInjector,\n    PipesMessageReader,\n)\nfrom dagster._core.pipes.context import (\n    PipesMessageHandler,\n    PipesSession,\n    build_external_execution_context_data,\n)\nfrom dagster._utils import tail_file\n\n_CONTEXT_INJECTOR_FILENAME = "context"\n_MESSAGE_READER_FILENAME = "messages"\n\n\n
[docs]@experimental\nclass PipesFileContextInjector(PipesContextInjector):\n """Context injector that injects context data into the external process by writing it to a\n specified file.\n\n Args:\n path (str): The path of a file to which to write context data. The file will be deleted on\n close of the pipes session.\n """\n\n def __init__(self, path: str):\n self._path = check.str_param(path, "path")\n\n @contextmanager\n def inject_context(self, context_data: "PipesContextData") -> Iterator[PipesParams]:\n """Inject context to external environment by writing it to a file as JSON and exposing the\n path to the file.\n\n Args:\n context_data (PipesContextData): The context data to inject.\n\n Yields:\n PipesParams: A dict of parameters that can be used by the external process to locate and\n load the injected context data.\n """\n with open(self._path, "w") as input_stream:\n json.dump(context_data, input_stream)\n try:\n yield {PipesDefaultContextLoader.FILE_PATH_KEY: self._path}\n finally:\n if os.path.exists(self._path):\n os.remove(self._path)\n\n def no_messages_debug_text(self) -> str:\n return f"Attempted to inject context via file {self._path}"
\n\n\n
[docs]@experimental\nclass PipesTempFileContextInjector(PipesContextInjector):\n """Context injector that injects context data into the external process by writing it to an\n automatically-generated temporary file.\n """\n\n @contextmanager\n def inject_context(self, context: "PipesContextData") -> Iterator[PipesParams]:\n """Inject context to external environment by writing it to an automatically-generated\n temporary file as JSON and exposing the path to the file.\n\n Args:\n context_data (PipesContextData): The context data to inject.\n\n Yields:\n PipesParams: A dict of parameters that can be used by the external process to locate and\n load the injected context data.\n """\n with tempfile.TemporaryDirectory() as tempdir:\n with PipesFileContextInjector(\n os.path.join(tempdir, _CONTEXT_INJECTOR_FILENAME)\n ).inject_context(context) as params:\n yield params\n\n def no_messages_debug_text(self) -> str:\n return "Attempted to inject context via a temporary file."
\n\n\n
[docs]class PipesEnvContextInjector(PipesContextInjector):\n """Context injector that injects context data into the external process by injecting it directly into the external process environment."""\n\n @contextmanager\n def inject_context(\n self,\n context_data: "PipesContextData",\n ) -> Iterator[PipesParams]:\n """Inject context to external environment by embedding directly in the parameters that will\n be passed to the external process (typically as environment variables).\n\n Args:\n context_data (PipesContextData): The context data to inject.\n\n Yields:\n PipesParams: A dict of parameters that can be used by the external process to locate and\n load the injected context data.\n """\n yield {PipesDefaultContextLoader.DIRECT_KEY: context_data}\n\n def no_messages_debug_text(self) -> str:\n return "Attempted to inject context directly, typically as an environment variable."
\n\n\n
[docs]@experimental\nclass PipesFileMessageReader(PipesMessageReader):\n """Message reader that reads messages by tailing a specified file.\n\n Args:\n path (str): The path of the file to which messages will be written. The file will be deleted\n on close of the pipes session.\n """\n\n def __init__(self, path: str):\n self._path = check.str_param(path, "path")\n\n @contextmanager\n def read_messages(\n self,\n handler: "PipesMessageHandler",\n ) -> Iterator[PipesParams]:\n """Set up a thread to read streaming messages from the external process by tailing the\n target file.\n\n Args:\n handler (PipesMessageHandler): object to process incoming messages\n\n Yields:\n PipesParams: A dict of parameters that specifies where a pipes process should write\n pipes protocol messages.\n """\n is_task_complete = Event()\n thread = None\n try:\n open(self._path, "w").close() # create file\n thread = Thread(\n target=self._reader_thread, args=(handler, is_task_complete), daemon=True\n )\n thread.start()\n yield {PipesDefaultMessageWriter.FILE_PATH_KEY: self._path}\n finally:\n is_task_complete.set()\n if os.path.exists(self._path):\n os.remove(self._path)\n if thread:\n thread.join()\n\n def _reader_thread(self, handler: "PipesMessageHandler", is_resource_complete: Event) -> None:\n for line in tail_file(self._path, lambda: is_resource_complete.is_set()):\n message = json.loads(line)\n handler.handle_message(message)\n\n def no_messages_debug_text(self) -> str:\n return f"Attempted to read messages from file {self._path}."
\n\n\n
[docs]@experimental\nclass PipesTempFileMessageReader(PipesMessageReader):\n """Message reader that reads messages by tailing an automatically-generated temporary file."""\n\n @contextmanager\n def read_messages(\n self,\n handler: "PipesMessageHandler",\n ) -> Iterator[PipesParams]:\n """Set up a thread to read streaming messages from the external process by an\n automatically-generated temporary file.\n\n Args:\n handler (PipesMessageHandler): object to process incoming messages\n\n Yields:\n PipesParams: A dict of parameters that specifies where a pipes process should write\n pipes protocol messages.\n """\n with tempfile.TemporaryDirectory() as tempdir:\n with PipesFileMessageReader(\n os.path.join(tempdir, _MESSAGE_READER_FILENAME)\n ).read_messages(handler) as params:\n yield params\n\n def no_messages_debug_text(self) -> str:\n return "Attempted to read messages from a local temporary file."
\n\n\n# Number of seconds to wait after an external process has completed for stdio logs to become\n# available. If this is exceeded, proceed with exiting without picking up logs.\nWAIT_FOR_STDIO_LOGS_TIMEOUT = 60\n\n\n
[docs]@experimental\nclass PipesBlobStoreMessageReader(PipesMessageReader):\n """Message reader that reads a sequence of message chunks written by an external process into a\n blob store such as S3, Azure blob storage, or GCS.\n\n The reader maintains a counter, starting at 1, that is synchronized with a message writer in\n some pipes process. The reader starts a thread that periodically attempts to read a chunk\n indexed by the counter at some location expected to be written by the pipes process. The chunk\n should be a file with each line corresponding to a JSON-encoded pipes message. When a chunk is\n successfully read, the messages are processed and the counter is incremented. The\n :py:class:`PipesBlobStoreMessageWriter` on the other end is expected to similarly increment a\n counter (starting from 1) on successful write, keeping counters on the read and write end in\n sync.\n\n If `stdout_reader` or `stderr_reader` are passed, this reader will also start them when\n `read_messages` is called. If they are not passed, then the reader performs no stdout/stderr\n forwarding.\n\n Args:\n interval (float): interval in seconds between attempts to download a chunk\n stdout_reader (Optional[PipesBlobStoreStdioReader]): A reader for reading stdout logs.\n stderr_reader (Optional[PipesBlobStoreStdioReader]): A reader for reading stderr logs.\n """\n\n interval: float\n counter: int\n stdout_reader: "PipesBlobStoreStdioReader"\n stderr_reader: "PipesBlobStoreStdioReader"\n\n def __init__(\n self,\n interval: float = 10,\n stdout_reader: Optional["PipesBlobStoreStdioReader"] = None,\n stderr_reader: Optional["PipesBlobStoreStdioReader"] = None,\n ):\n self.interval = interval\n self.counter = 1\n self.stdout_reader = (\n check.opt_inst_param(stdout_reader, "stdout_reader", PipesBlobStoreStdioReader)\n or PipesNoOpStdioReader()\n )\n self.stderr_reader = (\n check.opt_inst_param(stderr_reader, "stderr_reader", PipesBlobStoreStdioReader)\n or PipesNoOpStdioReader()\n )\n\n @contextmanager\n def read_messages(\n self,\n handler: "PipesMessageHandler",\n ) -> Iterator[PipesParams]:\n """Set up a thread to read streaming messages by periodically reading message chunks from a\n target location.\n\n Args:\n handler (PipesMessageHandler): object to process incoming messages\n\n Yields:\n PipesParams: A dict of parameters that specifies where a pipes process should write\n pipes protocol message chunks.\n """\n with self.get_params() as params:\n is_task_complete = Event()\n messages_thread = None\n try:\n messages_thread = Thread(\n target=self._messages_thread, args=(handler, params, is_task_complete)\n )\n messages_thread.start()\n self.stdout_reader.start(params, is_task_complete)\n self.stderr_reader.start(params, is_task_complete)\n yield params\n finally:\n self.wait_for_stdio_logs(params)\n is_task_complete.set()\n if messages_thread:\n messages_thread.join()\n self.stdout_reader.stop()\n self.stderr_reader.stop()\n\n # In cases where we are forwarding logs, in some cases the logs might not be written out until\n # after the run completes. 
We wait for them to exist.\n def wait_for_stdio_logs(self, params):\n start_or_last_download = datetime.datetime.now()\n while (\n datetime.datetime.now() - start_or_last_download\n ).seconds <= WAIT_FOR_STDIO_LOGS_TIMEOUT and (\n (self.stdout_reader and not self.stdout_reader.is_ready(params))\n or (self.stderr_reader and not self.stderr_reader.is_ready(params))\n ):\n time.sleep(5)\n\n @abstractmethod\n @contextmanager\n def get_params(self) -> Iterator[PipesParams]:\n """Yield a set of parameters to be passed to a message writer in a pipes process.\n\n Yields:\n PipesParams: A dict of parameters that specifies where a pipes process should write\n pipes protocol message chunks.\n """\n\n @abstractmethod\n def download_messages_chunk(self, index: int, params: PipesParams) -> Optional[str]: ...\n\n def _messages_thread(\n self,\n handler: "PipesMessageHandler",\n params: PipesParams,\n is_task_complete: Event,\n ) -> None:\n start_or_last_download = datetime.datetime.now()\n while True:\n now = datetime.datetime.now()\n if (now - start_or_last_download).seconds > self.interval or is_task_complete.is_set():\n start_or_last_download = now\n chunk = self.download_messages_chunk(self.counter, params)\n if chunk:\n for line in chunk.split("\\n"):\n message = json.loads(line)\n handler.handle_message(message)\n self.counter += 1\n elif is_task_complete.is_set():\n break\n time.sleep(1)
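A hedged sketch of a concrete subclass, using a local directory as a stand-in for a real blob store. The class name, chunk-file naming, and the ``path`` parameter key are assumptions that a matching message writer would have to share:

import os
from contextlib import contextmanager
from typing import Iterator, Optional

from dagster_pipes import PipesParams

from dagster._core.pipes.utils import PipesBlobStoreMessageReader


class LocalDirMessageReader(PipesBlobStoreMessageReader):
    def __init__(self, dir_path: str, interval: float = 10):
        super().__init__(interval=interval)
        self._dir_path = dir_path

    @contextmanager
    def get_params(self) -> Iterator[PipesParams]:
        # The writer side uses this path to decide where to put numbered chunk files.
        yield {"path": self._dir_path}

    def download_messages_chunk(self, index: int, params: PipesParams) -> Optional[str]:
        # Chunk files are assumed to be named "<counter>.json" by the writer.
        chunk_path = os.path.join(params["path"], f"{index}.json")
        if not os.path.exists(chunk_path):
            return None
        with open(chunk_path, "r") as f:
            return f.read()

    def no_messages_debug_text(self) -> str:
        return f"Attempted to read message chunks from {self._dir_path}."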
\n\n\nclass PipesBlobStoreStdioReader(ABC):\n @abstractmethod\n def start(self, params: PipesParams, is_task_complete: Event) -> None: ...\n\n @abstractmethod\n def stop(self) -> None: ...\n\n @abstractmethod\n def is_ready(self, params: PipesParams) -> bool: ...\n\n\n@experimental\nclass PipesChunkedStdioReader(PipesBlobStoreStdioReader):\n """Reader for reading stdout/stderr logs from a blob store such as S3, Azure blob storage, or GCS.\n\n Args:\n interval (float): interval in seconds between attempts to download a chunk.\n target_stream (TextIO): The stream to which to write the logs. Typcially `sys.stdout` or `sys.stderr`.\n """\n\n def __init__(self, *, interval: float = 10, target_stream: TextIO):\n self.interval = interval\n self.target_stream = target_stream\n self.thread: Optional[Thread] = None\n\n @abstractmethod\n def download_log_chunk(self, params: PipesParams) -> Optional[str]: ...\n\n def start(self, params: PipesParams, is_task_complete: Event) -> None:\n self.thread = Thread(target=self._reader_thread, args=(params, is_task_complete))\n self.thread.start()\n\n def stop(self) -> None:\n if self.thread:\n self.thread.join()\n\n def _reader_thread(\n self,\n params: PipesParams,\n is_task_complete: Event,\n ) -> None:\n start_or_last_download = datetime.datetime.now()\n while True:\n now = datetime.datetime.now()\n if (\n (now - start_or_last_download).seconds > self.interval or is_task_complete.is_set()\n ) and self.is_ready(params):\n start_or_last_download = now\n chunk = self.download_log_chunk(params)\n if chunk:\n self.target_stream.write(chunk)\n elif is_task_complete.is_set():\n break\n time.sleep(self.interval)\n\n\nclass PipesNoOpStdioReader(PipesBlobStoreStdioReader):\n """Default implementation for a pipes stdio reader that does nothing."""\n\n def start(self, params: PipesParams, is_task_complete: Event) -> None:\n pass\n\n def stop(self) -> None:\n pass\n\n def is_ready(self, params: PipesParams) -> bool:\n return True\n\n\ndef extract_message_or_forward_to_stdout(handler: "PipesMessageHandler", log_line: str):\n # exceptions as control flow, you love to see it\n try:\n message = json.loads(log_line)\n if PIPES_PROTOCOL_VERSION_FIELD in message.keys():\n handler.handle_message(message)\n else:\n sys.stdout.writelines((log_line, "\\n"))\n except Exception:\n # move non-message logs in to stdout for compute log capture\n sys.stdout.writelines((log_line, "\\n"))\n\n\n_FAIL_TO_YIELD_ERROR_MESSAGE = (\n "Did you forget to `yield from pipes_session.get_results()` or `return"\n " <PipesClient>.run(...).get_results`? If using `open_pipes_session`,"\n " `pipes_session.get_results` should be called once after the `open_pipes_session` block has"\n " exited to yield any remaining buffered results via `<PipesSession>.get_results()`."\n " If using `<PipesClient>.run`, you should always return"\n " `<PipesClient>.run(...).get_results()` or `<PipesClient>.run(...).get_materialize_result()`."\n)\n\n\n
[docs]@experimental\n@contextmanager\ndef open_pipes_session(\n context: OpExecutionContext,\n context_injector: PipesContextInjector,\n message_reader: PipesMessageReader,\n extras: Optional[PipesExtras] = None,\n) -> Iterator[PipesSession]:\n """Context manager that opens and closes a pipes session.\n\n This context manager should be used to wrap the launch of an external process using the pipe\n protocol to report results back to Dagster. The yielded :py:class:`PipesSession` should be used\n to (a) obtain the environment variables that need to be provided to the external process; (b)\n access results streamed back from the external process.\n\n This method is an alternative to :py:class:`PipesClient` subclasses for users who want more\n control over how pipes processes are launched. When using `open_pipes_session`, it is the user's\n responsibility to inject the message reader and context injector parameters available on the\n yielded `PipesSession` and pass them to the appropriate API when launching the external process.\n Typically these parameters should be set as environment variables.\n\n\n Args:\n context (OpExecutionContext): The context for the current op/asset execution.\n context_injector (PipesContextInjector): The context injector to use to inject context into the external process.\n message_reader (PipesMessageReader): The message reader to use to read messages from the external process.\n extras (Optional[PipesExtras]): Optional extras to pass to the external process via the injected context.\n\n Yields:\n PipesSession: Interface for interacting with the external process.\n\n .. code-block:: python\n\n import subprocess\n from dagster import open_pipes_session\n\n extras = {"foo": "bar"}\n\n @asset\n def ext_asset(context: OpExecutionContext):\n with open_pipes_session(\n context=context,\n extras={"foo": "bar"},\n context_injector=ExtTempFileContextInjector(),\n message_reader=ExtTempFileMessageReader(),\n ) as pipes_session:\n subprocess.Popen(\n ["/bin/python", "/path/to/script.py"],\n env={**pipes_session.get_bootstrap_env_vars()}\n )\n while process.poll() is None:\n yield from pipes_session.get_results()\n\n yield from pipes_session.get_results()\n """\n context.set_requires_typed_event_stream(error_message=_FAIL_TO_YIELD_ERROR_MESSAGE)\n context_data = build_external_execution_context_data(context, extras)\n message_handler = PipesMessageHandler(context)\n try:\n with context_injector.inject_context(\n context_data\n ) as ci_params, message_handler.handle_messages(message_reader) as mr_params:\n yield PipesSession(\n context_data=context_data,\n message_handler=message_handler,\n context_injector_params=ci_params,\n message_reader_params=mr_params,\n )\n finally:\n if not message_handler.received_any_message:\n context.log.warn(\n "[pipes] did not receive any messages from external process. Check stdout / stderr"\n " logs from the external process if"\n f" possible.\\n{context_injector.__class__.__name__}:"\n f" {context_injector.no_messages_debug_text()}\\n{message_reader.__class__.__name__}:"\n f" {message_reader.no_messages_debug_text()}\\n"\n )\n elif not message_handler.received_closed_message:\n context.log.warn(\n "[pipes] did not receive closed message from external process. Buffered messages"\n " may have been discarded without being delivered. Use `open_dagster_pipes` as a"\n " context manager (a with block) to ensure that cleanup is successfully completed."\n " If that is not possible, manually call `PipesContext.close()` before process"\n " exit."\n )
\n
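The docstring example above polls a ``process`` variable that it never assigns and references injector/reader names that differ from the classes defined earlier in this module. A self-contained variant of the same pattern, using the temp-file injector and reader defined above (script path and extras are illustrative):

import os
import subprocess

from dagster import OpExecutionContext, asset
from dagster._core.pipes.utils import (
    PipesTempFileContextInjector,
    PipesTempFileMessageReader,
    open_pipes_session,
)


@asset
def ext_asset(context: OpExecutionContext):
    with open_pipes_session(
        context=context,
        extras={"foo": "bar"},
        context_injector=PipesTempFileContextInjector(),
        message_reader=PipesTempFileMessageReader(),
    ) as pipes_session:
        # Bootstrap env vars tell the child where to load context and write messages.
        env = {**os.environ, **pipes_session.get_bootstrap_env_vars()}
        process = subprocess.Popen(["python", "/path/to/script.py"], env=env)
        # Stream results back while the external process is still running.
        while process.poll() is None:
            yield from pipes_session.get_results()

    # Yield anything still buffered after the session has closed.
    yield from pipes_session.get_results()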
", "current_page_name": "_modules/dagster/_core/pipes/utils", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.pipes.utils"}}, "run_coordinator": {"default_run_coordinator": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.run_coordinator.default_run_coordinator

\nimport logging\nfrom typing import Mapping, Optional\n\nfrom typing_extensions import Self\n\nimport dagster._check as check\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._core.storage.dagster_run import DagsterRun, DagsterRunStatus\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData\n\nfrom .base import RunCoordinator, SubmitRunContext\n\n\n
[docs]class DefaultRunCoordinator(RunCoordinator, ConfigurableClass):\n """Immediately send runs to the run launcher."""\n\n def __init__(self, inst_data: Optional[ConfigurableClassData] = None):\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n self._logger = logging.getLogger("dagster.run_coordinator.default_run_coordinator")\n super().__init__()\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n @classmethod\n def config_type(cls) -> UserConfigSchema:\n return {}\n\n @classmethod\n def from_config_value(\n cls, inst_data: Optional[ConfigurableClassData], config_value: Mapping[str, object]\n ) -> Self:\n return cls(inst_data=inst_data, **config_value)\n\n def submit_run(self, context: SubmitRunContext) -> DagsterRun:\n dagster_run = context.dagster_run\n\n if dagster_run.status == DagsterRunStatus.NOT_STARTED:\n self._instance.launch_run(dagster_run.run_id, context.workspace)\n else:\n self._logger.warning(\n f"submit_run called for run {dagster_run.run_id} with status "\n f"{dagster_run.status.value}, skipping launch."\n )\n\n run = self._instance.get_run_by_id(dagster_run.run_id)\n if run is None:\n check.failed(f"Failed to reload run {dagster_run.run_id}")\n return run\n\n def cancel_run(self, run_id: str) -> bool:\n return self._instance.run_launcher.terminate(run_id)
\n
", "current_page_name": "_modules/dagster/_core/run_coordinator/default_run_coordinator", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.run_coordinator.default_run_coordinator"}, "queued_run_coordinator": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.run_coordinator.queued_run_coordinator

\nimport logging\nfrom typing import Any, Mapping, NamedTuple, Optional, Sequence\n\nfrom typing_extensions import Self\n\nfrom dagster import (\n    DagsterEvent,\n    DagsterEventType,\n    IntSource,\n    String,\n    _check as check,\n)\nfrom dagster._builtins import Bool\nfrom dagster._config import Array, Field, Noneable, ScalarUnion, Shape\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._core.instance import T_DagsterInstance\nfrom dagster._core.storage.dagster_run import DagsterRun, DagsterRunStatus\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData\n\nfrom .base import RunCoordinator, SubmitRunContext\n\n\nclass RunQueueConfig(\n    NamedTuple(\n        "_RunQueueConfig",\n        [\n            ("max_concurrent_runs", int),\n            ("tag_concurrency_limits", Sequence[Mapping[str, Any]]),\n            ("max_user_code_failure_retries", int),\n            ("user_code_failure_retry_delay", int),\n        ],\n    )\n):\n    def __new__(\n        cls,\n        max_concurrent_runs: int,\n        tag_concurrency_limits: Optional[Sequence[Mapping[str, Any]]],\n        max_user_code_failure_retries: int = 0,\n        user_code_failure_retry_delay: int = 60,\n    ):\n        return super(RunQueueConfig, cls).__new__(\n            cls,\n            check.int_param(max_concurrent_runs, "max_concurrent_runs"),\n            check.opt_sequence_param(tag_concurrency_limits, "tag_concurrency_limits"),\n            check.int_param(max_user_code_failure_retries, "max_user_code_failure_retries"),\n            check.int_param(user_code_failure_retry_delay, "user_code_failure_retry_delay"),\n        )\n\n\n
[docs]class QueuedRunCoordinator(RunCoordinator[T_DagsterInstance], ConfigurableClass):\n """Enqueues runs via the run storage, to be deqeueued by the Dagster Daemon process. Requires\n the Dagster Daemon process to be alive in order for runs to be launched.\n """\n\n def __init__(\n self,\n max_concurrent_runs: Optional[int] = None,\n tag_concurrency_limits: Optional[Sequence[Mapping[str, Any]]] = None,\n dequeue_interval_seconds: Optional[int] = None,\n dequeue_use_threads: Optional[bool] = None,\n dequeue_num_workers: Optional[int] = None,\n max_user_code_failure_retries: Optional[int] = None,\n user_code_failure_retry_delay: Optional[int] = None,\n inst_data: Optional[ConfigurableClassData] = None,\n ):\n self._inst_data: Optional[ConfigurableClassData] = check.opt_inst_param(\n inst_data, "inst_data", ConfigurableClassData\n )\n self._max_concurrent_runs: int = check.opt_int_param(\n max_concurrent_runs, "max_concurrent_runs", 10\n )\n check.invariant(\n self._max_concurrent_runs >= -1,\n "Negative values other than -1 (which disables the limit) for max_concurrent_runs"\n " are disallowed.",\n )\n self._tag_concurrency_limits: Sequence[Mapping[str, Any]] = check.opt_list_param(\n tag_concurrency_limits,\n "tag_concurrency_limits",\n )\n self._dequeue_interval_seconds: int = check.opt_int_param(\n dequeue_interval_seconds, "dequeue_interval_seconds", 5\n )\n self._dequeue_use_threads: bool = check.opt_bool_param(\n dequeue_use_threads, "dequeue_use_threads", False\n )\n self._dequeue_num_workers: Optional[int] = check.opt_int_param(\n dequeue_num_workers, "dequeue_num_workers"\n )\n self._max_user_code_failure_retries: int = check.opt_int_param(\n max_user_code_failure_retries, "max_user_code_failure_retries", 0\n )\n self._user_code_failure_retry_delay: int = check.opt_int_param(\n user_code_failure_retry_delay, "user_code_failure_retry_delay", 60\n )\n self._logger = logging.getLogger("dagster.run_coordinator.queued_run_coordinator")\n super().__init__()\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n def get_run_queue_config(self) -> RunQueueConfig:\n return RunQueueConfig(\n max_concurrent_runs=self._max_concurrent_runs,\n tag_concurrency_limits=self._tag_concurrency_limits,\n max_user_code_failure_retries=self._max_user_code_failure_retries,\n user_code_failure_retry_delay=self._user_code_failure_retry_delay,\n )\n\n @property\n def dequeue_interval_seconds(self) -> int:\n return self._dequeue_interval_seconds\n\n @property\n def dequeue_use_threads(self) -> bool:\n return self._dequeue_use_threads\n\n @property\n def dequeue_num_workers(self) -> Optional[int]:\n return self._dequeue_num_workers\n\n @classmethod\n def config_type(cls) -> UserConfigSchema:\n return {\n "max_concurrent_runs": Field(\n config=IntSource,\n is_required=False,\n description=(\n "The maximum number of runs that are allowed to be in progress at once."\n " Defaults to 10. Set to -1 to disable the limit. Set to 0 to stop any runs"\n " from launching. Any other negative values are disallowed."\n ),\n ),\n "tag_concurrency_limits": Field(\n config=Noneable(\n Array(\n Shape(\n {\n "key": String,\n "value": Field(\n ScalarUnion(\n scalar_type=String,\n non_scalar_schema=Shape({"applyLimitPerUniqueValue": Bool}),\n ),\n is_required=False,\n ),\n "limit": Field(int),\n }\n )\n )\n ),\n is_required=False,\n description=(\n "A set of limits that are applied to runs with particular tags. If a value is"\n " set, the limit is applied to only that key-value pair. 
If no value is set,"\n " the limit is applied across all values of that key. If the value is set to a"\n " dict with `applyLimitPerUniqueValue: true`, the limit will apply to the"\n " number of unique values for that key."\n ),\n ),\n "dequeue_interval_seconds": Field(\n config=IntSource,\n is_required=False,\n description=(\n "The interval in seconds at which the Dagster Daemon "\n "should periodically check the run queue for new runs to launch."\n ),\n ),\n "dequeue_use_threads": Field(\n config=bool,\n is_required=False,\n description=(\n "Whether or not to use threads for concurrency when launching dequeued runs."\n ),\n ),\n "dequeue_num_workers": Field(\n config=IntSource,\n is_required=False,\n description=(\n "If dequeue_use_threads is true, limit the number of concurrent worker threads."\n ),\n ),\n "max_user_code_failure_retries": Field(\n config=IntSource,\n is_required=False,\n default_value=0,\n description=(\n "If there is an error reaching a Dagster gRPC server while dequeuing the run,"\n " how many times to retry the dequeue before failing it. The only run launcher"\n " that requires the gRPC server to be running is the DefaultRunLauncher, so"\n " setting this will have no effect unless that run launcher is being used."\n ),\n ),\n "user_code_failure_retry_delay": Field(\n config=IntSource,\n is_required=False,\n default_value=60,\n description=(\n "If there is an error reaching a Dagster gRPC server while dequeuing the run,"\n " how long to wait before retrying any runs from that same code location. The"\n " only run launcher that requires the gRPC server to be running is the"\n " DefaultRunLauncher, so setting this will have no effect unless that run"\n " launcher is being used."\n ),\n ),\n }\n\n @classmethod\n def from_config_value(\n cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any]\n ) -> Self:\n return cls(\n inst_data=inst_data,\n max_concurrent_runs=config_value.get("max_concurrent_runs"),\n tag_concurrency_limits=config_value.get("tag_concurrency_limits"),\n dequeue_interval_seconds=config_value.get("dequeue_interval_seconds"),\n dequeue_use_threads=config_value.get("dequeue_use_threads"),\n dequeue_num_workers=config_value.get("dequeue_num_workers"),\n max_user_code_failure_retries=config_value.get("max_user_code_failure_retries"),\n user_code_failure_retry_delay=config_value.get("user_code_failure_retry_delay"),\n )\n\n def submit_run(self, context: SubmitRunContext) -> DagsterRun:\n dagster_run = context.dagster_run\n\n if dagster_run.status == DagsterRunStatus.NOT_STARTED:\n enqueued_event = DagsterEvent(\n event_type_value=DagsterEventType.PIPELINE_ENQUEUED.value,\n job_name=dagster_run.job_name,\n )\n self._instance.report_dagster_event(enqueued_event, run_id=dagster_run.run_id)\n else:\n # the run was already submitted, this is a no-op\n self._logger.warning(\n f"submit_run called for run {dagster_run.run_id} with status "\n f"{dagster_run.status.value}, skipping enqueue."\n )\n\n run = self._instance.get_run_by_id(dagster_run.run_id)\n if run is None:\n check.failed(f"Failed to reload run {dagster_run.run_id}")\n return run\n\n def cancel_run(self, run_id: str) -> bool:\n run = self._instance.get_run_by_id(run_id)\n if not run:\n return False\n # NOTE: possible race condition if the dequeuer acts on this run at the same time\n # https://github.com/dagster-io/dagster/issues/3323\n if run.status == DagsterRunStatus.QUEUED:\n self._instance.report_run_canceling(\n run,\n message="Canceling run from the queue.",\n )\n 
self._instance.report_run_canceled(run)\n return True\n else:\n return self._instance.run_launcher.terminate(run_id)
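A hedged sketch of constructing the coordinator directly with the same options its config schema exposes; in a deployment these values are normally supplied under the run_coordinator section of dagster.yaml rather than built in Python, and the "team" tag is hypothetical.

from dagster._core.run_coordinator.queued_run_coordinator import QueuedRunCoordinator

coordinator = QueuedRunCoordinator(
    max_concurrent_runs=25,
    tag_concurrency_limits=[{"key": "team", "value": "ml", "limit": 2}],  # hypothetical tag
    dequeue_interval_seconds=10,
)
# The daemon reads these settings back as a RunQueueConfig when dequeuing runs.
assert coordinator.get_run_queue_config().max_concurrent_runs == 25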
\n
", "current_page_name": "_modules/dagster/_core/run_coordinator/queued_run_coordinator", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.run_coordinator.queued_run_coordinator"}}, "scheduler": {"scheduler": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.scheduler.scheduler

\nimport abc\nimport os\nfrom typing import Any, Mapping, NamedTuple, Optional, Sequence\n\nfrom typing_extensions import Self\n\nimport dagster._check as check\nfrom dagster._config import Field, IntSource\nfrom dagster._core.definitions.run_request import InstigatorType\nfrom dagster._core.errors import DagsterError\nfrom dagster._core.host_representation import ExternalSchedule\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._core.scheduler.instigation import (\n    InstigatorState,\n    InstigatorStatus,\n    ScheduleInstigatorData,\n)\nfrom dagster._serdes import ConfigurableClass\nfrom dagster._serdes.config_class import ConfigurableClassData\nfrom dagster._seven import get_current_datetime_in_utc\nfrom dagster._utils import mkdir_p\n\n\nclass DagsterSchedulerError(DagsterError):\n    """Base class for all Dagster Scheduler errors."""\n\n\nclass DagsterScheduleDoesNotExist(DagsterSchedulerError):\n    """Errors raised when fetching a schedule."""\n\n\nclass SchedulerDebugInfo(\n    NamedTuple(\n        "SchedulerDebugInfo",\n        [\n            ("errors", Sequence[str]),\n            ("scheduler_config_info", str),\n            ("scheduler_info", str),\n            ("schedule_storage", Sequence[str]),\n        ],\n    )\n):\n    def __new__(\n        cls,\n        errors: Sequence[str],\n        scheduler_config_info: str,\n        scheduler_info: str,\n        schedule_storage: Sequence[str],\n    ):\n        return super(SchedulerDebugInfo, cls).__new__(\n            cls,\n            errors=check.sequence_param(errors, "errors", of_type=str),\n            scheduler_config_info=check.str_param(scheduler_config_info, "scheduler_config_info"),\n            scheduler_info=check.str_param(scheduler_info, "scheduler_info"),\n            schedule_storage=check.sequence_param(\n                schedule_storage, "schedule_storage", of_type=str\n            ),\n        )\n\n\n
[docs]class Scheduler(abc.ABC):\n """Abstract base class for a scheduler. This component is responsible for interfacing with\n an external system such as cron to ensure scheduled repeated execution according.\n """\n\n def start_schedule(\n self, instance: DagsterInstance, external_schedule: ExternalSchedule\n ) -> InstigatorState:\n """Updates the status of the given schedule to `InstigatorStatus.RUNNING` in schedule storage,.\n\n This should not be overridden by subclasses.\n\n Args:\n instance (DagsterInstance): The current instance.\n external_schedule (ExternalSchedule): The schedule to start\n\n """\n check.inst_param(instance, "instance", DagsterInstance)\n check.inst_param(external_schedule, "external_schedule", ExternalSchedule)\n\n stored_state = instance.get_instigator_state(\n external_schedule.get_external_origin_id(), external_schedule.selector_id\n )\n computed_state = external_schedule.get_current_instigator_state(stored_state)\n if computed_state.is_running:\n return computed_state\n\n new_instigator_data = ScheduleInstigatorData(\n external_schedule.cron_schedule,\n get_current_datetime_in_utc().timestamp(),\n )\n\n if not stored_state:\n started_state = InstigatorState(\n external_schedule.get_external_origin(),\n InstigatorType.SCHEDULE,\n InstigatorStatus.RUNNING,\n new_instigator_data,\n )\n instance.add_instigator_state(started_state)\n else:\n started_state = stored_state.with_status(InstigatorStatus.RUNNING).with_data(\n new_instigator_data\n )\n instance.update_instigator_state(started_state)\n return started_state\n\n def stop_schedule(\n self,\n instance: DagsterInstance,\n schedule_origin_id: str,\n schedule_selector_id: str,\n external_schedule: Optional[ExternalSchedule],\n ) -> InstigatorState:\n """Updates the status of the given schedule to `InstigatorStatus.STOPPED` in schedule storage,.\n\n This should not be overridden by subclasses.\n\n Args:\n schedule_origin_id (string): The id of the schedule target to stop running.\n """\n check.str_param(schedule_origin_id, "schedule_origin_id")\n check.opt_inst_param(external_schedule, "external_schedule", ExternalSchedule)\n\n stored_state = instance.get_instigator_state(schedule_origin_id, schedule_selector_id)\n\n if not external_schedule:\n computed_state = stored_state\n else:\n computed_state = external_schedule.get_current_instigator_state(stored_state)\n\n if computed_state and not computed_state.is_running:\n return computed_state\n\n if not stored_state:\n assert external_schedule\n stopped_state = InstigatorState(\n external_schedule.get_external_origin(),\n InstigatorType.SCHEDULE,\n InstigatorStatus.STOPPED,\n ScheduleInstigatorData(\n external_schedule.cron_schedule,\n ),\n )\n instance.add_instigator_state(stopped_state)\n else:\n stopped_state = stored_state.with_status(InstigatorStatus.STOPPED).with_data(\n ScheduleInstigatorData(\n cron_schedule=computed_state.instigator_data.cron_schedule, # type: ignore\n )\n )\n instance.update_instigator_state(stopped_state)\n\n return stopped_state\n\n @abc.abstractmethod\n def debug_info(self) -> str:\n """Returns debug information about the scheduler."""\n\n @abc.abstractmethod\n def get_logs_path(self, instance: DagsterInstance, schedule_origin_id: str) -> str:\n """Get path to store logs for schedule.\n\n Args:\n schedule_origin_id (string): The id of the schedule target to retrieve the log path for\n """
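A minimal, hypothetical Scheduler subclass, shown only to illustrate the abstract surface above: start_schedule and stop_schedule are inherited, so a concrete scheduler mainly supplies debug_info and get_logs_path.

import os

from dagster._core.instance import DagsterInstance
from dagster._core.scheduler.scheduler import Scheduler

class NoopScheduler(Scheduler):
    # Hypothetical example subclass; not part of Dagster itself.
    def debug_info(self) -> str:
        return "noop scheduler: no external scheduling system"

    def wipe(self, instance: DagsterInstance) -> None:
        # Nothing to clean up for this sketch.
        pass

    def get_logs_path(self, instance: DagsterInstance, schedule_origin_id: str) -> str:
        # Mirrors the layout DagsterDaemonScheduler uses below.
        return os.path.join(instance.schedules_directory(), "logs", schedule_origin_id, "scheduler.log")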
\n\n\nDEFAULT_MAX_CATCHUP_RUNS = 5\n\n\n
[docs]class DagsterDaemonScheduler(Scheduler, ConfigurableClass):\n """Default scheduler implementation that submits runs from the `dagster-daemon`\n long-lived process. Periodically checks each running schedule for execution times that don't\n have runs yet and launches them.\n """\n\n def __init__(\n self,\n max_catchup_runs: int = DEFAULT_MAX_CATCHUP_RUNS,\n max_tick_retries: int = 0,\n inst_data: Optional[ConfigurableClassData] = None,\n ):\n self.max_catchup_runs = check.opt_int_param(\n max_catchup_runs, "max_catchup_runs", DEFAULT_MAX_CATCHUP_RUNS\n )\n self.max_tick_retries = check.opt_int_param(max_tick_retries, "max_tick_retries", 0)\n self._inst_data = inst_data\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n @classmethod\n def config_type(cls):\n return {\n "max_catchup_runs": Field(\n IntSource,\n is_required=False,\n default_value=DEFAULT_MAX_CATCHUP_RUNS,\n description="""For partitioned schedules, controls the maximum number of past\n partitions for each schedule that will be considered when looking for missing\n runs . Generally this parameter will only come into play if the scheduler\n falls behind or launches after experiencing downtime. This parameter will not be checked for\n schedules without partition sets (for example, schedules created using the @schedule\n decorator) - only the most recent execution time will be considered for those schedules.\n\n Note that no matter what this value is, the scheduler will never launch a run from a time\n before the schedule was turned on (even if the start_date on the schedule is earlier) - if\n you want to launch runs for earlier partitions, launch a backfill.\n """,\n ),\n "max_tick_retries": Field(\n IntSource,\n default_value=0,\n is_required=False,\n description=(\n "For each schedule tick that raises an error, how many times to retry that tick"\n ),\n ),\n }\n\n @classmethod\n def from_config_value(\n cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any]\n ) -> Self:\n return DagsterDaemonScheduler(inst_data=inst_data, **config_value)\n\n def debug_info(self) -> str:\n return ""\n\n def wipe(self, instance: DagsterInstance) -> None:\n pass\n\n def _get_or_create_logs_directory(\n self, instance: DagsterInstance, schedule_origin_id: str\n ) -> str:\n check.inst_param(instance, "instance", DagsterInstance)\n check.str_param(schedule_origin_id, "schedule_origin_id")\n\n logs_directory = os.path.join(instance.schedules_directory(), "logs", schedule_origin_id)\n if not os.path.isdir(logs_directory):\n mkdir_p(logs_directory)\n\n return logs_directory\n\n def get_logs_path(self, instance: DagsterInstance, schedule_origin_id: str) -> str:\n check.inst_param(instance, "instance", DagsterInstance)\n check.str_param(schedule_origin_id, "schedule_origin_id")\n\n logs_directory = self._get_or_create_logs_directory(instance, schedule_origin_id)\n return os.path.join(logs_directory, "scheduler.log")
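A small sketch of configuring the daemon scheduler in Python with the two options from its config schema; in a deployment these values would typically be set under the scheduler entry in dagster.yaml instead.

from dagster._core.scheduler.scheduler import DagsterDaemonScheduler

# Consider up to 10 missed partitions when catching up, and retry a failing
# schedule tick once before marking it failed.
scheduler = DagsterDaemonScheduler(max_catchup_runs=10, max_tick_retries=1)
assert scheduler.max_catchup_runs == 10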
\n
", "current_page_name": "_modules/dagster/_core/scheduler/scheduler", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.scheduler.scheduler"}}, "storage": {"asset_value_loader": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.asset_value_loader

\nfrom contextlib import ExitStack\nfrom typing import Any, Dict, Mapping, Optional, Type, cast\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.definitions.assets import AssetsDefinition\nfrom dagster._core.definitions.events import AssetKey, CoercibleToAssetKey\nfrom dagster._core.definitions.job_definition import (\n    default_job_io_manager_with_fs_io_manager_schema,\n)\nfrom dagster._core.definitions.partition_key_range import PartitionKeyRange\nfrom dagster._core.definitions.resource_definition import ResourceDefinition\nfrom dagster._core.definitions.source_asset import SourceAsset\nfrom dagster._core.definitions.utils import DEFAULT_IO_MANAGER_KEY\nfrom dagster._core.execution.build_resources import build_resources, get_mapped_resource_config\nfrom dagster._core.execution.context.input import build_input_context\nfrom dagster._core.execution.context.output import build_output_context\nfrom dagster._core.execution.resources_init import get_transitive_required_resource_keys\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._core.instance.config import is_dagster_home_set\nfrom dagster._core.types.dagster_type import resolve_dagster_type\nfrom dagster._utils.merger import merge_dicts\n\nfrom .io_manager import IOManager\n\n\n
[docs]class AssetValueLoader:\n """Caches resource definitions that are used to load asset values across multiple load\n invocations.\n\n Should not be instantiated directly. Instead, use\n :py:meth:`~dagster.RepositoryDefinition.get_asset_value_loader`.\n """\n\n def __init__(\n self,\n assets_defs_by_key: Mapping[AssetKey, AssetsDefinition],\n source_assets_by_key: Mapping[AssetKey, SourceAsset],\n instance: Optional[DagsterInstance] = None,\n ):\n self._assets_defs_by_key = assets_defs_by_key\n self._source_assets_by_key = source_assets_by_key\n self._resource_instance_cache: Dict[str, object] = {}\n self._exit_stack: ExitStack = ExitStack().__enter__()\n if not instance and is_dagster_home_set():\n self._instance = self._exit_stack.enter_context(DagsterInstance.get())\n else:\n self._instance = instance\n\n def _ensure_resource_instances_in_cache(\n self,\n resource_defs: Mapping[str, ResourceDefinition],\n resource_config: Optional[Mapping[str, Any]] = None,\n ):\n for built_resource_key, built_resource in (\n self._exit_stack.enter_context(\n build_resources(\n resources={\n resource_key: self._resource_instance_cache.get(resource_key, resource_def)\n for resource_key, resource_def in resource_defs.items()\n },\n instance=self._instance,\n resource_config=resource_config,\n )\n )\n ._asdict()\n .items()\n ):\n self._resource_instance_cache[built_resource_key] = built_resource\n\n
[docs] @public\n def load_asset_value(\n self,\n asset_key: CoercibleToAssetKey,\n *,\n python_type: Optional[Type[object]] = None,\n partition_key: Optional[str] = None,\n metadata: Optional[Dict[str, Any]] = None,\n resource_config: Optional[Mapping[str, Any]] = None,\n ) -> object:\n """Loads the contents of an asset as a Python object.\n\n Invokes `load_input` on the :py:class:`IOManager` associated with the asset.\n\n Args:\n asset_key (Union[AssetKey, Sequence[str], str]): The key of the asset to load.\n python_type (Optional[Type]): The python type to load the asset as. This is what will\n be returned inside `load_input` by `context.dagster_type.typing_type`.\n partition_key (Optional[str]): The partition of the asset to load.\n metadata (Optional[Dict[str, Any]]): Input metadata to pass to the :py:class:`IOManager`\n (is equivalent to setting the metadata argument in `In` or `AssetIn`).\n resource_config (Optional[Any]): A dictionary of resource configurations to be passed\n to the :py:class:`IOManager`.\n\n Returns:\n The contents of an asset as a Python object.\n """\n asset_key = AssetKey.from_coercible(asset_key)\n resource_config = resource_config or {}\n output_metadata = {}\n\n if asset_key in self._assets_defs_by_key:\n assets_def = self._assets_defs_by_key[asset_key]\n\n resource_defs = merge_dicts(\n {DEFAULT_IO_MANAGER_KEY: default_job_io_manager_with_fs_io_manager_schema},\n assets_def.resource_defs,\n )\n io_manager_key = assets_def.get_io_manager_key_for_asset_key(asset_key)\n io_manager_def = resource_defs[io_manager_key]\n name = assets_def.get_output_name_for_asset_key(asset_key)\n output_metadata = assets_def.metadata_by_key[asset_key]\n op_def = assets_def.get_op_def_for_asset_key(asset_key)\n asset_partitions_def = assets_def.partitions_def\n elif asset_key in self._source_assets_by_key:\n source_asset = self._source_assets_by_key[asset_key]\n\n resource_defs = merge_dicts(\n {DEFAULT_IO_MANAGER_KEY: default_job_io_manager_with_fs_io_manager_schema},\n source_asset.resource_defs,\n )\n io_manager_key = source_asset.get_io_manager_key()\n io_manager_def = resource_defs[io_manager_key]\n name = asset_key.path[-1]\n output_metadata = source_asset.raw_metadata\n op_def = None\n asset_partitions_def = source_asset.partitions_def\n else:\n check.failed(f"Asset key {asset_key} not found")\n\n required_resource_keys = get_transitive_required_resource_keys(\n io_manager_def.required_resource_keys, resource_defs\n ) | {io_manager_key}\n\n self._ensure_resource_instances_in_cache(\n {k: v for k, v in resource_defs.items() if k in required_resource_keys},\n resource_config=resource_config,\n )\n io_manager = cast(IOManager, self._resource_instance_cache[io_manager_key])\n\n io_config = resource_config.get(io_manager_key)\n io_resource_config = {io_manager_key: io_config} if io_config else {}\n\n io_manager_config = get_mapped_resource_config(\n {io_manager_key: io_manager_def}, io_resource_config\n )\n\n input_context = build_input_context(\n name=None,\n asset_key=asset_key,\n dagster_type=resolve_dagster_type(python_type),\n upstream_output=build_output_context(\n name=name,\n metadata=output_metadata,\n asset_key=asset_key,\n op_def=op_def,\n resource_config=resource_config,\n ),\n resources=self._resource_instance_cache,\n resource_config=io_manager_config[io_manager_key].config,\n partition_key=partition_key,\n asset_partition_key_range=(\n PartitionKeyRange(partition_key, partition_key)\n if partition_key is not None\n else None\n ),\n 
asset_partitions_def=asset_partitions_def,\n instance=self._instance,\n metadata=metadata,\n )\n\n return io_manager.load_input(input_context)
\n\n def __enter__(self):\n return self\n\n def __exit__(self, *exc):\n self._exit_stack.close()
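A hedged sketch of reaching load_asset_value through the RepositoryDefinition.get_asset_value_loader entry point named in the docstring above; the my_table asset is hypothetical, Definitions.get_repository_def() is assumed as the bridge to the repository definition, and the asset is assumed to have been materialized already so the IO manager can find its stored value.

from dagster import Definitions, asset

@asset
def my_table():  # hypothetical asset
    return [1, 2, 3]

defs = Definitions(assets=[my_table])

# Assumes my_table was previously materialized on the current instance.
with defs.get_repository_def().get_asset_value_loader() as loader:
    value = loader.load_asset_value("my_table", python_type=list)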
\n
", "current_page_name": "_modules/dagster/_core/storage/asset_value_loader", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.asset_value_loader"}, "base_storage": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.base_storage

\nfrom abc import ABC, abstractmethod\n\nfrom dagster._core.instance import MayHaveInstanceWeakref, T_DagsterInstance\n\nfrom .event_log.base import EventLogStorage\nfrom .runs.base import RunStorage\nfrom .schedules.base import ScheduleStorage\n\n\n
[docs]class DagsterStorage(ABC, MayHaveInstanceWeakref[T_DagsterInstance]):\n """Abstract base class for Dagster persistent storage, for reading and writing data for runs,\n events, and schedule/sensor state.\n\n Users should not directly instantiate concrete subclasses of this class; they are instantiated\n by internal machinery when ``dagster-webserver`` and ``dagster-daemon`` load, based on the values in the\n ``dagster.yaml`` file in ``$DAGSTER_HOME``. Configuration of concrete subclasses of this class\n should be done by setting values in that file.\n """\n\n @property\n @abstractmethod\n def event_log_storage(self) -> EventLogStorage[T_DagsterInstance]:\n raise NotImplementedError()\n\n @property\n @abstractmethod\n def run_storage(self) -> RunStorage[T_DagsterInstance]:\n raise NotImplementedError()\n\n @property\n @abstractmethod\n def schedule_storage(self) -> ScheduleStorage[T_DagsterInstance]:\n raise NotImplementedError()
\n
", "current_page_name": "_modules/dagster/_core/storage/base_storage", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.base_storage"}, "captured_log_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.captured_log_manager

\nfrom abc import ABC, abstractmethod\nfrom contextlib import contextmanager\nfrom typing import IO, Callable, Generator, Iterator, NamedTuple, Optional, Sequence\n\nfrom typing_extensions import Final, Self\n\nimport dagster._check as check\nfrom dagster._core.storage.compute_log_manager import ComputeIOType\n\nMAX_BYTES_CHUNK_READ: Final = 4194304  # 4 MB\n\n\nclass CapturedLogContext(\n    NamedTuple(\n        "_CapturedLogContext",\n        [\n            ("log_key", Sequence[str]),\n            ("external_url", Optional[str]),\n            ("external_stdout_url", Optional[str]),\n            ("external_stderr_url", Optional[str]),\n        ],\n    )\n):\n    """Object representing the context in which logs are captured.  Can be used by external logging\n    sidecar implementations to point the Dagster UI to an external url to view compute logs instead of a\n    Dagster-managed location.\n    """\n\n    def __new__(\n        cls,\n        log_key: Sequence[str],\n        external_stdout_url: Optional[str] = None,\n        external_stderr_url: Optional[str] = None,\n        external_url: Optional[str] = None,\n    ):\n        if external_url and (external_stdout_url or external_stderr_url):\n            check.failed(\n                "Cannot specify both `external_url` and one of"\n                " `external_stdout_url`/`external_stderr_url`"\n            )\n\n        return super(CapturedLogContext, cls).__new__(\n            cls,\n            log_key,\n            external_stdout_url=external_stdout_url,\n            external_stderr_url=external_stderr_url,\n            external_url=external_url,\n        )\n\n\nclass CapturedLogData(\n    NamedTuple(\n        "_CapturedLogData",\n        [\n            ("log_key", Sequence[str]),\n            ("stdout", Optional[bytes]),\n            ("stderr", Optional[bytes]),\n            ("cursor", Optional[str]),\n        ],\n    )\n):\n    """Object representing captured log data, either a partial chunk of the log data or the full\n    capture.  
Contains the raw bytes and optionally the cursor offset for the partial chunk.\n    """\n\n    def __new__(\n        cls,\n        log_key: Sequence[str],\n        stdout: Optional[bytes] = None,\n        stderr: Optional[bytes] = None,\n        cursor: Optional[str] = None,\n    ):\n        return super(CapturedLogData, cls).__new__(cls, log_key, stdout, stderr, cursor)\n\n\nclass CapturedLogMetadata(\n    NamedTuple(\n        "_CapturedLogMetadata",\n        [\n            ("stdout_location", Optional[str]),\n            ("stderr_location", Optional[str]),\n            ("stdout_download_url", Optional[str]),\n            ("stderr_download_url", Optional[str]),\n        ],\n    )\n):\n    """Object representing metadata info for the captured log data, containing a display string for\n    the location of the log data and a URL for direct download of the captured log data.\n    """\n\n    def __new__(\n        cls,\n        stdout_location: Optional[str] = None,\n        stderr_location: Optional[str] = None,\n        stdout_download_url: Optional[str] = None,\n        stderr_download_url: Optional[str] = None,\n    ):\n        return super(CapturedLogMetadata, cls).__new__(\n            cls,\n            stdout_location=stdout_location,\n            stderr_location=stderr_location,\n            stdout_download_url=stdout_download_url,\n            stderr_download_url=stderr_download_url,\n        )\n\n\nclass CapturedLogSubscription:\n    def __init__(\n        self, manager: "CapturedLogManager", log_key: Sequence[str], cursor: Optional[str]\n    ):\n        self._manager = manager\n        self._log_key = log_key\n        self._cursor = cursor\n        self._observer: Optional[Callable[[CapturedLogData], None]] = None\n        self.is_complete = False\n\n    def __call__(self, observer: Optional[Callable[[CapturedLogData], None]]) -> Self:\n        self._observer = observer\n        self.fetch()\n        if self._manager.is_capture_complete(self._log_key):\n            self.complete()\n        return self\n\n    @property\n    def log_key(self) -> Sequence[str]:\n        return self._log_key\n\n    def dispose(self) -> None:\n        self._observer = None\n        self._manager.unsubscribe(self)\n\n    def fetch(self) -> None:\n        if not self._observer:\n            return\n\n        should_fetch = True\n        while should_fetch:\n            log_data = self._manager.get_log_data(\n                self._log_key,\n                self._cursor,\n                max_bytes=MAX_BYTES_CHUNK_READ,\n            )\n            if not self._cursor or log_data.cursor != self._cursor:\n                self._observer(log_data)\n                self._cursor = log_data.cursor\n            should_fetch = _has_max_data(log_data.stdout) or _has_max_data(log_data.stderr)\n\n    def complete(self) -> None:\n        self.is_complete = True\n\n\ndef _has_max_data(chunk: Optional[bytes]) -> bool:\n    # function is used as predicate but does not actually return a boolean\n    return chunk and len(chunk) >= MAX_BYTES_CHUNK_READ  # type: ignore\n\n\n
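A small sketch constructing a CapturedLogContext that points the UI at externally hosted logs; the URLs are hypothetical, and per the constructor above external_url cannot be combined with the per-stream URLs.

from dagster._core.storage.captured_log_manager import CapturedLogContext

context = CapturedLogContext(
    log_key=["my_run_id", "compute_logs", "my_step"],
    external_stdout_url="https://logs.example.com/my_run/stdout",  # hypothetical
    external_stderr_url="https://logs.example.com/my_run/stderr",  # hypothetical
)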
[docs]class CapturedLogManager(ABC):\n """Abstract base class for capturing the unstructured logs (stdout/stderr) in the current\n process, stored / retrieved with a provided log_key.\n """\n\n @abstractmethod\n @contextmanager\n def capture_logs(self, log_key: Sequence[str]) -> Generator[CapturedLogContext, None, None]:\n """Context manager for capturing the stdout/stderr within the current process, and persisting\n it under the given log key.\n\n Args:\n log_key (List[String]): The log key identifying the captured logs\n """\n\n @abstractmethod\n @contextmanager\n def open_log_stream(\n self, log_key: Sequence[str], io_type: ComputeIOType\n ) -> Iterator[Optional[IO[bytes]]]:\n """Context manager for providing an IO stream that enables the caller to write to a log stream\n managed by the captured log manager, to be read later using the given log key.\n\n Args:\n log_key (List[String]): The log key identifying the captured logs\n """\n\n @abstractmethod\n def is_capture_complete(self, log_key: Sequence[str]) -> bool:\n """Flag indicating when the log capture for a given log key has completed.\n\n Args:\n log_key (List[String]): The log key identifying the captured logs\n\n Returns:\n Boolean\n """\n\n @abstractmethod\n def get_log_data(\n self,\n log_key: Sequence[str],\n cursor: Optional[str] = None,\n max_bytes: Optional[int] = None,\n ) -> CapturedLogData:\n """Returns a chunk of the captured stdout logs for a given log key.\n\n Args:\n log_key (List[String]): The log key identifying the captured logs\n cursor (Optional[str]): A cursor representing the position of the log chunk to fetch\n max_bytes (Optional[int]): A limit on the size of the log chunk to fetch\n\n Returns:\n CapturedLogData\n """\n\n @abstractmethod\n def get_log_metadata(self, log_key: Sequence[str]) -> CapturedLogMetadata:\n """Returns the metadata of the captured logs for a given log key, including\n displayable information on where the logs are persisted.\n\n Args:\n log_key (List[String]): The log key identifying the captured logs\n\n Returns:\n CapturedLogMetadata\n """\n\n @abstractmethod\n def delete_logs(\n self, log_key: Optional[Sequence[str]] = None, prefix: Optional[Sequence[str]] = None\n ) -> None:\n """Deletes the captured logs for a given log key.\n\n Args:\n log_key(Optional[List[String]]): The log key of the logs to delete\n prefix(Optional[List[String]]): The prefix of the log keys to delete\n """\n\n @abstractmethod\n def subscribe(\n self, log_key: Sequence[str], cursor: Optional[str] = None\n ) -> CapturedLogSubscription:\n """Registers an observable object for log data.\n\n Args:\n log_key (List[String]): The log key identifying the captured logs\n cursor (Optional[String]): The string cursor marking the position within the log stream\n Returns:\n ComputeLogSubscription\n """\n\n @abstractmethod\n def unsubscribe(self, subscription: CapturedLogSubscription) -> None:\n """Deregisters an observable object from receiving log updates.\n\n Args:\n subscription (CapturedLogSubscription): subscription object which manages when to send\n back data to the subscriber\n """\n\n def build_log_key_for_run(self, run_id: str, step_key: str) -> Sequence[str]:\n """Legacy adapter to translate run_id/key to captured log manager-based log_key."""\n return [run_id, "compute_logs", step_key]
\n
", "current_page_name": "_modules/dagster/_core/storage/captured_log_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.captured_log_manager"}, "compute_log_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.compute_log_manager

\nfrom abc import ABC, abstractmethod\nfrom contextlib import contextmanager\nfrom enum import Enum\nfrom typing import Callable, Iterator, NamedTuple, Optional\n\nfrom typing_extensions import Self\n\nimport dagster._check as check\nfrom dagster._core.instance import MayHaveInstanceWeakref, T_DagsterInstance\nfrom dagster._core.storage.dagster_run import DagsterRun\n\nMAX_BYTES_FILE_READ = 33554432  # 32 MB\nMAX_BYTES_CHUNK_READ = 4194304  # 4 MB\n\n\nclass ComputeIOType(Enum):\n    STDOUT = "stdout"\n    STDERR = "stderr"\n\n\nclass ComputeLogFileData(\n    NamedTuple(\n        "ComputeLogFileData",\n        [\n            ("path", str),\n            ("data", Optional[str]),\n            ("cursor", int),\n            ("size", int),\n            ("download_url", Optional[str]),\n        ],\n    )\n):\n    """Representation of a chunk of compute execution log data."""\n\n    def __new__(\n        cls, path: str, data: Optional[str], cursor: int, size: int, download_url: Optional[str]\n    ):\n        return super(ComputeLogFileData, cls).__new__(\n            cls,\n            path=check.str_param(path, "path"),\n            data=check.opt_str_param(data, "data"),\n            cursor=check.int_param(cursor, "cursor"),\n            size=check.int_param(size, "size"),\n            download_url=check.opt_str_param(download_url, "download_url"),\n        )\n\n\n
[docs]class ComputeLogManager(ABC, MayHaveInstanceWeakref[T_DagsterInstance]):\n """Abstract base class for storing unstructured compute logs (stdout/stderr) from the compute\n steps of pipeline solids.\n """\n\n @contextmanager\n def watch(self, dagster_run: DagsterRun, step_key: Optional[str] = None) -> Iterator[None]:\n """Watch the stdout/stderr for a given execution for a given run_id / step_key and persist it.\n\n Args:\n dagster_run (DagsterRun): The run config\n step_key (Optional[String]): The step_key for a compute step\n """\n check.inst_param(dagster_run, "dagster_run", DagsterRun)\n check.opt_str_param(step_key, "step_key")\n\n if not self.enabled(dagster_run, step_key):\n yield\n return\n\n self.on_watch_start(dagster_run, step_key)\n with self._watch_logs(dagster_run, step_key):\n yield\n self.on_watch_finish(dagster_run, step_key)\n\n @contextmanager\n @abstractmethod\n def _watch_logs(\n self, dagster_run: DagsterRun, step_key: Optional[str] = None\n ) -> Iterator[None]:\n """Method to watch the stdout/stderr logs for a given run_id / step_key. Kept separate from\n blessed `watch` method, which triggers all the start/finish hooks that are necessary to\n implement the different remote implementations.\n\n Args:\n dagster_run (DagsterRun): The run config\n step_key (Optional[String]): The step_key for a compute step\n """\n\n @abstractmethod\n def get_local_path(self, run_id: str, key: str, io_type: ComputeIOType) -> str:\n """Get the local path of the logfile for a given execution step. This determines the\n location on the local filesystem to which stdout/stderr will be rerouted.\n\n Args:\n run_id (str): The id of the pipeline run.\n key (str): The unique descriptor of the execution step (e.g. `solid_invocation.compute`)\n io_type (ComputeIOType): Flag indicating the I/O type, either ComputeIOType.STDOUT or\n ComputeIOType.STDERR\n\n Returns:\n str\n """\n ...\n\n @abstractmethod\n def is_watch_completed(self, run_id: str, key: str) -> bool:\n """Flag indicating when computation for a given execution step has completed.\n\n Args:\n run_id (str): The id of the pipeline run.\n key (str): The unique descriptor of the execution step (e.g. `solid_invocation.compute`)\n\n Returns:\n Boolean\n """\n\n @abstractmethod\n def on_watch_start(self, dagster_run: DagsterRun, step_key: Optional[str]) -> None:\n """Hook called when starting to watch compute logs.\n\n Args:\n pipeline_run (PipelineRun): The pipeline run config\n step_key (Optional[String]): The step_key for a compute step\n """\n\n @abstractmethod\n def on_watch_finish(self, dagster_run: DagsterRun, step_key: Optional[str]) -> None:\n """Hook called when computation for a given execution step is finished.\n\n Args:\n pipeline_run (PipelineRun): The pipeline run config\n step_key (Optional[String]): The step_key for a compute step\n """\n\n @abstractmethod\n def download_url(self, run_id: str, key: str, io_type: ComputeIOType) -> str:\n """Get a URL where the logs can be downloaded.\n\n Args:\n run_id (str): The id of the pipeline run.\n key (str): The unique descriptor of the execution step (e.g. 
`solid_invocation.compute`)\n io_type (ComputeIOType): Flag indicating the I/O type, either stdout or stderr\n\n Returns:\n String\n """\n\n @abstractmethod\n def read_logs_file(\n self,\n run_id: str,\n key: str,\n io_type: ComputeIOType,\n cursor: int = 0,\n max_bytes: int = MAX_BYTES_FILE_READ,\n ) -> ComputeLogFileData:\n """Get compute log data for a given compute step.\n\n Args:\n run_id (str): The id of the pipeline run.\n key (str): The unique descriptor of the execution step (e.g. `solid_invocation.compute`)\n io_type (ComputeIOType): Flag indicating the I/O type, either stdout or stderr\n cursor (Optional[Int]): Starting cursor (byte) of log file\n max_bytes (Optional[Int]): Maximum number of bytes to be read and returned\n\n Returns:\n ComputeLogFileData\n """\n\n def enabled(self, _dagster_run: DagsterRun, _step_key: Optional[str]) -> bool:\n """Hook for disabling compute log capture.\n\n Args:\n _step_key (Optional[String]): The step_key for a compute step\n\n Returns:\n Boolean\n """\n return True\n\n @abstractmethod\n def on_subscribe(self, subscription: "ComputeLogSubscription") -> None:\n """Hook for managing streaming subscriptions for log data from `dagster-webserver`.\n\n Args:\n subscription (ComputeLogSubscription): subscription object which manages when to send\n back data to the subscriber\n """\n\n def on_unsubscribe(self, subscription: "ComputeLogSubscription") -> None:\n pass\n\n def observable(\n self, run_id: str, key: str, io_type: ComputeIOType, cursor: Optional[str] = None\n ) -> "ComputeLogSubscription":\n """Return a ComputeLogSubscription which streams back log data from the execution logs for a given\n compute step.\n\n Args:\n run_id (str): The id of the pipeline run.\n key (str): The unique descriptor of the execution step (e.g. `solid_invocation.compute`)\n io_type (ComputeIOType): Flag indicating the I/O type, either stdout or stderr\n cursor (Optional[Int]): Starting cursor (byte) of log file\n\n Returns:\n Observable\n """\n check.str_param(run_id, "run_id")\n check.str_param(key, "key")\n check.inst_param(io_type, "io_type", ComputeIOType)\n check.opt_str_param(cursor, "cursor")\n\n if cursor:\n cursor = int(cursor) # type: ignore # (var reassigned diff type)\n else:\n cursor = 0 # type: ignore # (var reassigned diff type)\n\n subscription = ComputeLogSubscription(self, run_id, key, io_type, cursor) # type: ignore # (var reassigned diff type)\n self.on_subscribe(subscription)\n return subscription\n\n def dispose(self):\n pass
\n\n\nclass ComputeLogSubscription:\n """Observable object that generates ComputeLogFileData objects as compute step execution logs\n are written.\n """\n\n def __init__(\n self,\n manager: ComputeLogManager,\n run_id: str,\n key: str,\n io_type: ComputeIOType,\n cursor: int,\n ):\n self.manager = manager\n self.run_id = run_id\n self.key = key\n self.io_type = io_type\n self.cursor = cursor\n self.observer: Optional[Callable[[ComputeLogFileData], None]] = None\n self.is_complete = False\n\n def __call__(self, observer: Callable[[ComputeLogFileData], None]) -> Self:\n self.observer = observer\n self.fetch()\n if self.manager.is_watch_completed(self.run_id, self.key):\n self.complete()\n return self\n\n def dispose(self) -> None:\n # called when the connection gets closed, allowing the observer to get GC'ed\n self.observer = None\n self.manager.on_unsubscribe(self)\n\n def fetch(self) -> None:\n if not self.observer:\n return\n\n should_fetch = True\n while should_fetch:\n update = self.manager.read_logs_file(\n self.run_id,\n self.key,\n self.io_type,\n self.cursor,\n max_bytes=MAX_BYTES_CHUNK_READ,\n )\n if not self.cursor or update.cursor != self.cursor:\n self.observer(update)\n self.cursor = update.cursor\n should_fetch = update.data and len(update.data.encode("utf-8")) >= MAX_BYTES_CHUNK_READ\n\n def complete(self) -> None:\n self.is_complete = True\n if not self.observer:\n return\n
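A sketch of paging through captured stdout with the cursor-based read_logs_file API, mirroring the fetch loop in ComputeLogSubscription above; read_full_stdout is a hypothetical helper and manager is assumed to be any concrete ComputeLogManager.

from dagster._core.storage.compute_log_manager import ComputeIOType, ComputeLogManager

def read_full_stdout(manager: ComputeLogManager, run_id: str, step_key: str) -> str:
    chunks = []
    cursor = 0
    while True:
        chunk = manager.read_logs_file(run_id, step_key, ComputeIOType.STDOUT, cursor=cursor)
        if chunk.data:
            chunks.append(chunk.data)
        # Stop once no new data arrives or the cursor stops advancing.
        if not chunk.data or chunk.cursor == cursor:
            break
        cursor = chunk.cursor
    return "".join(chunks)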
", "current_page_name": "_modules/dagster/_core/storage/compute_log_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.compute_log_manager"}, "dagster_run": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.dagster_run

\nfrom datetime import datetime\nfrom enum import Enum\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Dict,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Union,\n)\n\nfrom typing_extensions import Self\n\nimport dagster._check as check\nfrom dagster._annotations import PublicAttr, public\nfrom dagster._core.definitions.asset_check_spec import AssetCheckKey\nfrom dagster._core.definitions.events import AssetKey\nfrom dagster._core.origin import JobPythonOrigin\nfrom dagster._core.storage.tags import PARENT_RUN_ID_TAG, ROOT_RUN_ID_TAG\nfrom dagster._core.utils import make_new_run_id\nfrom dagster._serdes.serdes import (\n    NamedTupleSerializer,\n    whitelist_for_serdes,\n)\n\nfrom .tags import (\n    BACKFILL_ID_TAG,\n    REPOSITORY_LABEL_TAG,\n    RESUME_RETRY_TAG,\n    SCHEDULE_NAME_TAG,\n    SENSOR_NAME_TAG,\n)\n\nif TYPE_CHECKING:\n    from dagster._core.host_representation.external import ExternalSchedule, ExternalSensor\n    from dagster._core.host_representation.origin import ExternalJobOrigin\n\n\n
[docs]@whitelist_for_serdes(storage_name="PipelineRunStatus")\nclass DagsterRunStatus(Enum):\n """The status of run execution."""\n\n # Runs waiting to be launched by the Dagster Daemon.\n QUEUED = "QUEUED"\n\n # Runs that have been launched, but execution has not yet started."""\n NOT_STARTED = "NOT_STARTED"\n\n # Runs that are managed outside of the Dagster control plane.\n MANAGED = "MANAGED"\n\n # Runs that have been launched, but execution has not yet started.\n STARTING = "STARTING"\n\n # Runs that have been launched and execution has started.\n STARTED = "STARTED"\n\n # Runs that have successfully completed.\n SUCCESS = "SUCCESS"\n\n # Runs that have failed to complete.\n FAILURE = "FAILURE"\n\n # Runs that are in-progress and pending to be canceled.\n CANCELING = "CANCELING"\n\n # Runs that have been canceled before completion.\n CANCELED = "CANCELED"
\n\n\n# These statuses that indicate a run may be using compute resources\nIN_PROGRESS_RUN_STATUSES = [\n DagsterRunStatus.STARTING,\n DagsterRunStatus.STARTED,\n DagsterRunStatus.CANCELING,\n]\n\n# This serves as an explicit list of run statuses that indicate that the run is not using compute\n# resources. This and the enum above should cover all run statuses.\nNON_IN_PROGRESS_RUN_STATUSES = [\n DagsterRunStatus.QUEUED,\n DagsterRunStatus.NOT_STARTED,\n DagsterRunStatus.SUCCESS,\n DagsterRunStatus.FAILURE,\n DagsterRunStatus.MANAGED,\n DagsterRunStatus.CANCELED,\n]\n\nFINISHED_STATUSES = [\n DagsterRunStatus.SUCCESS,\n DagsterRunStatus.FAILURE,\n DagsterRunStatus.CANCELED,\n]\n\n# Run statuses for runs that can be safely canceled.\n# Does not include the other unfinished statuses for the following reasons:\n# STARTING: Control has been ceded to the run worker, which will eventually move the run to a STARTED.\n# NOT_STARTED: Mostly replaced with STARTING. Runs are only here in the the brief window between\n# creating the run and launching or enqueueing it.\nCANCELABLE_RUN_STATUSES = [DagsterRunStatus.STARTED, DagsterRunStatus.QUEUED]\n\n\n@whitelist_for_serdes(storage_name="PipelineRunStatsSnapshot")\nclass DagsterRunStatsSnapshot(\n NamedTuple(\n "_DagsterRunStatsSnapshot",\n [\n ("run_id", str),\n ("steps_succeeded", int),\n ("steps_failed", int),\n ("materializations", int),\n ("expectations", int),\n ("enqueued_time", Optional[float]),\n ("launch_time", Optional[float]),\n ("start_time", Optional[float]),\n ("end_time", Optional[float]),\n ],\n )\n):\n def __new__(\n cls,\n run_id: str,\n steps_succeeded: int,\n steps_failed: int,\n materializations: int,\n expectations: int,\n enqueued_time: Optional[float],\n launch_time: Optional[float],\n start_time: Optional[float],\n end_time: Optional[float],\n ):\n return super(DagsterRunStatsSnapshot, cls).__new__(\n cls,\n run_id=check.str_param(run_id, "run_id"),\n steps_succeeded=check.int_param(steps_succeeded, "steps_succeeded"),\n steps_failed=check.int_param(steps_failed, "steps_failed"),\n materializations=check.int_param(materializations, "materializations"),\n expectations=check.int_param(expectations, "expectations"),\n enqueued_time=check.opt_float_param(enqueued_time, "enqueued_time"),\n launch_time=check.opt_float_param(launch_time, "launch_time"),\n start_time=check.opt_float_param(start_time, "start_time"),\n end_time=check.opt_float_param(end_time, "end_time"),\n )\n\n\nclass DagsterRunSerializer(NamedTupleSerializer["DagsterRun"]):\n # serdes log\n # * removed reexecution_config - serdes logic expected to strip unknown keys so no need to preserve\n # * added pipeline_snapshot_id\n # * renamed previous_run_id -> parent_run_id, added root_run_id\n # * added execution_plan_snapshot_id\n # * removed selector\n # * added solid_subset\n # * renamed solid_subset -> solid_selection, added solids_to_execute\n # * renamed environment_dict -> run_config\n # * added asset_selection\n # * added has_repository_load_data\n def before_unpack(self, context, unpacked_dict: Dict[str, Any]) -> Dict[str, Any]:\n # back compat for environment dict => run_config\n if "environment_dict" in unpacked_dict:\n check.invariant(\n unpacked_dict.get("run_config") is None,\n "Cannot set both run_config and environment_dict. 
Use run_config parameter.",\n )\n unpacked_dict["run_config"] = unpacked_dict["environment_dict"]\n del unpacked_dict["environment_dict"]\n\n # back compat for previous_run_id => parent_run_id, root_run_id\n if "previous_run_id" in unpacked_dict and not (\n "parent_run_id" in unpacked_dict and "root_run_id" in unpacked_dict\n ):\n unpacked_dict["parent_run_id"] = unpacked_dict["previous_run_id"]\n unpacked_dict["root_run_id"] = unpacked_dict["previous_run_id"]\n del unpacked_dict["previous_run_id"]\n\n # back compat for selector => pipeline_name, solids_to_execute\n if "selector" in unpacked_dict:\n selector = unpacked_dict["selector"]\n\n if not isinstance(selector, ExecutionSelector):\n check.failed(f"unexpected entry for 'select', {selector}")\n selector_name = selector.name\n selector_subset = selector.solid_subset\n\n job_name = unpacked_dict.get("pipeline_name")\n check.invariant(\n job_name is None or selector_name == job_name,\n f"Conflicting pipeline name {job_name} in arguments to PipelineRun: "\n f"selector was passed with pipeline {selector_name}",\n )\n if job_name is None:\n unpacked_dict["pipeline_name"] = selector_name\n\n solids_to_execute = unpacked_dict.get("solids_to_execute")\n check.invariant(\n solids_to_execute is None\n or (selector_subset and set(selector_subset) == solids_to_execute),\n f"Conflicting solids_to_execute {solids_to_execute} in arguments to"\n f" PipelineRun: selector was passed with subset {selector_subset}",\n )\n # for old runs that only have selector but no solids_to_execute\n if solids_to_execute is None:\n solids_to_execute = frozenset(selector_subset) if selector_subset else None\n\n # back compat for solid_subset => solids_to_execute\n if "solid_subset" in unpacked_dict:\n unpacked_dict["solids_to_execute"] = unpacked_dict["solid_subset"]\n del unpacked_dict["solid_subset"]\n\n return unpacked_dict\n\n\n
[docs]@whitelist_for_serdes(\n serializer=DagsterRunSerializer,\n # DagsterRun is serialized as PipelineRun so that it can be read by older (pre 0.13.x) version\n # of Dagster, but is read back in as a DagsterRun.\n storage_name="PipelineRun",\n old_fields={"mode": None},\n storage_field_names={\n "job_name": "pipeline_name",\n "job_snapshot_id": "pipeline_snapshot_id",\n "external_job_origin": "external_pipeline_origin",\n "job_code_origin": "pipeline_code_origin",\n "op_selection": "solid_selection",\n "resolved_op_selection": "solids_to_execute",\n },\n)\nclass DagsterRun(\n NamedTuple(\n "_DagsterRun",\n [\n ("job_name", PublicAttr[str]),\n ("run_id", str),\n ("run_config", Mapping[str, object]),\n ("asset_selection", Optional[AbstractSet[AssetKey]]),\n ("asset_check_selection", Optional[AbstractSet[AssetCheckKey]]),\n ("op_selection", Optional[Sequence[str]]),\n ("resolved_op_selection", Optional[AbstractSet[str]]),\n ("step_keys_to_execute", Optional[Sequence[str]]),\n ("status", DagsterRunStatus),\n ("tags", Mapping[str, str]),\n ("root_run_id", Optional[str]),\n ("parent_run_id", Optional[str]),\n ("job_snapshot_id", Optional[str]),\n ("execution_plan_snapshot_id", Optional[str]),\n ("external_job_origin", Optional["ExternalJobOrigin"]),\n ("job_code_origin", Optional[JobPythonOrigin]),\n ("has_repository_load_data", bool),\n ],\n )\n):\n """Serializable internal representation of a dagster run, as stored in a\n :py:class:`~dagster._core.storage.runs.RunStorage`.\n """\n\n def __new__(\n cls,\n job_name: str,\n run_id: Optional[str] = None,\n run_config: Optional[Mapping[str, object]] = None,\n asset_selection: Optional[AbstractSet[AssetKey]] = None,\n asset_check_selection: Optional[AbstractSet[AssetCheckKey]] = None,\n op_selection: Optional[Sequence[str]] = None,\n resolved_op_selection: Optional[AbstractSet[str]] = None,\n step_keys_to_execute: Optional[Sequence[str]] = None,\n status: Optional[DagsterRunStatus] = None,\n tags: Optional[Mapping[str, str]] = None,\n root_run_id: Optional[str] = None,\n parent_run_id: Optional[str] = None,\n job_snapshot_id: Optional[str] = None,\n execution_plan_snapshot_id: Optional[str] = None,\n external_job_origin: Optional["ExternalJobOrigin"] = None,\n job_code_origin: Optional[JobPythonOrigin] = None,\n has_repository_load_data: Optional[bool] = None,\n ):\n check.invariant(\n (root_run_id is not None and parent_run_id is not None)\n or (root_run_id is None and parent_run_id is None),\n "Must set both root_run_id and parent_run_id when creating a PipelineRun that "\n "belongs to a run group",\n )\n # a set which contains the names of the ops to execute\n resolved_op_selection = check.opt_nullable_set_param(\n resolved_op_selection, "resolved_op_selection", of_type=str\n )\n # a list of op queries provided by the user\n # possible to be None when resolved_op_selection is set by the user directly\n op_selection = check.opt_nullable_sequence_param(op_selection, "op_selection", of_type=str)\n check.opt_nullable_sequence_param(step_keys_to_execute, "step_keys_to_execute", of_type=str)\n\n asset_selection = check.opt_nullable_set_param(\n asset_selection, "asset_selection", of_type=AssetKey\n )\n asset_check_selection = check.opt_nullable_set_param(\n asset_check_selection, "asset_check_selection", of_type=AssetCheckKey\n )\n\n # Placing this with the other imports causes a cyclic import\n # https://github.com/dagster-io/dagster/issues/3181\n from dagster._core.host_representation.origin import ExternalJobOrigin\n\n if status == 
DagsterRunStatus.QUEUED:\n check.inst_param(\n external_job_origin,\n "external_job_origin",\n ExternalJobOrigin,\n "external_job_origin is required for queued runs",\n )\n\n if run_id is None:\n run_id = make_new_run_id()\n\n return super(DagsterRun, cls).__new__(\n cls,\n job_name=check.str_param(job_name, "job_name"),\n run_id=check.str_param(run_id, "run_id"),\n run_config=check.opt_mapping_param(run_config, "run_config", key_type=str),\n op_selection=op_selection,\n asset_selection=asset_selection,\n asset_check_selection=asset_check_selection,\n resolved_op_selection=resolved_op_selection,\n step_keys_to_execute=step_keys_to_execute,\n status=check.opt_inst_param(\n status, "status", DagsterRunStatus, DagsterRunStatus.NOT_STARTED\n ),\n tags=check.opt_mapping_param(tags, "tags", key_type=str, value_type=str),\n root_run_id=check.opt_str_param(root_run_id, "root_run_id"),\n parent_run_id=check.opt_str_param(parent_run_id, "parent_run_id"),\n job_snapshot_id=check.opt_str_param(job_snapshot_id, "job_snapshot_id"),\n execution_plan_snapshot_id=check.opt_str_param(\n execution_plan_snapshot_id, "execution_plan_snapshot_id"\n ),\n external_job_origin=check.opt_inst_param(\n external_job_origin, "external_job_origin", ExternalJobOrigin\n ),\n job_code_origin=check.opt_inst_param(\n job_code_origin, "job_code_origin", JobPythonOrigin\n ),\n has_repository_load_data=check.opt_bool_param(\n has_repository_load_data, "has_repository_load_data", default=False\n ),\n )\n\n def with_status(self, status: DagsterRunStatus) -> Self:\n if status == DagsterRunStatus.QUEUED:\n # Placing this with the other imports causes a cyclic import\n # https://github.com/dagster-io/dagster/issues/3181\n from dagster._core.host_representation.origin import ExternalJobOrigin\n\n check.inst(\n self.external_job_origin,\n ExternalJobOrigin,\n "external_pipeline_origin is required for queued runs",\n )\n\n return self._replace(status=status)\n\n def with_job_origin(self, origin: "ExternalJobOrigin") -> Self:\n from dagster._core.host_representation.origin import ExternalJobOrigin\n\n check.inst_param(origin, "origin", ExternalJobOrigin)\n return self._replace(external_job_origin=origin)\n\n def with_tags(self, tags: Mapping[str, str]) -> Self:\n return self._replace(tags=tags)\n\n def get_root_run_id(self) -> Optional[str]:\n return self.tags.get(ROOT_RUN_ID_TAG)\n\n def get_parent_run_id(self) -> Optional[str]:\n return self.tags.get(PARENT_RUN_ID_TAG)\n\n def tags_for_storage(self) -> Mapping[str, str]:\n repository_tags = {}\n if self.external_job_origin:\n # tag the run with a label containing the repository name / location name, to allow for\n # per-repository filtering of runs from the Dagster UI.\n repository_tags[REPOSITORY_LABEL_TAG] = (\n self.external_job_origin.external_repository_origin.get_label()\n )\n\n if not self.tags:\n return repository_tags\n\n return {**repository_tags, **self.tags}\n\n @public\n @property\n def is_finished(self) -> bool:\n """bool: If this run has completely finished execution."""\n return self.status in FINISHED_STATUSES\n\n @public\n @property\n def is_success(self) -> bool:\n """bool: If this run has successfully finished executing."""\n return self.status == DagsterRunStatus.SUCCESS\n\n @public\n @property\n def is_failure(self) -> bool:\n """bool: If this run has failed."""\n return self.status == DagsterRunStatus.FAILURE\n\n @public\n @property\n def is_failure_or_canceled(self) -> bool:\n """bool: If this run has either failed or was canceled."""\n return self.status == 
DagsterRunStatus.FAILURE or self.status == DagsterRunStatus.CANCELED\n\n @public\n @property\n def is_resume_retry(self) -> bool:\n """bool: If this run was created from retrying another run from the point of failure."""\n return self.tags.get(RESUME_RETRY_TAG) == "true"\n\n @property\n def previous_run_id(self) -> Optional[str]:\n # Compat\n return self.parent_run_id\n\n @staticmethod\n def tags_for_schedule(schedule) -> Mapping[str, str]:\n return {SCHEDULE_NAME_TAG: schedule.name}\n\n @staticmethod\n def tags_for_sensor(sensor) -> Mapping[str, str]:\n return {SENSOR_NAME_TAG: sensor.name}\n\n @staticmethod\n def tags_for_backfill_id(backfill_id: str) -> Mapping[str, str]:\n return {BACKFILL_ID_TAG: backfill_id}
\n\n\nclass RunsFilterSerializer(NamedTupleSerializer["RunsFilter"]):\n def before_unpack(\n self,\n context,\n unpacked_dict: Dict[str, Any],\n ) -> Dict[str, Any]:\n # We store empty run ids as [] but only accept None\n if "run_ids" in unpacked_dict and unpacked_dict["run_ids"] == []:\n unpacked_dict["run_ids"] = None\n return unpacked_dict\n\n\n
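A small sketch of constructing the run record directly (normally done by Dagster machinery rather than user code); "my_job" is a hypothetical job name. A new run gets a generated run_id, defaults to NOT_STARTED, and status transitions go through with_status.

from dagster._core.storage.dagster_run import (
    IN_PROGRESS_RUN_STATUSES,
    DagsterRun,
    DagsterRunStatus,
)

run = DagsterRun(job_name="my_job")  # hypothetical job name
assert run.status == DagsterRunStatus.NOT_STARTED
assert not run.is_finished

started = run.with_status(DagsterRunStatus.STARTED)
assert started.status in IN_PROGRESS_RUN_STATUSES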
[docs]@whitelist_for_serdes(\n serializer=RunsFilterSerializer,\n old_storage_names={"PipelineRunsFilter"},\n storage_field_names={"job_name": "pipeline_name"},\n)\nclass RunsFilter(\n NamedTuple(\n "_RunsFilter",\n [\n ("run_ids", Sequence[str]),\n ("job_name", Optional[str]),\n ("statuses", Sequence[DagsterRunStatus]),\n ("tags", Mapping[str, Union[str, Sequence[str]]]),\n ("snapshot_id", Optional[str]),\n ("updated_after", Optional[datetime]),\n ("updated_before", Optional[datetime]),\n ("created_after", Optional[datetime]),\n ("created_before", Optional[datetime]),\n ],\n )\n):\n """Defines a filter across job runs, for use when querying storage directly.\n\n Each field of the RunsFilter represents a logical AND with each other. For\n example, if you specify job_name and tags, then you will receive only runs\n with the specified job_name AND the specified tags. If left blank, then\n all values will be permitted for that field.\n\n Args:\n run_ids (Optional[List[str]]): A list of job run_id values.\n job_name (Optional[str]):\n Name of the job to query for. If blank, all job_names will be accepted.\n statuses (Optional[List[DagsterRunStatus]]):\n A list of run statuses to filter by. If blank, all run statuses will be allowed.\n tags (Optional[Dict[str, Union[str, List[str]]]]):\n A dictionary of run tags to query by. All tags specified here must be present for a given run to pass the filter.\n snapshot_id (Optional[str]): The ID of the job snapshot to query for. Intended for internal use.\n updated_after (Optional[DateTime]): Filter by runs that were last updated before this datetime.\n created_before (Optional[DateTime]): Filter by runs that were created before this datetime.\n\n """\n\n def __new__(\n cls,\n run_ids: Optional[Sequence[str]] = None,\n job_name: Optional[str] = None,\n statuses: Optional[Sequence[DagsterRunStatus]] = None,\n tags: Optional[Mapping[str, Union[str, Sequence[str]]]] = None,\n snapshot_id: Optional[str] = None,\n updated_after: Optional[datetime] = None,\n updated_before: Optional[datetime] = None,\n created_after: Optional[datetime] = None,\n created_before: Optional[datetime] = None,\n ):\n check.invariant(run_ids != [], "When filtering on run ids, a non-empty list must be used.")\n\n return super(RunsFilter, cls).__new__(\n cls,\n run_ids=check.opt_sequence_param(run_ids, "run_ids", of_type=str),\n job_name=check.opt_str_param(job_name, "job_name"),\n statuses=check.opt_sequence_param(statuses, "statuses", of_type=DagsterRunStatus),\n tags=check.opt_mapping_param(tags, "tags", key_type=str),\n snapshot_id=check.opt_str_param(snapshot_id, "snapshot_id"),\n updated_after=check.opt_inst_param(updated_after, "updated_after", datetime),\n updated_before=check.opt_inst_param(updated_before, "updated_before", datetime),\n created_after=check.opt_inst_param(created_after, "created_after", datetime),\n created_before=check.opt_inst_param(created_before, "created_before", datetime),\n )\n\n @staticmethod\n def for_schedule(schedule: "ExternalSchedule") -> "RunsFilter":\n return RunsFilter(tags=DagsterRun.tags_for_schedule(schedule))\n\n @staticmethod\n def for_sensor(sensor: "ExternalSensor") -> "RunsFilter":\n return RunsFilter(tags=DagsterRun.tags_for_sensor(sensor))\n\n @staticmethod\n def for_backfill(backfill_id: str) -> "RunsFilter":\n return RunsFilter(tags=DagsterRun.tags_for_backfill_id(backfill_id))
\n\n\nclass JobBucket(NamedTuple):\n job_names: List[str]\n bucket_limit: Optional[int]\n\n\nclass TagBucket(NamedTuple):\n tag_key: str\n tag_values: List[str]\n bucket_limit: Optional[int]\n\n\n
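# --- Illustrative usage sketch (not part of the generated source above) ---
# JobBucket and TagBucket express "return at most `bucket_limit` run records per
# bucket", where a bucket is either a job name or a value of a tag key.
# Construction is shown below; how the bucket is handed to a run-storage query
# (for example via a `bucket_by=` argument on a `get_run_records` call) is an
# assumption of this sketch and may vary between storage implementations.
latest_run_per_job = JobBucket(job_names=["ingest_job", "transform_job"], bucket_limit=1)
latest_runs_per_team = TagBucket(tag_key="team", tag_values=["data-eng", "ml"], bucket_limit=5)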
[docs]class RunRecord(\n NamedTuple(\n "_RunRecord",\n [\n ("storage_id", int),\n ("dagster_run", DagsterRun),\n ("create_timestamp", datetime),\n ("update_timestamp", datetime),\n ("start_time", Optional[float]),\n ("end_time", Optional[float]),\n ],\n )\n):\n """Internal representation of a run record, as stored in a\n :py:class:`~dagster._core.storage.runs.RunStorage`.\n\n Users should not invoke this class directly.\n """\n\n def __new__(\n cls,\n storage_id: int,\n dagster_run: DagsterRun,\n create_timestamp: datetime,\n update_timestamp: datetime,\n start_time: Optional[float] = None,\n end_time: Optional[float] = None,\n ):\n return super(RunRecord, cls).__new__(\n cls,\n storage_id=check.int_param(storage_id, "storage_id"),\n dagster_run=check.inst_param(dagster_run, "dagster_run", DagsterRun),\n create_timestamp=check.inst_param(create_timestamp, "create_timestamp", datetime),\n update_timestamp=check.inst_param(update_timestamp, "update_timestamp", datetime),\n # start_time and end_time fields will be populated once the run has started and ended, respectively, but will be None beforehand.\n start_time=check.opt_float_param(start_time, "start_time"),\n end_time=check.opt_float_param(end_time, "end_time"),\n )
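# --- Illustrative usage sketch (not part of the generated source above) ---
# A hedged example of reading RunRecord fields. It assumes a configured
# DagsterInstance is available via `DagsterInstance.get()` and that
# `get_run_records` accepts a RunsFilter and a limit; the filter values are
# placeholders.
from dagster import DagsterInstance, DagsterRunStatus, RunsFilter

instance = DagsterInstance.get()
records = instance.get_run_records(
    filters=RunsFilter(statuses=[DagsterRunStatus.SUCCESS]), limit=5
)
for record in records:
    # start_time/end_time are epoch-second floats once the run has started/ended,
    # and None before then.
    duration = (
        record.end_time - record.start_time
        if record.start_time is not None and record.end_time is not None
        else None
    )
    print(record.dagster_run.run_id, record.dagster_run.status, duration)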
\n\n\n@whitelist_for_serdes\nclass RunPartitionData(\n NamedTuple(\n "_RunPartitionData",\n [\n ("run_id", str),\n ("partition", str),\n ("status", DagsterRunStatus),\n ("start_time", Optional[float]),\n ("end_time", Optional[float]),\n ],\n )\n):\n def __new__(\n cls,\n run_id: str,\n partition: str,\n status: DagsterRunStatus,\n start_time: Optional[float],\n end_time: Optional[float],\n ):\n return super(RunPartitionData, cls).__new__(\n cls,\n run_id=check.str_param(run_id, "run_id"),\n partition=check.str_param(partition, "partition"),\n status=check.inst_param(status, "status", DagsterRunStatus),\n start_time=check.opt_inst(start_time, float),\n end_time=check.opt_inst(end_time, float),\n )\n\n\n###################################################################################################\n# GRAVEYARD\n#\n# -|-\n# |\n# _-'~~~~~`-_\n# .' '.\n# | R I P |\n# | |\n# | Execution |\n# | Selector |\n# | |\n# | |\n###################################################################################################\n\n\n@whitelist_for_serdes\nclass ExecutionSelector(\n NamedTuple("_ExecutionSelector", [("name", str), ("solid_subset", Optional[Sequence[str]])])\n):\n """Kept here to maintain loading of PipelineRuns from when it was still alive."""\n\n def __new__(cls, name: str, solid_subset: Optional[Sequence[str]] = None):\n return super(ExecutionSelector, cls).__new__(\n cls,\n name=check.str_param(name, "name"),\n solid_subset=(\n None\n if solid_subset is None\n else check.sequence_param(solid_subset, "solid_subset", of_type=str)\n ),\n )\n
", "current_page_name": "_modules/dagster/_core/storage/dagster_run", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.dagster_run"}, "event_log": {"base": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.event_log.base

\nimport base64\nfrom abc import ABC, abstractmethod\nfrom enum import Enum\nfrom typing import (\n    TYPE_CHECKING,\n    Iterable,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n    Union,\n)\n\nimport dagster._check as check\nfrom dagster._core.assets import AssetDetails\nfrom dagster._core.definitions.asset_check_spec import AssetCheckKey\nfrom dagster._core.definitions.events import AssetKey\nfrom dagster._core.event_api import EventHandlerFn, EventLogRecord, EventRecordsFilter\nfrom dagster._core.events import DagsterEventType\nfrom dagster._core.execution.stats import (\n    RunStepKeyStatsSnapshot,\n    build_run_stats_from_events,\n    build_run_step_stats_from_events,\n)\nfrom dagster._core.instance import MayHaveInstanceWeakref, T_DagsterInstance\nfrom dagster._core.storage.asset_check_execution_record import AssetCheckExecutionRecord\nfrom dagster._core.storage.dagster_run import DagsterRunStatsSnapshot\nfrom dagster._core.storage.sql import AlembicVersion\nfrom dagster._seven import json\nfrom dagster._utils import PrintFn\nfrom dagster._utils.concurrency import ConcurrencyClaimStatus, ConcurrencyKeyInfo\n\nif TYPE_CHECKING:\n    from dagster._core.events.log import EventLogEntry\n    from dagster._core.storage.partition_status_cache import AssetStatusCacheValue\n\n\nclass EventLogConnection(NamedTuple):\n    records: Sequence[EventLogRecord]\n    cursor: str\n    has_more: bool\n\n\nclass EventLogCursorType(Enum):\n    OFFSET = "OFFSET"\n    STORAGE_ID = "STORAGE_ID"\n\n\nclass EventLogCursor(NamedTuple):\n    """Representation of an event record cursor, keeping track of the log query state."""\n\n    cursor_type: EventLogCursorType\n    value: int\n\n    def is_offset_cursor(self) -> bool:\n        return self.cursor_type == EventLogCursorType.OFFSET\n\n    def is_id_cursor(self) -> bool:\n        return self.cursor_type == EventLogCursorType.STORAGE_ID\n\n    def offset(self) -> int:\n        check.invariant(self.cursor_type == EventLogCursorType.OFFSET)\n        return max(0, int(self.value))\n\n    def storage_id(self) -> int:\n        check.invariant(self.cursor_type == EventLogCursorType.STORAGE_ID)\n        return int(self.value)\n\n    def __str__(self) -> str:\n        return self.to_string()\n\n    def to_string(self) -> str:\n        raw = json.dumps({"type": self.cursor_type.value, "value": self.value})\n        return base64.b64encode(bytes(raw, encoding="utf-8")).decode("utf-8")\n\n    @staticmethod\n    def parse(cursor_str: str) -> "EventLogCursor":\n        raw = json.loads(base64.b64decode(cursor_str).decode("utf-8"))\n        return EventLogCursor(EventLogCursorType(raw["type"]), raw["value"])\n\n    @staticmethod\n    def from_offset(offset: int) -> "EventLogCursor":\n        return EventLogCursor(EventLogCursorType.OFFSET, offset)\n\n    @staticmethod\n    def from_storage_id(storage_id: int) -> "EventLogCursor":\n        return EventLogCursor(EventLogCursorType.STORAGE_ID, storage_id)\n\n\nclass AssetEntry(\n    NamedTuple(\n        "_AssetEntry",\n        [\n            ("asset_key", AssetKey),\n            ("last_materialization_record", Optional[EventLogRecord]),\n            ("last_run_id", Optional[str]),\n            ("asset_details", Optional[AssetDetails]),\n            ("cached_status", Optional["AssetStatusCacheValue"]),\n        ],\n    )\n):\n    def __new__(\n        cls,\n        asset_key: AssetKey,\n        last_materialization_record: Optional[EventLogRecord] = None,\n        last_run_id: 
Optional[str] = None,\n        asset_details: Optional[AssetDetails] = None,\n        cached_status: Optional["AssetStatusCacheValue"] = None,\n    ):\n        from dagster._core.storage.partition_status_cache import AssetStatusCacheValue\n\n        return super(AssetEntry, cls).__new__(\n            cls,\n            asset_key=check.inst_param(asset_key, "asset_key", AssetKey),\n            last_materialization_record=check.opt_inst_param(\n                last_materialization_record, "last_materialization_record", EventLogRecord\n            ),\n            last_run_id=check.opt_str_param(last_run_id, "last_run_id"),\n            asset_details=check.opt_inst_param(asset_details, "asset_details", AssetDetails),\n            cached_status=check.opt_inst_param(\n                cached_status, "cached_status", AssetStatusCacheValue\n            ),\n        )\n\n    @property\n    def last_materialization(self) -> Optional["EventLogEntry"]:\n        if self.last_materialization_record is None:\n            return None\n        return self.last_materialization_record.event_log_entry\n\n    @property\n    def last_materialization_storage_id(self) -> Optional[int]:\n        if self.last_materialization_record is None:\n            return None\n        return self.last_materialization_record.storage_id\n\n\n
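# --- Illustrative usage sketch (not part of the generated source above) ---
# EventLogCursor (defined above) serializes to an opaque base64-encoded JSON
# string. This round trip uses only the constructors and the to_string/parse
# methods shown in this module; the storage id value is a placeholder.
cursor = EventLogCursor.from_storage_id(1234)
cursor_str = cursor.to_string()  # opaque string, safe to hand back to callers
parsed = EventLogCursor.parse(cursor_str)
assert parsed.is_id_cursor() and parsed.storage_id() == 1234

offset_cursor = EventLogCursor.from_offset(0)
assert offset_cursor.is_offset_cursor() and offset_cursor.offset() == 0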
[docs]class AssetRecord(NamedTuple):\n """Internal representation of an asset record, as stored in a :py:class:`~dagster._core.storage.event_log.EventLogStorage`.\n\n Users should not invoke this class directly.\n """\n\n storage_id: int\n asset_entry: AssetEntry
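# --- Illustrative usage sketch (not part of the generated source above) ---
# AssetRecord wraps the AssetEntry shown above; records are normally obtained
# from an event log storage via `get_asset_records` (declared on the
# EventLogStorage ABC just below) rather than constructed by hand. The storage
# argument is supplied by the caller, and the string annotation is a forward
# reference because EventLogStorage is defined later in this module.
from typing import Sequence

def summarize_asset_records(storage: "EventLogStorage", asset_keys: Sequence[AssetKey]) -> None:
    for record in storage.get_asset_records(asset_keys=asset_keys):
        entry = record.asset_entry
        # last_run_id and last_materialization_storage_id come from the
        # AssetEntry fields and convenience properties defined above.
        print(entry.asset_key, entry.last_run_id, entry.last_materialization_storage_id)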
\n\n\n
[docs]class EventLogStorage(ABC, MayHaveInstanceWeakref[T_DagsterInstance]):\n """Abstract base class for storing structured event logs from pipeline runs.\n\n Note that event log storages using SQL databases as backing stores should implement\n :py:class:`~dagster._core.storage.event_log.SqlEventLogStorage`.\n\n Users should not directly instantiate concrete subclasses of this class; they are instantiated\n by internal machinery when ``dagster-webserver`` and ``dagster-graphql`` load, based on the values in the\n ``dagster.yaml`` file in ``$DAGSTER_HOME``. Configuration of concrete subclasses of this class\n should be done by setting values in that file.\n """\n\n def get_logs_for_run(\n self,\n run_id: str,\n cursor: Optional[Union[str, int]] = None,\n of_type: Optional[Union[DagsterEventType, Set[DagsterEventType]]] = None,\n limit: Optional[int] = None,\n ascending: bool = True,\n ) -> Sequence["EventLogEntry"]:\n """Get all of the logs corresponding to a run.\n\n Args:\n run_id (str): The id of the run for which to fetch logs.\n cursor (Optional[Union[str, int]]): Cursor value to track paginated queries. Legacy\n support for integer offset cursors.\n of_type (Optional[DagsterEventType]): the dagster event type to filter the logs.\n limit (Optional[int]): Max number of records to return.\n """\n if isinstance(cursor, int):\n cursor = EventLogCursor.from_offset(cursor + 1).to_string()\n records = self.get_records_for_run(\n run_id, cursor, of_type, limit, ascending=ascending\n ).records\n return [record.event_log_entry for record in records]\n\n @abstractmethod\n def get_records_for_run(\n self,\n run_id: str,\n cursor: Optional[str] = None,\n of_type: Optional[Union[DagsterEventType, Set[DagsterEventType]]] = None,\n limit: Optional[int] = None,\n ascending: bool = True,\n ) -> EventLogConnection:\n """Get all of the event log records corresponding to a run.\n\n Args:\n run_id (str): The id of the run for which to fetch logs.\n cursor (Optional[str]): Cursor value to track paginated queries.\n of_type (Optional[DagsterEventType]): the dagster event type to filter the logs.\n limit (Optional[int]): Max number of records to return.\n """\n\n def get_stats_for_run(self, run_id: str) -> DagsterRunStatsSnapshot:\n """Get a summary of events that have ocurred in a run."""\n return build_run_stats_from_events(run_id, self.get_logs_for_run(run_id))\n\n def get_step_stats_for_run(\n self, run_id: str, step_keys: Optional[Sequence[str]] = None\n ) -> Sequence[RunStepKeyStatsSnapshot]:\n """Get per-step stats for a pipeline run."""\n logs = self.get_logs_for_run(run_id)\n if step_keys:\n logs = [\n event\n for event in logs\n if event.is_dagster_event and event.get_dagster_event().step_key in step_keys\n ]\n\n return build_run_step_stats_from_events(run_id, logs)\n\n @abstractmethod\n def store_event(self, event: "EventLogEntry") -> None:\n """Store an event corresponding to a pipeline run.\n\n Args:\n event (EventLogEntry): The event to store.\n """\n\n @abstractmethod\n def delete_events(self, run_id: str) -> None:\n """Remove events for a given run id."""\n\n @abstractmethod\n def upgrade(self) -> None:\n """This method should perform any schema migrations necessary to bring an\n out-of-date instance of the storage up to date.\n """\n\n @abstractmethod\n def reindex_events(self, print_fn: Optional[PrintFn] = None, force: bool = False) -> None:\n """Call this method to run any data migrations across the event_log tables."""\n\n @abstractmethod\n def reindex_assets(self, print_fn: 
Optional[PrintFn] = None, force: bool = False) -> None:\n """Call this method to run any data migrations across the asset tables."""\n\n @abstractmethod\n def wipe(self) -> None:\n """Clear the log storage."""\n\n @abstractmethod\n def watch(self, run_id: str, cursor: Optional[str], callback: EventHandlerFn) -> None:\n """Call this method to start watching."""\n\n @abstractmethod\n def end_watch(self, run_id: str, handler: EventHandlerFn) -> None:\n """Call this method to stop watching."""\n\n @property\n @abstractmethod\n def is_persistent(self) -> bool:\n """bool: Whether the storage is persistent."""\n\n def dispose(self) -> None:\n """Explicit lifecycle management."""\n\n def optimize_for_webserver(self, statement_timeout: int, pool_recycle: int) -> None:\n """Allows for optimizing database connection / use in the context of a long lived webserver process."""\n\n @abstractmethod\n def get_event_records(\n self,\n event_records_filter: EventRecordsFilter,\n limit: Optional[int] = None,\n ascending: bool = False,\n ) -> Sequence[EventLogRecord]:\n pass\n\n def supports_event_consumer_queries(self) -> bool:\n return False\n\n def get_logs_for_all_runs_by_log_id(\n self,\n after_cursor: int = -1,\n dagster_event_type: Optional[Union[DagsterEventType, Set[DagsterEventType]]] = None,\n limit: Optional[int] = None,\n ) -> Mapping[int, "EventLogEntry"]:\n """Get event records across all runs. Only supported for non sharded sql storage."""\n raise NotImplementedError()\n\n def get_maximum_record_id(self) -> Optional[int]:\n """Get the current greatest record id in the event log. Only supported for non sharded sql storage."""\n raise NotImplementedError()\n\n @abstractmethod\n def can_cache_asset_status_data(self) -> bool:\n pass\n\n @abstractmethod\n def wipe_asset_cached_status(self, asset_key: AssetKey) -> None:\n pass\n\n @abstractmethod\n def get_asset_records(\n self, asset_keys: Optional[Sequence[AssetKey]] = None\n ) -> Sequence[AssetRecord]:\n pass\n\n @abstractmethod\n def has_asset_key(self, asset_key: AssetKey) -> bool:\n pass\n\n @abstractmethod\n def all_asset_keys(self) -> Sequence[AssetKey]:\n pass\n\n @abstractmethod\n def update_asset_cached_status_data(\n self, asset_key: AssetKey, cache_values: "AssetStatusCacheValue"\n ) -> None:\n pass\n\n def get_asset_keys(\n self,\n prefix: Optional[Sequence[str]] = None,\n limit: Optional[int] = None,\n cursor: Optional[str] = None,\n ) -> Sequence[AssetKey]:\n # base implementation of get_asset_keys, using the existing `all_asset_keys` and doing the\n # filtering in-memory\n asset_keys = sorted(self.all_asset_keys(), key=str)\n if prefix:\n asset_keys = [\n asset_key for asset_key in asset_keys if asset_key.path[: len(prefix)] == prefix\n ]\n if cursor:\n cursor_asset = AssetKey.from_db_string(cursor)\n if cursor_asset and cursor_asset in asset_keys:\n idx = asset_keys.index(cursor_asset)\n asset_keys = asset_keys[idx + 1 :]\n if limit:\n asset_keys = asset_keys[:limit]\n return asset_keys\n\n @abstractmethod\n def get_latest_materialization_events(\n self, asset_keys: Iterable[AssetKey]\n ) -> Mapping[AssetKey, Optional["EventLogEntry"]]:\n pass\n\n def supports_add_asset_event_tags(self) -> bool:\n return False\n\n def add_asset_event_tags(\n self,\n event_id: int,\n event_timestamp: float,\n asset_key: AssetKey,\n new_tags: Mapping[str, str],\n ) -> None:\n raise NotImplementedError()\n\n @abstractmethod\n def get_event_tags_for_asset(\n self,\n asset_key: AssetKey,\n filter_tags: Optional[Mapping[str, str]] = None,\n 
filter_event_id: Optional[int] = None,\n ) -> Sequence[Mapping[str, str]]:\n pass\n\n @abstractmethod\n def wipe_asset(self, asset_key: AssetKey) -> None:\n """Remove asset index history from event log for given asset_key."""\n\n @abstractmethod\n def get_materialized_partitions(\n self,\n asset_key: AssetKey,\n before_cursor: Optional[int] = None,\n after_cursor: Optional[int] = None,\n ) -> Set[str]:\n pass\n\n @abstractmethod\n def get_materialization_count_by_partition(\n self, asset_keys: Sequence[AssetKey], after_cursor: Optional[int] = None\n ) -> Mapping[AssetKey, Mapping[str, int]]:\n pass\n\n @abstractmethod\n def get_latest_storage_id_by_partition(\n self, asset_key: AssetKey, event_type: DagsterEventType\n ) -> Mapping[str, int]:\n pass\n\n @abstractmethod\n def get_latest_tags_by_partition(\n self,\n asset_key: AssetKey,\n event_type: DagsterEventType,\n tag_keys: Sequence[str],\n asset_partitions: Optional[Sequence[str]] = None,\n before_cursor: Optional[int] = None,\n after_cursor: Optional[int] = None,\n ) -> Mapping[str, Mapping[str, str]]:\n pass\n\n @abstractmethod\n def get_latest_asset_partition_materialization_attempts_without_materializations(\n self, asset_key: AssetKey\n ) -> Mapping[str, Tuple[str, int]]:\n pass\n\n @abstractmethod\n def get_dynamic_partitions(self, partitions_def_name: str) -> Sequence[str]:\n """Get the list of partition keys for a dynamic partitions definition."""\n raise NotImplementedError()\n\n @abstractmethod\n def has_dynamic_partition(self, partitions_def_name: str, partition_key: str) -> bool:\n """Check if a dynamic partition exists."""\n raise NotImplementedError()\n\n @abstractmethod\n def add_dynamic_partitions(\n self, partitions_def_name: str, partition_keys: Sequence[str]\n ) -> None:\n """Add a partition for the specified dynamic partitions definition."""\n raise NotImplementedError()\n\n @abstractmethod\n def delete_dynamic_partition(self, partitions_def_name: str, partition_key: str) -> None:\n """Delete a partition for the specified dynamic partitions definition."""\n raise NotImplementedError()\n\n def alembic_version(self) -> Optional[AlembicVersion]:\n return None\n\n @property\n def is_run_sharded(self) -> bool:\n """Indicates that the EventLogStoarge is sharded."""\n return False\n\n @property\n def supports_global_concurrency_limits(self) -> bool:\n """Indicates that the EventLogStorage supports global concurrency limits."""\n return False\n\n @abstractmethod\n def set_concurrency_slots(self, concurrency_key: str, num: int) -> None:\n """Allocate concurrency slots for the given concurrency key."""\n raise NotImplementedError()\n\n @abstractmethod\n def get_concurrency_keys(self) -> Set[str]:\n """Get the set of concurrency limited keys."""\n raise NotImplementedError()\n\n @abstractmethod\n def get_concurrency_info(self, concurrency_key: str) -> ConcurrencyKeyInfo:\n """Get concurrency info for key."""\n raise NotImplementedError()\n\n @abstractmethod\n def claim_concurrency_slot(\n self, concurrency_key: str, run_id: str, step_key: str, priority: Optional[int] = None\n ) -> ConcurrencyClaimStatus:\n """Claim concurrency slots for step."""\n raise NotImplementedError()\n\n @abstractmethod\n def check_concurrency_claim(\n self, concurrency_key: str, run_id: str, step_key: str\n ) -> ConcurrencyClaimStatus:\n """Claim concurrency slots for step."""\n raise NotImplementedError()\n\n @abstractmethod\n def get_concurrency_run_ids(self) -> Set[str]:\n """Get a list of run_ids that are occupying or waiting for a concurrency key 
slot."""\n raise NotImplementedError()\n\n @abstractmethod\n def free_concurrency_slots_for_run(self, run_id: str) -> None:\n """Frees concurrency slots for a given run."""\n raise NotImplementedError()\n\n @abstractmethod\n def free_concurrency_slot_for_step(self, run_id: str, step_key: str) -> None:\n """Frees concurrency slots for a given run/step."""\n raise NotImplementedError()\n\n @property\n def supports_asset_checks(self):\n return True\n\n @abstractmethod\n def get_asset_check_execution_history(\n self,\n check_key: AssetCheckKey,\n limit: int,\n cursor: Optional[int] = None,\n ) -> Sequence[AssetCheckExecutionRecord]:\n """Get executions for one asset check, sorted by recency."""\n pass\n\n @abstractmethod\n def get_latest_asset_check_execution_by_key(\n self, check_keys: Sequence[AssetCheckKey]\n ) -> Mapping[AssetCheckKey, AssetCheckExecutionRecord]:\n """Get the latest executions for a list of asset checks."""\n pass
\n
", "current_page_name": "_modules/dagster/_core/storage/event_log/base", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.event_log.base"}, "sql_event_log": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.event_log.sql_event_log

\nimport logging\nfrom abc import abstractmethod\nfrom collections import OrderedDict, defaultdict\nfrom datetime import datetime\nfrom typing import (\n    TYPE_CHECKING,\n    Any,\n    ContextManager,\n    Dict,\n    Iterable,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n    Union,\n    cast,\n)\n\nimport pendulum\nimport sqlalchemy as db\nimport sqlalchemy.exc as db_exc\nfrom sqlalchemy.engine import Connection\nfrom typing_extensions import TypeAlias\n\nimport dagster._check as check\nimport dagster._seven as seven\nfrom dagster._core.assets import AssetDetails\nfrom dagster._core.definitions.asset_check_evaluation import (\n    AssetCheckEvaluation,\n    AssetCheckEvaluationPlanned,\n)\nfrom dagster._core.definitions.asset_check_spec import AssetCheckKey\nfrom dagster._core.definitions.events import AssetKey, AssetMaterialization\nfrom dagster._core.errors import (\n    DagsterEventLogInvalidForRun,\n    DagsterInvalidInvocationError,\n    DagsterInvariantViolationError,\n)\nfrom dagster._core.event_api import RunShardedEventsCursor\nfrom dagster._core.events import ASSET_CHECK_EVENTS, ASSET_EVENTS, MARKER_EVENTS, DagsterEventType\nfrom dagster._core.events.log import EventLogEntry\nfrom dagster._core.execution.stats import RunStepKeyStatsSnapshot, build_run_step_stats_from_events\nfrom dagster._core.storage.asset_check_execution_record import (\n    AssetCheckExecutionRecord,\n    AssetCheckExecutionRecordStatus,\n)\nfrom dagster._core.storage.sql import SqlAlchemyQuery, SqlAlchemyRow\nfrom dagster._core.storage.sqlalchemy_compat import (\n    db_case,\n    db_fetch_mappings,\n    db_select,\n    db_subquery,\n)\nfrom dagster._serdes import (\n    deserialize_value,\n    serialize_value,\n)\nfrom dagster._serdes.errors import DeserializationError\nfrom dagster._utils import (\n    PrintFn,\n    datetime_as_float,\n    utc_datetime_from_naive,\n    utc_datetime_from_timestamp,\n)\nfrom dagster._utils.concurrency import (\n    ConcurrencyClaimStatus,\n    ConcurrencyKeyInfo,\n    ConcurrencySlotStatus,\n)\n\nfrom ..dagster_run import DagsterRunStatsSnapshot\nfrom .base import (\n    AssetEntry,\n    AssetRecord,\n    EventLogConnection,\n    EventLogCursor,\n    EventLogRecord,\n    EventLogStorage,\n    EventRecordsFilter,\n)\nfrom .migration import ASSET_DATA_MIGRATIONS, ASSET_KEY_INDEX_COLS, EVENT_LOG_DATA_MIGRATIONS\nfrom .schema import (\n    AssetCheckExecutionsTable,\n    AssetEventTagsTable,\n    AssetKeyTable,\n    ConcurrencySlotsTable,\n    DynamicPartitionsTable,\n    PendingStepsTable,\n    SecondaryIndexMigrationTable,\n    SqlEventLogStorageTable,\n)\n\nif TYPE_CHECKING:\n    from dagster._core.storage.partition_status_cache import AssetStatusCacheValue\n\nMAX_CONCURRENCY_SLOTS = 1000\nMIN_ASSET_ROWS = 25\n\n# We are using third-party library objects for DB connections-- at this time, these libraries are\n# untyped. When/if we upgrade to typed variants, the `Any` here can be replaced or the alias as a\n# whole can be dropped.\nSqlDbConnection: TypeAlias = Any\n\n\n
[docs]class SqlEventLogStorage(EventLogStorage):\n """Base class for SQL backed event log storages.\n\n Distinguishes between run-based connections and index connections in order to support run-level\n sharding, while maintaining the ability to do cross-run queries\n """\n\n @abstractmethod\n def run_connection(self, run_id: Optional[str]) -> ContextManager[Connection]:\n """Context manager yielding a connection to access the event logs for a specific run.\n\n Args:\n run_id (Optional[str]): Enables those storages which shard based on run_id, e.g.,\n SqliteEventLogStorage, to connect appropriately.\n """\n\n @abstractmethod\n def index_connection(self) -> ContextManager[Connection]:\n """Context manager yielding a connection to access cross-run indexed tables."""\n\n @abstractmethod\n def upgrade(self) -> None:\n """This method should perform any schema migrations necessary to bring an\n out-of-date instance of the storage up to date.\n """\n\n @abstractmethod\n def has_table(self, table_name: str) -> bool:\n """This method checks if a table exists in the database."""\n\n def prepare_insert_event(self, event):\n """Helper method for preparing the event log SQL insertion statement. Abstracted away to\n have a single place for the logical table representation of the event, while having a way\n for SQL backends to implement different execution implementations for `store_event`. See\n the `dagster-postgres` implementation which overrides the generic SQL implementation of\n `store_event`.\n """\n dagster_event_type = None\n asset_key_str = None\n partition = None\n step_key = event.step_key\n if event.is_dagster_event:\n dagster_event_type = event.dagster_event.event_type_value\n step_key = event.dagster_event.step_key\n if event.dagster_event.asset_key:\n check.inst_param(event.dagster_event.asset_key, "asset_key", AssetKey)\n asset_key_str = event.dagster_event.asset_key.to_string()\n if event.dagster_event.partition:\n partition = event.dagster_event.partition\n\n # https://stackoverflow.com/a/54386260/324449\n return SqlEventLogStorageTable.insert().values(\n run_id=event.run_id,\n event=serialize_value(event),\n dagster_event_type=dagster_event_type,\n # Postgres requires a datetime that is in UTC but has no timezone info set\n # in order to be stored correctly\n timestamp=datetime.utcfromtimestamp(event.timestamp),\n step_key=step_key,\n asset_key=asset_key_str,\n partition=partition,\n )\n\n def has_asset_key_col(self, column_name: str) -> bool:\n with self.index_connection() as conn:\n column_names = [x.get("name") for x in db.inspect(conn).get_columns(AssetKeyTable.name)]\n return column_name in column_names\n\n def has_asset_key_index_cols(self) -> bool:\n return self.has_asset_key_col("last_materialization_timestamp")\n\n def store_asset_event(self, event: EventLogEntry, event_id: int):\n check.inst_param(event, "event", EventLogEntry)\n\n if not (event.dagster_event and event.dagster_event.asset_key):\n return\n\n # We switched to storing the entire event record of the last materialization instead of just\n # the AssetMaterialization object, so that we have access to metadata like timestamp,\n # pipeline, run_id, etc.\n #\n # This should make certain asset queries way more performant, without having to do extra\n # queries against the event log.\n #\n # This should be accompanied by a schema change in 0.12.0, renaming `last_materialization`\n # to `last_materialization_event`, for clarity. 
For now, we should do some back-compat.\n #\n # https://github.com/dagster-io/dagster/issues/3945\n\n values = self._get_asset_entry_values(event, event_id, self.has_asset_key_index_cols())\n insert_statement = AssetKeyTable.insert().values(\n asset_key=event.dagster_event.asset_key.to_string(), **values\n )\n update_statement = (\n AssetKeyTable.update()\n .values(**values)\n .where(\n AssetKeyTable.c.asset_key == event.dagster_event.asset_key.to_string(),\n )\n )\n\n with self.index_connection() as conn:\n try:\n conn.execute(insert_statement)\n except db_exc.IntegrityError:\n conn.execute(update_statement)\n\n def _get_asset_entry_values(\n self, event: EventLogEntry, event_id: int, has_asset_key_index_cols: bool\n ) -> Dict[str, Any]:\n # The AssetKeyTable contains a `last_materialization_timestamp` column that is exclusively\n # used to determine if an asset exists (last materialization timestamp > wipe timestamp).\n # This column is used nowhere else, and as of AssetObservation/AssetMaterializationPlanned\n # event creation, we want to extend this functionality to ensure that assets with any event\n # (observation, materialization, or materialization planned) yielded with timestamp\n # > wipe timestamp display in the Dagster UI.\n\n # As of the following PRs, we update last_materialization_timestamp to store the timestamp\n # of the latest asset observation, materialization, or materialization_planned that has occurred.\n # https://github.com/dagster-io/dagster/pull/6885\n # https://github.com/dagster-io/dagster/pull/7319\n\n entry_values: Dict[str, Any] = {}\n dagster_event = check.not_none(event.dagster_event)\n if dagster_event.is_step_materialization:\n entry_values.update(\n {\n "last_materialization": serialize_value(\n EventLogRecord(\n storage_id=event_id,\n event_log_entry=event,\n )\n ),\n "last_run_id": event.run_id,\n }\n )\n if has_asset_key_index_cols:\n entry_values.update(\n {\n "last_materialization_timestamp": utc_datetime_from_timestamp(\n event.timestamp\n ),\n }\n )\n elif dagster_event.is_asset_materialization_planned:\n # The AssetKeyTable also contains a `last_run_id` column that is updated upon asset\n # materialization. This column was not being used until the below PR. 
This new change\n # writes to the column upon `ASSET_MATERIALIZATION_PLANNED` events to fetch the last\n # run id for a set of assets in one roundtrip call to event log storage.\n # https://github.com/dagster-io/dagster/pull/7319\n entry_values.update({"last_run_id": event.run_id})\n if has_asset_key_index_cols:\n entry_values.update(\n {\n "last_materialization_timestamp": utc_datetime_from_timestamp(\n event.timestamp\n ),\n }\n )\n elif dagster_event.is_asset_observation:\n if has_asset_key_index_cols:\n entry_values.update(\n {\n "last_materialization_timestamp": utc_datetime_from_timestamp(\n event.timestamp\n ),\n }\n )\n\n return entry_values\n\n def supports_add_asset_event_tags(self) -> bool:\n return self.has_table(AssetEventTagsTable.name)\n\n def add_asset_event_tags(\n self,\n event_id: int,\n event_timestamp: float,\n asset_key: AssetKey,\n new_tags: Mapping[str, str],\n ) -> None:\n check.int_param(event_id, "event_id")\n check.float_param(event_timestamp, "event_timestamp")\n check.inst_param(asset_key, "asset_key", AssetKey)\n check.mapping_param(new_tags, "new_tags", key_type=str, value_type=str)\n\n if not self.supports_add_asset_event_tags():\n raise DagsterInvalidInvocationError(\n "In order to add asset event tags, you must run `dagster instance migrate` to "\n "create the AssetEventTags table."\n )\n\n current_tags_list = self.get_event_tags_for_asset(asset_key, filter_event_id=event_id)\n\n asset_key_str = asset_key.to_string()\n\n if len(current_tags_list) == 0:\n current_tags: Mapping[str, str] = {}\n else:\n current_tags = current_tags_list[0]\n\n with self.index_connection() as conn:\n current_tags_set = set(current_tags.keys())\n new_tags_set = set(new_tags.keys())\n\n existing_tags = current_tags_set & new_tags_set\n added_tags = new_tags_set.difference(existing_tags)\n\n for tag in existing_tags:\n conn.execute(\n AssetEventTagsTable.update()\n .where(\n db.and_(\n AssetEventTagsTable.c.event_id == event_id,\n AssetEventTagsTable.c.asset_key == asset_key_str,\n AssetEventTagsTable.c.key == tag,\n )\n )\n .values(value=new_tags[tag])\n )\n\n if added_tags:\n conn.execute(\n AssetEventTagsTable.insert(),\n [\n dict(\n event_id=event_id,\n asset_key=asset_key_str,\n key=tag,\n value=new_tags[tag],\n # Postgres requires a datetime that is in UTC but has no timezone info\n # set in order to be stored correctly\n event_timestamp=datetime.utcfromtimestamp(event_timestamp),\n )\n for tag in added_tags\n ],\n )\n\n def store_asset_event_tags(self, event: EventLogEntry, event_id: int) -> None:\n check.inst_param(event, "event", EventLogEntry)\n check.int_param(event_id, "event_id")\n\n if event.dagster_event and event.dagster_event.asset_key:\n if event.dagster_event.is_step_materialization:\n tags = event.dagster_event.step_materialization_data.materialization.tags\n elif event.dagster_event.is_asset_observation:\n tags = event.dagster_event.asset_observation_data.asset_observation.tags\n else:\n tags = None\n\n if not tags or not self.has_table(AssetEventTagsTable.name):\n # If tags table does not exist, silently exit. 
This is to support OSS\n # users who have not yet run the migration to create the table.\n # On read, we will throw an error if the table does not exist.\n return\n\n check.inst_param(event.dagster_event.asset_key, "asset_key", AssetKey)\n asset_key_str = event.dagster_event.asset_key.to_string()\n\n with self.index_connection() as conn:\n conn.execute(\n AssetEventTagsTable.insert(),\n [\n dict(\n event_id=event_id,\n asset_key=asset_key_str,\n key=key,\n value=value,\n # Postgres requires a datetime that is in UTC but has no timezone info\n # set in order to be stored correctly\n event_timestamp=datetime.utcfromtimestamp(event.timestamp),\n )\n for key, value in tags.items()\n ],\n )\n\n def store_event(self, event: EventLogEntry) -> None:\n """Store an event corresponding to a pipeline run.\n\n Args:\n event (EventLogEntry): The event to store.\n """\n check.inst_param(event, "event", EventLogEntry)\n insert_event_statement = self.prepare_insert_event(event)\n run_id = event.run_id\n\n event_id = None\n\n with self.run_connection(run_id) as conn:\n result = conn.execute(insert_event_statement)\n event_id = result.inserted_primary_key[0]\n\n if (\n event.is_dagster_event\n and event.dagster_event_type in ASSET_EVENTS\n and event.dagster_event.asset_key # type: ignore\n ):\n self.store_asset_event(event, event_id)\n\n if event_id is None:\n raise DagsterInvariantViolationError(\n "Cannot store asset event tags for null event id."\n )\n\n self.store_asset_event_tags(event, event_id)\n\n if event.is_dagster_event and event.dagster_event_type in ASSET_CHECK_EVENTS:\n self.store_asset_check_event(event, event_id)\n\n def get_records_for_run(\n self,\n run_id,\n cursor: Optional[str] = None,\n of_type: Optional[Union[DagsterEventType, Set[DagsterEventType]]] = None,\n limit: Optional[int] = None,\n ascending: bool = True,\n ) -> EventLogConnection:\n """Get all of the logs corresponding to a run.\n\n Args:\n run_id (str): The id of the run for which to fetch logs.\n cursor (Optional[int]): Zero-indexed logs will be returned starting from cursor + 1,\n i.e., if cursor is -1, all logs will be returned. 
(default: -1)\n of_type (Optional[DagsterEventType]): the dagster event type to filter the logs.\n limit (Optional[int]): the maximum number of events to fetch\n """\n check.str_param(run_id, "run_id")\n check.opt_str_param(cursor, "cursor")\n\n check.invariant(not of_type or isinstance(of_type, (DagsterEventType, frozenset, set)))\n\n dagster_event_types = (\n {of_type}\n if isinstance(of_type, DagsterEventType)\n else check.opt_set_param(of_type, "dagster_event_type", of_type=DagsterEventType)\n )\n\n query = (\n db_select([SqlEventLogStorageTable.c.id, SqlEventLogStorageTable.c.event])\n .where(SqlEventLogStorageTable.c.run_id == run_id)\n .order_by(\n SqlEventLogStorageTable.c.id.asc()\n if ascending\n else SqlEventLogStorageTable.c.id.desc()\n )\n )\n if dagster_event_types:\n query = query.where(\n SqlEventLogStorageTable.c.dagster_event_type.in_(\n [dagster_event_type.value for dagster_event_type in dagster_event_types]\n )\n )\n\n # adjust 0 based index cursor to SQL offset\n if cursor is not None:\n cursor_obj = EventLogCursor.parse(cursor)\n if cursor_obj.is_offset_cursor():\n query = query.offset(cursor_obj.offset())\n elif cursor_obj.is_id_cursor():\n if ascending:\n query = query.where(SqlEventLogStorageTable.c.id > cursor_obj.storage_id())\n else:\n query = query.where(SqlEventLogStorageTable.c.id < cursor_obj.storage_id())\n\n if limit:\n query = query.limit(limit)\n\n with self.run_connection(run_id) as conn:\n results = conn.execute(query).fetchall()\n\n last_record_id = None\n try:\n records = []\n for (\n record_id,\n json_str,\n ) in results:\n records.append(\n EventLogRecord(\n storage_id=record_id,\n event_log_entry=deserialize_value(json_str, EventLogEntry),\n )\n )\n last_record_id = record_id\n except (seven.JSONDecodeError, DeserializationError) as err:\n raise DagsterEventLogInvalidForRun(run_id=run_id) from err\n\n if last_record_id is not None:\n next_cursor = EventLogCursor.from_storage_id(last_record_id).to_string()\n elif cursor:\n # record fetch returned no new logs, return the same cursor\n next_cursor = cursor\n else:\n # rely on the fact that all storage ids will be positive integers\n next_cursor = EventLogCursor.from_storage_id(-1).to_string()\n\n return EventLogConnection(\n records=records,\n cursor=next_cursor,\n has_more=bool(limit and len(results) == limit),\n )\n\n def get_stats_for_run(self, run_id: str) -> DagsterRunStatsSnapshot:\n check.str_param(run_id, "run_id")\n\n query = (\n db_select(\n [\n SqlEventLogStorageTable.c.dagster_event_type,\n db.func.count().label("n_events_of_type"),\n db.func.max(SqlEventLogStorageTable.c.timestamp).label("last_event_timestamp"),\n ]\n )\n .where(\n db.and_(\n SqlEventLogStorageTable.c.run_id == run_id,\n SqlEventLogStorageTable.c.dagster_event_type != None, # noqa: E711\n )\n )\n .group_by("dagster_event_type")\n )\n\n with self.run_connection(run_id) as conn:\n results = conn.execute(query).fetchall()\n\n try:\n counts = {}\n times = {}\n for result in results:\n (dagster_event_type, n_events_of_type, last_event_timestamp) = result\n check.invariant(dagster_event_type is not None)\n counts[dagster_event_type] = n_events_of_type\n times[dagster_event_type] = last_event_timestamp\n\n enqueued_time = times.get(DagsterEventType.PIPELINE_ENQUEUED.value, None)\n launch_time = times.get(DagsterEventType.PIPELINE_STARTING.value, None)\n start_time = times.get(DagsterEventType.PIPELINE_START.value, None)\n end_time = times.get(\n DagsterEventType.PIPELINE_SUCCESS.value,\n times.get(\n 
DagsterEventType.PIPELINE_FAILURE.value,\n times.get(DagsterEventType.PIPELINE_CANCELED.value, None),\n ),\n )\n\n return DagsterRunStatsSnapshot(\n run_id=run_id,\n steps_succeeded=counts.get(DagsterEventType.STEP_SUCCESS.value, 0),\n steps_failed=counts.get(DagsterEventType.STEP_FAILURE.value, 0),\n materializations=counts.get(DagsterEventType.ASSET_MATERIALIZATION.value, 0),\n expectations=counts.get(DagsterEventType.STEP_EXPECTATION_RESULT.value, 0),\n enqueued_time=datetime_as_float(enqueued_time) if enqueued_time else None,\n launch_time=datetime_as_float(launch_time) if launch_time else None,\n start_time=datetime_as_float(start_time) if start_time else None,\n end_time=datetime_as_float(end_time) if end_time else None,\n )\n except (seven.JSONDecodeError, DeserializationError) as err:\n raise DagsterEventLogInvalidForRun(run_id=run_id) from err\n\n def get_step_stats_for_run(\n self, run_id: str, step_keys: Optional[Sequence[str]] = None\n ) -> Sequence[RunStepKeyStatsSnapshot]:\n check.str_param(run_id, "run_id")\n check.opt_list_param(step_keys, "step_keys", of_type=str)\n\n # Originally, this was two different queries:\n # 1) one query which aggregated top-level step stats by grouping by event type / step_key in\n # a single query, using pure SQL (e.g. start_time, end_time, status, attempt counts).\n # 2) one query which fetched all the raw events for a specific event type and then inspected\n # the deserialized event object to aggregate stats derived from sequences of events.\n # (e.g. marker events, materializations, expectations resuls, attempts timing, etc.)\n #\n # For simplicity, we now just do the second type of query and derive the stats in Python\n # from the raw events. This has the benefit of being easier to read and also the benefit of\n # being able to share code with the in-memory event log storage implementation. 
We may\n # choose to revisit this in the future, especially if we are able to do JSON-column queries\n # in SQL as a way of bypassing the serdes layer in all cases.\n raw_event_query = (\n db_select([SqlEventLogStorageTable.c.event])\n .where(SqlEventLogStorageTable.c.run_id == run_id)\n .where(SqlEventLogStorageTable.c.step_key != None) # noqa: E711\n .where(\n SqlEventLogStorageTable.c.dagster_event_type.in_(\n [\n DagsterEventType.STEP_START.value,\n DagsterEventType.STEP_SUCCESS.value,\n DagsterEventType.STEP_SKIPPED.value,\n DagsterEventType.STEP_FAILURE.value,\n DagsterEventType.STEP_RESTARTED.value,\n DagsterEventType.ASSET_MATERIALIZATION.value,\n DagsterEventType.STEP_EXPECTATION_RESULT.value,\n DagsterEventType.STEP_RESTARTED.value,\n DagsterEventType.STEP_UP_FOR_RETRY.value,\n ]\n + [marker_event.value for marker_event in MARKER_EVENTS]\n )\n )\n .order_by(SqlEventLogStorageTable.c.id.asc())\n )\n if step_keys:\n raw_event_query = raw_event_query.where(\n SqlEventLogStorageTable.c.step_key.in_(step_keys)\n )\n\n with self.run_connection(run_id) as conn:\n results = conn.execute(raw_event_query).fetchall()\n\n try:\n records = [deserialize_value(json_str, EventLogEntry) for (json_str,) in results]\n return build_run_step_stats_from_events(run_id, records)\n except (seven.JSONDecodeError, DeserializationError) as err:\n raise DagsterEventLogInvalidForRun(run_id=run_id) from err\n\n def _apply_migration(self, migration_name, migration_fn, print_fn, force):\n if self.has_secondary_index(migration_name):\n if not force:\n if print_fn:\n print_fn(f"Skipping already applied data migration: {migration_name}")\n return\n if print_fn:\n print_fn(f"Starting data migration: {migration_name}")\n migration_fn()(self, print_fn)\n self.enable_secondary_index(migration_name)\n if print_fn:\n print_fn(f"Finished data migration: {migration_name}")\n\n def reindex_events(self, print_fn: Optional[PrintFn] = None, force: bool = False) -> None:\n """Call this method to run any data migrations across the event_log table."""\n for migration_name, migration_fn in EVENT_LOG_DATA_MIGRATIONS.items():\n self._apply_migration(migration_name, migration_fn, print_fn, force)\n\n def reindex_assets(self, print_fn: Optional[PrintFn] = None, force: bool = False) -> None:\n """Call this method to run any data migrations across the asset_keys table."""\n for migration_name, migration_fn in ASSET_DATA_MIGRATIONS.items():\n self._apply_migration(migration_name, migration_fn, print_fn, force)\n\n def wipe(self) -> None:\n """Clears the event log storage."""\n # Should be overridden by SqliteEventLogStorage and other storages that shard based on\n # run_id\n\n # https://stackoverflow.com/a/54386260/324449\n with self.run_connection(run_id=None) as conn:\n conn.execute(SqlEventLogStorageTable.delete())\n conn.execute(AssetKeyTable.delete())\n\n if self.has_table("asset_event_tags"):\n conn.execute(AssetEventTagsTable.delete())\n\n if self.has_table("dynamic_partitions"):\n conn.execute(DynamicPartitionsTable.delete())\n\n if self.has_table("concurrency_slots"):\n conn.execute(ConcurrencySlotsTable.delete())\n\n if self.has_table("pending_steps"):\n conn.execute(PendingStepsTable.delete())\n\n if self.has_table("asset_check_executions"):\n conn.execute(AssetCheckExecutionsTable.delete())\n\n self._wipe_index()\n\n def _wipe_index(self):\n with self.index_connection() as conn:\n conn.execute(SqlEventLogStorageTable.delete())\n conn.execute(AssetKeyTable.delete())\n\n if self.has_table("asset_event_tags"):\n 
conn.execute(AssetEventTagsTable.delete())\n\n if self.has_table("dynamic_partitions"):\n conn.execute(DynamicPartitionsTable.delete())\n\n if self.has_table("concurrency_slots"):\n conn.execute(ConcurrencySlotsTable.delete())\n\n if self.has_table("pending_steps"):\n conn.execute(PendingStepsTable.delete())\n\n if self.has_table("asset_check_executions"):\n conn.execute(AssetCheckExecutionsTable.delete())\n\n def delete_events(self, run_id: str) -> None:\n with self.run_connection(run_id) as conn:\n self.delete_events_for_run(conn, run_id)\n with self.index_connection() as conn:\n self.delete_events_for_run(conn, run_id)\n self.free_concurrency_slots_for_run(run_id)\n\n def delete_events_for_run(self, conn: Connection, run_id: str) -> None:\n check.str_param(run_id, "run_id")\n conn.execute(\n SqlEventLogStorageTable.delete().where(SqlEventLogStorageTable.c.run_id == run_id)\n )\n\n @property\n def is_persistent(self) -> bool:\n return True\n\n def update_event_log_record(self, record_id: int, event: EventLogEntry) -> None:\n """Utility method for migration scripts to update SQL representation of event records."""\n check.int_param(record_id, "record_id")\n check.inst_param(event, "event", EventLogEntry)\n dagster_event_type = None\n asset_key_str = None\n if event.is_dagster_event:\n dagster_event_type = event.dagster_event.event_type_value # type: ignore\n if event.dagster_event.asset_key: # type: ignore\n check.inst_param(event.dagster_event.asset_key, "asset_key", AssetKey) # type: ignore\n asset_key_str = event.dagster_event.asset_key.to_string() # type: ignore\n\n with self.run_connection(run_id=event.run_id) as conn:\n conn.execute(\n SqlEventLogStorageTable.update()\n .where(SqlEventLogStorageTable.c.id == record_id)\n .values(\n event=serialize_value(event),\n dagster_event_type=dagster_event_type,\n timestamp=datetime.utcfromtimestamp(event.timestamp),\n step_key=event.step_key,\n asset_key=asset_key_str,\n )\n )\n\n def get_event_log_table_data(self, run_id: str, record_id: int) -> Optional[SqlAlchemyRow]:\n """Utility method to test representation of the record in the SQL table. Returns all of\n the columns stored in the event log storage (as opposed to the deserialized `EventLogEntry`).\n This allows checking that certain fields are extracted to support performant lookups (e.g.\n extracting `step_key` for fast filtering).\n """\n with self.run_connection(run_id=run_id) as conn:\n query = (\n db_select([SqlEventLogStorageTable])\n .where(SqlEventLogStorageTable.c.id == record_id)\n .order_by(SqlEventLogStorageTable.c.id.asc())\n )\n return conn.execute(query).fetchone()\n\n def has_secondary_index(self, name: str) -> bool:\n """This method uses a checkpoint migration table to see if summary data has been constructed\n in a secondary index table. 
Can be used to checkpoint event_log data migrations.\n """\n query = (\n db_select([1])\n .where(SecondaryIndexMigrationTable.c.name == name)\n .where(SecondaryIndexMigrationTable.c.migration_completed != None) # noqa: E711\n .limit(1)\n )\n with self.index_connection() as conn:\n results = conn.execute(query).fetchall()\n\n return len(results) > 0\n\n def enable_secondary_index(self, name: str) -> None:\n """This method marks an event_log data migration as complete, to indicate that a summary\n data migration is complete.\n """\n query = SecondaryIndexMigrationTable.insert().values(\n name=name,\n migration_completed=datetime.now(),\n )\n with self.index_connection() as conn:\n try:\n conn.execute(query)\n except db_exc.IntegrityError:\n conn.execute(\n SecondaryIndexMigrationTable.update()\n .where(SecondaryIndexMigrationTable.c.name == name)\n .values(migration_completed=datetime.now())\n )\n\n def _apply_filter_to_query(\n self,\n query: SqlAlchemyQuery,\n event_records_filter: EventRecordsFilter,\n asset_details: Optional[AssetDetails] = None,\n apply_cursor_filters: bool = True,\n ) -> SqlAlchemyQuery:\n query = query.where(\n SqlEventLogStorageTable.c.dagster_event_type == event_records_filter.event_type.value\n )\n\n if event_records_filter.asset_key:\n query = query.where(\n SqlEventLogStorageTable.c.asset_key == event_records_filter.asset_key.to_string(),\n )\n\n if event_records_filter.asset_partitions:\n query = query.where(\n SqlEventLogStorageTable.c.partition.in_(event_records_filter.asset_partitions)\n )\n\n if asset_details and asset_details.last_wipe_timestamp:\n query = query.where(\n SqlEventLogStorageTable.c.timestamp\n > datetime.utcfromtimestamp(asset_details.last_wipe_timestamp)\n )\n\n if apply_cursor_filters:\n # allow the run-sharded sqlite implementation to disable this cursor filtering so that\n # it can implement its own custom cursor logic, as cursor ids are not unique across run\n # shards\n if event_records_filter.before_cursor is not None:\n before_cursor_id = (\n event_records_filter.before_cursor.id\n if isinstance(event_records_filter.before_cursor, RunShardedEventsCursor)\n else event_records_filter.before_cursor\n )\n query = query.where(SqlEventLogStorageTable.c.id < before_cursor_id)\n\n if event_records_filter.after_cursor is not None:\n after_cursor_id = (\n event_records_filter.after_cursor.id\n if isinstance(event_records_filter.after_cursor, RunShardedEventsCursor)\n else event_records_filter.after_cursor\n )\n query = query.where(SqlEventLogStorageTable.c.id > after_cursor_id)\n\n if event_records_filter.before_timestamp:\n query = query.where(\n SqlEventLogStorageTable.c.timestamp\n < datetime.utcfromtimestamp(event_records_filter.before_timestamp)\n )\n\n if event_records_filter.after_timestamp:\n query = query.where(\n SqlEventLogStorageTable.c.timestamp\n > datetime.utcfromtimestamp(event_records_filter.after_timestamp)\n )\n\n if event_records_filter.storage_ids:\n query = query.where(SqlEventLogStorageTable.c.id.in_(event_records_filter.storage_ids))\n\n if event_records_filter.tags and self.has_table(AssetEventTagsTable.name):\n # If we don't have the tags table, we'll filter the results after the query\n check.invariant(\n isinstance(event_records_filter.asset_key, AssetKey),\n "Asset key must be set in event records filter to filter by tags.",\n )\n if self.supports_intersect:\n intersections = [\n db_select([AssetEventTagsTable.c.event_id]).where(\n db.and_(\n AssetEventTagsTable.c.asset_key\n == 
event_records_filter.asset_key.to_string(), # type: ignore # (bad sig?)\n AssetEventTagsTable.c.key == key,\n (\n AssetEventTagsTable.c.value == value\n if isinstance(value, str)\n else AssetEventTagsTable.c.value.in_(value)\n ),\n )\n )\n for key, value in event_records_filter.tags.items()\n ]\n query = query.where(SqlEventLogStorageTable.c.id.in_(db.intersect(*intersections)))\n\n return query\n\n def _apply_tags_table_joins(\n self,\n table: db.Table,\n tags: Mapping[str, Union[str, Sequence[str]]],\n asset_key: Optional[AssetKey],\n ) -> db.Table:\n event_id_col = table.c.id if table == SqlEventLogStorageTable else table.c.event_id\n i = 0\n for key, value in tags.items():\n i += 1\n tags_table = db_subquery(\n db_select([AssetEventTagsTable]), f"asset_event_tags_subquery_{i}"\n )\n table = table.join(\n tags_table,\n db.and_(\n event_id_col == tags_table.c.event_id,\n not asset_key or tags_table.c.asset_key == asset_key.to_string(),\n tags_table.c.key == key,\n (\n tags_table.c.value == value\n if isinstance(value, str)\n else tags_table.c.value.in_(value)\n ),\n ),\n )\n return table\n\n def get_event_records(\n self,\n event_records_filter: EventRecordsFilter,\n limit: Optional[int] = None,\n ascending: bool = False,\n ) -> Sequence[EventLogRecord]:\n """Returns a list of (record_id, record)."""\n check.inst_param(event_records_filter, "event_records_filter", EventRecordsFilter)\n check.opt_int_param(limit, "limit")\n check.bool_param(ascending, "ascending")\n\n if event_records_filter.asset_key:\n asset_details = next(iter(self._get_assets_details([event_records_filter.asset_key])))\n else:\n asset_details = None\n\n if (\n event_records_filter.tags\n and not self.supports_intersect\n and self.has_table(AssetEventTagsTable.name)\n ):\n table = self._apply_tags_table_joins(\n SqlEventLogStorageTable, event_records_filter.tags, event_records_filter.asset_key\n )\n else:\n table = SqlEventLogStorageTable\n\n query = db_select(\n [SqlEventLogStorageTable.c.id, SqlEventLogStorageTable.c.event]\n ).select_from(table)\n\n query = self._apply_filter_to_query(\n query=query,\n event_records_filter=event_records_filter,\n asset_details=asset_details,\n )\n if limit:\n query = query.limit(limit)\n\n if ascending:\n query = query.order_by(SqlEventLogStorageTable.c.id.asc())\n else:\n query = query.order_by(SqlEventLogStorageTable.c.id.desc())\n\n with self.index_connection() as conn:\n results = conn.execute(query).fetchall()\n\n event_records = []\n for row_id, json_str in results:\n try:\n event_record = deserialize_value(json_str, NamedTuple)\n if not isinstance(event_record, EventLogEntry):\n logging.warning(\n "Could not resolve event record as EventLogEntry for id `%s`.", row_id\n )\n continue\n\n if event_records_filter.tags and not self.has_table(AssetEventTagsTable.name):\n # If we can't filter tags via the tags table, filter the returned records\n if limit is not None:\n raise DagsterInvalidInvocationError(\n "Cannot filter events on tags with a limit, without the asset event "\n "tags table. 
To fix, run `dagster instance migrate`."\n )\n\n event_record_tags = event_record.tags\n if not event_record_tags or any(\n event_record_tags.get(k) != v for k, v in event_records_filter.tags.items()\n ):\n continue\n\n event_records.append(\n EventLogRecord(storage_id=row_id, event_log_entry=event_record)\n )\n except seven.JSONDecodeError:\n logging.warning("Could not parse event record id `%s`.", row_id)\n\n return event_records\n\n def supports_event_consumer_queries(self) -> bool:\n return True\n\n @property\n def supports_intersect(self) -> bool:\n return True\n\n def get_logs_for_all_runs_by_log_id(\n self,\n after_cursor: int = -1,\n dagster_event_type: Optional[Union[DagsterEventType, Set[DagsterEventType]]] = None,\n limit: Optional[int] = None,\n ) -> Mapping[int, EventLogEntry]:\n check.int_param(after_cursor, "after_cursor")\n check.invariant(\n after_cursor >= -1,\n f"Don't know what to do with negative cursor {after_cursor}",\n )\n dagster_event_types = (\n {dagster_event_type}\n if isinstance(dagster_event_type, DagsterEventType)\n else check.opt_set_param(\n dagster_event_type, "dagster_event_type", of_type=DagsterEventType\n )\n )\n\n query = (\n db_select([SqlEventLogStorageTable.c.id, SqlEventLogStorageTable.c.event])\n .where(SqlEventLogStorageTable.c.id > after_cursor)\n .order_by(SqlEventLogStorageTable.c.id.asc())\n )\n\n if dagster_event_types:\n query = query.where(\n SqlEventLogStorageTable.c.dagster_event_type.in_(\n [dagster_event_type.value for dagster_event_type in dagster_event_types]\n )\n )\n\n if limit:\n query = query.limit(limit)\n\n with self.index_connection() as conn:\n results = conn.execute(query).fetchall()\n\n events = {}\n record_id = None\n try:\n for (\n record_id,\n json_str,\n ) in results:\n events[record_id] = deserialize_value(json_str, EventLogEntry)\n except (seven.JSONDecodeError, DeserializationError):\n logging.warning("Could not parse event record id `%s`.", record_id)\n\n return events\n\n def get_maximum_record_id(self) -> Optional[int]:\n with self.index_connection() as conn:\n result = conn.execute(db_select([db.func.max(SqlEventLogStorageTable.c.id)])).fetchone()\n return result[0] # type: ignore\n\n def _construct_asset_record_from_row(\n self,\n row,\n last_materialization_record: Optional[EventLogRecord],\n can_cache_asset_status_data: bool,\n ) -> AssetRecord:\n from dagster._core.storage.partition_status_cache import AssetStatusCacheValue\n\n asset_key = AssetKey.from_db_string(row["asset_key"])\n if asset_key:\n return AssetRecord(\n storage_id=row["id"],\n asset_entry=AssetEntry(\n asset_key=asset_key,\n last_materialization_record=last_materialization_record,\n last_run_id=row["last_run_id"],\n asset_details=AssetDetails.from_db_string(row["asset_details"]),\n cached_status=(\n AssetStatusCacheValue.from_db_string(row["cached_status_data"])\n if can_cache_asset_status_data\n else None\n ),\n ),\n )\n else:\n check.failed("Row did not contain asset key.")\n\n def _get_latest_materialization_records(\n self, raw_asset_rows\n ) -> Mapping[AssetKey, Optional[EventLogRecord]]:\n # Given a list of raw asset rows, returns a mapping of asset key to latest asset materialization\n # event log entry. 
Fetches backcompat EventLogEntry records when the last_materialization\n # in the raw asset row is an AssetMaterialization.\n to_backcompat_fetch = set()\n results: Dict[AssetKey, Optional[EventLogRecord]] = {}\n for row in raw_asset_rows:\n asset_key = AssetKey.from_db_string(row["asset_key"])\n if not asset_key:\n continue\n event_or_materialization = (\n deserialize_value(row["last_materialization"], NamedTuple)\n if row["last_materialization"]\n else None\n )\n if isinstance(event_or_materialization, EventLogRecord):\n results[asset_key] = event_or_materialization\n else:\n to_backcompat_fetch.add(asset_key)\n\n latest_event_subquery = db_subquery(\n db_select(\n [\n SqlEventLogStorageTable.c.asset_key,\n db.func.max(SqlEventLogStorageTable.c.id).label("id"),\n ]\n )\n .where(\n db.and_(\n SqlEventLogStorageTable.c.asset_key.in_(\n [asset_key.to_string() for asset_key in to_backcompat_fetch]\n ),\n SqlEventLogStorageTable.c.dagster_event_type\n == DagsterEventType.ASSET_MATERIALIZATION.value,\n )\n )\n .group_by(SqlEventLogStorageTable.c.asset_key),\n "latest_event_subquery",\n )\n backcompat_query = db_select(\n [\n SqlEventLogStorageTable.c.asset_key,\n SqlEventLogStorageTable.c.id,\n SqlEventLogStorageTable.c.event,\n ]\n ).select_from(\n latest_event_subquery.join(\n SqlEventLogStorageTable,\n db.and_(\n SqlEventLogStorageTable.c.asset_key == latest_event_subquery.c.asset_key,\n SqlEventLogStorageTable.c.id == latest_event_subquery.c.id,\n ),\n )\n )\n with self.index_connection() as conn:\n event_rows = db_fetch_mappings(conn, backcompat_query)\n\n for row in event_rows:\n asset_key = AssetKey.from_db_string(cast(Optional[str], row["asset_key"]))\n if asset_key:\n results[asset_key] = EventLogRecord(\n storage_id=cast(int, row["id"]),\n event_log_entry=deserialize_value(cast(str, row["event"]), EventLogEntry),\n )\n return results\n\n def can_cache_asset_status_data(self) -> bool:\n return self.has_asset_key_col("cached_status_data")\n\n def wipe_asset_cached_status(self, asset_key: AssetKey) -> None:\n if self.can_cache_asset_status_data():\n check.inst_param(asset_key, "asset_key", AssetKey)\n with self.index_connection() as conn:\n conn.execute(\n AssetKeyTable.update()\n .values(dict(cached_status_data=None))\n .where(\n AssetKeyTable.c.asset_key == asset_key.to_string(),\n )\n )\n\n def get_asset_records(\n self, asset_keys: Optional[Sequence[AssetKey]] = None\n ) -> Sequence[AssetRecord]:\n rows = self._fetch_asset_rows(asset_keys=asset_keys)\n latest_materialization_records = self._get_latest_materialization_records(rows)\n can_cache_asset_status_data = self.can_cache_asset_status_data()\n\n asset_records: List[AssetRecord] = []\n for row in rows:\n asset_key = AssetKey.from_db_string(row["asset_key"])\n if asset_key:\n asset_records.append(\n self._construct_asset_record_from_row(\n row,\n latest_materialization_records.get(asset_key),\n can_cache_asset_status_data,\n )\n )\n\n return asset_records\n\n def has_asset_key(self, asset_key: AssetKey) -> bool:\n check.inst_param(asset_key, "asset_key", AssetKey)\n rows = self._fetch_asset_rows(asset_keys=[asset_key])\n return bool(rows)\n\n def all_asset_keys(self):\n rows = self._fetch_asset_rows()\n asset_keys = [\n AssetKey.from_db_string(row["asset_key"])\n for row in sorted(rows, key=lambda x: x["asset_key"])\n ]\n return [asset_key for asset_key in asset_keys if asset_key]\n\n def get_asset_keys(\n self,\n prefix: Optional[Sequence[str]] = None,\n limit: Optional[int] = None,\n cursor: Optional[str] = None,\n ) -> 
Sequence[AssetKey]:\n rows = self._fetch_asset_rows(prefix=prefix, limit=limit, cursor=cursor)\n asset_keys = [\n AssetKey.from_db_string(row["asset_key"])\n for row in sorted(rows, key=lambda x: x["asset_key"])\n ]\n return [asset_key for asset_key in asset_keys if asset_key]\n\n def get_latest_materialization_events(\n self, asset_keys: Iterable[AssetKey]\n ) -> Mapping[AssetKey, Optional[EventLogEntry]]:\n check.iterable_param(asset_keys, "asset_keys", AssetKey)\n rows = self._fetch_asset_rows(asset_keys=asset_keys)\n return {\n asset_key: event_log_record.event_log_entry if event_log_record is not None else None\n for asset_key, event_log_record in self._get_latest_materialization_records(\n rows\n ).items()\n }\n\n def _fetch_asset_rows(\n self,\n asset_keys=None,\n prefix: Optional[Sequence[str]] = None,\n limit: Optional[int] = None,\n cursor: Optional[str] = None,\n ) -> Sequence[SqlAlchemyRow]:\n # fetches rows containing asset_key, last_materialization, and asset_details from the DB,\n # applying the filters specified in the arguments.\n #\n # Differs from _fetch_raw_asset_rows, in that it loops through to make sure enough rows are\n # returned to satisfy the limit.\n #\n # returns a list of rows where each row is a tuple of serialized asset_key, materialization,\n # and asset_details\n should_query = True\n current_cursor = cursor\n if self.has_secondary_index(ASSET_KEY_INDEX_COLS):\n # if we have migrated, we can limit using SQL\n fetch_limit = limit\n else:\n # if we haven't migrated, overfetch in case the first N results are wiped\n fetch_limit = max(limit, MIN_ASSET_ROWS) if limit else None\n result = []\n\n while should_query:\n rows, has_more, current_cursor = self._fetch_raw_asset_rows(\n asset_keys=asset_keys, prefix=prefix, limit=fetch_limit, cursor=current_cursor\n )\n result.extend(rows)\n should_query = bool(has_more) and bool(limit) and len(result) < cast(int, limit)\n\n is_partial_query = asset_keys is not None or bool(prefix) or bool(limit) or bool(cursor)\n if not is_partial_query and self._can_mark_assets_as_migrated(rows): # type: ignore\n self.enable_secondary_index(ASSET_KEY_INDEX_COLS)\n\n return result[:limit] if limit else result\n\n def _fetch_raw_asset_rows(\n self,\n asset_keys: Optional[Sequence[AssetKey]] = None,\n prefix: Optional[Sequence[str]] = None,\n limit: Optional[int] = None,\n cursor=None,\n ) -> Tuple[Iterable[SqlAlchemyRow], bool, Optional[str]]:\n # fetches rows containing asset_key, last_materialization, and asset_details from the DB,\n # applying the filters specified in the arguments. Does not guarantee that the number of\n # rows returned will match the limit specified. 
This helper function is used to fetch a\n # chunk of asset key rows, which may or may not be wiped.\n #\n # Returns a tuple of (rows, has_more, cursor), where each row is a tuple of serialized\n # asset_key, materialization, and asset_details\n # TODO update comment\n\n columns = [\n AssetKeyTable.c.id,\n AssetKeyTable.c.asset_key,\n AssetKeyTable.c.last_materialization,\n AssetKeyTable.c.last_run_id,\n AssetKeyTable.c.asset_details,\n ]\n if self.can_cache_asset_status_data():\n columns.extend([AssetKeyTable.c.cached_status_data])\n\n is_partial_query = asset_keys is not None or bool(prefix) or bool(limit) or bool(cursor)\n if self.has_asset_key_index_cols() and not is_partial_query:\n # if the schema has been migrated, fetch the last_materialization_timestamp to see if\n # we can lazily migrate the data table\n columns.append(AssetKeyTable.c.last_materialization_timestamp)\n columns.append(AssetKeyTable.c.wipe_timestamp)\n\n query = db_select(columns).order_by(AssetKeyTable.c.asset_key.asc())\n query = self._apply_asset_filter_to_query(query, asset_keys, prefix, limit, cursor)\n\n if self.has_secondary_index(ASSET_KEY_INDEX_COLS):\n query = query.where(\n db.or_(\n AssetKeyTable.c.wipe_timestamp.is_(None),\n AssetKeyTable.c.last_materialization_timestamp > AssetKeyTable.c.wipe_timestamp,\n )\n )\n with self.index_connection() as conn:\n rows = db_fetch_mappings(conn, query)\n\n return rows, False, None\n\n with self.index_connection() as conn:\n rows = db_fetch_mappings(conn, query)\n\n wiped_timestamps_by_asset_key: Dict[AssetKey, float] = {}\n row_by_asset_key: Dict[AssetKey, SqlAlchemyRow] = OrderedDict()\n\n for row in rows:\n asset_key = AssetKey.from_db_string(cast(str, row["asset_key"]))\n if not asset_key:\n continue\n asset_details = AssetDetails.from_db_string(row["asset_details"])\n if not asset_details or not asset_details.last_wipe_timestamp:\n row_by_asset_key[asset_key] = row\n continue\n materialization_or_event_or_record = (\n deserialize_value(cast(str, row["last_materialization"]), NamedTuple)\n if row["last_materialization"]\n else None\n )\n if isinstance(materialization_or_event_or_record, (EventLogRecord, EventLogEntry)):\n if isinstance(materialization_or_event_or_record, EventLogRecord):\n event_timestamp = materialization_or_event_or_record.event_log_entry.timestamp\n else:\n event_timestamp = materialization_or_event_or_record.timestamp\n\n if asset_details.last_wipe_timestamp > event_timestamp:\n # this asset has not been materialized since being wiped, skip\n continue\n else:\n # add the key\n row_by_asset_key[asset_key] = row\n else:\n row_by_asset_key[asset_key] = row\n wiped_timestamps_by_asset_key[asset_key] = asset_details.last_wipe_timestamp\n\n if wiped_timestamps_by_asset_key:\n materialization_times = self._fetch_backcompat_materialization_times(\n wiped_timestamps_by_asset_key.keys() # type: ignore\n )\n for asset_key, wiped_timestamp in wiped_timestamps_by_asset_key.items():\n materialization_time = materialization_times.get(asset_key)\n if not materialization_time or utc_datetime_from_naive(\n materialization_time\n ) < utc_datetime_from_timestamp(wiped_timestamp):\n # remove rows that have not been materialized since being wiped\n row_by_asset_key.pop(asset_key)\n\n has_more = limit and len(rows) == limit\n new_cursor = rows[-1]["id"] if rows else None\n\n return row_by_asset_key.values(), has_more, new_cursor # type: ignore\n\n def update_asset_cached_status_data(\n self, asset_key: AssetKey, cache_values: "AssetStatusCacheValue"\n ) -> None:\n 
if self.can_cache_asset_status_data():\n with self.index_connection() as conn:\n conn.execute(\n AssetKeyTable.update()\n .where(\n AssetKeyTable.c.asset_key == asset_key.to_string(),\n )\n .values(cached_status_data=serialize_value(cache_values))\n )\n\n def _fetch_backcompat_materialization_times(\n self, asset_keys: Sequence[AssetKey]\n ) -> Mapping[AssetKey, datetime]:\n # fetches the latest materialization timestamp for the given asset_keys. Uses the (slower)\n # raw event log table.\n backcompat_query = (\n db_select(\n [\n SqlEventLogStorageTable.c.asset_key,\n db.func.max(SqlEventLogStorageTable.c.timestamp).label("timestamp"),\n ]\n )\n .where(\n SqlEventLogStorageTable.c.asset_key.in_(\n [asset_key.to_string() for asset_key in asset_keys]\n )\n )\n .group_by(SqlEventLogStorageTable.c.asset_key)\n .order_by(db.func.max(SqlEventLogStorageTable.c.timestamp).asc())\n )\n with self.index_connection() as conn:\n backcompat_rows = db_fetch_mappings(conn, backcompat_query)\n return {AssetKey.from_db_string(row["asset_key"]): row["timestamp"] for row in backcompat_rows} # type: ignore\n\n def _can_mark_assets_as_migrated(self, rows):\n if not self.has_asset_key_index_cols():\n return False\n\n if self.has_secondary_index(ASSET_KEY_INDEX_COLS):\n # we have already migrated\n return False\n\n for row in rows:\n if not _get_from_row(row, "last_materialization_timestamp"):\n return False\n\n if _get_from_row(row, "asset_details") and not _get_from_row(row, "wipe_timestamp"):\n return False\n\n return True\n\n def _apply_asset_filter_to_query(\n self,\n query: SqlAlchemyQuery,\n asset_keys: Optional[Sequence[AssetKey]] = None,\n prefix=None,\n limit: Optional[int] = None,\n cursor: Optional[str] = None,\n ) -> SqlAlchemyQuery:\n if asset_keys is not None:\n query = query.where(\n AssetKeyTable.c.asset_key.in_([asset_key.to_string() for asset_key in asset_keys])\n )\n\n if prefix:\n prefix_str = seven.dumps(prefix)[:-1]\n query = query.where(AssetKeyTable.c.asset_key.startswith(prefix_str))\n\n if cursor:\n query = query.where(AssetKeyTable.c.asset_key > cursor)\n\n if limit:\n query = query.limit(limit)\n return query\n\n def _get_assets_details(\n self, asset_keys: Sequence[AssetKey]\n ) -> Sequence[Optional[AssetDetails]]:\n check.sequence_param(asset_keys, "asset_key", AssetKey)\n rows = None\n with self.index_connection() as conn:\n rows = db_fetch_mappings(\n conn,\n db_select([AssetKeyTable.c.asset_key, AssetKeyTable.c.asset_details]).where(\n AssetKeyTable.c.asset_key.in_(\n [asset_key.to_string() for asset_key in asset_keys]\n ),\n ),\n )\n\n asset_key_to_details = {\n cast(str, row["asset_key"]): (\n deserialize_value(cast(str, row["asset_details"]), AssetDetails)\n if row["asset_details"]\n else None\n )\n for row in rows\n }\n\n # returns a list of the corresponding asset_details to provided asset_keys\n return [\n asset_key_to_details.get(asset_key.to_string(), None) for asset_key in asset_keys\n ]\n\n def _add_assets_wipe_filter_to_query(\n self,\n query: SqlAlchemyQuery,\n assets_details: Sequence[Optional[AssetDetails]],\n asset_keys: Sequence[AssetKey],\n ) -> SqlAlchemyQuery:\n check.invariant(\n len(assets_details) == len(asset_keys),\n "asset_details and asset_keys must be the same length",\n )\n for i in range(len(assets_details)):\n asset_key, asset_details = asset_keys[i], assets_details[i]\n if asset_details and asset_details.last_wipe_timestamp:\n asset_key_in_row = SqlEventLogStorageTable.c.asset_key == asset_key.to_string()\n # If asset key is in row, keep the row if 
the timestamp > wipe timestamp, else remove the row.\n # If asset key is not in row, keep the row.\n query = query.where(\n db.or_(\n db.and_(\n asset_key_in_row,\n SqlEventLogStorageTable.c.timestamp\n > datetime.utcfromtimestamp(asset_details.last_wipe_timestamp),\n ),\n db.not_(asset_key_in_row),\n )\n )\n\n return query\n\n def get_event_tags_for_asset(\n self,\n asset_key: AssetKey,\n filter_tags: Optional[Mapping[str, str]] = None,\n filter_event_id: Optional[int] = None,\n ) -> Sequence[Mapping[str, str]]:\n """Fetches asset event tags for the given asset key.\n\n If filter_tags is provided, searches for events containing all of the filter tags. Then,\n returns all tags for those events. This enables searching for multipartitioned asset\n partition tags with a fixed dimension value, e.g. all of the tags for events where\n "country" == "US".\n\n If filter_event_id is provided, fetches only tags applied to the given event.\n\n Returns a list of dicts, where each dict is a mapping of tag key to tag value for a\n single event.\n """\n asset_key = check.inst_param(asset_key, "asset_key", AssetKey)\n filter_tags = check.opt_mapping_param(\n filter_tags, "filter_tags", key_type=str, value_type=str\n )\n filter_event_id = check.opt_int_param(filter_event_id, "filter_event_id")\n\n if not self.has_table(AssetEventTagsTable.name):\n raise DagsterInvalidInvocationError(\n "In order to search for asset event tags, you must run "\n "`dagster instance migrate` to create the AssetEventTags table."\n )\n\n asset_details = self._get_assets_details([asset_key])[0]\n if not filter_tags:\n tags_query = db_select(\n [\n AssetEventTagsTable.c.key,\n AssetEventTagsTable.c.value,\n AssetEventTagsTable.c.event_id,\n ]\n ).where(AssetEventTagsTable.c.asset_key == asset_key.to_string())\n if asset_details and asset_details.last_wipe_timestamp:\n tags_query = tags_query.where(\n AssetEventTagsTable.c.event_timestamp\n > datetime.utcfromtimestamp(asset_details.last_wipe_timestamp)\n )\n elif self.supports_intersect:\n\n def get_tag_filter_query(tag_key, tag_value):\n filter_query = db_select([AssetEventTagsTable.c.event_id]).where(\n db.and_(\n AssetEventTagsTable.c.asset_key == asset_key.to_string(),\n AssetEventTagsTable.c.key == tag_key,\n AssetEventTagsTable.c.value == tag_value,\n )\n )\n if asset_details and asset_details.last_wipe_timestamp:\n filter_query = filter_query.where(\n AssetEventTagsTable.c.event_timestamp\n > datetime.utcfromtimestamp(asset_details.last_wipe_timestamp)\n )\n return filter_query\n\n intersections = [\n get_tag_filter_query(tag_key, tag_value)\n for tag_key, tag_value in filter_tags.items()\n ]\n\n tags_query = db_select(\n [\n AssetEventTagsTable.c.key,\n AssetEventTagsTable.c.value,\n AssetEventTagsTable.c.event_id,\n ]\n ).where(\n db.and_(\n AssetEventTagsTable.c.event_id.in_(db.intersect(*intersections)),\n )\n )\n else:\n table = self._apply_tags_table_joins(AssetEventTagsTable, filter_tags, asset_key)\n tags_query = db_select(\n [\n AssetEventTagsTable.c.key,\n AssetEventTagsTable.c.value,\n AssetEventTagsTable.c.event_id,\n ]\n ).select_from(table)\n\n if asset_details and asset_details.last_wipe_timestamp:\n tags_query = tags_query.where(\n AssetEventTagsTable.c.event_timestamp\n > datetime.utcfromtimestamp(asset_details.last_wipe_timestamp)\n )\n\n if filter_event_id is not None:\n tags_query = tags_query.where(AssetEventTagsTable.c.event_id == filter_event_id)\n\n with self.index_connection() as conn:\n results = conn.execute(tags_query).fetchall()\n\n 
tags_by_event_id: Dict[int, Dict[str, str]] = defaultdict(dict)\n for row in results:\n key, value, event_id = row\n tags_by_event_id[event_id][key] = value\n\n return list(tags_by_event_id.values())\n\n def _asset_materialization_from_json_column(\n self, json_str: str\n ) -> Optional[AssetMaterialization]:\n if not json_str:\n return None\n\n # We switched to storing the entire event record of the last materialization instead of just\n # the AssetMaterialization object, so that we have access to metadata like timestamp,\n # pipeline, run_id, etc.\n #\n # This should make certain asset queries way more performant, without having to do extra\n # queries against the event log.\n #\n # This should be accompanied by a schema change in 0.12.0, renaming `last_materialization`\n # to `last_materialization_event`, for clarity. For now, we should do some back-compat.\n #\n # https://github.com/dagster-io/dagster/issues/3945\n\n event_or_materialization = deserialize_value(json_str, NamedTuple)\n if isinstance(event_or_materialization, AssetMaterialization):\n return event_or_materialization\n\n if (\n not isinstance(event_or_materialization, EventLogEntry)\n or not event_or_materialization.is_dagster_event\n or not event_or_materialization.dagster_event.asset_key # type: ignore\n ):\n return None\n\n return event_or_materialization.dagster_event.step_materialization_data.materialization # type: ignore\n\n def _get_asset_key_values_on_wipe(self) -> Mapping[str, Any]:\n wipe_timestamp = pendulum.now("UTC").timestamp()\n values = {\n "asset_details": serialize_value(AssetDetails(last_wipe_timestamp=wipe_timestamp)),\n "last_run_id": None,\n }\n if self.has_asset_key_index_cols():\n values.update(\n dict(\n wipe_timestamp=utc_datetime_from_timestamp(wipe_timestamp),\n )\n )\n if self.can_cache_asset_status_data():\n values.update(dict(cached_status_data=None))\n return values\n\n def wipe_asset(self, asset_key: AssetKey) -> None:\n check.inst_param(asset_key, "asset_key", AssetKey)\n wiped_values = self._get_asset_key_values_on_wipe()\n\n with self.index_connection() as conn:\n conn.execute(\n AssetKeyTable.update()\n .values(**wiped_values)\n .where(\n AssetKeyTable.c.asset_key == asset_key.to_string(),\n )\n )\n\n def get_materialized_partitions(\n self,\n asset_key: AssetKey,\n before_cursor: Optional[int] = None,\n after_cursor: Optional[int] = None,\n ) -> Set[str]:\n query = (\n db_select(\n [\n SqlEventLogStorageTable.c.partition,\n db.func.max(SqlEventLogStorageTable.c.id),\n ]\n )\n .where(\n db.and_(\n SqlEventLogStorageTable.c.asset_key == asset_key.to_string(),\n SqlEventLogStorageTable.c.partition != None, # noqa: E711\n SqlEventLogStorageTable.c.dagster_event_type\n == DagsterEventType.ASSET_MATERIALIZATION.value,\n )\n )\n .group_by(SqlEventLogStorageTable.c.partition)\n )\n\n assets_details = self._get_assets_details([asset_key])\n query = self._add_assets_wipe_filter_to_query(query, assets_details, [asset_key])\n\n if after_cursor:\n query = query.where(SqlEventLogStorageTable.c.id > after_cursor)\n if before_cursor:\n query = query.where(SqlEventLogStorageTable.c.id < before_cursor)\n\n with self.index_connection() as conn:\n results = conn.execute(query).fetchall()\n\n return set([cast(str, row[0]) for row in results])\n\n def get_materialization_count_by_partition(\n self,\n asset_keys: Sequence[AssetKey],\n after_cursor: Optional[int] = None,\n before_cursor: Optional[int] = None,\n ) -> Mapping[AssetKey, Mapping[str, int]]:\n check.sequence_param(asset_keys, "asset_keys", 
AssetKey)\n\n        query = (\n            db_select(\n                [\n                    SqlEventLogStorageTable.c.asset_key,\n                    SqlEventLogStorageTable.c.partition,\n                    db.func.count(SqlEventLogStorageTable.c.id),\n                ]\n            )\n            .where(\n                db.and_(\n                    SqlEventLogStorageTable.c.asset_key.in_(\n                        [asset_key.to_string() for asset_key in asset_keys]\n                    ),\n                    SqlEventLogStorageTable.c.partition != None,  # noqa: E711\n                    SqlEventLogStorageTable.c.dagster_event_type\n                    == DagsterEventType.ASSET_MATERIALIZATION.value,\n                )\n            )\n            .group_by(SqlEventLogStorageTable.c.asset_key, SqlEventLogStorageTable.c.partition)\n        )\n\n        assets_details = self._get_assets_details(asset_keys)\n        query = self._add_assets_wipe_filter_to_query(query, assets_details, asset_keys)\n\n        if after_cursor:\n            query = query.where(SqlEventLogStorageTable.c.id > after_cursor)\n\n        with self.index_connection() as conn:\n            results = conn.execute(query).fetchall()\n\n        materialization_count_by_partition: Dict[AssetKey, Dict[str, int]] = {\n            asset_key: {} for asset_key in asset_keys\n        }\n        for row in results:\n            asset_key = AssetKey.from_db_string(cast(Optional[str], row[0]))\n            if asset_key:\n                materialization_count_by_partition[asset_key][cast(str, row[1])] = cast(int, row[2])\n\n        return materialization_count_by_partition\n\n    def _latest_event_ids_by_partition_subquery(\n        self,\n        asset_key: AssetKey,\n        event_types: Sequence[DagsterEventType],\n        asset_partitions: Optional[Sequence[str]] = None,\n        before_cursor: Optional[int] = None,\n        after_cursor: Optional[int] = None,\n    ):\n        """Subquery for locating the latest event ids by partition for a given asset key and set\n        of event types.\n        """\n        query = db_select(\n            [\n                SqlEventLogStorageTable.c.dagster_event_type,\n                SqlEventLogStorageTable.c.partition,\n                db.func.max(SqlEventLogStorageTable.c.id).label("id"),\n            ]\n        ).where(\n            db.and_(\n                SqlEventLogStorageTable.c.asset_key == asset_key.to_string(),\n                SqlEventLogStorageTable.c.partition != None,  # noqa: E711\n                SqlEventLogStorageTable.c.dagster_event_type.in_(\n                    [event_type.value for event_type in event_types]\n                ),\n            )\n        )\n        if asset_partitions is not None:\n            query = query.where(SqlEventLogStorageTable.c.partition.in_(asset_partitions))\n        if before_cursor is not None:\n            query = query.where(SqlEventLogStorageTable.c.id < before_cursor)\n        if after_cursor is not None:\n            query = query.where(SqlEventLogStorageTable.c.id > after_cursor)\n\n        latest_event_ids_subquery = query.group_by(\n            SqlEventLogStorageTable.c.dagster_event_type, SqlEventLogStorageTable.c.partition\n        )\n\n        assets_details = self._get_assets_details([asset_key])\n        return db_subquery(\n            self._add_assets_wipe_filter_to_query(\n                latest_event_ids_subquery, assets_details, [asset_key]\n            ),\n            "latest_event_ids_by_partition_subquery",\n        )\n\n    def get_latest_storage_id_by_partition(\n        self, asset_key: AssetKey, event_type: DagsterEventType\n    ) -> Mapping[str, int]:\n        """Fetch the latest materialization storage id for each partition for a given asset key.\n\n        Returns a mapping of partition to storage id.\n        """\n        check.inst_param(asset_key, "asset_key", AssetKey)\n\n        latest_event_ids_by_partition_subquery = self._latest_event_ids_by_partition_subquery(\n            asset_key, [event_type]\n        )\n        latest_event_ids_by_partition = db_select(\n            [\n                latest_event_ids_by_partition_subquery.c.partition,\n                latest_event_ids_by_partition_subquery.c.id,\n            ]\n        )\n\n        with self.index_connection() as conn:\n            rows = conn.execute(latest_event_ids_by_partition).fetchall()\n\n        latest_materialization_storage_id_by_partition: Dict[str, int] = {}\n        for row in rows:\n            
latest_materialization_storage_id_by_partition[cast(str, row[0])] = cast(int, row[1])\n        return latest_materialization_storage_id_by_partition\n\n    def get_latest_tags_by_partition(\n        self,\n        asset_key: AssetKey,\n        event_type: DagsterEventType,\n        tag_keys: Sequence[str],\n        asset_partitions: Optional[Sequence[str]] = None,\n        before_cursor: Optional[int] = None,\n        after_cursor: Optional[int] = None,\n    ) -> Mapping[str, Mapping[str, str]]:\n        check.inst_param(asset_key, "asset_key", AssetKey)\n        check.inst_param(event_type, "event_type", DagsterEventType)\n        check.sequence_param(tag_keys, "tag_keys", of_type=str)\n        check.opt_nullable_sequence_param(asset_partitions, "asset_partitions", of_type=str)\n        check.opt_int_param(before_cursor, "before_cursor")\n        check.opt_int_param(after_cursor, "after_cursor")\n\n        latest_event_ids_subquery = self._latest_event_ids_by_partition_subquery(\n            asset_key=asset_key,\n            event_types=[event_type],\n            asset_partitions=asset_partitions,\n            before_cursor=before_cursor,\n            after_cursor=after_cursor,\n        )\n\n        latest_tags_by_partition_query = (\n            db_select(\n                [\n                    latest_event_ids_subquery.c.partition,\n                    AssetEventTagsTable.c.key,\n                    AssetEventTagsTable.c.value,\n                ]\n            )\n            .select_from(\n                latest_event_ids_subquery.join(\n                    AssetEventTagsTable,\n                    AssetEventTagsTable.c.event_id == latest_event_ids_subquery.c.id,\n                )\n            )\n            .where(AssetEventTagsTable.c.key.in_(tag_keys))\n        )\n\n        latest_tags_by_partition: Dict[str, Dict[str, str]] = defaultdict(dict)\n        with self.index_connection() as conn:\n            rows = conn.execute(latest_tags_by_partition_query).fetchall()\n\n        for row in rows:\n            latest_tags_by_partition[cast(str, row[0])][cast(str, row[1])] = cast(str, row[2])\n\n        # convert defaultdict to dict\n        return dict(latest_tags_by_partition)\n\n    def get_latest_asset_partition_materialization_attempts_without_materializations(\n        self, asset_key: AssetKey\n    ) -> Mapping[str, Tuple[str, int]]:\n        """Fetch the latest materialization and materialization planned events for each partition of the given asset.\n        Return the partitions that have a materialization planned event but no matching (same run) materialization event.\n        These materializations could be in progress, or they could have failed. 
A separate query checking the run status\n is required to know.\n\n Returns a mapping of partition to [run id, event id].\n """\n check.inst_param(asset_key, "asset_key", AssetKey)\n\n latest_event_ids_subquery = self._latest_event_ids_by_partition_subquery(\n asset_key,\n [\n DagsterEventType.ASSET_MATERIALIZATION,\n DagsterEventType.ASSET_MATERIALIZATION_PLANNED,\n ],\n )\n\n latest_events_subquery = db_subquery(\n db_select(\n [\n SqlEventLogStorageTable.c.dagster_event_type,\n SqlEventLogStorageTable.c.partition,\n SqlEventLogStorageTable.c.run_id,\n SqlEventLogStorageTable.c.id,\n ]\n ).select_from(\n latest_event_ids_subquery.join(\n SqlEventLogStorageTable,\n SqlEventLogStorageTable.c.id == latest_event_ids_subquery.c.id,\n ),\n ),\n "latest_events_subquery",\n )\n\n materialization_planned_events = db_select(\n [\n latest_events_subquery.c.dagster_event_type,\n latest_events_subquery.c.partition,\n latest_events_subquery.c.run_id,\n latest_events_subquery.c.id,\n ]\n ).where(\n latest_events_subquery.c.dagster_event_type\n == DagsterEventType.ASSET_MATERIALIZATION_PLANNED.value\n )\n\n materialization_events = db_select(\n [\n latest_events_subquery.c.dagster_event_type,\n latest_events_subquery.c.partition,\n latest_events_subquery.c.run_id,\n ]\n ).where(\n latest_events_subquery.c.dagster_event_type\n == DagsterEventType.ASSET_MATERIALIZATION.value\n )\n\n with self.index_connection() as conn:\n materialization_planned_rows = db_fetch_mappings(conn, materialization_planned_events)\n materialization_rows = db_fetch_mappings(conn, materialization_events)\n\n materialization_planned_rows_by_partition = {\n cast(str, row["partition"]): (cast(str, row["run_id"]), cast(int, row["id"]))\n for row in materialization_planned_rows\n }\n for row in materialization_rows:\n if (\n row["partition"] in materialization_planned_rows_by_partition\n and materialization_planned_rows_by_partition[cast(str, row["partition"])][0]\n == row["run_id"]\n ):\n materialization_planned_rows_by_partition.pop(cast(str, row["partition"]))\n\n return materialization_planned_rows_by_partition\n\n def _check_partitions_table(self) -> None:\n # Guards against cases where the user is not running the latest migration for\n # partitions storage. Should be updated when the partitions storage schema changes.\n if not self.has_table("dynamic_partitions"):\n raise DagsterInvalidInvocationError(\n "Using dynamic partitions definitions requires the dynamic partitions table, which"\n " currently does not exist. 
Add this table by running `dagster"\n " instance migrate`."\n )\n\n def get_dynamic_partitions(self, partitions_def_name: str) -> Sequence[str]:\n """Get the list of partition keys for a partition definition."""\n self._check_partitions_table()\n columns = [\n DynamicPartitionsTable.c.partitions_def_name,\n DynamicPartitionsTable.c.partition,\n ]\n query = (\n db_select(columns)\n .where(DynamicPartitionsTable.c.partitions_def_name == partitions_def_name)\n .order_by(DynamicPartitionsTable.c.id)\n )\n with self.index_connection() as conn:\n rows = conn.execute(query).fetchall()\n\n return [cast(str, row[1]) for row in rows]\n\n def has_dynamic_partition(self, partitions_def_name: str, partition_key: str) -> bool:\n self._check_partitions_table()\n query = (\n db_select([DynamicPartitionsTable.c.partition])\n .where(\n db.and_(\n DynamicPartitionsTable.c.partitions_def_name == partitions_def_name,\n DynamicPartitionsTable.c.partition == partition_key,\n )\n )\n .limit(1)\n )\n with self.index_connection() as conn:\n results = conn.execute(query).fetchall()\n\n return len(results) > 0\n\n def add_dynamic_partitions(\n self, partitions_def_name: str, partition_keys: Sequence[str]\n ) -> None:\n self._check_partitions_table()\n with self.index_connection() as conn:\n existing_rows = conn.execute(\n db_select([DynamicPartitionsTable.c.partition]).where(\n db.and_(\n DynamicPartitionsTable.c.partition.in_(partition_keys),\n DynamicPartitionsTable.c.partitions_def_name == partitions_def_name,\n )\n )\n ).fetchall()\n existing_keys = set([row[0] for row in existing_rows])\n new_keys = [\n partition_key\n for partition_key in partition_keys\n if partition_key not in existing_keys\n ]\n\n if new_keys:\n conn.execute(\n DynamicPartitionsTable.insert(),\n [\n dict(partitions_def_name=partitions_def_name, partition=partition_key)\n for partition_key in new_keys\n ],\n )\n\n def delete_dynamic_partition(self, partitions_def_name: str, partition_key: str) -> None:\n self._check_partitions_table()\n with self.index_connection() as conn:\n conn.execute(\n DynamicPartitionsTable.delete().where(\n db.and_(\n DynamicPartitionsTable.c.partitions_def_name == partitions_def_name,\n DynamicPartitionsTable.c.partition == partition_key,\n )\n )\n )\n\n @property\n def supports_global_concurrency_limits(self) -> bool:\n return self.has_table(ConcurrencySlotsTable.name)\n\n def set_concurrency_slots(self, concurrency_key: str, num: int) -> None:\n """Allocate a set of concurrency slots.\n\n Args:\n concurrency_key (str): The key to allocate the slots for.\n num (int): The number of slots to allocate.\n """\n if num > MAX_CONCURRENCY_SLOTS:\n raise DagsterInvalidInvocationError(\n f"Cannot have more than {MAX_CONCURRENCY_SLOTS} slots per concurrency key."\n )\n if num < 0:\n raise DagsterInvalidInvocationError("Cannot have a negative number of slots.")\n\n keys_to_assign = None\n with self.index_connection() as conn:\n count_row = conn.execute(\n db_select([db.func.count()])\n .select_from(ConcurrencySlotsTable)\n .where(\n db.and_(\n ConcurrencySlotsTable.c.concurrency_key == concurrency_key,\n ConcurrencySlotsTable.c.deleted == False, # noqa: E712\n )\n )\n ).fetchone()\n existing = cast(int, count_row[0]) if count_row else 0\n\n if existing > num:\n # need to delete some slots, favoring ones where the slot is unallocated\n rows = conn.execute(\n db_select([ConcurrencySlotsTable.c.id])\n .select_from(ConcurrencySlotsTable)\n .where(\n db.and_(\n ConcurrencySlotsTable.c.concurrency_key == concurrency_key,\n 
ConcurrencySlotsTable.c.deleted == False, # noqa: E712\n )\n )\n .order_by(\n db_case([(ConcurrencySlotsTable.c.run_id.is_(None), 1)], else_=0).desc(),\n ConcurrencySlotsTable.c.id.desc(),\n )\n .limit(existing - num)\n ).fetchall()\n\n if rows:\n # mark rows as deleted\n conn.execute(\n ConcurrencySlotsTable.update()\n .values(deleted=True)\n .where(ConcurrencySlotsTable.c.id.in_([row[0] for row in rows]))\n )\n\n # actually delete rows that are marked as deleted and are not claimed... the rest\n # will be deleted when the slots are released by the free_concurrency_slots\n conn.execute(\n ConcurrencySlotsTable.delete().where(\n db.and_(\n ConcurrencySlotsTable.c.deleted == True, # noqa: E712\n ConcurrencySlotsTable.c.run_id == None, # noqa: E711\n )\n )\n )\n elif num > existing:\n # need to add some slots\n rows = [\n {\n "concurrency_key": concurrency_key,\n "run_id": None,\n "step_key": None,\n "deleted": False,\n }\n for _ in range(existing, num)\n ]\n conn.execute(ConcurrencySlotsTable.insert().values(rows))\n keys_to_assign = [concurrency_key for _ in range(existing, num)]\n\n if keys_to_assign:\n # we've added some slots... if there are any pending steps, we can assign them now or\n # they will be unutilized until free_concurrency_slots is called\n self.assign_pending_steps(keys_to_assign)\n\n def has_unassigned_slots(self, concurrency_key: str) -> bool:\n with self.index_connection() as conn:\n pending_row = conn.execute(\n db_select([db.func.count()])\n .select_from(PendingStepsTable)\n .where(\n db.and_(\n PendingStepsTable.c.concurrency_key == concurrency_key,\n PendingStepsTable.c.assigned_timestamp != None, # noqa: E711\n )\n )\n ).fetchone()\n slots = conn.execute(\n db_select([db.func.count()])\n .select_from(ConcurrencySlotsTable)\n .where(\n db.and_(\n ConcurrencySlotsTable.c.concurrency_key == concurrency_key,\n ConcurrencySlotsTable.c.deleted == False, # noqa: E712\n )\n )\n ).fetchone()\n pending_count = cast(int, pending_row[0]) if pending_row else 0\n slots_count = cast(int, slots[0]) if slots else 0\n return slots_count > pending_count\n\n def check_concurrency_claim(\n self, concurrency_key: str, run_id: str, step_key: str\n ) -> ConcurrencyClaimStatus:\n with self.index_connection() as conn:\n pending_row = conn.execute(\n db_select(\n [\n PendingStepsTable.c.assigned_timestamp,\n PendingStepsTable.c.priority,\n PendingStepsTable.c.create_timestamp,\n ]\n ).where(\n db.and_(\n PendingStepsTable.c.run_id == run_id,\n PendingStepsTable.c.step_key == step_key,\n PendingStepsTable.c.concurrency_key == concurrency_key,\n )\n )\n ).fetchone()\n\n if not pending_row:\n # no pending step pending_row exists, the slot is blocked and the enqueued timestamp is None\n return ConcurrencyClaimStatus(\n concurrency_key=concurrency_key,\n slot_status=ConcurrencySlotStatus.BLOCKED,\n priority=None,\n assigned_timestamp=None,\n enqueued_timestamp=None,\n )\n\n priority = cast(int, pending_row[1]) if pending_row[1] else None\n assigned_timestamp = cast(datetime, pending_row[0]) if pending_row[0] else None\n create_timestamp = cast(datetime, pending_row[2]) if pending_row[2] else None\n if assigned_timestamp is None:\n return ConcurrencyClaimStatus(\n concurrency_key=concurrency_key,\n slot_status=ConcurrencySlotStatus.BLOCKED,\n priority=priority,\n assigned_timestamp=None,\n enqueued_timestamp=create_timestamp,\n )\n\n # pending step is assigned, check to see if it's been claimed\n slot_row = conn.execute(\n db_select([db.func.count()]).where(\n db.and_(\n 
ConcurrencySlotsTable.c.concurrency_key == concurrency_key,\n ConcurrencySlotsTable.c.run_id == run_id,\n ConcurrencySlotsTable.c.step_key == step_key,\n )\n )\n ).fetchone()\n\n return ConcurrencyClaimStatus(\n concurrency_key=concurrency_key,\n slot_status=(\n ConcurrencySlotStatus.CLAIMED\n if slot_row and slot_row[0]\n else ConcurrencySlotStatus.BLOCKED\n ),\n priority=priority,\n assigned_timestamp=assigned_timestamp,\n enqueued_timestamp=create_timestamp,\n )\n\n def can_claim_from_pending(self, concurrency_key: str, run_id: str, step_key: str):\n with self.index_connection() as conn:\n row = conn.execute(\n db_select([PendingStepsTable.c.assigned_timestamp]).where(\n db.and_(\n PendingStepsTable.c.run_id == run_id,\n PendingStepsTable.c.step_key == step_key,\n PendingStepsTable.c.concurrency_key == concurrency_key,\n )\n )\n ).fetchone()\n return row and row[0] is not None\n\n def has_pending_step(self, concurrency_key: str, run_id: str, step_key: str):\n with self.index_connection() as conn:\n row = conn.execute(\n db_select([db.func.count()])\n .select_from(PendingStepsTable)\n .where(\n db.and_(\n PendingStepsTable.c.concurrency_key == concurrency_key,\n PendingStepsTable.c.run_id == run_id,\n PendingStepsTable.c.step_key == step_key,\n )\n )\n ).fetchone()\n return row and cast(int, row[0]) > 0\n\n def assign_pending_steps(self, concurrency_keys: Sequence[str]):\n if not concurrency_keys:\n return\n\n with self.index_connection() as conn:\n for key in concurrency_keys:\n row = conn.execute(\n db_select([PendingStepsTable.c.id])\n .where(\n db.and_(\n PendingStepsTable.c.concurrency_key == key,\n PendingStepsTable.c.assigned_timestamp == None, # noqa: E711\n )\n )\n .order_by(\n PendingStepsTable.c.priority.desc(),\n PendingStepsTable.c.create_timestamp.asc(),\n )\n .limit(1)\n ).fetchone()\n if row:\n conn.execute(\n PendingStepsTable.update()\n .where(PendingStepsTable.c.id == row[0])\n .values(assigned_timestamp=db.func.now())\n )\n\n def add_pending_step(\n self,\n concurrency_key: str,\n run_id: str,\n step_key: str,\n priority: Optional[int] = None,\n should_assign: bool = False,\n ):\n with self.index_connection() as conn:\n try:\n conn.execute(\n PendingStepsTable.insert().values(\n [\n dict(\n run_id=run_id,\n step_key=step_key,\n concurrency_key=concurrency_key,\n priority=priority or 0,\n assigned_timestamp=db.func.now() if should_assign else None,\n )\n ]\n )\n )\n except db_exc.IntegrityError:\n # do nothing\n pass\n\n def _remove_pending_steps(self, run_id: str, step_key: Optional[str] = None):\n query = PendingStepsTable.delete().where(PendingStepsTable.c.run_id == run_id)\n if step_key:\n query = query.where(PendingStepsTable.c.step_key == step_key)\n with self.index_connection() as conn:\n conn.execute(query)\n\n def claim_concurrency_slot(\n self, concurrency_key: str, run_id: str, step_key: str, priority: Optional[int] = None\n ) -> ConcurrencyClaimStatus:\n """Claim concurrency slot for step.\n\n Args:\n concurrency_keys (str): The concurrency key to claim.\n run_id (str): The run id to claim for.\n step_key (str): The step key to claim for.\n """\n # first, register the step by adding to pending queue\n if not self.has_pending_step(\n concurrency_key=concurrency_key, run_id=run_id, step_key=step_key\n ):\n has_unassigned_slots = self.has_unassigned_slots(concurrency_key)\n self.add_pending_step(\n concurrency_key=concurrency_key,\n run_id=run_id,\n step_key=step_key,\n priority=priority,\n should_assign=has_unassigned_slots,\n )\n\n # if the step is not 
assigned (i.e. has not been popped from queue), block the claim\n claim_status = self.check_concurrency_claim(\n concurrency_key=concurrency_key, run_id=run_id, step_key=step_key\n )\n if claim_status.is_claimed or not claim_status.is_assigned:\n return claim_status\n\n # attempt to claim a concurrency slot... this should generally work because we only assign\n # based on the number of unclaimed slots, but this should act as a safeguard, using the slot\n # rows as a semaphore\n slot_status = self._claim_concurrency_slot(\n concurrency_key=concurrency_key, run_id=run_id, step_key=step_key\n )\n return claim_status.with_slot_status(slot_status)\n\n def _claim_concurrency_slot(\n self, concurrency_key: str, run_id: str, step_key: str\n ) -> ConcurrencySlotStatus:\n """Claim a concurrency slot for the step. Helper method that is called for steps that are\n popped off the priority queue.\n\n Args:\n concurrency_key (str): The concurrency key to claim.\n run_id (str): The run id to claim a slot for.\n step_key (str): The step key to claim a slot for.\n """\n with self.index_connection() as conn:\n result = conn.execute(\n db_select([ConcurrencySlotsTable.c.id])\n .select_from(ConcurrencySlotsTable)\n .where(\n db.and_(\n ConcurrencySlotsTable.c.concurrency_key == concurrency_key,\n ConcurrencySlotsTable.c.step_key == None, # noqa: E711\n ConcurrencySlotsTable.c.deleted == False, # noqa: E712\n )\n )\n .with_for_update(skip_locked=True)\n .limit(1)\n ).fetchone()\n if not result or not result[0]:\n return ConcurrencySlotStatus.BLOCKED\n if not conn.execute(\n ConcurrencySlotsTable.update()\n .values(run_id=run_id, step_key=step_key)\n .where(ConcurrencySlotsTable.c.id == result[0])\n ).rowcount:\n return ConcurrencySlotStatus.BLOCKED\n\n return ConcurrencySlotStatus.CLAIMED\n\n def get_concurrency_keys(self) -> Set[str]:\n """Get the set of concurrency limited keys."""\n with self.index_connection() as conn:\n rows = conn.execute(\n db_select([ConcurrencySlotsTable.c.concurrency_key])\n .select_from(ConcurrencySlotsTable)\n .where(ConcurrencySlotsTable.c.deleted == False) # noqa: E712\n .distinct()\n ).fetchall()\n return {cast(str, row[0]) for row in rows}\n\n def get_concurrency_info(self, concurrency_key: str) -> ConcurrencyKeyInfo:\n """Get the list of concurrency slots for a given concurrency key.\n\n Args:\n concurrency_key (str): The concurrency key to get the slots for.\n\n Returns:\n List[Tuple[str, int]]: A list of tuples of run_id and the number of slots it is\n occupying for the given concurrency key.\n """\n with self.index_connection() as conn:\n slot_query = (\n db_select(\n [\n ConcurrencySlotsTable.c.run_id,\n ConcurrencySlotsTable.c.deleted,\n db.func.count().label("count"),\n ]\n )\n .select_from(ConcurrencySlotsTable)\n .where(ConcurrencySlotsTable.c.concurrency_key == concurrency_key)\n .group_by(ConcurrencySlotsTable.c.run_id, ConcurrencySlotsTable.c.deleted)\n )\n slot_rows = db_fetch_mappings(conn, slot_query)\n pending_query = (\n db_select(\n [\n PendingStepsTable.c.run_id,\n db_case(\n [(PendingStepsTable.c.assigned_timestamp.is_(None), False)],\n else_=True,\n ).label("is_assigned"),\n db.func.count().label("count"),\n ]\n )\n .select_from(PendingStepsTable)\n .where(PendingStepsTable.c.concurrency_key == concurrency_key)\n .group_by(PendingStepsTable.c.run_id, "is_assigned")\n )\n pending_rows = db_fetch_mappings(conn, pending_query)\n\n return ConcurrencyKeyInfo(\n concurrency_key=concurrency_key,\n slot_count=sum(\n [\n cast(int, slot_row["count"])\n for slot_row in 
slot_rows\n if not slot_row["deleted"]\n ]\n ),\n active_slot_count=sum(\n [cast(int, slot_row["count"]) for slot_row in slot_rows if slot_row["run_id"]]\n ),\n active_run_ids={\n cast(str, slot_row["run_id"]) for slot_row in slot_rows if slot_row["run_id"]\n },\n pending_step_count=sum(\n [cast(int, row["count"]) for row in pending_rows if not row["is_assigned"]]\n ),\n pending_run_ids={\n cast(str, row["run_id"]) for row in pending_rows if not row["is_assigned"]\n },\n assigned_step_count=sum(\n [cast(int, row["count"]) for row in pending_rows if row["is_assigned"]]\n ),\n assigned_run_ids={\n cast(str, row["run_id"]) for row in pending_rows if row["is_assigned"]\n },\n )\n\n def get_concurrency_run_ids(self) -> Set[str]:\n with self.index_connection() as conn:\n rows = conn.execute(db_select([PendingStepsTable.c.run_id]).distinct()).fetchall()\n return set([cast(str, row[0]) for row in rows])\n\n def free_concurrency_slots_for_run(self, run_id: str) -> None:\n freed_concurrency_keys = self._free_concurrency_slots(run_id=run_id)\n self._remove_pending_steps(run_id=run_id)\n if freed_concurrency_keys:\n # assign any pending steps that can now claim a slot\n self.assign_pending_steps(freed_concurrency_keys)\n\n def free_concurrency_slot_for_step(self, run_id: str, step_key: str) -> None:\n freed_concurrency_keys = self._free_concurrency_slots(run_id=run_id, step_key=step_key)\n self._remove_pending_steps(run_id=run_id, step_key=step_key)\n if freed_concurrency_keys:\n # assign any pending steps that can now claim a slot\n self.assign_pending_steps(freed_concurrency_keys)\n\n def _free_concurrency_slots(self, run_id: str, step_key: Optional[str] = None) -> Sequence[str]:\n """Frees concurrency slots for a given run/step.\n\n Args:\n run_id (str): The run id to free the slots for.\n step_key (Optional[str]): The step key to free the slots for. If not provided, all the\n slots for all the steps of the run will be freed.\n """\n with self.index_connection() as conn:\n # first delete any rows that apply and are marked as deleted. 
This happens when the\n # configured number of slots has been reduced, and some of the pruned slots included\n # ones that were already allocated to the run/step\n delete_query = ConcurrencySlotsTable.delete().where(\n db.and_(\n ConcurrencySlotsTable.c.run_id == run_id,\n ConcurrencySlotsTable.c.deleted == True, # noqa: E712\n )\n )\n if step_key:\n delete_query = delete_query.where(ConcurrencySlotsTable.c.step_key == step_key)\n conn.execute(delete_query)\n\n # next, fetch the slots to free up, while grabbing the concurrency keys so that we can\n # allocate any pending steps from the queue for the freed slots, if necessary\n select_query = (\n db_select([ConcurrencySlotsTable.c.id, ConcurrencySlotsTable.c.concurrency_key])\n .select_from(ConcurrencySlotsTable)\n .where(ConcurrencySlotsTable.c.run_id == run_id)\n .with_for_update(skip_locked=True)\n )\n if step_key:\n select_query = select_query.where(ConcurrencySlotsTable.c.step_key == step_key)\n rows = conn.execute(select_query).fetchall()\n if not rows:\n return []\n\n # now, actually free the slots\n conn.execute(\n ConcurrencySlotsTable.update()\n .values(run_id=None, step_key=None)\n .where(\n db.and_(\n ConcurrencySlotsTable.c.id.in_([row[0] for row in rows]),\n )\n )\n )\n\n # return the concurrency keys for the freed slots\n return [cast(str, row[1]) for row in rows]\n\n def store_asset_check_event(self, event: EventLogEntry, event_id: Optional[int]) -> None:\n check.inst_param(event, "event", EventLogEntry)\n check.opt_int_param(event_id, "event_id")\n\n check.invariant(\n self.supports_asset_checks,\n "Asset checks require a database schema migration. Run `dagster instance migrate`.",\n )\n\n if event.dagster_event_type == DagsterEventType.ASSET_CHECK_EVALUATION_PLANNED:\n self._store_asset_check_evaluation_planned(event, event_id)\n if event.dagster_event_type == DagsterEventType.ASSET_CHECK_EVALUATION:\n if event.run_id == "" or event.run_id is None:\n self._store_runless_asset_check_evaluation(event, event_id)\n else:\n self._update_asset_check_evaluation(event, event_id)\n\n def _store_asset_check_evaluation_planned(\n self, event: EventLogEntry, event_id: Optional[int]\n ) -> None:\n planned = cast(\n AssetCheckEvaluationPlanned, check.not_none(event.dagster_event).event_specific_data\n )\n with self.index_connection() as conn:\n conn.execute(\n AssetCheckExecutionsTable.insert().values(\n asset_key=planned.asset_key.to_string(),\n check_name=planned.check_name,\n run_id=event.run_id,\n execution_status=AssetCheckExecutionRecordStatus.PLANNED.value,\n evaluation_event=serialize_value(event),\n evaluation_event_timestamp=datetime.utcfromtimestamp(event.timestamp),\n )\n )\n\n def _store_runless_asset_check_evaluation(\n self, event: EventLogEntry, event_id: Optional[int]\n ) -> None:\n evaluation = cast(\n AssetCheckEvaluation, check.not_none(event.dagster_event).event_specific_data\n )\n with self.index_connection() as conn:\n conn.execute(\n AssetCheckExecutionsTable.insert().values(\n asset_key=evaluation.asset_key.to_string(),\n check_name=evaluation.check_name,\n run_id=event.run_id,\n execution_status=(\n AssetCheckExecutionRecordStatus.SUCCEEDED.value\n if evaluation.passed\n else AssetCheckExecutionRecordStatus.FAILED.value\n ),\n evaluation_event=serialize_value(event),\n evaluation_event_timestamp=datetime.utcfromtimestamp(event.timestamp),\n evaluation_event_storage_id=event_id,\n materialization_event_storage_id=(\n evaluation.target_materialization_data.storage_id\n if evaluation.target_materialization_data\n 
else None\n ),\n )\n )\n\n def _update_asset_check_evaluation(self, event: EventLogEntry, event_id: Optional[int]) -> None:\n evaluation = cast(\n AssetCheckEvaluation, check.not_none(event.dagster_event).event_specific_data\n )\n with self.index_connection() as conn:\n rows_updated = conn.execute(\n AssetCheckExecutionsTable.update()\n .where(\n # (asset_key, check_name, run_id) uniquely identifies the row created for the planned event\n db.and_(\n AssetCheckExecutionsTable.c.asset_key == evaluation.asset_key.to_string(),\n AssetCheckExecutionsTable.c.check_name == evaluation.check_name,\n AssetCheckExecutionsTable.c.run_id == event.run_id,\n )\n )\n .values(\n execution_status=(\n AssetCheckExecutionRecordStatus.SUCCEEDED.value\n if evaluation.passed\n else AssetCheckExecutionRecordStatus.FAILED.value\n ),\n evaluation_event=serialize_value(event),\n evaluation_event_timestamp=datetime.utcfromtimestamp(event.timestamp),\n evaluation_event_storage_id=event_id,\n materialization_event_storage_id=(\n evaluation.target_materialization_data.storage_id\n if evaluation.target_materialization_data\n else None\n ),\n )\n ).rowcount\n if rows_updated != 1:\n raise DagsterInvariantViolationError(\n "Expected to update one row for asset check evaluation, but updated"\n f" {rows_updated}."\n )\n\n def get_asset_check_execution_history(\n self,\n check_key: AssetCheckKey,\n limit: int,\n cursor: Optional[int] = None,\n ) -> Sequence[AssetCheckExecutionRecord]:\n check.inst_param(check_key, "key", AssetCheckKey)\n check.int_param(limit, "limit")\n check.opt_int_param(cursor, "cursor")\n\n query = (\n db_select(\n [\n AssetCheckExecutionsTable.c.id,\n AssetCheckExecutionsTable.c.run_id,\n AssetCheckExecutionsTable.c.execution_status,\n AssetCheckExecutionsTable.c.evaluation_event,\n AssetCheckExecutionsTable.c.create_timestamp,\n ]\n )\n .where(\n db.and_(\n AssetCheckExecutionsTable.c.asset_key == check_key.asset_key.to_string(),\n AssetCheckExecutionsTable.c.check_name == check_key.name,\n )\n )\n .order_by(AssetCheckExecutionsTable.c.id.desc())\n ).limit(limit)\n\n if cursor:\n query = query.where(AssetCheckExecutionsTable.c.id < cursor)\n\n with self.index_connection() as conn:\n rows = db_fetch_mappings(conn, query)\n\n return [AssetCheckExecutionRecord.from_db_row(row) for row in rows]\n\n def get_latest_asset_check_execution_by_key(\n self, check_keys: Sequence[AssetCheckKey]\n ) -> Mapping[AssetCheckKey, AssetCheckExecutionRecord]:\n if not check_keys:\n return {}\n\n latest_ids_subquery = db_subquery(\n db_select(\n [\n db.func.max(AssetCheckExecutionsTable.c.id).label("id"),\n ]\n )\n .where(\n db.and_(\n AssetCheckExecutionsTable.c.asset_key.in_(\n [key.asset_key.to_string() for key in check_keys]\n ),\n AssetCheckExecutionsTable.c.check_name.in_([key.name for key in check_keys]),\n )\n )\n .group_by(\n AssetCheckExecutionsTable.c.asset_key,\n AssetCheckExecutionsTable.c.check_name,\n )\n )\n\n query = db_select(\n [\n AssetCheckExecutionsTable.c.id,\n AssetCheckExecutionsTable.c.asset_key,\n AssetCheckExecutionsTable.c.check_name,\n AssetCheckExecutionsTable.c.run_id,\n AssetCheckExecutionsTable.c.execution_status,\n AssetCheckExecutionsTable.c.evaluation_event,\n AssetCheckExecutionsTable.c.create_timestamp,\n ]\n ).select_from(\n AssetCheckExecutionsTable.join(\n latest_ids_subquery,\n db.and_(\n AssetCheckExecutionsTable.c.id == latest_ids_subquery.c.id,\n ),\n )\n )\n\n with self.index_connection() as conn:\n rows = db_fetch_mappings(conn, query)\n\n return {\n AssetCheckKey(\n 
asset_key=check.not_none(AssetKey.from_db_string(cast(str, row["asset_key"]))),\n name=cast(str, row["check_name"]),\n ): AssetCheckExecutionRecord.from_db_row(row)\n for row in rows\n }\n\n @property\n def supports_asset_checks(self):\n return self.has_table(AssetCheckExecutionsTable.name)
\n\n\ndef _get_from_row(row: SqlAlchemyRow, column: str) -> object:\n """Utility function for extracting a column from a sqlalchemy row proxy, since '_asdict' is not\n supported in sqlalchemy 1.3.\n """\n if column not in row.keys():\n return None\n return row[column]\n
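The methods above implement the storage side of global op/step concurrency limits: pending steps are queued, assigned in priority order, and slot rows act as a semaphore. Below is a minimal, hypothetical sketch of that claim/release lifecycle from a caller's point of view; `storage` is assumed to be an event log storage instance that supports global concurrency limits, and the concurrency key, run id, and step key are made up for illustration.

def run_step_with_concurrency_limit(storage, run_id: str, step_key: str) -> bool:
    # Illustrative sketch only: allow at most two steps to hold the "database" key at once.
    storage.set_concurrency_slots("database", 2)

    # Registers the step in the pending queue and tries to claim a slot; the claim stays
    # blocked until the step has been assigned from the queue and a free slot row is taken.
    claim = storage.claim_concurrency_slot("database", run_id=run_id, step_key=step_key)
    if not claim.is_claimed:
        return False  # the caller would retry later

    try:
        ...  # do the rate-limited work here
        return True
    finally:
        # Release the slot so the next assigned pending step can claim it.
        storage.free_concurrency_slot_for_step(run_id, step_key)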
", "current_page_name": "_modules/dagster/_core/storage/event_log/sql_event_log", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.event_log.sql_event_log"}, "sqlite": {"consolidated_sqlite_event_log": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.event_log.sqlite.consolidated_sqlite_event_log

\nimport logging\nimport os\nfrom collections import defaultdict\nfrom contextlib import contextmanager\nfrom typing import Any, Mapping, Optional\n\nimport sqlalchemy as db\nfrom sqlalchemy.pool import NullPool\nfrom typing_extensions import Self\nfrom watchdog.events import PatternMatchingEventHandler\nfrom watchdog.observers import Observer\n\nimport dagster._check as check\nfrom dagster._config import StringSource\nfrom dagster._core.storage.dagster_run import DagsterRunStatus\nfrom dagster._core.storage.event_log.base import EventLogCursor\nfrom dagster._core.storage.sql import (\n    check_alembic_revision,\n    create_engine,\n    get_alembic_config,\n    run_alembic_upgrade,\n    stamp_alembic_rev,\n)\nfrom dagster._core.storage.sqlite import create_db_conn_string\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData\nfrom dagster._utils import mkdir_p\n\nfrom ..schema import SqlEventLogStorageMetadata\nfrom ..sql_event_log import SqlDbConnection, SqlEventLogStorage\n\nSQLITE_EVENT_LOG_FILENAME = "event_log"\n\n\n
[docs]class ConsolidatedSqliteEventLogStorage(SqlEventLogStorage, ConfigurableClass):\n    """SQLite-backed consolidated event log storage intended for test cases only.\n\n    Users should not directly instantiate this class; it is instantiated by internal machinery when\n    ``dagster-webserver`` and ``dagster-graphql`` load, based on the values in the ``dagster.yaml`` file in\n    ``$DAGSTER_HOME``. Configuration of this class should be done by setting values in that file.\n\n    To explicitly specify the consolidated SQLite for event log storage, you can add a block such as\n    the following to your ``dagster.yaml``:\n\n    .. code-block:: YAML\n\n        event_log_storage:\n          module: dagster._core.storage.event_log\n          class: ConsolidatedSqliteEventLogStorage\n          config:\n            base_dir: /path/to/dir\n\n    The ``base_dir`` param tells the event log storage where on disk to store the database.\n    """\n\n    def __init__(self, base_dir, inst_data: Optional[ConfigurableClassData] = None):\n        self._base_dir = check.str_param(base_dir, "base_dir")\n        self._conn_string = create_db_conn_string(base_dir, SQLITE_EVENT_LOG_FILENAME)\n        self._secondary_index_cache = {}\n        self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n        self._watchers = defaultdict(dict)\n        self._obs = None\n\n        if not os.path.exists(self.get_db_path()):\n            self._init_db()\n\n        super().__init__()\n\n    @property\n    def inst_data(self):\n        return self._inst_data\n\n    @classmethod\n    def config_type(cls):\n        return {"base_dir": StringSource}\n\n    @classmethod\n    def from_config_value(\n        cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any]\n    ) -> Self:\n        return ConsolidatedSqliteEventLogStorage(inst_data=inst_data, **config_value)\n\n    def _init_db(self):\n        mkdir_p(self._base_dir)\n        engine = create_engine(self._conn_string, poolclass=NullPool)\n        alembic_config = get_alembic_config(__file__)\n\n        should_mark_indexes = False\n        with engine.connect() as connection:\n            db_revision, head_revision = check_alembic_revision(alembic_config, connection)\n            if not (db_revision and head_revision):\n                SqlEventLogStorageMetadata.create_all(engine)\n                connection.execute(db.text("PRAGMA journal_mode=WAL;"))\n                stamp_alembic_rev(alembic_config, connection)\n                should_mark_indexes = True\n\n        if should_mark_indexes:\n            # mark all secondary indexes\n            self.reindex_events()\n            self.reindex_assets()\n\n    @contextmanager\n    def _connect(self):\n        engine = create_engine(self._conn_string, poolclass=NullPool)\n        with engine.connect() as conn:\n            with conn.begin():\n                yield conn\n\n    def run_connection(self, run_id: Optional[str]) -> SqlDbConnection:\n        return self._connect()\n\n    def index_connection(self):\n        return self._connect()\n\n    def has_table(self, table_name: str) -> bool:\n        engine = create_engine(self._conn_string, poolclass=NullPool)\n        return bool(engine.dialect.has_table(engine.connect(), table_name))\n\n    def get_db_path(self):\n        return os.path.join(self._base_dir, f"{SQLITE_EVENT_LOG_FILENAME}.db")\n\n    def upgrade(self):\n        alembic_config = get_alembic_config(__file__)\n        with self._connect() as conn:\n            run_alembic_upgrade(alembic_config, conn)\n\n    def has_secondary_index(self, name):\n        if name not in self._secondary_index_cache:\n            self._secondary_index_cache[name] = super(\n                ConsolidatedSqliteEventLogStorage, self\n            ).has_secondary_index(name)\n        return self._secondary_index_cache[name]\n\n    def enable_secondary_index(self, name):\n        super(ConsolidatedSqliteEventLogStorage, self).enable_secondary_index(name)\n        if name in self._secondary_index_cache:\n            del 
self._secondary_index_cache[name]\n\n def watch(self, run_id, cursor, callback):\n if not self._obs:\n self._obs = Observer()\n self._obs.start()\n self._obs.schedule(\n ConsolidatedSqliteEventLogStorageWatchdog(self), self._base_dir, True\n )\n\n self._watchers[run_id][callback] = cursor\n\n @property\n def supports_global_concurrency_limits(self) -> bool:\n return False\n\n def on_modified(self):\n keys = [\n (run_id, callback)\n for run_id, callback_dict in self._watchers.items()\n for callback, _ in callback_dict.items()\n ]\n for run_id, callback in keys:\n cursor = self._watchers[run_id][callback]\n\n # fetch events\n connection = self.get_records_for_run(run_id, cursor)\n\n # update cursor\n if connection.cursor:\n self._watchers[run_id][callback] = connection.cursor\n\n for record in connection.records:\n status = None\n try:\n status = callback(\n record.event_log_entry,\n str(EventLogCursor.from_storage_id(record.storage_id)),\n )\n except Exception:\n logging.exception("Exception in callback for event watch on run %s.", run_id)\n\n if (\n status == DagsterRunStatus.SUCCESS\n or status == DagsterRunStatus.FAILURE\n or status == DagsterRunStatus.CANCELED\n ):\n self.end_watch(run_id, callback)\n\n def end_watch(self, run_id, handler):\n if run_id in self._watchers and handler in self._watchers[run_id]:\n del self._watchers[run_id][handler]\n\n def dispose(self):\n if self._obs:\n self._obs.stop()\n self._obs.join(timeout=15)
\n\n\nclass ConsolidatedSqliteEventLogStorageWatchdog(PatternMatchingEventHandler):\n def __init__(self, event_log_storage, **kwargs):\n self._event_log_storage = check.inst_param(\n event_log_storage, "event_log_storage", ConsolidatedSqliteEventLogStorage\n )\n self._log_path = event_log_storage.get_db_path()\n super(ConsolidatedSqliteEventLogStorageWatchdog, self).__init__(\n patterns=[self._log_path], **kwargs\n )\n\n def on_modified(self, event):\n check.invariant(event.src_path == self._log_path)\n self._event_log_storage.on_modified()\n
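Because the class above is documented as intended for test cases, a direct-construction sketch may be clearer than the ``dagster.yaml`` route. This is a hypothetical example, not part of the patch; it assumes only what the listing shows (the `base_dir` constructor argument, `get_db_path()`, and `dispose()`), and the temporary-directory handling is illustrative.

import os
import tempfile

from dagster._core.storage.event_log import ConsolidatedSqliteEventLogStorage


def demo_consolidated_event_log_storage() -> None:
    # Hypothetical test-style usage: every run shares one consolidated SQLite file.
    with tempfile.TemporaryDirectory() as base_dir:
        storage = ConsolidatedSqliteEventLogStorage(base_dir=base_dir)
        try:
            assert os.path.exists(storage.get_db_path())
        finally:
            # Stops the filesystem observer if a watch was started.
            storage.dispose()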
", "current_page_name": "_modules/dagster/_core/storage/event_log/sqlite/consolidated_sqlite_event_log", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.event_log.sqlite.consolidated_sqlite_event_log"}, "sqlite_event_log": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.event_log.sqlite.sqlite_event_log

\nimport contextlib\nimport glob\nimport logging\nimport os\nimport re\nimport sqlite3\nimport threading\nimport time\nfrom collections import defaultdict\nfrom contextlib import contextmanager\nfrom typing import TYPE_CHECKING, Any, ContextManager, Iterable, Iterator, Optional, Sequence\n\nimport sqlalchemy as db\nimport sqlalchemy.exc as db_exc\nfrom sqlalchemy.engine import Connection, Engine\nfrom sqlalchemy.pool import NullPool\nfrom tqdm import tqdm\nfrom watchdog.events import FileSystemEvent, PatternMatchingEventHandler\nfrom watchdog.observers import Observer\n\nimport dagster._check as check\nimport dagster._seven as seven\nfrom dagster._config import StringSource\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._core.definitions.events import AssetKey\nfrom dagster._core.errors import DagsterInvariantViolationError\nfrom dagster._core.event_api import EventHandlerFn\nfrom dagster._core.events import ASSET_CHECK_EVENTS, ASSET_EVENTS, EVENT_TYPE_TO_PIPELINE_RUN_STATUS\nfrom dagster._core.events.log import EventLogEntry\nfrom dagster._core.storage.dagster_run import DagsterRunStatus, RunsFilter\nfrom dagster._core.storage.event_log.base import EventLogCursor, EventLogRecord, EventRecordsFilter\nfrom dagster._core.storage.sql import (\n    AlembicVersion,\n    check_alembic_revision,\n    create_engine,\n    get_alembic_config,\n    run_alembic_upgrade,\n    stamp_alembic_rev,\n)\nfrom dagster._core.storage.sqlalchemy_compat import db_select\nfrom dagster._core.storage.sqlite import create_db_conn_string\nfrom dagster._serdes import (\n    ConfigurableClass,\n    ConfigurableClassData,\n)\nfrom dagster._serdes.errors import DeserializationError\nfrom dagster._serdes.serdes import deserialize_value\nfrom dagster._utils import mkdir_p\n\nfrom ..schema import SqlEventLogStorageMetadata, SqlEventLogStorageTable\nfrom ..sql_event_log import RunShardedEventsCursor, SqlEventLogStorage\n\nif TYPE_CHECKING:\n    from dagster._core.storage.sqlite_storage import SqliteStorageConfig\nINDEX_SHARD_NAME = "index"\n\n\n
[docs]class SqliteEventLogStorage(SqlEventLogStorage, ConfigurableClass):\n """SQLite-backed event log storage.\n\n Users should not directly instantiate this class; it is instantiated by internal machinery when\n ``dagster-webserver`` and ``dagster-graphql`` load, based on the values in the ``dagster.yaml`` file insqliteve\n ``$DAGSTER_HOME``. Configuration of this class should be done by setting values in that file.\n\n This is the default event log storage when none is specified in the ``dagster.yaml``.\n\n To explicitly specify SQLite for event log storage, you can add a block such as the following\n to your ``dagster.yaml``:\n\n .. code-block:: YAML\n\n event_log_storage:\n module: dagster._core.storage.event_log\n class: SqliteEventLogStorage\n config:\n base_dir: /path/to/dir\n\n The ``base_dir`` param tells the event log storage where on disk to store the databases. To\n improve concurrent performance, event logs are stored in a separate SQLite database for each\n run.\n """\n\n def __init__(self, base_dir: str, inst_data: Optional[ConfigurableClassData] = None):\n """Note that idempotent initialization of the SQLite database is done on a per-run_id\n basis in the body of connect, since each run is stored in a separate database.\n """\n self._base_dir = os.path.abspath(check.str_param(base_dir, "base_dir"))\n mkdir_p(self._base_dir)\n\n self._obs = None\n\n self._watchers = defaultdict(dict)\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n\n # Used to ensure that each run ID attempts to initialize its DB the first time it connects,\n # ensuring that the database will be created if it doesn't exist\n self._initialized_dbs = set()\n\n # Ensure that multiple threads (like the event log watcher) interact safely with each other\n self._db_lock = threading.Lock()\n\n if not os.path.exists(self.path_for_shard(INDEX_SHARD_NAME)):\n conn_string = self.conn_string_for_shard(INDEX_SHARD_NAME)\n engine = create_engine(conn_string, poolclass=NullPool)\n self._initdb(engine)\n self.reindex_events()\n self.reindex_assets()\n\n super().__init__()\n\n def upgrade(self) -> None:\n all_run_ids = self.get_all_run_ids()\n print(f"Updating event log storage for {len(all_run_ids)} runs on disk...") # noqa: T201\n alembic_config = get_alembic_config(__file__)\n if all_run_ids:\n for run_id in tqdm(all_run_ids):\n with self.run_connection(run_id) as conn:\n run_alembic_upgrade(alembic_config, conn, run_id)\n\n print("Updating event log storage for index db on disk...") # noqa: T201\n with self.index_connection() as conn:\n run_alembic_upgrade(alembic_config, conn, "index")\n\n self._initialized_dbs = set()\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n @classmethod\n def config_type(cls) -> UserConfigSchema:\n return {"base_dir": StringSource}\n\n @classmethod\n def from_config_value(\n cls, inst_data: Optional[ConfigurableClassData], config_value: "SqliteStorageConfig"\n ) -> "SqliteEventLogStorage":\n return SqliteEventLogStorage(inst_data=inst_data, **config_value)\n\n def get_all_run_ids(self) -> Sequence[str]:\n all_filenames = glob.glob(os.path.join(self._base_dir, "*.db"))\n return [\n os.path.splitext(os.path.basename(filename))[0]\n for filename in all_filenames\n if os.path.splitext(os.path.basename(filename))[0] != INDEX_SHARD_NAME\n ]\n\n def has_table(self, table_name: str) -> bool:\n conn_string = self.conn_string_for_shard(INDEX_SHARD_NAME)\n engine = create_engine(conn_string, 
poolclass=NullPool)\n with engine.connect() as conn:\n return bool(engine.dialect.has_table(conn, table_name))\n\n def path_for_shard(self, run_id: str) -> str:\n return os.path.join(self._base_dir, f"{run_id}.db")\n\n def conn_string_for_shard(self, shard_name: str) -> str:\n check.str_param(shard_name, "shard_name")\n return create_db_conn_string(self._base_dir, shard_name)\n\n def _initdb(self, engine: Engine) -> None:\n alembic_config = get_alembic_config(__file__)\n\n retry_limit = 10\n\n while True:\n try:\n with engine.connect() as connection:\n db_revision, head_revision = check_alembic_revision(alembic_config, connection)\n\n if not (db_revision and head_revision):\n SqlEventLogStorageMetadata.create_all(engine)\n connection.execute(db.text("PRAGMA journal_mode=WAL;"))\n stamp_alembic_rev(alembic_config, connection)\n\n break\n except (db_exc.DatabaseError, sqlite3.DatabaseError, sqlite3.OperationalError) as exc:\n # This is SQLite-specific handling for concurrency issues that can arise when\n # multiple processes (e.g. the dagster-webserver process and user code process) contend with\n # each other to init the db. When we hit the following errors, we know that another\n # process is on the case and we should retry.\n err_msg = str(exc)\n\n if not (\n re.search(r"table [A-Za-z_]* already exists", err_msg)\n or "database is locked" in err_msg\n or "UNIQUE constraint failed: alembic_version.version_num" in err_msg\n ):\n raise\n\n if retry_limit == 0:\n raise\n else:\n logging.info(\n "SqliteEventLogStorage._initdb: Encountered apparent concurrent init, "\n "retrying (%s retries left). Exception: %s",\n retry_limit,\n err_msg,\n )\n time.sleep(0.2)\n retry_limit -= 1\n\n @contextmanager\n def _connect(self, shard: str) -> Iterator[Connection]:\n with self._db_lock:\n check.str_param(shard, "shard")\n\n conn_string = self.conn_string_for_shard(shard)\n engine = create_engine(conn_string, poolclass=NullPool)\n\n if shard not in self._initialized_dbs:\n self._initdb(engine)\n self._initialized_dbs.add(shard)\n\n with engine.connect() as conn:\n with conn.begin():\n yield conn\n engine.dispose()\n\n def run_connection(self, run_id: Optional[str] = None) -> Any:\n return self._connect(run_id) # type: ignore # bad sig\n\n def index_connection(self) -> ContextManager[Connection]:\n return self._connect(INDEX_SHARD_NAME)\n\n def store_event(self, event: EventLogEntry) -> None:\n """Overridden method to replicate asset events in a central assets.db sqlite shard, enabling\n cross-run asset queries.\n\n Args:\n event (EventLogEntry): The event to store.\n """\n check.inst_param(event, "event", EventLogEntry)\n insert_event_statement = self.prepare_insert_event(event)\n run_id = event.run_id\n\n with self.run_connection(run_id) as conn:\n conn.execute(insert_event_statement)\n\n if event.is_dagster_event and event.dagster_event.asset_key: # type: ignore\n check.invariant(\n event.dagster_event_type in ASSET_EVENTS,\n "Can only store asset materializations, materialization_planned, and"\n " observations in index database",\n )\n\n event_id = None\n\n # mirror the event in the cross-run index database\n with self.index_connection() as conn:\n result = conn.execute(insert_event_statement)\n event_id = result.inserted_primary_key[0]\n\n self.store_asset_event(event, event_id)\n\n if event_id is None:\n raise DagsterInvariantViolationError(\n "Cannot store asset event tags for null event id."\n )\n\n self.store_asset_event_tags(event, event_id)\n\n if event.is_dagster_event and 
event.dagster_event_type in ASSET_CHECK_EVENTS:\n self.store_asset_check_event(event, None)\n\n if event.is_dagster_event and event.dagster_event_type in EVENT_TYPE_TO_PIPELINE_RUN_STATUS:\n # should mirror run status change events in the index shard\n with self.index_connection() as conn:\n result = conn.execute(insert_event_statement)\n\n def get_event_records(\n self,\n event_records_filter: EventRecordsFilter,\n limit: Optional[int] = None,\n ascending: bool = False,\n ) -> Iterable[EventLogRecord]:\n """Overridden method to enable cross-run event queries in sqlite.\n\n The record id in sqlite does not auto increment cross runs, so instead of fetching events\n after record id, we only fetch events whose runs updated after update_timestamp.\n """\n check.opt_inst_param(event_records_filter, "event_records_filter", EventRecordsFilter)\n check.opt_int_param(limit, "limit")\n check.bool_param(ascending, "ascending")\n\n is_asset_query = event_records_filter and event_records_filter.event_type in ASSET_EVENTS\n if is_asset_query:\n # asset materializations, observations and materialization planned events get mirrored\n # into the index shard, so no custom run shard-aware cursor logic needed\n return super(SqliteEventLogStorage, self).get_event_records(\n event_records_filter=event_records_filter, limit=limit, ascending=ascending\n )\n\n query = db_select([SqlEventLogStorageTable.c.id, SqlEventLogStorageTable.c.event])\n if event_records_filter.asset_key:\n asset_details = next(iter(self._get_assets_details([event_records_filter.asset_key])))\n else:\n asset_details = None\n\n if event_records_filter.after_cursor is not None and not isinstance(\n event_records_filter.after_cursor, RunShardedEventsCursor\n ):\n raise Exception("""\n Called `get_event_records` on a run-sharded event log storage with a cursor that\n is not run-aware. Add a RunShardedEventsCursor to your query filter\n or switch your instance configuration to use a non-run-sharded event log storage\n (e.g. 
PostgresEventLogStorage, ConsolidatedSqliteEventLogStorage)\n """)\n\n query = self._apply_filter_to_query(\n query=query,\n event_records_filter=event_records_filter,\n asset_details=asset_details,\n apply_cursor_filters=False, # run-sharded cursor filters don't really make sense\n )\n if limit:\n query = query.limit(limit)\n if ascending:\n query = query.order_by(SqlEventLogStorageTable.c.timestamp.asc())\n else:\n query = query.order_by(SqlEventLogStorageTable.c.timestamp.desc())\n\n # workaround for the run-shard sqlite to enable cross-run queries: get a list of run_ids\n # whose events may qualify the query, and then open run_connection per run_id at a time.\n run_updated_after = (\n event_records_filter.after_cursor.run_updated_after\n if isinstance(event_records_filter.after_cursor, RunShardedEventsCursor)\n else None\n )\n run_records = self._instance.get_run_records(\n filters=RunsFilter(updated_after=run_updated_after),\n order_by="update_timestamp",\n ascending=ascending,\n )\n\n event_records = []\n for run_record in run_records:\n run_id = run_record.dagster_run.run_id\n with self.run_connection(run_id) as conn:\n results = conn.execute(query).fetchall()\n\n for row_id, json_str in results:\n try:\n event_record = deserialize_value(json_str, EventLogEntry)\n event_records.append(\n EventLogRecord(storage_id=row_id, event_log_entry=event_record)\n )\n if limit and len(event_records) >= limit:\n break\n except DeserializationError:\n logging.warning(\n "Could not resolve event record as EventLogEntry for id `%s`.", row_id\n )\n except seven.JSONDecodeError:\n logging.warning("Could not parse event record id `%s`.", row_id)\n\n if limit and len(event_records) >= limit:\n break\n\n return event_records[:limit]\n\n def supports_event_consumer_queries(self) -> bool:\n return False\n\n def delete_events(self, run_id: str) -> None:\n with self.run_connection(run_id) as conn:\n self.delete_events_for_run(conn, run_id)\n\n # delete the mirrored event in the cross-run index database\n with self.index_connection() as conn:\n self.delete_events_for_run(conn, run_id)\n\n def wipe(self) -> None:\n # should delete all the run-sharded db files and drop the contents of the index\n for filename in (\n glob.glob(os.path.join(self._base_dir, "*.db"))\n + glob.glob(os.path.join(self._base_dir, "*.db-wal"))\n + glob.glob(os.path.join(self._base_dir, "*.db-shm"))\n ):\n if (\n not filename.endswith(f"{INDEX_SHARD_NAME}.db")\n and not filename.endswith(f"{INDEX_SHARD_NAME}.db-wal")\n and not filename.endswith(f"{INDEX_SHARD_NAME}.db-shm")\n ):\n with contextlib.suppress(FileNotFoundError):\n os.unlink(filename)\n\n self._initialized_dbs = set()\n self._wipe_index()\n\n def _delete_mirrored_events_for_asset_key(self, asset_key: AssetKey) -> None:\n with self.index_connection() as conn:\n conn.execute(\n SqlEventLogStorageTable.delete().where(\n SqlEventLogStorageTable.c.asset_key == asset_key.to_string(),\n )\n )\n\n def wipe_asset(self, asset_key: AssetKey) -> None:\n # default implementation will update the event_logs in the sharded dbs, and the asset_key\n # table in the asset shard, but will not remove the mirrored event_log events in the asset\n # shard\n super(SqliteEventLogStorage, self).wipe_asset(asset_key)\n self._delete_mirrored_events_for_asset_key(asset_key)\n\n def watch(self, run_id: str, cursor: Optional[str], callback: EventHandlerFn) -> None:\n if not self._obs:\n self._obs = Observer()\n self._obs.start()\n\n watchdog = SqliteEventLogStorageWatchdog(self, run_id, callback, cursor)\n 
self._watchers[run_id][callback] = (\n watchdog,\n self._obs.schedule(watchdog, self._base_dir, True),\n )\n\n def end_watch(self, run_id: str, handler: EventHandlerFn) -> None:\n if handler in self._watchers[run_id]:\n event_handler, watch = self._watchers[run_id][handler]\n self._obs.remove_handler_for_watch(event_handler, watch) # type: ignore # (possible none)\n del self._watchers[run_id][handler]\n\n def dispose(self) -> None:\n if self._obs:\n self._obs.stop()\n self._obs.join(timeout=15)\n\n def alembic_version(self) -> AlembicVersion:\n alembic_config = get_alembic_config(__file__)\n with self.index_connection() as conn:\n return check_alembic_revision(alembic_config, conn)\n\n @property\n def is_run_sharded(self) -> bool:\n return True\n\n @property\n def supports_global_concurrency_limits(self) -> bool:\n return False
\n\n\nclass SqliteEventLogStorageWatchdog(PatternMatchingEventHandler):\n def __init__(\n self,\n event_log_storage: SqliteEventLogStorage,\n run_id: str,\n callback: EventHandlerFn,\n cursor: Optional[str],\n **kwargs: Any,\n ):\n self._event_log_storage = check.inst_param(\n event_log_storage, "event_log_storage", SqliteEventLogStorage\n )\n self._run_id = check.str_param(run_id, "run_id")\n self._cb = check.callable_param(callback, "callback")\n self._log_path = event_log_storage.path_for_shard(run_id)\n self._cursor = cursor\n super(SqliteEventLogStorageWatchdog, self).__init__(patterns=[self._log_path], **kwargs)\n\n def _process_log(self) -> None:\n connection = self._event_log_storage.get_records_for_run(self._run_id, self._cursor)\n if connection.cursor:\n self._cursor = connection.cursor\n for record in connection.records:\n status = None\n try:\n status = self._cb(\n record.event_log_entry, str(EventLogCursor.from_storage_id(record.storage_id))\n )\n except Exception:\n logging.exception("Exception in callback for event watch on run %s.", self._run_id)\n\n if (\n status == DagsterRunStatus.SUCCESS\n or status == DagsterRunStatus.FAILURE\n or status == DagsterRunStatus.CANCELED\n ):\n self._event_log_storage.end_watch(self._run_id, self._cb)\n\n def on_modified(self, event: FileSystemEvent) -> None:\n check.invariant(event.src_path == self._log_path)\n self._process_log()\n
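As `get_event_records` above explains, the run-sharded SQLite storage cannot page non-asset events by record id across runs, so a run-aware cursor is required. The sketch below illustrates that, assuming `RunShardedEventsCursor` exposes the `id` and `run_updated_after` fields referenced in the code; querying storage this way is instance-level plumbing rather than a typical user workflow.

.. code-block:: python

    # Sketch of a cross-run (non-asset) event query on run-sharded SQLite storage,
    # under the assumptions stated above.
    from datetime import datetime, timedelta

    from dagster import DagsterEventType, DagsterInstance, EventRecordsFilter
    from dagster._core.storage.event_log.sql_event_log import RunShardedEventsCursor

    instance = DagsterInstance.get()
    records = instance.get_event_records(
        EventRecordsFilter(
            event_type=DagsterEventType.RUN_SUCCESS,
            # Non-asset queries must use a run-aware cursor on run-sharded storage.
            after_cursor=RunShardedEventsCursor(
                id=0, run_updated_after=datetime.now() - timedelta(days=1)
            ),
        ),
        limit=10,
    )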
", "current_page_name": "_modules/dagster/_core/storage/event_log/sqlite/sqlite_event_log", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.event_log.sqlite.sqlite_event_log"}}}, "file_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.file_manager

\nimport io\nimport os\nimport shutil\nimport uuid\nfrom abc import ABC, abstractmethod\nfrom contextlib import contextmanager\nfrom typing import BinaryIO, ContextManager, Iterator, Optional, TextIO, Union\n\nfrom typing_extensions import TypeAlias\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._config import Field, StringSource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource, resource\nfrom dagster._core.execution.context.init import InitResourceContext\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._utils import mkdir_p\n\nfrom .temp_file_manager import TempfileManager\n\nIOStream: TypeAlias = Union[TextIO, BinaryIO]\n\n\n
[docs]class FileHandle(ABC):\n """A reference to a file as manipulated by a FileManager.\n\n Subclasses may handle files that are resident on the local file system, in an object store, or\n in any arbitrary place where a file can be stored.\n\n This exists to handle the very common case where you wish to write a computation that reads,\n transforms, and writes files, but where you also want the same code to work in local development\n as well as on a cluster where the files will be stored in a globally available object store\n such as S3.\n """\n\n @public\n @property\n @abstractmethod\n def path_desc(self) -> str:\n """A representation of the file path for display purposes only."""\n raise NotImplementedError()
\n\n\n
[docs]class LocalFileHandle(FileHandle):\n """A reference to a file on a local filesystem."""\n\n def __init__(self, path: str):\n self._path = check.str_param(path, "path")\n\n @public\n @property\n def path(self) -> str:\n """The file's path."""\n return self._path\n\n @public\n @property\n def path_desc(self) -> str:\n """A representation of the file path for display purposes only."""\n return self._path
\n\n\n
[docs]class FileManager(ABC):\n """Base class for all file managers in dagster.\n\n The file manager is an interface that can be implemented by resources to provide abstract\n access to a file system such as local disk, S3, or other cloud storage.\n\n For examples of usage, see the documentation of the concrete file manager implementations.\n """\n\n
[docs] @public\n @abstractmethod\n def copy_handle_to_local_temp(self, file_handle: FileHandle) -> str:\n """Copy a file represented by a file handle to a temp file.\n\n In an implementation built around an object store such as S3, this method would be expected\n to download the file from S3 to local filesystem in a location assigned by the standard\n library's :py:mod:`python:tempfile` module.\n\n Temp files returned by this method are *not* guaranteed to be reusable across solid\n boundaries. For files that must be available across solid boundaries, use the\n :py:meth:`~dagster._core.storage.file_manager.FileManager.read`,\n :py:meth:`~dagster._core.storage.file_manager.FileManager.read_data`,\n :py:meth:`~dagster._core.storage.file_manager.FileManager.write`, and\n :py:meth:`~dagster._core.storage.file_manager.FileManager.write_data` methods.\n\n Args:\n file_handle (FileHandle): The handle to the file to make available as a local temp file.\n\n Returns:\n str: Path to the local temp file.\n """\n raise NotImplementedError()
\n\n
[docs] @public\n @abstractmethod\n def delete_local_temp(self) -> None:\n """Delete all local temporary files created by previous calls to\n :py:meth:`~dagster._core.storage.file_manager.FileManager.copy_handle_to_local_temp`.\n\n Should typically only be called by framework implementors.\n """\n raise NotImplementedError()
\n\n
[docs] @public\n @abstractmethod\n def read(self, file_handle: FileHandle, mode: str = "rb") -> ContextManager[IOStream]:\n """Return a file-like stream for the file handle.\n\n This may incur an expensive network call for file managers backed by object stores\n such as S3.\n\n Args:\n file_handle (FileHandle): The file handle to make available as a stream.\n mode (str): The mode in which to open the file. Default: ``"rb"``.\n\n Returns:\n Union[TextIO, BinaryIO]: A file-like stream.\n """\n raise NotImplementedError()
\n\n
[docs] @public\n @abstractmethod\n def read_data(self, file_handle: FileHandle) -> bytes:\n """Return the bytes for a given file handle. This may incur an expensive network\n call for file managers backed by object stores such as s3.\n\n Args:\n file_handle (FileHandle): The file handle for which to return bytes.\n\n Returns:\n bytes: Bytes for a given file handle.\n """\n raise NotImplementedError()
\n\n
[docs] @public\n @abstractmethod\n def write(self, file_obj: IOStream, mode: str = "wb", ext: Optional[str] = None) -> FileHandle:\n """Write the bytes contained within the given file object into the file manager.\n\n Args:\n file_obj (Union[TextIO, StringIO]): A file-like object.\n mode (Optional[str]): The mode in which to write the file into the file manager.\n Default: ``"wb"``.\n ext (Optional[str]): For file managers that support file extensions, the extension with\n which to write the file. Default: ``None``.\n\n Returns:\n FileHandle: A handle to the newly created file.\n """\n raise NotImplementedError()
\n\n
[docs] @public\n @abstractmethod\n def write_data(self, data: bytes, ext: Optional[str] = None) -> FileHandle:\n """Write raw bytes into the file manager.\n\n Args:\n data (bytes): The bytes to write into the file manager.\n ext (Optional[str]): For file managers that support file extensions, the extension with\n which to write the file. Default: ``None``.\n\n Returns:\n FileHandle: A handle to the newly created file.\n """\n raise NotImplementedError()
\n\n\n
[docs]@dagster_maintained_resource\n@resource(config_schema={"base_dir": Field(StringSource, is_required=False)})\ndef local_file_manager(init_context: InitResourceContext) -> "LocalFileManager":\n """FileManager that provides abstract access to a local filesystem.\n\n By default, files will be stored in `<local_artifact_storage>/storage/file_manager` where\n `<local_artifact_storage>` can be configured the ``dagster.yaml`` file in ``$DAGSTER_HOME``.\n\n Implements the :py:class:`~dagster._core.storage.file_manager.FileManager` API.\n\n Examples:\n .. code-block:: python\n\n import tempfile\n\n from dagster import job, local_file_manager, op\n\n\n @op(required_resource_keys={"file_manager"})\n def write_files(context):\n fh_1 = context.resources.file_manager.write_data(b"foo")\n\n with tempfile.NamedTemporaryFile("w+") as fd:\n fd.write("bar")\n fd.seek(0)\n fh_2 = context.resources.file_manager.write(fd, mode="w", ext=".txt")\n\n return (fh_1, fh_2)\n\n\n @op(required_resource_keys={"file_manager"})\n def read_files(context, file_handles):\n fh_1, fh_2 = file_handles\n assert context.resources.file_manager.read_data(fh_2) == b"bar"\n fd = context.resources.file_manager.read(fh_2, mode="r")\n assert fd.read() == "foo"\n fd.close()\n\n\n @job(resource_defs={"file_manager": local_file_manager})\n def files_pipeline():\n read_files(write_files())\n\n Or to specify the file directory:\n\n .. code-block:: python\n\n @job(\n resource_defs={\n "file_manager": local_file_manager.configured({"base_dir": "/my/base/dir"})\n }\n )\n def files_pipeline():\n read_files(write_files())\n """\n return LocalFileManager(\n base_dir=init_context.resource_config.get(\n "base_dir", os.path.join(init_context.instance.storage_directory(), "file_manager") # type: ignore # (possible none)\n )\n )
\n\n\ndef check_file_like_obj(obj: object) -> None:\n check.invariant(obj and hasattr(obj, "read") and hasattr(obj, "write"))\n\n\nclass LocalFileManager(FileManager):\n def __init__(self, base_dir: str):\n self.base_dir = base_dir\n self._base_dir_ensured = False\n self._temp_file_manager = TempfileManager()\n\n @staticmethod\n def for_instance(instance: DagsterInstance, run_id: str) -> "LocalFileManager":\n check.inst_param(instance, "instance", DagsterInstance)\n return LocalFileManager(instance.file_manager_directory(run_id))\n\n def ensure_base_dir_exists(self) -> None:\n if self._base_dir_ensured:\n return\n\n mkdir_p(self.base_dir)\n\n self._base_dir_ensured = True\n\n def copy_handle_to_local_temp(self, file_handle: FileHandle) -> str:\n check.inst_param(file_handle, "file_handle", FileHandle)\n with self.read(file_handle, "rb") as handle_obj: # type: ignore # (??)\n temp_file_obj = self._temp_file_manager.tempfile()\n temp_file_obj.write(handle_obj.read())\n temp_name = temp_file_obj.name\n temp_file_obj.close()\n return temp_name\n\n @contextmanager\n def read(self, file_handle: LocalFileHandle, mode: str = "rb") -> Iterator[IOStream]:\n check.inst_param(file_handle, "file_handle", LocalFileHandle)\n check.str_param(mode, "mode")\n check.param_invariant(mode in {"r", "rb"}, "mode")\n\n encoding = None if mode == "rb" else "utf8"\n with open(file_handle.path, mode, encoding=encoding) as file_obj:\n yield file_obj # type: ignore # (??)\n\n def read_data(self, file_handle: LocalFileHandle) -> bytes:\n with self.read(file_handle, mode="rb") as file_obj:\n return file_obj.read() # type: ignore # (??)\n\n def write_data(self, data: bytes, ext: Optional[str] = None):\n check.inst_param(data, "data", bytes)\n return self.write(io.BytesIO(data), mode="wb", ext=ext)\n\n def write(\n self, file_obj: IOStream, mode: str = "wb", ext: Optional[str] = None\n ) -> LocalFileHandle:\n check_file_like_obj(file_obj)\n check.opt_str_param(ext, "ext")\n\n self.ensure_base_dir_exists()\n\n dest_file_path = os.path.join(\n self.base_dir, str(uuid.uuid4()) + (("." + ext) if ext is not None else "")\n )\n\n encoding = None if "b" in mode else "utf8"\n with open(dest_file_path, mode, encoding=encoding) as dest_file_obj:\n shutil.copyfileobj(file_obj, dest_file_obj) # type: ignore # (??)\n return LocalFileHandle(dest_file_path)\n\n def delete_local_temp(self) -> None:\n self._temp_file_manager.close()\n
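The `LocalFileManager` implementation above round-trips data through file handles: `write_data` produces a `LocalFileHandle`, `read_data` loads it back, and `copy_handle_to_local_temp` stages a temp copy that `delete_local_temp` later cleans up. A minimal sketch, using a hypothetical scratch directory as `base_dir`:

.. code-block:: python

    # Minimal sketch of using LocalFileManager (defined above) directly, outside a job.
    from dagster._core.storage.file_manager import LocalFileManager

    file_manager = LocalFileManager(base_dir="/tmp/dagster_file_manager")

    handle = file_manager.write_data(b"hello")             # returns a LocalFileHandle
    assert file_manager.read_data(handle) == b"hello"

    local_path = file_manager.copy_handle_to_local_temp(handle)  # temp copy on disk
    file_manager.delete_local_temp()                        # clean up temp copies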
", "current_page_name": "_modules/dagster/_core/storage/file_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.file_manager"}, "fs_io_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.fs_io_manager

\nimport os\nimport pickle\nfrom typing import TYPE_CHECKING, Any, Optional\n\nfrom pydantic import Field\n\nimport dagster._check as check\nfrom dagster import (\n    DagsterInvariantViolationError,\n    Field as DagsterField,\n)\nfrom dagster._annotations import experimental\nfrom dagster._config import StringSource\nfrom dagster._config.pythonic_config import ConfigurableIOManagerFactory\nfrom dagster._core.definitions.events import AssetKey, AssetMaterialization\nfrom dagster._core.definitions.metadata import MetadataValue\nfrom dagster._core.execution.context.init import InitResourceContext\nfrom dagster._core.execution.context.input import InputContext\nfrom dagster._core.execution.context.output import OutputContext\nfrom dagster._core.storage.io_manager import IOManager, dagster_maintained_io_manager, io_manager\nfrom dagster._core.storage.upath_io_manager import UPathIOManager\nfrom dagster._utils import PICKLE_PROTOCOL, mkdir_p\n\nif TYPE_CHECKING:\n    from typing_extensions import Literal\n    from upath import UPath\n\n\n
[docs]class FilesystemIOManager(ConfigurableIOManagerFactory["PickledObjectFilesystemIOManager"]):\n """Built-in filesystem IO manager that stores and retrieves values using pickling.\n\n The base directory that the pickle files live inside is determined by:\n\n * The IO manager's "base_dir" configuration value, if specified. Otherwise...\n * A "storage/" directory underneath the value for "local_artifact_storage" in your dagster.yaml\n file, if specified. Otherwise...\n * A "storage/" directory underneath the directory that the DAGSTER_HOME environment variable\n points to, if that environment variable is specified. Otherwise...\n * A temporary directory.\n\n Assigns each op output to a unique filepath containing run ID, step key, and output name.\n Assigns each asset to a single filesystem path, at "<base_dir>/<asset_key>". If the asset key\n has multiple components, the final component is used as the name of the file, and the preceding\n components as parent directories under the base_dir.\n\n Subsequent materializations of an asset will overwrite previous materializations of that asset.\n So, with a base directory of "/my/base/path", an asset with key\n `AssetKey(["one", "two", "three"])` would be stored in a file called "three" in a directory\n with path "/my/base/path/one/two/".\n\n Example usage:\n\n\n 1. Attach an IO manager to a set of assets using the reserved resource key ``"io_manager"``.\n\n .. code-block:: python\n\n from dagster import Definitions, asset, FilesystemIOManager\n\n @asset\n def asset1():\n # create df ...\n return df\n\n @asset\n def asset2(asset1):\n return asset1[:5]\n\n defs = Definitions(\n assets=[asset1, asset2],\n resources={\n "io_manager": FilesystemIOManager(base_dir="/my/base/path")\n },\n )\n\n\n 2. Specify a job-level IO manager using the reserved resource key ``"io_manager"``,\n which will set the given IO manager on all ops in a job.\n\n .. code-block:: python\n\n from dagster import FilesystemIOManager, job, op\n\n @op\n def op_a():\n # create df ...\n return df\n\n @op\n def op_b(df):\n return df[:5]\n\n @job(\n resource_defs={\n "io_manager": FilesystemIOManager(base_dir="/my/base/path")\n }\n )\n def job():\n op_b(op_a())\n\n\n 3. Specify IO manager on :py:class:`Out`, which allows you to set different IO managers on\n different step outputs.\n\n .. code-block:: python\n\n from dagster import FilesystemIOManager, job, op, Out\n\n @op(out=Out(io_manager_key="my_io_manager"))\n def op_a():\n # create df ...\n return df\n\n @op\n def op_b(df):\n return df[:5]\n\n @job(resource_defs={"my_io_manager": FilesystemIOManager()})\n def job():\n op_b(op_a())\n\n """\n\n base_dir: Optional[str] = Field(default=None, description="Base directory for storing files.")\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def create_io_manager(self, context: InitResourceContext) -> "PickledObjectFilesystemIOManager":\n base_dir = self.base_dir or check.not_none(context.instance).storage_directory()\n return PickledObjectFilesystemIOManager(base_dir=base_dir)
\n\n\n
[docs]@dagster_maintained_io_manager\n@io_manager(\n config_schema=FilesystemIOManager.to_config_schema(),\n description="Built-in filesystem IO manager that stores and retrieves values using pickling.",\n)\ndef fs_io_manager(init_context: InitResourceContext) -> "PickledObjectFilesystemIOManager":\n """Built-in filesystem IO manager that stores and retrieves values using pickling.\n\n The base directory that the pickle files live inside is determined by:\n\n * The IO manager's "base_dir" configuration value, if specified. Otherwise...\n * A "storage/" directory underneath the value for "local_artifact_storage" in your dagster.yaml\n file, if specified. Otherwise...\n * A "storage/" directory underneath the directory that the DAGSTER_HOME environment variable\n points to, if that environment variable is specified. Otherwise...\n * A temporary directory.\n\n Assigns each op output to a unique filepath containing run ID, step key, and output name.\n Assigns each asset to a single filesystem path, at "<base_dir>/<asset_key>". If the asset key\n has multiple components, the final component is used as the name of the file, and the preceding\n components as parent directories under the base_dir.\n\n Subsequent materializations of an asset will overwrite previous materializations of that asset.\n So, with a base directory of "/my/base/path", an asset with key\n `AssetKey(["one", "two", "three"])` would be stored in a file called "three" in a directory\n with path "/my/base/path/one/two/".\n\n Example usage:\n\n\n 1. Attach an IO manager to a set of assets using the reserved resource key ``"io_manager"``.\n\n .. code-block:: python\n\n from dagster import Definitions, asset, fs_io_manager\n\n @asset\n def asset1():\n # create df ...\n return df\n\n @asset\n def asset2(asset1):\n return asset1[:5]\n\n defs = Definitions(\n assets=[asset1, asset2],\n resources={\n "io_manager": fs_io_manager.configured({"base_dir": "/my/base/path"})\n },\n )\n\n\n 2. Specify a job-level IO manager using the reserved resource key ``"io_manager"``,\n which will set the given IO manager on all ops in a job.\n\n .. code-block:: python\n\n from dagster import fs_io_manager, job, op\n\n @op\n def op_a():\n # create df ...\n return df\n\n @op\n def op_b(df):\n return df[:5]\n\n @job(\n resource_defs={\n "io_manager": fs_io_manager.configured({"base_dir": "/my/base/path"})\n }\n )\n def job():\n op_b(op_a())\n\n\n 3. Specify IO manager on :py:class:`Out`, which allows you to set different IO managers on\n different step outputs.\n\n .. code-block:: python\n\n from dagster import fs_io_manager, job, op, Out\n\n @op(out=Out(io_manager_key="my_io_manager"))\n def op_a():\n # create df ...\n return df\n\n @op\n def op_b(df):\n return df[:5]\n\n @job(resource_defs={"my_io_manager": fs_io_manager})\n def job():\n op_b(op_a())\n\n """\n return FilesystemIOManager.from_resource_context(init_context)
\n\n\nclass PickledObjectFilesystemIOManager(UPathIOManager):\n """Built-in filesystem IO manager that stores and retrieves values using pickling.\n Is compatible with local and remote filesystems via `universal-pathlib` and `fsspec`.\n Learn more about how to use remote filesystems here: https://github.com/fsspec/universal_pathlib.\n\n Args:\n base_dir (Optional[str]): base directory where all the step outputs which use this object\n manager will be stored in.\n **kwargs: additional keyword arguments for `universal_pathlib.UPath`.\n """\n\n extension: str = "" # TODO: maybe change this to .pickle? Leaving blank for compatibility.\n\n def __init__(self, base_dir=None, **kwargs):\n from upath import UPath\n\n self.base_dir = check.opt_str_param(base_dir, "base_dir")\n\n super().__init__(base_path=UPath(base_dir, **kwargs))\n\n def dump_to_path(self, context: OutputContext, obj: Any, path: "UPath"):\n try:\n with path.open("wb") as file:\n pickle.dump(obj, file, PICKLE_PROTOCOL)\n except (AttributeError, RecursionError, ImportError, pickle.PicklingError) as e:\n executor = context.step_context.job_def.executor_def\n\n if isinstance(e, RecursionError):\n # if obj can't be pickled because of RecursionError then __str__() will also\n # throw a RecursionError\n obj_repr = f"{obj.__class__} exceeds recursion limit and"\n else:\n obj_repr = obj.__str__()\n\n raise DagsterInvariantViolationError(\n f"Object {obj_repr} is not picklable. You are currently using the "\n f"fs_io_manager and the {executor.name}. You will need to use a different "\n "io manager to continue using this output. For example, you can use the "\n "mem_io_manager with the in_process_executor.\\n"\n "For more information on io managers, visit "\n "https://docs.dagster.io/concepts/io-management/io-managers \\n"\n "For more information on executors, vist "\n "https://docs.dagster.io/deployment/executors#overview"\n ) from e\n\n def load_from_path(self, context: InputContext, path: "UPath") -> Any:\n with path.open("rb") as file:\n return pickle.load(file)\n\n\nclass CustomPathPickledObjectFilesystemIOManager(IOManager):\n """Built-in filesystem IO managerthat stores and retrieves values using pickling and\n allow users to specify file path for outputs.\n\n Args:\n base_dir (Optional[str]): base directory where all the step outputs which use this object\n manager will be stored in.\n """\n\n def __init__(self, base_dir: Optional[str] = None):\n self.base_dir = check.opt_str_param(base_dir, "base_dir")\n self.write_mode: Literal["wb"] = "wb"\n self.read_mode: Literal["rb"] = "rb"\n\n def _get_path(self, path: str) -> str:\n return os.path.join(self.base_dir, path) # type: ignore # (possible none)\n\n def handle_output(self, context: OutputContext, obj: object):\n """Pickle the data and store the object to a custom file path.\n\n This method emits an AssetMaterialization event so the assets will be tracked by the\n Asset Catalog.\n """\n check.inst_param(context, "context", OutputContext)\n metadata = context.metadata\n path = check.str_param(metadata.get("path"), "metadata.path") # type: ignore # (possible none)\n\n filepath = self._get_path(path)\n\n # Ensure path exists\n mkdir_p(os.path.dirname(filepath))\n context.log.debug(f"Writing file at: {filepath}")\n\n with open(filepath, self.write_mode) as write_obj:\n pickle.dump(obj, write_obj, PICKLE_PROTOCOL)\n\n return AssetMaterialization(\n asset_key=AssetKey([context.job_name, context.step_key, context.name]),\n metadata={"path": MetadataValue.path(os.path.abspath(filepath))},\n 
)\n\n def load_input(self, context: InputContext) -> object:\n """Unpickle the file from a given file path and Load it to a data object."""\n check.inst_param(context, "context", InputContext)\n metadata = context.upstream_output.metadata # type: ignore # (possible none)\n path = check.str_param(metadata.get("path"), "metadata.path") # type: ignore # (possible none)\n filepath = self._get_path(path)\n context.log.debug(f"Loading file from: {filepath}")\n\n with open(filepath, self.read_mode) as read_obj:\n return pickle.load(read_obj)\n\n\n@dagster_maintained_io_manager\n@io_manager(config_schema={"base_dir": DagsterField(StringSource, is_required=True)})\n@experimental\ndef custom_path_fs_io_manager(\n init_context: InitResourceContext,\n) -> CustomPathPickledObjectFilesystemIOManager:\n """Built-in IO manager that allows users to custom output file path per output definition.\n\n It requires users to specify a base directory where all the step output will be stored in. It\n serializes and deserializes output values (assets) using pickling and stores the pickled object\n in the user-provided file paths.\n\n Example usage:\n\n .. code-block:: python\n\n from dagster import custom_path_fs_io_manager, job, op\n\n @op(out=Out(metadata={"path": "path/to/sample_output"}))\n def sample_data(df):\n return df[:5]\n\n my_custom_path_fs_io_manager = custom_path_fs_io_manager.configured(\n {"base_dir": "path/to/basedir"}\n )\n\n @job(resource_defs={"io_manager": my_custom_path_fs_io_manager})\n def my_job():\n sample_data()\n\n """\n return CustomPathPickledObjectFilesystemIOManager(\n base_dir=init_context.resource_config.get("base_dir")\n )\n
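`custom_path_fs_io_manager` above stores each output at a caller-chosen path read from the output's `"path"` metadata, which means every output it handles needs that metadata. A slightly fuller, runnable sketch of the documented pattern; the base directory and per-output paths here are hypothetical.

.. code-block:: python

    # Sketch of the custom_path_fs_io_manager pattern documented above, with
    # hypothetical paths; each output handled by it carries "path" metadata.
    from dagster import Out, custom_path_fs_io_manager, job, op

    my_io_manager = custom_path_fs_io_manager.configured({"base_dir": "/tmp/dagster_outputs"})


    @op(out=Out(metadata={"path": "sample/data.pkl"}))
    def sample_data():
        return list(range(5))


    @op(out=Out(metadata={"path": "sample/first.pkl"}))
    def take_first(values):
        return values[0]


    @job(resource_defs={"io_manager": my_io_manager})
    def my_job():
        take_first(sample_data())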
", "current_page_name": "_modules/dagster/_core/storage/fs_io_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.fs_io_manager"}, "input_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.input_manager

\nfrom abc import ABC, abstractmethod\nfrom functools import update_wrapper\nfrom typing import TYPE_CHECKING, AbstractSet, Callable, Optional, Union, cast, overload\n\nfrom typing_extensions import TypeAlias, TypeGuard\n\nimport dagster._check as check\nfrom dagster._core.decorator_utils import has_at_least_one_parameter\nfrom dagster._core.definitions.config import is_callable_valid_config_arg\nfrom dagster._core.definitions.definition_config_schema import (\n    CoercableToConfigSchema,\n    IDefinitionConfigSchema,\n    convert_user_facing_definition_config_schema,\n)\nfrom dagster._core.definitions.resource_definition import ResourceDefinition, ResourceFunction\n\nif TYPE_CHECKING:\n    from dagster._core.execution.context.input import InputContext\n\nInputLoadFn: TypeAlias = Union[\n    Callable[["InputContext"], object],\n    Callable[[], object],\n]\n\n\n
[docs]class InputManager(ABC):\n """Base interface for classes that are responsible for loading solid inputs."""\n\n @abstractmethod\n def load_input(self, context: "InputContext") -> object:\n """The user-defined read method that loads an input to a solid.\n\n Args:\n context (InputContext): The input context.\n\n Returns:\n Any: The data object.\n """
\n\n\nclass IInputManagerDefinition:\n @property\n @abstractmethod\n def input_config_schema(self) -> IDefinitionConfigSchema:\n """The schema for per-input configuration for inputs that are managed by this\n input manager.\n """\n\n\n
[docs]class InputManagerDefinition(ResourceDefinition, IInputManagerDefinition):\n """Definition of an input manager resource.\n\n Input managers load op inputs.\n\n An InputManagerDefinition is a :py:class:`ResourceDefinition` whose resource_fn returns an\n :py:class:`InputManager`.\n\n The easiest way to create an InputManagerDefinition is with the\n :py:func:`@input_manager <input_manager>` decorator.\n """\n\n def __init__(\n self,\n resource_fn: ResourceFunction,\n config_schema: Optional[CoercableToConfigSchema] = None,\n description: Optional[str] = None,\n input_config_schema: Optional[CoercableToConfigSchema] = None,\n required_resource_keys: Optional[AbstractSet[str]] = None,\n version: Optional[str] = None,\n ):\n self._input_config_schema = convert_user_facing_definition_config_schema(\n input_config_schema\n )\n super(InputManagerDefinition, self).__init__(\n resource_fn=resource_fn,\n config_schema=config_schema,\n description=description,\n required_resource_keys=required_resource_keys,\n version=version,\n )\n\n @property\n def input_config_schema(self) -> IDefinitionConfigSchema:\n return self._input_config_schema\n\n def copy_for_configured(\n self,\n description: Optional[str],\n config_schema: CoercableToConfigSchema,\n ) -> "InputManagerDefinition":\n return InputManagerDefinition(\n config_schema=config_schema,\n description=description or self.description,\n resource_fn=self.resource_fn,\n required_resource_keys=self.required_resource_keys,\n input_config_schema=self.input_config_schema,\n )
\n\n\n@overload\ndef input_manager(\n config_schema: InputLoadFn,\n) -> InputManagerDefinition: ...\n\n\n@overload\ndef input_manager(\n config_schema: Optional[CoercableToConfigSchema] = None,\n description: Optional[str] = None,\n input_config_schema: Optional[CoercableToConfigSchema] = None,\n required_resource_keys: Optional[AbstractSet[str]] = None,\n version: Optional[str] = None,\n) -> Callable[[InputLoadFn], InputManagerDefinition]: ...\n\n\n
[docs]def input_manager(\n config_schema: Union[InputLoadFn, Optional[CoercableToConfigSchema]] = None,\n description: Optional[str] = None,\n input_config_schema: Optional[CoercableToConfigSchema] = None,\n required_resource_keys: Optional[AbstractSet[str]] = None,\n version: Optional[str] = None,\n) -> Union[InputManagerDefinition, Callable[[InputLoadFn], InputManagerDefinition]]:\n """Define an input manager.\n\n Input managers load op inputs, either from upstream outputs or by providing default values.\n\n The decorated function should accept a :py:class:`InputContext` and resource config, and return\n a loaded object that will be passed into one of the inputs of an op.\n\n The decorator produces an :py:class:`InputManagerDefinition`.\n\n Args:\n config_schema (Optional[ConfigSchema]): The schema for the resource-level config. If not\n set, Dagster will accept any config provided.\n description (Optional[str]): A human-readable description of the resource.\n input_config_schema (Optional[ConfigSchema]): A schema for the input-level config. Each\n input that uses this input manager can be configured separately using this config.\n If not set, Dagster will accept any config provided.\n required_resource_keys (Optional[Set[str]]): Keys for the resources required by the input\n manager.\n version (Optional[str]): (Experimental) the version of the input manager definition.\n\n **Examples:**\n\n .. code-block:: python\n\n from dagster import input_manager, op, job, In\n\n @input_manager\n def csv_loader(_):\n return read_csv("some/path")\n\n @op(ins={"input1": In(input_manager_key="csv_loader_key")})\n def my_op(_, input1):\n do_stuff(input1)\n\n @job(resource_defs={"csv_loader_key": csv_loader})\n def my_job():\n my_op()\n\n @input_manager(config_schema={"base_dir": str})\n def csv_loader(context):\n return read_csv(context.resource_config["base_dir"] + "/some/path")\n\n @input_manager(input_config_schema={"path": str})\n def csv_loader(context):\n return read_csv(context.config["path"])\n """\n if _is_input_load_fn(config_schema):\n return _InputManagerDecoratorCallable()(config_schema)\n\n def _wrap(load_fn: InputLoadFn) -> InputManagerDefinition:\n return _InputManagerDecoratorCallable(\n config_schema=cast(CoercableToConfigSchema, config_schema),\n description=description,\n version=version,\n input_config_schema=input_config_schema,\n required_resource_keys=required_resource_keys,\n )(load_fn)\n\n return _wrap
\n\n\ndef _is_input_load_fn(obj: Union[InputLoadFn, CoercableToConfigSchema]) -> TypeGuard[InputLoadFn]:\n return callable(obj) and not is_callable_valid_config_arg(obj)\n\n\nclass InputManagerWrapper(InputManager):\n def __init__(self, load_fn: InputLoadFn):\n self._load_fn = load_fn\n\n def load_input(self, context: "InputContext") -> object:\n # the @input_manager decorated function (self._load_fn) may return a direct value that\n # should be used or an instance of an InputManager. So we call self._load_fn and see if the\n # result is an InputManager. If so we call it's load_input method\n intermediate = (\n # type-ignore because function being used as attribute\n self._load_fn(context)\n if has_at_least_one_parameter(self._load_fn)\n else self._load_fn() # type: ignore # (strict type guard)\n )\n\n if isinstance(intermediate, InputManager):\n return intermediate.load_input(context)\n return intermediate\n\n\nclass _InputManagerDecoratorCallable:\n def __init__(\n self,\n config_schema: CoercableToConfigSchema = None,\n description: Optional[str] = None,\n version: Optional[str] = None,\n input_config_schema: CoercableToConfigSchema = None,\n required_resource_keys: Optional[AbstractSet[str]] = None,\n ):\n self.config_schema = config_schema\n self.description = check.opt_str_param(description, "description")\n self.version = check.opt_str_param(version, "version")\n self.input_config_schema = input_config_schema\n self.required_resource_keys = required_resource_keys\n\n def __call__(self, load_fn: InputLoadFn) -> InputManagerDefinition:\n check.callable_param(load_fn, "load_fn")\n\n def _resource_fn(_):\n return InputManagerWrapper(load_fn)\n\n input_manager_def = InputManagerDefinition(\n resource_fn=_resource_fn,\n config_schema=self.config_schema,\n description=self.description,\n version=self.version,\n input_config_schema=self.input_config_schema,\n required_resource_keys=self.required_resource_keys,\n )\n\n # `update_wrapper` typing cannot currently handle a Union of Callables correctly\n update_wrapper(input_manager_def, wrapped=load_fn) # type: ignore\n\n return input_manager_def\n
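The `input_manager` docstring above notes that input managers can either load upstream outputs or provide values for inputs that have no upstream output at all. A sketch of the second case, with hypothetical names: the op input is bound to an input manager key and supplied entirely by the manager.

.. code-block:: python

    # Sketch of an input manager supplying a value for an input with no upstream output.
    from dagster import In, input_manager, job, op


    @input_manager
    def fixed_number_loader(_context):
        # Always supply the same value, regardless of upstream outputs.
        return 42


    @op(ins={"number": In(input_manager_key="numbers")})
    def double(number):
        return number * 2


    @job(resource_defs={"numbers": fixed_number_loader})
    def doubling_job():
        double()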
", "current_page_name": "_modules/dagster/_core/storage/input_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.input_manager"}, "io_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.io_manager

\nfrom abc import abstractmethod\nfrom functools import update_wrapper\nfrom typing import TYPE_CHECKING, AbstractSet, Any, Callable, Optional, Set, Union, cast, overload\n\nfrom typing_extensions import TypeAlias, TypeGuard\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._config import UserConfigSchema\nfrom dagster._core.definitions.config import is_callable_valid_config_arg\nfrom dagster._core.definitions.definition_config_schema import (\n    CoercableToConfigSchema,\n    IDefinitionConfigSchema,\n    convert_user_facing_definition_config_schema,\n)\nfrom dagster._core.definitions.resource_definition import ResourceDefinition\nfrom dagster._core.storage.input_manager import IInputManagerDefinition, InputManager\nfrom dagster._core.storage.output_manager import IOutputManagerDefinition, OutputManager\n\nfrom ..decorator_utils import get_function_params\n\nif TYPE_CHECKING:\n    from dagster._core.execution.context.init import InitResourceContext\n    from dagster._core.execution.context.input import InputContext\n    from dagster._core.execution.context.output import OutputContext\n\nIOManagerFunctionWithContext = Callable[["InitResourceContext"], "IOManager"]\nIOManagerFunction: TypeAlias = Union[\n    IOManagerFunctionWithContext,\n    Callable[[], "IOManager"],\n]\n\n\ndef is_io_manager_context_provided(\n    fn: IOManagerFunction,\n) -> TypeGuard[IOManagerFunctionWithContext]:\n    return len(get_function_params(fn)) >= 1\n\n\n
[docs]class IOManagerDefinition(ResourceDefinition, IInputManagerDefinition, IOutputManagerDefinition):\n """Definition of an IO manager resource.\n\n IOManagers are used to store op outputs and load them as inputs to downstream ops.\n\n An IOManagerDefinition is a :py:class:`ResourceDefinition` whose `resource_fn` returns an\n :py:class:`IOManager`.\n\n The easiest way to create an IOManagerDefnition is with the :py:func:`@io_manager <io_manager>`\n decorator.\n """\n\n def __init__(\n self,\n resource_fn: IOManagerFunction,\n config_schema: CoercableToConfigSchema = None,\n description: Optional[str] = None,\n required_resource_keys: Optional[AbstractSet[str]] = None,\n version: Optional[str] = None,\n input_config_schema: CoercableToConfigSchema = None,\n output_config_schema: CoercableToConfigSchema = None,\n ):\n self._input_config_schema = convert_user_facing_definition_config_schema(\n input_config_schema\n )\n # Unlike other configurable objects, whose config schemas default to Any,\n # output_config_schema defaults to None. This the because IOManager input / output config\n # shares config namespace with dagster type loaders.\n self._output_config_schema = (\n convert_user_facing_definition_config_schema(output_config_schema)\n if output_config_schema is not None\n else None\n )\n super(IOManagerDefinition, self).__init__(\n resource_fn=resource_fn,\n config_schema=config_schema,\n description=description,\n required_resource_keys=required_resource_keys,\n version=version,\n )\n\n @property\n def input_config_schema(self) -> IDefinitionConfigSchema:\n return self._input_config_schema\n\n @property\n def output_config_schema(self) -> Optional[IDefinitionConfigSchema]:\n return self._output_config_schema\n\n def copy_for_configured(\n self,\n description: Optional[str],\n config_schema: CoercableToConfigSchema,\n ) -> "IOManagerDefinition":\n io_def = IOManagerDefinition(\n config_schema=config_schema,\n description=description or self.description,\n resource_fn=self.resource_fn,\n required_resource_keys=self.required_resource_keys,\n input_config_schema=self.input_config_schema,\n output_config_schema=self.output_config_schema,\n )\n\n io_def._dagster_maintained = self._is_dagster_maintained() # noqa: SLF001\n\n return io_def\n\n
[docs] @public\n @staticmethod\n def hardcoded_io_manager(\n value: "IOManager", description: Optional[str] = None\n ) -> "IOManagerDefinition":\n """A helper function that creates an ``IOManagerDefinition`` with a hardcoded IOManager.\n\n Args:\n value (IOManager): A hardcoded IO Manager which helps mock the definition.\n description ([Optional[str]]): The description of the IO Manager. Defaults to None.\n\n Returns:\n [IOManagerDefinition]: A hardcoded resource.\n """\n check.inst_param(value, "value", IOManager)\n return IOManagerDefinition(resource_fn=lambda _init_context: value, description=description)
\n\n\n
[docs]class IOManager(InputManager, OutputManager):\n """Base class for user-provided IO managers.\n\n IOManagers are used to store op outputs and load them as inputs to downstream ops.\n\n Extend this class to handle how objects are loaded and stored. Users should implement\n ``handle_output`` to store an object and ``load_input`` to retrieve an object.\n """\n\n
[docs] @public\n @abstractmethod\n def load_input(self, context: "InputContext") -> Any:\n """User-defined method that loads an input to an op.\n\n Args:\n context (InputContext): The input context, which describes the input that's being loaded\n and the upstream output that's being loaded from.\n\n Returns:\n Any: The data object.\n """
\n\n
[docs] @public\n @abstractmethod\n def handle_output(self, context: "OutputContext", obj: Any) -> None:\n """User-defined method that stores an output of an op.\n\n Args:\n context (OutputContext): The context of the step output that produces this object.\n obj (Any): The object, returned by the op, to be stored.\n """
\n\n\n@overload\ndef io_manager(config_schema: IOManagerFunction) -> IOManagerDefinition: ...\n\n\n@overload\ndef io_manager(\n config_schema: CoercableToConfigSchema = None,\n description: Optional[str] = None,\n output_config_schema: CoercableToConfigSchema = None,\n input_config_schema: CoercableToConfigSchema = None,\n required_resource_keys: Optional[Set[str]] = None,\n version: Optional[str] = None,\n) -> Callable[[IOManagerFunction], IOManagerDefinition]: ...\n\n\n
[docs]def io_manager(\n config_schema: Union[IOManagerFunction, CoercableToConfigSchema] = None,\n description: Optional[str] = None,\n output_config_schema: CoercableToConfigSchema = None,\n input_config_schema: CoercableToConfigSchema = None,\n required_resource_keys: Optional[Set[str]] = None,\n version: Optional[str] = None,\n) -> Union[IOManagerDefinition, Callable[[IOManagerFunction], IOManagerDefinition],]:\n """Define an IO manager.\n\n IOManagers are used to store op outputs and load them as inputs to downstream ops.\n\n The decorated function should accept an :py:class:`InitResourceContext` and return an\n :py:class:`IOManager`.\n\n Args:\n config_schema (Optional[ConfigSchema]): The schema for the resource config. Configuration\n data available in `init_context.resource_config`. If not set, Dagster will accept any\n config provided.\n description(Optional[str]): A human-readable description of the resource.\n output_config_schema (Optional[ConfigSchema]): The schema for per-output config. If not set,\n no per-output configuration will be allowed.\n input_config_schema (Optional[ConfigSchema]): The schema for per-input config. If not set,\n Dagster will accept any config provided.\n required_resource_keys (Optional[Set[str]]): Keys for the resources required by the object\n manager.\n version (Optional[str]): (Experimental) The version of a resource function. Two wrapped\n resource functions should only have the same version if they produce the same resource\n definition when provided with the same inputs.\n\n **Examples:**\n\n .. code-block:: python\n\n class MyIOManager(IOManager):\n def handle_output(self, context, obj):\n write_csv("some/path")\n\n def load_input(self, context):\n return read_csv("some/path")\n\n @io_manager\n def my_io_manager(init_context):\n return MyIOManager()\n\n @op(out=Out(io_manager_key="my_io_manager_key"))\n def my_op(_):\n return do_stuff()\n\n @job(resource_defs={"my_io_manager_key": my_io_manager})\n def my_job():\n my_op()\n\n """\n if callable(config_schema) and not is_callable_valid_config_arg(config_schema):\n config_schema = cast(IOManagerFunction, config_schema)\n return _IOManagerDecoratorCallable()(config_schema)\n\n def _wrap(resource_fn: IOManagerFunction) -> IOManagerDefinition:\n return _IOManagerDecoratorCallable(\n config_schema=cast(Optional[UserConfigSchema], config_schema),\n description=description,\n required_resource_keys=required_resource_keys,\n version=version,\n output_config_schema=output_config_schema,\n input_config_schema=input_config_schema,\n )(resource_fn)\n\n return _wrap
\n\n\ndef dagster_maintained_io_manager(io_manager_def: IOManagerDefinition) -> IOManagerDefinition:\n io_manager_def._dagster_maintained = True # noqa: SLF001\n return io_manager_def\n\n\nclass _IOManagerDecoratorCallable:\n def __init__(\n self,\n config_schema: CoercableToConfigSchema = None,\n description: Optional[str] = None,\n output_config_schema: CoercableToConfigSchema = None,\n input_config_schema: CoercableToConfigSchema = None,\n required_resource_keys: Optional[Set[str]] = None,\n version: Optional[str] = None,\n ):\n # type validation happens in IOManagerDefinition\n self.config_schema = config_schema\n self.description = description\n self.required_resource_keys = required_resource_keys\n self.version = version\n self.output_config_schema = output_config_schema\n self.input_config_schema = input_config_schema\n\n def __call__(self, fn: IOManagerFunction) -> IOManagerDefinition:\n check.callable_param(fn, "fn")\n\n io_manager_def = IOManagerDefinition(\n resource_fn=fn,\n config_schema=self.config_schema,\n description=self.description,\n required_resource_keys=self.required_resource_keys,\n version=self.version,\n output_config_schema=self.output_config_schema,\n input_config_schema=self.input_config_schema,\n )\n\n # `update_wrapper` typing cannot currently handle a Union of Callables correctly\n update_wrapper(io_manager_def, wrapped=fn) # type: ignore\n\n return io_manager_def\n
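`IOManagerDefinition.hardcoded_io_manager` (defined above) wraps an already-constructed `IOManager` in a resource definition, which is handy for swapping a test double in for the default IO manager. A sketch, where `InMemoryIOManager` is a hypothetical example class keyed by asset key:

.. code-block:: python

    # Sketch of hardcoded_io_manager used to inject an in-memory test IO manager.
    from dagster import IOManager, IOManagerDefinition, asset, materialize


    class InMemoryIOManager(IOManager):
        def __init__(self):
            self.values = {}

        def handle_output(self, context, obj):
            # Store each asset's value in memory, keyed by its asset key path.
            self.values[tuple(context.asset_key.path)] = obj

        def load_input(self, context):
            return self.values[tuple(context.asset_key.path)]


    @asset
    def numbers():
        return [1, 2, 3]


    @asset
    def doubled(numbers):
        return [n * 2 for n in numbers]


    result = materialize(
        [numbers, doubled],
        resources={"io_manager": IOManagerDefinition.hardcoded_io_manager(InMemoryIOManager())},
    )
    assert result.success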
", "current_page_name": "_modules/dagster/_core/storage/io_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.io_manager"}, "local_compute_log_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.local_compute_log_manager

\nimport hashlib\nimport os\nimport shutil\nimport sys\nfrom collections import defaultdict\nfrom contextlib import contextmanager\nfrom typing import IO, TYPE_CHECKING, Generator, Iterator, Mapping, Optional, Sequence, Tuple\n\nfrom typing_extensions import Final\nfrom watchdog.events import PatternMatchingEventHandler\nfrom watchdog.observers.polling import PollingObserver\n\nfrom dagster import (\n    Field,\n    Float,\n    StringSource,\n    _check as check,\n)\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._core.execution.compute_logs import mirror_stream_to_file\nfrom dagster._core.storage.dagster_run import DagsterRun\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData\nfrom dagster._seven import json\nfrom dagster._utils import ensure_dir, ensure_file, touch_file\n\nfrom .captured_log_manager import (\n    CapturedLogContext,\n    CapturedLogData,\n    CapturedLogManager,\n    CapturedLogMetadata,\n    CapturedLogSubscription,\n)\nfrom .compute_log_manager import (\n    MAX_BYTES_FILE_READ,\n    ComputeIOType,\n    ComputeLogFileData,\n    ComputeLogManager,\n    ComputeLogSubscription,\n)\n\nif TYPE_CHECKING:\n    from dagster._core.storage.cloud_storage_compute_log_manager import LogSubscription\n\nDEFAULT_WATCHDOG_POLLING_TIMEOUT: Final = 2.5\n\nIO_TYPE_EXTENSION: Final[Mapping[ComputeIOType, str]] = {\n    ComputeIOType.STDOUT: "out",\n    ComputeIOType.STDERR: "err",\n}\n\nMAX_FILENAME_LENGTH: Final = 255\n\n\n
[docs]class LocalComputeLogManager(CapturedLogManager, ComputeLogManager, ConfigurableClass):\n """Stores copies of stdout & stderr for each compute step locally on disk."""\n\n def __init__(\n self,\n base_dir: str,\n polling_timeout: Optional[float] = None,\n inst_data: Optional[ConfigurableClassData] = None,\n ):\n self._base_dir = base_dir\n self._polling_timeout = check.opt_float_param(\n polling_timeout, "polling_timeout", DEFAULT_WATCHDOG_POLLING_TIMEOUT\n )\n self._subscription_manager = LocalComputeLogSubscriptionManager(self)\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n @property\n def polling_timeout(self) -> float:\n return self._polling_timeout\n\n @classmethod\n def config_type(cls) -> UserConfigSchema:\n return {\n "base_dir": StringSource,\n "polling_timeout": Field(Float, is_required=False),\n }\n\n @classmethod\n def from_config_value(\n cls, inst_data: Optional[ConfigurableClassData], config_value\n ) -> "LocalComputeLogManager":\n return LocalComputeLogManager(inst_data=inst_data, **config_value)\n\n @contextmanager\n def capture_logs(self, log_key: Sequence[str]) -> Generator[CapturedLogContext, None, None]:\n outpath = self.get_captured_local_path(log_key, IO_TYPE_EXTENSION[ComputeIOType.STDOUT])\n errpath = self.get_captured_local_path(log_key, IO_TYPE_EXTENSION[ComputeIOType.STDERR])\n with mirror_stream_to_file(sys.stdout, outpath), mirror_stream_to_file(sys.stderr, errpath):\n yield CapturedLogContext(log_key)\n\n # leave artifact on filesystem so that we know the capture is completed\n touch_file(self.complete_artifact_path(log_key))\n\n @contextmanager\n def open_log_stream(\n self, log_key: Sequence[str], io_type: ComputeIOType\n ) -> Iterator[Optional[IO]]:\n path = self.get_captured_local_path(log_key, IO_TYPE_EXTENSION[io_type])\n ensure_file(path)\n with open(path, "+a", encoding="utf-8") as f:\n yield f\n\n def is_capture_complete(self, log_key: Sequence[str]) -> bool:\n return os.path.exists(self.complete_artifact_path(log_key))\n\n def get_log_data(\n self, log_key: Sequence[str], cursor: Optional[str] = None, max_bytes: Optional[int] = None\n ) -> CapturedLogData:\n stdout_cursor, stderr_cursor = self.parse_cursor(cursor)\n stdout, stdout_offset = self._read_bytes(\n log_key, ComputeIOType.STDOUT, offset=stdout_cursor, max_bytes=max_bytes\n )\n stderr, stderr_offset = self._read_bytes(\n log_key, ComputeIOType.STDERR, offset=stderr_cursor, max_bytes=max_bytes\n )\n return CapturedLogData(\n log_key=log_key,\n stdout=stdout,\n stderr=stderr,\n cursor=self.build_cursor(stdout_offset, stderr_offset),\n )\n\n def get_log_metadata(self, log_key: Sequence[str]) -> CapturedLogMetadata:\n return CapturedLogMetadata(\n stdout_location=self.get_captured_local_path(\n log_key, IO_TYPE_EXTENSION[ComputeIOType.STDOUT]\n ),\n stderr_location=self.get_captured_local_path(\n log_key, IO_TYPE_EXTENSION[ComputeIOType.STDERR]\n ),\n stdout_download_url=self.get_captured_log_download_url(log_key, ComputeIOType.STDOUT),\n stderr_download_url=self.get_captured_log_download_url(log_key, ComputeIOType.STDERR),\n )\n\n def delete_logs(\n self, log_key: Optional[Sequence[str]] = None, prefix: Optional[Sequence[str]] = None\n ):\n if log_key:\n paths = [\n self.get_captured_local_path(log_key, IO_TYPE_EXTENSION[ComputeIOType.STDOUT]),\n self.get_captured_local_path(log_key, IO_TYPE_EXTENSION[ComputeIOType.STDERR]),\n 
self.get_captured_local_path(\n log_key, IO_TYPE_EXTENSION[ComputeIOType.STDOUT], partial=True\n ),\n self.get_captured_local_path(\n log_key, IO_TYPE_EXTENSION[ComputeIOType.STDERR], partial=True\n ),\n self.get_captured_local_path(log_key, "complete"),\n ]\n for path in paths:\n if os.path.exists(path) and os.path.isfile(path):\n os.remove(path)\n elif prefix:\n dir_to_delete = os.path.join(self._base_dir, *prefix)\n if os.path.exists(dir_to_delete) and os.path.isdir(dir_to_delete):\n # recursively delete all files in dir\n shutil.rmtree(dir_to_delete)\n else:\n check.failed("Must pass in either `log_key` or `prefix` argument to delete_logs")\n\n def _read_bytes(\n self,\n log_key: Sequence[str],\n io_type: ComputeIOType,\n offset: Optional[int] = 0,\n max_bytes: Optional[int] = None,\n ):\n path = self.get_captured_local_path(log_key, IO_TYPE_EXTENSION[io_type])\n return self.read_path(path, offset or 0, max_bytes)\n\n def parse_cursor(self, cursor: Optional[str] = None) -> Tuple[int, int]:\n # Translates a string cursor into a set of byte offsets for stdout, stderr\n if not cursor:\n return 0, 0\n\n parts = cursor.split(":")\n if not parts or len(parts) != 2:\n return 0, 0\n\n stdout, stderr = [int(_) for _ in parts]\n return stdout, stderr\n\n def build_cursor(self, stdout_offset: int, stderr_offset: int) -> str:\n return f"{stdout_offset}:{stderr_offset}"\n\n def complete_artifact_path(self, log_key):\n return self.get_captured_local_path(log_key, "complete")\n\n def read_path(\n self,\n path: str,\n offset: int = 0,\n max_bytes: Optional[int] = None,\n ):\n if not os.path.exists(path) or not os.path.isfile(path):\n return None, offset\n\n with open(path, "rb") as f:\n f.seek(offset, os.SEEK_SET)\n if max_bytes is None:\n data = f.read()\n else:\n data = f.read(max_bytes)\n new_offset = f.tell()\n return data, new_offset\n\n def get_captured_log_download_url(self, log_key, io_type):\n check.inst_param(io_type, "io_type", ComputeIOType)\n url = "/logs"\n for part in log_key:\n url = f"{url}/{part}"\n\n return f"{url}/{IO_TYPE_EXTENSION[io_type]}"\n\n def get_captured_local_path(self, log_key: Sequence[str], extension: str, partial=False):\n [*namespace, filebase] = log_key\n filename = f"{filebase}.{extension}"\n if partial:\n filename = f"{filename}.partial"\n if len(filename) > MAX_FILENAME_LENGTH:\n filename = "{}.{}".format(hashlib.md5(filebase.encode("utf-8")).hexdigest(), extension)\n return os.path.join(self._base_dir, *namespace, filename)\n\n def subscribe(\n self, log_key: Sequence[str], cursor: Optional[str] = None\n ) -> CapturedLogSubscription:\n subscription = CapturedLogSubscription(self, log_key, cursor)\n self.on_subscribe(subscription)\n return subscription\n\n def unsubscribe(self, subscription):\n self.on_unsubscribe(subscription)\n\n ###############################################\n #\n # Methods for the ComputeLogManager interface\n #\n ###############################################\n @contextmanager\n def _watch_logs(\n self, dagster_run: DagsterRun, step_key: Optional[str] = None\n ) -> Iterator[None]:\n check.inst_param(dagster_run, "dagster_run", DagsterRun)\n check.opt_str_param(step_key, "step_key")\n\n log_key = self.build_log_key_for_run(dagster_run.run_id, step_key or dagster_run.job_name)\n with self.capture_logs(log_key):\n yield\n\n def get_local_path(self, run_id: str, key: str, io_type: ComputeIOType) -> str:\n """Legacy adapter from compute log manager to more generic captured log manager API."""\n check.inst_param(io_type, "io_type", 
ComputeIOType)\n log_key = self.build_log_key_for_run(run_id, key)\n return self.get_captured_local_path(log_key, IO_TYPE_EXTENSION[io_type])\n\n def read_logs_file(\n self,\n run_id: str,\n key: str,\n io_type: ComputeIOType,\n cursor: int = 0,\n max_bytes: int = MAX_BYTES_FILE_READ,\n ) -> ComputeLogFileData:\n path = self.get_local_path(run_id, key, io_type)\n\n if not os.path.exists(path) or not os.path.isfile(path):\n return ComputeLogFileData(path=path, data=None, cursor=0, size=0, download_url=None)\n\n # See: https://docs.python.org/2/library/stdtypes.html#file.tell for Windows behavior\n with open(path, "rb") as f:\n f.seek(cursor, os.SEEK_SET)\n data = f.read(max_bytes)\n cursor = f.tell()\n stats = os.fstat(f.fileno())\n\n # local download path\n download_url = self.download_url(run_id, key, io_type)\n return ComputeLogFileData(\n path=path,\n data=data.decode("utf-8"),\n cursor=cursor,\n size=stats.st_size,\n download_url=download_url,\n )\n\n def get_key(self, dagster_run: DagsterRun, step_key: Optional[str]):\n check.inst_param(dagster_run, "dagster_run", DagsterRun)\n check.opt_str_param(step_key, "step_key")\n return step_key or dagster_run.job_name\n\n def is_watch_completed(self, run_id: str, key: str) -> bool:\n log_key = self.build_log_key_for_run(run_id, key)\n return self.is_capture_complete(log_key)\n\n def on_watch_start(self, dagster_run: DagsterRun, step_key: Optional[str]):\n pass\n\n def on_watch_finish(self, dagster_run: DagsterRun, step_key: Optional[str] = None):\n check.inst_param(dagster_run, "dagster_run", DagsterRun)\n check.opt_str_param(step_key, "step_key")\n log_key = self.build_log_key_for_run(dagster_run.run_id, step_key or dagster_run.job_name)\n touchpath = self.complete_artifact_path(log_key)\n touch_file(touchpath)\n\n def download_url(self, run_id: str, key: str, io_type: ComputeIOType):\n check.inst_param(io_type, "io_type", ComputeIOType)\n return f"/download/{run_id}/{key}/{io_type.value}"\n\n def on_subscribe(self, subscription: "LogSubscription") -> None:\n self._subscription_manager.add_subscription(subscription)\n\n def on_unsubscribe(self, subscription: "LogSubscription") -> None:\n self._subscription_manager.remove_subscription(subscription)\n\n def dispose(self) -> None:\n self._subscription_manager.dispose()
\n\n\nclass LocalComputeLogSubscriptionManager:\n def __init__(self, manager):\n self._manager = manager\n self._subscriptions = defaultdict(list)\n self._watchers = {}\n self._observer = None\n\n def add_subscription(self, subscription: "LogSubscription") -> None:\n check.inst_param(\n subscription, "subscription", (ComputeLogSubscription, CapturedLogSubscription)\n )\n\n if self.is_complete(subscription):\n subscription.fetch()\n subscription.complete()\n else:\n log_key = self._log_key(subscription)\n watch_key = self._watch_key(log_key)\n self._subscriptions[watch_key].append(subscription)\n self.watch(subscription)\n\n def is_complete(self, subscription: "LogSubscription") -> bool:\n check.inst_param(\n subscription, "subscription", (ComputeLogSubscription, CapturedLogSubscription)\n )\n\n if isinstance(subscription, ComputeLogSubscription):\n return self._manager.is_watch_completed(subscription.run_id, subscription.key)\n return self._manager.is_capture_complete(subscription.log_key)\n\n def remove_subscription(self, subscription: "LogSubscription") -> None:\n check.inst_param(\n subscription, "subscription", (ComputeLogSubscription, CapturedLogSubscription)\n )\n log_key = self._log_key(subscription)\n watch_key = self._watch_key(log_key)\n if subscription in self._subscriptions[watch_key]:\n self._subscriptions[watch_key].remove(subscription)\n subscription.complete()\n\n def _log_key(self, subscription: "LogSubscription") -> Sequence[str]:\n check.inst_param(\n subscription, "subscription", (ComputeLogSubscription, CapturedLogSubscription)\n )\n\n if isinstance(subscription, ComputeLogSubscription):\n return self._manager.build_log_key_for_run(subscription.run_id, subscription.key)\n return subscription.log_key\n\n def _watch_key(self, log_key: Sequence[str]) -> str:\n return json.dumps(log_key)\n\n def remove_all_subscriptions(self, log_key: Sequence[str]) -> None:\n watch_key = self._watch_key(log_key)\n for subscription in self._subscriptions.pop(watch_key, []):\n subscription.complete()\n\n def watch(self, subscription: "LogSubscription") -> None:\n log_key = self._log_key(subscription)\n watch_key = self._watch_key(log_key)\n if watch_key in self._watchers:\n return\n\n update_paths = [\n self._manager.get_captured_local_path(log_key, IO_TYPE_EXTENSION[ComputeIOType.STDOUT]),\n self._manager.get_captured_local_path(log_key, IO_TYPE_EXTENSION[ComputeIOType.STDERR]),\n self._manager.get_captured_local_path(\n log_key, IO_TYPE_EXTENSION[ComputeIOType.STDOUT], partial=True\n ),\n self._manager.get_captured_local_path(\n log_key, IO_TYPE_EXTENSION[ComputeIOType.STDERR], partial=True\n ),\n ]\n complete_paths = [self._manager.complete_artifact_path(log_key)]\n directory = os.path.dirname(\n self._manager.get_captured_local_path(log_key, ComputeIOType.STDERR),\n )\n\n if not self._observer:\n self._observer = PollingObserver(self._manager.polling_timeout)\n self._observer.start()\n\n ensure_dir(directory)\n\n self._watchers[watch_key] = self._observer.schedule(\n LocalComputeLogFilesystemEventHandler(self, log_key, update_paths, complete_paths),\n str(directory),\n )\n\n def notify_subscriptions(self, log_key: Sequence[str]) -> None:\n watch_key = self._watch_key(log_key)\n for subscription in self._subscriptions[watch_key]:\n subscription.fetch()\n\n def unwatch(self, log_key: Sequence[str], handler) -> None:\n watch_key = self._watch_key(log_key)\n if watch_key in self._watchers:\n self._observer.remove_handler_for_watch(handler, self._watchers[watch_key]) # type: ignore\n del 
self._watchers[watch_key]\n\n def dispose(self) -> None:\n if self._observer:\n self._observer.stop()\n self._observer.join(15)\n\n\nclass LocalComputeLogFilesystemEventHandler(PatternMatchingEventHandler):\n def __init__(self, manager, log_key, update_paths, complete_paths):\n self.manager = manager\n self.log_key = log_key\n self.update_paths = update_paths\n self.complete_paths = complete_paths\n patterns = update_paths + complete_paths\n super(LocalComputeLogFilesystemEventHandler, self).__init__(patterns=patterns)\n\n def on_created(self, event):\n if event.src_path in self.complete_paths:\n self.manager.remove_all_subscriptions(self.log_key)\n self.manager.unwatch(self.log_key, self)\n\n def on_modified(self, event):\n if event.src_path in self.update_paths:\n self.manager.notify_subscriptions(self.log_key)\n
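A minimal sketch of driving ``LocalComputeLogManager`` directly, assuming an arbitrary ``base_dir`` and log key chosen just for this example: it appends to the captured stdout file via ``open_log_stream`` and reads it back with ``get_log_data``.

.. code-block:: python

    import tempfile

    from dagster._core.storage.compute_log_manager import ComputeIOType
    from dagster._core.storage.local_compute_log_manager import LocalComputeLogManager

    # base_dir and the log key parts below are arbitrary values for this sketch
    manager = LocalComputeLogManager(base_dir=tempfile.mkdtemp())
    log_key = ["example_run_id", "compute_logs", "example_step"]

    # append to the captured stdout file for this log key ...
    with manager.open_log_stream(log_key, ComputeIOType.STDOUT) as stream:
        stream.write("hello from the example step\n")

    # ... and read it back; stdout comes back as raw bytes, cursor as "<stdout>:<stderr>" offsets
    log_data = manager.get_log_data(log_key)
    print(log_data.stdout)
    print(log_data.cursor)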
", "current_page_name": "_modules/dagster/_core/storage/local_compute_log_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.local_compute_log_manager"}, "mem_io_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.mem_io_manager

\nfrom typing import Dict, Tuple\n\nfrom dagster._core.execution.context.input import InputContext\nfrom dagster._core.execution.context.output import OutputContext\nfrom dagster._core.storage.io_manager import IOManager, dagster_maintained_io_manager, io_manager\n\n\n
[docs]class InMemoryIOManager(IOManager):\n """I/O manager that stores and retrieves values in memory. After execution is complete, the values will\n be garbage-collected. Note that this means that each run will not have access to values from previous runs.\n """\n\n def __init__(self):\n self.values: Dict[Tuple[object, ...], object] = {}\n\n def handle_output(self, context: OutputContext, obj: object):\n keys = tuple(context.get_identifier())\n self.values[keys] = obj\n\n def load_input(self, context: InputContext) -> object:\n keys = tuple(context.get_identifier())\n return self.values[keys]
\n\n\n
[docs]@dagster_maintained_io_manager\n@io_manager(description="Built-in IO manager that stores and retrieves values in memory.")\ndef mem_io_manager(_) -> InMemoryIOManager:\n """Built-in IO manager that stores and retrieves values in memory."""\n return InMemoryIOManager()
\n
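A short usage sketch for ``mem_io_manager``: the op and job names below are invented for the example, which simply swaps the default IO manager for the in-memory one and executes in process.

.. code-block:: python

    from dagster import job, mem_io_manager, op

    @op
    def produce_number() -> int:
        return 42

    @op
    def double(number: int) -> int:
        return number * 2

    # swap the default filesystem IO manager for the in-memory one
    @job(resource_defs={"io_manager": mem_io_manager})
    def in_memory_job():
        double(produce_number())

    result = in_memory_job.execute_in_process()
    assert result.success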
", "current_page_name": "_modules/dagster/_core/storage/mem_io_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.mem_io_manager"}, "memoizable_io_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.memoizable_io_manager

\nimport os\nimport pickle\nfrom abc import abstractmethod\nfrom typing import Union\n\nimport dagster._check as check\nfrom dagster._annotations import experimental, public\nfrom dagster._config import Field, StringSource\nfrom dagster._core.errors import DagsterInvariantViolationError\nfrom dagster._core.execution.context.input import InputContext\nfrom dagster._core.execution.context.output import OutputContext\nfrom dagster._core.storage.io_manager import IOManager, dagster_maintained_io_manager, io_manager\nfrom dagster._utils import PICKLE_PROTOCOL, mkdir_p\n\n\n
[docs]class MemoizableIOManager(IOManager):\n """Base class for IO manager enabled to work with memoized execution. Users should implement\n the ``load_input`` and ``handle_output`` methods described in the ``IOManager`` API, and the\n ``has_output`` method, which returns a boolean representing whether a data object can be found.\n """\n\n
[docs] @public\n @abstractmethod\n def has_output(self, context: OutputContext) -> bool:\n """The user-defined method that returns whether data exists given the metadata.\n\n Args:\n context (OutputContext): The context of the step performing this check.\n\n Returns:\n bool: True if there is data present that matches the provided context. False otherwise.\n """
\n\n\nclass VersionedPickledObjectFilesystemIOManager(MemoizableIOManager):\n def __init__(self, base_dir=None):\n self.base_dir = check.opt_str_param(base_dir, "base_dir")\n self.write_mode = "wb"\n self.read_mode = "rb"\n\n def _get_path(self, context: Union[InputContext, OutputContext]) -> str:\n output_context: OutputContext\n\n if isinstance(context, OutputContext):\n output_context = context\n else:\n if context.upstream_output is None:\n raise DagsterInvariantViolationError(\n "Missing value of InputContext.upstream_output. Cannot compute the input path."\n )\n\n output_context = context.upstream_output\n\n # automatically construct filepath\n step_key = check.str_param(output_context.step_key, "context.step_key")\n output_name = check.str_param(output_context.name, "context.name")\n version = check.str_param(output_context.version, "context.version")\n\n return os.path.join(self.base_dir, step_key, output_name, version)\n\n def handle_output(self, context, obj):\n """Pickle the data with the associated version, and store the object to a file.\n\n This method omits the AssetMaterialization event so assets generated by it won't be tracked\n by the Asset Catalog.\n """\n filepath = self._get_path(context)\n\n context.log.debug(f"Writing file at: {filepath}")\n\n # Ensure path exists\n mkdir_p(os.path.dirname(filepath))\n\n with open(filepath, self.write_mode) as write_obj:\n pickle.dump(obj, write_obj, PICKLE_PROTOCOL)\n\n def load_input(self, context):\n """Unpickle the file and Load it to a data object."""\n filepath = self._get_path(context)\n\n context.log.debug(f"Loading file from: {filepath}")\n\n with open(filepath, self.read_mode) as read_obj:\n return pickle.load(read_obj)\n\n def has_output(self, context):\n """Returns true if data object exists with the associated version, False otherwise."""\n filepath = self._get_path(context)\n\n context.log.debug(f"Checking for file at: {filepath}")\n\n return os.path.exists(filepath) and not os.path.isdir(filepath)\n\n\n@dagster_maintained_io_manager\n@io_manager(config_schema={"base_dir": Field(StringSource, is_required=False)})\n@experimental\ndef versioned_filesystem_io_manager(init_context):\n """Filesystem IO manager that utilizes versioning of stored objects.\n\n It requires users to specify a base directory where all the step outputs will be stored in. It\n serializes and deserializes output values (assets) using pickling and automatically constructs\n the filepaths for the assets using the provided directory, and the version for a provided step\n output.\n """\n return VersionedPickledObjectFilesystemIOManager(\n base_dir=init_context.resource_config.get(\n "base_dir", os.path.join(init_context.instance.storage_directory(), "versioned_outputs")\n )\n )\n
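A hedged sketch of implementing the ``MemoizableIOManager`` contract: like the versioned filesystem manager above it keys stored values on step key, output name, and version, but holds them in a plain dict. ``DictMemoizableIOManager`` is a made-up name for illustration only.

.. code-block:: python

    from dagster._core.storage.memoizable_io_manager import MemoizableIOManager

    class DictMemoizableIOManager(MemoizableIOManager):
        """Toy memoizable IO manager keyed on (step_key, output name, version)."""

        def __init__(self):
            self._values = {}

        def _key(self, output_context):
            return (output_context.step_key, output_context.name, output_context.version)

        def handle_output(self, context, obj):
            self._values[self._key(context)] = obj

        def load_input(self, context):
            # inputs are resolved through the upstream output's context
            return self._values[self._key(context.upstream_output)]

        def has_output(self, context):
            # memoized execution skips recomputing a step when this returns True
            return self._key(context) in self._values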
", "current_page_name": "_modules/dagster/_core/storage/memoizable_io_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.memoizable_io_manager"}, "noop_compute_log_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.noop_compute_log_manager

\nfrom contextlib import contextmanager\nfrom typing import IO, Any, Generator, Mapping, Optional, Sequence\n\nfrom typing_extensions import Self\n\nimport dagster._check as check\nfrom dagster._core.storage.captured_log_manager import (\n    CapturedLogContext,\n    CapturedLogData,\n    CapturedLogManager,\n    CapturedLogMetadata,\n    CapturedLogSubscription,\n)\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData\n\nfrom .compute_log_manager import (\n    MAX_BYTES_FILE_READ,\n    ComputeIOType,\n    ComputeLogFileData,\n    ComputeLogManager,\n)\n\n\n
[docs]class NoOpComputeLogManager(CapturedLogManager, ComputeLogManager, ConfigurableClass):\n """When enabled for a Dagster instance, stdout and stderr will not be available for any step."""\n\n def __init__(self, inst_data: Optional[ConfigurableClassData] = None):\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n\n @property\n def inst_data(self):\n return self._inst_data\n\n @classmethod\n def config_type(cls):\n return {}\n\n @classmethod\n def from_config_value(\n cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any]\n ) -> Self:\n return NoOpComputeLogManager(inst_data=inst_data, **config_value)\n\n def enabled(self, _dagster_run, _step_key):\n return False\n\n def _watch_logs(self, dagster_run, step_key=None):\n pass\n\n def get_local_path(self, run_id: str, key: str, io_type: ComputeIOType) -> str:\n raise NotImplementedError()\n\n def is_watch_completed(self, run_id, key):\n return True\n\n def on_watch_start(self, dagster_run, step_key):\n pass\n\n def on_watch_finish(self, dagster_run, step_key):\n pass\n\n def download_url(self, run_id, key, io_type):\n return None\n\n def read_logs_file(self, run_id, key, io_type, cursor=0, max_bytes=MAX_BYTES_FILE_READ):\n return ComputeLogFileData(\n path=f"{key}.{io_type}", data=None, cursor=0, size=0, download_url=None\n )\n\n def on_subscribe(self, subscription):\n pass\n\n def on_unsubscribe(self, subscription):\n pass\n\n @contextmanager\n def capture_logs(self, log_key: Sequence[str]) -> Generator[CapturedLogContext, None, None]:\n yield CapturedLogContext(log_key=log_key)\n\n def is_capture_complete(self, log_key: Sequence[str]):\n return True\n\n @contextmanager\n def open_log_stream(\n self, log_key: Sequence[str], io_type: ComputeIOType\n ) -> Generator[Optional[IO], None, None]:\n yield None\n\n def get_log_data(\n self,\n log_key: Sequence[str],\n cursor: Optional[str] = None,\n max_bytes: Optional[int] = None,\n ) -> CapturedLogData:\n return CapturedLogData(log_key=log_key)\n\n def get_log_metadata(self, log_key: Sequence[str]) -> CapturedLogMetadata:\n return CapturedLogMetadata()\n\n def delete_logs(\n self, log_key: Optional[Sequence[str]] = None, prefix: Optional[Sequence[str]] = None\n ):\n pass\n\n def subscribe(\n self, log_key: Sequence[str], cursor: Optional[str] = None\n ) -> CapturedLogSubscription:\n return CapturedLogSubscription(self, log_key, cursor)\n\n def unsubscribe(self, subscription: CapturedLogSubscription):\n pass
\n
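A small sketch, with an arbitrary log key, showing the no-op manager's behavior: every capture is reported as complete and reads return empty payloads.

.. code-block:: python

    from dagster._core.storage.compute_log_manager import ComputeIOType
    from dagster._core.storage.noop_compute_log_manager import NoOpComputeLogManager

    manager = NoOpComputeLogManager()
    log_key = ["example_run_id", "compute_logs", "example_step"]

    # captures are always treated as complete, and nothing is ever stored
    assert manager.is_capture_complete(log_key)

    log_data = manager.get_log_data(log_key)
    print(log_data.stdout, log_data.stderr)  # nothing captured

    file_data = manager.read_logs_file("example_run_id", "example_step", ComputeIOType.STDOUT)
    print(file_data.data, file_data.download_url)  # no data, no download URL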
", "current_page_name": "_modules/dagster/_core/storage/noop_compute_log_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.noop_compute_log_manager"}, "root": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.root

\nimport os\nfrom tempfile import TemporaryDirectory\nfrom typing import Optional\n\nfrom typing_extensions import TypedDict\n\nfrom dagster import (\n    StringSource,\n    _check as check,\n)\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData\n\n\nclass LocalArtifactStorageConfig(TypedDict):\n    base_dir: str\n\n\n
[docs]class LocalArtifactStorage(ConfigurableClass):\n def __init__(self, base_dir: str, inst_data: Optional[ConfigurableClassData] = None):\n self._base_dir = base_dir\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n @property\n def base_dir(self) -> str:\n return self._base_dir\n\n def file_manager_dir(self, run_id: str) -> str:\n check.str_param(run_id, "run_id")\n return os.path.join(self.base_dir, "storage", run_id, "files")\n\n @property\n def storage_dir(self) -> str:\n return os.path.join(self.base_dir, "storage")\n\n @property\n def schedules_dir(self) -> str:\n return os.path.join(self.base_dir, "schedules")\n\n @classmethod\n def from_config_value(\n cls, inst_data: Optional[ConfigurableClassData], config_value: LocalArtifactStorageConfig\n ) -> "LocalArtifactStorage":\n return LocalArtifactStorage(inst_data=inst_data, **config_value)\n\n @classmethod\n def config_type(cls) -> UserConfigSchema:\n return {"base_dir": StringSource}\n\n def dispose(self):\n pass
\n\n\nclass TemporaryLocalArtifactStorage(LocalArtifactStorage):\n """Used by ephemeral DagsterInstances, defers directory creation til\n access since many uses of ephemeral instance do not require artifact directory.\n """\n\n def __init__(self):\n self._temp_dir = None\n\n @property\n def base_dir(self):\n if self._temp_dir is None:\n self._temp_dir = TemporaryDirectory()\n return self._temp_dir.name\n\n def dispose(self):\n if self._temp_dir:\n self._temp_dir.cleanup()\n
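For reference, a sketch of the directory layout ``LocalArtifactStorage`` derives from its base directory; the ``/tmp/dagster_home`` path and run id are placeholders standing in for ``$DAGSTER_HOME`` and a real run.

.. code-block:: python

    from dagster._core.storage.root import LocalArtifactStorage

    # base_dir here stands in for $DAGSTER_HOME
    storage = LocalArtifactStorage(base_dir="/tmp/dagster_home")

    print(storage.storage_dir)    # /tmp/dagster_home/storage
    print(storage.schedules_dir)  # /tmp/dagster_home/schedules
    print(storage.file_manager_dir("example_run_id"))
    # /tmp/dagster_home/storage/example_run_id/files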
", "current_page_name": "_modules/dagster/_core/storage/root", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.root"}, "runs": {"base": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.runs.base

\nfrom abc import ABC, abstractmethod\nfrom typing import TYPE_CHECKING, Mapping, Optional, Sequence, Set, Tuple, Union\n\nfrom typing_extensions import TypedDict\n\nfrom dagster._core.events import DagsterEvent\nfrom dagster._core.execution.backfill import BulkActionStatus, PartitionBackfill\nfrom dagster._core.instance import MayHaveInstanceWeakref, T_DagsterInstance\nfrom dagster._core.snap import ExecutionPlanSnapshot, JobSnapshot\nfrom dagster._core.storage.dagster_run import (\n    DagsterRun,\n    JobBucket,\n    RunPartitionData,\n    RunRecord,\n    RunsFilter,\n    TagBucket,\n)\nfrom dagster._core.storage.sql import AlembicVersion\nfrom dagster._daemon.types import DaemonHeartbeat\nfrom dagster._utils import PrintFn\n\nfrom ..daemon_cursor import DaemonCursorStorage\n\nif TYPE_CHECKING:\n    from dagster._core.host_representation.origin import ExternalJobOrigin\n\n\nclass RunGroupInfo(TypedDict):\n    count: int\n    runs: Sequence[DagsterRun]\n\n\n
[docs]class RunStorage(ABC, MayHaveInstanceWeakref[T_DagsterInstance], DaemonCursorStorage):\n """Abstract base class for storing pipeline run history.\n\n Note that run storages using SQL databases as backing stores should implement\n :py:class:`~dagster._core.storage.runs.SqlRunStorage`.\n\n Users should not directly instantiate concrete subclasses of this class; they are instantiated\n by internal machinery when ``dagster-webserver`` and ``dagster-graphql`` load, based on the values in the\n ``dagster.yaml`` file in ``$DAGSTER_HOME``. Configuration of concrete subclasses of this class\n should be done by setting values in that file.\n """\n\n @abstractmethod\n def add_run(self, dagster_run: DagsterRun) -> DagsterRun:\n """Add a run to storage.\n\n If a run already exists with the same ID, raise DagsterRunAlreadyExists\n If the run's snapshot ID does not exist raise DagsterSnapshotDoesNotExist\n\n Args:\n dagster_run (DagsterRun): The run to add.\n """\n\n @abstractmethod\n def handle_run_event(self, run_id: str, event: DagsterEvent) -> None:\n """Update run storage in accordance to a pipeline run related DagsterEvent.\n\n Args:\n run_id (str)\n event (DagsterEvent)\n """\n\n @abstractmethod\n def get_runs(\n self,\n filters: Optional[RunsFilter] = None,\n cursor: Optional[str] = None,\n limit: Optional[int] = None,\n bucket_by: Optional[Union[JobBucket, TagBucket]] = None,\n ) -> Sequence[DagsterRun]:\n """Return all the runs present in the storage that match the given filters.\n\n Args:\n filters (Optional[RunsFilter]) -- The\n :py:class:`~dagster._core.storage.pipeline_run.RunsFilter` by which to filter\n runs\n cursor (Optional[str]): Starting cursor (run_id) of range of runs\n limit (Optional[int]): Number of results to get. Defaults to infinite.\n\n Returns:\n List[PipelineRun]\n """\n\n @abstractmethod\n def get_run_ids(\n self,\n filters: Optional[RunsFilter] = None,\n cursor: Optional[str] = None,\n limit: Optional[int] = None,\n ) -> Sequence[str]:\n """Return all the run IDs for runs present in the storage that match the given filters.\n\n Args:\n filters (Optional[RunsFilter]) -- The\n :py:class:`~dagster._core.storage.pipeline_run.RunsFilter` by which to filter\n runs\n cursor (Optional[str]): Starting cursor (run_id) of range of runs\n limit (Optional[int]): Number of results to get. Defaults to infinite.\n\n Returns:\n Sequence[str]\n """\n\n @abstractmethod\n def get_runs_count(self, filters: Optional[RunsFilter] = None) -> int:\n """Return the number of runs present in the storage that match the given filters.\n\n Args:\n filters (Optional[RunsFilter]) -- The\n :py:class:`~dagster._core.storage.pipeline_run.PipelineRunFilter` by which to filter\n runs\n\n Returns:\n int: The number of runs that match the given filters.\n """\n\n @abstractmethod\n def get_run_group(self, run_id: str) -> Optional[Tuple[str, Sequence[DagsterRun]]]:\n """Get the run group to which a given run belongs.\n\n Args:\n run_id (str): If the corresponding run is the descendant of some root run (i.e., there\n is a root_run_id on the :py:class:`PipelineRun`), that root run and all of its\n descendants are returned; otherwise, the group will consist only of the given run\n (a run that does not descend from any root is its own root).\n\n Returns:\n Optional[Tuple[string, List[PipelineRun]]]: If there is a corresponding run group, tuple\n whose first element is the root_run_id and whose second element is a list of all the\n descendent runs. 
Otherwise `None`.\n """\n\n @abstractmethod\n def get_run_records(\n self,\n filters: Optional[RunsFilter] = None,\n limit: Optional[int] = None,\n order_by: Optional[str] = None,\n ascending: bool = False,\n cursor: Optional[str] = None,\n bucket_by: Optional[Union[JobBucket, TagBucket]] = None,\n ) -> Sequence[RunRecord]:\n """Return a list of run records stored in the run storage, sorted by the given column in given order.\n\n Args:\n filters (Optional[RunsFilter]): the filter by which to filter runs.\n limit (Optional[int]): Number of results to get. Defaults to infinite.\n order_by (Optional[str]): Name of the column to sort by. Defaults to id.\n ascending (Optional[bool]): Sort the result in ascending order if True, descending\n otherwise. Defaults to descending.\n\n Returns:\n List[RunRecord]: List of run records stored in the run storage.\n """\n\n @abstractmethod\n def get_run_tags(\n self,\n tag_keys: Optional[Sequence[str]] = None,\n value_prefix: Optional[str] = None,\n limit: Optional[int] = None,\n ) -> Sequence[Tuple[str, Set[str]]]:\n """Get a list of tag keys and the values that have been associated with them.\n\n Args:\n tag_keys (Optional[Sequence[str]]): tag keys to filter by.\n\n Returns:\n List[Tuple[str, Set[str]]]\n """\n\n @abstractmethod\n def get_run_tag_keys(self) -> Sequence[str]:\n """Get a list of tag keys.\n\n Returns:\n List[str]\n """\n\n @abstractmethod\n def add_run_tags(self, run_id: str, new_tags: Mapping[str, str]) -> None:\n """Add additional tags for a pipeline run.\n\n Args:\n run_id (str)\n new_tags (Dict[string, string])\n """\n\n @abstractmethod\n def has_run(self, run_id: str) -> bool:\n """Check if the storage contains a run.\n\n Args:\n run_id (str): The id of the run\n\n Returns:\n bool\n """\n\n def add_snapshot(\n self,\n snapshot: Union[JobSnapshot, ExecutionPlanSnapshot],\n snapshot_id: Optional[str] = None,\n ) -> None:\n """Add a snapshot to the storage.\n\n Args:\n snapshot (Union[PipelineSnapshot, ExecutionPlanSnapshot])\n snapshot_id (Optional[str]): [Internal] The id of the snapshot. If not provided, the\n snapshot id will be generated from a hash of the snapshot. This should only be used\n in debugging, where we might want to import a historical run whose snapshots were\n calculated using a different hash function than the current code.\n """\n if isinstance(snapshot, JobSnapshot):\n self.add_job_snapshot(snapshot, snapshot_id)\n else:\n self.add_execution_plan_snapshot(snapshot, snapshot_id)\n\n def has_snapshot(self, snapshot_id: str):\n return self.has_job_snapshot(snapshot_id) or self.has_execution_plan_snapshot(snapshot_id)\n\n @abstractmethod\n def has_job_snapshot(self, job_snapshot_id: str) -> bool:\n """Check to see if storage contains a pipeline snapshot.\n\n Args:\n pipeline_snapshot_id (str): The id of the run.\n\n Returns:\n bool\n """\n\n @abstractmethod\n def add_job_snapshot(self, job_snapshot: JobSnapshot, snapshot_id: Optional[str] = None) -> str:\n """Add a pipeline snapshot to the run store.\n\n Pipeline snapshots are content-addressable, meaning\n that the ID for a snapshot is a hash based on the\n body of the snapshot. This function returns\n that snapshot ID.\n\n Args:\n job_snapshot (PipelineSnapshot)\n snapshot_id (Optional[str]): [Internal] The id of the snapshot. If not provided, the\n snapshot id will be generated from a hash of the snapshot. 
This should only be used\n in debugging, where we might want to import a historical run whose snapshots were\n calculated using a different hash function than the current code.\n\n Return:\n str: The job_snapshot_id\n """\n\n @abstractmethod\n def get_job_snapshot(self, job_snapshot_id: str) -> JobSnapshot:\n """Fetch a snapshot by ID.\n\n Args:\n job_snapshot_id (str)\n\n Returns:\n PipelineSnapshot\n """\n\n @abstractmethod\n def has_execution_plan_snapshot(self, execution_plan_snapshot_id: str) -> bool:\n """Check to see if storage contains an execution plan snapshot.\n\n Args:\n execution_plan_snapshot_id (str): The id of the execution plan.\n\n Returns:\n bool\n """\n\n @abstractmethod\n def add_execution_plan_snapshot(\n self, execution_plan_snapshot: ExecutionPlanSnapshot, snapshot_id: Optional[str] = None\n ) -> str:\n """Add an execution plan snapshot to the run store.\n\n Execution plan snapshots are content-addressable, meaning\n that the ID for a snapshot is a hash based on the\n body of the snapshot. This function returns\n that snapshot ID.\n\n Args:\n execution_plan_snapshot (ExecutionPlanSnapshot)\n snapshot_id (Optional[str]): [Internal] The id of the snapshot. If not provided, the\n snapshot id will be generated from a hash of the snapshot. This should only be used\n in debugging, where we might want to import a historical run whose snapshots were\n calculated using a different hash function than the current code.\n\n Return:\n str: The execution_plan_snapshot_id\n """\n\n @abstractmethod\n def get_execution_plan_snapshot(self, execution_plan_snapshot_id: str) -> ExecutionPlanSnapshot:\n """Fetch a snapshot by ID.\n\n Args:\n execution_plan_snapshot_id (str)\n\n Returns:\n ExecutionPlanSnapshot\n """\n\n @abstractmethod\n def wipe(self) -> None:\n """Clears the run storage."""\n\n @abstractmethod\n def delete_run(self, run_id: str) -> None:\n """Remove a run from storage."""\n\n @property\n def supports_bucket_queries(self) -> bool:\n return False\n\n @abstractmethod\n def get_run_partition_data(self, runs_filter: RunsFilter) -> Sequence[RunPartitionData]:\n """Get run partition data for a given partitioned job."""\n\n def migrate(self, print_fn: Optional[PrintFn] = None, force_rebuild_all: bool = False) -> None:\n """Call this method to run any required data migrations."""\n\n def optimize(self, print_fn: Optional[PrintFn] = None, force_rebuild_all: bool = False) -> None:\n """Call this method to run any optional data migrations for optimized reads."""\n\n def dispose(self) -> None:\n """Explicit lifecycle management."""\n\n def optimize_for_webserver(self, statement_timeout: int, pool_recycle: int) -> None:\n """Allows for optimizing database connection / use in the context of a long lived webserver process."""\n\n # Daemon Heartbeat Storage\n #\n # Holds heartbeats from the Dagster Daemon so that other system components can alert when it's not\n # alive.\n # This is temporarily placed along with run storage to avoid adding a new instance concept. 
It\n # should be split out once all metadata storages are configured together.\n\n @abstractmethod\n def add_daemon_heartbeat(self, daemon_heartbeat: DaemonHeartbeat) -> None:\n """Called on a regular interval by the daemon."""\n\n @abstractmethod\n def get_daemon_heartbeats(self) -> Mapping[str, DaemonHeartbeat]:\n """Latest heartbeats of all daemon types."""\n\n @abstractmethod\n def wipe_daemon_heartbeats(self) -> None:\n """Wipe all daemon heartbeats."""\n\n # Backfill storage\n @abstractmethod\n def get_backfills(\n self,\n status: Optional[BulkActionStatus] = None,\n cursor: Optional[str] = None,\n limit: Optional[int] = None,\n ) -> Sequence[PartitionBackfill]:\n """Get a list of partition backfills."""\n\n @abstractmethod\n def get_backfill(self, backfill_id: str) -> Optional[PartitionBackfill]:\n """Get the partition backfill of the given backfill id."""\n\n @abstractmethod\n def add_backfill(self, partition_backfill: PartitionBackfill):\n """Add partition backfill to run storage."""\n\n @abstractmethod\n def update_backfill(self, partition_backfill: PartitionBackfill):\n """Update a partition backfill in run storage."""\n\n def alembic_version(self) -> Optional[AlembicVersion]:\n return None\n\n @abstractmethod\n def replace_job_origin(self, run: "DagsterRun", job_origin: "ExternalJobOrigin") -> None: ...
\n
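Since the docstring above notes that users should not instantiate ``RunStorage`` subclasses directly, the hedged sketch below queries run storage through an ephemeral ``DagsterInstance`` instead; the filter values are arbitrary examples.

.. code-block:: python

    from dagster import DagsterInstance, DagsterRunStatus
    from dagster._core.storage.dagster_run import RunsFilter

    # an ephemeral instance wires up an in-process run storage for us
    instance = DagsterInstance.ephemeral()

    failed_runs = instance.get_runs(
        filters=RunsFilter(statuses=[DagsterRunStatus.FAILURE]),
        limit=10,
    )
    print(len(failed_runs))  # 0 on a fresh instance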
", "current_page_name": "_modules/dagster/_core/storage/runs/base", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.runs.base"}, "sql_run_storage": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.runs.sql_run_storage

\nimport logging\nimport uuid\nimport zlib\nfrom abc import abstractmethod\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom enum import Enum\nfrom typing import (\n    Any,\n    Callable,\n    ContextManager,\n    Dict,\n    Iterable,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n    Union,\n    cast,\n)\n\nimport pendulum\nimport sqlalchemy as db\nimport sqlalchemy.exc as db_exc\nfrom sqlalchemy.engine import Connection\n\nimport dagster._check as check\nfrom dagster._core.errors import (\n    DagsterInvariantViolationError,\n    DagsterRunAlreadyExists,\n    DagsterRunNotFoundError,\n    DagsterSnapshotDoesNotExist,\n)\nfrom dagster._core.events import EVENT_TYPE_TO_PIPELINE_RUN_STATUS, DagsterEvent, DagsterEventType\nfrom dagster._core.execution.backfill import BulkActionStatus, PartitionBackfill\nfrom dagster._core.host_representation.origin import ExternalJobOrigin\nfrom dagster._core.snap import (\n    ExecutionPlanSnapshot,\n    JobSnapshot,\n    create_execution_plan_snapshot_id,\n    create_job_snapshot_id,\n)\nfrom dagster._core.storage.sql import SqlAlchemyQuery\nfrom dagster._core.storage.sqlalchemy_compat import (\n    db_fetch_mappings,\n    db_scalar_subquery,\n    db_select,\n    db_subquery,\n)\nfrom dagster._core.storage.tags import (\n    PARTITION_NAME_TAG,\n    PARTITION_SET_TAG,\n    REPOSITORY_LABEL_TAG,\n    ROOT_RUN_ID_TAG,\n)\nfrom dagster._daemon.types import DaemonHeartbeat\nfrom dagster._serdes import (\n    deserialize_value,\n    serialize_value,\n)\nfrom dagster._seven import JSONDecodeError\nfrom dagster._utils import PrintFn, utc_datetime_from_timestamp\nfrom dagster._utils.merger import merge_dicts\n\nfrom ..dagster_run import (\n    DagsterRun,\n    DagsterRunStatus,\n    JobBucket,\n    RunPartitionData,\n    RunRecord,\n    RunsFilter,\n    TagBucket,\n)\nfrom .base import RunStorage\nfrom .migration import (\n    OPTIONAL_DATA_MIGRATIONS,\n    REQUIRED_DATA_MIGRATIONS,\n    RUN_PARTITIONS,\n    MigrationFn,\n)\nfrom .schema import (\n    BulkActionsTable,\n    DaemonHeartbeatsTable,\n    InstanceInfo,\n    KeyValueStoreTable,\n    RunsTable,\n    RunTagsTable,\n    SecondaryIndexMigrationTable,\n    SnapshotsTable,\n)\n\n\nclass SnapshotType(Enum):\n    PIPELINE = "PIPELINE"\n    EXECUTION_PLAN = "EXECUTION_PLAN"\n\n\n
[docs]class SqlRunStorage(RunStorage):\n """Base class for SQL based run storages."""\n\n @abstractmethod\n def connect(self) -> ContextManager[Connection]:\n """Context manager yielding a sqlalchemy.engine.Connection."""\n\n @abstractmethod\n def upgrade(self) -> None:\n """This method should perform any schema or data migrations necessary to bring an\n out-of-date instance of the storage up to date.\n """\n\n def fetchall(self, query: SqlAlchemyQuery) -> Sequence[Any]:\n with self.connect() as conn:\n return db_fetch_mappings(conn, query)\n\n def fetchone(self, query: SqlAlchemyQuery) -> Optional[Any]:\n with self.connect() as conn:\n if db.__version__.startswith("2."):\n return conn.execute(query).mappings().first()\n else:\n return conn.execute(query).fetchone()\n\n def add_run(self, dagster_run: DagsterRun) -> DagsterRun:\n check.inst_param(dagster_run, "dagster_run", DagsterRun)\n\n if dagster_run.job_snapshot_id and not self.has_job_snapshot(dagster_run.job_snapshot_id):\n raise DagsterSnapshotDoesNotExist(\n f"Snapshot {dagster_run.job_snapshot_id} does not exist in run storage"\n )\n\n has_tags = dagster_run.tags and len(dagster_run.tags) > 0\n partition = dagster_run.tags.get(PARTITION_NAME_TAG) if has_tags else None\n partition_set = dagster_run.tags.get(PARTITION_SET_TAG) if has_tags else None\n\n runs_insert = RunsTable.insert().values(\n run_id=dagster_run.run_id,\n pipeline_name=dagster_run.job_name,\n status=dagster_run.status.value,\n run_body=serialize_value(dagster_run),\n snapshot_id=dagster_run.job_snapshot_id,\n partition=partition,\n partition_set=partition_set,\n )\n with self.connect() as conn:\n try:\n conn.execute(runs_insert)\n except db_exc.IntegrityError as exc:\n raise DagsterRunAlreadyExists from exc\n\n tags_to_insert = dagster_run.tags_for_storage()\n if tags_to_insert:\n conn.execute(\n RunTagsTable.insert(),\n [\n dict(run_id=dagster_run.run_id, key=k, value=v)\n for k, v in tags_to_insert.items()\n ],\n )\n\n return dagster_run\n\n def handle_run_event(self, run_id: str, event: DagsterEvent) -> None:\n check.str_param(run_id, "run_id")\n check.inst_param(event, "event", DagsterEvent)\n\n if event.event_type not in EVENT_TYPE_TO_PIPELINE_RUN_STATUS:\n return\n\n run = self._get_run_by_id(run_id)\n if not run:\n # TODO log?\n return\n\n new_job_status = EVENT_TYPE_TO_PIPELINE_RUN_STATUS[event.event_type]\n\n run_stats_cols_in_index = self.has_run_stats_index_cols()\n\n kwargs = {}\n\n # consider changing the `handle_run_event` signature to get timestamp off of the\n # EventLogEntry instead of the DagsterEvent, for consistency\n now = pendulum.now("UTC")\n\n if run_stats_cols_in_index and event.event_type == DagsterEventType.PIPELINE_START:\n kwargs["start_time"] = now.timestamp()\n\n if run_stats_cols_in_index and event.event_type in {\n DagsterEventType.PIPELINE_CANCELED,\n DagsterEventType.PIPELINE_FAILURE,\n DagsterEventType.PIPELINE_SUCCESS,\n }:\n kwargs["end_time"] = now.timestamp()\n\n with self.connect() as conn:\n conn.execute(\n RunsTable.update()\n .where(RunsTable.c.run_id == run_id)\n .values(\n run_body=serialize_value(run.with_status(new_job_status)),\n status=new_job_status.value,\n update_timestamp=now,\n **kwargs,\n )\n )\n\n def _row_to_run(self, row: Dict) -> DagsterRun:\n run = deserialize_value(row["run_body"], DagsterRun)\n status = DagsterRunStatus(row["status"])\n # NOTE: the status column is more trustworthy than the status in the run body, since concurrent\n # writes (e.g. 
handle_run_event and add_tags) can cause the status in the body to be out of\n # overriden with an old value.\n return run.with_status(status)\n\n def _rows_to_runs(self, rows: Iterable[Dict]) -> Sequence[DagsterRun]:\n return list(map(self._row_to_run, rows))\n\n def _add_cursor_limit_to_query(\n self,\n query: SqlAlchemyQuery,\n cursor: Optional[str],\n limit: Optional[int],\n order_by: Optional[str],\n ascending: Optional[bool],\n ) -> SqlAlchemyQuery:\n """Helper function to deal with cursor/limit pagination args."""\n if cursor:\n cursor_query = db_select([RunsTable.c.id]).where(RunsTable.c.run_id == cursor)\n query = query.where(RunsTable.c.id < db_scalar_subquery(cursor_query))\n\n if limit:\n query = query.limit(limit)\n\n sorting_column = getattr(RunsTable.c, order_by) if order_by else RunsTable.c.id\n direction = db.asc if ascending else db.desc\n query = query.order_by(direction(sorting_column))\n\n return query\n\n @property\n def supports_intersect(self) -> bool:\n return True\n\n def _add_filters_to_query(self, query: SqlAlchemyQuery, filters: RunsFilter) -> SqlAlchemyQuery:\n check.inst_param(filters, "filters", RunsFilter)\n\n if filters.run_ids:\n query = query.where(RunsTable.c.run_id.in_(filters.run_ids))\n\n if filters.job_name:\n query = query.where(RunsTable.c.pipeline_name == filters.job_name)\n\n if filters.statuses:\n query = query.where(\n RunsTable.c.status.in_([status.value for status in filters.statuses])\n )\n\n if filters.snapshot_id:\n query = query.where(RunsTable.c.snapshot_id == filters.snapshot_id)\n\n if filters.updated_after:\n query = query.where(RunsTable.c.update_timestamp > filters.updated_after)\n\n if filters.updated_before:\n query = query.where(RunsTable.c.update_timestamp < filters.updated_before)\n\n if filters.created_after:\n query = query.where(RunsTable.c.create_timestamp > filters.created_after)\n\n if filters.created_before:\n query = query.where(RunsTable.c.create_timestamp < filters.created_before)\n\n return query\n\n def _runs_query(\n self,\n filters: Optional[RunsFilter] = None,\n cursor: Optional[str] = None,\n limit: Optional[int] = None,\n columns: Optional[Sequence[str]] = None,\n order_by: Optional[str] = None,\n ascending: bool = False,\n bucket_by: Optional[Union[JobBucket, TagBucket]] = None,\n ) -> SqlAlchemyQuery:\n filters = check.opt_inst_param(filters, "filters", RunsFilter, default=RunsFilter())\n check.opt_str_param(cursor, "cursor")\n check.opt_int_param(limit, "limit")\n check.opt_sequence_param(columns, "columns")\n check.opt_str_param(order_by, "order_by")\n check.opt_bool_param(ascending, "ascending")\n\n if columns is None:\n columns = ["run_body", "status"]\n\n if filters.tags:\n table = self._apply_tags_table_joins(RunsTable, filters.tags)\n else:\n table = RunsTable\n\n base_query = db_select([getattr(RunsTable.c, column) for column in columns]).select_from(\n table\n )\n base_query = self._add_filters_to_query(base_query, filters)\n return self._add_cursor_limit_to_query(base_query, cursor, limit, order_by, ascending)\n\n def _apply_tags_table_joins(\n self,\n table: db.Table,\n tags: Mapping[str, Union[str, Sequence[str]]],\n ) -> db.Table:\n multi_join = len(tags) > 1\n i = 0\n for key, value in tags.items():\n i += 1\n tags_table = (\n db_subquery(db_select([RunTagsTable]), f"run_tags_subquery_{i}")\n if multi_join\n else RunTagsTable\n )\n table = table.join(\n tags_table,\n db.and_(\n RunsTable.c.run_id == tags_table.c.run_id,\n tags_table.c.key == key,\n (\n tags_table.c.value == value\n if 
isinstance(value, str)\n else tags_table.c.value.in_(value)\n ),\n ),\n )\n return table\n\n def get_runs(\n self,\n filters: Optional[RunsFilter] = None,\n cursor: Optional[str] = None,\n limit: Optional[int] = None,\n bucket_by: Optional[Union[JobBucket, TagBucket]] = None,\n ) -> Sequence[DagsterRun]:\n query = self._runs_query(filters, cursor, limit, bucket_by=bucket_by)\n rows = self.fetchall(query)\n return self._rows_to_runs(rows)\n\n def get_run_ids(\n self,\n filters: Optional[RunsFilter] = None,\n cursor: Optional[str] = None,\n limit: Optional[int] = None,\n ) -> Sequence[str]:\n query = self._runs_query(filters=filters, cursor=cursor, limit=limit, columns=["run_id"])\n rows = self.fetchall(query)\n return [row["run_id"] for row in rows]\n\n def get_runs_count(self, filters: Optional[RunsFilter] = None) -> int:\n subquery = db_subquery(self._runs_query(filters=filters))\n query = db_select([db.func.count().label("count")]).select_from(subquery)\n row = self.fetchone(query)\n count = row["count"] if row else 0\n return count\n\n def _get_run_by_id(self, run_id: str) -> Optional[DagsterRun]:\n check.str_param(run_id, "run_id")\n\n query = db_select([RunsTable.c.run_body, RunsTable.c.status]).where(\n RunsTable.c.run_id == run_id\n )\n rows = self.fetchall(query)\n return self._row_to_run(rows[0]) if rows else None\n\n def get_run_records(\n self,\n filters: Optional[RunsFilter] = None,\n limit: Optional[int] = None,\n order_by: Optional[str] = None,\n ascending: bool = False,\n cursor: Optional[str] = None,\n bucket_by: Optional[Union[JobBucket, TagBucket]] = None,\n ) -> Sequence[RunRecord]:\n filters = check.opt_inst_param(filters, "filters", RunsFilter, default=RunsFilter())\n check.opt_int_param(limit, "limit")\n\n columns = ["id", "run_body", "status", "create_timestamp", "update_timestamp"]\n\n if self.has_run_stats_index_cols():\n columns += ["start_time", "end_time"]\n # only fetch columns we use to build RunRecord\n query = self._runs_query(\n filters=filters,\n limit=limit,\n columns=columns,\n order_by=order_by,\n ascending=ascending,\n cursor=cursor,\n bucket_by=bucket_by,\n )\n\n rows = self.fetchall(query)\n return [\n RunRecord(\n storage_id=check.int_param(row["id"], "id"),\n dagster_run=self._row_to_run(row),\n create_timestamp=check.inst(row["create_timestamp"], datetime),\n update_timestamp=check.inst(row["update_timestamp"], datetime),\n start_time=(\n check.opt_inst(row["start_time"], float) if "start_time" in row else None\n ),\n end_time=check.opt_inst(row["end_time"], float) if "end_time" in row else None,\n )\n for row in rows\n ]\n\n def get_run_tags(\n self,\n tag_keys: Optional[Sequence[str]] = None,\n value_prefix: Optional[str] = None,\n limit: Optional[int] = None,\n ) -> Sequence[Tuple[str, Set[str]]]:\n result = defaultdict(set)\n query = (\n db_select([RunTagsTable.c.key, RunTagsTable.c.value])\n .distinct()\n .order_by(RunTagsTable.c.key, RunTagsTable.c.value)\n )\n if tag_keys:\n query = query.where(RunTagsTable.c.key.in_(tag_keys))\n if value_prefix:\n query = query.where(RunTagsTable.c.value.startswith(value_prefix))\n if limit:\n query = query.limit(limit)\n rows = self.fetchall(query)\n for r in rows:\n result[r["key"]].add(r["value"])\n return sorted(list([(k, v) for k, v in result.items()]), key=lambda x: x[0])\n\n def get_run_tag_keys(self) -> Sequence[str]:\n query = db_select([RunTagsTable.c.key]).distinct().order_by(RunTagsTable.c.key)\n rows = self.fetchall(query)\n return sorted([r["key"] for r in rows])\n\n def add_run_tags(self, 
run_id: str, new_tags: Mapping[str, str]) -> None:\n check.str_param(run_id, "run_id")\n check.mapping_param(new_tags, "new_tags", key_type=str, value_type=str)\n\n run = self._get_run_by_id(run_id)\n if not run:\n raise DagsterRunNotFoundError(\n f"Run {run_id} was not found in instance.", invalid_run_id=run_id\n )\n current_tags = run.tags if run.tags else {}\n\n all_tags = merge_dicts(current_tags, new_tags)\n partition = all_tags.get(PARTITION_NAME_TAG)\n partition_set = all_tags.get(PARTITION_SET_TAG)\n\n with self.connect() as conn:\n conn.execute(\n RunsTable.update()\n .where(RunsTable.c.run_id == run_id)\n .values(\n run_body=serialize_value(run.with_tags(merge_dicts(current_tags, new_tags))),\n partition=partition,\n partition_set=partition_set,\n update_timestamp=pendulum.now("UTC"),\n )\n )\n\n current_tags_set = set(current_tags.keys())\n new_tags_set = set(new_tags.keys())\n\n existing_tags = current_tags_set & new_tags_set\n added_tags = new_tags_set.difference(existing_tags)\n\n for tag in existing_tags:\n conn.execute(\n RunTagsTable.update()\n .where(db.and_(RunTagsTable.c.run_id == run_id, RunTagsTable.c.key == tag))\n .values(value=new_tags[tag])\n )\n\n if added_tags:\n conn.execute(\n RunTagsTable.insert(),\n [dict(run_id=run_id, key=tag, value=new_tags[tag]) for tag in added_tags],\n )\n\n def get_run_group(self, run_id: str) -> Tuple[str, Sequence[DagsterRun]]:\n check.str_param(run_id, "run_id")\n dagster_run = self._get_run_by_id(run_id)\n if not dagster_run:\n raise DagsterRunNotFoundError(\n f"Run {run_id} was not found in instance.", invalid_run_id=run_id\n )\n\n # find root_run\n root_run_id = dagster_run.root_run_id if dagster_run.root_run_id else dagster_run.run_id\n root_run = self._get_run_by_id(root_run_id)\n if not root_run:\n raise DagsterRunNotFoundError(\n f"Run id {root_run_id} set as root run id for run {run_id} was not found in"\n " instance.",\n invalid_run_id=root_run_id,\n )\n\n # root_run_id to run_id 1:1 mapping\n # https://github.com/dagster-io/dagster/issues/2495\n # Note: we currently use tags to persist the run group info\n root_to_run = db_subquery(\n db_select(\n [RunTagsTable.c.value.label("root_run_id"), RunTagsTable.c.run_id.label("run_id")]\n ).where(\n db.and_(RunTagsTable.c.key == ROOT_RUN_ID_TAG, RunTagsTable.c.value == root_run_id)\n ),\n "root_to_run",\n )\n # get run group\n run_group_query = db_select([RunsTable.c.run_body, RunsTable.c.status]).select_from(\n root_to_run.join(\n RunsTable,\n root_to_run.c.run_id == RunsTable.c.run_id,\n isouter=True,\n )\n )\n\n res = self.fetchall(run_group_query)\n run_group = self._rows_to_runs(res)\n\n return (root_run_id, [root_run, *run_group])\n\n def has_run(self, run_id: str) -> bool:\n check.str_param(run_id, "run_id")\n return bool(self._get_run_by_id(run_id))\n\n def delete_run(self, run_id: str) -> None:\n check.str_param(run_id, "run_id")\n query = db.delete(RunsTable).where(RunsTable.c.run_id == run_id)\n with self.connect() as conn:\n conn.execute(query)\n\n def has_job_snapshot(self, job_snapshot_id: str) -> bool:\n check.str_param(job_snapshot_id, "job_snapshot_id")\n return self._has_snapshot_id(job_snapshot_id)\n\n def add_job_snapshot(self, job_snapshot: JobSnapshot, snapshot_id: Optional[str] = None) -> str:\n check.inst_param(job_snapshot, "job_snapshot", JobSnapshot)\n check.opt_str_param(snapshot_id, "snapshot_id")\n\n if not snapshot_id:\n snapshot_id = create_job_snapshot_id(job_snapshot)\n\n return self._add_snapshot(\n snapshot_id=snapshot_id,\n 
snapshot_obj=job_snapshot,\n snapshot_type=SnapshotType.PIPELINE,\n )\n\n def get_job_snapshot(self, job_snapshot_id: str) -> JobSnapshot:\n check.str_param(job_snapshot_id, "job_snapshot_id")\n return self._get_snapshot(job_snapshot_id) # type: ignore # (allowed to return None?)\n\n def has_execution_plan_snapshot(self, execution_plan_snapshot_id: str) -> bool:\n check.str_param(execution_plan_snapshot_id, "execution_plan_snapshot_id")\n return bool(self.get_execution_plan_snapshot(execution_plan_snapshot_id))\n\n def add_execution_plan_snapshot(\n self, execution_plan_snapshot: ExecutionPlanSnapshot, snapshot_id: Optional[str] = None\n ) -> str:\n check.inst_param(execution_plan_snapshot, "execution_plan_snapshot", ExecutionPlanSnapshot)\n check.opt_str_param(snapshot_id, "snapshot_id")\n\n if not snapshot_id:\n snapshot_id = create_execution_plan_snapshot_id(execution_plan_snapshot)\n\n return self._add_snapshot(\n snapshot_id=snapshot_id,\n snapshot_obj=execution_plan_snapshot,\n snapshot_type=SnapshotType.EXECUTION_PLAN,\n )\n\n def get_execution_plan_snapshot(self, execution_plan_snapshot_id: str) -> ExecutionPlanSnapshot:\n check.str_param(execution_plan_snapshot_id, "execution_plan_snapshot_id")\n return self._get_snapshot(execution_plan_snapshot_id) # type: ignore # (allowed to return None?)\n\n def _add_snapshot(self, snapshot_id: str, snapshot_obj, snapshot_type: SnapshotType) -> str:\n check.str_param(snapshot_id, "snapshot_id")\n check.not_none_param(snapshot_obj, "snapshot_obj")\n check.inst_param(snapshot_type, "snapshot_type", SnapshotType)\n\n with self.connect() as conn:\n snapshot_insert = SnapshotsTable.insert().values(\n snapshot_id=snapshot_id,\n snapshot_body=zlib.compress(serialize_value(snapshot_obj).encode("utf-8")),\n snapshot_type=snapshot_type.value,\n )\n try:\n conn.execute(snapshot_insert)\n except db_exc.IntegrityError:\n # on_conflict_do_nothing equivalent\n pass\n\n return snapshot_id\n\n def get_run_storage_id(self) -> str:\n query = db_select([InstanceInfo.c.run_storage_id])\n row = self.fetchone(query)\n if not row:\n run_storage_id = str(uuid.uuid4())\n with self.connect() as conn:\n conn.execute(InstanceInfo.insert().values(run_storage_id=run_storage_id))\n return run_storage_id\n else:\n return row["run_storage_id"]\n\n def _has_snapshot_id(self, snapshot_id: str) -> bool:\n query = db_select([SnapshotsTable.c.snapshot_id]).where(\n SnapshotsTable.c.snapshot_id == snapshot_id\n )\n\n row = self.fetchone(query)\n\n return bool(row)\n\n def _get_snapshot(self, snapshot_id: str) -> Optional[JobSnapshot]:\n query = db_select([SnapshotsTable.c.snapshot_body]).where(\n SnapshotsTable.c.snapshot_id == snapshot_id\n )\n\n row = self.fetchone(query)\n\n return defensively_unpack_execution_plan_snapshot_query(logging, [row["snapshot_body"]]) if row else None # type: ignore\n\n def get_run_partition_data(self, runs_filter: RunsFilter) -> Sequence[RunPartitionData]:\n if self.has_built_index(RUN_PARTITIONS) and self.has_run_stats_index_cols():\n query = self._runs_query(\n filters=runs_filter,\n columns=["run_id", "status", "start_time", "end_time", "partition"],\n )\n rows = self.fetchall(query)\n\n # dedup by partition\n _partition_data_by_partition = {}\n for row in rows:\n if not row["partition"] or row["partition"] in _partition_data_by_partition:\n continue\n\n _partition_data_by_partition[row["partition"]] = RunPartitionData(\n run_id=row["run_id"],\n partition=row["partition"],\n status=DagsterRunStatus[row["status"]],\n start_time=row["start_time"],\n 
end_time=row["end_time"],\n )\n\n return list(_partition_data_by_partition.values())\n else:\n query = self._runs_query(filters=runs_filter)\n rows = self.fetchall(query)\n _partition_data_by_partition = {}\n for row in rows:\n run = self._row_to_run(row)\n partition = run.tags.get(PARTITION_NAME_TAG)\n if not partition or partition in _partition_data_by_partition:\n continue\n\n _partition_data_by_partition[partition] = RunPartitionData(\n run_id=run.run_id,\n partition=partition,\n status=run.status,\n start_time=None,\n end_time=None,\n )\n\n return list(_partition_data_by_partition.values())\n\n def _get_partition_runs(\n self, partition_set_name: str, partition_name: str\n ) -> Sequence[DagsterRun]:\n # utility method to help test reads off of the partition column\n if not self.has_built_index(RUN_PARTITIONS):\n # query by tags\n return self.get_runs(\n filters=RunsFilter(\n tags={\n PARTITION_SET_TAG: partition_set_name,\n PARTITION_NAME_TAG: partition_name,\n }\n )\n )\n else:\n query = (\n self._runs_query()\n .where(RunsTable.c.partition == partition_name)\n .where(RunsTable.c.partition_set == partition_set_name)\n )\n rows = self.fetchall(query)\n return self._rows_to_runs(rows)\n\n # Tracking data migrations over secondary indexes\n\n def _execute_data_migrations(\n self,\n migrations: Mapping[str, Callable[[], MigrationFn]],\n print_fn: Optional[PrintFn] = None,\n force_rebuild_all: bool = False,\n ) -> None:\n for migration_name, migration_fn in migrations.items():\n if self.has_built_index(migration_name):\n if not force_rebuild_all:\n if print_fn:\n print_fn(f"Skipping already applied data migration: {migration_name}")\n continue\n if print_fn:\n print_fn(f"Starting data migration: {migration_name}")\n migration_fn()(self, print_fn)\n self.mark_index_built(migration_name)\n if print_fn:\n print_fn(f"Finished data migration: {migration_name}")\n\n def migrate(self, print_fn: Optional[PrintFn] = None, force_rebuild_all: bool = False) -> None:\n self._execute_data_migrations(REQUIRED_DATA_MIGRATIONS, print_fn, force_rebuild_all)\n\n def optimize(self, print_fn: Optional[PrintFn] = None, force_rebuild_all: bool = False) -> None:\n self._execute_data_migrations(OPTIONAL_DATA_MIGRATIONS, print_fn, force_rebuild_all)\n\n def has_built_index(self, migration_name: str) -> bool:\n query = (\n db_select([1])\n .where(SecondaryIndexMigrationTable.c.name == migration_name)\n .where(SecondaryIndexMigrationTable.c.migration_completed != None) # noqa: E711\n .limit(1)\n )\n results = self.fetchall(query)\n\n return len(results) > 0\n\n def mark_index_built(self, migration_name: str) -> None:\n query = SecondaryIndexMigrationTable.insert().values(\n name=migration_name,\n migration_completed=datetime.now(),\n )\n with self.connect() as conn:\n try:\n conn.execute(query)\n except db_exc.IntegrityError:\n conn.execute(\n SecondaryIndexMigrationTable.update()\n .where(SecondaryIndexMigrationTable.c.name == migration_name)\n .values(migration_completed=datetime.now())\n )\n\n # Checking for migrations\n\n def has_run_stats_index_cols(self) -> bool:\n with self.connect() as conn:\n column_names = [x.get("name") for x in db.inspect(conn).get_columns(RunsTable.name)]\n return "start_time" in column_names and "end_time" in column_names\n\n def has_bulk_actions_selector_cols(self) -> bool:\n with self.connect() as conn:\n column_names = [\n x.get("name") for x in db.inspect(conn).get_columns(BulkActionsTable.name)\n ]\n return "selector_id" in column_names\n\n # Daemon heartbeats\n\n def 
add_daemon_heartbeat(self, daemon_heartbeat: DaemonHeartbeat) -> None:\n with self.connect() as conn:\n # insert, or update if already present\n try:\n conn.execute(\n DaemonHeartbeatsTable.insert().values(\n timestamp=utc_datetime_from_timestamp(daemon_heartbeat.timestamp),\n daemon_type=daemon_heartbeat.daemon_type,\n daemon_id=daemon_heartbeat.daemon_id,\n body=serialize_value(daemon_heartbeat),\n )\n )\n except db_exc.IntegrityError:\n conn.execute(\n DaemonHeartbeatsTable.update()\n .where(DaemonHeartbeatsTable.c.daemon_type == daemon_heartbeat.daemon_type)\n .values(\n timestamp=utc_datetime_from_timestamp(daemon_heartbeat.timestamp),\n daemon_id=daemon_heartbeat.daemon_id,\n body=serialize_value(daemon_heartbeat),\n )\n )\n\n def get_daemon_heartbeats(self) -> Mapping[str, DaemonHeartbeat]:\n rows = self.fetchall(db_select([DaemonHeartbeatsTable.c.body]))\n heartbeats = []\n for row in rows:\n heartbeats.append(deserialize_value(row["body"], DaemonHeartbeat))\n return {heartbeat.daemon_type: heartbeat for heartbeat in heartbeats}\n\n def wipe(self) -> None:\n """Clears the run storage."""\n with self.connect() as conn:\n # https://stackoverflow.com/a/54386260/324449\n conn.execute(RunsTable.delete())\n conn.execute(RunTagsTable.delete())\n conn.execute(SnapshotsTable.delete())\n conn.execute(DaemonHeartbeatsTable.delete())\n conn.execute(BulkActionsTable.delete())\n\n def wipe_daemon_heartbeats(self) -> None:\n with self.connect() as conn:\n # https://stackoverflow.com/a/54386260/324449\n conn.execute(DaemonHeartbeatsTable.delete())\n\n def get_backfills(\n self,\n status: Optional[BulkActionStatus] = None,\n cursor: Optional[str] = None,\n limit: Optional[int] = None,\n ) -> Sequence[PartitionBackfill]:\n check.opt_inst_param(status, "status", BulkActionStatus)\n query = db_select([BulkActionsTable.c.body])\n if status:\n query = query.where(BulkActionsTable.c.status == status.value)\n if cursor:\n cursor_query = db_select([BulkActionsTable.c.id]).where(\n BulkActionsTable.c.key == cursor\n )\n query = query.where(BulkActionsTable.c.id < cursor_query)\n if limit:\n query = query.limit(limit)\n query = query.order_by(BulkActionsTable.c.id.desc())\n rows = self.fetchall(query)\n return [deserialize_value(row["body"], PartitionBackfill) for row in rows]\n\n def get_backfill(self, backfill_id: str) -> Optional[PartitionBackfill]:\n check.str_param(backfill_id, "backfill_id")\n query = db_select([BulkActionsTable.c.body]).where(BulkActionsTable.c.key == backfill_id)\n row = self.fetchone(query)\n return deserialize_value(row["body"], PartitionBackfill) if row else None\n\n def add_backfill(self, partition_backfill: PartitionBackfill) -> None:\n check.inst_param(partition_backfill, "partition_backfill", PartitionBackfill)\n values: Dict[str, Any] = dict(\n key=partition_backfill.backfill_id,\n status=partition_backfill.status.value,\n timestamp=utc_datetime_from_timestamp(partition_backfill.backfill_timestamp),\n body=serialize_value(cast(NamedTuple, partition_backfill)),\n )\n\n if self.has_bulk_actions_selector_cols():\n values["selector_id"] = partition_backfill.selector_id\n values["action_type"] = partition_backfill.bulk_action_type.value\n\n with self.connect() as conn:\n conn.execute(BulkActionsTable.insert().values(**values))\n\n def update_backfill(self, partition_backfill: PartitionBackfill) -> None:\n check.inst_param(partition_backfill, "partition_backfill", PartitionBackfill)\n backfill_id = partition_backfill.backfill_id\n if not self.get_backfill(backfill_id):\n raise 
DagsterInvariantViolationError(\n f"Backfill {backfill_id} is not present in storage"\n )\n with self.connect() as conn:\n conn.execute(\n BulkActionsTable.update()\n .where(BulkActionsTable.c.key == backfill_id)\n .values(\n status=partition_backfill.status.value,\n body=serialize_value(partition_backfill),\n )\n )\n\n def get_cursor_values(self, keys: Set[str]) -> Mapping[str, str]:\n check.set_param(keys, "keys", of_type=str)\n\n rows = self.fetchall(\n db_select([KeyValueStoreTable.c.key, KeyValueStoreTable.c.value]).where(\n KeyValueStoreTable.c.key.in_(keys)\n ),\n )\n return {row["key"]: row["value"] for row in rows}\n\n def set_cursor_values(self, pairs: Mapping[str, str]) -> None:\n check.mapping_param(pairs, "pairs", key_type=str, value_type=str)\n db_values = [{"key": k, "value": v} for k, v in pairs.items()]\n\n with self.connect() as conn:\n try:\n conn.execute(KeyValueStoreTable.insert().values(db_values))\n except db_exc.IntegrityError:\n conn.execute(\n KeyValueStoreTable.update()\n .where(KeyValueStoreTable.c.key.in_(pairs.keys()))\n .values(value=db.sql.case(pairs, value=KeyValueStoreTable.c.key))\n )\n\n # Migrating run history\n def replace_job_origin(self, run: DagsterRun, job_origin: ExternalJobOrigin) -> None:\n new_label = job_origin.external_repository_origin.get_label()\n with self.connect() as conn:\n conn.execute(\n RunsTable.update()\n .where(RunsTable.c.run_id == run.run_id)\n .values(\n run_body=serialize_value(run.with_job_origin(job_origin)),\n )\n )\n conn.execute(\n RunTagsTable.update()\n .where(RunTagsTable.c.run_id == run.run_id)\n .where(RunTagsTable.c.key == REPOSITORY_LABEL_TAG)\n .values(value=new_label)\n )
\n\n\nGET_PIPELINE_SNAPSHOT_QUERY_ID = "get-pipeline-snapshot"\n\n\ndef defensively_unpack_execution_plan_snapshot_query(\n logger: logging.Logger, row: Sequence[Any]\n) -> Optional[Union[ExecutionPlanSnapshot, JobSnapshot]]:\n # minimal checking here because sqlalchemy returns a different type based on what version of\n # SqlAlchemy you are using\n\n def _warn(msg: str) -> None:\n logger.warning(f"get-pipeline-snapshot: {msg}")\n\n if not isinstance(row[0], bytes):\n _warn("First entry in row is not a binary type.")\n return None\n\n try:\n uncompressed_bytes = zlib.decompress(row[0])\n except zlib.error:\n _warn("Could not decompress bytes stored in snapshot table.")\n return None\n\n try:\n decoded_str = uncompressed_bytes.decode("utf-8")\n except UnicodeDecodeError:\n _warn("Could not unicode decode decompressed bytes stored in snapshot table.")\n return None\n\n try:\n return deserialize_value(decoded_str, (ExecutionPlanSnapshot, JobSnapshot))\n except JSONDecodeError:\n _warn("Could not parse json in snapshot table.")\n return None\n
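The listing above is the generic SQL-backed run storage. As a rough, non-authoritative sketch of how these methods are reached in practice, the snippet below queries and tags runs through a ``DagsterInstance``, which delegates to its configured run storage; the ephemeral instance, the tag keys, and the limit are illustrative assumptions and not part of this change.

.. code-block:: python

    # Hedged sketch: exercising the run-storage methods shown above (get_runs,
    # get_runs_count, add_run_tags) via a DagsterInstance, which delegates to its
    # configured run storage. The tag keys below are made up for illustration.
    from dagster import DagsterInstance, DagsterRunStatus, RunsFilter

    instance = DagsterInstance.ephemeral()  # in-memory instance, for illustration only

    failed_filter = RunsFilter(statuses=[DagsterRunStatus.FAILURE], tags={"team": "data-eng"})
    print(instance.get_runs_count(filters=failed_filter))

    for run in instance.get_runs(filters=failed_filter, limit=10):
        # tag writes go through add_run_tags, which updates both the serialized
        # run body and the run_tags table in the SQL-backed implementation
        instance.add_run_tags(run.run_id, {"triaged": "true"})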
", "current_page_name": "_modules/dagster/_core/storage/runs/sql_run_storage", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.runs.sql_run_storage"}, "sqlite": {"sqlite_run_storage": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.runs.sqlite.sqlite_run_storage

\nimport os\nfrom contextlib import contextmanager\nfrom typing import TYPE_CHECKING, Iterator, Optional\nfrom urllib.parse import urljoin, urlparse\n\nimport sqlalchemy as db\nfrom sqlalchemy.engine import Connection\nfrom sqlalchemy.pool import NullPool\nfrom typing_extensions import Self\n\nfrom dagster import (\n    StringSource,\n    _check as check,\n)\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._core.storage.sql import (\n    AlembicVersion,\n    check_alembic_revision,\n    create_engine,\n    get_alembic_config,\n    run_alembic_downgrade,\n    run_alembic_upgrade,\n    stamp_alembic_rev,\n)\nfrom dagster._core.storage.sqlite import create_db_conn_string\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData\nfrom dagster._utils import mkdir_p\n\nfrom ..schema import InstanceInfo, RunsTable, RunStorageSqlMetadata, RunTagsTable\nfrom ..sql_run_storage import SqlRunStorage\n\nif TYPE_CHECKING:\n    from dagster._core.storage.sqlite_storage import SqliteStorageConfig\nMINIMUM_SQLITE_BUCKET_VERSION = [3, 25, 0]\n\n\n
[docs]class SqliteRunStorage(SqlRunStorage, ConfigurableClass):\n """SQLite-backed run storage.\n\n Users should not directly instantiate this class; it is instantiated by internal machinery when\n ``dagster-webserver`` and ``dagster-graphql`` load, based on the values in the ``dagster.yaml`` file in\n ``$DAGSTER_HOME``. Configuration of this class should be done by setting values in that file.\n\n This is the default run storage when none is specified in the ``dagster.yaml``.\n\n To explicitly specify SQLite for run storage, you can add a block such as the following to your\n ``dagster.yaml``:\n\n .. code-block:: YAML\n\n run_storage:\n module: dagster._core.storage.runs\n class: SqliteRunStorage\n config:\n base_dir: /path/to/dir\n\n The ``base_dir`` param tells the run storage where on disk to store the database.\n """\n\n def __init__(self, conn_string: str, inst_data: Optional[ConfigurableClassData] = None):\n check.str_param(conn_string, "conn_string")\n self._conn_string = conn_string\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n super().__init__()\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n @classmethod\n def config_type(cls) -> UserConfigSchema:\n return {"base_dir": StringSource}\n\n @classmethod\n def from_config_value(\n cls, inst_data: Optional[ConfigurableClassData], config_value: "SqliteStorageConfig"\n ) -> "SqliteRunStorage":\n return SqliteRunStorage.from_local(inst_data=inst_data, **config_value)\n\n @classmethod\n def from_local(cls, base_dir: str, inst_data: Optional[ConfigurableClassData] = None) -> Self:\n check.str_param(base_dir, "base_dir")\n mkdir_p(base_dir)\n conn_string = create_db_conn_string(base_dir, "runs")\n engine = create_engine(conn_string, poolclass=NullPool)\n alembic_config = get_alembic_config(__file__)\n\n should_mark_indexes = False\n with engine.connect() as connection:\n db_revision, head_revision = check_alembic_revision(alembic_config, connection)\n if not (db_revision and head_revision):\n RunStorageSqlMetadata.create_all(engine)\n connection.execute(db.text("PRAGMA journal_mode=WAL;"))\n stamp_alembic_rev(alembic_config, connection)\n should_mark_indexes = True\n\n table_names = db.inspect(engine).get_table_names()\n if "instance_info" not in table_names:\n InstanceInfo.create(engine)\n\n run_storage = cls(conn_string, inst_data)\n\n if should_mark_indexes:\n run_storage.migrate()\n run_storage.optimize()\n\n return run_storage\n\n @contextmanager\n def connect(self) -> Iterator[Connection]:\n engine = create_engine(self._conn_string, poolclass=NullPool)\n with engine.connect() as conn:\n with conn.begin():\n yield conn\n\n def _alembic_upgrade(self, rev: str = "head") -> None:\n alembic_config = get_alembic_config(__file__)\n with self.connect() as conn:\n run_alembic_upgrade(alembic_config, conn, rev=rev)\n\n def _alembic_downgrade(self, rev: str = "head") -> None:\n alembic_config = get_alembic_config(__file__)\n with self.connect() as conn:\n run_alembic_downgrade(alembic_config, conn, rev=rev)\n\n def upgrade(self) -> None:\n self._check_for_version_066_migration_and_perform()\n self._alembic_upgrade()\n\n # In version 0.6.6, we changed the layout of the of the sqllite dbs on disk\n # to move from the root of DAGSTER_HOME/runs.db to DAGSTER_HOME/history/runs.bd\n # This function checks for that condition and does the move\n def _check_for_version_066_migration_and_perform(self) -> None:\n old_conn_string = "sqlite://" + 
urljoin(urlparse(self._conn_string).path, "../runs.db")\n path_to_old_db = urlparse(old_conn_string).path\n # sqlite URLs look like `sqlite:///foo/bar/baz on Unix/Mac` but on Windows they look like\n # `sqlite:///D:/foo/bar/baz` (or `sqlite:///D:\\foo\\bar\\baz`)\n if os.name == "nt":\n path_to_old_db = path_to_old_db.lstrip("/")\n if os.path.exists(path_to_old_db):\n old_storage = SqliteRunStorage(old_conn_string)\n old_runs = old_storage.get_runs()\n for run in old_runs:\n self.add_run(run)\n os.unlink(path_to_old_db)\n\n def delete_run(self, run_id: str) -> None:\n """Override the default sql delete run implementation until we can get full\n support on cascading deletes.\n """\n check.str_param(run_id, "run_id")\n remove_tags = db.delete(RunTagsTable).where(RunTagsTable.c.run_id == run_id)\n remove_run = db.delete(RunsTable).where(RunsTable.c.run_id == run_id)\n with self.connect() as conn:\n conn.execute(remove_tags)\n conn.execute(remove_run)\n\n def alembic_version(self) -> AlembicVersion:\n alembic_config = get_alembic_config(__file__)\n with self.connect() as conn:\n return check_alembic_revision(alembic_config, conn)
\n
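As a hedged local sketch of the class above, the snippet below builds the SQLite-backed run storage directly with ``from_local`` rather than through ``dagster.yaml``; the import path is the module named in the docstring's ``dagster.yaml`` block (private API), and the temporary directory stands in for a real base directory under ``$DAGSTER_HOME``.

.. code-block:: python

    # Hedged sketch, not part of this change: constructing SqliteRunStorage directly,
    # mirroring what the dagster.yaml block in the docstring above configures.
    import tempfile

    from dagster._core.storage.runs import SqliteRunStorage  # private module path

    base_dir = tempfile.mkdtemp()  # stands in for a directory under $DAGSTER_HOME
    storage = SqliteRunStorage.from_local(base_dir)

    print(storage.alembic_version())  # current and head alembic revisions
    print(storage.get_runs_count())   # 0 for a freshly created database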
", "current_page_name": "_modules/dagster/_core/storage/runs/sqlite/sqlite_run_storage", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.runs.sqlite.sqlite_run_storage"}}}, "schedules": {"base": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.schedules.base

\nimport abc\nfrom typing import Mapping, Optional, Sequence, Set\n\nfrom dagster import AssetKey\nfrom dagster._core.definitions.auto_materialize_rule import AutoMaterializeAssetEvaluation\nfrom dagster._core.definitions.run_request import InstigatorType\nfrom dagster._core.instance import MayHaveInstanceWeakref, T_DagsterInstance\nfrom dagster._core.scheduler.instigation import (\n    AutoMaterializeAssetEvaluationRecord,\n    InstigatorState,\n    InstigatorStatus,\n    InstigatorTick,\n    TickData,\n    TickStatus,\n)\nfrom dagster._core.storage.sql import AlembicVersion\nfrom dagster._utils import PrintFn\n\n\n
[docs]class ScheduleStorage(abc.ABC, MayHaveInstanceWeakref[T_DagsterInstance]):\n """Abstract class for managing persistance of scheduler artifacts."""\n\n @abc.abstractmethod\n def wipe(self) -> None:\n """Delete all schedules from storage."""\n\n @abc.abstractmethod\n def all_instigator_state(\n self,\n repository_origin_id: Optional[str] = None,\n repository_selector_id: Optional[str] = None,\n instigator_type: Optional[InstigatorType] = None,\n instigator_statuses: Optional[Set[InstigatorStatus]] = None,\n ) -> Sequence[InstigatorState]:\n """Return all InstigationStates present in storage.\n\n Args:\n repository_origin_id (Optional[str]): The ExternalRepository target id to scope results to\n repository_selector_id (Optional[str]): The repository selector id to scope results to\n instigator_type (Optional[InstigatorType]): The InstigatorType to scope results to\n instigator_statuses (Optional[Set[InstigatorStatus]]): The InstigatorStatuses to scope results to\n """\n\n @abc.abstractmethod\n def get_instigator_state(self, origin_id: str, selector_id: str) -> Optional[InstigatorState]:\n """Return the instigator state for the given id.\n\n Args:\n origin_id (str): The unique instigator identifier\n selector_id (str): The logical instigator identifier\n """\n\n @abc.abstractmethod\n def add_instigator_state(self, state: InstigatorState) -> InstigatorState:\n """Add an instigator state to storage.\n\n Args:\n state (InstigatorState): The state to add\n """\n\n @abc.abstractmethod\n def update_instigator_state(self, state: InstigatorState) -> InstigatorState:\n """Update an instigator state in storage.\n\n Args:\n state (InstigatorState): The state to update\n """\n\n @abc.abstractmethod\n def delete_instigator_state(self, origin_id: str, selector_id: str) -> None:\n """Delete a state in storage.\n\n Args:\n origin_id (str): The id of the instigator target to delete\n selector_id (str): The logical instigator identifier\n """\n\n @property\n def supports_batch_queries(self) -> bool:\n return False\n\n def get_batch_ticks(\n self,\n selector_ids: Sequence[str],\n limit: Optional[int] = None,\n statuses: Optional[Sequence[TickStatus]] = None,\n ) -> Mapping[str, Sequence[InstigatorTick]]:\n raise NotImplementedError()\n\n @abc.abstractmethod\n def get_ticks(\n self,\n origin_id: str,\n selector_id: str,\n before: Optional[float] = None,\n after: Optional[float] = None,\n limit: Optional[int] = None,\n statuses: Optional[Sequence[TickStatus]] = None,\n ) -> Sequence[InstigatorTick]:\n """Get the ticks for a given instigator.\n\n Args:\n origin_id (str): The id of the instigator target\n selector_id (str): The logical instigator identifier\n """\n\n @abc.abstractmethod\n def create_tick(self, tick_data: TickData) -> InstigatorTick:\n """Add a tick to storage.\n\n Args:\n tick_data (TickData): The tick to add\n """\n\n @abc.abstractmethod\n def update_tick(self, tick: InstigatorTick) -> InstigatorTick:\n """Update a tick already in storage.\n\n Args:\n tick (InstigatorTick): The tick to update\n """\n\n @abc.abstractmethod\n def purge_ticks(\n self,\n origin_id: str,\n selector_id: str,\n before: float,\n tick_statuses: Optional[Sequence[TickStatus]] = None,\n ) -> None:\n """Wipe ticks for an instigator for a certain status and timestamp.\n\n Args:\n origin_id (str): The id of the instigator target to delete\n selector_id (str): The logical instigator identifier\n before (datetime): All ticks before this datetime will get purged\n tick_statuses (Optional[List[TickStatus]]): The tick statuses 
to wipe\n """\n\n @property\n def supports_auto_materialize_asset_evaluations(self) -> bool:\n return True\n\n @abc.abstractmethod\n def add_auto_materialize_asset_evaluations(\n self,\n evaluation_id: int,\n asset_evaluations: Sequence[AutoMaterializeAssetEvaluation],\n ) -> None:\n """Add asset policy evaluations to storage."""\n\n @abc.abstractmethod\n def get_auto_materialize_asset_evaluations(\n self, asset_key: AssetKey, limit: int, cursor: Optional[int] = None\n ) -> Sequence[AutoMaterializeAssetEvaluationRecord]:\n """Get the policy evaluations for a given asset.\n\n Args:\n asset_key (AssetKey): The asset key to query\n limit (Optional[int]): The maximum number of evaluations to return\n cursor (Optional[int]): The cursor to paginate from\n """\n\n @abc.abstractmethod\n def get_auto_materialize_evaluations_for_evaluation_id(\n self, evaluation_id: int\n ) -> Sequence[AutoMaterializeAssetEvaluationRecord]:\n """Get all policy evaluations for a given evaluation ID.\n\n Args:\n evaluation_id (int): The evaluation ID to query.\n """\n\n @abc.abstractmethod\n def purge_asset_evaluations(self, before: float) -> None:\n """Wipe evaluations before a certain timestamp.\n\n Args:\n before (datetime): All evaluations before this datetime will get purged\n """\n\n @abc.abstractmethod\n def upgrade(self) -> None:\n """Perform any needed migrations."""\n\n def migrate(self, print_fn: Optional[PrintFn] = None, force_rebuild_all: bool = False) -> None:\n """Call this method to run any required data migrations."""\n\n def optimize(self, print_fn: Optional[PrintFn] = None, force_rebuild_all: bool = False) -> None:\n """Call this method to run any optional data migrations for optimized reads."""\n\n def optimize_for_webserver(self, statement_timeout: int, pool_recycle: int) -> None:\n """Allows for optimizing database connection / use in the context of a long lived webserver process."""\n\n def alembic_version(self) -> Optional[AlembicVersion]:\n return None\n\n def dispose(self) -> None:\n """Explicit lifecycle management."""
\n
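The abstract interface above is normally consumed through an instance's schedule storage. The sketch below is a loose illustration only: it assumes the storage object is reachable as ``instance.schedule_storage`` (an internal attribute) and that ``DAGSTER_HOME`` is set for ``DagsterInstance.get()``; only methods declared in the ``ScheduleStorage`` base class are called.

.. code-block:: python

    # Hedged sketch: reading scheduler state through the ScheduleStorage interface.
    # Assumes `instance.schedule_storage` exposes the storage (internal attribute)
    # and that DAGSTER_HOME points at a configured instance.
    from dagster import DagsterInstance
    from dagster._core.definitions.run_request import InstigatorType

    instance = DagsterInstance.get()
    storage = instance.schedule_storage  # assumption: exposed by the instance

    for state in storage.all_instigator_state(instigator_type=InstigatorType.SCHEDULE):
        ticks = storage.get_ticks(state.instigator_origin_id, state.selector_id, limit=5)
        print(state.instigator_origin_id, [tick.status for tick in ticks])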
", "current_page_name": "_modules/dagster/_core/storage/schedules/base", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.schedules.base"}, "sql_schedule_storage": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.schedules.sql_schedule_storage

\nfrom abc import abstractmethod\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom typing import (\n    Any,\n    Callable,\n    ContextManager,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Type,\n    TypeVar,\n)\n\nimport pendulum\nimport sqlalchemy as db\nimport sqlalchemy.exc as db_exc\nfrom sqlalchemy.engine import Connection\n\nimport dagster._check as check\nfrom dagster._core.definitions.auto_materialize_rule import AutoMaterializeAssetEvaluation\nfrom dagster._core.definitions.events import AssetKey\nfrom dagster._core.definitions.run_request import InstigatorType\nfrom dagster._core.errors import DagsterInvariantViolationError\nfrom dagster._core.scheduler.instigation import (\n    AutoMaterializeAssetEvaluationRecord,\n    InstigatorState,\n    InstigatorStatus,\n    InstigatorTick,\n    TickData,\n    TickStatus,\n)\nfrom dagster._core.storage.sql import SqlAlchemyQuery, SqlAlchemyRow\nfrom dagster._core.storage.sqlalchemy_compat import db_fetch_mappings, db_select, db_subquery\nfrom dagster._serdes import serialize_value\nfrom dagster._serdes.serdes import deserialize_value\nfrom dagster._utils import PrintFn, utc_datetime_from_timestamp\n\nfrom .base import ScheduleStorage\nfrom .migration import (\n    OPTIONAL_SCHEDULE_DATA_MIGRATIONS,\n    REQUIRED_SCHEDULE_DATA_MIGRATIONS,\n    SCHEDULE_JOBS_SELECTOR_ID,\n    SCHEDULE_TICKS_SELECTOR_ID,\n)\nfrom .schema import (\n    AssetDaemonAssetEvaluationsTable,\n    InstigatorsTable,\n    JobTable,\n    JobTickTable,\n    SecondaryIndexMigrationTable,\n)\n\nT_NamedTuple = TypeVar("T_NamedTuple", bound=NamedTuple)\n\n\n
[docs]class SqlScheduleStorage(ScheduleStorage):\n """Base class for SQL backed schedule storage."""\n\n @abstractmethod\n def connect(self) -> ContextManager[Connection]:\n """Context manager yielding a sqlalchemy.engine.Connection."""\n\n def execute(self, query: SqlAlchemyQuery) -> Sequence[SqlAlchemyRow]:\n with self.connect() as conn:\n result_proxy = conn.execute(query)\n res = result_proxy.fetchall()\n result_proxy.close()\n\n return res\n\n def _deserialize_rows(\n self, rows: Sequence[SqlAlchemyRow], as_type: Type[T_NamedTuple]\n ) -> Sequence[T_NamedTuple]:\n return list(map(lambda r: deserialize_value(r[0], as_type), rows))\n\n def all_instigator_state(\n self,\n repository_origin_id: Optional[str] = None,\n repository_selector_id: Optional[str] = None,\n instigator_type: Optional[InstigatorType] = None,\n instigator_statuses: Optional[Set[InstigatorStatus]] = None,\n ) -> Sequence[InstigatorState]:\n check.opt_inst_param(instigator_type, "instigator_type", InstigatorType)\n\n if self.has_instigators_table() and self.has_built_index(SCHEDULE_JOBS_SELECTOR_ID):\n query = db_select([InstigatorsTable.c.instigator_body]).select_from(InstigatorsTable)\n if repository_selector_id:\n query = query.where(\n InstigatorsTable.c.repository_selector_id == repository_selector_id\n )\n if instigator_type:\n query = query.where(InstigatorsTable.c.instigator_type == instigator_type.value)\n if instigator_statuses:\n query = query.where(\n InstigatorsTable.c.status.in_([status.value for status in instigator_statuses])\n )\n\n else:\n query = db_select([JobTable.c.job_body]).select_from(JobTable)\n if repository_origin_id:\n query = query.where(JobTable.c.repository_origin_id == repository_origin_id)\n if instigator_type:\n query = query.where(JobTable.c.job_type == instigator_type.value)\n if instigator_statuses:\n query = query.where(\n JobTable.c.status.in_([status.value for status in instigator_statuses])\n )\n\n rows = self.execute(query)\n return self._deserialize_rows(rows, InstigatorState)\n\n def get_instigator_state(self, origin_id: str, selector_id: str) -> Optional[InstigatorState]:\n check.str_param(origin_id, "origin_id")\n check.str_param(selector_id, "selector_id")\n\n if self.has_instigators_table() and self.has_built_index(SCHEDULE_JOBS_SELECTOR_ID):\n query = (\n db_select([InstigatorsTable.c.instigator_body])\n .select_from(InstigatorsTable)\n .where(InstigatorsTable.c.selector_id == selector_id)\n )\n else:\n query = (\n db_select([JobTable.c.job_body])\n .select_from(JobTable)\n .where(JobTable.c.job_origin_id == origin_id)\n )\n\n rows = self.execute(query)\n return self._deserialize_rows(rows[:1], InstigatorState)[0] if len(rows) else None\n\n def _has_instigator_state_by_selector(self, selector_id: str) -> bool:\n check.str_param(selector_id, "selector_id")\n\n query = (\n db_select([JobTable.c.job_body])\n .select_from(JobTable)\n .where(JobTable.c.selector_id == selector_id)\n )\n\n rows = self.execute(query)\n return self._deserialize_rows(rows[:1])[0] if len(rows) else None # type: ignore\n\n def _add_or_update_instigators_table(self, conn: Connection, state: InstigatorState) -> None:\n selector_id = state.selector_id\n try:\n conn.execute(\n InstigatorsTable.insert().values(\n selector_id=selector_id,\n repository_selector_id=state.repository_selector_id,\n status=state.status.value,\n instigator_type=state.instigator_type.value,\n instigator_body=serialize_value(state),\n )\n )\n except db_exc.IntegrityError:\n conn.execute(\n InstigatorsTable.update()\n 
.where(InstigatorsTable.c.selector_id == selector_id)\n .values(\n status=state.status.value,\n instigator_type=state.instigator_type.value,\n instigator_body=serialize_value(state),\n update_timestamp=pendulum.now("UTC"),\n )\n )\n\n def add_instigator_state(self, state: InstigatorState) -> InstigatorState:\n check.inst_param(state, "state", InstigatorState)\n with self.connect() as conn:\n try:\n conn.execute(\n JobTable.insert().values(\n job_origin_id=state.instigator_origin_id,\n repository_origin_id=state.repository_origin_id,\n status=state.status.value,\n job_type=state.instigator_type.value,\n job_body=serialize_value(state),\n )\n )\n except db_exc.IntegrityError as exc:\n raise DagsterInvariantViolationError(\n f"InstigatorState {state.instigator_origin_id} is already present in storage"\n ) from exc\n\n # try writing to the instigators table\n if self._has_instigators_table(conn):\n self._add_or_update_instigators_table(conn, state)\n\n return state\n\n def update_instigator_state(self, state: InstigatorState) -> InstigatorState:\n check.inst_param(state, "state", InstigatorState)\n if not self.get_instigator_state(state.instigator_origin_id, state.selector_id):\n raise DagsterInvariantViolationError(\n f"InstigatorState {state.instigator_origin_id} is not present in storage"\n )\n\n values = {\n "status": state.status.value,\n "job_body": serialize_value(state),\n "update_timestamp": pendulum.now("UTC"),\n }\n if self.has_instigators_table():\n values["selector_id"] = state.selector_id\n\n with self.connect() as conn:\n conn.execute(\n JobTable.update()\n .where(JobTable.c.job_origin_id == state.instigator_origin_id)\n .values(**values)\n )\n if self._has_instigators_table(conn):\n self._add_or_update_instigators_table(conn, state)\n\n return state\n\n def delete_instigator_state(self, origin_id: str, selector_id: str) -> None:\n check.str_param(origin_id, "origin_id")\n check.str_param(selector_id, "selector_id")\n\n if not self.get_instigator_state(origin_id, selector_id):\n raise DagsterInvariantViolationError(\n f"InstigatorState {origin_id} is not present in storage"\n )\n\n with self.connect() as conn:\n conn.execute(JobTable.delete().where(JobTable.c.job_origin_id == origin_id))\n\n if self._has_instigators_table(conn):\n if not self._jobs_has_selector_state(conn, selector_id):\n conn.execute(\n InstigatorsTable.delete().where(\n InstigatorsTable.c.selector_id == selector_id\n )\n )\n\n def _jobs_has_selector_state(self, conn: Connection, selector_id: str) -> bool:\n query = (\n db_select([db.func.count()])\n .select_from(JobTable)\n .where(JobTable.c.selector_id == selector_id)\n )\n result = conn.execute(query)\n row = result.fetchone()\n result.close()\n return row[0] > 0 # type: ignore # (possible none)\n\n def _add_filter_limit(\n self,\n query: SqlAlchemyQuery,\n before: Optional[float] = None,\n after: Optional[float] = None,\n limit: Optional[int] = None,\n statuses=None,\n ) -> SqlAlchemyQuery:\n check.opt_float_param(before, "before")\n check.opt_float_param(after, "after")\n check.opt_int_param(limit, "limit")\n check.opt_list_param(statuses, "statuses", of_type=TickStatus)\n\n if before:\n query = query.where(JobTickTable.c.timestamp < utc_datetime_from_timestamp(before))\n if after:\n query = query.where(JobTickTable.c.timestamp > utc_datetime_from_timestamp(after))\n if limit:\n query = query.limit(limit)\n if statuses:\n query = query.where(JobTickTable.c.status.in_([status.value for status in statuses]))\n return query\n\n @property\n def 
supports_batch_queries(self) -> bool:\n return self.has_instigators_table() and self.has_built_index(SCHEDULE_TICKS_SELECTOR_ID)\n\n def has_instigators_table(self) -> bool:\n with self.connect() as conn:\n return self._has_instigators_table(conn)\n\n def _has_instigators_table(self, conn: Connection) -> bool:\n table_names = db.inspect(conn).get_table_names()\n return "instigators" in table_names\n\n def _has_asset_daemon_asset_evaluations_table(self, conn: Connection) -> bool:\n table_names = db.inspect(conn).get_table_names()\n return "asset_daemon_asset_evaluations" in table_names\n\n def get_batch_ticks(\n self,\n selector_ids: Sequence[str],\n limit: Optional[int] = None,\n statuses: Optional[Sequence[TickStatus]] = None,\n ) -> Mapping[str, Sequence[InstigatorTick]]:\n check.sequence_param(selector_ids, "selector_ids", of_type=str)\n check.opt_int_param(limit, "limit")\n check.opt_sequence_param(statuses, "statuses", of_type=TickStatus)\n\n bucket_rank_column = (\n db.func.rank()\n .over(\n order_by=db.desc(JobTickTable.c.timestamp),\n partition_by=JobTickTable.c.selector_id,\n )\n .label("rank")\n )\n subquery = db_subquery(\n db_select(\n [\n JobTickTable.c.id,\n JobTickTable.c.selector_id,\n JobTickTable.c.tick_body,\n bucket_rank_column,\n ]\n )\n .select_from(JobTickTable)\n .where(JobTickTable.c.selector_id.in_(selector_ids))\n )\n if statuses:\n subquery = subquery.where(\n JobTickTable.c.status.in_([status.value for status in statuses])\n )\n\n query = (\n db_select([subquery.c.id, subquery.c.selector_id, subquery.c.tick_body])\n .order_by(subquery.c.rank.asc())\n .where(subquery.c.rank <= limit)\n )\n\n rows = self.execute(query)\n results = defaultdict(list)\n for row in rows:\n tick_id = row[0]\n selector_id = row[1]\n tick_data = deserialize_value(row[2], TickData)\n results[selector_id].append(InstigatorTick(tick_id, tick_data))\n return results\n\n def get_ticks(\n self,\n origin_id: str,\n selector_id: str,\n before: Optional[float] = None,\n after: Optional[float] = None,\n limit: Optional[int] = None,\n statuses: Optional[Sequence[TickStatus]] = None,\n ) -> Sequence[InstigatorTick]:\n check.str_param(origin_id, "origin_id")\n check.opt_float_param(before, "before")\n check.opt_float_param(after, "after")\n check.opt_int_param(limit, "limit")\n check.opt_list_param(statuses, "statuses", of_type=TickStatus)\n\n base_query = (\n db_select([JobTickTable.c.id, JobTickTable.c.tick_body])\n .select_from(JobTickTable)\n .order_by(JobTickTable.c.timestamp.desc())\n )\n if self.has_instigators_table():\n query = base_query.where(\n db.or_(\n JobTickTable.c.selector_id == selector_id,\n db.and_(\n JobTickTable.c.selector_id.is_(None),\n JobTickTable.c.job_origin_id == origin_id,\n ),\n )\n )\n else:\n query = base_query.where(JobTickTable.c.job_origin_id == origin_id)\n\n query = self._add_filter_limit(\n query, before=before, after=after, limit=limit, statuses=statuses\n )\n\n rows = self.execute(query)\n return list(map(lambda r: InstigatorTick(r[0], deserialize_value(r[1], TickData)), rows))\n\n def create_tick(self, tick_data: TickData) -> InstigatorTick:\n check.inst_param(tick_data, "tick_data", TickData)\n\n values = {\n "job_origin_id": tick_data.instigator_origin_id,\n "status": tick_data.status.value,\n "type": tick_data.instigator_type.value,\n "timestamp": utc_datetime_from_timestamp(tick_data.timestamp),\n "tick_body": serialize_value(tick_data),\n }\n if self.has_instigators_table() and tick_data.selector_id:\n values["selector_id"] = tick_data.selector_id\n\n 
with self.connect() as conn:\n try:\n tick_insert = JobTickTable.insert().values(**values)\n result = conn.execute(tick_insert)\n tick_id = result.inserted_primary_key[0]\n return InstigatorTick(tick_id, tick_data)\n except db_exc.IntegrityError as exc:\n raise DagsterInvariantViolationError(\n f"Unable to insert InstigatorTick for job {tick_data.instigator_name} in"\n " storage"\n ) from exc\n\n def update_tick(self, tick: InstigatorTick) -> InstigatorTick:\n check.inst_param(tick, "tick", InstigatorTick)\n\n values = {\n "status": tick.status.value,\n "type": tick.instigator_type.value,\n "timestamp": utc_datetime_from_timestamp(tick.timestamp),\n "tick_body": serialize_value(tick.tick_data),\n }\n if self.has_instigators_table() and tick.selector_id:\n values["selector_id"] = tick.selector_id\n\n with self.connect() as conn:\n conn.execute(\n JobTickTable.update().where(JobTickTable.c.id == tick.tick_id).values(**values)\n )\n\n return tick\n\n def purge_ticks(\n self,\n origin_id: str,\n selector_id: str,\n before: float,\n tick_statuses: Optional[Sequence[TickStatus]] = None,\n ) -> None:\n check.str_param(origin_id, "origin_id")\n check.float_param(before, "before")\n check.opt_list_param(tick_statuses, "tick_statuses", of_type=TickStatus)\n\n utc_before = utc_datetime_from_timestamp(before)\n\n query = JobTickTable.delete().where(JobTickTable.c.timestamp < utc_before)\n if tick_statuses:\n query = query.where(\n JobTickTable.c.status.in_([tick_status.value for tick_status in tick_statuses])\n )\n\n if self.has_instigators_table():\n query = query.where(\n db.or_(\n JobTickTable.c.selector_id == selector_id,\n db.and_(\n JobTickTable.c.selector_id.is_(None),\n JobTickTable.c.job_origin_id == origin_id,\n ),\n )\n )\n else:\n query = query.where(JobTickTable.c.job_origin_id == origin_id)\n\n with self.connect() as conn:\n conn.execute(query)\n\n @property\n def supports_auto_materialize_asset_evaluations(self) -> bool:\n with self.connect() as conn:\n return self._has_asset_daemon_asset_evaluations_table(conn)\n\n def add_auto_materialize_asset_evaluations(\n self,\n evaluation_id: int,\n asset_evaluations: Sequence[AutoMaterializeAssetEvaluation],\n ):\n if not asset_evaluations:\n return\n\n with self.connect() as conn:\n bulk_insert = AssetDaemonAssetEvaluationsTable.insert().values(\n [\n {\n "evaluation_id": evaluation_id,\n "asset_key": evaluation.asset_key.to_string(),\n "asset_evaluation_body": serialize_value(evaluation),\n "num_requested": evaluation.num_requested,\n "num_skipped": evaluation.num_skipped,\n "num_discarded": evaluation.num_discarded,\n }\n for evaluation in asset_evaluations\n ]\n )\n conn.execute(bulk_insert)\n\n def get_auto_materialize_asset_evaluations(\n self, asset_key: AssetKey, limit: int, cursor: Optional[int] = None\n ) -> Sequence[AutoMaterializeAssetEvaluationRecord]:\n with self.connect() as conn:\n query = (\n db_select(\n [\n AssetDaemonAssetEvaluationsTable.c.id,\n AssetDaemonAssetEvaluationsTable.c.asset_evaluation_body,\n AssetDaemonAssetEvaluationsTable.c.evaluation_id,\n AssetDaemonAssetEvaluationsTable.c.create_timestamp,\n AssetDaemonAssetEvaluationsTable.c.asset_key,\n ]\n )\n .where(AssetDaemonAssetEvaluationsTable.c.asset_key == asset_key.to_string())\n .order_by(AssetDaemonAssetEvaluationsTable.c.evaluation_id.desc())\n ).limit(limit)\n\n if cursor:\n query = query.where(AssetDaemonAssetEvaluationsTable.c.evaluation_id < cursor)\n\n rows = db_fetch_mappings(conn, query)\n return [AutoMaterializeAssetEvaluationRecord.from_db_row(row) 
for row in rows]\n\n def get_auto_materialize_evaluations_for_evaluation_id(\n self, evaluation_id: int\n ) -> Sequence[AutoMaterializeAssetEvaluationRecord]:\n with self.connect() as conn:\n query = db_select(\n [\n AssetDaemonAssetEvaluationsTable.c.id,\n AssetDaemonAssetEvaluationsTable.c.asset_evaluation_body,\n AssetDaemonAssetEvaluationsTable.c.evaluation_id,\n AssetDaemonAssetEvaluationsTable.c.create_timestamp,\n AssetDaemonAssetEvaluationsTable.c.asset_key,\n ]\n ).where(AssetDaemonAssetEvaluationsTable.c.evaluation_id == evaluation_id)\n\n rows = db_fetch_mappings(conn, query)\n return [AutoMaterializeAssetEvaluationRecord.from_db_row(row) for row in rows]\n\n def purge_asset_evaluations(self, before: float):\n check.float_param(before, "before")\n\n utc_before = utc_datetime_from_timestamp(before)\n query = AssetDaemonAssetEvaluationsTable.delete().where(\n AssetDaemonAssetEvaluationsTable.c.create_timestamp < utc_before\n )\n\n with self.connect() as conn:\n conn.execute(query)\n\n def wipe(self) -> None:\n """Clears the schedule storage."""\n with self.connect() as conn:\n # https://stackoverflow.com/a/54386260/324449\n conn.execute(JobTable.delete())\n conn.execute(JobTickTable.delete())\n if self._has_instigators_table(conn):\n conn.execute(InstigatorsTable.delete())\n if self._has_asset_daemon_asset_evaluations_table(conn):\n conn.execute(AssetDaemonAssetEvaluationsTable.delete())\n\n # MIGRATIONS\n\n def has_secondary_index_table(self) -> bool:\n with self.connect() as conn:\n return "secondary_indexes" in db.inspect(conn).get_table_names()\n\n def has_built_index(self, migration_name: str) -> bool:\n if not self.has_secondary_index_table():\n return False\n\n query = (\n db_select([1])\n .where(SecondaryIndexMigrationTable.c.name == migration_name)\n .where(SecondaryIndexMigrationTable.c.migration_completed != None) # noqa: E711\n .limit(1)\n )\n with self.connect() as conn:\n results = conn.execute(query).fetchall()\n\n return len(results) > 0\n\n def mark_index_built(self, migration_name: str) -> None:\n query = SecondaryIndexMigrationTable.insert().values(\n name=migration_name,\n migration_completed=datetime.now(),\n )\n with self.connect() as conn:\n try:\n conn.execute(query)\n except db_exc.IntegrityError:\n conn.execute(\n SecondaryIndexMigrationTable.update()\n .where(SecondaryIndexMigrationTable.c.name == migration_name)\n .values(migration_completed=datetime.now())\n )\n\n def _execute_data_migrations(\n self,\n migrations: Mapping[str, Callable[..., Any]],\n print_fn: Optional[Callable] = None,\n force_rebuild_all: bool = False,\n ) -> None:\n for migration_name, migration_fn in migrations.items():\n if self.has_built_index(migration_name):\n if not force_rebuild_all:\n if print_fn:\n print_fn(f"Skipping already applied migration: {migration_name}")\n continue\n if print_fn:\n print_fn(f"Starting data migration: {migration_name}")\n migration_fn()(self, print_fn)\n self.mark_index_built(migration_name)\n if print_fn:\n print_fn(f"Finished data migration: {migration_name}")\n\n def migrate(self, print_fn: Optional[PrintFn] = None, force_rebuild_all: bool = False) -> None:\n self._execute_data_migrations(\n REQUIRED_SCHEDULE_DATA_MIGRATIONS, print_fn, force_rebuild_all\n )\n\n def optimize(self, print_fn: Optional[PrintFn] = None, force_rebuild_all: bool = False) -> None:\n self._execute_data_migrations(\n OPTIONAL_SCHEDULE_DATA_MIGRATIONS, print_fn, force_rebuild_all\n )
\n
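The data-migration plumbing at the end of the listing above (``has_built_index``, ``mark_index_built``, ``_execute_data_migrations``) follows a simple "record what has already been applied, skip it next time" pattern. The toy below is not Dagster API; it restates that control flow with an in-memory set standing in for the ``secondary_indexes`` table.

.. code-block:: python

    # Illustrative toy (not Dagster API): the migration bookkeeping pattern used by
    # _execute_data_migrations above, reduced to an in-memory set.
    applied_migrations = set()

    def has_built_index(name: str) -> bool:
        return name in applied_migrations

    def mark_index_built(name: str) -> None:
        applied_migrations.add(name)

    def execute_data_migrations(migrations, force_rebuild_all: bool = False) -> None:
        for name, migration_fn in migrations.items():
            if has_built_index(name) and not force_rebuild_all:
                print(f"Skipping already applied data migration: {name}")
                continue
            print(f"Starting data migration: {name}")
            migration_fn()
            mark_index_built(name)
            print(f"Finished data migration: {name}")

    execute_data_migrations({"schedule_jobs_selector_id": lambda: None})
    execute_data_migrations({"schedule_jobs_selector_id": lambda: None})  # skipped on rerun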
", "current_page_name": "_modules/dagster/_core/storage/schedules/sql_schedule_storage", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.schedules.sql_schedule_storage"}, "sqlite": {"sqlite_schedule_storage": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.schedules.sqlite.sqlite_schedule_storage

\nfrom contextlib import contextmanager\nfrom typing import Iterator, Optional\n\nimport sqlalchemy as db\nfrom packaging.version import parse\nfrom sqlalchemy.engine import Connection\nfrom sqlalchemy.pool import NullPool\n\nfrom dagster import (\n    StringSource,\n    _check as check,\n)\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._core.storage.sql import (\n    AlembicVersion,\n    check_alembic_revision,\n    create_engine,\n    get_alembic_config,\n    run_alembic_upgrade,\n    stamp_alembic_rev,\n)\nfrom dagster._core.storage.sqlite import create_db_conn_string, get_sqlite_version\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData\nfrom dagster._utils import mkdir_p\n\nfrom ..schema import ScheduleStorageSqlMetadata\nfrom ..sql_schedule_storage import SqlScheduleStorage\n\nMINIMUM_SQLITE_BATCH_VERSION = "3.25.0"\n\n\n
[docs]class SqliteScheduleStorage(SqlScheduleStorage, ConfigurableClass):\n """Local SQLite backed schedule storage."""\n\n def __init__(self, conn_string: str, inst_data: Optional[ConfigurableClassData] = None):\n check.str_param(conn_string, "conn_string")\n self._conn_string = conn_string\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n\n super().__init__()\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n @classmethod\n def config_type(cls) -> UserConfigSchema:\n return {"base_dir": StringSource}\n\n @classmethod\n def from_config_value(\n cls, inst_data: Optional[ConfigurableClassData], config_value\n ) -> "SqliteScheduleStorage":\n return SqliteScheduleStorage.from_local(inst_data=inst_data, **config_value)\n\n @classmethod\n def from_local(\n cls, base_dir: str, inst_data: Optional[ConfigurableClassData] = None\n ) -> "SqliteScheduleStorage":\n check.str_param(base_dir, "base_dir")\n mkdir_p(base_dir)\n conn_string = create_db_conn_string(base_dir, "schedules")\n engine = create_engine(conn_string, poolclass=NullPool)\n alembic_config = get_alembic_config(__file__)\n\n should_migrate_data = False\n with engine.connect() as connection:\n db_revision, head_revision = check_alembic_revision(alembic_config, connection)\n if not (db_revision and head_revision):\n ScheduleStorageSqlMetadata.create_all(engine)\n connection.execute(db.text("PRAGMA journal_mode=WAL;"))\n stamp_alembic_rev(alembic_config, connection)\n should_migrate_data = True\n\n schedule_storage = cls(conn_string, inst_data)\n if should_migrate_data:\n schedule_storage.migrate()\n schedule_storage.optimize()\n\n return schedule_storage\n\n @contextmanager\n def connect(self) -> Iterator[Connection]:\n engine = create_engine(self._conn_string, poolclass=NullPool)\n with engine.connect() as conn:\n with conn.begin():\n yield conn\n\n @property\n def supports_batch_queries(self) -> bool:\n if not super().supports_batch_queries:\n return False\n\n return super().supports_batch_queries and parse(get_sqlite_version()) >= parse(\n MINIMUM_SQLITE_BATCH_VERSION\n )\n\n def upgrade(self) -> None:\n alembic_config = get_alembic_config(__file__)\n with self.connect() as conn:\n run_alembic_upgrade(alembic_config, conn)\n\n def alembic_version(self) -> AlembicVersion:\n alembic_config = get_alembic_config(__file__)\n with self.connect() as conn:\n return check_alembic_revision(alembic_config, conn)
\n
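As with the run storage, a hedged local sketch of the class above: it constructs the SQLite schedule storage via ``from_local`` and checks the ``supports_batch_queries`` gate, which the override above ties to ``MINIMUM_SQLITE_BATCH_VERSION`` (3.25.0). The import path is the private module shown in this listing, and the temporary directory is illustrative.

.. code-block:: python

    # Hedged sketch, not part of this change: local construction of the SQLite-backed
    # schedule storage defined above. The module path is private API.
    import tempfile

    from dagster._core.storage.schedules.sqlite.sqlite_schedule_storage import (
        SqliteScheduleStorage,
    )

    storage = SqliteScheduleStorage.from_local(tempfile.mkdtemp())

    # False when the local SQLite is older than MINIMUM_SQLITE_BATCH_VERSION (3.25.0),
    # per the supports_batch_queries override above.
    print(storage.supports_batch_queries)
    print(storage.alembic_version())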
", "current_page_name": "_modules/dagster/_core/storage/schedules/sqlite/sqlite_schedule_storage", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.schedules.sqlite.sqlite_schedule_storage"}}}, "upath_io_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.storage.upath_io_manager

\nimport asyncio\nimport inspect\nfrom abc import abstractmethod\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union\n\nfrom fsspec import AbstractFileSystem\nfrom fsspec.implementations.local import LocalFileSystem\n\nfrom dagster import (\n    InputContext,\n    MetadataValue,\n    MultiPartitionKey,\n    OutputContext,\n    _check as check,\n)\nfrom dagster._core.storage.memoizable_io_manager import MemoizableIOManager\n\nif TYPE_CHECKING:\n    from upath import UPath\n\n\n
[docs]class UPathIOManager(MemoizableIOManager):\n """Abstract IOManager base class compatible with local and cloud storage via `universal-pathlib` and `fsspec`.\n\n Features:\n - handles partitioned assets\n - handles loading a single upstream partition\n - handles loading multiple upstream partitions (with respect to :py:class:`PartitionMapping`)\n - supports loading multiple partitions concurrently with async `load_from_path` method\n - the `get_metadata` method can be customized to add additional metadata to the output\n - the `allow_missing_partitions` metadata value can be set to `True` to skip missing partitions\n (the default behavior is to raise an error)\n\n """\n\n extension: Optional[str] = None # override in child class\n\n def __init__(\n self,\n base_path: Optional["UPath"] = None,\n ):\n from upath import UPath\n\n assert not self.extension or "." in self.extension\n self._base_path = base_path or UPath(".")\n\n @abstractmethod\n def dump_to_path(self, context: OutputContext, obj: Any, path: "UPath"):\n """Child classes should override this method to write the object to the filesystem."""\n\n @abstractmethod\n def load_from_path(self, context: InputContext, path: "UPath") -> Any:\n """Child classes should override this method to load the object from the filesystem."""\n\n @property\n def fs(self) -> AbstractFileSystem:\n """Utility function to get the IOManager filesystem.\n\n Returns:\n AbstractFileSystem: fsspec filesystem.\n\n """\n from upath import UPath\n\n if isinstance(self._base_path, UPath):\n return self._base_path.fs\n elif isinstance(self._base_path, Path):\n return LocalFileSystem()\n else:\n raise ValueError(f"Unsupported base_path type: {type(self._base_path)}")\n\n @property\n def storage_options(self) -> Dict[str, Any]:\n """Utility function to get the fsspec storage_options which are often consumed by various I/O functions.\n\n Returns:\n Dict[str, Any]: fsspec storage_options.\n """\n from upath import UPath\n\n if isinstance(self._base_path, UPath):\n return self._base_path._kwargs.copy() # noqa\n elif isinstance(self._base_path, Path):\n return {}\n else:\n raise ValueError(f"Unsupported base_path type: {type(self._base_path)}")\n\n def get_metadata(\n self,\n context: OutputContext,\n obj: Any,\n ) -> Dict[str, MetadataValue]:\n """Child classes should override this method to add custom metadata to the outputs."""\n return {}\n\n # Read/write operations on paths can generally be handled by methods on the\n # UPath class, but when the backend requires credentials, this isn't\n # always possible. 
Override these path_* methods to provide custom\n # implementations for targeting backends that require authentication.\n\n def unlink(self, path: "UPath") -> None:\n """Remove the file or object at the provided path."""\n path.unlink()\n\n def path_exists(self, path: "UPath") -> bool:\n """Check if a file or object exists at the provided path."""\n return path.exists()\n\n def make_directory(self, path: "UPath"):\n """Create a directory at the provided path.\n\n Override as a no-op if the target backend doesn't use directories.\n """\n path.mkdir(parents=True, exist_ok=True)\n\n def has_output(self, context: OutputContext) -> bool:\n return self.path_exists(self._get_path(context))\n\n def _with_extension(self, path: "UPath") -> "UPath":\n return path.with_suffix(path.suffix + self.extension) if self.extension else path\n\n def _get_path_without_extension(self, context: Union[InputContext, OutputContext]) -> "UPath":\n if context.has_asset_key:\n context_path = self.get_asset_relative_path(context)\n else:\n # we are dealing with an op output\n context_path = self.get_op_output_relative_path(context)\n\n return self._base_path.joinpath(context_path)\n\n def get_asset_relative_path(self, context: Union[InputContext, OutputContext]) -> "UPath":\n from upath import UPath\n\n # we are not using context.get_asset_identifier() because it already includes the partition_key\n return UPath(*context.asset_key.path)\n\n def get_op_output_relative_path(self, context: Union[InputContext, OutputContext]) -> "UPath":\n from upath import UPath\n\n return UPath(*context.get_identifier())\n\n def get_loading_input_log_message(self, path: "UPath") -> str:\n return f"Loading file from: {path} using {self.__class__.__name__}..."\n\n def get_writing_output_log_message(self, path: "UPath") -> str:\n return f"Writing file at: {path} using {self.__class__.__name__}..."\n\n def get_loading_input_partition_log_message(self, path: "UPath", partition_key: str) -> str:\n return f"Loading partition {partition_key} from {path} using {self.__class__.__name__}..."\n\n def get_missing_partition_log_message(self, partition_key: str) -> str:\n return (\n f"Couldn't load partition {partition_key} and skipped it "\n "because the input metadata includes allow_missing_partitions=True"\n )\n\n def _get_path(self, context: Union[InputContext, OutputContext]) -> "UPath":\n """Returns the I/O path for a given context.\n Should not be used with partitions (use `_get_paths_for_partitions` instead).\n """\n path = self._get_path_without_extension(context)\n return self._with_extension(path)\n\n def get_path_for_partition(\n self, context: Union[InputContext, OutputContext], path: "UPath", partition: str\n ) -> "UPath":\n """Override this method if you want to use a different partitioning scheme\n (for example, if the saving function handles partitioning instead).\n The extension will be added later.\n\n Args:\n context (Union[InputContext, OutputContext]): The context for the I/O operation.\n path (UPath): The path to the file or object.\n partition (str): Formatted partition/multipartition key\n\n Returns:\n UPath: The path to the file with the partition key appended.\n """\n return path / partition\n\n def _get_paths_for_partitions(\n self, context: Union[InputContext, OutputContext]\n ) -> Dict[str, "UPath"]:\n """Returns a dict of partition_keys into I/O paths for a given context."""\n if not context.has_asset_partitions:\n raise TypeError(\n f"Detected {context.dagster_type.typing_type} input type "\n "but the asset is not 
partitioned"\n )\n\n def _formatted_multipartitioned_path(partition_key: MultiPartitionKey) -> str:\n ordered_dimension_keys = [\n key[1]\n for key in sorted(partition_key.keys_by_dimension.items(), key=lambda x: x[0])\n ]\n return "/".join(ordered_dimension_keys)\n\n formatted_partition_keys = {\n partition_key: (\n _formatted_multipartitioned_path(partition_key)\n if isinstance(partition_key, MultiPartitionKey)\n else partition_key\n )\n for partition_key in context.asset_partition_keys\n }\n\n asset_path = self._get_path_without_extension(context)\n return {\n partition_key: self._with_extension(\n self.get_path_for_partition(context, asset_path, partition)\n )\n for partition_key, partition in formatted_partition_keys.items()\n }\n\n def _get_multipartition_backcompat_paths(\n self, context: Union[InputContext, OutputContext]\n ) -> Mapping[str, "UPath"]:\n if not context.has_asset_partitions:\n raise TypeError(\n f"Detected {context.dagster_type.typing_type} input type "\n "but the asset is not partitioned"\n )\n\n partition_keys = context.asset_partition_keys\n\n asset_path = self._get_path_without_extension(context)\n return {\n partition_key: self._with_extension(asset_path / partition_key)\n for partition_key in partition_keys\n if isinstance(partition_key, MultiPartitionKey)\n }\n\n def _load_single_input(\n self, path: "UPath", context: InputContext, backcompat_path: Optional["UPath"] = None\n ) -> Any:\n context.log.debug(self.get_loading_input_log_message(path))\n try:\n obj = self.load_from_path(context=context, path=path)\n if asyncio.iscoroutine(obj):\n obj = asyncio.run(obj)\n except FileNotFoundError as e:\n if backcompat_path is not None:\n try:\n obj = self.load_from_path(context=context, path=backcompat_path)\n if asyncio.iscoroutine(obj):\n obj = asyncio.run(obj)\n\n context.log.debug(\n f"File not found at {path}. Loaded instead from backcompat path:"\n f" {backcompat_path}"\n )\n except FileNotFoundError:\n raise e\n else:\n raise e\n\n context.add_input_metadata({"path": MetadataValue.path(str(path))})\n return obj\n\n def _load_partition_from_path(\n self,\n context: InputContext,\n partition_key: str,\n path: "UPath",\n backcompat_path: Optional["UPath"] = None,\n ) -> Any:\n """1. Try to load the partition from the normal path.\n 2. If it was not found, try to load it from the backcompat path.\n 3. If allow_missing_partitions metadata is True, skip the partition if it was not found in any of the paths.\n Otherwise, raise an error.\n\n Args:\n context (InputContext): IOManager Input context\n partition_key (str): the partition key corresponding to the partition being loaded\n path (UPath): The path to the partition.\n backcompat_path (Optional[UPath]): The path to the partition in the backcompat location.\n\n Returns:\n Any: The object loaded from the partition.\n """\n allow_missing_partitions = (\n context.metadata.get("allow_missing_partitions", False)\n if context.metadata is not None\n else False\n )\n\n try:\n context.log.debug(self.get_loading_input_partition_log_message(path, partition_key))\n obj = self.load_from_path(context=context, path=path)\n return obj\n except FileNotFoundError as e:\n if backcompat_path is not None:\n try:\n obj = self.load_from_path(context=context, path=path)\n context.log.debug(\n f"File not found at {path}. 
Loaded instead from backcompat path:"\n f" {backcompat_path}"\n )\n return obj\n except FileNotFoundError as e:\n if allow_missing_partitions:\n context.log.warning(self.get_missing_partition_log_message(partition_key))\n return None\n else:\n raise e\n if allow_missing_partitions:\n context.log.warning(self.get_missing_partition_log_message(partition_key))\n return None\n else:\n raise e\n\n def _load_multiple_inputs(self, context: InputContext) -> Dict[str, Any]:\n # load multiple partitions\n paths = self._get_paths_for_partitions(context) # paths for normal partitions\n backcompat_paths = self._get_multipartition_backcompat_paths(\n context\n ) # paths for multipartitions\n\n context.log.debug(f"Loading {len(paths)} partitions...")\n\n objs = {}\n\n if not inspect.iscoroutinefunction(self.load_from_path):\n for partition_key in context.asset_partition_keys:\n obj = self._load_partition_from_path(\n context,\n partition_key,\n paths[partition_key],\n backcompat_paths.get(partition_key),\n )\n if obj is not None: # in case some partitions were skipped\n objs[partition_key] = obj\n return objs\n else:\n # load_from_path returns a coroutine, so we need to await the results\n\n async def collect():\n loop = asyncio.get_running_loop()\n\n tasks = []\n\n for partition_key in context.asset_partition_keys:\n tasks.append(\n loop.create_task(\n self._load_partition_from_path(\n context,\n partition_key,\n paths[partition_key],\n backcompat_paths.get(partition_key),\n )\n )\n )\n\n results = await asyncio.gather(*tasks, return_exceptions=True)\n\n # need to handle missing partitions here because exceptions don't get propagated from async calls\n allow_missing_partitions = (\n context.metadata.get("allow_missing_partitions", False)\n if context.metadata is not None\n else False\n )\n\n results_without_errors = []\n found_errors = False\n for partition_key, result in zip(context.asset_partition_keys, results):\n if isinstance(result, FileNotFoundError):\n if allow_missing_partitions:\n context.log.warning(\n self.get_missing_partition_log_message(partition_key)\n )\n else:\n context.log.error(str(result))\n found_errors = True\n elif isinstance(result, Exception):\n context.log.error(str(result))\n found_errors = True\n else:\n results_without_errors.append(result)\n\n if found_errors:\n raise RuntimeError(\n f"{len(paths) - len(results_without_errors)} partitions could not be loaded"\n )\n\n return results_without_errors\n\n awaited_objects = asyncio.get_event_loop().run_until_complete(collect())\n\n return {\n partition_key: awaited_object\n for partition_key, awaited_object in zip(\n context.asset_partition_keys, awaited_objects\n )\n if awaited_object is not None\n }\n\n def load_input(self, context: InputContext) -> Union[Any, Dict[str, Any]]:\n # If no asset key, we are dealing with an op output which is always non-partitioned\n if not context.has_asset_key or not context.has_asset_partitions:\n path = self._get_path(context)\n return self._load_single_input(path, context)\n else:\n asset_partition_keys = context.asset_partition_keys\n if len(asset_partition_keys) == 0:\n return None\n elif len(asset_partition_keys) == 1:\n paths = self._get_paths_for_partitions(context)\n check.invariant(len(paths) == 1, f"Expected 1 path, but got {len(paths)}")\n path = next(iter(paths.values()))\n backcompat_paths = self._get_multipartition_backcompat_paths(context)\n backcompat_path = (\n None if not backcompat_paths else next(iter(backcompat_paths.values()))\n )\n\n return self._load_single_input(path, 
context, backcompat_path)\n else: # we are dealing with multiple partitions of an asset\n type_annotation = context.dagster_type.typing_type\n if type_annotation != Any and not is_dict_type(type_annotation):\n check.failed(\n "Loading an input that corresponds to multiple partitions, but the"\n " type annotation on the op input is not a dict, Dict, Mapping, or"\n f" Any: is '{type_annotation}'."\n )\n\n return self._load_multiple_inputs(context)\n\n def handle_output(self, context: OutputContext, obj: Any):\n if context.dagster_type.typing_type == type(None):\n check.invariant(\n obj is None,\n "Output had Nothing type or 'None' annotation, but handle_output received"\n f" value that was not None and was of type {type(obj)}.",\n )\n return None\n\n if context.has_asset_partitions:\n paths = self._get_paths_for_partitions(context)\n\n check.invariant(\n len(paths) == 1,\n f"The current IO manager {type(self)} does not support persisting an output"\n " associated with multiple partitions. This error is likely occurring because a"\n " backfill was launched using the 'single run' option. Instead, launch the"\n " backfill with the 'multiple runs' option.",\n )\n\n path = next(iter(paths.values()))\n else:\n path = self._get_path(context)\n self.make_directory(path.parent)\n context.log.debug(self.get_writing_output_log_message(path))\n self.dump_to_path(context=context, obj=obj, path=path)\n\n metadata = {"path": MetadataValue.path(str(path))}\n custom_metadata = self.get_metadata(context=context, obj=obj)\n metadata.update(custom_metadata) # type: ignore\n\n context.add_output_metadata(metadata)
\n\n\ndef is_dict_type(type_obj) -> bool:\n if type_obj == dict:\n return True\n\n if hasattr(type_obj, "__origin__") and type_obj.__origin__ in (dict, Dict, Mapping):\n return True\n\n return False\n
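For illustration, a minimal sketch of a concrete subclass: only the two abstract methods and the class-level ``extension`` attribute need to be supplied. The ``JSONUPathIOManager`` name and the JSON serialization format are illustrative assumptions, not part of the module above.

.. code-block:: python

    import json
    from typing import Any

    from upath import UPath

    from dagster import InputContext, OutputContext, UPathIOManager


    class JSONUPathIOManager(UPathIOManager):
        # The base class appends this suffix to every path it builds.
        extension: str = ".json"

        def dump_to_path(self, context: OutputContext, obj: Any, path: UPath) -> None:
            # UPath targets local or cloud storage transparently via fsspec.
            with path.open("w") as f:
                json.dump(obj, f)

        def load_from_path(self, context: InputContext, path: UPath) -> Any:
            with path.open("r") as f:
                return json.load(f)


    # Point the manager at a base path; any fsspec-compatible URL could be used instead.
    io_manager = JSONUPathIOManager(base_path=UPath("/tmp/dagster_outputs"))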
", "current_page_name": "_modules/dagster/_core/storage/upath_io_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.storage.upath_io_manager"}}, "types": {"config_schema": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.types.config_schema

\nimport hashlib\nfrom abc import ABC, abstractmethod\nfrom typing import TYPE_CHECKING, AbstractSet, Any, Callable, Iterator, Optional, cast\n\nfrom typing_extensions import TypeAlias\n\nimport dagster._check as check\nfrom dagster._annotations import experimental_param\nfrom dagster._config import ConfigType\nfrom dagster._core.decorator_utils import get_function_params, validate_expected_params\nfrom dagster._core.errors import DagsterInvalidDefinitionError\n\nfrom ..definitions.resource_requirement import (\n    ResourceRequirement,\n    TypeLoaderResourceRequirement,\n)\n\nif TYPE_CHECKING:\n    from dagster._core.execution.context.system import (\n        DagsterTypeLoaderContext,\n    )\n\n\n
[docs]class DagsterTypeLoader(ABC):\n """Dagster type loaders are used to load unconnected inputs of the dagster type they are attached\n to.\n\n The recommended way to define a type loader is with the\n :py:func:`@dagster_type_loader <dagster_type_loader>` decorator.\n """\n\n @property\n @abstractmethod\n def schema_type(self) -> ConfigType:\n pass\n\n @property\n def loader_version(self) -> Optional[str]:\n return None\n\n def compute_loaded_input_version(self, _config_value: object) -> Optional[str]:\n return None\n\n def construct_from_config_value(\n self, _context: "DagsterTypeLoaderContext", config_value: object\n ) -> object:\n """How to create a runtime value from config data."""\n return config_value\n\n def required_resource_keys(self) -> AbstractSet[str]:\n return frozenset()\n\n def get_resource_requirements(\n self, outer_context: Optional[object] = None\n ) -> Iterator[ResourceRequirement]:\n type_display_name = cast(str, outer_context)\n for resource_key in sorted(list(self.required_resource_keys())):\n yield TypeLoaderResourceRequirement(\n key=resource_key, type_display_name=type_display_name\n )
\n\n\n@experimental_param(param="loader_version")\n@experimental_param(param="external_version_fn")\nclass DagsterTypeLoaderFromDecorator(DagsterTypeLoader):\n def __init__(\n self,\n config_type,\n func,\n required_resource_keys,\n loader_version=None,\n external_version_fn=None,\n ):\n self._config_type = check.inst_param(config_type, "config_type", ConfigType)\n self._func = check.callable_param(func, "func")\n self._required_resource_keys = check.opt_set_param(\n required_resource_keys, "required_resource_keys", of_type=str\n )\n self._loader_version = check.opt_str_param(loader_version, "loader_version")\n self._external_version_fn = check.opt_callable_param(\n external_version_fn, "external_version_fn"\n )\n\n @property\n def schema_type(self) -> ConfigType:\n return self._config_type\n\n @property\n def loader_version(self) -> Optional[str]:\n return self._loader_version\n\n def compute_loaded_input_version(self, config_value: object) -> Optional[str]:\n """Compute the type-loaded input from a given config_value.\n\n Args:\n config_value (object): Config value to be ingested by the external version\n loading function.\n\n Returns:\n Optional[str]: Hash of concatenated loader version and external input version if both\n are provided, else None.\n """\n version = ""\n if self.loader_version:\n version += str(self.loader_version)\n if self._external_version_fn:\n ext_version = self._external_version_fn(config_value)\n version += str(ext_version)\n\n if version == "":\n return None # Sentinel value for no version provided.\n else:\n return hashlib.sha1(version.encode("utf-8")).hexdigest()\n\n def construct_from_config_value(\n self, context: "DagsterTypeLoaderContext", config_value: object\n ):\n return self._func(context, config_value)\n\n def required_resource_keys(self):\n return frozenset(self._required_resource_keys)\n\n\ndef _create_type_loader_for_decorator(\n config_type: ConfigType,\n func,\n required_resource_keys: Optional[AbstractSet[str]],\n loader_version: Optional[str] = None,\n external_version_fn: Optional[Callable[[object], str]] = None,\n):\n return DagsterTypeLoaderFromDecorator(\n config_type, func, required_resource_keys, loader_version, external_version_fn\n )\n\n\nDagsterTypeLoaderFn: TypeAlias = Callable[["DagsterTypeLoaderContext", Any], Any]\n\n\n
[docs]def dagster_type_loader(\n config_schema: object,\n required_resource_keys: Optional[AbstractSet[str]] = None,\n loader_version: Optional[str] = None,\n external_version_fn: Optional[Callable[[object], str]] = None,\n) -> Callable[[DagsterTypeLoaderFn], DagsterTypeLoaderFromDecorator]:\n """Create an dagster type loader that maps config data to a runtime value.\n\n The decorated function should take the execution context and parsed config value and return the\n appropriate runtime value.\n\n Args:\n config_schema (ConfigSchema): The schema for the config that's passed to the decorated\n function.\n loader_version (str): (Experimental) The version of the decorated compute function. Two\n loading functions should have the same version if and only if they deterministically\n produce the same outputs when provided the same inputs.\n external_version_fn (Callable): (Experimental) A function that takes in the same parameters as the loader\n function (config_value) and returns a representation of the version of the external\n asset (str). Two external assets with identical versions are treated as identical to one\n another.\n\n Examples:\n .. code-block:: python\n\n @dagster_type_loader(Permissive())\n def load_dict(_context, value):\n return value\n """\n from dagster._config import resolve_to_config_type\n\n config_type = resolve_to_config_type(config_schema)\n assert isinstance(\n config_type, ConfigType\n ), f"{config_schema} could not be resolved to config type"\n EXPECTED_POSITIONALS = ["context", "*"]\n\n def wrapper(func: DagsterTypeLoaderFn) -> DagsterTypeLoaderFromDecorator:\n params = get_function_params(func)\n missing_positional = validate_expected_params(params, EXPECTED_POSITIONALS)\n if missing_positional:\n raise DagsterInvalidDefinitionError(\n f"@dagster_type_loader '{func.__name__}' decorated function does not have required"\n f" positional parameter '{missing_positional}'. @dagster_type_loader decorated"\n " functions should only have keyword arguments that match input names and a first"\n " positional parameter named 'context'."\n )\n\n return _create_type_loader_for_decorator(\n config_type, func, required_resource_keys, loader_version, external_version_fn\n )\n\n return wrapper
\n
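For illustration, a slightly fuller sketch than the ``Permissive()`` example above: a loader with a structured config schema attached to a custom ``DagsterType``, so that an unconnected input of that type can be supplied via run config. The ``load_lines``/``Lines`` names and the newline-separated file format are illustrative assumptions.

.. code-block:: python

    from dagster import DagsterType, dagster_type_loader


    @dagster_type_loader(config_schema={"path": str})
    def load_lines(_context, config_value):
        # Map the validated config dict to a runtime value of the type below.
        with open(config_value["path"]) as f:
            return f.read().splitlines()


    Lines = DagsterType(
        name="Lines",
        type_check_fn=lambda _context, value: isinstance(value, list),
        loader=load_lines,
    )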
", "current_page_name": "_modules/dagster/_core/types/config_schema", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.types.config_schema"}, "dagster_type": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.types.dagster_type

\nimport typing as t\nfrom abc import abstractmethod\nfrom enum import Enum as PythonEnum\nfrom functools import partial\nfrom typing import (\n    AbstractSet as TypingAbstractSet,\n    AnyStr,\n    Iterator as TypingIterator,\n    Mapping,\n    Optional as TypingOptional,\n    Sequence,\n    Type as TypingType,\n    cast,\n)\n\nfrom typing_extensions import get_args, get_origin\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._builtins import BuiltinEnum\nfrom dagster._config import (\n    Array,\n    ConfigType,\n    Noneable as ConfigNoneable,\n)\nfrom dagster._core.definitions.events import DynamicOutput, Output, TypeCheck\nfrom dagster._core.definitions.metadata import (\n    MetadataValue,\n    RawMetadataValue,\n    normalize_metadata,\n)\nfrom dagster._core.errors import DagsterInvalidDefinitionError, DagsterInvariantViolationError\nfrom dagster._serdes import whitelist_for_serdes\nfrom dagster._seven import is_subclass\n\nfrom ..definitions.resource_requirement import (\n    RequiresResources,\n    ResourceRequirement,\n    TypeResourceRequirement,\n)\nfrom .builtin_config_schemas import BuiltinSchemas\nfrom .config_schema import DagsterTypeLoader\n\nif t.TYPE_CHECKING:\n    from dagster._core.definitions.node_definition import NodeDefinition\n    from dagster._core.execution.context.system import DagsterTypeLoaderContext, TypeCheckContext\n\nTypeCheckFn = t.Callable[["TypeCheckContext", AnyStr], t.Union[TypeCheck, bool]]\n\n\n@whitelist_for_serdes\nclass DagsterTypeKind(PythonEnum):\n    ANY = "ANY"\n    SCALAR = "SCALAR"\n    LIST = "LIST"\n    NOTHING = "NOTHING"\n    NULLABLE = "NULLABLE"\n    REGULAR = "REGULAR"\n\n\n
[docs]class DagsterType(RequiresResources):\n """Define a type in dagster. These can be used in the inputs and outputs of ops.\n\n Args:\n type_check_fn (Callable[[TypeCheckContext, Any], [Union[bool, TypeCheck]]]):\n The function that defines the type check. It takes the value flowing\n through the input or output of the op. If it passes, return either\n ``True`` or a :py:class:`~dagster.TypeCheck` with ``success`` set to ``True``. If it fails,\n return either ``False`` or a :py:class:`~dagster.TypeCheck` with ``success`` set to ``False``.\n The first argument must be named ``context`` (or, if unused, ``_``, ``_context``, or ``context_``).\n Use ``required_resource_keys`` for access to resources.\n key (Optional[str]): The unique key to identify types programmatically.\n The key property always has a value. If you omit key to the argument\n to the init function, it instead receives the value of ``name``. If\n neither ``key`` nor ``name`` is provided, a ``CheckError`` is thrown.\n\n In the case of a generic type such as ``List`` or ``Optional``, this is\n generated programmatically based on the type parameters.\n\n For most use cases, name should be set and the key argument should\n not be specified.\n name (Optional[str]): A unique name given by a user. If ``key`` is ``None``, ``key``\n becomes this value. Name is not given in a case where the user does\n not specify a unique name for this type, such as a generic class.\n description (Optional[str]): A markdown-formatted string, displayed in tooling.\n loader (Optional[DagsterTypeLoader]): An instance of a class that\n inherits from :py:class:`~dagster.DagsterTypeLoader` and can map config data to a value of\n this type. Specify this argument if you will need to shim values of this type using the\n config machinery. As a rule, you should use the\n :py:func:`@dagster_type_loader <dagster.dagster_type_loader>` decorator to construct\n these arguments.\n required_resource_keys (Optional[Set[str]]): Resource keys required by the ``type_check_fn``.\n is_builtin (bool): Defaults to False. This is used by tools to display or\n filter built-in types (such as :py:class:`~dagster.String`, :py:class:`~dagster.Int`) to visually distinguish\n them from user-defined types. Meant for internal use.\n kind (DagsterTypeKind): Defaults to None. This is used to determine the kind of runtime type\n for InputDefinition and OutputDefinition type checking.\n typing_type: Defaults to None. A valid python typing type (e.g. Optional[List[int]]) for the\n value contained within the DagsterType. 
Meant for internal use.\n """\n\n def __init__(\n self,\n type_check_fn: TypeCheckFn,\n key: t.Optional[str] = None,\n name: t.Optional[str] = None,\n is_builtin: bool = False,\n description: t.Optional[str] = None,\n loader: t.Optional[DagsterTypeLoader] = None,\n required_resource_keys: t.Optional[t.Set[str]] = None,\n kind: DagsterTypeKind = DagsterTypeKind.REGULAR,\n typing_type: t.Any = t.Any,\n metadata: t.Optional[t.Mapping[str, RawMetadataValue]] = None,\n ):\n check.opt_str_param(key, "key")\n check.opt_str_param(name, "name")\n\n check.invariant(not (name is None and key is None), "Must set key or name")\n if name is None:\n key = check.not_none(\n key,\n "If name is not provided, must provide key.",\n )\n self.key, self._name = key, None\n elif key is None:\n name = check.not_none(\n name,\n "If key is not provided, must provide name.",\n )\n self.key, self._name = name, name\n else:\n check.invariant(key and name)\n self.key, self._name = key, name\n\n self._description = check.opt_str_param(description, "description")\n self._loader = check.opt_inst_param(loader, "loader", DagsterTypeLoader)\n\n self._required_resource_keys = check.opt_set_param(\n required_resource_keys,\n "required_resource_keys",\n )\n\n self._type_check_fn = check.callable_param(type_check_fn, "type_check_fn")\n _validate_type_check_fn(self._type_check_fn, self._name)\n\n self.is_builtin = check.bool_param(is_builtin, "is_builtin")\n check.invariant(\n self.display_name is not None,\n f"All types must have a valid display name, got None for key {key}",\n )\n\n self.kind = check.inst_param(kind, "kind", DagsterTypeKind)\n\n self._typing_type = typing_type\n\n self._metadata = normalize_metadata(\n check.opt_mapping_param(metadata, "metadata", key_type=str),\n )\n\n
[docs] @public\n def type_check(self, context: "TypeCheckContext", value: object) -> TypeCheck:\n """Type check the value against the type.\n\n Args:\n context (TypeCheckContext): The context of the type check.\n value (Any): The value to check.\n\n Returns:\n TypeCheck: The result of the type check.\n """\n retval = self._type_check_fn(context, value)\n\n if not isinstance(retval, (bool, TypeCheck)):\n raise DagsterInvariantViolationError(\n f"You have returned {retval!r} of type {type(retval)} from the type "\n f'check function of type "{self.key}". Return value must be instance '\n "of TypeCheck or a bool."\n )\n\n return TypeCheck(success=retval) if isinstance(retval, bool) else retval
\n\n def __eq__(self, other):\n return isinstance(other, DagsterType) and self.key == other.key\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash(self.key)\n\n @staticmethod\n def from_builtin_enum(builtin_enum) -> "DagsterType":\n check.invariant(BuiltinEnum.contains(builtin_enum), "must be member of BuiltinEnum")\n return _RUNTIME_MAP[builtin_enum]\n\n @property\n def metadata(self) -> t.Mapping[str, MetadataValue]:\n return self._metadata\n\n @public\n @property\n def required_resource_keys(self) -> TypingAbstractSet[str]:\n """AbstractSet[str]: Set of resource keys required by the type check function."""\n return self._required_resource_keys\n\n @public\n @property\n def display_name(self) -> str:\n """Either the name or key (if name is `None`) of the type, overridden in many subclasses."""\n return cast(str, self._name or self.key)\n\n @public\n @property\n def unique_name(self) -> t.Optional[str]:\n """The unique name of this type. Can be None if the type is not unique, such as container types."""\n # TODO: docstring and body inconsistent-- can this be None or not?\n check.invariant(\n self._name is not None,\n f"unique_name requested but is None for type {self.display_name}",\n )\n return self._name\n\n @public\n @property\n def has_unique_name(self) -> bool:\n """bool: Whether the type has a unique name."""\n return self._name is not None\n\n @public\n @property\n def typing_type(self) -> t.Any:\n """Any: The python typing type for this type."""\n return self._typing_type\n\n @public\n @property\n def loader(self) -> t.Optional[DagsterTypeLoader]:\n """Optional[DagsterTypeLoader]: Loader for this type, if any."""\n return self._loader\n\n @public\n @property\n def description(self) -> t.Optional[str]:\n """Optional[str]: Description of the type, or None if not provided."""\n return self._description\n\n @property\n def inner_types(self) -> t.Sequence["DagsterType"]:\n return []\n\n @property\n def loader_schema_key(self) -> t.Optional[str]:\n return self.loader.schema_type.key if self.loader else None\n\n @property\n def type_param_keys(self) -> t.Sequence[str]:\n return []\n\n @property\n def is_nothing(self) -> bool:\n return self.kind == DagsterTypeKind.NOTHING\n\n @property\n def supports_fan_in(self) -> bool:\n return False\n\n def get_inner_type_for_fan_in(self) -> "DagsterType":\n check.failed(\n "DagsterType {name} does not support fan-in, should have checked supports_fan_in before"\n " calling getter.".format(name=self.display_name)\n )\n\n def get_resource_requirements(\n self, _outer_context: TypingOptional[object] = None\n ) -> TypingIterator[ResourceRequirement]:\n for resource_key in sorted(list(self.required_resource_keys)):\n yield TypeResourceRequirement(key=resource_key, type_display_name=self.display_name)\n if self.loader:\n yield from self.loader.get_resource_requirements(outer_context=self.display_name)
\n\n\ndef _validate_type_check_fn(fn: t.Callable, name: t.Optional[str]) -> bool:\n from dagster._seven import get_arg_names\n\n args = get_arg_names(fn)\n\n # py2 doesn't filter out self\n if len(args) >= 1 and args[0] == "self":\n args = args[1:]\n\n if len(args) == 2:\n possible_names = {\n "_",\n "context",\n "_context",\n "context_",\n }\n if args[0] not in possible_names:\n DagsterInvalidDefinitionError(\n f'type_check function on type "{name}" must have first '\n 'argument named "context" (or _, _context, context_).'\n )\n return True\n\n raise DagsterInvalidDefinitionError(\n f'type_check_fn argument on type "{name}" must take 2 arguments, received {len(args)}.'\n )\n\n\nclass BuiltinScalarDagsterType(DagsterType):\n def __init__(self, name: str, type_check_fn: TypeCheckFn, typing_type: t.Type, **kwargs):\n super(BuiltinScalarDagsterType, self).__init__(\n key=name,\n name=name,\n kind=DagsterTypeKind.SCALAR,\n type_check_fn=type_check_fn,\n is_builtin=True,\n typing_type=typing_type,\n **kwargs,\n )\n\n # This is passed to the constructor of subclasses as the argument `type_check_fn`-- that's why\n # it exists together with the `type_check_fn` arg.\n def type_check_fn(self, _context, value) -> TypeCheck:\n return self.type_check_scalar_value(value)\n\n @abstractmethod\n def type_check_scalar_value(self, _value) -> TypeCheck:\n raise NotImplementedError()\n\n\ndef _typemismatch_error_str(value: object, expected_type_desc: str) -> str:\n return 'Value "{value}" of python type "{python_type}" must be a {type_desc}.'.format(\n value=value, python_type=type(value).__name__, type_desc=expected_type_desc\n )\n\n\ndef _fail_if_not_of_type(\n value: object, value_type: t.Type[t.Any], value_type_desc: str\n) -> TypeCheck:\n if not isinstance(value, value_type):\n return TypeCheck(success=False, description=_typemismatch_error_str(value, value_type_desc))\n\n return TypeCheck(success=True)\n\n\nclass _Int(BuiltinScalarDagsterType):\n def __init__(self):\n super(_Int, self).__init__(\n name="Int",\n loader=BuiltinSchemas.INT_INPUT,\n type_check_fn=self.type_check_fn,\n typing_type=int,\n )\n\n def type_check_scalar_value(self, value) -> TypeCheck:\n return _fail_if_not_of_type(value, int, "int")\n\n\nclass _String(BuiltinScalarDagsterType):\n def __init__(self):\n super(_String, self).__init__(\n name="String",\n loader=BuiltinSchemas.STRING_INPUT,\n type_check_fn=self.type_check_fn,\n typing_type=str,\n )\n\n def type_check_scalar_value(self, value: object) -> TypeCheck:\n return _fail_if_not_of_type(value, str, "string")\n\n\nclass _Float(BuiltinScalarDagsterType):\n def __init__(self):\n super(_Float, self).__init__(\n name="Float",\n loader=BuiltinSchemas.FLOAT_INPUT,\n type_check_fn=self.type_check_fn,\n typing_type=float,\n )\n\n def type_check_scalar_value(self, value: object) -> TypeCheck:\n return _fail_if_not_of_type(value, float, "float")\n\n\nclass _Bool(BuiltinScalarDagsterType):\n def __init__(self):\n super(_Bool, self).__init__(\n name="Bool",\n loader=BuiltinSchemas.BOOL_INPUT,\n type_check_fn=self.type_check_fn,\n typing_type=bool,\n )\n\n def type_check_scalar_value(self, value: object) -> TypeCheck:\n return _fail_if_not_of_type(value, bool, "bool")\n\n\nclass Anyish(DagsterType):\n def __init__(\n self,\n key: t.Optional[str],\n name: t.Optional[str],\n loader: t.Optional[DagsterTypeLoader] = None,\n is_builtin: bool = False,\n description: t.Optional[str] = None,\n ):\n super(Anyish, self).__init__(\n key=key,\n name=name,\n kind=DagsterTypeKind.ANY,\n loader=loader,\n 
is_builtin=is_builtin,\n type_check_fn=self.type_check_method,\n description=description,\n typing_type=t.Any,\n )\n\n def type_check_method(self, _context: "TypeCheckContext", _value: object) -> TypeCheck:\n return TypeCheck(success=True)\n\n @property\n def supports_fan_in(self) -> bool:\n return True\n\n def get_inner_type_for_fan_in(self) -> DagsterType:\n # Anyish all the way down\n return self\n\n\nclass _Any(Anyish):\n def __init__(self):\n super(_Any, self).__init__(\n key="Any",\n name="Any",\n loader=BuiltinSchemas.ANY_INPUT,\n is_builtin=True,\n )\n\n\ndef create_any_type(\n name: str,\n loader: t.Optional[DagsterTypeLoader] = None,\n description: t.Optional[str] = None,\n) -> Anyish:\n return Anyish(\n key=name,\n name=name,\n description=description,\n loader=loader,\n )\n\n\nclass _Nothing(DagsterType):\n def __init__(self):\n super(_Nothing, self).__init__(\n key="Nothing",\n name="Nothing",\n kind=DagsterTypeKind.NOTHING,\n loader=None,\n type_check_fn=self.type_check_method,\n is_builtin=True,\n typing_type=type(None),\n )\n\n def type_check_method(self, _context: "TypeCheckContext", value: object) -> TypeCheck:\n if value is not None:\n return TypeCheck(\n success=False,\n description=f"Value must be None, got a {type(value)}",\n )\n\n return TypeCheck(success=True)\n\n @property\n def supports_fan_in(self) -> bool:\n return True\n\n def get_inner_type_for_fan_in(self) -> DagsterType:\n return self\n\n\ndef isinstance_type_check_fn(\n expected_python_type: t.Union[t.Type, t.Tuple[t.Type, ...]],\n dagster_type_name: str,\n expected_python_type_str: str,\n) -> TypeCheckFn:\n def type_check(_context: "TypeCheckContext", value: object) -> TypeCheck:\n if not isinstance(value, expected_python_type):\n return TypeCheck(\n success=False,\n description=(\n f"Value of type {type(value)} failed type check for Dagster type"\n f" {dagster_type_name}, expected value to be of Python type"\n f" {expected_python_type_str}."\n ),\n )\n\n return TypeCheck(success=True)\n\n return type_check\n\n\n
[docs]class PythonObjectDagsterType(DagsterType):\n """Define a type in dagster whose typecheck is an isinstance check.\n\n Specifically, the type can either be a single python type (e.g. int),\n or a tuple of types (e.g. (int, float)) which is treated as a union.\n\n Examples:\n .. code-block:: python\n\n ntype = PythonObjectDagsterType(python_type=int)\n assert ntype.name == 'int'\n assert_success(ntype, 1)\n assert_failure(ntype, 'a')\n\n .. code-block:: python\n\n ntype = PythonObjectDagsterType(python_type=(int, float))\n assert ntype.name == 'Union[int, float]'\n assert_success(ntype, 1)\n assert_success(ntype, 1.5)\n assert_failure(ntype, 'a')\n\n\n Args:\n python_type (Union[Type, Tuple[Type, ...]): The dagster typecheck function calls instanceof on\n this type.\n name (Optional[str]): Name the type. Defaults to the name of ``python_type``.\n key (Optional[str]): Key of the type. Defaults to name.\n description (Optional[str]): A markdown-formatted string, displayed in tooling.\n loader (Optional[DagsterTypeLoader]): An instance of a class that\n inherits from :py:class:`~dagster.DagsterTypeLoader` and can map config data to a value of\n this type. Specify this argument if you will need to shim values of this type using the\n config machinery. As a rule, you should use the\n :py:func:`@dagster_type_loader <dagster.dagster_type_loader>` decorator to construct\n these arguments.\n """\n\n def __init__(\n self,\n python_type: t.Union[t.Type, t.Tuple[t.Type, ...]],\n key: t.Optional[str] = None,\n name: t.Optional[str] = None,\n **kwargs,\n ):\n if isinstance(python_type, tuple):\n self.python_type = check.tuple_param(\n python_type, "python_type", of_shape=tuple(type for item in python_type)\n )\n self.type_str = "Union[{}]".format(\n ", ".join(python_type.__name__ for python_type in python_type)\n )\n typing_type = t.Union[python_type] # type: ignore\n\n else:\n self.python_type = check.class_param(python_type, "python_type")\n self.type_str = cast(str, python_type.__name__)\n typing_type = self.python_type\n name = check.opt_str_param(name, "name", self.type_str)\n key = check.opt_str_param(key, "key", name)\n super(PythonObjectDagsterType, self).__init__(\n key=key,\n name=name,\n type_check_fn=isinstance_type_check_fn(python_type, name, self.type_str),\n typing_type=typing_type,\n **kwargs,\n )
\n\n\nclass NoneableInputSchema(DagsterTypeLoader):\n def __init__(self, inner_dagster_type: DagsterType):\n self._inner_dagster_type = check.inst_param(\n inner_dagster_type, "inner_dagster_type", DagsterType\n )\n self._inner_loader = check.not_none_param(inner_dagster_type.loader, "inner_dagster_type")\n self._schema_type = ConfigNoneable(self._inner_loader.schema_type)\n\n @property\n def schema_type(self) -> ConfigType:\n return self._schema_type\n\n def construct_from_config_value(\n self, context: "DagsterTypeLoaderContext", config_value: object\n ) -> object:\n if config_value is None:\n return None\n return self._inner_loader.construct_from_config_value(context, config_value)\n\n\ndef _create_nullable_input_schema(inner_type: DagsterType) -> t.Optional[DagsterTypeLoader]:\n if not inner_type.loader:\n return None\n\n return NoneableInputSchema(inner_type)\n\n\nclass OptionalType(DagsterType):\n def __init__(self, inner_type: DagsterType):\n inner_type = resolve_dagster_type(inner_type)\n\n if inner_type is Nothing:\n raise DagsterInvalidDefinitionError(\n "Type Nothing can not be wrapped in List or Optional"\n )\n\n key = "Optional." + cast(str, inner_type.key)\n self.inner_type = inner_type\n super(OptionalType, self).__init__(\n key=key,\n name=None,\n kind=DagsterTypeKind.NULLABLE,\n type_check_fn=self.type_check_method,\n loader=_create_nullable_input_schema(inner_type),\n # This throws a type error with Py\n typing_type=t.Optional[inner_type.typing_type],\n )\n\n @property\n def display_name(self) -> str:\n return self.inner_type.display_name + "?"\n\n def type_check_method(self, context, value):\n return (\n TypeCheck(success=True) if value is None else self.inner_type.type_check(context, value)\n )\n\n @property\n def inner_types(self):\n return [self.inner_type] + self.inner_type.inner_types\n\n @property\n def type_param_keys(self):\n return [self.inner_type.key]\n\n @property\n def supports_fan_in(self):\n return self.inner_type.supports_fan_in\n\n def get_inner_type_for_fan_in(self):\n return self.inner_type.get_inner_type_for_fan_in()\n\n\nclass ListInputSchema(DagsterTypeLoader):\n def __init__(self, inner_dagster_type):\n self._inner_dagster_type = check.inst_param(\n inner_dagster_type, "inner_dagster_type", DagsterType\n )\n check.param_invariant(inner_dagster_type.loader, "inner_dagster_type")\n self._schema_type = Array(inner_dagster_type.loader.schema_type)\n\n @property\n def schema_type(self):\n return self._schema_type\n\n def construct_from_config_value(self, context, config_value):\n convert_item = partial(self._inner_dagster_type.loader.construct_from_config_value, context)\n return list(map(convert_item, config_value))\n\n\ndef _create_list_input_schema(inner_type):\n if not inner_type.loader:\n return None\n\n return ListInputSchema(inner_type)\n\n\nclass ListType(DagsterType):\n def __init__(self, inner_type: DagsterType):\n key = "List." 
+ inner_type.key\n self.inner_type = inner_type\n super(ListType, self).__init__(\n key=key,\n name=None,\n kind=DagsterTypeKind.LIST,\n type_check_fn=self.type_check_method,\n loader=_create_list_input_schema(inner_type),\n typing_type=t.List[inner_type.typing_type],\n )\n\n @property\n def display_name(self):\n return "[" + self.inner_type.display_name + "]"\n\n def type_check_method(self, context, value):\n value_check = _fail_if_not_of_type(value, list, "list")\n if not value_check.success:\n return value_check\n\n for item in value:\n item_check = self.inner_type.type_check(context, item)\n if not item_check.success:\n return item_check\n\n return TypeCheck(success=True)\n\n @property\n def inner_types(self):\n return [self.inner_type] + self.inner_type.inner_types\n\n @property\n def type_param_keys(self):\n return [self.inner_type.key]\n\n @property\n def supports_fan_in(self):\n return True\n\n def get_inner_type_for_fan_in(self):\n return self.inner_type\n\n\nclass DagsterListApi:\n def __getitem__(self, inner_type):\n check.not_none_param(inner_type, "inner_type")\n return _List(resolve_dagster_type(inner_type))\n\n def __call__(self, inner_type):\n check.not_none_param(inner_type, "inner_type")\n return _List(inner_type)\n\n\nList: DagsterListApi = DagsterListApi()\n\n\ndef _List(inner_type):\n check.inst_param(inner_type, "inner_type", DagsterType)\n if inner_type is Nothing:\n raise DagsterInvalidDefinitionError("Type Nothing can not be wrapped in List or Optional")\n return ListType(inner_type)\n\n\nclass Stringish(DagsterType):\n def __init__(self, key: t.Optional[str] = None, name: t.Optional[str] = None, **kwargs):\n name = check.opt_str_param(name, "name", type(self).__name__)\n key = check.opt_str_param(key, "key", name)\n super(Stringish, self).__init__(\n key=key,\n name=name,\n kind=DagsterTypeKind.SCALAR,\n type_check_fn=self.type_check_method,\n loader=BuiltinSchemas.STRING_INPUT,\n typing_type=str,\n **kwargs,\n )\n\n def type_check_method(self, _context: "TypeCheckContext", value: object) -> TypeCheck:\n return _fail_if_not_of_type(value, str, "string")\n\n\ndef create_string_type(name, description=None):\n return Stringish(name=name, key=name, description=description)\n\n\nAny = _Any()\nBool = _Bool()\nFloat = _Float()\nInt = _Int()\nString = _String()\nNothing = _Nothing()\n\n_RUNTIME_MAP = {\n BuiltinEnum.ANY: Any,\n BuiltinEnum.BOOL: Bool,\n BuiltinEnum.FLOAT: Float,\n BuiltinEnum.INT: Int,\n BuiltinEnum.STRING: String,\n BuiltinEnum.NOTHING: Nothing,\n}\n\n_PYTHON_TYPE_TO_DAGSTER_TYPE_MAPPING_REGISTRY: t.Dict[type, DagsterType] = {}\n"""Python types corresponding to user-defined RunTime types created using @map_to_dagster_type or\nas_dagster_type are registered here so that we can remap the Python types to runtime types."""\n\n\n
[docs]def make_python_type_usable_as_dagster_type(\n python_type: TypingType[t.Any], dagster_type: DagsterType\n) -> None:\n """Take any existing python type and map it to a dagster type (generally created with\n :py:class:`DagsterType <dagster.DagsterType>`) This can only be called once\n on a given python type.\n """\n check.inst_param(python_type, "python_type", type)\n check.inst_param(dagster_type, "dagster_type", DagsterType)\n registered_dagster_type = _PYTHON_TYPE_TO_DAGSTER_TYPE_MAPPING_REGISTRY.get(python_type)\n\n if registered_dagster_type is None:\n _PYTHON_TYPE_TO_DAGSTER_TYPE_MAPPING_REGISTRY[python_type] = dagster_type\n elif registered_dagster_type is not dagster_type:\n # This would be just a great place to insert a short URL pointing to the type system\n # documentation into the error message\n # https://github.com/dagster-io/dagster/issues/1831\n if isinstance(registered_dagster_type, TypeHintInferredDagsterType):\n raise DagsterInvalidDefinitionError(\n "A Dagster type has already been registered for the Python type "\n f'{python_type}. The Dagster type was "auto-registered" - i.e. a solid definition '\n "used the Python type as an annotation for one of its arguments or for its return "\n "value before make_python_type_usable_as_dagster_type was called, and we "\n "generated a Dagster type to correspond to it. To override the auto-generated "\n "Dagster type, call make_python_type_usable_as_dagster_type before any solid "\n "definitions refer to the Python type."\n )\n else:\n raise DagsterInvalidDefinitionError(\n "A Dagster type has already been registered for the Python type "\n f"{python_type}. make_python_type_usable_as_dagster_type can only "\n "be called once on a python type as it is registering a 1:1 mapping "\n "between that python type and a dagster type."\n )
\n\n\nDAGSTER_INVALID_TYPE_ERROR_MESSAGE = (\n "Invalid type: dagster_type must be an instance of DagsterType or a Python type: "\n "got {dagster_type}{additional_msg}"\n)\n\n\nclass TypeHintInferredDagsterType(DagsterType):\n def __init__(self, python_type: t.Type):\n qualified_name = f"{python_type.__module__}.{python_type.__name__}"\n self.python_type = python_type\n super(TypeHintInferredDagsterType, self).__init__(\n key=f"_TypeHintInferred[{qualified_name}]",\n description=(\n f"DagsterType created from a type hint for the Python type {qualified_name}"\n ),\n type_check_fn=isinstance_type_check_fn(\n python_type, python_type.__name__, qualified_name\n ),\n typing_type=python_type,\n )\n\n @property\n def display_name(self) -> str:\n return self.python_type.__name__\n\n\ndef resolve_dagster_type(dagster_type: object) -> DagsterType:\n # circular dep\n from dagster._utils.typing_api import is_typing_type\n\n from ..definitions.result import MaterializeResult\n from .primitive_mapping import (\n is_supported_runtime_python_builtin,\n remap_python_builtin_for_runtime,\n )\n from .python_dict import (\n Dict as DDict,\n PythonDict,\n )\n from .python_set import DagsterSetApi, PythonSet\n from .python_tuple import DagsterTupleApi, PythonTuple\n from .transform_typing import transform_typing_type\n\n check.invariant(\n not (isinstance(dagster_type, type) and is_subclass(dagster_type, ConfigType)),\n "Cannot resolve a config type to a runtime type",\n )\n\n check.invariant(\n not (isinstance(dagster_type, type) and is_subclass(dagster_type, DagsterType)),\n f"Do not pass runtime type classes. Got {dagster_type}",\n )\n\n # First, check to see if we're using Dagster's generic output type to do the type catching.\n if is_generic_output_annotation(dagster_type):\n type_args = get_args(dagster_type)\n # If no inner type was provided, forward Any type.\n dagster_type = type_args[0] if len(type_args) == 1 else Any\n elif is_dynamic_output_annotation(dagster_type):\n dynamic_out_annotation = get_args(dagster_type)[0]\n type_args = get_args(dynamic_out_annotation)\n dagster_type = type_args[0] if len(type_args) == 1 else Any\n elif dagster_type == MaterializeResult:\n # convert MaterializeResult type annotation to Nothing until returning\n # scalar values via MaterializeResult is supported\n # https://github.com/dagster-io/dagster/issues/16887\n dagster_type = Nothing\n\n # Then, check to see if it is part of python's typing library\n if is_typing_type(dagster_type):\n dagster_type = transform_typing_type(dagster_type)\n if isinstance(dagster_type, DagsterType):\n return dagster_type\n\n # Test for unhashable objects -- this is if, for instance, someone has passed us an instance of\n # a dict where they meant to pass dict or Dict, etc.\n try:\n hash(dagster_type)\n except TypeError as e:\n raise DagsterInvalidDefinitionError(\n DAGSTER_INVALID_TYPE_ERROR_MESSAGE.format(\n additional_msg=(\n ", which isn't hashable. 
Did you pass an instance of a type instead of "\n "the type?"\n ),\n dagster_type=str(dagster_type),\n )\n ) from e\n\n if BuiltinEnum.contains(dagster_type):\n return DagsterType.from_builtin_enum(dagster_type)\n\n if is_supported_runtime_python_builtin(dagster_type):\n return remap_python_builtin_for_runtime(dagster_type)\n\n if dagster_type is None:\n return Any\n\n if dagster_type is DDict:\n return PythonDict\n if isinstance(dagster_type, DagsterTupleApi):\n return PythonTuple\n if isinstance(dagster_type, DagsterSetApi):\n return PythonSet\n if isinstance(dagster_type, DagsterListApi):\n return List(Any)\n\n if isinstance(dagster_type, type):\n return resolve_python_type_to_dagster_type(dagster_type)\n\n raise DagsterInvalidDefinitionError(\n DAGSTER_INVALID_TYPE_ERROR_MESSAGE.format(\n dagster_type=str(dagster_type), additional_msg="."\n )\n )\n\n\ndef is_dynamic_output_annotation(dagster_type: object) -> bool:\n check.invariant(\n not (isinstance(dagster_type, type) and is_subclass(dagster_type, ConfigType)),\n "Cannot resolve a config type to a runtime type",\n )\n\n check.invariant(\n not (isinstance(dagster_type, type) and is_subclass(dagster_type, ConfigType)),\n f"Do not pass runtime type classes. Got {dagster_type}",\n )\n\n if dagster_type == DynamicOutput or get_origin(dagster_type) == DynamicOutput:\n raise DagsterInvariantViolationError(\n "Op annotated with return type DynamicOutput. DynamicOutputs can only be returned in"\n " the context of a List. If only one output is needed, use the Output API."\n )\n\n if get_origin(dagster_type) == list and len(get_args(dagster_type)) == 1:\n list_inner_type = get_args(dagster_type)[0]\n return list_inner_type == DynamicOutput or get_origin(list_inner_type) == DynamicOutput\n return False\n\n\ndef is_generic_output_annotation(dagster_type: object) -> bool:\n return dagster_type == Output or get_origin(dagster_type) == Output\n\n\ndef resolve_python_type_to_dagster_type(python_type: t.Type) -> DagsterType:\n """Resolves a Python type to a Dagster type."""\n check.inst_param(python_type, "python_type", type)\n\n if python_type in _PYTHON_TYPE_TO_DAGSTER_TYPE_MAPPING_REGISTRY:\n return _PYTHON_TYPE_TO_DAGSTER_TYPE_MAPPING_REGISTRY[python_type]\n else:\n dagster_type = TypeHintInferredDagsterType(python_type)\n _PYTHON_TYPE_TO_DAGSTER_TYPE_MAPPING_REGISTRY[python_type] = dagster_type\n return dagster_type\n\n\nALL_RUNTIME_BUILTINS = list(_RUNTIME_MAP.values())\n\n\ndef construct_dagster_type_dictionary(\n node_defs: Sequence["NodeDefinition"],\n) -> Mapping[str, DagsterType]:\n from dagster._core.definitions.graph_definition import GraphDefinition\n\n type_dict_by_name = {t.unique_name: t for t in ALL_RUNTIME_BUILTINS}\n type_dict_by_key = {t.key: t for t in ALL_RUNTIME_BUILTINS}\n\n def process_node_def(node_def: "NodeDefinition"):\n input_output_types = list(node_def.all_input_output_types())\n for dagster_type in input_output_types:\n # We don't do uniqueness check on key because with classes\n # like Array, Noneable, etc, those are ephemeral objects\n # and it is perfectly fine to have many of them.\n type_dict_by_key[dagster_type.key] = dagster_type\n\n if not dagster_type.has_unique_name:\n continue\n\n if dagster_type.unique_name not in type_dict_by_name:\n type_dict_by_name[dagster_type.unique_name] = dagster_type\n continue\n\n if type_dict_by_name[dagster_type.unique_name] is not dagster_type:\n raise DagsterInvalidDefinitionError(\n (\n 'You have created two dagster types with the same name "{type_name}". 
'\n                        "Dagster types must have unique names."\n                    ).format(type_name=dagster_type.display_name)\n                )\n\n        if isinstance(node_def, GraphDefinition):\n            for child_node_def in node_def.node_defs:\n                process_node_def(child_node_def)\n\n    for node_def in node_defs:\n        process_node_def(node_def)\n\n    return type_dict_by_key\n\n\nclass DagsterOptionalApi:\n    def __getitem__(self, inner_type: t.Union[t.Type, DagsterType]) -> OptionalType:\n        inner_type = resolve_dagster_type(check.not_none_param(inner_type, "inner_type"))\n        return OptionalType(inner_type)\n\n\nOptional: DagsterOptionalApi = DagsterOptionalApi()\n
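For illustration, a minimal sketch of the two most common ways to define a type described above: an isinstance-based ``PythonObjectDagsterType`` and a fully custom ``type_check_fn`` passed to ``DagsterType``, attached to an op via ``In``/``Out``. The ``Order``/``NonNegative`` names are illustrative assumptions.

.. code-block:: python

    from dagster import DagsterType, In, Out, PythonObjectDagsterType, TypeCheck, op


    class Order:
        def __init__(self, order_id: str, amount: float):
            self.order_id = order_id
            self.amount = amount


    # isinstance-based type check against the Order class.
    OrderDagsterType = PythonObjectDagsterType(python_type=Order, name="Order")


    def non_negative_check(_context, value) -> TypeCheck:
        # Fully custom check: succeed only for non-negative numbers.
        return TypeCheck(success=isinstance(value, (int, float)) and value >= 0)


    NonNegative = DagsterType(name="NonNegative", type_check_fn=non_negative_check)


    @op(ins={"order": In(dagster_type=OrderDagsterType)}, out=Out(dagster_type=NonNegative))
    def order_amount(order):
        return order.amount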
", "current_page_name": "_modules/dagster/_core/types/dagster_type", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.types.dagster_type"}, "decorator": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._core.types.decorator

\nfrom typing import TYPE_CHECKING, Callable, Optional, Type, TypeVar, Union, overload\n\nimport dagster._check as check\n\nfrom .dagster_type import PythonObjectDagsterType, make_python_type_usable_as_dagster_type\n\nif TYPE_CHECKING:\n    from dagster._core.types.config_schema import DagsterTypeLoader\n\nT_Type = TypeVar("T_Type", bound=Type[object])\n\n\n@overload\ndef usable_as_dagster_type(\n    name: Optional[str] = ...,\n    description: Optional[str] = ...,\n    loader: Optional["DagsterTypeLoader"] = ...,\n) -> Callable[[T_Type], T_Type]: ...\n\n\n@overload\ndef usable_as_dagster_type(\n    name: T_Type,\n) -> T_Type: ...\n\n\n
[docs]def usable_as_dagster_type(\n name: Optional[Union[str, T_Type]] = None,\n description: Optional[str] = None,\n loader: Optional["DagsterTypeLoader"] = None,\n) -> Union[T_Type, Callable[[T_Type], T_Type]]:\n """Decorate a Python class to make it usable as a Dagster Type.\n\n This is intended to make it straightforward to annotate existing business logic classes to\n make them dagster types whose typecheck is an isinstance check against that python class.\n\n Args:\n python_type (cls): The python type to make usable as python type.\n name (Optional[str]): Name of the new Dagster type. If ``None``, the name (``__name__``) of\n the ``python_type`` will be used.\n description (Optional[str]): A user-readable description of the type.\n loader (Optional[DagsterTypeLoader]): An instance of a class that\n inherits from :py:class:`DagsterTypeLoader` and can map config data to a value of\n this type. Specify this argument if you will need to shim values of this type using the\n config machinery. As a rule, you should use the\n :py:func:`@dagster_type_loader <dagster.dagster_type_loader>` decorator to construct\n these arguments.\n\n Examples:\n .. code-block:: python\n\n # dagster_aws.s3.file_manager.S3FileHandle\n @usable_as_dagster_type\n class S3FileHandle(FileHandle):\n def __init__(self, s3_bucket, s3_key):\n self._s3_bucket = check.str_param(s3_bucket, 's3_bucket')\n self._s3_key = check.str_param(s3_key, 's3_key')\n\n @property\n def s3_bucket(self):\n return self._s3_bucket\n\n @property\n def s3_key(self):\n return self._s3_key\n\n @property\n def path_desc(self):\n return self.s3_path\n\n @property\n def s3_path(self):\n return 's3://{bucket}/{key}'.format(bucket=self.s3_bucket, key=self.s3_key)\n """\n # check for no args, no parens case\n if isinstance(name, type):\n bare_cls = name # with no parens, name is actually the decorated class\n make_python_type_usable_as_dagster_type(\n bare_cls,\n PythonObjectDagsterType(python_type=bare_cls, name=bare_cls.__name__, description=None),\n )\n return bare_cls\n\n def _with_args(bare_cls: T_Type) -> T_Type:\n check.class_param(bare_cls, "bare_cls")\n new_name = check.opt_str_param(name, "name") if name else bare_cls.__name__\n\n make_python_type_usable_as_dagster_type(\n bare_cls,\n PythonObjectDagsterType(\n name=new_name,\n description=description,\n python_type=bare_cls,\n loader=loader,\n ),\n )\n return bare_cls\n\n return _with_args
\n
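For illustration, a minimal sketch of the decorator in its with-arguments form: once the class is decorated, a plain Python annotation on an op resolves to the registered isinstance-based Dagster type. The ``Customer`` class is an illustrative assumption.

.. code-block:: python

    from dagster import op, usable_as_dagster_type


    @usable_as_dagster_type(name="Customer")
    class Customer:
        def __init__(self, customer_id: str):
            self.customer_id = customer_id


    @op
    def greet(customer: Customer) -> str:
        # The annotation resolves to the auto-registered isinstance-based type.
        return f"hello {customer.customer_id}"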
", "current_page_name": "_modules/dagster/_core/types/decorator", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._core.types.decorator"}}}, "_serdes": {"config_class": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._serdes.config_class

\nimport importlib\nfrom abc import ABC, abstractmethod\nfrom typing import (\n    TYPE_CHECKING,\n    Any,\n    Dict,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Type,\n    TypeVar,\n    Union,\n    overload,\n)\n\nfrom typing_extensions import Self\n\nimport dagster._check as check\nfrom dagster._utils import convert_dagster_submodule_name\nfrom dagster._utils.yaml_utils import load_run_config_yaml\n\nfrom .serdes import (\n    NamedTupleSerializer,\n    whitelist_for_serdes,\n)\n\nif TYPE_CHECKING:\n    from dagster._config.config_schema import UserConfigSchema\n\nT_ConfigurableClass = TypeVar("T_ConfigurableClass")\n\n\nclass ConfigurableClassDataSerializer(NamedTupleSerializer["ConfigurableClassData"]):\n    def after_pack(self, **packed: Any) -> Dict[str, Any]:\n        packed["module_name"] = convert_dagster_submodule_name(packed["module_name"], "public")\n        return packed\n\n\n
[docs]@whitelist_for_serdes(serializer=ConfigurableClassDataSerializer)\nclass ConfigurableClassData(\n NamedTuple(\n "_ConfigurableClassData",\n [\n ("module_name", str),\n ("class_name", str),\n ("config_yaml", str),\n ],\n )\n):\n """Serializable tuple describing where to find a class and the config fragment that should\n be used to instantiate it.\n\n Users should not instantiate this class directly.\n\n Classes intended to be serialized in this way should implement the\n :py:class:`dagster.serdes.ConfigurableClass` mixin.\n """\n\n def __new__(cls, module_name: str, class_name: str, config_yaml: str):\n return super(ConfigurableClassData, cls).__new__(\n cls,\n convert_dagster_submodule_name(check.str_param(module_name, "module_name"), "private"),\n check.str_param(class_name, "class_name"),\n check.str_param(config_yaml, "config_yaml"),\n )\n\n @property\n def config_dict(self) -> Mapping[str, Any]:\n return check.is_dict(load_run_config_yaml(self.config_yaml), key_type=str)\n\n def info_dict(self) -> Mapping[str, Any]:\n return {\n "module": self.module_name,\n "class": self.class_name,\n "config": self.config_dict,\n }\n\n @overload\n def rehydrate(self, as_type: Type[T_ConfigurableClass]) -> T_ConfigurableClass: ...\n\n @overload\n def rehydrate(self, as_type: None = ...) -> "ConfigurableClass": ...\n\n def rehydrate(\n self, as_type: Optional[Type[T_ConfigurableClass]] = None\n ) -> Union["ConfigurableClass", T_ConfigurableClass]:\n from dagster._config import process_config, resolve_to_config_type\n from dagster._core.errors import DagsterInvalidConfigError\n\n try:\n module = importlib.import_module(self.module_name)\n except ModuleNotFoundError:\n check.failed(\n f"Couldn't import module {self.module_name} when attempting to load the "\n f"configurable class {self.module_name}.{self.class_name}"\n )\n try:\n klass = getattr(module, self.class_name)\n except AttributeError:\n check.failed(\n f"Couldn't find class {self.class_name} in module when attempting to load the "\n f"configurable class {self.module_name}.{self.class_name}"\n )\n\n if not issubclass(klass, as_type or ConfigurableClass):\n raise check.CheckError(\n klass,\n f"class {self.class_name} in module {self.module_name}",\n ConfigurableClass,\n )\n\n config_dict = self.config_dict\n result = process_config(resolve_to_config_type(klass.config_type()), config_dict)\n if not result.success:\n raise DagsterInvalidConfigError(\n f"Errors whilst loading configuration for {klass.config_type()}.",\n result.errors,\n config_dict,\n )\n return klass.from_config_value(self, check.not_none(result.value))
\n\n\n
[docs]class ConfigurableClass(ABC):\n """Abstract mixin for classes that can be loaded from config.\n\n This supports a powerful plugin pattern which avoids both a) a lengthy, hard-to-synchronize list\n of conditional imports / optional extras_requires in dagster core and b) a magic directory or\n file in which third parties can place plugin packages. Instead, the intention is to make, e.g.,\n run storage, pluggable with a config chunk like:\n\n .. code-block:: yaml\n\n run_storage:\n module: very_cool_package.run_storage\n class: SplendidRunStorage\n config:\n magic_word: "quux"\n\n This same pattern should eventually be viable for other system components, e.g. engines.\n\n The ``ConfigurableClass`` mixin provides the necessary hooks for classes to be instantiated from\n an instance of ``ConfigurableClassData``.\n\n Pieces of the Dagster system which we wish to make pluggable in this way should consume a config\n type such as:\n\n .. code-block:: python\n\n {'module': str, 'class': str, 'config': Field(Permissive())}\n\n """\n\n @property\n @abstractmethod\n def inst_data(self) -> Optional[ConfigurableClassData]:\n """Subclass must be able to return the inst_data as a property if it has been constructed\n through the from_config_value code path.\n """\n\n @classmethod\n @abstractmethod\n def config_type(cls) -> "UserConfigSchema":\n """Get the config type against which to validate a config yaml fragment.\n\n The only place config values matching this type are used is inside `from_config_value`. This\n is an alternative constructor for a class. It is a common pattern for the config type to\n match constructor arguments, so `from_config_value`\n\n The config type against which to validate a config yaml fragment\n serialized in an instance of ``ConfigurableClassData``.\n """\n ...\n # We need to raise `NotImplementedError` here because nothing prevents abstract class\n # methods from being called.\n raise NotImplementedError(f"{cls.__name__} must implement the config_type classmethod")\n\n @classmethod\n @abstractmethod\n def from_config_value(\n cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any]\n ) -> Self:\n """Create an instance of the ConfigurableClass from a validated config value.\n\n The config value used here should be derived from the accompanying `inst_data` argument.\n `inst_data` contains the yaml-serialized config-- this must be parsed and\n validated/normalized, then passed to this method for object instantiation. This is done in\n ConfigurableClassData.rehydrate.\n\n Args:\n config_value (dict): The validated config value to use. Typically this should be the\n ``value`` attribute of a\n :py:class:`~dagster._core.types.evaluator.evaluation.EvaluateValueResult`.\n\n\n A common pattern is for the implementation to align the config_value with the signature\n of the ConfigurableClass's constructor:\n\n .. code-block:: python\n\n @classmethod\n def from_config_value(cls, inst_data, config_value):\n return MyConfigurableClass(inst_data=inst_data, **config_value)\n\n """
\n\n\ndef class_from_code_pointer(module_name: str, class_name: str) -> Type[object]:\n try:\n module = importlib.import_module(module_name)\n except ModuleNotFoundError:\n check.failed(\n "Couldn't import module {module_name} when attempting to load the class {klass}".format(\n module_name=module_name,\n klass=module_name + "." + class_name,\n )\n )\n try:\n return getattr(module, class_name)\n except AttributeError:\n check.failed(\n "Couldn't find class {class_name} in module when attempting to load the "\n "class {klass}".format(\n class_name=class_name,\n klass=module_name + "." + class_name,\n )\n )\n
", "current_page_name": "_modules/dagster/_serdes/config_class", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._serdes.config_class"}}, "_utils": {"alabaster_version": "0.7.13", "alert": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._utils.alert

\nimport datetime\nimport smtplib\nimport ssl\nfrom typing import TYPE_CHECKING, Callable, Optional, Sequence, Union\n\nfrom dagster._annotations import deprecated_param\nfrom dagster._core.definitions.sensor_definition import DefaultSensorStatus, SensorDefinition\nfrom dagster._core.errors import DagsterInvalidDefinitionError\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.graph_definition import GraphDefinition\n    from dagster._core.definitions.job_definition import JobDefinition\n    from dagster._core.definitions.run_status_sensor_definition import RunFailureSensorContext\n    from dagster._core.definitions.selector import JobSelector, RepositorySelector\n    from dagster._core.definitions.unresolved_asset_job_definition import (\n        UnresolvedAssetJobDefinition,\n    )\n\n\ndef _default_failure_email_body(context: "RunFailureSensorContext") -> str:\n    from dagster._core.host_representation.external_data import DEFAULT_MODE_NAME\n\n    return "<br>".join(\n        [\n            f"Pipeline {context.dagster_run.job_name} failed!",\n            f"Run ID: {context.dagster_run.run_id}",\n            f"Mode: {DEFAULT_MODE_NAME}",\n            f"Error: {context.failure_event.message}",\n        ]\n    )\n\n\ndef _default_failure_email_subject(context) -> str:\n    return f"Dagster Run Failed: {context.pipeline_run.job_name}"\n\n\nEMAIL_MESSAGE = """From: {email_from}\nTo: {email_to}\nMIME-Version: 1.0\nContent-type: text/html\nSubject: {email_subject}\n\n{email_body}\n\n<!-- this ensures Gmail doesn't trim the email -->\n<span style="opacity: 0"> {randomness} </span>\n"""\n\n\ndef send_email_via_ssl(\n    email_from: str,\n    email_password: str,\n    email_to: Sequence[str],\n    message: str,\n    smtp_host: str,\n    smtp_port: int,\n):\n    context = ssl.create_default_context()\n    with smtplib.SMTP_SSL(smtp_host, smtp_port, context=context) as server:\n        server.login(email_from, email_password)\n        server.sendmail(email_from, email_to, message)\n\n\ndef send_email_via_starttls(\n    email_from: str,\n    email_password: str,\n    email_to: Sequence[str],\n    message: str,\n    smtp_host: str,\n    smtp_port: int,\n):\n    context = ssl.create_default_context()\n    with smtplib.SMTP(smtp_host, smtp_port) as server:\n        server.starttls(context=context)\n        server.login(email_from, email_password)\n        server.sendmail(email_from, email_to, message)\n\n\n
[docs]@deprecated_param(\n param="job_selection",\n breaking_version="2.0",\n additional_warn_text="Use `monitored_jobs` instead.",\n)\ndef make_email_on_run_failure_sensor(\n email_from: str,\n email_password: str,\n email_to: Sequence[str],\n email_body_fn: Callable[["RunFailureSensorContext"], str] = _default_failure_email_body,\n email_subject_fn: Callable[["RunFailureSensorContext"], str] = _default_failure_email_subject,\n smtp_host: str = "smtp.gmail.com",\n smtp_type: str = "SSL",\n smtp_port: Optional[int] = None,\n name: Optional[str] = None,\n webserver_base_url: Optional[str] = None,\n monitored_jobs: Optional[\n Sequence[\n Union[\n "JobDefinition",\n "GraphDefinition",\n "UnresolvedAssetJobDefinition",\n "RepositorySelector",\n "JobSelector",\n ]\n ]\n ] = None,\n job_selection: Optional[\n Sequence[\n Union[\n "JobDefinition",\n "GraphDefinition",\n "UnresolvedAssetJobDefinition",\n "RepositorySelector",\n "JobSelector",\n ]\n ]\n ] = None,\n monitor_all_repositories: bool = False,\n default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,\n) -> SensorDefinition:\n """Create a job failure sensor that sends email via the SMTP protocol.\n\n Args:\n email_from (str): The sender email address to send the message from.\n email_password (str): The password of the sender.\n email_to (List[str]): The receipt email addresses to send the message to.\n email_body_fn (Optional(Callable[[RunFailureSensorContext], str])): Function which\n takes in the ``RunFailureSensorContext`` outputs the email body you want to send.\n Defaults to the plain text that contains error message, job name, and run ID.\n email_subject_fn (Optional(Callable[[RunFailureSensorContext], str])): Function which\n takes in the ``RunFailureSensorContext`` outputs the email subject you want to send.\n Defaults to "Dagster Run Failed: <job_name>".\n smtp_host (str): The hostname of the SMTP server. Defaults to "smtp.gmail.com".\n smtp_type (str): The protocol; either "SSL" or "STARTTLS". Defaults to SSL.\n smtp_port (Optional[int]): The SMTP port. Defaults to 465 for SSL, 587 for STARTTLS.\n name: (Optional[str]): The name of the sensor. Defaults to "email_on_job_failure".\n webserver_base_url: (Optional[str]): The base url of your dagster-webserver instance. Specify this to allow\n messages to include deeplinks to the failed run.\n monitored_jobs (Optional[List[Union[JobDefinition, GraphDefinition, JobDefinition, RepositorySelector, JobSelector]]]):\n The jobs that will be monitored by this failure sensor. Defaults to None, which means the alert will\n be sent when any job in the repository fails. To monitor jobs in external repositories,\n use RepositorySelector and JobSelector.\n monitor_all_repositories (bool): If set to True, the sensor will monitor all runs in the\n Dagster instance. If set to True, an error will be raised if you also specify\n monitored_jobs or job_selection. Defaults to False.\n job_selection (Optional[List[Union[JobDefinition, GraphDefinition, JobDefinition, RepositorySelector, JobSelector]]]):\n (deprecated in favor of monitored_jobs) The jobs that will be monitored by this failure\n sensor. Defaults to None, which means the alert will be sent when any job in the repository fails.\n default_status (DefaultSensorStatus): Whether the sensor starts as running or not. The default\n status can be overridden from the Dagster UI or via the GraphQL API.\n\n Examples:\n .. 
code-block:: python\n\n email_on_run_failure = make_email_on_run_failure_sensor(\n email_from="no-reply@example.com",\n email_password=os.getenv("ALERT_EMAIL_PASSWORD"),\n email_to=["xxx@example.com"],\n )\n\n @repository\n def my_repo():\n return [my_job + email_on_run_failure]\n\n .. code-block:: python\n\n def my_message_fn(context: RunFailureSensorContext) -> str:\n return (\n f"Job {context.pipeline_run.job_name} failed!"\n f"Error: {context.failure_event.message}"\n )\n\n email_on_run_failure = make_email_on_run_failure_sensor(\n email_from="no-reply@example.com",\n email_password=os.getenv("ALERT_EMAIL_PASSWORD"),\n email_to=["xxx@example.com"],\n email_body_fn=my_message_fn,\n email_subject_fn=lambda _: "Dagster Alert",\n webserver_base_url="http://mycoolsite.com",\n )\n\n\n """\n from dagster._core.definitions.run_status_sensor_definition import (\n RunFailureSensorContext,\n run_failure_sensor,\n )\n\n jobs = monitored_jobs if monitored_jobs else job_selection\n\n @run_failure_sensor(\n name=name,\n monitored_jobs=jobs,\n default_status=default_status,\n monitor_all_repositories=monitor_all_repositories,\n )\n def email_on_run_failure(context: RunFailureSensorContext):\n email_body = email_body_fn(context)\n if webserver_base_url:\n email_body += (\n f'<p><a href="{webserver_base_url}/runs/{context.dagster_run.run_id}">View in'\n " the Dagster UI</a></p>"\n )\n\n message = EMAIL_MESSAGE.format(\n email_to=",".join(email_to),\n email_from=email_from,\n email_subject=email_subject_fn(context),\n email_body=email_body,\n randomness=datetime.datetime.now(),\n )\n\n if smtp_type == "SSL":\n send_email_via_ssl(\n email_from, email_password, email_to, message, smtp_host, smtp_port=smtp_port or 465\n )\n elif smtp_type == "STARTTLS":\n send_email_via_starttls(\n email_from, email_password, email_to, message, smtp_host, smtp_port=smtp_port or 587\n )\n else:\n raise DagsterInvalidDefinitionError(f'smtp_type "{smtp_type}" is not supported.')\n\n return email_on_run_failure
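The docstring examples above register the sensor with ``@repository``; a minimal sketch under the assumption that you are instead wiring it into a ``Definitions`` object, with a hypothetical ``my_job`` and the ``ALERT_EMAIL_PASSWORD`` environment variable standing in for real credentials:

.. code-block:: python

    import os

    from dagster import Definitions, job, op
    from dagster._utils.alert import make_email_on_run_failure_sensor

    @op
    def do_work():
        ...

    @job
    def my_job():
        do_work()

    email_on_run_failure = make_email_on_run_failure_sensor(
        email_from="no-reply@example.com",
        email_password=os.getenv("ALERT_EMAIL_PASSWORD", ""),
        email_to=["oncall@example.com"],
        monitored_jobs=[my_job],
        webserver_base_url="http://localhost:3000",
    )

    defs = Definitions(jobs=[my_job], sensors=[email_on_run_failure])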
\n
", "current_page_name": "_modules/dagster/_utils/alert", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}, {"link": "../", "title": "dagster._utils"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._utils.alert"}, "body": "

Source code for dagster._utils

\nimport _thread as thread\nimport contextlib\nimport contextvars\nimport datetime\nimport errno\nimport functools\nimport inspect\nimport multiprocessing\nimport os\nimport re\nimport signal\nimport socket\nimport subprocess\nimport sys\nimport tempfile\nimport threading\nimport time\nfrom collections import OrderedDict\nfrom datetime import timezone\nfrom enum import Enum\nfrom signal import Signals\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Callable,\n    ContextManager,\n    Dict,\n    Generator,\n    Generic,\n    Hashable,\n    Iterator,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n    Type,\n    TypeVar,\n    Union,\n    cast,\n    overload,\n)\n\nimport packaging.version\nfrom typing_extensions import Literal, TypeAlias, TypeGuard\n\nimport dagster._check as check\nimport dagster._seven as seven\n\nfrom .internal_init import IHasInternalInit as IHasInternalInit\n\nif sys.version_info > (3,):\n    from pathlib import Path\nelse:\n    from pathlib2 import Path\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.definitions_class import Definitions\n    from dagster._core.definitions.repository_definition.repository_definition import (\n        RepositoryDefinition,\n    )\n    from dagster._core.events import DagsterEvent\n\nK = TypeVar("K")\nT = TypeVar("T")\nU = TypeVar("U")\nV = TypeVar("V")\n\nEPOCH = datetime.datetime.utcfromtimestamp(0)\n\nPICKLE_PROTOCOL = 4\n\n\nDEFAULT_WORKSPACE_YAML_FILENAME = "workspace.yaml"\n\nPrintFn: TypeAlias = Callable[[Any], None]\n\nSingleInstigatorDebugCrashFlags: TypeAlias = Mapping[str, int]\nDebugCrashFlags: TypeAlias = Mapping[str, SingleInstigatorDebugCrashFlags]\n\n\n# Use this to get the "library version" (pre-1.0 version) from the "core version" (post 1.0\n# version). 16 is from the 0.16.0 that library versions stayed on when core went to 1.0.0.\ndef library_version_from_core_version(core_version: str) -> str:\n    parsed_version = parse_package_version(core_version)\n\n    release = parsed_version.release\n    if release[0] >= 1:\n        library_version = ".".join(["0", str(16 + release[1]), str(release[2])])\n\n        if parsed_version.is_prerelease:\n            library_version = library_version + "".join(\n                [str(pre) for pre in check.not_none(parsed_version.pre)]\n            )\n\n        if parsed_version.is_postrelease:\n            library_version = library_version + "post" + str(parsed_version.post)\n\n        return library_version\n    else:\n        return core_version\n\n\ndef parse_package_version(version_str: str) -> packaging.version.Version:\n    parsed_version = packaging.version.parse(version_str)\n    assert isinstance(parsed_version, packaging.version.Version)\n    return parsed_version\n\n\ndef convert_dagster_submodule_name(name: str, mode: Literal["private", "public"]) -> str:\n    """This function was introduced when all Dagster submodules were marked private by\n    underscore-prefixing the root submodules (e.g. `dagster._core`). The function provides\n    backcompatibility by converting modules between the old and new (i.e. public and private) forms.\n    This is needed when reading older data or communicating with older versions of Dagster.\n    """\n    if mode == "private":\n        return re.sub(r"^dagster\\.([^_])", r"dagster._\\1", name)\n    elif mode == "public":\n        return re.sub(r"^dagster._", "dagster.", name)\n    else:\n        check.failed("`mode` must be 'private' or 'public'")\n\n\n
[docs]def file_relative_path(dunderfile: str, relative_path: str) -> str:\n """Get a path relative to the currently executing Python file.\n\n This function is useful when one needs to load a file that is relative to the position of\n the current file (such as when you encode a configuration file path in a source file and want\n it to be runnable from any current working directory).\n\n Args:\n dunderfile (str): Should always be ``__file__``.\n relative_path (str): Path to get relative to the currently executing file.\n\n **Examples**:\n\n .. code-block:: python\n\n file_relative_path(__file__, 'path/relative/to/file')\n\n """\n check.str_param(dunderfile, "dunderfile")\n check.str_param(relative_path, "relative_path")\n\n return os.path.join(os.path.dirname(dunderfile), relative_path)
\n\n\ndef script_relative_path(file_path: str) -> str:\n """Useful for testing with local files. Use a path relative to where the\n test resides and this function will return the absolute path\n of that file. Otherwise it will be relative to script that\n ran the test.\n\n Note: this is function is very, very expensive (on the order of 1\n millisecond per invocation) so this should only be used in performance\n insensitive contexts. Prefer file_relative_path for anything with\n performance constraints.\n\n """\n # from http://bit.ly/2snyC6s\n\n check.str_param(file_path, "file_path")\n scriptdir = inspect.stack()[1][1]\n return os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(scriptdir)), file_path))\n\n\n# Adapted from https://github.com/okunishinishi/python-stringcase/blob/master/stringcase.py\ndef camelcase(string: str) -> str:\n check.str_param(string, "string")\n\n string = re.sub(r"^[\\-_\\.]", "", str(string))\n if not string:\n return string\n return str(string[0]).upper() + re.sub(\n r"[\\-_\\.\\s]([a-z])", lambda matched: str(matched.group(1)).upper(), string[1:]\n )\n\n\ndef ensure_single_item(ddict: Mapping[T, U]) -> Tuple[T, U]:\n check.mapping_param(ddict, "ddict")\n check.param_invariant(len(ddict) == 1, "ddict", "Expected dict with single item")\n return next(iter(ddict.items()))\n\n\n@contextlib.contextmanager\ndef pushd(path: str) -> Iterator[str]:\n old_cwd = os.getcwd()\n os.chdir(path)\n try:\n yield path\n finally:\n os.chdir(old_cwd)\n\n\ndef safe_isfile(path: str) -> bool:\n """Backport of Python 3.8 os.path.isfile behavior.\n\n This is intended to backport https://docs.python.org/dev/whatsnew/3.8.html#os-path. I'm not\n sure that there are other ways to provoke this behavior on Unix other than the null byte,\n but there are certainly other ways to do it on Windows. Afaict, we won't mask other\n ValueErrors, and the behavior in the status quo ante is rough because we risk throwing an\n unexpected, uncaught ValueError from very deep in our logic.\n """\n try:\n return os.path.isfile(path)\n except ValueError:\n return False\n\n\ndef mkdir_p(path: str) -> str:\n try:\n os.makedirs(path)\n return path\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n return path\n else:\n raise\n\n\ndef hash_collection(\n collection: Union[\n Mapping[Hashable, Any], Sequence[Any], AbstractSet[Any], Tuple[Any, ...], NamedTuple\n ]\n) -> int:\n """Hash a mutable collection or immutable collection containing mutable elements.\n\n This is useful for hashing Dagster-specific NamedTuples that contain mutable lists or dicts.\n The default NamedTuple __hash__ function assumes the contents of the NamedTuple are themselves\n hashable, and will throw an error if they are not. This can occur when trying to e.g. compute a\n cache key for the tuple for use with `lru_cache`.\n\n This alternative implementation will recursively process collection elements to convert basic\n lists and dicts to tuples prior to hashing. It is recommended to cache the result:\n\n Example:\n .. 
code-block:: python\n\n def __hash__(self):\n if not hasattr(self, '_hash'):\n self._hash = hash_named_tuple(self)\n return self._hash\n """\n assert isinstance(\n collection, (list, dict, set, tuple)\n ), f"Cannot hash collection of type {type(collection)}"\n return hash(make_hashable(collection))\n\n\n@overload\ndef make_hashable(value: Union[List[Any], Set[Any]]) -> Tuple[Any, ...]: ...\n\n\n@overload\ndef make_hashable(value: Dict[Any, Any]) -> Tuple[Tuple[Any, Any]]: ...\n\n\n@overload\ndef make_hashable(value: Any) -> Any: ...\n\n\ndef make_hashable(value: Any) -> Any:\n if isinstance(value, dict):\n return tuple(sorted((key, make_hashable(value)) for key, value in value.items()))\n elif isinstance(value, (list, tuple, set)):\n return tuple([make_hashable(x) for x in value])\n else:\n return value\n\n\ndef get_prop_or_key(elem, key):\n if isinstance(elem, Mapping):\n return elem.get(key)\n else:\n return getattr(elem, key)\n\n\ndef list_pull(alist, key):\n return list(map(lambda elem: get_prop_or_key(elem, key), alist))\n\n\ndef all_none(kwargs):\n for value in kwargs.values():\n if value is not None:\n return False\n return True\n\n\ndef check_script(path, return_code=0):\n try:\n subprocess.check_output([sys.executable, path])\n except subprocess.CalledProcessError as exc:\n if return_code != 0:\n if exc.returncode == return_code:\n return\n raise\n\n\ndef check_cli_execute_file_job(path, pipeline_fn_name, env_file=None):\n from dagster._core.test_utils import instance_for_test\n\n with instance_for_test():\n cli_cmd = [\n sys.executable,\n "-m",\n "dagster",\n "pipeline",\n "execute",\n "-f",\n path,\n "-a",\n pipeline_fn_name,\n ]\n\n if env_file:\n cli_cmd.append("-c")\n cli_cmd.append(env_file)\n\n try:\n subprocess.check_output(cli_cmd)\n except subprocess.CalledProcessError as cpe:\n print(cpe) # noqa: T201\n raise cpe\n\n\ndef safe_tempfile_path_unmanaged() -> str:\n # This gets a valid temporary file path in the safest possible way, although there is still no\n # guarantee that another process will not create a file at this path. 
The NamedTemporaryFile is\n # deleted when the context manager exits and the file object is closed.\n #\n # This is preferable to using NamedTemporaryFile as a context manager and passing the name\n # attribute of the file object around because NamedTemporaryFiles cannot be opened a second time\n # if already open on Windows NT or later:\n # https://docs.python.org/3.8/library/tempfile.html#tempfile.NamedTemporaryFile\n # https://github.com/dagster-io/dagster/issues/1582\n with tempfile.NamedTemporaryFile() as fd:\n path = fd.name\n return Path(path).as_posix()\n\n\n@contextlib.contextmanager\ndef safe_tempfile_path() -> Iterator[str]:\n path = None\n try:\n path = safe_tempfile_path_unmanaged()\n yield path\n finally:\n if path is not None and os.path.exists(path):\n os.unlink(path)\n\n\n@overload\ndef ensure_gen(thing_or_gen: Generator[T, Any, Any]) -> Generator[T, Any, Any]:\n pass\n\n\n@overload\ndef ensure_gen(thing_or_gen: T) -> Generator[T, Any, Any]:\n pass\n\n\ndef ensure_gen(\n thing_or_gen: Union[T, Iterator[T], Generator[T, Any, Any]]\n) -> Generator[T, Any, Any]:\n if not inspect.isgenerator(thing_or_gen):\n thing_or_gen = cast(T, thing_or_gen)\n\n def _gen_thing():\n yield thing_or_gen\n\n return _gen_thing()\n\n return thing_or_gen\n\n\ndef ensure_dir(file_path: str) -> str:\n try:\n os.makedirs(file_path)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n return file_path\n\n\ndef ensure_file(path: str) -> str:\n ensure_dir(os.path.dirname(path))\n if not os.path.exists(path):\n touch_file(path)\n return path\n\n\ndef touch_file(path):\n ensure_dir(os.path.dirname(path))\n with open(path, "a", encoding="utf8"):\n os.utime(path, None)\n\n\ndef _kill_on_event(termination_event):\n termination_event.wait()\n send_interrupt()\n\n\ndef send_interrupt():\n if seven.IS_WINDOWS:\n # This will raise a KeyboardInterrupt in python land - meaning this wont be able to\n # interrupt things like sleep()\n thread.interrupt_main()\n else:\n # If on unix send an os level signal to interrupt any situation we may be stuck in\n os.kill(os.getpid(), signal.SIGINT)\n\n\n# Function to be invoked by daemon thread in processes which seek to be cancellable.\n# The motivation for this approach is to be able to exit cleanly on Windows. 
An alternative\n# path is to change how the processes are opened and send CTRL_BREAK signals, which at\n# the time of authoring seemed a more costly approach.\n#\n# Reading for the curious:\n# * https://stackoverflow.com/questions/35772001/how-to-handle-the-signal-in-python-on-windows-machine\n# * https://stefan.sofa-rockers.org/2013/08/15/handling-sub-process-hierarchies-python-linux-os-x/\ndef start_termination_thread(termination_event):\n check.inst_param(termination_event, "termination_event", ttype=type(multiprocessing.Event()))\n\n int_thread = threading.Thread(\n target=_kill_on_event, args=(termination_event,), name="kill-on-event"\n )\n int_thread.daemon = True\n int_thread.start()\n\n\n# Executes the next() function within an instance of the supplied context manager class\n# (leaving the context before yielding each result)\ndef iterate_with_context(\n context_fn: Callable[[], ContextManager[Any]], iterator: Iterator[T]\n) -> Iterator[T]:\n while True:\n # Allow interrupts during user code so that we can terminate slow/hanging steps\n with context_fn():\n try:\n next_output = next(iterator)\n except StopIteration:\n return\n\n yield next_output\n\n\ndef datetime_as_float(dt: datetime.datetime) -> float:\n check.inst_param(dt, "dt", datetime.datetime)\n return float((dt - EPOCH).total_seconds())\n\n\nT_GeneratedContext = TypeVar("T_GeneratedContext")\n\n\nclass EventGenerationManager(Generic[T_GeneratedContext]):\n """Utility class that wraps an event generator function, that also yields a single instance of\n a typed object. All events yielded before the typed object are yielded through the method\n `generate_setup_events` and all events yielded after the typed object are yielded through the\n method `generate_teardown_events`.\n\n This is used to help replace the context managers used in pipeline initialization with\n generators so that we can begin emitting initialization events AND construct a pipeline context\n object, while managing explicit setup/teardown.\n\n This does require calling `generate_setup_events` AND `generate_teardown_events` in order to\n get the typed object.\n """\n\n def __init__(\n self,\n generator: Iterator[Union["DagsterEvent", T_GeneratedContext]],\n object_cls: Type[T_GeneratedContext],\n require_object: Optional[bool] = True,\n ):\n self.generator = check.generator(generator)\n self.object_cls: Type[T_GeneratedContext] = check.class_param(object_cls, "object_cls")\n self.require_object = check.bool_param(require_object, "require_object")\n self.object: Optional[T_GeneratedContext] = None\n self.did_setup = False\n self.did_teardown = False\n\n def generate_setup_events(self) -> Iterator["DagsterEvent"]:\n self.did_setup = True\n try:\n while self.object is None:\n obj = next(self.generator)\n if isinstance(obj, self.object_cls):\n self.object = obj\n else:\n yield obj\n except StopIteration:\n if self.require_object:\n check.inst_param(\n self.object,\n "self.object",\n self.object_cls,\n f"generator never yielded object of type {self.object_cls.__name__}",\n )\n\n def get_object(self) -> T_GeneratedContext:\n if not self.did_setup:\n check.failed("Called `get_object` before `generate_setup_events`")\n return cast(T_GeneratedContext, self.object)\n\n def generate_teardown_events(self) -> Iterator["DagsterEvent"]:\n self.did_teardown = True\n if self.object:\n yield from self.generator\n\n\ndef utc_datetime_from_timestamp(timestamp: float) -> datetime.datetime:\n tz = timezone.utc\n return datetime.datetime.fromtimestamp(timestamp, tz=tz)\n\n\ndef 
utc_datetime_from_naive(dt: datetime.datetime) -> datetime.datetime:\n tz = timezone.utc\n return dt.replace(tzinfo=tz)\n\n\ndef is_enum_value(value: object) -> bool:\n return False if value is None else issubclass(value.__class__, Enum)\n\n\ndef git_repository_root() -> str:\n return subprocess.check_output(["git", "rev-parse", "--show-toplevel"]).decode("utf-8").strip()\n\n\ndef segfault() -> None:\n """Reliable cross-Python version segfault.\n\n https://bugs.python.org/issue1215#msg143236\n """\n import ctypes\n\n ctypes.string_at(0)\n\n\ndef find_free_port() -> int:\n with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:\n s.bind(("", 0))\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n return s.getsockname()[1]\n\n\ndef is_port_in_use(host, port) -> bool:\n # Similar to the socket options that uvicorn uses to bind ports:\n # https://github.com/encode/uvicorn/blob/62f19c1c39929c84968712c371c9b7b96a041dec/uvicorn/config.py#L565-L566\n sock = socket.socket(family=socket.AF_INET)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n try:\n sock.bind((host, port))\n return False\n except socket.error as e:\n return e.errno == errno.EADDRINUSE\n finally:\n sock.close()\n\n\n@contextlib.contextmanager\ndef alter_sys_path(to_add: Sequence[str], to_remove: Sequence[str]) -> Iterator[None]:\n to_restore = [path for path in sys.path]\n\n # remove paths\n for path in to_remove:\n if path in sys.path:\n sys.path.remove(path)\n\n # add paths\n for path in to_add:\n sys.path.insert(0, path)\n\n try:\n yield\n finally:\n sys.path = to_restore\n\n\n@contextlib.contextmanager\ndef restore_sys_modules() -> Iterator[None]:\n sys_modules = {k: v for k, v in sys.modules.items()}\n try:\n yield\n finally:\n to_delete = set(sys.modules) - set(sys_modules)\n for key in to_delete:\n del sys.modules[key]\n\n\ndef process_is_alive(pid: int) -> bool:\n if seven.IS_WINDOWS:\n import psutil\n\n return psutil.pid_exists(pid=pid)\n else:\n try:\n subprocess.check_output(["ps", str(pid)])\n except subprocess.CalledProcessError as exc:\n assert exc.returncode == 1\n return False\n return True\n\n\ndef compose(*args):\n """Compose python functions args such that compose(f, g)(x) is equivalent to f(g(x)).""" # noqa: D402\n # reduce using functional composition over all the arguments, with the identity function as\n # initializer\n return functools.reduce(lambda f, g: lambda x: f(g(x)), args, lambda x: x)\n\n\ndef dict_without_keys(ddict, *keys):\n return {key: value for key, value in ddict.items() if key not in set(keys)}\n\n\nclass Counter:\n def __init__(self):\n self._lock = threading.Lock()\n self._counts = OrderedDict()\n super(Counter, self).__init__()\n\n def increment(self, key: str):\n with self._lock:\n self._counts[key] = self._counts.get(key, 0) + 1\n\n def counts(self) -> Mapping[str, int]:\n with self._lock:\n copy = {k: v for k, v in self._counts.items()}\n return copy\n\n\ntraced_counter = contextvars.ContextVar("traced_counts", default=Counter())\n\nT_Callable = TypeVar("T_Callable", bound=Callable)\n\n\ndef traced(func: T_Callable) -> T_Callable:\n """A decorator that keeps track of how many times a function is called."""\n\n @functools.wraps(func)\n def inner(*args, **kwargs):\n counter = traced_counter.get()\n if counter and isinstance(counter, Counter):\n counter.increment(func.__qualname__)\n\n return func(*args, **kwargs)\n\n return cast(T_Callable, inner)\n\n\ndef get_terminate_signal():\n if sys.platform == "win32":\n return signal.SIGTERM\n return 
signal.SIGKILL\n\n\ndef get_run_crash_explanation(prefix: str, exit_code: int):\n # As per https://docs.python.org/3/library/subprocess.html#subprocess.CompletedProcess.returncode\n # negative exit code means a posix signal\n if exit_code < 0 and -exit_code in [signal.value for signal in Signals]:\n posix_signal = -exit_code\n signal_str = Signals(posix_signal).name\n exit_clause = f"was terminated by signal {posix_signal} ({signal_str})."\n if posix_signal == get_terminate_signal():\n exit_clause = (\n exit_clause\n + " This usually indicates that the process was"\n " killed by the operating system due to running out of"\n " memory. Possible solutions include increasing the"\n " amount of memory available to the run, reducing"\n " the amount of memory used by the ops in the run, or"\n " configuring the executor to run fewer ops concurrently."\n )\n else:\n exit_clause = f"unexpectedly exited with code {exit_code}."\n\n return prefix + " " + exit_clause\n\n\ndef last_file_comp(path: str) -> str:\n return os.path.basename(os.path.normpath(path))\n\n\ndef is_named_tuple_instance(obj: object) -> TypeGuard[NamedTuple]:\n return isinstance(obj, tuple) and hasattr(obj, "_fields")\n\n\ndef is_named_tuple_subclass(klass: Type[object]) -> TypeGuard[Type[NamedTuple]]:\n return isinstance(klass, type) and issubclass(klass, tuple) and hasattr(klass, "_fields")\n\n\n@overload\ndef normalize_to_repository(\n definitions_or_repository: Optional[Union["Definitions", "RepositoryDefinition"]] = ...,\n repository: Optional["RepositoryDefinition"] = ...,\n error_on_none: Literal[True] = ...,\n) -> "RepositoryDefinition": ...\n\n\n@overload\ndef normalize_to_repository(\n definitions_or_repository: Optional[Union["Definitions", "RepositoryDefinition"]] = ...,\n repository: Optional["RepositoryDefinition"] = ...,\n error_on_none: Literal[False] = ...,\n) -> Optional["RepositoryDefinition"]: ...\n\n\ndef normalize_to_repository(\n definitions_or_repository: Optional[Union["Definitions", "RepositoryDefinition"]] = None,\n repository: Optional["RepositoryDefinition"] = None,\n error_on_none: bool = True,\n) -> Optional["RepositoryDefinition"]:\n """Normalizes the arguments that take a RepositoryDefinition or Definitions object to a\n RepositoryDefinition.\n\n This is intended to handle both the case where a single argument takes a\n `Union[RepositoryDefinition, Definitions]` or separate keyword arguments accept\n `RepositoryDefinition` or `Definitions`.\n """\n from dagster._core.definitions.definitions_class import Definitions\n\n if (definitions_or_repository and repository) or (\n error_on_none and not (definitions_or_repository or repository)\n ):\n check.failed("Exactly one of `definitions` or `repository_def` must be provided.")\n elif isinstance(definitions_or_repository, Definitions):\n return definitions_or_repository.get_repository_def()\n elif definitions_or_repository:\n return definitions_or_repository\n elif repository:\n return repository\n else:\n return None\n\n\ndef xor(a, b):\n return bool(a) != bool(b)\n\n\ndef tail_file(path_or_fd: Union[str, int], should_stop: Callable[[], bool]) -> Iterator[str]:\n with open(path_or_fd, "r") as output_stream:\n while True:\n line = output_stream.readline()\n if line:\n yield line\n elif should_stop():\n break\n else:\n time.sleep(0.01)\n
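``hash_collection`` above is the recommended way to hash NamedTuples that carry unhashable fields; a minimal sketch with a hypothetical ``RunTags`` tuple (``dagster._utils`` is an internal module, so the import path is an assumption):

.. code-block:: python

    from typing import List, NamedTuple

    from dagster._utils import hash_collection

    class RunTags(NamedTuple("_RunTags", [("name", str), ("tags", List[str])])):
        # The list field breaks the default NamedTuple hash, so cache a structural
        # hash as suggested in the hash_collection docstring.
        def __hash__(self):
            if not hasattr(self, "_hash"):
                self._hash = hash_collection(self)
            return self._hash

    assert hash(RunTags("nightly", ["prod"])) == hash(RunTags("nightly", ["prod"]))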
", "current_page_name": "_modules/dagster/_utils", "customsidebar": null, "dagster_type": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._utils.dagster_type

\nfrom typing import Any\n\nfrom dagster._core.definitions.events import Failure, TypeCheck\nfrom dagster._core.definitions.graph_definition import GraphDefinition\nfrom dagster._core.definitions.job_base import InMemoryJob\nfrom dagster._core.errors import DagsterInvariantViolationError\nfrom dagster._core.execution.api import create_execution_plan\nfrom dagster._core.execution.context_creation_job import scoped_job_context\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._core.types.dagster_type import resolve_dagster_type\n\nfrom .typing_api import is_typing_type\n\n\n
[docs]def check_dagster_type(dagster_type: Any, value: Any) -> TypeCheck:\n """Test a custom Dagster type.\n\n Args:\n dagster_type (Any): The Dagster type to test. Should be one of the\n :ref:`built-in types <builtin>`, a dagster type explicitly constructed with\n :py:func:`as_dagster_type`, :py:func:`@usable_as_dagster_type <dagster_type>`, or\n :py:func:`PythonObjectDagsterType`, or a Python type.\n value (Any): The runtime value to test.\n\n Returns:\n TypeCheck: The result of the type check.\n\n\n Examples:\n .. code-block:: python\n\n assert check_dagster_type(Dict[Any, Any], {'foo': 'bar'}).success\n """\n if is_typing_type(dagster_type):\n raise DagsterInvariantViolationError(\n f"Must pass in a type from dagster module. You passed {dagster_type} "\n "which is part of python's typing module."\n )\n\n dagster_type = resolve_dagster_type(dagster_type)\n\n job = InMemoryJob(GraphDefinition(node_defs=[], name="empty").to_job())\n job_def = job.get_definition()\n\n instance = DagsterInstance.ephemeral()\n execution_plan = create_execution_plan(job)\n dagster_run = instance.create_run_for_job(job_def)\n with scoped_job_context(execution_plan, job, {}, dagster_run, instance) as context:\n type_check_context = context.for_type(dagster_type)\n try:\n type_check = dagster_type.type_check(type_check_context, value)\n except Failure as failure:\n return TypeCheck(success=False, description=failure.description)\n\n if not isinstance(type_check, TypeCheck):\n raise DagsterInvariantViolationError(\n "Type checks can only return TypeCheck. Type {type_name} returned {value}.".format(\n type_name=dagster_type.display_name, value=repr(type_check)\n )\n )\n return type_check
\n
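The docstring example above checks a built-in type; a short sketch that also checks a hypothetical custom type built with ``PythonObjectDagsterType``:

.. code-block:: python

    from dagster import PythonObjectDagsterType, check_dagster_type

    class Order:
        def __init__(self, order_id: str):
            self.order_id = order_id

    # Hypothetical Dagster type wrapping a plain Python class (isinstance-based check).
    OrderDagsterType = PythonObjectDagsterType(Order, name="OrderDagsterType")

    assert check_dagster_type(OrderDagsterType, Order("o-1")).success
    assert not check_dagster_type(OrderDagsterType, "not an order").success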
", "current_page_name": "_modules/dagster/_utils/dagster_type", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}, {"link": "../", "title": "dagster._utils"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._utils.dagster_type"}, "favicon_url": null, "forked_pdb": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._utils.forked_pdb

\nimport pdb\nimport sys\n\n\n# From https://stackoverflow.com/questions/4716533/how-to-attach-debugger-to-a-python-subproccess\n
[docs]class ForkedPdb(pdb.Pdb):\n """A pdb subclass that may be used from a forked multiprocessing child.\n\n **Examples**:\n\n .. code-block:: python\n\n from dagster._utils.forked_pdb import ForkedPdb\n\n @op\n def complex_op(context):\n # some complicated stuff\n\n ForkedPdb().set_trace()\n\n # some other complicated stuff\n\n You can initiate job execution via the webserver and use the pdb debugger to examine/step through\n execution at the breakpoint.\n """\n\n def interaction(self, frame, traceback):\n _stdin = sys.stdin\n try:\n sys.stdin = open("/dev/stdin", encoding="utf8")\n pdb.Pdb.interaction(self, frame, traceback)\n finally:\n sys.stdin = _stdin
\n
", "current_page_name": "_modules/dagster/_utils/forked_pdb", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}, {"link": "../", "title": "dagster._utils"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._utils.forked_pdb"}, "log": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._utils.log

\nimport copy\nimport logging\nimport sys\nimport traceback\nfrom typing import Mapping, NamedTuple, Optional\n\nimport coloredlogs\n\nimport dagster._check as check\nimport dagster._seven as seven\nfrom dagster._annotations import deprecated\nfrom dagster._config import Enum, EnumValue\nfrom dagster._core.definitions.logger_definition import logger\nfrom dagster._core.utils import PYTHON_LOGGING_LEVELS_MAPPING, coerce_valid_log_level\n\nLogLevelEnum = Enum("log_level", list(map(EnumValue, PYTHON_LOGGING_LEVELS_MAPPING.keys())))\n\n\nclass JsonFileHandler(logging.Handler):\n    def __init__(self, json_path: str):\n        super(JsonFileHandler, self).__init__()\n        self.json_path = check.str_param(json_path, "json_path")\n\n    def emit(self, record: logging.LogRecord) -> None:\n        try:\n            log_dict = copy.copy(record.__dict__)\n\n            # This horrific monstrosity is to maintain backwards compatability\n            # with the old behavior of the JsonFileHandler, which the clarify\n            # project has a dependency on. It relied on the dagster-defined\n            # properties smashing all the properties of the LogRecord object\n            # and uploads all of those properties to a redshift table for\n            # in order to do analytics on the log\n\n            if "dagster_meta" in log_dict:\n                dagster_meta_dict = log_dict["dagster_meta"]\n                del log_dict["dagster_meta"]\n            else:\n                dagster_meta_dict = {}\n\n            log_dict.update(dagster_meta_dict)\n\n            with open(self.json_path, "a", encoding="utf8") as ff:\n                text_line = seven.json.dumps(log_dict)\n                ff.write(text_line + "\\n")\n        # Need to catch Exception here, so disabling lint\n        except Exception as e:\n            logging.critical("[%s] Error during logging!", self.__class__.__name__)\n            logging.exception(str(e))\n\n\nclass StructuredLoggerMessage(\n    NamedTuple(\n        "_StructuredLoggerMessage",\n        [\n            ("name", str),\n            ("message", str),\n            ("level", int),\n            ("meta", Mapping[object, object]),\n            ("record", logging.LogRecord),\n        ],\n    )\n):\n    def __new__(\n        cls,\n        name: str,\n        message: str,\n        level: int,\n        meta: Mapping[object, object],\n        record: logging.LogRecord,\n    ):\n        return super(StructuredLoggerMessage, cls).__new__(\n            cls,\n            check.str_param(name, "name"),\n            check.str_param(message, "message"),\n            coerce_valid_log_level(level),\n            check.mapping_param(meta, "meta"),\n            check.inst_param(record, "record", logging.LogRecord),\n        )\n\n\nclass JsonEventLoggerHandler(logging.Handler):\n    def __init__(self, json_path: str, construct_event_record):\n        super(JsonEventLoggerHandler, self).__init__()\n        self.json_path = check.str_param(json_path, "json_path")\n        self.construct_event_record = construct_event_record\n\n    def emit(self, record: logging.LogRecord) -> None:\n        try:\n            event_record = self.construct_event_record(record)\n            with open(self.json_path, "a", encoding="utf8") as ff:\n                text_line = seven.json.dumps(event_record.to_dict())\n                ff.write(text_line + "\\n")\n\n        # Need to catch Exception here, so disabling lint\n        except Exception as e:\n            logging.critical("[%s] Error during logging!", 
self.__class__.__name__)\n            logging.exception(str(e))\n\n\nclass StructuredLoggerHandler(logging.Handler):\n    def __init__(self, callback):\n        super(StructuredLoggerHandler, self).__init__()\n        self.callback = check.is_callable(callback, "callback")\n\n    def emit(self, record: logging.LogRecord) -> None:\n        try:\n            self.callback(\n                StructuredLoggerMessage(\n                    name=record.name,\n                    message=record.msg,\n                    level=record.levelno,\n                    meta=record.dagster_meta,  # type: ignore\n                    record=record,\n                )\n            )\n        # Need to catch Exception here, so disabling lint\n        except Exception as e:\n            logging.critical("[%s] Error during logging!", self.__class__.__name__)\n            logging.exception(str(e))\n\n\ndef construct_single_handler_logger(name, level, handler):\n    check.str_param(name, "name")\n    check.inst_param(handler, "handler", logging.Handler)\n\n    level = coerce_valid_log_level(level)\n\n    @logger\n    def single_handler_logger(_init_context):\n        klass = logging.getLoggerClass()\n        logger_ = klass(name, level=level)\n        logger_.addHandler(handler)\n        handler.setLevel(level)\n        return logger_\n\n    return single_handler_logger\n\n\n# Base python logger whose messages will be captured as structured Dagster log messages.\nBASE_DAGSTER_LOGGER = logging.getLogger(name="dagster")\n\n\n
[docs]def get_dagster_logger(name: Optional[str] = None) -> logging.Logger:\n """Creates a python logger whose output messages will be captured and converted into Dagster log\n messages. This means they will have structured information such as the step_key, run_id, etc.\n embedded into them, and will show up in the Dagster event log.\n\n This can be used as a more convenient alternative to `context.log` in most cases. If log level\n is not set explicitly, defaults to DEBUG.\n\n Args:\n name (Optional[str]): If supplied, will create a logger with the name "dagster.builtin.{name}",\n with properties inherited from the base Dagster logger. If omitted, the returned logger\n will be named "dagster.builtin".\n\n Returns:\n :class:`logging.Logger`: A logger whose output will be captured by Dagster.\n\n Example:\n .. code-block:: python\n\n from dagster import get_dagster_logger, op\n\n @op\n def hello_op():\n log = get_dagster_logger()\n for i in range(5):\n # do something\n log.info(f"Did {i+1} things!")\n\n """\n # enforce that the parent logger will always have a DEBUG log level\n BASE_DAGSTER_LOGGER.setLevel(logging.DEBUG)\n base_builtin = BASE_DAGSTER_LOGGER.getChild("builtin")\n if name:\n return base_builtin.getChild(name)\n return base_builtin
\n\n\ndef define_structured_logger(name, callback, level):\n check.str_param(name, "name")\n check.callable_param(callback, "callback")\n level = coerce_valid_log_level(level)\n\n return construct_single_handler_logger(name, level, StructuredLoggerHandler(callback))\n\n\ndef define_json_file_logger(name, json_path, level):\n check.str_param(name, "name")\n check.str_param(json_path, "json_path")\n level = coerce_valid_log_level(level)\n\n stream_handler = JsonFileHandler(json_path)\n stream_handler.setFormatter(define_default_formatter())\n return construct_single_handler_logger(name, level, stream_handler)\n\n\ndef get_stack_trace_array(exception):\n check.inst_param(exception, "exception", Exception)\n if hasattr(exception, "__traceback__"):\n tb = exception.__traceback__\n else:\n _exc_type, _exc_value, tb = sys.exc_info()\n return traceback.format_tb(tb)\n\n\ndef default_format_string():\n return "%(asctime)s - %(name)s - %(levelname)s - %(message)s"\n\n\ndef default_date_format_string():\n return "%Y-%m-%d %H:%M:%S %z"\n\n\ndef define_default_formatter():\n return logging.Formatter(default_format_string(), default_date_format_string())\n\n\n@deprecated(\n breaking_version="2.0",\n subject="loggers.dagit",\n emit_runtime_warning=False,\n)\ndef configure_loggers(handler="default", log_level="INFO"):\n LOGGING_CONFIG = {\n "version": 1,\n "disable_existing_loggers": False,\n "formatters": {\n "colored": {\n "()": coloredlogs.ColoredFormatter,\n "fmt": default_format_string(),\n "datefmt": default_date_format_string(),\n "field_styles": {"levelname": {"color": "blue"}, "asctime": {"color": "green"}},\n "level_styles": {"debug": {}, "error": {"color": "red"}},\n },\n },\n "handlers": {\n "default": {\n "formatter": "colored",\n "class": "logging.StreamHandler",\n "stream": sys.stdout,\n "level": log_level,\n },\n "null": {\n "class": "logging.NullHandler",\n },\n },\n "loggers": {\n "dagster": {\n "handlers": [handler],\n "level": log_level,\n },\n # Only one of dagster or dagster-webserver will be used at a time. We configure them\n # both here to avoid a dependency on the dagster-webserver package.\n "dagit": {\n "handlers": [handler],\n "level": log_level,\n },\n "dagster-webserver": {\n "handlers": [handler],\n "level": log_level,\n },\n },\n }\n\n logging.config.dictConfig(LOGGING_CONFIG)\n\n\ndef create_console_logger(name, level):\n klass = logging.getLoggerClass()\n handler = klass(name, level=level)\n coloredlogs.install(\n logger=handler,\n level=level,\n fmt=default_format_string(),\n datefmt=default_date_format_string(),\n field_styles={"levelname": {"color": "blue"}, "asctime": {"color": "green"}},\n level_styles={"debug": {}, "error": {"color": "red"}},\n )\n return handler\n
", "current_page_name": "_modules/dagster/_utils/log", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}, {"link": "../", "title": "dagster._utils"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._utils.log"}, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._utils", "warnings": {"alabaster_version": "0.7.13", "body": "

Source code for dagster._utils.warnings

\nimport warnings\nfrom contextlib import contextmanager\nfrom typing import Callable, Iterator, Optional, TypeVar\n\nimport dagster._check as check\nfrom dagster._core.decorator_utils import (\n    Decoratable,\n    apply_context_manager_decorator,\n)\n\nT = TypeVar("T")\n\n# ########################\n# ##### DEPRECATED\n# ########################\n\n\ndef normalize_renamed_param(\n    new_val: T,\n    new_arg: str,\n    old_val: T,\n    old_arg: str,\n    coerce_old_to_new: Optional[Callable[[T], T]] = None,\n) -> T:\n    """Utility for managing backwards compatibility of a renamed parameter.\n\n    .. code-block::\n\n       # The name of param `old_flag` is being updated to `new_flag`, but we are temporarily\n       # accepting either param.\n       def is_new(old_flag=None, new_flag=None):\n           return canonicalize_backcompat_args(\n               new_val=new_flag,\n               new_arg='new_flag',\n               old_val=old_flag,\n               old_arg='old_flag',\n               breaking_version='0.9.0',\n               coerce_old_to_new=lambda val: not val,\n           )\n\n    In the above example, if the caller sets both new_flag and old_flag, it will fail by throwing\n    a CheckError. If the caller sets the new_flag, it's returned unaltered. If the caller sets\n    old_flag, it will return the old_flag run through the coercion function.\n    """\n    check.str_param(new_arg, "new_arg")\n    check.str_param(old_arg, "old_arg")\n    check.opt_callable_param(coerce_old_to_new, "coerce_old_to_new")\n    if new_val is not None and old_val is not None:\n        check.failed(f'Do not use deprecated "{old_arg}" now that you are using "{new_arg}".')\n    elif old_val is not None:\n        return coerce_old_to_new(old_val) if coerce_old_to_new else old_val\n    else:\n        return new_val\n\n\ndef deprecation_warning(\n    subject: str,\n    breaking_version: str,\n    additional_warn_text: Optional[str] = None,\n    stacklevel: int = 3,\n):\n    warnings.warn(\n        f"{subject} is deprecated and will be removed in {breaking_version}."\n        + ((" " + additional_warn_text) if additional_warn_text else ""),\n        category=DeprecationWarning,\n        stacklevel=stacklevel,\n    )\n\n\n# ########################\n# ##### EXPERIMENTAL\n# ########################\n\nEXPERIMENTAL_WARNING_HELP = (\n    "To mute warnings for experimental functionality, invoke"\n    ' warnings.filterwarnings("ignore", category=dagster.ExperimentalWarning) or use'\n    " one of the other methods described at"\n    " https://docs.python.org/3/library/warnings.html#describing-warning-filters."\n)\n\n\n
[docs]class ExperimentalWarning(Warning):\n pass
\n\n\ndef experimental_warning(\n subject: str, additional_warn_text: Optional[str] = None, stacklevel: int = 3\n) -> None:\n extra_text = f" {additional_warn_text}" if additional_warn_text else ""\n warnings.warn(\n f"{subject} is experimental. It may break in future versions, even between dot"\n f" releases.{extra_text} {EXPERIMENTAL_WARNING_HELP}",\n ExperimentalWarning,\n stacklevel=stacklevel,\n )\n\n\n# ########################\n# ##### DISABLE DAGSTER WARNINGS\n# ########################\n\n\n@contextmanager\ndef disable_dagster_warnings() -> Iterator[None]:\n with warnings.catch_warnings():\n warnings.simplefilter("ignore", category=DeprecationWarning)\n warnings.simplefilter("ignore", category=ExperimentalWarning)\n yield\n\n\nT_Decoratable = TypeVar("T_Decoratable", bound=Decoratable)\n\n\ndef suppress_dagster_warnings(__obj: T_Decoratable) -> T_Decoratable:\n """Mark a method/function as ignoring Dagster-generated warnings. This suppresses any\n `ExperimentalWarnings` or `DeprecationWarnings` when the function is called.\n\n Usage:\n\n .. code-block:: python\n\n @suppress_dagster_warnings\n def invokes_some_experimental_stuff(my_arg):\n my_experimental_function(my_arg)\n """\n return apply_context_manager_decorator(__obj, disable_dagster_warnings)\n
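A minimal sketch of the muting approach that ``EXPERIMENTAL_WARNING_HELP`` above points to, assuming you want to silence experimental-feature warnings process-wide:

.. code-block:: python

    import warnings

    from dagster import ExperimentalWarning

    # Apply the filter before importing or defining the experimental objects
    # whose warnings you want to mute.
    warnings.filterwarnings("ignore", category=ExperimentalWarning)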
", "current_page_name": "_modules/dagster/_utils/warnings", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}, {"link": "../", "title": "dagster._utils"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster._utils.warnings"}}}, "dagster_airbyte": {"asset_defs": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_airbyte.asset_defs

\nimport hashlib\nimport inspect\nimport os\nimport re\nfrom abc import abstractmethod\nfrom functools import partial\nfrom itertools import chain\nfrom typing import (\n    Any,\n    Callable,\n    Dict,\n    Iterable,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n    Union,\n    cast,\n)\n\nimport yaml\nfrom dagster import (\n    AssetKey,\n    AssetOut,\n    AutoMaterializePolicy,\n    FreshnessPolicy,\n    Nothing,\n    Output,\n    ResourceDefinition,\n    SourceAsset,\n    _check as check,\n)\nfrom dagster._core.definitions import AssetsDefinition, multi_asset\nfrom dagster._core.definitions.cacheable_assets import (\n    AssetsDefinitionCacheableData,\n    CacheableAssetsDefinition,\n)\nfrom dagster._core.definitions.events import CoercibleToAssetKey, CoercibleToAssetKeyPrefix\nfrom dagster._core.definitions.metadata import MetadataValue, TableSchemaMetadataValue\nfrom dagster._core.definitions.metadata.table import TableSchema\nfrom dagster._core.errors import DagsterInvalidDefinitionError, DagsterInvalidInvocationError\nfrom dagster._core.execution.context.init import build_init_resource_context\nfrom dagster._utils.merger import merge_dicts\n\nfrom dagster_airbyte.resources import AirbyteCloudResource, AirbyteResource, BaseAirbyteResource\nfrom dagster_airbyte.types import AirbyteTableMetadata\nfrom dagster_airbyte.utils import (\n    generate_materializations,\n    generate_table_schema,\n    is_basic_normalization_operation,\n)\n\n\ndef _table_to_output_name_fn(table: str) -> str:\n    return table.replace("-", "_")\n\n\ndef _build_airbyte_asset_defn_metadata(\n    connection_id: str,\n    destination_tables: Sequence[str],\n    table_to_asset_key_fn: Callable[[str], AssetKey],\n    asset_key_prefix: Optional[Sequence[str]] = None,\n    normalization_tables: Optional[Mapping[str, Set[str]]] = None,\n    upstream_assets: Optional[Iterable[AssetKey]] = None,\n    group_name: Optional[str] = None,\n    io_manager_key: Optional[str] = None,\n    schema_by_table_name: Optional[Mapping[str, TableSchema]] = None,\n    freshness_policy: Optional[FreshnessPolicy] = None,\n    auto_materialize_policy: Optional[AutoMaterializePolicy] = None,\n) -> AssetsDefinitionCacheableData:\n    asset_key_prefix = (\n        check.opt_sequence_param(asset_key_prefix, "asset_key_prefix", of_type=str) or []\n    )\n\n    # Generate a list of outputs, the set of destination tables plus any affiliated\n    # normalization tables\n    tables = list(\n        chain.from_iterable(\n            chain(\n                [destination_tables], normalization_tables.values() if normalization_tables else []\n            )\n        )\n    )\n\n    outputs = {\n        _table_to_output_name_fn(table): AssetKey(\n            [*asset_key_prefix, *table_to_asset_key_fn(table).path]\n        )\n        for table in tables\n    }\n\n    internal_deps: Dict[str, Set[AssetKey]] = {}\n\n    metadata_encodable_normalization_tables = (\n        {k: list(v) for k, v in normalization_tables.items()} if normalization_tables else {}\n    )\n\n    # If normalization tables are specified, we need to add a dependency from the destination table\n    # to the affilitated normalization table\n    if len(metadata_encodable_normalization_tables) > 0:\n        for base_table, derived_tables in metadata_encodable_normalization_tables.items():\n            for derived_table in derived_tables:\n                internal_deps[derived_table] = {\n                    AssetKey([*asset_key_prefix, 
*table_to_asset_key_fn(base_table).path])\n                }\n\n    # All non-normalization tables depend on any user-provided upstream assets\n    for table in destination_tables:\n        internal_deps[table] = set(upstream_assets or [])\n\n    return AssetsDefinitionCacheableData(\n        keys_by_input_name=(\n            {asset_key.path[-1]: asset_key for asset_key in upstream_assets}\n            if upstream_assets\n            else {}\n        ),\n        keys_by_output_name=outputs,\n        internal_asset_deps=internal_deps,\n        group_name=group_name,\n        key_prefix=asset_key_prefix,\n        can_subset=False,\n        metadata_by_output_name=(\n            {\n                table: {"table_schema": MetadataValue.table_schema(schema_by_table_name[table])}\n                for table in tables\n            }\n            if schema_by_table_name\n            else None\n        ),\n        freshness_policies_by_output_name=(\n            {output: freshness_policy for output in outputs} if freshness_policy else None\n        ),\n        auto_materialize_policies_by_output_name=(\n            {output: auto_materialize_policy for output in outputs}\n            if auto_materialize_policy\n            else None\n        ),\n        extra_metadata={\n            "connection_id": connection_id,\n            "group_name": group_name,\n            "destination_tables": destination_tables,\n            "normalization_tables": metadata_encodable_normalization_tables,\n            "io_manager_key": io_manager_key,\n        },\n    )\n\n\ndef _build_airbyte_assets_from_metadata(\n    assets_defn_meta: AssetsDefinitionCacheableData,\n    resource_defs: Optional[Mapping[str, ResourceDefinition]],\n) -> AssetsDefinition:\n    metadata = cast(Mapping[str, Any], assets_defn_meta.extra_metadata)\n    connection_id = cast(str, metadata["connection_id"])\n    group_name = cast(Optional[str], metadata["group_name"])\n    destination_tables = cast(List[str], metadata["destination_tables"])\n    normalization_tables = cast(Mapping[str, List[str]], metadata["normalization_tables"])\n    io_manager_key = cast(Optional[str], metadata["io_manager_key"])\n\n    @multi_asset(\n        name=f"airbyte_sync_{connection_id[:5]}",\n        deps=list((assets_defn_meta.keys_by_input_name or {}).values()),\n        outs={\n            k: AssetOut(\n                key=v,\n                metadata=(\n                    {\n                        k: cast(TableSchemaMetadataValue, v)\n                        for k, v in assets_defn_meta.metadata_by_output_name.get(k, {}).items()\n                    }\n                    if assets_defn_meta.metadata_by_output_name\n                    else None\n                ),\n                io_manager_key=io_manager_key,\n                freshness_policy=(\n                    assets_defn_meta.freshness_policies_by_output_name.get(k)\n                    if assets_defn_meta.freshness_policies_by_output_name\n                    else None\n                ),\n                dagster_type=Nothing,\n            )\n            for k, v in (assets_defn_meta.keys_by_output_name or {}).items()\n        },\n        internal_asset_deps={\n            k: set(v) for k, v in (assets_defn_meta.internal_asset_deps or {}).items()\n        },\n        compute_kind="airbyte",\n        group_name=group_name,\n        resource_defs=resource_defs,\n    )\n    def _assets(context, airbyte: AirbyteResource):\n        ab_output = airbyte.sync_and_poll(connection_id=connection_id)\n        for 
materialization in generate_materializations(\n            ab_output, assets_defn_meta.key_prefix or []\n        ):\n            table_name = materialization.asset_key.path[-1]\n            if table_name in destination_tables:\n                yield Output(\n                    value=None,\n                    output_name=_table_to_output_name_fn(table_name),\n                    metadata=materialization.metadata,\n                )\n                # Also materialize any normalization tables affiliated with this destination\n                # e.g. nested objects, lists etc\n                if normalization_tables:\n                    for dependent_table in normalization_tables.get(table_name, set()):\n                        yield Output(\n                            value=None,\n                            output_name=_table_to_output_name_fn(dependent_table),\n                        )\n            else:\n                yield materialization\n\n    return _assets\n\n\n
[docs]def build_airbyte_assets(\n connection_id: str,\n destination_tables: Sequence[str],\n asset_key_prefix: Optional[Sequence[str]] = None,\n group_name: Optional[str] = None,\n normalization_tables: Optional[Mapping[str, Set[str]]] = None,\n deps: Optional[Iterable[Union[CoercibleToAssetKey, AssetsDefinition, SourceAsset]]] = None,\n upstream_assets: Optional[Set[AssetKey]] = None,\n schema_by_table_name: Optional[Mapping[str, TableSchema]] = None,\n freshness_policy: Optional[FreshnessPolicy] = None,\n stream_to_asset_map: Optional[Mapping[str, str]] = None,\n) -> Sequence[AssetsDefinition]:\n """Builds a set of assets representing the tables created by an Airbyte sync operation.\n\n Args:\n connection_id (str): The Airbyte Connection ID that this op will sync. You can retrieve this\n value from the "Connections" tab of a given connector in the Airbyte UI.\n destination_tables (List[str]): The names of the tables that you want to be represented\n in the Dagster asset graph for this sync. This will generally map to the name of the\n stream in Airbyte, unless a stream prefix has been specified in Airbyte.\n normalization_tables (Optional[Mapping[str, List[str]]]): If you are using Airbyte's\n normalization feature, you may specify a mapping of destination table to a list of\n derived tables that will be created by the normalization process.\n asset_key_prefix (Optional[List[str]]): A prefix for the asset keys inside this asset.\n If left blank, assets will have a key of `AssetKey([table_name])`.\n deps (Optional[Sequence[Union[AssetsDefinition, SourceAsset, str, AssetKey]]]):\n A list of assets to add as sources.\n upstream_assets (Optional[Set[AssetKey]]): Deprecated, use deps instead. A list of assets to add as sources.\n freshness_policy (Optional[FreshnessPolicy]): A freshness policy to apply to the assets\n stream_to_asset_map (Optional[Mapping[str, str]]): A mapping of an Airbyte stream name to a Dagster asset.\n This allows the use of the "prefix" setting in Airbyte with special characters that aren't valid asset names.\n """\n if upstream_assets is not None and deps is not None:\n raise DagsterInvalidDefinitionError(\n "Cannot specify both deps and upstream_assets to build_airbyte_assets. 
Use only deps"\n " instead."\n )\n\n asset_key_prefix = check.opt_sequence_param(asset_key_prefix, "asset_key_prefix", of_type=str)\n\n # Generate a list of outputs, the set of destination tables plus any affiliated\n # normalization tables\n tables = chain.from_iterable(\n chain([destination_tables], normalization_tables.values() if normalization_tables else [])\n )\n outputs = {\n table: AssetOut(\n key=AssetKey([*asset_key_prefix, table]),\n metadata=(\n {"table_schema": MetadataValue.table_schema(schema_by_table_name[table])}\n if schema_by_table_name\n else None\n ),\n freshness_policy=freshness_policy,\n )\n for table in tables\n }\n\n internal_deps = {}\n\n # If normalization tables are specified, we need to add a dependency from the destination table\n # to the affilitated normalization table\n if normalization_tables:\n for base_table, derived_tables in normalization_tables.items():\n for derived_table in derived_tables:\n internal_deps[derived_table] = {AssetKey([*asset_key_prefix, base_table])}\n\n upstream_deps = deps\n if upstream_assets is not None:\n upstream_deps = list(upstream_assets)\n\n # All non-normalization tables depend on any user-provided upstream assets\n for table in destination_tables:\n internal_deps[table] = set(upstream_deps) if upstream_deps else set()\n\n @multi_asset(\n name=f"airbyte_sync_{connection_id[:5]}",\n deps=upstream_deps,\n outs=outputs,\n internal_asset_deps=internal_deps,\n compute_kind="airbyte",\n group_name=group_name,\n )\n def _assets(context, airbyte: BaseAirbyteResource):\n ab_output = airbyte.sync_and_poll(connection_id=connection_id)\n\n # No connection details (e.g. using Airbyte Cloud) means we just assume\n # that the outputs were produced\n if len(ab_output.connection_details) == 0:\n for table_name in destination_tables:\n yield Output(\n value=None,\n output_name=_table_to_output_name_fn(table_name),\n )\n if normalization_tables:\n for dependent_table in normalization_tables.get(table_name, set()):\n yield Output(\n value=None,\n output_name=_table_to_output_name_fn(dependent_table),\n )\n else:\n for materialization in generate_materializations(\n ab_output, asset_key_prefix, stream_to_asset_map\n ):\n table_name = materialization.asset_key.path[-1]\n if table_name in destination_tables:\n yield Output(\n value=None,\n output_name=_table_to_output_name_fn(table_name),\n metadata=materialization.metadata,\n )\n # Also materialize any normalization tables affiliated with this destination\n # e.g. nested objects, lists etc\n if normalization_tables:\n for dependent_table in normalization_tables.get(table_name, set()):\n yield Output(\n value=None,\n output_name=_table_to_output_name_fn(dependent_table),\n )\n else:\n yield materialization\n\n return [_assets]
\n\n\ndef _get_schema_types(schema: Mapping[str, Any]) -> Sequence[str]:\n """Given a schema definition, return a list of data types that are valid for this schema."""\n types = schema.get("types") or schema.get("type")\n if not types:\n return []\n if isinstance(types, str):\n return [types]\n return types\n\n\ndef _get_sub_schemas(schema: Mapping[str, Any]) -> Sequence[Mapping[str, Any]]:\n """Returns a list of sub-schema definitions for a given schema. This is used to handle union types."""\n return schema.get("anyOf") or schema.get("oneOf") or [schema]\n\n\ndef _get_normalization_tables_for_schema(\n key: str, schema: Mapping[str, Any], prefix: str = ""\n) -> Mapping[str, AirbyteTableMetadata]:\n """Recursively traverses a schema, returning metadata for the tables that will be created by the Airbyte\n normalization process.\n\n For example, a table `cars` with a nested object field `limited_editions` will produce the tables\n `cars` and `cars_limited_editions`.\n\n For more information on Airbyte's normalization process, see:\n https://docs.airbyte.com/understanding-airbyte/basic-normalization/#nesting\n """\n out: Dict[str, AirbyteTableMetadata] = {}\n # Object types are broken into a new table, as long as they have children\n\n sub_schemas = _get_sub_schemas(schema)\n\n for sub_schema in sub_schemas:\n schema_types = _get_schema_types(sub_schema)\n if not schema_types:\n continue\n\n if "object" in schema_types and len(sub_schema.get("properties", {})) > 0:\n out[prefix + key] = AirbyteTableMetadata(\n schema=generate_table_schema(sub_schema.get("properties", {}))\n )\n for k, v in sub_schema["properties"].items():\n out = merge_dicts(\n out, _get_normalization_tables_for_schema(k, v, f"{prefix}{key}_")\n )\n # Array types are also broken into a new table\n elif "array" in schema_types:\n out[prefix + key] = AirbyteTableMetadata(\n schema=generate_table_schema(sub_schema.get("items", {}).get("properties", {}))\n )\n if sub_schema.get("items", {}).get("properties"):\n for k, v in sub_schema["items"]["properties"].items():\n out = merge_dicts(\n out, _get_normalization_tables_for_schema(k, v, f"{prefix}{key}_")\n )\n\n return out\n\n\ndef _clean_name(name: str) -> str:\n """Cleans an input to be a valid Dagster asset name."""\n return re.sub(r"[^a-z0-9]+", "_", name.lower())\n\n\nclass AirbyteConnectionMetadata(\n NamedTuple(\n "_AirbyteConnectionMetadata",\n [\n ("name", str),\n ("stream_prefix", str),\n ("has_basic_normalization", bool),\n ("stream_data", List[Mapping[str, Any]]),\n ],\n )\n):\n """Contains information about an Airbyte connection.\n\n Attributes:\n name (str): The name of the connection.\n stream_prefix (str): A prefix to add to all stream names.\n has_basic_normalization (bool): Whether or not the connection has basic normalization enabled.\n stream_data (List[Mapping[str, Any]]): Unparsed list of dicts with information about each stream.\n """\n\n @classmethod\n def from_api_json(\n cls, contents: Mapping[str, Any], operations: Mapping[str, Any]\n ) -> "AirbyteConnectionMetadata":\n return cls(\n name=contents["name"],\n stream_prefix=contents.get("prefix", ""),\n has_basic_normalization=any(\n is_basic_normalization_operation(op.get("operatorConfiguration", {}))\n for op in operations.get("operations", [])\n ),\n stream_data=contents.get("syncCatalog", {}).get("streams", []),\n )\n\n @classmethod\n def from_config(cls, contents: Mapping[str, Any]) -> "AirbyteConnectionMetadata":\n config_contents = cast(Mapping[str, Any], contents.get("configuration"))\n 
check.invariant(\n config_contents is not None, "Airbyte connection config is missing 'configuration' key"\n )\n\n return cls(\n name=contents["resource_name"],\n stream_prefix=config_contents.get("prefix", ""),\n has_basic_normalization=any(\n is_basic_normalization_operation(op.get("operator_configuration", {}))\n for op in config_contents.get("operations", [])\n ),\n stream_data=config_contents.get("sync_catalog", {}).get("streams", []),\n )\n\n def parse_stream_tables(\n self, return_normalization_tables: bool = False\n ) -> Mapping[str, AirbyteTableMetadata]:\n """Parses the stream data and returns a mapping, with keys representing destination\n tables associated with each enabled stream and values representing any affiliated\n tables created by Airbyte's normalization process, if enabled.\n """\n tables: Dict[str, AirbyteTableMetadata] = {}\n\n enabled_streams = [\n stream for stream in self.stream_data if stream.get("config", {}).get("selected", False)\n ]\n\n for stream in enabled_streams:\n name = cast(str, stream.get("stream", {}).get("name"))\n prefixed_name = f"{self.stream_prefix}{name}"\n\n schema = (\n stream["stream"]["json_schema"]\n if "json_schema" in stream["stream"]\n else stream["stream"]["jsonSchema"]\n )\n normalization_tables: Dict[str, AirbyteTableMetadata] = {}\n schema_props = schema.get("properties", schema.get("items", {}).get("properties", {}))\n if self.has_basic_normalization and return_normalization_tables:\n for k, v in schema_props.items():\n for normalization_table_name, meta in _get_normalization_tables_for_schema(\n k, v, f"{name}_"\n ).items():\n prefixed_norm_table_name = f"{self.stream_prefix}{normalization_table_name}"\n normalization_tables[prefixed_norm_table_name] = meta\n tables[prefixed_name] = AirbyteTableMetadata(\n schema=generate_table_schema(schema_props),\n normalization_tables=normalization_tables,\n )\n\n return tables\n\n\ndef _get_schema_by_table_name(\n stream_table_metadata: Mapping[str, AirbyteTableMetadata]\n) -> Mapping[str, TableSchema]:\n schema_by_base_table_name = [(k, v.schema) for k, v in stream_table_metadata.items()]\n schema_by_normalization_table_name = list(\n chain.from_iterable(\n [\n [\n (k, v.schema)\n for k, v in cast(\n Dict[str, AirbyteTableMetadata], meta.normalization_tables\n ).items()\n ]\n for meta in stream_table_metadata.values()\n ]\n )\n )\n\n return dict(schema_by_normalization_table_name + schema_by_base_table_name)\n\n\nclass AirbyteCoreCacheableAssetsDefinition(CacheableAssetsDefinition):\n def __init__(\n self,\n key_prefix: Sequence[str],\n create_assets_for_normalization_tables: bool,\n connection_to_group_fn: Optional[Callable[[str], Optional[str]]],\n connection_to_io_manager_key_fn: Optional[Callable[[str], Optional[str]]],\n connection_filter: Optional[Callable[[AirbyteConnectionMetadata], bool]],\n connection_to_asset_key_fn: Optional[Callable[[AirbyteConnectionMetadata, str], AssetKey]],\n connection_to_freshness_policy_fn: Optional[\n Callable[[AirbyteConnectionMetadata], Optional[FreshnessPolicy]]\n ],\n connection_to_auto_materialize_policy_fn: Optional[\n Callable[[AirbyteConnectionMetadata], Optional[AutoMaterializePolicy]]\n ] = None,\n ):\n self._key_prefix = key_prefix\n self._create_assets_for_normalization_tables = create_assets_for_normalization_tables\n self._connection_to_group_fn = connection_to_group_fn\n self._connection_to_io_manager_key_fn = connection_to_io_manager_key_fn\n self._connection_filter = connection_filter\n self._connection_to_asset_key_fn: 
Callable[[AirbyteConnectionMetadata, str], AssetKey] = (\n connection_to_asset_key_fn or (lambda _, table: AssetKey(path=[table]))\n )\n self._connection_to_freshness_policy_fn = connection_to_freshness_policy_fn or (\n lambda _: None\n )\n self._connection_to_auto_materialize_policy_fn = (\n connection_to_auto_materialize_policy_fn or (lambda _: None)\n )\n\n contents = hashlib.sha1() # so that hexdigest is 40, not 64 bytes\n contents.update(",".join(key_prefix).encode("utf-8"))\n contents.update(str(create_assets_for_normalization_tables).encode("utf-8"))\n if connection_filter:\n contents.update(inspect.getsource(connection_filter).encode("utf-8"))\n\n super().__init__(unique_id=f"airbyte-{contents.hexdigest()}")\n\n @abstractmethod\n def _get_connections(self) -> Sequence[Tuple[str, AirbyteConnectionMetadata]]:\n pass\n\n def compute_cacheable_data(self) -> Sequence[AssetsDefinitionCacheableData]:\n asset_defn_data: List[AssetsDefinitionCacheableData] = []\n for connection_id, connection in self._get_connections():\n stream_table_metadata = connection.parse_stream_tables(\n self._create_assets_for_normalization_tables\n )\n schema_by_table_name = _get_schema_by_table_name(stream_table_metadata)\n\n table_to_asset_key = partial(self._connection_to_asset_key_fn, connection)\n asset_data_for_conn = _build_airbyte_asset_defn_metadata(\n connection_id=connection_id,\n destination_tables=list(stream_table_metadata.keys()),\n normalization_tables={\n table: set(metadata.normalization_tables.keys())\n for table, metadata in stream_table_metadata.items()\n },\n asset_key_prefix=self._key_prefix,\n group_name=(\n self._connection_to_group_fn(connection.name)\n if self._connection_to_group_fn\n else None\n ),\n io_manager_key=(\n self._connection_to_io_manager_key_fn(connection.name)\n if self._connection_to_io_manager_key_fn\n else None\n ),\n schema_by_table_name=schema_by_table_name,\n table_to_asset_key_fn=table_to_asset_key,\n freshness_policy=self._connection_to_freshness_policy_fn(connection),\n auto_materialize_policy=self._connection_to_auto_materialize_policy_fn(connection),\n )\n\n asset_defn_data.append(asset_data_for_conn)\n\n return asset_defn_data\n\n def _build_definitions_with_resources(\n self,\n data: Sequence[AssetsDefinitionCacheableData],\n resource_defs: Optional[Mapping[str, ResourceDefinition]] = None,\n ) -> Sequence[AssetsDefinition]:\n return [_build_airbyte_assets_from_metadata(meta, resource_defs) for meta in data]\n\n def build_definitions(\n self, data: Sequence[AssetsDefinitionCacheableData]\n ) -> Sequence[AssetsDefinition]:\n return self._build_definitions_with_resources(data)\n\n\nclass AirbyteInstanceCacheableAssetsDefinition(AirbyteCoreCacheableAssetsDefinition):\n def __init__(\n self,\n airbyte_resource_def: Union[ResourceDefinition, AirbyteResource],\n workspace_id: Optional[str],\n key_prefix: Sequence[str],\n create_assets_for_normalization_tables: bool,\n connection_to_group_fn: Optional[Callable[[str], Optional[str]]],\n connection_to_io_manager_key_fn: Optional[Callable[[str], Optional[str]]],\n connection_filter: Optional[Callable[[AirbyteConnectionMetadata], bool]],\n connection_to_asset_key_fn: Optional[Callable[[AirbyteConnectionMetadata, str], AssetKey]],\n connection_to_freshness_policy_fn: Optional[\n Callable[[AirbyteConnectionMetadata], Optional[FreshnessPolicy]]\n ],\n connection_to_auto_materialize_policy_fn: Optional[\n Callable[[AirbyteConnectionMetadata], Optional[AutoMaterializePolicy]]\n ] = None,\n ):\n super().__init__(\n 
key_prefix=key_prefix,\n create_assets_for_normalization_tables=create_assets_for_normalization_tables,\n connection_to_group_fn=connection_to_group_fn,\n connection_to_io_manager_key_fn=connection_to_io_manager_key_fn,\n connection_filter=connection_filter,\n connection_to_asset_key_fn=connection_to_asset_key_fn,\n connection_to_freshness_policy_fn=connection_to_freshness_policy_fn,\n connection_to_auto_materialize_policy_fn=connection_to_auto_materialize_policy_fn,\n )\n self._workspace_id = workspace_id\n self._airbyte_instance: AirbyteResource = (\n airbyte_resource_def.process_config_and_initialize()\n if isinstance(airbyte_resource_def, AirbyteResource)\n else airbyte_resource_def(build_init_resource_context())\n )\n\n def _get_connections(self) -> Sequence[Tuple[str, AirbyteConnectionMetadata]]:\n workspace_id = self._workspace_id\n if not workspace_id:\n workspaces = cast(\n List[Dict[str, Any]],\n check.not_none(\n self._airbyte_instance.make_request(endpoint="/workspaces/list", data={})\n ).get("workspaces", []),\n )\n\n check.invariant(len(workspaces) <= 1, "Airbyte instance has more than one workspace")\n check.invariant(len(workspaces) > 0, "Airbyte instance has no workspaces")\n\n workspace_id = workspaces[0].get("workspaceId")\n\n connections = cast(\n List[Dict[str, Any]],\n check.not_none(\n self._airbyte_instance.make_request(\n endpoint="/connections/list", data={"workspaceId": workspace_id}\n )\n ).get("connections", []),\n )\n\n output_connections: List[Tuple[str, AirbyteConnectionMetadata]] = []\n for connection_json in connections:\n connection_id = cast(str, connection_json.get("connectionId"))\n\n operations_json = cast(\n Dict[str, Any],\n check.not_none(\n self._airbyte_instance.make_request(\n endpoint="/operations/list",\n data={"connectionId": connection_id},\n )\n ),\n )\n connection = AirbyteConnectionMetadata.from_api_json(connection_json, operations_json)\n\n # Filter out connections that don't match the filter function\n if self._connection_filter and not self._connection_filter(connection):\n continue\n\n output_connections.append((connection_id, connection))\n return output_connections\n\n def build_definitions(\n self, data: Sequence[AssetsDefinitionCacheableData]\n ) -> Sequence[AssetsDefinition]:\n return super()._build_definitions_with_resources(\n data, {"airbyte": self._airbyte_instance.get_resource_definition()}\n )\n\n\nclass AirbyteYAMLCacheableAssetsDefinition(AirbyteCoreCacheableAssetsDefinition):\n def __init__(\n self,\n project_dir: str,\n workspace_id: Optional[str],\n key_prefix: Sequence[str],\n create_assets_for_normalization_tables: bool,\n connection_to_group_fn: Optional[Callable[[str], Optional[str]]],\n connection_to_io_manager_key_fn: Optional[Callable[[str], Optional[str]]],\n connection_filter: Optional[Callable[[AirbyteConnectionMetadata], bool]],\n connection_directories: Optional[Sequence[str]],\n connection_to_asset_key_fn: Optional[Callable[[AirbyteConnectionMetadata, str], AssetKey]],\n connection_to_freshness_policy_fn: Optional[\n Callable[[AirbyteConnectionMetadata], Optional[FreshnessPolicy]]\n ],\n connection_to_auto_materialize_policy_fn: Optional[\n Callable[[AirbyteConnectionMetadata], Optional[AutoMaterializePolicy]]\n ] = None,\n ):\n super().__init__(\n key_prefix=key_prefix,\n create_assets_for_normalization_tables=create_assets_for_normalization_tables,\n connection_to_group_fn=connection_to_group_fn,\n connection_to_io_manager_key_fn=connection_to_io_manager_key_fn,\n connection_filter=connection_filter,\n 
connection_to_asset_key_fn=connection_to_asset_key_fn,\n connection_to_freshness_policy_fn=connection_to_freshness_policy_fn,\n connection_to_auto_materialize_policy_fn=connection_to_auto_materialize_policy_fn,\n )\n self._workspace_id = workspace_id\n self._project_dir = project_dir\n self._connection_directories = connection_directories\n\n def _get_connections(self) -> Sequence[Tuple[str, AirbyteConnectionMetadata]]:\n connections_dir = os.path.join(self._project_dir, "connections")\n\n output_connections: List[Tuple[str, AirbyteConnectionMetadata]] = []\n\n connection_directories = self._connection_directories or os.listdir(connections_dir)\n for connection_name in connection_directories:\n connection_dir = os.path.join(connections_dir, connection_name)\n with open(os.path.join(connection_dir, "configuration.yaml"), encoding="utf-8") as f:\n connection = AirbyteConnectionMetadata.from_config(yaml.safe_load(f.read()))\n\n # Filter out connections that don't match the filter function\n if self._connection_filter and not self._connection_filter(connection):\n continue\n\n if self._workspace_id:\n state_file = f"state_{self._workspace_id}.yaml"\n check.invariant(\n state_file in os.listdir(connection_dir),\n f"Workspace state file {state_file} not found",\n )\n else:\n state_files = [\n filename\n for filename in os.listdir(connection_dir)\n if filename.startswith("state_")\n ]\n check.invariant(\n len(state_files) > 0,\n f"No state files found for connection {connection_name} in {connection_dir}",\n )\n check.invariant(\n len(state_files) <= 1,\n "More than one state file found for connection {} in {}, specify a workspace_id"\n " to disambiguate".format(connection_name, connection_dir),\n )\n state_file = state_files[0]\n\n with open(os.path.join(connection_dir, cast(str, state_file)), encoding="utf-8") as f:\n state = yaml.safe_load(f.read())\n connection_id = state.get("resource_id")\n\n output_connections.append((connection_id, connection))\n return output_connections\n\n\n
[docs]def load_assets_from_airbyte_instance(\n airbyte: Union[AirbyteResource, ResourceDefinition],\n workspace_id: Optional[str] = None,\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n create_assets_for_normalization_tables: bool = True,\n connection_to_group_fn: Optional[Callable[[str], Optional[str]]] = _clean_name,\n io_manager_key: Optional[str] = None,\n connection_to_io_manager_key_fn: Optional[Callable[[str], Optional[str]]] = None,\n connection_filter: Optional[Callable[[AirbyteConnectionMetadata], bool]] = None,\n connection_to_asset_key_fn: Optional[\n Callable[[AirbyteConnectionMetadata, str], AssetKey]\n ] = None,\n connection_to_freshness_policy_fn: Optional[\n Callable[[AirbyteConnectionMetadata], Optional[FreshnessPolicy]]\n ] = None,\n connection_to_auto_materialize_policy_fn: Optional[\n Callable[[AirbyteConnectionMetadata], Optional[AutoMaterializePolicy]]\n ] = None,\n) -> CacheableAssetsDefinition:\n """Loads Airbyte connection assets from a configured AirbyteResource instance. This fetches information\n about defined connections at initialization time, and will error on workspace load if the Airbyte\n instance is not reachable.\n\n Args:\n airbyte (ResourceDefinition): An AirbyteResource configured with the appropriate connection\n details.\n workspace_id (Optional[str]): The ID of the Airbyte workspace to load connections from. Only\n required if multiple workspaces exist in your instance.\n key_prefix (Optional[CoercibleToAssetKeyPrefix]): A prefix for the asset keys created.\n create_assets_for_normalization_tables (bool): If True, assets will be created for tables\n created by Airbyte's normalization feature. If False, only the destination tables\n will be created. Defaults to True.\n connection_to_group_fn (Optional[Callable[[str], Optional[str]]]): Function which returns an asset\n group name for a given Airbyte connection name. If None, no groups will be created. Defaults\n to a basic sanitization function.\n io_manager_key (Optional[str]): The I/O manager key to use for all assets. Defaults to "io_manager".\n Use this if all assets should be loaded from the same source, otherwise use connection_to_io_manager_key_fn.\n connection_to_io_manager_key_fn (Optional[Callable[[str], Optional[str]]]): Function which returns an\n I/O manager key for a given Airbyte connection name. When other ops are downstream of the loaded assets,\n the IOManager specified determines how the inputs to those ops are loaded. Defaults to "io_manager".\n connection_filter (Optional[Callable[[AirbyteConnectionMetadata], bool]]): Optional function which takes\n in connection metadata and returns False if the connection should be excluded from the output assets.\n connection_to_asset_key_fn (Optional[Callable[[AirbyteConnectionMetadata, str], AssetKey]]): Optional function which\n takes in connection metadata and table name and returns an asset key for the table. If None, the default asset\n key is based on the table name. Any asset key prefix will be applied to the output of this function.\n connection_to_freshness_policy_fn (Optional[Callable[[AirbyteConnectionMetadata], Optional[FreshnessPolicy]]]): Optional function\n which takes in connection metadata and returns a freshness policy for the connection's assets. 
If None, no freshness policies\n will be applied to the assets.\n connection_to_auto_materialize_policy_fn (Optional[Callable[[AirbyteConnectionMetadata], Optional[AutoMaterializePolicy]]]): Optional\n function which takes in connection metadata and returns an auto materialization policy for the connection's assets. If None, no\n auto materialization policies will be applied to the assets.\n\n **Examples:**\n\n Loading all Airbyte connections as assets:\n\n .. code-block:: python\n\n from dagster_airbyte import airbyte_resource, load_assets_from_airbyte_instance\n\n airbyte_instance = airbyte_resource.configured(\n {\n "host": "localhost",\n "port": "8000",\n }\n )\n airbyte_assets = load_assets_from_airbyte_instance(airbyte_instance)\n\n Filtering the set of loaded connections:\n\n .. code-block:: python\n\n from dagster_airbyte import airbyte_resource, load_assets_from_airbyte_instance\n\n airbyte_instance = airbyte_resource.configured(\n {\n "host": "localhost",\n "port": "8000",\n }\n )\n airbyte_assets = load_assets_from_airbyte_instance(\n airbyte_instance,\n connection_filter=lambda meta: "snowflake" in meta.name,\n )\n """\n if isinstance(airbyte, AirbyteCloudResource):\n raise DagsterInvalidInvocationError(\n "load_assets_from_airbyte_instance is not yet supported for AirbyteCloudResource"\n )\n\n if isinstance(key_prefix, str):\n key_prefix = [key_prefix]\n key_prefix = check.list_param(key_prefix or [], "key_prefix", of_type=str)\n\n check.invariant(\n not io_manager_key or not connection_to_io_manager_key_fn,\n "Cannot specify both io_manager_key and connection_to_io_manager_key_fn",\n )\n if not connection_to_io_manager_key_fn:\n connection_to_io_manager_key_fn = lambda _: io_manager_key\n\n return AirbyteInstanceCacheableAssetsDefinition(\n airbyte_resource_def=airbyte,\n workspace_id=workspace_id,\n key_prefix=key_prefix,\n create_assets_for_normalization_tables=create_assets_for_normalization_tables,\n connection_to_group_fn=connection_to_group_fn,\n connection_to_io_manager_key_fn=connection_to_io_manager_key_fn,\n connection_filter=connection_filter,\n connection_to_asset_key_fn=connection_to_asset_key_fn,\n connection_to_freshness_policy_fn=connection_to_freshness_policy_fn,\n connection_to_auto_materialize_policy_fn=connection_to_auto_materialize_policy_fn,\n )
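
# --- Illustrative usage sketch (documentation aid; not part of the library API) ---
# The docstring above shows basic usage; this sketch shows per-connection customization.
# The host/port values, I/O manager key, and lag threshold are hypothetical, and the
# AirbyteResource(host=..., port=...) form assumes the Pythonic resource's config fields.
_example_airbyte_cacheable_assets = load_assets_from_airbyte_instance(
    AirbyteResource(host="localhost", port="8000"),  # hypothetical local Airbyte instance
    key_prefix=["airbyte"],
    # Route loaded tables through a non-default I/O manager (hypothetical key).
    connection_to_io_manager_key_fn=lambda connection_name: "warehouse_io_manager",
    # Apply the same freshness policy to every connection's assets.
    connection_to_freshness_policy_fn=lambda meta: FreshnessPolicy(maximum_lag_minutes=60),
)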
\n\n\n
[docs]def load_assets_from_airbyte_project(\n project_dir: str,\n workspace_id: Optional[str] = None,\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n create_assets_for_normalization_tables: bool = True,\n connection_to_group_fn: Optional[Callable[[str], Optional[str]]] = _clean_name,\n io_manager_key: Optional[str] = None,\n connection_to_io_manager_key_fn: Optional[Callable[[str], Optional[str]]] = None,\n connection_filter: Optional[Callable[[AirbyteConnectionMetadata], bool]] = None,\n connection_directories: Optional[Sequence[str]] = None,\n connection_to_asset_key_fn: Optional[\n Callable[[AirbyteConnectionMetadata, str], AssetKey]\n ] = None,\n connection_to_freshness_policy_fn: Optional[\n Callable[[AirbyteConnectionMetadata], Optional[FreshnessPolicy]]\n ] = None,\n connection_to_auto_materialize_policy_fn: Optional[\n Callable[[AirbyteConnectionMetadata], Optional[AutoMaterializePolicy]]\n ] = None,\n) -> CacheableAssetsDefinition:\n """Loads an Airbyte project into a set of Dagster assets.\n\n Point to the root folder of an Airbyte project synced using the Octavia CLI. For\n more information, see https://github.com/airbytehq/airbyte/tree/master/octavia-cli#octavia-import-all.\n\n Args:\n project_dir (str): The path to the root of your Airbyte project, containing sources, destinations,\n and connections folders.\n workspace_id (Optional[str]): The ID of the Airbyte workspace to load connections from. Only\n required if multiple workspace state YAMLfiles exist in the project.\n key_prefix (Optional[CoercibleToAssetKeyPrefix]): A prefix for the asset keys created.\n create_assets_for_normalization_tables (bool): If True, assets will be created for tables\n created by Airbyte's normalization feature. If False, only the destination tables\n will be created. Defaults to True.\n connection_to_group_fn (Optional[Callable[[str], Optional[str]]]): Function which returns an asset\n group name for a given Airbyte connection name. If None, no groups will be created. Defaults\n to a basic sanitization function.\n io_manager_key (Optional[str]): The I/O manager key to use for all assets. Defaults to "io_manager".\n Use this if all assets should be loaded from the same source, otherwise use connection_to_io_manager_key_fn.\n connection_to_io_manager_key_fn (Optional[Callable[[str], Optional[str]]]): Function which returns an\n I/O manager key for a given Airbyte connection name. When other ops are downstream of the loaded assets,\n the IOManager specified determines how the inputs to those ops are loaded. Defaults to "io_manager".\n connection_filter (Optional[Callable[[AirbyteConnectionMetadata], bool]]): Optional function which\n takes in connection metadata and returns False if the connection should be excluded from the output assets.\n connection_directories (Optional[List[str]]): Optional list of connection directories to load assets from.\n If omitted, all connections in the Airbyte project are loaded. May be faster than connection_filter\n if the project has many connections or if the connection yaml files are large.\n connection_to_asset_key_fn (Optional[Callable[[AirbyteConnectionMetadata, str], AssetKey]]): Optional function which\n takes in connection metadata and table name and returns an asset key for the table. If None, the default asset\n key is based on the table name. 
Any asset key prefix will be applied to the output of this function.\n connection_to_freshness_policy_fn (Optional[Callable[[AirbyteConnectionMetadata], Optional[FreshnessPolicy]]]):\n Optional function which takes in connection metadata and returns a freshness policy for the connection's assets.\n If None, no freshness policies will be applied to the assets.\n connection_to_auto_materialize_policy_fn (Optional[Callable[[AirbyteConnectionMetadata], Optional[AutoMaterializePolicy]]]):\n Optional function which takes in connection metadata and returns an auto materialization policy for the connection's assets.\n If None, no auto materialization policies will be applied to the assets.\n\n **Examples:**\n\n Loading all Airbyte connections as assets:\n\n .. code-block:: python\n\n from dagster_airbyte import load_assets_from_airbyte_project\n\n airbyte_assets = load_assets_from_airbyte_project(\n project_dir="path/to/airbyte/project",\n )\n\n Filtering the set of loaded connections:\n\n .. code-block:: python\n\n from dagster_airbyte import load_assets_from_airbyte_project\n\n airbyte_assets = load_assets_from_airbyte_project(\n project_dir="path/to/airbyte/project",\n connection_filter=lambda meta: "snowflake" in meta.name,\n )\n """\n if isinstance(key_prefix, str):\n key_prefix = [key_prefix]\n key_prefix = check.list_param(key_prefix or [], "key_prefix", of_type=str)\n\n check.invariant(\n not io_manager_key or not connection_to_io_manager_key_fn,\n "Cannot specify both io_manager_key and connection_to_io_manager_key_fn",\n )\n if not connection_to_io_manager_key_fn:\n connection_to_io_manager_key_fn = lambda _: io_manager_key\n\n return AirbyteYAMLCacheableAssetsDefinition(\n project_dir=project_dir,\n workspace_id=workspace_id,\n key_prefix=key_prefix,\n create_assets_for_normalization_tables=create_assets_for_normalization_tables,\n connection_to_group_fn=connection_to_group_fn,\n connection_to_io_manager_key_fn=connection_to_io_manager_key_fn,\n connection_filter=connection_filter,\n connection_directories=connection_directories,\n connection_to_asset_key_fn=connection_to_asset_key_fn,\n connection_to_freshness_policy_fn=connection_to_freshness_policy_fn,\n connection_to_auto_materialize_policy_fn=connection_to_auto_materialize_policy_fn,\n )
\n
", "current_page_name": "_modules/dagster_airbyte/asset_defs", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_airbyte.asset_defs"}, "managed": {"generated": {"destinations": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_airbyte.managed.generated.destinations

\n# ruff: noqa: A001, A002\nfrom typing import Optional, Union\n\nimport dagster._check as check\nfrom dagster._annotations import public\n\nfrom dagster_airbyte.managed.types import GeneratedAirbyteDestination\n\n\n
[docs]class DynamodbDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(\n self,\n name: str,\n dynamodb_table_name_prefix: str,\n dynamodb_region: str,\n access_key_id: str,\n secret_access_key: str,\n dynamodb_endpoint: Optional[str] = None,\n ):\n """Airbyte Destination for Dynamodb.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/dynamodb\n\n Args:\n name (str): The name of the destination.\n dynamodb_endpoint (Optional[str]): This is your DynamoDB endpoint url.(if you are working with AWS DynamoDB, just leave empty).\n dynamodb_table_name_prefix (str): The prefix to use when naming DynamoDB tables.\n dynamodb_region (str): The region of the DynamoDB.\n access_key_id (str): The access key id to access the DynamoDB. Airbyte requires Read and Write permissions to the DynamoDB.\n secret_access_key (str): The corresponding secret to the access key id.\n """\n self.dynamodb_endpoint = check.opt_str_param(dynamodb_endpoint, "dynamodb_endpoint")\n self.dynamodb_table_name_prefix = check.str_param(\n dynamodb_table_name_prefix, "dynamodb_table_name_prefix"\n )\n self.dynamodb_region = check.str_param(dynamodb_region, "dynamodb_region")\n self.access_key_id = check.str_param(access_key_id, "access_key_id")\n self.secret_access_key = check.str_param(secret_access_key, "secret_access_key")\n super().__init__("Dynamodb", name)
\n\n\n
[docs]class BigqueryDestination(GeneratedAirbyteDestination):\n
[docs] class StandardInserts:\n
[docs] @public\n def __init__(\n self,\n ):\n self.method = "Standard"
\n\n
[docs] class HMACKey:\n
[docs] @public\n def __init__(self, hmac_key_access_id: str, hmac_key_secret: str):\n self.credential_type = "HMAC_KEY"\n self.hmac_key_access_id = check.str_param(hmac_key_access_id, "hmac_key_access_id")\n self.hmac_key_secret = check.str_param(hmac_key_secret, "hmac_key_secret")
\n\n
[docs] class GCSStaging:\n
[docs] @public\n def __init__(\n self,\n credential: "BigqueryDestination.HMACKey",\n gcs_bucket_name: str,\n gcs_bucket_path: str,\n keep_files_in_gcs_bucket: Optional[str] = None,\n ):\n self.method = "GCS Staging"\n self.credential = check.inst_param(\n credential, "credential", BigqueryDestination.HMACKey\n )\n self.gcs_bucket_name = check.str_param(gcs_bucket_name, "gcs_bucket_name")\n self.gcs_bucket_path = check.str_param(gcs_bucket_path, "gcs_bucket_path")\n self.keep_files_in_gcs_bucket = check.opt_str_param(\n keep_files_in_gcs_bucket, "keep_files_in_gcs_bucket"\n )
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n project_id: str,\n dataset_location: str,\n dataset_id: str,\n loading_method: Union[\n "BigqueryDestination.StandardInserts", "BigqueryDestination.GCSStaging"\n ],\n credentials_json: Optional[str] = None,\n transformation_priority: Optional[str] = None,\n big_query_client_buffer_size_mb: Optional[int] = None,\n ):\n """Airbyte Destination for Bigquery.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/bigquery\n\n Args:\n name (str): The name of the destination.\n project_id (str): The GCP project ID for the project containing the target BigQuery dataset. Read more here.\n dataset_location (str): The location of the dataset. Warning: Changes made after creation will not be applied. Read more here.\n dataset_id (str): The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.\n loading_method (Union[BigqueryDestination.StandardInserts, BigqueryDestination.GCSStaging]): Loading method used to send select the way data will be uploaded to BigQuery. Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging. GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here.\n credentials_json (Optional[str]): The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.\n transformation_priority (Optional[str]): Interactive run type means that the query is executed as soon as possible, and these queries count towards concurrent rate limit and daily limit. Read more about interactive run type here. Batch queries are queued and started as soon as idle resources are available in the BigQuery shared resource pool, which usually occurs within a few minutes. Batch queries don`t count towards your concurrent rate limit. Read more about batch queries here. The default "interactive" value is used if not set explicitly.\n big_query_client_buffer_size_mb (Optional[int]): Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.\n """\n self.project_id = check.str_param(project_id, "project_id")\n self.dataset_location = check.str_param(dataset_location, "dataset_location")\n self.dataset_id = check.str_param(dataset_id, "dataset_id")\n self.loading_method = check.inst_param(\n loading_method,\n "loading_method",\n (BigqueryDestination.StandardInserts, BigqueryDestination.GCSStaging),\n )\n self.credentials_json = check.opt_str_param(credentials_json, "credentials_json")\n self.transformation_priority = check.opt_str_param(\n transformation_priority, "transformation_priority"\n )\n self.big_query_client_buffer_size_mb = check.opt_int_param(\n big_query_client_buffer_size_mb, "big_query_client_buffer_size_mb"\n )\n super().__init__("Bigquery", name)
\n\n\n
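# --- Illustrative usage sketch (documentation aid; not part of the library API) ---
# Configuring a managed BigQuery destination with GCS staging, showing how the nested
# option classes defined above compose. The project, dataset, bucket, and HMAC key
# values are hypothetical placeholders.
_example_bigquery_destination = BigqueryDestination(
    name="my_bigquery_destination",
    project_id="my-gcp-project",  # hypothetical GCP project
    dataset_location="US",
    dataset_id="airbyte_sync",
    loading_method=BigqueryDestination.GCSStaging(
        credential=BigqueryDestination.HMACKey(
            hmac_key_access_id="GOOG1EXAMPLE",  # hypothetical HMAC key ID
            hmac_key_secret="examplesecret",  # hypothetical HMAC key secret
        ),
        gcs_bucket_name="my-staging-bucket",
        gcs_bucket_path="airbyte/staging",
    ),
)
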
[docs]class RabbitmqDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n routing_key: str,\n ssl: Optional[bool] = None,\n port: Optional[int] = None,\n virtual_host: Optional[str] = None,\n username: Optional[str] = None,\n password: Optional[str] = None,\n exchange: Optional[str] = None,\n ):\n """Airbyte Destination for Rabbitmq.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/rabbitmq\n\n Args:\n name (str): The name of the destination.\n ssl (Optional[bool]): SSL enabled.\n host (str): The RabbitMQ host name.\n port (Optional[int]): The RabbitMQ port.\n virtual_host (Optional[str]): The RabbitMQ virtual host name.\n username (Optional[str]): The username to connect.\n password (Optional[str]): The password to connect.\n exchange (Optional[str]): The exchange name.\n routing_key (str): The routing key.\n """\n self.ssl = check.opt_bool_param(ssl, "ssl")\n self.host = check.str_param(host, "host")\n self.port = check.opt_int_param(port, "port")\n self.virtual_host = check.opt_str_param(virtual_host, "virtual_host")\n self.username = check.opt_str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.exchange = check.opt_str_param(exchange, "exchange")\n self.routing_key = check.str_param(routing_key, "routing_key")\n super().__init__("Rabbitmq", name)
\n\n\n
[docs]class KvdbDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(self, name: str, bucket_id: str, secret_key: str):\n """Airbyte Destination for Kvdb.\n\n Documentation can be found at https://kvdb.io/docs/api/\n\n Args:\n name (str): The name of the destination.\n bucket_id (str): The ID of your KVdb bucket.\n secret_key (str): Your bucket Secret Key.\n """\n self.bucket_id = check.str_param(bucket_id, "bucket_id")\n self.secret_key = check.str_param(secret_key, "secret_key")\n super().__init__("Kvdb", name)
\n\n\n
[docs]class ClickhouseDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n database: str,\n username: str,\n password: Optional[str] = None,\n jdbc_url_params: Optional[str] = None,\n ssl: Optional[bool] = None,\n ):\n """Airbyte Destination for Clickhouse.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/clickhouse\n\n Args:\n name (str): The name of the destination.\n host (str): Hostname of the database.\n port (int): HTTP port of the database.\n database (str): Name of the database.\n username (str): Username to use to access the database.\n password (Optional[str]): Password associated with the username.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).\n ssl (Optional[bool]): Encrypt data using SSL.\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.database = check.str_param(database, "database")\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n self.ssl = check.opt_bool_param(ssl, "ssl")\n super().__init__("Clickhouse", name)
\n\n\n
[docs]class AmazonSqsDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(\n self,\n name: str,\n queue_url: str,\n region: str,\n message_delay: Optional[int] = None,\n access_key: Optional[str] = None,\n secret_key: Optional[str] = None,\n message_body_key: Optional[str] = None,\n message_group_id: Optional[str] = None,\n ):\n """Airbyte Destination for Amazon Sqs.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/amazon-sqs\n\n Args:\n name (str): The name of the destination.\n queue_url (str): URL of the SQS Queue\n region (str): AWS Region of the SQS Queue\n message_delay (Optional[int]): Modify the Message Delay of the individual message from the Queue's default (seconds).\n access_key (Optional[str]): The Access Key ID of the AWS IAM Role to use for sending messages\n secret_key (Optional[str]): The Secret Key of the AWS IAM Role to use for sending messages\n message_body_key (Optional[str]): Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body.\n message_group_id (Optional[str]): The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues.\n """\n self.queue_url = check.str_param(queue_url, "queue_url")\n self.region = check.str_param(region, "region")\n self.message_delay = check.opt_int_param(message_delay, "message_delay")\n self.access_key = check.opt_str_param(access_key, "access_key")\n self.secret_key = check.opt_str_param(secret_key, "secret_key")\n self.message_body_key = check.opt_str_param(message_body_key, "message_body_key")\n self.message_group_id = check.opt_str_param(message_group_id, "message_group_id")\n super().__init__("Amazon Sqs", name)
\n\n\n
[docs]class MariadbColumnstoreDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n database: str,\n username: str,\n password: Optional[str] = None,\n jdbc_url_params: Optional[str] = None,\n ):\n """Airbyte Destination for Mariadb Columnstore.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/mariadb-columnstore\n\n Args:\n name (str): The name of the destination.\n host (str): The Hostname of the database.\n port (int): The Port of the database.\n database (str): Name of the database.\n username (str): The Username which is used to access the database.\n password (Optional[str]): The Password associated with the username.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.database = check.str_param(database, "database")\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n super().__init__("Mariadb Columnstore", name)
\n\n\n
[docs]class KinesisDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(\n self,\n name: str,\n endpoint: str,\n region: str,\n shardCount: int,\n accessKey: str,\n privateKey: str,\n bufferSize: int,\n ):\n """Airbyte Destination for Kinesis.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/kinesis\n\n Args:\n name (str): The name of the destination.\n endpoint (str): AWS Kinesis endpoint.\n region (str): AWS region. Your account determines the Regions that are available to you.\n shardCount (int): Number of shards to which the data should be streamed.\n accessKey (str): Generate the AWS Access Key for current user.\n privateKey (str): The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase".\n bufferSize (int): Buffer size for storing kinesis records before being batch streamed.\n """\n self.endpoint = check.str_param(endpoint, "endpoint")\n self.region = check.str_param(region, "region")\n self.shardCount = check.int_param(shardCount, "shardCount")\n self.accessKey = check.str_param(accessKey, "accessKey")\n self.privateKey = check.str_param(privateKey, "privateKey")\n self.bufferSize = check.int_param(bufferSize, "bufferSize")\n super().__init__("Kinesis", name)
\n\n\n
[docs]class AzureBlobStorageDestination(GeneratedAirbyteDestination):\n
[docs] class CSVCommaSeparatedValues:\n
[docs] @public\n def __init__(self, flattening: str):\n self.format_type = "CSV"\n self.flattening = check.str_param(flattening, "flattening")
\n\n
[docs] class JSONLinesNewlineDelimitedJSON:\n
[docs] @public\n def __init__(\n self,\n ):\n self.format_type = "JSONL"
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n azure_blob_storage_account_name: str,\n azure_blob_storage_account_key: str,\n format: Union[\n "AzureBlobStorageDestination.CSVCommaSeparatedValues",\n "AzureBlobStorageDestination.JSONLinesNewlineDelimitedJSON",\n ],\n azure_blob_storage_endpoint_domain_name: Optional[str] = None,\n azure_blob_storage_container_name: Optional[str] = None,\n azure_blob_storage_output_buffer_size: Optional[int] = None,\n ):\n """Airbyte Destination for Azure Blob Storage.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/azureblobstorage\n\n Args:\n name (str): The name of the destination.\n azure_blob_storage_endpoint_domain_name (Optional[str]): This is Azure Blob Storage endpoint domain name. Leave default value (or leave it empty if run container from command line) to use Microsoft native from example.\n azure_blob_storage_container_name (Optional[str]): The name of the Azure blob storage container. If not exists - will be created automatically. May be empty, then will be created automatically airbytecontainer+timestamp\n azure_blob_storage_account_name (str): The account's name of the Azure Blob Storage.\n azure_blob_storage_account_key (str): The Azure blob storage account key.\n azure_blob_storage_output_buffer_size (Optional[int]): The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure.\n format (Union[AzureBlobStorageDestination.CSVCommaSeparatedValues, AzureBlobStorageDestination.JSONLinesNewlineDelimitedJSON]): Output data format\n """\n self.azure_blob_storage_endpoint_domain_name = check.opt_str_param(\n azure_blob_storage_endpoint_domain_name, "azure_blob_storage_endpoint_domain_name"\n )\n self.azure_blob_storage_container_name = check.opt_str_param(\n azure_blob_storage_container_name, "azure_blob_storage_container_name"\n )\n self.azure_blob_storage_account_name = check.str_param(\n azure_blob_storage_account_name, "azure_blob_storage_account_name"\n )\n self.azure_blob_storage_account_key = check.str_param(\n azure_blob_storage_account_key, "azure_blob_storage_account_key"\n )\n self.azure_blob_storage_output_buffer_size = check.opt_int_param(\n azure_blob_storage_output_buffer_size, "azure_blob_storage_output_buffer_size"\n )\n self.format = check.inst_param(\n format,\n "format",\n (\n AzureBlobStorageDestination.CSVCommaSeparatedValues,\n AzureBlobStorageDestination.JSONLinesNewlineDelimitedJSON,\n ),\n )\n super().__init__("Azure Blob Storage", name)
\n\n\n
[docs]class KafkaDestination(GeneratedAirbyteDestination):\n
[docs] class PLAINTEXT:\n
[docs] @public\n def __init__(self, security_protocol: str):\n self.security_protocol = check.str_param(security_protocol, "security_protocol")
\n\n
[docs] class SASLPLAINTEXT:\n
[docs] @public\n def __init__(self, security_protocol: str, sasl_mechanism: str, sasl_jaas_config: str):\n self.security_protocol = check.str_param(security_protocol, "security_protocol")\n self.sasl_mechanism = check.str_param(sasl_mechanism, "sasl_mechanism")\n self.sasl_jaas_config = check.str_param(sasl_jaas_config, "sasl_jaas_config")
\n\n
[docs] class SASLSSL:\n
[docs] @public\n def __init__(self, security_protocol: str, sasl_mechanism: str, sasl_jaas_config: str):\n self.security_protocol = check.str_param(security_protocol, "security_protocol")\n self.sasl_mechanism = check.str_param(sasl_mechanism, "sasl_mechanism")\n self.sasl_jaas_config = check.str_param(sasl_jaas_config, "sasl_jaas_config")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n bootstrap_servers: str,\n topic_pattern: str,\n protocol: Union[\n "KafkaDestination.PLAINTEXT",\n "KafkaDestination.SASLPLAINTEXT",\n "KafkaDestination.SASLSSL",\n ],\n acks: str,\n enable_idempotence: bool,\n compression_type: str,\n batch_size: int,\n linger_ms: str,\n max_in_flight_requests_per_connection: int,\n client_dns_lookup: str,\n buffer_memory: str,\n max_request_size: int,\n retries: int,\n socket_connection_setup_timeout_ms: str,\n socket_connection_setup_timeout_max_ms: str,\n max_block_ms: str,\n request_timeout_ms: int,\n delivery_timeout_ms: int,\n send_buffer_bytes: int,\n receive_buffer_bytes: int,\n test_topic: Optional[str] = None,\n sync_producer: Optional[bool] = None,\n client_id: Optional[str] = None,\n ):\n """Airbyte Destination for Kafka.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/kafka\n\n Args:\n name (str): The name of the destination.\n bootstrap_servers (str): A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping&mdash;this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).\n topic_pattern (str): Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.\n test_topic (Optional[str]): Topic to test if Airbyte can produce messages.\n sync_producer (Optional[bool]): Wait synchronously until the record has been sent to Kafka.\n protocol (Union[KafkaDestination.PLAINTEXT, KafkaDestination.SASLPLAINTEXT, KafkaDestination.SASLSSL]): Protocol used to communicate with brokers.\n client_id (Optional[str]): An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.\n acks (str): The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent.\n enable_idempotence (bool): When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream.\n compression_type (str): The compression type for all data generated by the producer.\n batch_size (int): The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition.\n linger_ms (str): The producer groups together any records that arrive in between request transmissions into a single batched request.\n max_in_flight_requests_per_connection (int): The maximum number of unacknowledged requests the client will send on a single connection before blocking. 
Can be greater than 1, and the maximum value supported with idempotency is 5.\n client_dns_lookup (str): Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.\n buffer_memory (str): The total bytes of memory the producer can use to buffer records waiting to be sent to the server.\n max_request_size (int): The maximum size of a request in bytes.\n retries (int): Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.\n socket_connection_setup_timeout_ms (str): The amount of time the client will wait for the socket connection to be established.\n socket_connection_setup_timeout_max_ms (str): The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum.\n max_block_ms (str): The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block.\n request_timeout_ms (int): The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.\n delivery_timeout_ms (int): An upper bound on the time to report success or failure after a call to 'send()' returns.\n send_buffer_bytes (int): The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.\n receive_buffer_bytes (int): The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. 
If the value is -1, the OS default will be used.\n """\n self.bootstrap_servers = check.str_param(bootstrap_servers, "bootstrap_servers")\n self.topic_pattern = check.str_param(topic_pattern, "topic_pattern")\n self.test_topic = check.opt_str_param(test_topic, "test_topic")\n self.sync_producer = check.opt_bool_param(sync_producer, "sync_producer")\n self.protocol = check.inst_param(\n protocol,\n "protocol",\n (KafkaDestination.PLAINTEXT, KafkaDestination.SASLPLAINTEXT, KafkaDestination.SASLSSL),\n )\n self.client_id = check.opt_str_param(client_id, "client_id")\n self.acks = check.str_param(acks, "acks")\n self.enable_idempotence = check.bool_param(enable_idempotence, "enable_idempotence")\n self.compression_type = check.str_param(compression_type, "compression_type")\n self.batch_size = check.int_param(batch_size, "batch_size")\n self.linger_ms = check.str_param(linger_ms, "linger_ms")\n self.max_in_flight_requests_per_connection = check.int_param(\n max_in_flight_requests_per_connection, "max_in_flight_requests_per_connection"\n )\n self.client_dns_lookup = check.str_param(client_dns_lookup, "client_dns_lookup")\n self.buffer_memory = check.str_param(buffer_memory, "buffer_memory")\n self.max_request_size = check.int_param(max_request_size, "max_request_size")\n self.retries = check.int_param(retries, "retries")\n self.socket_connection_setup_timeout_ms = check.str_param(\n socket_connection_setup_timeout_ms, "socket_connection_setup_timeout_ms"\n )\n self.socket_connection_setup_timeout_max_ms = check.str_param(\n socket_connection_setup_timeout_max_ms, "socket_connection_setup_timeout_max_ms"\n )\n self.max_block_ms = check.str_param(max_block_ms, "max_block_ms")\n self.request_timeout_ms = check.int_param(request_timeout_ms, "request_timeout_ms")\n self.delivery_timeout_ms = check.int_param(delivery_timeout_ms, "delivery_timeout_ms")\n self.send_buffer_bytes = check.int_param(send_buffer_bytes, "send_buffer_bytes")\n self.receive_buffer_bytes = check.int_param(receive_buffer_bytes, "receive_buffer_bytes")\n super().__init__("Kafka", name)
\n\n\n
[docs]class ElasticsearchDestination(GeneratedAirbyteDestination):\n
[docs] class None_:\n
[docs] @public\n def __init__(\n self,\n ):\n self.method = "none"
\n\n
[docs] class ApiKeySecret:\n
[docs] @public\n def __init__(self, apiKeyId: str, apiKeySecret: str):\n self.method = "secret"\n self.apiKeyId = check.str_param(apiKeyId, "apiKeyId")\n self.apiKeySecret = check.str_param(apiKeySecret, "apiKeySecret")
\n\n
[docs] class UsernamePassword:\n
[docs] @public\n def __init__(self, username: str, password: str):\n self.method = "basic"\n self.username = check.str_param(username, "username")\n self.password = check.str_param(password, "password")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n endpoint: str,\n authenticationMethod: Union[\n "ElasticsearchDestination.None_",\n "ElasticsearchDestination.ApiKeySecret",\n "ElasticsearchDestination.UsernamePassword",\n ],\n upsert: Optional[bool] = None,\n ):\n r"""Airbyte Destination for Elasticsearch.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/elasticsearch\n\n Args:\n name (str): The name of the destination.\n endpoint (str): The full url of the Elasticsearch server\n upsert (Optional[bool]): If a primary key identifier is defined in the source, an upsert will be performed using the primary key value as the elasticsearch doc id. Does not support composite primary keys.\n authenticationMethod (Union[ElasticsearchDestination.None\\\\_, ElasticsearchDestination.ApiKeySecret, ElasticsearchDestination.UsernamePassword]): The type of authentication to be used\n """\n self.endpoint = check.str_param(endpoint, "endpoint")\n self.upsert = check.opt_bool_param(upsert, "upsert")\n self.authenticationMethod = check.inst_param(\n authenticationMethod,\n "authenticationMethod",\n (\n ElasticsearchDestination.None_,\n ElasticsearchDestination.ApiKeySecret,\n ElasticsearchDestination.UsernamePassword,\n ),\n )\n super().__init__("Elasticsearch", name)
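A minimal construction sketch for `ElasticsearchDestination`, assuming the generated classes are importable from `dagster_airbyte.managed.generated.destinations` (the module path is not stated on this page) and using placeholder connection values. The nested `ApiKeySecret` config object is passed directly as `authenticationMethod`:

```python
from dagster_airbyte.managed.generated.destinations import ElasticsearchDestination

# Placeholder endpoint and credentials; substitute real values.
es_destination = ElasticsearchDestination(
    name="my_elasticsearch_destination",
    endpoint="https://search.example.com:9200",
    authenticationMethod=ElasticsearchDestination.ApiKeySecret(
        apiKeyId="my-api-key-id",
        apiKeySecret="my-api-key-secret",
    ),
    upsert=True,
)
```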
\n\n\n
[docs]class MysqlDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n database: str,\n username: str,\n password: Optional[str] = None,\n ssl: Optional[bool] = None,\n jdbc_url_params: Optional[str] = None,\n ):\n """Airbyte Destination for Mysql.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/mysql\n\n Args:\n name (str): The name of the destination.\n host (str): Hostname of the database.\n port (int): Port of the database.\n database (str): Name of the database.\n username (str): Username to use to access the database.\n password (Optional[str]): Password associated with the username.\n ssl (Optional[bool]): Encrypt data using SSL.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.database = check.str_param(database, "database")\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.ssl = check.opt_bool_param(ssl, "ssl")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n super().__init__("Mysql", name)
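For destinations with a flat configuration such as `MysqlDestination`, construction is a single call. A hedged sketch with placeholder values (the import path is assumed):

```python
import os

from dagster_airbyte.managed.generated.destinations import MysqlDestination

mysql_destination = MysqlDestination(
    name="my_mysql_destination",
    host="mysql.example.com",
    port=3306,
    database="analytics",
    username="airbyte_user",
    password=os.getenv("MYSQL_PASSWORD"),  # optional; may be None
    ssl=True,
)
```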
\n\n\n
[docs]class SftpJsonDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n username: str,\n password: str,\n destination_path: str,\n port: Optional[int] = None,\n ):\n """Airbyte Destination for Sftp Json.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/sftp-json\n\n Args:\n name (str): The name of the destination.\n host (str): Hostname of the SFTP server.\n port (Optional[int]): Port of the SFTP server.\n username (str): Username to use to access the SFTP server.\n password (str): Password associated with the username.\n destination_path (str): Path to the directory where json files will be written.\n """\n self.host = check.str_param(host, "host")\n self.port = check.opt_int_param(port, "port")\n self.username = check.str_param(username, "username")\n self.password = check.str_param(password, "password")\n self.destination_path = check.str_param(destination_path, "destination_path")\n super().__init__("Sftp Json", name)
\n\n\n
[docs]class GcsDestination(GeneratedAirbyteDestination):\n
[docs] class HMACKey:\n
[docs] @public\n def __init__(self, credential_type: str, hmac_key_access_id: str, hmac_key_secret: str):\n self.credential_type = check.str_param(credential_type, "credential_type")\n self.hmac_key_access_id = check.str_param(hmac_key_access_id, "hmac_key_access_id")\n self.hmac_key_secret = check.str_param(hmac_key_secret, "hmac_key_secret")
\n\n
[docs] class NoCompression:\n
[docs] @public\n def __init__(self, compression_type: Optional[str] = None):\n self.compression_type = check.opt_str_param(compression_type, "compression_type")
\n\n
[docs] class Deflate:\n
[docs] @public\n def __init__(self, codec: str, compression_level: Optional[int] = None):\n self.codec = check.str_param(codec, "codec")\n self.compression_level = check.opt_int_param(compression_level, "compression_level")
\n\n
[docs] class Bzip2:\n
[docs] @public\n def __init__(self, codec: str):\n self.codec = check.str_param(codec, "codec")
\n\n
[docs] class Xz:\n
[docs] @public\n def __init__(self, codec: str, compression_level: Optional[int] = None):\n self.codec = check.str_param(codec, "codec")\n self.compression_level = check.opt_int_param(compression_level, "compression_level")
\n\n
[docs] class Zstandard:\n
[docs] @public\n def __init__(\n self,\n codec: str,\n compression_level: Optional[int] = None,\n include_checksum: Optional[bool] = None,\n ):\n self.codec = check.str_param(codec, "codec")\n self.compression_level = check.opt_int_param(compression_level, "compression_level")\n self.include_checksum = check.opt_bool_param(include_checksum, "include_checksum")
\n\n
[docs] class Snappy:\n
[docs] @public\n def __init__(self, codec: str):\n self.codec = check.str_param(codec, "codec")
\n\n
[docs] class AvroApacheAvro:\n
[docs] @public\n def __init__(\n self,\n format_type: str,\n compression_codec: Union[\n "GcsDestination.NoCompression",\n "GcsDestination.Deflate",\n "GcsDestination.Bzip2",\n "GcsDestination.Xz",\n "GcsDestination.Zstandard",\n "GcsDestination.Snappy",\n ],\n ):\n self.format_type = check.str_param(format_type, "format_type")\n self.compression_codec = check.inst_param(\n compression_codec,\n "compression_codec",\n (\n GcsDestination.NoCompression,\n GcsDestination.Deflate,\n GcsDestination.Bzip2,\n GcsDestination.Xz,\n GcsDestination.Zstandard,\n GcsDestination.Snappy,\n ),\n )
\n\n
[docs] class GZIP:\n
[docs] @public\n def __init__(self, compression_type: Optional[str] = None):\n self.compression_type = check.opt_str_param(compression_type, "compression_type")
\n\n
[docs] class CSVCommaSeparatedValues:\n
[docs] @public\n def __init__(\n self,\n format_type: str,\n compression: Union["GcsDestination.NoCompression", "GcsDestination.GZIP"],\n flattening: Optional[str] = None,\n ):\n self.format_type = check.str_param(format_type, "format_type")\n self.flattening = check.opt_str_param(flattening, "flattening")\n self.compression = check.inst_param(\n compression, "compression", (GcsDestination.NoCompression, GcsDestination.GZIP)\n )
\n\n
[docs] class JSONLinesNewlineDelimitedJSON:\n
[docs] @public\n def __init__(\n self,\n format_type: str,\n compression: Union["GcsDestination.NoCompression", "GcsDestination.GZIP"],\n ):\n self.format_type = check.str_param(format_type, "format_type")\n self.compression = check.inst_param(\n compression, "compression", (GcsDestination.NoCompression, GcsDestination.GZIP)\n )
\n\n
[docs] class ParquetColumnarStorage:\n
[docs] @public\n def __init__(\n self,\n format_type: str,\n compression_codec: Optional[str] = None,\n block_size_mb: Optional[int] = None,\n max_padding_size_mb: Optional[int] = None,\n page_size_kb: Optional[int] = None,\n dictionary_page_size_kb: Optional[int] = None,\n dictionary_encoding: Optional[bool] = None,\n ):\n self.format_type = check.str_param(format_type, "format_type")\n self.compression_codec = check.opt_str_param(compression_codec, "compression_codec")\n self.block_size_mb = check.opt_int_param(block_size_mb, "block_size_mb")\n self.max_padding_size_mb = check.opt_int_param(\n max_padding_size_mb, "max_padding_size_mb"\n )\n self.page_size_kb = check.opt_int_param(page_size_kb, "page_size_kb")\n self.dictionary_page_size_kb = check.opt_int_param(\n dictionary_page_size_kb, "dictionary_page_size_kb"\n )\n self.dictionary_encoding = check.opt_bool_param(\n dictionary_encoding, "dictionary_encoding"\n )
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n gcs_bucket_name: str,\n gcs_bucket_path: str,\n credential: "GcsDestination.HMACKey",\n format: Union[\n "GcsDestination.AvroApacheAvro",\n "GcsDestination.CSVCommaSeparatedValues",\n "GcsDestination.JSONLinesNewlineDelimitedJSON",\n "GcsDestination.ParquetColumnarStorage",\n ],\n gcs_bucket_region: Optional[str] = None,\n ):\n """Airbyte Destination for Gcs.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/gcs\n\n Args:\n name (str): The name of the destination.\n gcs_bucket_name (str): You can find the bucket name in the App Engine Admin console Application Settings page, under the label Google Cloud Storage Bucket. Read more here.\n gcs_bucket_path (str): GCS Bucket Path string Subdirectory under the above bucket to sync the data into.\n gcs_bucket_region (Optional[str]): Select a Region of the GCS Bucket. Read more here.\n credential (GcsDestination.HMACKey): An HMAC key is a type of credential and can be associated with a service account or a user account in Cloud Storage. Read more here.\n format (Union[GcsDestination.AvroApacheAvro, GcsDestination.CSVCommaSeparatedValues, GcsDestination.JSONLinesNewlineDelimitedJSON, GcsDestination.ParquetColumnarStorage]): Output data format. One of the following formats must be selected - AVRO format, PARQUET format, CSV format, or JSONL format.\n """\n self.gcs_bucket_name = check.str_param(gcs_bucket_name, "gcs_bucket_name")\n self.gcs_bucket_path = check.str_param(gcs_bucket_path, "gcs_bucket_path")\n self.gcs_bucket_region = check.opt_str_param(gcs_bucket_region, "gcs_bucket_region")\n self.credential = check.inst_param(credential, "credential", GcsDestination.HMACKey)\n self.format = check.inst_param(\n format,\n "format",\n (\n GcsDestination.AvroApacheAvro,\n GcsDestination.CSVCommaSeparatedValues,\n GcsDestination.JSONLinesNewlineDelimitedJSON,\n GcsDestination.ParquetColumnarStorage,\n ),\n )\n super().__init__("Gcs", name)
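A rough sketch of wiring `GcsDestination` together with its nested `HMACKey` credential and a CSV output format. The import path and the literal spec strings (for example `format_type`, `compression_type`, and `flattening`) are assumptions based on the Airbyte GCS spec, not taken from this reference:

```python
from dagster_airbyte.managed.generated.destinations import GcsDestination

gcs_destination = GcsDestination(
    name="my_gcs_destination",
    gcs_bucket_name="my-airbyte-bucket",
    gcs_bucket_path="airbyte/syncs",
    gcs_bucket_region="us-east1",
    credential=GcsDestination.HMACKey(
        credential_type="HMAC_KEY",  # assumed spec value
        hmac_key_access_id="GOOG1EXAMPLE",
        hmac_key_secret="hmac-secret-placeholder",
    ),
    format=GcsDestination.CSVCommaSeparatedValues(
        format_type="CSV",  # assumed spec value
        compression=GcsDestination.GZIP(compression_type="GZIP"),
        flattening="Root level flattening",  # assumed spec value
    ),
)
```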
\n\n\n
[docs]class CassandraDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(\n self,\n name: str,\n keyspace: str,\n username: str,\n password: str,\n address: str,\n port: int,\n datacenter: Optional[str] = None,\n replication: Optional[int] = None,\n ):\n """Airbyte Destination for Cassandra.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/cassandra\n\n Args:\n name (str): The name of the destination.\n keyspace (str): Default Cassandra keyspace to create data in.\n username (str): Username to use to access Cassandra.\n password (str): Password associated with Cassandra.\n address (str): Address to connect to.\n port (int): Port of Cassandra.\n datacenter (Optional[str]): Datacenter of the Cassandra cluster.\n replication (Optional[int]): Indicates how many nodes the data should be replicated to.\n """\n self.keyspace = check.str_param(keyspace, "keyspace")\n self.username = check.str_param(username, "username")\n self.password = check.str_param(password, "password")\n self.address = check.str_param(address, "address")\n self.port = check.int_param(port, "port")\n self.datacenter = check.opt_str_param(datacenter, "datacenter")\n self.replication = check.opt_int_param(replication, "replication")\n super().__init__("Cassandra", name)
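A short usage sketch for `CassandraDestination` with placeholder cluster details (the import path is assumed):

```python
from dagster_airbyte.managed.generated.destinations import CassandraDestination

cassandra_destination = CassandraDestination(
    name="my_cassandra_destination",
    keyspace="airbyte",
    username="cassandra_user",
    password="cassandra-password-placeholder",
    address="cassandra.example.com",
    port=9042,
    replication=3,
)
```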
\n\n\n
[docs]class FireboltDestination(GeneratedAirbyteDestination):\n
[docs] class SQLInserts:\n
[docs] @public\n def __init__(\n self,\n ):\n self.method = "SQL"
\n\n
[docs] class ExternalTableViaS3:\n
[docs] @public\n def __init__(self, s3_bucket: str, s3_region: str, aws_key_id: str, aws_key_secret: str):\n self.method = "S3"\n self.s3_bucket = check.str_param(s3_bucket, "s3_bucket")\n self.s3_region = check.str_param(s3_region, "s3_region")\n self.aws_key_id = check.str_param(aws_key_id, "aws_key_id")\n self.aws_key_secret = check.str_param(aws_key_secret, "aws_key_secret")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n username: str,\n password: str,\n database: str,\n loading_method: Union[\n "FireboltDestination.SQLInserts", "FireboltDestination.ExternalTableViaS3"\n ],\n account: Optional[str] = None,\n host: Optional[str] = None,\n engine: Optional[str] = None,\n ):\n """Airbyte Destination for Firebolt.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/firebolt\n\n Args:\n name (str): The name of the destination.\n username (str): Firebolt email address you use to login.\n password (str): Firebolt password.\n account (Optional[str]): Firebolt account to login.\n host (Optional[str]): The host name of your Firebolt database.\n database (str): The database to connect to.\n engine (Optional[str]): Engine name or url to connect to.\n loading_method (Union[FireboltDestination.SQLInserts, FireboltDestination.ExternalTableViaS3]): Loading method used to select the way data will be uploaded to Firebolt\n """\n self.username = check.str_param(username, "username")\n self.password = check.str_param(password, "password")\n self.account = check.opt_str_param(account, "account")\n self.host = check.opt_str_param(host, "host")\n self.database = check.str_param(database, "database")\n self.engine = check.opt_str_param(engine, "engine")\n self.loading_method = check.inst_param(\n loading_method,\n "loading_method",\n (FireboltDestination.SQLInserts, FireboltDestination.ExternalTableViaS3),\n )\n super().__init__("Firebolt", name)
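A hedged sketch of `FireboltDestination` using the S3 external-table loading method; all values are placeholders and the import path is an assumption:

```python
from dagster_airbyte.managed.generated.destinations import FireboltDestination

firebolt_destination = FireboltDestination(
    name="my_firebolt_destination",
    username="user@example.com",
    password="firebolt-password-placeholder",
    database="analytics",
    loading_method=FireboltDestination.ExternalTableViaS3(
        s3_bucket="my-staging-bucket",
        s3_region="us-east-1",
        aws_key_id="AKIAEXAMPLE",
        aws_key_secret="aws-secret-placeholder",
    ),
)
```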
\n\n\n
[docs]class GoogleSheetsDestination(GeneratedAirbyteDestination):\n
[docs] class AuthenticationViaGoogleOAuth:\n
[docs] @public\n def __init__(self, client_id: str, client_secret: str, refresh_token: str):\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n spreadsheet_id: str,\n credentials: "GoogleSheetsDestination.AuthenticationViaGoogleOAuth",\n ):\n """Airbyte Destination for Google Sheets.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/google-sheets\n\n Args:\n name (str): The name of the destination.\n spreadsheet_id (str): The link to your spreadsheet. See this guide for more details.\n credentials (GoogleSheetsDestination.AuthenticationViaGoogleOAuth): Google API Credentials for connecting to Google Sheets and Google Drive APIs\n """\n self.spreadsheet_id = check.str_param(spreadsheet_id, "spreadsheet_id")\n self.credentials = check.inst_param(\n credentials, "credentials", GoogleSheetsDestination.AuthenticationViaGoogleOAuth\n )\n super().__init__("Google Sheets", name)
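A minimal sketch for `GoogleSheetsDestination`, passing the nested OAuth credentials object (placeholder values; import path assumed):

```python
from dagster_airbyte.managed.generated.destinations import GoogleSheetsDestination

sheets_destination = GoogleSheetsDestination(
    name="my_google_sheets_destination",
    spreadsheet_id="https://docs.google.com/spreadsheets/d/EXAMPLE_SPREADSHEET_ID/edit",
    credentials=GoogleSheetsDestination.AuthenticationViaGoogleOAuth(
        client_id="oauth-client-id-placeholder",
        client_secret="oauth-client-secret-placeholder",
        refresh_token="oauth-refresh-token-placeholder",
    ),
)
```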
\n\n\n
[docs]class DatabricksDestination(GeneratedAirbyteDestination):\n
[docs] class AmazonS3:\n
[docs] @public\n def __init__(\n self,\n data_source_type: str,\n s3_bucket_name: str,\n s3_bucket_path: str,\n s3_bucket_region: str,\n s3_access_key_id: str,\n s3_secret_access_key: str,\n file_name_pattern: Optional[str] = None,\n ):\n self.data_source_type = check.str_param(data_source_type, "data_source_type")\n self.s3_bucket_name = check.str_param(s3_bucket_name, "s3_bucket_name")\n self.s3_bucket_path = check.str_param(s3_bucket_path, "s3_bucket_path")\n self.s3_bucket_region = check.str_param(s3_bucket_region, "s3_bucket_region")\n self.s3_access_key_id = check.str_param(s3_access_key_id, "s3_access_key_id")\n self.s3_secret_access_key = check.str_param(\n s3_secret_access_key, "s3_secret_access_key"\n )\n self.file_name_pattern = check.opt_str_param(file_name_pattern, "file_name_pattern")
\n\n
[docs] class AzureBlobStorage:\n
[docs] @public\n def __init__(\n self,\n data_source_type: str,\n azure_blob_storage_account_name: str,\n azure_blob_storage_container_name: str,\n azure_blob_storage_sas_token: str,\n azure_blob_storage_endpoint_domain_name: Optional[str] = None,\n ):\n self.data_source_type = check.str_param(data_source_type, "data_source_type")\n self.azure_blob_storage_endpoint_domain_name = check.opt_str_param(\n azure_blob_storage_endpoint_domain_name, "azure_blob_storage_endpoint_domain_name"\n )\n self.azure_blob_storage_account_name = check.str_param(\n azure_blob_storage_account_name, "azure_blob_storage_account_name"\n )\n self.azure_blob_storage_container_name = check.str_param(\n azure_blob_storage_container_name, "azure_blob_storage_container_name"\n )\n self.azure_blob_storage_sas_token = check.str_param(\n azure_blob_storage_sas_token, "azure_blob_storage_sas_token"\n )
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n accept_terms: bool,\n databricks_server_hostname: str,\n databricks_http_path: str,\n databricks_personal_access_token: str,\n data_source: Union[\n "DatabricksDestination.AmazonS3", "DatabricksDestination.AzureBlobStorage"\n ],\n databricks_port: Optional[str] = None,\n database_schema: Optional[str] = None,\n purge_staging_data: Optional[bool] = None,\n ):\n """Airbyte Destination for Databricks.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/databricks\n\n Args:\n name (str): The name of the destination.\n accept_terms (bool): You must agree to the Databricks JDBC Driver Terms & Conditions to use this connector.\n databricks_server_hostname (str): Databricks Cluster Server Hostname.\n databricks_http_path (str): Databricks Cluster HTTP Path.\n databricks_port (Optional[str]): Databricks Cluster Port.\n databricks_personal_access_token (str): Databricks Personal Access Token for making authenticated requests.\n database_schema (Optional[str]): The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public".\n data_source (Union[DatabricksDestination.AmazonS3, DatabricksDestination.AzureBlobStorage]): Storage on which the delta lake is built.\n purge_staging_data (Optional[bool]): Default to 'true'. Switch it to 'false' for debugging purpose.\n """\n self.accept_terms = check.bool_param(accept_terms, "accept_terms")\n self.databricks_server_hostname = check.str_param(\n databricks_server_hostname, "databricks_server_hostname"\n )\n self.databricks_http_path = check.str_param(databricks_http_path, "databricks_http_path")\n self.databricks_port = check.opt_str_param(databricks_port, "databricks_port")\n self.databricks_personal_access_token = check.str_param(\n databricks_personal_access_token, "databricks_personal_access_token"\n )\n self.database_schema = check.opt_str_param(database_schema, "database_schema")\n self.data_source = check.inst_param(\n data_source,\n "data_source",\n (DatabricksDestination.AmazonS3, DatabricksDestination.AzureBlobStorage),\n )\n self.purge_staging_data = check.opt_bool_param(purge_staging_data, "purge_staging_data")\n super().__init__("Databricks", name)
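A sketch of `DatabricksDestination` backed by Amazon S3 staging storage. The `data_source_type` literal and all connection values are placeholders or assumptions, as is the import path:

```python
from dagster_airbyte.managed.generated.destinations import DatabricksDestination

databricks_destination = DatabricksDestination(
    name="my_databricks_destination",
    accept_terms=True,
    databricks_server_hostname="dbc-abc12345-6789.cloud.databricks.com",
    databricks_http_path="sql/protocolv1/o/0/0000-000000-placeholder",
    databricks_personal_access_token="dapi-token-placeholder",
    data_source=DatabricksDestination.AmazonS3(
        data_source_type="S3_STORAGE",  # assumed spec value
        s3_bucket_name="my-staging-bucket",
        s3_bucket_path="databricks-staging",
        s3_bucket_region="us-east-1",
        s3_access_key_id="AKIAEXAMPLE",
        s3_secret_access_key="aws-secret-placeholder",
    ),
    database_schema="public",
)
```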
\n\n\n
[docs]class BigqueryDenormalizedDestination(GeneratedAirbyteDestination):\n
[docs] class StandardInserts:\n
[docs] @public\n def __init__(\n self,\n ):\n self.method = "Standard"
\n\n
[docs] class HMACKey:\n
[docs] @public\n def __init__(self, hmac_key_access_id: str, hmac_key_secret: str):\n self.credential_type = "HMAC_KEY"\n self.hmac_key_access_id = check.str_param(hmac_key_access_id, "hmac_key_access_id")\n self.hmac_key_secret = check.str_param(hmac_key_secret, "hmac_key_secret")
\n\n
[docs] class GCSStaging:\n
[docs] @public\n def __init__(\n self,\n credential: "BigqueryDenormalizedDestination.HMACKey",\n gcs_bucket_name: str,\n gcs_bucket_path: str,\n keep_files_in_gcs_bucket: Optional[str] = None,\n ):\n self.method = "GCS Staging"\n self.credential = check.inst_param(\n credential, "credential", BigqueryDenormalizedDestination.HMACKey\n )\n self.gcs_bucket_name = check.str_param(gcs_bucket_name, "gcs_bucket_name")\n self.gcs_bucket_path = check.str_param(gcs_bucket_path, "gcs_bucket_path")\n self.keep_files_in_gcs_bucket = check.opt_str_param(\n keep_files_in_gcs_bucket, "keep_files_in_gcs_bucket"\n )
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n project_id: str,\n dataset_id: str,\n loading_method: Union[\n "BigqueryDenormalizedDestination.StandardInserts",\n "BigqueryDenormalizedDestination.GCSStaging",\n ],\n credentials_json: Optional[str] = None,\n dataset_location: Optional[str] = None,\n big_query_client_buffer_size_mb: Optional[int] = None,\n ):\n """Airbyte Destination for Bigquery Denormalized.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/bigquery\n\n Args:\n name (str): The name of the destination.\n project_id (str): The GCP project ID for the project containing the target BigQuery dataset. Read more here.\n dataset_id (str): The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.\n loading_method (Union[BigqueryDenormalizedDestination.StandardInserts, BigqueryDenormalizedDestination.GCSStaging]): Loading method used to send select the way data will be uploaded to BigQuery. Standard Inserts - Direct uploading using SQL INSERT statements. This method is extremely inefficient and provided only for quick testing. In almost all cases, you should use staging. GCS Staging - Writes large batches of records to a file, uploads the file to GCS, then uses COPY INTO table to upload the file. Recommended for most workloads for better speed and scalability. Read more about GCS Staging here.\n credentials_json (Optional[str]): The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.\n dataset_location (Optional[str]): The location of the dataset. Warning: Changes made after creation will not be applied. The default "US" value is used if not set explicitly. Read more here.\n big_query_client_buffer_size_mb (Optional[int]): Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.\n """\n self.project_id = check.str_param(project_id, "project_id")\n self.dataset_id = check.str_param(dataset_id, "dataset_id")\n self.loading_method = check.inst_param(\n loading_method,\n "loading_method",\n (\n BigqueryDenormalizedDestination.StandardInserts,\n BigqueryDenormalizedDestination.GCSStaging,\n ),\n )\n self.credentials_json = check.opt_str_param(credentials_json, "credentials_json")\n self.dataset_location = check.opt_str_param(dataset_location, "dataset_location")\n self.big_query_client_buffer_size_mb = check.opt_int_param(\n big_query_client_buffer_size_mb, "big_query_client_buffer_size_mb"\n )\n super().__init__("Bigquery Denormalized", name)
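A hedged sketch of `BigqueryDenormalizedDestination` using the recommended GCS staging loading method, with its nested `HMACKey` credential (placeholder IDs and keys; import path assumed):

```python
from dagster_airbyte.managed.generated.destinations import BigqueryDenormalizedDestination

bigquery_destination = BigqueryDenormalizedDestination(
    name="my_bigquery_destination",
    project_id="my-gcp-project",
    dataset_id="airbyte_syncs",
    loading_method=BigqueryDenormalizedDestination.GCSStaging(
        credential=BigqueryDenormalizedDestination.HMACKey(
            hmac_key_access_id="GOOG1EXAMPLE",
            hmac_key_secret="hmac-secret-placeholder",
        ),
        gcs_bucket_name="my-staging-bucket",
        gcs_bucket_path="bigquery-staging",
    ),
    dataset_location="US",
)
```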
\n\n\n
[docs]class SqliteDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(self, name: str, destination_path: str):\n """Airbyte Destination for Sqlite.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/sqlite\n\n Args:\n name (str): The name of the destination.\n destination_path (str): Path to the sqlite.db file. The file will be placed inside that local mount. For more information check out our docs\n """\n self.destination_path = check.str_param(destination_path, "destination_path")\n super().__init__("Sqlite", name)
\n\n\n
[docs]class MongodbDestination(GeneratedAirbyteDestination):\n
[docs] class StandaloneMongoDbInstance:\n
[docs] @public\n def __init__(self, instance: str, host: str, port: int, tls: Optional[bool] = None):\n self.instance = check.str_param(instance, "instance")\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.tls = check.opt_bool_param(tls, "tls")
\n\n
[docs] class ReplicaSet:\n
[docs] @public\n def __init__(self, instance: str, server_addresses: str, replica_set: Optional[str] = None):\n self.instance = check.str_param(instance, "instance")\n self.server_addresses = check.str_param(server_addresses, "server_addresses")\n self.replica_set = check.opt_str_param(replica_set, "replica_set")
\n\n
[docs] class MongoDBAtlas:\n
[docs] @public\n def __init__(self, instance: str, cluster_url: str):\n self.instance = check.str_param(instance, "instance")\n self.cluster_url = check.str_param(cluster_url, "cluster_url")
\n\n
[docs] class None_:\n
[docs] @public\n def __init__(\n self,\n ):\n self.authorization = "none"
\n\n
[docs] class LoginPassword:\n
[docs] @public\n def __init__(self, username: str, password: str):\n self.authorization = "login/password"\n self.username = check.str_param(username, "username")\n self.password = check.str_param(password, "password")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n instance_type: Union[\n "MongodbDestination.StandaloneMongoDbInstance",\n "MongodbDestination.ReplicaSet",\n "MongodbDestination.MongoDBAtlas",\n ],\n database: str,\n auth_type: Union["MongodbDestination.None_", "MongodbDestination.LoginPassword"],\n ):\n r"""Airbyte Destination for Mongodb.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/mongodb\n\n Args:\n name (str): The name of the destination.\n instance_type (Union[MongodbDestination.StandaloneMongoDbInstance, MongodbDestination.ReplicaSet, MongodbDestination.MongoDBAtlas]): MongoDb instance to connect to. For MongoDB Atlas and Replica Set TLS connection is used by default.\n database (str): Name of the database.\n auth_type (Union[MongodbDestination.None\\\\_, MongodbDestination.LoginPassword]): Authorization type.\n """\n self.instance_type = check.inst_param(\n instance_type,\n "instance_type",\n (\n MongodbDestination.StandaloneMongoDbInstance,\n MongodbDestination.ReplicaSet,\n MongodbDestination.MongoDBAtlas,\n ),\n )\n self.database = check.str_param(database, "database")\n self.auth_type = check.inst_param(\n auth_type, "auth_type", (MongodbDestination.None_, MongodbDestination.LoginPassword)\n )\n super().__init__("Mongodb", name)
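A sketch of `MongodbDestination` pointing at a MongoDB Atlas cluster with login/password auth. The `instance` literal is an assumption about the underlying Airbyte spec, and the import path and connection values are placeholders:

```python
from dagster_airbyte.managed.generated.destinations import MongodbDestination

mongodb_destination = MongodbDestination(
    name="my_mongodb_destination",
    instance_type=MongodbDestination.MongoDBAtlas(
        instance="atlas",  # assumed spec value
        cluster_url="mongodb+srv://cluster0.example.mongodb.net",
    ),
    database="analytics",
    auth_type=MongodbDestination.LoginPassword(
        username="mongo_user",
        password="mongo-password-placeholder",
    ),
)
```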
\n\n\n
[docs]class RocksetDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(self, name: str, api_key: str, workspace: str, api_server: Optional[str] = None):\n """Airbyte Destination for Rockset.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/rockset\n\n Args:\n name (str): The name of the destination.\n api_key (str): Rockset api key\n workspace (str): The Rockset workspace in which collections will be created + written to.\n api_server (Optional[str]): Rockset api URL\n """\n self.api_key = check.str_param(api_key, "api_key")\n self.workspace = check.str_param(workspace, "workspace")\n self.api_server = check.opt_str_param(api_server, "api_server")\n super().__init__("Rockset", name)
\n\n\n
[docs]class OracleDestination(GeneratedAirbyteDestination):\n
[docs] class Unencrypted:\n
[docs] @public\n def __init__(\n self,\n ):\n self.encryption_method = "unencrypted"
\n\n
[docs] class NativeNetworkEncryptionNNE:\n
[docs] @public\n def __init__(self, encryption_algorithm: Optional[str] = None):\n self.encryption_method = "client_nne"\n self.encryption_algorithm = check.opt_str_param(\n encryption_algorithm, "encryption_algorithm"\n )
\n\n
[docs] class TLSEncryptedVerifyCertificate:\n
[docs] @public\n def __init__(self, ssl_certificate: str):\n self.encryption_method = "encrypted_verify_certificate"\n self.ssl_certificate = check.str_param(ssl_certificate, "ssl_certificate")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n sid: str,\n username: str,\n encryption: Union[\n "OracleDestination.Unencrypted",\n "OracleDestination.NativeNetworkEncryptionNNE",\n "OracleDestination.TLSEncryptedVerifyCertificate",\n ],\n password: Optional[str] = None,\n jdbc_url_params: Optional[str] = None,\n schema: Optional[str] = None,\n ):\n """Airbyte Destination for Oracle.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/oracle\n\n Args:\n name (str): The name of the destination.\n host (str): The hostname of the database.\n port (int): The port of the database.\n sid (str): The System Identifier uniquely distinguishes the instance from any other instance on the same computer.\n username (str): The username to access the database. This user must have CREATE USER privileges in the database.\n password (Optional[str]): The password associated with the username.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).\n schema (Optional[str]): The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema.\n encryption (Union[OracleDestination.Unencrypted, OracleDestination.NativeNetworkEncryptionNNE, OracleDestination.TLSEncryptedVerifyCertificate]): The encryption method which is used when communicating with the database.\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.sid = check.str_param(sid, "sid")\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n self.schema = check.opt_str_param(schema, "schema")\n self.encryption = check.inst_param(\n encryption,\n "encryption",\n (\n OracleDestination.Unencrypted,\n OracleDestination.NativeNetworkEncryptionNNE,\n OracleDestination.TLSEncryptedVerifyCertificate,\n ),\n )\n super().__init__("Oracle", name)
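A sketch of `OracleDestination` using native network encryption; the encryption algorithm string and connection values are placeholders or assumptions, as is the import path:

```python
from dagster_airbyte.managed.generated.destinations import OracleDestination

oracle_destination = OracleDestination(
    name="my_oracle_destination",
    host="oracle.example.com",
    port=1521,
    sid="ORCL",
    username="airbyte",
    password="oracle-password-placeholder",
    encryption=OracleDestination.NativeNetworkEncryptionNNE(
        encryption_algorithm="AES256",  # assumed spec value
    ),
    schema="airbyte",
)
```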
\n\n\n
[docs]class CsvDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(self, name: str, destination_path: str):\n """Airbyte Destination for Csv.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/local-csv\n\n Args:\n name (str): The name of the destination.\n destination_path (str): Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs\n """\n self.destination_path = check.str_param(destination_path, "destination_path")\n super().__init__("Csv", name)
\n\n\n
[docs]class S3Destination(GeneratedAirbyteDestination):\n
[docs] class NoCompression:\n
[docs] @public\n def __init__(self, compression_type: Optional[str] = None):\n self.compression_type = check.opt_str_param(compression_type, "compression_type")
\n\n
[docs] class Deflate:\n
[docs] @public\n def __init__(self, codec: str, compression_level: int):\n self.codec = check.str_param(codec, "codec")\n self.compression_level = check.int_param(compression_level, "compression_level")
\n\n
[docs] class Bzip2:\n
[docs] @public\n def __init__(self, codec: str):\n self.codec = check.str_param(codec, "codec")
\n\n
[docs] class Xz:\n
[docs] @public\n def __init__(self, codec: str, compression_level: int):\n self.codec = check.str_param(codec, "codec")\n self.compression_level = check.int_param(compression_level, "compression_level")
\n\n
[docs] class Zstandard:\n
[docs] @public\n def __init__(\n self, codec: str, compression_level: int, include_checksum: Optional[bool] = None\n ):\n self.codec = check.str_param(codec, "codec")\n self.compression_level = check.int_param(compression_level, "compression_level")\n self.include_checksum = check.opt_bool_param(include_checksum, "include_checksum")
\n\n
[docs] class Snappy:\n
[docs] @public\n def __init__(self, codec: str):\n self.codec = check.str_param(codec, "codec")
\n\n
[docs] class AvroApacheAvro:\n
[docs] @public\n def __init__(\n self,\n format_type: str,\n compression_codec: Union[\n "S3Destination.NoCompression",\n "S3Destination.Deflate",\n "S3Destination.Bzip2",\n "S3Destination.Xz",\n "S3Destination.Zstandard",\n "S3Destination.Snappy",\n ],\n ):\n self.format_type = check.str_param(format_type, "format_type")\n self.compression_codec = check.inst_param(\n compression_codec,\n "compression_codec",\n (\n S3Destination.NoCompression,\n S3Destination.Deflate,\n S3Destination.Bzip2,\n S3Destination.Xz,\n S3Destination.Zstandard,\n S3Destination.Snappy,\n ),\n )
\n\n
[docs] class GZIP:\n
[docs] @public\n def __init__(self, compression_type: Optional[str] = None):\n self.compression_type = check.opt_str_param(compression_type, "compression_type")
\n\n
[docs] class CSVCommaSeparatedValues:\n
[docs] @public\n def __init__(\n self,\n format_type: str,\n flattening: str,\n compression: Union["S3Destination.NoCompression", "S3Destination.GZIP"],\n ):\n self.format_type = check.str_param(format_type, "format_type")\n self.flattening = check.str_param(flattening, "flattening")\n self.compression = check.inst_param(\n compression, "compression", (S3Destination.NoCompression, S3Destination.GZIP)\n )
\n\n
[docs] class JSONLinesNewlineDelimitedJSON:\n
[docs] @public\n def __init__(\n self,\n format_type: str,\n compression: Union["S3Destination.NoCompression", "S3Destination.GZIP"],\n ):\n self.format_type = check.str_param(format_type, "format_type")\n self.compression = check.inst_param(\n compression, "compression", (S3Destination.NoCompression, S3Destination.GZIP)\n )
\n\n
[docs] class ParquetColumnarStorage:\n
[docs] @public\n def __init__(\n self,\n format_type: str,\n compression_codec: Optional[str] = None,\n block_size_mb: Optional[int] = None,\n max_padding_size_mb: Optional[int] = None,\n page_size_kb: Optional[int] = None,\n dictionary_page_size_kb: Optional[int] = None,\n dictionary_encoding: Optional[bool] = None,\n ):\n self.format_type = check.str_param(format_type, "format_type")\n self.compression_codec = check.opt_str_param(compression_codec, "compression_codec")\n self.block_size_mb = check.opt_int_param(block_size_mb, "block_size_mb")\n self.max_padding_size_mb = check.opt_int_param(\n max_padding_size_mb, "max_padding_size_mb"\n )\n self.page_size_kb = check.opt_int_param(page_size_kb, "page_size_kb")\n self.dictionary_page_size_kb = check.opt_int_param(\n dictionary_page_size_kb, "dictionary_page_size_kb"\n )\n self.dictionary_encoding = check.opt_bool_param(\n dictionary_encoding, "dictionary_encoding"\n )
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n s3_bucket_name: str,\n s3_bucket_path: str,\n s3_bucket_region: str,\n format: Union[\n "S3Destination.AvroApacheAvro",\n "S3Destination.CSVCommaSeparatedValues",\n "S3Destination.JSONLinesNewlineDelimitedJSON",\n "S3Destination.ParquetColumnarStorage",\n ],\n access_key_id: Optional[str] = None,\n secret_access_key: Optional[str] = None,\n s3_endpoint: Optional[str] = None,\n s3_path_format: Optional[str] = None,\n file_name_pattern: Optional[str] = None,\n ):\n """Airbyte Destination for S3.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/s3\n\n Args:\n name (str): The name of the destination.\n access_key_id (Optional[str]): The access key ID to access the S3 bucket. Airbyte requires Read and Write permissions to the given bucket. Read more here.\n secret_access_key (Optional[str]): The corresponding secret to the access key ID. Read more here\n s3_bucket_name (str): The name of the S3 bucket. Read more here.\n s3_bucket_path (str): Directory under the S3 bucket where data will be written. Read more here\n s3_bucket_region (str): The region of the S3 bucket. See here for all region codes.\n format (Union[S3Destination.AvroApacheAvro, S3Destination.CSVCommaSeparatedValues, S3Destination.JSONLinesNewlineDelimitedJSON, S3Destination.ParquetColumnarStorage]): Format of the data output. See here for more details\n s3_endpoint (Optional[str]): Your S3 endpoint url. Read more here\n s3_path_format (Optional[str]): Format string on how data will be organized inside the S3 bucket directory. Read more here\n file_name_pattern (Optional[str]): The pattern allows you to set the file-name format for the S3 staging file(s)\n """\n self.access_key_id = check.opt_str_param(access_key_id, "access_key_id")\n self.secret_access_key = check.opt_str_param(secret_access_key, "secret_access_key")\n self.s3_bucket_name = check.str_param(s3_bucket_name, "s3_bucket_name")\n self.s3_bucket_path = check.str_param(s3_bucket_path, "s3_bucket_path")\n self.s3_bucket_region = check.str_param(s3_bucket_region, "s3_bucket_region")\n self.format = check.inst_param(\n format,\n "format",\n (\n S3Destination.AvroApacheAvro,\n S3Destination.CSVCommaSeparatedValues,\n S3Destination.JSONLinesNewlineDelimitedJSON,\n S3Destination.ParquetColumnarStorage,\n ),\n )\n self.s3_endpoint = check.opt_str_param(s3_endpoint, "s3_endpoint")\n self.s3_path_format = check.opt_str_param(s3_path_format, "s3_path_format")\n self.file_name_pattern = check.opt_str_param(file_name_pattern, "file_name_pattern")\n super().__init__("S3", name)
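A sketch of `S3Destination` writing Parquet output. The `format_type` and `compression_codec` literals are assumptions about the Airbyte S3 spec, and the bucket and credential values are placeholders (import path assumed):

```python
from dagster_airbyte.managed.generated.destinations import S3Destination

s3_destination = S3Destination(
    name="my_s3_destination",
    s3_bucket_name="my-airbyte-bucket",
    s3_bucket_path="raw/airbyte",
    s3_bucket_region="us-east-1",
    access_key_id="AKIAEXAMPLE",
    secret_access_key="aws-secret-placeholder",
    format=S3Destination.ParquetColumnarStorage(
        format_type="Parquet",  # assumed spec value
        compression_codec="SNAPPY",  # assumed spec value
    ),
)
```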
\n\n\n
[docs]class AwsDatalakeDestination(GeneratedAirbyteDestination):\n
[docs] class IAMRole:\n
[docs] @public\n def __init__(self, role_arn: str):\n self.credentials_title = "IAM Role"\n self.role_arn = check.str_param(role_arn, "role_arn")
\n\n
[docs] class IAMUser:\n
[docs] @public\n def __init__(self, aws_access_key_id: str, aws_secret_access_key: str):\n self.credentials_title = "IAM User"\n self.aws_access_key_id = check.str_param(aws_access_key_id, "aws_access_key_id")\n self.aws_secret_access_key = check.str_param(\n aws_secret_access_key, "aws_secret_access_key"\n )
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n region: str,\n credentials: Union["AwsDatalakeDestination.IAMRole", "AwsDatalakeDestination.IAMUser"],\n bucket_name: str,\n bucket_prefix: str,\n aws_account_id: Optional[str] = None,\n lakeformation_database_name: Optional[str] = None,\n ):\n """Airbyte Destination for Aws Datalake.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/aws-datalake\n\n Args:\n name (str): The name of the destination.\n aws_account_id (Optional[str]): target aws account id\n region (str): Region name\n credentials (Union[AwsDatalakeDestination.IAMRole, AwsDatalakeDestination.IAMUser]): Choose How to Authenticate to AWS.\n bucket_name (str): Name of the bucket\n bucket_prefix (str): S3 prefix\n lakeformation_database_name (Optional[str]): Which database to use\n """\n self.aws_account_id = check.opt_str_param(aws_account_id, "aws_account_id")\n self.region = check.str_param(region, "region")\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (AwsDatalakeDestination.IAMRole, AwsDatalakeDestination.IAMUser),\n )\n self.bucket_name = check.str_param(bucket_name, "bucket_name")\n self.bucket_prefix = check.str_param(bucket_prefix, "bucket_prefix")\n self.lakeformation_database_name = check.opt_str_param(\n lakeformation_database_name, "lakeformation_database_name"\n )\n super().__init__("Aws Datalake", name)
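A sketch of `AwsDatalakeDestination` authenticating with an IAM role (placeholder ARN, bucket, and database names; import path assumed):

```python
from dagster_airbyte.managed.generated.destinations import AwsDatalakeDestination

datalake_destination = AwsDatalakeDestination(
    name="my_aws_datalake_destination",
    region="us-east-1",
    credentials=AwsDatalakeDestination.IAMRole(
        role_arn="arn:aws:iam::123456789012:role/airbyte-datalake-role",
    ),
    bucket_name="my-datalake-bucket",
    bucket_prefix="airbyte",
    lakeformation_database_name="airbyte_raw",
)
```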
\n\n\n
[docs]class MssqlDestination(GeneratedAirbyteDestination):\n
[docs] class Unencrypted:\n
[docs] @public\n def __init__(\n self,\n ):\n self.ssl_method = "unencrypted"
\n\n
[docs] class EncryptedTrustServerCertificate:\n
[docs] @public\n def __init__(\n self,\n ):\n self.ssl_method = "encrypted_trust_server_certificate"
\n\n
[docs] class EncryptedVerifyCertificate:\n
[docs] @public\n def __init__(self, hostNameInCertificate: Optional[str] = None):\n self.ssl_method = "encrypted_verify_certificate"\n self.hostNameInCertificate = check.opt_str_param(\n hostNameInCertificate, "hostNameInCertificate"\n )
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n database: str,\n schema: str,\n username: str,\n ssl_method: Union[\n "MssqlDestination.Unencrypted",\n "MssqlDestination.EncryptedTrustServerCertificate",\n "MssqlDestination.EncryptedVerifyCertificate",\n ],\n password: Optional[str] = None,\n jdbc_url_params: Optional[str] = None,\n ):\n """Airbyte Destination for Mssql.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/mssql\n\n Args:\n name (str): The name of the destination.\n host (str): The host name of the MSSQL database.\n port (int): The port of the MSSQL database.\n database (str): The name of the MSSQL database.\n schema (str): The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".\n username (str): The username which is used to access the database.\n password (Optional[str]): The password associated with this username.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).\n ssl_method (Union[MssqlDestination.Unencrypted, MssqlDestination.EncryptedTrustServerCertificate, MssqlDestination.EncryptedVerifyCertificate]): The encryption method which is used to communicate with the database.\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.database = check.str_param(database, "database")\n self.schema = check.str_param(schema, "schema")\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n self.ssl_method = check.inst_param(\n ssl_method,\n "ssl_method",\n (\n MssqlDestination.Unencrypted,\n MssqlDestination.EncryptedTrustServerCertificate,\n MssqlDestination.EncryptedVerifyCertificate,\n ),\n )\n super().__init__("Mssql", name)
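A sketch of `MssqlDestination` that trusts the server certificate for encryption (placeholder connection values; import path assumed):

```python
from dagster_airbyte.managed.generated.destinations import MssqlDestination

mssql_destination = MssqlDestination(
    name="my_mssql_destination",
    host="mssql.example.com",
    port=1433,
    database="analytics",
    schema="public",
    username="airbyte_user",
    password="mssql-password-placeholder",
    ssl_method=MssqlDestination.EncryptedTrustServerCertificate(),
)
```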
\n\n\n
[docs]class PubsubDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(self, name: str, project_id: str, topic_id: str, credentials_json: str):\n """Airbyte Destination for Pubsub.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/pubsub\n\n Args:\n name (str): The name of the destination.\n project_id (str): The GCP project ID for the project containing the target PubSub.\n topic_id (str): The PubSub topic ID in the given GCP project ID.\n credentials_json (str): The contents of the JSON service account key. Check out the docs if you need help generating this key.\n """\n self.project_id = check.str_param(project_id, "project_id")\n self.topic_id = check.str_param(topic_id, "topic_id")\n self.credentials_json = check.str_param(credentials_json, "credentials_json")\n super().__init__("Pubsub", name)
\n\n\n
[docs]class R2Destination(GeneratedAirbyteDestination):\n
[docs] class NoCompression:\n
[docs] @public\n def __init__(self, compression_type: Optional[str] = None):\n self.compression_type = check.opt_str_param(compression_type, "compression_type")
\n\n
[docs] class Deflate:\n
[docs] @public\n def __init__(self, codec: str, compression_level: int):\n self.codec = check.str_param(codec, "codec")\n self.compression_level = check.int_param(compression_level, "compression_level")
\n\n
[docs] class Bzip2:\n
[docs] @public\n def __init__(self, codec: str):\n self.codec = check.str_param(codec, "codec")
\n\n
[docs] class Xz:\n
[docs] @public\n def __init__(self, codec: str, compression_level: int):\n self.codec = check.str_param(codec, "codec")\n self.compression_level = check.int_param(compression_level, "compression_level")
\n\n
[docs] class Zstandard:\n
[docs] @public\n def __init__(\n self, codec: str, compression_level: int, include_checksum: Optional[bool] = None\n ):\n self.codec = check.str_param(codec, "codec")\n self.compression_level = check.int_param(compression_level, "compression_level")\n self.include_checksum = check.opt_bool_param(include_checksum, "include_checksum")
\n\n
[docs] class Snappy:\n
[docs] @public\n def __init__(self, codec: str):\n self.codec = check.str_param(codec, "codec")
\n\n
[docs] class AvroApacheAvro:\n
[docs] @public\n def __init__(\n self,\n format_type: str,\n compression_codec: Union[\n "R2Destination.NoCompression",\n "R2Destination.Deflate",\n "R2Destination.Bzip2",\n "R2Destination.Xz",\n "R2Destination.Zstandard",\n "R2Destination.Snappy",\n ],\n ):\n self.format_type = check.str_param(format_type, "format_type")\n self.compression_codec = check.inst_param(\n compression_codec,\n "compression_codec",\n (\n R2Destination.NoCompression,\n R2Destination.Deflate,\n R2Destination.Bzip2,\n R2Destination.Xz,\n R2Destination.Zstandard,\n R2Destination.Snappy,\n ),\n )
\n\n
[docs] class GZIP:\n
[docs] @public\n def __init__(self, compression_type: Optional[str] = None):\n self.compression_type = check.opt_str_param(compression_type, "compression_type")
\n\n
[docs] class CSVCommaSeparatedValues:\n
[docs] @public\n def __init__(\n self,\n format_type: str,\n flattening: str,\n compression: Union["R2Destination.NoCompression", "R2Destination.GZIP"],\n ):\n self.format_type = check.str_param(format_type, "format_type")\n self.flattening = check.str_param(flattening, "flattening")\n self.compression = check.inst_param(\n compression, "compression", (R2Destination.NoCompression, R2Destination.GZIP)\n )
\n\n
[docs] class JSONLinesNewlineDelimitedJSON:\n
[docs] @public\n def __init__(\n self,\n format_type: str,\n compression: Union["R2Destination.NoCompression", "R2Destination.GZIP"],\n ):\n self.format_type = check.str_param(format_type, "format_type")\n self.compression = check.inst_param(\n compression, "compression", (R2Destination.NoCompression, R2Destination.GZIP)\n )
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n account_id: str,\n access_key_id: str,\n secret_access_key: str,\n s3_bucket_name: str,\n s3_bucket_path: str,\n format: Union[\n "R2Destination.AvroApacheAvro",\n "R2Destination.CSVCommaSeparatedValues",\n "R2Destination.JSONLinesNewlineDelimitedJSON",\n ],\n s3_path_format: Optional[str] = None,\n file_name_pattern: Optional[str] = None,\n ):\n """Airbyte Destination for R2.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/r2\n\n Args:\n name (str): The name of the destination.\n account_id (str): Cloudflare account ID\n access_key_id (str): The access key ID to access the R2 bucket. Airbyte requires Read and Write permissions to the given bucket. Read more here.\n secret_access_key (str): The corresponding secret to the access key ID. Read more here\n s3_bucket_name (str): The name of the R2 bucket. Read more here.\n s3_bucket_path (str): Directory under the R2 bucket where data will be written.\n format (Union[R2Destination.AvroApacheAvro, R2Destination.CSVCommaSeparatedValues, R2Destination.JSONLinesNewlineDelimitedJSON]): Format of the data output. See here for more details\n s3_path_format (Optional[str]): Format string on how data will be organized inside the R2 bucket directory. Read more here\n file_name_pattern (Optional[str]): The pattern allows you to set the file-name format for the R2 staging file(s)\n """\n self.account_id = check.str_param(account_id, "account_id")\n self.access_key_id = check.str_param(access_key_id, "access_key_id")\n self.secret_access_key = check.str_param(secret_access_key, "secret_access_key")\n self.s3_bucket_name = check.str_param(s3_bucket_name, "s3_bucket_name")\n self.s3_bucket_path = check.str_param(s3_bucket_path, "s3_bucket_path")\n self.format = check.inst_param(\n format,\n "format",\n (\n R2Destination.AvroApacheAvro,\n R2Destination.CSVCommaSeparatedValues,\n R2Destination.JSONLinesNewlineDelimitedJSON,\n ),\n )\n self.s3_path_format = check.opt_str_param(s3_path_format, "s3_path_format")\n self.file_name_pattern = check.opt_str_param(file_name_pattern, "file_name_pattern")\n super().__init__("R2", name)
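A sketch of `R2Destination` writing gzipped JSON Lines output; the `format_type` and `compression_type` literals are assumptions, and the account, bucket, and credential values are placeholders (import path assumed):

```python
from dagster_airbyte.managed.generated.destinations import R2Destination

r2_destination = R2Destination(
    name="my_r2_destination",
    account_id="0123456789abcdef0123456789abcdef",
    access_key_id="r2-access-key-placeholder",
    secret_access_key="r2-secret-placeholder",
    s3_bucket_name="my-r2-bucket",
    s3_bucket_path="airbyte",
    format=R2Destination.JSONLinesNewlineDelimitedJSON(
        format_type="JSONL",  # assumed spec value
        compression=R2Destination.GZIP(compression_type="GZIP"),  # assumed spec value
    ),
)
```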
\n\n\n
[docs]class JdbcDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(\n self,\n name: str,\n username: str,\n jdbc_url: str,\n password: Optional[str] = None,\n schema: Optional[str] = None,\n ):\n """Airbyte Destination for Jdbc.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/postgres\n\n Args:\n name (str): The name of the destination.\n username (str): The username which is used to access the database.\n password (Optional[str]): The password associated with this username.\n jdbc_url (str): JDBC formatted url. See the standard here.\n schema (Optional[str]): If you leave the schema unspecified, JDBC defaults to a schema named "public".\n """\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.jdbc_url = check.str_param(jdbc_url, "jdbc_url")\n self.schema = check.opt_str_param(schema, "schema")\n super().__init__("Jdbc", name)
\n\n\n
[docs]class KeenDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(\n self, name: str, project_id: str, api_key: str, infer_timestamp: Optional[bool] = None\n ):\n """Airbyte Destination for Keen.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/keen\n\n Args:\n name (str): The name of the destination.\n project_id (str): To get the Keen Project ID, navigate to the Access tab from the left-hand side panel and check the Project Details section.\n api_key (str): To get the Keen Master API Key, navigate to the Access tab from the left-hand side panel and check the Project Details section.\n infer_timestamp (Optional[bool]): Allow the connector to guess the keen.timestamp value based on the streamed data.\n """\n self.project_id = check.str_param(project_id, "project_id")\n self.api_key = check.str_param(api_key, "api_key")\n self.infer_timestamp = check.opt_bool_param(infer_timestamp, "infer_timestamp")\n super().__init__("Keen", name)
\n\n\n
[docs]class TidbDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n database: str,\n username: str,\n password: Optional[str] = None,\n ssl: Optional[bool] = None,\n jdbc_url_params: Optional[str] = None,\n ):\n """Airbyte Destination for Tidb.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/tidb\n\n Args:\n name (str): The name of the destination.\n host (str): Hostname of the database.\n port (int): Port of the database.\n database (str): Name of the database.\n username (str): Username to use to access the database.\n password (Optional[str]): Password associated with the username.\n ssl (Optional[bool]): Encrypt data using SSL.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.database = check.str_param(database, "database")\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.ssl = check.opt_bool_param(ssl, "ssl")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n super().__init__("Tidb", name)
\n\n\n
[docs]class FirestoreDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(self, name: str, project_id: str, credentials_json: Optional[str] = None):\n """Airbyte Destination for Firestore.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/firestore\n\n Args:\n name (str): The name of the destination.\n project_id (str): The GCP project ID for the project containing the target BigQuery dataset.\n credentials_json (Optional[str]): The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.\n """\n self.project_id = check.str_param(project_id, "project_id")\n self.credentials_json = check.opt_str_param(credentials_json, "credentials_json")\n super().__init__("Firestore", name)
\n\n\n
[docs]class ScyllaDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(\n self,\n name: str,\n keyspace: str,\n username: str,\n password: str,\n address: str,\n port: int,\n replication: Optional[int] = None,\n ):\n """Airbyte Destination for Scylla.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/scylla\n\n Args:\n name (str): The name of the destination.\n keyspace (str): Default Scylla keyspace to create data in.\n username (str): Username to use to access Scylla.\n password (str): Password associated with Scylla.\n address (str): Address to connect to.\n port (int): Port of Scylla.\n replication (Optional[int]): Indicates how many nodes the data should be replicated to.\n """\n self.keyspace = check.str_param(keyspace, "keyspace")\n self.username = check.str_param(username, "username")\n self.password = check.str_param(password, "password")\n self.address = check.str_param(address, "address")\n self.port = check.int_param(port, "port")\n self.replication = check.opt_int_param(replication, "replication")\n super().__init__("Scylla", name)
\n\n\n
[docs]class RedisDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(\n self, name: str, host: str, port: int, username: str, password: str, cache_type: str\n ):\n """Airbyte Destination for Redis.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/redis\n\n Args:\n name (str): The name of the destination.\n host (str): Redis host to connect to.\n port (int): Port of Redis.\n username (str): Username associated with Redis.\n password (str): Password associated with Redis.\n cache_type (str): Redis cache type to store data in.\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.username = check.str_param(username, "username")\n self.password = check.str_param(password, "password")\n self.cache_type = check.str_param(cache_type, "cache_type")\n super().__init__("Redis", name)
\n\n\n
[docs]class MqttDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(\n self,\n name: str,\n broker_host: str,\n broker_port: int,\n use_tls: bool,\n topic_pattern: str,\n publisher_sync: bool,\n connect_timeout: int,\n automatic_reconnect: bool,\n clean_session: bool,\n message_retained: bool,\n message_qos: str,\n username: Optional[str] = None,\n password: Optional[str] = None,\n topic_test: Optional[str] = None,\n client: Optional[str] = None,\n ):\n """Airbyte Destination for Mqtt.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/mqtt\n\n Args:\n name (str): The name of the destination.\n broker_host (str): Host of the broker to connect to.\n broker_port (int): Port of the broker.\n use_tls (bool): Whether to use TLS encryption on the connection.\n username (Optional[str]): User name to use for the connection.\n password (Optional[str]): Password to use for the connection.\n topic_pattern (str): Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.\n topic_test (Optional[str]): Topic to test if Airbyte can produce messages.\n client (Optional[str]): A client identifier that is unique on the server being connected to.\n publisher_sync (bool): Wait synchronously until the record has been sent to the broker.\n connect_timeout (int): Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established.\n automatic_reconnect (bool): Whether the client will automatically attempt to reconnect to the server if the connection is lost.\n clean_session (bool): Whether the client and server should remember state across restarts and reconnects.\n message_retained (bool): Whether or not the publish message should be retained by the messaging engine.\n message_qos (str): Quality of service used for each message to be delivered.\n """\n self.broker_host = check.str_param(broker_host, "broker_host")\n self.broker_port = check.int_param(broker_port, "broker_port")\n self.use_tls = check.bool_param(use_tls, "use_tls")\n self.username = check.opt_str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.topic_pattern = check.str_param(topic_pattern, "topic_pattern")\n self.topic_test = check.opt_str_param(topic_test, "topic_test")\n self.client = check.opt_str_param(client, "client")\n self.publisher_sync = check.bool_param(publisher_sync, "publisher_sync")\n self.connect_timeout = check.int_param(connect_timeout, "connect_timeout")\n self.automatic_reconnect = check.bool_param(automatic_reconnect, "automatic_reconnect")\n self.clean_session = check.bool_param(clean_session, "clean_session")\n self.message_retained = check.bool_param(message_retained, "message_retained")\n self.message_qos = check.str_param(message_qos, "message_qos")\n super().__init__("Mqtt", name)
\n\n\n
[docs]class RedshiftDestination(GeneratedAirbyteDestination):\n
[docs] class Standard:\n
[docs] @public\n def __init__(\n self,\n ):\n self.method = "Standard"
\n\n
[docs] class NoEncryption:\n
[docs] @public\n def __init__(\n self,\n ):\n self.encryption_type = "none"
\n\n
[docs] class AESCBCEnvelopeEncryption:\n
[docs] @public\n def __init__(self, key_encrypting_key: Optional[str] = None):\n self.encryption_type = "aes_cbc_envelope"\n self.key_encrypting_key = check.opt_str_param(key_encrypting_key, "key_encrypting_key")
\n\n
[docs] class S3Staging:\n
[docs] @public\n def __init__(\n self,\n s3_bucket_name: str,\n s3_bucket_region: str,\n access_key_id: str,\n secret_access_key: str,\n encryption: Union[\n "RedshiftDestination.NoEncryption", "RedshiftDestination.AESCBCEnvelopeEncryption"\n ],\n s3_bucket_path: Optional[str] = None,\n file_name_pattern: Optional[str] = None,\n purge_staging_data: Optional[bool] = None,\n ):\n self.method = "S3 Staging"\n self.s3_bucket_name = check.str_param(s3_bucket_name, "s3_bucket_name")\n self.s3_bucket_path = check.opt_str_param(s3_bucket_path, "s3_bucket_path")\n self.s3_bucket_region = check.str_param(s3_bucket_region, "s3_bucket_region")\n self.file_name_pattern = check.opt_str_param(file_name_pattern, "file_name_pattern")\n self.access_key_id = check.str_param(access_key_id, "access_key_id")\n self.secret_access_key = check.str_param(secret_access_key, "secret_access_key")\n self.purge_staging_data = check.opt_bool_param(purge_staging_data, "purge_staging_data")\n self.encryption = check.inst_param(\n encryption,\n "encryption",\n (RedshiftDestination.NoEncryption, RedshiftDestination.AESCBCEnvelopeEncryption),\n )
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n username: str,\n password: str,\n database: str,\n schema: str,\n uploading_method: Union["RedshiftDestination.Standard", "RedshiftDestination.S3Staging"],\n jdbc_url_params: Optional[str] = None,\n ):\n """Airbyte Destination for Redshift.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/redshift\n\n Args:\n name (str): The name of the destination.\n host (str): Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com)\n port (int): Port of the database.\n username (str): Username to use to access the database.\n password (str): Password associated with the username.\n database (str): Name of the database.\n schema (str): The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public".\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).\n uploading_method (Union[RedshiftDestination.Standard, RedshiftDestination.S3Staging]): The method how the data will be uploaded to the database.\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.username = check.str_param(username, "username")\n self.password = check.str_param(password, "password")\n self.database = check.str_param(database, "database")\n self.schema = check.str_param(schema, "schema")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n self.uploading_method = check.inst_param(\n uploading_method,\n "uploading_method",\n (RedshiftDestination.Standard, RedshiftDestination.S3Staging),\n )\n super().__init__("Redshift", name)
\n\n\n
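A minimal usage sketch for the generated RedshiftDestination above (illustrative only; the host, bucket, and credential values are placeholders, and the import path follows the module shown on this page):

from dagster_airbyte.managed.generated.destinations import RedshiftDestination

# Stage records in S3 before loading them into Redshift; NoEncryption leaves the
# staged files unencrypted (AESCBCEnvelopeEncryption is the alternative).
redshift = RedshiftDestination(
    name="my_redshift",
    host="my-cluster.abc123.us-west-2.redshift.amazonaws.com",
    port=5439,
    username="airbyte_user",
    password="<password>",
    database="analytics",
    schema="public",
    uploading_method=RedshiftDestination.S3Staging(
        s3_bucket_name="my-staging-bucket",
        s3_bucket_region="us-west-2",
        access_key_id="<aws-access-key-id>",
        secret_access_key="<aws-secret-access-key>",
        encryption=RedshiftDestination.NoEncryption(),
    ),
)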
[docs]class PulsarDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(\n self,\n name: str,\n brokers: str,\n use_tls: bool,\n topic_type: str,\n topic_tenant: str,\n topic_namespace: str,\n topic_pattern: str,\n compression_type: str,\n send_timeout_ms: int,\n max_pending_messages: int,\n max_pending_messages_across_partitions: int,\n batching_enabled: bool,\n batching_max_messages: int,\n batching_max_publish_delay: int,\n block_if_queue_full: bool,\n topic_test: Optional[str] = None,\n producer_name: Optional[str] = None,\n producer_sync: Optional[bool] = None,\n ):\n """Airbyte Destination for Pulsar.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/pulsar\n\n Args:\n name (str): The name of the destination.\n brokers (str): A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster.\n use_tls (bool): Whether to use TLS encryption on the connection.\n topic_type (str): It identifies type of topic. Pulsar supports two kind of topics: persistent and non-persistent. In persistent topic, all messages are durably persisted on disk (that means on multiple disks unless the broker is standalone), whereas non-persistent topic does not persist message into storage disk.\n topic_tenant (str): The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and spread across clusters.\n topic_namespace (str): The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces.\n topic_pattern (str): Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.\n topic_test (Optional[str]): Topic to test if Airbyte can produce messages.\n producer_name (Optional[str]): Name for the producer. If not filled, the system will generate a globally unique name which can be accessed with.\n producer_sync (Optional[bool]): Wait synchronously until the record has been sent to Pulsar.\n compression_type (str): Compression type for the producer.\n send_timeout_ms (int): If a message is not acknowledged by a server before the send-timeout expires, an error occurs (in ms).\n max_pending_messages (int): The maximum size of a queue holding pending messages.\n max_pending_messages_across_partitions (int): The maximum number of pending messages across partitions.\n batching_enabled (bool): Control whether automatic batching of messages is enabled for the producer.\n batching_max_messages (int): Maximum number of messages permitted in a batch.\n batching_max_publish_delay (int): Time period in milliseconds within which the messages sent will be batched.\n block_if_queue_full (bool): If the send operation should block when the outgoing message queue is full.\n """\n self.brokers = check.str_param(brokers, "brokers")\n self.use_tls = check.bool_param(use_tls, "use_tls")\n self.topic_type = check.str_param(topic_type, "topic_type")\n self.topic_tenant = check.str_param(topic_tenant, "topic_tenant")\n self.topic_namespace = check.str_param(topic_namespace, "topic_namespace")\n self.topic_pattern = check.str_param(topic_pattern, "topic_pattern")\n self.topic_test = check.opt_str_param(topic_test, "topic_test")\n self.producer_name = check.opt_str_param(producer_name, "producer_name")\n self.producer_sync = check.opt_bool_param(producer_sync, "producer_sync")\n self.compression_type = check.str_param(compression_type, "compression_type")\n self.send_timeout_ms = check.int_param(send_timeout_ms, "send_timeout_ms")\n self.max_pending_messages = check.int_param(max_pending_messages, "max_pending_messages")\n self.max_pending_messages_across_partitions = check.int_param(\n max_pending_messages_across_partitions, "max_pending_messages_across_partitions"\n )\n self.batching_enabled = check.bool_param(batching_enabled, "batching_enabled")\n self.batching_max_messages = check.int_param(batching_max_messages, "batching_max_messages")\n self.batching_max_publish_delay = check.int_param(\n batching_max_publish_delay, "batching_max_publish_delay"\n )\n self.block_if_queue_full = check.bool_param(block_if_queue_full, "block_if_queue_full")\n super().__init__("Pulsar", name)
\n\n\n
[docs]class SnowflakeDestination(GeneratedAirbyteDestination):\n
[docs] class OAuth20:\n
[docs] @public\n def __init__(\n self,\n access_token: str,\n refresh_token: str,\n auth_type: Optional[str] = None,\n client_id: Optional[str] = None,\n client_secret: Optional[str] = None,\n ):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.client_id = check.opt_str_param(client_id, "client_id")\n self.client_secret = check.opt_str_param(client_secret, "client_secret")\n self.access_token = check.str_param(access_token, "access_token")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] class KeyPairAuthentication:\n
[docs] @public\n def __init__(\n self,\n private_key: str,\n auth_type: Optional[str] = None,\n private_key_password: Optional[str] = None,\n ):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.private_key = check.str_param(private_key, "private_key")\n self.private_key_password = check.opt_str_param(\n private_key_password, "private_key_password"\n )
\n\n
[docs] class UsernameAndPassword:\n
[docs] @public\n def __init__(self, password: str):\n self.password = check.str_param(password, "password")
\n\n
[docs] class SelectAnotherOption:\n
[docs] @public\n def __init__(self, method: str):\n self.method = check.str_param(method, "method")
\n\n
[docs] class RecommendedInternalStaging:\n
[docs] @public\n def __init__(self, method: str):\n self.method = check.str_param(method, "method")
\n\n
[docs] class NoEncryption:\n
[docs] @public\n def __init__(\n self,\n ):\n self.encryption_type = "none"
\n\n
[docs] class AESCBCEnvelopeEncryption:\n
[docs] @public\n def __init__(self, key_encrypting_key: Optional[str] = None):\n self.encryption_type = "aes_cbc_envelope"\n self.key_encrypting_key = check.opt_str_param(key_encrypting_key, "key_encrypting_key")
\n\n
[docs] class AWSS3Staging:\n
[docs] @public\n def __init__(\n self,\n method: str,\n s3_bucket_name: str,\n access_key_id: str,\n secret_access_key: str,\n encryption: Union[\n "SnowflakeDestination.NoEncryption", "SnowflakeDestination.AESCBCEnvelopeEncryption"\n ],\n s3_bucket_region: Optional[str] = None,\n purge_staging_data: Optional[bool] = None,\n file_name_pattern: Optional[str] = None,\n ):\n self.method = check.str_param(method, "method")\n self.s3_bucket_name = check.str_param(s3_bucket_name, "s3_bucket_name")\n self.s3_bucket_region = check.opt_str_param(s3_bucket_region, "s3_bucket_region")\n self.access_key_id = check.str_param(access_key_id, "access_key_id")\n self.secret_access_key = check.str_param(secret_access_key, "secret_access_key")\n self.purge_staging_data = check.opt_bool_param(purge_staging_data, "purge_staging_data")\n self.encryption = check.inst_param(\n encryption,\n "encryption",\n (SnowflakeDestination.NoEncryption, SnowflakeDestination.AESCBCEnvelopeEncryption),\n )\n self.file_name_pattern = check.opt_str_param(file_name_pattern, "file_name_pattern")
\n\n
[docs] class GoogleCloudStorageStaging:\n
[docs] @public\n def __init__(self, method: str, project_id: str, bucket_name: str, credentials_json: str):\n self.method = check.str_param(method, "method")\n self.project_id = check.str_param(project_id, "project_id")\n self.bucket_name = check.str_param(bucket_name, "bucket_name")\n self.credentials_json = check.str_param(credentials_json, "credentials_json")
\n\n
[docs] class AzureBlobStorageStaging:\n
[docs] @public\n def __init__(\n self,\n method: str,\n azure_blob_storage_account_name: str,\n azure_blob_storage_container_name: str,\n azure_blob_storage_sas_token: str,\n azure_blob_storage_endpoint_domain_name: Optional[str] = None,\n ):\n self.method = check.str_param(method, "method")\n self.azure_blob_storage_endpoint_domain_name = check.opt_str_param(\n azure_blob_storage_endpoint_domain_name, "azure_blob_storage_endpoint_domain_name"\n )\n self.azure_blob_storage_account_name = check.str_param(\n azure_blob_storage_account_name, "azure_blob_storage_account_name"\n )\n self.azure_blob_storage_container_name = check.str_param(\n azure_blob_storage_container_name, "azure_blob_storage_container_name"\n )\n self.azure_blob_storage_sas_token = check.str_param(\n azure_blob_storage_sas_token, "azure_blob_storage_sas_token"\n )
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n role: str,\n warehouse: str,\n database: str,\n schema: str,\n username: str,\n credentials: Union[\n "SnowflakeDestination.OAuth20",\n "SnowflakeDestination.KeyPairAuthentication",\n "SnowflakeDestination.UsernameAndPassword",\n ],\n loading_method: Union[\n "SnowflakeDestination.SelectAnotherOption",\n "SnowflakeDestination.RecommendedInternalStaging",\n "SnowflakeDestination.AWSS3Staging",\n "SnowflakeDestination.GoogleCloudStorageStaging",\n "SnowflakeDestination.AzureBlobStorageStaging",\n ],\n jdbc_url_params: Optional[str] = None,\n ):\n """Airbyte Destination for Snowflake.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/snowflake\n\n Args:\n name (str): The name of the destination.\n host (str): Enter your Snowflake account's locator (in the format ...snowflakecomputing.com)\n role (str): Enter the role that you want to use to access Snowflake\n warehouse (str): Enter the name of the warehouse that you want to sync data into\n database (str): Enter the name of the database you want to sync data into\n schema (str): Enter the name of the default schema\n username (str): Enter the name of the user you want to use to access the database\n jdbc_url_params (Optional[str]): Enter the additional properties to pass to the JDBC URL string when connecting to the database (formatted as key=value pairs separated by the symbol &). Example: key1=value1&key2=value2&key3=value3\n loading_method (Union[SnowflakeDestination.SelectAnotherOption, SnowflakeDestination.RecommendedInternalStaging, SnowflakeDestination.AWSS3Staging, SnowflakeDestination.GoogleCloudStorageStaging, SnowflakeDestination.AzureBlobStorageStaging]): Select a data staging method\n """\n self.host = check.str_param(host, "host")\n self.role = check.str_param(role, "role")\n self.warehouse = check.str_param(warehouse, "warehouse")\n self.database = check.str_param(database, "database")\n self.schema = check.str_param(schema, "schema")\n self.username = check.str_param(username, "username")\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (\n SnowflakeDestination.OAuth20,\n SnowflakeDestination.KeyPairAuthentication,\n SnowflakeDestination.UsernameAndPassword,\n ),\n )\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n self.loading_method = check.inst_param(\n loading_method,\n "loading_method",\n (\n SnowflakeDestination.SelectAnotherOption,\n SnowflakeDestination.RecommendedInternalStaging,\n SnowflakeDestination.AWSS3Staging,\n SnowflakeDestination.GoogleCloudStorageStaging,\n SnowflakeDestination.AzureBlobStorageStaging,\n ),\n )\n super().__init__("Snowflake", name)
\n\n\n
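A minimal sketch of how the nested credential and staging classes of SnowflakeDestination compose (values are placeholders; the "Internal Staging" method string is an assumption, since the generated class only checks that it is a string):

from dagster_airbyte.managed.generated.destinations import SnowflakeDestination

snowflake = SnowflakeDestination(
    name="my_snowflake",
    host="myaccount.us-east-1.snowflakecomputing.com",
    role="AIRBYTE_ROLE",
    warehouse="AIRBYTE_WAREHOUSE",
    database="ANALYTICS",
    schema="PUBLIC",
    username="AIRBYTE_USER",
    # Any of OAuth20, KeyPairAuthentication, or UsernameAndPassword is accepted here.
    credentials=SnowflakeDestination.UsernameAndPassword(password="<password>"),
    # Any of the five staging classes is accepted; the method string is passed through as-is.
    loading_method=SnowflakeDestination.RecommendedInternalStaging(method="Internal Staging"),
)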
[docs]class PostgresDestination(GeneratedAirbyteDestination):\n
[docs] class Disable:\n
[docs] @public\n def __init__(\n self,\n ):\n self.mode = "disable"
\n\n
[docs] class Allow:\n
[docs] @public\n def __init__(\n self,\n ):\n self.mode = "allow"
\n\n
[docs] class Prefer:\n
[docs] @public\n def __init__(\n self,\n ):\n self.mode = "prefer"
\n\n
[docs] class Require:\n
[docs] @public\n def __init__(\n self,\n ):\n self.mode = "require"
\n\n
[docs] class VerifyCa:\n
[docs] @public\n def __init__(self, ca_certificate: str, client_key_password: Optional[str] = None):\n self.mode = "verify-ca"\n self.ca_certificate = check.str_param(ca_certificate, "ca_certificate")\n self.client_key_password = check.opt_str_param(\n client_key_password, "client_key_password"\n )
\n\n
[docs] class VerifyFull:\n
[docs] @public\n def __init__(\n self,\n ca_certificate: str,\n client_certificate: str,\n client_key: str,\n client_key_password: Optional[str] = None,\n ):\n self.mode = "verify-full"\n self.ca_certificate = check.str_param(ca_certificate, "ca_certificate")\n self.client_certificate = check.str_param(client_certificate, "client_certificate")\n self.client_key = check.str_param(client_key, "client_key")\n self.client_key_password = check.opt_str_param(\n client_key_password, "client_key_password"\n )
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n database: str,\n schema: str,\n username: str,\n ssl_mode: Union[\n "PostgresDestination.Disable",\n "PostgresDestination.Allow",\n "PostgresDestination.Prefer",\n "PostgresDestination.Require",\n "PostgresDestination.VerifyCa",\n "PostgresDestination.VerifyFull",\n ],\n password: Optional[str] = None,\n ssl: Optional[bool] = None,\n jdbc_url_params: Optional[str] = None,\n ):\n """Airbyte Destination for Postgres.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/postgres\n\n Args:\n name (str): The name of the destination.\n host (str): Hostname of the database.\n port (int): Port of the database.\n database (str): Name of the database.\n schema (str): The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".\n username (str): Username to use to access the database.\n password (Optional[str]): Password associated with the username.\n ssl (Optional[bool]): Encrypt data using SSL. When activating SSL, please select one of the connection modes.\n ssl_mode (Union[PostgresDestination.Disable, PostgresDestination.Allow, PostgresDestination.Prefer, PostgresDestination.Require, PostgresDestination.VerifyCa, PostgresDestination.VerifyFull]): SSL connection modes. disable - Chose this mode to disable encryption of communication between Airbyte and destination database allow - Chose this mode to enable encryption only when required by the source database prefer - Chose this mode to allow unencrypted connection only if the source database does not support encryption require - Chose this mode to always require encryption. If the source database server does not support encryption, connection will fail verify-ca - Chose this mode to always require encryption and to verify that the source database server has a valid SSL certificate verify-full - This is the most secure mode. Chose this mode to always require encryption and to verify the identity of the source database server See more information - in the docs.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.database = check.str_param(database, "database")\n self.schema = check.str_param(schema, "schema")\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.ssl = check.opt_bool_param(ssl, "ssl")\n self.ssl_mode = check.inst_param(\n ssl_mode,\n "ssl_mode",\n (\n PostgresDestination.Disable,\n PostgresDestination.Allow,\n PostgresDestination.Prefer,\n PostgresDestination.Require,\n PostgresDestination.VerifyCa,\n PostgresDestination.VerifyFull,\n ),\n )\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n super().__init__("Postgres", name)
\n\n\n
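A minimal sketch showing how one of the PostgresDestination SSL-mode classes is passed to the constructor (host and credentials are placeholders):

from dagster_airbyte.managed.generated.destinations import PostgresDestination

postgres = PostgresDestination(
    name="my_postgres",
    host="db.example.com",
    port=5432,
    database="warehouse",
    schema="public",
    username="airbyte_user",
    password="<password>",
    # Any of Disable, Allow, Prefer, Require, VerifyCa, or VerifyFull is accepted.
    ssl_mode=PostgresDestination.Require(),
)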
[docs]class ScaffoldDestinationPythonDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(self, name: str, TODO: Optional[str] = None):\n """Airbyte Destination for Scaffold Destination Python.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/scaffold-destination-python\n\n Args:\n name (str): The name of the destination.\n TODO (Optional[str]): FIX ME\n """\n self.TODO = check.opt_str_param(TODO, "TODO")\n super().__init__("Scaffold Destination Python", name)
\n\n\n
[docs]class LocalJsonDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(self, name: str, destination_path: str):\n """Airbyte Destination for Local Json.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/local-json\n\n Args:\n name (str): The name of the destination.\n destination_path (str): Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs\n """\n self.destination_path = check.str_param(destination_path, "destination_path")\n super().__init__("Local Json", name)
\n\n\n
[docs]class MeilisearchDestination(GeneratedAirbyteDestination):\n
[docs] @public\n def __init__(self, name: str, host: str, api_key: Optional[str] = None):\n """Airbyte Destination for Meilisearch.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/meilisearch\n\n Args:\n name (str): The name of the destination.\n host (str): Hostname of the MeiliSearch instance.\n api_key (Optional[str]): MeiliSearch API Key. See the docs for more information on how to obtain this key.\n """\n self.host = check.str_param(host, "host")\n self.api_key = check.opt_str_param(api_key, "api_key")\n super().__init__("Meilisearch", name)
\n
", "current_page_name": "_modules/dagster_airbyte/managed/generated/destinations", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_airbyte.managed.generated.destinations"}, "sources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_airbyte.managed.generated.sources

\n# ruff: noqa: A001, A002\nfrom typing import List, Optional, Union\n\nimport dagster._check as check\nfrom dagster._annotations import public\n\nfrom dagster_airbyte.managed.types import GeneratedAirbyteSource\n\n\n
[docs]class StravaSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n athlete_id: int,\n start_date: str,\n auth_type: Optional[str] = None,\n ):\n """Airbyte Source for Strava.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/strava\n\n Args:\n name (str): The name of the destination.\n client_id (str): The Client ID of your Strava developer application.\n client_secret (str): The Client Secret of your Strava developer application.\n refresh_token (str): The Refresh Token with the activity: read_all permissions.\n athlete_id (int): The Athlete ID of your Strava developer application.\n start_date (str): UTC date and time. Any data before this date will not be replicated.\n """\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")\n self.athlete_id = check.int_param(athlete_id, "athlete_id")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Strava", name)
\n\n\n
[docs]class AppsflyerSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n app_id: str,\n api_token: str,\n start_date: str,\n timezone: Optional[str] = None,\n ):\n """Airbyte Source for Appsflyer.\n\n Args:\n name (str): The name of the destination.\n app_id (str): App identifier as found in AppsFlyer.\n api_token (str): Pull API token for authentication. If you change the account admin, the token changes, and you must update scripts with the new token. Get the API token in the Dashboard.\n start_date (str): The default value to use if no bookmark exists for an endpoint. Raw Reports historical lookback is limited to 90 days.\n timezone (Optional[str]): Time zone in which date times are stored. The project timezone may be found in the App settings in the AppsFlyer console.\n """\n self.app_id = check.str_param(app_id, "app_id")\n self.api_token = check.str_param(api_token, "api_token")\n self.start_date = check.str_param(start_date, "start_date")\n self.timezone = check.opt_str_param(timezone, "timezone")\n super().__init__("Appsflyer", name)
\n\n\n
[docs]class GoogleWorkspaceAdminReportsSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self, name: str, credentials_json: str, email: str, lookback: Optional[int] = None\n ):\n """Airbyte Source for Google Workspace Admin Reports.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/google-workspace-admin-reports\n\n Args:\n name (str): The name of the destination.\n credentials_json (str): The contents of the JSON service account key. See the docs for more information on how to generate this key.\n email (str): The email of the user, who has permissions to access the Google Workspace Admin APIs.\n lookback (Optional[int]): Sets the range of time shown in the report. The maximum value allowed by the Google API is 180 days.\n """\n self.credentials_json = check.str_param(credentials_json, "credentials_json")\n self.email = check.str_param(email, "email")\n self.lookback = check.opt_int_param(lookback, "lookback")\n super().__init__("Google Workspace Admin Reports", name)
\n\n\n
[docs]class CartSource(GeneratedAirbyteSource):\n
[docs] class CentralAPIRouter:\n
[docs] @public\n def __init__(self, user_name: str, user_secret: str, site_id: str):\n self.auth_type = "CENTRAL_API_ROUTER"\n self.user_name = check.str_param(user_name, "user_name")\n self.user_secret = check.str_param(user_secret, "user_secret")\n self.site_id = check.str_param(site_id, "site_id")
\n\n
[docs] class SingleStoreAccessToken:\n
[docs] @public\n def __init__(self, access_token: str, store_name: str):\n self.auth_type = "SINGLE_STORE_ACCESS_TOKEN"\n self.access_token = check.str_param(access_token, "access_token")\n self.store_name = check.str_param(store_name, "store_name")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n credentials: Union["CartSource.CentralAPIRouter", "CartSource.SingleStoreAccessToken"],\n start_date: str,\n ):\n """Airbyte Source for Cart.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/cart\n\n Args:\n name (str): The name of the destination.\n start_date (str): The date from which you'd like to replicate the data\n """\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (CartSource.CentralAPIRouter, CartSource.SingleStoreAccessToken),\n )\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Cart", name)
\n\n\n
[docs]class LinkedinAdsSource(GeneratedAirbyteSource):\n
[docs] class OAuth20:\n
[docs] @public\n def __init__(\n self,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n auth_method: Optional[str] = None,\n ):\n self.auth_method = check.opt_str_param(auth_method, "auth_method")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] class AccessToken:\n
[docs] @public\n def __init__(self, access_token: str, auth_method: Optional[str] = None):\n self.auth_method = check.opt_str_param(auth_method, "auth_method")\n self.access_token = check.str_param(access_token, "access_token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n credentials: Union["LinkedinAdsSource.OAuth20", "LinkedinAdsSource.AccessToken"],\n start_date: str,\n account_ids: Optional[List[int]] = None,\n ):\n """Airbyte Source for Linkedin Ads.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/linkedin-ads\n\n Args:\n name (str): The name of the destination.\n start_date (str): UTC date in the format 2020-09-17. Any data before this date will not be replicated.\n account_ids (Optional[List[int]]): Specify the account IDs separated by a space, to pull the data from. Leave empty, if you want to pull the data from all associated accounts. See the LinkedIn Ads docs for more info.\n """\n self.credentials = check.inst_param(\n credentials, "credentials", (LinkedinAdsSource.OAuth20, LinkedinAdsSource.AccessToken)\n )\n self.start_date = check.str_param(start_date, "start_date")\n self.account_ids = check.opt_nullable_list_param(account_ids, "account_ids", int)\n super().__init__("Linkedin Ads", name)
\n\n\n
[docs]class MongodbSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n database: str,\n user: str,\n password: str,\n auth_source: str,\n replica_set: Optional[str] = None,\n ssl: Optional[bool] = None,\n ):\n """Airbyte Source for Mongodb.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/mongodb\n\n Args:\n name (str): The name of the destination.\n host (str): Host of a Mongo database to be replicated.\n port (int): Port of a Mongo database to be replicated.\n database (str): Database to be replicated.\n user (str): User\n password (str): Password\n auth_source (str): Authentication source where user information is stored. See the Mongo docs for more info.\n replica_set (Optional[str]): The name of the set to filter servers by, when connecting to a replica set (Under this condition, the 'TLS connection' value automatically becomes 'true'). See the Mongo docs for more info.\n ssl (Optional[bool]): If this switch is enabled, TLS connections will be used to connect to MongoDB.\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.database = check.str_param(database, "database")\n self.user = check.str_param(user, "user")\n self.password = check.str_param(password, "password")\n self.auth_source = check.str_param(auth_source, "auth_source")\n self.replica_set = check.opt_str_param(replica_set, "replica_set")\n self.ssl = check.opt_bool_param(ssl, "ssl")\n super().__init__("Mongodb", name)
\n\n\n
[docs]class TimelySource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, account_id: str, start_date: str, bearer_token: str):\n """Airbyte Source for Timely.\n\n Args:\n name (str): The name of the destination.\n account_id (str): Timely account id\n start_date (str): start date\n bearer_token (str): Timely bearer token\n """\n self.account_id = check.str_param(account_id, "account_id")\n self.start_date = check.str_param(start_date, "start_date")\n self.bearer_token = check.str_param(bearer_token, "bearer_token")\n super().__init__("Timely", name)
\n\n\n
[docs]class StockTickerApiTutorialSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, stock_ticker: str, api_key: str):\n """Airbyte Source for Stock Ticker Api Tutorial.\n\n Documentation can be found at https://polygon.io/docs/stocks/get_v2_aggs_grouped_locale_us_market_stocks__date\n\n Args:\n name (str): The name of the destination.\n stock_ticker (str): The stock ticker to track\n api_key (str): The Polygon.io Stocks API key to use to hit the API.\n """\n self.stock_ticker = check.str_param(stock_ticker, "stock_ticker")\n self.api_key = check.str_param(api_key, "api_key")\n super().__init__("Stock Ticker Api Tutorial", name)
\n\n\n
[docs]class WrikeSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self, name: str, access_token: str, wrike_instance: str, start_date: Optional[str] = None\n ):\n """Airbyte Source for Wrike.\n\n Args:\n name (str): The name of the destination.\n access_token (str): Permanent access token. You can find documentation on how to acquire a permanent access token here\n wrike_instance (str): Wrike's instance such as `app-us2.wrike.com`\n start_date (Optional[str]): UTC date and time in the format 2017-01-25T00:00:00Z. Only comments after this date will be replicated.\n """\n self.access_token = check.str_param(access_token, "access_token")\n self.wrike_instance = check.str_param(wrike_instance, "wrike_instance")\n self.start_date = check.opt_str_param(start_date, "start_date")\n super().__init__("Wrike", name)
\n\n\n
[docs]class CommercetoolsSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n region: str,\n host: str,\n start_date: str,\n project_key: str,\n client_id: str,\n client_secret: str,\n ):\n """Airbyte Source for Commercetools.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/commercetools\n\n Args:\n name (str): The name of the destination.\n region (str): The region of the platform.\n host (str): The cloud provider your shop is hosted. See: https://docs.commercetools.com/api/authorization\n start_date (str): The date you would like to replicate data. Format: YYYY-MM-DD.\n project_key (str): The project key\n client_id (str): Id of API Client.\n client_secret (str): The password of secret of API Client.\n """\n self.region = check.str_param(region, "region")\n self.host = check.str_param(host, "host")\n self.start_date = check.str_param(start_date, "start_date")\n self.project_key = check.str_param(project_key, "project_key")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n super().__init__("Commercetools", name)
\n\n\n
[docs]class GutendexSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n author_year_start: Optional[str] = None,\n author_year_end: Optional[str] = None,\n copyright: Optional[str] = None,\n languages: Optional[str] = None,\n search: Optional[str] = None,\n sort: Optional[str] = None,\n topic: Optional[str] = None,\n ):\n """Airbyte Source for Gutendex.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/gutendex\n\n Args:\n name (str): The name of the destination.\n author_year_start (Optional[str]): (Optional) Defines the minimum birth year of the authors. Books by authors born prior to the start year will not be returned. Supports both positive (CE) or negative (BCE) integer values\n author_year_end (Optional[str]): (Optional) Defines the maximum birth year of the authors. Books by authors born after the end year will not be returned. Supports both positive (CE) or negative (BCE) integer values\n copyright (Optional[str]): (Optional) Use this to find books with a certain copyright status - true for books with existing copyrights, false for books in the public domain in the USA, or null for books with no available copyright information.\n languages (Optional[str]): (Optional) Use this to find books in any of a list of languages. They must be comma-separated, two-character language codes.\n search (Optional[str]): (Optional) Use this to search author names and book titles with given words. They must be separated by a space (i.e. %20 in URL-encoded format) and are case-insensitive.\n sort (Optional[str]): (Optional) Use this to sort books - ascending for Project Gutenberg ID numbers from lowest to highest, descending for IDs highest to lowest, or popular (the default) for most popular to least popular by number of downloads.\n topic (Optional[str]): (Optional) Use this to search for a case-insensitive key-phrase in books' bookshelves or subjects.\n """\n self.author_year_start = check.opt_str_param(author_year_start, "author_year_start")\n self.author_year_end = check.opt_str_param(author_year_end, "author_year_end")\n self.copyright = check.opt_str_param(copyright, "copyright")\n self.languages = check.opt_str_param(languages, "languages")\n self.search = check.opt_str_param(search, "search")\n self.sort = check.opt_str_param(sort, "sort")\n self.topic = check.opt_str_param(topic, "topic")\n super().__init__("Gutendex", name)
\n\n\n
[docs]class IterableSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_key: str, start_date: str):\n """Airbyte Source for Iterable.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/iterable\n\n Args:\n name (str): The name of the destination.\n api_key (str): Iterable API Key. See the docs for more information on how to obtain this key.\n start_date (str): The date from which you'd like to replicate data for Iterable, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.\n """\n self.api_key = check.str_param(api_key, "api_key")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Iterable", name)
\n\n\n
[docs]class QuickbooksSingerSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n realm_id: str,\n user_agent: str,\n start_date: str,\n sandbox: bool,\n ):\n """Airbyte Source for Quickbooks Singer.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/quickbooks\n\n Args:\n name (str): The name of the destination.\n client_id (str): Identifies which app is making the request. Obtain this value from the Keys tab on the app profile via My Apps on the developer site. There are two versions of this key: development and production.\n client_secret (str): Obtain this value from the Keys tab on the app profile via My Apps on the developer site. There are two versions of this key: development and production.\n refresh_token (str): A token used when refreshing the access token.\n realm_id (str): Labeled Company ID. The Make API Calls panel is populated with the realm id and the current access token.\n user_agent (str): Process and email for API logging purposes. Example: tap-quickbooks .\n start_date (str): The default value to use if no bookmark exists for an endpoint (rfc3339 date string). E.g, 2021-03-20T00:00:00Z. Any data before this date will not be replicated.\n sandbox (bool): Determines whether to use the sandbox or production environment.\n """\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")\n self.realm_id = check.str_param(realm_id, "realm_id")\n self.user_agent = check.str_param(user_agent, "user_agent")\n self.start_date = check.str_param(start_date, "start_date")\n self.sandbox = check.bool_param(sandbox, "sandbox")\n super().__init__("Quickbooks Singer", name)
\n\n\n
[docs]class BigcommerceSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, start_date: str, store_hash: str, access_token: str):\n """Airbyte Source for Bigcommerce.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/bigcommerce\n\n Args:\n name (str): The name of the destination.\n start_date (str): The date you would like to replicate data. Format: YYYY-MM-DD.\n store_hash (str): The hash code of the store. For https://api.bigcommerce.com/stores/HASH_CODE/v3/, The store's hash code is 'HASH_CODE'.\n access_token (str): Access Token for making authenticated requests.\n """\n self.start_date = check.str_param(start_date, "start_date")\n self.store_hash = check.str_param(store_hash, "store_hash")\n self.access_token = check.str_param(access_token, "access_token")\n super().__init__("Bigcommerce", name)
\n\n\n
[docs]class ShopifySource(GeneratedAirbyteSource):\n
[docs] class APIPassword:\n
[docs] @public\n def __init__(self, api_password: str):\n self.auth_method = "api_password"\n self.api_password = check.str_param(api_password, "api_password")
\n\n
[docs] class OAuth20:\n
[docs] @public\n def __init__(\n self,\n client_id: Optional[str] = None,\n client_secret: Optional[str] = None,\n access_token: Optional[str] = None,\n ):\n self.auth_method = "oauth2.0"\n self.client_id = check.opt_str_param(client_id, "client_id")\n self.client_secret = check.opt_str_param(client_secret, "client_secret")\n self.access_token = check.opt_str_param(access_token, "access_token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n shop: str,\n credentials: Union["ShopifySource.APIPassword", "ShopifySource.OAuth20"],\n start_date: str,\n ):\n """Airbyte Source for Shopify.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/shopify\n\n Args:\n name (str): The name of the destination.\n shop (str): The name of your Shopify store found in the URL. For example, if your URL was https://NAME.myshopify.com, then the name would be 'NAME'.\n credentials (Union[ShopifySource.APIPassword, ShopifySource.OAuth20]): The authorization method to use to retrieve data from Shopify\n start_date (str): The date you would like to replicate data from. Format: YYYY-MM-DD. Any data before this date will not be replicated.\n """\n self.shop = check.str_param(shop, "shop")\n self.credentials = check.inst_param(\n credentials, "credentials", (ShopifySource.APIPassword, ShopifySource.OAuth20)\n )\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Shopify", name)
\n\n\n
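A minimal sketch of the ShopifySource above, using the APIPassword credential variant (store name, password, and date are placeholders):

from dagster_airbyte.managed.generated.sources import ShopifySource

shopify = ShopifySource(
    name="my_shopify",
    shop="NAME",  # the NAME in https://NAME.myshopify.com
    credentials=ShopifySource.APIPassword(api_password="<api-password>"),
    start_date="2023-01-01",
)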
[docs]class AppstoreSingerSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self, name: str, key_id: str, private_key: str, issuer_id: str, vendor: str, start_date: str\n ):\n """Airbyte Source for Appstore Singer.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/appstore\n\n Args:\n name (str): The name of the destination.\n key_id (str): Appstore Key ID. See the docs for more information on how to obtain this key.\n private_key (str): Appstore Private Key. See the docs for more information on how to obtain this key.\n issuer_id (str): Appstore Issuer ID. See the docs for more information on how to obtain this ID.\n vendor (str): Appstore Vendor ID. See the docs for more information on how to obtain this ID.\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.\n """\n self.key_id = check.str_param(key_id, "key_id")\n self.private_key = check.str_param(private_key, "private_key")\n self.issuer_id = check.str_param(issuer_id, "issuer_id")\n self.vendor = check.str_param(vendor, "vendor")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Appstore Singer", name)
\n\n\n
[docs]class GreenhouseSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_key: str):\n """Airbyte Source for Greenhouse.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/greenhouse\n\n Args:\n name (str): The name of the destination.\n api_key (str): Greenhouse API Key. See the docs for more information on how to generate this key.\n """\n self.api_key = check.str_param(api_key, "api_key")\n super().__init__("Greenhouse", name)
\n\n\n
[docs]class ZoomSingerSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, jwt: str):\n """Airbyte Source for Zoom Singer.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/zoom\n\n Args:\n name (str): The name of the destination.\n jwt (str): Zoom JWT Token. See the docs for more information on how to obtain this key.\n """\n self.jwt = check.str_param(jwt, "jwt")\n super().__init__("Zoom Singer", name)
\n\n\n
[docs]class TiktokMarketingSource(GeneratedAirbyteSource):\n
[docs] class OAuth20:\n
[docs] @public\n def __init__(\n self, app_id: str, secret: str, access_token: str, auth_type: Optional[str] = None\n ):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.app_id = check.str_param(app_id, "app_id")\n self.secret = check.str_param(secret, "secret")\n self.access_token = check.str_param(access_token, "access_token")
\n\n
[docs] class SandboxAccessToken:\n
[docs] @public\n def __init__(self, advertiser_id: str, access_token: str, auth_type: Optional[str] = None):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.advertiser_id = check.str_param(advertiser_id, "advertiser_id")\n self.access_token = check.str_param(access_token, "access_token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n credentials: Union[\n "TiktokMarketingSource.OAuth20", "TiktokMarketingSource.SandboxAccessToken"\n ],\n start_date: Optional[str] = None,\n end_date: Optional[str] = None,\n report_granularity: Optional[str] = None,\n ):\n """Airbyte Source for Tiktok Marketing.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/tiktok-marketing\n\n Args:\n name (str): The name of the destination.\n credentials (Union[TiktokMarketingSource.OAuth20, TiktokMarketingSource.SandboxAccessToken]): Authentication method\n start_date (Optional[str]): The Start Date in format: YYYY-MM-DD. Any data before this date will not be replicated. If this parameter is not set, all data will be replicated.\n end_date (Optional[str]): The date until which you'd like to replicate data for all incremental streams, in the format YYYY-MM-DD. All data generated between start_date and this date will be replicated. Not setting this option will result in always syncing the data till the current date.\n report_granularity (Optional[str]): The granularity used for aggregating performance data in reports. See the docs.\n """\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (TiktokMarketingSource.OAuth20, TiktokMarketingSource.SandboxAccessToken),\n )\n self.start_date = check.opt_str_param(start_date, "start_date")\n self.end_date = check.opt_str_param(end_date, "end_date")\n self.report_granularity = check.opt_str_param(report_granularity, "report_granularity")\n super().__init__("Tiktok Marketing", name)
\n\n\n
[docs]class ZendeskChatSource(GeneratedAirbyteSource):\n
[docs] class OAuth20:\n
[docs] @public\n def __init__(\n self,\n client_id: Optional[str] = None,\n client_secret: Optional[str] = None,\n access_token: Optional[str] = None,\n refresh_token: Optional[str] = None,\n ):\n self.credentials = "oauth2.0"\n self.client_id = check.opt_str_param(client_id, "client_id")\n self.client_secret = check.opt_str_param(client_secret, "client_secret")\n self.access_token = check.opt_str_param(access_token, "access_token")\n self.refresh_token = check.opt_str_param(refresh_token, "refresh_token")
\n\n
[docs] class AccessToken:\n
[docs] @public\n def __init__(self, access_token: str):\n self.credentials = "access_token"\n self.access_token = check.str_param(access_token, "access_token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n start_date: str,\n credentials: Union["ZendeskChatSource.OAuth20", "ZendeskChatSource.AccessToken"],\n subdomain: Optional[str] = None,\n ):\n """Airbyte Source for Zendesk Chat.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/zendesk-chat\n\n Args:\n name (str): The name of the destination.\n start_date (str): The date from which you'd like to replicate data for Zendesk Chat API, in the format YYYY-MM-DDT00:00:00Z.\n subdomain (Optional[str]): Required if you access Zendesk Chat from a Zendesk Support subdomain.\n """\n self.start_date = check.str_param(start_date, "start_date")\n self.subdomain = check.opt_str_param(subdomain, "subdomain")\n self.credentials = check.inst_param(\n credentials, "credentials", (ZendeskChatSource.OAuth20, ZendeskChatSource.AccessToken)\n )\n super().__init__("Zendesk Chat", name)
\n\n\n
[docs]class AwsCloudtrailSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self, name: str, aws_key_id: str, aws_secret_key: str, aws_region_name: str, start_date: str\n ):\n """Airbyte Source for Aws Cloudtrail.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/aws-cloudtrail\n\n Args:\n name (str): The name of the destination.\n aws_key_id (str): AWS CloudTrail Access Key ID. See the docs for more information on how to obtain this key.\n aws_secret_key (str): AWS CloudTrail Secret Access Key. See the docs for more information on how to obtain this key.\n aws_region_name (str): The default AWS Region to use, for example, us-west-1 or us-west-2. When specifying a Region inline during client initialization, this property is named region_name.\n start_date (str): The date you would like to replicate data. Data in AWS CloudTrail is available for last 90 days only. Format: YYYY-MM-DD.\n """\n self.aws_key_id = check.str_param(aws_key_id, "aws_key_id")\n self.aws_secret_key = check.str_param(aws_secret_key, "aws_secret_key")\n self.aws_region_name = check.str_param(aws_region_name, "aws_region_name")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Aws Cloudtrail", name)
\n\n\n
[docs]class OktaSource(GeneratedAirbyteSource):\n
[docs] class OAuth20:\n
[docs] @public\n def __init__(self, client_id: str, client_secret: str, refresh_token: str):\n self.auth_type = "oauth2.0"\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] class APIToken:\n
[docs] @public\n def __init__(self, api_token: str):\n self.auth_type = "api_token"\n self.api_token = check.str_param(api_token, "api_token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n credentials: Union["OktaSource.OAuth20", "OktaSource.APIToken"],\n domain: Optional[str] = None,\n start_date: Optional[str] = None,\n ):\n """Airbyte Source for Okta.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/okta\n\n Args:\n name (str): The name of the destination.\n domain (Optional[str]): The Okta domain. See the docs for instructions on how to find it.\n start_date (Optional[str]): UTC date and time in the format YYYY-MM-DDTHH:MM:SSZ. Any data before this date will not be replicated.\n """\n self.domain = check.opt_str_param(domain, "domain")\n self.start_date = check.opt_str_param(start_date, "start_date")\n self.credentials = check.inst_param(\n credentials, "credentials", (OktaSource.OAuth20, OktaSource.APIToken)\n )\n super().__init__("Okta", name)
\n\n\n
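A minimal sketch of the OktaSource above with the APIToken credential variant (domain, token, and date are placeholders):

from dagster_airbyte.managed.generated.sources import OktaSource

okta = OktaSource(
    name="my_okta",
    credentials=OktaSource.APIToken(api_token="<api-token>"),
    domain="my-okta-domain",
    start_date="2023-01-01T00:00:00Z",
)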
[docs]class InsightlySource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, token: Optional[str] = None, start_date: Optional[str] = None):\n """Airbyte Source for Insightly.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/insightly\n\n Args:\n name (str): The name of the destination.\n token (Optional[str]): Your Insightly API token.\n start_date (Optional[str]): The date from which you'd like to replicate data for Insightly in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. Note that it will be used only for incremental streams.\n """\n self.token = check.opt_str_param(token, "token")\n self.start_date = check.opt_str_param(start_date, "start_date")\n super().__init__("Insightly", name)
\n\n\n
[docs]class LinkedinPagesSource(GeneratedAirbyteSource):\n
[docs] class OAuth20:\n
[docs] @public\n def __init__(\n self,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n auth_method: Optional[str] = None,\n ):\n self.auth_method = check.opt_str_param(auth_method, "auth_method")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] class AccessToken:\n
[docs] @public\n def __init__(self, access_token: str, auth_method: Optional[str] = None):\n self.auth_method = check.opt_str_param(auth_method, "auth_method")\n self.access_token = check.str_param(access_token, "access_token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n org_id: int,\n credentials: Union["LinkedinPagesSource.OAuth20", "LinkedinPagesSource.AccessToken"],\n ):\n """Airbyte Source for Linkedin Pages.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/linkedin-pages/\n\n Args:\n name (str): The name of the destination.\n org_id (int): Specify the Organization ID\n """\n self.org_id = check.int_param(org_id, "org_id")\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (LinkedinPagesSource.OAuth20, LinkedinPagesSource.AccessToken),\n )\n super().__init__("Linkedin Pages", name)
\n\n\n
[docs]class PersistiqSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_key: str):\n """Airbyte Source for Persistiq.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/persistiq\n\n Args:\n name (str): The name of the destination.\n api_key (str): PersistIq API Key. See the docs for more information on where to find that key.\n """\n self.api_key = check.str_param(api_key, "api_key")\n super().__init__("Persistiq", name)
\n\n\n
[docs]class FreshcallerSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n domain: str,\n api_key: str,\n start_date: str,\n requests_per_minute: Optional[int] = None,\n sync_lag_minutes: Optional[int] = None,\n ):\n """Airbyte Source for Freshcaller.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/freshcaller\n\n Args:\n name (str): The name of the destination.\n domain (str): Used to construct Base URL for the Freshcaller APIs\n api_key (str): Freshcaller API Key. See the docs for more information on how to obtain this key.\n requests_per_minute (Optional[int]): The number of requests per minute that this source allowed to use. There is a rate limit of 50 requests per minute per app per account.\n start_date (str): UTC date and time. Any data created after this date will be replicated.\n sync_lag_minutes (Optional[int]): Lag in minutes for each sync, i.e., at time T, data for the time range [prev_sync_time, T-30] will be fetched\n """\n self.domain = check.str_param(domain, "domain")\n self.api_key = check.str_param(api_key, "api_key")\n self.requests_per_minute = check.opt_int_param(requests_per_minute, "requests_per_minute")\n self.start_date = check.str_param(start_date, "start_date")\n self.sync_lag_minutes = check.opt_int_param(sync_lag_minutes, "sync_lag_minutes")\n super().__init__("Freshcaller", name)
\n\n\n
[docs]class AppfollowSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, ext_id: str, cid: str, api_secret: str, country: str):\n """Airbyte Source for Appfollow.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/appfollow\n\n Args:\n name (str): The name of the destination.\n ext_id (str): For App Store \u2014 this is a 9-10 digit identification number; for Google Play \u2014 this is the bundle name.\n cid (str): Client ID provided by Appfollow\n api_secret (str): API secret provided by Appfollow\n country (str): The country to get data for\n """\n self.ext_id = check.str_param(ext_id, "ext_id")\n self.cid = check.str_param(cid, "cid")\n self.api_secret = check.str_param(api_secret, "api_secret")\n self.country = check.str_param(country, "country")\n super().__init__("Appfollow", name)
\n\n\n
[docs]class FacebookPagesSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, access_token: str, page_id: str):\n """Airbyte Source for Facebook Pages.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/facebook-pages\n\n Args:\n name (str): The name of the destination.\n access_token (str): Facebook Page Access Token\n page_id (str): Page ID\n """\n self.access_token = check.str_param(access_token, "access_token")\n self.page_id = check.str_param(page_id, "page_id")\n super().__init__("Facebook Pages", name)
\n\n\n
[docs]class JiraSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n api_token: str,\n domain: str,\n email: str,\n projects: Optional[List[str]] = None,\n start_date: Optional[str] = None,\n additional_fields: Optional[List[str]] = None,\n expand_issue_changelog: Optional[bool] = None,\n render_fields: Optional[bool] = None,\n enable_experimental_streams: Optional[bool] = None,\n ):\n """Airbyte Source for Jira.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/jira\n\n Args:\n name (str): The name of the destination.\n api_token (str): Jira API Token. See the docs for more information on how to generate this key.\n domain (str): The Domain for your Jira account, e.g. airbyteio.atlassian.net\n email (str): The user email for your Jira account.\n projects (Optional[List[str]]): List of Jira project keys to replicate data for.\n start_date (Optional[str]): The date from which you'd like to replicate data for Jira in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. Note that it will be used only in the following incremental streams: issues.\n additional_fields (Optional[List[str]]): List of additional fields to include in replicating issues.\n expand_issue_changelog (Optional[bool]): Expand the changelog when replicating issues.\n render_fields (Optional[bool]): Render issue fields in HTML format in addition to Jira JSON-like format.\n enable_experimental_streams (Optional[bool]): Allow the use of experimental streams which rely on undocumented Jira API endpoints. See https://docs.airbyte.com/integrations/sources/jira#experimental-tables for more info.\n """\n self.api_token = check.str_param(api_token, "api_token")\n self.domain = check.str_param(domain, "domain")\n self.email = check.str_param(email, "email")\n self.projects = check.opt_nullable_list_param(projects, "projects", str)\n self.start_date = check.opt_str_param(start_date, "start_date")\n self.additional_fields = check.opt_nullable_list_param(\n additional_fields, "additional_fields", str\n )\n self.expand_issue_changelog = check.opt_bool_param(\n expand_issue_changelog, "expand_issue_changelog"\n )\n self.render_fields = check.opt_bool_param(render_fields, "render_fields")\n self.enable_experimental_streams = check.opt_bool_param(\n enable_experimental_streams, "enable_experimental_streams"\n )\n super().__init__("Jira", name)
\n\n\n
[docs]class GoogleSheetsSource(GeneratedAirbyteSource):\n
[docs] class AuthenticateViaGoogleOAuth:\n
[docs] @public\n def __init__(self, client_id: str, client_secret: str, refresh_token: str):\n self.auth_type = "Client"\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] class ServiceAccountKeyAuthentication:\n
[docs] @public\n def __init__(self, service_account_info: str):\n self.auth_type = "Service"\n self.service_account_info = check.str_param(\n service_account_info, "service_account_info"\n )
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n spreadsheet_id: str,\n credentials: Union[\n "GoogleSheetsSource.AuthenticateViaGoogleOAuth",\n "GoogleSheetsSource.ServiceAccountKeyAuthentication",\n ],\n row_batch_size: Optional[int] = None,\n ):\n """Airbyte Source for Google Sheets.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/google-sheets\n\n Args:\n name (str): The name of the destination.\n spreadsheet_id (str): Enter the link to the Google spreadsheet you want to sync\n row_batch_size (Optional[int]): Number of rows fetched when making a Google Sheet API call. Defaults to 200.\n credentials (Union[GoogleSheetsSource.AuthenticateViaGoogleOAuth, GoogleSheetsSource.ServiceAccountKeyAuthentication]): Credentials for connecting to the Google Sheets API\n """\n self.spreadsheet_id = check.str_param(spreadsheet_id, "spreadsheet_id")\n self.row_batch_size = check.opt_int_param(row_batch_size, "row_batch_size")\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (\n GoogleSheetsSource.AuthenticateViaGoogleOAuth,\n GoogleSheetsSource.ServiceAccountKeyAuthentication,\n ),\n )\n super().__init__("Google Sheets", name)
\n\n\n
[docs]class DockerhubSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, docker_username: str):\n """Airbyte Source for Dockerhub.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/dockerhub\n\n Args:\n name (str): The name of the destination.\n docker_username (str): Username of DockerHub person or organization (for https://hub.docker.com/v2/repositories/USERNAME/ API call)\n """\n self.docker_username = check.str_param(docker_username, "docker_username")\n super().__init__("Dockerhub", name)
\n\n\n
[docs]class UsCensusSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self, name: str, query_path: str, api_key: str, query_params: Optional[str] = None\n ):\n """Airbyte Source for Us Census.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/us-census\n\n Args:\n name (str): The name of the destination.\n query_params (Optional[str]): The query parameters portion of the GET request, without the api key\n query_path (str): The path portion of the GET request\n api_key (str): Your API Key. Get your key here.\n """\n self.query_params = check.opt_str_param(query_params, "query_params")\n self.query_path = check.str_param(query_path, "query_path")\n self.api_key = check.str_param(api_key, "api_key")\n super().__init__("Us Census", name)
\n\n\n
[docs]class KustomerSingerSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_token: str, start_date: str):\n """Airbyte Source for Kustomer Singer.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/kustomer\n\n Args:\n name (str): The name of the destination.\n api_token (str): Kustomer API Token. See the docs on how to obtain this\n start_date (str): The date from which you'd like to replicate the data\n """\n self.api_token = check.str_param(api_token, "api_token")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Kustomer Singer", name)
\n\n\n
[docs]class AzureTableSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n storage_account_name: str,\n storage_access_key: str,\n storage_endpoint_suffix: Optional[str] = None,\n ):\n """Airbyte Source for Azure Table.\n\n Args:\n name (str): The name of the destination.\n storage_account_name (str): The name of your storage account.\n storage_access_key (str): Azure Table Storage Access Key. See the docs for more information on how to obtain this key.\n storage_endpoint_suffix (Optional[str]): Azure Table Storage service account URL suffix. See the docs for more information on how to obtain endpoint suffix\n """\n self.storage_account_name = check.str_param(storage_account_name, "storage_account_name")\n self.storage_access_key = check.str_param(storage_access_key, "storage_access_key")\n self.storage_endpoint_suffix = check.opt_str_param(\n storage_endpoint_suffix, "storage_endpoint_suffix"\n )\n super().__init__("Azure Table", name)
\n\n\n
[docs]class ScaffoldJavaJdbcSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n database: str,\n username: str,\n replication_method: str,\n password: Optional[str] = None,\n jdbc_url_params: Optional[str] = None,\n ):\n """Airbyte Source for Scaffold Java Jdbc.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/scaffold_java_jdbc\n\n Args:\n name (str): The name of the destination.\n host (str): Hostname of the database.\n port (int): Port of the database.\n database (str): Name of the database.\n username (str): Username to use to access the database.\n password (Optional[str]): Password associated with the username.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3)\n replication_method (str): Replication method to use for extracting data from the database. STANDARD replication requires no setup on the DB side but will not be able to represent deletions incrementally. CDC uses the Binlog to detect inserts, updates, and deletes. This needs to be configured on the source database itself.\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.database = check.str_param(database, "database")\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n self.replication_method = check.str_param(replication_method, "replication_method")\n super().__init__("Scaffold Java Jdbc", name)
\n\n\n
[docs]class TidbSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n database: str,\n username: str,\n password: Optional[str] = None,\n jdbc_url_params: Optional[str] = None,\n ssl: Optional[bool] = None,\n ):\n """Airbyte Source for Tidb.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/tidb\n\n Args:\n name (str): The name of the destination.\n host (str): Hostname of the database.\n port (int): Port of the database.\n database (str): Name of the database.\n username (str): Username to use to access the database.\n password (Optional[str]): Password associated with the username.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3)\n ssl (Optional[bool]): Encrypt data using SSL.\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.database = check.str_param(database, "database")\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n self.ssl = check.opt_bool_param(ssl, "ssl")\n super().__init__("Tidb", name)
\n\n\n
[docs]class QualarooSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n token: str,\n key: str,\n start_date: str,\n survey_ids: Optional[List[str]] = None,\n ):\n """Airbyte Source for Qualaroo.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/qualaroo\n\n Args:\n name (str): The name of the destination.\n token (str): A Qualaroo token. See the docs for instructions on how to generate it.\n key (str): A Qualaroo token. See the docs for instructions on how to generate it.\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.\n survey_ids (Optional[List[str]]): IDs of the surveys from which you'd like to replicate data. If left empty, data from all surveys to which you have access will be replicated.\n """\n self.token = check.str_param(token, "token")\n self.key = check.str_param(key, "key")\n self.start_date = check.str_param(start_date, "start_date")\n self.survey_ids = check.opt_nullable_list_param(survey_ids, "survey_ids", str)\n super().__init__("Qualaroo", name)
\n\n\n
[docs]class YahooFinancePriceSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self, name: str, tickers: str, interval: Optional[str] = None, range: Optional[str] = None\n ):\n """Airbyte Source for Yahoo Finance Price.\n\n Args:\n name (str): The name of the destination.\n tickers (str): Comma-separated identifiers for the stocks to be queried. Whitespaces are allowed.\n interval (Optional[str]): The interval between prices queried.\n range (Optional[str]): The range of prices to be queried.\n """\n self.tickers = check.str_param(tickers, "tickers")\n self.interval = check.opt_str_param(interval, "interval")\n self.range = check.opt_str_param(range, "range")\n super().__init__("Yahoo Finance Price", name)
\n\n\n
[docs]class GoogleAnalyticsV4Source(GeneratedAirbyteSource):\n
[docs] class AuthenticateViaGoogleOauth:\n
[docs] @public\n def __init__(\n self,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n auth_type: Optional[str] = None,\n access_token: Optional[str] = None,\n ):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")\n self.access_token = check.opt_str_param(access_token, "access_token")
\n\n
[docs] class ServiceAccountKeyAuthentication:\n
[docs] @public\n def __init__(self, credentials_json: str, auth_type: Optional[str] = None):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.credentials_json = check.str_param(credentials_json, "credentials_json")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n credentials: Union[\n "GoogleAnalyticsV4Source.AuthenticateViaGoogleOauth",\n "GoogleAnalyticsV4Source.ServiceAccountKeyAuthentication",\n ],\n start_date: str,\n view_id: str,\n custom_reports: Optional[str] = None,\n window_in_days: Optional[int] = None,\n ):\n """Airbyte Source for Google Analytics V4.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/google-analytics-universal-analytics\n\n Args:\n name (str): The name of the destination.\n credentials (Union[GoogleAnalyticsV4Source.AuthenticateViaGoogleOauth, GoogleAnalyticsV4Source.ServiceAccountKeyAuthentication]): Credentials for the service\n start_date (str): The date in the format YYYY-MM-DD. Any data before this date will not be replicated.\n view_id (str): The ID for the Google Analytics View you want to fetch data from. This can be found from the Google Analytics Account Explorer.\n custom_reports (Optional[str]): A JSON array describing the custom reports you want to sync from Google Analytics. See the docs for more information about the exact format you can use to fill out this field.\n window_in_days (Optional[int]): The time increment used by the connector when requesting data from the Google Analytics API. More information is available in the docs. The bigger this value is, the faster the sync will be, but the more likely that sampling will be applied to your data, potentially causing inaccuracies in the returned results. We recommend setting this to 1 unless you have a hard requirement to make the sync faster at the expense of accuracy. The minimum allowed value for this field is 1, and the maximum is 364.\n """\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (\n GoogleAnalyticsV4Source.AuthenticateViaGoogleOauth,\n GoogleAnalyticsV4Source.ServiceAccountKeyAuthentication,\n ),\n )\n self.start_date = check.str_param(start_date, "start_date")\n self.view_id = check.str_param(view_id, "view_id")\n self.custom_reports = check.opt_str_param(custom_reports, "custom_reports")\n self.window_in_days = check.opt_int_param(window_in_days, "window_in_days")\n super().__init__("Google Analytics V4", name)
\n\n\n
[docs]class JdbcSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n username: str,\n jdbc_url: str,\n password: Optional[str] = None,\n jdbc_url_params: Optional[str] = None,\n ):\n """Airbyte Source for Jdbc.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/postgres\n\n Args:\n name (str): The name of the destination.\n username (str): The username which is used to access the database.\n password (Optional[str]): The password associated with this username.\n jdbc_url (str): JDBC formatted URL. See the standard here.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).\n """\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.jdbc_url = check.str_param(jdbc_url, "jdbc_url")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n super().__init__("Jdbc", name)
\n\n\n
[docs]class FakerSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n count: int,\n seed: Optional[int] = None,\n records_per_sync: Optional[int] = None,\n records_per_slice: Optional[int] = None,\n ):\n """Airbyte Source for Faker.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/faker\n\n Args:\n name (str): The name of the destination.\n count (int): How many users should be generated in total. This setting does not apply to the purchases or products stream.\n seed (Optional[int]): Manually control the faker random seed to return the same values on subsequent runs (leave -1 for random)\n records_per_sync (Optional[int]): How many fake records will be returned for each sync, for each stream? By default, it will take 2 syncs to create the requested 1000 records.\n records_per_slice (Optional[int]): How many fake records will be in each page (stream slice), before a state message is emitted?\n """\n self.count = check.int_param(count, "count")\n self.seed = check.opt_int_param(seed, "seed")\n self.records_per_sync = check.opt_int_param(records_per_sync, "records_per_sync")\n self.records_per_slice = check.opt_int_param(records_per_slice, "records_per_slice")\n super().__init__("Faker", name)
\n\n\n
[docs]class TplcentralSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n url_base: str,\n client_id: str,\n client_secret: str,\n user_login_id: Optional[int] = None,\n user_login: Optional[str] = None,\n tpl_key: Optional[str] = None,\n customer_id: Optional[int] = None,\n facility_id: Optional[int] = None,\n start_date: Optional[str] = None,\n ):\n """Airbyte Source for Tplcentral.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/tplcentral\n\n Args:\n name (str): The name of the destination.\n user_login_id (Optional[int]): User login ID and/or name is required\n user_login (Optional[str]): User login ID and/or name is required\n start_date (Optional[str]): Date and time together in RFC 3339 format, for example, 2018-11-13T20:20:39+00:00.\n """\n self.url_base = check.str_param(url_base, "url_base")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.user_login_id = check.opt_int_param(user_login_id, "user_login_id")\n self.user_login = check.opt_str_param(user_login, "user_login")\n self.tpl_key = check.opt_str_param(tpl_key, "tpl_key")\n self.customer_id = check.opt_int_param(customer_id, "customer_id")\n self.facility_id = check.opt_int_param(facility_id, "facility_id")\n self.start_date = check.opt_str_param(start_date, "start_date")\n super().__init__("Tplcentral", name)
\n\n\n
[docs]class ClickhouseSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n database: str,\n username: str,\n password: Optional[str] = None,\n jdbc_url_params: Optional[str] = None,\n ssl: Optional[bool] = None,\n ):\n """Airbyte Source for Clickhouse.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/clickhouse\n\n Args:\n name (str): The name of the destination.\n host (str): The host endpoint of the Clickhouse cluster.\n port (int): The port of the database.\n database (str): The name of the database.\n username (str): The username which is used to access the database.\n password (Optional[str]): The password associated with this username.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (Eg. key1=value1&key2=value2&key3=value3). For more information read about JDBC URL parameters.\n ssl (Optional[bool]): Encrypt data using SSL.\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.database = check.str_param(database, "database")\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n self.ssl = check.opt_bool_param(ssl, "ssl")\n super().__init__("Clickhouse", name)
\n\n\n
[docs]class FreshserviceSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, domain_name: str, api_key: str, start_date: str):\n """Airbyte Source for Freshservice.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/freshservice\n\n Args:\n name (str): The name of the destination.\n domain_name (str): The name of your Freshservice domain\n api_key (str): Freshservice API Key. See here. The key is case sensitive.\n start_date (str): UTC date and time in the format 2020-10-01T00:00:00Z. Any data before this date will not be replicated.\n """\n self.domain_name = check.str_param(domain_name, "domain_name")\n self.api_key = check.str_param(api_key, "api_key")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Freshservice", name)
\n\n\n
[docs]class ZenloopSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n api_token: str,\n date_from: Optional[str] = None,\n survey_id: Optional[str] = None,\n survey_group_id: Optional[str] = None,\n ):\n """Airbyte Source for Zenloop.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/zenloop\n\n Args:\n name (str): The name of the destination.\n api_token (str): Zenloop API Token. You can get the API token in settings page here\n date_from (Optional[str]): Zenloop date_from. Format: 2021-10-24T03:30:30Z or 2021-10-24. Leave empty if only data from the current date should be synced\n survey_id (Optional[str]): Zenloop Survey ID. Can be found here. Leave empty to pull answers from all surveys\n survey_group_id (Optional[str]): Zenloop Survey Group ID. Can be found by pulling All Survey Groups via SurveyGroups stream. Leave empty to pull answers from all survey groups\n """\n self.api_token = check.str_param(api_token, "api_token")\n self.date_from = check.opt_str_param(date_from, "date_from")\n self.survey_id = check.opt_str_param(survey_id, "survey_id")\n self.survey_group_id = check.opt_str_param(survey_group_id, "survey_group_id")\n super().__init__("Zenloop", name)
\n\n\n
[docs]class OracleSource(GeneratedAirbyteSource):\n
[docs] class ServiceName:\n
[docs] @public\n def __init__(self, service_name: str, connection_type: Optional[str] = None):\n self.connection_type = check.opt_str_param(connection_type, "connection_type")\n self.service_name = check.str_param(service_name, "service_name")
\n\n
[docs] class SystemIDSID:\n
[docs] @public\n def __init__(self, sid: str, connection_type: Optional[str] = None):\n self.connection_type = check.opt_str_param(connection_type, "connection_type")\n self.sid = check.str_param(sid, "sid")
\n\n
[docs] class Unencrypted:\n
[docs] @public\n def __init__(\n self,\n ):\n self.encryption_method = "unencrypted"
\n\n
[docs] class NativeNetworkEncryptionNNE:\n
[docs] @public\n def __init__(self, encryption_algorithm: Optional[str] = None):\n self.encryption_method = "client_nne"\n self.encryption_algorithm = check.opt_str_param(\n encryption_algorithm, "encryption_algorithm"\n )
\n\n
[docs] class TLSEncryptedVerifyCertificate:\n
[docs] @public\n def __init__(self, ssl_certificate: str):\n self.encryption_method = "encrypted_verify_certificate"\n self.ssl_certificate = check.str_param(ssl_certificate, "ssl_certificate")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n connection_data: Union["OracleSource.ServiceName", "OracleSource.SystemIDSID"],\n username: str,\n encryption: Union[\n "OracleSource.Unencrypted",\n "OracleSource.NativeNetworkEncryptionNNE",\n "OracleSource.TLSEncryptedVerifyCertificate",\n ],\n password: Optional[str] = None,\n schemas: Optional[List[str]] = None,\n jdbc_url_params: Optional[str] = None,\n ):\n """Airbyte Source for Oracle.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/oracle\n\n Args:\n name (str): The name of the destination.\n host (str): Hostname of the database.\n port (int): Port of the database. Oracle Corporation recommends the following port numbers: 1521 - Default listening port for client connections to the listener. 2484 - Recommended and officially registered listening port for client connections to the listener using TCP/IP with SSL\n connection_data (Union[OracleSource.ServiceName, OracleSource.SystemIDSID]): Connect data that will be used for DB connection\n username (str): The username which is used to access the database.\n password (Optional[str]): The password associated with the username.\n schemas (Optional[List[str]]): The list of schemas to sync from. Defaults to user. Case sensitive.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).\n encryption (Union[OracleSource.Unencrypted, OracleSource.NativeNetworkEncryptionNNE, OracleSource.TLSEncryptedVerifyCertificate]): The encryption method which is used when communicating with the database.\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.connection_data = check.inst_param(\n connection_data, "connection_data", (OracleSource.ServiceName, OracleSource.SystemIDSID)\n )\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.schemas = check.opt_nullable_list_param(schemas, "schemas", str)\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n self.encryption = check.inst_param(\n encryption,\n "encryption",\n (\n OracleSource.Unencrypted,\n OracleSource.NativeNetworkEncryptionNNE,\n OracleSource.TLSEncryptedVerifyCertificate,\n ),\n )\n super().__init__("Oracle", name)
\n\n\n
[docs]class KlaviyoSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_key: str, start_date: str):\n """Airbyte Source for Klaviyo.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/klaviyo\n\n Args:\n name (str): The name of the destination.\n api_key (str): Klaviyo API Key. See our docs if you need help finding this key.\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.\n """\n self.api_key = check.str_param(api_key, "api_key")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Klaviyo", name)
\n\n\n
[docs]class GoogleDirectorySource(GeneratedAirbyteSource):\n
[docs] class SignInViaGoogleOAuth:\n
[docs] @public\n def __init__(\n self,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n credentials_title: Optional[str] = None,\n ):\n self.credentials_title = check.opt_str_param(credentials_title, "credentials_title")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] class ServiceAccountKey:\n
[docs] @public\n def __init__(\n self, credentials_json: str, email: str, credentials_title: Optional[str] = None\n ):\n self.credentials_title = check.opt_str_param(credentials_title, "credentials_title")\n self.credentials_json = check.str_param(credentials_json, "credentials_json")\n self.email = check.str_param(email, "email")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n credentials: Union[\n "GoogleDirectorySource.SignInViaGoogleOAuth", "GoogleDirectorySource.ServiceAccountKey"\n ],\n ):\n """Airbyte Source for Google Directory.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/google-directory\n\n Args:\n name (str): The name of the destination.\n credentials (Union[GoogleDirectorySource.SignInViaGoogleOAuth, GoogleDirectorySource.ServiceAccountKey]): Google APIs use the OAuth 2.0 protocol for authentication and authorization. The Source supports Web server application and Service accounts scenarios.\n """\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (GoogleDirectorySource.SignInViaGoogleOAuth, GoogleDirectorySource.ServiceAccountKey),\n )\n super().__init__("Google Directory", name)
\n\n\n
[docs]class InstagramSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, start_date: str, access_token: str):\n """Airbyte Source for Instagram.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/instagram\n\n Args:\n name (str): The name of the destination.\n start_date (str): The date from which you'd like to replicate data for User Insights, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.\n access_token (str): The value of the access token generated. See the docs for more information\n """\n self.start_date = check.str_param(start_date, "start_date")\n self.access_token = check.str_param(access_token, "access_token")\n super().__init__("Instagram", name)
\n\n\n
[docs]class ShortioSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, domain_id: str, secret_key: str, start_date: str):\n """Airbyte Source for Shortio.\n\n Documentation can be found at https://developers.short.io/reference\n\n Args:\n name (str): The name of the destination.\n secret_key (str): Short.io Secret Key\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.\n """\n self.domain_id = check.str_param(domain_id, "domain_id")\n self.secret_key = check.str_param(secret_key, "secret_key")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Shortio", name)
\n\n\n
[docs]class SquareSource(GeneratedAirbyteSource):\n
[docs] class OauthAuthentication:\n
[docs] @public\n def __init__(self, client_id: str, client_secret: str, refresh_token: str):\n self.auth_type = "Oauth"\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] class APIKey:\n
[docs] @public\n def __init__(self, api_key: str):\n self.auth_type = "Apikey"\n self.api_key = check.str_param(api_key, "api_key")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n is_sandbox: bool,\n credentials: Union["SquareSource.OauthAuthentication", "SquareSource.APIKey"],\n start_date: Optional[str] = None,\n include_deleted_objects: Optional[bool] = None,\n ):\n """Airbyte Source for Square.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/square\n\n Args:\n name (str): The name of the destination.\n is_sandbox (bool): Determines whether to use the sandbox or production environment.\n start_date (Optional[str]): UTC date in the format YYYY-MM-DD. Any data before this date will not be replicated. If not set, all data will be replicated.\n include_deleted_objects (Optional[bool]): In some streams there is an option to include deleted objects (Items, Categories, Discounts, Taxes)\n """\n self.is_sandbox = check.bool_param(is_sandbox, "is_sandbox")\n self.start_date = check.opt_str_param(start_date, "start_date")\n self.include_deleted_objects = check.opt_bool_param(\n include_deleted_objects, "include_deleted_objects"\n )\n self.credentials = check.inst_param(\n credentials, "credentials", (SquareSource.OauthAuthentication, SquareSource.APIKey)\n )\n super().__init__("Square", name)
\n\n\n
[docs]class DelightedSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, since: str, api_key: str):\n """Airbyte Source for Delighted.\n\n Args:\n name (str): The name of the destination.\n since (str): The date from which you'd like to replicate the data\n api_key (str): A Delighted API key.\n """\n self.since = check.str_param(since, "since")\n self.api_key = check.str_param(api_key, "api_key")\n super().__init__("Delighted", name)
\n\n\n
[docs]class AmazonSqsSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n queue_url: str,\n region: str,\n delete_messages: bool,\n max_batch_size: Optional[int] = None,\n max_wait_time: Optional[int] = None,\n attributes_to_return: Optional[str] = None,\n visibility_timeout: Optional[int] = None,\n access_key: Optional[str] = None,\n secret_key: Optional[str] = None,\n ):\n """Airbyte Source for Amazon Sqs.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/amazon-sqs\n\n Args:\n name (str): The name of the destination.\n queue_url (str): URL of the SQS Queue\n region (str): AWS Region of the SQS Queue\n delete_messages (bool): If Enabled, messages will be deleted from the SQS Queue after being read. If Disabled, messages are left in the queue and can be read more than once. WARNING: Enabling this option can result in data loss in cases of failure, use with caution, see documentation for more detail.\n max_batch_size (Optional[int]): Max amount of messages to get in one batch (10 max)\n max_wait_time (Optional[int]): Max amount of time in seconds to wait for messages in a single poll (20 max)\n attributes_to_return (Optional[str]): Comma separated list of Message Attribute names to return\n visibility_timeout (Optional[int]): Modify the Visibility Timeout of the individual message from the Queue's default (seconds).\n access_key (Optional[str]): The Access Key ID of the AWS IAM Role to use for pulling messages\n secret_key (Optional[str]): The Secret Key of the AWS IAM Role to use for pulling messages\n """\n self.queue_url = check.str_param(queue_url, "queue_url")\n self.region = check.str_param(region, "region")\n self.delete_messages = check.bool_param(delete_messages, "delete_messages")\n self.max_batch_size = check.opt_int_param(max_batch_size, "max_batch_size")\n self.max_wait_time = check.opt_int_param(max_wait_time, "max_wait_time")\n self.attributes_to_return = check.opt_str_param(\n attributes_to_return, "attributes_to_return"\n )\n self.visibility_timeout = check.opt_int_param(visibility_timeout, "visibility_timeout")\n self.access_key = check.opt_str_param(access_key, "access_key")\n self.secret_key = check.opt_str_param(secret_key, "secret_key")\n super().__init__("Amazon Sqs", name)
\n\n\n
[docs]class YoutubeAnalyticsSource(GeneratedAirbyteSource):\n
[docs] class AuthenticateViaOAuth20:\n
[docs] @public\n def __init__(self, client_id: str, client_secret: str, refresh_token: str):\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] @public\n def __init__(self, name: str, credentials: "YoutubeAnalyticsSource.AuthenticateViaOAuth20"):\n """Airbyte Source for Youtube Analytics.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/youtube-analytics\n\n Args:\n name (str): The name of the destination.\n\n """\n self.credentials = check.inst_param(\n credentials, "credentials", YoutubeAnalyticsSource.AuthenticateViaOAuth20\n )\n super().__init__("Youtube Analytics", name)
\n\n\n
[docs]class ScaffoldSourcePythonSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, fix_me: Optional[str] = None):\n """Airbyte Source for Scaffold Source Python.\n\n Args:\n name (str): The name of the destination.\n fix_me (Optional[str]): describe me\n """\n self.fix_me = check.opt_str_param(fix_me, "fix_me")\n super().__init__("Scaffold Source Python", name)
\n\n\n
[docs]class LookerSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n domain: str,\n client_id: str,\n client_secret: str,\n run_look_ids: Optional[List[str]] = None,\n ):\n """Airbyte Source for Looker.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/looker\n\n Args:\n name (str): The name of the destination.\n domain (str): Domain for your Looker account, e.g. airbyte.cloud.looker.com,looker.[clientname].com,IP address\n client_id (str): The Client ID is the first part of an API3 key that is specific to each Looker user. See the docs for more information on how to generate this key.\n client_secret (str): The Client Secret is the second part of an API3 key.\n run_look_ids (Optional[List[str]]): The IDs of any Looks to run\n """\n self.domain = check.str_param(domain, "domain")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.run_look_ids = check.opt_nullable_list_param(run_look_ids, "run_look_ids", str)\n super().__init__("Looker", name)
\n\n\n
[docs]class GitlabSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n api_url: str,\n private_token: str,\n start_date: str,\n groups: Optional[str] = None,\n projects: Optional[str] = None,\n ):\n """Airbyte Source for Gitlab.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/gitlab\n\n Args:\n name (str): The name of the destination.\n api_url (str): Please enter your basic URL from GitLab instance.\n private_token (str): Log into your GitLab account and then generate a personal Access Token.\n groups (Optional[str]): Space-delimited list of groups. e.g. airbyte.io.\n projects (Optional[str]): Space-delimited list of projects. e.g. airbyte.io/documentation meltano/tap-gitlab.\n start_date (str): The date from which you'd like to replicate data for GitLab API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.\n """\n self.api_url = check.str_param(api_url, "api_url")\n self.private_token = check.str_param(private_token, "private_token")\n self.groups = check.opt_str_param(groups, "groups")\n self.projects = check.opt_str_param(projects, "projects")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Gitlab", name)
\n\n\n
[docs]class ExchangeRatesSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n start_date: str,\n access_key: str,\n base: Optional[str] = None,\n ignore_weekends: Optional[bool] = None,\n ):\n """Airbyte Source for Exchange Rates.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/exchangeratesapi\n\n Args:\n name (str): The name of the destination.\n start_date (str): Start getting data from that date.\n access_key (str): Your API Key. See here. The key is case sensitive.\n base (Optional[str]): ISO reference currency. See here. Free plan doesn't support Source Currency Switching, default base currency is EUR\n ignore_weekends (Optional[bool]): Ignore weekends? (Exchanges don't run on weekends)\n """\n self.start_date = check.str_param(start_date, "start_date")\n self.access_key = check.str_param(access_key, "access_key")\n self.base = check.opt_str_param(base, "base")\n self.ignore_weekends = check.opt_bool_param(ignore_weekends, "ignore_weekends")\n super().__init__("Exchange Rates", name)
\n\n\n
[docs]class AmazonAdsSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n auth_type: Optional[str] = None,\n region: Optional[str] = None,\n report_wait_timeout: Optional[int] = None,\n report_generation_max_retries: Optional[int] = None,\n start_date: Optional[str] = None,\n profiles: Optional[List[int]] = None,\n state_filter: Optional[List[str]] = None,\n ):\n """Airbyte Source for Amazon Ads.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/amazon-ads\n\n Args:\n name (str): The name of the destination.\n client_id (str): The client ID of your Amazon Ads developer application. See the docs for more information.\n client_secret (str): The client secret of your Amazon Ads developer application. See the docs for more information.\n refresh_token (str): Amazon Ads refresh token. See the docs for more information on how to obtain this token.\n region (Optional[str]): Region to pull data from (EU/NA/FE). See docs for more details.\n report_wait_timeout (Optional[int]): Timeout duration in minutes for Reports. Default is 60 minutes.\n report_generation_max_retries (Optional[int]): Maximum retries Airbyte will attempt for fetching report data. Default is 5.\n start_date (Optional[str]): The Start date for collecting reports, should not be more than 60 days in the past. In YYYY-MM-DD format\n profiles (Optional[List[int]]): Profile IDs you want to fetch data for. See docs for more details.\n state_filter (Optional[List[str]]): Reflects the state of the Display, Product, and Brand Campaign streams as enabled, paused, or archived. If you do not populate this field, it will be ignored completely.\n """\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")\n self.region = check.opt_str_param(region, "region")\n self.report_wait_timeout = check.opt_int_param(report_wait_timeout, "report_wait_timeout")\n self.report_generation_max_retries = check.opt_int_param(\n report_generation_max_retries, "report_generation_max_retries"\n )\n self.start_date = check.opt_str_param(start_date, "start_date")\n self.profiles = check.opt_nullable_list_param(profiles, "profiles", int)\n self.state_filter = check.opt_nullable_list_param(state_filter, "state_filter", str)\n super().__init__("Amazon Ads", name)
\n\n\n
[docs]class MixpanelSource(GeneratedAirbyteSource):\n
[docs] class ServiceAccount:\n
[docs] @public\n def __init__(self, username: str, secret: str):\n self.username = check.str_param(username, "username")\n self.secret = check.str_param(secret, "secret")
\n\n
[docs] class ProjectSecret:\n
[docs] @public\n def __init__(self, api_secret: str):\n self.api_secret = check.str_param(api_secret, "api_secret")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n credentials: Union["MixpanelSource.ServiceAccount", "MixpanelSource.ProjectSecret"],\n project_id: Optional[int] = None,\n attribution_window: Optional[int] = None,\n project_timezone: Optional[str] = None,\n select_properties_by_default: Optional[bool] = None,\n start_date: Optional[str] = None,\n end_date: Optional[str] = None,\n region: Optional[str] = None,\n date_window_size: Optional[int] = None,\n ):\n """Airbyte Source for Mixpanel.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/mixpanel\n\n Args:\n name (str): The name of the destination.\n credentials (Union[MixpanelSource.ServiceAccount, MixpanelSource.ProjectSecret]): Choose how to authenticate to Mixpanel\n project_id (Optional[int]): Your project ID number. See the docs for more information on how to obtain this.\n attribution_window (Optional[int]): A period of time for attributing results to ads and the lookback period after those actions occur during which ad results are counted. Default attribution window is 5 days.\n project_timezone (Optional[str]): Time zone in which integer date times are stored. The project timezone may be found in the project settings in the Mixpanel console.\n select_properties_by_default (Optional[bool]): Setting this config parameter to TRUE ensures that new properties on events and engage records are captured. Otherwise new properties will be ignored.\n start_date (Optional[str]): The date in the format YYYY-MM-DD. Any data before this date will not be replicated. If this option is not set, the connector will replicate data from up to one year ago by default.\n end_date (Optional[str]): The date in the format YYYY-MM-DD. Any data after this date will not be replicated. Leave empty to always sync to the most recent date\n region (Optional[str]): The region of the Mixpanel domain instance, either US or EU.\n date_window_size (Optional[int]): Defines the window size in days that is used to slice through data. You can reduce it if the amount of data in each window is too big for your environment.\n """\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (MixpanelSource.ServiceAccount, MixpanelSource.ProjectSecret),\n )\n self.project_id = check.opt_int_param(project_id, "project_id")\n self.attribution_window = check.opt_int_param(attribution_window, "attribution_window")\n self.project_timezone = check.opt_str_param(project_timezone, "project_timezone")\n self.select_properties_by_default = check.opt_bool_param(\n select_properties_by_default, "select_properties_by_default"\n )\n self.start_date = check.opt_str_param(start_date, "start_date")\n self.end_date = check.opt_str_param(end_date, "end_date")\n self.region = check.opt_str_param(region, "region")\n self.date_window_size = check.opt_int_param(date_window_size, "date_window_size")\n super().__init__("Mixpanel", name)
\n\n\n
[docs]class OrbitSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_token: str, workspace: str, start_date: Optional[str] = None):\n """Airbyte Source for Orbit.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/orbit\n\n Args:\n name (str): The name of the destination.\n api_token (str): Authorizes you to work with Orbit workspaces associated with the token.\n workspace (str): The unique name of the workspace that your API token is associated with.\n start_date (Optional[str]): Date in the format 2022-06-26. Only load members whose last activities are after this date.\n """\n self.api_token = check.str_param(api_token, "api_token")\n self.workspace = check.str_param(workspace, "workspace")\n self.start_date = check.opt_str_param(start_date, "start_date")\n super().__init__("Orbit", name)
\n\n\n
[docs]class AmazonSellerPartnerSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n lwa_app_id: str,\n lwa_client_secret: str,\n refresh_token: str,\n aws_access_key: str,\n aws_secret_key: str,\n role_arn: str,\n replication_start_date: str,\n aws_environment: str,\n region: str,\n app_id: Optional[str] = None,\n auth_type: Optional[str] = None,\n replication_end_date: Optional[str] = None,\n period_in_days: Optional[int] = None,\n report_options: Optional[str] = None,\n max_wait_seconds: Optional[int] = None,\n ):\n """Airbyte Source for Amazon Seller Partner.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/amazon-seller-partner\n\n Args:\n name (str): The name of the destination.\n app_id (Optional[str]): Your Amazon App ID\n lwa_app_id (str): Your Login with Amazon Client ID.\n lwa_client_secret (str): Your Login with Amazon Client Secret.\n refresh_token (str): The Refresh Token obtained via OAuth flow authorization.\n aws_access_key (str): Specifies the AWS access key used as part of the credentials to authenticate the user.\n aws_secret_key (str): Specifies the AWS secret key used as part of the credentials to authenticate the user.\n role_arn (str): Specifies the Amazon Resource Name (ARN) of an IAM role that you want to use to perform operations requested using this profile. (Needs permission to 'Assume Role' STS).\n replication_start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.\n replication_end_date (Optional[str]): UTC date and time in the format 2017-01-25T00:00:00Z. Any data after this date will not be replicated.\n period_in_days (Optional[int]): Will be used for stream slicing for initial full_refresh sync when no updated state is present for reports that support sliced incremental sync.\n report_options (Optional[str]): Additional information passed to reports. This varies by report type. Must be a valid json string.\n max_wait_seconds (Optional[int]): Sometimes report can take up to 30 minutes to generate. This will set the limit for how long to wait for a successful report.\n aws_environment (str): An enumeration.\n region (str): An enumeration.\n """\n self.app_id = check.opt_str_param(app_id, "app_id")\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.lwa_app_id = check.str_param(lwa_app_id, "lwa_app_id")\n self.lwa_client_secret = check.str_param(lwa_client_secret, "lwa_client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")\n self.aws_access_key = check.str_param(aws_access_key, "aws_access_key")\n self.aws_secret_key = check.str_param(aws_secret_key, "aws_secret_key")\n self.role_arn = check.str_param(role_arn, "role_arn")\n self.replication_start_date = check.str_param(\n replication_start_date, "replication_start_date"\n )\n self.replication_end_date = check.opt_str_param(\n replication_end_date, "replication_end_date"\n )\n self.period_in_days = check.opt_int_param(period_in_days, "period_in_days")\n self.report_options = check.opt_str_param(report_options, "report_options")\n self.max_wait_seconds = check.opt_int_param(max_wait_seconds, "max_wait_seconds")\n self.aws_environment = check.str_param(aws_environment, "aws_environment")\n self.region = check.str_param(region, "region")\n super().__init__("Amazon Seller Partner", name)
\n\n\n
[docs]class CourierSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_key: str):\n """Airbyte Source for Courier.\n\n Documentation can be found at https://docs.airbyte.io/integrations/sources/courier\n\n Args:\n name (str): The name of the destination.\n api_key (str): Courier API Key to retrieve your data.\n """\n self.api_key = check.str_param(api_key, "api_key")\n super().__init__("Courier", name)
\n\n\n
[docs]class CloseComSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_key: str, start_date: Optional[str] = None):\n r"""Airbyte Source for Close Com.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/close-com\n\n Args:\n name (str): The name of the destination.\n api_key (str): Close.com API key (usually starts with 'api\\\\_'; find yours here).\n start_date (Optional[str]): The start date to sync data. Leave blank for full sync. Format: YYYY-MM-DD.\n """\n self.api_key = check.str_param(api_key, "api_key")\n self.start_date = check.opt_str_param(start_date, "start_date")\n super().__init__("Close Com", name)
\n\n\n
[docs]class BingAdsSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n client_id: str,\n refresh_token: str,\n developer_token: str,\n reports_start_date: str,\n auth_method: Optional[str] = None,\n tenant_id: Optional[str] = None,\n client_secret: Optional[str] = None,\n ):\n """Airbyte Source for Bing Ads.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/bing-ads\n\n Args:\n name (str): The name of the destination.\n tenant_id (Optional[str]): The Tenant ID of your Microsoft Advertising developer application. Set this to "common" unless you know you need a different value.\n client_id (str): The Client ID of your Microsoft Advertising developer application.\n client_secret (Optional[str]): The Client Secret of your Microsoft Advertising developer application.\n refresh_token (str): Refresh Token to renew the expired Access Token.\n developer_token (str): Developer token associated with user. See more info in the docs.\n reports_start_date (str): The start date from which to begin replicating report data. Any data generated before this date will not be replicated in reports. This is a UTC date in YYYY-MM-DD format.\n """\n self.auth_method = check.opt_str_param(auth_method, "auth_method")\n self.tenant_id = check.opt_str_param(tenant_id, "tenant_id")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.opt_str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")\n self.developer_token = check.str_param(developer_token, "developer_token")\n self.reports_start_date = check.str_param(reports_start_date, "reports_start_date")\n super().__init__("Bing Ads", name)
\n\n\n
[docs]class PrimetricSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, client_id: str, client_secret: str):\n """Airbyte Source for Primetric.\n\n Args:\n name (str): The name of the destination.\n client_id (str): The Client ID of your Primetric developer application. The Client ID is visible here.\n client_secret (str): The Client Secret of your Primetric developer application. You can manage your client's credentials here.\n """\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n super().__init__("Primetric", name)
\n\n\n
[docs]class PivotalTrackerSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_token: str):\n """Airbyte Source for Pivotal Tracker.\n\n Args:\n name (str): The name of the destination.\n api_token (str): Pivotal Tracker API token\n """\n self.api_token = check.str_param(api_token, "api_token")\n super().__init__("Pivotal Tracker", name)
\n\n\n
[docs]class ElasticsearchSource(GeneratedAirbyteSource):\n
[docs] class None_:\n
[docs] @public\n def __init__(\n self,\n ):\n self.method = "none"
\n\n
[docs] class ApiKeySecret:\n
[docs] @public\n def __init__(self, apiKeyId: str, apiKeySecret: str):\n self.method = "secret"\n self.apiKeyId = check.str_param(apiKeyId, "apiKeyId")\n self.apiKeySecret = check.str_param(apiKeySecret, "apiKeySecret")
\n\n
[docs] class UsernamePassword:\n
[docs] @public\n def __init__(self, username: str, password: str):\n self.method = "basic"\n self.username = check.str_param(username, "username")\n self.password = check.str_param(password, "password")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n endpoint: str,\n authenticationMethod: Union[\n "ElasticsearchSource.None_",\n "ElasticsearchSource.ApiKeySecret",\n "ElasticsearchSource.UsernamePassword",\n ],\n ):\n r"""Airbyte Source for Elasticsearch.\n\n Documentation can be found at https://docs.airbyte.com/integrations/source/elasticsearch\n\n Args:\n name (str): The name of the destination.\n endpoint (str): The full url of the Elasticsearch server\n authenticationMethod (Union[ElasticsearchSource.None\\\\_, ElasticsearchSource.ApiKeySecret, ElasticsearchSource.UsernamePassword]): The type of authentication to be used\n """\n self.endpoint = check.str_param(endpoint, "endpoint")\n self.authenticationMethod = check.inst_param(\n authenticationMethod,\n "authenticationMethod",\n (\n ElasticsearchSource.None_,\n ElasticsearchSource.ApiKeySecret,\n ElasticsearchSource.UsernamePassword,\n ),\n )\n super().__init__("Elasticsearch", name)
\n\n\n
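A minimal construction sketch for ElasticsearchSource. The dagster_airbyte.managed.generated.sources import path and every credential value below are assumptions/placeholders, not taken from this page:

    from dagster_airbyte.managed.generated.sources import ElasticsearchSource

    # API-key authentication; UsernamePassword or None_ could be substituted.
    es_source = ElasticsearchSource(
        name="my_elasticsearch_source",
        endpoint="https://elasticsearch.example.com:9200",  # placeholder endpoint
        authenticationMethod=ElasticsearchSource.ApiKeySecret(
            apiKeyId="my-api-key-id",          # placeholder
            apiKeySecret="my-api-key-secret",  # placeholder
        ),
    )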
[docs]class BigquerySource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self, name: str, project_id: str, credentials_json: str, dataset_id: Optional[str] = None\n ):\n """Airbyte Source for Bigquery.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/bigquery\n\n Args:\n name (str): The name of the destination.\n project_id (str): The GCP project ID for the project containing the target BigQuery dataset.\n dataset_id (Optional[str]): The dataset ID to search for tables and views. If you are only loading data from one dataset, setting this option could result in much faster schema discovery.\n credentials_json (str): The contents of your Service Account Key JSON file. See the docs for more information on how to obtain this key.\n """\n self.project_id = check.str_param(project_id, "project_id")\n self.dataset_id = check.opt_str_param(dataset_id, "dataset_id")\n self.credentials_json = check.str_param(credentials_json, "credentials_json")\n super().__init__("Bigquery", name)
\n\n\n
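A minimal construction sketch for BigquerySource, under the same assumptions about the import path; project and key values are placeholders:

    from dagster_airbyte.managed.generated.sources import BigquerySource

    bigquery_source = BigquerySource(
        name="my_bigquery_source",
        project_id="my-gcp-project",                     # placeholder project
        credentials_json='{"type": "service_account"}',  # placeholder service account key contents
        dataset_id="analytics",                          # optional; narrows schema discovery to one dataset
    )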
[docs]class WoocommerceSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n shop: str,\n start_date: str,\n api_key: str,\n api_secret: str,\n conversion_window_days: Optional[int] = None,\n ):\n """Airbyte Source for Woocommerce.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/woocommerce\n\n Args:\n name (str): The name of the destination.\n shop (str): The name of the store. For https://EXAMPLE.com, the shop name is 'EXAMPLE.com'.\n start_date (str): The date you would like to replicate data. Format: YYYY-MM-DD.\n api_key (str): The CUSTOMER KEY for API in WooCommerce shop.\n api_secret (str): The CUSTOMER SECRET for API in WooCommerce shop.\n conversion_window_days (Optional[int]): A conversion window is the period of time after an ad interaction (such as an ad click or video view) during which a conversion, such as a purchase, is recorded in Google Ads.\n """\n self.shop = check.str_param(shop, "shop")\n self.start_date = check.str_param(start_date, "start_date")\n self.api_key = check.str_param(api_key, "api_key")\n self.api_secret = check.str_param(api_secret, "api_secret")\n self.conversion_window_days = check.opt_int_param(\n conversion_window_days, "conversion_window_days"\n )\n super().__init__("Woocommerce", name)
\n\n\n
[docs]class SearchMetricsSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self, name: str, api_key: str, client_secret: str, country_code: str, start_date: str\n ):\n """Airbyte Source for Search Metrics.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/search-metrics\n\n Args:\n name (str): The name of the source.\n country_code (str): The region of the S3 staging bucket to use if utilising a copy strategy.\n start_date (str): Data generated in SearchMetrics after this date will be replicated. This date must be specified in the format YYYY-MM-DDT00:00:00Z.\n """\n self.api_key = check.str_param(api_key, "api_key")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.country_code = check.str_param(country_code, "country_code")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Search Metrics", name)
\n\n\n
[docs]class TypeformSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self, name: str, start_date: str, token: str, form_ids: Optional[List[str]] = None\n ):\n """Airbyte Source for Typeform.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/typeform\n\n Args:\n name (str): The name of the destination.\n start_date (str): UTC date and time in the format: YYYY-MM-DDTHH:mm:ss[Z]. Any data before this date will not be replicated.\n token (str): The API Token for a Typeform account.\n form_ids (Optional[List[str]]): When this parameter is set, the connector will replicate data only from the input forms. Otherwise, all forms in your Typeform account will be replicated. You can find form IDs in your form URLs. For example, in the URL "https://mysite.typeform.com/to/u6nXL7" the form_id is u6nXL7. You can find form URLs on Share panel\n """\n self.start_date = check.str_param(start_date, "start_date")\n self.token = check.str_param(token, "token")\n self.form_ids = check.opt_nullable_list_param(form_ids, "form_ids", str)\n super().__init__("Typeform", name)
\n\n\n
[docs]class WebflowSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, site_id: str, api_key: str):\n """Airbyte Source for Webflow.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/webflow\n\n Args:\n name (str): The name of the destination.\n site_id (str): The id of the Webflow site you are requesting data from. See https://developers.webflow.com/#sites\n api_key (str): The API token for authenticating to Webflow. See https://university.webflow.com/lesson/intro-to-the-webflow-api\n """\n self.site_id = check.str_param(site_id, "site_id")\n self.api_key = check.str_param(api_key, "api_key")\n super().__init__("Webflow", name)
\n\n\n
[docs]class FireboltSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n username: str,\n password: str,\n database: str,\n account: Optional[str] = None,\n host: Optional[str] = None,\n engine: Optional[str] = None,\n ):\n """Airbyte Source for Firebolt.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/firebolt\n\n Args:\n name (str): The name of the destination.\n username (str): Firebolt email address you use to login.\n password (str): Firebolt password.\n account (Optional[str]): Firebolt account to login.\n host (Optional[str]): The host name of your Firebolt database.\n database (str): The database to connect to.\n engine (Optional[str]): Engine name or url to connect to.\n """\n self.username = check.str_param(username, "username")\n self.password = check.str_param(password, "password")\n self.account = check.opt_str_param(account, "account")\n self.host = check.opt_str_param(host, "host")\n self.database = check.str_param(database, "database")\n self.engine = check.opt_str_param(engine, "engine")\n super().__init__("Firebolt", name)
\n\n\n
[docs]class FaunaSource(GeneratedAirbyteSource):\n
[docs] class Disabled:\n
[docs] @public\n def __init__(\n self,\n ):\n self.deletion_mode = "ignore"
\n\n
[docs] class Enabled:\n
[docs] @public\n def __init__(self, column: str):\n self.deletion_mode = "deleted_field"\n self.column = check.str_param(column, "column")
\n\n
[docs] class Collection:\n
[docs] @public\n def __init__(\n self, page_size: int, deletions: Union["FaunaSource.Disabled", "FaunaSource.Enabled"]\n ):\n self.page_size = check.int_param(page_size, "page_size")\n self.deletions = check.inst_param(\n deletions, "deletions", (FaunaSource.Disabled, FaunaSource.Enabled)\n )
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n domain: str,\n port: int,\n scheme: str,\n secret: str,\n collection: "FaunaSource.Collection",\n ):\n """Airbyte Source for Fauna.\n\n Documentation can be found at https://github.com/fauna/airbyte/blob/source-fauna/docs/integrations/sources/fauna.md\n\n Args:\n name (str): The name of the source.\n domain (str): Domain of Fauna to query. Defaults to db.fauna.com. See the docs.\n port (int): Endpoint port.\n scheme (str): URL scheme.\n secret (str): Fauna secret, used when authenticating with the database.\n collection (FaunaSource.Collection): Settings for the Fauna Collection.\n """\n self.domain = check.str_param(domain, "domain")\n self.port = check.int_param(port, "port")\n self.scheme = check.str_param(scheme, "scheme")\n self.secret = check.str_param(secret, "secret")\n self.collection = check.inst_param(collection, "collection", FaunaSource.Collection)\n super().__init__("Fauna", name)
\n\n\n
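A minimal construction sketch for FaunaSource showing the nested Collection config; the import path and secret are assumptions/placeholders:

    from dagster_airbyte.managed.generated.sources import FaunaSource

    fauna_source = FaunaSource(
        name="my_fauna_source",
        domain="db.fauna.com",
        port=443,
        scheme="https",
        secret="my-fauna-secret",  # placeholder
        collection=FaunaSource.Collection(
            page_size=64,
            deletions=FaunaSource.Enabled(column="deleted_at"),  # or FaunaSource.Disabled()
        ),
    )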
[docs]class IntercomSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, start_date: str, access_token: str):\n """Airbyte Source for Intercom.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/intercom\n\n Args:\n name (str): The name of the destination.\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.\n access_token (str): Access token for making authenticated requests. See the Intercom docs for more information.\n """\n self.start_date = check.str_param(start_date, "start_date")\n self.access_token = check.str_param(access_token, "access_token")\n super().__init__("Intercom", name)
\n\n\n
[docs]class FreshsalesSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, domain_name: str, api_key: str):\n """Airbyte Source for Freshsales.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/freshsales\n\n Args:\n name (str): The name of the destination.\n domain_name (str): The Name of your Freshsales domain\n api_key (str): Freshsales API Key. See here. The key is case sensitive.\n """\n self.domain_name = check.str_param(domain_name, "domain_name")\n self.api_key = check.str_param(api_key, "api_key")\n super().__init__("Freshsales", name)
\n\n\n
[docs]class AdjustSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n api_token: str,\n dimensions: List[str],\n ingest_start: str,\n metrics: List[str],\n additional_metrics: Optional[List[str]] = None,\n until_today: Optional[bool] = None,\n ):\n """Airbyte Source for Adjust.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/adjust\n\n Args:\n name (str): The name of the destination.\n additional_metrics (Optional[List[str]]): Metrics names that are not pre-defined, such as cohort metrics or app specific metrics.\n api_token (str): Adjust API key, see https://help.adjust.com/en/article/report-service-api-authentication\n dimensions (List[str]): Dimensions allow a user to break down metrics into groups using one or several parameters. For example, the number of installs by date, country and network. See https://help.adjust.com/en/article/reports-endpoint#dimensions for more information about the dimensions.\n ingest_start (str): Data ingest start date.\n metrics (List[str]): Select at least one metric to query.\n until_today (Optional[bool]): Syncs data up until today. Useful when running daily incremental syncs, and duplicates are not desired.\n """\n self.additional_metrics = check.opt_nullable_list_param(\n additional_metrics, "additional_metrics", str\n )\n self.api_token = check.str_param(api_token, "api_token")\n self.dimensions = check.list_param(dimensions, "dimensions", str)\n self.ingest_start = check.str_param(ingest_start, "ingest_start")\n self.metrics = check.list_param(metrics, "metrics", str)\n self.until_today = check.opt_bool_param(until_today, "until_today")\n super().__init__("Adjust", name)
\n\n\n
[docs]class BambooHrSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n subdomain: str,\n api_key: str,\n custom_reports_fields: Optional[str] = None,\n custom_reports_include_default_fields: Optional[bool] = None,\n ):\n """Airbyte Source for Bamboo Hr.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/bamboo-hr\n\n Args:\n name (str): The name of the destination.\n subdomain (str): Sub Domain of bamboo hr\n api_key (str): Api key of bamboo hr\n custom_reports_fields (Optional[str]): Comma-separated list of fields to include in custom reports.\n custom_reports_include_default_fields (Optional[bool]): If true, the custom reports endpoint will include the default fields defined here: https://documentation.bamboohr.com/docs/list-of-field-names.\n """\n self.subdomain = check.str_param(subdomain, "subdomain")\n self.api_key = check.str_param(api_key, "api_key")\n self.custom_reports_fields = check.opt_str_param(\n custom_reports_fields, "custom_reports_fields"\n )\n self.custom_reports_include_default_fields = check.opt_bool_param(\n custom_reports_include_default_fields, "custom_reports_include_default_fields"\n )\n super().__init__("Bamboo Hr", name)
\n\n\n
[docs]class GoogleAdsSource(GeneratedAirbyteSource):\n
[docs] class GoogleCredentials:\n
[docs] @public\n def __init__(\n self,\n developer_token: str,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n access_token: Optional[str] = None,\n ):\n self.developer_token = check.str_param(developer_token, "developer_token")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")\n self.access_token = check.opt_str_param(access_token, "access_token")
\n\n
[docs] class CustomGAQLQueriesEntry:\n
[docs] @public\n def __init__(self, query: str, table_name: str):\n self.query = check.str_param(query, "query")\n self.table_name = check.str_param(table_name, "table_name")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n credentials: "GoogleAdsSource.GoogleCredentials",\n customer_id: str,\n start_date: str,\n end_date: Optional[str] = None,\n custom_queries: Optional[List[CustomGAQLQueriesEntry]] = None,\n login_customer_id: Optional[str] = None,\n conversion_window_days: Optional[int] = None,\n ):\n """Airbyte Source for Google Ads.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/google-ads\n\n Args:\n name (str): The name of the destination.\n customer_id (str): Comma separated list of (client) customer IDs. Each customer ID must be specified as a 10-digit number without dashes. More instruction on how to find this value in our docs. Metrics streams like AdGroupAdReport cannot be requested for a manager account.\n start_date (str): UTC date and time in the format 2017-01-25. Any data before this date will not be replicated.\n end_date (Optional[str]): UTC date and time in the format 2017-01-25. Any data after this date will not be replicated.\n login_customer_id (Optional[str]): If your access to the customer account is through a manager account, this field is required and must be set to the customer ID of the manager account (10-digit number without dashes). More information about this field you can see here\n conversion_window_days (Optional[int]): A conversion window is the period of time after an ad interaction (such as an ad click or video view) during which a conversion, such as a purchase, is recorded in Google Ads. For more information, see Google's documentation.\n """\n self.credentials = check.inst_param(\n credentials, "credentials", GoogleAdsSource.GoogleCredentials\n )\n self.customer_id = check.str_param(customer_id, "customer_id")\n self.start_date = check.str_param(start_date, "start_date")\n self.end_date = check.opt_str_param(end_date, "end_date")\n self.custom_queries = check.opt_nullable_list_param(\n custom_queries, "custom_queries", GoogleAdsSource.CustomGAQLQueriesEntry\n )\n self.login_customer_id = check.opt_str_param(login_customer_id, "login_customer_id")\n self.conversion_window_days = check.opt_int_param(\n conversion_window_days, "conversion_window_days"\n )\n super().__init__("Google Ads", name)
\n\n\n
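A minimal construction sketch for GoogleAdsSource with the nested GoogleCredentials and an optional custom GAQL query; import path, credentials, and query are assumptions/placeholders:

    from dagster_airbyte.managed.generated.sources import GoogleAdsSource

    google_ads_source = GoogleAdsSource(
        name="my_google_ads_source",
        credentials=GoogleAdsSource.GoogleCredentials(
            developer_token="dev-token-placeholder",
            client_id="client-id-placeholder",
            client_secret="client-secret-placeholder",
            refresh_token="refresh-token-placeholder",
        ),
        customer_id="1234567890",   # placeholder 10-digit customer ID
        start_date="2022-01-01",
        custom_queries=[
            GoogleAdsSource.CustomGAQLQueriesEntry(
                query="SELECT campaign.id, campaign.name FROM campaign",  # illustrative GAQL
                table_name="campaigns",
            )
        ],
    )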
[docs]class HellobatonSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_key: str, company: str):\n """Airbyte Source for Hellobaton.\n\n Args:\n name (str): The name of the destination.\n api_key (str): authentication key required to access the api endpoints\n company (str): Company name that generates your base api url\n """\n self.api_key = check.str_param(api_key, "api_key")\n self.company = check.str_param(company, "company")\n super().__init__("Hellobaton", name)
\n\n\n
[docs]class SendgridSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, apikey: str, start_time: Union[int, str]):\n """Airbyte Source for Sendgrid.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/sendgrid\n\n Args:\n name (str): The name of the destination.\n apikey (str): API Key, use admin to generate this key.\n start_time (Union[int, str]): Start time in ISO8601 format. Any data before this time point will not be replicated.\n """\n self.apikey = check.str_param(apikey, "apikey")\n self.start_time = check.inst_param(start_time, "start_time", (int, str))\n super().__init__("Sendgrid", name)
\n\n\n
[docs]class MondaySource(GeneratedAirbyteSource):\n
[docs] class OAuth20:\n
[docs] @public\n def __init__(\n self,\n client_id: str,\n client_secret: str,\n access_token: str,\n subdomain: Optional[str] = None,\n ):\n self.auth_type = "oauth2.0"\n self.subdomain = check.opt_str_param(subdomain, "subdomain")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.access_token = check.str_param(access_token, "access_token")
\n\n
[docs] class APIToken:\n
[docs] @public\n def __init__(self, api_token: str):\n self.auth_type = "api_token"\n self.api_token = check.str_param(api_token, "api_token")
\n\n
[docs] @public\n def __init__(\n self, name: str, credentials: Union["MondaySource.OAuth20", "MondaySource.APIToken"]\n ):\n """Airbyte Source for Monday.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/monday\n\n Args:\n name (str): The name of the destination.\n\n """\n self.credentials = check.inst_param(\n credentials, "credentials", (MondaySource.OAuth20, MondaySource.APIToken)\n )\n super().__init__("Monday", name)
\n\n\n
[docs]class DixaSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self, name: str, api_token: str, start_date: str, batch_size: Optional[int] = None\n ):\n """Airbyte Source for Dixa.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/dixa\n\n Args:\n name (str): The name of the destination.\n api_token (str): Dixa API token\n start_date (str): The connector pulls records updated from this date onwards.\n batch_size (Optional[int]): Number of days to batch into one request. Max 31.\n """\n self.api_token = check.str_param(api_token, "api_token")\n self.start_date = check.str_param(start_date, "start_date")\n self.batch_size = check.opt_int_param(batch_size, "batch_size")\n super().__init__("Dixa", name)
\n\n\n
[docs]class SalesforceSource(GeneratedAirbyteSource):\n
[docs] class FilterSalesforceObjectsEntry:\n
[docs] @public\n def __init__(self, criteria: str, value: str):\n self.criteria = check.str_param(criteria, "criteria")\n self.value = check.str_param(value, "value")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n is_sandbox: Optional[bool] = None,\n auth_type: Optional[str] = None,\n start_date: Optional[str] = None,\n streams_criteria: Optional[List[FilterSalesforceObjectsEntry]] = None,\n ):\n """Airbyte Source for Salesforce.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/salesforce\n\n Args:\n name (str): The name of the destination.\n is_sandbox (Optional[bool]): Toggle if you're using a Salesforce Sandbox\n client_id (str): Enter your Salesforce developer application's Client ID\n client_secret (str): Enter your Salesforce developer application's Client secret\n refresh_token (str): Enter your application's Salesforce Refresh Token used for Airbyte to access your Salesforce account.\n start_date (Optional[str]): Enter the date in the YYYY-MM-DD format. Airbyte will replicate the data added on and after this date. If this field is blank, Airbyte will replicate all data.\n streams_criteria (Optional[List[SalesforceSource.FilterSalesforceObjectsEntry]]): Filter streams relevant to you\n """\n self.is_sandbox = check.opt_bool_param(is_sandbox, "is_sandbox")\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")\n self.start_date = check.opt_str_param(start_date, "start_date")\n self.streams_criteria = check.opt_nullable_list_param(\n streams_criteria, "streams_criteria", SalesforceSource.FilterSalesforceObjectsEntry\n )\n super().__init__("Salesforce", name)
\n\n\n
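A minimal construction sketch for SalesforceSource with an optional stream filter entry; import path, credential values, and the filter criteria string are assumptions/placeholders:

    from dagster_airbyte.managed.generated.sources import SalesforceSource

    salesforce_source = SalesforceSource(
        name="my_salesforce_source",
        client_id="client-id-placeholder",
        client_secret="client-secret-placeholder",
        refresh_token="refresh-token-placeholder",
        start_date="2022-01-01",
        streams_criteria=[
            SalesforceSource.FilterSalesforceObjectsEntry(
                criteria="contains",  # placeholder criteria
                value="Account",      # placeholder value
            )
        ],
    )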
[docs]class PipedriveSource(GeneratedAirbyteSource):\n
[docs] class SignInViaPipedriveOAuth:\n
[docs] @public\n def __init__(self, client_id: str, client_secret: str, refresh_token: str):\n self.auth_type = "Client"\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] class APIKeyAuthentication:\n
[docs] @public\n def __init__(self, api_token: str):\n self.auth_type = "Token"\n self.api_token = check.str_param(api_token, "api_token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n authorization: Union[\n "PipedriveSource.SignInViaPipedriveOAuth", "PipedriveSource.APIKeyAuthentication"\n ],\n replication_start_date: str,\n ):\n """Airbyte Source for Pipedrive.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/pipedrive\n\n Args:\n name (str): The name of the destination.\n authorization (Union[PipedriveSource.SignInViaPipedriveOAuth, PipedriveSource.APIKeyAuthentication]): Choose one of the possible authorization method\n replication_start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. When specified and not None, then stream will behave as incremental\n """\n self.authorization = check.inst_param(\n authorization,\n "authorization",\n (PipedriveSource.SignInViaPipedriveOAuth, PipedriveSource.APIKeyAuthentication),\n )\n self.replication_start_date = check.str_param(\n replication_start_date, "replication_start_date"\n )\n super().__init__("Pipedrive", name)
\n\n\n
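A minimal construction sketch for PipedriveSource using API-key authorization (OAuth could be substituted); import path and token are assumptions/placeholders:

    from dagster_airbyte.managed.generated.sources import PipedriveSource

    pipedrive_source = PipedriveSource(
        name="my_pipedrive_source",
        authorization=PipedriveSource.APIKeyAuthentication(
            api_token="my-api-token",  # placeholder
        ),
        replication_start_date="2017-01-25T00:00:00Z",
    )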
[docs]class FileSource(GeneratedAirbyteSource):\n
[docs] class HTTPSPublicWeb:\n
[docs] @public\n def __init__(self, user_agent: Optional[bool] = None):\n self.storage = "HTTPS"\n self.user_agent = check.opt_bool_param(user_agent, "user_agent")
\n\n
[docs] class GCSGoogleCloudStorage:\n
[docs] @public\n def __init__(self, service_account_json: Optional[str] = None):\n self.storage = "GCS"\n self.service_account_json = check.opt_str_param(\n service_account_json, "service_account_json"\n )
\n\n
[docs] class S3AmazonWebServices:\n
[docs] @public\n def __init__(\n self,\n aws_access_key_id: Optional[str] = None,\n aws_secret_access_key: Optional[str] = None,\n ):\n self.storage = "S3"\n self.aws_access_key_id = check.opt_str_param(aws_access_key_id, "aws_access_key_id")\n self.aws_secret_access_key = check.opt_str_param(\n aws_secret_access_key, "aws_secret_access_key"\n )
\n\n
[docs] class AzBlobAzureBlobStorage:\n
[docs] @public\n def __init__(\n self,\n storage_account: str,\n sas_token: Optional[str] = None,\n shared_key: Optional[str] = None,\n ):\n self.storage = "AzBlob"\n self.storage_account = check.str_param(storage_account, "storage_account")\n self.sas_token = check.opt_str_param(sas_token, "sas_token")\n self.shared_key = check.opt_str_param(shared_key, "shared_key")
\n\n
[docs] class SSHSecureShell:\n
[docs] @public\n def __init__(\n self, user: str, host: str, password: Optional[str] = None, port: Optional[str] = None\n ):\n self.storage = "SSH"\n self.user = check.str_param(user, "user")\n self.password = check.opt_str_param(password, "password")\n self.host = check.str_param(host, "host")\n self.port = check.opt_str_param(port, "port")
\n\n
[docs] class SCPSecureCopyProtocol:\n
[docs] @public\n def __init__(\n self, user: str, host: str, password: Optional[str] = None, port: Optional[str] = None\n ):\n self.storage = "SCP"\n self.user = check.str_param(user, "user")\n self.password = check.opt_str_param(password, "password")\n self.host = check.str_param(host, "host")\n self.port = check.opt_str_param(port, "port")
\n\n
[docs] class SFTPSecureFileTransferProtocol:\n
[docs] @public\n def __init__(\n self, user: str, host: str, password: Optional[str] = None, port: Optional[str] = None\n ):\n self.storage = "SFTP"\n self.user = check.str_param(user, "user")\n self.password = check.opt_str_param(password, "password")\n self.host = check.str_param(host, "host")\n self.port = check.opt_str_param(port, "port")
\n\n
[docs] class LocalFilesystemLimited:\n
[docs] @public\n def __init__(\n self,\n ):\n self.storage = "local"
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n dataset_name: str,\n format: str,\n url: str,\n provider: Union[\n "FileSource.HTTPSPublicWeb",\n "FileSource.GCSGoogleCloudStorage",\n "FileSource.S3AmazonWebServices",\n "FileSource.AzBlobAzureBlobStorage",\n "FileSource.SSHSecureShell",\n "FileSource.SCPSecureCopyProtocol",\n "FileSource.SFTPSecureFileTransferProtocol",\n "FileSource.LocalFilesystemLimited",\n ],\n reader_options: Optional[str] = None,\n ):\n """Airbyte Source for File.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/file\n\n Args:\n name (str): The name of the destination.\n dataset_name (str): The Name of the final table to replicate this file into (should include letters, numbers dash and underscores only).\n format (str): The Format of the file which should be replicated (Warning: some formats may be experimental, please refer to the docs).\n reader_options (Optional[str]): This should be a string in JSON format. It depends on the chosen file format to provide additional options and tune its behavior.\n url (str): The URL path to access the file which should be replicated.\n provider (Union[FileSource.HTTPSPublicWeb, FileSource.GCSGoogleCloudStorage, FileSource.S3AmazonWebServices, FileSource.AzBlobAzureBlobStorage, FileSource.SSHSecureShell, FileSource.SCPSecureCopyProtocol, FileSource.SFTPSecureFileTransferProtocol, FileSource.LocalFilesystemLimited]): The storage Provider or Location of the file(s) which should be replicated.\n """\n self.dataset_name = check.str_param(dataset_name, "dataset_name")\n self.format = check.str_param(format, "format")\n self.reader_options = check.opt_str_param(reader_options, "reader_options")\n self.url = check.str_param(url, "url")\n self.provider = check.inst_param(\n provider,\n "provider",\n (\n FileSource.HTTPSPublicWeb,\n FileSource.GCSGoogleCloudStorage,\n FileSource.S3AmazonWebServices,\n FileSource.AzBlobAzureBlobStorage,\n FileSource.SSHSecureShell,\n FileSource.SCPSecureCopyProtocol,\n FileSource.SFTPSecureFileTransferProtocol,\n FileSource.LocalFilesystemLimited,\n ),\n )\n super().__init__("File", name)
\n\n\n
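A minimal construction sketch for FileSource with an S3 provider (any of the other provider classes could be substituted); the import path, bucket, and keys are assumptions/placeholders:

    from dagster_airbyte.managed.generated.sources import FileSource

    file_source = FileSource(
        name="my_file_source",
        dataset_name="my_dataset",
        format="csv",
        url="s3://my-bucket/path/to/file.csv",  # placeholder location
        provider=FileSource.S3AmazonWebServices(
            aws_access_key_id="access-key-placeholder",      # both optional for public buckets
            aws_secret_access_key="secret-key-placeholder",
        ),
        reader_options='{"sep": ","}',  # optional, format-specific JSON options
    )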
[docs]class GlassfrogSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_key: str):\n """Airbyte Source for Glassfrog.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/glassfrog\n\n Args:\n name (str): The name of the destination.\n api_key (str): API key provided by Glassfrog\n """\n self.api_key = check.str_param(api_key, "api_key")\n super().__init__("Glassfrog", name)
\n\n\n
[docs]class ChartmogulSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_key: str, start_date: str, interval: str):\n """Airbyte Source for Chartmogul.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/chartmogul\n\n Args:\n name (str): The name of the destination.\n api_key (str): Chartmogul API key\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. When feasible, any data before this date will not be replicated.\n interval (str): Some APIs such as Metrics require intervals to cluster data.\n """\n self.api_key = check.str_param(api_key, "api_key")\n self.start_date = check.str_param(start_date, "start_date")\n self.interval = check.str_param(interval, "interval")\n super().__init__("Chartmogul", name)
\n\n\n
[docs]class OrbSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n api_key: str,\n start_date: Optional[str] = None,\n lookback_window_days: Optional[int] = None,\n string_event_properties_keys: Optional[List[str]] = None,\n numeric_event_properties_keys: Optional[List[str]] = None,\n ):\n """Airbyte Source for Orb.\n\n Documentation can be found at https://docs.withorb.com/\n\n Args:\n name (str): The name of the destination.\n api_key (str): Orb API Key, issued from the Orb admin console.\n start_date (Optional[str]): UTC date and time in the format 2022-03-01T00:00:00Z. Any data with created_at before this data will not be synced.\n lookback_window_days (Optional[int]): When set to N, the connector will always refresh resources created within the past N days. By default, updated objects that are not newly created are not incrementally synced.\n string_event_properties_keys (Optional[List[str]]): Property key names to extract from all events, in order to enrich ledger entries corresponding to an event deduction.\n numeric_event_properties_keys (Optional[List[str]]): Property key names to extract from all events, in order to enrich ledger entries corresponding to an event deduction.\n """\n self.api_key = check.str_param(api_key, "api_key")\n self.start_date = check.opt_str_param(start_date, "start_date")\n self.lookback_window_days = check.opt_int_param(\n lookback_window_days, "lookback_window_days"\n )\n self.string_event_properties_keys = check.opt_nullable_list_param(\n string_event_properties_keys, "string_event_properties_keys", str\n )\n self.numeric_event_properties_keys = check.opt_nullable_list_param(\n numeric_event_properties_keys, "numeric_event_properties_keys", str\n )\n super().__init__("Orb", name)
\n\n\n
[docs]class CockroachdbSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n database: str,\n username: str,\n password: Optional[str] = None,\n jdbc_url_params: Optional[str] = None,\n ssl: Optional[bool] = None,\n ):\n """Airbyte Source for Cockroachdb.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/cockroachdb\n\n Args:\n name (str): The name of the destination.\n host (str): Hostname of the database.\n port (int): Port of the database.\n database (str): Name of the database.\n username (str): Username to use to access the database.\n password (Optional[str]): Password associated with the username.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (Eg. key1=value1&key2=value2&key3=value3). For more information read about JDBC URL parameters.\n ssl (Optional[bool]): Encrypt client/server communications for increased security.\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.database = check.str_param(database, "database")\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n self.ssl = check.opt_bool_param(ssl, "ssl")\n super().__init__("Cockroachdb", name)
\n\n\n
[docs]class ConfluenceSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_token: str, domain_name: str, email: str):\n """Airbyte Source for Confluence.\n\n Args:\n name (str): The name of the destination.\n api_token (str): Please follow the Jira confluence for generating an API token: https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/\n domain_name (str): Your Confluence domain name\n email (str): Your Confluence login email\n """\n self.api_token = check.str_param(api_token, "api_token")\n self.domain_name = check.str_param(domain_name, "domain_name")\n self.email = check.str_param(email, "email")\n super().__init__("Confluence", name)
\n\n\n
[docs]class PlaidSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n access_token: str,\n api_key: str,\n client_id: str,\n plaid_env: str,\n start_date: Optional[str] = None,\n ):\n """Airbyte Source for Plaid.\n\n Documentation can be found at https://plaid.com/docs/api/\n\n Args:\n name (str): The name of the destination.\n access_token (str): The end-user's Link access token.\n api_key (str): The Plaid API key to use to hit the API.\n client_id (str): The Plaid client id\n plaid_env (str): The Plaid environment\n start_date (Optional[str]): The date from which you'd like to replicate data for Plaid in the format YYYY-MM-DD. All data generated after this date will be replicated.\n """\n self.access_token = check.str_param(access_token, "access_token")\n self.api_key = check.str_param(api_key, "api_key")\n self.client_id = check.str_param(client_id, "client_id")\n self.plaid_env = check.str_param(plaid_env, "plaid_env")\n self.start_date = check.opt_str_param(start_date, "start_date")\n super().__init__("Plaid", name)
\n\n\n
[docs]class SnapchatMarketingSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n start_date: Optional[str] = None,\n end_date: Optional[str] = None,\n ):\n """Airbyte Source for Snapchat Marketing.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/snapchat-marketing\n\n Args:\n name (str): The name of the destination.\n client_id (str): The Client ID of your Snapchat developer application.\n client_secret (str): The Client Secret of your Snapchat developer application.\n refresh_token (str): Refresh Token to renew the expired Access Token.\n start_date (Optional[str]): Date in the format 2022-01-01. Any data before this date will not be replicated.\n end_date (Optional[str]): Date in the format 2017-01-25. Any data after this date will not be replicated.\n """\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")\n self.start_date = check.opt_str_param(start_date, "start_date")\n self.end_date = check.opt_str_param(end_date, "end_date")\n super().__init__("Snapchat Marketing", name)
\n\n\n
[docs]class MicrosoftTeamsSource(GeneratedAirbyteSource):\n
[docs] class AuthenticateViaMicrosoftOAuth20:\n
[docs] @public\n def __init__(\n self,\n tenant_id: str,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n auth_type: Optional[str] = None,\n ):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.tenant_id = check.str_param(tenant_id, "tenant_id")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] class AuthenticateViaMicrosoft:\n
[docs] @public\n def __init__(\n self,\n tenant_id: str,\n client_id: str,\n client_secret: str,\n auth_type: Optional[str] = None,\n ):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.tenant_id = check.str_param(tenant_id, "tenant_id")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n period: str,\n credentials: Union[\n "MicrosoftTeamsSource.AuthenticateViaMicrosoftOAuth20",\n "MicrosoftTeamsSource.AuthenticateViaMicrosoft",\n ],\n ):\n """Airbyte Source for Microsoft Teams.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/microsoft-teams\n\n Args:\n name (str): The name of the destination.\n period (str): Specifies the length of time over which the Team Device Report stream is aggregated. The supported values are: D7, D30, D90, and D180.\n credentials (Union[MicrosoftTeamsSource.AuthenticateViaMicrosoftOAuth20, MicrosoftTeamsSource.AuthenticateViaMicrosoft]): Choose how to authenticate to Microsoft\n """\n self.period = check.str_param(period, "period")\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (\n MicrosoftTeamsSource.AuthenticateViaMicrosoftOAuth20,\n MicrosoftTeamsSource.AuthenticateViaMicrosoft,\n ),\n )\n super().__init__("Microsoft Teams", name)
\n\n\n
[docs]class LeverHiringSource(GeneratedAirbyteSource):\n
[docs] class OAuthCredentials:\n
[docs] @public\n def __init__(\n self,\n refresh_token: str,\n auth_type: Optional[str] = None,\n client_id: Optional[str] = None,\n client_secret: Optional[str] = None,\n ):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.client_id = check.opt_str_param(client_id, "client_id")\n self.client_secret = check.opt_str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n credentials: "LeverHiringSource.OAuthCredentials",\n start_date: str,\n environment: Optional[str] = None,\n ):\n """Airbyte Source for Lever Hiring.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/lever-hiring\n\n Args:\n name (str): The name of the destination.\n credentials (LeverHiringSource.OAuthCredentials): Choose how to authenticate to Lever Hiring.\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. Note that it will be used only in the following incremental streams: comments, commits, and issues.\n environment (Optional[str]): The environment in which you'd like to replicate data for Lever. This is used to determine which Lever API endpoint to use.\n """\n self.credentials = check.inst_param(\n credentials, "credentials", LeverHiringSource.OAuthCredentials\n )\n self.start_date = check.str_param(start_date, "start_date")\n self.environment = check.opt_str_param(environment, "environment")\n super().__init__("Lever Hiring", name)
\n\n\n
[docs]class TwilioSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n account_sid: str,\n auth_token: str,\n start_date: str,\n lookback_window: Optional[int] = None,\n ):\n """Airbyte Source for Twilio.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/twilio\n\n Args:\n name (str): The name of the destination.\n account_sid (str): Twilio account SID\n auth_token (str): Twilio Auth Token.\n start_date (str): UTC date and time in the format 2020-10-01T00:00:00Z. Any data before this date will not be replicated.\n lookback_window (Optional[int]): How far into the past to look for records. (in minutes)\n """\n self.account_sid = check.str_param(account_sid, "account_sid")\n self.auth_token = check.str_param(auth_token, "auth_token")\n self.start_date = check.str_param(start_date, "start_date")\n self.lookback_window = check.opt_int_param(lookback_window, "lookback_window")\n super().__init__("Twilio", name)
\n\n\n
[docs]class StripeSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n account_id: str,\n client_secret: str,\n start_date: str,\n lookback_window_days: Optional[int] = None,\n slice_range: Optional[int] = None,\n ):\n r"""Airbyte Source for Stripe.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/stripe\n\n Args:\n name (str): The name of the source.\n account_id (str): Your Stripe account ID (starts with 'acct\\\\_', find yours here).\n client_secret (str): Stripe API key (usually starts with 'sk_live\\\\_'; find yours here).\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Only data generated after this date will be replicated.\n lookback_window_days (Optional[int]): When set, the connector will always re-export data from the past N days, where N is the value set here. This is useful if your data is frequently updated after creation. More info here\n slice_range (Optional[int]): The time increment used by the connector when requesting data from the Stripe API. The larger the value, the fewer requests will be made and the faster the sync will be; on the other hand, state is persisted less frequently.\n """\n self.account_id = check.str_param(account_id, "account_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.start_date = check.str_param(start_date, "start_date")\n self.lookback_window_days = check.opt_int_param(\n lookback_window_days, "lookback_window_days"\n )\n self.slice_range = check.opt_int_param(slice_range, "slice_range")\n super().__init__("Stripe", name)
\n\n\n
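A minimal construction sketch for StripeSource; the import path, account ID, and API key are assumptions/placeholders:

    from dagster_airbyte.managed.generated.sources import StripeSource

    stripe_source = StripeSource(
        name="my_stripe_source",
        account_id="acct_placeholder",        # placeholder
        client_secret="sk_live_placeholder",  # placeholder API key
        start_date="2017-01-25T00:00:00Z",
        lookback_window_days=7,               # optional: always re-export the last 7 days
        slice_range=30,                       # optional: larger increments mean fewer API requests
    )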
[docs]class Db2Source(GeneratedAirbyteSource):\n
[docs] class Unencrypted:\n
[docs] @public\n def __init__(\n self,\n ):\n self.encryption_method = "unencrypted"
\n\n
[docs] class TLSEncryptedVerifyCertificate:\n
[docs] @public\n def __init__(self, ssl_certificate: str, key_store_password: Optional[str] = None):\n self.encryption_method = "encrypted_verify_certificate"\n self.ssl_certificate = check.str_param(ssl_certificate, "ssl_certificate")\n self.key_store_password = check.opt_str_param(key_store_password, "key_store_password")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n db: str,\n username: str,\n password: str,\n encryption: Union["Db2Source.Unencrypted", "Db2Source.TLSEncryptedVerifyCertificate"],\n jdbc_url_params: Optional[str] = None,\n ):\n """Airbyte Source for Db2.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/db2\n\n Args:\n name (str): The name of the destination.\n host (str): Host of the Db2.\n port (int): Port of the database.\n db (str): Name of the database.\n username (str): Username to use to access the database.\n password (str): Password associated with the username.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).\n encryption (Union[Db2Source.Unencrypted, Db2Source.TLSEncryptedVerifyCertificate]): Encryption method to use when communicating with the database\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.db = check.str_param(db, "db")\n self.username = check.str_param(username, "username")\n self.password = check.str_param(password, "password")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n self.encryption = check.inst_param(\n encryption,\n "encryption",\n (Db2Source.Unencrypted, Db2Source.TLSEncryptedVerifyCertificate),\n )\n super().__init__("Db2", name)
\n\n\n
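A minimal construction sketch for Db2Source with an unencrypted connection (TLSEncryptedVerifyCertificate could be substituted); import path and connection details are assumptions/placeholders:

    from dagster_airbyte.managed.generated.sources import Db2Source

    db2_source = Db2Source(
        name="my_db2_source",
        host="db2.example.com",     # placeholder host
        port=50000,
        db="MYDB",
        username="db2user",
        password="db2password",     # placeholder
        encryption=Db2Source.Unencrypted(),
    )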
[docs]class SlackSource(GeneratedAirbyteSource):\n
[docs] class DefaultOAuth20Authorization:\n
[docs] @public\n def __init__(\n self,\n client_id: str,\n client_secret: str,\n access_token: str,\n refresh_token: Optional[str] = None,\n ):\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.access_token = check.str_param(access_token, "access_token")\n self.refresh_token = check.opt_str_param(refresh_token, "refresh_token")
\n\n
[docs] class APITokenCredentials:\n
[docs] @public\n def __init__(self, api_token: str):\n self.api_token = check.str_param(api_token, "api_token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n start_date: str,\n lookback_window: int,\n join_channels: bool,\n credentials: Union[\n "SlackSource.DefaultOAuth20Authorization", "SlackSource.APITokenCredentials"\n ],\n channel_filter: Optional[List[str]] = None,\n ):\n """Airbyte Source for Slack.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/slack\n\n Args:\n name (str): The name of the source.\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.\n lookback_window (int): How far into the past to look for messages in threads.\n join_channels (bool): Whether to join all channels or to sync data only from channels the bot is already in. If false, you'll need to manually add the bot to all the channels from which you'd like to sync messages.\n channel_filter (Optional[List[str]]): A list of channel names (without the leading '#' char) which limits the channels from which you'd like to sync. An empty list means no filter.\n credentials (Union[SlackSource.DefaultOAuth20Authorization, SlackSource.APITokenCredentials]): Choose how to authenticate into Slack.\n """\n self.start_date = check.str_param(start_date, "start_date")\n self.lookback_window = check.int_param(lookback_window, "lookback_window")\n self.join_channels = check.bool_param(join_channels, "join_channels")\n self.channel_filter = check.opt_nullable_list_param(channel_filter, "channel_filter", str)\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (SlackSource.DefaultOAuth20Authorization, SlackSource.APITokenCredentials),\n )\n super().__init__("Slack", name)
\n\n\n
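A minimal construction sketch for SlackSource using API-token credentials (the OAuth class could be substituted); import path, token, and channel names are assumptions/placeholders:

    from dagster_airbyte.managed.generated.sources import SlackSource

    slack_source = SlackSource(
        name="my_slack_source",
        start_date="2017-01-25T00:00:00Z",
        lookback_window=7,              # how far back to look for messages in threads
        join_channels=True,
        credentials=SlackSource.APITokenCredentials(
            api_token="xoxb-placeholder-token",  # placeholder
        ),
        channel_filter=["general", "engineering"],  # optional; empty/None means no filter
    )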
[docs]class RechargeSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, start_date: str, access_token: str):\n """Airbyte Source for Recharge.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/recharge\n\n Args:\n name (str): The name of the destination.\n start_date (str): The date from which you'd like to replicate data for Recharge API, in the format YYYY-MM-DDT00:00:00Z. Any data before this date will not be replicated.\n access_token (str): The value of the Access Token generated. See the docs for more information.\n """\n self.start_date = check.str_param(start_date, "start_date")\n self.access_token = check.str_param(access_token, "access_token")\n super().__init__("Recharge", name)
\n\n\n
[docs]class OpenweatherSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n lat: str,\n lon: str,\n appid: str,\n units: Optional[str] = None,\n lang: Optional[str] = None,\n ):\n """Airbyte Source for Openweather.\n\n Args:\n name (str): The name of the destination.\n lat (str): Latitude for which you want to get weather condition from. (min -90, max 90)\n lon (str): Longitude for which you want to get weather condition from. (min -180, max 180)\n appid (str): Your OpenWeather API Key. See here. The key is case sensitive.\n units (Optional[str]): Units of measurement. standard, metric and imperial units are available. If you do not use the units parameter, standard units will be applied by default.\n lang (Optional[str]): You can use lang parameter to get the output in your language. The contents of the description field will be translated. See here for the list of supported languages.\n """\n self.lat = check.str_param(lat, "lat")\n self.lon = check.str_param(lon, "lon")\n self.appid = check.str_param(appid, "appid")\n self.units = check.opt_str_param(units, "units")\n self.lang = check.opt_str_param(lang, "lang")\n super().__init__("Openweather", name)
\n\n\n
[docs]class RetentlySource(GeneratedAirbyteSource):\n
[docs] class AuthenticateViaRetentlyOAuth:\n
[docs] @public\n def __init__(\n self,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n auth_type: Optional[str] = None,\n ):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] class AuthenticateWithAPIToken:\n
[docs] @public\n def __init__(self, api_key: str, auth_type: Optional[str] = None):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.api_key = check.str_param(api_key, "api_key")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n credentials: Union[\n "RetentlySource.AuthenticateViaRetentlyOAuth", "RetentlySource.AuthenticateWithAPIToken"\n ],\n ):\n """Airbyte Source for Retently.\n\n Args:\n name (str): The name of the destination.\n credentials (Union[RetentlySource.AuthenticateViaRetentlyOAuth, RetentlySource.AuthenticateWithAPIToken]): Choose how to authenticate to Retently\n """\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (RetentlySource.AuthenticateViaRetentlyOAuth, RetentlySource.AuthenticateWithAPIToken),\n )\n super().__init__("Retently", name)
\n\n\n
[docs]class ScaffoldSourceHttpSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, TODO: str):\n """Airbyte Source for Scaffold Source Http.\n\n Args:\n name (str): The name of the destination.\n TODO (str): describe me\n """\n self.TODO = check.str_param(TODO, "TODO")\n super().__init__("Scaffold Source Http", name)
\n\n\n
[docs]class YandexMetricaSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, auth_token: str, counter_id: str, start_date: str, end_date: str):\n """Airbyte Source for Yandex Metrica.\n\n Args:\n name (str): The name of the destination.\n auth_token (str): Your Yandex Metrica API access token\n counter_id (str): Counter ID\n start_date (str): UTC date and time in the format YYYY-MM-DD.\n end_date (str): UTC date and time in the format YYYY-MM-DD.\n """\n self.auth_token = check.str_param(auth_token, "auth_token")\n self.counter_id = check.str_param(counter_id, "counter_id")\n self.start_date = check.str_param(start_date, "start_date")\n self.end_date = check.str_param(end_date, "end_date")\n super().__init__("Yandex Metrica", name)
\n\n\n
[docs]class TalkdeskExploreSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n start_date: str,\n auth_url: str,\n api_key: str,\n timezone: Optional[str] = None,\n ):\n """Airbyte Source for Talkdesk Explore.\n\n Args:\n name (str): The name of the destination.\n start_date (str): The date from which you'd like to replicate data for Talkdesk Explore API, in the format YYYY-MM-DDT00:00:00. All data generated after this date will be replicated.\n timezone (Optional[str]): Timezone to use when generating reports. Only IANA timezones are supported (https://nodatime.org/TimeZones)\n auth_url (str): Talkdesk Auth URL. Only 'client_credentials' auth type supported at the moment.\n api_key (str): Talkdesk API key.\n """\n self.start_date = check.str_param(start_date, "start_date")\n self.timezone = check.opt_str_param(timezone, "timezone")\n self.auth_url = check.str_param(auth_url, "auth_url")\n self.api_key = check.str_param(api_key, "api_key")\n super().__init__("Talkdesk Explore", name)
\n\n\n
[docs]class ChargifySource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_key: str, domain: str):\n """Airbyte Source for Chargify.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/chargify\n\n Args:\n name (str): The name of the destination.\n api_key (str): Chargify API Key.\n domain (str): Chargify domain. Normally this domain follows the following format companyname.chargify.com\n """\n self.api_key = check.str_param(api_key, "api_key")\n self.domain = check.str_param(domain, "domain")\n super().__init__("Chargify", name)
\n\n\n
[docs]class RkiCovidSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, start_date: str):\n """Airbyte Source for Rki Covid.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/rki-covid\n\n Args:\n name (str): The name of the destination.\n start_date (str): UTC date in the format 2017-01-25. Any data before this date will not be replicated.\n """\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Rki Covid", name)
\n\n\n
[docs]class PostgresSource(GeneratedAirbyteSource):\n
[docs] class Disable:\n
[docs] @public\n def __init__(\n self,\n ):\n self.mode = "disable"
\n\n
[docs] class Allow:\n
[docs] @public\n def __init__(\n self,\n ):\n self.mode = "allow"
\n\n
[docs] class Prefer:\n
[docs] @public\n def __init__(\n self,\n ):\n self.mode = "prefer"
\n\n
[docs] class Require:\n
[docs] @public\n def __init__(\n self,\n ):\n self.mode = "require"
\n\n
[docs] class VerifyCa:\n
[docs] @public\n def __init__(\n self,\n ca_certificate: str,\n client_certificate: Optional[str] = None,\n client_key: Optional[str] = None,\n client_key_password: Optional[str] = None,\n ):\n self.mode = "verify-ca"\n self.ca_certificate = check.str_param(ca_certificate, "ca_certificate")\n self.client_certificate = check.opt_str_param(client_certificate, "client_certificate")\n self.client_key = check.opt_str_param(client_key, "client_key")\n self.client_key_password = check.opt_str_param(\n client_key_password, "client_key_password"\n )
\n\n
[docs] class VerifyFull:\n
[docs] @public\n def __init__(\n self,\n ca_certificate: str,\n client_certificate: Optional[str] = None,\n client_key: Optional[str] = None,\n client_key_password: Optional[str] = None,\n ):\n self.mode = "verify-full"\n self.ca_certificate = check.str_param(ca_certificate, "ca_certificate")\n self.client_certificate = check.opt_str_param(client_certificate, "client_certificate")\n self.client_key = check.opt_str_param(client_key, "client_key")\n self.client_key_password = check.opt_str_param(\n client_key_password, "client_key_password"\n )
\n\n
[docs] class Standard:\n
[docs] @public\n def __init__(\n self,\n ):\n self.method = "Standard"
\n\n
[docs] class LogicalReplicationCDC:\n
[docs] @public\n def __init__(\n self,\n replication_slot: str,\n publication: str,\n plugin: Optional[str] = None,\n initial_waiting_seconds: Optional[int] = None,\n ):\n self.method = "CDC"\n self.plugin = check.opt_str_param(plugin, "plugin")\n self.replication_slot = check.str_param(replication_slot, "replication_slot")\n self.publication = check.str_param(publication, "publication")\n self.initial_waiting_seconds = check.opt_int_param(\n initial_waiting_seconds, "initial_waiting_seconds"\n )
\n\n
[docs] class NoTunnel:\n
[docs] @public\n def __init__(\n self,\n ):\n self.tunnel_method = "NO_TUNNEL"
\n\n
[docs] class SSHKeyAuthentication:\n
[docs] @public\n def __init__(self, tunnel_host: str, tunnel_port: int, tunnel_user: str, ssh_key: str):\n self.tunnel_method = "SSH_KEY_AUTH"\n self.tunnel_host = check.str_param(tunnel_host, "tunnel_host")\n self.tunnel_port = check.int_param(tunnel_port, "tunnel_port")\n self.tunnel_user = check.str_param(tunnel_user, "tunnel_user")\n self.ssh_key = check.str_param(ssh_key, "ssh_key")
\n\n
[docs] class PasswordAuthentication:\n
[docs] @public\n def __init__(\n self, tunnel_host: str, tunnel_port: int, tunnel_user: str, tunnel_user_password: str\n ):\n self.tunnel_method = "SSH_PASSWORD_AUTH"\n self.tunnel_host = check.str_param(tunnel_host, "tunnel_host")\n self.tunnel_port = check.int_param(tunnel_port, "tunnel_port")\n self.tunnel_user = check.str_param(tunnel_user, "tunnel_user")\n self.tunnel_user_password = check.str_param(\n tunnel_user_password, "tunnel_user_password"\n )
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n database: str,\n username: str,\n ssl_mode: Union[\n "PostgresSource.Disable",\n "PostgresSource.Allow",\n "PostgresSource.Prefer",\n "PostgresSource.Require",\n "PostgresSource.VerifyCa",\n "PostgresSource.VerifyFull",\n ],\n replication_method: Union[\n "PostgresSource.Standard", "PostgresSource.LogicalReplicationCDC"\n ],\n tunnel_method: Union[\n "PostgresSource.NoTunnel",\n "PostgresSource.SSHKeyAuthentication",\n "PostgresSource.PasswordAuthentication",\n ],\n schemas: Optional[List[str]] = None,\n password: Optional[str] = None,\n jdbc_url_params: Optional[str] = None,\n ssl: Optional[bool] = None,\n ):\n """Airbyte Source for Postgres.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/postgres\n\n Args:\n name (str): The name of the destination.\n host (str): Hostname of the database.\n port (int): Port of the database.\n database (str): Name of the database.\n schemas (Optional[List[str]]): The list of schemas (case sensitive) to sync from. Defaults to public.\n username (str): Username to access the database.\n password (Optional[str]): Password associated with the username.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (Eg. key1=value1&key2=value2&key3=value3). For more information read about JDBC URL parameters.\n ssl (Optional[bool]): Encrypt data using SSL. When activating SSL, please select one of the connection modes.\n ssl_mode (Union[PostgresSource.Disable, PostgresSource.Allow, PostgresSource.Prefer, PostgresSource.Require, PostgresSource.VerifyCa, PostgresSource.VerifyFull]): SSL connection modes. disable - Disables encryption of communication between Airbyte and source database allow - Enables encryption only when required by the source database prefer - allows unencrypted connection only if the source database does not support encryption require - Always require encryption. If the source database server does not support encryption, connection will fail verify-ca - Always require encryption and verifies that the source database server has a valid SSL certificate verify-full - This is the most secure mode. 
Always require encryption and verifies the identity of the source database server Read more in the docs.\n replication_method (Union[PostgresSource.Standard, PostgresSource.LogicalReplicationCDC]): Replication method for extracting data from the database.\n tunnel_method (Union[PostgresSource.NoTunnel, PostgresSource.SSHKeyAuthentication, PostgresSource.PasswordAuthentication]): Whether to initiate an SSH tunnel before connecting to the database, and if so, which kind of authentication to use.\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.database = check.str_param(database, "database")\n self.schemas = check.opt_nullable_list_param(schemas, "schemas", str)\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n self.ssl = check.opt_bool_param(ssl, "ssl")\n self.ssl_mode = check.inst_param(\n ssl_mode,\n "ssl_mode",\n (\n PostgresSource.Disable,\n PostgresSource.Allow,\n PostgresSource.Prefer,\n PostgresSource.Require,\n PostgresSource.VerifyCa,\n PostgresSource.VerifyFull,\n ),\n )\n self.replication_method = check.inst_param(\n replication_method,\n "replication_method",\n (PostgresSource.Standard, PostgresSource.LogicalReplicationCDC),\n )\n self.tunnel_method = check.inst_param(\n tunnel_method,\n "tunnel_method",\n (\n PostgresSource.NoTunnel,\n PostgresSource.SSHKeyAuthentication,\n PostgresSource.PasswordAuthentication,\n ),\n )\n super().__init__("Postgres", name)
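A minimal usage sketch for the PostgresSource constructor above, using placeholder connection values. The import path is an assumption, and so is the no-argument PostgresSource.Prefer() SSL mode (its definition appears earlier in this module and is not shown here).

from dagster_airbyte.managed.generated.sources import PostgresSource  # assumed import path

postgres_source = PostgresSource(
    name="my_postgres_source",
    host="db.example.com",  # placeholder host
    port=5432,
    database="analytics",
    username="airbyte_user",
    password="...",
    ssl_mode=PostgresSource.Prefer(),  # assumption: no-argument SSL mode class defined earlier in this module
    replication_method=PostgresSource.LogicalReplicationCDC(
        replication_slot="airbyte_slot",
        publication="airbyte_publication",
    ),
    tunnel_method=PostgresSource.NoTunnel(),
)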
\n\n\n
[docs]class TrelloSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n token: str,\n key: str,\n start_date: str,\n board_ids: Optional[List[str]] = None,\n ):\n """Airbyte Source for Trello.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/trello\n\n Args:\n name (str): The name of the destination.\n token (str): Trello v API token. See the docs for instructions on how to generate it.\n key (str): Trello API key. See the docs for instructions on how to generate it.\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.\n board_ids (Optional[List[str]]): IDs of the boards to replicate data from. If left empty, data from all boards to which you have access will be replicated.\n """\n self.token = check.str_param(token, "token")\n self.key = check.str_param(key, "key")\n self.start_date = check.str_param(start_date, "start_date")\n self.board_ids = check.opt_nullable_list_param(board_ids, "board_ids", str)\n super().__init__("Trello", name)
\n\n\n
[docs]class PrestashopSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, url: str, access_key: str):\n """Airbyte Source for Prestashop.\n\n Args:\n name (str): The name of the destination.\n url (str): Shop URL without trailing slash (domain name or IP address)\n access_key (str): Your PrestaShop access key. See the docs for info on how to obtain this.\n """\n self.url = check.str_param(url, "url")\n self.access_key = check.str_param(access_key, "access_key")\n super().__init__("Prestashop", name)
\n\n\n
[docs]class PaystackSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n secret_key: str,\n start_date: str,\n lookback_window_days: Optional[int] = None,\n ):\n r"""Airbyte Source for Paystack.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/paystack\n\n Args:\n name (str): The name of the destination.\n secret_key (str): The Paystack API key (usually starts with 'sk_live\\\\_'; find yours here).\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.\n lookback_window_days (Optional[int]): When set, the connector will always reload data from the past N days, where N is the value set here. This is useful if your data is updated after creation.\n """\n self.secret_key = check.str_param(secret_key, "secret_key")\n self.start_date = check.str_param(start_date, "start_date")\n self.lookback_window_days = check.opt_int_param(\n lookback_window_days, "lookback_window_days"\n )\n super().__init__("Paystack", name)
\n\n\n
[docs]class S3Source(GeneratedAirbyteSource):\n
[docs] class CSV:\n
[docs] @public\n def __init__(\n self,\n filetype: Optional[str] = None,\n delimiter: Optional[str] = None,\n infer_datatypes: Optional[bool] = None,\n quote_char: Optional[str] = None,\n escape_char: Optional[str] = None,\n encoding: Optional[str] = None,\n double_quote: Optional[bool] = None,\n newlines_in_values: Optional[bool] = None,\n additional_reader_options: Optional[str] = None,\n advanced_options: Optional[str] = None,\n block_size: Optional[int] = None,\n ):\n self.filetype = check.opt_str_param(filetype, "filetype")\n self.delimiter = check.opt_str_param(delimiter, "delimiter")\n self.infer_datatypes = check.opt_bool_param(infer_datatypes, "infer_datatypes")\n self.quote_char = check.opt_str_param(quote_char, "quote_char")\n self.escape_char = check.opt_str_param(escape_char, "escape_char")\n self.encoding = check.opt_str_param(encoding, "encoding")\n self.double_quote = check.opt_bool_param(double_quote, "double_quote")\n self.newlines_in_values = check.opt_bool_param(newlines_in_values, "newlines_in_values")\n self.additional_reader_options = check.opt_str_param(\n additional_reader_options, "additional_reader_options"\n )\n self.advanced_options = check.opt_str_param(advanced_options, "advanced_options")\n self.block_size = check.opt_int_param(block_size, "block_size")
\n\n
[docs] class Parquet:\n
[docs] @public\n def __init__(\n self,\n filetype: Optional[str] = None,\n columns: Optional[List[str]] = None,\n batch_size: Optional[int] = None,\n buffer_size: Optional[int] = None,\n ):\n self.filetype = check.opt_str_param(filetype, "filetype")\n self.columns = check.opt_nullable_list_param(columns, "columns", str)\n self.batch_size = check.opt_int_param(batch_size, "batch_size")\n self.buffer_size = check.opt_int_param(buffer_size, "buffer_size")
\n\n
[docs] class Avro:\n
[docs] @public\n def __init__(self, filetype: Optional[str] = None):\n self.filetype = check.opt_str_param(filetype, "filetype")
\n\n
[docs] class Jsonl:\n
[docs] @public\n def __init__(\n self,\n filetype: Optional[str] = None,\n newlines_in_values: Optional[bool] = None,\n unexpected_field_behavior: Optional[str] = None,\n block_size: Optional[int] = None,\n ):\n self.filetype = check.opt_str_param(filetype, "filetype")\n self.newlines_in_values = check.opt_bool_param(newlines_in_values, "newlines_in_values")\n self.unexpected_field_behavior = check.opt_str_param(\n unexpected_field_behavior, "unexpected_field_behavior"\n )\n self.block_size = check.opt_int_param(block_size, "block_size")
\n\n
[docs] class S3AmazonWebServices:\n
[docs] @public\n def __init__(\n self,\n bucket: str,\n aws_access_key_id: Optional[str] = None,\n aws_secret_access_key: Optional[str] = None,\n path_prefix: Optional[str] = None,\n endpoint: Optional[str] = None,\n ):\n self.bucket = check.str_param(bucket, "bucket")\n self.aws_access_key_id = check.opt_str_param(aws_access_key_id, "aws_access_key_id")\n self.aws_secret_access_key = check.opt_str_param(\n aws_secret_access_key, "aws_secret_access_key"\n )\n self.path_prefix = check.opt_str_param(path_prefix, "path_prefix")\n self.endpoint = check.opt_str_param(endpoint, "endpoint")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n dataset: str,\n path_pattern: str,\n format: Union["S3Source.CSV", "S3Source.Parquet", "S3Source.Avro", "S3Source.Jsonl"],\n provider: "S3Source.S3AmazonWebServices",\n schema: Optional[str] = None,\n ):\n """Airbyte Source for S3.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/s3\n\n Args:\n name (str): The name of the destination.\n dataset (str): The name of the stream you would like this source to output. Can contain letters, numbers, or underscores.\n path_pattern (str): A regular expression which tells the connector which files to replicate. All files which match this pattern will be replicated. Use | to separate multiple patterns. See this page to understand pattern syntax (GLOBSTAR and SPLIT flags are enabled). Use pattern ** to pick up all files.\n format (Union[S3Source.CSV, S3Source.Parquet, S3Source.Avro, S3Source.Jsonl]): The format of the files you'd like to replicate\n schema (Optional[str]): Optionally provide a schema to enforce, as a valid JSON string. Ensure this is a mapping of { "column" : "type" }, where types are valid JSON Schema datatypes. Leave as {} to auto-infer the schema.\n provider (S3Source.S3AmazonWebServices): Use this to load files from S3 or S3-compatible services\n """\n self.dataset = check.str_param(dataset, "dataset")\n self.path_pattern = check.str_param(path_pattern, "path_pattern")\n self.format = check.inst_param(\n format, "format", (S3Source.CSV, S3Source.Parquet, S3Source.Avro, S3Source.Jsonl)\n )\n self.schema = check.opt_str_param(schema, "schema")\n self.provider = check.inst_param(provider, "provider", S3Source.S3AmazonWebServices)\n super().__init__("S3", name)
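A minimal usage sketch for the S3Source constructor above, combining the nested CSV format and S3AmazonWebServices provider classes defined earlier in this class; the import path and all values are placeholders.

from dagster_airbyte.managed.generated.sources import S3Source  # assumed import path

s3_source = S3Source(
    name="my_s3_source",
    dataset="events",
    path_pattern="**/*.csv",  # replicate every CSV file under the prefix
    format=S3Source.CSV(delimiter=","),
    provider=S3Source.S3AmazonWebServices(
        bucket="my-data-bucket",
        aws_access_key_id="...",
        aws_secret_access_key="...",
    ),
)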
\n\n\n
[docs]class SnowflakeSource(GeneratedAirbyteSource):\n
[docs] class OAuth20:\n
[docs] @public\n def __init__(\n self,\n client_id: str,\n client_secret: str,\n access_token: Optional[str] = None,\n refresh_token: Optional[str] = None,\n ):\n self.auth_type = "OAuth"\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.access_token = check.opt_str_param(access_token, "access_token")\n self.refresh_token = check.opt_str_param(refresh_token, "refresh_token")
\n\n
[docs] class UsernameAndPassword:\n
[docs] @public\n def __init__(self, username: str, password: str):\n self.auth_type = "username/password"\n self.username = check.str_param(username, "username")\n self.password = check.str_param(password, "password")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n credentials: Union["SnowflakeSource.OAuth20", "SnowflakeSource.UsernameAndPassword"],\n host: str,\n role: str,\n warehouse: str,\n database: str,\n schema: str,\n jdbc_url_params: Optional[str] = None,\n ):\n """Airbyte Source for Snowflake.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/snowflake\n\n Args:\n name (str): The name of the destination.\n host (str): The host domain of the snowflake instance (must include the account, region, cloud environment, and end with snowflakecomputing.com).\n role (str): The role you created for Airbyte to access Snowflake.\n warehouse (str): The warehouse you created for Airbyte to access data.\n database (str): The database you created for Airbyte to access data.\n schema (str): The source Snowflake schema tables.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).\n """\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (SnowflakeSource.OAuth20, SnowflakeSource.UsernameAndPassword),\n )\n self.host = check.str_param(host, "host")\n self.role = check.str_param(role, "role")\n self.warehouse = check.str_param(warehouse, "warehouse")\n self.database = check.str_param(database, "database")\n self.schema = check.str_param(schema, "schema")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n super().__init__("Snowflake", name)
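A minimal usage sketch for the SnowflakeSource constructor above, authenticating with the nested UsernameAndPassword credentials class; the import path and all values are placeholders.

from dagster_airbyte.managed.generated.sources import SnowflakeSource  # assumed import path

snowflake_source = SnowflakeSource(
    name="my_snowflake_source",
    credentials=SnowflakeSource.UsernameAndPassword(
        username="AIRBYTE_USER",
        password="...",
    ),
    host="myaccount.us-east-1.snowflakecomputing.com",  # placeholder account host
    role="AIRBYTE_ROLE",
    warehouse="AIRBYTE_WAREHOUSE",
    database="ANALYTICS",
    schema="PUBLIC",
)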
\n\n\n
[docs]class AmplitudeSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_key: str, secret_key: str, start_date: str):\n """Airbyte Source for Amplitude.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/amplitude\n\n Args:\n name (str): The name of the destination.\n api_key (str): Amplitude API Key. See the setup guide for more information on how to obtain this key.\n secret_key (str): Amplitude Secret Key. See the setup guide for more information on how to obtain this key.\n start_date (str): UTC date and time in the format 2021-01-25T00:00:00Z. Any data before this date will not be replicated.\n """\n self.api_key = check.str_param(api_key, "api_key")\n self.secret_key = check.str_param(secret_key, "secret_key")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Amplitude", name)
\n\n\n
[docs]class PosthogSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, start_date: str, api_key: str, base_url: Optional[str] = None):\n """Airbyte Source for Posthog.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/posthog\n\n Args:\n name (str): The name of the destination.\n start_date (str): The date from which you'd like to replicate the data. Any data before this date will not be replicated.\n api_key (str): API Key. See the docs for information on how to generate this key.\n base_url (Optional[str]): Base PostHog url. Defaults to PostHog Cloud (https://app.posthog.com).\n """\n self.start_date = check.str_param(start_date, "start_date")\n self.api_key = check.str_param(api_key, "api_key")\n self.base_url = check.opt_str_param(base_url, "base_url")\n super().__init__("Posthog", name)
\n\n\n
[docs]class PaypalTransactionSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n start_date: str,\n is_sandbox: bool,\n client_id: Optional[str] = None,\n client_secret: Optional[str] = None,\n refresh_token: Optional[str] = None,\n ):\n """Airbyte Source for Paypal Transaction.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/paypal-transactions\n\n Args:\n name (str): The name of the destination.\n client_id (Optional[str]): The Client ID of your Paypal developer application.\n client_secret (Optional[str]): The Client Secret of your Paypal developer application.\n refresh_token (Optional[str]): The key to refresh the expired access token.\n start_date (str): Start Date for data extraction in ISO format. Date must be in range from 3 years till 12 hrs before present time.\n is_sandbox (bool): Determines whether to use the sandbox or production environment.\n """\n self.client_id = check.opt_str_param(client_id, "client_id")\n self.client_secret = check.opt_str_param(client_secret, "client_secret")\n self.refresh_token = check.opt_str_param(refresh_token, "refresh_token")\n self.start_date = check.str_param(start_date, "start_date")\n self.is_sandbox = check.bool_param(is_sandbox, "is_sandbox")\n super().__init__("Paypal Transaction", name)
\n\n\n
[docs]class MssqlSource(GeneratedAirbyteSource):\n
[docs] class Unencrypted:\n
[docs] @public\n def __init__(\n self,\n ):\n self.ssl_method = "unencrypted"
\n\n
[docs] class EncryptedTrustServerCertificate:\n
[docs] @public\n def __init__(\n self,\n ):\n self.ssl_method = "encrypted_trust_server_certificate"
\n\n
[docs] class EncryptedVerifyCertificate:\n
[docs] @public\n def __init__(self, hostNameInCertificate: Optional[str] = None):\n self.ssl_method = "encrypted_verify_certificate"\n self.hostNameInCertificate = check.opt_str_param(\n hostNameInCertificate, "hostNameInCertificate"\n )
\n\n
[docs] class Standard:\n
[docs] @public\n def __init__(\n self,\n ):\n self.method = "STANDARD"
\n\n
[docs] class LogicalReplicationCDC:\n
[docs] @public\n def __init__(\n self, data_to_sync: Optional[str] = None, snapshot_isolation: Optional[str] = None\n ):\n self.method = "CDC"\n self.data_to_sync = check.opt_str_param(data_to_sync, "data_to_sync")\n self.snapshot_isolation = check.opt_str_param(snapshot_isolation, "snapshot_isolation")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n database: str,\n username: str,\n ssl_method: Union[\n "MssqlSource.Unencrypted",\n "MssqlSource.EncryptedTrustServerCertificate",\n "MssqlSource.EncryptedVerifyCertificate",\n ],\n replication_method: Union["MssqlSource.Standard", "MssqlSource.LogicalReplicationCDC"],\n schemas: Optional[List[str]] = None,\n password: Optional[str] = None,\n jdbc_url_params: Optional[str] = None,\n ):\n """Airbyte Source for Mssql.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/mssql\n\n Args:\n name (str): The name of the destination.\n host (str): The hostname of the database.\n port (int): The port of the database.\n database (str): The name of the database.\n schemas (Optional[List[str]]): The list of schemas to sync from. Defaults to user. Case sensitive.\n username (str): The username which is used to access the database.\n password (Optional[str]): The password associated with the username.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).\n ssl_method (Union[MssqlSource.Unencrypted, MssqlSource.EncryptedTrustServerCertificate, MssqlSource.EncryptedVerifyCertificate]): The encryption method which is used when communicating with the database.\n replication_method (Union[MssqlSource.Standard, MssqlSource.LogicalReplicationCDC]): The replication method used for extracting data from the database. STANDARD replication requires no setup on the DB side but will not be able to represent deletions incrementally. CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself.\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.database = check.str_param(database, "database")\n self.schemas = check.opt_nullable_list_param(schemas, "schemas", str)\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n self.ssl_method = check.inst_param(\n ssl_method,\n "ssl_method",\n (\n MssqlSource.Unencrypted,\n MssqlSource.EncryptedTrustServerCertificate,\n MssqlSource.EncryptedVerifyCertificate,\n ),\n )\n self.replication_method = check.inst_param(\n replication_method,\n "replication_method",\n (MssqlSource.Standard, MssqlSource.LogicalReplicationCDC),\n )\n super().__init__("Mssql", name)
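A minimal usage sketch for the MssqlSource constructor above, pairing the no-argument EncryptedTrustServerCertificate SSL method with STANDARD replication; the import path and connection values are placeholders.

from dagster_airbyte.managed.generated.sources import MssqlSource  # assumed import path

mssql_source = MssqlSource(
    name="my_mssql_source",
    host="sqlserver.example.com",
    port=1433,
    database="analytics",
    username="airbyte_user",
    password="...",
    ssl_method=MssqlSource.EncryptedTrustServerCertificate(),
    replication_method=MssqlSource.Standard(),
)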
\n\n\n
[docs]class ZohoCrmSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n dc_region: str,\n environment: str,\n edition: str,\n start_datetime: Optional[str] = None,\n ):\n """Airbyte Source for Zoho Crm.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/zoho-crm\n\n Args:\n name (str): The name of the destination.\n client_id (str): OAuth2.0 Client ID\n client_secret (str): OAuth2.0 Client Secret\n refresh_token (str): OAuth2.0 Refresh Token\n dc_region (str): Please choose the region of your Data Center location. More info by this Link\n environment (str): Please choose the environment\n start_datetime (Optional[str]): ISO 8601, for instance: `YYYY-MM-DD`, `YYYY-MM-DD HH:MM:SS+HH:MM`\n edition (str): Choose your Edition of Zoho CRM to determine API Concurrency Limits\n """\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")\n self.dc_region = check.str_param(dc_region, "dc_region")\n self.environment = check.str_param(environment, "environment")\n self.start_datetime = check.opt_str_param(start_datetime, "start_datetime")\n self.edition = check.str_param(edition, "edition")\n super().__init__("Zoho Crm", name)
\n\n\n
[docs]class RedshiftSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n database: str,\n username: str,\n password: str,\n schemas: Optional[List[str]] = None,\n jdbc_url_params: Optional[str] = None,\n ):\n """Airbyte Source for Redshift.\n\n Documentation can be found at https://docs.airbyte.com/integrations/destinations/redshift\n\n Args:\n name (str): The name of the destination.\n host (str): Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com).\n port (int): Port of the database.\n database (str): Name of the database.\n schemas (Optional[List[str]]): The list of schemas to sync from. Specify one or more explicitly or keep empty to process all schemas. Schema names are case sensitive.\n username (str): Username to use to access the database.\n password (str): Password associated with the username.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.database = check.str_param(database, "database")\n self.schemas = check.opt_nullable_list_param(schemas, "schemas", str)\n self.username = check.str_param(username, "username")\n self.password = check.str_param(password, "password")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n super().__init__("Redshift", name)
\n\n\n
[docs]class AsanaSource(GeneratedAirbyteSource):\n
[docs] class PATCredentials:\n
[docs] @public\n def __init__(self, personal_access_token: str):\n self.personal_access_token = check.str_param(\n personal_access_token, "personal_access_token"\n )
\n\n
[docs] class OAuthCredentials:\n
[docs] @public\n def __init__(self, client_id: str, client_secret: str, refresh_token: str):\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n credentials: Union["AsanaSource.PATCredentials", "AsanaSource.OAuthCredentials"],\n ):\n """Airbyte Source for Asana.\n\n Args:\n name (str): The name of the destination.\n credentials (Union[AsanaSource.PATCredentials, AsanaSource.OAuthCredentials]): Choose how to authenticate to Asana\n """\n self.credentials = check.inst_param(\n credentials, "credentials", (AsanaSource.PATCredentials, AsanaSource.OAuthCredentials)\n )\n super().__init__("Asana", name)
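A minimal usage sketch for the AsanaSource constructor above, authenticating with the nested PATCredentials class; the import path and token value are placeholders.

from dagster_airbyte.managed.generated.sources import AsanaSource  # assumed import path

asana_source = AsanaSource(
    name="my_asana_source",
    credentials=AsanaSource.PATCredentials(personal_access_token="..."),
)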
\n\n\n
[docs]class SmartsheetsSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n access_token: str,\n spreadsheet_id: str,\n start_datetime: Optional[str] = None,\n ):\n """Airbyte Source for Smartsheets.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/smartsheets\n\n Args:\n name (str): The name of the destination.\n access_token (str): The access token to use for accessing your data from Smartsheets. This access token must be generated by a user with at least read access to the data you'd like to replicate. Generate an access token in the Smartsheets main menu by clicking Account > Apps & Integrations > API Access. See the setup guide for information on how to obtain this token.\n spreadsheet_id (str): The spreadsheet ID. Find it by opening the spreadsheet then navigating to File > Properties\n start_datetime (Optional[str]): Only rows modified after this date/time will be replicated. This should be an ISO 8601 string, for instance: `2000-01-01T13:00:00`\n """\n self.access_token = check.str_param(access_token, "access_token")\n self.spreadsheet_id = check.str_param(spreadsheet_id, "spreadsheet_id")\n self.start_datetime = check.opt_str_param(start_datetime, "start_datetime")\n super().__init__("Smartsheets", name)
\n\n\n
[docs]class MailchimpSource(GeneratedAirbyteSource):\n
[docs] class OAuth20:\n
[docs] @public\n def __init__(\n self,\n access_token: str,\n client_id: Optional[str] = None,\n client_secret: Optional[str] = None,\n ):\n self.auth_type = "oauth2.0"\n self.client_id = check.opt_str_param(client_id, "client_id")\n self.client_secret = check.opt_str_param(client_secret, "client_secret")\n self.access_token = check.str_param(access_token, "access_token")
\n\n
[docs] class APIKey:\n
[docs] @public\n def __init__(self, apikey: str):\n self.auth_type = "apikey"\n self.apikey = check.str_param(apikey, "apikey")
\n\n
[docs] @public\n def __init__(\n self, name: str, credentials: Union["MailchimpSource.OAuth20", "MailchimpSource.APIKey"]\n ):\n """Airbyte Source for Mailchimp.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/mailchimp\n\n Args:\n name (str): The name of the destination.\n\n """\n self.credentials = check.inst_param(\n credentials, "credentials", (MailchimpSource.OAuth20, MailchimpSource.APIKey)\n )\n super().__init__("Mailchimp", name)
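A minimal usage sketch for the MailchimpSource constructor above, using the nested APIKey credentials class; the import path and key value are placeholders.

from dagster_airbyte.managed.generated.sources import MailchimpSource  # assumed import path

mailchimp_source = MailchimpSource(
    name="my_mailchimp_source",
    credentials=MailchimpSource.APIKey(apikey="..."),
)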
\n\n\n
[docs]class SentrySource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n auth_token: str,\n organization: str,\n project: str,\n hostname: Optional[str] = None,\n discover_fields: Optional[List[str]] = None,\n ):\n """Airbyte Source for Sentry.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/sentry\n\n Args:\n name (str): The name of the destination.\n auth_token (str): Log into Sentry and then create authentication tokens.For self-hosted, you can find or create authentication tokens by visiting "{instance_url_prefix}/settings/account/api/auth-tokens/"\n hostname (Optional[str]): Host name of Sentry API server.For self-hosted, specify your host name here. Otherwise, leave it empty.\n organization (str): The slug of the organization the groups belong to.\n project (str): The name (slug) of the Project you want to sync.\n discover_fields (Optional[List[str]]): Fields to retrieve when fetching discover events\n """\n self.auth_token = check.str_param(auth_token, "auth_token")\n self.hostname = check.opt_str_param(hostname, "hostname")\n self.organization = check.str_param(organization, "organization")\n self.project = check.str_param(project, "project")\n self.discover_fields = check.opt_nullable_list_param(\n discover_fields, "discover_fields", str\n )\n super().__init__("Sentry", name)
\n\n\n
[docs]class MailgunSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n private_key: str,\n domain_region: Optional[str] = None,\n start_date: Optional[str] = None,\n ):\n """Airbyte Source for Mailgun.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/mailgun\n\n Args:\n name (str): The name of the destination.\n private_key (str): Primary account API key to access your Mailgun data.\n domain_region (Optional[str]): Domain region code. 'EU' or 'US' are possible values. The default is 'US'.\n start_date (Optional[str]): UTC date and time in the format 2020-10-01 00:00:00. Any data before this date will not be replicated. If omitted, defaults to 3 days ago.\n """\n self.private_key = check.str_param(private_key, "private_key")\n self.domain_region = check.opt_str_param(domain_region, "domain_region")\n self.start_date = check.opt_str_param(start_date, "start_date")\n super().__init__("Mailgun", name)
\n\n\n
[docs]class OnesignalSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, user_auth_key: str, start_date: str, outcome_names: str):\n """Airbyte Source for Onesignal.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/onesignal\n\n Args:\n name (str): The name of the destination.\n user_auth_key (str): OneSignal User Auth Key, see the docs for more information on how to obtain this key.\n start_date (str): The date from which you'd like to replicate data for OneSignal API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.\n outcome_names (str): Comma-separated list of names and the value (sum/count) for the returned outcome data. See the docs for more details\n """\n self.user_auth_key = check.str_param(user_auth_key, "user_auth_key")\n self.start_date = check.str_param(start_date, "start_date")\n self.outcome_names = check.str_param(outcome_names, "outcome_names")\n super().__init__("Onesignal", name)
\n\n\n
[docs]class PythonHttpTutorialSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, start_date: str, base: str, access_key: Optional[str] = None):\n """Airbyte Source for Python Http Tutorial.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/exchangeratesapi\n\n Args:\n name (str): The name of the destination.\n access_key (Optional[str]): API access key used to retrieve data from the Exchange Rates API.\n start_date (str): UTC date and time in the format 2017-01-25. Any data before this date will not be replicated.\n base (str): ISO reference currency. See here.\n """\n self.access_key = check.opt_str_param(access_key, "access_key")\n self.start_date = check.str_param(start_date, "start_date")\n self.base = check.str_param(base, "base")\n super().__init__("Python Http Tutorial", name)
\n\n\n
[docs]class AirtableSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_key: str, base_id: str, tables: List[str]):\n """Airbyte Source for Airtable.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/airtable\n\n Args:\n name (str): The name of the destination.\n api_key (str): The API Key for the Airtable account. See the Support Guide for more information on how to obtain this key.\n base_id (str): The Base ID to integrate the data from. You can find the Base ID following the link Airtable API, log in to your account, select the base you need and find Base ID in the docs.\n tables (List[str]): The list of Tables to integrate.\n """\n self.api_key = check.str_param(api_key, "api_key")\n self.base_id = check.str_param(base_id, "base_id")\n self.tables = check.list_param(tables, "tables", str)\n super().__init__("Airtable", name)
\n\n\n
[docs]class MongodbV2Source(GeneratedAirbyteSource):\n
[docs] class StandaloneMongoDbInstance:\n
[docs] @public\n def __init__(self, instance: str, host: str, port: int, tls: Optional[bool] = None):\n self.instance = check.str_param(instance, "instance")\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.tls = check.opt_bool_param(tls, "tls")
\n\n
[docs] class ReplicaSet:\n
[docs] @public\n def __init__(self, instance: str, server_addresses: str, replica_set: Optional[str] = None):\n self.instance = check.str_param(instance, "instance")\n self.server_addresses = check.str_param(server_addresses, "server_addresses")\n self.replica_set = check.opt_str_param(replica_set, "replica_set")
\n\n
[docs] class MongoDBAtlas:\n
[docs] @public\n def __init__(self, instance: str, cluster_url: str):\n self.instance = check.str_param(instance, "instance")\n self.cluster_url = check.str_param(cluster_url, "cluster_url")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n instance_type: Union[\n "MongodbV2Source.StandaloneMongoDbInstance",\n "MongodbV2Source.ReplicaSet",\n "MongodbV2Source.MongoDBAtlas",\n ],\n database: str,\n user: Optional[str] = None,\n password: Optional[str] = None,\n auth_source: Optional[str] = None,\n ):\n """Airbyte Source for Mongodb V2.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/mongodb-v2\n\n Args:\n name (str): The name of the destination.\n instance_type (Union[MongodbV2Source.StandaloneMongoDbInstance, MongodbV2Source.ReplicaSet, MongodbV2Source.MongoDBAtlas]): The MongoDb instance to connect to. For MongoDB Atlas and Replica Set TLS connection is used by default.\n database (str): The database you want to replicate.\n user (Optional[str]): The username which is used to access the database.\n password (Optional[str]): The password associated with this username.\n auth_source (Optional[str]): The authentication source where the user information is stored.\n """\n self.instance_type = check.inst_param(\n instance_type,\n "instance_type",\n (\n MongodbV2Source.StandaloneMongoDbInstance,\n MongodbV2Source.ReplicaSet,\n MongodbV2Source.MongoDBAtlas,\n ),\n )\n self.database = check.str_param(database, "database")\n self.user = check.opt_str_param(user, "user")\n self.password = check.opt_str_param(password, "password")\n self.auth_source = check.opt_str_param(auth_source, "auth_source")\n super().__init__("Mongodb V2", name)
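A minimal usage sketch for the MongodbV2Source constructor above, connecting to a MongoDB Atlas cluster via the nested MongoDBAtlas instance type; the import path and all values are placeholders.

from dagster_airbyte.managed.generated.sources import MongodbV2Source  # assumed import path

mongodb_source = MongodbV2Source(
    name="my_mongodb_source",
    instance_type=MongodbV2Source.MongoDBAtlas(
        instance="atlas",  # placeholder instance label
        cluster_url="mongodb+srv://cluster0.example.mongodb.net",
    ),
    database="analytics",
    user="airbyte_user",
    password="...",
)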
\n\n\n
[docs]class FileSecureSource(GeneratedAirbyteSource):\n
[docs] class HTTPSPublicWeb:\n
[docs] @public\n def __init__(self, user_agent: Optional[bool] = None):\n self.storage = "HTTPS"\n self.user_agent = check.opt_bool_param(user_agent, "user_agent")
\n\n
[docs] class GCSGoogleCloudStorage:\n
[docs] @public\n def __init__(self, service_account_json: Optional[str] = None):\n self.storage = "GCS"\n self.service_account_json = check.opt_str_param(\n service_account_json, "service_account_json"\n )
\n\n
[docs] class S3AmazonWebServices:\n
[docs] @public\n def __init__(\n self,\n aws_access_key_id: Optional[str] = None,\n aws_secret_access_key: Optional[str] = None,\n ):\n self.storage = "S3"\n self.aws_access_key_id = check.opt_str_param(aws_access_key_id, "aws_access_key_id")\n self.aws_secret_access_key = check.opt_str_param(\n aws_secret_access_key, "aws_secret_access_key"\n )
\n\n
[docs] class AzBlobAzureBlobStorage:\n
[docs] @public\n def __init__(\n self,\n storage_account: str,\n sas_token: Optional[str] = None,\n shared_key: Optional[str] = None,\n ):\n self.storage = "AzBlob"\n self.storage_account = check.str_param(storage_account, "storage_account")\n self.sas_token = check.opt_str_param(sas_token, "sas_token")\n self.shared_key = check.opt_str_param(shared_key, "shared_key")
\n\n
[docs] class SSHSecureShell:\n
[docs] @public\n def __init__(\n self, user: str, host: str, password: Optional[str] = None, port: Optional[str] = None\n ):\n self.storage = "SSH"\n self.user = check.str_param(user, "user")\n self.password = check.opt_str_param(password, "password")\n self.host = check.str_param(host, "host")\n self.port = check.opt_str_param(port, "port")
\n\n
[docs] class SCPSecureCopyProtocol:\n
[docs] @public\n def __init__(\n self, user: str, host: str, password: Optional[str] = None, port: Optional[str] = None\n ):\n self.storage = "SCP"\n self.user = check.str_param(user, "user")\n self.password = check.opt_str_param(password, "password")\n self.host = check.str_param(host, "host")\n self.port = check.opt_str_param(port, "port")
\n\n
[docs] class SFTPSecureFileTransferProtocol:\n
[docs] @public\n def __init__(\n self, user: str, host: str, password: Optional[str] = None, port: Optional[str] = None\n ):\n self.storage = "SFTP"\n self.user = check.str_param(user, "user")\n self.password = check.opt_str_param(password, "password")\n self.host = check.str_param(host, "host")\n self.port = check.opt_str_param(port, "port")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n dataset_name: str,\n format: str,\n url: str,\n provider: Union[\n "FileSecureSource.HTTPSPublicWeb",\n "FileSecureSource.GCSGoogleCloudStorage",\n "FileSecureSource.S3AmazonWebServices",\n "FileSecureSource.AzBlobAzureBlobStorage",\n "FileSecureSource.SSHSecureShell",\n "FileSecureSource.SCPSecureCopyProtocol",\n "FileSecureSource.SFTPSecureFileTransferProtocol",\n ],\n reader_options: Optional[str] = None,\n ):\n """Airbyte Source for File Secure.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/file\n\n Args:\n name (str): The name of the destination.\n dataset_name (str): The Name of the final table to replicate this file into (should include letters, numbers dash and underscores only).\n format (str): The Format of the file which should be replicated (Warning: some formats may be experimental, please refer to the docs).\n reader_options (Optional[str]): This should be a string in JSON format. It depends on the chosen file format to provide additional options and tune its behavior.\n url (str): The URL path to access the file which should be replicated.\n provider (Union[FileSecureSource.HTTPSPublicWeb, FileSecureSource.GCSGoogleCloudStorage, FileSecureSource.S3AmazonWebServices, FileSecureSource.AzBlobAzureBlobStorage, FileSecureSource.SSHSecureShell, FileSecureSource.SCPSecureCopyProtocol, FileSecureSource.SFTPSecureFileTransferProtocol]): The storage Provider or Location of the file(s) which should be replicated.\n """\n self.dataset_name = check.str_param(dataset_name, "dataset_name")\n self.format = check.str_param(format, "format")\n self.reader_options = check.opt_str_param(reader_options, "reader_options")\n self.url = check.str_param(url, "url")\n self.provider = check.inst_param(\n provider,\n "provider",\n (\n FileSecureSource.HTTPSPublicWeb,\n FileSecureSource.GCSGoogleCloudStorage,\n FileSecureSource.S3AmazonWebServices,\n FileSecureSource.AzBlobAzureBlobStorage,\n FileSecureSource.SSHSecureShell,\n FileSecureSource.SCPSecureCopyProtocol,\n FileSecureSource.SFTPSecureFileTransferProtocol,\n ),\n )\n super().__init__("File Secure", name)
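A minimal usage sketch for the FileSecureSource constructor above, reading a CSV file over HTTPS via the nested HTTPSPublicWeb provider; the import path and URL are placeholders.

from dagster_airbyte.managed.generated.sources import FileSecureSource  # assumed import path

file_source = FileSecureSource(
    name="my_file_source",
    dataset_name="exchange_rates",
    format="csv",
    url="https://example.com/exchange_rates.csv",
    provider=FileSecureSource.HTTPSPublicWeb(),
)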
\n\n\n
[docs]class ZendeskSupportSource(GeneratedAirbyteSource):\n
[docs] class OAuth20:\n
[docs] @public\n def __init__(self, access_token: str, credentials: Optional[str] = None):\n self.credentials = check.opt_str_param(credentials, "credentials")\n self.access_token = check.str_param(access_token, "access_token")
\n\n
[docs] class APIToken:\n
[docs] @public\n def __init__(self, email: str, api_token: str, credentials: Optional[str] = None):\n self.credentials = check.opt_str_param(credentials, "credentials")\n self.email = check.str_param(email, "email")\n self.api_token = check.str_param(api_token, "api_token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n start_date: str,\n subdomain: str,\n credentials: Union["ZendeskSupportSource.OAuth20", "ZendeskSupportSource.APIToken"],\n ):\n """Airbyte Source for Zendesk Support.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/zendesk-support\n\n Args:\n name (str): The name of the destination.\n start_date (str): The date from which you'd like to replicate data for Zendesk Support API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.\n subdomain (str): This is your Zendesk subdomain that can be found in your account URL. For example, in https://{MY_SUBDOMAIN}.zendesk.com/, where MY_SUBDOMAIN is the value of your subdomain.\n credentials (Union[ZendeskSupportSource.OAuth20, ZendeskSupportSource.APIToken]): Zendesk service provides two authentication methods. Choose between: `OAuth2.0` or `API token`.\n """\n self.start_date = check.str_param(start_date, "start_date")\n self.subdomain = check.str_param(subdomain, "subdomain")\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (ZendeskSupportSource.OAuth20, ZendeskSupportSource.APIToken),\n )\n super().__init__("Zendesk Support", name)
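A minimal usage sketch for the ZendeskSupportSource constructor above, authenticating with the nested APIToken class; the import path and all values are placeholders.

from dagster_airbyte.managed.generated.sources import ZendeskSupportSource  # assumed import path

zendesk_support_source = ZendeskSupportSource(
    name="my_zendesk_support_source",
    start_date="2023-01-01T00:00:00Z",
    subdomain="mycompany",
    credentials=ZendeskSupportSource.APIToken(
        email="admin@example.com",
        api_token="...",
    ),
)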
\n\n\n
[docs]class TempoSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_token: str):\n """Airbyte Source for Tempo.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/\n\n Args:\n name (str): The name of the destination.\n api_token (str): Tempo API Token. Go to Tempo>Settings, scroll down to Data Access and select API integration.\n """\n self.api_token = check.str_param(api_token, "api_token")\n super().__init__("Tempo", name)
\n\n\n
[docs]class BraintreeSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n merchant_id: str,\n public_key: str,\n private_key: str,\n environment: str,\n start_date: Optional[str] = None,\n ):\n """Airbyte Source for Braintree.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/braintree\n\n Args:\n name (str): The name of the destination.\n merchant_id (str): The unique identifier for your entire gateway account. See the docs for more information on how to obtain this ID.\n public_key (str): Braintree Public Key. See the docs for more information on how to obtain this key.\n private_key (str): Braintree Private Key. See the docs for more information on how to obtain this key.\n start_date (Optional[str]): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.\n environment (str): Environment specifies where the data will come from.\n """\n self.merchant_id = check.str_param(merchant_id, "merchant_id")\n self.public_key = check.str_param(public_key, "public_key")\n self.private_key = check.str_param(private_key, "private_key")\n self.start_date = check.opt_str_param(start_date, "start_date")\n self.environment = check.str_param(environment, "environment")\n super().__init__("Braintree", name)
\n\n\n
[docs]class SalesloftSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self, name: str, client_id: str, client_secret: str, refresh_token: str, start_date: str\n ):\n """Airbyte Source for Salesloft.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/salesloft\n\n Args:\n name (str): The name of the destination.\n client_id (str): The Client ID of your Salesloft developer application.\n client_secret (str): The Client Secret of your Salesloft developer application.\n refresh_token (str): The token for obtaining a new access token.\n start_date (str): The date from which you'd like to replicate data for Salesloft API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.\n """\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Salesloft", name)
\n\n\n
[docs]class LinnworksSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self, name: str, application_id: str, application_secret: str, token: str, start_date: str\n ):\n """Airbyte Source for Linnworks.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/linnworks\n\n Args:\n name (str): The name of the destination.\n application_id (str): Linnworks Application ID\n application_secret (str): Linnworks Application Secret\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.\n """\n self.application_id = check.str_param(application_id, "application_id")\n self.application_secret = check.str_param(application_secret, "application_secret")\n self.token = check.str_param(token, "token")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Linnworks", name)
\n\n\n
[docs]class ChargebeeSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self, name: str, site: str, site_api_key: str, start_date: str, product_catalog: str\n ):\n """Airbyte Source for Chargebee.\n\n Documentation can be found at https://apidocs.chargebee.com/docs/api\n\n Args:\n name (str): The name of the destination.\n site (str): The site prefix for your Chargebee instance.\n site_api_key (str): Chargebee API Key. See the docs for more information on how to obtain this key.\n start_date (str): UTC date and time in the format 2021-01-25T00:00:00Z. Any data before this date will not be replicated.\n product_catalog (str): Product Catalog version of your Chargebee site. Instructions on how to find your version you may find here under `API Version` section.\n """\n self.site = check.str_param(site, "site")\n self.site_api_key = check.str_param(site_api_key, "site_api_key")\n self.start_date = check.str_param(start_date, "start_date")\n self.product_catalog = check.str_param(product_catalog, "product_catalog")\n super().__init__("Chargebee", name)
\n\n\n
[docs]class GoogleAnalyticsDataApiSource(GeneratedAirbyteSource):\n
[docs] class AuthenticateViaGoogleOauth:\n
[docs] @public\n def __init__(\n self,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n auth_type: Optional[str] = None,\n access_token: Optional[str] = None,\n ):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")\n self.access_token = check.opt_str_param(access_token, "access_token")
\n\n
[docs] class ServiceAccountKeyAuthentication:\n
[docs] @public\n def __init__(self, credentials_json: str, auth_type: Optional[str] = None):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.credentials_json = check.str_param(credentials_json, "credentials_json")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n property_id: str,\n credentials: Union[\n "GoogleAnalyticsDataApiSource.AuthenticateViaGoogleOauth",\n "GoogleAnalyticsDataApiSource.ServiceAccountKeyAuthentication",\n ],\n date_ranges_start_date: str,\n custom_reports: Optional[str] = None,\n window_in_days: Optional[int] = None,\n ):\n """Airbyte Source for Google Analytics Data Api.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/google-analytics-v4\n\n Args:\n name (str): The name of the destination.\n property_id (str): A Google Analytics GA4 property identifier whose events are tracked. Specified in the URL path and not the body\n credentials (Union[GoogleAnalyticsDataApiSource.AuthenticateViaGoogleOauth, GoogleAnalyticsDataApiSource.ServiceAccountKeyAuthentication]): Credentials for the service\n date_ranges_start_date (str): The start date. One of the values Ndaysago, yesterday, today or in the format YYYY-MM-DD\n custom_reports (Optional[str]): A JSON array describing the custom reports you want to sync from Google Analytics. See the docs for more information about the exact format you can use to fill out this field.\n window_in_days (Optional[int]): The time increment used by the connector when requesting data from the Google Analytics API. More information is available in the the docs. The bigger this value is, the faster the sync will be, but the more likely that sampling will be applied to your data, potentially causing inaccuracies in the returned results. We recommend setting this to 1 unless you have a hard requirement to make the sync faster at the expense of accuracy. The minimum allowed value for this field is 1, and the maximum is 364.\n """\n self.property_id = check.str_param(property_id, "property_id")\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (\n GoogleAnalyticsDataApiSource.AuthenticateViaGoogleOauth,\n GoogleAnalyticsDataApiSource.ServiceAccountKeyAuthentication,\n ),\n )\n self.date_ranges_start_date = check.str_param(\n date_ranges_start_date, "date_ranges_start_date"\n )\n self.custom_reports = check.opt_str_param(custom_reports, "custom_reports")\n self.window_in_days = check.opt_int_param(window_in_days, "window_in_days")\n super().__init__("Google Analytics Data Api", name)
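A minimal usage sketch for the GoogleAnalyticsDataApiSource constructor above, authenticating with the nested ServiceAccountKeyAuthentication class; the import path and all values are placeholders.

import json

from dagster_airbyte.managed.generated.sources import GoogleAnalyticsDataApiSource  # assumed import path

ga4_source = GoogleAnalyticsDataApiSource(
    name="my_ga4_source",
    property_id="123456789",
    credentials=GoogleAnalyticsDataApiSource.ServiceAccountKeyAuthentication(
        credentials_json=json.dumps({"type": "service_account"}),  # placeholder service-account JSON
    ),
    date_ranges_start_date="2023-01-01",
)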
\n\n\n
[docs]class OutreachSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n redirect_uri: str,\n start_date: str,\n ):\n """Airbyte Source for Outreach.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/outreach\n\n Args:\n name (str): The name of the destination.\n client_id (str): The Client ID of your Outreach developer application.\n client_secret (str): The Client Secret of your Outreach developer application.\n refresh_token (str): The token for obtaining the new access token.\n redirect_uri (str): A Redirect URI is the location where the authorization server sends the user once the app has been successfully authorized and granted an authorization code or access token.\n start_date (str): The date from which you'd like to replicate data for Outreach API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.\n """\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")\n self.redirect_uri = check.str_param(redirect_uri, "redirect_uri")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Outreach", name)
\n\n\n
[docs]class LemlistSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_key: str):\n """Airbyte Source for Lemlist.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/lemlist\n\n Args:\n name (str): The name of the destination.\n api_key (str): Lemlist API key.\n """\n self.api_key = check.str_param(api_key, "api_key")\n super().__init__("Lemlist", name)
\n\n\n
[docs]class ApifyDatasetSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, datasetId: str, clean: Optional[bool] = None):\n """Airbyte Source for Apify Dataset.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/apify-dataset\n\n Args:\n name (str): The name of the destination.\n datasetId (str): ID of the dataset you would like to load to Airbyte.\n clean (Optional[bool]): If set to true, only clean items will be downloaded from the dataset. See description of what clean means in Apify API docs. If not sure, set clean to false.\n """\n self.datasetId = check.str_param(datasetId, "datasetId")\n self.clean = check.opt_bool_param(clean, "clean")\n super().__init__("Apify Dataset", name)
\n\n\n
[docs]class RecurlySource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n api_key: str,\n begin_time: Optional[str] = None,\n end_time: Optional[str] = None,\n ):\n """Airbyte Source for Recurly.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/recurly\n\n Args:\n name (str): The name of the destination.\n api_key (str): Recurly API Key. See the docs for more information on how to generate this key.\n begin_time (Optional[str]): ISO8601 timestamp from which the replication from Recurly API will start from.\n end_time (Optional[str]): ISO8601 timestamp to which the replication from Recurly API will stop. Records after that date won't be imported.\n """\n self.api_key = check.str_param(api_key, "api_key")\n self.begin_time = check.opt_str_param(begin_time, "begin_time")\n self.end_time = check.opt_str_param(end_time, "end_time")\n super().__init__("Recurly", name)
\n\n\n
[docs]class ZendeskTalkSource(GeneratedAirbyteSource):\n
[docs] class APIToken:\n
[docs] @public\n def __init__(self, email: str, api_token: str, auth_type: Optional[str] = None):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.email = check.str_param(email, "email")\n self.api_token = check.str_param(api_token, "api_token")
\n\n
[docs] class OAuth20:\n
[docs] @public\n def __init__(self, access_token: str, auth_type: Optional[str] = None):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.access_token = check.str_param(access_token, "access_token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n subdomain: str,\n credentials: Union["ZendeskTalkSource.APIToken", "ZendeskTalkSource.OAuth20"],\n start_date: str,\n ):\n """Airbyte Source for Zendesk Talk.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/zendesk-talk\n\n Args:\n name (str): The name of the destination.\n subdomain (str): This is your Zendesk subdomain that can be found in your account URL. For example, in https://{MY_SUBDOMAIN}.zendesk.com/, where MY_SUBDOMAIN is the value of your subdomain.\n credentials (Union[ZendeskTalkSource.APIToken, ZendeskTalkSource.OAuth20]): Zendesk service provides two authentication methods. Choose between: `OAuth2.0` or `API token`.\n start_date (str): The date from which you'd like to replicate data for Zendesk Talk API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.\n """\n self.subdomain = check.str_param(subdomain, "subdomain")\n self.credentials = check.inst_param(\n credentials, "credentials", (ZendeskTalkSource.APIToken, ZendeskTalkSource.OAuth20)\n )\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Zendesk Talk", name)
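A minimal usage sketch for the ZendeskTalkSource constructor above, using the nested APIToken credentials class; the import path and all values are placeholders.

from dagster_airbyte.managed.generated.sources import ZendeskTalkSource  # assumed import path

zendesk_talk_source = ZendeskTalkSource(
    name="my_zendesk_talk_source",
    subdomain="mycompany",
    credentials=ZendeskTalkSource.APIToken(
        email="admin@example.com",
        api_token="...",
    ),
    start_date="2023-01-01T00:00:00Z",
)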
\n\n\n
[docs]class SftpSource(GeneratedAirbyteSource):\n
[docs] class PasswordAuthentication:\n
[docs] @public\n def __init__(self, auth_user_password: str):\n self.auth_method = "SSH_PASSWORD_AUTH"\n self.auth_user_password = check.str_param(auth_user_password, "auth_user_password")
\n\n
[docs] class SSHKeyAuthentication:\n
[docs] @public\n def __init__(self, auth_ssh_key: str):\n self.auth_method = "SSH_KEY_AUTH"\n self.auth_ssh_key = check.str_param(auth_ssh_key, "auth_ssh_key")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n user: str,\n host: str,\n port: int,\n credentials: Union["SftpSource.PasswordAuthentication", "SftpSource.SSHKeyAuthentication"],\n file_types: Optional[str] = None,\n folder_path: Optional[str] = None,\n file_pattern: Optional[str] = None,\n ):\n """Airbyte Source for Sftp.\n\n Documentation can be found at https://docs.airbyte.com/integrations/source/sftp\n\n Args:\n name (str): The name of the destination.\n user (str): The server user\n host (str): The server host address\n port (int): The server port\n credentials (Union[SftpSource.PasswordAuthentication, SftpSource.SSHKeyAuthentication]): The server authentication method\n file_types (Optional[str]): Coma separated file types. Currently only 'csv' and 'json' types are supported.\n folder_path (Optional[str]): The directory to search files for sync\n file_pattern (Optional[str]): The regular expression to specify files for sync in a chosen Folder Path\n """\n self.user = check.str_param(user, "user")\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (SftpSource.PasswordAuthentication, SftpSource.SSHKeyAuthentication),\n )\n self.file_types = check.opt_str_param(file_types, "file_types")\n self.folder_path = check.opt_str_param(folder_path, "folder_path")\n self.file_pattern = check.opt_str_param(file_pattern, "file_pattern")\n super().__init__("Sftp", name)
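A minimal usage sketch for the SftpSource constructor above, authenticating with the nested PasswordAuthentication class; the import path and connection values are placeholders.

from dagster_airbyte.managed.generated.sources import SftpSource  # assumed import path

sftp_source = SftpSource(
    name="my_sftp_source",
    user="airbyte_user",
    host="sftp.example.com",
    port=22,
    credentials=SftpSource.PasswordAuthentication(auth_user_password="..."),
    file_types="csv,json",
    folder_path="/exports",
)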
\n\n\n
[docs]class WhiskyHunterSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n ):\n """Airbyte Source for Whisky Hunter.\n\n Documentation can be found at https://docs.airbyte.io/integrations/sources/whisky-hunter\n\n Args:\n name (str): The name of the destination.\n\n """\n super().__init__("Whisky Hunter", name)
\n\n\n
[docs]class FreshdeskSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n domain: str,\n api_key: str,\n requests_per_minute: Optional[int] = None,\n start_date: Optional[str] = None,\n ):\n """Airbyte Source for Freshdesk.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/freshdesk\n\n Args:\n name (str): The name of the destination.\n domain (str): Freshdesk domain\n api_key (str): Freshdesk API Key. See the docs for more information on how to obtain this key.\n requests_per_minute (Optional[int]): The number of requests per minute that this source allowed to use. There is a rate limit of 50 requests per minute per app per account.\n start_date (Optional[str]): UTC date and time. Any data created after this date will be replicated. If this parameter is not set, all data will be replicated.\n """\n self.domain = check.str_param(domain, "domain")\n self.api_key = check.str_param(api_key, "api_key")\n self.requests_per_minute = check.opt_int_param(requests_per_minute, "requests_per_minute")\n self.start_date = check.opt_str_param(start_date, "start_date")\n super().__init__("Freshdesk", name)
\n\n\n
[docs]class GocardlessSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n access_token: str,\n gocardless_environment: str,\n gocardless_version: str,\n start_date: str,\n ):\n """Airbyte Source for Gocardless.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/gocardless\n\n Args:\n name (str): The name of the destination.\n access_token (str): Gocardless API TOKEN\n gocardless_environment (str): Environment you are trying to connect to.\n gocardless_version (str): GoCardless version. This is a date. You can find the latest here: https://developer.gocardless.com/api-reference/#api-usage-making-requests\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.\n """\n self.access_token = check.str_param(access_token, "access_token")\n self.gocardless_environment = check.str_param(\n gocardless_environment, "gocardless_environment"\n )\n self.gocardless_version = check.str_param(gocardless_version, "gocardless_version")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Gocardless", name)
\n\n\n
[docs]class ZuoraSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n start_date: str,\n tenant_endpoint: str,\n data_query: str,\n client_id: str,\n client_secret: str,\n window_in_days: Optional[str] = None,\n ):\n """Airbyte Source for Zuora.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/zuora\n\n Args:\n name (str): The name of the destination.\n start_date (str): Start Date in format: YYYY-MM-DD\n window_in_days (Optional[str]): The number of days in each data chunk, beginning from start_date. The bigger the value, the faster the fetch (0.1 is roughly a couple of hours, 1 is a day, 364 is a year).\n tenant_endpoint (str): Please choose the right endpoint where your Tenant is located. More info by this Link\n data_query (str): Choose between `Live`, or `Unlimited` - the optimized, replicated database at 12 hours freshness for high volume extraction Link\n client_id (str): Your OAuth user Client ID\n client_secret (str): Your OAuth user Client Secret\n """\n self.start_date = check.str_param(start_date, "start_date")\n self.window_in_days = check.opt_str_param(window_in_days, "window_in_days")\n self.tenant_endpoint = check.str_param(tenant_endpoint, "tenant_endpoint")\n self.data_query = check.str_param(data_query, "data_query")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n super().__init__("Zuora", name)
\n\n\n
[docs]class MarketoSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self, name: str, domain_url: str, client_id: str, client_secret: str, start_date: str\n ):\n """Airbyte Source for Marketo.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/marketo\n\n Args:\n name (str): The name of the destination.\n domain_url (str): Your Marketo Base URL. See the docs for info on how to obtain this.\n client_id (str): The Client ID of your Marketo developer application. See the docs for info on how to obtain this.\n client_secret (str): The Client Secret of your Marketo developer application. See the docs for info on how to obtain this.\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.\n """\n self.domain_url = check.str_param(domain_url, "domain_url")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Marketo", name)
\n\n\n
[docs]class DriftSource(GeneratedAirbyteSource):\n
[docs] class OAuth20:\n
[docs] @public\n def __init__(\n self,\n client_id: str,\n client_secret: str,\n access_token: str,\n refresh_token: str,\n credentials: Optional[str] = None,\n ):\n self.credentials = check.opt_str_param(credentials, "credentials")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.access_token = check.str_param(access_token, "access_token")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] class AccessToken:\n
[docs] @public\n def __init__(self, access_token: str, credentials: Optional[str] = None):\n self.credentials = check.opt_str_param(credentials, "credentials")\n self.access_token = check.str_param(access_token, "access_token")
\n\n
[docs] @public\n def __init__(\n self, name: str, credentials: Union["DriftSource.OAuth20", "DriftSource.AccessToken"]\n ):\n """Airbyte Source for Drift.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/drift\n\n Args:\n name (str): The name of the destination.\n\n """\n self.credentials = check.inst_param(\n credentials, "credentials", (DriftSource.OAuth20, DriftSource.AccessToken)\n )\n super().__init__("Drift", name)
\n\n\n
[docs]class PokeapiSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, pokemon_name: str):\n """Airbyte Source for Pokeapi.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/pokeapi\n\n Args:\n name (str): The name of the destination.\n pokemon_name (str): Pokemon requested from the API.\n """\n self.pokemon_name = check.str_param(pokemon_name, "pokemon_name")\n super().__init__("Pokeapi", name)
\n\n\n
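For a minimal, self-contained sketch (assuming the same import path as the module shown here), PokeapiSource needs only a name and the Pokemon to request:

from dagster_airbyte.managed.generated.sources import PokeapiSource

# No credentials are required for the public PokeAPI.
pokeapi_source = PokeapiSource(name="pokeapi", pokemon_name="ditto")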
[docs]class NetsuiteSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n realm: str,\n consumer_key: str,\n consumer_secret: str,\n token_key: str,\n token_secret: str,\n start_datetime: str,\n object_types: Optional[List[str]] = None,\n window_in_days: Optional[int] = None,\n ):\n """Airbyte Source for Netsuite.\n\n Args:\n name (str): The name of the destination.\n realm (str): Netsuite realm e.g. 2344535, as for `production` or 2344535_SB1, as for the `sandbox`\n consumer_key (str): Consumer key associated with your integration\n consumer_secret (str): Consumer secret associated with your integration\n token_key (str): Access token key\n token_secret (str): Access token secret\n object_types (Optional[List[str]]): The API names of the Netsuite objects you want to sync. Setting this speeds up the connection setup process by limiting the number of schemas that need to be retrieved from Netsuite.\n start_datetime (str): Starting point for your data replication, in format of "YYYY-MM-DDTHH:mm:ssZ"\n window_in_days (Optional[int]): The amount of days used to query the data with date chunks. Set smaller value, if you have lots of data.\n """\n self.realm = check.str_param(realm, "realm")\n self.consumer_key = check.str_param(consumer_key, "consumer_key")\n self.consumer_secret = check.str_param(consumer_secret, "consumer_secret")\n self.token_key = check.str_param(token_key, "token_key")\n self.token_secret = check.str_param(token_secret, "token_secret")\n self.object_types = check.opt_nullable_list_param(object_types, "object_types", str)\n self.start_datetime = check.str_param(start_datetime, "start_datetime")\n self.window_in_days = check.opt_int_param(window_in_days, "window_in_days")\n super().__init__("Netsuite", name)
\n\n\n
[docs]class HubplannerSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_key: str):\n """Airbyte Source for Hubplanner.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/hubplanner\n\n Args:\n name (str): The name of the destination.\n api_key (str): Hubplanner API key. See https://github.com/hubplanner/API#authentication for more details.\n """\n self.api_key = check.str_param(api_key, "api_key")\n super().__init__("Hubplanner", name)
\n\n\n
[docs]class Dv360Source(GeneratedAirbyteSource):\n
[docs] class Oauth2Credentials:\n
[docs] @public\n def __init__(\n self,\n access_token: str,\n refresh_token: str,\n token_uri: str,\n client_id: str,\n client_secret: str,\n ):\n self.access_token = check.str_param(access_token, "access_token")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")\n self.token_uri = check.str_param(token_uri, "token_uri")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n credentials: "Dv360Source.Oauth2Credentials",\n partner_id: int,\n start_date: str,\n end_date: Optional[str] = None,\n filters: Optional[List[str]] = None,\n ):\n """Airbyte Source for Dv 360.\n\n Args:\n name (str): The name of the destination.\n credentials (Dv360Source.Oauth2Credentials): Oauth2 credentials\n partner_id (int): Partner ID\n start_date (str): UTC date and time in the format 2017-01-25. Any data before this date will not be replicated.\n end_date (Optional[str]): UTC date and time in the format 2017-01-25. Any data after this date will not be replicated.\n filters (Optional[List[str]]): Filters for the dimensions. Each filter object has two keys: 'type' for the name of the dimension to filter on, and 'value' for the value of the filter.\n """\n self.credentials = check.inst_param(\n credentials, "credentials", Dv360Source.Oauth2Credentials\n )\n self.partner_id = check.int_param(partner_id, "partner_id")\n self.start_date = check.str_param(start_date, "start_date")\n self.end_date = check.opt_str_param(end_date, "end_date")\n self.filters = check.opt_nullable_list_param(filters, "filters", str)\n super().__init__("Dv 360", name)
\n\n\n
[docs]class NotionSource(GeneratedAirbyteSource):\n
[docs] class OAuth20:\n
[docs] @public\n def __init__(self, client_id: str, client_secret: str, access_token: str):\n self.auth_type = "OAuth2.0"\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.access_token = check.str_param(access_token, "access_token")
\n\n
[docs] class AccessToken:\n
[docs] @public\n def __init__(self, token: str):\n self.auth_type = "token"\n self.token = check.str_param(token, "token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n start_date: str,\n credentials: Union["NotionSource.OAuth20", "NotionSource.AccessToken"],\n ):\n """Airbyte Source for Notion.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/notion\n\n Args:\n name (str): The name of the destination.\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00.000Z. Any data before this date will not be replicated.\n credentials (Union[NotionSource.OAuth20, NotionSource.AccessToken]): Pick an authentication method.\n """\n self.start_date = check.str_param(start_date, "start_date")\n self.credentials = check.inst_param(\n credentials, "credentials", (NotionSource.OAuth20, NotionSource.AccessToken)\n )\n super().__init__("Notion", name)
\n\n\n
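A hedged sketch of constructing a NotionSource with token authentication, based on the constructor shown above; the import path is assumed and the token is a placeholder:

from dagster_airbyte.managed.generated.sources import NotionSource

notion_source = NotionSource(
    name="my_notion_source",
    start_date="2021-01-01T00:00:00.000Z",
    # AccessToken is the simpler of the two credential classes accepted here.
    credentials=NotionSource.AccessToken(token="<NOTION_TOKEN>"),
)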
[docs]class ZendeskSunshineSource(GeneratedAirbyteSource):\n
[docs] class OAuth20:\n
[docs] @public\n def __init__(self, client_id: str, client_secret: str, access_token: str):\n self.auth_method = "oauth2.0"\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.access_token = check.str_param(access_token, "access_token")
\n\n
[docs] class APIToken:\n
[docs] @public\n def __init__(self, api_token: str, email: str):\n self.auth_method = "api_token"\n self.api_token = check.str_param(api_token, "api_token")\n self.email = check.str_param(email, "email")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n subdomain: str,\n start_date: str,\n credentials: Union["ZendeskSunshineSource.OAuth20", "ZendeskSunshineSource.APIToken"],\n ):\n """Airbyte Source for Zendesk Sunshine.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/zendesk_sunshine\n\n Args:\n name (str): The name of the destination.\n subdomain (str): The subdomain for your Zendesk Account.\n start_date (str): The date from which you'd like to replicate data for Zendesk Sunshine API, in the format YYYY-MM-DDT00:00:00Z.\n """\n self.subdomain = check.str_param(subdomain, "subdomain")\n self.start_date = check.str_param(start_date, "start_date")\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (ZendeskSunshineSource.OAuth20, ZendeskSunshineSource.APIToken),\n )\n super().__init__("Zendesk Sunshine", name)
\n\n\n
[docs]class PinterestSource(GeneratedAirbyteSource):\n
[docs] class OAuth20:\n
[docs] @public\n def __init__(\n self,\n refresh_token: str,\n client_id: Optional[str] = None,\n client_secret: Optional[str] = None,\n ):\n self.auth_method = "oauth2.0"\n self.client_id = check.opt_str_param(client_id, "client_id")\n self.client_secret = check.opt_str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] class AccessToken:\n
[docs] @public\n def __init__(self, access_token: str):\n self.auth_method = "access_token"\n self.access_token = check.str_param(access_token, "access_token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n start_date: str,\n credentials: Union["PinterestSource.OAuth20", "PinterestSource.AccessToken"],\n ):\n """Airbyte Source for Pinterest.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/pinterest\n\n Args:\n name (str): The name of the destination.\n start_date (str): A date in the format YYYY-MM-DD. If you have not set a date, it would be defaulted to latest allowed date by api (914 days from today).\n """\n self.start_date = check.str_param(start_date, "start_date")\n self.credentials = check.inst_param(\n credentials, "credentials", (PinterestSource.OAuth20, PinterestSource.AccessToken)\n )\n super().__init__("Pinterest", name)
\n\n\n
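Illustrative only: wiring a PinterestSource with the AccessToken credential class defined above (import path assumed, token a placeholder):

from dagster_airbyte.managed.generated.sources import PinterestSource

pinterest_source = PinterestSource(
    name="my_pinterest_source",
    start_date="2021-01-01",
    credentials=PinterestSource.AccessToken(access_token="<PINTEREST_ACCESS_TOKEN>"),
)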
[docs]class MetabaseSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n instance_api_url: str,\n username: Optional[str] = None,\n password: Optional[str] = None,\n session_token: Optional[str] = None,\n ):\n r"""Airbyte Source for Metabase.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/metabase\n\n Args:\n name (str): The name of the destination.\n instance_api_url (str): URL to your metabase instance API\n session_token (Optional[str]): To generate your session token, you need to run the following command: ``` curl -X POST \\\\ -H "Content-Type: application/json" \\\\ -d '{"username": "person@metabase.com", "password": "fakepassword"}' \\\\ http://localhost:3000/api/session ``` Then copy the value of the `id` field returned by a successful call to that API. Note that by default, sessions are good for 14 days and needs to be regenerated.\n """\n self.instance_api_url = check.str_param(instance_api_url, "instance_api_url")\n self.username = check.opt_str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.session_token = check.opt_str_param(session_token, "session_token")\n super().__init__("Metabase", name)
\n\n\n
[docs]class HubspotSource(GeneratedAirbyteSource):\n
[docs] class OAuth:\n
[docs] @public\n def __init__(self, client_id: str, client_secret: str, refresh_token: str):\n self.credentials_title = "OAuth Credentials"\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] class APIKey:\n
[docs] @public\n def __init__(self, api_key: str):\n self.credentials_title = "API Key Credentials"\n self.api_key = check.str_param(api_key, "api_key")
\n\n
[docs] class PrivateAPP:\n
[docs] @public\n def __init__(self, access_token: str):\n self.credentials_title = "Private App Credentials"\n self.access_token = check.str_param(access_token, "access_token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n start_date: str,\n credentials: Union[\n "HubspotSource.OAuth", "HubspotSource.APIKey", "HubspotSource.PrivateAPP"\n ],\n ):\n """Airbyte Source for Hubspot.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/hubspot\n\n Args:\n name (str): The name of the destination.\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.\n credentials (Union[HubspotSource.OAuth, HubspotSource.APIKey, HubspotSource.PrivateAPP]): Choose how to authenticate to HubSpot.\n """\n self.start_date = check.str_param(start_date, "start_date")\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (HubspotSource.OAuth, HubspotSource.APIKey, HubspotSource.PrivateAPP),\n )\n super().__init__("Hubspot", name)
\n\n\n
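A sketch of the HubspotSource constructor using the PrivateAPP credential class from above; values are placeholders and the import path is assumed:

from dagster_airbyte.managed.generated.sources import HubspotSource

hubspot_source = HubspotSource(
    name="my_hubspot_source",
    start_date="2021-01-25T00:00:00Z",
    credentials=HubspotSource.PrivateAPP(access_token="<HUBSPOT_ACCESS_TOKEN>"),
)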
[docs]class HarvestSource(GeneratedAirbyteSource):\n
[docs] class AuthenticateViaHarvestOAuth:\n
[docs] @public\n def __init__(\n self,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n auth_type: Optional[str] = None,\n ):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] class AuthenticateWithPersonalAccessToken:\n
[docs] @public\n def __init__(self, api_token: str, auth_type: Optional[str] = None):\n self.auth_type = check.opt_str_param(auth_type, "auth_type")\n self.api_token = check.str_param(api_token, "api_token")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n account_id: str,\n replication_start_date: str,\n credentials: Union[\n "HarvestSource.AuthenticateViaHarvestOAuth",\n "HarvestSource.AuthenticateWithPersonalAccessToken",\n ],\n ):\n """Airbyte Source for Harvest.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/harvest\n\n Args:\n name (str): The name of the destination.\n account_id (str): Harvest account ID. Required for all Harvest requests in pair with Personal Access Token\n replication_start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.\n credentials (Union[HarvestSource.AuthenticateViaHarvestOAuth, HarvestSource.AuthenticateWithPersonalAccessToken]): Choose how to authenticate to Harvest.\n """\n self.account_id = check.str_param(account_id, "account_id")\n self.replication_start_date = check.str_param(\n replication_start_date, "replication_start_date"\n )\n self.credentials = check.inst_param(\n credentials,\n "credentials",\n (\n HarvestSource.AuthenticateViaHarvestOAuth,\n HarvestSource.AuthenticateWithPersonalAccessToken,\n ),\n )\n super().__init__("Harvest", name)
\n\n\n
[docs]class GithubSource(GeneratedAirbyteSource):\n
[docs] class OAuthCredentials:\n
[docs] @public\n def __init__(self, access_token: str):\n self.access_token = check.str_param(access_token, "access_token")
\n\n
[docs] class PATCredentials:\n
[docs] @public\n def __init__(self, personal_access_token: str):\n self.personal_access_token = check.str_param(\n personal_access_token, "personal_access_token"\n )
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n credentials: Union["GithubSource.OAuthCredentials", "GithubSource.PATCredentials"],\n start_date: str,\n repository: str,\n branch: Optional[str] = None,\n page_size_for_large_streams: Optional[int] = None,\n ):\n """Airbyte Source for Github.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/github\n\n Args:\n name (str): The name of the destination.\n credentials (Union[GithubSource.OAuthCredentials, GithubSource.PATCredentials]): Choose how to authenticate to GitHub\n start_date (str): The date from which you'd like to replicate data from GitHub in the format YYYY-MM-DDT00:00:00Z. For the streams which support this configuration, only data generated on or after the start date will be replicated. This field doesn't apply to all streams, see the docs for more info\n repository (str): Space-delimited list of GitHub organizations/repositories, e.g. `airbytehq/airbyte` for single repository, `airbytehq/*` for get all repositories from organization and `airbytehq/airbyte airbytehq/another-repo` for multiple repositories.\n branch (Optional[str]): Space-delimited list of GitHub repository branches to pull commits for, e.g. `airbytehq/airbyte/master`. If no branches are specified for a repository, the default branch will be pulled.\n page_size_for_large_streams (Optional[int]): The Github connector contains several streams with a large amount of data. The page size of such streams depends on the size of your repository. We recommended that you specify values between 10 and 30.\n """\n self.credentials = check.inst_param(\n credentials, "credentials", (GithubSource.OAuthCredentials, GithubSource.PATCredentials)\n )\n self.start_date = check.str_param(start_date, "start_date")\n self.repository = check.str_param(repository, "repository")\n self.branch = check.opt_str_param(branch, "branch")\n self.page_size_for_large_streams = check.opt_int_param(\n page_size_for_large_streams, "page_size_for_large_streams"\n )\n super().__init__("Github", name)
\n\n\n
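As a usage sketch (import path assumed, token and repository placeholders), GithubSource pairs one of the credential classes above with a start date and a repository list:

from dagster_airbyte.managed.generated.sources import GithubSource

github_source = GithubSource(
    name="my_github_source",
    credentials=GithubSource.PATCredentials(personal_access_token="<GITHUB_PAT>"),
    start_date="2021-01-01T00:00:00Z",
    # Space-delimited list of org/repo entries, per the docstring above.
    repository="airbytehq/airbyte",
)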
[docs]class E2eTestSource(GeneratedAirbyteSource):\n
[docs] class SingleSchema:\n
[docs] @public\n def __init__(\n self, stream_name: str, stream_schema: str, stream_duplication: Optional[int] = None\n ):\n self.type = "SINGLE_STREAM"\n self.stream_name = check.str_param(stream_name, "stream_name")\n self.stream_schema = check.str_param(stream_schema, "stream_schema")\n self.stream_duplication = check.opt_int_param(stream_duplication, "stream_duplication")
\n\n
[docs] class MultiSchema:\n
[docs] @public\n def __init__(self, stream_schemas: str):\n self.type = "MULTI_STREAM"\n self.stream_schemas = check.str_param(stream_schemas, "stream_schemas")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n max_messages: int,\n mock_catalog: Union["E2eTestSource.SingleSchema", "E2eTestSource.MultiSchema"],\n type: Optional[str] = None,\n seed: Optional[int] = None,\n message_interval_ms: Optional[int] = None,\n ):\n """Airbyte Source for E2e Test.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/e2e-test\n\n Args:\n name (str): The name of the destination.\n max_messages (int): Number of records to emit per stream. Min 1. Max 100 billion.\n seed (Optional[int]): When the seed is unspecified, the current time millis will be used as the seed. Range: [0, 1000000].\n message_interval_ms (Optional[int]): Interval between messages in ms. Min 0 ms. Max 60000 ms (1 minute).\n """\n self.type = check.opt_str_param(type, "type")\n self.max_messages = check.int_param(max_messages, "max_messages")\n self.seed = check.opt_int_param(seed, "seed")\n self.message_interval_ms = check.opt_int_param(message_interval_ms, "message_interval_ms")\n self.mock_catalog = check.inst_param(\n mock_catalog, "mock_catalog", (E2eTestSource.SingleSchema, E2eTestSource.MultiSchema)\n )\n super().__init__("E2e Test", name)
\n\n\n
[docs]class MysqlSource(GeneratedAirbyteSource):\n
[docs] class Preferred:\n
[docs] @public\n def __init__(\n self,\n ):\n self.mode = "preferred"
\n\n
[docs] class Required:\n
[docs] @public\n def __init__(\n self,\n ):\n self.mode = "required"
\n\n
[docs] class VerifyCA:\n
[docs] @public\n def __init__(\n self,\n ca_certificate: str,\n client_certificate: Optional[str] = None,\n client_key: Optional[str] = None,\n client_key_password: Optional[str] = None,\n ):\n self.mode = "verify_ca"\n self.ca_certificate = check.str_param(ca_certificate, "ca_certificate")\n self.client_certificate = check.opt_str_param(client_certificate, "client_certificate")\n self.client_key = check.opt_str_param(client_key, "client_key")\n self.client_key_password = check.opt_str_param(\n client_key_password, "client_key_password"\n )
\n\n
[docs] class VerifyIdentity:\n
[docs] @public\n def __init__(\n self,\n ca_certificate: str,\n client_certificate: Optional[str] = None,\n client_key: Optional[str] = None,\n client_key_password: Optional[str] = None,\n ):\n self.mode = "verify_identity"\n self.ca_certificate = check.str_param(ca_certificate, "ca_certificate")\n self.client_certificate = check.opt_str_param(client_certificate, "client_certificate")\n self.client_key = check.opt_str_param(client_key, "client_key")\n self.client_key_password = check.opt_str_param(\n client_key_password, "client_key_password"\n )
\n\n
[docs] class Standard:\n
[docs] @public\n def __init__(\n self,\n ):\n self.method = "STANDARD"
\n\n
[docs] class LogicalReplicationCDC:\n
[docs] @public\n def __init__(\n self,\n initial_waiting_seconds: Optional[int] = None,\n server_time_zone: Optional[str] = None,\n ):\n self.method = "CDC"\n self.initial_waiting_seconds = check.opt_int_param(\n initial_waiting_seconds, "initial_waiting_seconds"\n )\n self.server_time_zone = check.opt_str_param(server_time_zone, "server_time_zone")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n host: str,\n port: int,\n database: str,\n username: str,\n ssl_mode: Union[\n "MysqlSource.Preferred",\n "MysqlSource.Required",\n "MysqlSource.VerifyCA",\n "MysqlSource.VerifyIdentity",\n ],\n replication_method: Union["MysqlSource.Standard", "MysqlSource.LogicalReplicationCDC"],\n password: Optional[str] = None,\n jdbc_url_params: Optional[str] = None,\n ssl: Optional[bool] = None,\n ):\n """Airbyte Source for Mysql.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/mysql\n\n Args:\n name (str): The name of the destination.\n host (str): The host name of the database.\n port (int): The port to connect to.\n database (str): The database name.\n username (str): The username which is used to access the database.\n password (Optional[str]): The password associated with the username.\n jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3). For more information read about JDBC URL parameters.\n ssl (Optional[bool]): Encrypt data using SSL.\n ssl_mode (Union[MysqlSource.Preferred, MysqlSource.Required, MysqlSource.VerifyCA, MysqlSource.VerifyIdentity]): SSL connection modes. preferred - Automatically attempt SSL connection. If the MySQL server does not support SSL, continue with a regular connection.required - Always connect with SSL. If the MySQL server doesn`t support SSL, the connection will not be established. Certificate Authority (CA) and Hostname are not verified.verify-ca - Always connect with SSL. Verifies CA, but allows connection even if Hostname does not match.Verify Identity - Always connect with SSL. Verify both CA and Hostname.Read more in the docs.\n replication_method (Union[MysqlSource.Standard, MysqlSource.LogicalReplicationCDC]): Replication method to use for extracting data from the database.\n """\n self.host = check.str_param(host, "host")\n self.port = check.int_param(port, "port")\n self.database = check.str_param(database, "database")\n self.username = check.str_param(username, "username")\n self.password = check.opt_str_param(password, "password")\n self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")\n self.ssl = check.opt_bool_param(ssl, "ssl")\n self.ssl_mode = check.inst_param(\n ssl_mode,\n "ssl_mode",\n (\n MysqlSource.Preferred,\n MysqlSource.Required,\n MysqlSource.VerifyCA,\n MysqlSource.VerifyIdentity,\n ),\n )\n self.replication_method = check.inst_param(\n replication_method,\n "replication_method",\n (MysqlSource.Standard, MysqlSource.LogicalReplicationCDC),\n )\n super().__init__("Mysql", name)
\n\n\n
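A minimal sketch, assuming the import path above, showing how the nested ssl_mode and replication_method option classes plug into MysqlSource; host, database, and credentials are placeholders:

from dagster_airbyte.managed.generated.sources import MysqlSource

mysql_source = MysqlSource(
    name="my_mysql_source",
    host="localhost",
    port=3306,
    database="my_database",
    username="airbyte",
    password="<MYSQL_PASSWORD>",
    # Nested option classes select SSL and replication behavior.
    ssl_mode=MysqlSource.Preferred(),
    replication_method=MysqlSource.Standard(),
)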
[docs]class MyHoursSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n email: str,\n password: str,\n start_date: str,\n logs_batch_size: Optional[int] = None,\n ):\n """Airbyte Source for My Hours.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/my-hours\n\n Args:\n name (str): The name of the destination.\n email (str): Your My Hours username\n password (str): The password associated to the username\n start_date (str): Start date for collecting time logs\n logs_batch_size (Optional[int]): Pagination size used for retrieving logs in days\n """\n self.email = check.str_param(email, "email")\n self.password = check.str_param(password, "password")\n self.start_date = check.str_param(start_date, "start_date")\n self.logs_batch_size = check.opt_int_param(logs_batch_size, "logs_batch_size")\n super().__init__("My Hours", name)
\n\n\n
[docs]class KyribaSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n domain: str,\n username: str,\n password: str,\n start_date: str,\n end_date: Optional[str] = None,\n ):\n """Airbyte Source for Kyriba.\n\n Args:\n name (str): The name of the destination.\n domain (str): Kyriba domain\n username (str): Username to be used in basic auth\n password (str): Password to be used in basic auth\n start_date (str): The date the sync should start from.\n end_date (Optional[str]): The date the sync should end. If left empty, the sync will run to the current date.\n """\n self.domain = check.str_param(domain, "domain")\n self.username = check.str_param(username, "username")\n self.password = check.str_param(password, "password")\n self.start_date = check.str_param(start_date, "start_date")\n self.end_date = check.opt_str_param(end_date, "end_date")\n super().__init__("Kyriba", name)
\n\n\n
[docs]class GoogleSearchConsoleSource(GeneratedAirbyteSource):\n
[docs] class OAuth:\n
[docs] @public\n def __init__(\n self,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n access_token: Optional[str] = None,\n ):\n self.auth_type = "Client"\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.access_token = check.opt_str_param(access_token, "access_token")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")
\n\n
[docs] class ServiceAccountKeyAuthentication:\n
[docs] @public\n def __init__(self, service_account_info: str, email: str):\n self.auth_type = "Service"\n self.service_account_info = check.str_param(\n service_account_info, "service_account_info"\n )\n self.email = check.str_param(email, "email")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n site_urls: List[str],\n start_date: str,\n authorization: Union[\n "GoogleSearchConsoleSource.OAuth",\n "GoogleSearchConsoleSource.ServiceAccountKeyAuthentication",\n ],\n end_date: Optional[str] = None,\n custom_reports: Optional[str] = None,\n ):\n """Airbyte Source for Google Search Console.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/google-search-console\n\n Args:\n name (str): The name of the destination.\n site_urls (List[str]): The URLs of the website property attached to your GSC account. Read more here.\n start_date (str): UTC date in the format 2017-01-25. Any data before this date will not be replicated.\n end_date (Optional[str]): UTC date in the format 2017-01-25. Any data after this date will not be replicated. Must be greater or equal to the start date field.\n custom_reports (Optional[str]): A JSON array describing the custom reports you want to sync from Google Search Console. See the docs for more information about the exact format you can use to fill out this field.\n """\n self.site_urls = check.list_param(site_urls, "site_urls", str)\n self.start_date = check.str_param(start_date, "start_date")\n self.end_date = check.opt_str_param(end_date, "end_date")\n self.authorization = check.inst_param(\n authorization,\n "authorization",\n (\n GoogleSearchConsoleSource.OAuth,\n GoogleSearchConsoleSource.ServiceAccountKeyAuthentication,\n ),\n )\n self.custom_reports = check.opt_str_param(custom_reports, "custom_reports")\n super().__init__("Google Search Console", name)
\n\n\n
[docs]class FacebookMarketingSource(GeneratedAirbyteSource):\n
[docs] class InsightConfig:\n
[docs] @public\n def __init__(\n self,\n name: str,\n fields: Optional[List[str]] = None,\n breakdowns: Optional[List[str]] = None,\n action_breakdowns: Optional[List[str]] = None,\n time_increment: Optional[int] = None,\n start_date: Optional[str] = None,\n end_date: Optional[str] = None,\n insights_lookback_window: Optional[int] = None,\n ):\n self.name = check.str_param(name, "name")\n self.fields = check.opt_nullable_list_param(fields, "fields", str)\n self.breakdowns = check.opt_nullable_list_param(breakdowns, "breakdowns", str)\n self.action_breakdowns = check.opt_nullable_list_param(\n action_breakdowns, "action_breakdowns", str\n )\n self.time_increment = check.opt_int_param(time_increment, "time_increment")\n self.start_date = check.opt_str_param(start_date, "start_date")\n self.end_date = check.opt_str_param(end_date, "end_date")\n self.insights_lookback_window = check.opt_int_param(\n insights_lookback_window, "insights_lookback_window"\n )
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n account_id: str,\n start_date: str,\n access_token: str,\n end_date: Optional[str] = None,\n include_deleted: Optional[bool] = None,\n fetch_thumbnail_images: Optional[bool] = None,\n custom_insights: Optional[List[InsightConfig]] = None,\n page_size: Optional[int] = None,\n insights_lookback_window: Optional[int] = None,\n max_batch_size: Optional[int] = None,\n ):\n """Airbyte Source for Facebook Marketing.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/facebook-marketing\n\n Args:\n name (str): The name of the destination.\n account_id (str): The Facebook Ad account ID to use when pulling data from the Facebook Marketing API.\n start_date (str): The date from which you'd like to replicate data for all incremental streams, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.\n end_date (Optional[str]): The date until which you'd like to replicate data for all incremental streams, in the format YYYY-MM-DDT00:00:00Z. All data generated between start_date and this date will be replicated. Not setting this option will result in always syncing the latest data.\n access_token (str): The value of the access token generated. See the docs for more information\n include_deleted (Optional[bool]): Include data from deleted Campaigns, Ads, and AdSets\n fetch_thumbnail_images (Optional[bool]): In each Ad Creative, fetch the thumbnail_url and store the result in thumbnail_data_url\n custom_insights (Optional[List[FacebookMarketingSource.InsightConfig]]): A list which contains insights entries, each entry must have a name and can contains fields, breakdowns or action_breakdowns)\n page_size (Optional[int]): Page size used when sending requests to Facebook API to specify number of records per page when response has pagination. Most users do not need to set this field unless they specifically need to tune the connector to address specific issues or use cases.\n insights_lookback_window (Optional[int]): The attribution window\n max_batch_size (Optional[int]): Maximum batch size used when sending batch requests to Facebook API. Most users do not need to set this field unless they specifically need to tune the connector to address specific issues or use cases.\n """\n self.account_id = check.str_param(account_id, "account_id")\n self.start_date = check.str_param(start_date, "start_date")\n self.end_date = check.opt_str_param(end_date, "end_date")\n self.access_token = check.str_param(access_token, "access_token")\n self.include_deleted = check.opt_bool_param(include_deleted, "include_deleted")\n self.fetch_thumbnail_images = check.opt_bool_param(\n fetch_thumbnail_images, "fetch_thumbnail_images"\n )\n self.custom_insights = check.opt_nullable_list_param(\n custom_insights, "custom_insights", FacebookMarketingSource.InsightConfig\n )\n self.page_size = check.opt_int_param(page_size, "page_size")\n self.insights_lookback_window = check.opt_int_param(\n insights_lookback_window, "insights_lookback_window"\n )\n self.max_batch_size = check.opt_int_param(max_batch_size, "max_batch_size")\n super().__init__("Facebook Marketing", name)
\n\n\n
[docs]class SurveymonkeySource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self, name: str, access_token: str, start_date: str, survey_ids: Optional[List[str]] = None\n ):\n """Airbyte Source for Surveymonkey.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/surveymonkey\n\n Args:\n name (str): The name of the destination.\n access_token (str): Access Token for making authenticated requests. See the docs for information on how to generate this key.\n start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.\n survey_ids (Optional[List[str]]): IDs of the surveys from which you'd like to replicate data. If left empty, data from all boards to which you have access will be replicated.\n """\n self.access_token = check.str_param(access_token, "access_token")\n self.start_date = check.str_param(start_date, "start_date")\n self.survey_ids = check.opt_nullable_list_param(survey_ids, "survey_ids", str)\n super().__init__("Surveymonkey", name)
\n\n\n
[docs]class PardotSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(\n self,\n name: str,\n pardot_business_unit_id: str,\n client_id: str,\n client_secret: str,\n refresh_token: str,\n start_date: Optional[str] = None,\n is_sandbox: Optional[bool] = None,\n ):\n """Airbyte Source for Pardot.\n\n Args:\n name (str): The name of the destination.\n pardot_business_unit_id (str): Pardot Business ID, can be found at Setup > Pardot > Pardot Account Setup\n client_id (str): The Consumer Key that can be found when viewing your app in Salesforce\n client_secret (str): The Consumer Secret that can be found when viewing your app in Salesforce\n refresh_token (str): Salesforce Refresh Token used for Airbyte to access your Salesforce account. If you don't know what this is, follow this guide to retrieve it.\n start_date (Optional[str]): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. Leave blank to skip this filter\n is_sandbox (Optional[bool]): Whether or not the app is in a Salesforce sandbox. If you do not know what this is, assume it is false.\n """\n self.pardot_business_unit_id = check.str_param(\n pardot_business_unit_id, "pardot_business_unit_id"\n )\n self.client_id = check.str_param(client_id, "client_id")\n self.client_secret = check.str_param(client_secret, "client_secret")\n self.refresh_token = check.str_param(refresh_token, "refresh_token")\n self.start_date = check.opt_str_param(start_date, "start_date")\n self.is_sandbox = check.opt_bool_param(is_sandbox, "is_sandbox")\n super().__init__("Pardot", name)
\n\n\n
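Illustrative only (import path assumed, Salesforce credentials are placeholders): constructing a PardotSource per the signature above:

from dagster_airbyte.managed.generated.sources import PardotSource

pardot_source = PardotSource(
    name="my_pardot_source",
    pardot_business_unit_id="<BUSINESS_UNIT_ID>",
    client_id="<SALESFORCE_CONSUMER_KEY>",
    client_secret="<SALESFORCE_CONSUMER_SECRET>",
    refresh_token="<SALESFORCE_REFRESH_TOKEN>",
)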
[docs]class FlexportSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, api_key: str, start_date: str):\n """Airbyte Source for Flexport.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/flexport\n\n Args:\n name (str): The name of the destination.\n\n """\n self.api_key = check.str_param(api_key, "api_key")\n self.start_date = check.str_param(start_date, "start_date")\n super().__init__("Flexport", name)
\n\n\n
[docs]class ZenefitsSource(GeneratedAirbyteSource):\n
[docs] @public\n def __init__(self, name: str, token: str):\n """Airbyte Source for Zenefits.\n\n Args:\n name (str): The name of the destination.\n token (str): Use Sync with Zenefits button on the link given on the readme file, and get the token to access the api\n """\n self.token = check.str_param(token, "token")\n super().__init__("Zenefits", name)
\n\n\n
[docs]class KafkaSource(GeneratedAirbyteSource):\n
[docs] class JSON:\n
[docs] @public\n def __init__(self, deserialization_type: Optional[str] = None):\n self.deserialization_type = check.opt_str_param(\n deserialization_type, "deserialization_type"\n )
\n\n
[docs] class AVRO:\n
[docs] @public\n def __init__(\n self,\n deserialization_type: Optional[str] = None,\n deserialization_strategy: Optional[str] = None,\n schema_registry_url: Optional[str] = None,\n schema_registry_username: Optional[str] = None,\n schema_registry_password: Optional[str] = None,\n ):\n self.deserialization_type = check.opt_str_param(\n deserialization_type, "deserialization_type"\n )\n self.deserialization_strategy = check.opt_str_param(\n deserialization_strategy, "deserialization_strategy"\n )\n self.schema_registry_url = check.opt_str_param(\n schema_registry_url, "schema_registry_url"\n )\n self.schema_registry_username = check.opt_str_param(\n schema_registry_username, "schema_registry_username"\n )\n self.schema_registry_password = check.opt_str_param(\n schema_registry_password, "schema_registry_password"\n )
\n\n
[docs] class ManuallyAssignAListOfPartitions:\n
[docs] @public\n def __init__(self, topic_partitions: str):\n self.subscription_type = "assign"\n self.topic_partitions = check.str_param(topic_partitions, "topic_partitions")
\n\n
[docs] class SubscribeToAllTopicsMatchingSpecifiedPattern:\n
[docs] @public\n def __init__(self, topic_pattern: str):\n self.subscription_type = "subscribe"\n self.topic_pattern = check.str_param(topic_pattern, "topic_pattern")
\n\n
[docs] class PLAINTEXT:\n
[docs] @public\n def __init__(self, security_protocol: str):\n self.security_protocol = check.str_param(security_protocol, "security_protocol")
\n\n
[docs] class SASLPLAINTEXT:\n
[docs] @public\n def __init__(self, security_protocol: str, sasl_mechanism: str, sasl_jaas_config: str):\n self.security_protocol = check.str_param(security_protocol, "security_protocol")\n self.sasl_mechanism = check.str_param(sasl_mechanism, "sasl_mechanism")\n self.sasl_jaas_config = check.str_param(sasl_jaas_config, "sasl_jaas_config")
\n\n
[docs] class SASLSSL:\n
[docs] @public\n def __init__(self, security_protocol: str, sasl_mechanism: str, sasl_jaas_config: str):\n self.security_protocol = check.str_param(security_protocol, "security_protocol")\n self.sasl_mechanism = check.str_param(sasl_mechanism, "sasl_mechanism")\n self.sasl_jaas_config = check.str_param(sasl_jaas_config, "sasl_jaas_config")
\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n MessageFormat: Union["KafkaSource.JSON", "KafkaSource.AVRO"],\n bootstrap_servers: str,\n subscription: Union[\n "KafkaSource.ManuallyAssignAListOfPartitions",\n "KafkaSource.SubscribeToAllTopicsMatchingSpecifiedPattern",\n ],\n protocol: Union[\n "KafkaSource.PLAINTEXT", "KafkaSource.SASLPLAINTEXT", "KafkaSource.SASLSSL"\n ],\n test_topic: Optional[str] = None,\n group_id: Optional[str] = None,\n max_poll_records: Optional[int] = None,\n polling_time: Optional[int] = None,\n client_id: Optional[str] = None,\n enable_auto_commit: Optional[bool] = None,\n auto_commit_interval_ms: Optional[int] = None,\n client_dns_lookup: Optional[str] = None,\n retry_backoff_ms: Optional[int] = None,\n request_timeout_ms: Optional[int] = None,\n receive_buffer_bytes: Optional[int] = None,\n auto_offset_reset: Optional[str] = None,\n repeated_calls: Optional[int] = None,\n max_records_process: Optional[int] = None,\n ):\n """Airbyte Source for Kafka.\n\n Documentation can be found at https://docs.airbyte.com/integrations/sources/kafka\n\n Args:\n name (str): The name of the destination.\n MessageFormat (Union[KafkaSource.JSON, KafkaSource.AVRO]): The serialization used based on this\n bootstrap_servers (str): A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping&mdash;this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).\n subscription (Union[KafkaSource.ManuallyAssignAListOfPartitions, KafkaSource.SubscribeToAllTopicsMatchingSpecifiedPattern]): You can choose to manually assign a list of partitions, or subscribe to all topics matching specified pattern to get dynamically assigned partitions.\n test_topic (Optional[str]): The Topic to test in case the Airbyte can consume messages.\n group_id (Optional[str]): The Group ID is how you distinguish different consumer groups.\n max_poll_records (Optional[int]): The maximum number of records returned in a single call to poll(). Note, that max_poll_records does not impact the underlying fetching behavior. The consumer will cache the records from each fetch request and returns them incrementally from each poll.\n polling_time (Optional[int]): Amount of time Kafka connector should try to poll for messages.\n protocol (Union[KafkaSource.PLAINTEXT, KafkaSource.SASLPLAINTEXT, KafkaSource.SASLSSL]): The Protocol used to communicate with brokers.\n client_id (Optional[str]): An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.\n enable_auto_commit (Optional[bool]): If true, the consumer's offset will be periodically committed in the background.\n auto_commit_interval_ms (Optional[int]): The frequency in milliseconds that the consumer offsets are auto-committed to Kafka if enable.auto.commit is set to true.\n client_dns_lookup (Optional[str]): Controls how the client uses DNS lookups. 
If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.\n retry_backoff_ms (Optional[int]): The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios.\n request_timeout_ms (Optional[int]): The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.\n receive_buffer_bytes (Optional[int]): The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.\n auto_offset_reset (Optional[str]): What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server - earliest: automatically reset the offset to the earliest offset, latest: automatically reset the offset to the latest offset, none: throw exception to the consumer if no previous offset is found for the consumer's group, anything else: throw exception to the consumer.\n repeated_calls (Optional[int]): The number of repeated calls to poll() if no messages were received.\n max_records_process (Optional[int]): The Maximum to be processed per execution\n """\n self.MessageFormat = check.inst_param(\n MessageFormat, "MessageFormat", (KafkaSource.JSON, KafkaSource.AVRO)\n )\n self.bootstrap_servers = check.str_param(bootstrap_servers, "bootstrap_servers")\n self.subscription = check.inst_param(\n subscription,\n "subscription",\n (\n KafkaSource.ManuallyAssignAListOfPartitions,\n KafkaSource.SubscribeToAllTopicsMatchingSpecifiedPattern,\n ),\n )\n self.test_topic = check.opt_str_param(test_topic, "test_topic")\n self.group_id = check.opt_str_param(group_id, "group_id")\n self.max_poll_records = check.opt_int_param(max_poll_records, "max_poll_records")\n self.polling_time = check.opt_int_param(polling_time, "polling_time")\n self.protocol = check.inst_param(\n protocol,\n "protocol",\n (KafkaSource.PLAINTEXT, KafkaSource.SASLPLAINTEXT, KafkaSource.SASLSSL),\n )\n self.client_id = check.opt_str_param(client_id, "client_id")\n self.enable_auto_commit = check.opt_bool_param(enable_auto_commit, "enable_auto_commit")\n self.auto_commit_interval_ms = check.opt_int_param(\n auto_commit_interval_ms, "auto_commit_interval_ms"\n )\n self.client_dns_lookup = check.opt_str_param(client_dns_lookup, "client_dns_lookup")\n self.retry_backoff_ms = check.opt_int_param(retry_backoff_ms, "retry_backoff_ms")\n self.request_timeout_ms = check.opt_int_param(request_timeout_ms, "request_timeout_ms")\n self.receive_buffer_bytes = check.opt_int_param(\n receive_buffer_bytes, "receive_buffer_bytes"\n )\n self.auto_offset_reset = check.opt_str_param(auto_offset_reset, "auto_offset_reset")\n self.repeated_calls = check.opt_int_param(repeated_calls, "repeated_calls")\n self.max_records_process = 
check.opt_int_param(max_records_process, "max_records_process")\n super().__init__("Kafka", name)
\n
", "current_page_name": "_modules/dagster_airbyte/managed/generated/sources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_airbyte.managed.generated.sources"}}, "reconciliation": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_airbyte.managed.reconciliation

\nfrom typing import (\n    Any,\n    Callable,\n    Dict,\n    Iterable,\n    List,\n    Mapping,\n    Optional,\n    Sequence,\n    Tuple,\n    Union,\n    cast,\n)\n\nimport dagster._check as check\nfrom dagster import AssetKey\nfrom dagster._annotations import experimental, public\nfrom dagster._core.definitions.cacheable_assets import CacheableAssetsDefinition\nfrom dagster._core.definitions.events import CoercibleToAssetKeyPrefix\nfrom dagster._core.definitions.freshness_policy import FreshnessPolicy\nfrom dagster._core.definitions.resource_definition import ResourceDefinition\nfrom dagster._core.execution.context.init import build_init_resource_context\nfrom dagster._utils.merger import deep_merge_dicts\nfrom dagster_managed_elements import (\n    ManagedElementCheckResult,\n    ManagedElementDiff,\n    ManagedElementError,\n)\nfrom dagster_managed_elements.types import (\n    SECRET_MASK_VALUE,\n    ManagedElementReconciler,\n    is_key_secret,\n)\nfrom dagster_managed_elements.utils import UNSET, diff_dicts\n\nfrom dagster_airbyte.asset_defs import (\n    AirbyteConnectionMetadata,\n    AirbyteInstanceCacheableAssetsDefinition,\n    _clean_name,\n)\nfrom dagster_airbyte.managed.types import (\n    AirbyteConnection,\n    AirbyteDestination,\n    AirbyteDestinationNamespace,\n    AirbyteSource,\n    AirbyteSyncMode,\n    InitializedAirbyteConnection,\n    InitializedAirbyteDestination,\n    InitializedAirbyteSource,\n)\nfrom dagster_airbyte.resources import AirbyteResource\nfrom dagster_airbyte.utils import is_basic_normalization_operation\n\n\ndef gen_configured_stream_json(\n    source_stream: Mapping[str, Any], user_stream_config: Mapping[str, AirbyteSyncMode]\n) -> Mapping[str, Any]:\n    """Generates an Airbyte API stream defintiion based on the succinct user-provided config and the\n    full stream definition from the source.\n    """\n    config = user_stream_config[source_stream["stream"]["name"]]\n    return deep_merge_dicts(\n        source_stream,\n        {"config": config.to_json()},\n    )\n\n\ndef _ignore_secrets_compare_fn(k: str, _cv: Any, dv: Any) -> Optional[bool]:\n    if is_key_secret(k):\n        return dv == SECRET_MASK_VALUE\n    return None\n\n\ndef _diff_configs(\n    config_dict: Mapping[str, Any], dst_dict: Mapping[str, Any], ignore_secrets: bool = True\n) -> ManagedElementDiff:\n    return diff_dicts(\n        config_dict=config_dict,\n        dst_dict=dst_dict,\n        custom_compare_fn=_ignore_secrets_compare_fn if ignore_secrets else None,\n    )\n\n\ndef diff_sources(\n    config_src: Optional[AirbyteSource],\n    curr_src: Optional[AirbyteSource],\n    ignore_secrets: bool = True,\n) -> ManagedElementCheckResult:\n    """Utility to diff two AirbyteSource objects."""\n    diff = _diff_configs(\n        config_src.source_configuration if config_src else {},\n        curr_src.source_configuration if curr_src else {},\n        ignore_secrets,\n    )\n    if not diff.is_empty():\n        name = config_src.name if config_src else curr_src.name if curr_src else "Unknown"\n        return ManagedElementDiff().with_nested(name, diff)\n\n    return ManagedElementDiff()\n\n\ndef diff_destinations(\n    config_dst: Optional[AirbyteDestination],\n    curr_dst: Optional[AirbyteDestination],\n    ignore_secrets: bool = True,\n) -> ManagedElementCheckResult:\n    """Utility to diff two AirbyteDestination objects."""\n    diff = _diff_configs(\n        config_dst.destination_configuration if config_dst else {},\n        curr_dst.destination_configuration if 
curr_dst else {},\n        ignore_secrets,\n    )\n    if not diff.is_empty():\n        name = config_dst.name if config_dst else curr_dst.name if curr_dst else "Unknown"\n        return ManagedElementDiff().with_nested(name, diff)\n\n    return ManagedElementDiff()\n\n\ndef conn_dict(conn: Optional[AirbyteConnection]) -> Mapping[str, Any]:\n    if not conn:\n        return {}\n    return {\n        "source": conn.source.name if conn.source else "Unknown",\n        "destination": conn.destination.name if conn.destination else "Unknown",\n        "normalize data": conn.normalize_data,\n        "streams": {k: v.to_json() for k, v in conn.stream_config.items()},\n        "destination namespace": (\n            conn.destination_namespace.name\n            if isinstance(conn.destination_namespace, AirbyteDestinationNamespace)\n            else conn.destination_namespace\n        ),\n        "prefix": conn.prefix,\n    }\n\n\nOPTIONAL_STREAM_SETTINGS = ("cursorField", "primaryKey")\n\n\ndef _compare_stream_values(k: str, cv: str, _dv: str):\n    """Don't register a diff for optional stream settings if the value is not set\n    in the user-provided config, this means it will default to the value in the\n    source.\n    """\n    return True if k in OPTIONAL_STREAM_SETTINGS and cv == UNSET else None\n\n\ndef diff_connections(\n    config_conn: Optional[AirbyteConnection], curr_conn: Optional[AirbyteConnection]\n) -> ManagedElementCheckResult:\n    """Utility to diff two AirbyteConnection objects."""\n    diff = diff_dicts(\n        conn_dict(config_conn),\n        conn_dict(curr_conn),\n        custom_compare_fn=_compare_stream_values,\n    )\n    if not diff.is_empty():\n        name = config_conn.name if config_conn else curr_conn.name if curr_conn else "Unknown"\n        return ManagedElementDiff().with_nested(name, diff)\n\n    return ManagedElementDiff()\n\n\ndef reconcile_sources(\n    res: AirbyteResource,\n    config_sources: Mapping[str, AirbyteSource],\n    existing_sources: Mapping[str, InitializedAirbyteSource],\n    workspace_id: str,\n    dry_run: bool,\n    should_delete: bool,\n    ignore_secrets: bool,\n) -> Tuple[Mapping[str, InitializedAirbyteSource], ManagedElementCheckResult]:\n    """Generates a diff of the configured and existing sources and reconciles them to match the\n    configured state if dry_run is False.\n    """\n    diff = ManagedElementDiff()\n\n    initialized_sources: Dict[str, InitializedAirbyteSource] = {}\n    for source_name in set(config_sources.keys()).union(existing_sources.keys()):\n        configured_source = config_sources.get(source_name)\n        existing_source = existing_sources.get(source_name)\n\n        # Ignore sources not mentioned in the user config unless the user specifies to delete\n        if not should_delete and existing_source and not configured_source:\n            initialized_sources[source_name] = existing_source\n            continue\n\n        diff = diff.join(\n            diff_sources(  # type: ignore\n                configured_source,\n                existing_source.source if existing_source else None,\n                ignore_secrets,\n            )\n        )\n\n        if existing_source and (\n            not configured_source or (configured_source.must_be_recreated(existing_source.source))\n        ):\n            initialized_sources[source_name] = existing_source\n            if not dry_run:\n                res.make_request(\n                    endpoint="/sources/delete",\n                    data={"sourceId": 
existing_source.source_id},\n                )\n            existing_source = None\n\n        if configured_source:\n            defn_id = check.not_none(\n                res.get_source_definition_by_name(configured_source.source_type)\n            )\n            base_source_defn_dict = {\n                "name": configured_source.name,\n                "connectionConfiguration": configured_source.source_configuration,\n            }\n            source_id = ""\n            if existing_source:\n                source_id = existing_source.source_id\n                if not dry_run:\n                    res.make_request(\n                        endpoint="/sources/update",\n                        data={"sourceId": source_id, **base_source_defn_dict},\n                    )\n            else:\n                if not dry_run:\n                    create_result = cast(\n                        Dict[str, str],\n                        check.not_none(\n                            res.make_request(\n                                endpoint="/sources/create",\n                                data={\n                                    "sourceDefinitionId": defn_id,\n                                    "workspaceId": workspace_id,\n                                    **base_source_defn_dict,\n                                },\n                            )\n                        ),\n                    )\n                    source_id = create_result["sourceId"]\n\n            if source_name in initialized_sources:\n                # Preserve to be able to initialize old connection object\n                initialized_sources[f"{source_name}_old"] = initialized_sources[source_name]\n            initialized_sources[source_name] = InitializedAirbyteSource(\n                source=configured_source,\n                source_id=source_id,\n                source_definition_id=defn_id,\n            )\n    return initialized_sources, diff\n\n\ndef reconcile_destinations(\n    res: AirbyteResource,\n    config_destinations: Mapping[str, AirbyteDestination],\n    existing_destinations: Mapping[str, InitializedAirbyteDestination],\n    workspace_id: str,\n    dry_run: bool,\n    should_delete: bool,\n    ignore_secrets: bool,\n) -> Tuple[Mapping[str, InitializedAirbyteDestination], ManagedElementCheckResult]:\n    """Generates a diff of the configured and existing destinations and reconciles them to match the\n    configured state if dry_run is False.\n    """\n    diff = ManagedElementDiff()\n\n    initialized_destinations: Dict[str, InitializedAirbyteDestination] = {}\n    for destination_name in set(config_destinations.keys()).union(existing_destinations.keys()):\n        configured_destination = config_destinations.get(destination_name)\n        existing_destination = existing_destinations.get(destination_name)\n\n        # Ignore destinations not mentioned in the user config unless the user specifies to delete\n        if not should_delete and existing_destination and not configured_destination:\n            initialized_destinations[destination_name] = existing_destination\n            continue\n\n        diff = diff.join(\n            diff_destinations(  # type: ignore\n                configured_destination,\n                existing_destination.destination if existing_destination else None,\n                ignore_secrets,\n            )\n        )\n\n        if existing_destination and (\n            not configured_destination\n            or 
(configured_destination.must_be_recreated(existing_destination.destination))\n        ):\n            initialized_destinations[destination_name] = existing_destination\n            if not dry_run:\n                res.make_request(\n                    endpoint="/destinations/delete",\n                    data={"destinationId": existing_destination.destination_id},\n                )\n            existing_destination = None\n\n        if configured_destination:\n            defn_id = res.get_destination_definition_by_name(\n                configured_destination.destination_type\n            )\n            base_destination_defn_dict = {\n                "name": configured_destination.name,\n                "connectionConfiguration": configured_destination.destination_configuration,\n            }\n            destination_id = ""\n            if existing_destination:\n                destination_id = existing_destination.destination_id\n                if not dry_run:\n                    res.make_request(\n                        endpoint="/destinations/update",\n                        data={"destinationId": destination_id, **base_destination_defn_dict},\n                    )\n            else:\n                if not dry_run:\n                    create_result = cast(\n                        Dict[str, str],\n                        check.not_none(\n                            res.make_request(\n                                endpoint="/destinations/create",\n                                data={\n                                    "destinationDefinitionId": defn_id,\n                                    "workspaceId": workspace_id,\n                                    **base_destination_defn_dict,\n                                },\n                            )\n                        ),\n                    )\n                    destination_id = create_result["destinationId"]\n\n            if destination_name in initialized_destinations:\n                # Preserve to be able to initialize old connection object\n                initialized_destinations[f"{destination_name}_old"] = initialized_destinations[\n                    destination_name\n                ]\n            initialized_destinations[destination_name] = InitializedAirbyteDestination(\n                destination=configured_destination,\n                destination_id=destination_id,\n                destination_definition_id=defn_id,\n            )\n    return initialized_destinations, diff\n\n\ndef reconcile_config(\n    res: AirbyteResource,\n    objects: Sequence[AirbyteConnection],\n    dry_run: bool = False,\n    should_delete: bool = False,\n    ignore_secrets: bool = True,\n) -> ManagedElementCheckResult:\n    """Main entry point for the reconciliation process. 
Takes a list of AirbyteConnection objects\n    and a pointer to an Airbyte instance and returns a diff, along with applying the diff\n    if dry_run is False.\n    """\n    with res.cache_requests():\n        config_connections = {conn.name: conn for conn in objects}\n        config_sources = {conn.source.name: conn.source for conn in objects}\n        config_dests = {conn.destination.name: conn.destination for conn in objects}\n\n        workspace_id = res.get_default_workspace()\n\n        existing_sources_raw = cast(\n            Dict[str, List[Dict[str, Any]]],\n            check.not_none(\n                res.make_request(endpoint="/sources/list", data={"workspaceId": workspace_id})\n            ),\n        )\n        existing_dests_raw = cast(\n            Dict[str, List[Dict[str, Any]]],\n            check.not_none(\n                res.make_request(endpoint="/destinations/list", data={"workspaceId": workspace_id})\n            ),\n        )\n\n        existing_sources: Dict[str, InitializedAirbyteSource] = {\n            source_json["name"]: InitializedAirbyteSource.from_api_json(source_json)\n            for source_json in existing_sources_raw.get("sources", [])\n        }\n        existing_dests: Dict[str, InitializedAirbyteDestination] = {\n            destination_json["name"]: InitializedAirbyteDestination.from_api_json(destination_json)\n            for destination_json in existing_dests_raw.get("destinations", [])\n        }\n\n        # First, remove any connections that need to be deleted, so that we can\n        # safely delete any sources/destinations that are no longer referenced\n        # or that need to be recreated.\n        connections_diff = reconcile_connections_pre(\n            res,\n            config_connections,\n            existing_sources,\n            existing_dests,\n            workspace_id,\n            dry_run,\n            should_delete,\n        )\n\n        all_sources, sources_diff = reconcile_sources(\n            res,\n            config_sources,\n            existing_sources,\n            workspace_id,\n            dry_run,\n            should_delete,\n            ignore_secrets,\n        )\n        all_dests, dests_diff = reconcile_destinations(\n            res, config_dests, existing_dests, workspace_id, dry_run, should_delete, ignore_secrets\n        )\n\n        # Now that we have updated the set of sources and destinations, we can\n        # recreate or update any connections which depend on them.\n        reconcile_connections_post(\n            res,\n            config_connections,\n            all_sources,\n            all_dests,\n            workspace_id,\n            dry_run,\n        )\n\n        return ManagedElementDiff().join(sources_diff).join(dests_diff).join(connections_diff)  # type: ignore\n\n\ndef reconcile_normalization(\n    res: AirbyteResource,\n    existing_connection_id: Optional[str],\n    destination: InitializedAirbyteDestination,\n    normalization_config: Optional[bool],\n    workspace_id: str,\n) -> Optional[str]:\n    """Reconciles the normalization configuration for a connection.\n\n    If normalization_config is None, then defaults to True on destinations that support normalization\n    and False on destinations that do not.\n    """\n    existing_basic_norm_op_id = None\n    if existing_connection_id:\n        operations = cast(\n            Dict[str, List[Dict[str, str]]],\n            check.not_none(\n                res.make_request(\n                    endpoint="/operations/list",\n                    
data={"connectionId": existing_connection_id},\n                )\n            ),\n        )\n        existing_basic_norm_op = next(\n            (\n                operation\n                for operation in operations["operations"]\n                if is_basic_normalization_operation(operation)\n            ),\n            None,\n        )\n        existing_basic_norm_op_id = (\n            existing_basic_norm_op["operationId"] if existing_basic_norm_op else None\n        )\n\n    if normalization_config is not False:\n        if destination.destination_definition_id and res.does_dest_support_normalization(\n            destination.destination_definition_id, workspace_id\n        ):\n            if existing_basic_norm_op_id:\n                return existing_basic_norm_op_id\n            else:\n                return cast(\n                    Dict[str, str],\n                    check.not_none(\n                        res.make_request(\n                            endpoint="/operations/create",\n                            data={\n                                "workspaceId": workspace_id,\n                                "name": "Normalization",\n                                "operatorConfiguration": {\n                                    "operatorType": "normalization",\n                                    "normalization": {"option": "basic"},\n                                },\n                            },\n                        )\n                    ),\n                )["operationId"]\n        elif normalization_config is True:\n            raise Exception(\n                f"Destination {destination.destination.name} does not support normalization."\n            )\n\n    return None\n\n\ndef reconcile_connections_pre(\n    res: AirbyteResource,\n    config_connections: Mapping[str, AirbyteConnection],\n    existing_sources: Mapping[str, InitializedAirbyteSource],\n    existing_destinations: Mapping[str, InitializedAirbyteDestination],\n    workspace_id: str,\n    dry_run: bool,\n    should_delete: bool,\n) -> ManagedElementCheckResult:\n    """Generates the diff for connections, and deletes any connections that are not in the config if\n    dry_run is False.\n\n    It's necessary to do this in two steps because we need to remove connections that depend on\n    sources and destinations that are being deleted or recreated before Airbyte will allow us to\n    delete or recreate them.\n    """\n    diff = ManagedElementDiff()\n\n    existing_connections_raw = cast(\n        Dict[str, List[Dict[str, Any]]],\n        check.not_none(\n            res.make_request(endpoint="/connections/list", data={"workspaceId": workspace_id})\n        ),\n    )\n    existing_connections: Dict[str, InitializedAirbyteConnection] = {\n        connection_json["name"]: InitializedAirbyteConnection.from_api_json(\n            connection_json, existing_sources, existing_destinations\n        )\n        for connection_json in existing_connections_raw.get("connections", [])\n    }\n\n    for conn_name in set(config_connections.keys()).union(existing_connections.keys()):\n        config_conn = config_connections.get(conn_name)\n        existing_conn = existing_connections.get(conn_name)\n\n        # Ignore connections not mentioned in the user config unless the user specifies to delete\n        if not should_delete and not config_conn:\n            continue\n\n        diff = diff.join(\n            diff_connections(config_conn, existing_conn.connection if existing_conn else None)  # type: ignore\n       
 )\n\n        if existing_conn and (\n            not config_conn or config_conn.must_be_recreated(existing_conn.connection)\n        ):\n            if not dry_run:\n                res.make_request(\n                    endpoint="/connections/delete",\n                    data={"connectionId": existing_conn.connection_id},\n                )\n    return diff\n\n\ndef reconcile_connections_post(\n    res: AirbyteResource,\n    config_connections: Mapping[str, AirbyteConnection],\n    init_sources: Mapping[str, InitializedAirbyteSource],\n    init_dests: Mapping[str, InitializedAirbyteDestination],\n    workspace_id: str,\n    dry_run: bool,\n) -> None:\n    """Creates new and modifies existing connections based on the config if dry_run is False."""\n    existing_connections_raw = cast(\n        Dict[str, List[Dict[str, Any]]],\n        check.not_none(\n            res.make_request(endpoint="/connections/list", data={"workspaceId": workspace_id})\n        ),\n    )\n    existing_connections = {\n        connection_json["name"]: InitializedAirbyteConnection.from_api_json(\n            connection_json, init_sources, init_dests\n        )\n        for connection_json in existing_connections_raw.get("connections", [])\n    }\n\n    for conn_name, config_conn in config_connections.items():\n        existing_conn = existing_connections.get(conn_name)\n\n        normalization_operation_id = None\n        if not dry_run:\n            destination = init_dests[config_conn.destination.name]\n\n            # Enable or disable basic normalization based on config\n            normalization_operation_id = reconcile_normalization(\n                res,\n                existing_connections.get("name", {}).get("connectionId"),\n                destination,\n                config_conn.normalize_data,\n                workspace_id,\n            )\n\n        configured_streams = []\n        if not dry_run:\n            source = init_sources[config_conn.source.name]\n            schema = res.get_source_schema(source.source_id)\n            base_streams = schema["catalog"]["streams"]\n\n            configured_streams = [\n                gen_configured_stream_json(stream, config_conn.stream_config)\n                for stream in base_streams\n                if stream["stream"]["name"] in config_conn.stream_config\n            ]\n\n        connection_base_json = {\n            "name": conn_name,\n            "namespaceDefinition": "source",\n            "namespaceFormat": "${SOURCE_NAMESPACE}",\n            "prefix": "",\n            "operationIds": [normalization_operation_id] if normalization_operation_id else [],\n            "syncCatalog": {"streams": configured_streams},\n            "scheduleType": "manual",\n            "status": "active",\n        }\n\n        if isinstance(config_conn.destination_namespace, AirbyteDestinationNamespace):\n            connection_base_json["namespaceDefinition"] = config_conn.destination_namespace.value\n        else:\n            connection_base_json["namespaceDefinition"] = "customformat"\n            connection_base_json["namespaceFormat"] = cast(str, config_conn.destination_namespace)\n\n        if config_conn.prefix:\n            connection_base_json["prefix"] = config_conn.prefix\n\n        if existing_conn:\n            if not dry_run:\n                source = init_sources[config_conn.source.name]\n                res.make_request(\n                    endpoint="/connections/update",\n                    data={\n                        **connection_base_json,\n   
                     "sourceCatalogId": res.get_source_catalog_id(source.source_id),\n                        "connectionId": existing_conn.connection_id,\n                    },\n                )\n        else:\n            if not dry_run:\n                source = init_sources[config_conn.source.name]\n                destination = init_dests[config_conn.destination.name]\n\n                res.make_request(\n                    endpoint="/connections/create",\n                    data={\n                        **connection_base_json,\n                        "sourceCatalogId": res.get_source_catalog_id(source.source_id),\n                        "sourceId": source.source_id,\n                        "destinationId": destination.destination_id,\n                    },\n                )\n\n\n
[docs]@experimental\nclass AirbyteManagedElementReconciler(ManagedElementReconciler):\n """Reconciles Python-specified Airbyte connections with an Airbyte instance.\n\n Passing the module containing an AirbyteManagedElementReconciler to the dagster-airbyte\n CLI will allow you to check the state of your Python-code-specified Airbyte connections\n against an Airbyte instance, and reconcile them if necessary.\n\n This functionality is experimental and subject to change.\n """\n\n
[docs] @public\n def __init__(\n self,\n airbyte: Union[AirbyteResource, ResourceDefinition],\n connections: Iterable[AirbyteConnection],\n delete_unmentioned_resources: bool = False,\n ):\n """Reconciles Python-specified Airbyte connections with an Airbyte instance.\n\n Args:\n airbyte (Union[AirbyteResource, ResourceDefinition]): The Airbyte resource definition to reconcile against.\n connections (Iterable[AirbyteConnection]): The Airbyte connection objects to reconcile.\n delete_unmentioned_resources (bool): Whether to delete resources that are not mentioned in\n the set of connections provided. When True, all Airbyte instance contents are effectively\n managed by the reconciler. Defaults to False.\n """\n # airbyte = check.inst_param(airbyte, "airbyte", ResourceDefinition)\n\n self._airbyte_instance: AirbyteResource = (\n airbyte\n if isinstance(airbyte, AirbyteResource)\n else airbyte(build_init_resource_context())\n )\n self._connections = list(\n check.iterable_param(connections, "connections", of_type=AirbyteConnection)\n )\n self._delete_unmentioned_resources = check.bool_param(\n delete_unmentioned_resources, "delete_unmentioned_resources"\n )\n\n super().__init__()
\n\n def check(self, **kwargs) -> ManagedElementCheckResult:\n return reconcile_config(\n self._airbyte_instance,\n self._connections,\n dry_run=True,\n should_delete=self._delete_unmentioned_resources,\n ignore_secrets=(not kwargs.get("include_all_secrets", False)),\n )\n\n def apply(self, **kwargs) -> ManagedElementCheckResult:\n return reconcile_config(\n self._airbyte_instance,\n self._connections,\n dry_run=False,\n should_delete=self._delete_unmentioned_resources,\n ignore_secrets=(not kwargs.get("include_all_secrets", False)),\n )
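Because ``check()`` and ``apply()`` are public methods, the same diff/apply cycle the dagster-airbyte CLI drives can also be exercised from Python. A minimal sketch, assuming the reconciler and ``AirbyteResource`` are importable from the top-level ``dagster_airbyte`` package and that ``connections`` stands in for your own list of ``AirbyteConnection`` objects; the host/port values are placeholders:

.. code-block:: python

    # Hedged sketch: construct the reconciler and inspect the diff from Python.
    # `connections` is a placeholder for your list of AirbyteConnection objects.
    from dagster_airbyte import AirbyteManagedElementReconciler, AirbyteResource

    airbyte_instance = AirbyteResource(host="localhost", port="8000")

    reconciler = AirbyteManagedElementReconciler(
        airbyte=airbyte_instance,
        connections=connections,
        delete_unmentioned_resources=False,  # leave undeclared Airbyte objects alone
    )

    diff = reconciler.check()   # dry run: report what would change, touch nothing
    print(diff)
    # reconciler.apply()        # uncomment to push the declared state to Airbyte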
\n\n\nclass AirbyteManagedElementCacheableAssetsDefinition(AirbyteInstanceCacheableAssetsDefinition):\n def __init__(\n self,\n airbyte_resource_def: AirbyteResource,\n key_prefix: Sequence[str],\n create_assets_for_normalization_tables: bool,\n connection_to_group_fn: Optional[Callable[[str], Optional[str]]],\n connections: Iterable[AirbyteConnection],\n connection_to_io_manager_key_fn: Optional[Callable[[str], Optional[str]]],\n connection_to_asset_key_fn: Optional[Callable[[AirbyteConnectionMetadata, str], AssetKey]],\n connection_to_freshness_policy_fn: Optional[\n Callable[[AirbyteConnectionMetadata], Optional[FreshnessPolicy]]\n ],\n ):\n defined_conn_names = {conn.name for conn in connections}\n super().__init__(\n airbyte_resource_def=airbyte_resource_def,\n workspace_id=None,\n key_prefix=key_prefix,\n create_assets_for_normalization_tables=create_assets_for_normalization_tables,\n connection_to_group_fn=connection_to_group_fn,\n connection_to_io_manager_key_fn=connection_to_io_manager_key_fn,\n connection_filter=lambda conn: conn.name in defined_conn_names,\n connection_to_asset_key_fn=connection_to_asset_key_fn,\n connection_to_freshness_policy_fn=connection_to_freshness_policy_fn,\n )\n self._connections: List[AirbyteConnection] = list(connections)\n\n def _get_connections(self) -> Sequence[Tuple[str, AirbyteConnectionMetadata]]:\n diff = reconcile_config(self._airbyte_instance, self._connections, dry_run=True)\n if isinstance(diff, ManagedElementDiff) and not diff.is_empty():\n raise ValueError(\n "Airbyte connections are not in sync with provided configuration, diff:\\n{}".format(\n str(diff)\n )\n )\n elif isinstance(diff, ManagedElementError):\n raise ValueError(f"Error checking Airbyte connections: {diff}")\n\n return super()._get_connections()\n\n\n
[docs]@experimental\ndef load_assets_from_connections(\n airbyte: Union[AirbyteResource, ResourceDefinition],\n connections: Iterable[AirbyteConnection],\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n create_assets_for_normalization_tables: bool = True,\n connection_to_group_fn: Optional[Callable[[str], Optional[str]]] = _clean_name,\n io_manager_key: Optional[str] = None,\n connection_to_io_manager_key_fn: Optional[Callable[[str], Optional[str]]] = None,\n connection_to_asset_key_fn: Optional[\n Callable[[AirbyteConnectionMetadata, str], AssetKey]\n ] = None,\n connection_to_freshness_policy_fn: Optional[\n Callable[[AirbyteConnectionMetadata], Optional[FreshnessPolicy]]\n ] = None,\n) -> CacheableAssetsDefinition:\n """Loads Airbyte connection assets from a configured AirbyteResource instance, checking against a list of AirbyteConnection objects.\n This method will raise an error on repo load if the passed AirbyteConnection objects are not in sync with the Airbyte instance.\n\n Args:\n airbyte (Union[AirbyteResource, ResourceDefinition]): An AirbyteResource configured with the appropriate connection\n details.\n connections (Iterable[AirbyteConnection]): A list of AirbyteConnection objects to build assets for.\n key_prefix (Optional[CoercibleToAssetKeyPrefix]): A prefix for the asset keys created.\n create_assets_for_normalization_tables (bool): If True, assets will be created for tables\n created by Airbyte's normalization feature. If False, only the destination tables\n will be created. Defaults to True.\n connection_to_group_fn (Optional[Callable[[str], Optional[str]]]): Function which returns an asset\n group name for a given Airbyte connection name. If None, no groups will be created. Defaults\n to a basic sanitization function.\n io_manager_key (Optional[str]): The IO manager key to use for all assets. Defaults to "io_manager".\n Use this if all assets should be loaded from the same source, otherwise use connection_to_io_manager_key_fn.\n connection_to_io_manager_key_fn (Optional[Callable[[str], Optional[str]]]): Function which returns an\n IO manager key for a given Airbyte connection name. When other ops are downstream of the loaded assets,\n the IOManager specified determines how the inputs to those ops are loaded. Defaults to "io_manager".\n connection_to_asset_key_fn (Optional[Callable[[AirbyteConnectionMetadata, str], AssetKey]]): Optional function which\n takes in connection metadata and table name and returns an asset key for the table. If None, the default asset\n key is based on the table name. Any asset key prefix will be applied to the output of this function.\n connection_to_freshness_policy_fn (Optional[Callable[[AirbyteConnectionMetadata], Optional[FreshnessPolicy]]]): Optional function which\n takes in connection metadata and returns a freshness policy for the connection. If None, no freshness policy will be applied.\n\n **Examples:**\n\n .. 
code-block:: python\n\n from dagster_airbyte import (\n AirbyteConnection,\n AirbyteResource,\n load_assets_from_connections,\n )\n\n airbyte_instance = AirbyteResource(\n host="localhost",\n port="8000",\n )\n airbyte_connections = [\n AirbyteConnection(...),\n AirbyteConnection(...)\n ]\n airbyte_assets = load_assets_from_connections(airbyte_instance, airbyte_connections)\n """\n if isinstance(key_prefix, str):\n key_prefix = [key_prefix]\n key_prefix = check.list_param(key_prefix or [], "key_prefix", of_type=str)\n\n check.invariant(\n not io_manager_key or not connection_to_io_manager_key_fn,\n "Cannot specify both io_manager_key and connection_to_io_manager_key_fn",\n )\n if not connection_to_io_manager_key_fn:\n connection_to_io_manager_key_fn = lambda _: io_manager_key\n\n return AirbyteManagedElementCacheableAssetsDefinition(\n airbyte_resource_def=(\n airbyte\n if isinstance(airbyte, AirbyteResource)\n else airbyte(build_init_resource_context())\n ),\n key_prefix=key_prefix,\n create_assets_for_normalization_tables=check.bool_param(\n create_assets_for_normalization_tables, "create_assets_for_normalization_tables"\n ),\n connection_to_group_fn=check.opt_callable_param(\n connection_to_group_fn, "connection_to_group_fn"\n ),\n connection_to_io_manager_key_fn=connection_to_io_manager_key_fn,\n connections=check.iterable_param(connections, "connections", of_type=AirbyteConnection),\n connection_to_asset_key_fn=connection_to_asset_key_fn,\n connection_to_freshness_policy_fn=connection_to_freshness_policy_fn,\n )
\n
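The object returned by ``load_assets_from_connections`` is loaded like any other asset definition. A hedged sketch of wiring it into a ``Definitions`` object, mirroring the resource examples later on this page; ``cereals_connection`` is a placeholder ``AirbyteConnection``:

.. code-block:: python

    # Hedged sketch: pass the returned CacheableAssetsDefinition to Definitions.
    # `cereals_connection` is a placeholder; see the AirbyteConnection docs for a
    # full connection declaration.
    from dagster import Definitions
    from dagster_airbyte import AirbyteResource, load_assets_from_connections

    airbyte_instance = AirbyteResource(host="localhost", port="8000")

    airbyte_assets = load_assets_from_connections(
        airbyte=airbyte_instance,
        connections=[cereals_connection],
        key_prefix=["airbyte"],  # optional prefix applied to every generated asset key
    )

    defs = Definitions(assets=[airbyte_assets])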
", "current_page_name": "_modules/dagster_airbyte/managed/reconciliation", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_airbyte.managed.reconciliation"}, "types": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_airbyte.managed.types

\nimport json\nfrom abc import ABC\nfrom enum import Enum\nfrom typing import Any, Dict, List, Mapping, Optional, Union\n\nimport dagster._check as check\nfrom dagster._annotations import public\n\n\n
[docs]class AirbyteSyncMode(ABC):\n """Represents the sync mode for a given Airbyte stream, which governs how Airbyte reads\n from a source and writes to a destination.\n\n For more information, see https://docs.airbyte.com/understanding-airbyte/connections/.\n """\n\n def __eq__(self, other: Any) -> bool:\n return isinstance(other, AirbyteSyncMode) and self.to_json() == other.to_json()\n\n def __init__(self, json_repr: Dict[str, Any]):\n self.json_repr = json_repr\n\n def to_json(self) -> Dict[str, Any]:\n return self.json_repr\n\n @classmethod\n def from_json(cls, json_repr: Dict[str, Any]) -> "AirbyteSyncMode":\n return cls(\n {\n k: v\n for k, v in json_repr.items()\n if k in ("syncMode", "destinationSyncMode", "cursorField", "primaryKey")\n }\n )\n\n
[docs] @public\n @classmethod\n def full_refresh_append(cls) -> "AirbyteSyncMode":\n """Syncs the entire data stream from the source, appending rows to the destination.\n\n https://docs.airbyte.com/understanding-airbyte/connections/full-refresh-append/\n """\n return cls({"syncMode": "full_refresh", "destinationSyncMode": "append"})
\n\n
[docs] @public\n @classmethod\n def full_refresh_overwrite(cls) -> "AirbyteSyncMode":\n """Syncs the entire data stream from the source, replacing data in the destination by\n overwriting it.\n\n https://docs.airbyte.com/understanding-airbyte/connections/full-refresh-overwrite\n """\n return cls({"syncMode": "full_refresh", "destinationSyncMode": "overwrite"})
\n\n
[docs] @public\n @classmethod\n def incremental_append(\n cls,\n cursor_field: Optional[str] = None,\n ) -> "AirbyteSyncMode":\n """Syncs only new records from the source, appending rows to the destination.\n May optionally specify the cursor field used to determine which records\n are new.\n\n https://docs.airbyte.com/understanding-airbyte/connections/incremental-append/\n """\n cursor_field = check.opt_str_param(cursor_field, "cursor_field")\n\n return cls(\n {\n "syncMode": "incremental",\n "destinationSyncMode": "append",\n **({"cursorField": [cursor_field]} if cursor_field else {}),\n }\n )
\n\n
[docs] @public\n @classmethod\n def incremental_append_dedup(\n cls,\n cursor_field: Optional[str] = None,\n primary_key: Optional[Union[str, List[str]]] = None,\n ) -> "AirbyteSyncMode":\n """Syncs new records from the source, appending to an append-only history\n table in the destination. Also generates a deduplicated view mirroring the\n source table. May optionally specify the cursor field used to determine\n which records are new, and the primary key used to determine which records\n are duplicates.\n\n https://docs.airbyte.com/understanding-airbyte/connections/incremental-append-dedup/\n """\n cursor_field = check.opt_str_param(cursor_field, "cursor_field")\n if isinstance(primary_key, str):\n primary_key = [primary_key]\n primary_key = check.opt_list_param(primary_key, "primary_key", of_type=str)\n\n return cls(\n {\n "syncMode": "incremental",\n "destinationSyncMode": "append_dedup",\n **({"cursorField": [cursor_field]} if cursor_field else {}),\n **({"primaryKey": [[x] for x in primary_key]} if primary_key else {}),\n }\n )
\n\n\n
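These factory methods are what goes into an ``AirbyteConnection``'s ``stream_config`` mapping, one entry per stream. A small hedged sketch combining them; the stream names and field names are hypothetical and must match the streams discovered on your source:

.. code-block:: python

    # Hedged sketch: choose a sync mode per stream. "orders", "users", "events",
    # "updated_at", and "id" are illustrative placeholders.
    from dagster_airbyte import AirbyteSyncMode

    stream_config = {
        # replace the destination table on every sync
        "orders": AirbyteSyncMode.full_refresh_overwrite(),
        # append only new rows, using updated_at as the cursor
        "users": AirbyteSyncMode.incremental_append(cursor_field="updated_at"),
        # append to a history table and keep a deduplicated view keyed by id
        "events": AirbyteSyncMode.incremental_append_dedup(
            cursor_field="updated_at", primary_key="id"
        ),
    }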
[docs]class AirbyteSource:\n """Represents a user-defined Airbyte source.\n\n Args:\n name (str): The display name of the source.\n source_type (str): The type of the source, from Airbyte's list\n of sources https://airbytehq.github.io/category/sources/.\n source_configuration (Mapping[str, Any]): The configuration for the\n source, as defined by Airbyte's API.\n """\n\n
[docs] @public\n def __init__(self, name: str, source_type: str, source_configuration: Mapping[str, Any]):\n self.name = check.str_param(name, "name")\n self.source_type = check.str_param(source_type, "source_type")\n self.source_configuration = check.mapping_param(\n source_configuration, "source_configuration", key_type=str\n )
\n\n def must_be_recreated(self, other: "AirbyteSource") -> bool:\n return self.name != other.name or self.source_type != other.source_type
\n\n\nclass InitializedAirbyteSource:\n """User-defined Airbyte source bound to actual created Airbyte source."""\n\n def __init__(self, source: AirbyteSource, source_id: str, source_definition_id: Optional[str]):\n self.source = source\n self.source_id = source_id\n self.source_definition_id = source_definition_id\n\n @classmethod\n def from_api_json(cls, api_json: Mapping[str, Any]):\n return cls(\n source=AirbyteSource(\n name=api_json["name"],\n source_type=api_json["sourceName"],\n source_configuration=api_json["connectionConfiguration"],\n ),\n source_id=api_json["sourceId"],\n source_definition_id=None,\n )\n\n\n
[docs]class AirbyteDestination:\n """Represents a user-defined Airbyte destination.\n\n Args:\n name (str): The display name of the destination.\n destination_type (str): The type of the destination, from Airbyte's list\n of destinations https://airbytehq.github.io/category/destinations/.\n destination_configuration (Mapping[str, Any]): The configuration for the\n destination, as defined by Airbyte's API.\n """\n\n
[docs] @public\n def __init__(\n self, name: str, destination_type: str, destination_configuration: Mapping[str, Any]\n ):\n self.name = check.str_param(name, "name")\n self.destination_type = check.str_param(destination_type, "destination_type")\n self.destination_configuration = check.mapping_param(\n destination_configuration, "destination_configuration", key_type=str\n )
\n\n def must_be_recreated(self, other: "AirbyteDestination") -> bool:\n return self.name != other.name or self.destination_type != other.destination_type
\n\n\nclass InitializedAirbyteDestination:\n """User-defined Airbyte destination bound to actual created Airbyte destination."""\n\n def __init__(\n self,\n destination: AirbyteDestination,\n destination_id: str,\n destination_definition_id: Optional[str],\n ):\n self.destination = destination\n self.destination_id = destination_id\n self.destination_definition_id = destination_definition_id\n\n @classmethod\n def from_api_json(cls, api_json: Mapping[str, Any]):\n return cls(\n destination=AirbyteDestination(\n name=api_json["name"],\n destination_type=api_json["destinationName"],\n destination_configuration=api_json["connectionConfiguration"],\n ),\n destination_id=api_json["destinationId"],\n destination_definition_id=None,\n )\n\n\nclass AirbyteDestinationNamespace(Enum):\n """Represents the sync mode for a given Airbyte stream."""\n\n SAME_AS_SOURCE = "source"\n DESTINATION_DEFAULT = "destination"\n\n\n
[docs]class AirbyteConnection:\n """A user-defined Airbyte connection, pairing an Airbyte source and destination and configuring\n which streams to sync.\n\n Args:\n name (str): The display name of the connection.\n source (AirbyteSource): The source to sync from.\n destination (AirbyteDestination): The destination to sync to.\n stream_config (Mapping[str, AirbyteSyncMode]): A mapping from stream name to\n the sync mode for that stream, including any additional configuration\n of primary key or cursor field.\n normalize_data (Optional[bool]): Whether to normalize the data in the\n destination.\n destination_namespace (Optional[Union[AirbyteDestinationNamespace, str]]):\n The namespace to sync to in the destination. If set to\n AirbyteDestinationNamespace.SAME_AS_SOURCE, the namespace will be the\n same as the source namespace. If set to\n AirbyteDestinationNamespace.DESTINATION_DEFAULT, the namespace will be\n the default namespace for the destination. If set to a string, the\n namespace will be that string.\n prefix (Optional[str]): A prefix to add to the table names in the destination.\n\n Example:\n .. code-block:: python\n\n from dagster_airbyte.managed.generated.sources import FileSource\n from dagster_airbyte.managed.generated.destinations import LocalJsonDestination\n from dagster_airbyte import AirbyteConnection, AirbyteSyncMode\n\n cereals_csv_source = FileSource(...)\n local_json_destination = LocalJsonDestination(...)\n\n cereals_connection = AirbyteConnection(\n name="download-cereals",\n source=cereals_csv_source,\n destination=local_json_destination,\n stream_config={"cereals": AirbyteSyncMode.full_refresh_overwrite()},\n )\n """\n\n
[docs] @public\n def __init__(\n self,\n name: str,\n source: AirbyteSource,\n destination: AirbyteDestination,\n stream_config: Mapping[str, AirbyteSyncMode],\n normalize_data: Optional[bool] = None,\n destination_namespace: Optional[\n Union[AirbyteDestinationNamespace, str]\n ] = AirbyteDestinationNamespace.SAME_AS_SOURCE,\n prefix: Optional[str] = None,\n ):\n self.name = check.str_param(name, "name")\n self.source = check.inst_param(source, "source", AirbyteSource)\n self.destination = check.inst_param(destination, "destination", AirbyteDestination)\n self.stream_config = check.mapping_param(\n stream_config, "stream_config", key_type=str, value_type=AirbyteSyncMode\n )\n self.normalize_data = check.opt_bool_param(normalize_data, "normalize_data")\n self.destination_namespace = check.opt_inst_param(\n destination_namespace, "destination_namespace", (str, AirbyteDestinationNamespace)\n )\n self.prefix = check.opt_str_param(prefix, "prefix")
\n\n def must_be_recreated(self, other: Optional["AirbyteConnection"]) -> bool:\n return (\n not other\n or self.source.must_be_recreated(other.source)\n or self.destination.must_be_recreated(other.destination)\n )
\n\n\nclass InitializedAirbyteConnection:\n """User-defined Airbyte connection bound to actual created Airbyte connection."""\n\n def __init__(\n self,\n connection: AirbyteConnection,\n connection_id: str,\n ):\n self.connection = connection\n self.connection_id = connection_id\n\n @classmethod\n def from_api_json(\n cls,\n api_dict: Mapping[str, Any],\n init_sources: Mapping[str, InitializedAirbyteSource],\n init_dests: Mapping[str, InitializedAirbyteDestination],\n ):\n source = next(\n (\n source.source\n for source in init_sources.values()\n if source.source_id == api_dict["sourceId"]\n ),\n None,\n )\n dest = next(\n (\n dest.destination\n for dest in init_dests.values()\n if dest.destination_id == api_dict["destinationId"]\n ),\n None,\n )\n\n source = check.not_none(source, f"Could not find source with id {api_dict['sourceId']}")\n dest = check.not_none(\n dest, f"Could not find destination with id {api_dict['destinationId']}"\n )\n\n streams = {\n stream["stream"]["name"]: AirbyteSyncMode.from_json(stream["config"])\n for stream in api_dict["syncCatalog"]["streams"]\n }\n return cls(\n AirbyteConnection(\n name=api_dict["name"],\n source=source,\n destination=dest,\n stream_config=streams,\n normalize_data=len(api_dict["operationIds"]) > 0,\n destination_namespace=(\n api_dict["namespaceFormat"]\n if api_dict["namespaceDefinition"] == "customformat"\n else AirbyteDestinationNamespace(api_dict["namespaceDefinition"])\n ),\n prefix=api_dict["prefix"] if api_dict.get("prefix") else None,\n ),\n api_dict["connectionId"],\n )\n\n\ndef _remove_none_values(obj: Dict[str, Any]) -> Dict[str, Any]:\n return {k: v for k, v in obj.items() if v is not None}\n\n\ndef _dump_class(obj: Any) -> Dict[str, Any]:\n return json.loads(json.dumps(obj, default=lambda o: _remove_none_values(o.__dict__)))\n\n\nclass GeneratedAirbyteSource(AirbyteSource):\n """Base class used by the codegen Airbyte sources. This class is not intended to be used directly.\n\n Converts all of its attributes into a source configuration dict which is passed down to the base\n AirbyteSource class.\n """\n\n def __init__(self, source_type: str, name: str):\n source_configuration = _dump_class(self)\n super().__init__(\n name=name, source_type=source_type, source_configuration=source_configuration\n )\n\n\nclass GeneratedAirbyteDestination(AirbyteDestination):\n """Base class used by the codegen Airbyte destinations. This class is not intended to be used directly.\n\n Converts all of its attributes into a destination configuration dict which is passed down to the\n base AirbyteDestination class.\n """\n\n def __init__(self, source_type: str, name: str):\n destination_configuration = _dump_class(self)\n super().__init__(\n name=name,\n destination_type=source_type,\n destination_configuration=destination_configuration,\n )\n
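Pulling the pieces of this module together: a source, a destination, and a connection can be declared directly with raw configuration dictionaries instead of the generated typed classes. A hedged sketch, assuming these documented classes are exported from the top-level ``dagster_airbyte`` package (as ``AirbyteConnection`` and ``AirbyteSyncMode`` are in the example above); the connector type names and configuration keys are illustrative and must match what your Airbyte deployment expects:

.. code-block:: python

    # Hedged sketch: declare a source/destination pair and a connection by hand.
    # Connector type names and configuration keys below are placeholders.
    from dagster_airbyte import (
        AirbyteConnection,
        AirbyteDestination,
        AirbyteSource,
        AirbyteSyncMode,
    )

    cereals_csv_source = AirbyteSource(
        name="cereals-csv",
        source_type="File",
        source_configuration={"url": "https://example.com/cereals.csv", "format": "csv"},
    )

    local_json_destination = AirbyteDestination(
        name="local-json",
        destination_type="Local JSON",
        destination_configuration={"destination_path": "/local/cereals_out.json"},
    )

    cereals_connection = AirbyteConnection(
        name="download-cereals",
        source=cereals_csv_source,
        destination=local_json_destination,
        stream_config={"cereals": AirbyteSyncMode.full_refresh_overwrite()},
        normalize_data=False,
    )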
", "current_page_name": "_modules/dagster_airbyte/managed/types", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_airbyte.managed.types"}}, "ops": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_airbyte.ops

\nfrom typing import Any, Iterable, List, Optional\n\nfrom dagster import Config, In, Nothing, Out, Output, op\nfrom pydantic import Field\n\nfrom dagster_airbyte.types import AirbyteOutput\nfrom dagster_airbyte.utils import _get_attempt, generate_materializations\n\nfrom .resources import DEFAULT_POLL_INTERVAL_SECONDS, BaseAirbyteResource\n\n\nclass AirbyteSyncConfig(Config):\n    connection_id: str = Field(\n        ...,\n        description=(\n            "The Airbyte Connection ID for the connection that this op will sync. You can"\n            " retrieve this value from the Connections tab of a given connection in the"\n            " Airbyte UI."\n        ),\n    )\n    poll_interval: float = Field(\n        DEFAULT_POLL_INTERVAL_SECONDS,\n        description=(\n            "The time (in seconds) that will be waited between successive polls of the sync"\n            " status."\n        ),\n    )\n    poll_timeout: Optional[float] = Field(\n        None,\n        description=(\n            "The maximum time (in seconds) that will be waited before this operation is timed"\n            " out. By default, this will never time out."\n        ),\n    )\n    yield_materializations: bool = Field(\n        True,\n        description=(\n            "If True, materializations corresponding to the results of the Airbyte sync will "\n            "be yielded when the op executes."\n        ),\n    )\n    asset_key_prefix: List[str] = Field(\n        ["airbyte"],\n        description=(\n            "If provided and yield_materializations is True, these components will be used to "\n            "prefix the generated asset keys."\n        ),\n    )\n\n\n
[docs]@op(\n ins={"start_after": In(Nothing)},\n out=Out(\n AirbyteOutput,\n description=(\n "Parsed json dictionary representing the details of the Airbyte connector after the"\n " sync successfully completes. See the [Airbyte API"\n " Docs](https://airbyte-public-api-docs.s3.us-east-2.amazonaws.com/rapidoc-api-docs.html#overview)"\n " to see detailed information on this response."\n ),\n ),\n tags={"kind": "airbyte"},\n)\ndef airbyte_sync_op(\n context, config: AirbyteSyncConfig, airbyte: BaseAirbyteResource\n) -> Iterable[Any]:\n """Executes an Airbyte job sync for a given ``connection_id``, and polls until that sync\n completes, raising an error if it is unsuccessful. It outputs an AirbyteOutput which contains\n the job details for a given ``connection_id``.\n\n It requires the use of the :py:class:`~dagster_airbyte.airbyte_resource`, which allows it to\n communicate with the Airbyte API.\n\n Examples:\n .. code-block:: python\n\n from dagster import job\n from dagster_airbyte import airbyte_resource, airbyte_sync_op\n\n my_airbyte_resource = airbyte_resource.configured(\n {\n "host": {"env": "AIRBYTE_HOST"},\n "port": {"env": "AIRBYTE_PORT"},\n }\n )\n\n sync_foobar = airbyte_sync_op.configured({"connection_id": "foobar"}, name="sync_foobar")\n\n @job(resource_defs={"airbyte": my_airbyte_resource})\n def my_simple_airbyte_job():\n sync_foobar()\n\n @job(resource_defs={"airbyte": my_airbyte_resource})\n def my_composed_airbyte_job():\n final_foobar_state = sync_foobar(start_after=some_op())\n other_op(final_foobar_state)\n """\n airbyte_output = airbyte.sync_and_poll(\n connection_id=config.connection_id,\n poll_interval=config.poll_interval,\n poll_timeout=config.poll_timeout,\n )\n if config.yield_materializations:\n yield from generate_materializations(\n airbyte_output, asset_key_prefix=config.asset_key_prefix\n )\n yield Output(\n airbyte_output,\n metadata={\n **_get_attempt(airbyte_output.job_details.get("attempts", [{}])[-1]).get(\n "totalStats", {}\n )\n },\n )
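The docstring example above uses the legacy ``airbyte_resource``; with the Pythonic ``AirbyteResource`` (defined in the resources module on this page) and the ``AirbyteSyncConfig`` schema, the op can be wired up roughly as follows. A hedged sketch: the host, port, and connection ID are placeholders, and ``AirbyteSyncConfig`` is imported from ``dagster_airbyte.ops`` since that is where this module defines it:

.. code-block:: python

    # Hedged sketch: run airbyte_sync_op against a Pythonic AirbyteResource.
    # The connection_id is supplied through run config via AirbyteSyncConfig.
    from dagster import Definitions, RunConfig, job
    from dagster_airbyte import AirbyteResource, airbyte_sync_op
    from dagster_airbyte.ops import AirbyteSyncConfig

    airbyte_instance = AirbyteResource(host="localhost", port="8000")

    @job
    def my_airbyte_job():
        airbyte_sync_op()

    defs = Definitions(
        jobs=[my_airbyte_job],
        resources={"airbyte": airbyte_instance},
    )

    # For example, launched in-process with run config (placeholder connection ID):
    # my_airbyte_job.execute_in_process(
    #     run_config=RunConfig(
    #         ops={"airbyte_sync_op": AirbyteSyncConfig(connection_id="<connection-id>")}
    #     ),
    #     resources={"airbyte": airbyte_instance},
    # )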
\n
", "current_page_name": "_modules/dagster_airbyte/ops", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_airbyte.ops"}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_airbyte.resources

\nimport hashlib\nimport json\nimport logging\nimport sys\nimport time\nfrom abc import abstractmethod\nfrom contextlib import contextmanager\nfrom typing import Any, Dict, List, Mapping, Optional, cast\n\nimport requests\nfrom dagster import (\n    ConfigurableResource,\n    Failure,\n    _check as check,\n    get_dagster_logger,\n    resource,\n)\nfrom dagster._config.pythonic_config import infer_schema_from_config_class\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster._utils.cached_method import cached_method\nfrom dagster._utils.merger import deep_merge_dicts\nfrom pydantic import Field\nfrom requests.exceptions import RequestException\n\nfrom dagster_airbyte.types import AirbyteOutput\n\nDEFAULT_POLL_INTERVAL_SECONDS = 10\n\n\nclass AirbyteState:\n    RUNNING = "running"\n    SUCCEEDED = "succeeded"\n    CANCELLED = "cancelled"\n    PENDING = "pending"\n    FAILED = "failed"\n    ERROR = "error"\n    INCOMPLETE = "incomplete"\n\n\nclass AirbyteResourceState:\n    def __init__(self) -> None:\n        self.request_cache: Dict[str, Optional[Mapping[str, object]]] = {}\n        # Int in case we nest contexts\n        self.cache_enabled = 0\n\n\nclass BaseAirbyteResource(ConfigurableResource):\n    request_max_retries: int = Field(\n        default=3,\n        description=(\n            "The maximum number of times requests to the Airbyte API should be retried "\n            "before failing."\n        ),\n    )\n    request_retry_delay: float = Field(\n        default=0.25,\n        description="Time (in seconds) to wait between each request retry.",\n    )\n    request_timeout: int = Field(\n        default=15,\n        description="Time (in seconds) after which the requests to Airbyte are declared timed out.",\n    )\n    cancel_sync_on_run_termination: bool = Field(\n        default=True,\n        description=(\n            "Whether to cancel a sync in Airbyte if the Dagster runner is terminated. 
This may"\n            " be useful to disable if using Airbyte sources that cannot be cancelled and"\n            " resumed easily, or if your Dagster deployment may experience runner interruptions"\n            " that do not impact your Airbyte deployment."\n        ),\n    )\n    poll_interval: float = Field(\n        default=DEFAULT_POLL_INTERVAL_SECONDS,\n        description="Time (in seconds) to wait between checking a sync's status.",\n    )\n\n    @classmethod\n    def _is_dagster_maintained(cls) -> bool:\n        return True\n\n    @property\n    @cached_method\n    def _log(self) -> logging.Logger:\n        return get_dagster_logger()\n\n    @property\n    @abstractmethod\n    def api_base_url(self) -> str:\n        raise NotImplementedError()\n\n    @property\n    @abstractmethod\n    def all_additional_request_params(self) -> Mapping[str, Any]:\n        raise NotImplementedError()\n\n    def make_request(\n        self, endpoint: str, data: Optional[Mapping[str, object]] = None, method: str = "POST"\n    ) -> Optional[Mapping[str, object]]:\n        """Creates and sends a request to the desired Airbyte REST API endpoint.\n\n        Args:\n            endpoint (str): The Airbyte API endpoint to send this request to.\n            data (Optional[str]): JSON-formatted data string to be included in the request.\n\n        Returns:\n            Optional[Dict[str, Any]]: Parsed json data from the response to this request\n        """\n        url = self.api_base_url + endpoint\n        headers = {"accept": "application/json"}\n\n        num_retries = 0\n        while True:\n            try:\n                request_args: Dict[str, Any] = dict(\n                    method=method,\n                    url=url,\n                    headers=headers,\n                    timeout=self.request_timeout,\n                )\n                if data:\n                    request_args["json"] = data\n\n                request_args = deep_merge_dicts(\n                    request_args,\n                    self.all_additional_request_params,\n                )\n\n                response = requests.request(\n                    **request_args,\n                )\n                response.raise_for_status()\n                if response.status_code == 204:\n                    return None\n                return response.json()\n            except RequestException as e:\n                self._log.error("Request to Airbyte API failed: %s", e)\n                if num_retries == self.request_max_retries:\n                    break\n                num_retries += 1\n                time.sleep(self.request_retry_delay)\n\n        raise Failure(f"Max retries ({self.request_max_retries}) exceeded with url: {url}.")\n\n    @abstractmethod\n    def start_sync(self, connection_id: str) -> Mapping[str, object]:\n        raise NotImplementedError()\n\n    @abstractmethod\n    def get_connection_details(self, connection_id: str) -> Mapping[str, object]:\n        raise NotImplementedError()\n\n    @abstractmethod\n    def get_job_status(self, connection_id: str, job_id: int) -> Mapping[str, object]:\n        raise NotImplementedError()\n\n    @abstractmethod\n    def cancel_job(self, job_id: int):\n        raise NotImplementedError()\n\n    @property\n    @abstractmethod\n    def _should_forward_logs(self) -> bool:\n        raise NotImplementedError()\n\n    def sync_and_poll(\n        self,\n        connection_id: str,\n        poll_interval: Optional[float] = None,\n        poll_timeout: Optional[float] = 
None,\n    ) -> AirbyteOutput:\n        """Initializes a sync operation for the given connector, and polls until it completes.\n\n        Args:\n            connection_id (str): The Airbyte Connector ID. You can retrieve this value from the\n                "Connection" tab of a given connection in the Arbyte UI.\n            poll_interval (float): The time (in seconds) that will be waited between successive polls.\n            poll_timeout (float): The maximum time that will waited before this operation is timed\n                out. By default, this will never time out.\n\n        Returns:\n            :py:class:`~AirbyteOutput`:\n                Details of the sync job.\n        """\n        connection_details = self.get_connection_details(connection_id)\n        job_details = self.start_sync(connection_id)\n        job_info = cast(Dict[str, object], job_details.get("job", {}))\n        job_id = cast(int, job_info.get("id"))\n\n        self._log.info(f"Job {job_id} initialized for connection_id={connection_id}.")\n        start = time.monotonic()\n        logged_attempts = 0\n        logged_lines = 0\n        state = None\n\n        try:\n            while True:\n                if poll_timeout and start + poll_timeout < time.monotonic():\n                    raise Failure(\n                        f"Timeout: Airbyte job {job_id} is not ready after the timeout"\n                        f" {poll_timeout} seconds"\n                    )\n                time.sleep(poll_interval or self.poll_interval)\n                job_details = self.get_job_status(connection_id, job_id)\n                attempts = cast(List, job_details.get("attempts", []))\n                cur_attempt = len(attempts)\n                # spit out the available Airbyte log info\n                if cur_attempt:\n                    if self._should_forward_logs:\n                        log_lines = attempts[logged_attempts].get("logs", {}).get("logLines", [])\n\n                        for line in log_lines[logged_lines:]:\n                            sys.stdout.write(line + "\\n")\n                            sys.stdout.flush()\n                        logged_lines = len(log_lines)\n\n                    # if there's a next attempt, this one will have no more log messages\n                    if logged_attempts < cur_attempt - 1:\n                        logged_lines = 0\n                        logged_attempts += 1\n\n                job_info = cast(Dict[str, object], job_details.get("job", {}))\n                state = job_info.get("status")\n\n                if state in (AirbyteState.RUNNING, AirbyteState.PENDING, AirbyteState.INCOMPLETE):\n                    continue\n                elif state == AirbyteState.SUCCEEDED:\n                    break\n                elif state == AirbyteState.ERROR:\n                    raise Failure(f"Job failed: {job_id}")\n                elif state == AirbyteState.CANCELLED:\n                    raise Failure(f"Job was cancelled: {job_id}")\n                else:\n                    raise Failure(f"Encountered unexpected state `{state}` for job_id {job_id}")\n        finally:\n            # if Airbyte sync has not completed, make sure to cancel it so that it doesn't outlive\n            # the python process\n            if (\n                state not in (AirbyteState.SUCCEEDED, AirbyteState.ERROR, AirbyteState.CANCELLED)\n                and self.cancel_sync_on_run_termination\n            ):\n                self.cancel_job(job_id)\n\n        return 
AirbyteOutput(job_details=job_details, connection_details=connection_details)\n\n\nclass AirbyteCloudResource(BaseAirbyteResource):\n    """This resource allows users to programatically interface with the Airbyte Cloud API to launch\n    syncs and monitor their progress.\n\n    **Examples:**\n\n    .. code-block:: python\n\n        from dagster import job, EnvVar\n        from dagster_airbyte import AirbyteResource\n\n        my_airbyte_resource = AirbyteCloudResource(\n            api_key=EnvVar("AIRBYTE_API_KEY"),\n        )\n\n        airbyte_assets = build_airbyte_assets(\n            connection_id="87b7fe85-a22c-420e-8d74-b30e7ede77df",\n            destination_tables=["releases", "tags", "teams"],\n        )\n\n        defs = Definitions(\n            assets=[airbyte_assets],\n            resources={"airbyte": my_airbyte_resource},\n        )\n    """\n\n    api_key: str = Field(..., description="The Airbyte Cloud API key.")\n\n    @property\n    def api_base_url(self) -> str:\n        return "https://api.airbyte.com/v1"\n\n    @property\n    def all_additional_request_params(self) -> Mapping[str, Any]:\n        return {"headers": {"Authorization": f"Bearer {self.api_key}", "User-Agent": "dagster"}}\n\n    def start_sync(self, connection_id: str) -> Mapping[str, object]:\n        job_sync = check.not_none(\n            self.make_request(\n                endpoint="/jobs",\n                data={\n                    "connectionId": connection_id,\n                    "jobType": "sync",\n                },\n            )\n        )\n        return {"job": {"id": job_sync["jobId"], "status": job_sync["status"]}}\n\n    def get_connection_details(self, connection_id: str) -> Mapping[str, object]:\n        return {}\n\n    def get_job_status(self, connection_id: str, job_id: int) -> Mapping[str, object]:\n        job_status = check.not_none(self.make_request(endpoint=f"/jobs/{job_id}", method="GET"))\n        return {"job": {"id": job_status["jobId"], "status": job_status["status"]}}\n\n    def cancel_job(self, job_id: int):\n        self.make_request(endpoint=f"/jobs/{job_id}", method="DELETE")\n\n    @property\n    def _should_forward_logs(self) -> bool:\n        # Airbyte Cloud does not support streaming logs yet\n        return False\n\n\n
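A self-contained variant of the ``AirbyteCloudResource`` docstring example above, with the imports spelled out; the connection ID and destination tables are the docstring's placeholders, and ``build_airbyte_assets`` is assumed (as elsewhere in dagster-airbyte) to return a list of asset definitions that is passed to ``Definitions`` directly:

.. code-block:: python

    # Hedged sketch: sync Airbyte Cloud connection tables as Dagster assets.
    # Connection ID and destination tables are placeholders.
    from dagster import Definitions, EnvVar
    from dagster_airbyte import AirbyteCloudResource, build_airbyte_assets

    my_airbyte_resource = AirbyteCloudResource(
        api_key=EnvVar("AIRBYTE_API_KEY"),
    )

    airbyte_assets = build_airbyte_assets(
        connection_id="87b7fe85-a22c-420e-8d74-b30e7ede77df",
        destination_tables=["releases", "tags", "teams"],
    )

    defs = Definitions(
        assets=airbyte_assets,  # build_airbyte_assets returns a list of asset definitions
        resources={"airbyte": my_airbyte_resource},
    )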
[docs]class AirbyteResource(BaseAirbyteResource):\n """This resource allows users to programatically interface with the Airbyte REST API to launch\n syncs and monitor their progress.\n\n **Examples:**\n\n .. code-block:: python\n\n from dagster import job, EnvVar\n from dagster_airbyte import AirbyteResource\n\n my_airbyte_resource = AirbyteResource(\n host=EnvVar("AIRBYTE_HOST"),\n port=EnvVar("AIRBYTE_PORT"),\n # If using basic auth\n username=EnvVar("AIRBYTE_USERNAME"),\n password=EnvVar("AIRBYTE_PASSWORD"),\n )\n\n airbyte_assets = build_airbyte_assets(\n connection_id="87b7fe85-a22c-420e-8d74-b30e7ede77df",\n destination_tables=["releases", "tags", "teams"],\n )\n\n defs = Definitions(\n assets=[airbyte_assets],\n resources={"airbyte": my_airbyte_resource},\n )\n """\n\n host: str = Field(description="The Airbyte server address.")\n port: str = Field(description="Port used for the Airbyte server.")\n username: Optional[str] = Field(default=None, description="Username if using basic auth.")\n password: Optional[str] = Field(default=None, description="Password if using basic auth.")\n use_https: bool = Field(\n default=False, description="Whether to use HTTPS to connect to the Airbyte server."\n )\n forward_logs: bool = Field(\n default=True,\n description=(\n "Whether to forward Airbyte logs to the compute log, can be expensive for"\n " long-running syncs."\n ),\n )\n request_additional_params: Mapping[str, Any] = Field(\n default=dict(),\n description=(\n "Any additional kwargs to pass to the requests library when making requests to Airbyte."\n ),\n )\n\n @property\n @cached_method\n def _state(self) -> AirbyteResourceState:\n return AirbyteResourceState()\n\n @property\n @cached_method\n def _log(self) -> logging.Logger:\n return get_dagster_logger()\n\n @property\n def api_base_url(self) -> str:\n return (\n ("https://" if self.use_https else "http://")\n + (f"{self.host}:{self.port}" if self.port else self.host)\n + "/api/v1"\n )\n\n @property\n def _should_forward_logs(self) -> bool:\n return self.forward_logs\n\n @contextmanager\n def cache_requests(self):\n """Context manager that enables caching certain requests to the Airbyte API,\n cleared when the context is exited.\n """\n self.clear_request_cache()\n self._state.cache_enabled += 1\n try:\n yield\n finally:\n self.clear_request_cache()\n self._state.cache_enabled -= 1\n\n def clear_request_cache(self) -> None:\n self._state.request_cache = {}\n\n def make_request_cached(self, endpoint: str, data: Optional[Mapping[str, object]]):\n if not self._state.cache_enabled > 0:\n return self.make_request(endpoint, data)\n data_json = json.dumps(data, sort_keys=True)\n sha = hashlib.sha1()\n sha.update(endpoint.encode("utf-8"))\n sha.update(data_json.encode("utf-8"))\n digest = sha.hexdigest()\n\n if digest not in self._state.request_cache:\n self._state.request_cache[digest] = self.make_request(endpoint, data)\n return self._state.request_cache[digest]\n\n @property\n def all_additional_request_params(self) -> Mapping[str, Any]:\n auth_param = (\n {"auth": (self.username, self.password)} if self.username and self.password else {}\n )\n return {**auth_param, **self.request_additional_params}\n\n def make_request(\n self, endpoint: str, data: Optional[Mapping[str, object]]\n ) -> Optional[Mapping[str, object]]:\n """Creates and sends a request to the desired Airbyte REST API endpoint.\n\n Args:\n endpoint (str): The Airbyte API endpoint to send this request to.\n data (Optional[str]): JSON-formatted data string to be included in the 
request.\n\n Returns:\n Optional[Dict[str, Any]]: Parsed json data from the response to this request\n """\n url = self.api_base_url + endpoint\n headers = {"accept": "application/json"}\n\n num_retries = 0\n while True:\n try:\n response = requests.request(\n **deep_merge_dicts( # type: ignore\n dict(\n method="POST",\n url=url,\n headers=headers,\n json=data,\n timeout=self.request_timeout,\n auth=(\n (self.username, self.password)\n if self.username and self.password\n else None\n ),\n ),\n self.request_additional_params,\n ),\n )\n response.raise_for_status()\n if response.status_code == 204:\n return None\n return response.json()\n except RequestException as e:\n self._log.error("Request to Airbyte API failed: %s", e)\n if num_retries == self.request_max_retries:\n break\n num_retries += 1\n time.sleep(self.request_retry_delay)\n\n raise Failure(f"Max retries ({self.request_max_retries}) exceeded with url: {url}.")\n\n def cancel_job(self, job_id: int):\n self.make_request(endpoint="/jobs/cancel", data={"id": job_id})\n\n def get_default_workspace(self) -> str:\n workspaces = cast(\n List[Dict[str, Any]],\n check.not_none(self.make_request_cached(endpoint="/workspaces/list", data={})).get(\n "workspaces", []\n ),\n )\n return workspaces[0]["workspaceId"]\n\n def get_source_definition_by_name(self, name: str) -> Optional[str]:\n name_lower = name.lower()\n definitions = self.make_request_cached(endpoint="/source_definitions/list", data={})\n\n return next(\n (\n definition["sourceDefinitionId"]\n for definition in definitions["sourceDefinitions"]\n if definition["name"].lower() == name_lower\n ),\n None,\n )\n\n def get_destination_definition_by_name(self, name: str):\n name_lower = name.lower()\n definitions = cast(\n Dict[str, List[Dict[str, str]]],\n check.not_none(\n self.make_request_cached(endpoint="/destination_definitions/list", data={})\n ),\n )\n return next(\n (\n definition["destinationDefinitionId"]\n for definition in definitions["destinationDefinitions"]\n if definition["name"].lower() == name_lower\n ),\n None,\n )\n\n def get_source_catalog_id(self, source_id: str):\n result = cast(\n Dict[str, Any],\n check.not_none(\n self.make_request(endpoint="/sources/discover_schema", data={"sourceId": source_id})\n ),\n )\n return result["catalogId"]\n\n def get_source_schema(self, source_id: str) -> Mapping[str, Any]:\n return cast(\n Dict[str, Any],\n check.not_none(\n self.make_request(endpoint="/sources/discover_schema", data={"sourceId": source_id})\n ),\n )\n\n def does_dest_support_normalization(\n self, destination_definition_id: str, workspace_id: str\n ) -> bool:\n # Airbyte API changed source of truth for normalization in PR\n # https://github.com/airbytehq/airbyte/pull/21005\n norm_dest_def_spec: bool = cast(\n Dict[str, Any],\n check.not_none(\n self.make_request_cached(\n endpoint="/destination_definition_specifications/get",\n data={\n "destinationDefinitionId": destination_definition_id,\n "workspaceId": workspace_id,\n },\n )\n ),\n ).get("supportsNormalization", False)\n\n norm_dest_def: bool = (\n cast(\n Dict[str, Any],\n check.not_none(\n self.make_request_cached(\n endpoint="/destination_definitions/get",\n data={\n "destinationDefinitionId": destination_definition_id,\n },\n )\n ),\n )\n .get("normalizationConfig", {})\n .get("supported", False)\n )\n\n return any([norm_dest_def_spec, norm_dest_def])\n\n def get_job_status(self, connection_id: str, job_id: int) -> Mapping[str, object]:\n if self.forward_logs:\n return 
check.not_none(self.make_request(endpoint="/jobs/get", data={"id": job_id}))\n else:\n # the "list all jobs" endpoint doesn't return logs, which actually makes it much more\n # lightweight for long-running syncs with many logs\n out = check.not_none(\n self.make_request(\n endpoint="/jobs/list",\n data={\n "configTypes": ["sync"],\n "configId": connection_id,\n # sync should be the most recent, so pageSize 5 is sufficient\n "pagination": {"pageSize": 5},\n },\n )\n )\n job = next((job for job in cast(List, out["jobs"]) if job["job"]["id"] == job_id), None)\n\n return check.not_none(job)\n\n def start_sync(self, connection_id: str) -> Mapping[str, object]:\n return check.not_none(\n self.make_request(endpoint="/connections/sync", data={"connectionId": connection_id})\n )\n\n def get_connection_details(self, connection_id: str) -> Mapping[str, object]:\n return check.not_none(\n self.make_request(endpoint="/connections/get", data={"connectionId": connection_id})\n )\n\n def sync_and_poll(\n self,\n connection_id: str,\n poll_interval: Optional[float] = None,\n poll_timeout: Optional[float] = None,\n ) -> AirbyteOutput:\n """Initializes a sync operation for the given connector, and polls until it completes.\n\n Args:\n connection_id (str): The Airbyte Connector ID. You can retrieve this value from the\n "Connection" tab of a given connection in the Arbyte UI.\n poll_interval (float): The time (in seconds) that will be waited between successive polls.\n poll_timeout (float): The maximum time that will waited before this operation is timed\n out. By default, this will never time out.\n\n Returns:\n :py:class:`~AirbyteOutput`:\n Details of the sync job.\n """\n connection_details = self.get_connection_details(connection_id)\n job_details = self.start_sync(connection_id)\n job_info = cast(Dict[str, object], job_details.get("job", {}))\n job_id = cast(int, job_info.get("id"))\n\n self._log.info(f"Job {job_id} initialized for connection_id={connection_id}.")\n start = time.monotonic()\n logged_attempts = 0\n logged_lines = 0\n state = None\n\n try:\n while True:\n if poll_timeout and start + poll_timeout < time.monotonic():\n raise Failure(\n f"Timeout: Airbyte job {job_id} is not ready after the timeout"\n f" {poll_timeout} seconds"\n )\n time.sleep(poll_interval or self.poll_interval)\n job_details = self.get_job_status(connection_id, job_id)\n attempts = cast(List, job_details.get("attempts", []))\n cur_attempt = len(attempts)\n # spit out the available Airbyte log info\n if cur_attempt:\n if self.forward_logs:\n log_lines = attempts[logged_attempts].get("logs", {}).get("logLines", [])\n\n for line in log_lines[logged_lines:]:\n sys.stdout.write(line + "\\n")\n sys.stdout.flush()\n logged_lines = len(log_lines)\n\n # if there's a next attempt, this one will have no more log messages\n if logged_attempts < cur_attempt - 1:\n logged_lines = 0\n logged_attempts += 1\n\n job_info = cast(Dict[str, object], job_details.get("job", {}))\n state = job_info.get("status")\n\n if state in (AirbyteState.RUNNING, AirbyteState.PENDING, AirbyteState.INCOMPLETE):\n continue\n elif state == AirbyteState.SUCCEEDED:\n break\n elif state == AirbyteState.ERROR:\n raise Failure(f"Job failed: {job_id}")\n elif state == AirbyteState.CANCELLED:\n raise Failure(f"Job was cancelled: {job_id}")\n else:\n raise Failure(f"Encountered unexpected state `{state}` for job_id {job_id}")\n finally:\n # if Airbyte sync has not completed, make sure to cancel it so that it doesn't outlive\n # the python process\n if (\n state not in 
(AirbyteState.SUCCEEDED, AirbyteState.ERROR, AirbyteState.CANCELLED)\n and self.cancel_sync_on_run_termination\n ):\n self.cancel_job(job_id)\n\n return AirbyteOutput(job_details=job_details, connection_details=connection_details)
\n\n\n
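A minimal sketch of driving a sync imperatively with the ``sync_and_poll`` method defined above, assuming a reachable Airbyte instance; the connection ID is a placeholder and the op/job names are illustrative.

.. code-block:: python

    from dagster import Definitions, EnvVar, job, op
    from dagster_airbyte import AirbyteResource

    airbyte = AirbyteResource(
        host=EnvVar("AIRBYTE_HOST"),
        port=EnvVar("AIRBYTE_PORT"),
    )


    @op
    def trigger_airbyte_sync(airbyte: AirbyteResource) -> None:
        # Starts the sync and blocks until a terminal state; raises Failure on
        # error, cancellation, or timeout (see sync_and_poll above).
        airbyte.sync_and_poll(
            connection_id="11111111-2222-3333-4444-555555555555",  # placeholder
            poll_interval=10,
        )


    @job
    def airbyte_sync_job():
        trigger_airbyte_sync()


    defs = Definitions(jobs=[airbyte_sync_job], resources={"airbyte": airbyte})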
[docs]@dagster_maintained_resource\n@resource(config_schema=AirbyteResource.to_config_schema())\ndef airbyte_resource(context) -> AirbyteResource:\n """This resource allows users to programatically interface with the Airbyte REST API to launch\n syncs and monitor their progress. This currently implements only a subset of the functionality\n exposed by the API.\n\n For a complete set of documentation on the Airbyte REST API, including expected response JSON\n schema, see the `Airbyte API Docs <https://airbyte-public-api-docs.s3.us-east-2.amazonaws.com/rapidoc-api-docs.html#overview>`_.\n\n To configure this resource, we recommend using the `configured\n <https://docs.dagster.io/concepts/configuration/configured>`_ method.\n\n **Examples:**\n\n .. code-block:: python\n\n from dagster import job\n from dagster_airbyte import airbyte_resource\n\n my_airbyte_resource = airbyte_resource.configured(\n {\n "host": {"env": "AIRBYTE_HOST"},\n "port": {"env": "AIRBYTE_PORT"},\n # If using basic auth\n "username": {"env": "AIRBYTE_USERNAME"},\n "password": {"env": "AIRBYTE_PASSWORD"},\n }\n )\n\n @job(resource_defs={"airbyte":my_airbyte_resource})\n def my_airbyte_job():\n ...\n\n """\n return AirbyteResource.from_resource_context(context)
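For the legacy config-schema flavor above, the same ``sync_and_poll`` call is reached through ``context.resources``; a sketch with a placeholder connection ID:

.. code-block:: python

    from dagster import job, op
    from dagster_airbyte import airbyte_resource

    my_airbyte_resource = airbyte_resource.configured(
        {
            "host": {"env": "AIRBYTE_HOST"},
            "port": {"env": "AIRBYTE_PORT"},
        }
    )


    @op(required_resource_keys={"airbyte"})
    def sync_releases(context):
        # The bound resource is an AirbyteResource instance, so the methods
        # documented above (sync_and_poll, get_job_status, ...) are available.
        context.resources.airbyte.sync_and_poll(
            connection_id="11111111-2222-3333-4444-555555555555",  # placeholder
        )


    @job(resource_defs={"airbyte": my_airbyte_resource})
    def legacy_airbyte_sync_job():
        sync_releases()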
\n\n\n@dagster_maintained_resource\n@resource(config_schema=infer_schema_from_config_class(AirbyteCloudResource))\ndef airbyte_cloud_resource(context) -> AirbyteCloudResource:\n """This resource allows users to programmatically interface with the Airbyte Cloud REST API to launch\n syncs and monitor their progress. Currently, this resource may only be used with the more basic\n `dagster-airbyte` APIs, including the ops and assets.\n\n """\n return AirbyteCloudResource.from_resource_context(context)\n
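A hedged sketch of configuring ``airbyte_cloud_resource``: it assumes the Cloud resource's credential field is ``api_key`` and that it exposes the same ``sync_and_poll`` call; check ``AirbyteCloudResource`` in your installed version for the exact schema.

.. code-block:: python

    from dagster import job, op
    from dagster_airbyte import airbyte_cloud_resource

    # ``api_key`` is assumed to be the credential field inferred from
    # AirbyteCloudResource; adjust to the schema of your installed version.
    my_airbyte_cloud = airbyte_cloud_resource.configured(
        {"api_key": {"env": "AIRBYTE_API_KEY"}}
    )


    @op(required_resource_keys={"airbyte"})
    def sync_cloud_connection(context):
        context.resources.airbyte.sync_and_poll(
            connection_id="11111111-2222-3333-4444-555555555555",  # placeholder
        )


    @job(resource_defs={"airbyte": my_airbyte_cloud})
    def airbyte_cloud_sync_job():
        sync_cloud_connection()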
", "current_page_name": "_modules/dagster_airbyte/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_airbyte.resources"}}, "dagster_airflow": {"dagster_asset_factory": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_airflow.dagster_asset_factory

\nfrom typing import AbstractSet, List, Mapping, Optional, Set, Tuple\n\nfrom airflow.models.connection import Connection\nfrom airflow.models.dag import DAG\nfrom dagster import (\n    AssetKey,\n    AssetsDefinition,\n    GraphDefinition,\n    OutputMapping,\n    TimeWindowPartitionsDefinition,\n)\nfrom dagster._core.definitions.graph_definition import create_adjacency_lists\nfrom dagster._utils.schedules import is_valid_cron_schedule\n\nfrom dagster_airflow.dagster_job_factory import make_dagster_job_from_airflow_dag\nfrom dagster_airflow.utils import (\n    DagsterAirflowError,\n    normalized_name,\n)\n\n\ndef _build_asset_dependencies(\n    dag: DAG,\n    graph: GraphDefinition,\n    task_ids_by_asset_key: Mapping[AssetKey, AbstractSet[str]],\n    upstream_dependencies_by_asset_key: Mapping[AssetKey, AbstractSet[AssetKey]],\n) -> Tuple[AbstractSet[OutputMapping], Mapping[str, AssetKey], Mapping[str, Set[AssetKey]]]:\n    """Builds the asset dependency graph for a given set of airflow task mappings and a dagster graph."""\n    output_mappings = set()\n    keys_by_output_name = {}\n    internal_asset_deps: dict[str, Set[AssetKey]] = {}\n\n    visited_nodes: dict[str, bool] = {}\n    upstream_deps = set()\n\n    def find_upstream_dependency(node_name: str) -> None:\n        """Uses Depth-Firs-Search to find all upstream asset dependencies\n        as described in task_ids_by_asset_key.\n        """\n        # node has been visited\n        if visited_nodes[node_name]:\n            return\n        # mark node as visted\n        visited_nodes[node_name] = True\n        # traverse upstream nodes\n        for output_handle in graph.dependency_structure.all_upstream_outputs_from_node(node_name):\n            forward_node = output_handle.node_name\n            match = False\n            # find any assets produced by upstream nodes and add them to the internal asset deps\n            for asset_key in task_ids_by_asset_key:\n                if (\n                    forward_node.replace(f"{normalized_name(dag.dag_id)}__", "")\n                    in task_ids_by_asset_key[asset_key]\n                ):\n                    upstream_deps.add(asset_key)\n                    match = True\n            # don't traverse past nodes that have assets\n            if not match:\n                find_upstream_dependency(forward_node)\n\n    # iterate through each asset to find all upstream asset dependencies\n    for asset_key in task_ids_by_asset_key:\n        asset_upstream_deps = set()\n        for task_id in task_ids_by_asset_key[asset_key]:\n            visited_nodes = {s.name: False for s in graph.nodes}\n            upstream_deps = set()\n            find_upstream_dependency(normalized_name(dag.dag_id, task_id))\n            for dep in upstream_deps:\n                asset_upstream_deps.add(dep)\n            keys_by_output_name[f"result_{normalized_name(dag.dag_id, task_id)}"] = asset_key\n            output_mappings.add(\n                OutputMapping(\n                    graph_output_name=f"result_{normalized_name(dag.dag_id, task_id)}",\n                    mapped_node_name=normalized_name(dag.dag_id, task_id),\n                    mapped_node_output_name="airflow_task_complete",  # Default output name\n                )\n            )\n\n        # the tasks for a given asset should have the same internal deps\n        for task_id in task_ids_by_asset_key[asset_key]:\n            if f"result_{normalized_name(dag.dag_id, task_id)}" in internal_asset_deps:\n                
internal_asset_deps[f"result_{normalized_name(dag.dag_id, task_id)}"].update(\n                    asset_upstream_deps\n                )\n            else:\n                internal_asset_deps[f"result_{normalized_name(dag.dag_id, task_id)}"] = (\n                    asset_upstream_deps\n                )\n\n    # add new upstream asset dependencies to the internal deps\n    for asset_key in upstream_dependencies_by_asset_key:\n        for key in keys_by_output_name:\n            if keys_by_output_name[key] == asset_key:\n                internal_asset_deps[key].update(upstream_dependencies_by_asset_key[asset_key])\n\n    return (output_mappings, keys_by_output_name, internal_asset_deps)\n\n\n
[docs]def load_assets_from_airflow_dag(\n dag: DAG,\n task_ids_by_asset_key: Mapping[AssetKey, AbstractSet[str]] = {},\n upstream_dependencies_by_asset_key: Mapping[AssetKey, AbstractSet[AssetKey]] = {},\n connections: Optional[List[Connection]] = None,\n) -> List[AssetsDefinition]:\n """[Experimental] Construct Dagster Assets for a given Airflow DAG.\n\n Args:\n dag (DAG): The Airflow DAG to compile into a Dagster job\n task_ids_by_asset_key (Optional[Mapping[AssetKey, AbstractSet[str]]]): A mapping from asset\n keys to task ids. Used break up the Airflow Dag into multiple SDAs\n upstream_dependencies_by_asset_key (Optional[Mapping[AssetKey, AbstractSet[AssetKey]]]): A\n mapping from upstream asset keys to assets provided in task_ids_by_asset_key. Used to\n declare new upstream SDA depenencies.\n connections (List[Connection]): List of Airflow Connections to be created in the Airflow DB\n\n Returns:\n List[AssetsDefinition]\n """\n cron_schedule = dag.normalized_schedule_interval\n if cron_schedule is not None and not is_valid_cron_schedule(str(cron_schedule)):\n raise DagsterAirflowError(f"Invalid cron schedule: {cron_schedule} in DAG {dag.dag_id}")\n\n job = make_dagster_job_from_airflow_dag(dag, connections=connections)\n graph = job._graph_def # noqa: SLF001\n start_date = dag.start_date if dag.start_date else dag.default_args.get("start_date")\n if start_date is None:\n raise DagsterAirflowError(f"Invalid start_date: {start_date} in DAG {dag.dag_id}")\n\n # leaf nodes have no downstream nodes\n forward_edges, _ = create_adjacency_lists(graph.nodes, graph.dependency_structure)\n leaf_nodes = {\n node_name.replace(f"{normalized_name(dag.dag_id)}__", "")\n for node_name, downstream_nodes in forward_edges.items()\n if not downstream_nodes\n }\n\n mutated_task_ids_by_asset_key: dict[AssetKey, set[str]] = {}\n\n if task_ids_by_asset_key is None or task_ids_by_asset_key == {}:\n # if no mappings are provided the dag becomes a single SDA\n task_ids_by_asset_key = {AssetKey(dag.dag_id): leaf_nodes}\n else:\n # if mappings were provide any unmapped leaf nodes are added to a default asset\n used_nodes: set[str] = set()\n for key in task_ids_by_asset_key:\n used_nodes.update(task_ids_by_asset_key[key])\n\n mutated_task_ids_by_asset_key[AssetKey(dag.dag_id)] = leaf_nodes - used_nodes\n\n for key in task_ids_by_asset_key:\n if key not in mutated_task_ids_by_asset_key:\n mutated_task_ids_by_asset_key[key] = set(task_ids_by_asset_key[key])\n else:\n mutated_task_ids_by_asset_key[key].update(task_ids_by_asset_key[key])\n\n output_mappings, keys_by_output_name, internal_asset_deps = _build_asset_dependencies(\n dag, graph, mutated_task_ids_by_asset_key, upstream_dependencies_by_asset_key\n )\n\n new_graph = graph.copy(\n output_mappings=list(output_mappings),\n )\n\n asset_def = AssetsDefinition.from_graph(\n graph_def=new_graph,\n partitions_def=(\n TimeWindowPartitionsDefinition(\n cron_schedule=str(cron_schedule),\n timezone=dag.timezone.name,\n start=start_date.strftime("%Y-%m-%dT%H:%M:%S"),\n fmt="%Y-%m-%dT%H:%M:%S",\n )\n if cron_schedule is not None\n else None\n ),\n group_name=dag.dag_id,\n keys_by_output_name=keys_by_output_name,\n internal_asset_deps=internal_asset_deps,\n can_subset=True,\n )\n return [asset_def]
\n
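A sketch of ``load_assets_from_airflow_dag`` against a toy two-task DAG, mapping the terminal task to an asset key; the DAG, task, and asset names are illustrative placeholders.

.. code-block:: python

    from datetime import datetime

    from airflow.models.dag import DAG
    from airflow.operators.bash import BashOperator
    from dagster import AssetKey, Definitions
    from dagster_airflow import load_assets_from_airflow_dag

    with DAG(
        dag_id="example_etl",
        start_date=datetime(2023, 1, 1),
        schedule_interval="@daily",  # must normalize to a valid cron schedule
    ) as example_etl:
        extract = BashOperator(task_id="extract", bash_command="echo extract")
        load = BashOperator(task_id="load", bash_command="echo load")
        extract >> load

    # Map the leaf task to an asset key; any unmapped leaf tasks are grouped
    # under a default asset named after the dag_id.
    assets = load_assets_from_airflow_dag(
        dag=example_etl,
        task_ids_by_asset_key={AssetKey("warehouse_table"): {"load"}},
    )

    defs = Definitions(assets=assets)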
", "current_page_name": "_modules/dagster_airflow/dagster_asset_factory", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_airflow.dagster_asset_factory"}, "dagster_factory": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_airflow.dagster_factory

\nimport os\nfrom typing import List, Mapping, Optional, Tuple\n\nfrom airflow.models.connection import Connection\nfrom airflow.models.dagbag import DagBag\nfrom dagster import (\n    Definitions,\n    JobDefinition,\n    ResourceDefinition,\n    ScheduleDefinition,\n    _check as check,\n)\n\nfrom dagster_airflow.dagster_job_factory import make_dagster_job_from_airflow_dag\nfrom dagster_airflow.dagster_schedule_factory import (\n    _is_dag_is_schedule,\n    make_dagster_schedule_from_airflow_dag,\n)\nfrom dagster_airflow.patch_airflow_example_dag import patch_airflow_example_dag\nfrom dagster_airflow.resources import (\n    make_ephemeral_airflow_db_resource as make_ephemeral_airflow_db_resource,\n)\nfrom dagster_airflow.resources.airflow_ephemeral_db import AirflowEphemeralDatabase\nfrom dagster_airflow.resources.airflow_persistent_db import AirflowPersistentDatabase\nfrom dagster_airflow.utils import (\n    is_airflow_2_loaded_in_environment,\n)\n\n\n
[docs]def make_dagster_definitions_from_airflow_dag_bag(\n dag_bag: DagBag,\n connections: Optional[List[Connection]] = None,\n resource_defs: Optional[Mapping[str, ResourceDefinition]] = {},\n) -> Definitions:\n """Construct a Dagster definition corresponding to Airflow DAGs in DagBag.\n\n Usage:\n Create `make_dagster_definition.py`:\n from dagster_airflow import make_dagster_definition_from_airflow_dag_bag\n from airflow_home import my_dag_bag\n\n def make_definition_from_dag_bag():\n return make_dagster_definition_from_airflow_dag_bag(my_dag_bag)\n\n Use Definitions as usual, for example:\n `dagster-webserver -f path/to/make_dagster_definition.py`\n\n Args:\n dag_bag (DagBag): Airflow DagBag Model\n connections (List[Connection]): List of Airflow Connections to be created in the Airflow DB\n\n Returns:\n Definitions\n """\n check.inst_param(dag_bag, "dag_bag", DagBag)\n connections = check.opt_list_param(connections, "connections", of_type=Connection)\n resource_defs = check.opt_mapping_param(resource_defs, "resource_defs")\n if resource_defs is None or "airflow_db" not in resource_defs:\n resource_defs = dict(resource_defs) if resource_defs else {}\n resource_defs["airflow_db"] = make_ephemeral_airflow_db_resource(connections=connections)\n\n schedules, jobs = make_schedules_and_jobs_from_airflow_dag_bag(\n dag_bag=dag_bag,\n connections=connections,\n resource_defs=resource_defs,\n )\n\n return Definitions(\n schedules=schedules,\n jobs=jobs,\n resources=resource_defs,\n )
\n\n\n
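A sketch of pointing the factory at a ``DagBag``; the dags folder path is a placeholder. Scheduled DAGs become Dagster schedules (with their backing jobs), unscheduled DAGs become plain jobs, and an ephemeral ``airflow_db`` resource is added automatically when none is supplied.

.. code-block:: python

    from airflow.models.dagbag import DagBag
    from dagster_airflow import make_dagster_definitions_from_airflow_dag_bag

    dag_bag = DagBag(
        dag_folder="/path/to/airflow_home/dags",  # placeholder path
        include_examples=False,
    )

    # Load with e.g. `dagster-webserver -f this_file.py`.
    defs = make_dagster_definitions_from_airflow_dag_bag(dag_bag)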
[docs]def make_dagster_definitions_from_airflow_dags_path(\n dag_path: str,\n safe_mode: bool = True,\n connections: Optional[List[Connection]] = None,\n resource_defs: Optional[Mapping[str, ResourceDefinition]] = {},\n) -> Definitions:\n """Construct a Dagster repository corresponding to Airflow DAGs in dag_path.\n\n Usage:\n Create ``make_dagster_definitions.py``:\n\n .. code-block:: python\n\n from dagster_airflow import make_dagster_definitions_from_airflow_dags_path\n\n def make_definitions_from_dir():\n return make_dagster_definitions_from_airflow_dags_path(\n '/path/to/dags/',\n )\n\n Use RepositoryDefinition as usual, for example:\n ``dagster-webserver -f path/to/make_dagster_repo.py -n make_repo_from_dir``\n\n Args:\n dag_path (str): Path to directory or file that contains Airflow Dags\n include_examples (bool): True to include Airflow's example DAGs. (default: False)\n safe_mode (bool): True to use Airflow's default heuristic to find files that contain DAGs\n (ie find files that contain both b'DAG' and b'airflow') (default: True)\n connections (List[Connection]): List of Airflow Connections to be created in the Airflow DB\n\n Returns:\n Definitions\n """\n check.str_param(dag_path, "dag_path")\n check.bool_param(safe_mode, "safe_mode")\n connections = check.opt_list_param(connections, "connections", of_type=Connection)\n resource_defs = check.opt_mapping_param(resource_defs, "resource_defs")\n if resource_defs is None or "airflow_db" not in resource_defs:\n resource_defs = dict(resource_defs) if resource_defs else {}\n resource_defs["airflow_db"] = make_ephemeral_airflow_db_resource(connections=connections)\n\n if (\n resource_defs["airflow_db"].resource_fn.__qualname__.split(".")[0]\n == "AirflowEphemeralDatabase"\n ):\n AirflowEphemeralDatabase._initialize_database(connections=connections) # noqa: SLF001\n elif (\n resource_defs["airflow_db"].resource_fn.__qualname__.split(".")[0]\n == "AirflowPersistentDatabase"\n ):\n AirflowPersistentDatabase._initialize_database( # noqa: SLF001\n uri=(\n os.getenv("AIRFLOW__DATABASE__SQL_ALCHEMY_CONN", "")\n if is_airflow_2_loaded_in_environment()\n else os.getenv("AIRFLOW__CORE__SQL_ALCHEMY_CONN", "")\n ),\n connections=connections,\n )\n\n dag_bag = DagBag(\n dag_folder=dag_path,\n include_examples=False, # Exclude Airflow example dags\n safe_mode=safe_mode,\n )\n\n return make_dagster_definitions_from_airflow_dag_bag(\n dag_bag=dag_bag,\n connections=connections,\n resource_defs=resource_defs,\n )
\n\n\ndef make_dagster_definitions_from_airflow_example_dags(\n resource_defs: Optional[Mapping[str, ResourceDefinition]] = {},\n) -> Definitions:\n """Construct a Dagster repository for Airflow's example DAGs.\n\n Usage:\n\n Create `make_dagster_definitions.py`:\n from dagster_airflow import make_dagster_definitions_from_airflow_example_dags\n\n def make_airflow_example_dags():\n return make_dagster_definitions_from_airflow_example_dags()\n\n Use Definitions as usual, for example:\n `dagster-webserver -f path/to/make_dagster_definitions.py`\n\n Args:\n resource_defs: Optional[Mapping[str, ResourceDefinition]]\n Resource definitions to be used with the definitions\n\n Returns:\n Definitions\n """\n dag_bag = DagBag(\n dag_folder="some/empty/folder/with/no/dags", # prevent defaulting to settings.DAGS_FOLDER\n include_examples=True,\n )\n\n # There is a bug in Airflow v1 where the python_callable for task\n # 'search_catalog' is missing a required position argument '_'. It is fixed in airflow v2\n patch_airflow_example_dag(dag_bag)\n\n return make_dagster_definitions_from_airflow_dag_bag(\n dag_bag=dag_bag, resource_defs=resource_defs\n )\n\n\n
[docs]def make_schedules_and_jobs_from_airflow_dag_bag(\n dag_bag: DagBag,\n connections: Optional[List[Connection]] = None,\n resource_defs: Optional[Mapping[str, ResourceDefinition]] = {},\n) -> Tuple[List[ScheduleDefinition], List[JobDefinition]]:\n """Construct Dagster Schedules and Jobs corresponding to Airflow DagBag.\n\n Args:\n dag_bag (DagBag): Airflow DagBag Model\n connections (List[Connection]): List of Airflow Connections to be created in the Airflow DB\n\n Returns:\n - List[ScheduleDefinition]: The generated Dagster Schedules\n - List[JobDefinition]: The generated Dagster Jobs\n """\n check.inst_param(dag_bag, "dag_bag", DagBag)\n connections = check.opt_list_param(connections, "connections", of_type=Connection)\n\n job_defs = []\n schedule_defs = []\n count = 0\n # To enforce predictable iteration order\n sorted_dag_ids = sorted(dag_bag.dag_ids)\n for dag_id in sorted_dag_ids:\n dag = dag_bag.dags.get(dag_id)\n if not dag:\n continue\n if _is_dag_is_schedule(dag):\n schedule_defs.append(\n make_dagster_schedule_from_airflow_dag(\n dag=dag, tags=None, connections=connections, resource_defs=resource_defs\n )\n )\n else:\n job_defs.append(\n make_dagster_job_from_airflow_dag(\n dag=dag, tags=None, connections=connections, resource_defs=resource_defs\n )\n )\n\n count += 1\n\n return schedule_defs, job_defs
\n
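When the generated schedules and jobs need to be combined with other definitions, the lower-level factory returns both lists; a sketch assuming a placeholder dags folder.

.. code-block:: python

    from airflow.models.dagbag import DagBag
    from dagster import Definitions
    from dagster_airflow import (
        make_ephemeral_airflow_db_resource,
        make_schedules_and_jobs_from_airflow_dag_bag,
    )

    dag_bag = DagBag(
        dag_folder="/path/to/airflow_home/dags",  # placeholder path
        include_examples=False,
    )

    airflow_db = make_ephemeral_airflow_db_resource()
    schedules, jobs = make_schedules_and_jobs_from_airflow_dag_bag(
        dag_bag=dag_bag,
        resource_defs={"airflow_db": airflow_db},
    )

    # Merge with whatever else the project defines.
    defs = Definitions(
        schedules=schedules,
        jobs=jobs,
        resources={"airflow_db": airflow_db},
    )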
", "current_page_name": "_modules/dagster_airflow/dagster_factory", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_airflow.dagster_factory"}, "dagster_job_factory": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_airflow.dagster_job_factory

\nfrom typing import List, Mapping, Optional\n\nfrom airflow.models.connection import Connection\nfrom airflow.models.dag import DAG\nfrom dagster import (\n    GraphDefinition,\n    JobDefinition,\n    ResourceDefinition,\n    _check as check,\n)\nfrom dagster._core.definitions.utils import validate_tags\nfrom dagster._core.instance import IS_AIRFLOW_INGEST_PIPELINE_STR\n\nfrom dagster_airflow.airflow_dag_converter import get_graph_definition_args\nfrom dagster_airflow.resources import (\n    make_ephemeral_airflow_db_resource as make_ephemeral_airflow_db_resource,\n)\nfrom dagster_airflow.utils import (\n    normalized_name,\n)\n\n\n
[docs]def make_dagster_job_from_airflow_dag(\n dag: DAG,\n tags: Optional[Mapping[str, str]] = None,\n connections: Optional[List[Connection]] = None,\n resource_defs: Optional[Mapping[str, ResourceDefinition]] = {},\n) -> JobDefinition:\n """Construct a Dagster job corresponding to a given Airflow DAG.\n\n Tasks in the resulting job will execute the ``execute()`` method on the corresponding\n Airflow Operator. Dagster, any dependencies required by Airflow Operators, and the module\n containing your DAG definition must be available in the Python environment within which your\n Dagster solids execute.\n\n To set Airflow's ``execution_date`` for use with Airflow Operator's ``execute()`` methods,\n either:\n\n 1. (Best for ad hoc runs) Execute job directly. This will set execution_date to the\n time (in UTC) of the run.\n\n 2. Add ``{'airflow_execution_date': utc_date_string}`` to the job tags. This will override\n behavior from (1).\n\n .. code-block:: python\n\n my_dagster_job = make_dagster_job_from_airflow_dag(\n dag=dag,\n tags={'airflow_execution_date': utc_execution_date_str}\n )\n my_dagster_job.execute_in_process()\n\n 3. (Recommended) Add ``{'airflow_execution_date': utc_date_string}`` to the run tags,\n such as in the Dagster UI. This will override behavior from (1) and (2)\n\n\n We apply normalized_name() to the dag id and task ids when generating job name and op\n names to ensure that names conform to Dagster's naming conventions.\n\n Args:\n dag (DAG): The Airflow DAG to compile into a Dagster job\n tags (Dict[str, Field]): Job tags. Optionally include\n `tags={'airflow_execution_date': utc_date_string}` to specify execution_date used within\n execution of Airflow Operators.\n connections (List[Connection]): List of Airflow Connections to be created in the Ephemeral\n Airflow DB, if use_emphemeral_airflow_db is False this will be ignored.\n\n Returns:\n JobDefinition: The generated Dagster job\n\n """\n check.inst_param(dag, "dag", DAG)\n tags = check.opt_mapping_param(tags, "tags")\n connections = check.opt_list_param(connections, "connections", of_type=Connection)\n\n mutated_tags = dict(tags)\n if IS_AIRFLOW_INGEST_PIPELINE_STR not in tags:\n mutated_tags[IS_AIRFLOW_INGEST_PIPELINE_STR] = "true"\n\n mutated_tags = validate_tags(mutated_tags)\n\n node_dependencies, node_defs = get_graph_definition_args(dag=dag)\n\n graph_def = GraphDefinition(\n name=normalized_name(dag.dag_id),\n description="",\n node_defs=node_defs,\n dependencies=node_dependencies,\n tags=mutated_tags,\n )\n\n if resource_defs is None or "airflow_db" not in resource_defs:\n resource_defs = dict(resource_defs) if resource_defs else {}\n resource_defs["airflow_db"] = make_ephemeral_airflow_db_resource(connections=connections)\n\n job_def = JobDefinition(\n name=normalized_name(dag.dag_id),\n description="",\n graph_def=graph_def,\n resource_defs=resource_defs,\n tags=mutated_tags,\n metadata={},\n op_retry_policy=None,\n version_strategy=None,\n )\n return job_def
\n
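An end-to-end sketch that converts a toy DAG and pins ``airflow_execution_date`` at construction time (option 2 in the docstring above); the DAG, task, and date values are placeholders.

.. code-block:: python

    from datetime import datetime

    from airflow.models.dag import DAG
    from airflow.operators.bash import BashOperator
    from dagster_airflow import make_dagster_job_from_airflow_dag

    with DAG(dag_id="hello_dag", start_date=datetime(2023, 1, 1)) as hello_dag:
        BashOperator(task_id="say_hello", bash_command="echo hello")

    # Omit the tag to default to the UTC time of the run (option 1 above).
    hello_job = make_dagster_job_from_airflow_dag(
        dag=hello_dag,
        tags={"airflow_execution_date": "2023-01-01T00:00:00+00:00"},
    )

    if __name__ == "__main__":
        hello_job.execute_in_process()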
", "current_page_name": "_modules/dagster_airflow/dagster_job_factory", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_airflow.dagster_job_factory"}, "operators": {"dagster_operator": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_airflow.operators.dagster_operator

\nimport json\n\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\n\nfrom dagster_airflow.hooks.dagster_hook import DagsterHook\nfrom dagster_airflow.links.dagster_link import LINK_FMT, DagsterLink\nfrom dagster_airflow.utils import is_airflow_2_loaded_in_environment\n\n\n
[docs]class DagsterOperator(BaseOperator):\n """DagsterOperator.\n\n Uses the dagster graphql api to run and monitor dagster jobs on remote dagster infrastructure\n\n Parameters:\n repository_name (str): the name of the repository to use\n repostitory_location_name (str): the name of the repostitory location to use\n job_name (str): the name of the job to run\n run_config (Optional[Dict[str, Any]]): the run config to use for the job run\n dagster_conn_id (Optional[str]): the id of the dagster connection, airflow 2.0+ only\n organization_id (Optional[str]): the id of the dagster cloud organization\n deployment_name (Optional[str]): the name of the dagster cloud deployment\n user_token (Optional[str]): the dagster cloud user token to use\n """\n\n template_fields = ["run_config"]\n template_ext = (".yaml", ".yml", ".json")\n ui_color = "#663399"\n ui_fgcolor = "#e0e3fc"\n operator_extra_links = (DagsterLink(),)\n\n @apply_defaults\n def __init__(\n self,\n dagster_conn_id="dagster_default",\n run_config=None,\n repository_name="",\n repostitory_location_name="",\n job_name="",\n # params for airflow < 2.0.0 were custom connections aren't supported\n deployment_name="prod",\n user_token=None,\n organization_id="",\n url="https://dagster.cloud/",\n *args,\n **kwargs,\n ) -> None:\n super().__init__(*args, **kwargs)\n self.run_id = None\n self.dagster_conn_id = dagster_conn_id if is_airflow_2_loaded_in_environment() else None\n self.run_config = run_config or {}\n self.repository_name = repository_name\n self.repostitory_location_name = repostitory_location_name\n self.job_name = job_name\n\n self.user_token = user_token\n self.url = url\n self.organization_id = organization_id\n self.deployment_name = deployment_name\n\n self.hook = DagsterHook(\n dagster_conn_id=self.dagster_conn_id,\n user_token=self.user_token,\n url=f"{self.url}{self.organization_id}/{self.deployment_name}/graphql",\n )\n\n def _is_json(self, blob):\n try:\n json.loads(blob)\n except ValueError:\n return False\n return True\n\n def pre_execute(self, context):\n # force re-rendering to ensure run_config renders any templated\n # content from run_config that couldn't be accessed on init\n setattr(\n self,\n "run_config",\n self.render_template(self.run_config, context),\n )\n\n def on_kill(self):\n self.log.info("Terminating Run")\n self.hook.terminate_run(\n run_id=self.run_id,\n )\n\n def execute(self, context):\n try:\n return self._execute(context)\n except Exception as e:\n raise e\n\n def _execute(self, context):\n self.run_id = self.hook.launch_run(\n repository_name=self.repository_name,\n repostitory_location_name=self.repostitory_location_name,\n job_name=self.job_name,\n run_config=self.run_config,\n )\n # save relevant info in xcom for use in links\n context["task_instance"].xcom_push(key="run_id", value=self.run_id)\n context["task_instance"].xcom_push(\n key="organization_id",\n value=self.hook.organization_id if self.dagster_conn_id else self.organization_id,\n )\n context["task_instance"].xcom_push(\n key="deployment_name",\n value=self.hook.deployment_name if self.dagster_conn_id else self.deployment_name,\n )\n\n self.log.info("Run Starting....")\n self.log.info(\n "Run tracking: %s",\n LINK_FMT.format(\n organization_id=self.hook.organization_id,\n deployment_name=self.hook.deployment_name,\n run_id=self.run_id,\n ),\n )\n self.hook.wait_for_run(\n run_id=self.run_id,\n )
\n\n\n
[docs]class DagsterCloudOperator(DagsterOperator):\n """DagsterCloudOperator.\n\n Uses the dagster cloud graphql api to run and monitor dagster jobs on dagster cloud\n\n Parameters:\n repository_name (str): the name of the repository to use\n repostitory_location_name (str): the name of the repository location to use\n job_name (str): the name of the job to run\n run_config (Optional[Dict[str, Any]]): the run config to use for the job run\n dagster_conn_id (Optional[str]): the id of the dagster connection, airflow 2.0+ only\n organization_id (Optional[str]): the id of the dagster cloud organization\n deployment_name (Optional[str]): the name of the dagster cloud deployment\n user_token (Optional[str]): the dagster cloud user token to use\n """
\n
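A sketch of wiring the operator into an Airflow DAG. It assumes an Airflow 2 connection named ``dagster_default`` holds the Dagster Cloud URL and user token; the repository, location, and job names are placeholders. Note that ``repostitory_location_name`` is spelled exactly as the constructor above defines it.

.. code-block:: python

    from datetime import datetime

    from airflow.models.dag import DAG
    from dagster_airflow import DagsterCloudOperator

    with DAG(
        dag_id="trigger_dagster_cloud_job",
        start_date=datetime(2023, 1, 1),
        schedule_interval="@daily",
        catchup=False,
    ) as dag:
        launch_job = DagsterCloudOperator(
            task_id="launch_dagster_job",
            dagster_conn_id="dagster_default",  # Airflow 2 connection with URL + token
            repository_name="my_repository",  # placeholder
            repostitory_location_name="my_location",  # placeholder; param name as defined above
            job_name="my_dagster_job",  # placeholder
            run_config={},
        )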
", "current_page_name": "_modules/dagster_airflow/operators/dagster_operator", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_airflow.operators.dagster_operator"}}, "resources": {"airflow_ephemeral_db": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_airflow.resources.airflow_ephemeral_db

\nimport importlib\nimport os\nimport tempfile\nfrom typing import List, Optional\n\nimport airflow\nfrom airflow.models.connection import Connection\nfrom airflow.utils import db\nfrom dagster import (\n    Array,\n    DagsterRun,\n    Field,\n    InitResourceContext,\n    Noneable,\n    ResourceDefinition,\n    _check as check,\n)\n\nfrom dagster_airflow.resources.airflow_db import AirflowDatabase\nfrom dagster_airflow.utils import (\n    Locker,\n    create_airflow_connections,\n    is_airflow_2_loaded_in_environment,\n    serialize_connections,\n)\n\n\nclass AirflowEphemeralDatabase(AirflowDatabase):\n    """A ephemeral Airflow database Dagster resource."""\n\n    def __init__(\n        self, airflow_home_path: str, dagster_run: DagsterRun, dag_run_config: Optional[dict] = None\n    ):\n        self.airflow_home_path = airflow_home_path\n        super().__init__(dagster_run=dagster_run, dag_run_config=dag_run_config)\n\n    @staticmethod\n    def _initialize_database(\n        airflow_home_path: str = os.path.join(tempfile.gettempdir(), "dagster_airflow"),\n        connections: List[Connection] = [],\n    ):\n        os.environ["AIRFLOW_HOME"] = airflow_home_path\n        os.makedirs(airflow_home_path, exist_ok=True)\n        with Locker(airflow_home_path):\n            airflow_initialized = os.path.exists(f"{airflow_home_path}/airflow.db")\n            # because AIRFLOW_HOME has been overriden airflow needs to be reloaded\n            if is_airflow_2_loaded_in_environment():\n                importlib.reload(airflow.configuration)\n                importlib.reload(airflow.settings)\n                importlib.reload(airflow)\n            else:\n                importlib.reload(airflow)\n            if not airflow_initialized:\n                db.initdb()\n                create_airflow_connections(connections)\n\n    @staticmethod\n    def from_resource_context(context: InitResourceContext) -> "AirflowEphemeralDatabase":\n        airflow_home_path = os.path.join(tempfile.gettempdir(), f"dagster_airflow_{context.run_id}")\n        AirflowEphemeralDatabase._initialize_database(\n            airflow_home_path=airflow_home_path,\n            connections=[Connection(**c) for c in context.resource_config["connections"]],\n        )\n        return AirflowEphemeralDatabase(\n            airflow_home_path=airflow_home_path,\n            dagster_run=check.not_none(context.dagster_run, "Context must have run"),\n            dag_run_config=context.resource_config.get("dag_run_config"),\n        )\n\n\n
[docs]def make_ephemeral_airflow_db_resource(\n connections: List[Connection] = [], dag_run_config: Optional[dict] = None\n) -> ResourceDefinition:\n """Creates a Dagster resource that provides an ephemeral Airflow database.\n\n Args:\n connections (List[Connection]): List of Airflow Connections to be created in the Airflow DB\n dag_run_config (Optional[dict]): dag_run configuration to be used when creating a DagRun\n\n Returns:\n ResourceDefinition: The ephemeral Airflow DB resource\n\n """\n serialized_connections = serialize_connections(connections)\n airflow_db_resource_def = ResourceDefinition(\n resource_fn=AirflowEphemeralDatabase.from_resource_context,\n config_schema={\n "connections": Field(\n Array(inner_type=dict),\n default_value=serialized_connections,\n is_required=False,\n ),\n "dag_run_config": Field(\n Noneable(dict),\n default_value=dag_run_config,\n is_required=False,\n ),\n },\n description="Ephemeral Airflow DB to be used by dagster-airflow ",\n )\n return airflow_db_resource_def
\n
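A sketch of seeding the ephemeral Airflow DB with a Connection and handing the resource to a converted job; the connection values are placeholders and ``my_dag`` stands in for an existing Airflow DAG.

.. code-block:: python

    from airflow.models.connection import Connection
    from dagster_airflow import (
        make_dagster_job_from_airflow_dag,
        make_ephemeral_airflow_db_resource,
    )

    pg_conn = Connection(
        conn_id="my_postgres",  # placeholder connection
        conn_type="postgres",
        host="localhost",
        login="user",
        password="password",
        schema="analytics",
    )

    airflow_db = make_ephemeral_airflow_db_resource(connections=[pg_conn])

    # `my_dag` is assumed to be an Airflow DAG whose operators read the
    # `my_postgres` connection from the ephemeral Airflow DB:
    #
    # job = make_dagster_job_from_airflow_dag(
    #     dag=my_dag,
    #     resource_defs={"airflow_db": airflow_db},
    # )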
", "current_page_name": "_modules/dagster_airflow/resources/airflow_ephemeral_db", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_airflow.resources.airflow_ephemeral_db"}, "airflow_persistent_db": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_airflow.resources.airflow_persistent_db

\nimport importlib\nimport os\nfrom typing import List, Optional\n\nimport airflow\nfrom airflow.models.connection import Connection\nfrom dagster import (\n    Array,\n    DagsterRun,\n    Field,\n    InitResourceContext,\n    ResourceDefinition,\n    StringSource,\n    _check as check,\n)\n\nfrom dagster_airflow.resources.airflow_db import AirflowDatabase\nfrom dagster_airflow.utils import (\n    create_airflow_connections,\n    is_airflow_2_loaded_in_environment,\n    serialize_connections,\n)\n\n\nclass AirflowPersistentDatabase(AirflowDatabase):\n    """A persistent Airflow database Dagster resource."""\n\n    def __init__(self, dagster_run: DagsterRun, uri: str, dag_run_config: Optional[dict] = None):\n        self.uri = uri\n        super().__init__(dagster_run=dagster_run, dag_run_config=dag_run_config)\n\n    @staticmethod\n    def _initialize_database(uri: str, connections: List[Connection] = []):\n        if is_airflow_2_loaded_in_environment("2.3.0"):\n            os.environ["AIRFLOW__DATABASE__SQL_ALCHEMY_CONN"] = uri\n            importlib.reload(airflow.configuration)\n            importlib.reload(airflow.settings)\n            importlib.reload(airflow)\n        else:\n            os.environ["AIRFLOW__CORE__SQL_ALCHEMY_CONN"] = uri\n            importlib.reload(airflow)\n        create_airflow_connections(connections)\n\n    @staticmethod\n    def from_resource_context(context: InitResourceContext) -> "AirflowPersistentDatabase":\n        uri = context.resource_config["uri"]\n        AirflowPersistentDatabase._initialize_database(\n            uri=uri, connections=[Connection(**c) for c in context.resource_config["connections"]]\n        )\n        return AirflowPersistentDatabase(\n            dagster_run=check.not_none(context.dagster_run, "Context must have run"),\n            uri=uri,\n            dag_run_config=context.resource_config["dag_run_config"],\n        )\n\n\n
[docs]def make_persistent_airflow_db_resource(\n uri: str = "",\n connections: List[Connection] = [],\n dag_run_config: Optional[dict] = {},\n) -> ResourceDefinition:\n """Creates a Dagster resource that provides an persistent Airflow database.\n\n\n Usage:\n .. code-block:: python\n\n from dagster_airflow import (\n make_dagster_definitions_from_airflow_dags_path,\n make_persistent_airflow_db_resource,\n )\n postgres_airflow_db = "postgresql+psycopg2://airflow:airflow@localhost:5432/airflow"\n airflow_db = make_persistent_airflow_db_resource(uri=postgres_airflow_db)\n definitions = make_dagster_definitions_from_airflow_example_dags(\n '/path/to/dags/',\n resource_defs={"airflow_db": airflow_db}\n )\n\n\n Args:\n uri: SQLAlchemy URI of the Airflow DB to be used\n connections (List[Connection]): List of Airflow Connections to be created in the Airflow DB\n dag_run_config (Optional[dict]): dag_run configuration to be used when creating a DagRun\n\n Returns:\n ResourceDefinition: The persistent Airflow DB resource\n\n """\n if is_airflow_2_loaded_in_environment():\n os.environ["AIRFLOW__DATABASE__SQL_ALCHEMY_CONN"] = uri\n else:\n os.environ["AIRFLOW__CORE__SQL_ALCHEMY_CONN"] = uri\n\n serialized_connections = serialize_connections(connections)\n\n airflow_db_resource_def = ResourceDefinition(\n resource_fn=AirflowPersistentDatabase.from_resource_context,\n config_schema={\n "uri": Field(\n StringSource,\n default_value=uri,\n is_required=False,\n ),\n "connections": Field(\n Array(inner_type=dict),\n default_value=serialized_connections,\n is_required=False,\n ),\n "dag_run_config": Field(\n dict,\n default_value=dag_run_config,\n is_required=False,\n ),\n },\n description="Persistent Airflow DB to be used by dagster-airflow ",\n )\n return airflow_db_resource_def
\n
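A sketch pairing the persistent DB resource with ``make_dagster_definitions_from_airflow_dags_path``; the Postgres URI and dags path are placeholders.

.. code-block:: python

    from dagster_airflow import (
        make_dagster_definitions_from_airflow_dags_path,
        make_persistent_airflow_db_resource,
    )

    airflow_db = make_persistent_airflow_db_resource(
        uri="postgresql+psycopg2://airflow:airflow@localhost:5432/airflow",  # placeholder
    )

    defs = make_dagster_definitions_from_airflow_dags_path(
        "/path/to/dags/",  # placeholder
        resource_defs={"airflow_db": airflow_db},
    )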
", "current_page_name": "_modules/dagster_airflow/resources/airflow_persistent_db", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_airflow.resources.airflow_persistent_db"}}}, "dagster_aws": {"ecs": {"launcher": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_aws.ecs.launcher

\nimport json\nimport logging\nimport os\nimport uuid\nimport warnings\nfrom collections import namedtuple\nfrom typing import Any, Dict, List, Mapping, Optional, Sequence\n\nimport boto3\nfrom botocore.exceptions import ClientError\nfrom dagster import (\n    Array,\n    DagsterRunStatus,\n    Field,\n    Noneable,\n    Permissive,\n    ScalarUnion,\n    StringSource,\n    _check as check,\n)\nfrom dagster._core.events import EngineEventData\nfrom dagster._core.instance import T_DagsterInstance\nfrom dagster._core.launcher.base import (\n    CheckRunHealthResult,\n    LaunchRunContext,\n    RunLauncher,\n    WorkerStatus,\n)\nfrom dagster._core.storage.dagster_run import DagsterRun\nfrom dagster._core.storage.tags import RUN_WORKER_ID_TAG\nfrom dagster._grpc.types import ExecuteRunArgs\nfrom dagster._serdes import ConfigurableClass\nfrom dagster._serdes.config_class import ConfigurableClassData\nfrom dagster._utils.backoff import backoff\nfrom typing_extensions import Self\n\nfrom ..secretsmanager import get_secrets_from_arns\nfrom .container_context import SHARED_ECS_SCHEMA, SHARED_TASK_DEFINITION_FIELDS, EcsContainerContext\nfrom .tasks import (\n    DagsterEcsTaskDefinitionConfig,\n    get_current_ecs_task,\n    get_current_ecs_task_metadata,\n    get_task_definition_dict_from_current_task,\n    get_task_kwargs_from_current_task,\n)\nfrom .utils import get_task_definition_family, get_task_logs, task_definitions_match\n\nTags = namedtuple("Tags", ["arn", "cluster", "cpu", "memory"])\n\nRUNNING_STATUSES = [\n    "PROVISIONING",\n    "PENDING",\n    "ACTIVATING",\n    "RUNNING",\n    "DEACTIVATING",\n    "STOPPING",\n    "DEPROVISIONING",\n]\nSTOPPED_STATUSES = ["STOPPED"]\n\nDEFAULT_WINDOWS_RESOURCES = {"cpu": "1024", "memory": "2048"}\n\nDEFAULT_LINUX_RESOURCES = {"cpu": "256", "memory": "512"}\n\n\n
[docs]class EcsRunLauncher(RunLauncher[T_DagsterInstance], ConfigurableClass):\n """RunLauncher that starts a task in ECS for each Dagster job run."""\n\n def __init__(\n self,\n inst_data: Optional[ConfigurableClassData] = None,\n task_definition=None,\n container_name="run",\n secrets=None,\n secrets_tag="dagster",\n env_vars=None,\n include_sidecars=False,\n use_current_ecs_task_config: bool = True,\n run_task_kwargs: Optional[Mapping[str, Any]] = None,\n run_resources: Optional[Dict[str, Any]] = None,\n run_ecs_tags: Optional[List[Dict[str, Optional[str]]]] = None,\n ):\n self._inst_data = inst_data\n self.ecs = boto3.client("ecs")\n self.ec2 = boto3.resource("ec2")\n self.secrets_manager = boto3.client("secretsmanager")\n self.logs = boto3.client("logs")\n\n self.task_definition = None\n self.task_definition_dict = {}\n if isinstance(task_definition, str):\n self.task_definition = task_definition\n elif task_definition and "env" in task_definition:\n check.invariant(\n len(task_definition) == 1,\n "If `task_definition` is set to a dictionary with `env`, `env` must be the only"\n " key.",\n )\n env_var = task_definition["env"]\n self.task_definition = os.getenv(env_var)\n if not self.task_definition:\n raise Exception(\n f"You have attempted to fetch the environment variable {env_var} which is not"\n " set."\n )\n else:\n self.task_definition_dict = task_definition or {}\n\n self.container_name = container_name\n\n self.secrets = check.opt_list_param(secrets, "secrets")\n\n self.env_vars = check.opt_list_param(env_vars, "env_vars")\n\n if self.secrets and all(isinstance(secret, str) for secret in self.secrets):\n warnings.warn(\n "Setting secrets as a list of ARNs is deprecated. "\n "Secrets should instead follow the same structure as the ECS API: "\n "https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_Secret.html",\n DeprecationWarning,\n )\n self.secrets = [\n {"name": name, "valueFrom": value_from}\n for name, value_from in get_secrets_from_arns(\n self.secrets_manager, self.secrets\n ).items()\n ]\n\n self.secrets_tags = [secrets_tag] if secrets_tag else []\n self.include_sidecars = include_sidecars\n\n if self.task_definition:\n task_definition = self.ecs.describe_task_definition(taskDefinition=self.task_definition)\n container_names = [\n container.get("name")\n for container in task_definition["taskDefinition"]["containerDefinitions"]\n ]\n check.invariant(\n container_name in container_names,\n f"Cannot override container '{container_name}' in task definition "\n f"'{self.task_definition}' because the container is not defined.",\n )\n self.task_definition = task_definition["taskDefinition"]["taskDefinitionArn"]\n\n self.use_current_ecs_task_config = check.opt_bool_param(\n use_current_ecs_task_config, "use_current_ecs_task_config"\n )\n\n self.run_task_kwargs = check.opt_mapping_param(run_task_kwargs, "run_task_kwargs")\n if run_task_kwargs:\n check.invariant(\n "taskDefinition" not in run_task_kwargs,\n "Use the `taskDefinition` config field to pass in a task definition to run.",\n )\n check.invariant(\n "overrides" not in run_task_kwargs,\n "Task overrides are set by the run launcher and cannot be set in run_task_kwargs.",\n )\n\n expected_keys = [\n key for key in self.ecs.meta.service_model.shape_for("RunTaskRequest").members\n ]\n\n for key in run_task_kwargs:\n check.invariant(\n key in expected_keys, f"Found an unexpected key {key} in run_task_kwargs"\n )\n\n self.run_resources = check.opt_mapping_param(run_resources, "run_resources")\n\n self.run_ecs_tags = 
check.opt_sequence_param(run_ecs_tags, "run_ecs_tags")\n\n self._current_task_metadata = None\n self._current_task = None\n\n @property\n def inst_data(self):\n return self._inst_data\n\n @property\n def task_role_arn(self) -> Optional[str]:\n if not self.task_definition_dict:\n return None\n return self.task_definition_dict.get("task_role_arn")\n\n @property\n def execution_role_arn(self) -> Optional[str]:\n if not self.task_definition_dict:\n return None\n return self.task_definition_dict.get("execution_role_arn")\n\n @property\n def runtime_platform(self) -> Optional[Mapping[str, Any]]:\n if not self.task_definition_dict:\n return None\n return self.task_definition_dict.get("runtime_platform")\n\n @property\n def mount_points(self) -> Optional[Sequence[Mapping[str, Any]]]:\n if not self.task_definition_dict:\n return None\n return self.task_definition_dict.get("mount_points")\n\n @property\n def volumes(self) -> Optional[Sequence[Mapping[str, Any]]]:\n if not self.task_definition_dict:\n return None\n return self.task_definition_dict.get("volumes")\n\n @property\n def repository_credentials(self) -> Optional[str]:\n if not self.task_definition_dict:\n return None\n return self.task_definition_dict.get("repository_credentials")\n\n @property\n def run_sidecar_containers(self) -> Optional[Sequence[Mapping[str, Any]]]:\n if not self.task_definition_dict:\n return None\n return self.task_definition_dict.get("sidecar_containers")\n\n @classmethod\n def config_type(cls):\n return {\n "task_definition": Field(\n ScalarUnion(\n scalar_type=str,\n non_scalar_schema={\n "log_group": Field(StringSource, is_required=False),\n "sidecar_containers": Field(Array(Permissive({})), is_required=False),\n "requires_compatibilities": Field(Array(str), is_required=False),\n "env": Field(\n str,\n is_required=False,\n description=(\n "Backwards-compatibility for when task_definition was a"\n " StringSource.Can be used to source the task_definition scalar"\n " from an environment variable."\n ),\n ),\n **SHARED_TASK_DEFINITION_FIELDS,\n },\n ),\n is_required=False,\n description=(\n "Either the short name of an existing task definition to use when launching new"\n " tasks, or a dictionary configuration to use when creating a task definition"\n " for the run.If neither is provided, the task definition will be created based"\n " on the current task's task definition."\n ),\n ),\n "container_name": Field(\n StringSource,\n is_required=False,\n default_value="run",\n description=(\n "The container name to use when launching new tasks. Defaults to 'run'."\n ),\n ),\n "secrets": Field(\n Array(\n ScalarUnion(\n scalar_type=str,\n non_scalar_schema={"name": StringSource, "valueFrom": StringSource},\n )\n ),\n is_required=False,\n description=(\n "An array of AWS Secrets Manager secrets. These secrets will "\n "be mounted as environment variables in the container. See "\n "https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_Secret.html."\n ),\n ),\n "secrets_tag": Field(\n Noneable(StringSource),\n is_required=False,\n default_value="dagster",\n description=(\n "AWS Secrets Manager secrets with this tag will be mounted as "\n "environment variables in the container. Defaults to 'dagster'."\n ),\n ),\n "include_sidecars": Field(\n bool,\n is_required=False,\n default_value=False,\n description=(\n "Whether each run should use the same sidecars as the task that launches it. 
"\n "Defaults to False."\n ),\n ),\n "use_current_ecs_task_config": Field(\n bool,\n is_required=False,\n default_value=True,\n description=(\n "Whether to use the run launcher's current ECS task in order to determine "\n "the cluster and networking configuration for the launched task. Defaults to "\n "True. Should only be called if the run launcher is running within an ECS "\n "task."\n ),\n ),\n "run_task_kwargs": Field(\n Permissive(\n {\n "cluster": Field(\n StringSource,\n is_required=False,\n description="Name of the ECS cluster to launch ECS tasks in.",\n ),\n }\n ),\n is_required=False,\n description=(\n "Additional arguments to include while running the task. See"\n " https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.run_task"\n " for the available parameters. The overrides and taskDefinition arguments will"\n " always be set by the run launcher."\n ),\n ),\n **SHARED_ECS_SCHEMA,\n }\n\n @classmethod\n def from_config_value(\n cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any]\n ) -> Self:\n return EcsRunLauncher(inst_data=inst_data, **config_value)\n\n def _set_run_tags(self, run_id: str, cluster: str, task_arn: str):\n tags = {\n "ecs/task_arn": task_arn,\n "ecs/cluster": cluster,\n RUN_WORKER_ID_TAG: str(uuid.uuid4().hex)[0:6],\n }\n self._instance.add_run_tags(run_id, tags)\n\n def build_ecs_tags_for_run_task(self, run, container_context: EcsContainerContext):\n if any(tag["key"] == "dagster/run_id" for tag in container_context.run_ecs_tags):\n raise Exception("Cannot override system ECS tag: dagster/run_id")\n\n return [{"key": "dagster/run_id", "value": run.run_id}, *container_context.run_ecs_tags]\n\n def _get_run_tags(self, run_id):\n run = self._instance.get_run_by_id(run_id)\n tags = run.tags if run else {}\n arn = tags.get("ecs/task_arn")\n cluster = tags.get("ecs/cluster")\n cpu = tags.get("ecs/cpu")\n memory = tags.get("ecs/memory")\n\n return Tags(arn, cluster, cpu, memory)\n\n def _get_command_args(self, run_args: ExecuteRunArgs, context: LaunchRunContext):\n return run_args.get_command_args()\n\n def _get_image_for_run(self, context: LaunchRunContext) -> Optional[str]:\n job_origin = check.not_none(context.job_code_origin)\n return job_origin.repository_origin.container_image\n\n def launch_run(self, context: LaunchRunContext) -> None:\n """Launch a run in an ECS task."""\n run = context.dagster_run\n container_context = EcsContainerContext.create_for_run(run, self)\n\n job_origin = check.not_none(context.job_code_origin)\n\n # ECS limits overrides to 8192 characters including json formatting\n # https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RunTask.html\n # When container_context is serialized as part of the ExecuteRunArgs, we risk\n # going over this limit (for example, if many secrets have been set). 
This strips\n # the container context off of our job origin because we don't actually need\n # it to launch the run; we only needed it to create the task definition.\n repository_origin = job_origin.repository_origin\n\n stripped_repository_origin = repository_origin._replace(container_context={})\n stripped_job_origin = job_origin._replace(repository_origin=stripped_repository_origin)\n\n args = ExecuteRunArgs(\n job_origin=stripped_job_origin,\n run_id=run.run_id,\n instance_ref=self._instance.get_ref(),\n )\n command = self._get_command_args(args, context)\n image = self._get_image_for_run(context)\n\n run_task_kwargs = self._run_task_kwargs(run, image, container_context)\n\n # Set cpu or memory overrides\n # https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-cpu-memory-error.html\n cpu_and_memory_overrides = self.get_cpu_and_memory_overrides(container_context, run)\n\n task_overrides = self._get_task_overrides(container_context, run)\n\n container_overrides: List[Dict[str, Any]] = [\n {\n "name": self._get_container_name(container_context),\n "command": command,\n # containerOverrides expects cpu/memory as integers\n **{k: int(v) for k, v in cpu_and_memory_overrides.items()},\n }\n ]\n\n run_task_kwargs["overrides"] = {\n "containerOverrides": container_overrides,\n # taskOverrides expects cpu/memory as strings\n **cpu_and_memory_overrides,\n **task_overrides,\n }\n run_task_kwargs["tags"] = [\n *run_task_kwargs.get("tags", []),\n *self.build_ecs_tags_for_run_task(run, container_context),\n ]\n\n run_task_kwargs_from_run = self._get_run_task_kwargs_from_run(run)\n run_task_kwargs.update(run_task_kwargs_from_run)\n\n # launchType and capacityProviderStrategy are incompatible - prefer the latter if it is set\n if "launchType" in run_task_kwargs and run_task_kwargs.get("capacityProviderStrategy"):\n del run_task_kwargs["launchType"]\n\n # Run a task using the same network configuration as this processes's task.\n response = self.ecs.run_task(**run_task_kwargs)\n\n tasks = response["tasks"]\n\n if not tasks:\n failures = response["failures"]\n failure_messages = []\n for failure in failures:\n arn = failure.get("arn")\n reason = failure.get("reason")\n detail = failure.get("detail")\n\n failure_message = (\n "Task"\n + (f" {arn}" if arn else "")\n + " failed."\n + (f" Failure reason: {reason}" if reason else "")\n + (f" Failure details: {detail}" if detail else "")\n )\n failure_messages.append(failure_message)\n\n raise Exception("\\n".join(failure_messages) if failure_messages else "Task failed.")\n\n arn = tasks[0]["taskArn"]\n cluster_arn = tasks[0]["clusterArn"]\n self._set_run_tags(run.run_id, cluster=cluster_arn, task_arn=arn)\n self.report_launch_events(run, arn, cluster_arn)\n\n def report_launch_events(\n self, run: DagsterRun, arn: Optional[str] = None, cluster: Optional[str] = None\n ):\n # Extracted method to allow for subclasses to customize the launch reporting behavior\n\n metadata = {}\n if arn:\n metadata["ECS Task ARN"] = arn\n if cluster:\n metadata["ECS Cluster"] = cluster\n\n metadata["Run ID"] = run.run_id\n self._instance.report_engine_event(\n message="Launching run in ECS task",\n dagster_run=run,\n engine_event_data=EngineEventData(metadata),\n cls=self.__class__,\n )\n\n def get_cpu_and_memory_overrides(\n self, container_context: EcsContainerContext, run: DagsterRun\n ) -> Mapping[str, str]:\n overrides = {}\n\n cpu = run.tags.get("ecs/cpu", container_context.run_resources.get("cpu"))\n memory = run.tags.get("ecs/memory", 
container_context.run_resources.get("memory"))\n\n if cpu:\n overrides["cpu"] = cpu\n if memory:\n overrides["memory"] = memory\n\n return overrides\n\n def _get_task_overrides(\n self, container_context: EcsContainerContext, run: DagsterRun\n ) -> Mapping[str, Any]:\n tag_overrides = run.tags.get("ecs/task_overrides")\n\n overrides = {}\n\n if tag_overrides:\n overrides = json.loads(tag_overrides)\n\n ephemeral_storage = run.tags.get(\n "ecs/ephemeral_storage", container_context.run_resources.get("ephemeral_storage")\n )\n if ephemeral_storage:\n overrides["ephemeralStorage"] = {"sizeInGiB": int(ephemeral_storage)}\n\n return overrides\n\n def _get_run_task_kwargs_from_run(self, run: DagsterRun) -> Mapping[str, Any]:\n run_task_kwargs = run.tags.get("ecs/run_task_kwargs")\n if run_task_kwargs:\n return json.loads(run_task_kwargs)\n return {}\n\n def terminate(self, run_id):\n tags = self._get_run_tags(run_id)\n\n run = self._instance.get_run_by_id(run_id)\n if not run:\n return False\n\n self._instance.report_run_canceling(run)\n\n if not (tags.arn and tags.cluster):\n return False\n\n tasks = self.ecs.describe_tasks(tasks=[tags.arn], cluster=tags.cluster).get("tasks")\n if not tasks:\n return False\n\n status = tasks[0].get("lastStatus")\n if status == "STOPPED":\n return False\n\n self.ecs.stop_task(task=tags.arn, cluster=tags.cluster)\n return True\n\n def _get_current_task_metadata(self):\n if self._current_task_metadata is None:\n self._current_task_metadata = get_current_ecs_task_metadata()\n return self._current_task_metadata\n\n def _get_current_task(self):\n if self._current_task is None:\n current_task_metadata = self._get_current_task_metadata()\n self._current_task = get_current_ecs_task(\n self.ecs, current_task_metadata.task_arn, current_task_metadata.cluster\n )\n\n return self._current_task\n\n def _get_run_task_definition_family(self, run: DagsterRun) -> str:\n return get_task_definition_family("run", check.not_none(run.external_job_origin))\n\n def _get_container_name(self, container_context) -> str:\n return container_context.container_name or self.container_name\n\n def _run_task_kwargs(self, run, image, container_context) -> Dict[str, Any]:\n """Return a dictionary of args to launch the ECS task, registering a new task\n definition if needed.\n """\n environment = self._environment(container_context)\n environment.append({"name": "DAGSTER_RUN_JOB_NAME", "value": run.job_name})\n\n secrets = self._secrets(container_context)\n\n if container_context.task_definition_arn:\n task_definition = container_context.task_definition_arn\n else:\n family = self._get_run_task_definition_family(run)\n\n if self.task_definition_dict or not self.use_current_ecs_task_config:\n runtime_platform = container_context.runtime_platform\n is_windows = container_context.runtime_platform.get(\n "operatingSystemFamily"\n ) not in {None, "LINUX"}\n\n default_resources = (\n DEFAULT_WINDOWS_RESOURCES if is_windows else DEFAULT_LINUX_RESOURCES\n )\n task_definition_config = DagsterEcsTaskDefinitionConfig(\n family,\n image,\n self._get_container_name(container_context),\n command=None,\n log_configuration=(\n {\n "logDriver": "awslogs",\n "options": {\n "awslogs-group": self.task_definition_dict["log_group"],\n "awslogs-region": self.ecs.meta.region_name,\n "awslogs-stream-prefix": family,\n },\n }\n if self.task_definition_dict.get("log_group")\n else None\n ),\n secrets=secrets if secrets else [],\n environment=environment,\n execution_role_arn=container_context.execution_role_arn,\n 
task_role_arn=container_context.task_role_arn,\n sidecars=container_context.run_sidecar_containers,\n requires_compatibilities=self.task_definition_dict.get(\n "requires_compatibilities", []\n ),\n cpu=container_context.run_resources.get("cpu", default_resources["cpu"]),\n memory=container_context.run_resources.get(\n "memory", default_resources["memory"]\n ),\n ephemeral_storage=container_context.run_resources.get("ephemeral_storage"),\n runtime_platform=runtime_platform,\n volumes=container_context.volumes,\n mount_points=container_context.mount_points,\n repository_credentials=container_context.repository_credentials,\n )\n task_definition_dict = task_definition_config.task_definition_dict()\n else:\n task_definition_dict = get_task_definition_dict_from_current_task(\n self.ecs,\n family,\n self._get_current_task(),\n image,\n self._get_container_name(container_context),\n environment=environment,\n secrets=secrets if secrets else {},\n include_sidecars=self.include_sidecars,\n task_role_arn=container_context.task_role_arn,\n execution_role_arn=container_context.execution_role_arn,\n cpu=container_context.run_resources.get("cpu"),\n memory=container_context.run_resources.get("memory"),\n runtime_platform=container_context.runtime_platform,\n ephemeral_storage=container_context.run_resources.get("ephemeral_storage"),\n volumes=container_context.volumes,\n mount_points=container_context.mount_points,\n additional_sidecars=container_context.run_sidecar_containers,\n repository_credentials=container_context.repository_credentials,\n )\n\n task_definition_config = DagsterEcsTaskDefinitionConfig.from_task_definition_dict(\n task_definition_dict,\n self._get_container_name(container_context),\n )\n\n container_name = self._get_container_name(container_context)\n\n backoff(\n self._reuse_or_register_task_definition,\n retry_on=(Exception,),\n kwargs={\n "desired_task_definition_config": task_definition_config,\n "container_name": container_name,\n "task_definition_dict": task_definition_dict,\n },\n max_retries=5,\n )\n\n task_definition = family\n\n if self.use_current_ecs_task_config:\n current_task_metadata = get_current_ecs_task_metadata()\n current_task = get_current_ecs_task(\n self.ecs, current_task_metadata.task_arn, current_task_metadata.cluster\n )\n task_kwargs = get_task_kwargs_from_current_task(\n self.ec2,\n current_task_metadata.cluster,\n current_task,\n )\n else:\n task_kwargs = {}\n\n return {**task_kwargs, **self.run_task_kwargs, "taskDefinition": task_definition}\n\n def _reuse_task_definition(\n self, desired_task_definition_config: DagsterEcsTaskDefinitionConfig, container_name: str\n ):\n family = desired_task_definition_config.family\n\n try:\n existing_task_definition = self.ecs.describe_task_definition(taskDefinition=family)[\n "taskDefinition"\n ]\n except ClientError:\n # task definition does not exist, do not reuse\n return False\n\n return task_definitions_match(\n desired_task_definition_config,\n existing_task_definition,\n container_name=container_name,\n )\n\n def _reuse_or_register_task_definition(\n self,\n desired_task_definition_config: DagsterEcsTaskDefinitionConfig,\n container_name: str,\n task_definition_dict: dict,\n ):\n if not self._reuse_task_definition(desired_task_definition_config, container_name):\n self.ecs.register_task_definition(**task_definition_dict)\n\n def _environment(self, container_context):\n return [\n {"name": key, "value": value}\n for key, value in container_context.get_environment_dict().items()\n ]\n\n def _secrets(self, 
container_context):\n secrets = container_context.get_secrets_dict(self.secrets_manager)\n return (\n [{"name": key, "valueFrom": value} for key, value in secrets.items()] if secrets else []\n )\n\n @property\n def supports_check_run_worker_health(self):\n return True\n\n @property\n def include_cluster_info_in_failure_messages(self):\n return True\n\n def _is_transient_startup_failure(self, run, task):\n if not task.get("stoppedReason"):\n return False\n return (\n run.status == DagsterRunStatus.STARTING\n and "Timeout waiting for network interface provisioning to complete"\n in task.get("stoppedReason")\n )\n\n def check_run_worker_health(self, run: DagsterRun):\n run_worker_id = run.tags.get(RUN_WORKER_ID_TAG)\n\n tags = self._get_run_tags(run.run_id)\n container_context = EcsContainerContext.create_for_run(run, self)\n\n if not (tags.arn and tags.cluster):\n return CheckRunHealthResult(WorkerStatus.UNKNOWN, "", run_worker_id=run_worker_id)\n\n tasks = self.ecs.describe_tasks(tasks=[tags.arn], cluster=tags.cluster).get("tasks")\n if not tasks:\n return CheckRunHealthResult(WorkerStatus.UNKNOWN, "", run_worker_id=run_worker_id)\n\n t = tasks[0]\n\n if t.get("lastStatus") in RUNNING_STATUSES:\n return CheckRunHealthResult(WorkerStatus.RUNNING, run_worker_id=run_worker_id)\n elif t.get("lastStatus") in STOPPED_STATUSES:\n failed_containers = []\n for c in t.get("containers"):\n if c.get("exitCode") != 0:\n failed_containers.append(c)\n if len(failed_containers) > 0:\n if len(failed_containers) > 1:\n container_str = "Containers"\n else:\n container_str = "Container"\n\n failure_text = []\n\n cluster_failure_info = (\n f"Task {t.get('taskArn')} failed. Stop code: {t.get('stopCode')}. Stop"\n + f" reason: {t.get('stoppedReason')}."\n + f" {container_str} {[c.get('name') for c in failed_containers]} failed."\n )\n\n logging.warning(\n "Run monitoring detected run worker failure: " + cluster_failure_info\n )\n\n if self.include_cluster_info_in_failure_messages:\n failure_text.append(cluster_failure_info)\n\n logs = []\n\n try:\n logs = get_task_logs(\n self.ecs,\n logs_client=self.logs,\n cluster=tags.cluster,\n task_arn=tags.arn,\n container_name=self._get_container_name(container_context),\n )\n except:\n logging.exception(f"Error trying to get logs for failed task {tags.arn}")\n\n if logs:\n failure_text.append("Run worker logs:\\n" + "\\n".join(logs))\n\n return CheckRunHealthResult(\n WorkerStatus.FAILED,\n "\\n\\n".join(failure_text),\n transient=self._is_transient_startup_failure(run, t),\n run_worker_id=run_worker_id,\n )\n\n return CheckRunHealthResult(WorkerStatus.SUCCESS, run_worker_id=run_worker_id)\n\n return CheckRunHealthResult(\n WorkerStatus.UNKNOWN, "ECS task health status is unknown.", run_worker_id=run_worker_id\n )
\n
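The launcher code above resolves per-run sizing from the `ecs/cpu`, `ecs/memory`, `ecs/ephemeral_storage`, `ecs/task_overrides`, and `ecs/run_task_kwargs` run tags. As a rough, non-authoritative sketch of how such tags are typically supplied (the op and job names below are placeholders and not part of this patch; only the `ecs/*` tag keys come from the launcher source):

from dagster import job, op


@op
def placeholder_op():
    ...


# Illustrative values only. The EcsRunLauncher reads these tags at launch
# time to size the ECS task for this run.
@job(
    tags={
        "ecs/cpu": "1024",
        "ecs/memory": "4096",
        "ecs/ephemeral_storage": "50",  # interpreted as sizeInGiB
    }
)
def ecs_sized_job():
    placeholder_op()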
", "current_page_name": "_modules/dagster_aws/ecs/launcher", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_aws.ecs.launcher"}}, "emr": {"emr": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_aws.emr.emr

\n# Portions of this file are copied from the Yelp MRJob project:\n#\n#   https://github.com/Yelp/mrjob\n#\n#\n# Copyright 2009-2013 Yelp, David Marin\n# Copyright 2015 Yelp\n# Copyright 2017 Yelp\n# Copyright 2018 Contributors\n# Copyright 2019 Yelp and Contributors\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport gzip\nimport re\nfrom io import BytesIO\nfrom urllib.parse import urlparse\n\nimport boto3\nimport dagster\nimport dagster._check as check\nfrom botocore.exceptions import WaiterError\n\nfrom dagster_aws.utils.mrjob.utils import _boto3_now, _wrap_aws_client, strip_microseconds\n\nfrom .types import EMR_CLUSTER_TERMINATED_STATES, EmrClusterState, EmrStepState\n\n# if we can't create or find our own service role, use the one\n# created by the AWS console and CLI\n_FALLBACK_SERVICE_ROLE = "EMR_DefaultRole"\n\n# if we can't create or find our own instance profile, use the one\n# created by the AWS console and CLI\n_FALLBACK_INSTANCE_PROFILE = "EMR_EC2_DefaultRole"\n\n\n
[docs]class EmrError(Exception):\n pass
\n\n\n
[docs]class EmrJobRunner:\n def __init__(\n self,\n region,\n check_cluster_every=30,\n aws_access_key_id=None,\n aws_secret_access_key=None,\n ):\n """This object encapsulates various utilities for interacting with EMR clusters and invoking\n steps (jobs) on them.\n\n See also :py:class:`~dagster_aws.emr.EmrPySparkResource`, which wraps this job runner in a\n resource for pyspark workloads.\n\n Args:\n region (str): AWS region to use\n check_cluster_every (int, optional): How frequently to poll boto3 APIs for updates.\n Defaults to 30 seconds.\n aws_access_key_id ([type], optional): AWS access key ID. Defaults to None, which will\n use the default boto3 credentials chain.\n aws_secret_access_key ([type], optional): AWS secret access key. Defaults to None, which\n will use the default boto3 credentials chain.\n """\n self.region = check.str_param(region, "region")\n\n # This is in seconds\n self.check_cluster_every = check.int_param(check_cluster_every, "check_cluster_every")\n self.aws_access_key_id = check.opt_str_param(aws_access_key_id, "aws_access_key_id")\n self.aws_secret_access_key = check.opt_str_param(\n aws_secret_access_key, "aws_secret_access_key"\n )\n\n def make_emr_client(self):\n """Creates a boto3 EMR client. Construction is wrapped in retries in case client connection\n fails transiently.\n\n Returns:\n botocore.client.EMR: An EMR client\n """\n raw_emr_client = boto3.client(\n "emr",\n aws_access_key_id=self.aws_access_key_id,\n aws_secret_access_key=self.aws_secret_access_key,\n region_name=self.region,\n )\n return _wrap_aws_client(raw_emr_client, min_backoff=self.check_cluster_every)\n\n def cluster_id_from_name(self, cluster_name):\n """Get a cluster ID in the format "j-123ABC123ABC1" given a cluster name "my cool cluster".\n\n Args:\n cluster_name (str): The name of the cluster for which to find an ID\n\n Returns:\n str: The ID of the cluster\n\n Raises:\n EmrError: No cluster with the specified name exists\n """\n check.str_param(cluster_name, "cluster_name")\n\n response = self.make_emr_client().list_clusters().get("Clusters", [])\n for cluster in response:\n if cluster["Name"] == cluster_name:\n return cluster["Id"]\n\n raise EmrError(f"cluster {cluster_name} not found in region {self.region}")\n\n @staticmethod\n def construct_step_dict_for_command(step_name, command, action_on_failure="CONTINUE"):\n """Construct an EMR step definition which uses command-runner.jar to execute a shell command\n on the EMR master.\n\n Args:\n step_name (str): The name of the EMR step (will show up in the EMR UI)\n command (str): The shell command to execute with command-runner.jar\n action_on_failure (str, optional): Configure action on failure (e.g., continue, or\n terminate the cluster). 
Defaults to 'CONTINUE'.\n\n Returns:\n dict: Step definition dict\n """\n check.str_param(step_name, "step_name")\n check.list_param(command, "command", of_type=str)\n check.str_param(action_on_failure, "action_on_failure")\n\n return {\n "Name": step_name,\n "ActionOnFailure": action_on_failure,\n "HadoopJarStep": {"Jar": "command-runner.jar", "Args": command},\n }\n\n def add_tags(self, log, tags, cluster_id):\n """Add tags in the dict tags to cluster cluster_id.\n\n Args:\n log (DagsterLogManager): Log manager, for logging\n tags (dict): Dictionary of {'key': 'value'} tags\n cluster_id (str): The ID of the cluster to tag\n """\n check.dict_param(tags, "tags")\n check.str_param(cluster_id, "cluster_id")\n\n tags_items = sorted(tags.items())\n\n self.make_emr_client().add_tags(\n ResourceId=cluster_id, Tags=[dict(Key=k, Value=v) for k, v in tags_items]\n )\n\n log.info(\n "Added EMR tags to cluster %s: %s"\n % (cluster_id, ", ".join("%s=%s" % (tag, value) for tag, value in tags_items))\n )\n\n def run_job_flow(self, log, cluster_config):\n """Create an empty cluster on EMR, and return the ID of that job flow.\n\n Args:\n log (DagsterLogManager): Log manager, for logging\n cluster_config (dict): Configuration for this EMR job flow. See:\n https://docs.aws.amazon.com/emr/latest/APIReference/API_RunJobFlow.html\n\n Returns:\n str: The cluster ID, e.g. "j-ZKIY4CKQRX72"\n """\n check.dict_param(cluster_config, "cluster_config")\n\n log.debug("Creating Elastic MapReduce cluster")\n emr_client = self.make_emr_client()\n\n log.debug(\n "Calling run_job_flow(%s)"\n % (", ".join("%s=%r" % (k, v) for k, v in sorted(cluster_config.items())))\n )\n cluster_id = emr_client.run_job_flow(**cluster_config)["JobFlowId"]\n\n log.info("Created new cluster %s" % cluster_id)\n\n # set EMR tags for the cluster\n tags_items = cluster_config.get("Tags", [])\n tags = {k: v for k, v in tags_items}\n tags["__dagster_version"] = dagster.__version__\n self.add_tags(log, tags, cluster_id)\n return cluster_id\n\n def describe_cluster(self, cluster_id):\n """Thin wrapper over boto3 describe_cluster.\n\n Args:\n cluster_id (str): Cluster to inspect\n\n Returns:\n dict: The cluster info. See:\n https://docs.aws.amazon.com/emr/latest/APIReference/API_DescribeCluster.html\n """\n check.str_param(cluster_id, "cluster_id")\n\n emr_client = self.make_emr_client()\n return emr_client.describe_cluster(ClusterId=cluster_id)\n\n def describe_step(self, cluster_id, step_id):\n """Thin wrapper over boto3 describe_step.\n\n Args:\n cluster_id (str): Cluster to inspect\n step_id (str): Step ID to describe\n\n Returns:\n dict: The step info. 
See:\n https://docs.aws.amazon.com/emr/latest/APIReference/API_DescribeStep.html\n """\n check.str_param(cluster_id, "cluster_id")\n check.str_param(step_id, "step_id")\n\n emr_client = self.make_emr_client()\n return emr_client.describe_step(ClusterId=cluster_id, StepId=step_id)\n\n def add_job_flow_steps(self, log, cluster_id, step_defs):\n """Submit the constructed job flow steps to EMR for execution.\n\n Args:\n log (DagsterLogManager): Log manager, for logging\n cluster_id (str): The ID of the cluster\n step_defs (List[dict]): List of steps; see also `construct_step_dict_for_command`\n\n Returns:\n List[str]: list of step IDs.\n """\n check.str_param(cluster_id, "cluster_id")\n check.list_param(step_defs, "step_defs", of_type=dict)\n\n emr_client = self.make_emr_client()\n\n steps_kwargs = dict(JobFlowId=cluster_id, Steps=step_defs)\n log.debug(\n "Calling add_job_flow_steps(%s)"\n % ",".join(("%s=%r" % (k, v)) for k, v in steps_kwargs.items())\n )\n return emr_client.add_job_flow_steps(**steps_kwargs)["StepIds"]\n\n def is_emr_step_complete(self, log, cluster_id, emr_step_id):\n step = self.describe_step(cluster_id, emr_step_id)["Step"]\n step_state = EmrStepState(step["Status"]["State"])\n\n if step_state == EmrStepState.Pending:\n cluster = self.describe_cluster(cluster_id)["Cluster"]\n\n reason = _get_reason(cluster)\n reason_desc = (": %s" % reason) if reason else ""\n\n log.info("PENDING (cluster is %s%s)" % (cluster["Status"]["State"], reason_desc))\n return False\n\n elif step_state == EmrStepState.Running:\n time_running_desc = ""\n\n start = step["Status"]["Timeline"].get("StartDateTime")\n if start:\n time_running_desc = " for %s" % strip_microseconds(_boto3_now() - start)\n\n log.info("RUNNING%s" % time_running_desc)\n return False\n\n # we're done, will return at the end of this\n elif step_state == EmrStepState.Completed:\n log.info("COMPLETED")\n return True\n else:\n # step has failed somehow. *reason* seems to only be set\n # when job is cancelled (e.g. 'Job terminated')\n reason = _get_reason(step)\n reason_desc = (" (%s)" % reason) if reason else ""\n\n log.info("%s%s" % (step_state.value, reason_desc))\n\n # print cluster status; this might give more context\n # why step didn't succeed\n cluster = self.describe_cluster(cluster_id)["Cluster"]\n reason = _get_reason(cluster)\n reason_desc = (": %s" % reason) if reason else ""\n log.info(\n "Cluster %s %s %s%s"\n % (\n cluster["Id"],\n "was" if "ED" in cluster["Status"]["State"] else "is",\n cluster["Status"]["State"],\n reason_desc,\n )\n )\n\n if EmrClusterState(cluster["Status"]["State"]) in EMR_CLUSTER_TERMINATED_STATES:\n # was it caused by IAM roles?\n self._check_for_missing_default_iam_roles(log, cluster)\n\n # TODO: extract logs here to surface failure reason\n # See: https://github.com/dagster-io/dagster/issues/1954\n\n if step_state == EmrStepState.Failed:\n log.error("EMR step %s failed" % emr_step_id)\n\n raise EmrError("EMR step %s failed" % emr_step_id)\n\n def _check_for_missing_default_iam_roles(self, log, cluster):\n """If cluster couldn't start due to missing IAM roles, tell user what to do."""\n check.dict_param(cluster, "cluster")\n\n reason = _get_reason(cluster)\n if any(\n reason.endswith("/%s is invalid" % role)\n for role in (_FALLBACK_INSTANCE_PROFILE, _FALLBACK_SERVICE_ROLE)\n ):\n log.warning(\n "IAM roles are missing. 
See documentation for IAM roles on EMR here: "\n "https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-iam-roles.html"\n )\n\n def log_location_for_cluster(self, cluster_id):\n """EMR clusters are typically launched with S3 logging configured. This method inspects a\n cluster using boto3 describe_cluster to retrieve the log URI.\n\n Args:\n cluster_id (str): The cluster to inspect.\n\n Raises:\n EmrError: the log URI was missing (S3 log mirroring not enabled for this cluster)\n\n Returns:\n (str, str): log bucket and key\n """\n check.str_param(cluster_id, "cluster_id")\n\n # The S3 log URI is specified per job flow (cluster)\n log_uri = self.describe_cluster(cluster_id)["Cluster"].get("LogUri", None)\n\n # ugh, seriously boto3?! This will come back as string "None"\n if log_uri == "None" or log_uri is None:\n raise EmrError("Log URI not specified, cannot retrieve step execution logs")\n\n # For some reason the API returns an s3n:// protocol log URI instead of s3://\n log_uri = re.sub("^s3n", "s3", log_uri)\n log_uri_parsed = urlparse(log_uri)\n log_bucket = log_uri_parsed.netloc\n log_key_prefix = log_uri_parsed.path.lstrip("/")\n return log_bucket, log_key_prefix\n\n def retrieve_logs_for_step_id(self, log, cluster_id, step_id):\n """Retrieves stdout and stderr logs for the given step ID.\n\n Args:\n log (DagsterLogManager): Log manager, for logging\n cluster_id (str): EMR cluster ID\n step_id (str): EMR step ID for the job that was submitted.\n\n Returns:\n (str, str): Tuple of stdout log string contents, and stderr log string contents\n """\n check.str_param(cluster_id, "cluster_id")\n check.str_param(step_id, "step_id")\n\n log_bucket, log_key_prefix = self.log_location_for_cluster(cluster_id)\n\n prefix = f"{log_key_prefix}{cluster_id}/steps/{step_id}"\n stdout_log = self.wait_for_log(log, log_bucket, f"{prefix}/stdout.gz")\n stderr_log = self.wait_for_log(log, log_bucket, f"{prefix}/stderr.gz")\n return stdout_log, stderr_log\n\n def wait_for_log(self, log, log_bucket, log_key, waiter_delay=30, waiter_max_attempts=20):\n """Wait for gzipped EMR logs to appear on S3. Note that EMR syncs logs to S3 every 5\n minutes, so this may take a long time.\n\n Args:\n log_bucket (str): S3 bucket where log is expected to appear\n log_key (str): S3 key for the log file\n waiter_delay (int): How long to wait between attempts to check S3 for the log file\n waiter_max_attempts (int): Number of attempts before giving up on waiting\n\n Raises:\n EmrError: Raised if we waited the full duration and the logs did not appear\n\n Returns:\n str: contents of the log file\n """\n check.str_param(log_bucket, "log_bucket")\n check.str_param(log_key, "log_key")\n check.int_param(waiter_delay, "waiter_delay")\n check.int_param(waiter_max_attempts, "waiter_max_attempts")\n\n log.info(f"Attempting to get log: s3://{log_bucket}/{log_key}")\n\n s3 = _wrap_aws_client(boto3.client("s3"), min_backoff=self.check_cluster_every)\n waiter = s3.get_waiter("object_exists")\n try:\n waiter.wait(\n Bucket=log_bucket,\n Key=log_key,\n WaiterConfig={"Delay": waiter_delay, "MaxAttempts": waiter_max_attempts},\n )\n except WaiterError as err:\n raise EmrError("EMR log file did not appear on S3 after waiting") from err\n\n obj = BytesIO(s3.get_object(Bucket=log_bucket, Key=log_key)["Body"].read())\n gzip_file = gzip.GzipFile(fileobj=obj)\n return gzip_file.read().decode("utf-8")
\n\n\ndef _get_reason(cluster_or_step):\n """Get state change reason message."""\n # StateChangeReason is {} before the first state change\n return cluster_or_step["Status"]["StateChangeReason"].get("Message", "")\n
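For reference, a minimal usage sketch of the `EmrJobRunner` API documented above. The region, cluster ID, and shell command are placeholders; running this would submit a real EMR step against the named cluster.

import time

from dagster import get_dagster_logger

from dagster_aws.emr import EmrJobRunner

log = get_dagster_logger()
runner = EmrJobRunner(region="us-west-2")  # placeholder region

# Build a command-runner.jar step and submit it to an existing cluster.
step_def = EmrJobRunner.construct_step_dict_for_command(
    step_name="say hello",
    command=["bash", "-c", "echo hello"],
    action_on_failure="CONTINUE",
)
step_ids = runner.add_job_flow_steps(log, "j-XXXXXXXXXXXXX", [step_def])

# Poll until the step completes; is_emr_step_complete raises EmrError if the
# step fails. Sleep between calls to avoid AWS rate limits.
while not runner.is_emr_step_complete(log, "j-XXXXXXXXXXXXX", step_ids[0]):
    time.sleep(30)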
", "current_page_name": "_modules/dagster_aws/emr/emr", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_aws.emr.emr"}, "pyspark_step_launcher": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_aws.emr.pyspark_step_launcher

\nimport os\nimport pickle\nimport sys\nimport tempfile\nimport time\n\nimport boto3\nfrom botocore.exceptions import ClientError\nfrom dagster import (\n    Field,\n    StringSource,\n    _check as check,\n    resource,\n)\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster._core.definitions.step_launcher import StepLauncher\nfrom dagster._core.errors import DagsterInvariantViolationError, raise_execution_interrupts\nfrom dagster._core.execution.plan.external_step import (\n    PICKLED_EVENTS_FILE_NAME,\n    PICKLED_STEP_RUN_REF_FILE_NAME,\n    step_context_to_step_run_ref,\n)\nfrom dagster._serdes import deserialize_value\n\nfrom dagster_aws.emr import EmrError, EmrJobRunner, emr_step_main\nfrom dagster_aws.emr.configs_spark import spark_config as get_spark_config\nfrom dagster_aws.utils.mrjob.log4j import parse_hadoop_log4j_records\n\n# On EMR, Spark is installed here\nEMR_SPARK_HOME = "/usr/lib/spark/"\n\nCODE_ZIP_NAME = "code.zip"\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n {\n "spark_config": get_spark_config(),\n "cluster_id": Field(\n StringSource, description="Name of the job flow (cluster) on which to execute."\n ),\n "region_name": Field(StringSource, description="The AWS region that the cluster is in."),\n "action_on_failure": Field(\n str,\n is_required=False,\n default_value="CANCEL_AND_WAIT",\n description=(\n "The EMR action to take when the cluster step fails: "\n "https://docs.aws.amazon.com/emr/latest/APIReference/API_StepConfig.html"\n ),\n ),\n "staging_bucket": Field(\n StringSource,\n is_required=True,\n description=(\n "S3 bucket to use for passing files between the plan process and EMR process."\n ),\n ),\n "staging_prefix": Field(\n StringSource,\n is_required=False,\n default_value="emr_staging",\n description=(\n "S3 key prefix inside the staging_bucket to use for files passed the plan "\n "process and EMR process"\n ),\n ),\n "wait_for_logs": Field(\n bool,\n is_required=False,\n default_value=False,\n description=(\n "If set, the system will wait for EMR logs to appear on S3. Note that logs "\n "are copied every 5 minutes, so enabling this will add several minutes to the job "\n "runtime."\n ),\n ),\n "local_job_package_path": Field(\n StringSource,\n is_required=False,\n description=(\n "Absolute path to the package that contains the job definition(s) whose steps will"\n " execute remotely on EMR. This is a path on the local fileystem of the process"\n " executing the job. The expectation is that this package will also be available on"\n " the python path of the launched process running the Spark step on EMR, either"\n " deployed on step launch via the deploy_local_job_package option, referenced on s3"\n " via the s3_job_package_path option, or installed on the cluster via bootstrap"\n " actions."\n ),\n ),\n "local_pipeline_package_path": Field(\n StringSource,\n is_required=False,\n description=(\n "(legacy) Absolute path to the package that contains the pipeline definition(s)"\n " whose steps will execute remotely on EMR. This is a path on the local fileystem"\n " of the process executing the pipeline. The expectation is that this package will"\n " also be available on the python path of the launched process running the Spark"\n " step on EMR, either deployed on step launch via the deploy_local_pipeline_package"\n " option, referenced on s3 via the s3_pipeline_package_path option, or installed on"\n " the cluster via bootstrap actions."\n ),\n ),\n "deploy_local_job_package": Field(\n bool,\n default_value=False,\n is_required=False,\n description=(\n "If set, before every step run, the launcher will zip up all the code in"\n " local_job_package_path, upload it to s3, and pass it to spark-submit's --py-files"\n " option. This gives the remote process access to up-to-date user code. If not set,"\n " the assumption is that some other mechanism is used for distributing code to the"\n " EMR cluster. If this option is set to True, s3_job_package_path should not also"\n " be set."\n ),\n ),\n "deploy_local_pipeline_package": Field(\n bool,\n default_value=False,\n is_required=False,\n description=(\n "(legacy) If set, before every step run, the launcher will zip up all the code in"\n " local_job_package_path, upload it to s3, and pass it to spark-submit's --py-files"\n " option. This gives the remote process access to up-to-date user code. If not set,"\n " the assumption is that some other mechanism is used for distributing code to the"\n " EMR cluster. 
If this option is set to True, s3_job_package_path should not also"\n " be set."\n ),\n ),\n "s3_job_package_path": Field(\n StringSource,\n is_required=False,\n description=(\n "If set, this path will be passed to the --py-files option of spark-submit. "\n "This should usually be a path to a zip file. If this option is set, "\n "deploy_local_job_package should not be set to True."\n ),\n ),\n "s3_pipeline_package_path": Field(\n StringSource,\n is_required=False,\n description=(\n "If set, this path will be passed to the --py-files option of spark-submit. "\n "This should usually be a path to a zip file. If this option is set, "\n "deploy_local_pipeline_package should not be set to True."\n ),\n ),\n }\n)\ndef emr_pyspark_step_launcher(context):\n # Resolve legacy arguments\n if context.resource_config.get("local_job_package_path") and context.resource_config.get(\n "local_pipeline_package_path"\n ):\n raise DagsterInvariantViolationError(\n "Provided both ``local_job_package_path`` and legacy version "\n "``local_pipeline_package_path`` arguments to ``emr_pyspark_step_launcher`` "\n "resource. Please choose one or the other."\n )\n\n if not context.resource_config.get(\n "local_job_package_path"\n ) and not context.resource_config.get("local_pipeline_package_path"):\n raise DagsterInvariantViolationError(\n "For resource ``emr_pyspark_step_launcher``, no config value provided for required "\n "schema entry ``local_job_package_path``."\n )\n\n local_job_package_path = context.resource_config.get(\n "local_job_package_path"\n ) or context.resource_config.get("local_pipeline_package_path")\n\n if context.resource_config.get("deploy_local_job_package") and context.resource_config.get(\n "deploy_local_pipeline_package"\n ):\n raise DagsterInvariantViolationError(\n "Provided both ``deploy_local_job_package`` and legacy version "\n "``deploy_local_pipeline_package`` arguments to ``emr_pyspark_step_launcher`` "\n "resource. Please choose one or the other."\n )\n\n deploy_local_job_package = context.resource_config.get(\n "deploy_local_job_package"\n ) or context.resource_config.get("deploy_local_pipeline_package")\n\n if context.resource_config.get("s3_job_package_path") and context.resource_config.get(\n "s3_pipeline_package_path"\n ):\n raise DagsterInvariantViolationError(\n "Provided both ``s3_job_package_path`` and legacy version "\n "``s3_pipeline_package_path`` arguments to ``emr_pyspark_step_launcher`` "\n "resource. Please choose one or the other."\n )\n\n s3_job_package_path = context.resource_config.get(\n "s3_job_package_path"\n ) or context.resource_config.get("s3_pipeline_package_path")\n\n return EmrPySparkStepLauncher(\n region_name=context.resource_config.get("region_name"),\n staging_bucket=context.resource_config.get("staging_bucket"),\n staging_prefix=context.resource_config.get("staging_prefix"),\n wait_for_logs=context.resource_config.get("wait_for_logs"),\n action_on_failure=context.resource_config.get("action_on_failure"),\n cluster_id=context.resource_config.get("cluster_id"),\n spark_config=context.resource_config.get("spark_config"),\n local_job_package_path=local_job_package_path,\n deploy_local_job_package=deploy_local_job_package,\n s3_job_package_path=s3_job_package_path,\n )
\n\n\nemr_pyspark_step_launcher.__doc__ = "\\n".join(\n "- **" + option + "**: " + (field.description or "")\n for option, field in emr_pyspark_step_launcher.config_schema.config_type.fields.items() # type: ignore\n)\n\n\nclass EmrPySparkStepLauncher(StepLauncher):\n def __init__(\n self,\n region_name,\n staging_bucket,\n staging_prefix,\n wait_for_logs,\n action_on_failure,\n cluster_id,\n spark_config,\n local_job_package_path,\n deploy_local_job_package,\n s3_job_package_path=None,\n ):\n self.region_name = check.str_param(region_name, "region_name")\n self.staging_bucket = check.str_param(staging_bucket, "staging_bucket")\n self.staging_prefix = check.str_param(staging_prefix, "staging_prefix")\n self.wait_for_logs = check.bool_param(wait_for_logs, "wait_for_logs")\n self.action_on_failure = check.str_param(action_on_failure, "action_on_failure")\n self.cluster_id = check.str_param(cluster_id, "cluster_id")\n self.spark_config = spark_config\n\n check.invariant(\n not deploy_local_job_package or not s3_job_package_path,\n "If deploy_local_job_package is set to True, s3_job_package_path should not "\n "also be set.",\n )\n\n self.local_job_package_path = check.str_param(\n local_job_package_path, "local_job_package_path"\n )\n self.deploy_local_job_package = check.bool_param(\n deploy_local_job_package, "deploy_local_job_package"\n )\n self.s3_job_package_path = check.opt_str_param(s3_job_package_path, "s3_job_package_path")\n\n self.emr_job_runner = EmrJobRunner(region=self.region_name)\n\n def _post_artifacts(self, log, step_run_ref, run_id, step_key):\n """Synchronize the step run ref and pyspark code to an S3 staging bucket for use on EMR.\n\n For the zip file, consider the following toy example:\n\n # Folder: my_pyspark_project/\n # a.py\n def foo():\n print(1)\n\n # b.py\n def bar():\n print(2)\n\n # main.py\n from a import foo\n from b import bar\n\n foo()\n bar()\n\n This will zip up `my_pyspark_project/` as `my_pyspark_project.zip`. 
Then, when running\n `spark-submit --py-files my_pyspark_project.zip emr_step_main.py` on EMR this will\n print 1, 2.\n """\n from dagster_pyspark.utils import build_pyspark_zip\n\n with tempfile.TemporaryDirectory() as temp_dir:\n s3 = boto3.client("s3", region_name=self.region_name)\n\n # Upload step run ref\n def _upload_file_to_s3(local_path, s3_filename):\n key = self._artifact_s3_key(run_id, step_key, s3_filename)\n s3_uri = self._artifact_s3_uri(run_id, step_key, s3_filename)\n log.debug(f"Uploading file {local_path} to {s3_uri}")\n s3.upload_file(Filename=local_path, Bucket=self.staging_bucket, Key=key)\n\n # Upload main file.\n # The remote Dagster installation should also have the file, but locating it there\n # could be a pain.\n main_local_path = self._main_file_local_path()\n _upload_file_to_s3(main_local_path, self._main_file_name())\n\n if self.deploy_local_job_package:\n # Zip and upload package containing job\n zip_local_path = os.path.join(temp_dir, CODE_ZIP_NAME)\n\n build_pyspark_zip(zip_local_path, self.local_job_package_path)\n _upload_file_to_s3(zip_local_path, CODE_ZIP_NAME)\n\n # Create step run ref pickle file\n step_run_ref_local_path = os.path.join(temp_dir, PICKLED_STEP_RUN_REF_FILE_NAME)\n with open(step_run_ref_local_path, "wb") as step_pickle_file:\n pickle.dump(step_run_ref, step_pickle_file)\n\n _upload_file_to_s3(step_run_ref_local_path, PICKLED_STEP_RUN_REF_FILE_NAME)\n\n def launch_step(self, step_context):\n step_run_ref = step_context_to_step_run_ref(step_context, self.local_job_package_path)\n\n run_id = step_context.dagster_run.run_id\n log = step_context.log\n\n step_key = step_run_ref.step_key\n self._post_artifacts(log, step_run_ref, run_id, step_key)\n\n emr_step_def = self._get_emr_step_def(run_id, step_key, step_context.op.name)\n emr_step_id = self.emr_job_runner.add_job_flow_steps(log, self.cluster_id, [emr_step_def])[\n 0\n ]\n\n yield from self.wait_for_completion_and_log(run_id, step_key, emr_step_id, step_context)\n\n def wait_for_completion_and_log(self, run_id, step_key, emr_step_id, step_context):\n s3 = boto3.resource("s3", region_name=self.region_name)\n try:\n for event in self.wait_for_completion(step_context, s3, run_id, step_key, emr_step_id):\n yield event\n except EmrError as emr_error:\n if self.wait_for_logs:\n self._log_logs_from_s3(step_context.log, emr_step_id)\n raise emr_error\n\n if self.wait_for_logs:\n self._log_logs_from_s3(step_context.log, emr_step_id)\n\n def wait_for_completion(\n self, step_context, s3, run_id, step_key, emr_step_id, check_interval=15\n ):\n """We want to wait for the EMR steps to complete, and while that's happening, we want to\n yield any events that have been written to S3 for us by the remote process.\n After the the EMR steps complete, we want a final chance to fetch events before finishing\n the step.\n """\n done = False\n all_events = []\n # If this is being called within a `capture_interrupts` context, allow interrupts\n # while waiting for the pyspark execution to complete, so that we can terminate slow or\n # hanging steps\n while not done:\n with raise_execution_interrupts():\n time.sleep(check_interval) # AWS rate-limits us if we poll it too often\n done = self.emr_job_runner.is_emr_step_complete(\n step_context.log, self.cluster_id, emr_step_id\n )\n\n all_events_new = self.read_events(s3, run_id, step_key)\n\n if len(all_events_new) > len(all_events):\n for i in range(len(all_events), len(all_events_new)):\n event = all_events_new[i]\n # write each event from the EMR instance to 
the local instance\n step_context.instance.handle_new_event(event)\n if event.is_dagster_event:\n yield event.dagster_event\n all_events = all_events_new\n\n def read_events(self, s3, run_id, step_key):\n events_s3_obj = s3.Object(\n self.staging_bucket, self._artifact_s3_key(run_id, step_key, PICKLED_EVENTS_FILE_NAME)\n )\n\n try:\n events_data = events_s3_obj.get()["Body"].read()\n return deserialize_value(pickle.loads(events_data))\n except ClientError as ex:\n # The file might not be there yet, which is fine\n if ex.response["Error"]["Code"] == "NoSuchKey":\n return []\n else:\n raise ex\n\n def _log_logs_from_s3(self, log, emr_step_id):\n """Retrieves the logs from the remote PySpark process that EMR posted to S3 and logs\n them to the given log.\n """\n stdout_log, stderr_log = self.emr_job_runner.retrieve_logs_for_step_id(\n log, self.cluster_id, emr_step_id\n )\n # Since stderr is YARN / Hadoop Log4J output, parse and reformat those log lines for\n # Dagster's logging system.\n records = parse_hadoop_log4j_records(stderr_log)\n for record in records:\n if record.level:\n log.log(\n level=record.level,\n msg="".join(["Spark Driver stderr: ", record.logger, ": ", record.message]),\n )\n else:\n log.debug(f"Spark Driver stderr: {record.message}")\n\n sys.stdout.write(\n "---------- Spark Driver stdout: ----------\\n"\n + stdout_log\n + "\\n"\n + "---------- End of Spark Driver stdout ----------\\n"\n )\n\n def _get_emr_step_def(self, run_id, step_key, solid_name):\n """From the local Dagster instance, construct EMR steps that will kick off execution on a\n remote EMR cluster.\n """\n from dagster_spark.utils import flatten_dict, format_for_cli\n\n action_on_failure = self.action_on_failure\n\n # Execute Solid via spark-submit\n conf = dict(flatten_dict(self.spark_config))\n conf["spark.app.name"] = conf.get("spark.app.name", solid_name)\n\n check.invariant(\n conf.get("spark.master", "yarn") == "yarn",\n desc=(\n "spark.master is configured as %s; cannot set Spark master on EMR to anything "\n 'other than "yarn"'\n )\n % conf.get("spark.master"),\n )\n\n command = (\n [\n EMR_SPARK_HOME + "bin/spark-submit",\n "--master",\n "yarn",\n "--deploy-mode",\n conf.get("spark.submit.deployMode", "client"),\n ]\n + format_for_cli(list(flatten_dict(conf)))\n + [\n "--py-files",\n self._artifact_s3_uri(run_id, step_key, CODE_ZIP_NAME),\n self._artifact_s3_uri(run_id, step_key, self._main_file_name()),\n self.staging_bucket,\n self._artifact_s3_key(run_id, step_key, PICKLED_STEP_RUN_REF_FILE_NAME),\n ]\n )\n\n return EmrJobRunner.construct_step_dict_for_command(\n "Execute Solid/Op %s" % solid_name, command, action_on_failure=action_on_failure\n )\n\n def _main_file_name(self):\n return os.path.basename(self._main_file_local_path())\n\n def _main_file_local_path(self):\n return emr_step_main.__file__\n\n def _sanitize_step_key(self, step_key: str) -> str:\n # step_keys of dynamic steps contain brackets, which are invalid characters\n return step_key.replace("[", "__").replace("]", "__")\n\n def _artifact_s3_uri(self, run_id, step_key, filename):\n key = self._artifact_s3_key(run_id, self._sanitize_step_key(step_key), filename)\n return f"s3://{self.staging_bucket}/{key}"\n\n def _artifact_s3_key(self, run_id, step_key, filename):\n return "/".join(\n [\n self.staging_prefix,\n run_id,\n self._sanitize_step_key(step_key),\n os.path.basename(filename),\n ]\n )\n
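A hedged configuration sketch for the `emr_pyspark_step_launcher` resource defined above, assuming the documented `dagster_aws.emr` import path. Every value (cluster ID, region, bucket, package path) is a placeholder; only the field names are taken from the config schema shown in the source.

from dagster_aws.emr import emr_pyspark_step_launcher

# Placeholder values; field names match the resource's config schema above.
my_emr_step_launcher = emr_pyspark_step_launcher.configured(
    {
        "cluster_id": "j-XXXXXXXXXXXXX",
        "region_name": "us-west-2",
        "staging_bucket": "my-emr-staging-bucket",
        "staging_prefix": "emr_staging",
        "wait_for_logs": False,
        "local_job_package_path": "/path/to/my_pyspark_project",
        "deploy_local_job_package": True,
    }
)

Ops intended to execute remotely on EMR would then reference this resource as their step launcher (typically via `required_resource_keys`), so that each step is zipped, staged to the S3 bucket, and run via spark-submit on the configured cluster as described in `_post_artifacts` and `launch_step` above.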
", "current_page_name": "_modules/dagster_aws/emr/pyspark_step_launcher", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_aws.emr.pyspark_step_launcher"}, "types": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_aws.emr.types

\nfrom enum import Enum as PyEnum\n\nfrom dagster import Enum, EnumValue\n\nEbsVolumeType = Enum(\n    name="EbsVolumeType", enum_values=[EnumValue("gp2"), EnumValue("io1"), EnumValue("standard")]\n)\n\n\n
[docs]class EmrClusterState(PyEnum):\n """Cluster state for EMR."""\n\n Starting = "STARTING"\n Bootstrapping = "BOOTSTRAPPING"\n Running = "RUNNING"\n Waiting = "WAITING"\n Terminating = "TERMINATING"\n Terminated = "TERMINATED"\n TerminatedWithErrors = "TERMINATED_WITH_ERRORS"
\n\n\nEMR_CLUSTER_TERMINATED_STATES = [\n EmrClusterState.Terminating,\n EmrClusterState.Terminated,\n EmrClusterState.TerminatedWithErrors,\n]\n\nEMR_CLUSTER_DONE_STATES = EMR_CLUSTER_TERMINATED_STATES + [EmrClusterState.Waiting]\n\n\n
[docs]class EmrStepState(PyEnum):\n """Step state for EMR."""\n\n Pending = "PENDING"\n Running = "RUNNING"\n Continue = "CONTINUE"\n Completed = "COMPLETED"\n Cancelled = "CANCELLED"\n Failed = "FAILED"\n Interrupted = "INTERRUPTED"
\n\n\nEmrActionOnFailure = Enum(\n name="EmrActionOnFailure",\n enum_values=[\n EnumValue("TERMINATE_JOB_FLOW"),\n EnumValue("TERMINATE_CLUSTER"),\n EnumValue("CANCEL_AND_WAIT"),\n EnumValue("CONTINUE"),\n ],\n)\n\nEmrAdjustmentType = Enum(\n name="EmrAdjustmentType",\n enum_values=[\n EnumValue("CHANGE_IN_CAPACITY"),\n EnumValue("PERCENT_CHANGE_IN_CAPACITY"),\n EnumValue("EXACT_CAPACITY"),\n ],\n)\n\nEmrComparisonOperator = Enum(\n name="EmrComparisonOperator",\n enum_values=[\n EnumValue("GREATER_THAN_OR_EQUAL"),\n EnumValue("GREATER_THAN"),\n EnumValue("LESS_THAN"),\n EnumValue("LESS_THAN_OR_EQUAL"),\n ],\n)\n\nEmrInstanceRole = Enum(\n name="EmrInstanceRole", enum_values=[EnumValue("MASTER"), EnumValue("CORE"), EnumValue("TASK")]\n)\n\nEmrMarket = Enum(name="EmrMarket", enum_values=[EnumValue("ON_DEMAND"), EnumValue("SPOT")])\n\nEmrRepoUpgradeOnBoot = Enum(\n name="EmrRepoUpgradeOnBoot", enum_values=[EnumValue("SECURITY"), EnumValue("NONE")]\n)\n\nEmrScaleDownBehavior = Enum(\n name="EmrScaleDownBehavior",\n enum_values=[\n EnumValue("TERMINATE_AT_INSTANCE_HOUR"),\n EnumValue("TERMINATE_AT_TASK_COMPLETION"),\n ],\n)\n\nEmrStatistic = Enum(\n name="EmrStatistic",\n enum_values=[\n EnumValue("SAMPLE_COUNT"),\n EnumValue("AVERAGE"),\n EnumValue("SUM"),\n EnumValue("MINIMUM"),\n EnumValue("MAXIMUM"),\n ],\n)\n\nEmrSupportedProducts = Enum(\n name="EmrSupportedProducts", enum_values=[EnumValue("mapr-m3"), EnumValue("mapr-m5")]\n)\n\nEmrTimeoutAction = Enum(\n name="EmrTimeoutAction",\n enum_values=[EnumValue("SWITCH_TO_ON_DEMAND"), EnumValue("TERMINATE_CLUSTER")],\n)\n\nEmrUnit = Enum(\n name="EmrUnit",\n enum_values=[\n EnumValue("NONE"),\n EnumValue("SECONDS"),\n EnumValue("MICRO_SECONDS"),\n EnumValue("MILLI_SECONDS"),\n EnumValue("BYTES"),\n EnumValue("KILO_BYTES"),\n EnumValue("MEGA_BYTES"),\n EnumValue("GIGA_BYTES"),\n EnumValue("TERA_BYTES"),\n EnumValue("BITS"),\n EnumValue("KILO_BITS"),\n EnumValue("MEGA_BITS"),\n EnumValue("GIGA_BITS"),\n EnumValue("TERA_BITS"),\n EnumValue("PERCENT"),\n EnumValue("COUNT"),\n EnumValue("BYTES_PER_SECOND"),\n EnumValue("KILO_BYTES_PER_SECOND"),\n EnumValue("MEGA_BYTES_PER_SECOND"),\n EnumValue("GIGA_BYTES_PER_SECOND"),\n EnumValue("TERA_BYTES_PER_SECOND"),\n EnumValue("BITS_PER_SECOND"),\n EnumValue("KILO_BITS_PER_SECOND"),\n EnumValue("MEGA_BITS_PER_SECOND"),\n EnumValue("GIGA_BITS_PER_SECOND"),\n EnumValue("TERA_BITS_PER_SECOND"),\n EnumValue("COUNT_PER_SECOND"),\n ],\n)\n
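A small sketch showing how the state enums and terminal-state list above are meant to be combined when inspecting boto3 `DescribeCluster` output; the `status` dict below is a stand-in, not real API output.

from dagster_aws.emr.types import EMR_CLUSTER_TERMINATED_STATES, EmrClusterState

status = {"State": "TERMINATED_WITH_ERRORS"}  # stand-in for Cluster["Status"]

state = EmrClusterState(status["State"])
if state in EMR_CLUSTER_TERMINATED_STATES:
    print(f"cluster reached a terminal state: {state.value}")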
", "current_page_name": "_modules/dagster_aws/emr/types", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_aws.emr.types"}}, "redshift": {"resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_aws.redshift.resources

\nimport abc\nfrom contextlib import contextmanager\nfrom logging import Logger\nfrom typing import Any, Dict, Optional, cast\n\nimport psycopg2\nimport psycopg2.extensions\nfrom dagster import (\n    ConfigurableResource,\n    _check as check,\n    get_dagster_logger,\n    resource,\n)\nfrom dagster._annotations import deprecated\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom pydantic import Field\n\n\nclass RedshiftError(Exception):\n    pass\n\n\nclass BaseRedshiftClient(abc.ABC):\n    @abc.abstractmethod\n    def execute_query(self, query, fetch_results=False, cursor_factory=None, error_callback=None):\n        pass\n\n    @abc.abstractmethod\n    def execute_queries(\n        self, queries, fetch_results=False, cursor_factory=None, error_callback=None\n    ):\n        pass\n\n\nclass RedshiftClient(BaseRedshiftClient):\n    def __init__(self, conn_args: Dict[str, Any], autocommit: Optional[bool], log: Logger):\n        # Extract parameters from resource config\n        self.conn_args = conn_args\n\n        self.autocommit = autocommit\n        self.log = log\n\n    def execute_query(self, query, fetch_results=False, cursor_factory=None, error_callback=None):\n        """Synchronously execute a single query against Redshift. Will return a list of rows, where\n        each row is a tuple of values, e.g. SELECT 1 will return [(1,)].\n\n        Args:\n            query (str): The query to execute.\n            fetch_results (Optional[bool]): Whether to return the results of executing the query.\n                Defaults to False, in which case the query will be executed without retrieving the\n                results.\n            cursor_factory (Optional[:py:class:`psycopg2.extensions.cursor`]): An alternative\n                cursor_factory; defaults to None. Will be used when constructing the cursor.\n            error_callback (Optional[Callable[[Exception, Cursor, DagsterLogManager], None]]): A\n                callback function, invoked when an exception is encountered during query execution;\n                this is intended to support executing additional queries to provide diagnostic\n                information, e.g. by querying ``stl_load_errors`` using ``pg_last_copy_id()``. If no\n                function is provided, exceptions during query execution will be raised directly.\n\n        Returns:\n            Optional[List[Tuple[Any, ...]]]: Results of the query, as a list of tuples, when\n                fetch_results is set. 
Otherwise return None.\n        """\n        check.str_param(query, "query")\n        check.bool_param(fetch_results, "fetch_results")\n        check.opt_class_param(\n            cursor_factory, "cursor_factory", superclass=psycopg2.extensions.cursor\n        )\n        check.opt_callable_param(error_callback, "error_callback")\n\n        with self._get_conn() as conn:\n            with self._get_cursor(conn, cursor_factory=cursor_factory) as cursor:\n                try:\n                    self.log.info(f"Executing query '{query}'")\n                    cursor.execute(query)\n\n                    if fetch_results and cursor.rowcount > 0:\n                        return cursor.fetchall()\n                    else:\n                        self.log.info("Empty result from query")\n\n                except Exception as e:\n                    # If autocommit is disabled or not set (it is disabled by default), Redshift\n                    # will be in the middle of a transaction at exception time, and because of\n                    # the failure the current transaction will not accept any further queries.\n                    #\n                    # This conn.commit() call closes the open transaction before handing off\n                    # control to the error callback, so that the user can issue additional\n                    # queries. Notably, for e.g. pg_last_copy_id() to work, it requires you to\n                    # use the same conn/cursor, so you have to do this conn.commit() to ensure\n                    # things are in a usable state in the error callback.\n                    if not self.autocommit:\n                        conn.commit()\n\n                    if error_callback is not None:\n                        error_callback(e, cursor, self.log)\n                    else:\n                        raise\n\n    def execute_queries(\n        self, queries, fetch_results=False, cursor_factory=None, error_callback=None\n    ):\n        """Synchronously execute a list of queries against Redshift. Will return a list of list of\n        rows, where each row is a tuple of values, e.g. ['SELECT 1', 'SELECT 1'] will return\n        [[(1,)], [(1,)]].\n\n        Args:\n            queries (List[str]): The queries to execute.\n            fetch_results (Optional[bool]): Whether to return the results of executing the query.\n                Defaults to False, in which case the query will be executed without retrieving the\n                results.\n            cursor_factory (Optional[:py:class:`psycopg2.extensions.cursor`]): An alternative\n            cursor_factory; defaults to None. Will be used when constructing the cursor.\n            error_callback (Optional[Callable[[Exception, Cursor, DagsterLogManager], None]]): A\n                callback function, invoked when an exception is encountered during query execution;\n                this is intended to support executing additional queries to provide diagnostic\n                information, e.g. by querying ``stl_load_errors`` using ``pg_last_copy_id()``. If no\n                function is provided, exceptions during query execution will be raised directly.\n\n        Returns:\n            Optional[List[List[Tuple[Any, ...]]]]: Results of the query, as a list of list of\n                tuples, when fetch_results is set. 
Otherwise return None.\n        """\n        check.list_param(queries, "queries", of_type=str)\n        check.bool_param(fetch_results, "fetch_results")\n        check.opt_class_param(\n            cursor_factory, "cursor_factory", superclass=psycopg2.extensions.cursor\n        )\n        check.opt_callable_param(error_callback, "error_callback")\n\n        results = []\n        with self._get_conn() as conn:\n            with self._get_cursor(conn, cursor_factory=cursor_factory) as cursor:\n                for query in queries:\n                    try:\n                        self.log.info(f"Executing query '{query}'")\n                        cursor.execute(query)\n\n                        if fetch_results and cursor.rowcount > 0:\n                            results.append(cursor.fetchall())\n                        else:\n                            results.append([])\n                            self.log.info("Empty result from query")\n\n                    except Exception as e:\n                        # If autocommit is disabled or not set (it is disabled by default), Redshift\n                        # will be in the middle of a transaction at exception time, and because of\n                        # the failure the current transaction will not accept any further queries.\n                        #\n                        # This conn.commit() call closes the open transaction before handing off\n                        # control to the error callback, so that the user can issue additional\n                        # queries. Notably, for e.g. pg_last_copy_id() to work, it requires you to\n                        # use the same conn/cursor, so you have to do this conn.commit() to ensure\n                        # things are in a usable state in the error callback.\n                        if not self.autocommit:\n                            conn.commit()\n\n                        if error_callback is not None:\n                            error_callback(e, cursor, self.log)\n                        else:\n                            raise\n\n        if fetch_results:\n            return results\n\n    @contextmanager\n    def _get_conn(self):\n        conn = None\n        try:\n            conn = psycopg2.connect(**self.conn_args)\n            yield conn\n        finally:\n            if conn:\n                conn.close()\n\n    @contextmanager\n    def _get_cursor(self, conn, cursor_factory=None):\n        check.opt_class_param(\n            cursor_factory, "cursor_factory", superclass=psycopg2.extensions.cursor\n        )\n\n        # Could be none, in which case we should respect the connection default. Otherwise\n        # explicitly set to true/false.\n        if self.autocommit is not None:\n            conn.autocommit = self.autocommit\n\n        with conn:\n            with conn.cursor(cursor_factory=cursor_factory) as cursor:\n                yield cursor\n\n            # If autocommit is set, we'll commit after each and every query execution. 
Otherwise, we\n            # want to do a final commit after we're wrapped up executing the full set of one or more\n            # queries.\n            if not self.autocommit:\n                conn.commit()\n\n\n@deprecated(breaking_version="2.0", additional_warn_text="Use RedshiftClientResource instead.")\nclass RedshiftResource(RedshiftClient):\n    """This class was used by the function-style Redshift resource."""\n\n\nclass FakeRedshiftClient(BaseRedshiftClient):\n    QUERY_RESULT = [(1,)]\n\n    def __init__(self, log: Logger):\n        # Extract parameters from resource config\n\n        self.log = log\n\n    def execute_query(self, query, fetch_results=False, cursor_factory=None, error_callback=None):\n        """Fake for execute_query; returns [self.QUERY_RESULT].\n\n        Args:\n            query (str): The query to execute.\n            fetch_results (Optional[bool]): Whether to return the results of executing the query.\n                Defaults to False, in which case the query will be executed without retrieving the\n                results.\n            cursor_factory (Optional[:py:class:`psycopg2.extensions.cursor`]): An alternative\n                cursor_factory; defaults to None. Will be used when constructing the cursor.\n            error_callback (Optional[Callable[[Exception, Cursor, DagsterLogManager], None]]): A\n                callback function, invoked when an exception is encountered during query execution;\n                this is intended to support executing additional queries to provide diagnostic\n                information, e.g. by querying ``stl_load_errors`` using ``pg_last_copy_id()``. If no\n                function is provided, exceptions during query execution will be raised directly.\n\n        Returns:\n            Optional[List[Tuple[Any, ...]]]: Results of the query, as a list of tuples, when\n                fetch_results is set. Otherwise return None.\n        """\n        check.str_param(query, "query")\n        check.bool_param(fetch_results, "fetch_results")\n        check.opt_class_param(\n            cursor_factory, "cursor_factory", superclass=psycopg2.extensions.cursor\n        )\n        check.opt_callable_param(error_callback, "error_callback")\n\n        self.log.info(f"Executing query '{query}'")\n        if fetch_results:\n            return self.QUERY_RESULT\n\n    def execute_queries(\n        self, queries, fetch_results=False, cursor_factory=None, error_callback=None\n    ):\n        """Fake for execute_queries; returns [self.QUERY_RESULT] * 3.\n\n        Args:\n            queries (List[str]): The queries to execute.\n            fetch_results (Optional[bool]): Whether to return the results of executing the query.\n                Defaults to False, in which case the query will be executed without retrieving the\n                results.\n            cursor_factory (Optional[:py:class:`psycopg2.extensions.cursor`]): An alternative\n                cursor_factory; defaults to None. Will be used when constructing the cursor.\n            error_callback (Optional[Callable[[Exception, Cursor, DagsterLogManager], None]]): A\n                callback function, invoked when an exception is encountered during query execution;\n                this is intended to support executing additional queries to provide diagnostic\n                information, e.g. by querying ``stl_load_errors`` using ``pg_last_copy_id()``. 
If no\n                function is provided, exceptions during query execution will be raised directly.\n\n        Returns:\n            Optional[List[List[Tuple[Any, ...]]]]: Results of the query, as a list of list of\n                tuples, when fetch_results is set. Otherwise return None.\n        """\n        check.list_param(queries, "queries", of_type=str)\n        check.bool_param(fetch_results, "fetch_results")\n        check.opt_class_param(\n            cursor_factory, "cursor_factory", superclass=psycopg2.extensions.cursor\n        )\n        check.opt_callable_param(error_callback, "error_callback")\n\n        for query in queries:\n            self.log.info(f"Executing query '{query}'")\n        if fetch_results:\n            return [self.QUERY_RESULT] * 3\n\n\n@deprecated(breaking_version="2.0", additional_warn_text="Use FakeRedshiftClientResource instead.")\nclass FakeRedshiftResource(FakeRedshiftClient):\n    """This class was used by the function-style fake Redshift resource."""\n\n\n
[docs]class RedshiftClientResource(ConfigurableResource):\n """This resource enables connecting to a Redshift cluster and issuing queries against that\n cluster.\n\n Example:\n .. code-block:: python\n\n from dagster import Definitions, asset, EnvVar\n from dagster_aws.redshift import RedshiftClientResource\n\n @asset\n def example_redshift_asset(context, redshift: RedshiftClientResource):\n redshift.get_client().execute_query('SELECT 1', fetch_results=True)\n\n redshift_configured = RedshiftClientResource(\n host='my-redshift-cluster.us-east-1.redshift.amazonaws.com',\n port=5439,\n user='dagster',\n password=EnvVar("DAGSTER_REDSHIFT_PASSWORD"),\n database='dev',\n )\n\n defs = Definitions(\n assets=[example_redshift_asset],\n resources={'redshift': redshift_configured},\n )\n\n """\n\n host: str = Field(description="Redshift host")\n port: int = Field(default=5439, description="Redshift port")\n user: Optional[str] = Field(default=None, description="Username for Redshift connection")\n password: Optional[str] = Field(default=None, description="Password for Redshift connection")\n database: Optional[str] = Field(\n default=None,\n description=(\n "Name of the default database to use. After login, you can use USE DATABASE to change"\n " the database."\n ),\n )\n autocommit: Optional[bool] = Field(default=None, description="Whether to autocommit queries")\n connect_timeout: int = Field(\n default=5, description="Timeout for connection to Redshift cluster. Defaults to 5 seconds."\n )\n sslmode: str = Field(\n default="require",\n description=(\n "SSL mode to use. See the Redshift documentation for reference:"\n " https://docs.aws.amazon.com/redshift/latest/mgmt/connecting-ssl-support.html"\n ),\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def get_client(self) -> RedshiftClient:\n conn_args = {\n k: getattr(self, k, None)\n for k in (\n "host",\n "port",\n "user",\n "password",\n "database",\n "connect_timeout",\n "sslmode",\n )\n if getattr(self, k, None) is not None\n }\n\n return RedshiftClient(conn_args, self.autocommit, get_dagster_logger())
\n\n\n
[docs]class FakeRedshiftClientResource(RedshiftClientResource):\n """Fake Redshift client resource for tests; ``get_client`` returns a ``FakeRedshiftClient`` that never connects to a cluster."""\n\n def get_client(self) -> FakeRedshiftClient:\n return FakeRedshiftClient(get_dagster_logger())
\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n config_schema=RedshiftClientResource.to_config_schema(),\n description="Resource for connecting to the Redshift data warehouse",\n)\ndef redshift_resource(context) -> RedshiftClient:\n """This resource enables connecting to a Redshift cluster and issuing queries against that\n cluster.\n\n Example:\n .. code-block:: python\n\n from dagster import build_op_context, op\n from dagster_aws.redshift import redshift_resource\n\n @op(required_resource_keys={'redshift'})\n def example_redshift_op(context):\n return context.resources.redshift.execute_query('SELECT 1', fetch_results=True)\n\n redshift_configured = redshift_resource.configured({\n 'host': 'my-redshift-cluster.us-east-1.redshift.amazonaws.com',\n 'port': 5439,\n 'user': 'dagster',\n 'password': 'dagster',\n 'database': 'dev',\n })\n context = build_op_context(resources={'redshift': redshift_configured})\n assert example_redshift_op(context) == [(1,)]\n\n """\n return RedshiftClientResource.from_resource_context(context).get_client()
\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n config_schema=FakeRedshiftClientResource.to_config_schema(),\n description=(\n "Fake resource for connecting to the Redshift data warehouse. Usage is identical "\n "to the real redshift_resource. Will always return [(1,)] for the single query case and "\n "[[(1,)], [(1,)], [(1,)]] for the multi query case."\n ),\n)\ndef fake_redshift_resource(context) -> FakeRedshiftClient:\n return cast(\n FakeRedshiftClient,\n FakeRedshiftClientResource.from_resource_context(context).get_client(),\n )
\n
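The fake client defined above makes it possible to exercise Redshift-dependent code without a cluster. A minimal sketch, assuming ``FakeRedshiftClientResource`` is importable from ``dagster_aws.redshift`` and using a placeholder ``host``:

.. code-block:: python

    from dagster_aws.redshift import FakeRedshiftClientResource

    # The fake resource hands back a FakeRedshiftClient, which always returns
    # [(1,)] for a single query when fetch_results=True -- useful in unit tests.
    fake_redshift = FakeRedshiftClientResource(host="fake-host")  # "fake-host" is a placeholder
    assert fake_redshift.get_client().execute_query("SELECT 1", fetch_results=True) == [(1,)]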
", "current_page_name": "_modules/dagster_aws/redshift/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_aws.redshift.resources"}}, "s3": {"compute_log_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_aws.s3.compute_log_manager

\nimport os\nfrom contextlib import contextmanager\nfrom typing import Any, Iterator, Mapping, Optional, Sequence\n\nimport boto3\nimport dagster._seven as seven\nfrom botocore.errorfactory import ClientError\nfrom dagster import (\n    Field,\n    Permissive,\n    StringSource,\n    _check as check,\n)\nfrom dagster._config.config_type import Noneable\nfrom dagster._core.storage.captured_log_manager import CapturedLogContext\nfrom dagster._core.storage.cloud_storage_compute_log_manager import (\n    CloudStorageComputeLogManager,\n    PollingComputeLogSubscriptionManager,\n)\nfrom dagster._core.storage.compute_log_manager import ComputeIOType\nfrom dagster._core.storage.local_compute_log_manager import (\n    IO_TYPE_EXTENSION,\n    LocalComputeLogManager,\n)\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData\nfrom dagster._utils import ensure_dir, ensure_file\nfrom typing_extensions import Self\n\nPOLLING_INTERVAL = 5\n\n\n
[docs]class S3ComputeLogManager(CloudStorageComputeLogManager, ConfigurableClass):\n """Logs compute function stdout and stderr to S3.\n\n Users should not instantiate this class directly. Instead, use a YAML block in ``dagster.yaml``\n such as the following:\n\n .. code-block:: YAML\n\n compute_logs:\n module: dagster_aws.s3.compute_log_manager\n class: S3ComputeLogManager\n config:\n bucket: "mycorp-dagster-compute-logs"\n local_dir: "/tmp/cool"\n prefix: "dagster-test-"\n use_ssl: true\n verify: true\n verify_cert_path: "/path/to/cert/bundle.pem"\n endpoint_url: "http://alternate-s3-host.io"\n skip_empty_files: true\n upload_interval: 30\n upload_extra_args:\n ServerSideEncryption: "AES256"\n show_url_only: false\n region: "us-west-1"\n\n Args:\n bucket (str): The name of the s3 bucket to which to log.\n local_dir (Optional[str]): Path to the local directory in which to stage logs. Default:\n ``dagster._seven.get_system_temp_directory()``.\n prefix (Optional[str]): Prefix for the log file keys.\n use_ssl (Optional[bool]): Whether or not to use SSL. Default True.\n verify (Optional[bool]): Whether or not to verify SSL certificates. Default True.\n verify_cert_path (Optional[str]): A filename of the CA cert bundle to use. Only used if\n `verify` set to False.\n endpoint_url (Optional[str]): Override for the S3 endpoint url.\n skip_empty_files: (Optional[bool]): Skip upload of empty log files.\n upload_interval: (Optional[int]): Interval in seconds to upload partial log files to S3. By default, will only upload when the capture is complete.\n upload_extra_args: (Optional[dict]): Extra args for S3 file upload\n show_url_only: (Optional[bool]): Only show the URL of the log file in the UI, instead of fetching and displaying the full content. Default False.\n region: (Optional[str]): The region of the S3 bucket. 
If not specified, will use the default region of the AWS session.\n inst_data (Optional[ConfigurableClassData]): Serializable representation of the compute\n log manager when newed up from config.\n """\n\n def __init__(\n self,\n bucket,\n local_dir=None,\n inst_data: Optional[ConfigurableClassData] = None,\n prefix="dagster",\n use_ssl=True,\n verify=True,\n verify_cert_path=None,\n endpoint_url=None,\n skip_empty_files=False,\n upload_interval=None,\n upload_extra_args=None,\n show_url_only=False,\n region=None,\n ):\n _verify = False if not verify else verify_cert_path\n self._s3_session = boto3.resource(\n "s3", use_ssl=use_ssl, verify=_verify, endpoint_url=endpoint_url\n ).meta.client\n self._s3_bucket = check.str_param(bucket, "bucket")\n self._s3_prefix = self._clean_prefix(check.str_param(prefix, "prefix"))\n\n # proxy calls to local compute log manager (for subscriptions, etc)\n if not local_dir:\n local_dir = seven.get_system_temp_directory()\n\n self._local_manager = LocalComputeLogManager(local_dir)\n self._subscription_manager = PollingComputeLogSubscriptionManager(self)\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n self._skip_empty_files = check.bool_param(skip_empty_files, "skip_empty_files")\n self._upload_interval = check.opt_int_param(upload_interval, "upload_interval")\n check.opt_dict_param(upload_extra_args, "upload_extra_args")\n self._upload_extra_args = upload_extra_args\n self._show_url_only = show_url_only\n if region is None:\n # if unspecified, use the current session name\n self._region = self._s3_session.meta.region_name\n else:\n self._region = region\n\n @property\n def inst_data(self):\n return self._inst_data\n\n @classmethod\n def config_type(cls):\n return {\n "bucket": StringSource,\n "local_dir": Field(StringSource, is_required=False),\n "prefix": Field(StringSource, is_required=False, default_value="dagster"),\n "use_ssl": Field(bool, is_required=False, default_value=True),\n "verify": Field(bool, is_required=False, default_value=True),\n "verify_cert_path": Field(StringSource, is_required=False),\n "endpoint_url": Field(StringSource, is_required=False),\n "skip_empty_files": Field(bool, is_required=False, default_value=False),\n "upload_interval": Field(Noneable(int), is_required=False, default_value=None),\n "upload_extra_args": Field(\n Permissive(), is_required=False, description="Extra args for S3 file upload"\n ),\n "show_url_only": Field(bool, is_required=False, default_value=False),\n "region": Field(StringSource, is_required=False),\n }\n\n @classmethod\n def from_config_value(\n cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any]\n ) -> Self:\n return S3ComputeLogManager(inst_data=inst_data, **config_value)\n\n @property\n def local_manager(self) -> LocalComputeLogManager:\n return self._local_manager\n\n @property\n def upload_interval(self) -> Optional[int]:\n return self._upload_interval if self._upload_interval else None\n\n def _clean_prefix(self, prefix):\n parts = prefix.split("/")\n return "/".join([part for part in parts if part])\n\n def _s3_key(self, log_key, io_type, partial=False):\n check.inst_param(io_type, "io_type", ComputeIOType)\n extension = IO_TYPE_EXTENSION[io_type]\n [*namespace, filebase] = log_key\n filename = f"{filebase}.{extension}"\n if partial:\n filename = f"{filename}.partial"\n paths = [self._s3_prefix, "storage", *namespace, filename]\n return "/".join(paths) # s3 path delimiter\n\n @contextmanager\n def capture_logs(self, log_key: 
Sequence[str]) -> Iterator[CapturedLogContext]:\n with super().capture_logs(log_key) as local_context:\n if not self._show_url_only:\n yield local_context\n else:\n out_key = self._s3_key(log_key, ComputeIOType.STDOUT)\n err_key = self._s3_key(log_key, ComputeIOType.STDERR)\n s3_base = f"https://s3.console.aws.amazon.com/s3/object/{self._s3_bucket}?region={self._region}"\n yield CapturedLogContext(\n local_context.log_key,\n external_stdout_url=f"{s3_base}&prefix={out_key}",\n external_stderr_url=f"{s3_base}&prefix={err_key}",\n )\n\n def delete_logs(\n self, log_key: Optional[Sequence[str]] = None, prefix: Optional[Sequence[str]] = None\n ):\n self.local_manager.delete_logs(log_key=log_key, prefix=prefix)\n\n s3_keys_to_remove = None\n if log_key:\n s3_keys_to_remove = [\n self._s3_key(log_key, ComputeIOType.STDOUT),\n self._s3_key(log_key, ComputeIOType.STDERR),\n self._s3_key(log_key, ComputeIOType.STDOUT, partial=True),\n self._s3_key(log_key, ComputeIOType.STDERR, partial=True),\n ]\n elif prefix:\n # add the trailing '' to make sure that ['a'] does not match ['apple']\n s3_prefix = "/".join([self._s3_prefix, "storage", *prefix, ""])\n matching = self._s3_session.list_objects(Bucket=self._s3_bucket, Prefix=s3_prefix)\n s3_keys_to_remove = [obj["Key"] for obj in matching.get("Contents", [])]\n else:\n check.failed("Must pass in either `log_key` or `prefix` argument to delete_logs")\n\n if s3_keys_to_remove:\n to_delete = [{"Key": key} for key in s3_keys_to_remove]\n self._s3_session.delete_objects(Bucket=self._s3_bucket, Delete={"Objects": to_delete})\n\n def download_url_for_type(self, log_key: Sequence[str], io_type: ComputeIOType):\n if not self.is_capture_complete(log_key):\n return None\n\n s3_key = self._s3_key(log_key, io_type)\n return self._s3_session.generate_presigned_url(\n ClientMethod="get_object", Params={"Bucket": self._s3_bucket, "Key": s3_key}\n )\n\n def display_path_for_type(self, log_key: Sequence[str], io_type: ComputeIOType):\n if not self.is_capture_complete(log_key):\n return None\n s3_key = self._s3_key(log_key, io_type)\n return f"s3://{self._s3_bucket}/{s3_key}"\n\n def cloud_storage_has_logs(\n self, log_key: Sequence[str], io_type: ComputeIOType, partial: bool = False\n ) -> bool:\n s3_key = self._s3_key(log_key, io_type, partial=partial)\n try: # https://stackoverflow.com/a/38376288/14656695\n self._s3_session.head_object(Bucket=self._s3_bucket, Key=s3_key)\n except ClientError:\n return False\n return True\n\n def upload_to_cloud_storage(\n self, log_key: Sequence[str], io_type: ComputeIOType, partial=False\n ):\n path = self.local_manager.get_captured_local_path(log_key, IO_TYPE_EXTENSION[io_type])\n ensure_file(path)\n\n if (self._skip_empty_files or partial) and os.stat(path).st_size == 0:\n return\n\n s3_key = self._s3_key(log_key, io_type, partial=partial)\n with open(path, "rb") as data:\n extra_args = {\n "ContentType": "text/plain",\n **(self._upload_extra_args if self._upload_extra_args else {}),\n }\n self._s3_session.upload_fileobj(data, self._s3_bucket, s3_key, ExtraArgs=extra_args)\n\n def download_from_cloud_storage(\n self, log_key: Sequence[str], io_type: ComputeIOType, partial=False\n ):\n path = self._local_manager.get_captured_local_path(\n log_key, IO_TYPE_EXTENSION[io_type], partial=partial\n )\n ensure_dir(os.path.dirname(path))\n s3_key = self._s3_key(log_key, io_type, partial=partial)\n with open(path, "wb") as fileobj:\n self._s3_session.download_fileobj(self._s3_bucket, s3_key, fileobj)\n\n def on_subscribe(self, 
subscription):\n self._subscription_manager.add_subscription(subscription)\n\n def on_unsubscribe(self, subscription):\n self._subscription_manager.remove_subscription(subscription)\n\n def dispose(self):\n self._subscription_manager.dispose()\n self._local_manager.dispose()
\n
", "current_page_name": "_modules/dagster_aws/s3/compute_log_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_aws.s3.compute_log_manager"}, "file_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_aws.s3.file_manager

\nimport io\nimport uuid\nfrom contextlib import contextmanager\n\nimport dagster._check as check\nfrom dagster._core.storage.file_manager import (\n    FileHandle,\n    FileManager,\n    TempfileManager,\n    check_file_like_obj,\n)\n\n\n
[docs]class S3FileHandle(FileHandle):\n """A reference to a file on S3."""\n\n def __init__(self, s3_bucket: str, s3_key: str):\n self._s3_bucket = check.str_param(s3_bucket, "s3_bucket")\n self._s3_key = check.str_param(s3_key, "s3_key")\n\n @property\n def s3_bucket(self) -> str:\n """str: The name of the S3 bucket."""\n return self._s3_bucket\n\n @property\n def s3_key(self) -> str:\n """str: The S3 key."""\n return self._s3_key\n\n @property\n def path_desc(self) -> str:\n """str: The file's S3 URL."""\n return self.s3_path\n\n @property\n def s3_path(self) -> str:\n """str: The file's S3 URL."""\n return f"s3://{self.s3_bucket}/{self.s3_key}"
\n\n\nclass S3FileManager(FileManager):\n def __init__(self, s3_session, s3_bucket, s3_base_key):\n self._s3_session = s3_session\n self._s3_bucket = check.str_param(s3_bucket, "s3_bucket")\n self._s3_base_key = check.str_param(s3_base_key, "s3_base_key")\n self._local_handle_cache = {}\n self._temp_file_manager = TempfileManager()\n\n def copy_handle_to_local_temp(self, file_handle):\n self._download_if_not_cached(file_handle)\n return self._get_local_path(file_handle)\n\n def _download_if_not_cached(self, file_handle):\n if not self._file_handle_cached(file_handle):\n # instigate download\n temp_file_obj = self._temp_file_manager.tempfile()\n temp_name = temp_file_obj.name\n self._s3_session.download_file(\n Bucket=file_handle.s3_bucket, Key=file_handle.s3_key, Filename=temp_name\n )\n self._local_handle_cache[file_handle.s3_path] = temp_name\n\n return file_handle\n\n @contextmanager\n def read(self, file_handle, mode="rb"):\n check.inst_param(file_handle, "file_handle", S3FileHandle)\n check.str_param(mode, "mode")\n check.param_invariant(mode in {"r", "rb"}, "mode")\n\n self._download_if_not_cached(file_handle)\n\n encoding = None if mode == "rb" else "utf-8"\n with open(self._get_local_path(file_handle), mode, encoding=encoding) as file_obj:\n yield file_obj\n\n def _file_handle_cached(self, file_handle):\n return file_handle.s3_path in self._local_handle_cache\n\n def _get_local_path(self, file_handle):\n return self._local_handle_cache[file_handle.s3_path]\n\n def read_data(self, file_handle):\n with self.read(file_handle, mode="rb") as file_obj:\n return file_obj.read()\n\n def write_data(self, data, ext=None):\n check.inst_param(data, "data", bytes)\n return self.write(io.BytesIO(data), mode="wb", ext=ext)\n\n def write(self, file_obj, mode="wb", ext=None):\n check_file_like_obj(file_obj)\n s3_key = self.get_full_key(str(uuid.uuid4()) + (("." + ext) if ext is not None else ""))\n self._s3_session.put_object(Body=file_obj, Bucket=self._s3_bucket, Key=s3_key)\n return S3FileHandle(self._s3_bucket, s3_key)\n\n def get_full_key(self, file_key):\n return f"{self._s3_base_key}/{file_key}"\n\n def delete_local_temp(self):\n self._temp_file_manager.close()\n
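``S3FileManager`` has no usage example in its docstring; the sketch below shows the write/read round trip. The bucket, prefix, and region are placeholders, and valid AWS credentials are assumed to be available to boto3:

.. code-block:: python

    import boto3

    from dagster_aws.s3.file_manager import S3FileManager

    # write_data stages bytes under "<s3_base_key>/<uuid>" and returns an S3FileHandle;
    # read_data downloads the object to a local temp file and returns its bytes.
    file_manager = S3FileManager(
        s3_session=boto3.client("s3", region_name="us-east-1"),  # placeholder region
        s3_bucket="my-bucket",          # placeholder bucket
        s3_base_key="dagster-files",    # placeholder prefix
    )
    handle = file_manager.write_data(b"hello")
    print(handle.s3_path)               # s3://my-bucket/dagster-files/<uuid>
    assert file_manager.read_data(handle) == b"hello"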
", "current_page_name": "_modules/dagster_aws/s3/file_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_aws.s3.file_manager"}, "io_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_aws.s3.io_manager

\nimport io\nimport pickle\nfrom typing import Any, Dict, Optional, Union\n\nfrom dagster import (\n    ConfigurableIOManager,\n    InputContext,\n    MetadataValue,\n    OutputContext,\n    ResourceDependency,\n    _check as check,\n    io_manager,\n)\nfrom dagster._annotations import deprecated\nfrom dagster._core.storage.io_manager import dagster_maintained_io_manager\nfrom dagster._core.storage.upath_io_manager import UPathIOManager\nfrom dagster._utils import PICKLE_PROTOCOL\nfrom dagster._utils.cached_method import cached_method\nfrom pydantic import Field\nfrom upath import UPath\n\nfrom .resources import S3Resource\n\n\nclass PickledObjectS3IOManager(UPathIOManager):\n    def __init__(\n        self,\n        s3_bucket: str,\n        s3_session: Any,\n        s3_prefix: Optional[str] = None,\n    ):\n        self.bucket = check.str_param(s3_bucket, "s3_bucket")\n        check.opt_str_param(s3_prefix, "s3_prefix")\n        self.s3 = s3_session\n        self.s3.list_objects(Bucket=s3_bucket, Prefix=s3_prefix, MaxKeys=1)\n        base_path = UPath(s3_prefix) if s3_prefix else None\n        super().__init__(base_path=base_path)\n\n    def load_from_path(self, context: InputContext, path: UPath) -> Any:\n        try:\n            s3_obj = self.s3.get_object(Bucket=self.bucket, Key=str(path))["Body"].read()\n            return pickle.loads(s3_obj)\n        except self.s3.exceptions.NoSuchKey:\n            raise FileNotFoundError(f"Could not find file {path} in S3 bucket {self.bucket}")\n\n    def dump_to_path(self, context: OutputContext, obj: Any, path: UPath) -> None:\n        if self.path_exists(path):\n            context.log.warning(f"Removing existing S3 object: {path}")\n            self.unlink(path)\n\n        pickled_obj = pickle.dumps(obj, PICKLE_PROTOCOL)\n        pickled_obj_bytes = io.BytesIO(pickled_obj)\n        self.s3.upload_fileobj(pickled_obj_bytes, self.bucket, str(path))\n\n    def path_exists(self, path: UPath) -> bool:\n        try:\n            self.s3.get_object(Bucket=self.bucket, Key=str(path))\n        except self.s3.exceptions.NoSuchKey:\n            return False\n        return True\n\n    def get_loading_input_log_message(self, path: UPath) -> str:\n        return f"Loading S3 object from: {self._uri_for_path(path)}"\n\n    def get_writing_output_log_message(self, path: UPath) -> str:\n        return f"Writing S3 object at: {self._uri_for_path(path)}"\n\n    def unlink(self, path: UPath) -> None:\n        self.s3.delete_object(Bucket=self.bucket, Key=str(path))\n\n    def make_directory(self, path: UPath) -> None:\n        # It is not necessary to create directories in S3\n        return None\n\n    def get_metadata(self, context: OutputContext, obj: Any) -> Dict[str, MetadataValue]:\n        path = self._get_path(context)\n        return {"uri": MetadataValue.path(self._uri_for_path(path))}\n\n    def get_op_output_relative_path(self, context: Union[InputContext, OutputContext]) -> UPath:\n        return UPath("storage", super().get_op_output_relative_path(context))\n\n    def _uri_for_path(self, path: UPath) -> str:\n        return f"s3://{self.bucket}/{path}"\n\n\n
[docs]class S3PickleIOManager(ConfigurableIOManager):\n """Persistent IO manager using S3 for storage.\n\n Serializes objects via pickling. Suitable for objects storage for distributed executors, so long\n as each execution node has network connectivity and credentials for S3 and the backing bucket.\n\n Assigns each op output to a unique filepath containing run ID, step key, and output name.\n Assigns each asset to a single filesystem path, at "<base_dir>/<asset_key>". If the asset key\n has multiple components, the final component is used as the name of the file, and the preceding\n components as parent directories under the base_dir.\n\n Subsequent materializations of an asset will overwrite previous materializations of that asset.\n With a base directory of "/my/base/path", an asset with key\n `AssetKey(["one", "two", "three"])` would be stored in a file called "three" in a directory\n with path "/my/base/path/one/two/".\n\n Example usage:\n\n .. code-block:: python\n\n from dagster import asset, Definitions\n from dagster_aws.s3 import S3PickleIOManager, S3Resource\n\n\n @asset\n def asset1():\n # create df ...\n return df\n\n @asset\n def asset2(asset1):\n return asset1[:5]\n\n defs = Definitions(\n assets=[asset1, asset2],\n resources={\n "io_manager": S3PickleIOManager(\n s3_resource=S3Resource(),\n s3_bucket="my-cool-bucket",\n s3_prefix="my-cool-prefix",\n )\n }\n )\n\n """\n\n s3_resource: ResourceDependency[S3Resource]\n s3_bucket: str = Field(description="S3 bucket to use for the file manager.")\n s3_prefix: str = Field(\n default="dagster", description="Prefix to use for the S3 bucket for this file manager."\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @cached_method\n def inner_io_manager(self) -> PickledObjectS3IOManager:\n return PickledObjectS3IOManager(\n s3_bucket=self.s3_bucket,\n s3_session=self.s3_resource.get_client(),\n s3_prefix=self.s3_prefix,\n )\n\n def load_input(self, context: InputContext) -> Any:\n return self.inner_io_manager().load_input(context)\n\n def handle_output(self, context: OutputContext, obj: Any) -> None:\n return self.inner_io_manager().handle_output(context, obj)
\n\n\n
[docs]@deprecated(\n breaking_version="2.0",\n additional_warn_text="Please use S3PickleIOManager instead.",\n)\nclass ConfigurablePickledObjectS3IOManager(S3PickleIOManager):\n """Renamed to S3PickleIOManager. See S3PickleIOManager for documentation."""\n\n pass
\n\n\n
[docs]@dagster_maintained_io_manager\n@io_manager(\n config_schema=S3PickleIOManager.to_config_schema(),\n required_resource_keys={"s3"},\n)\ndef s3_pickle_io_manager(init_context):\n """Persistent IO manager using S3 for storage.\n\n Serializes objects via pickling. Suitable for objects storage for distributed executors, so long\n as each execution node has network connectivity and credentials for S3 and the backing bucket.\n\n Assigns each op output to a unique filepath containing run ID, step key, and output name.\n Assigns each asset to a single filesystem path, at "<base_dir>/<asset_key>". If the asset key\n has multiple components, the final component is used as the name of the file, and the preceding\n components as parent directories under the base_dir.\n\n Subsequent materializations of an asset will overwrite previous materializations of that asset.\n With a base directory of "/my/base/path", an asset with key\n `AssetKey(["one", "two", "three"])` would be stored in a file called "three" in a directory\n with path "/my/base/path/one/two/".\n\n Example usage:\n\n 1. Attach this IO manager to a set of assets.\n\n .. code-block:: python\n\n from dagster import Definitions, asset\n from dagster_aws.s3 import s3_pickle_io_manager, s3_resource\n\n\n @asset\n def asset1():\n # create df ...\n return df\n\n @asset\n def asset2(asset1):\n return asset1[:5]\n\n defs = Definitions(\n assets=[asset1, asset2],\n resources={\n "io_manager": s3_pickle_io_manager.configured(\n {"s3_bucket": "my-cool-bucket", "s3_prefix": "my-cool-prefix"}\n ),\n "s3": s3_resource,\n },\n )\n\n\n 2. Attach this IO manager to your job to make it available to your ops.\n\n .. code-block:: python\n\n from dagster import job\n from dagster_aws.s3 import s3_pickle_io_manager, s3_resource\n\n @job(\n resource_defs={\n "io_manager": s3_pickle_io_manager.configured(\n {"s3_bucket": "my-cool-bucket", "s3_prefix": "my-cool-prefix"}\n ),\n "s3": s3_resource,\n },\n )\n def my_job():\n ...\n """\n s3_session = init_context.resources.s3\n s3_bucket = init_context.resource_config["s3_bucket"]\n s3_prefix = init_context.resource_config.get("s3_prefix") # s3_prefix is optional\n pickled_io_manager = PickledObjectS3IOManager(s3_bucket, s3_session, s3_prefix=s3_prefix)\n return pickled_io_manager
\n
", "current_page_name": "_modules/dagster_aws/s3/io_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_aws.s3.io_manager"}, "ops": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_aws.s3.ops

\nfrom typing import Any, Generator, Mapping\n\nfrom dagster import (\n    AssetMaterialization,\n    Field,\n    FileHandle,\n    In,\n    MetadataValue,\n    Out,\n    Output,\n    StringSource,\n    _check as check,\n    dagster_type_loader,\n    op,\n)\nfrom dagster._core.types.dagster_type import PythonObjectDagsterType\n\nfrom .file_manager import S3FileHandle\n\n\ndef dict_with_fields(name: str, fields: Mapping[str, object]):\n    check.str_param(name, "name")\n    check.mapping_param(fields, "fields", key_type=str)\n    field_names = set(fields.keys())\n\n    @dagster_type_loader(fields)\n    def _input_schema(_context, value):\n        check.dict_param(value, "value")\n        check.param_invariant(set(value.keys()) == field_names, "value")\n        return value\n\n    class _DictWithSchema(PythonObjectDagsterType):\n        def __init__(self):\n            super(_DictWithSchema, self).__init__(python_type=dict, name=name, loader=_input_schema)\n\n    return _DictWithSchema()\n\n\nS3Coordinate = dict_with_fields(\n    "S3Coordinate",\n    fields={\n        "bucket": Field(StringSource, description="S3 bucket name"),\n        "key": Field(StringSource, description="S3 key name"),\n    },\n)\n\n\ndef last_key(key: str) -> str:\n    if "/" not in key:\n        return key\n    comps = key.split("/")\n    return comps[-1]\n\n\n@op(\n    config_schema={\n        "Bucket": Field(\n            StringSource, description="The name of the bucket to upload to.", is_required=True\n        ),\n        "Key": Field(\n            StringSource, description="The name of the key to upload to.", is_required=True\n        ),\n    },\n    ins={"file_handle": In(FileHandle, description="The file to upload.")},\n    out={"s3_file_handle": Out(S3FileHandle)},\n    description="""Take a file handle and upload it to s3. Returns an S3FileHandle.""",\n    required_resource_keys={"s3", "file_manager"},\n)\ndef file_handle_to_s3(context, file_handle) -> Generator[Any, None, None]:\n    bucket = context.op_config["Bucket"]\n    key = context.op_config["Key"]\n\n    file_manager = context.resources.file_manager\n    s3 = context.resources.s3\n\n    with file_manager.read(file_handle, "rb") as fileobj:\n        s3.upload_fileobj(fileobj, bucket, key)\n        s3_file_handle = S3FileHandle(bucket, key)\n\n        yield AssetMaterialization(\n            asset_key=s3_file_handle.s3_path,\n            metadata={last_key(key): MetadataValue.path(s3_file_handle.s3_path)},\n        )\n\n        yield Output(value=s3_file_handle, output_name="s3_file_handle")\n
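A sketch of wiring ``file_handle_to_s3`` into a job. The upstream ``write_report`` op is hypothetical, and the job still needs run config for the op's ``Bucket``/``Key`` plus resource config for ``s3``/``file_manager`` at execution time:

.. code-block:: python

    from dagster import FileHandle, job, op
    from dagster_aws.s3.ops import file_handle_to_s3
    from dagster_aws.s3.resources import s3_file_manager, s3_resource

    @op(required_resource_keys={"file_manager"})
    def write_report(context) -> FileHandle:
        # hypothetical upstream op: stage some bytes via the configured file manager
        return context.resources.file_manager.write_data(b"report contents")

    @job(resource_defs={"s3": s3_resource, "file_manager": s3_file_manager})
    def upload_report():
        file_handle_to_s3(write_report())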
", "current_page_name": "_modules/dagster_aws/s3/ops", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_aws.s3.ops"}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_aws.s3.resources

\nfrom typing import Any, Optional, TypeVar\n\nfrom dagster import ConfigurableResource, IAttachDifferentObjectToOpContext, resource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom pydantic import Field\n\nfrom .file_manager import S3FileManager\nfrom .utils import construct_s3_client\n\nT = TypeVar("T")\n\n\nclass ResourceWithS3Configuration(ConfigurableResource):\n    use_unsigned_session: bool = Field(\n        default=False, description="Specifies whether to use an unsigned S3 session."\n    )\n    region_name: Optional[str] = Field(\n        default=None, description="Specifies a custom region for the S3 session."\n    )\n    endpoint_url: Optional[str] = Field(\n        default=None, description="Specifies a custom endpoint for the S3 session."\n    )\n    max_attempts: int = Field(\n        default=5,\n        description=(\n            "This provides Boto3's retry handler with a value of maximum retry attempts, where the"\n            " initial call counts toward the max_attempts value that you provide."\n        ),\n    )\n    profile_name: Optional[str] = Field(\n        default=None, description="Specifies a profile to connect that session."\n    )\n    use_ssl: bool = Field(\n        default=True, description="Whether or not to use SSL. By default, SSL is used."\n    )\n    verify: Optional[str] = Field(\n        default=None,\n        description=(\n            "Whether or not to verify SSL certificates. By default SSL certificates are verified."\n            " You can also specify this argument if you want to use a different CA cert bundle than"\n            " the one used by botocore."\n        ),\n    )\n    aws_access_key_id: Optional[str] = Field(\n        default=None, description="AWS access key ID to use when creating the boto3 session."\n    )\n    aws_secret_access_key: Optional[str] = Field(\n        default=None, description="AWS secret access key to use when creating the boto3 session."\n    )\n    aws_session_token: str = Field(\n        default=None, description="AWS session token to use when creating the boto3 session."\n    )\n\n\n
[docs]class S3Resource(ResourceWithS3Configuration, IAttachDifferentObjectToOpContext):\n """Resource that gives access to S3.\n\n The underlying S3 session is created by calling\n :py:func:`boto3.session.Session(profile_name) <boto3:boto3.session>`.\n The returned resource object is an S3 client, an instance of `botocore.client.S3`.\n\n Example:\n .. code-block:: python\n\n from dagster import job, op, Definitions\n from dagster_aws.s3 import S3Resource\n\n @op\n def example_s3_op(s3: S3Resource):\n return s3.get_client().list_objects_v2(\n Bucket='my-bucket',\n Prefix='some-key'\n )\n\n @job\n def example_job():\n example_s3_op()\n\n defs = Definitions(\n jobs=[example_job],\n resources={'s3': S3Resource(region_name='us-west-1')}\n )\n\n """\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def get_client(self) -> Any:\n return construct_s3_client(\n max_attempts=self.max_attempts,\n region_name=self.region_name,\n endpoint_url=self.endpoint_url,\n use_unsigned_session=self.use_unsigned_session,\n profile_name=self.profile_name,\n use_ssl=self.use_ssl,\n verify=self.verify,\n aws_access_key_id=self.aws_access_key_id,\n aws_secret_access_key=self.aws_secret_access_key,\n aws_session_token=self.aws_session_token,\n )\n\n def get_object_to_set_on_execution_context(self) -> Any:\n return self.get_client()
\n\n\n
[docs]@dagster_maintained_resource\n@resource(config_schema=S3Resource.to_config_schema())\ndef s3_resource(context) -> Any:\n """Resource that gives access to S3.\n\n The underlying S3 session is created by calling\n :py:func:`boto3.session.Session(profile_name) <boto3:boto3.session>`.\n The returned resource object is an S3 client, an instance of `botocore.client.S3`.\n\n Example:\n .. code-block:: python\n\n from dagster import build_op_context, job, op\n from dagster_aws.s3 import s3_resource\n\n @op(required_resource_keys={'s3'})\n def example_s3_op(context):\n return context.resources.s3.list_objects_v2(\n Bucket='my-bucket',\n Prefix='some-key'\n )\n\n @job(resource_defs={'s3': s3_resource})\n def example_job():\n example_s3_op()\n\n example_job.execute_in_process(\n run_config={\n 'resources': {\n 's3': {\n 'config': {\n 'region_name': 'us-west-1',\n }\n }\n }\n }\n )\n\n Note that your ops must also declare that they require this resource with\n `required_resource_keys`, or it will not be initialized for the execution of their compute\n functions.\n\n You may configure this resource as follows:\n\n .. code-block:: YAML\n\n resources:\n s3:\n config:\n region_name: "us-west-1"\n # Optional[str]: Specifies a custom region for the S3 session. Default is chosen\n # through the ordinary boto credential chain.\n use_unsigned_session: false\n # Optional[bool]: Specifies whether to use an unsigned S3 session. Default: True\n endpoint_url: "http://localhost"\n # Optional[str]: Specifies a custom endpoint for the S3 session. Default is None.\n profile_name: "dev"\n # Optional[str]: Specifies a custom profile for S3 session. Default is default\n # profile as specified in ~/.aws/credentials file\n use_ssl: true\n # Optional[bool]: Whether or not to use SSL. By default, SSL is used.\n verify: None\n # Optional[str]: Whether or not to verify SSL certificates. By default SSL certificates are verified.\n # You can also specify this argument if you want to use a different CA cert bundle than the one used by botocore."\n aws_access_key_id: None\n # Optional[str]: The access key to use when creating the client.\n aws_secret_access_key: None\n # Optional[str]: The secret key to use when creating the client.\n aws_session_token: None\n # Optional[str]: The session token to use when creating the client.\n """\n return S3Resource.from_resource_context(context).get_client()
\n\n\n
[docs]class S3FileManagerResource(ResourceWithS3Configuration, IAttachDifferentObjectToOpContext):\n s3_bucket: str = Field(description="S3 bucket to use for the file manager.")\n s3_prefix: str = Field(\n default="dagster", description="Prefix to use for the S3 bucket for this file manager."\n )\n\n def get_client(self) -> S3FileManager:\n return S3FileManager(\n s3_session=construct_s3_client(\n max_attempts=self.max_attempts,\n region_name=self.region_name,\n endpoint_url=self.endpoint_url,\n use_unsigned_session=self.use_unsigned_session,\n profile_name=self.profile_name,\n use_ssl=self.use_ssl,\n verify=self.verify,\n aws_access_key_id=self.aws_access_key_id,\n aws_secret_access_key=self.aws_secret_access_key,\n aws_session_token=self.aws_session_token,\n ),\n s3_bucket=self.s3_bucket,\n s3_base_key=self.s3_prefix,\n )\n\n def get_object_to_set_on_execution_context(self) -> Any:\n return self.get_client()
\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n config_schema=S3FileManagerResource.to_config_schema(),\n)\ndef s3_file_manager(context) -> S3FileManager:\n """FileManager that provides abstract access to S3.\n\n Implements the :py:class:`~dagster._core.storage.file_manager.FileManager` API.\n """\n return S3FileManagerResource.from_resource_context(context).get_client()
\n
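Unlike ``S3Resource``, ``S3FileManagerResource`` carries no example in its docstring. A minimal sketch of the Pythonic API, with placeholder bucket, prefix, and region values:

.. code-block:: python

    from dagster import Definitions, asset
    from dagster_aws.s3.resources import S3FileManagerResource

    @asset
    def staged_bytes(file_manager: S3FileManagerResource) -> str:
        # get_client() builds an S3FileManager scoped to the configured bucket/prefix
        handle = file_manager.get_client().write_data(b"some bytes")
        return handle.s3_path

    defs = Definitions(
        assets=[staged_bytes],
        resources={
            "file_manager": S3FileManagerResource(
                s3_bucket="my-bucket",        # placeholder
                s3_prefix="dagster-staging",  # placeholder
                region_name="us-east-1",      # placeholder
            )
        },
    )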
", "current_page_name": "_modules/dagster_aws/s3/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_aws.s3.resources"}}, "secretsmanager": {"resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_aws.secretsmanager.resources

\nfrom contextlib import contextmanager\nfrom typing import TYPE_CHECKING, Dict, Generator, List, Optional, cast\n\nfrom dagster import (\n    Field as LegacyDagsterField,\n    resource,\n)\nfrom dagster._config.field_utils import Shape\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster._core.test_utils import environ\nfrom dagster._utils.merger import merge_dicts\nfrom pydantic import Field\n\nfrom dagster_aws.utils import ResourceWithBoto3Configuration\n\nfrom .secrets import construct_secretsmanager_client, get_secrets_from_arns, get_tagged_secrets\n\nif TYPE_CHECKING:\n    import botocore\n\n\n
[docs]class SecretsManagerResource(ResourceWithBoto3Configuration):\n """Resource that gives access to AWS SecretsManager.\n\n The underlying SecretsManager session is created by calling\n :py:func:`boto3.session.Session(profile_name) <boto3:boto3.session>`.\n The returned resource object is a SecretsManager client, an instance of `botocore.client.SecretsManager`.\n\n Example:\n .. code-block:: python\n\n from dagster import build_op_context, job, op\n from dagster_aws.secretsmanager import SecretsManagerResource\n\n @op\n def example_secretsmanager_op(secretsmanager: SecretsManagerResource):\n return secretsmanager.get_client().get_secret_value(\n SecretId='arn:aws:secretsmanager:region:aws_account_id:secret:appauthexample-AbCdEf'\n )\n\n @job\n def example_job():\n example_secretsmanager_op()\n\n defs = Definitions(\n jobs=[example_job],\n resources={\n 'secretsmanager': SecretsManagerResource(\n region_name='us-west-1'\n )\n }\n )\n """\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def get_client(self) -> "botocore.client.SecretsManager":\n return construct_secretsmanager_client(\n max_attempts=self.max_attempts,\n region_name=self.region_name,\n profile_name=self.profile_name,\n )
\n\n\n
[docs]@dagster_maintained_resource\n@resource(SecretsManagerResource.to_config_schema())\ndef secretsmanager_resource(context) -> "botocore.client.SecretsManager":\n """Resource that gives access to AWS SecretsManager.\n\n The underlying SecretsManager session is created by calling\n :py:func:`boto3.session.Session(profile_name) <boto3:boto3.session>`.\n The returned resource object is a SecretsManager client, an instance of `botocore.client.SecretsManager`.\n\n Example:\n .. code-block:: python\n\n from dagster import build_op_context, job, op\n from dagster_aws.secretsmanager import secretsmanager_resource\n\n @op(required_resource_keys={'secretsmanager'})\n def example_secretsmanager_op(context):\n return context.resources.secretsmanager.get_secret_value(\n SecretId='arn:aws:secretsmanager:region:aws_account_id:secret:appauthexample-AbCdEf'\n )\n\n @job(resource_defs={'secretsmanager': secretsmanager_resource})\n def example_job():\n example_secretsmanager_op()\n\n example_job.execute_in_process(\n run_config={\n 'resources': {\n 'secretsmanager': {\n 'config': {\n 'region_name': 'us-west-1',\n }\n }\n }\n }\n )\n\n You may configure this resource as follows:\n\n .. code-block:: YAML\n\n resources:\n secretsmanager:\n config:\n region_name: "us-west-1"\n # Optional[str]: Specifies a custom region for the SecretsManager session. Default is chosen\n # through the ordinary boto credential chain.\n profile_name: "dev"\n # Optional[str]: Specifies a custom profile for SecretsManager session. Default is default\n # profile as specified in ~/.aws/credentials file\n\n """\n return SecretsManagerResource.from_resource_context(context).get_client()
\n\n\n
[docs]class SecretsManagerSecretsResource(ResourceWithBoto3Configuration):\n """Resource that provides a dict which maps selected SecretsManager secrets to\n their string values. Also optionally sets chosen secrets as environment variables.\n\n Example:\n .. code-block:: python\n\n import os\n from dagster import build_op_context, job, op, ResourceParam\n from dagster_aws.secretsmanager import SecretsManagerSecretsResource\n\n @op\n def example_secretsmanager_secrets_op(secrets: SecretsManagerSecretsResource):\n return secrets.fetch_secrets().get("my-secret-name")\n\n @op\n def example_secretsmanager_secrets_op_2(secrets: SecretsManagerSecretsResource):\n with secrets.secrets_in_environment():\n return os.getenv("my-other-secret-name")\n\n @job\n def example_job():\n example_secretsmanager_secrets_op()\n example_secretsmanager_secrets_op_2()\n\n defs = Definitions(\n jobs=[example_job],\n resources={\n 'secrets': SecretsManagerSecretsResource(\n region_name='us-west-1',\n secrets_tag="dagster",\n add_to_environment=True,\n )\n }\n )\n\n Note that your ops must also declare that they require this resource with or it will not be initialized\n for the execution of their compute functions.\n """\n\n secrets: List[str] = Field(\n default=[], description="An array of AWS Secrets Manager secrets arns to fetch."\n )\n secrets_tag: Optional[str] = Field(\n default=None,\n description="AWS Secrets Manager secrets with this tag will be fetched and made available.",\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @contextmanager\n def secrets_in_environment(\n self,\n secrets: Optional[List[str]] = None,\n secrets_tag: Optional[str] = None,\n ) -> Generator[Dict[str, str], None, None]:\n """Yields a dict which maps selected SecretsManager secrets to their string values. Also\n sets chosen secrets as environment variables.\n\n Args:\n secrets (Optional[List[str]]): An array of AWS Secrets Manager secrets arns to fetch.\n Note that this will override the secrets specified in the resource config.\n secrets_tag (Optional[str]): AWS Secrets Manager secrets with this tag will be fetched\n and made available. Note that this will override the secrets_tag specified in the\n resource config.\n """\n secrets_manager = construct_secretsmanager_client(\n max_attempts=self.max_attempts,\n region_name=self.region_name,\n profile_name=self.profile_name,\n )\n\n secrets_tag_to_fetch = secrets_tag if secrets_tag is not None else self.secrets_tag\n secrets_to_fetch = secrets if secrets is not None else self.secrets\n\n secret_arns = merge_dicts(\n (\n get_tagged_secrets(secrets_manager, [secrets_tag_to_fetch])\n if secrets_tag_to_fetch\n else {}\n ),\n get_secrets_from_arns(secrets_manager, secrets_to_fetch),\n )\n\n secrets_map = {\n name: secrets_manager.get_secret_value(SecretId=arn).get("SecretString")\n for name, arn in secret_arns.items()\n }\n with environ(secrets_map):\n yield secrets_map\n\n def fetch_secrets(\n self,\n secrets: Optional[List[str]] = None,\n secrets_tag: Optional[str] = None,\n ) -> Dict[str, str]:\n """Fetches secrets from AWS Secrets Manager and returns them as a dict.\n\n Args:\n secrets (Optional[List[str]]): An array of AWS Secrets Manager secrets arns to fetch.\n Note that this will override the secrets specified in the resource config.\n secrets_tag (Optional[str]): AWS Secrets Manager secrets with this tag will be fetched\n and made available. 
Note that this will override the secrets_tag specified in the\n resource config.\n """\n with self.secrets_in_environment(secrets=secrets, secrets_tag=secrets_tag) as secret_values:\n return secret_values
\n\n\nLEGACY_SECRETSMANAGER_SECRETS_SCHEMA = {\n **cast(Shape, SecretsManagerSecretsResource.to_config_schema().as_field().config_type).fields,\n "add_to_environment": LegacyDagsterField(\n bool,\n default_value=False,\n description="Whether to add the secrets to the environment. Defaults to False.",\n ),\n}\n\n\n
[docs]@dagster_maintained_resource\n@resource(config_schema=LEGACY_SECRETSMANAGER_SECRETS_SCHEMA)\n@contextmanager\ndef secretsmanager_secrets_resource(context):\n """Resource that provides a dict which maps selected SecretsManager secrets to\n their string values. Also optionally sets chosen secrets as environment variables.\n\n Example:\n .. code-block:: python\n\n import os\n from dagster import build_op_context, job, op\n from dagster_aws.secretsmanager import secretsmanager_secrets_resource\n\n @op(required_resource_keys={'secrets'})\n def example_secretsmanager_secrets_op(context):\n return context.resources.secrets.get("my-secret-name")\n\n @op(required_resource_keys={'secrets'})\n def example_secretsmanager_secrets_op_2(context):\n return os.getenv("my-other-secret-name")\n\n @job(resource_defs={'secrets': secretsmanager_secrets_resource})\n def example_job():\n example_secretsmanager_secrets_op()\n example_secretsmanager_secrets_op_2()\n\n example_job.execute_in_process(\n run_config={\n 'resources': {\n 'secrets': {\n 'config': {\n 'region_name': 'us-west-1',\n 'secrets_tag': 'dagster',\n 'add_to_environment': True,\n }\n }\n }\n }\n )\n\n Note that your ops must also declare that they require this resource with\n `required_resource_keys`, or it will not be initialized for the execution of their compute\n functions.\n\n You may configure this resource as follows:\n\n .. code-block:: YAML\n\n resources:\n secretsmanager:\n config:\n region_name: "us-west-1"\n # Optional[str]: Specifies a custom region for the SecretsManager session. Default is chosen\n # through the ordinary boto credential chain.\n profile_name: "dev"\n # Optional[str]: Specifies a custom profile for SecretsManager session. Default is default\n # profile as specified in ~/.aws/credentials file\n secrets: ["arn:aws:secretsmanager:region:aws_account_id:secret:appauthexample-AbCdEf"]\n # Optional[List[str]]: Specifies a list of secret ARNs to pull from SecretsManager.\n secrets_tag: "dagster"\n # Optional[str]: Specifies a tag, all secrets which have the tag set will be pulled\n # from SecretsManager.\n add_to_environment: true\n # Optional[bool]: Whether to set the selected secrets as environment variables. Defaults\n # to false.\n\n """\n add_to_environment = context.resource_config.get("add_to_environment", False)\n if add_to_environment:\n with SecretsManagerSecretsResource.from_resource_context(\n context\n ).secrets_in_environment() as secrets:\n yield secrets\n else:\n yield SecretsManagerSecretsResource.from_resource_context(context).fetch_secrets()
\n
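One pattern not shown in the docstrings above is overriding the configured secrets per call: both ``fetch_secrets`` and ``secrets_in_environment`` accept ``secrets``/``secrets_tag`` arguments that take precedence over the resource config. A sketch, with a placeholder ARN and secret name:

.. code-block:: python

    from dagster import op
    from dagster_aws.secretsmanager.resources import SecretsManagerSecretsResource

    @op
    def read_api_key(secrets: SecretsManagerSecretsResource):
        # The per-call `secrets` list overrides whatever was set in the resource config.
        values = secrets.fetch_secrets(
            secrets=["arn:aws:secretsmanager:us-west-1:111111111111:secret:my-api-key-AbCdEf"]
        )
        return values.get("my-api-key")  # keyed by the secret's name in Secrets Manager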
", "current_page_name": "_modules/dagster_aws/secretsmanager/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_aws.secretsmanager.resources"}}}, "dagster_azure": {"adls2": {"fake_adls2_resource": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_azure.adls2.fake_adls2_resource

\nimport io\nimport random\nfrom typing import Any, Dict, Optional\nfrom unittest import mock\n\nfrom dagster import resource\nfrom dagster._config.pythonic_config import ConfigurableResource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster._utils.cached_method import cached_method\n\nfrom dagster_azure.blob import FakeBlobServiceClient\n\nfrom .utils import ResourceNotFoundError\n\n\n@dagster_maintained_resource\n@resource({"account_name": str})\ndef fake_adls2_resource(context):\n    return FakeADLS2Resource(account_name=context.resource_config["account_name"])\n\n\n
[docs]class FakeADLS2Resource(ConfigurableResource):\n """Stateful mock of an ADLS2Resource for testing.\n\n Wraps a ``mock.MagicMock``. Containers are implemented using an in-memory dict.\n """\n\n account_name: str\n storage_account: Optional[str] = None\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @property\n @cached_method\n def adls2_client(self) -> "FakeADLS2ServiceClient":\n return FakeADLS2ServiceClient(self.account_name)\n\n @property\n @cached_method\n def blob_client(self) -> FakeBlobServiceClient:\n return FakeBlobServiceClient(self.account_name)\n\n @property\n def lease_client_constructor(self) -> Any:\n return FakeLeaseClient
\n\n\nclass FakeLeaseClient:\n def __init__(self, client):\n self.client = client\n self.id = None\n\n # client needs a ref to self to check if a given lease is valid\n self.client._lease = self # noqa: SLF001\n\n def acquire(self, lease_duration=-1):\n if self.id is None:\n self.id = random.randint(0, 2**9)\n else:\n raise Exception("Lease already held")\n\n def release(self):\n self.id = None\n\n def is_valid(self, lease):\n if self.id is None:\n # no lease is held so any operation is valid\n return True\n return lease == self.id\n\n\nclass FakeADLS2ServiceClient:\n """Stateful mock of an ADLS2 service client for testing.\n\n Wraps a ``mock.MagicMock``. Containers are implemented using an in-memory dict.\n """\n\n def __init__(self, account_name, credential="fake-creds"):\n self._account_name = account_name\n self._credential = mock.MagicMock()\n self._credential.account_key = credential\n self._file_systems = {}\n\n @property\n def account_name(self):\n return self._account_name\n\n @property\n def credential(self):\n return self._credential\n\n @property\n def file_systems(self):\n return self._file_systems\n\n def get_file_system_client(self, file_system):\n return self._file_systems.setdefault(\n file_system, FakeADLS2FilesystemClient(self.account_name, file_system)\n )\n\n def get_file_client(self, file_system, file_path):\n return self.get_file_system_client(file_system).get_file_client(file_path)\n\n\nclass FakeADLS2FilesystemClient:\n """Stateful mock of an ADLS2 filesystem client for testing."""\n\n def __init__(self, account_name, file_system_name):\n self._file_system: Dict[str, FakeADLS2FileClient] = {}\n self._account_name = account_name\n self._file_system_name = file_system_name\n\n @property\n def account_name(self):\n return self._account_name\n\n @property\n def file_system_name(self):\n return self._file_system_name\n\n def keys(self):\n return self._file_system.keys()\n\n def get_file_system_properties(self):\n return {"account_name": self.account_name, "file_system_name": self.file_system_name}\n\n def has_file(self, path):\n return bool(self._file_system.get(path))\n\n def get_file_client(self, file_path):\n # pass fileclient a ref to self and its name so the file can delete itself\n self._file_system.setdefault(file_path, FakeADLS2FileClient(self, file_path))\n return self._file_system[file_path]\n\n def create_file(self, file):\n # pass fileclient a ref to self and the file's name so the file can delete itself by\n # accessing the self._file_system dict\n self._file_system.setdefault(file, FakeADLS2FileClient(fs_client=self, name=file))\n return self._file_system[file]\n\n def delete_file(self, file):\n for k in list(self._file_system.keys()):\n if k.startswith(file):\n del self._file_system[k]\n\n\nclass FakeADLS2FileClient:\n """Stateful mock of an ADLS2 file client for testing."""\n\n def __init__(self, name, fs_client):\n self.name = name\n self.contents = None\n self._lease = None\n self.fs_client = fs_client\n\n @property\n def lease(self):\n return self._lease if self._lease is None else self._lease.id\n\n def get_file_properties(self):\n if self.contents is None:\n raise ResourceNotFoundError("File does not exist!")\n lease_id = None if self._lease is None else self._lease.id\n return {"lease": lease_id}\n\n def upload_data(self, contents, overwrite=False, lease=None):\n if self._lease is not None:\n if not self._lease.is_valid(lease):\n raise Exception("Invalid lease!")\n if self.contents is not None or overwrite is True:\n if isinstance(contents, str):\n 
self.contents = contents.encode("utf8")\n elif isinstance(contents, io.BytesIO):\n self.contents = contents.read()\n elif isinstance(contents, io.StringIO):\n self.contents = contents.read().encode("utf8")\n elif isinstance(contents, bytes):\n self.contents = contents\n else:\n self.contents = contents\n\n def download_file(self):\n if self.contents is None:\n raise ResourceNotFoundError("File does not exist!")\n return FakeADLS2FileDownloader(contents=self.contents)\n\n def delete_file(self, lease=None):\n if self._lease is not None:\n if not self._lease.is_valid(lease):\n raise Exception("Invalid lease!")\n self.fs_client.delete_file(self.name)\n\n\nclass FakeADLS2FileDownloader:\n """Mock of an ADLS2 file downloader for testing."""\n\n def __init__(self, contents):\n self.contents = contents\n\n def readall(self):\n return self.contents\n\n def readinto(self, fileobj):\n fileobj.write(self.contents)\n
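The fakes above keep all state in an in-memory dict, so a round trip needs no Azure credentials. A minimal sketch; the file system name and key are placeholders:

.. code-block:: python

    from dagster_azure.adls2.fake_adls2_resource import FakeADLS2Resource

    fake = FakeADLS2Resource(account_name="fakeaccount")

    # upload_data/download_file operate on the in-memory FakeADLS2FileClient
    file_client = fake.adls2_client.get_file_client("my-fs", "reports/2023.txt")
    file_client.upload_data(b"hello", overwrite=True)
    assert file_client.download_file().readall() == b"hello"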
", "current_page_name": "_modules/dagster_azure/adls2/fake_adls2_resource", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_azure.adls2.fake_adls2_resource"}, "file_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_azure.adls2.file_manager

\nimport io\nimport uuid\nfrom contextlib import contextmanager\n\nimport dagster._check as check\nfrom dagster._core.storage.file_manager import (\n    FileHandle,\n    FileManager,\n    TempfileManager,\n    check_file_like_obj,\n)\n\n\n
[docs]class ADLS2FileHandle(FileHandle):\n """A reference to a file on ADLS2."""\n\n def __init__(self, account: str, file_system: str, key: str):\n self._account = check.str_param(account, "account")\n self._file_system = check.str_param(file_system, "file_system")\n self._key = check.str_param(key, "key")\n\n @property\n def account(self):\n """str: The name of the ADLS2 account."""\n return self._account\n\n @property\n def file_system(self):\n """str: The name of the ADLS2 file system."""\n return self._file_system\n\n @property\n def key(self):\n """str: The ADLS2 key."""\n return self._key\n\n @property\n def path_desc(self):\n """str: The file's ADLS2 URL."""\n return self.adls2_path\n\n @property\n def adls2_path(self):\n """str: The file's ADLS2 URL."""\n return f"abfss://{self.file_system}@{self.account}.dfs.core.windows.net/{self.key}"
\n\n\nclass ADLS2FileManager(FileManager):\n def __init__(self, adls2_client, file_system, prefix):\n self._client = adls2_client\n self._file_system = check.str_param(file_system, "file_system")\n self._prefix = check.str_param(prefix, "prefix")\n self._local_handle_cache = {}\n self._temp_file_manager = TempfileManager()\n\n def copy_handle_to_local_temp(self, file_handle):\n self._download_if_not_cached(file_handle)\n return self._get_local_path(file_handle)\n\n def _download_if_not_cached(self, file_handle):\n if not self._file_handle_cached(file_handle):\n # instigate download\n temp_file_obj = self._temp_file_manager.tempfile()\n temp_name = temp_file_obj.name\n file = self._client.get_file_client(\n file_system=file_handle.file_system,\n file_path=file_handle.key,\n )\n download = file.download_file()\n with open(temp_name, "wb") as file_obj:\n download.readinto(file_obj)\n self._local_handle_cache[file_handle.adls2_path] = temp_name\n\n return file_handle\n\n @contextmanager\n def read(self, file_handle, mode="rb"):\n check.inst_param(file_handle, "file_handle", ADLS2FileHandle)\n check.str_param(mode, "mode")\n check.param_invariant(mode in {"r", "rb"}, "mode")\n\n self._download_if_not_cached(file_handle)\n\n encoding = None if "b" in mode else "utf-8"\n with open(self._get_local_path(file_handle), mode, encoding=encoding) as file_obj:\n yield file_obj\n\n def _file_handle_cached(self, file_handle):\n return file_handle.adls2_path in self._local_handle_cache\n\n def _get_local_path(self, file_handle):\n return self._local_handle_cache[file_handle.adls2_path]\n\n def read_data(self, file_handle):\n with self.read(file_handle, mode="rb") as file_obj:\n return file_obj.read()\n\n def write_data(self, data, ext=None):\n check.inst_param(data, "data", bytes)\n return self.write(io.BytesIO(data), mode="wb", ext=ext)\n\n def write(self, file_obj, mode="wb", ext=None):\n check_file_like_obj(file_obj)\n adls2_key = self.get_full_key(str(uuid.uuid4()) + (("." + ext) if ext is not None else ""))\n adls2_file = self._client.get_file_client(\n file_system=self._file_system, file_path=adls2_key\n )\n adls2_file.upload_data(file_obj, overwrite=True)\n return ADLS2FileHandle(self._client.account_name, self._file_system, adls2_key)\n\n def get_full_key(self, file_key):\n return f"{self._prefix}/{file_key}"\n\n def delete_local_temp(self):\n self._temp_file_manager.close()\n
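``ADLS2FileManager`` follows the same write/read contract as its S3 counterpart. The sketch below pairs it with the in-memory fake from the previous module so it runs without Azure credentials; the file system and prefix are placeholders:

.. code-block:: python

    from dagster_azure.adls2.fake_adls2_resource import FakeADLS2Resource
    from dagster_azure.adls2.file_manager import ADLS2FileManager

    fake = FakeADLS2Resource(account_name="fakeaccount")
    file_manager = ADLS2FileManager(
        adls2_client=fake.adls2_client,
        file_system="my-fs",        # placeholder
        prefix="dagster-files",     # placeholder
    )

    # write_data stages bytes under "<prefix>/<uuid>" and returns an ADLS2FileHandle
    handle = file_manager.write_data(b"hello")
    print(handle.adls2_path)
    assert file_manager.read_data(handle) == b"hello"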
", "current_page_name": "_modules/dagster_azure/adls2/file_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_azure.adls2.file_manager"}, "io_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_azure.adls2.io_manager

\nimport pickle\nfrom contextlib import contextmanager\nfrom typing import Any, Iterator, Union\n\nfrom dagster import (\n    InputContext,\n    OutputContext,\n    ResourceDependency,\n    _check as check,\n    io_manager,\n)\nfrom dagster._annotations import deprecated\nfrom dagster._config.pythonic_config import ConfigurableIOManager\nfrom dagster._core.storage.io_manager import dagster_maintained_io_manager\nfrom dagster._core.storage.upath_io_manager import UPathIOManager\nfrom dagster._utils import PICKLE_PROTOCOL\nfrom dagster._utils.cached_method import cached_method\nfrom pydantic import Field\nfrom upath import UPath\n\nfrom dagster_azure.adls2.resources import ADLS2Resource\nfrom dagster_azure.adls2.utils import ResourceNotFoundError\n\n_LEASE_DURATION = 60  # One minute\n\n\nclass PickledObjectADLS2IOManager(UPathIOManager):\n    def __init__(\n        self,\n        file_system: Any,\n        adls2_client: Any,\n        blob_client: Any,\n        lease_client_constructor: Any,\n        prefix: str = "dagster",\n    ):\n        self.adls2_client = adls2_client\n        self.file_system_client = self.adls2_client.get_file_system_client(file_system)\n        # We also need a blob client to handle copying as ADLS doesn't have a copy API yet\n        self.blob_client = blob_client\n        self.blob_container_client = self.blob_client.get_container_client(file_system)\n        self.prefix = check.str_param(prefix, "prefix")\n\n        self.lease_client_constructor = lease_client_constructor\n        self.lease_duration = _LEASE_DURATION\n        self.file_system_client.get_file_system_properties()\n        super().__init__(base_path=UPath(self.prefix))\n\n    def get_op_output_relative_path(self, context: Union[InputContext, OutputContext]) -> UPath:\n        parts = context.get_identifier()\n        run_id = parts[0]\n        output_parts = parts[1:]\n        return UPath("storage", run_id, "files", *output_parts)\n\n    def get_loading_input_log_message(self, path: UPath) -> str:\n        return f"Loading ADLS2 object from: {self._uri_for_path(path)}"\n\n    def get_writing_output_log_message(self, path: UPath) -> str:\n        return f"Writing ADLS2 object at: {self._uri_for_path(path)}"\n\n    def unlink(self, path: UPath) -> None:\n        file_client = self.file_system_client.get_file_client(str(path))\n        with self._acquire_lease(file_client, is_rm=True) as lease:\n            file_client.delete_file(lease=lease, recursive=True)\n\n    def make_directory(self, path: UPath) -> None:\n        # It is not necessary to create directories in ADLS2\n        return None\n\n    def path_exists(self, path: UPath) -> bool:\n        try:\n            self.file_system_client.get_file_client(str(path)).get_file_properties()\n        except ResourceNotFoundError:\n            return False\n        return True\n\n    def _uri_for_path(self, path: UPath, protocol: str = "abfss://") -> str:\n        return "{protocol}{filesystem}@{account}.dfs.core.windows.net/{key}".format(\n            protocol=protocol,\n            filesystem=self.file_system_client.file_system_name,\n            account=self.file_system_client.account_name,\n            key=path,\n        )\n\n    @contextmanager\n    def _acquire_lease(self, client: Any, is_rm: bool = False) -> Iterator[str]:\n        lease_client = self.lease_client_constructor(client=client)\n        try:\n            lease_client.acquire(lease_duration=self.lease_duration)\n            yield lease_client.id\n        finally:\n            # 
cannot release a lease on a file that no longer exists, so need to check\n            if not is_rm:\n                lease_client.release()\n\n    def load_from_path(self, context: InputContext, path: UPath) -> Any:\n        if context.dagster_type.typing_type == type(None):\n            return None\n        file = self.file_system_client.get_file_client(str(path))\n        stream = file.download_file()\n        return pickle.loads(stream.readall())\n\n    def dump_to_path(self, context: OutputContext, obj: Any, path: UPath) -> None:\n        if self.path_exists(path):\n            context.log.warning(f"Removing existing ADLS2 key: {path}")\n            self.unlink(path)\n\n        pickled_obj = pickle.dumps(obj, PICKLE_PROTOCOL)\n        file = self.file_system_client.create_file(str(path))\n        with self._acquire_lease(file) as lease:\n            file.upload_data(pickled_obj, lease=lease, overwrite=True)\n\n\n
[docs]class ADLS2PickleIOManager(ConfigurableIOManager):\n """Persistent IO manager using Azure Data Lake Storage Gen2 for storage.\n\n Serializes objects via pickling. Suitable for objects storage for distributed executors, so long\n as each execution node has network connectivity and credentials for ADLS and the backing\n container.\n\n Assigns each op output to a unique filepath containing run ID, step key, and output name.\n Assigns each asset to a single filesystem path, at "<base_dir>/<asset_key>". If the asset key\n has multiple components, the final component is used as the name of the file, and the preceding\n components as parent directories under the base_dir.\n\n Subsequent materializations of an asset will overwrite previous materializations of that asset.\n With a base directory of "/my/base/path", an asset with key\n `AssetKey(["one", "two", "three"])` would be stored in a file called "three" in a directory\n with path "/my/base/path/one/two/".\n\n Example usage:\n\n 1. Attach this IO manager to a set of assets.\n\n .. code-block:: python\n\n from dagster import Definitions, asset\n from dagster_azure.adls2 import ADLS2PickleIOManager, adls2_resource\n\n @asset\n def asset1():\n # create df ...\n return df\n\n @asset\n def asset2(asset1):\n return df[:5]\n\n defs = Definitions(\n assets=[asset1, asset2],\n resources={\n "io_manager": ADLS2PickleIOManager(\n adls2_file_system="my-cool-fs",\n adls2_prefix="my-cool-prefix"\n ),\n "adls2": adls2_resource,\n },\n )\n\n\n 2. Attach this IO manager to your job to make it available to your ops.\n\n .. code-block:: python\n\n from dagster import job\n from dagster_azure.adls2 import ADLS2PickleIOManager, adls2_resource\n\n @job(\n resource_defs={\n "io_manager": ADLS2PickleIOManager(\n adls2_file_system="my-cool-fs",\n adls2_prefix="my-cool-prefix"\n ),\n "adls2": adls2_resource,\n },\n )\n def my_job():\n ...\n """\n\n adls2: ResourceDependency[ADLS2Resource]\n adls2_file_system: str = Field(description="ADLS Gen2 file system name.")\n adls2_prefix: str = Field(\n default="dagster", description="ADLS Gen2 file system prefix to write to."\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @property\n @cached_method\n def _internal_io_manager(self) -> PickledObjectADLS2IOManager:\n return PickledObjectADLS2IOManager(\n self.adls2_file_system,\n self.adls2.adls2_client,\n self.adls2.blob_client,\n self.adls2.lease_client_constructor,\n self.adls2_prefix,\n )\n\n def load_input(self, context: "InputContext") -> Any:\n return self._internal_io_manager.load_input(context)\n\n def handle_output(self, context: "OutputContext", obj: Any) -> None:\n self._internal_io_manager.handle_output(context, obj)
\n\n\n
[docs]@deprecated(\n    breaking_version="2.0",\n    additional_warn_text="Please use ADLS2PickleIOManager instead.",\n)\nclass ConfigurablePickledObjectADLS2IOManager(ADLS2PickleIOManager):\n    """Renamed to ADLS2PickleIOManager. See ADLS2PickleIOManager for documentation."""\n\n    pass
\n\n\n
[docs]@dagster_maintained_io_manager\n@io_manager(\n config_schema=ADLS2PickleIOManager.to_config_schema(),\n required_resource_keys={"adls2"},\n)\ndef adls2_pickle_io_manager(init_context):\n """Persistent IO manager using Azure Data Lake Storage Gen2 for storage.\n\n Serializes objects via pickling. Suitable for objects storage for distributed executors, so long\n as each execution node has network connectivity and credentials for ADLS and the backing\n container.\n\n Assigns each op output to a unique filepath containing run ID, step key, and output name.\n Assigns each asset to a single filesystem path, at "<base_dir>/<asset_key>". If the asset key\n has multiple components, the final component is used as the name of the file, and the preceding\n components as parent directories under the base_dir.\n\n Subsequent materializations of an asset will overwrite previous materializations of that asset.\n With a base directory of "/my/base/path", an asset with key\n `AssetKey(["one", "two", "three"])` would be stored in a file called "three" in a directory\n with path "/my/base/path/one/two/".\n\n Example usage:\n\n 1. Attach this IO manager to a set of assets.\n\n .. code-block:: python\n\n from dagster import Definitions, asset\n from dagster_azure.adls2 import adls2_pickle_io_manager, adls2_resource\n\n @asset\n def asset1():\n # create df ...\n return df\n\n @asset\n def asset2(asset1):\n return df[:5]\n\n defs = Definitions(\n assets=[asset1, asset2],\n resources={\n "io_manager": adls2_pickle_io_manager.configured(\n {"adls2_file_system": "my-cool-fs", "adls2_prefix": "my-cool-prefix"}\n ),\n "adls2": adls2_resource,\n },\n )\n\n\n 2. Attach this IO manager to your job to make it available to your ops.\n\n .. code-block:: python\n\n from dagster import job\n from dagster_azure.adls2 import adls2_pickle_io_manager, adls2_resource\n\n @job(\n resource_defs={\n "io_manager": adls2_pickle_io_manager.configured(\n {"adls2_file_system": "my-cool-fs", "adls2_prefix": "my-cool-prefix"}\n ),\n "adls2": adls2_resource,\n },\n )\n def my_job():\n ...\n """\n adls_resource = init_context.resources.adls2\n adls2_client = adls_resource.adls2_client\n blob_client = adls_resource.blob_client\n lease_client = adls_resource.lease_client_constructor\n pickled_io_manager = PickledObjectADLS2IOManager(\n init_context.resource_config["adls2_file_system"],\n adls2_client,\n blob_client,\n lease_client,\n init_context.resource_config.get("adls2_prefix"),\n )\n return pickled_io_manager
\n
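The docstring examples above pair ``ADLS2PickleIOManager`` with the legacy ``adls2_resource``. Below is a minimal sketch of the equivalent wiring with the Pythonic ``ADLS2Resource``; it assumes ``ADLS2Key`` is exported from ``dagster_azure.adls2`` alongside ``ADLS2Resource``, and the account, key, and file system names are placeholders.

.. code-block:: python

    from dagster import Definitions, asset
    from dagster_azure.adls2 import ADLS2Key, ADLS2PickleIOManager, ADLS2Resource

    @asset
    def numbers() -> list:
        return [1, 2, 3]

    defs = Definitions(
        assets=[numbers],
        resources={
            "io_manager": ADLS2PickleIOManager(
                # The Pythonic resource satisfies the io manager's `adls2` dependency.
                adls2=ADLS2Resource(
                    storage_account="my_storage_account",
                    credential=ADLS2Key(key="my_shared_key"),
                ),
                adls2_file_system="my-cool-fs",
                adls2_prefix="my-cool-prefix",
            ),
        },
    )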
", "current_page_name": "_modules/dagster_azure/adls2/io_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_azure.adls2.io_manager"}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_azure.adls2.resources

\nfrom typing import Any, Dict, Union\n\nfrom azure.identity import DefaultAzureCredential\nfrom azure.storage.filedatalake import DataLakeLeaseClient\nfrom dagster import (\n    Config,\n    ConfigurableResource,\n    Field as DagsterField,\n    Permissive,\n    Selector,\n    StringSource,\n    resource,\n)\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster._utils.cached_method import cached_method\nfrom dagster._utils.merger import merge_dicts\nfrom pydantic import Field\nfrom typing_extensions import Literal\n\nfrom dagster_azure.blob.utils import BlobServiceClient, create_blob_client\n\nfrom .file_manager import ADLS2FileManager\nfrom .utils import DataLakeServiceClient, create_adls2_client\n\n\nclass ADLS2SASToken(Config):\n    credential_type: Literal["sas"] = "sas"\n    token: str\n\n\nclass ADLS2Key(Config):\n    credential_type: Literal["key"] = "key"\n    key: str\n\n\nclass ADLS2DefaultAzureCredential(Config):\n    credential_type: Literal["default_azure_credential"] = "default_azure_credential"\n    kwargs: Dict[str, Any]\n\n\nclass ADLS2BaseResource(ConfigurableResource):\n    storage_account: str = Field(description="The storage account name.")\n    credential: Union[ADLS2SASToken, ADLS2Key, ADLS2DefaultAzureCredential] = Field(\n        discriminator="credential_type", description="The credentials with which to authenticate."\n    )\n\n\nDEFAULT_AZURE_CREDENTIAL_CONFIG = DagsterField(\n    Permissive(\n        description="Uses DefaultAzureCredential to authenticate and passed as keyword arguments",\n    )\n)\n\nADLS2_CLIENT_CONFIG = {\n    "storage_account": DagsterField(StringSource, description="The storage account name."),\n    "credential": DagsterField(\n        Selector(\n            {\n                "sas": DagsterField(StringSource, description="SAS token for the account."),\n                "key": DagsterField(StringSource, description="Shared Access Key for the account."),\n                "DefaultAzureCredential": DEFAULT_AZURE_CREDENTIAL_CONFIG,\n            }\n        ),\n        description="The credentials with which to authenticate.",\n    ),\n}\n\n\n
[docs]class ADLS2Resource(ADLS2BaseResource):\n """Resource containing clients to access Azure Data Lake Storage Gen2.\n\n Contains a client for both the Data Lake and Blob APIs, to work around the limitations\n of each.\n """\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @property\n @cached_method\n def _raw_credential(self) -> Any:\n if isinstance(self.credential, ADLS2Key):\n return self.credential.key\n elif isinstance(self.credential, ADLS2SASToken):\n return self.credential.token\n else:\n return DefaultAzureCredential(**self.credential.kwargs)\n\n @property\n @cached_method\n def adls2_client(self) -> DataLakeServiceClient:\n return create_adls2_client(self.storage_account, self._raw_credential)\n\n @property\n @cached_method\n def blob_client(self) -> BlobServiceClient:\n return create_blob_client(self.storage_account, self._raw_credential)\n\n @property\n def lease_client_constructor(self) -> Any:\n return DataLakeLeaseClient
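A hedged sketch of binding ``ADLS2Resource`` in a ``Definitions`` object and using its clients from an asset. The storage account and key are placeholders, and ``ADLS2Key`` is assumed to be exported from ``dagster_azure.adls2``.

.. code-block:: python

    from dagster import Definitions, asset
    from dagster_azure.adls2 import ADLS2Key, ADLS2Resource

    @asset
    def adls2_file_systems(adls2: ADLS2Resource) -> list:
        # adls2_client is a DataLakeServiceClient; blob_client covers the Blob API.
        return [fs.name for fs in adls2.adls2_client.list_file_systems()]

    defs = Definitions(
        assets=[adls2_file_systems],
        resources={
            "adls2": ADLS2Resource(
                storage_account="my_storage_account",
                credential=ADLS2Key(key="my_shared_key"),
            ),
        },
    )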
\n\n\n# Due to a limitation of the discriminated union type, we can't directly mirror these old\n# config fields in the new resource config. Instead, we'll just use the old config fields\n# to construct the new config and then use that to construct the resource.\n
[docs]@dagster_maintained_resource\n@resource(ADLS2_CLIENT_CONFIG)\ndef adls2_resource(context):\n """Resource that gives ops access to Azure Data Lake Storage Gen2.\n\n The underlying client is a :py:class:`~azure.storage.filedatalake.DataLakeServiceClient`.\n\n Attach this resource definition to a :py:class:`~dagster.JobDefinition` in order to make it\n available to your ops.\n\n Example:\n .. code-block:: python\n\n from dagster import job, op\n from dagster_azure.adls2 import adls2_resource\n\n @op(required_resource_keys={'adls2'})\n def example_adls2_op(context):\n return list(context.resources.adls2.adls2_client.list_file_systems())\n\n @job(resource_defs={"adls2": adls2_resource})\n def my_job():\n example_adls2_op()\n\n Note that your ops must also declare that they require this resource with\n `required_resource_keys`, or it will not be initialized for the execution of their compute\n functions.\n\n You may pass credentials to this resource using either a SAS token, a key or by passing the\n `DefaultAzureCredential` object.\n\n .. code-block:: YAML\n\n resources:\n adls2:\n config:\n storage_account: my_storage_account\n # str: The storage account name.\n credential:\n sas: my_sas_token\n # str: the SAS token for the account.\n key:\n env: AZURE_DATA_LAKE_STORAGE_KEY\n # str: The shared access key for the account.\n DefaultAzureCredential: {}\n # dict: The keyword arguments used for DefaultAzureCredential\n # or leave the object empty for no arguments\n DefaultAzureCredential:\n exclude_environment_credential: true\n\n """\n return _adls2_resource_from_config(context.resource_config)
\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n merge_dicts(\n ADLS2_CLIENT_CONFIG,\n {\n "adls2_file_system": DagsterField(\n StringSource, description="ADLS Gen2 file system name"\n ),\n "adls2_prefix": DagsterField(StringSource, is_required=False, default_value="dagster"),\n },\n )\n)\ndef adls2_file_manager(context):\n """FileManager that provides abstract access to ADLS2.\n\n Implements the :py:class:`~dagster._core.storage.file_manager.FileManager` API.\n """\n adls2_client = _adls2_resource_from_config(context.resource_config).adls2_client\n\n return ADLS2FileManager(\n adls2_client=adls2_client,\n file_system=context.resource_config["adls2_file_system"],\n prefix=context.resource_config["adls2_prefix"],\n )
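``adls2_file_manager`` has no usage example in its docstring, so here is one plausible wiring through the generic ``FileManager`` API. The resource key, config values, and the logged ``path_desc`` attribute are assumptions based on the base ``FileManager``/``FileHandle`` interfaces, not taken from this module.

.. code-block:: python

    from dagster import job, op
    from dagster_azure.adls2 import adls2_file_manager

    @op(required_resource_keys={"file_manager"})
    def store_report(context):
        # FileManager.write_data persists raw bytes and returns a file handle.
        handle = context.resources.file_manager.write_data(b"report contents")
        context.log.info(f"Wrote {handle.path_desc}")

    @job(
        resource_defs={
            "file_manager": adls2_file_manager.configured(
                {
                    "storage_account": "my_storage_account",
                    "credential": {"key": "my_shared_key"},
                    "adls2_file_system": "my-cool-fs",
                }
            )
        }
    )
    def file_manager_job():
        store_report()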
\n\n\ndef _adls2_resource_from_config(config) -> ADLS2Resource:\n    """Args:\n        config: A configuration containing the fields in ADLS2_CLIENT_CONFIG.\n\n    Returns: An ADLS2Resource constructed from the parsed credential config.\n    """\n    storage_account = config["storage_account"]\n    if "DefaultAzureCredential" in config["credential"]:\n        credential = ADLS2DefaultAzureCredential(\n            kwargs=config["credential"]["DefaultAzureCredential"]\n        )\n    elif "sas" in config["credential"]:\n        credential = ADLS2SASToken(token=config["credential"]["sas"])\n    else:\n        credential = ADLS2Key(key=config["credential"]["key"])\n\n    return ADLS2Resource(storage_account=storage_account, credential=credential)\n
", "current_page_name": "_modules/dagster_azure/adls2/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_azure.adls2.resources"}}, "blob": {"compute_log_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_azure.blob.compute_log_manager

\nimport os\nfrom contextlib import contextmanager\nfrom typing import Any, Mapping, Optional, Sequence\n\nimport dagster._seven as seven\nfrom azure.identity import DefaultAzureCredential\nfrom dagster import (\n    Field,\n    Noneable,\n    Permissive,\n    StringSource,\n    _check as check,\n)\nfrom dagster._core.storage.cloud_storage_compute_log_manager import (\n    CloudStorageComputeLogManager,\n    PollingComputeLogSubscriptionManager,\n)\nfrom dagster._core.storage.compute_log_manager import ComputeIOType\nfrom dagster._core.storage.local_compute_log_manager import (\n    IO_TYPE_EXTENSION,\n    LocalComputeLogManager,\n)\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData\nfrom dagster._utils import ensure_dir, ensure_file\nfrom typing_extensions import Self\n\nfrom .utils import create_blob_client, generate_blob_sas\n\n\n
[docs]class AzureBlobComputeLogManager(CloudStorageComputeLogManager, ConfigurableClass):\n """Logs op compute function stdout and stderr to Azure Blob Storage.\n\n This is also compatible with Azure Data Lake Storage.\n\n Users should not instantiate this class directly. Instead, use a YAML block in ``dagster.yaml``\n such as the following:\n\n .. code-block:: YAML\n\n compute_logs:\n module: dagster_azure.blob.compute_log_manager\n class: AzureBlobComputeLogManager\n config:\n storage_account: my-storage-account\n container: my-container\n credential: sas-token-or-secret-key\n default_azure_credential:\n exclude_environment_credential: true\n prefix: "dagster-test-"\n local_dir: "/tmp/cool"\n upload_interval: 30\n\n Args:\n storage_account (str): The storage account name to which to log.\n container (str): The container (or ADLS2 filesystem) to which to log.\n secret_key (Optional[str]): Secret key for the storage account. SAS tokens are not\n supported because we need a secret key to generate a SAS token for a download URL.\n default_azure_credential (Optional[dict]): Use and configure DefaultAzureCredential.\n Cannot be used with sas token or secret key config.\n local_dir (Optional[str]): Path to the local directory in which to stage logs. Default:\n ``dagster._seven.get_system_temp_directory()``.\n prefix (Optional[str]): Prefix for the log file keys.\n upload_interval: (Optional[int]): Interval in seconds to upload partial log files blob storage. By default, will only upload when the capture is complete.\n inst_data (Optional[ConfigurableClassData]): Serializable representation of the compute\n log manager when newed up from config.\n """\n\n def __init__(\n self,\n storage_account,\n container,\n secret_key=None,\n local_dir=None,\n inst_data: Optional[ConfigurableClassData] = None,\n prefix="dagster",\n upload_interval=None,\n default_azure_credential=None,\n ):\n self._storage_account = check.str_param(storage_account, "storage_account")\n self._container = check.str_param(container, "container")\n self._blob_prefix = self._clean_prefix(check.str_param(prefix, "prefix"))\n self._default_azure_credential = check.opt_dict_param(\n default_azure_credential, "default_azure_credential"\n )\n check.opt_str_param(secret_key, "secret_key")\n check.invariant(\n secret_key is not None or default_azure_credential is not None,\n "Missing config: need to provide one of secret_key or default_azure_credential",\n )\n\n if default_azure_credential is None:\n self._blob_client = create_blob_client(storage_account, secret_key)\n else:\n credential = DefaultAzureCredential(**self._default_azure_credential)\n self._blob_client = create_blob_client(storage_account, credential)\n\n self._container_client = self._blob_client.get_container_client(container)\n self._download_urls = {}\n\n # proxy calls to local compute log manager (for subscriptions, etc)\n if not local_dir:\n local_dir = seven.get_system_temp_directory()\n\n self._local_manager = LocalComputeLogManager(local_dir)\n self._subscription_manager = PollingComputeLogSubscriptionManager(self)\n self._upload_interval = check.opt_int_param(upload_interval, "upload_interval")\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n\n @contextmanager\n def _watch_logs(self, dagster_run, step_key=None):\n # proxy watching to the local compute log manager, interacting with the filesystem\n with self.local_manager._watch_logs(dagster_run, step_key): # noqa: SLF001\n yield\n\n @property\n def inst_data(self):\n return 
self._inst_data\n\n @classmethod\n def config_type(cls):\n return {\n "storage_account": StringSource,\n "container": StringSource,\n "secret_key": Field(StringSource, is_required=False),\n "default_azure_credential": Field(\n Noneable(Permissive(description="keyword arguments for DefaultAzureCredential")),\n is_required=False,\n default_value=None,\n ),\n "local_dir": Field(StringSource, is_required=False),\n "prefix": Field(StringSource, is_required=False, default_value="dagster"),\n "upload_interval": Field(Noneable(int), is_required=False, default_value=None),\n }\n\n @classmethod\n def from_config_value(\n cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any]\n ) -> Self:\n return AzureBlobComputeLogManager(inst_data=inst_data, **config_value)\n\n @property\n def local_manager(self) -> LocalComputeLogManager:\n return self._local_manager\n\n @property\n def upload_interval(self) -> Optional[int]:\n return self._upload_interval if self._upload_interval else None\n\n def _clean_prefix(self, prefix):\n parts = prefix.split("/")\n return "/".join([part for part in parts if part])\n\n def _blob_key(self, log_key, io_type, partial=False):\n check.inst_param(io_type, "io_type", ComputeIOType)\n extension = IO_TYPE_EXTENSION[io_type]\n [*namespace, filebase] = log_key\n filename = f"{filebase}.{extension}"\n if partial:\n filename = f"{filename}.partial"\n paths = [self._blob_prefix, "storage", *namespace, filename]\n return "/".join(paths) # blob path delimiter\n\n def delete_logs(\n self, log_key: Optional[Sequence[str]] = None, prefix: Optional[Sequence[str]] = None\n ):\n self.local_manager.delete_logs(log_key=log_key, prefix=prefix)\n if log_key:\n prefix_path = "/".join([self._blob_prefix, "storage", *log_key])\n elif prefix:\n # add the trailing '/' to make sure that ['a'] does not match ['apple']\n prefix_path = "/".join([self._blob_prefix, "storage", *prefix, ""])\n else:\n prefix_path = None\n\n blob_list = {\n b.name for b in list(self._container_client.list_blobs(name_starts_with=prefix_path))\n }\n\n to_remove = None\n if log_key:\n # filter to the known set of keys\n known_keys = [\n self._blob_key(log_key, ComputeIOType.STDOUT),\n self._blob_key(log_key, ComputeIOType.STDERR),\n self._blob_key(log_key, ComputeIOType.STDOUT, partial=True),\n self._blob_key(log_key, ComputeIOType.STDERR, partial=True),\n ]\n to_remove = [key for key in known_keys if key in blob_list]\n elif prefix:\n to_remove = list(blob_list)\n else:\n check.failed("Must pass in either `log_key` or `prefix` argument to delete_logs")\n\n if to_remove:\n self._container_client.delete_blobs(*to_remove)\n\n def download_url_for_type(self, log_key: Sequence[str], io_type: ComputeIOType):\n if not self.is_capture_complete(log_key):\n return None\n\n blob_key = self._blob_key(log_key, io_type)\n if blob_key in self._download_urls:\n return self._download_urls[blob_key]\n blob = self._container_client.get_blob_client(blob_key)\n sas = generate_blob_sas(\n self._storage_account,\n self._container,\n blob_key,\n account_key=self._blob_client.credential.account_key,\n )\n url = blob.url + sas\n self._download_urls[blob_key] = url\n return url\n\n def display_path_for_type(self, log_key: Sequence[str], io_type: ComputeIOType):\n if not self.is_capture_complete(log_key):\n return self.local_manager.get_captured_local_path(log_key, IO_TYPE_EXTENSION[io_type])\n\n blob_key = self._blob_key(log_key, io_type)\n return f"https://{self._storage_account}.blob.core.windows.net/{self._container}/{blob_key}"\n\n def 
cloud_storage_has_logs(\n self, log_key: Sequence[str], io_type: ComputeIOType, partial: bool = False\n ) -> bool:\n blob_key = self._blob_key(log_key, io_type, partial=partial)\n blob_objects = self._container_client.list_blobs(blob_key)\n exact_matches = [blob for blob in blob_objects if blob.name == blob_key]\n return len(exact_matches) > 0\n\n def upload_to_cloud_storage(\n self, log_key: Sequence[str], io_type: ComputeIOType, partial=False\n ):\n path = self.local_manager.get_captured_local_path(log_key, IO_TYPE_EXTENSION[io_type])\n ensure_file(path)\n blob_key = self._blob_key(log_key, io_type, partial=partial)\n with open(path, "rb") as data:\n blob = self._container_client.get_blob_client(blob_key)\n blob.upload_blob(data)\n\n def download_from_cloud_storage(\n self, log_key: Sequence[str], io_type: ComputeIOType, partial=False\n ):\n path = self.local_manager.get_captured_local_path(\n log_key, IO_TYPE_EXTENSION[io_type], partial=partial\n )\n ensure_dir(os.path.dirname(path))\n blob_key = self._blob_key(log_key, io_type, partial=partial)\n with open(path, "wb") as fileobj:\n blob = self._container_client.get_blob_client(blob_key)\n blob.download_blob().readinto(fileobj)\n\n def on_subscribe(self, subscription):\n self._subscription_manager.add_subscription(subscription)\n\n def on_unsubscribe(self, subscription):\n self._subscription_manager.remove_subscription(subscription)
\n
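This class is normally configured through ``dagster.yaml`` as shown in the docstring above. The snippet below is only an illustrative direct construction that mirrors the ``__init__`` signature, with placeholder values.

.. code-block:: python

    from dagster_azure.blob.compute_log_manager import AzureBlobComputeLogManager

    compute_log_manager = AzureBlobComputeLogManager(
        storage_account="my-storage-account",
        container="my-container",
        secret_key="my-secret-key",  # or pass default_azure_credential={...} instead
        prefix="dagster-test-",
        local_dir="/tmp/cool",
        upload_interval=30,  # upload partial logs every 30 seconds
    )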
", "current_page_name": "_modules/dagster_azure/blob/compute_log_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_azure.blob.compute_log_manager"}}}, "dagster_celery": {"executor": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_celery.executor

\nfrom dagster import (\n    Executor,\n    Field,\n    Noneable,\n    Permissive,\n    StringSource,\n    _check as check,\n    executor,\n    multiple_process_executor_requirements,\n)\nfrom dagster._core.execution.retries import RetryMode, get_retries_config\nfrom dagster._grpc.types import ExecuteStepArgs\nfrom dagster._serdes import pack_value\n\nfrom .config import DEFAULT_CONFIG, dict_wrapper\nfrom .defaults import broker_url, result_backend\n\nCELERY_CONFIG = {\n    "broker": Field(\n        Noneable(StringSource),\n        is_required=False,\n        description=(\n            "The URL of the Celery broker. Default: "\n            "'pyamqp://guest@{os.getenv('DAGSTER_CELERY_BROKER_HOST',"\n            "'localhost')}//'."\n        ),\n    ),\n    "backend": Field(\n        Noneable(StringSource),\n        is_required=False,\n        default_value="rpc://",\n        description="The URL of the Celery results backend. Default: 'rpc://'.",\n    ),\n    "include": Field(\n        [str], is_required=False, description="List of modules every worker should import"\n    ),\n    "config_source": Field(\n        Noneable(Permissive()),\n        is_required=False,\n        description="Additional settings for the Celery app.",\n    ),\n    "retries": get_retries_config(),\n}\n\n\n
[docs]@executor(\n name="celery",\n config_schema=CELERY_CONFIG,\n requirements=multiple_process_executor_requirements(),\n)\ndef celery_executor(init_context):\n """Celery-based executor.\n\n The Celery executor exposes config settings for the underlying Celery app under\n the ``config_source`` key. This config corresponds to the "new lowercase settings" introduced\n in Celery version 4.0 and the object constructed from config will be passed to the\n :py:class:`celery.Celery` constructor as its ``config_source`` argument.\n (See https://docs.celeryq.dev/en/stable/userguide/configuration.html for details.)\n\n The executor also exposes the ``broker``, `backend`, and ``include`` arguments to the\n :py:class:`celery.Celery` constructor.\n\n In the most common case, you may want to modify the ``broker`` and ``backend`` (e.g., to use\n Redis instead of RabbitMQ). We expect that ``config_source`` will be less frequently\n modified, but that when solid executions are especially fast or slow, or when there are\n different requirements around idempotence or retry, it may make sense to execute jobs\n with variations on these settings.\n\n To use the `celery_executor`, set it as the `executor_def` when defining a job:\n\n .. code-block:: python\n\n from dagster import job\n from dagster_celery import celery_executor\n\n @job(executor_def=celery_executor)\n def celery_enabled_job():\n pass\n\n Then you can configure the executor as follows:\n\n .. code-block:: YAML\n\n execution:\n config:\n broker: 'pyamqp://guest@localhost//' # Optional[str]: The URL of the Celery broker\n backend: 'rpc://' # Optional[str]: The URL of the Celery results backend\n include: ['my_module'] # Optional[List[str]]: Modules every worker should import\n config_source: # Dict[str, Any]: Any additional parameters to pass to the\n #... # Celery workers. This dict will be passed as the `config_source`\n #... # argument of celery.Celery().\n\n Note that the YAML you provide here must align with the configuration with which the Celery\n workers on which you hope to run were started. If, for example, you point the executor at a\n different broker than the one your workers are listening to, the workers will never be able to\n pick up tasks for execution.\n """\n return CeleryExecutor(\n broker=init_context.executor_config.get("broker"),\n backend=init_context.executor_config.get("backend"),\n config_source=init_context.executor_config.get("config_source"),\n include=init_context.executor_config.get("include"),\n retries=RetryMode.from_config(init_context.executor_config["retries"]),\n )
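The YAML above can equivalently be supplied as a Python run-config dictionary, for example through the UI Launchpad or wherever run config is provided. The broker, backend, and module names below are placeholders and must match how your Celery workers were started.

.. code-block:: python

    run_config = {
        "execution": {
            "config": {
                "broker": "pyamqp://guest@localhost//",
                "backend": "rpc://",
                "include": ["my_module"],
            }
        }
    }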
\n\n\ndef _submit_task(app, plan_context, step, queue, priority, known_state):\n from .tasks import create_task\n\n execute_step_args = ExecuteStepArgs(\n job_origin=plan_context.reconstructable_job.get_python_origin(),\n run_id=plan_context.dagster_run.run_id,\n step_keys_to_execute=[step.key],\n instance_ref=plan_context.instance.get_ref(),\n retry_mode=plan_context.executor.retries.for_inner_plan(),\n known_state=known_state,\n print_serialized_events=True, # Not actually checked by the celery task\n )\n\n task = create_task(app)\n task_signature = task.si(\n execute_step_args_packed=pack_value(execute_step_args),\n executable_dict=plan_context.reconstructable_job.to_dict(),\n )\n return task_signature.apply_async(\n priority=priority,\n queue=queue,\n routing_key=f"{queue}.execute_plan",\n )\n\n\nclass CeleryExecutor(Executor):\n def __init__(\n self,\n retries,\n broker=None,\n backend=None,\n include=None,\n config_source=None,\n ):\n self.broker = check.opt_str_param(broker, "broker", default=broker_url)\n self.backend = check.opt_str_param(backend, "backend", default=result_backend)\n self.include = check.opt_list_param(include, "include", of_type=str)\n self.config_source = dict_wrapper(\n dict(DEFAULT_CONFIG, **check.opt_dict_param(config_source, "config_source"))\n )\n self._retries = check.inst_param(retries, "retries", RetryMode)\n\n @property\n def retries(self):\n return self._retries\n\n def execute(self, plan_context, execution_plan):\n from .core_execution_loop import core_celery_execution_loop\n\n return core_celery_execution_loop(\n plan_context, execution_plan, step_execution_fn=_submit_task\n )\n\n @staticmethod\n def for_cli(broker=None, backend=None, include=None, config_source=None):\n return CeleryExecutor(\n retries=RetryMode(RetryMode.DISABLED),\n broker=broker,\n backend=backend,\n include=include,\n config_source=config_source,\n )\n\n def app_args(self):\n return {\n "broker": self.broker,\n "backend": self.backend,\n "include": self.include,\n "config_source": self.config_source,\n "retries": self.retries,\n }\n
", "current_page_name": "_modules/dagster_celery/executor", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_celery.executor"}}, "dagster_celery_docker": {"executor": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_celery_docker.executor

\nimport os\n\nimport docker.client\nfrom dagster import (\n    DagsterInstance,\n    Executor,\n    Field,\n    Permissive,\n    StringSource,\n    _check as check,\n    executor,\n    multiple_process_executor_requirements,\n)\nfrom dagster._cli.api import ExecuteStepArgs\nfrom dagster._core.events import EngineEventData\nfrom dagster._core.events.utils import filter_dagster_events_from_cli_logs\nfrom dagster._core.execution.retries import RetryMode\nfrom dagster._core.storage.dagster_run import DagsterRun\nfrom dagster._serdes import pack_value, serialize_value, unpack_value\nfrom dagster._utils.merger import merge_dicts\nfrom dagster_celery.config import DEFAULT_CONFIG, dict_wrapper\nfrom dagster_celery.core_execution_loop import DELEGATE_MARKER, core_celery_execution_loop\nfrom dagster_celery.defaults import broker_url, result_backend\nfrom dagster_celery.executor import CELERY_CONFIG\n\nCELERY_DOCKER_CONFIG_KEY = "celery-docker"\n\n\ndef celery_docker_config():\n    additional_config = {\n        "docker": Field(\n            {\n                "image": Field(\n                    StringSource,\n                    is_required=False,\n                    description="The docker image to be used for step execution.",\n                ),\n                "registry": Field(\n                    {\n                        "url": Field(StringSource),\n                        "username": Field(StringSource),\n                        "password": Field(StringSource),\n                    },\n                    is_required=False,\n                    description="Information for using a non local/public docker registry",\n                ),\n                "env_vars": Field(\n                    [str],\n                    is_required=False,\n                    description=(\n                        "The list of environment variables names to forward from the celery worker"\n                        " in to the docker container"\n                    ),\n                ),\n                "network": Field(\n                    str,\n                    is_required=False,\n                    description=(\n                        "Name of the network this container will be connected to at creation time"\n                    ),\n                ),\n                "container_kwargs": Field(\n                    Permissive(),\n                    is_required=False,\n                    description="Additional keyword args for the docker container",\n                ),\n            },\n            is_required=True,\n            description="The configuration for interacting with docker in the celery worker.",\n        ),\n    }\n\n    cfg = merge_dicts(CELERY_CONFIG, additional_config)\n    return cfg\n\n\n
[docs]@executor(\n name=CELERY_DOCKER_CONFIG_KEY,\n config_schema=celery_docker_config(),\n requirements=multiple_process_executor_requirements(),\n)\ndef celery_docker_executor(init_context):\n """Celery-based executor which launches tasks in docker containers.\n\n The Celery executor exposes config settings for the underlying Celery app under\n the ``config_source`` key. This config corresponds to the "new lowercase settings" introduced\n in Celery version 4.0 and the object constructed from config will be passed to the\n :py:class:`celery.Celery` constructor as its ``config_source`` argument.\n (See https://docs.celeryq.dev/en/stable/userguide/configuration.html for details.)\n\n The executor also exposes the ``broker``, `backend`, and ``include`` arguments to the\n :py:class:`celery.Celery` constructor.\n\n In the most common case, you may want to modify the ``broker`` and ``backend`` (e.g., to use\n Redis instead of RabbitMQ). We expect that ``config_source`` will be less frequently\n modified, but that when op executions are especially fast or slow, or when there are\n different requirements around idempotence or retry, it may make sense to execute jobs\n with variations on these settings.\n\n To use the `celery_docker_executor`, set it as the `executor_def` when defining a job:\n\n .. code-block:: python\n\n from dagster import job\n from dagster_celery_docker.executor import celery_docker_executor\n\n @job(executor_def=celery_docker_executor)\n def celery_enabled_job():\n pass\n\n Then you can configure the executor as follows:\n\n .. code-block:: YAML\n\n execution:\n config:\n docker:\n image: 'my_repo.com/image_name:latest'\n registry:\n url: 'my_repo.com'\n username: 'my_user'\n password: {env: 'DOCKER_PASSWORD'}\n env_vars: ["DAGSTER_HOME"] # environment vars to pass from celery worker to docker\n container_kwargs: # keyword args to be passed to the container. example:\n volumes: ['/home/user1/:/mnt/vol2','/var/www:/mnt/vol1']\n\n broker: 'pyamqp://guest@localhost//' # Optional[str]: The URL of the Celery broker\n backend: 'rpc://' # Optional[str]: The URL of the Celery results backend\n include: ['my_module'] # Optional[List[str]]: Modules every worker should import\n config_source: # Dict[str, Any]: Any additional parameters to pass to the\n #... # Celery workers. This dict will be passed as the `config_source`\n #... # argument of celery.Celery().\n\n Note that the YAML you provide here must align with the configuration with which the Celery\n workers on which you hope to run were started. If, for example, you point the executor at a\n different broker than the one your workers are listening to, the workers will never be able to\n pick up tasks for execution.\n\n In deployments where the celery_docker_job_executor is used all appropriate celery and dagster_celery\n commands must be invoked with the `-A dagster_celery_docker.app` argument.\n """\n exc_cfg = init_context.executor_config\n\n return CeleryDockerExecutor(\n broker=exc_cfg.get("broker"),\n backend=exc_cfg.get("backend"),\n config_source=exc_cfg.get("config_source"),\n include=exc_cfg.get("include"),\n retries=RetryMode.from_config(exc_cfg.get("retries")),\n docker_config=exc_cfg.get("docker"),\n )
\n\n\nclass CeleryDockerExecutor(Executor):\n def __init__(\n self,\n retries,\n docker_config,\n broker=None,\n backend=None,\n include=None,\n config_source=None,\n ):\n self._retries = check.inst_param(retries, "retries", RetryMode)\n self.broker = check.opt_str_param(broker, "broker", default=broker_url)\n self.backend = check.opt_str_param(backend, "backend", default=result_backend)\n self.include = check.opt_list_param(include, "include", of_type=str)\n self.config_source = dict_wrapper(\n dict(DEFAULT_CONFIG, **check.opt_dict_param(config_source, "config_source"))\n )\n self.docker_config = check.dict_param(docker_config, "docker_config")\n\n @property\n def retries(self):\n return self._retries\n\n def execute(self, plan_context, execution_plan):\n return core_celery_execution_loop(\n plan_context, execution_plan, step_execution_fn=_submit_task_docker\n )\n\n def app_args(self):\n return {\n "broker": self.broker,\n "backend": self.backend,\n "include": self.include,\n "config_source": self.config_source,\n "retries": self.retries,\n }\n\n\ndef _submit_task_docker(app, plan_context, step, queue, priority, known_state):\n execute_step_args = ExecuteStepArgs(\n job_origin=plan_context.reconstructable_job.get_python_origin(),\n run_id=plan_context.dagster_run.run_id,\n step_keys_to_execute=[step.key],\n instance_ref=plan_context.instance.get_ref(),\n retry_mode=plan_context.executor.retries.for_inner_plan(),\n known_state=known_state,\n print_serialized_events=True,\n )\n\n task = create_docker_task(app)\n task_signature = task.si(\n execute_step_args_packed=pack_value(execute_step_args),\n docker_config=plan_context.executor.docker_config,\n )\n return task_signature.apply_async(\n priority=priority,\n queue=queue,\n routing_key=f"{queue}.execute_step_docker",\n )\n\n\ndef create_docker_task(celery_app, **task_kwargs):\n @celery_app.task(bind=True, name="execute_step_docker", **task_kwargs)\n def _execute_step_docker(\n self,\n execute_step_args_packed,\n docker_config,\n ):\n """Run step execution in a Docker container."""\n execute_step_args = unpack_value(\n check.dict_param(\n execute_step_args_packed,\n "execute_step_args_packed",\n ),\n as_type=ExecuteStepArgs,\n )\n\n check.dict_param(docker_config, "docker_config")\n\n instance = DagsterInstance.from_ref(execute_step_args.instance_ref)\n dagster_run = instance.get_run_by_id(execute_step_args.run_id)\n check.inst(\n dagster_run,\n DagsterRun,\n f"Could not load run {execute_step_args.run_id}",\n )\n step_keys_str = ", ".join(execute_step_args.step_keys_to_execute)\n\n docker_image = (\n docker_config["image"]\n if docker_config.get("image")\n else dagster_run.job_code_origin.repository_origin.container_image\n )\n\n if not docker_image:\n raise Exception("No docker image specified by either the job or the repository")\n\n client = docker.client.from_env()\n\n if docker_config.get("registry"):\n client.login(\n registry=docker_config["registry"]["url"],\n username=docker_config["registry"]["username"],\n password=docker_config["registry"]["password"],\n )\n\n # Post event for starting execution\n engine_event = instance.report_engine_event(\n f"Executing steps {step_keys_str} in Docker container {docker_image}",\n dagster_run,\n EngineEventData(\n {\n "Step keys": step_keys_str,\n "Image": docker_image,\n "Celery worker": self.request.hostname,\n },\n marker_end=DELEGATE_MARKER,\n ),\n CeleryDockerExecutor,\n step_key=execute_step_args.step_keys_to_execute[0],\n )\n\n serialized_events = [serialize_value(engine_event)]\n\n 
docker_env = {}\n if docker_config.get("env_vars"):\n docker_env = {env_name: os.getenv(env_name) for env_name in docker_config["env_vars"]}\n\n container_kwargs = check.opt_dict_param(\n docker_config.get("container_kwargs"), "container_kwargs", key_type=str\n )\n\n # set defaults for detach and auto_remove\n container_kwargs["detach"] = container_kwargs.get("detach", False)\n container_kwargs["auto_remove"] = container_kwargs.get("auto_remove", True)\n\n # if environment variables are provided via container_kwargs, merge with env_vars\n if container_kwargs.get("environment") is not None:\n e_vars = container_kwargs.get("environment")\n if isinstance(e_vars, dict):\n docker_env.update(e_vars)\n else:\n for v in e_vars:\n key, val = v.split("=")\n docker_env[key] = val\n del container_kwargs["environment"]\n\n try:\n docker_response = client.containers.run(\n docker_image,\n command=execute_step_args.get_command_args(),\n # pass through this worker's environment for things like AWS creds etc.\n environment=docker_env,\n network=docker_config.get("network", None),\n **container_kwargs,\n )\n\n res = docker_response.decode("utf-8")\n except docker.errors.ContainerError as err:\n metadata = {"Job image": docker_image}\n if err.stderr is not None:\n metadata["Docker stderr"] = err.stderr\n\n instance.report_engine_event(\n f"Failed to run steps {step_keys_str} in Docker container {docker_image}",\n dagster_run,\n EngineEventData(metadata),\n CeleryDockerExecutor,\n step_key=execute_step_args.step_keys_to_execute[0],\n )\n raise\n else:\n if res is None:\n raise Exception("No response from execute_step in CeleryDockerExecutor")\n\n events = filter_dagster_events_from_cli_logs(res.split("\\n"))\n serialized_events += [serialize_value(event) for event in events]\n\n return serialized_events\n\n return _execute_step_docker\n
", "current_page_name": "_modules/dagster_celery_docker/executor", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_celery_docker.executor"}}, "dagster_celery_k8s": {"executor": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_celery_k8s.executor

\nimport logging\nimport os\nimport sys\nimport time\n\nimport kubernetes\nfrom dagster import (\n    DagsterEvent,\n    DagsterEventType,\n    DagsterInstance,\n    Executor,\n    _check as check,\n    executor,\n    multiple_process_executor_requirements,\n)\nfrom dagster._cli.api import ExecuteStepArgs\nfrom dagster._core.errors import DagsterUnmetExecutorRequirementsError\nfrom dagster._core.events import EngineEventData\nfrom dagster._core.events.log import EventLogEntry\nfrom dagster._core.events.utils import filter_dagster_events_from_cli_logs\nfrom dagster._core.execution.plan.objects import StepFailureData, UserFailureData\nfrom dagster._core.execution.retries import RetryMode\nfrom dagster._core.storage.dagster_run import DagsterRun, DagsterRunStatus\nfrom dagster._serdes import pack_value, serialize_value, unpack_value\nfrom dagster._utils.error import serializable_error_info_from_exc_info\nfrom dagster_celery.config import DEFAULT_CONFIG, dict_wrapper\nfrom dagster_celery.core_execution_loop import DELEGATE_MARKER\nfrom dagster_celery.defaults import broker_url, result_backend\nfrom dagster_k8s import DagsterK8sJobConfig, construct_dagster_k8s_job\nfrom dagster_k8s.client import (\n    DagsterK8sAPIRetryLimitExceeded,\n    DagsterK8sError,\n    DagsterK8sJobStatusException,\n    DagsterK8sTimeoutError,\n    DagsterK8sUnrecoverableAPIError,\n    DagsterKubernetesClient,\n)\nfrom dagster_k8s.job import (\n    UserDefinedDagsterK8sConfig,\n    get_k8s_job_name,\n    get_user_defined_k8s_config,\n)\n\nfrom .config import CELERY_K8S_CONFIG_KEY, celery_k8s_executor_config\nfrom .launcher import CeleryK8sRunLauncher\n\n\n
[docs]@executor(\n name=CELERY_K8S_CONFIG_KEY,\n config_schema=celery_k8s_executor_config(),\n requirements=multiple_process_executor_requirements(),\n)\ndef celery_k8s_job_executor(init_context):\n """Celery-based executor which launches tasks as Kubernetes Jobs.\n\n The Celery executor exposes config settings for the underlying Celery app under\n the ``config_source`` key. This config corresponds to the "new lowercase settings" introduced\n in Celery version 4.0 and the object constructed from config will be passed to the\n :py:class:`celery.Celery` constructor as its ``config_source`` argument.\n (See https://docs.celeryq.dev/en/stable/userguide/configuration.html for details.)\n\n The executor also exposes the ``broker``, `backend`, and ``include`` arguments to the\n :py:class:`celery.Celery` constructor.\n\n In the most common case, you may want to modify the ``broker`` and ``backend`` (e.g., to use\n Redis instead of RabbitMQ). We expect that ``config_source`` will be less frequently\n modified, but that when op executions are especially fast or slow, or when there are\n different requirements around idempotence or retry, it may make sense to execute dagster jobs\n with variations on these settings.\n\n To use the `celery_k8s_job_executor`, set it as the `executor_def` when defining a job:\n\n .. literalinclude:: ../../../../../../python_modules/libraries/dagster-celery-k8s/dagster_celery_k8s_tests/example_celery_mode_def.py\n :language: python\n\n Then you can configure the executor as follows:\n\n .. code-block:: YAML\n\n execution:\n config:\n job_image: 'my_repo.com/image_name:latest'\n job_namespace: 'some-namespace'\n broker: 'pyamqp://guest@localhost//' # Optional[str]: The URL of the Celery broker\n backend: 'rpc://' # Optional[str]: The URL of the Celery results backend\n include: ['my_module'] # Optional[List[str]]: Modules every worker should import\n config_source: # Dict[str, Any]: Any additional parameters to pass to the\n #... # Celery workers. This dict will be passed as the `config_source`\n #... # argument of celery.Celery().\n\n Note that the YAML you provide here must align with the configuration with which the Celery\n workers on which you hope to run were started. 
If, for example, you point the executor at a\n different broker than the one your workers are listening to, the workers will never be able to\n pick up tasks for execution.\n\n In deployments where the celery_k8s_job_executor is used all appropriate celery and dagster_celery\n commands must be invoked with the `-A dagster_celery_k8s.app` argument.\n """\n run_launcher = init_context.instance.run_launcher\n exc_cfg = init_context.executor_config\n\n if not isinstance(run_launcher, CeleryK8sRunLauncher):\n raise DagsterUnmetExecutorRequirementsError(\n "This engine is only compatible with a CeleryK8sRunLauncher; configure the "\n "CeleryK8sRunLauncher on your instance to use it.",\n )\n\n job_config = run_launcher.get_k8s_job_config(\n job_image=exc_cfg.get("job_image") or os.getenv("DAGSTER_CURRENT_IMAGE"), exc_config=exc_cfg\n )\n\n # Set on the instance but overrideable here\n broker = run_launcher.broker or exc_cfg.get("broker")\n backend = run_launcher.backend or exc_cfg.get("backend")\n config_source = run_launcher.config_source or exc_cfg.get("config_source")\n include = run_launcher.include or exc_cfg.get("include")\n retries = run_launcher.retries or RetryMode.from_config(exc_cfg.get("retries"))\n\n return CeleryK8sJobExecutor(\n broker=broker,\n backend=backend,\n config_source=config_source,\n include=include,\n retries=retries,\n job_config=job_config,\n job_namespace=exc_cfg.get("job_namespace", run_launcher.job_namespace),\n load_incluster_config=exc_cfg.get("load_incluster_config"),\n kubeconfig_file=exc_cfg.get("kubeconfig_file"),\n repo_location_name=exc_cfg.get("repo_location_name"),\n job_wait_timeout=exc_cfg.get("job_wait_timeout"),\n )
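The ``literalinclude`` referenced in the docstring is not rendered in this page body; as a minimal stand-in, attaching the executor to a job looks like the sketch below. The job name is a placeholder, and the image, namespace, and broker settings are supplied at runtime as in the YAML above.

.. code-block:: python

    from dagster import job
    from dagster_celery_k8s import celery_k8s_job_executor

    @job(executor_def=celery_k8s_job_executor)
    def celery_k8s_enabled_job():
        ...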
\n\n\nclass CeleryK8sJobExecutor(Executor):\n def __init__(\n self,\n retries,\n broker=None,\n backend=None,\n include=None,\n config_source=None,\n job_config=None,\n job_namespace=None,\n load_incluster_config=False,\n kubeconfig_file=None,\n repo_location_name=None,\n job_wait_timeout=None,\n ):\n if load_incluster_config:\n check.invariant(\n kubeconfig_file is None,\n "`kubeconfig_file` is set but `load_incluster_config` is True.",\n )\n else:\n check.opt_str_param(kubeconfig_file, "kubeconfig_file")\n\n self._retries = check.inst_param(retries, "retries", RetryMode)\n self.broker = check.opt_str_param(broker, "broker", default=broker_url)\n self.backend = check.opt_str_param(backend, "backend", default=result_backend)\n self.include = check.opt_list_param(include, "include", of_type=str)\n self.config_source = dict_wrapper(\n dict(DEFAULT_CONFIG, **check.opt_dict_param(config_source, "config_source"))\n )\n self.job_config = check.inst_param(job_config, "job_config", DagsterK8sJobConfig)\n self.job_namespace = check.opt_str_param(job_namespace, "job_namespace")\n\n self.load_incluster_config = check.bool_param(\n load_incluster_config, "load_incluster_config"\n )\n\n self.kubeconfig_file = check.opt_str_param(kubeconfig_file, "kubeconfig_file")\n self.repo_location_name = check.opt_str_param(repo_location_name, "repo_location_name")\n self.job_wait_timeout = check.float_param(job_wait_timeout, "job_wait_timeout")\n\n @property\n def retries(self):\n return self._retries\n\n def execute(self, plan_context, execution_plan):\n from dagster_celery.core_execution_loop import core_celery_execution_loop\n\n return core_celery_execution_loop(\n plan_context, execution_plan, step_execution_fn=_submit_task_k8s_job\n )\n\n def app_args(self):\n return {\n "broker": self.broker,\n "backend": self.backend,\n "include": self.include,\n "config_source": self.config_source,\n "retries": self.retries,\n }\n\n\ndef _submit_task_k8s_job(app, plan_context, step, queue, priority, known_state):\n user_defined_k8s_config = get_user_defined_k8s_config(step.tags)\n\n job_origin = plan_context.reconstructable_job.get_python_origin()\n\n execute_step_args = ExecuteStepArgs(\n job_origin=job_origin,\n run_id=plan_context.dagster_run.run_id,\n step_keys_to_execute=[step.key],\n instance_ref=plan_context.instance.get_ref(),\n retry_mode=plan_context.executor.retries.for_inner_plan(),\n known_state=known_state,\n should_verify_step=True,\n print_serialized_events=True,\n )\n\n job_config = plan_context.executor.job_config\n if not job_config.job_image:\n job_config = job_config.with_image(job_origin.repository_origin.container_image)\n\n if not job_config.job_image:\n raise Exception("No image included in either executor config or the dagster job")\n\n task = create_k8s_job_task(app)\n task_signature = task.si(\n execute_step_args_packed=pack_value(execute_step_args),\n job_config_dict=job_config.to_dict(),\n job_namespace=plan_context.executor.job_namespace,\n user_defined_k8s_config_dict=user_defined_k8s_config.to_dict(),\n load_incluster_config=plan_context.executor.load_incluster_config,\n job_wait_timeout=plan_context.executor.job_wait_timeout,\n kubeconfig_file=plan_context.executor.kubeconfig_file,\n )\n\n return task_signature.apply_async(\n priority=priority,\n queue=queue,\n routing_key=f"{queue}.execute_step_k8s_job",\n )\n\n\ndef construct_step_failure_event_and_handle(dagster_run, step_key, err, instance):\n step_failure_event = DagsterEvent(\n event_type_value=DagsterEventType.STEP_FAILURE.value,\n 
job_name=dagster_run.job_name,\n step_key=step_key,\n event_specific_data=StepFailureData(\n error=serializable_error_info_from_exc_info(sys.exc_info()),\n user_failure_data=UserFailureData(label="K8sError"),\n ),\n )\n event_record = EventLogEntry(\n user_message=str(err),\n level=logging.ERROR,\n job_name=dagster_run.job_name,\n run_id=dagster_run.run_id,\n error_info=None,\n step_key=step_key,\n timestamp=time.time(),\n dagster_event=step_failure_event,\n )\n instance.handle_new_event(event_record)\n return step_failure_event\n\n\ndef create_k8s_job_task(celery_app, **task_kwargs):\n @celery_app.task(bind=True, name="execute_step_k8s_job", **task_kwargs)\n def _execute_step_k8s_job(\n self,\n execute_step_args_packed,\n job_config_dict,\n job_namespace,\n load_incluster_config,\n job_wait_timeout,\n user_defined_k8s_config_dict=None,\n kubeconfig_file=None,\n ):\n """Run step execution in a K8s job pod."""\n execute_step_args = unpack_value(\n check.dict_param(\n execute_step_args_packed,\n "execute_step_args_packed",\n )\n )\n check.inst_param(execute_step_args, "execute_step_args", ExecuteStepArgs)\n check.invariant(\n len(execute_step_args.step_keys_to_execute) == 1,\n "Celery K8s task executor can only execute 1 step at a time",\n )\n\n # Celery will serialize this as a list\n job_config = DagsterK8sJobConfig.from_dict(job_config_dict)\n check.inst_param(job_config, "job_config", DagsterK8sJobConfig)\n check.str_param(job_namespace, "job_namespace")\n\n check.bool_param(load_incluster_config, "load_incluster_config")\n\n user_defined_k8s_config = UserDefinedDagsterK8sConfig.from_dict(\n user_defined_k8s_config_dict\n )\n check.opt_inst_param(\n user_defined_k8s_config,\n "user_defined_k8s_config",\n UserDefinedDagsterK8sConfig,\n )\n check.opt_str_param(kubeconfig_file, "kubeconfig_file")\n\n # For when launched via DinD or running the cluster\n if load_incluster_config:\n kubernetes.config.load_incluster_config()\n else:\n kubernetes.config.load_kube_config(kubeconfig_file)\n\n api_client = DagsterKubernetesClient.production_client()\n instance = DagsterInstance.from_ref(execute_step_args.instance_ref)\n dagster_run = instance.get_run_by_id(execute_step_args.run_id)\n\n check.inst(\n dagster_run,\n DagsterRun,\n f"Could not load run {execute_step_args.run_id}",\n )\n step_key = execute_step_args.step_keys_to_execute[0]\n\n celery_worker_name = self.request.hostname\n celery_pod_name = os.environ.get("HOSTNAME")\n instance.report_engine_event(\n f"Task for step {step_key} picked up by Celery",\n dagster_run,\n EngineEventData(\n {\n "Celery worker name": celery_worker_name,\n "Celery worker Kubernetes Pod name": celery_pod_name,\n }\n ),\n CeleryK8sJobExecutor,\n step_key=step_key,\n )\n\n if dagster_run.status != DagsterRunStatus.STARTED:\n instance.report_engine_event(\n "Not scheduling step because dagster run status is not STARTED",\n dagster_run,\n EngineEventData(\n {\n "Step key": step_key,\n }\n ),\n CeleryK8sJobExecutor,\n step_key=step_key,\n )\n return []\n\n # Ensure we stay below k8s name length limits\n k8s_name_key = get_k8s_job_name(execute_step_args.run_id, step_key)\n\n retry_state = execute_step_args.known_state.get_retry_state()\n\n if retry_state.get_attempt_count(step_key):\n attempt_number = retry_state.get_attempt_count(step_key)\n job_name = "dagster-step-%s-%d" % (k8s_name_key, attempt_number)\n pod_name = "dagster-step-%s-%d" % (k8s_name_key, attempt_number)\n else:\n job_name = "dagster-step-%s" % (k8s_name_key)\n pod_name = "dagster-step-%s" % 
(k8s_name_key)\n\n args = execute_step_args.get_command_args()\n\n labels = {\n "dagster/job": dagster_run.job_name,\n "dagster/op": step_key,\n "dagster/run-id": execute_step_args.run_id,\n }\n if dagster_run.external_job_origin:\n labels["dagster/code-location"] = (\n dagster_run.external_job_origin.external_repository_origin.code_location_origin.location_name\n )\n job = construct_dagster_k8s_job(\n job_config,\n args,\n job_name,\n user_defined_k8s_config,\n pod_name,\n component="step_worker",\n labels=labels,\n env_vars=[\n {\n "name": "DAGSTER_RUN_JOB_NAME",\n "value": dagster_run.job_name,\n },\n {"name": "DAGSTER_RUN_STEP_KEY", "value": step_key},\n ],\n )\n\n # Running list of events generated from this task execution\n events = []\n\n # Post event for starting execution\n job_name = job.metadata.name\n engine_event = instance.report_engine_event(\n f'Executing step "{step_key}" in Kubernetes job {job_name}.',\n dagster_run,\n EngineEventData(\n {\n "Step key": step_key,\n "Kubernetes Job name": job_name,\n "Job image": job_config.job_image,\n "Image pull policy": job_config.image_pull_policy,\n "Image pull secrets": str(job_config.image_pull_secrets),\n "Service account name": str(job_config.service_account_name),\n },\n marker_end=DELEGATE_MARKER,\n ),\n CeleryK8sJobExecutor,\n # validated above that step_keys is length 1, and it is not possible to use ETH or\n # execution plan in this function (Celery K8s workers should not access to user code)\n step_key=step_key,\n )\n events.append(engine_event)\n try:\n api_client.batch_api.create_namespaced_job(body=job, namespace=job_namespace)\n except kubernetes.client.rest.ApiException as e:\n if e.reason == "Conflict":\n # There is an existing job with the same name so proceed and see if the existing job succeeded\n instance.report_engine_event(\n "Did not create Kubernetes job {} for step {} since job name already "\n "exists, proceeding with existing job.".format(job_name, step_key),\n dagster_run,\n EngineEventData(\n {\n "Step key": step_key,\n "Kubernetes Job name": job_name,\n },\n marker_end=DELEGATE_MARKER,\n ),\n CeleryK8sJobExecutor,\n step_key=step_key,\n )\n else:\n instance.report_engine_event(\n "Encountered unexpected error while creating Kubernetes job {} for step {}, "\n "exiting.".format(job_name, step_key),\n dagster_run,\n EngineEventData(\n {\n "Step key": step_key,\n },\n error=serializable_error_info_from_exc_info(sys.exc_info()),\n ),\n CeleryK8sJobExecutor,\n step_key=step_key,\n )\n return []\n\n try:\n api_client.wait_for_job_success(\n job_name=job_name,\n namespace=job_namespace,\n instance=instance,\n run_id=execute_step_args.run_id,\n wait_timeout=job_wait_timeout,\n )\n except (DagsterK8sError, DagsterK8sTimeoutError) as err:\n step_failure_event = construct_step_failure_event_and_handle(\n dagster_run, step_key, err, instance=instance\n )\n events.append(step_failure_event)\n except DagsterK8sJobStatusException:\n instance.report_engine_event(\n "Terminating Kubernetes Job because dagster run status is not STARTED",\n dagster_run,\n EngineEventData(\n {\n "Step key": step_key,\n "Kubernetes Job name": job_name,\n "Kubernetes Job namespace": job_namespace,\n }\n ),\n CeleryK8sJobExecutor,\n step_key=step_key,\n )\n api_client.delete_job(job_name=job_name, namespace=job_namespace)\n return []\n except (\n DagsterK8sUnrecoverableAPIError,\n DagsterK8sAPIRetryLimitExceeded,\n # We shouldn't see unwrapped APIExceptions anymore, as they should all be wrapped in\n # a retry boundary. 
We still catch it here just in case we missed one so that we can\n            # report it to the event log\n            kubernetes.client.rest.ApiException,\n        ):\n            instance.report_engine_event(\n                "Encountered unexpected error while waiting on Kubernetes job {} for step {}, "\n                "exiting.".format(job_name, step_key),\n                dagster_run,\n                EngineEventData(\n                    {\n                        "Step key": step_key,\n                    },\n                    error=serializable_error_info_from_exc_info(sys.exc_info()),\n                ),\n                CeleryK8sJobExecutor,\n                step_key=step_key,\n            )\n            return []\n\n        try:\n            pod_names = api_client.get_pod_names_in_job(job_name, namespace=job_namespace)\n        except kubernetes.client.rest.ApiException:\n            instance.report_engine_event(\n                "Encountered unexpected error retrieving Pods for Kubernetes job {} for step {}, "\n                "exiting.".format(job_name, step_key),\n                dagster_run,\n                EngineEventData(\n                    {\n                        "Step key": step_key,\n                    },\n                    error=serializable_error_info_from_exc_info(sys.exc_info()),\n                ),\n                CeleryK8sJobExecutor,\n                step_key=step_key,\n            )\n            return []\n\n        # Post engine event for log retrieval\n        engine_event = instance.report_engine_event(\n            "Retrieving logs from Kubernetes Job pods",\n            dagster_run,\n            EngineEventData({"Pod names": "\\n".join(pod_names)}),\n            CeleryK8sJobExecutor,\n            step_key=step_key,\n        )\n        events.append(engine_event)\n\n        logs = []\n        for pod_name in pod_names:\n            try:\n                raw_logs = api_client.retrieve_pod_logs(pod_name, namespace=job_namespace)\n                logs += raw_logs.split("\\n")\n            except kubernetes.client.exceptions.ApiException:\n                instance.report_engine_event(\n                    "Encountered unexpected error while fetching pod logs for Kubernetes job {}, "\n                    "Pod name {} for step {}. Will attempt to continue with other pods.".format(\n                        job_name, pod_name, step_key\n                    ),\n                    dagster_run,\n                    EngineEventData(\n                        {\n                            "Step key": step_key,\n                        },\n                        error=serializable_error_info_from_exc_info(sys.exc_info()),\n                    ),\n                    CeleryK8sJobExecutor,\n                    step_key=step_key,\n                )\n\n        events += filter_dagster_events_from_cli_logs(logs)\n        serialized_events = [serialize_value(event) for event in events]\n        return serialized_events\n\n    return _execute_step_k8s_job\n
", "current_page_name": "_modules/dagster_celery_k8s/executor", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_celery_k8s.executor"}, "launcher": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_celery_k8s.launcher

\nimport sys\nfrom typing import Optional, cast\n\nimport kubernetes\nfrom dagster import (\n    DagsterInvariantViolationError,\n    _check as check,\n)\nfrom dagster._config import process_config, resolve_to_config_type\nfrom dagster._core.events import EngineEventData\nfrom dagster._core.execution.retries import RetryMode\nfrom dagster._core.launcher import LaunchRunContext, RunLauncher\nfrom dagster._core.launcher.base import CheckRunHealthResult, WorkerStatus\nfrom dagster._core.origin import JobPythonOrigin\nfrom dagster._core.storage.dagster_run import DagsterRun\nfrom dagster._core.storage.tags import DOCKER_IMAGE_TAG\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData\nfrom dagster._utils.error import serializable_error_info_from_exc_info\nfrom dagster._utils.merger import merge_dicts\nfrom dagster_k8s.client import DagsterKubernetesClient\nfrom dagster_k8s.job import (\n    DagsterK8sJobConfig,\n    construct_dagster_k8s_job,\n    get_job_name_from_run_id,\n    get_user_defined_k8s_config,\n)\n\nfrom .config import CELERY_K8S_CONFIG_KEY, celery_k8s_executor_config\n\n\n
[docs]class CeleryK8sRunLauncher(RunLauncher, ConfigurableClass):\n """In contrast to the :py:class:`K8sRunLauncher`, which launches dagster runs as single K8s\n Jobs, this run launcher is intended for use in concert with\n :py:func:`dagster_celery_k8s.celery_k8s_job_executor`.\n\n With this run launcher, execution is delegated to:\n\n 1. A run worker Kubernetes Job, which traverses the dagster run execution plan and\n submits steps to Celery queues for execution;\n 2. The step executions which are submitted to Celery queues are picked up by Celery workers,\n and each step execution spawns a step execution Kubernetes Job. See the implementation\n defined in :py:func:`dagster_celery_k8.executor.create_k8s_job_task`.\n\n You can configure a Dagster instance to use this RunLauncher by adding a section to your\n ``dagster.yaml`` like the following:\n\n .. code-block:: yaml\n\n run_launcher:\n module: dagster_k8s.launcher\n class: CeleryK8sRunLauncher\n config:\n instance_config_map: "dagster-k8s-instance-config-map"\n dagster_home: "/some/path"\n postgres_password_secret: "dagster-k8s-pg-password"\n broker: "some_celery_broker_url"\n backend: "some_celery_backend_url"\n\n """\n\n def __init__(\n self,\n instance_config_map,\n dagster_home,\n postgres_password_secret,\n load_incluster_config=True,\n kubeconfig_file=None,\n broker=None,\n backend=None,\n include=None,\n config_source=None,\n retries=None,\n inst_data: Optional[ConfigurableClassData] = None,\n k8s_client_batch_api=None,\n env_config_maps=None,\n env_secrets=None,\n volume_mounts=None,\n volumes=None,\n service_account_name=None,\n image_pull_policy=None,\n image_pull_secrets=None,\n labels=None,\n fail_pod_on_run_failure=None,\n job_namespace=None,\n ):\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n\n if load_incluster_config:\n check.invariant(\n kubeconfig_file is None,\n "`kubeconfig_file` is set but `load_incluster_config` is True.",\n )\n kubernetes.config.load_incluster_config()\n else:\n check.opt_str_param(kubeconfig_file, "kubeconfig_file")\n kubernetes.config.load_kube_config(kubeconfig_file)\n\n self._api_client = DagsterKubernetesClient.production_client(\n batch_api_override=k8s_client_batch_api\n )\n\n self.instance_config_map = check.str_param(instance_config_map, "instance_config_map")\n self.dagster_home = check.str_param(dagster_home, "dagster_home")\n self.postgres_password_secret = check.str_param(\n postgres_password_secret, "postgres_password_secret"\n )\n self.broker = check.opt_str_param(broker, "broker")\n self.backend = check.opt_str_param(backend, "backend")\n self.include = check.opt_list_param(include, "include")\n self.config_source = check.opt_dict_param(config_source, "config_source")\n\n retries = check.opt_dict_param(retries, "retries") or {"enabled": {}}\n self.retries = RetryMode.from_config(retries)\n\n self._env_config_maps = check.opt_list_param(\n env_config_maps, "env_config_maps", of_type=str\n )\n self._env_secrets = check.opt_list_param(env_secrets, "env_secrets", of_type=str)\n\n self._volume_mounts = check.opt_list_param(volume_mounts, "volume_mounts")\n self._volumes = check.opt_list_param(volumes, "volumes")\n\n self._service_account_name = check.opt_str_param(\n service_account_name, "service_account_name"\n )\n self._image_pull_policy = check.opt_str_param(\n image_pull_policy, "image_pull_policy", "IfNotPresent"\n )\n self._image_pull_secrets = check.opt_list_param(\n image_pull_secrets, "image_pull_secrets", of_type=dict\n )\n 
self._labels = check.opt_dict_param(labels, "labels", key_type=str, value_type=str)\n self._fail_pod_on_run_failure = check.opt_bool_param(\n fail_pod_on_run_failure, "fail_pod_on_run_failure"\n )\n self.job_namespace = check.opt_str_param(job_namespace, "job_namespace", default="default")\n\n super().__init__()\n\n @classmethod\n def config_type(cls):\n from dagster_celery.executor import CELERY_CONFIG\n\n return merge_dicts(DagsterK8sJobConfig.config_type_run_launcher(), CELERY_CONFIG)\n\n @classmethod\n def from_config_value(cls, inst_data, config_value):\n return cls(inst_data=inst_data, **config_value)\n\n @property\n def inst_data(self):\n return self._inst_data\n\n def launch_run(self, context: LaunchRunContext) -> None:\n run = context.dagster_run\n\n job_name = get_job_name_from_run_id(run.run_id)\n pod_name = job_name\n exc_config = _get_validated_celery_k8s_executor_config(run.run_config)\n\n job_image_from_executor_config = exc_config.get("job_image")\n\n job_origin = cast(JobPythonOrigin, context.job_code_origin)\n repository_origin = job_origin.repository_origin\n\n job_image = repository_origin.container_image\n\n if job_image:\n if job_image_from_executor_config:\n job_image = job_image_from_executor_config\n self._instance.report_engine_event(\n f"You have specified a job_image {job_image_from_executor_config} in your"\n f" executor configuration, but also {job_image} in your user-code"\n f" deployment. Using the job image {job_image_from_executor_config} from"\n " executor configuration as it takes precedence.",\n run,\n cls=self.__class__,\n )\n else:\n if not job_image_from_executor_config:\n raise DagsterInvariantViolationError(\n "You have not specified a job_image in your executor configuration. To resolve"\n " this error, specify the job_image configuration in the executor config"\n " section in your run config. \\nNote: You may also be seeing this error because"\n " you are using the configured API. 
Using configured with the celery-k8s"\n " executor is not supported at this time, and the job_image must be configured"\n " at the top-level executor config without using configured."\n )\n\n job_image = job_image_from_executor_config\n\n job_config = self.get_k8s_job_config(job_image, exc_config)\n\n self._instance.add_run_tags(\n run.run_id,\n {DOCKER_IMAGE_TAG: job_config.job_image},\n )\n\n user_defined_k8s_config = get_user_defined_k8s_config(run.tags)\n\n from dagster._cli.api import ExecuteRunArgs\n\n run_args = ExecuteRunArgs(\n job_origin=job_origin,\n run_id=run.run_id,\n instance_ref=self._instance.get_ref(),\n set_exit_code_on_failure=self._fail_pod_on_run_failure,\n ).get_command_args()\n\n labels = {\n "dagster/job": job_origin.job_name,\n "dagster/run-id": run.run_id,\n }\n if run.external_job_origin:\n labels["dagster/code-location"] = (\n run.external_job_origin.external_repository_origin.code_location_origin.location_name\n )\n\n job = construct_dagster_k8s_job(\n job_config,\n args=run_args,\n job_name=job_name,\n pod_name=pod_name,\n component="run_worker",\n user_defined_k8s_config=user_defined_k8s_config,\n labels=labels,\n env_vars=[{"name": "DAGSTER_RUN_JOB_NAME", "value": job_origin.job_name}],\n )\n\n job_namespace = exc_config.get("job_namespace", self.job_namespace)\n\n self._instance.report_engine_event(\n "Creating Kubernetes run worker job",\n run,\n EngineEventData(\n {\n "Kubernetes Job name": job_name,\n "Kubernetes Namespace": job_namespace,\n "Run ID": run.run_id,\n }\n ),\n cls=self.__class__,\n )\n\n self._api_client.batch_api.create_namespaced_job(body=job, namespace=job_namespace)\n self._instance.report_engine_event(\n "Kubernetes run worker job created",\n run,\n EngineEventData(\n {\n "Kubernetes Job name": job_name,\n "Kubernetes Namespace": job_namespace,\n "Run ID": run.run_id,\n }\n ),\n cls=self.__class__,\n )\n\n def get_k8s_job_config(self, job_image, exc_config):\n return DagsterK8sJobConfig(\n dagster_home=self.dagster_home,\n instance_config_map=self.instance_config_map,\n postgres_password_secret=self.postgres_password_secret,\n job_image=check.opt_str_param(job_image, "job_image"),\n image_pull_policy=exc_config.get("image_pull_policy", self._image_pull_policy),\n image_pull_secrets=exc_config.get("image_pull_secrets", []) + self._image_pull_secrets,\n service_account_name=exc_config.get("service_account_name", self._service_account_name),\n env_config_maps=exc_config.get("env_config_maps", []) + self._env_config_maps,\n env_secrets=exc_config.get("env_secrets", []) + self._env_secrets,\n volume_mounts=exc_config.get("volume_mounts", []) + self._volume_mounts,\n volumes=exc_config.get("volumes", []) + self._volumes,\n labels=merge_dicts(self._labels, exc_config.get("labels", {})),\n )\n\n def terminate(self, run_id):\n check.str_param(run_id, "run_id")\n\n run = self._instance.get_run_by_id(run_id)\n if not run:\n return False\n\n self._instance.report_run_canceling(run)\n\n job_name = get_job_name_from_run_id(run_id)\n\n job_namespace = self.get_namespace_from_run_config(run_id)\n\n try:\n termination_result = self._api_client.delete_job(\n job_name=job_name, namespace=job_namespace\n )\n if termination_result:\n self._instance.report_engine_event(\n message="Dagster Job was terminated successfully.",\n dagster_run=run,\n cls=self.__class__,\n )\n else:\n self._instance.report_engine_event(\n message=(\n "Dagster Job was not terminated successfully; delete_job returned {}"\n .format(termination_result)\n ),\n dagster_run=run,\n 
cls=self.__class__,\n )\n return termination_result\n except Exception:\n self._instance.report_engine_event(\n message=(\n "Dagster Job was not terminated successfully; encountered error in delete_job"\n ),\n dagster_run=run,\n engine_event_data=EngineEventData.engine_error(\n serializable_error_info_from_exc_info(sys.exc_info())\n ),\n cls=self.__class__,\n )\n\n def get_namespace_from_run_config(self, run_id):\n check.str_param(run_id, "run_id")\n\n dagster_run = self._instance.get_run_by_id(run_id)\n run_config = dagster_run.run_config\n executor_config = _get_validated_celery_k8s_executor_config(run_config)\n return executor_config.get("job_namespace", self.job_namespace)\n\n @property\n def supports_check_run_worker_health(self):\n return True\n\n def check_run_worker_health(self, run: DagsterRun):\n job_namespace = _get_validated_celery_k8s_executor_config(run.run_config).get(\n "job_namespace", self.job_namespace\n )\n job_name = get_job_name_from_run_id(run.run_id)\n try:\n status = self._api_client.get_job_status(namespace=job_namespace, job_name=job_name)\n except Exception:\n return CheckRunHealthResult(\n WorkerStatus.UNKNOWN, str(serializable_error_info_from_exc_info(sys.exc_info()))\n )\n if status.failed:\n return CheckRunHealthResult(WorkerStatus.FAILED, "K8s job failed")\n return CheckRunHealthResult(WorkerStatus.RUNNING)
\n\n\ndef _get_validated_celery_k8s_executor_config(run_config):\n check.dict_param(run_config, "run_config")\n\n executor_config = run_config.get("execution", {})\n execution_config_schema = resolve_to_config_type(celery_k8s_executor_config())\n\n # In run config on jobs, we don't have an executor key\n if CELERY_K8S_CONFIG_KEY not in executor_config:\n execution_run_config = executor_config.get("config", {})\n else:\n execution_run_config = (run_config["execution"][CELERY_K8S_CONFIG_KEY] or {}).get(\n "config", {}\n )\n\n res = process_config(execution_config_schema, execution_run_config)\n\n check.invariant(\n res.success,\n "Incorrect execution schema provided. Note: You may also be seeing this error "\n "because you are using the configured API. "\n "Using configured with the {config_key} executor is not supported at this time, "\n "and all executor config must be directly in the run config without using configured."\n .format(\n config_key=CELERY_K8S_CONFIG_KEY,\n ),\n )\n\n return res.value\n
", "current_page_name": "_modules/dagster_celery_k8s/launcher", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_celery_k8s.launcher"}}, "dagster_census": {"ops": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_census.ops

\nfrom dagster import Array, Bool, Field, In, Noneable, Nothing, Out, Output, op\n\nfrom .resources import DEFAULT_POLL_INTERVAL\nfrom .types import CensusOutput\nfrom .utils import generate_materialization\n\n\n
[docs]@op(\n required_resource_keys={"census"},\n ins={"start_after": In(Nothing)},\n out=Out(\n CensusOutput,\n description=(\n "Parsed json dictionary representing the details of the Census sync after "\n "the sync successfully completes."\n ),\n ),\n config_schema={\n "sync_id": Field(\n int,\n is_required=True,\n description="Id of the parent sync.",\n ),\n "force_full_sync": Field(\n config=Bool,\n default_value=False,\n description=(\n "If this trigger request should be a Full Sync. "\n "Note that some sync configurations such as Append do not support full syncs."\n ),\n ),\n "poll_interval": Field(\n float,\n default_value=DEFAULT_POLL_INTERVAL,\n description="The time (in seconds) to wait between successive polls.",\n ),\n "poll_timeout": Field(\n Noneable(float),\n default_value=None,\n description=(\n "The maximum time to wait before this operation is timed out. By "\n "default, this will never time out."\n ),\n ),\n "yield_materializations": Field(\n config=Bool,\n default_value=True,\n description=(\n "If True, materializations corresponding to the results of the Census sync will "\n "be yielded when the op executes."\n ),\n ),\n "asset_key_prefix": Field(\n config=Array(str),\n default_value=["census"],\n description=(\n "If provided and yield_materializations is True, these components will be used to "\n "prefix the generated asset keys."\n ),\n ),\n },\n tags={"kind": "census"},\n)\ndef census_trigger_sync_op(context):\n """Executes a Census sync for a given ``sync_id`` and polls until that sync completes, raising\n an error if it is unsuccessful.\n\n It outputs a :py:class:`~dagster_census.CensusOutput` which contains the details of the Census\n sync after it successfully completes.\n\n It requires the use of the :py:class:`~dagster_census.census_resource`, which allows it to\n communicate with the Census API.\n\n **Examples:**\n\n .. code-block:: python\n\n from dagster import job\n from dagster_census import census_resource, census_sync_op\n\n my_census_resource = census_resource.configured(\n {\n "api_key": {"env": "CENSUS_API_KEY"},\n }\n )\n\n sync_foobar = census_sync_op.configured({"sync_id": "foobar"}, name="sync_foobar")\n\n @job(resource_defs={"census": my_census_resource})\n def my_simple_census_job():\n sync_foobar()\n\n """\n census_output = context.resources.census.trigger_sync_and_poll(\n sync_id=context.op_config["sync_id"],\n force_full_sync=context.op_config["force_full_sync"],\n poll_interval=context.op_config["poll_interval"],\n poll_timeout=context.op_config["poll_timeout"],\n )\n if context.op_config["yield_materializations"]:\n yield generate_materialization(\n census_output, asset_key_prefix=context.op_config["asset_key_prefix"]\n )\n yield Output(census_output)
\n
", "current_page_name": "_modules/dagster_census/ops", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_census.ops"}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_census.resources

\nimport datetime\nimport json\nimport logging\nimport time\nfrom typing import Any, Mapping, Optional\n\nimport requests\nfrom dagster import Failure, Field, StringSource, __version__, get_dagster_logger, resource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom requests.auth import HTTPBasicAuth\nfrom requests.exceptions import RequestException\n\nfrom .types import CensusOutput\n\nCENSUS_API_BASE = "app.getcensus.com/api"\nCENSUS_VERSION = "v1"\n\nDEFAULT_POLL_INTERVAL = 10\n\nSYNC_RUN_STATUSES = {"completed", "failed", "queued", "skipped", "working"}\n\n\n
[docs]class CensusResource:\n """This class exposes methods on top of the Census REST API."""\n\n def __init__(\n self,\n api_key: str,\n request_max_retries: int = 3,\n request_retry_delay: float = 0.25,\n log: logging.Logger = get_dagster_logger(),\n ):\n self.api_key = api_key\n\n self._request_max_retries = request_max_retries\n self._request_retry_delay = request_retry_delay\n\n self._log = log\n\n @property\n def _api_key(self):\n if self.api_key.startswith("secret-token:"):\n return self.api_key\n return "secret-token:" + self.api_key\n\n @property\n def api_base_url(self) -> str:\n return f"https://{CENSUS_API_BASE}/{CENSUS_VERSION}"\n\n def make_request(\n self, method: str, endpoint: str, data: Optional[str] = None\n ) -> Mapping[str, Any]:\n """Creates and sends a request to the desired Census API endpoint.\n\n Args:\n method (str): The http method to use for this request (e.g. "POST", "GET", "PATCH").\n endpoint (str): The Census API endpoint to send this request to.\n data (Optional[str]): JSON-formatted data string to be included in the request.\n\n Returns:\n Dict[str, Any]: JSON data from the response to this request\n """\n url = f"{self.api_base_url}/{endpoint}"\n headers = {\n "User-Agent": f"dagster-census/{__version__}",\n "Content-Type": "application/json;version=2",\n }\n\n num_retries = 0\n while True:\n try:\n response = requests.request(\n method=method,\n url=url,\n headers=headers,\n auth=HTTPBasicAuth("bearer", self._api_key),\n data=data,\n )\n response.raise_for_status()\n return response.json()\n except RequestException as e:\n self._log.error("Request to Census API failed: %s", e)\n if num_retries == self._request_max_retries:\n break\n num_retries += 1\n time.sleep(self._request_retry_delay)\n\n raise Failure(f"Max retries ({self._request_max_retries}) exceeded with url: {url}.")\n\n def get_sync(self, sync_id: int) -> Mapping[str, Any]:\n """Gets details about a given sync from the Census API.\n\n Args:\n sync_id (int): The Census Sync ID.\n\n Returns:\n Dict[str, Any]: JSON data from the response to this request\n """\n return self.make_request(method="GET", endpoint=f"syncs/{sync_id}")\n\n def get_source(self, source_id: int) -> Mapping[str, Any]:\n """Gets details about a given source from the Census API.\n\n Args:\n source_id (int): The Census Source ID.\n\n Returns:\n Dict[str, Any]: JSON data from the response to this request\n """\n return self.make_request(method="GET", endpoint=f"sources/{source_id}")\n\n def get_destination(self, destination_id: int) -> Mapping[str, Any]:\n """Gets details about a given destination from the Census API.\n\n Args:\n destination_id (int): The Census Destination ID.\n\n Returns:\n Dict[str, Any]: JSON data from the response to this request\n """\n return self.make_request(method="GET", endpoint=f"destinations/{destination_id}")\n\n def get_sync_run(self, sync_run_id: int) -> Mapping[str, Any]:\n """Gets details about a specific sync run from the Census API.\n\n Args:\n sync_run_id (int): The Census Sync Run ID.\n\n Returns:\n Dict[str, Any]: JSON data from the response to this request\n """\n return self.make_request(method="GET", endpoint=f"sync_runs/{sync_run_id}")\n\n def poll_sync_run(\n self,\n sync_run_id: int,\n poll_interval: float = DEFAULT_POLL_INTERVAL,\n poll_timeout: Optional[float] = None,\n ) -> Mapping[str, Any]:\n """Given a Census sync run, poll until the run is complete.\n\n Args:\n sync_id (int): The Census Sync Run ID.\n poll_interval (float): The time (in seconds) that will be waited between 
successive polls.\n poll_timeout (float): The maximum time that will waited before this operation is timed\n out. By default, this will never time out.\n\n Returns:\n Dict[str, Any]: JSON data from the response to this request\n """\n log_url = f"https://app.getcensus.com/syncs_runs/{sync_run_id}"\n poll_start = datetime.datetime.now()\n\n while True:\n time.sleep(poll_interval)\n response_dict = self.get_sync_run(sync_run_id)\n if "data" not in response_dict.keys():\n raise ValueError(\n f"Getting status of sync failed, please visit Census Logs at {log_url} to see"\n " more."\n )\n\n sync_status = response_dict["data"]["status"]\n sync_id = response_dict["data"]["sync_id"]\n\n if sync_status not in SYNC_RUN_STATUSES:\n raise ValueError(\n f"Unexpected response status '{sync_status}'; "\n f"must be one of {','.join(sorted(SYNC_RUN_STATUSES))}. "\n "See Management API docs for more information: "\n "https://docs.getcensus.com/basics/developers/api/sync-runs"\n )\n\n if sync_status in {"queued", "working"}:\n self._log.debug(\n f"Sync {sync_id} still running after {datetime.datetime.now() - poll_start}."\n )\n continue\n\n if poll_timeout and datetime.datetime.now() > poll_start + datetime.timedelta(\n seconds=poll_timeout\n ):\n raise Failure(\n f"Sync for sync '{sync_id}' timed out after"\n f" {datetime.datetime.now() - poll_start}."\n )\n\n break\n\n self._log.debug(\n f"Sync {sync_id} has finished running after {datetime.datetime.now() - poll_start}."\n )\n self._log.info(f"View sync details here: {log_url}.")\n\n return response_dict\n\n def trigger_sync(self, sync_id: int, force_full_sync: bool = False) -> Mapping[str, Any]:\n """Trigger an asynchronous run for a specific sync.\n\n Args:\n sync_id (int): The Census Sync Run ID.\n force_full_sync (bool): If the Sync should perform a full sync\n\n Returns:\n Dict[str, Any]: JSON data from the response to this request\n """\n data = {"force_full_sync": force_full_sync}\n return self.make_request(\n method="POST", endpoint=f"syncs/{sync_id}/trigger", data=json.dumps(data)\n )\n\n def trigger_sync_and_poll(\n self,\n sync_id: int,\n force_full_sync: bool = False,\n poll_interval: float = DEFAULT_POLL_INTERVAL,\n poll_timeout: Optional[float] = None,\n ) -> CensusOutput:\n """Trigger a run for a specific sync and poll until it has completed.\n\n Args:\n sync_id (int): The Census Sync Run ID.\n force_full_sync (bool): If the Sync should perform a full sync\n poll_interval (float): The time (in seconds) that will be waited between successive polls.\n poll_timeout (float): The maximum time that will waited before this operation is timed\n out. By default, this will never time out.\n\n Returns:\n :py:class:`~CensusOutput`:\n Object containing details about the sync run and the sync details\n """\n sync_details = self.get_sync(sync_id=sync_id)\n source_details = self.get_source(\n source_id=sync_details["data"]["source_attributes"]["connection_id"]\n )["data"]\n destination_details = self.get_destination(\n destination_id=sync_details["data"]["destination_attributes"]["connection_id"]\n )["data"]\n\n trigger_sync_resp = self.trigger_sync(sync_id=sync_id, force_full_sync=force_full_sync)\n sync_run_details = self.poll_sync_run(\n sync_run_id=trigger_sync_resp["data"]["sync_run_id"],\n poll_interval=poll_interval,\n poll_timeout=poll_timeout,\n )["data"]\n return CensusOutput(\n sync_run=sync_run_details,\n source=source_details,\n destination=destination_details,\n )
\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n    config_schema={\n        "api_key": Field(\n            StringSource,\n            is_required=True,\n            description="Census API Key.",\n        ),\n        "request_max_retries": Field(\n            int,\n            default_value=3,\n            description=(\n                "The maximum number of times requests to the Census API should be retried "\n                "before failing."\n            ),\n        ),\n        "request_retry_delay": Field(\n            float,\n            default_value=0.25,\n            description="Time (in seconds) to wait between each request retry.",\n        ),\n    },\n    description="This resource helps manage Census connectors",\n)\ndef census_resource(context) -> CensusResource:\n    """This resource allows users to programmatically interface with the Census REST API to launch\n    syncs and monitor their progress. This currently implements only a subset of the functionality\n    exposed by the API.\n\n    **Examples:**\n\n    .. code-block:: python\n\n        from dagster import job\n        from dagster_census import census_resource\n\n        my_census_resource = census_resource.configured(\n            {\n                "api_key": {"env": "CENSUS_API_KEY"},\n            }\n        )\n\n        @job(resource_defs={"census":my_census_resource})\n        def my_census_job():\n            ...\n\n    """\n    return CensusResource(\n        api_key=context.resource_config["api_key"],\n        request_max_retries=context.resource_config["request_max_retries"],\n        request_retry_delay=context.resource_config["request_retry_delay"],\n        log=context.log,\n    )
\n
", "current_page_name": "_modules/dagster_census/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_census.resources"}, "types": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_census.types

\nfrom typing import Any, Mapping, NamedTuple\n\n\n
[docs]class CensusOutput(\n NamedTuple(\n "_CensusOutput",\n [\n ("sync_run", Mapping[str, Any]),\n ("source", Mapping[str, Any]),\n ("destination", Mapping[str, Any]),\n ],\n )\n):\n """Contains recorded information about the state of a Census sync after a sync completes.\n\n Attributes:\n sync_run (Dict[str, Any]):\n The details of the specific sync run.\n source (Dict[str, Any]):\n Information about the source for the Census sync.\n destination (Dict[str, Any]):\n Information about the destination for the Census sync.\n """
\n
", "current_page_name": "_modules/dagster_census/types", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_census.types"}}, "dagster_dask": {"executor": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dask.executor

\nfrom typing import Any, Mapping, Optional, Sequence\n\nimport dask\nimport dask.distributed\nfrom dagster import (\n    Executor,\n    Field,\n    Permissive,\n    Selector,\n    StringSource,\n    _check as check,\n    _seven,\n    multiple_process_executor_requirements,\n)\nfrom dagster._core.definitions.executor_definition import executor\nfrom dagster._core.definitions.reconstruct import ReconstructableJob\nfrom dagster._core.errors import raise_execution_interrupts\nfrom dagster._core.events import DagsterEvent\nfrom dagster._core.execution.api import create_execution_plan, execute_plan\nfrom dagster._core.execution.context.system import PlanOrchestrationContext\nfrom dagster._core.execution.plan.plan import ExecutionPlan\nfrom dagster._core.execution.plan.state import KnownExecutionState\nfrom dagster._core.execution.retries import RetryMode\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._core.instance.ref import InstanceRef\nfrom dagster._core.storage.dagster_run import DagsterRun\nfrom dagster._utils import iterate_with_context\n\n# Dask resource requirements are specified under this key\nDASK_RESOURCE_REQUIREMENTS_KEY = "dagster-dask/resource_requirements"\n\n\n
[docs]@executor(\n name="dask",\n requirements=multiple_process_executor_requirements(),\n config_schema={\n "cluster": Field(\n Selector(\n {\n "existing": Field(\n {"address": StringSource},\n description="Connect to an existing scheduler.",\n ),\n "local": Field(\n Permissive(), is_required=False, description="Local cluster configuration."\n ),\n "yarn": Field(\n Permissive(), is_required=False, description="YARN cluster configuration."\n ),\n "ssh": Field(\n Permissive(), is_required=False, description="SSH cluster configuration."\n ),\n "pbs": Field(\n Permissive(), is_required=False, description="PBS cluster configuration."\n ),\n "moab": Field(\n Permissive(), is_required=False, description="Moab cluster configuration."\n ),\n "sge": Field(\n Permissive(), is_required=False, description="SGE cluster configuration."\n ),\n "lsf": Field(\n Permissive(), is_required=False, description="LSF cluster configuration."\n ),\n "slurm": Field(\n Permissive(), is_required=False, description="SLURM cluster configuration."\n ),\n "oar": Field(\n Permissive(), is_required=False, description="OAR cluster configuration."\n ),\n "kube": Field(\n Permissive(),\n is_required=False,\n description="Kubernetes cluster configuration.",\n ),\n }\n )\n )\n },\n)\ndef dask_executor(init_context):\n """Dask-based executor.\n\n The 'cluster' can be one of the following:\n ('existing', 'local', 'yarn', 'ssh', 'pbs', 'moab', 'sge', 'lsf', 'slurm', 'oar', 'kube').\n\n If the Dask executor is used without providing executor-specific config, a local Dask cluster\n will be created (as when calling :py:class:`dask.distributed.Client() <dask:distributed.Client>`\n with :py:class:`dask.distributed.LocalCluster() <dask:distributed.LocalCluster>`).\n\n The Dask executor optionally takes the following config:\n\n .. code-block:: none\n\n cluster:\n {\n local?: # takes distributed.LocalCluster parameters\n {\n timeout?: 5, # Timeout duration for initial connection to the scheduler\n n_workers?: 4 # Number of workers to start\n threads_per_worker?: 1 # Number of threads per each worker\n }\n }\n\n To use the `dask_executor`, set it as the `executor_def` when defining a job:\n\n .. code-block:: python\n\n from dagster import job\n from dagster_dask import dask_executor\n\n @job(executor_def=dask_executor)\n def dask_enabled_job():\n pass\n\n """\n ((cluster_type, cluster_configuration),) = init_context.executor_config["cluster"].items()\n return DaskExecutor(cluster_type, cluster_configuration)
\n\n\ndef query_on_dask_worker(\n dependencies: Any,\n recon_job: ReconstructableJob,\n dagster_run: DagsterRun,\n run_config: Optional[Mapping[str, object]],\n step_keys: Optional[Sequence[str]],\n instance_ref: InstanceRef,\n known_state: Optional[KnownExecutionState],\n) -> Sequence[DagsterEvent]:\n """Note that we need to pass "dependencies" to ensure Dask sequences futures during task\n scheduling, even though we do not use this argument within the function.\n """\n with DagsterInstance.from_ref(instance_ref) as instance:\n subset_job = recon_job.get_subset(op_selection=dagster_run.resolved_op_selection)\n\n execution_plan = create_execution_plan(\n subset_job,\n run_config=run_config,\n step_keys_to_execute=step_keys,\n known_state=known_state,\n )\n\n return execute_plan(\n execution_plan, subset_job, instance, dagster_run, run_config=run_config\n )\n\n\ndef get_dask_resource_requirements(tags: Mapping[str, str]):\n check.mapping_param(tags, "tags", key_type=str, value_type=str)\n req_str = tags.get(DASK_RESOURCE_REQUIREMENTS_KEY)\n if req_str is not None:\n return _seven.json.loads(req_str)\n\n return {}\n\n\nclass DaskExecutor(Executor):\n def __init__(self, cluster_type, cluster_configuration):\n self.cluster_type = check.opt_str_param(cluster_type, "cluster_type", default="local")\n self.cluster_configuration = check.opt_dict_param(\n cluster_configuration, "cluster_configuration"\n )\n\n @property\n def retries(self):\n return RetryMode.DISABLED\n\n def execute(self, plan_context: PlanOrchestrationContext, execution_plan: ExecutionPlan):\n check.inst_param(plan_context, "plan_context", PlanOrchestrationContext)\n check.inst_param(execution_plan, "execution_plan", ExecutionPlan)\n check.param_invariant(\n isinstance(plan_context.executor, DaskExecutor),\n "plan_context",\n f"Expected executor to be DaskExecutor got {plan_context.executor}",\n )\n\n check.invariant(\n plan_context.instance.is_persistent,\n "Dask execution requires a persistent DagsterInstance",\n )\n\n step_levels = execution_plan.get_steps_to_execute_by_level()\n\n job_name = plan_context.job_name\n\n instance = plan_context.instance\n\n cluster_type = self.cluster_type\n if cluster_type == "existing":\n # address passed directly to Client() below to connect to existing Scheduler\n cluster = self.cluster_configuration["address"]\n elif cluster_type == "local":\n from dask.distributed import LocalCluster\n\n cluster = LocalCluster(**self.build_dict(job_name))\n elif cluster_type == "yarn":\n from dask_yarn import YarnCluster\n\n cluster = YarnCluster(**self.build_dict(job_name))\n elif cluster_type == "ssh":\n from dask.distributed import SSHCluster\n\n cluster = SSHCluster(**self.build_dict(job_name))\n elif cluster_type == "pbs":\n from dask_jobqueue import PBSCluster\n\n cluster = PBSCluster(**self.build_dict(job_name))\n elif cluster_type == "moab":\n from dask_jobqueue import MoabCluster\n\n cluster = MoabCluster(**self.build_dict(job_name))\n elif cluster_type == "sge":\n from dask_jobqueue import SGECluster\n\n cluster = SGECluster(**self.build_dict(job_name))\n elif cluster_type == "lsf":\n from dask_jobqueue import LSFCluster\n\n cluster = LSFCluster(**self.build_dict(job_name))\n elif cluster_type == "slurm":\n from dask_jobqueue import SLURMCluster\n\n cluster = SLURMCluster(**self.build_dict(job_name))\n elif cluster_type == "oar":\n from dask_jobqueue import OARCluster\n\n cluster = OARCluster(**self.build_dict(job_name))\n elif cluster_type == "kube":\n from dask_kubernetes import KubeCluster\n\n 
cluster = KubeCluster(**self.build_dict(job_name))\n else:\n raise ValueError(\n "Must be providing one of the following ('existing', 'local', 'yarn', 'ssh',"\n f" 'pbs', 'moab', 'sge', 'lsf', 'slurm', 'oar', 'kube') not {cluster_type}"\n )\n\n with dask.distributed.Client(cluster) as client:\n execution_futures = []\n execution_futures_dict = {}\n\n for step_level in step_levels:\n for step in step_level:\n # We ensure correctness in sequencing by letting Dask schedule futures and\n # awaiting dependencies within each step.\n dependencies = []\n for step_input in step.step_inputs:\n for key in step_input.dependency_keys:\n dependencies.append(execution_futures_dict[key])\n\n run_config = plan_context.run_config\n\n dask_task_name = "%s.%s" % (job_name, step.key)\n\n recon_job = plan_context.reconstructable_job\n\n future = client.submit(\n query_on_dask_worker,\n dependencies,\n recon_job,\n plan_context.dagster_run,\n run_config,\n [step.key],\n instance.get_ref(),\n execution_plan.known_state,\n key=dask_task_name,\n resources=get_dask_resource_requirements(step.tags),\n )\n\n execution_futures.append(future)\n execution_futures_dict[step.key] = future\n\n # This tells Dask to awaits the step executions and retrieve their results to the\n # master\n futures = dask.distributed.as_completed(execution_futures, with_results=True)\n\n # Allow interrupts while waiting for the results from Dask\n for future, result in iterate_with_context(raise_execution_interrupts, futures):\n for step_event in result:\n check.inst(step_event, DagsterEvent)\n yield step_event\n\n def build_dict(self, job_name):\n """Returns a dict we can use for kwargs passed to dask client instantiation.\n\n Intended to be used like:\n\n with dask.distributed.Client(**cfg.build_dict()) as client:\n << use client here >>\n\n """\n if self.cluster_type in ["yarn", "pbs", "moab", "sge", "lsf", "slurm", "oar", "kube"]:\n dask_cfg = {"name": job_name}\n else:\n dask_cfg = {}\n\n if self.cluster_configuration:\n for k, v in self.cluster_configuration.items():\n dask_cfg[k] = v\n\n # if address is set, don't add LocalCluster args\n # context: https://github.com/dask/distributed/issues/3313\n if (self.cluster_type == "local") and ("address" not in dask_cfg):\n # We set threads_per_worker because Dagster is not thread-safe. Even though\n # environments=True by default, there is a clever piece of machinery\n # (dask.distributed.deploy.local.nprocesses_nthreads) that automagically makes execution\n # multithreaded by default when the number of available cores is greater than 4.\n # See: https://github.com/dagster-io/dagster/issues/2181\n # We may want to try to figure out a way to enforce this on remote Dask clusters against\n # which users run Dagster workloads.\n dask_cfg["threads_per_worker"] = 1\n\n return dask_cfg\n
", "current_page_name": "_modules/dagster_dask/executor", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dask.executor"}}, "dagster_databricks": {"databricks": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_databricks.databricks

\nimport base64\nimport logging\nimport time\nfrom typing import IO, Any, Mapping, Optional, Tuple, Union, cast\n\nimport dagster\nimport dagster._check as check\nimport dagster_pyspark\nimport databricks_api\nimport databricks_cli.sdk\nimport requests.exceptions\nfrom dagster._annotations import deprecated, public\nfrom databricks.sdk import WorkspaceClient\nfrom databricks.sdk.service import compute, jobs\nfrom typing_extensions import Final\n\nimport dagster_databricks\n\nfrom .types import (\n    DatabricksRunState,\n)\nfrom .version import __version__\n\n# wait at most 24 hours by default for run execution\nDEFAULT_RUN_MAX_WAIT_TIME_SEC: Final = 24 * 60 * 60\n\n\n
[docs]class DatabricksError(Exception):\n pass
\n\n\n
[docs]class DatabricksClient:\n """A thin wrapper over the Databricks REST API."""\n\n def __init__(\n self,\n host: str,\n token: Optional[str] = None,\n oauth_client_id: Optional[str] = None,\n oauth_client_secret: Optional[str] = None,\n workspace_id: Optional[str] = None,\n ):\n self.host = host\n self.workspace_id = workspace_id\n\n self._workspace_client = WorkspaceClient(\n host=host,\n token=token,\n client_id=oauth_client_id,\n client_secret=oauth_client_secret,\n product="dagster-databricks",\n product_version=__version__,\n )\n\n # TODO: This is the old shim client that we were previously using. Arguably this is\n # confusing for users to use since this is an unofficial wrapper around the documented\n # Databricks REST API. We should consider removing this in the next minor release.\n if token:\n self._client = databricks_api.DatabricksAPI(host=host, token=token)\n self.__setup_user_agent(self._client.client)\n # TODO: This is the old `databricks_cli` client that was previously recommended by Databricks.\n # It is no longer supported and should be removed in favour of `databricks-sdk` in the next\n # minor release.\n self._api_client = databricks_cli.sdk.ApiClient(host=host, token=token)\n self.__setup_user_agent(self._api_client)\n else:\n self._client = None\n self._api_client = None\n\n def __setup_user_agent(\n self,\n client: Union[WorkspaceClient, databricks_api.DatabricksAPI, databricks_cli.sdk.ApiClient],\n ) -> None:\n """Overrides the user agent for the Databricks API client."""\n client.default_headers["user-agent"] = f"dagster-databricks/{__version__}"\n\n @deprecated(\n breaking_version="0.21.0", additional_warn_text="Use `workspace_client` property instead."\n )\n @public\n @property\n def client(self) -> databricks_api.DatabricksAPI:\n """Retrieve the legacy Databricks API client. Note: accessing this property will throw an exception if oauth\n credentials are used to initialize the DatabricksClient, because oauth credentials are not supported by the\n legacy Databricks API client.\n """\n if self._client is None:\n raise ValueError(\n "Legacy Databricks API client from `databricks-api` was not initialized because"\n " oauth credentials were used instead of an access token. This legacy Databricks"\n " API client is not supported when using oauth credentials. Use the"\n " `workspace_client` property instead."\n )\n return self._client\n\n @client.setter\n def client(self, value: Optional[databricks_api.DatabricksAPI]) -> None:\n self._client = value\n\n @deprecated(\n breaking_version="0.21.0", additional_warn_text="Use `workspace_client` property instead."\n )\n @public\n @property\n def api_client(self) -> databricks_cli.sdk.ApiClient:\n """Retrieve a reference to the underlying Databricks API client. For more information,\n see the `Databricks Python API <https://docs.databricks.com/dev-tools/python-api.html>`_.\n Noe: accessing this property will throw an exception if oauth credentials are used to initialize the\n DatabricksClient, because oauth credentials are not supported by the legacy Databricks API client.\n **Examples:**.\n\n .. 
code-block:: python\n\n from dagster import op\n from databricks_cli.jobs.api import JobsApi\n from databricks_cli.runs.api import RunsApi\n from databricks.sdk import WorkspaceClient\n\n @op(required_resource_keys={"databricks_client"})\n def op1(context):\n # Initialize the Databricks Jobs API\n jobs_client = JobsApi(context.resources.databricks_client.api_client)\n runs_client = RunsApi(context.resources.databricks_client.api_client)\n client = context.resources.databricks_client.api_client\n\n # Example 1: Run a Databricks job with some parameters.\n jobs_client.run_now(...)\n client.jobs.run_now(...)\n\n # Example 2: Trigger a one-time run of a Databricks workload.\n runs_client.submit_run(...)\n client.jobs.submit(...)\n\n # Example 3: Get an existing run.\n runs_client.get_run(...)\n client.jobs.get_run(...)\n\n # Example 4: Cancel a run.\n runs_client.cancel_run(...)\n client.jobs.cancel_run(...)\n\n Returns:\n ApiClient: The authenticated Databricks API client.\n """\n if self._api_client is None:\n raise ValueError(\n "Legacy Databricks API client from `databricks-cli` was not initialized because"\n " oauth credentials were used instead of an access token. This legacy Databricks"\n " API client is not supported when using oauth credentials. Use the"\n " `workspace_client` property instead."\n )\n return self._api_client\n\n @public\n @property\n def workspace_client(self) -> WorkspaceClient:\n """Retrieve a reference to the underlying Databricks Workspace client. For more information,\n see the `Databricks SDK for Python <https://docs.databricks.com/dev-tools/sdk-python.html>`_.\n\n **Examples:**\n\n .. code-block:: python\n\n from dagster import op\n from databricks.sdk import WorkspaceClient\n\n @op(required_resource_keys={"databricks_client"})\n def op1(context):\n # Initialize the Databricks Jobs API\n client = context.resources.databricks_client.api_client\n\n # Example 1: Run a Databricks job with some parameters.\n client.jobs.run_now(...)\n\n # Example 2: Trigger a one-time run of a Databricks workload.\n client.jobs.submit(...)\n\n # Example 3: Get an existing run.\n client.jobs.get_run(...)\n\n # Example 4: Cancel a run.\n client.jobs.cancel_run(...)\n\n Returns:\n WorkspaceClient: The authenticated Databricks SDK Workspace Client.\n """\n return self._workspace_client\n\n def read_file(self, dbfs_path: str, block_size: int = 1024**2) -> bytes:\n """Read a file from DBFS to a **byte string**."""\n if dbfs_path.startswith("dbfs://"):\n dbfs_path = dbfs_path[7:]\n\n data = b""\n bytes_read = 0\n dbfs_service = self.workspace_client.dbfs\n\n jdoc = dbfs_service.read(path=dbfs_path, length=block_size)\n data += base64.b64decode(jdoc.data)\n while jdoc.bytes_read == block_size:\n bytes_read += jdoc.bytes_read\n jdoc = dbfs_service.read(path=dbfs_path, offset=bytes_read, length=block_size)\n data += base64.b64decode(jdoc.data)\n\n return data\n\n def put_file(\n self, file_obj: IO, dbfs_path: str, overwrite: bool = False, block_size: int = 1024**2\n ) -> None:\n """Upload an arbitrary large file to DBFS.\n\n This doesn't use the DBFS `Put` API because that endpoint is limited to 1MB.\n """\n if dbfs_path.startswith("dbfs://"):\n dbfs_path = dbfs_path[7:]\n\n dbfs_service = self.workspace_client.dbfs\n\n create_response = dbfs_service.create(path=dbfs_path, overwrite=overwrite)\n handle = create_response.handle\n\n block = file_obj.read(block_size)\n while block:\n data = base64.b64encode(block).decode("utf-8")\n dbfs_service.add_block(data=data, handle=handle)\n block = 
file_obj.read(block_size)\n\n dbfs_service.close(handle=handle)\n\n def get_run_state(self, databricks_run_id: int) -> "DatabricksRunState":\n """Get the state of a run by Databricks run ID.\n\n Return a `DatabricksRunState` object. Note that the `result_state`\n attribute may be `None` if the run hasn't yet terminated.\n """\n run = self.workspace_client.jobs.get_run(databricks_run_id)\n return DatabricksRunState.from_databricks(run.state)\n\n def poll_run_state(\n self,\n logger: logging.Logger,\n start_poll_time: float,\n databricks_run_id: int,\n max_wait_time_sec: float,\n verbose_logs: bool = True,\n ) -> bool:\n run_state = self.get_run_state(databricks_run_id)\n\n if run_state.has_terminated():\n if run_state.is_successful():\n logger.info(f"Run `{databricks_run_id}` completed successfully.")\n return True\n if run_state.is_skipped():\n logger.info(f"Run `{databricks_run_id}` was skipped.")\n return True\n else:\n error_message = (\n f"Run `{databricks_run_id}` failed with result state:"\n f" `{run_state.result_state}`. Message: {run_state.state_message}."\n )\n logger.error(error_message)\n raise DatabricksError(error_message)\n else:\n if verbose_logs:\n logger.debug(f"Run `{databricks_run_id}` in state {run_state}.")\n if time.time() - start_poll_time > max_wait_time_sec:\n raise DatabricksError(\n f"Run `{databricks_run_id}` took more than {max_wait_time_sec}s to complete."\n " Failing the run."\n )\n return False\n\n def wait_for_run_to_complete(\n self,\n logger: logging.Logger,\n databricks_run_id: int,\n poll_interval_sec: float,\n max_wait_time_sec: int,\n verbose_logs: bool = True,\n ) -> None:\n logger.info(f"Waiting for Databricks run `{databricks_run_id}` to complete...")\n\n start_poll_time = time.time()\n while True:\n if self.poll_run_state(\n logger=logger,\n start_poll_time=start_poll_time,\n databricks_run_id=databricks_run_id,\n max_wait_time_sec=max_wait_time_sec,\n verbose_logs=verbose_logs,\n ):\n return\n\n time.sleep(poll_interval_sec)
\n\n\nclass DatabricksJobRunner:\n """Submits jobs created using Dagster config to Databricks, and monitors their progress.\n\n Attributes:\n host (str): Databricks host, e.g. https://uksouth.azuredatabricks.net.\n token (str): Databricks authentication token.\n poll_interval_sec (float): How often to poll Databricks for run status.\n max_wait_time_sec (int): How long to wait for a run to complete before failing.\n """\n\n def __init__(\n self,\n host: str,\n token: Optional[str] = None,\n oauth_client_id: Optional[str] = None,\n oauth_client_secret: Optional[str] = None,\n poll_interval_sec: float = 5,\n max_wait_time_sec: int = DEFAULT_RUN_MAX_WAIT_TIME_SEC,\n ):\n self.host = check.str_param(host, "host")\n check.invariant(\n token is None or (oauth_client_id is None and oauth_client_secret is None),\n "Must provide either databricks_token or oauth_credentials, but cannot provide both",\n )\n self.token = check.opt_str_param(token, "token")\n self.oauth_client_id = check.opt_str_param(oauth_client_id, "oauth_client_id")\n self.oauth_client_secret = check.opt_str_param(oauth_client_secret, "oauth_client_secret")\n self.poll_interval_sec = check.numeric_param(poll_interval_sec, "poll_interval_sec")\n self.max_wait_time_sec = check.int_param(max_wait_time_sec, "max_wait_time_sec")\n\n self._client: DatabricksClient = DatabricksClient(\n host=self.host,\n token=self.token,\n oauth_client_id=oauth_client_id,\n oauth_client_secret=oauth_client_secret,\n )\n\n @property\n def client(self) -> DatabricksClient:\n """Return the underlying `DatabricksClient` object."""\n return self._client\n\n def submit_run(self, run_config: Mapping[str, Any], task: Mapping[str, Any]) -> int:\n """Submit a new run using the 'Runs submit' API."""\n existing_cluster_id = run_config["cluster"].get("existing")\n\n new_cluster = run_config["cluster"].get("new")\n\n # The Databricks API needs different keys to be present in API calls depending\n # on new/existing cluster, so we need to process the new_cluster\n # config first.\n if new_cluster:\n new_cluster = new_cluster.copy()\n\n nodes = new_cluster.pop("nodes")\n if "instance_pool_id" in nodes:\n new_cluster["instance_pool_id"] = nodes["instance_pool_id"]\n else:\n node_types = nodes["node_types"]\n new_cluster["node_type_id"] = node_types["node_type_id"]\n if "driver_node_type_id" in node_types:\n new_cluster["driver_node_type_id"] = node_types["driver_node_type_id"]\n\n cluster_size = new_cluster.pop("size")\n if "num_workers" in cluster_size:\n new_cluster["num_workers"] = cluster_size["num_workers"]\n else:\n new_cluster["autoscale"] = cluster_size["autoscale"]\n\n tags = new_cluster.get("custom_tags", {})\n if isinstance(tags, list):\n tags = {x["key"]: x["value"] for x in tags}\n tags["__dagster_version"] = dagster.__version__\n new_cluster["custom_tags"] = tags\n\n check.invariant(\n existing_cluster_id is not None or new_cluster is not None,\n "Invalid value for run_config.cluster",\n )\n\n # We'll always need some libraries, namely dagster/dagster_databricks/dagster_pyspark,\n # since they're imported by our scripts.\n # Add them if they're not already added by users in config.\n libraries = list(run_config.get("libraries", []))\n install_default_libraries = run_config.get("install_default_libraries", True)\n if install_default_libraries:\n python_libraries = {\n x["pypi"]["package"].split("==")[0].replace("_", "-")\n for x in libraries\n if "pypi" in x\n }\n\n for library_name, library in [\n ("dagster", dagster),\n ("dagster-databricks", 
dagster_databricks),\n ("dagster-pyspark", dagster_pyspark),\n ]:\n if library_name not in python_libraries:\n libraries.append(\n {"pypi": {"package": f"{library_name}=={library.__version__}"}}\n )\n\n # Only one task should be able to be chosen really; make sure of that here.\n check.invariant(\n sum(\n task.get(key) is not None\n for key in [\n "notebook_task",\n "spark_python_task",\n "spark_jar_task",\n "spark_submit_task",\n ]\n )\n == 1,\n "Multiple tasks specified in Databricks run",\n )\n\n return self.client.workspace_client.jobs.submit(\n run_name=run_config.get("run_name"),\n tasks=[\n jobs.SubmitTask.from_dict(\n {\n "new_cluster": new_cluster,\n "existing_cluster_id": existing_cluster_id,\n # "libraries": [compute.Library.from_dict(lib) for lib in libraries],\n "libraries": libraries,\n **task,\n "task_key": "dagster-task",\n },\n )\n ],\n ).bind()["run_id"]\n\n def retrieve_logs_for_run_id(\n self, log: logging.Logger, databricks_run_id: int\n ) -> Optional[Tuple[Optional[str], Optional[str]]]:\n """Retrieve the stdout and stderr logs for a run."""\n run = self.client.workspace_client.jobs.get_run(databricks_run_id)\n\n # Run.cluster_instance can be None. In that case, fall back to cluster instance on first\n # task. Currently pyspark step launcher runs jobs with singleton tasks.\n cluster_instance = run.cluster_instance or run.tasks[0].cluster_instance\n cluster_id = check.inst(\n cluster_instance.cluster_id,\n str,\n "cluster_id should be string like `1234-123456-abcdefgh` got:"\n f" `{cluster_instance.cluster_id}`",\n )\n cluster = self.client.workspace_client.clusters.get(cluster_id)\n log_config = cluster.cluster_log_conf\n if log_config is None:\n log.warn(\n f"Logs not configured for cluster {cluster_id} used for run {databricks_run_id}"\n )\n return None\n if cast(Optional[compute.S3StorageInfo], log_config.s3) is not None:\n logs_prefix = log_config.s3.destination\n log.warn("Retrieving S3 logs not yet implemented")\n return None\n elif cast(Optional[compute.DbfsStorageInfo], log_config.dbfs) is not None:\n logs_prefix = log_config.dbfs.destination\n stdout = self.wait_for_dbfs_logs(log, logs_prefix, cluster_id, "stdout")\n stderr = self.wait_for_dbfs_logs(log, logs_prefix, cluster_id, "stderr")\n return stdout, stderr\n\n def wait_for_dbfs_logs(\n self,\n log: logging.Logger,\n prefix: str,\n cluster_id: str,\n filename: str,\n waiter_delay: int = 10,\n waiter_max_attempts: int = 10,\n ) -> Optional[str]:\n """Attempt up to `waiter_max_attempts` attempts to get logs from DBFS."""\n path = "/".join([prefix, cluster_id, "driver", filename])\n log.info(f"Retrieving logs from {path}")\n num_attempts = 0\n while num_attempts <= waiter_max_attempts:\n try:\n logs = self.client.read_file(path)\n return logs.decode("utf-8")\n except requests.exceptions.HTTPError:\n num_attempts += 1\n time.sleep(waiter_delay)\n log.warn("Could not retrieve cluster logs!")\n
", "current_page_name": "_modules/dagster_databricks/databricks", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_databricks.databricks"}, "databricks_pyspark_step_launcher": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_databricks.databricks_pyspark_step_launcher

\nimport gzip\nimport io\nimport os.path\nimport pickle\nimport sys\nimport tempfile\nimport time\nimport zlib\nfrom typing import Any, Dict, Iterator, Mapping, Optional, Sequence, cast\n\nfrom dagster import (\n    Bool,\n    Field,\n    IntSource,\n    Noneable,\n    StringSource,\n    _check as check,\n    resource,\n)\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster._core.definitions.step_launcher import StepLauncher, StepRunRef\nfrom dagster._core.errors import raise_execution_interrupts\nfrom dagster._core.events import DagsterEvent\nfrom dagster._core.events.log import EventLogEntry\nfrom dagster._core.execution.context.init import InitResourceContext\nfrom dagster._core.execution.context.system import StepExecutionContext\nfrom dagster._core.execution.plan.external_step import (\n    PICKLED_EVENTS_FILE_NAME,\n    PICKLED_STEP_RUN_REF_FILE_NAME,\n    step_context_to_step_run_ref,\n)\nfrom dagster._core.log_manager import DagsterLogManager\nfrom dagster._serdes import deserialize_value\nfrom dagster._utils.backoff import backoff\nfrom dagster_pyspark.utils import build_pyspark_zip\nfrom databricks.sdk.core import DatabricksError\nfrom databricks.sdk.service import jobs\n\nfrom dagster_databricks import databricks_step_main\nfrom dagster_databricks.databricks import (\n    DEFAULT_RUN_MAX_WAIT_TIME_SEC,\n    DatabricksJobRunner,\n)\n\nfrom .configs import (\n    define_databricks_env_variables,\n    define_databricks_permissions,\n    define_databricks_secrets_config,\n    define_databricks_storage_config,\n    define_databricks_submit_run_config,\n    define_oauth_credentials,\n)\n\nCODE_ZIP_NAME = "code.zip"\nPICKLED_CONFIG_FILE_NAME = "config.pkl"\nDAGSTER_SYSTEM_ENV_VARS = {\n    "DAGSTER_CLOUD_DEPLOYMENT_NAME",\n    "DAGSTER_CLOUD_IS_BRANCH_DEPLOYMENT",\n    "DAGSTER_CLOUD_GIT_SHA",\n    "DAGSTER_CLOUD_GIT_TIMESTAMP",\n    "DAGSTER_CLOUD_GIT_AUTHOR_EMAIL",\n    "DAGSTER_CLOUD_GIT_AUTHOR_NAME",\n    "DAGSTER_CLOUD_GIT_MESSAGE",\n    "DAGSTER_CLOUD_GIT_BRANCH",\n    "DAGSTER_CLOUD_GIT_REPO",\n    "DAGSTER_CLOUD_PULL_REQUEST_ID",\n    "DAGSTER_CLOUD_PULL_REQUEST_STATUS",\n}\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n {\n "run_config": define_databricks_submit_run_config(),\n "permissions": define_databricks_permissions(),\n "databricks_host": Field(\n StringSource,\n is_required=True,\n description="Databricks host, e.g. uksouth.azuredatabricks.com",\n ),\n "databricks_token": Field(\n Noneable(StringSource),\n default_value=None,\n description="Databricks access token",\n ),\n "oauth_credentials": define_oauth_credentials(),\n "env_variables": define_databricks_env_variables(),\n "secrets_to_env_variables": define_databricks_secrets_config(),\n "storage": define_databricks_storage_config(),\n "local_pipeline_package_path": Field(\n StringSource,\n is_required=False,\n description=(\n "Absolute path to root python package containing your Dagster code. If you set this"\n " value to a directory lower than the root package, and have user relative imports"\n " in your code (e.g. `from .foo import bar`), it's likely you'll encounter an"\n " import error on the remote step. Before every step run, the launcher will zip up"\n " the code in this local path, upload it to DBFS, and unzip it into the Python path"\n " of the remote Spark process. This gives the remote process access to up-to-date"\n " user code."\n ),\n ),\n "local_dagster_job_package_path": Field(\n StringSource,\n is_required=False,\n description=(\n "Absolute path to root python package containing your Dagster code. If you set this"\n " value to a directory lower than the root package, and have user relative imports"\n " in your code (e.g. `from .foo import bar`), it's likely you'll encounter an"\n " import error on the remote step. Before every step run, the launcher will zip up"\n " the code in this local path, upload it to DBFS, and unzip it into the Python path"\n " of the remote Spark process. This gives the remote process access to up-to-date"\n " user code."\n ),\n ),\n "staging_prefix": Field(\n StringSource,\n is_required=False,\n default_value="/dagster_staging",\n description="Directory in DBFS to use for uploaded job code. Must be absolute.",\n ),\n "wait_for_logs": Field(\n Bool,\n is_required=False,\n default_value=False,\n description=(\n "If set, and if the specified cluster is configured to export logs, the system will"\n " wait after job completion for the logs to appear in the configured location. Note"\n " that logs are copied every 5 minutes, so enabling this will add several minutes"\n " to the job runtime. NOTE: this integration will export stdout/stderrfrom the"\n " remote Databricks process automatically, so this option is not generally"\n " necessary."\n ),\n ),\n "max_completion_wait_time_seconds": Field(\n IntSource,\n is_required=False,\n default_value=DEFAULT_RUN_MAX_WAIT_TIME_SEC,\n description=(\n "If the Databricks job run takes more than this many seconds, then "\n "consider it failed and terminate the step."\n ),\n ),\n "poll_interval_sec": Field(\n float,\n is_required=False,\n default_value=5.0,\n description=(\n "How frequently Dagster will poll Databricks to determine the state of the job."\n ),\n ),\n "verbose_logs": Field(\n bool,\n default_value=True,\n description=(\n "Determines whether to display debug logs emitted while job is being polled. 
It can"\n " be helpful for Dagster UI performance to set to False when running long-running"\n " or fan-out Databricks jobs, to avoid forcing the UI to fetch large amounts of"\n " debug logs."\n ),\n ),\n "add_dagster_env_variables": Field(\n bool,\n default_value=True,\n description=(\n "Automatically add Dagster system environment variables. This option is only"\n " applicable when the code being executed is deployed on Dagster Cloud. It will be"\n " ignored when the environment variables provided by Dagster Cloud are not present."\n ),\n ),\n }\n)\ndef databricks_pyspark_step_launcher(\n context: InitResourceContext,\n) -> "DatabricksPySparkStepLauncher":\n """Resource for running ops as a Databricks Job.\n\n When this resource is used, the op will be executed in Databricks using the 'Run Submit'\n API. Pipeline code will be zipped up and copied to a directory in DBFS along with the op's\n execution context.\n\n Use the 'run_config' configuration to specify the details of the Databricks cluster used, and\n the 'storage' key to configure persistent storage on that cluster. Storage is accessed by\n setting the credentials in the Spark context, as documented `here for S3`_ and `here for ADLS`_.\n\n .. _`here for S3`: https://docs.databricks.com/data/data-sources/aws/amazon-s3.html#alternative-1-set-aws-keys-in-the-spark-context\n .. _`here for ADLS`: https://docs.microsoft.com/en-gb/azure/databricks/data/data-sources/azure/azure-datalake-gen2#--access-directly-using-the-storage-account-access-key\n """\n return DatabricksPySparkStepLauncher(**context.resource_config)
\n\n\nclass DatabricksPySparkStepLauncher(StepLauncher):\n def __init__(\n self,\n run_config: Mapping[str, Any],\n permissions: Mapping[str, Any],\n databricks_host: str,\n secrets_to_env_variables: Sequence[Mapping[str, Any]],\n staging_prefix: str,\n wait_for_logs: bool,\n max_completion_wait_time_seconds: int,\n databricks_token: Optional[str] = None,\n oauth_credentials: Optional[Mapping[str, str]] = None,\n env_variables: Optional[Mapping[str, str]] = None,\n storage: Optional[Mapping[str, Any]] = None,\n poll_interval_sec: int = 5,\n local_pipeline_package_path: Optional[str] = None,\n local_dagster_job_package_path: Optional[str] = None,\n verbose_logs: bool = True,\n add_dagster_env_variables: bool = True,\n ):\n self.run_config = check.mapping_param(run_config, "run_config")\n self.permissions = check.mapping_param(permissions, "permissions")\n self.databricks_host = check.str_param(databricks_host, "databricks_host")\n\n check.invariant(\n databricks_token is not None or oauth_credentials is not None,\n "Must provide either databricks_token or oauth_credentials",\n )\n check.invariant(\n databricks_token is None or oauth_credentials is None,\n "Must provide either databricks_token or oauth_credentials, but cannot provide both",\n )\n self.databricks_token = check.opt_str_param(databricks_token, "databricks_token")\n oauth_credentials = check.opt_mapping_param(\n oauth_credentials,\n "oauth_credentials",\n key_type=str,\n value_type=str,\n )\n\n self.secrets = check.sequence_param(\n secrets_to_env_variables, "secrets_to_env_variables", dict\n )\n self.env_variables = check.opt_mapping_param(env_variables, "env_variables")\n self.storage = check.opt_mapping_param(storage, "storage")\n check.invariant(\n local_dagster_job_package_path is not None or local_pipeline_package_path is not None,\n "Missing config: need to provide either 'local_dagster_job_package_path' or"\n " 'local_pipeline_package_path' config entry",\n )\n check.invariant(\n local_dagster_job_package_path is None or local_pipeline_package_path is None,\n "Error in config: Provided both 'local_dagster_job_package_path' and"\n " 'local_pipeline_package_path' entries. 
Need to specify one or the other.",\n )\n self.local_dagster_job_package_path = check.str_param(\n local_pipeline_package_path or local_dagster_job_package_path,\n "local_dagster_job_package_path",\n )\n self.staging_prefix = check.str_param(staging_prefix, "staging_prefix")\n check.invariant(staging_prefix.startswith("/"), "staging_prefix must be an absolute path")\n self.wait_for_logs = check.bool_param(wait_for_logs, "wait_for_logs")\n\n self.databricks_runner = DatabricksJobRunner(\n host=databricks_host,\n token=databricks_token,\n oauth_client_id=oauth_credentials.get("client_id"),\n oauth_client_secret=oauth_credentials.get("client_secret"),\n poll_interval_sec=poll_interval_sec,\n max_wait_time_sec=max_completion_wait_time_seconds,\n )\n self.verbose_logs = check.bool_param(verbose_logs, "verbose_logs")\n self.add_dagster_env_variables = check.bool_param(\n add_dagster_env_variables, "add_dagster_env_variables"\n )\n\n def launch_step(self, step_context: StepExecutionContext) -> Iterator[DagsterEvent]:\n step_run_ref = step_context_to_step_run_ref(\n step_context, self.local_dagster_job_package_path\n )\n run_id = step_context.dagster_run.run_id\n log = step_context.log\n\n step_key = step_run_ref.step_key\n self._upload_artifacts(log, step_run_ref, run_id, step_key)\n\n task = self._get_databricks_task(run_id, step_key)\n databricks_run_id = self.databricks_runner.submit_run(self.run_config, task)\n\n if self.permissions:\n self._grant_permissions(log, databricks_run_id)\n\n try:\n # If this is being called within a `capture_interrupts` context, allow interrupts while\n # waiting for the execution to complete, so that we can terminate slow or hanging steps\n with raise_execution_interrupts():\n yield from self.step_events_iterator(step_context, step_key, databricks_run_id)\n except:\n # if executon is interrupted before the step is completed, cancel the run\n self.databricks_runner.client.workspace_client.jobs.cancel_run(databricks_run_id)\n raise\n finally:\n self.log_compute_logs(log, run_id, step_key)\n # this is somewhat obsolete\n if self.wait_for_logs:\n self._log_logs_from_cluster(log, databricks_run_id)\n\n def log_compute_logs(self, log: DagsterLogManager, run_id: str, step_key: str) -> None:\n try:\n stdout = self.databricks_runner.client.read_file(\n self._dbfs_path(run_id, step_key, "stdout")\n ).decode()\n log.info(f"Captured stdout for step {step_key}:")\n log.info(stdout)\n sys.stdout.write(stdout)\n except Exception as e:\n log.error(\n f"Encountered exception {e} when attempting to load stdout logs for step"\n f" {step_key}. Check the databricks console for more info."\n )\n try:\n stderr = self.databricks_runner.client.read_file(\n self._dbfs_path(run_id, step_key, "stderr")\n ).decode()\n log.info(f"Captured stderr for step {step_key}:")\n log.info(stderr)\n sys.stderr.write(stderr)\n except Exception as e:\n log.error(\n f"Encountered exception {e} when attempting to load stderr logs for step"\n f" {step_key}. Check the databricks console for more info."\n )\n\n def step_events_iterator(\n self, step_context: StepExecutionContext, step_key: str, databricks_run_id: int\n ) -> Iterator[DagsterEvent]:\n """The launched Databricks job writes all event records to a specific dbfs file. This iterator\n regularly reads the contents of the file, adds any events that have not yet been seen to\n the instance, and yields any DagsterEvents.\n\n By doing this, we simulate having the remote Databricks process able to directly write to\n the local DagsterInstance. 
Importantly, this means that timestamps (and all other record\n properties) will be sourced from the Databricks process, rather than recording when this\n process happens to log them.\n """\n check.int_param(databricks_run_id, "databricks_run_id")\n processed_events = 0\n start_poll_time = time.time()\n done = False\n step_context.log.info("Waiting for Databricks run %s to complete..." % databricks_run_id)\n while not done:\n with raise_execution_interrupts():\n if self.verbose_logs:\n step_context.log.debug(\n "Waiting %.1f seconds...", self.databricks_runner.poll_interval_sec\n )\n time.sleep(self.databricks_runner.poll_interval_sec)\n try:\n done = self.databricks_runner.client.poll_run_state(\n logger=step_context.log,\n start_poll_time=start_poll_time,\n databricks_run_id=databricks_run_id,\n max_wait_time_sec=self.databricks_runner.max_wait_time_sec,\n verbose_logs=self.verbose_logs,\n )\n finally:\n all_events = self.get_step_events(\n step_context.run_id, step_key, step_context.previous_attempt_count\n )\n # we get all available records on each poll, but we only want to process the\n # ones we haven't seen before\n for event in all_events[processed_events:]:\n # write each event from the DataBricks instance to the local instance\n step_context.instance.handle_new_event(event)\n if event.is_dagster_event:\n yield event.get_dagster_event()\n processed_events = len(all_events)\n\n step_context.log.info(f"Databricks run {databricks_run_id} completed.")\n\n def get_step_events(\n self, run_id: str, step_key: str, retry_number: int\n ) -> Sequence[EventLogEntry]:\n path = self._dbfs_path(run_id, step_key, f"{retry_number}_{PICKLED_EVENTS_FILE_NAME}")\n\n def _get_step_records() -> Sequence[EventLogEntry]:\n serialized_records = self.databricks_runner.client.read_file(path)\n if not serialized_records:\n return []\n return cast(\n Sequence[EventLogEntry],\n deserialize_value(pickle.loads(gzip.decompress(serialized_records))),\n )\n\n try:\n # reading from dbfs while it writes can be flaky\n # allow for retry if we get malformed data\n return backoff(\n fn=_get_step_records,\n retry_on=(pickle.UnpicklingError, OSError, zlib.error, EOFError),\n max_retries=4,\n )\n # if you poll before the Databricks process has had a chance to create the file,\n # we expect to get this error\n except DatabricksError as e:\n if e.error_code == "RESOURCE_DOES_NOT_EXIST":\n return []\n raise\n\n def _grant_permissions(\n self, log: DagsterLogManager, databricks_run_id: int, request_retries: int = 3\n ) -> None:\n client = self.databricks_runner.client.workspace_client\n # Retrieve run info\n cluster_id = None\n for i in range(1, request_retries + 1):\n run_info = client.jobs.get_run(databricks_run_id)\n # if a new job cluster is created, the cluster_instance key may not be immediately present in the run response\n try:\n cluster_id = run_info.cluster_instance.cluster_id\n break\n except:\n log.warning(\n f"Failed to retrieve cluster info for databricks_run_id {databricks_run_id}. "\n f"Retrying {i} of {request_retries} times."\n )\n time.sleep(5)\n if not cluster_id:\n log.warning(\n f"Failed to retrieve cluster info for databricks_run_id {databricks_run_id} "\n f"{request_retries} times. 
Skipping permission updates..."\n )\n return\n\n # Update job permissions\n if "job_permissions" in self.permissions:\n job_permissions = self._format_permissions(self.permissions["job_permissions"])\n job_id = run_info.job_id # type: ignore # (??)\n log.debug(f"Updating job permissions with following json: {job_permissions}")\n client.permissions.update("jobs", job_id, access_control_list=job_permissions)\n log.info("Successfully updated cluster permissions")\n\n # Update cluster permissions\n if "cluster_permissions" in self.permissions:\n if "existing" in self.run_config["cluster"]:\n raise ValueError(\n "Attempting to update permissions of an existing cluster. "\n "This is dangerous and thus unsupported."\n )\n cluster_permissions = self._format_permissions(self.permissions["cluster_permissions"])\n log.debug(f"Updating cluster permissions with following json: {cluster_permissions}")\n client.permissions.update(\n "clusters", cluster_id, access_control_list=cluster_permissions\n )\n log.info("Successfully updated cluster permissions")\n\n def _format_permissions(\n self, input_permissions: Mapping[str, Sequence[Mapping[str, str]]]\n ) -> Sequence[Mapping[str, str]]:\n access_control_list = []\n for permission, accessors in input_permissions.items():\n access_control_list.extend(\n [\n jobs.JobAccessControlRequest.from_dict(\n {"permission_level": permission, **accessor}\n )\n for accessor in accessors\n ]\n )\n return access_control_list\n\n def _get_databricks_task(self, run_id: str, step_key: str) -> Mapping[str, Any]:\n """Construct the 'task' parameter to be submitted to the Databricks API.\n\n This will create a 'spark_python_task' dict where `python_file` is a path on DBFS\n pointing to the 'databricks_step_main.py' file, and `parameters` is an array with a single\n element, a path on DBFS pointing to the picked `step_run_ref` data.\n\n See https://docs.databricks.com/dev-tools/api/latest/jobs.html#jobssparkpythontask.\n """\n python_file = self._dbfs_path(run_id, step_key, self._main_file_name())\n parameters = [\n self._internal_dbfs_path(run_id, step_key, PICKLED_STEP_RUN_REF_FILE_NAME),\n self._internal_dbfs_path(run_id, step_key, PICKLED_CONFIG_FILE_NAME),\n self._internal_dbfs_path(run_id, step_key, CODE_ZIP_NAME),\n ]\n return {"spark_python_task": {"python_file": python_file, "parameters": parameters}}\n\n def _upload_artifacts(\n self, log: DagsterLogManager, step_run_ref: StepRunRef, run_id: str, step_key: str\n ) -> None:\n """Upload the step run ref and pyspark code to DBFS to run as a job."""\n log.info("Uploading main file to DBFS")\n main_local_path = self._main_file_local_path()\n with open(main_local_path, "rb") as infile:\n self.databricks_runner.client.put_file(\n infile, self._dbfs_path(run_id, step_key, self._main_file_name()), overwrite=True\n )\n\n log.info("Uploading dagster job to DBFS")\n with tempfile.TemporaryDirectory() as temp_dir:\n # Zip and upload package containing dagster job\n zip_local_path = os.path.join(temp_dir, CODE_ZIP_NAME)\n build_pyspark_zip(zip_local_path, self.local_dagster_job_package_path)\n with open(zip_local_path, "rb") as infile:\n self.databricks_runner.client.put_file(\n infile, self._dbfs_path(run_id, step_key, CODE_ZIP_NAME), overwrite=True\n )\n\n log.info("Uploading step run ref file to DBFS")\n step_pickle_file = io.BytesIO()\n\n pickle.dump(step_run_ref, step_pickle_file)\n step_pickle_file.seek(0)\n self.databricks_runner.client.put_file(\n step_pickle_file,\n self._dbfs_path(run_id, step_key, 
PICKLED_STEP_RUN_REF_FILE_NAME),\n overwrite=True,\n )\n\n databricks_config = self.create_remote_config()\n log.info("Uploading Databricks configuration to DBFS")\n databricks_config_file = io.BytesIO()\n pickle.dump(databricks_config, databricks_config_file)\n databricks_config_file.seek(0)\n self.databricks_runner.client.put_file(\n databricks_config_file,\n self._dbfs_path(run_id, step_key, PICKLED_CONFIG_FILE_NAME),\n overwrite=True,\n )\n\n def get_dagster_env_variables(self) -> Dict[str, str]:\n out = {}\n if self.add_dagster_env_variables:\n for var in DAGSTER_SYSTEM_ENV_VARS:\n if os.getenv(var):\n out.update({var: os.getenv(var)})\n return out\n\n def create_remote_config(self) -> "DatabricksConfig":\n env_variables = self.get_dagster_env_variables()\n env_variables.update(self.env_variables)\n databricks_config = DatabricksConfig(\n env_variables=env_variables,\n storage=self.storage,\n secrets=self.secrets,\n )\n return databricks_config\n\n def _log_logs_from_cluster(self, log: DagsterLogManager, run_id: int) -> None:\n logs = self.databricks_runner.retrieve_logs_for_run_id(log, run_id)\n if logs is None:\n return\n stdout, stderr = logs\n if stderr:\n log.info(stderr)\n if stdout:\n log.info(stdout)\n\n def _main_file_name(self) -> str:\n return os.path.basename(self._main_file_local_path())\n\n def _main_file_local_path(self) -> str:\n return databricks_step_main.__file__\n\n def _sanitize_step_key(self, step_key: str) -> str:\n # step_keys of dynamic steps contain brackets, which are invalid characters\n return step_key.replace("[", "__").replace("]", "__")\n\n def _dbfs_path(self, run_id: str, step_key: str, filename: str) -> str:\n path = "/".join(\n [\n self.staging_prefix,\n run_id,\n self._sanitize_step_key(step_key),\n os.path.basename(filename),\n ]\n )\n return f"dbfs://{path}"\n\n def _internal_dbfs_path(self, run_id: str, step_key: str, filename: str) -> str:\n """Scripts running on Databricks should access DBFS at /dbfs/."""\n path = "/".join(\n [\n self.staging_prefix,\n run_id,\n self._sanitize_step_key(step_key),\n os.path.basename(filename),\n ]\n )\n return f"/dbfs/{path}"\n\n\nclass DatabricksConfig:\n """Represents configuration required by Databricks to run jobs.\n\n Instances of this class will be created when a Databricks step is launched and will contain\n all configuration and secrets required to set up storage and environment variables within\n the Databricks environment. The instance will be serialized and uploaded to Databricks\n by the step launcher, then deserialized as part of the 'main' script when the job is running\n in Databricks.\n\n The `setup` method handles the actual setup prior to op execution on the Databricks side.\n\n This config is separated out from the regular Dagster run config system because the setup\n is done by the 'main' script before entering a Dagster context (i.e. 
using `run_step_from_ref`).\n We use a separate class to avoid coupling the setup to the format of the `step_run_ref` object.\n """\n\n def __init__(\n self,\n env_variables: Mapping[str, str],\n storage: Mapping[str, Any],\n secrets: Sequence[Mapping[str, Any]],\n ):\n """Create a new DatabricksConfig object.\n\n `storage` and `secrets` should be of the same shape as the `storage` and\n `secrets_to_env_variables` config passed to `databricks_pyspark_step_launcher`.\n """\n self.env_variables = env_variables\n self.storage = storage\n self.secrets = secrets\n\n def setup(self, dbutils: Any, sc: Any) -> None:\n """Set up storage and environment variables on Databricks.\n\n The `dbutils` and `sc` arguments must be passed in by the 'main' script, as they\n aren't accessible by any other modules.\n """\n self.setup_storage(dbutils, sc)\n self.setup_environment(dbutils)\n\n def setup_storage(self, dbutils: Any, sc: Any) -> None:\n """Set up storage using either S3 or ADLS2."""\n if "s3" in self.storage:\n self.setup_s3_storage(self.storage["s3"], dbutils, sc)\n elif "adls2" in self.storage:\n self.setup_adls2_storage(self.storage["adls2"], dbutils, sc)\n\n def setup_s3_storage(self, s3_storage: Mapping[str, Any], dbutils: Any, sc: Any) -> None:\n """Obtain AWS credentials from Databricks secrets and export so both Spark and boto can use them."""\n scope = s3_storage["secret_scope"]\n\n access_key = dbutils.secrets.get(scope=scope, key=s3_storage["access_key_key"])\n secret_key = dbutils.secrets.get(scope=scope, key=s3_storage["secret_key_key"])\n\n # Spark APIs will use this.\n # See https://docs.databricks.com/data/data-sources/aws/amazon-s3.html#alternative-1-set-aws-keys-in-the-spark-context.\n sc._jsc.hadoopConfiguration().set("fs.s3n.awsAccessKeyId", access_key) # noqa: SLF001\n sc._jsc.hadoopConfiguration().set("fs.s3n.awsSecretAccessKey", secret_key) # noqa: SLF001\n\n # Boto will use these.\n os.environ["AWS_ACCESS_KEY_ID"] = access_key\n os.environ["AWS_SECRET_ACCESS_KEY"] = secret_key\n\n def setup_adls2_storage(self, adls2_storage: Mapping[str, Any], dbutils: Any, sc: Any) -> None:\n """Obtain an Azure Storage Account key from Databricks secrets and export so Spark can use it."""\n storage_account_key = dbutils.secrets.get(\n scope=adls2_storage["secret_scope"], key=adls2_storage["storage_account_key_key"]\n )\n # Spark APIs will use this.\n # See https://docs.microsoft.com/en-gb/azure/databricks/data/data-sources/azure/azure-datalake-gen2#--access-directly-using-the-storage-account-access-key\n # sc is globally defined in the Databricks runtime and points to the Spark context\n sc._jsc.hadoopConfiguration().set( # noqa: SLF001\n "fs.azure.account.key.{}.dfs.core.windows.net".format(\n adls2_storage["storage_account_name"]\n ),\n storage_account_key,\n )\n\n def setup_environment(self, dbutils: Any) -> None:\n """Setup any environment variables required by the run.\n\n Extract any secrets in the run config and export them as environment variables.\n\n This is important for any `StringSource` config since the environment variables\n won't ordinarily be available in the Databricks execution environment.\n """\n for env_k, env_v in self.env_variables.items():\n os.environ[env_k] = env_v\n\n for secret in self.secrets:\n name = secret["name"]\n key = secret["key"]\n scope = secret["scope"]\n print(f"Exporting {name} from Databricks secret {key}, scope {scope}") # noqa: T201\n val = dbutils.secrets.get(scope=scope, key=key)\n os.environ[name] = val\n
", "current_page_name": "_modules/dagster_databricks/databricks_pyspark_step_launcher", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_databricks.databricks_pyspark_step_launcher"}, "ops": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_databricks.ops

\nfrom typing import TYPE_CHECKING, Optional\n\nfrom dagster import (\n    Config,\n    In,\n    Nothing,\n    OpExecutionContext,\n    _check as check,\n    op,\n)\nfrom dagster._core.definitions.op_definition import OpDefinition\nfrom databricks.sdk.service import jobs\nfrom pydantic import Field\n\nDEFAULT_POLL_INTERVAL_SECONDS = 10\n# wait at most 24 hours by default for run execution\nDEFAULT_MAX_WAIT_TIME_SECONDS = 24 * 60 * 60\n\nif TYPE_CHECKING:\n    from .databricks import DatabricksClient\n\n\n
[docs]def create_databricks_run_now_op(\n databricks_job_id: int,\n databricks_job_configuration: Optional[dict] = None,\n poll_interval_seconds: float = DEFAULT_POLL_INTERVAL_SECONDS,\n max_wait_time_seconds: float = DEFAULT_MAX_WAIT_TIME_SECONDS,\n name: Optional[str] = None,\n databricks_resource_key: str = "databricks",\n) -> OpDefinition:\n """Creates an op that launches an existing databricks job.\n\n As config, the op accepts a blob of the form described in Databricks' Job API:\n https://docs.databricks.com/api-explorer/workspace/jobs/runnow. The only required field is\n ``job_id``, which is the ID of the job to be executed. Additional fields can be used to specify\n override parameters for the Databricks Job.\n\n Arguments:\n databricks_job_id (int): The ID of the Databricks Job to be executed.\n databricks_job_configuration (dict): Configuration for triggering a new job run of a\n Databricks Job. See https://docs.databricks.com/api-explorer/workspace/jobs/runnow\n for the full configuration.\n poll_interval_seconds (float): How often to poll the Databricks API to check whether the\n Databricks job has finished running.\n max_wait_time_seconds (float): How long to wait for the Databricks job to finish running\n before raising an error.\n name (Optional[str]): The name of the op. If not provided, the name will be\n _databricks_run_now_op.\n databricks_resource_key (str): The name of the resource key used by this op. If not\n provided, the resource key will be "databricks".\n\n Returns:\n OpDefinition: An op definition to run the Databricks Job.\n\n Example:\n .. code-block:: python\n\n from dagster import job\n from dagster_databricks import create_databricks_run_now_op, DatabricksClientResource\n\n DATABRICKS_JOB_ID = 1234\n\n\n run_now_op = create_databricks_run_now_op(\n databricks_job_id=DATABRICKS_JOB_ID,\n databricks_job_configuration={\n "python_params": [\n "--input",\n "schema.db.input_table",\n "--output",\n "schema.db.output_table",\n ],\n },\n )\n\n @job(\n resource_defs={\n "databricks": DatabricksClientResource(\n host=EnvVar("DATABRICKS_HOST"),\n token=EnvVar("DATABRICKS_TOKEN")\n )\n }\n )\n def do_stuff():\n run_now_op()\n """\n _poll_interval_seconds = poll_interval_seconds\n _max_wait_time_seconds = max_wait_time_seconds\n\n class DatabricksRunNowOpConfig(Config):\n poll_interval_seconds: float = Field(\n default=_poll_interval_seconds,\n description="Check whether the Databricks Job is done at this interval, in seconds.",\n )\n max_wait_time_seconds: int = Field(\n default=_max_wait_time_seconds,\n description=(\n "If the Databricks Job is not complete after this length of time, in seconds,"\n " raise an error."\n ),\n )\n\n @op(\n ins={"start_after": In(Nothing)},\n required_resource_keys={databricks_resource_key},\n tags={"kind": "databricks"},\n name=name,\n )\n def _databricks_run_now_op(context: OpExecutionContext, config: DatabricksRunNowOpConfig):\n databricks: DatabricksClient = getattr(context.resources, databricks_resource_key)\n jobs_service = databricks.workspace_client.jobs\n\n run = jobs_service.run_now(\n job_id=databricks_job_id,\n **(databricks_job_configuration or {}),\n )\n run_id = run.bind()["run_id"]\n\n get_run_response = jobs_service.get_run(run_id=run_id)\n\n context.log.info(\n f"Launched databricks job run for '{get_run_response.run_name}' (`{run_id}`). URL:"\n f" {get_run_response.run_page_url}. 
Waiting for the run to complete."\n )\n\n databricks.wait_for_run_to_complete(\n logger=context.log,\n databricks_run_id=run_id,\n poll_interval_sec=config.poll_interval_seconds,\n max_wait_time_sec=config.max_wait_time_seconds,\n )\n\n return _databricks_run_now_op
\n\n\n
[docs]def create_databricks_submit_run_op(\n databricks_job_configuration: dict,\n poll_interval_seconds: float = DEFAULT_POLL_INTERVAL_SECONDS,\n max_wait_time_seconds: float = DEFAULT_MAX_WAIT_TIME_SECONDS,\n name: Optional[str] = None,\n databricks_resource_key: str = "databricks",\n) -> OpDefinition:\n """Creates an op that submits a one-time run of a set of tasks on Databricks.\n\n As config, the op accepts a blob of the form described in Databricks' Job API:\n https://docs.databricks.com/api-explorer/workspace/jobs/submit.\n\n Arguments:\n databricks_job_configuration (dict): Configuration for submitting a one-time run of a set\n of tasks on Databricks. See https://docs.databricks.com/api-explorer/workspace/jobs/submit\n for the full configuration.\n poll_interval_seconds (float): How often to poll the Databricks API to check whether the\n Databricks job has finished running.\n max_wait_time_seconds (float): How long to wait for the Databricks job to finish running\n before raising an error.\n name (Optional[str]): The name of the op. If not provided, the name will be\n _databricks_submit_run_op.\n databricks_resource_key (str): The name of the resource key used by this op. If not\n provided, the resource key will be "databricks".\n\n Returns:\n OpDefinition: An op definition to submit a one-time run of a set of tasks on Databricks.\n\n Example:\n .. code-block:: python\n\n from dagster import job\n from dagster_databricks import create_databricks_submit_run_op, DatabricksClientResource\n\n\n submit_run_op = create_databricks_submit_run_op(\n databricks_job_configuration={\n "new_cluster": {\n "spark_version": '2.1.0-db3-scala2.11',\n "num_workers": 2\n },\n "notebook_task": {\n "notebook_path": "/Users/dagster@example.com/PrepareData",\n },\n }\n )\n\n @job(\n resource_defs={\n "databricks": DatabricksClientResource(\n host=EnvVar("DATABRICKS_HOST"),\n token=EnvVar("DATABRICKS_TOKEN")\n )\n }\n )\n def do_stuff():\n submit_run_op()\n """\n check.invariant(\n bool(databricks_job_configuration),\n "Configuration for the one-time Databricks Job is required.",\n )\n\n _poll_interval_seconds = poll_interval_seconds\n _max_wait_time_seconds = max_wait_time_seconds\n\n class DatabricksSubmitRunOpConfig(Config):\n poll_interval_seconds: float = Field(\n default=_poll_interval_seconds,\n description="Check whether the Databricks Job is done at this interval, in seconds.",\n )\n max_wait_time_seconds: int = Field(\n default=_max_wait_time_seconds,\n description=(\n "If the Databricks Job is not complete after this length of time, in seconds,"\n " raise an error."\n ),\n )\n\n @op(\n ins={"start_after": In(Nothing)},\n required_resource_keys={databricks_resource_key},\n tags={"kind": "databricks"},\n name=name,\n )\n def _databricks_submit_run_op(\n context: OpExecutionContext, config: DatabricksSubmitRunOpConfig\n ) -> None:\n databricks: DatabricksClient = getattr(context.resources, databricks_resource_key)\n jobs_service = databricks.workspace_client.jobs\n\n run = jobs_service.submit(\n tasks=[jobs.SubmitTask.from_dict(databricks_job_configuration)],\n )\n run_id: int = run.bind()["run_id"]\n\n get_run_response = jobs_service.get_run(run_id=run_id)\n\n context.log.info(\n f"Launched databricks job run for '{get_run_response.run_name}' (`{run_id}`). URL:"\n f" {get_run_response.run_page_url}. 
Waiting for the run to complete."\n )\n\n databricks.wait_for_run_to_complete(\n logger=context.log,\n databricks_run_id=run_id,\n poll_interval_sec=config.poll_interval_seconds,\n max_wait_time_sec=config.max_wait_time_seconds,\n )\n\n return _databricks_submit_run_op
\n
", "current_page_name": "_modules/dagster_databricks/ops", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_databricks.ops"}, "pipes": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_databricks.pipes

\nimport base64\nimport json\nimport os\nimport random\nimport string\nimport sys\nimport time\nfrom contextlib import ExitStack, contextmanager\nfrom typing import Iterator, Literal, Mapping, Optional, TextIO\n\nimport dagster._check as check\nfrom dagster._annotations import experimental\nfrom dagster._core.definitions.resource_annotation import ResourceParam\nfrom dagster._core.errors import DagsterPipesExecutionError\nfrom dagster._core.execution.context.compute import OpExecutionContext\nfrom dagster._core.pipes.client import (\n    PipesClient,\n    PipesClientCompletedInvocation,\n    PipesContextInjector,\n    PipesMessageReader,\n)\nfrom dagster._core.pipes.utils import (\n    PipesBlobStoreMessageReader,\n    PipesBlobStoreStdioReader,\n    PipesChunkedStdioReader,\n    open_pipes_session,\n)\nfrom dagster_pipes import (\n    DAGSTER_PIPES_MESSAGES_ENV_VAR,\n    PipesContextData,\n    PipesExtras,\n    PipesParams,\n)\nfrom databricks.sdk import WorkspaceClient\nfrom databricks.sdk.service import files, jobs\nfrom pydantic import Field\n\n# Number of seconds between status checks on Databricks jobs launched by the\n# `PipesDatabricksClient`.\n_RUN_POLL_INTERVAL = 5\n\n\n@experimental\nclass _PipesDatabricksClient(PipesClient):\n    """Pipes client for databricks.\n\n    Args:\n        client (WorkspaceClient): A databricks `WorkspaceClient` object.\n        env (Optional[Mapping[str,str]]: An optional dict of environment variables to pass to the\n            databricks job.\n        context_injector (Optional[PipesContextInjector]): A context injector to use to inject\n            context into the k8s container process. Defaults to :py:class:`PipesDbfsContextInjector`.\n        message_reader (Optional[PipesMessageReader]): A message reader to use to read messages\n            from the databricks job. 
Defaults to :py:class:`PipesDbfsMessageReader`.\n    """\n\n    env: Optional[Mapping[str, str]] = Field(\n        default=None,\n        description="An optional dict of environment variables to pass to the subprocess.",\n    )\n\n    def __init__(\n        self,\n        client: WorkspaceClient,\n        env: Optional[Mapping[str, str]] = None,\n        context_injector: Optional[PipesContextInjector] = None,\n        message_reader: Optional[PipesMessageReader] = None,\n    ):\n        self.client = client\n        self.env = env\n        self.context_injector = check.opt_inst_param(\n            context_injector,\n            "context_injector",\n            PipesContextInjector,\n        ) or PipesDbfsContextInjector(client=self.client)\n        self.message_reader = check.opt_inst_param(\n            message_reader,\n            "message_reader",\n            PipesMessageReader,\n        ) or PipesDbfsMessageReader(\n            client=self.client,\n            stdout_reader=PipesDbfsStdioReader(\n                client=self.client, remote_log_name="stdout", target_stream=sys.stdout\n            ),\n            stderr_reader=PipesDbfsStdioReader(\n                client=self.client, remote_log_name="stderr", target_stream=sys.stderr\n            ),\n        )\n\n    @classmethod\n    def _is_dagster_maintained(cls) -> bool:\n        return True\n\n    def run(\n        self,\n        *,\n        context: OpExecutionContext,\n        extras: Optional[PipesExtras] = None,\n        task: jobs.SubmitTask,\n        submit_args: Optional[Mapping[str, str]] = None,\n    ) -> PipesClientCompletedInvocation:\n        """Synchronously execute a Databricks job with the pipes protocol.\n\n        Args:\n            task (databricks.sdk.service.jobs.SubmitTask): Specification of the databricks\n                task to run. Environment variables used by dagster-pipes will be set under the\n                `spark_env_vars` key of the `new_cluster` field (if there is an existing dictionary\n                here, the EXT environment variables will be merged in). 
Everything else will be\n                passed unaltered under the `tasks` arg to `WorkspaceClient.jobs.submit`.\n            context (OpExecutionContext): The context from the executing op or asset.\n            extras (Optional[PipesExtras]): An optional dict of extra parameters to pass to the\n                subprocess.\n            submit_args (Optional[Mapping[str, str]]): Additional keyword arguments that will be\n                forwarded as-is to `WorkspaceClient.jobs.submit`.\n\n        Returns:\n            PipesClientCompletedInvocation: Wrapper containing results reported by the external\n                process.\n        """\n        with open_pipes_session(\n            context=context,\n            extras=extras,\n            context_injector=self.context_injector,\n            message_reader=self.message_reader,\n        ) as pipes_session:\n            submit_task_dict = task.as_dict()\n            submit_task_dict["new_cluster"]["spark_env_vars"] = {\n                **submit_task_dict["new_cluster"].get("spark_env_vars", {}),\n                **(self.env or {}),\n                **pipes_session.get_bootstrap_env_vars(),\n            }\n            cluster_log_root = pipes_session.get_bootstrap_params()[\n                DAGSTER_PIPES_MESSAGES_ENV_VAR\n            ].get("cluster_log_root")\n            if cluster_log_root is not None:\n                submit_task_dict["new_cluster"]["cluster_log_conf"] = {\n                    "dbfs": {"destination": f"dbfs:{cluster_log_root}"}\n                }\n            task = jobs.SubmitTask.from_dict(submit_task_dict)\n            run_id = self.client.jobs.submit(\n                tasks=[task],\n                **(submit_args or {}),\n            ).bind()["run_id"]\n\n            while True:\n                run = self.client.jobs.get_run(run_id)\n                context.log.info(\n                    f"Databricks run {run_id} current state: {run.state.life_cycle_state}"\n                )\n                if run.state.life_cycle_state in (\n                    jobs.RunLifeCycleState.TERMINATED,\n                    jobs.RunLifeCycleState.SKIPPED,\n                ):\n                    if run.state.result_state == jobs.RunResultState.SUCCESS:\n                        break\n                    else:\n                        raise DagsterPipesExecutionError(\n                            f"Error running Databricks job: {run.state.state_message}"\n                        )\n                elif run.state.life_cycle_state == jobs.RunLifeCycleState.INTERNAL_ERROR:\n                    raise DagsterPipesExecutionError(\n                        f"Error running Databricks job: {run.state.state_message}"\n                    )\n                time.sleep(_RUN_POLL_INTERVAL)\n        return PipesClientCompletedInvocation(tuple(pipes_session.get_results()))\n\n\nPipesDatabricksClient = ResourceParam[_PipesDatabricksClient]\n\n_CONTEXT_FILENAME = "context.json"\n\n\n@contextmanager\ndef dbfs_tempdir(dbfs_client: files.DbfsAPI) -> Iterator[str]:\n    dirname = "".join(random.choices(string.ascii_letters, k=30))\n    tempdir = f"/tmp/{dirname}"\n    dbfs_client.mkdirs(tempdir)\n    try:\n        yield tempdir\n    finally:\n        dbfs_client.delete(tempdir, recursive=True)\n\n\n
[docs]@experimental\nclass PipesDbfsContextInjector(PipesContextInjector):\n """A context injector that injects context into a Databricks job by writing a JSON file to DBFS.\n\n Args:\n client (WorkspaceClient): A databricks `WorkspaceClient` object.\n """\n\n def __init__(self, *, client: WorkspaceClient):\n super().__init__()\n self.dbfs_client = files.DbfsAPI(client.api_client)\n\n @contextmanager\n def inject_context(self, context: "PipesContextData") -> Iterator[PipesParams]:\n """Inject context to external environment by writing it to an automatically-generated\n DBFS temporary file as JSON and exposing the path to the file.\n\n Args:\n context_data (PipesContextData): The context data to inject.\n\n Yields:\n PipesParams: A dict of parameters that can be used by the external process to locate and\n load the injected context data.\n """\n with dbfs_tempdir(self.dbfs_client) as tempdir:\n path = os.path.join(tempdir, _CONTEXT_FILENAME)\n contents = base64.b64encode(json.dumps(context).encode("utf-8")).decode("utf-8")\n self.dbfs_client.put(path, contents=contents, overwrite=True)\n yield {"path": path}\n\n def no_messages_debug_text(self) -> str:\n return (\n "Attempted to inject context via a temporary file in dbfs. Expected"\n " PipesDbfsContextLoader to be explicitly passed to open_dagster_pipes in the external"\n " process."\n )
\n\n\n
[docs]@experimental\nclass PipesDbfsMessageReader(PipesBlobStoreMessageReader):\n """Message reader that reads messages by periodically reading message chunks from an\n automatically-generated temporary directory on DBFS.\n\n If `stdout_reader` or `stderr_reader` are passed, this reader will also start them when\n `read_messages` is called. If they are not passed, then the reader performs no stdout/stderr\n forwarding.\n\n Args:\n interval (float): interval in seconds between attempts to download a chunk\n client (WorkspaceClient): A databricks `WorkspaceClient` object.\n cluster_log_root (Optional[str]): The root path on DBFS where the cluster logs are written.\n If set, this will be used to read stderr/stdout logs.\n stdout_reader (Optional[PipesBlobStoreStdioReader]): A reader for reading stdout logs.\n stderr_reader (Optional[PipesBlobStoreStdioReader]): A reader for reading stderr logs.\n """\n\n def __init__(\n self,\n *,\n interval: float = 10,\n client: WorkspaceClient,\n stdout_reader: Optional[PipesBlobStoreStdioReader] = None,\n stderr_reader: Optional[PipesBlobStoreStdioReader] = None,\n ):\n super().__init__(\n interval=interval, stdout_reader=stdout_reader, stderr_reader=stderr_reader\n )\n self.dbfs_client = files.DbfsAPI(client.api_client)\n\n @contextmanager\n def get_params(self) -> Iterator[PipesParams]:\n with ExitStack() as stack:\n params: PipesParams = {}\n params["path"] = stack.enter_context(dbfs_tempdir(self.dbfs_client))\n if self.stdout_reader or self.stderr_reader:\n params["cluster_log_root"] = stack.enter_context(dbfs_tempdir(self.dbfs_client))\n yield params\n\n def download_messages_chunk(self, index: int, params: PipesParams) -> Optional[str]:\n message_path = os.path.join(params["path"], f"{index}.json")\n try:\n raw_message = self.dbfs_client.read(message_path)\n # Files written to dbfs using the Python IO interface used in PipesDbfsMessageWriter are\n # base64-encoded.\n return base64.b64decode(raw_message.data).decode("utf-8")\n # An error here is an expected result, since an IOError will be thrown if the next message\n # chunk doesn't yet exist. Swallowing the error here is equivalent to doing a no-op on a\n # status check showing a non-existent file.\n except IOError:\n return None\n\n def no_messages_debug_text(self) -> str:\n return (\n "Attempted to read messages from a temporary file in dbfs. Expected"\n " PipesDbfsMessageWriter to be explicitly passed to open_dagster_pipes in the external"\n " process."\n )
\n\n\n@experimental\nclass PipesDbfsStdioReader(PipesChunkedStdioReader):\n """Reader that reads stdout/stderr logs from DBFS.\n\n Args:\n interval (float): interval in seconds between attempts to download a log chunk\n remote_log_name (Literal["stdout", "stderr"]): The name of the log file to read.\n target_stream (TextIO): The stream to which to forward log chunk that have been read.\n client (WorkspaceClient): A databricks `WorkspaceClient` object.\n """\n\n def __init__(\n self,\n *,\n interval: float = 10,\n remote_log_name: Literal["stdout", "stderr"],\n target_stream: TextIO,\n client: WorkspaceClient,\n ):\n super().__init__(interval=interval, target_stream=target_stream)\n self.dbfs_client = files.DbfsAPI(client.api_client)\n self.remote_log_name = remote_log_name\n self.log_position = 0\n self.log_path = None\n\n def download_log_chunk(self, params: PipesParams) -> Optional[str]:\n log_path = self._get_log_path(params)\n if log_path is None:\n return None\n else:\n try:\n read_response = self.dbfs_client.read(log_path)\n assert read_response.data\n content = base64.b64decode(read_response.data).decode("utf-8")\n chunk = content[self.log_position :]\n self.log_position = len(content)\n return chunk\n except IOError:\n return None\n\n def is_ready(self, params: PipesParams) -> bool:\n return self._get_log_path(params) is not None\n\n # The directory containing logs will not exist until either 5 minutes have elapsed or the\n # job has finished.\n def _get_log_path(self, params: PipesParams) -> Optional[str]:\n if self.log_path is None:\n log_root_path = os.path.join(params["cluster_log_root"])\n child_dirs = list(self.dbfs_client.list(log_root_path))\n if len(child_dirs) > 0:\n self.log_path = f"dbfs:{child_dirs[0].path}/driver/{self.remote_log_name}"\n return self.log_path\n
", "current_page_name": "_modules/dagster_databricks/pipes", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_databricks.pipes"}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_databricks.resources

\nfrom typing import Any, Optional\n\nfrom dagster import (\n    Config,\n    ConfigurableResource,\n    IAttachDifferentObjectToOpContext,\n    resource,\n)\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom pydantic import Field, root_validator\n\nfrom .databricks import DatabricksClient\n\n\nclass OauthCredentials(Config):\n    """OAuth credentials for Databricks.\n\n    See https://docs.databricks.com/dev-tools/api/latest/authentication.html#oauth-2-0.\n    """\n\n    client_id: str = Field(description="OAuth client ID")\n    client_secret: str = Field(description="OAuth client secret")\n\n\n
[docs]class DatabricksClientResource(ConfigurableResource, IAttachDifferentObjectToOpContext):\n """Resource which provides a Python client for interacting with Databricks within an\n op or asset.\n """\n\n host: str = Field(description="Databricks host, e.g. https://uksouth.azuredatabricks.com")\n token: Optional[str] = Field(default=None, description="Databricks access token")\n oauth_credentials: Optional[OauthCredentials] = Field(\n default=None,\n description=(\n "Databricks OAuth credentials for using a service principal. See"\n " https://docs.databricks.com/en/dev-tools/auth.html#oauth-2-0"\n ),\n )\n workspace_id: Optional[str] = Field(\n default=None,\n description=(\n "DEPRECATED: The Databricks workspace ID, as described in"\n " https://docs.databricks.com/workspace/workspace-details.html#workspace-instance-names-urls-and-ids."\n " This is no longer used and will be removed in a 0.21."\n ),\n )\n\n @root_validator()\n def has_token_or_oauth_credentials(cls, values):\n token = values.get("token")\n oauth_credentials = values.get("oauth_credentials")\n if not token and not oauth_credentials:\n raise ValueError("Must provide either token or oauth_credentials")\n if token and oauth_credentials:\n raise ValueError("Must provide either token or oauth_credentials, not both")\n return values\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def get_client(self) -> DatabricksClient:\n if self.oauth_credentials:\n client_id = self.oauth_credentials.client_id\n client_secret = self.oauth_credentials.client_secret\n else:\n client_id = None\n client_secret = None\n\n return DatabricksClient(\n host=self.host,\n token=self.token,\n oauth_client_id=client_id,\n oauth_client_secret=client_secret,\n workspace_id=self.workspace_id,\n )\n\n def get_object_to_set_on_execution_context(self) -> Any:\n return self.get_client()
\n\n\n
[docs]@dagster_maintained_resource\n@resource(config_schema=DatabricksClientResource.to_config_schema())\ndef databricks_client(init_context) -> DatabricksClient:\n return DatabricksClientResource.from_resource_context(init_context).get_client()
\n
", "current_page_name": "_modules/dagster_databricks/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_databricks.resources"}}, "dagster_datadog": {"resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_datadog.resources

\nfrom dagster import ConfigurableResource, resource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom datadog import DogStatsd, initialize, statsd\nfrom pydantic import Field\n\n\nclass DatadogClient:\n    # Mirroring levels from the dogstatsd library\n    OK, WARNING, CRITICAL, UNKNOWN = (\n        DogStatsd.OK,\n        DogStatsd.WARNING,\n        DogStatsd.CRITICAL,\n        DogStatsd.UNKNOWN,\n    )\n\n    def __init__(self, api_key: str, app_key: str):\n        self.api_key = api_key\n        self.app_key = app_key\n        initialize(api_key=api_key, app_key=app_key)\n\n        # Pull in methods from the dogstatsd library\n        for method in [\n            "event",\n            "gauge",\n            "increment",\n            "decrement",\n            "histogram",\n            "distribution",\n            "set",\n            "service_check",\n            "timed",\n            "timing",\n        ]:\n            setattr(self, method, getattr(statsd, method))\n\n\n
[docs]class DatadogResource(ConfigurableResource):\n """This resource is a thin wrapper over the\n `dogstatsd library <https://datadogpy.readthedocs.io/en/latest/>`_.\n\n As such, we directly mirror the public API methods of DogStatsd here; you can refer to the\n `DataDog documentation <https://docs.datadoghq.com/developers/dogstatsd/>`_ for how to use this\n resource.\n\n Examples:\n .. code-block:: python\n\n @op\n def datadog_op(datadog_client: ResourceParam[DatadogClient]):\n datadog_client.event('Man down!', 'This server needs assistance.')\n datadog_client.gauge('users.online', 1001, tags=["protocol:http"])\n datadog_client.increment('page.views')\n datadog_client.decrement('page.views')\n datadog_client.histogram('album.photo.count', 26, tags=["gender:female"])\n datadog_client.distribution('album.photo.count', 26, tags=["color:blue"])\n datadog_client.set('visitors.uniques', 999, tags=["browser:ie"])\n datadog_client.service_check('svc.check_name', datadog_client.WARNING)\n datadog_client.timing("query.response.time", 1234)\n\n # Use timed decorator\n @datadog_client.timed('run_fn')\n def run_fn():\n pass\n\n run_fn()\n\n @job\n def job_for_datadog_op() -> None:\n datadog_op()\n\n job_for_datadog_op.execute_in_process(\n resources={"datadog_client": DatadogResource(api_key="FOO", app_key="BAR")}\n )\n\n """\n\n api_key: str = Field(\n description=(\n "Datadog API key. See https://docs.datadoghq.com/account_management/api-app-keys/"\n )\n )\n app_key: str = Field(\n description=(\n "Datadog application key. See"\n " https://docs.datadoghq.com/account_management/api-app-keys/."\n )\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def get_client(self) -> DatadogClient:\n return DatadogClient(self.api_key, self.app_key)
\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n config_schema=DatadogResource.to_config_schema(),\n description="This resource is for publishing to DataDog",\n)\ndef datadog_resource(context) -> DatadogClient:\n """This legacy resource is a thin wrapper over the\n `dogstatsd library <https://datadogpy.readthedocs.io/en/latest/>`_.\n\n Prefer using :py:class:`DatadogResource`.\n\n As such, we directly mirror the public API methods of DogStatsd here; you can refer to the\n `DataDog documentation <https://docs.datadoghq.com/developers/dogstatsd/>`_ for how to use this\n resource.\n\n Examples:\n .. code-block:: python\n\n @op(required_resource_keys={'datadog'})\n def datadog_op(context):\n dd = context.resources.datadog\n\n dd.event('Man down!', 'This server needs assistance.')\n dd.gauge('users.online', 1001, tags=["protocol:http"])\n dd.increment('page.views')\n dd.decrement('page.views')\n dd.histogram('album.photo.count', 26, tags=["gender:female"])\n dd.distribution('album.photo.count', 26, tags=["color:blue"])\n dd.set('visitors.uniques', 999, tags=["browser:ie"])\n dd.service_check('svc.check_name', dd.WARNING)\n dd.timing("query.response.time", 1234)\n\n # Use timed decorator\n @dd.timed('run_fn')\n def run_fn():\n pass\n\n run_fn()\n\n @job(resource_defs={'datadog': datadog_resource})\n def dd_job():\n datadog_op()\n\n result = dd_job.execute_in_process(\n run_config={'resources': {'datadog': {'config': {'api_key': 'YOUR_KEY', 'app_key': 'YOUR_KEY'}}}}\n )\n\n """\n return DatadogResource.from_resource_context(context).get_client()
\n
", "current_page_name": "_modules/dagster_datadog/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_datadog.resources"}}, "dagster_datahub": {"resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_datahub.resources

\nfrom typing import Any, Dict, List, Optional\n\nfrom dagster import InitResourceContext, resource\nfrom dagster._config.pythonic_config import Config, ConfigurableResource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom datahub.emitter.kafka_emitter import (\n    DEFAULT_MCE_KAFKA_TOPIC,\n    DEFAULT_MCP_KAFKA_TOPIC,\n    MCE_KEY,\n    MCP_KEY,\n    DatahubKafkaEmitter,\n    KafkaEmitterConfig,\n)\nfrom datahub.emitter.rest_emitter import DatahubRestEmitter\nfrom pydantic import Field\n\n\n
[docs]class DatahubRESTEmitterResource(ConfigurableResource):\n connection: str = Field(description="Datahub GMS Server")\n token: Optional[str] = Field(default=None, description="Personal Access Token")\n connect_timeout_sec: Optional[float] = None\n read_timeout_sec: Optional[float] = None\n retry_status_codes: Optional[List[int]] = None\n retry_methods: Optional[List[str]] = None\n retry_max_times: Optional[int] = None\n extra_headers: Optional[Dict[str, str]] = None\n ca_certificate_path: Optional[str] = None\n server_telemetry_id: Optional[str] = None # No-op - no longer accepted in DatahubRestEmitter\n disable_ssl_verification: bool = False\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def get_emitter(self) -> DatahubRestEmitter:\n return DatahubRestEmitter(\n gms_server=self.connection,\n token=self.token,\n connect_timeout_sec=self.connect_timeout_sec,\n read_timeout_sec=self.read_timeout_sec,\n retry_status_codes=self.retry_status_codes,\n retry_methods=self.retry_methods,\n retry_max_times=self.retry_max_times,\n extra_headers=self.extra_headers,\n ca_certificate_path=self.ca_certificate_path,\n disable_ssl_verification=self.disable_ssl_verification,\n )
\n\n\n
[docs]@dagster_maintained_resource\n@resource(config_schema=DatahubRESTEmitterResource.to_config_schema())\ndef datahub_rest_emitter(init_context: InitResourceContext) -> DatahubRestEmitter:\n emitter = DatahubRestEmitter(\n gms_server=init_context.resource_config.get("connection"),\n token=init_context.resource_config.get("token"),\n connect_timeout_sec=init_context.resource_config.get("connect_timeout_sec"),\n read_timeout_sec=init_context.resource_config.get("read_timeout_sec"),\n retry_status_codes=init_context.resource_config.get("retry_status_codes"),\n retry_methods=init_context.resource_config.get("retry_methods"),\n retry_max_times=init_context.resource_config.get("retry_max_times"),\n extra_headers=init_context.resource_config.get("extra_headers"),\n ca_certificate_path=init_context.resource_config.get("ca_certificate_path"),\n disable_ssl_verification=init_context.resource_config.get("disable_ssl_verification"),\n )\n # Attempt to hit the server to ensure the resource is properly configured\n emitter.test_connection()\n return emitter
\n\n\nclass DatahubConnection(Config):\n bootstrap: str = Field(description="Kafka Bootstrap Servers. Comma-delimited.")\n schema_registry_url: str = Field(description="Schema Registry Location.")\n schema_registry_config: Dict[str, Any] = Field(\n default={}, description="Extra Schema Registry Config."\n )\n\n\n
[docs]class DatahubKafkaEmitterResource(ConfigurableResource):\n connection: DatahubConnection\n topic: Optional[str] = None\n topic_routes: Dict[str, str] = Field(\n default={\n MCE_KEY: DEFAULT_MCE_KAFKA_TOPIC,\n MCP_KEY: DEFAULT_MCP_KAFKA_TOPIC,\n }\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def get_emitter(self) -> DatahubKafkaEmitter:\n return DatahubKafkaEmitter(\n KafkaEmitterConfig.parse_obj(\n {k: v for k, v in self._convert_to_config_dictionary().items() if v is not None}\n )\n )
\n\n\n
[docs]@dagster_maintained_resource\n@resource(config_schema=DatahubKafkaEmitterResource.to_config_schema())\ndef datahub_kafka_emitter(init_context: InitResourceContext) -> DatahubKafkaEmitter:\n return DatahubKafkaEmitter(KafkaEmitterConfig.parse_obj(init_context.resource_config))
\n
", "current_page_name": "_modules/dagster_datahub/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_datahub.resources"}}, "dagster_dbt": {"asset_decorator": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dbt.asset_decorator

\nfrom typing import (\n    Any,\n    Callable,\n    Dict,\n    FrozenSet,\n    Mapping,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n)\n\nimport dagster._check as check\nfrom dagster import (\n    AssetCheckSpec,\n    AssetKey,\n    AssetOut,\n    AssetsDefinition,\n    BackfillPolicy,\n    DagsterInvalidDefinitionError,\n    Nothing,\n    PartitionsDefinition,\n    multi_asset,\n)\n\nfrom .asset_utils import (\n    DAGSTER_DBT_TRANSLATOR_METADATA_KEY,\n    MANIFEST_METADATA_KEY,\n    default_asset_check_fn,\n    default_code_version_fn,\n    get_deps,\n)\nfrom .dagster_dbt_translator import DagsterDbtTranslator, DbtManifestWrapper\nfrom .dbt_manifest import DbtManifestParam, validate_manifest\nfrom .utils import (\n    ASSET_RESOURCE_TYPES,\n    get_dbt_resource_props_by_dbt_unique_id_from_manifest,\n    output_name_fn,\n    select_unique_ids_from_manifest,\n)\n\n\n
[docs]def dbt_assets(\n *,\n manifest: DbtManifestParam,\n select: str = "fqn:*",\n exclude: Optional[str] = None,\n io_manager_key: Optional[str] = None,\n partitions_def: Optional[PartitionsDefinition] = None,\n dagster_dbt_translator: DagsterDbtTranslator = DagsterDbtTranslator(),\n backfill_policy: Optional[BackfillPolicy] = None,\n op_tags: Optional[Mapping[str, Any]] = None,\n) -> Callable[..., AssetsDefinition]:\n """Create a definition for how to compute a set of dbt resources, described by a manifest.json.\n When invoking dbt commands using :py:class:`~dagster_dbt.DbtCliResource`'s\n :py:meth:`~dagster_dbt.DbtCliResource.cli` method, Dagster events are emitted by calling\n ``yield from`` on the event stream returned by :py:meth:`~dagster_dbt.DbtCliInvocation.stream`.\n\n Args:\n manifest (Union[Mapping[str, Any], str, Path]): The contents of a manifest.json file\n or the path to a manifest.json file. A manifest.json contains a representation of a\n dbt project (models, tests, macros, etc). We use this representation to create\n corresponding Dagster assets.\n select (str): A dbt selection string for the models in a project that you want\n to include. Defaults to ``fqn:*``.\n exclude (Optional[str]): A dbt selection string for the models in a project that you want\n to exclude. Defaults to "".\n io_manager_key (Optional[str]): The IO manager key that will be set on each of the returned\n assets. When other ops are downstream of the loaded assets, the IOManager specified\n here determines how the inputs to those ops are loaded. Defaults to "io_manager".\n partitions_def (Optional[PartitionsDefinition]): Defines the set of partition keys that\n compose the dbt assets.\n dagster_dbt_translator (Optional[DagsterDbtTranslator]): Allows customizing how to map\n dbt models, seeds, etc. to asset keys and asset metadata.\n backfill_policy (Optional[BackfillPolicy]): If a partitions_def is defined, this determines\n how to execute backfills that target multiple partitions.\n op_tags (Optional[Dict[str, Any]]): A dictionary of tags for the op that computes the assets.\n Frameworks may expect and require certain metadata to be attached to a op. Values that\n are not strings will be json encoded and must meet the criteria that\n `json.loads(json.dumps(value)) == value`.\n\n Examples:\n Running ``dbt build`` for a dbt project:\n\n .. code-block:: python\n\n from pathlib import Path\n\n from dagster import AssetExecutionContext\n from dagster_dbt import DbtCliResource, dbt_assets\n\n\n @dbt_assets(manifest=Path("target", "manifest.json"))\n def my_dbt_assets(context: AssetExecutionContext, dbt: DbtCliResource):\n yield from dbt.cli(["build"], context=context).stream()\n\n Running dbt commands with flags:\n\n .. code-block:: python\n\n from pathlib import Path\n\n from dagster import AssetExecutionContext\n from dagster_dbt import DbtCliResource, dbt_assets\n\n\n @dbt_assets(manifest=Path("target", "manifest.json"))\n def my_dbt_assets(context: AssetExecutionContext, dbt: DbtCliResource):\n yield from dbt.cli(["build", "--full-refresh"], context=context).stream()\n\n Running dbt commands with ``--vars``:\n\n .. 
code-block:: python\n\n import json\n from pathlib import Path\n\n from dagster import AssetExecutionContext\n from dagster_dbt import DbtCliResource, dbt_assets\n\n\n @dbt_assets(manifest=Path("target", "manifest.json"))\n def my_dbt_assets(context: AssetExecutionContext, dbt: DbtCliResource):\n dbt_vars = {"key": "value"}\n\n yield from dbt.cli(["build", "--vars", json.dumps(dbt_vars)], context=context).stream()\n\n Retrieving dbt artifacts after running a dbt command:\n\n .. code-block:: python\n\n from pathlib import Path\n\n from dagster import AssetExecutionContext\n from dagster_dbt import DbtCliResource, dbt_assets\n\n\n @dbt_assets(manifest=Path("target", "manifest.json"))\n def my_dbt_assets(context: AssetExecutionContext, dbt: DbtCliResource):\n dbt_build_invocation = dbt.cli(["build"], context=context)\n\n yield from dbt_build_invocation.stream()\n\n run_results_json = dbt_build_invocation.get_artifact("run_results.json")\n\n Running multiple dbt commands for a dbt project:\n\n .. code-block:: python\n\n from pathlib import Path\n\n from dagster import AssetExecutionContext\n from dagster_dbt import DbtCliResource, dbt_assets\n\n\n @dbt_assets(manifest=Path("target", "manifest.json"))\n def my_dbt_assets(context: AssetExecutionContext, dbt: DbtCliResource):\n yield from dbt.cli(["run"], context=context).stream()\n yield from dbt.cli(["test"], context=context).stream()\n\n Customizing the Dagster asset metadata inferred from a dbt project using :py:class:`~dagster_dbt.DagsterDbtTranslator`:\n\n .. code-block:: python\n\n from pathlib import Path\n\n from dagster import AssetExecutionContext\n from dagster_dbt import DagsterDbtTranslator, DbtCliResource, dbt_assets\n\n\n class CustomDagsterDbtTranslator(DagsterDbtTranslator):\n ...\n\n\n @dbt_assets(\n manifest=Path("target", "manifest.json"),\n dagster_dbt_translator=CustomDagsterDbtTranslator(),\n )\n def my_dbt_assets(context: AssetExecutionContext, dbt: DbtCliResource):\n yield from dbt.cli(["build"], context=context).stream()\n\n Invoking another Dagster :py:class:`~dagster.ResourceDefinition` alongside dbt:\n\n .. code-block:: python\n\n from pathlib import Path\n\n from dagster import AssetExecutionContext\n from dagster_dbt import DagsterDbtTranslator, DbtCliResource, dbt_assets\n from dagster_slack import SlackResource\n\n\n @dbt_assets(manifest=Path("target", "manifest.json"))\n def my_dbt_assets(context: AssetExecutionContext, dbt: DbtCliResource, slack: SlackResource):\n yield from dbt.cli(["build"], context=context).stream()\n\n slack_client = slack.get_client()\n slack_client.chat_postMessage(channel="#my-channel", text="dbt build succeeded!")\n\n Defining and accessing Dagster :py:class:`~dagster.Config` alongside dbt:\n\n .. code-block:: python\n\n from pathlib import Path\n\n from dagster import AssetExecutionContext, Config\n from dagster_dbt import DagsterDbtTranslator, DbtCliResource, dbt_assets\n\n\n class MyDbtConfig(Config):\n full_refresh: bool\n\n\n @dbt_assets(manifest=Path("target", "manifest.json"))\n def my_dbt_assets(context: AssetExecutionContext, dbt: DbtCliResource, config: MyDbtConfig):\n dbt_build_args = ["build"]\n if config.full_refresh:\n dbt_build_args += ["--full-refresh"]\n\n yield from dbt.cli(dbt_build_args, context=context).stream()\n\n Defining Dagster :py:class:`~dagster.PartitionDefinition` alongside dbt:\n\n\n .. 
code-block:: python\n\n import json\n from pathlib import Path\n\n from dagster import AssetExecutionContext, DailyPartitionsDefinition\n from dagster_dbt import DbtCliResource, dbt_assets\n\n\n @dbt_assets(\n manifest=Path("target", "manifest.json"),\n partitions_def=DailyPartitionsDefinition(start_date="2023-01-01")\n )\n def partitionshop_dbt_assets(context: AssetExecutionContext, dbt: DbtCliResource):\n time_window = context.asset_partitions_time_window_for_output(\n list(context.selected_output_names)[0]\n )\n\n dbt_vars = {\n "min_date": time_window.start.isoformat(),\n "max_date": time_window.end.isoformat()\n }\n dbt_build_args = ["build", "--vars", json.dumps(dbt_vars)]\n\n yield from dbt.cli(dbt_build_args, context=context).stream()\n\n """\n check.inst_param(\n dagster_dbt_translator,\n "dagster_dbt_translator",\n DagsterDbtTranslator,\n additional_message=(\n "Ensure that the argument is an instantiated class that subclasses"\n " DagsterDbtTranslator."\n ),\n )\n manifest = validate_manifest(manifest)\n\n unique_ids = select_unique_ids_from_manifest(\n select=select, exclude=exclude or "", manifest_json=manifest\n )\n node_info_by_dbt_unique_id = get_dbt_resource_props_by_dbt_unique_id_from_manifest(manifest)\n deps = get_deps(\n dbt_nodes=node_info_by_dbt_unique_id,\n selected_unique_ids=unique_ids,\n asset_resource_types=ASSET_RESOURCE_TYPES,\n )\n (\n non_argument_deps,\n outs,\n internal_asset_deps,\n check_specs,\n ) = get_dbt_multi_asset_args(\n dbt_nodes=node_info_by_dbt_unique_id,\n deps=deps,\n io_manager_key=io_manager_key,\n manifest=manifest,\n dagster_dbt_translator=dagster_dbt_translator,\n )\n\n if op_tags and "dagster-dbt/select" in op_tags:\n raise DagsterInvalidDefinitionError(\n "To specify a dbt selection, use the 'select' argument, not 'dagster-dbt/select'"\n " with op_tags"\n )\n\n if op_tags and "dagster-dbt/exclude" in op_tags:\n raise DagsterInvalidDefinitionError(\n "To specify a dbt exclusion, use the 'exclude' argument, not 'dagster-dbt/exclude'"\n " with op_tags"\n )\n\n resolved_op_tags = {\n **({"dagster-dbt/select": select} if select else {}),\n **({"dagster-dbt/exclude": exclude} if exclude else {}),\n **(op_tags if op_tags else {}),\n }\n\n def inner(fn) -> AssetsDefinition:\n asset_definition = multi_asset(\n outs=outs,\n internal_asset_deps=internal_asset_deps,\n deps=non_argument_deps,\n compute_kind="dbt",\n partitions_def=partitions_def,\n can_subset=True,\n op_tags=resolved_op_tags,\n check_specs=check_specs,\n backfill_policy=backfill_policy,\n )(fn)\n\n return asset_definition\n\n return inner
\n\n\ndef get_dbt_multi_asset_args(\n dbt_nodes: Mapping[str, Any],\n deps: Mapping[str, FrozenSet[str]],\n io_manager_key: Optional[str],\n manifest: Mapping[str, Any],\n dagster_dbt_translator: DagsterDbtTranslator,\n) -> Tuple[\n Sequence[AssetKey],\n Dict[str, AssetOut],\n Dict[str, Set[AssetKey]],\n Sequence[AssetCheckSpec],\n]:\n non_argument_deps: Set[AssetKey] = set()\n outs: Dict[str, AssetOut] = {}\n internal_asset_deps: Dict[str, Set[AssetKey]] = {}\n check_specs: Sequence[AssetCheckSpec] = []\n\n for unique_id, parent_unique_ids in deps.items():\n dbt_resource_props = dbt_nodes[unique_id]\n\n output_name = output_name_fn(dbt_resource_props)\n asset_key = dagster_dbt_translator.get_asset_key(dbt_resource_props)\n\n outs[output_name] = AssetOut(\n key=asset_key,\n dagster_type=Nothing,\n io_manager_key=io_manager_key,\n description=dagster_dbt_translator.get_description(dbt_resource_props),\n is_required=False,\n metadata={ # type: ignore\n **dagster_dbt_translator.get_metadata(dbt_resource_props),\n MANIFEST_METADATA_KEY: DbtManifestWrapper(manifest=manifest),\n DAGSTER_DBT_TRANSLATOR_METADATA_KEY: dagster_dbt_translator,\n },\n group_name=dagster_dbt_translator.get_group_name(dbt_resource_props),\n code_version=default_code_version_fn(dbt_resource_props),\n freshness_policy=dagster_dbt_translator.get_freshness_policy(dbt_resource_props),\n auto_materialize_policy=dagster_dbt_translator.get_auto_materialize_policy(\n dbt_resource_props\n ),\n )\n\n test_unique_ids = [\n child_unique_id\n for child_unique_id in manifest["child_map"][unique_id]\n if child_unique_id.startswith("test")\n ]\n for test_unique_id in test_unique_ids:\n test_resource_props = manifest["nodes"][test_unique_id]\n check_spec = default_asset_check_fn(\n asset_key, unique_id, dagster_dbt_translator.settings, test_resource_props\n )\n\n if check_spec:\n check_specs.append(check_spec)\n\n # Translate parent unique ids to internal asset deps and non argument dep\n output_internal_deps = internal_asset_deps.setdefault(output_name, set())\n for parent_unique_id in parent_unique_ids:\n parent_resource_props = dbt_nodes[parent_unique_id]\n parent_asset_key = dagster_dbt_translator.get_asset_key(parent_resource_props)\n\n # Add this parent as an internal dependency\n output_internal_deps.add(parent_asset_key)\n\n # Mark this parent as an input if it has no dependencies\n if parent_unique_id not in deps:\n non_argument_deps.add(parent_asset_key)\n\n return list(non_argument_deps), outs, internal_asset_deps, check_specs\n
", "current_page_name": "_modules/dagster_dbt/asset_decorator", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dbt.asset_decorator"}, "asset_defs": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dbt.asset_defs

\nimport hashlib\nimport json\nimport os\nfrom pathlib import Path\nfrom typing import (\n    AbstractSet,\n    Any,\n    Callable,\n    Dict,\n    Iterator,\n    List,\n    Mapping,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n    Union,\n    cast,\n)\n\nimport dateutil\nfrom dagster import (\n    AssetCheckResult,\n    AssetKey,\n    AssetsDefinition,\n    AutoMaterializePolicy,\n    FreshnessPolicy,\n    In,\n    OpExecutionContext,\n    Out,\n    PartitionsDefinition,\n    PermissiveConfig,\n    _check as check,\n    get_dagster_logger,\n    op,\n)\nfrom dagster._annotations import deprecated_param\nfrom dagster._core.definitions.events import (\n    AssetMaterialization,\n    AssetObservation,\n    CoercibleToAssetKeyPrefix,\n    Output,\n)\nfrom dagster._core.definitions.metadata import MetadataUserInput, RawMetadataValue\nfrom dagster._core.errors import DagsterInvalidSubsetError\nfrom dagster._utils.merger import deep_merge_dicts\nfrom dagster._utils.warnings import (\n    deprecation_warning,\n    normalize_renamed_param,\n)\n\nfrom dagster_dbt.asset_utils import (\n    default_asset_key_fn,\n    default_auto_materialize_policy_fn,\n    default_description_fn,\n    default_freshness_policy_fn,\n    default_group_from_dbt_resource_props,\n    default_metadata_from_dbt_resource_props,\n    get_asset_deps,\n    get_deps,\n)\nfrom dagster_dbt.core.resources import DbtCliClient\nfrom dagster_dbt.core.resources_v2 import DbtCliResource\nfrom dagster_dbt.core.types import DbtCliOutput\nfrom dagster_dbt.core.utils import build_command_args_from_flags, execute_cli\nfrom dagster_dbt.dagster_dbt_translator import DagsterDbtTranslator\nfrom dagster_dbt.errors import DagsterDbtError\nfrom dagster_dbt.types import DbtOutput\nfrom dagster_dbt.utils import (\n    ASSET_RESOURCE_TYPES,\n    output_name_fn,\n    result_to_events,\n    select_unique_ids_from_manifest,\n)\n\n\ndef _load_manifest_for_project(\n    project_dir: str,\n    profiles_dir: str,\n    target_dir: str,\n    select: str,\n    exclude: str,\n) -> Tuple[Mapping[str, Any], DbtCliOutput]:\n    # running "dbt ls" regenerates the manifest.json, which includes a superset of the actual\n    # "dbt ls" output\n    cli_output = execute_cli(\n        executable="dbt",\n        command="ls",\n        log=get_dagster_logger(),\n        flags_dict={\n            "project-dir": project_dir,\n            "profiles-dir": profiles_dir,\n            "select": select,\n            "exclude": exclude,\n            "output": "json",\n        },\n        warn_error=False,\n        ignore_handled_error=False,\n        target_path=target_dir,\n        json_log_format=True,\n        capture_logs=True,\n    )\n    manifest_path = os.path.join(target_dir, "manifest.json")\n    with open(manifest_path, "r", encoding="utf8") as f:\n        return json.load(f), cli_output\n\n\ndef _can_stream_events(dbt_resource: Union[DbtCliClient, DbtCliResource]) -> bool:\n    """Check if the installed dbt version supports streaming events."""\n    import dbt.version\n    from packaging import version\n\n    if version.parse(dbt.version.__version__) >= version.parse("1.4.0"):\n        # The json log format is required for streaming events. 
DbtCliResource always uses this format, but\n        # DbtCliClient has an option to disable it.\n        if isinstance(dbt_resource, DbtCliResource):\n            return True\n        else:\n            return dbt_resource._json_log_format  # noqa: SLF001\n    else:\n        return False\n\n\ndef _batch_event_iterator(\n    context: OpExecutionContext,\n    dbt_resource: DbtCliClient,\n    use_build_command: bool,\n    node_info_to_asset_key: Callable[[Mapping[str, Any]], AssetKey],\n    runtime_metadata_fn: Optional[\n        Callable[[OpExecutionContext, Mapping[str, Any]], Mapping[str, RawMetadataValue]]\n    ],\n    kwargs: Dict[str, Any],\n) -> Iterator[Union[AssetObservation, AssetMaterialization, Output]]:\n    """Yields events for a dbt cli invocation. Waits until the entire command has completed before\n    emitting outputs.\n    """\n    # clean up any run results from the last run\n    dbt_resource.remove_run_results_json()\n\n    dbt_output: Optional[DbtOutput] = None\n    try:\n        if use_build_command:\n            dbt_output = dbt_resource.build(**kwargs)\n        else:\n            dbt_output = dbt_resource.run(**kwargs)\n    finally:\n        # in the case that the project only partially runs successfully, still attempt to generate\n        # events for the parts that were successful\n        if dbt_output is None:\n            dbt_output = DbtOutput(result=check.not_none(dbt_resource.get_run_results_json()))\n\n        manifest_json = check.not_none(dbt_resource.get_manifest_json())\n\n        dbt_output = check.not_none(dbt_output)\n        for result in dbt_output.result["results"]:\n            extra_metadata: Optional[Mapping[str, RawMetadataValue]] = None\n            if runtime_metadata_fn:\n                node_info = manifest_json["nodes"][result["unique_id"]]\n                extra_metadata = runtime_metadata_fn(context, node_info)\n            yield from result_to_events(\n                result=result,\n                docs_url=dbt_output.docs_url,\n                node_info_to_asset_key=node_info_to_asset_key,\n                manifest_json=manifest_json,\n                extra_metadata=extra_metadata,\n                generate_asset_outputs=True,\n            )\n\n\ndef _events_for_structured_json_line(\n    json_line: Mapping[str, Any],\n    context: OpExecutionContext,\n    node_info_to_asset_key: Callable[[Mapping[str, Any]], AssetKey],\n    runtime_metadata_fn: Optional[\n        Callable[[OpExecutionContext, Mapping[str, Any]], Mapping[str, RawMetadataValue]]\n    ],\n    manifest_json: Mapping[str, Any],\n) -> Iterator[Union[AssetObservation, Output]]:\n    """Parses a json line into a Dagster event. 
Attempts to replicate the behavior of result_to_events\n    as closely as possible.\n    """\n    runtime_node_info = json_line.get("data", {}).get("node_info", {})\n    if not runtime_node_info:\n        return\n\n    node_resource_type = runtime_node_info.get("resource_type")\n    node_status = runtime_node_info.get("node_status")\n    unique_id = runtime_node_info.get("unique_id")\n\n    if not node_resource_type or not unique_id:\n        return\n\n    compiled_node_info = manifest_json["nodes"][unique_id]\n\n    if node_resource_type in ASSET_RESOURCE_TYPES and node_status == "success":\n        metadata = dict(\n            runtime_metadata_fn(context, compiled_node_info) if runtime_metadata_fn else {}\n        )\n        started_at_str = runtime_node_info.get("node_started_at")\n        finished_at_str = runtime_node_info.get("node_finished_at")\n        if started_at_str is None or finished_at_str is None:\n            return\n\n        started_at = dateutil.parser.isoparse(started_at_str)  # type: ignore\n        completed_at = dateutil.parser.isoparse(finished_at_str)  # type: ignore\n        duration = completed_at - started_at\n        metadata.update(\n            {\n                "Execution Started At": started_at.isoformat(timespec="seconds"),\n                "Execution Completed At": completed_at.isoformat(timespec="seconds"),\n                "Execution Duration": duration.total_seconds(),\n            }\n        )\n        yield Output(\n            value=None,\n            output_name=output_name_fn(compiled_node_info),\n            metadata=metadata,\n        )\n    elif node_resource_type == "test" and runtime_node_info.get("node_finished_at"):\n        upstream_unique_ids = (\n            manifest_json["nodes"][unique_id].get("depends_on", {}).get("nodes", [])\n        )\n        # tests can apply to multiple asset keys\n        for upstream_id in upstream_unique_ids:\n            # the upstream id can reference a node or a source\n            upstream_node_info = manifest_json["nodes"].get(upstream_id) or manifest_json[\n                "sources"\n            ].get(upstream_id)\n            if upstream_node_info is None:\n                continue\n            upstream_asset_key = node_info_to_asset_key(upstream_node_info)\n            yield AssetObservation(\n                asset_key=upstream_asset_key,\n                metadata={\n                    "Test ID": unique_id,\n                    "Test Status": node_status,\n                },\n            )\n\n\ndef _stream_event_iterator(\n    context: OpExecutionContext,\n    dbt_resource: Union[DbtCliResource, DbtCliClient],\n    use_build_command: bool,\n    node_info_to_asset_key: Callable[[Mapping[str, Any]], AssetKey],\n    runtime_metadata_fn: Optional[\n        Callable[[OpExecutionContext, Mapping[str, Any]], Mapping[str, RawMetadataValue]]\n    ],\n    kwargs: Dict[str, Any],\n    manifest_json: Mapping[str, Any],\n) -> Iterator[Union[AssetObservation, Output, AssetCheckResult]]:\n    """Yields events for a dbt cli invocation. 
Emits outputs as soon as the relevant dbt logs are\n    emitted.\n    """\n    if isinstance(dbt_resource, DbtCliClient):\n        for parsed_json_line in dbt_resource.cli_stream_json(\n            command="build" if use_build_command else "run",\n            **kwargs,\n        ):\n            yield from _events_for_structured_json_line(\n                parsed_json_line,\n                context,\n                node_info_to_asset_key,\n                runtime_metadata_fn,\n                manifest_json,\n            )\n    else:\n        if runtime_metadata_fn is not None:\n            raise DagsterDbtError(\n                "The runtime_metadata_fn argument on the load_assets_from_dbt_manifest and"\n                " load_assets_from_dbt_project functions is not supported when using the"\n                " DbtCliResource resource. Use the @dbt_assets decorator instead if you want"\n                " control over what metadata is yielded at runtime."\n            )\n\n        class CustomDagsterDbtTranslator(DagsterDbtTranslator):\n            @classmethod\n            def get_asset_key(cls, dbt_resource_props: Mapping[str, Any]) -> AssetKey:\n                return node_info_to_asset_key(dbt_resource_props)\n\n        cli_output = dbt_resource.cli(\n            args=["build" if use_build_command else "run", *build_command_args_from_flags(kwargs)],\n            manifest=manifest_json,\n            dagster_dbt_translator=CustomDagsterDbtTranslator(),\n        )\n        yield from cli_output.stream()\n\n\nclass DbtOpConfig(PermissiveConfig):\n    """Keyword arguments to pass to the underlying dbt command. Additional arguments not listed in the schema will\n    be passed through as well, e.g. {'bool_flag': True, 'string_flag': 'hi'} will result in the flags\n    '--bool_flag --string_flag hi' being passed to the dbt command.\n    """\n\n    select: Optional[str] = None\n    exclude: Optional[str] = None\n    vars: Optional[Dict[str, Any]] = None\n    full_refresh: Optional[bool] = None\n\n\ndef _get_dbt_op(\n    op_name: str,\n    ins: Mapping[str, In],\n    outs: Mapping[str, Out],\n    select: str,\n    exclude: str,\n    use_build_command: bool,\n    fqns_by_output_name: Mapping[str, List[str]],\n    dbt_resource_key: str,\n    node_info_to_asset_key: Callable[[Mapping[str, Any]], AssetKey],\n    partition_key_to_vars_fn: Optional[Callable[[str], Mapping[str, Any]]],\n    runtime_metadata_fn: Optional[\n        Callable[[OpExecutionContext, Mapping[str, Any]], Mapping[str, RawMetadataValue]]\n    ],\n    manifest_json: Mapping[str, Any],\n):\n    @op(\n        name=op_name,\n        tags={"kind": "dbt"},\n        ins=ins,\n        out=outs,\n        required_resource_keys={dbt_resource_key},\n    )\n    def _dbt_op(context, config: DbtOpConfig):\n        dbt_resource: Union[DbtCliResource, DbtCliClient] = getattr(\n            context.resources, dbt_resource_key\n        )\n        check.inst(\n            dbt_resource,\n            (DbtCliResource, DbtCliClient),\n            "Resource with key 'dbt_resource_key' must be a DbtCliResource or DbtCliClient"\n            f" object, but is a {type(dbt_resource)}",\n        )\n\n        kwargs: Dict[str, Any] = {}\n        # in the case that we're running everything, opt for the cleaner selection string\n        if len(context.selected_output_names) == len(outs):\n            kwargs["select"] = select\n            kwargs["exclude"] = exclude\n        else:\n            # for each output that we want to emit, translate to a dbt select string 
by converting\n            # the out to its corresponding fqn\n            kwargs["select"] = [\n                ".".join(fqns_by_output_name[output_name])\n                for output_name in context.selected_output_names\n            ]\n        # variables to pass into the command\n        if partition_key_to_vars_fn:\n            kwargs["vars"] = partition_key_to_vars_fn(context.partition_key)\n        # merge in any additional kwargs from the config\n        kwargs = deep_merge_dicts(kwargs, context.op_config)\n\n        if _can_stream_events(dbt_resource):\n            yield from _stream_event_iterator(\n                context,\n                dbt_resource,\n                use_build_command,\n                node_info_to_asset_key,\n                runtime_metadata_fn,\n                kwargs,\n                manifest_json=manifest_json,\n            )\n        else:\n            if not isinstance(dbt_resource, DbtCliClient):\n                check.failed(\n                    "Chose batch event iterator, but it only works with DbtCliClient, and"\n                    f" resource has type {type(dbt_resource)}"\n                )\n            yield from _batch_event_iterator(\n                context,\n                dbt_resource,\n                use_build_command,\n                node_info_to_asset_key,\n                runtime_metadata_fn,\n                kwargs,\n            )\n\n    return _dbt_op\n\n\ndef _dbt_nodes_to_assets(\n    dbt_nodes: Mapping[str, Any],\n    select: str,\n    exclude: str,\n    selected_unique_ids: AbstractSet[str],\n    project_id: str,\n    dbt_resource_key: str,\n    manifest_json: Mapping[str, Any],\n    op_name: Optional[str],\n    runtime_metadata_fn: Optional[\n        Callable[[OpExecutionContext, Mapping[str, Any]], Mapping[str, RawMetadataValue]]\n    ],\n    io_manager_key: Optional[str],\n    use_build_command: bool,\n    partitions_def: Optional[PartitionsDefinition],\n    partition_key_to_vars_fn: Optional[Callable[[str], Mapping[str, Any]]],\n    dagster_dbt_translator: DagsterDbtTranslator,\n) -> AssetsDefinition:\n    if use_build_command:\n        deps = get_deps(\n            dbt_nodes,\n            selected_unique_ids,\n            asset_resource_types=["model", "seed", "snapshot"],\n        )\n    else:\n        deps = get_deps(dbt_nodes, selected_unique_ids, asset_resource_types=["model"])\n\n    (\n        asset_deps,\n        asset_ins,\n        asset_outs,\n        group_names_by_key,\n        freshness_policies_by_key,\n        auto_materialize_policies_by_key,\n        check_specs_by_output_name,\n        fqns_by_output_name,\n        _,\n    ) = get_asset_deps(\n        dbt_nodes=dbt_nodes,\n        deps=deps,\n        io_manager_key=io_manager_key,\n        manifest=manifest_json,\n        dagster_dbt_translator=dagster_dbt_translator,\n    )\n\n    # prevent op name collisions between multiple dbt multi-assets\n    if not op_name:\n        op_name = f"run_dbt_{project_id}"\n        if select != "fqn:*" or exclude:\n            op_name += "_" + hashlib.md5(select.encode() + exclude.encode()).hexdigest()[-5:]\n\n    check_outs_by_output_name: Mapping[str, Out] = {}\n    if check_specs_by_output_name:\n        check_outs_by_output_name = {\n            output_name: Out(dagster_type=None, is_required=False)\n            for output_name in check_specs_by_output_name.keys()\n        }\n\n    dbt_op = _get_dbt_op(\n        op_name=op_name,\n        ins=dict(asset_ins.values()),\n        outs={\n            
**dict(asset_outs.values()),\n            **check_outs_by_output_name,\n        },\n        select=select,\n        exclude=exclude,\n        use_build_command=use_build_command,\n        fqns_by_output_name=fqns_by_output_name,\n        dbt_resource_key=dbt_resource_key,\n        node_info_to_asset_key=dagster_dbt_translator.get_asset_key,\n        partition_key_to_vars_fn=partition_key_to_vars_fn,\n        runtime_metadata_fn=runtime_metadata_fn,\n        manifest_json=manifest_json,\n    )\n\n    return AssetsDefinition(\n        keys_by_input_name={\n            input_name: asset_key for asset_key, (input_name, _) in asset_ins.items()\n        },\n        keys_by_output_name={\n            output_name: asset_key for asset_key, (output_name, _) in asset_outs.items()\n        },\n        node_def=dbt_op,\n        can_subset=True,\n        asset_deps=asset_deps,\n        group_names_by_key=group_names_by_key,\n        freshness_policies_by_key=freshness_policies_by_key,\n        auto_materialize_policies_by_key=auto_materialize_policies_by_key,\n        check_specs_by_output_name=check_specs_by_output_name,\n        partitions_def=partitions_def,\n    )\n\n\n
[docs]def load_assets_from_dbt_project(\n project_dir: str,\n profiles_dir: Optional[str] = None,\n *,\n select: Optional[str] = None,\n exclude: Optional[str] = None,\n dagster_dbt_translator: Optional[DagsterDbtTranslator] = None,\n io_manager_key: Optional[str] = None,\n target_dir: Optional[str] = None,\n # All arguments below are deprecated\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n source_key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n op_name: Optional[str] = None,\n runtime_metadata_fn: Optional[\n Callable[[OpExecutionContext, Mapping[str, Any]], Mapping[str, Any]]\n ] = None,\n node_info_to_asset_key: Callable[[Mapping[str, Any]], AssetKey] = default_asset_key_fn,\n use_build_command: bool = True,\n partitions_def: Optional[PartitionsDefinition] = None,\n partition_key_to_vars_fn: Optional[Callable[[str], Mapping[str, Any]]] = None,\n node_info_to_group_fn: Callable[\n [Mapping[str, Any]], Optional[str]\n ] = default_group_from_dbt_resource_props,\n node_info_to_freshness_policy_fn: Callable[\n [Mapping[str, Any]], Optional[FreshnessPolicy]\n ] = default_freshness_policy_fn,\n node_info_to_auto_materialize_policy_fn: Callable[\n [Mapping[str, Any]], Optional[AutoMaterializePolicy]\n ] = default_auto_materialize_policy_fn,\n node_info_to_definition_metadata_fn: Callable[\n [Mapping[str, Any]], Mapping[str, MetadataUserInput]\n ] = default_metadata_from_dbt_resource_props,\n display_raw_sql: Optional[bool] = None,\n dbt_resource_key: str = "dbt",\n) -> Sequence[AssetsDefinition]:\n """Loads a set of dbt models from a dbt project into Dagster assets.\n\n Creates one Dagster asset for each dbt model. All assets will be re-materialized using a single\n `dbt run` or `dbt build` command.\n\n When searching for more flexibility in defining the computations that materialize your\n dbt assets, we recommend that you use :py:class:`~dagster_dbt.dbt_assets`.\n\n Args:\n project_dir (Optional[str]): The directory containing the dbt project to load.\n profiles_dir (Optional[str]): The profiles directory to use for loading the DBT project.\n Defaults to a directory called "config" inside the project_dir.\n target_dir (Optional[str]): The target directory where dbt will place compiled artifacts.\n Defaults to "target" underneath the project_dir.\n select (Optional[str]): A dbt selection string for the models in a project that you want\n to include. Defaults to `"fqn:*"`.\n exclude (Optional[str]): A dbt selection string for the models in a project that you want\n to exclude. Defaults to "".\n dagster_dbt_translator (Optional[DagsterDbtTranslator]): Allows customizing how to map\n dbt models, seeds, etc. to asset keys and asset metadata.\n key_prefix (Optional[Union[str, List[str]]]): [Deprecated] A key prefix to apply to all assets loaded\n from the dbt project. Does not apply to input assets. Deprecated: use\n dagster_dbt_translator=KeyPrefixDagsterDbtTranslator(key_prefix=...) instead.\n source_key_prefix (Optional[Union[str, List[str]]]): [Deprecated] A key prefix to apply to all input\n assets for the set of assets loaded from the dbt project. Deprecated: use\n dagster_dbt_translator=KeyPrefixDagsterDbtTranslator(source_key_prefix=...) instead.\n op_name (Optional[str]): [Deprecated] Sets the name of the underlying Op that will generate the dbt assets.\n Deprecated: use the `@dbt_assets` decorator if you need to customize the op name.\n dbt_resource_key (Optional[str]): [Deprecated] The resource key that the dbt resource will be specified at.\n Defaults to "dbt". 
Deprecated: use the `@dbt_assets` decorator if you need to customize\n the resource key.\n runtime_metadata_fn (Optional[Callable[[OpExecutionContext, Mapping[str, Any]], Mapping[str, Any]]]): [Deprecated]\n A function that will be run after any of the assets are materialized and returns\n metadata entries for the asset, to be displayed in the asset catalog for that run.\n Deprecated: use the @dbt_assets decorator if you need to customize runtime metadata.\n manifest_json (Optional[Mapping[str, Any]]): [Deprecated] Use the manifest argument instead.\n selected_unique_ids (Optional[Set[str]]): [Deprecated] The set of dbt unique_ids that you want to load\n as assets. Deprecated: use the select argument instead.\n node_info_to_asset_key (Mapping[str, Any] -> AssetKey): [Deprecated] A function that takes a dictionary\n of dbt node info and returns the AssetKey that you want to represent that node. By\n default, the asset key will simply be the name of the dbt model. Deprecated: instead,\n provide a custom DagsterDbtTranslator that overrides node_info_to_asset_key.\n use_build_command (bool): Flag indicating if you want to use `dbt build` as the core computation\n for this asset. Defaults to True. If set to False, then `dbt run` will be used, and\n seeds and snapshots won't be loaded as assets.\n partitions_def (Optional[PartitionsDefinition]): [Deprecated] Defines the set of partition keys that\n compose the dbt assets. Deprecated: use the @dbt_assets decorator to define partitioned\n dbt assets.\n partition_key_to_vars_fn (Optional[str -> Dict[str, Any]]): [Deprecated] A function to translate a given\n partition key (e.g. '2022-01-01') to a dictionary of vars to be passed into the dbt\n invocation (e.g. {"run_date": "2022-01-01"}). Deprecated: use the @dbt_assets decorator\n to define partitioned dbt assets.\n node_info_to_group_fn (Dict[str, Any] -> Optional[str]): [Deprecated] A function that takes a\n dictionary of dbt node info and returns the group that this node should be assigned to.\n Deprecated: instead, configure dagster groups on a dbt resource's meta field or assign\n dbt groups.\n node_info_to_freshness_policy_fn (Dict[str, Any] -> Optional[FreshnessPolicy]): [Deprecated] A function\n that takes a dictionary of dbt node info and optionally returns a FreshnessPolicy that\n should be applied to this node. By default, freshness policies will be created from\n config applied to dbt models, i.e.:\n `dagster_freshness_policy={"maximum_lag_minutes": 60, "cron_schedule": "0 9 * * *"}`\n will result in that model being assigned\n `FreshnessPolicy(maximum_lag_minutes=60, cron_schedule="0 9 * * *")`. Deprecated:\n instead, configure auto-materialize policies on a dbt resource's meta field.\n node_info_to_auto_materialize_policy_fn (Dict[str, Any] -> Optional[AutoMaterializePolicy]): [Deprecated]\n A function that takes a dictionary of dbt node info and optionally returns a AutoMaterializePolicy\n that should be applied to this node. By default, AutoMaterializePolicies will be created from\n config applied to dbt models, i.e.:\n `dagster_auto_materialize_policy={"type": "lazy"}` will result in that model being assigned\n `AutoMaterializePolicy.lazy()`. 
Deprecated: instead, configure auto-materialize\n policies on a dbt resource's meta field.\n node_info_to_definition_metadata_fn (Dict[str, Any] -> Optional[Dict[str, MetadataUserInput]]): [Deprecated]\n A function that takes a dictionary of dbt node info and optionally returns a dictionary\n of metadata to be attached to the corresponding definition. This is added to the default\n metadata assigned to the node, which consists of the node's schema (if present).\n Deprecated: instead, provide a custom DagsterDbtTranslator that overrides\n node_info_to_metadata.\n display_raw_sql (Optional[bool]): [Deprecated] A flag to indicate if the raw sql associated\n with each model should be included in the asset description. For large projects, setting\n this flag to False is advised to reduce the size of the resulting snapshot. Deprecated:\n instead, provide a custom DagsterDbtTranslator that overrides node_info_to_description.\n """\n project_dir = check.str_param(project_dir, "project_dir")\n profiles_dir = check.opt_str_param(\n profiles_dir, "profiles_dir", os.path.join(project_dir, "config")\n )\n target_dir = check.opt_str_param(target_dir, "target_dir", os.path.join(project_dir, "target"))\n select = check.opt_str_param(select, "select", "fqn:*")\n exclude = check.opt_str_param(exclude, "exclude", "")\n\n _raise_warnings_for_deprecated_args(\n "load_assets_from_dbt_manifest",\n selected_unique_ids=None,\n dbt_resource_key=dbt_resource_key,\n use_build_command=use_build_command,\n partitions_def=partitions_def,\n partition_key_to_vars_fn=partition_key_to_vars_fn,\n runtime_metadata_fn=runtime_metadata_fn,\n node_info_to_asset_key=node_info_to_asset_key,\n node_info_to_group_fn=node_info_to_group_fn,\n node_info_to_freshness_policy_fn=node_info_to_freshness_policy_fn,\n node_info_to_auto_materialize_policy_fn=node_info_to_auto_materialize_policy_fn,\n node_info_to_definition_metadata_fn=node_info_to_definition_metadata_fn,\n )\n\n manifest, cli_output = _load_manifest_for_project(\n project_dir, profiles_dir, target_dir, select, exclude\n )\n selected_unique_ids: Set[str] = set(\n filter(None, (line.get("unique_id") for line in cli_output.logs))\n )\n return _load_assets_from_dbt_manifest(\n manifest=manifest,\n select=select,\n exclude=exclude,\n key_prefix=key_prefix,\n source_key_prefix=source_key_prefix,\n dagster_dbt_translator=dagster_dbt_translator,\n op_name=op_name,\n runtime_metadata_fn=runtime_metadata_fn,\n io_manager_key=io_manager_key,\n selected_unique_ids=selected_unique_ids,\n node_info_to_asset_key=node_info_to_asset_key,\n use_build_command=use_build_command,\n partitions_def=partitions_def,\n partition_key_to_vars_fn=partition_key_to_vars_fn,\n node_info_to_auto_materialize_policy_fn=node_info_to_auto_materialize_policy_fn,\n node_info_to_group_fn=node_info_to_group_fn,\n node_info_to_freshness_policy_fn=node_info_to_freshness_policy_fn,\n node_info_to_definition_metadata_fn=node_info_to_definition_metadata_fn,\n display_raw_sql=display_raw_sql,\n dbt_resource_key=dbt_resource_key,\n )
\n\n\n
[docs]@deprecated_param(\n param="manifest_json", breaking_version="0.21", additional_warn_text="Use manifest instead"\n)\n@deprecated_param(\n param="selected_unique_ids",\n breaking_version="0.21",\n additional_warn_text="Use the select parameter instead.",\n)\n@deprecated_param(\n param="dbt_resource_key",\n breaking_version="0.21",\n additional_warn_text=(\n "Use the `@dbt_assets` decorator if you need to customize your resource key."\n ),\n)\n@deprecated_param(\n param="use_build_command",\n breaking_version="0.21",\n additional_warn_text=(\n "Use the `@dbt_assets` decorator if you need to customize the underlying dbt commands."\n ),\n)\n@deprecated_param(\n param="partitions_def",\n breaking_version="0.21",\n additional_warn_text="Use the `@dbt_assets` decorator to define partitioned dbt assets.",\n)\n@deprecated_param(\n param="partition_key_to_vars_fn",\n breaking_version="0.21",\n additional_warn_text="Use the `@dbt_assets` decorator to define partitioned dbt assets.",\n)\n@deprecated_param(\n param="runtime_metadata_fn",\n breaking_version="0.21",\n additional_warn_text=(\n "Use the `@dbt_assets` decorator if you need to customize runtime metadata."\n ),\n)\ndef load_assets_from_dbt_manifest(\n manifest: Optional[Union[Path, Mapping[str, Any]]] = None,\n *,\n select: Optional[str] = None,\n exclude: Optional[str] = None,\n io_manager_key: Optional[str] = None,\n dagster_dbt_translator: Optional[DagsterDbtTranslator] = None,\n # All arguments below are deprecated\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n source_key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n selected_unique_ids: Optional[AbstractSet[str]] = None,\n display_raw_sql: Optional[bool] = None,\n dbt_resource_key: str = "dbt",\n op_name: Optional[str] = None,\n manifest_json: Optional[Mapping[str, Any]] = None,\n use_build_command: bool = True,\n partitions_def: Optional[PartitionsDefinition] = None,\n partition_key_to_vars_fn: Optional[Callable[[str], Mapping[str, Any]]] = None,\n runtime_metadata_fn: Optional[\n Callable[[OpExecutionContext, Mapping[str, Any]], Mapping[str, Any]]\n ] = None,\n node_info_to_asset_key: Callable[[Mapping[str, Any]], AssetKey] = default_asset_key_fn,\n node_info_to_group_fn: Callable[\n [Mapping[str, Any]], Optional[str]\n ] = default_group_from_dbt_resource_props,\n node_info_to_freshness_policy_fn: Callable[\n [Mapping[str, Any]], Optional[FreshnessPolicy]\n ] = default_freshness_policy_fn,\n node_info_to_auto_materialize_policy_fn: Callable[\n [Mapping[str, Any]], Optional[AutoMaterializePolicy]\n ] = default_auto_materialize_policy_fn,\n node_info_to_definition_metadata_fn: Callable[\n [Mapping[str, Any]], Mapping[str, MetadataUserInput]\n ] = default_metadata_from_dbt_resource_props,\n) -> Sequence[AssetsDefinition]:\n """Loads a set of dbt models, described in a manifest.json, into Dagster assets.\n\n Creates one Dagster asset for each dbt model. All assets will be re-materialized using a single\n `dbt run` command.\n\n When searching for more flexibility in defining the computations that materialize your\n dbt assets, we recommend that you use :py:class:`~dagster_dbt.dbt_assets`.\n\n Args:\n manifest (Optional[Mapping[str, Any]]): The contents of a DBT manifest.json, which contains\n a set of models to load into assets.\n select (Optional[str]): A dbt selection string for the models in a project that you want\n to include. Defaults to `"fqn:*"`.\n exclude (Optional[str]): A dbt selection string for the models in a project that you want\n to exclude. 
Defaults to "".\n io_manager_key (Optional[str]): The IO manager key that will be set on each of the returned\n assets. When other ops are downstream of the loaded assets, the IOManager specified\n here determines how the inputs to those ops are loaded. Defaults to "io_manager".\n dagster_dbt_translator (Optional[DagsterDbtTranslator]): Allows customizing how to map\n dbt models, seeds, etc. to asset keys and asset metadata.\n key_prefix (Optional[Union[str, List[str]]]): [Deprecated] A key prefix to apply to all assets loaded\n from the dbt project. Does not apply to input assets. Deprecated: use\n dagster_dbt_translator=KeyPrefixDagsterDbtTranslator(key_prefix=...) instead.\n source_key_prefix (Optional[Union[str, List[str]]]): [Deprecated] A key prefix to apply to all input\n assets for the set of assets loaded from the dbt project. Deprecated: use\n dagster_dbt_translator=KeyPrefixDagsterDbtTranslator(source_key_prefix=...) instead.\n op_name (Optional[str]): [Deprecated] Sets the name of the underlying Op that will generate the dbt assets.\n Deprecated: use the `@dbt_assets` decorator if you need to customize the op name.\n dbt_resource_key (Optional[str]): [Deprecated] The resource key that the dbt resource will be specified at.\n Defaults to "dbt". Deprecated: use the `@dbt_assets` decorator if you need to customize\n the resource key.\n runtime_metadata_fn (Optional[Callable[[OpExecutionContext, Mapping[str, Any]], Mapping[str, Any]]]): [Deprecated]\n A function that will be run after any of the assets are materialized and returns\n metadata entries for the asset, to be displayed in the asset catalog for that run.\n Deprecated: use the @dbt_assets decorator if you need to customize runtime metadata.\n selected_unique_ids (Optional[Set[str]]): [Deprecated] The set of dbt unique_ids that you want to load\n as assets. Deprecated: use the select argument instead.\n node_info_to_asset_key (Mapping[str, Any] -> AssetKey): [Deprecated] A function that takes a dictionary\n of dbt node info and returns the AssetKey that you want to represent that node. By\n default, the asset key will simply be the name of the dbt model.\n use_build_command (bool): Flag indicating if you want to use `dbt build` as the core computation\n for this asset. Defaults to True. If set to False, then `dbt run` will be used, and\n seeds and snapshots won't be loaded as assets.\n partitions_def (Optional[PartitionsDefinition]): [Deprecated] Defines the set of partition keys that\n compose the dbt assets. Deprecated: use the @dbt_assets decorator to define partitioned\n dbt assets.\n partition_key_to_vars_fn (Optional[str -> Dict[str, Any]]): [Deprecated] A function to translate a given\n partition key (e.g. '2022-01-01') to a dictionary of vars to be passed into the dbt\n invocation (e.g. {"run_date": "2022-01-01"}). Deprecated: use the @dbt_assets decorator\n to define partitioned dbt assets.\n node_info_to_group_fn (Dict[str, Any] -> Optional[str]): [Deprecated] A function that takes a\n dictionary of dbt node info and returns the group that this node should be assigned to.\n Deprecated: instead, configure dagster groups on a dbt resource's meta field or assign\n dbt groups.\n node_info_to_freshness_policy_fn (Dict[str, Any] -> Optional[FreshnessPolicy]): [Deprecated] A function\n that takes a dictionary of dbt node info and optionally returns a FreshnessPolicy that\n should be applied to this node. 
By default, freshness policies will be created from\n config applied to dbt models, i.e.:\n `dagster_freshness_policy={"maximum_lag_minutes": 60, "cron_schedule": "0 9 * * *"}`\n will result in that model being assigned\n `FreshnessPolicy(maximum_lag_minutes=60, cron_schedule="0 9 * * *")`. Deprecated:\n instead, configure auto-materialize policies on a dbt resource's meta field.\n node_info_to_auto_materialize_policy_fn (Dict[str, Any] -> Optional[AutoMaterializePolicy]): [Deprecated]\n A function that takes a dictionary of dbt node info and optionally returns a AutoMaterializePolicy\n that should be applied to this node. By default, AutoMaterializePolicies will be created from\n config applied to dbt models, i.e.:\n `dagster_auto_materialize_policy={"type": "lazy"}` will result in that model being assigned\n `AutoMaterializePolicy.lazy()`. Deprecated: instead, configure auto-materialize\n policies on a dbt resource's meta field.\n node_info_to_definition_metadata_fn (Dict[str, Any] -> Optional[Dict[str, MetadataUserInput]]): [Deprecated]\n A function that takes a dictionary of dbt node info and optionally returns a dictionary\n of metadata to be attached to the corresponding definition. This is added to the default\n metadata assigned to the node, which consists of the node's schema (if present).\n Deprecated: instead, provide a custom DagsterDbtTranslator that overrides\n node_info_to_metadata.\n display_raw_sql (Optional[bool]): [Deprecated] A flag to indicate if the raw sql associated\n with each model should be included in the asset description. For large projects, setting\n this flag to False is advised to reduce the size of the resulting snapshot. Deprecated:\n instead, provide a custom DagsterDbtTranslator that overrides node_info_to_description.\n """\n manifest = normalize_renamed_param(\n manifest,\n "manifest",\n manifest_json,\n "manifest_json",\n )\n manifest = cast(\n Union[Mapping[str, Any], Path], check.inst_param(manifest, "manifest", (Path, dict))\n )\n if isinstance(manifest, Path):\n manifest = cast(Mapping[str, Any], json.loads(manifest.read_bytes()))\n\n _raise_warnings_for_deprecated_args(\n "load_assets_from_dbt_manifest",\n selected_unique_ids=selected_unique_ids,\n dbt_resource_key=dbt_resource_key,\n use_build_command=use_build_command,\n partitions_def=partitions_def,\n partition_key_to_vars_fn=partition_key_to_vars_fn,\n runtime_metadata_fn=runtime_metadata_fn,\n node_info_to_asset_key=node_info_to_asset_key,\n node_info_to_group_fn=node_info_to_group_fn,\n node_info_to_freshness_policy_fn=node_info_to_freshness_policy_fn,\n node_info_to_auto_materialize_policy_fn=node_info_to_auto_materialize_policy_fn,\n node_info_to_definition_metadata_fn=node_info_to_definition_metadata_fn,\n )\n\n return _load_assets_from_dbt_manifest(\n manifest=manifest,\n select=select,\n exclude=exclude,\n io_manager_key=io_manager_key,\n dagster_dbt_translator=dagster_dbt_translator,\n key_prefix=key_prefix,\n source_key_prefix=source_key_prefix,\n selected_unique_ids=selected_unique_ids,\n display_raw_sql=display_raw_sql,\n dbt_resource_key=dbt_resource_key,\n op_name=op_name,\n use_build_command=use_build_command,\n partitions_def=partitions_def,\n partition_key_to_vars_fn=partition_key_to_vars_fn,\n runtime_metadata_fn=runtime_metadata_fn,\n node_info_to_asset_key=node_info_to_asset_key,\n node_info_to_group_fn=node_info_to_group_fn,\n node_info_to_freshness_policy_fn=node_info_to_freshness_policy_fn,\n 
node_info_to_auto_materialize_policy_fn=node_info_to_auto_materialize_policy_fn,\n node_info_to_definition_metadata_fn=node_info_to_definition_metadata_fn,\n )
\n\n\ndef _load_assets_from_dbt_manifest(\n manifest: Mapping[str, Any],\n select: Optional[str],\n exclude: Optional[str],\n io_manager_key: Optional[str],\n dagster_dbt_translator: Optional[DagsterDbtTranslator],\n key_prefix: Optional[CoercibleToAssetKeyPrefix],\n source_key_prefix: Optional[CoercibleToAssetKeyPrefix],\n selected_unique_ids: Optional[AbstractSet[str]],\n display_raw_sql: Optional[bool],\n dbt_resource_key: str,\n op_name: Optional[str],\n use_build_command: bool,\n partitions_def: Optional[PartitionsDefinition],\n partition_key_to_vars_fn: Optional[Callable[[str], Mapping[str, Any]]],\n runtime_metadata_fn: Optional[\n Callable[[OpExecutionContext, Mapping[str, Any]], Mapping[str, Any]]\n ],\n node_info_to_asset_key: Callable[[Mapping[str, Any]], AssetKey],\n node_info_to_group_fn: Callable[[Mapping[str, Any]], Optional[str]],\n node_info_to_freshness_policy_fn: Callable[[Mapping[str, Any]], Optional[FreshnessPolicy]],\n node_info_to_auto_materialize_policy_fn: Callable[\n [Mapping[str, Any]], Optional[AutoMaterializePolicy]\n ],\n node_info_to_definition_metadata_fn: Callable[\n [Mapping[str, Any]], Mapping[str, MetadataUserInput]\n ],\n) -> Sequence[AssetsDefinition]:\n if partition_key_to_vars_fn:\n check.invariant(\n partitions_def is not None,\n "Cannot supply a `partition_key_to_vars_fn` without a `partitions_def`.",\n )\n\n dbt_resource_key = check.str_param(dbt_resource_key, "dbt_resource_key")\n\n dbt_nodes = {\n **manifest["nodes"],\n **manifest["sources"],\n **manifest["metrics"],\n **manifest["exposures"],\n }\n\n if selected_unique_ids:\n select = (\n " ".join(".".join(dbt_nodes[uid]["fqn"]) for uid in selected_unique_ids)\n if select is None\n else select\n )\n exclude = "" if exclude is None else exclude\n else:\n select = select if select is not None else "fqn:*"\n exclude = exclude if exclude is not None else ""\n\n selected_unique_ids = select_unique_ids_from_manifest(\n select=select, exclude=exclude, manifest_json=manifest\n )\n if len(selected_unique_ids) == 0:\n raise DagsterInvalidSubsetError(f"No dbt models match the selection string '{select}'.")\n\n if dagster_dbt_translator is not None:\n check.invariant(\n node_info_to_asset_key == default_asset_key_fn,\n "Can't specify both dagster_dbt_translator and node_info_to_asset_key",\n )\n check.invariant(\n key_prefix is None,\n "Can't specify both dagster_dbt_translator and key_prefix",\n )\n check.invariant(\n source_key_prefix is None,\n "Can't specify both dagster_dbt_translator and source_key_prefix",\n )\n check.invariant(\n node_info_to_group_fn == default_group_from_dbt_resource_props,\n "Can't specify both dagster_dbt_translator and node_info_to_group_fn",\n )\n check.invariant(\n display_raw_sql is None,\n "Can't specify both dagster_dbt_translator and display_raw_sql",\n )\n check.invariant(\n node_info_to_definition_metadata_fn is default_metadata_from_dbt_resource_props,\n "Can't specify both dagster_dbt_translator and node_info_to_definition_metadata_fn",\n )\n else:\n\n class CustomDagsterDbtTranslator(DagsterDbtTranslator):\n @classmethod\n def get_asset_key(cls, dbt_resource_props):\n base_key = node_info_to_asset_key(dbt_resource_props)\n if dbt_resource_props["resource_type"] == "source":\n return base_key.with_prefix(source_key_prefix or [])\n else:\n return base_key.with_prefix(key_prefix or [])\n\n @classmethod\n def get_metadata(cls, dbt_resource_props):\n return node_info_to_definition_metadata_fn(dbt_resource_props)\n\n @classmethod\n def get_description(cls, 
dbt_resource_props):\n return default_description_fn(\n dbt_resource_props,\n display_raw_sql=display_raw_sql if display_raw_sql is not None else True,\n )\n\n @classmethod\n def get_group_name(cls, dbt_resource_props):\n return node_info_to_group_fn(dbt_resource_props)\n\n @classmethod\n def get_freshness_policy(\n cls, dbt_resource_props: Mapping[str, Any]\n ) -> Optional[FreshnessPolicy]:\n return node_info_to_freshness_policy_fn(dbt_resource_props)\n\n @classmethod\n def get_auto_materialize_policy(\n cls, dbt_resource_props: Mapping[str, Any]\n ) -> Optional[AutoMaterializePolicy]:\n return node_info_to_auto_materialize_policy_fn(dbt_resource_props)\n\n dagster_dbt_translator = CustomDagsterDbtTranslator()\n\n dbt_assets_def = _dbt_nodes_to_assets(\n dbt_nodes,\n runtime_metadata_fn=runtime_metadata_fn,\n io_manager_key=io_manager_key,\n select=select,\n exclude=exclude,\n selected_unique_ids=selected_unique_ids,\n dbt_resource_key=dbt_resource_key,\n op_name=op_name,\n project_id=manifest["metadata"]["project_id"][:5],\n use_build_command=use_build_command,\n partitions_def=partitions_def,\n partition_key_to_vars_fn=partition_key_to_vars_fn,\n dagster_dbt_translator=dagster_dbt_translator,\n manifest_json=manifest,\n )\n\n return [dbt_assets_def]\n\n\ndef _raise_warnings_for_deprecated_args(\n public_fn_name: str,\n selected_unique_ids: Optional[AbstractSet[str]],\n dbt_resource_key: Optional[str],\n use_build_command: Optional[bool],\n partitions_def: Optional[PartitionsDefinition],\n partition_key_to_vars_fn: Optional[Callable[[str], Mapping[str, Any]]],\n runtime_metadata_fn: Optional[\n Callable[[OpExecutionContext, Mapping[str, Any]], Mapping[str, Any]]\n ],\n node_info_to_asset_key: Callable[[Mapping[str, Any]], AssetKey],\n node_info_to_group_fn: Callable[[Mapping[str, Any]], Optional[str]],\n node_info_to_freshness_policy_fn: Callable[[Mapping[str, Any]], Optional[FreshnessPolicy]],\n node_info_to_auto_materialize_policy_fn: Callable[\n [Mapping[str, Any]], Optional[AutoMaterializePolicy]\n ],\n node_info_to_definition_metadata_fn: Callable[\n [Mapping[str, Any]], Mapping[str, MetadataUserInput]\n ],\n):\n if node_info_to_asset_key != default_asset_key_fn:\n deprecation_warning(\n f"The node_info_to_asset_key_fn arg of {public_fn_name}",\n "0.21",\n "Instead, provide a custom DagsterDbtTranslator that overrides get_asset_key.",\n stacklevel=4,\n )\n\n if node_info_to_group_fn != default_group_from_dbt_resource_props:\n deprecation_warning(\n f"The node_info_to_group_fn arg of {public_fn_name}",\n "0.21",\n "Instead, configure dagster groups on a dbt resource's meta field or assign dbt"\n " groups or provide a custom DagsterDbtTranslator that overrides get_group_name.",\n stacklevel=4,\n )\n\n if node_info_to_auto_materialize_policy_fn != default_auto_materialize_policy_fn:\n deprecation_warning(\n f"The node_info_to_auto_materialize_policy_fn arg of {public_fn_name}",\n "0.21",\n "Instead, configure Dagster auto-materialize policies on a dbt resource's meta field.",\n stacklevel=4,\n )\n\n if node_info_to_freshness_policy_fn != default_freshness_policy_fn:\n deprecation_warning(\n f"The node_info_to_freshness_policy_fn arg of {public_fn_name}",\n "0.21",\n "Instead, configure Dagster freshness policies on a dbt resource's meta field.",\n stacklevel=4,\n )\n\n if node_info_to_definition_metadata_fn != default_metadata_from_dbt_resource_props:\n deprecation_warning(\n f"The node_info_to_definition_metadata_fn arg of {public_fn_name}",\n "0.21",\n "Instead, provide a custom 
DagsterDbtTranslator that overrides get_metadata.",\n stacklevel=4,\n )\n
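# A minimal sketch of the migration the deprecation warnings above recommend: instead of
# node_info_to_asset_key / node_info_to_group_fn, subclass DagsterDbtTranslator and override
# the corresponding hooks, then pass an instance via the dagster_dbt_translator argument of
# load_assets_from_dbt_manifest. The "analytics" prefix and "dbt" fallback group below are
# illustrative values, not library defaults.
from typing import Any, Mapping, Optional

from dagster import AssetKey
from dagster_dbt.asset_utils import default_asset_key_fn
from dagster_dbt.dagster_dbt_translator import DagsterDbtTranslator


class PrefixedDagsterDbtTranslator(DagsterDbtTranslator):
    @classmethod
    def get_asset_key(cls, dbt_resource_props: Mapping[str, Any]) -> AssetKey:
        # Reuse the default key derivation, then add an illustrative prefix.
        return default_asset_key_fn(dbt_resource_props).with_prefix(["analytics"])

    @classmethod
    def get_group_name(cls, dbt_resource_props: Mapping[str, Any]) -> Optional[str]:
        # Prefer a Dagster group from meta, then a dbt group from config, then a fixed fallback.
        dagster_group = dbt_resource_props.get("meta", {}).get("dagster", {}).get("group")
        return dagster_group or dbt_resource_props.get("config", {}).get("group") or "dbt"


# Usage sketch: load_assets_from_dbt_manifest(manifest, dagster_dbt_translator=PrefixedDagsterDbtTranslator())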
", "current_page_name": "_modules/dagster_dbt/asset_defs", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dbt.asset_defs"}, "asset_utils": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dbt.asset_utils

\nimport hashlib\nimport textwrap\nfrom typing import (\n    TYPE_CHECKING,\n    AbstractSet,\n    Any,\n    Dict,\n    FrozenSet,\n    List,\n    Mapping,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n    cast,\n)\n\nfrom dagster import (\n    AssetCheckSpec,\n    AssetKey,\n    AssetsDefinition,\n    AssetSelection,\n    AutoMaterializePolicy,\n    DagsterInvariantViolationError,\n    FreshnessPolicy,\n    In,\n    MetadataValue,\n    Nothing,\n    Out,\n    RunConfig,\n    ScheduleDefinition,\n    TableColumn,\n    TableSchema,\n    _check as check,\n    define_asset_job,\n)\nfrom dagster._core.definitions.decorators.asset_decorator import (\n    _validate_and_assign_output_names_to_check_specs,\n)\nfrom dagster._utils.merger import merge_dicts\nfrom dagster._utils.warnings import deprecation_warning\n\nfrom .utils import input_name_fn, output_name_fn\n\nif TYPE_CHECKING:\n    from .dagster_dbt_translator import (\n        DagsterDbtTranslator,\n        DagsterDbtTranslatorSettings,\n        DbtManifestWrapper,\n    )\n\nMANIFEST_METADATA_KEY = "dagster_dbt/manifest"\nDAGSTER_DBT_TRANSLATOR_METADATA_KEY = "dagster_dbt/dagster_dbt_translator"\n\n\n
[docs]def get_asset_key_for_model(dbt_assets: Sequence[AssetsDefinition], model_name: str) -> AssetKey:\n """Return the corresponding Dagster asset key for a dbt model.\n\n Args:\n dbt_assets (AssetsDefinition): An AssetsDefinition object produced by\n load_assets_from_dbt_project, load_assets_from_dbt_manifest, or @dbt_assets.\n model_name (str): The name of the dbt model.\n\n Returns:\n AssetKey: The corresponding Dagster asset key.\n\n Examples:\n .. code-block:: python\n\n from dagster import asset\n from dagster_dbt import dbt_assets, get_asset_key_for_model\n\n @dbt_assets(manifest=...)\n def all_dbt_assets():\n ...\n\n\n @asset(deps={get_asset_key_for_model([all_dbt_assets], "customers")})\n def cleaned_customers():\n ...\n """\n check.sequence_param(dbt_assets, "dbt_assets", of_type=AssetsDefinition)\n check.str_param(model_name, "model_name")\n\n manifest, dagster_dbt_translator = get_manifest_and_translator_from_dbt_assets(dbt_assets)\n\n matching_models = [\n value\n for value in manifest["nodes"].values()\n if value["name"] == model_name and value["resource_type"] == "model"\n ]\n\n if len(matching_models) == 0:\n raise KeyError(f"Could not find a dbt model with name: {model_name}")\n\n return dagster_dbt_translator.get_asset_key(next(iter(matching_models)))
\n\n\n
[docs]def get_asset_keys_by_output_name_for_source(\n dbt_assets: Sequence[AssetsDefinition], source_name: str\n) -> Mapping[str, AssetKey]:\n """Returns the corresponding Dagster asset keys for all tables in a dbt source.\n\n This is a convenience method that makes it easy to define a multi-asset that generates\n all the tables for a given dbt source.\n\n Args:\n source_name (str): The name of the dbt source.\n\n Returns:\n Mapping[str, AssetKey]: A mapping of the table name to corresponding Dagster asset key\n for all tables in the given dbt source.\n\n Examples:\n .. code-block:: python\n\n from dagster import AssetOut, multi_asset\n from dagster_dbt import dbt_assets, get_asset_keys_by_output_name_for_source\n\n @dbt_assets(manifest=...)\n def all_dbt_assets():\n ...\n\n @multi_asset(\n outs={\n name: AssetOut(key=asset_key)\n for name, asset_key in get_asset_keys_by_output_name_for_source(\n [all_dbt_assets], "raw_data"\n ).items()\n },\n )\n def upstream_python_asset():\n ...\n\n """\n check.sequence_param(dbt_assets, "dbt_assets", of_type=AssetsDefinition)\n check.str_param(source_name, "source_name")\n\n manifest, dagster_dbt_translator = get_manifest_and_translator_from_dbt_assets(dbt_assets)\n\n matching_nodes = [\n value for value in manifest["sources"].values() if value["source_name"] == source_name\n ]\n\n if len(matching_nodes) == 0:\n raise KeyError(f"Could not find a dbt source with name: {source_name}")\n\n return {\n output_name_fn(value): dagster_dbt_translator.get_asset_key(value)\n for value in matching_nodes\n }
\n\n\n
[docs]def get_asset_key_for_source(dbt_assets: Sequence[AssetsDefinition], source_name: str) -> AssetKey:\n """Returns the corresponding Dagster asset key for a dbt source with a singular table.\n\n Args:\n source_name (str): The name of the dbt source.\n\n Raises:\n DagsterInvalidInvocationError: If the source has more than one table.\n\n Returns:\n AssetKey: The corresponding Dagster asset key.\n\n Examples:\n .. code-block:: python\n\n from dagster import asset\n from dagster_dbt import dbt_assets, get_asset_key_for_source\n\n @dbt_assets(manifest=...)\n def all_dbt_assets():\n ...\n\n @asset(key=get_asset_key_for_source([all_dbt_assets], "my_source"))\n def upstream_python_asset():\n ...\n """\n asset_keys_by_output_name = get_asset_keys_by_output_name_for_source(dbt_assets, source_name)\n\n if len(asset_keys_by_output_name) > 1:\n raise KeyError(\n f"Source {source_name} has more than one table:"\n f" {asset_keys_by_output_name.values()}. Use"\n " `get_asset_keys_by_output_name_for_source` instead to get all tables for a"\n " source."\n )\n\n return next(iter(asset_keys_by_output_name.values()))
\n\n\n
[docs]def build_dbt_asset_selection(\n dbt_assets: Sequence[AssetsDefinition],\n dbt_select: str = "fqn:*",\n dbt_exclude: Optional[str] = None,\n) -> AssetSelection:\n """Build an asset selection for a dbt selection string.\n\n See https://docs.getdbt.com/reference/node-selection/syntax#how-does-selection-work for\n more information.\n\n Args:\n dbt_select (str): A dbt selection string to specify a set of dbt resources.\n dbt_exclude (Optional[str]): A dbt selection string to exclude a set of dbt resources.\n\n Returns:\n AssetSelection: An asset selection for the selected dbt nodes.\n\n Examples:\n .. code-block:: python\n\n from dagster_dbt import dbt_assets, build_dbt_asset_selection\n\n @dbt_assets(manifest=...)\n def all_dbt_assets():\n ...\n\n # Select the dbt assets that have the tag "foo".\n foo_selection = build_dbt_asset_selection([dbt_assets], dbt_select="tag:foo")\n\n # Select the dbt assets that have the tag "foo" and all Dagster assets downstream\n # of them (dbt-related or otherwise)\n foo_and_downstream_selection = foo_selection.downstream()\n\n """\n manifest, dagster_dbt_translator = get_manifest_and_translator_from_dbt_assets(dbt_assets)\n from .dbt_manifest_asset_selection import DbtManifestAssetSelection\n\n return DbtManifestAssetSelection(\n manifest=manifest,\n dagster_dbt_translator=dagster_dbt_translator,\n select=dbt_select,\n exclude=dbt_exclude,\n )
\n\n\n
[docs]def build_schedule_from_dbt_selection(\n dbt_assets: Sequence[AssetsDefinition],\n job_name: str,\n cron_schedule: str,\n dbt_select: str = "fqn:*",\n dbt_exclude: Optional[str] = None,\n tags: Optional[Mapping[str, str]] = None,\n config: Optional[RunConfig] = None,\n execution_timezone: Optional[str] = None,\n) -> ScheduleDefinition:\n """Build a schedule to materialize a specified set of dbt resources from a dbt selection string.\n\n See https://docs.getdbt.com/reference/node-selection/syntax#how-does-selection-work for\n more information.\n\n Args:\n job_name (str): The name of the job to materialize the dbt resources.\n cron_schedule (str): The cron schedule to define the schedule.\n dbt_select (str): A dbt selection string to specify a set of dbt resources.\n dbt_exclude (Optional[str]): A dbt selection string to exclude a set of dbt resources.\n tags (Optional[Mapping[str, str]]): A dictionary of tags (string key-value pairs) to attach\n to the scheduled runs.\n config (Optional[RunConfig]): The config that parameterizes the execution of this schedule.\n execution_timezone (Optional[str]): Timezone in which the schedule should run.\n Supported strings for timezones are the ones provided by the\n `IANA time zone database <https://www.iana.org/time-zones>` - e.g. "America/Los_Angeles".\n\n Returns:\n ScheduleDefinition: A definition to materialize the selected dbt resources on a cron schedule.\n\n Examples:\n .. code-block:: python\n\n from dagster_dbt import dbt_assets, build_schedule_from_dbt_selection\n\n @dbt_assets(manifest=...)\n def all_dbt_assets():\n ...\n\n daily_dbt_assets_schedule = build_schedule_from_dbt_selection(\n [all_dbt_assets],\n job_name="all_dbt_assets",\n cron_schedule="0 0 * * *",\n dbt_select="fqn:*",\n )\n """\n return ScheduleDefinition(\n cron_schedule=cron_schedule,\n job=define_asset_job(\n name=job_name,\n selection=build_dbt_asset_selection(\n dbt_assets,\n dbt_select=dbt_select,\n dbt_exclude=dbt_exclude,\n ),\n config=config,\n tags=tags,\n ),\n execution_timezone=execution_timezone,\n )
\n\n\ndef get_manifest_and_translator_from_dbt_assets(\n dbt_assets: Sequence[AssetsDefinition],\n) -> Tuple[Mapping[str, Any], "DagsterDbtTranslator"]:\n check.invariant(len(dbt_assets) == 1, "Exactly one dbt AssetsDefinition is required")\n dbt_assets_def = dbt_assets[0]\n metadata_by_key = dbt_assets_def.metadata_by_key or {}\n first_asset_key = next(iter(dbt_assets_def.metadata_by_key.keys()))\n first_metadata = metadata_by_key.get(first_asset_key, {})\n manifest_wrapper: Optional["DbtManifestWrapper"] = first_metadata.get(MANIFEST_METADATA_KEY)\n if manifest_wrapper is None:\n raise DagsterInvariantViolationError(\n f"Expected to find dbt manifest metadata on asset {first_asset_key.to_user_string()},"\n " but did not. Did you pass in assets that weren't generated by"\n " load_assets_from_dbt_project, load_assets_from_dbt_manifest, or @dbt_assets?"\n )\n\n dagster_dbt_translator = first_metadata.get(DAGSTER_DBT_TRANSLATOR_METADATA_KEY)\n if dagster_dbt_translator is None:\n raise DagsterInvariantViolationError(\n f"Expected to find dbt translator metadata on asset {first_asset_key.to_user_string()},"\n " but did not. Did you pass in assets that weren't generated by"\n " load_assets_from_dbt_project, load_assets_from_dbt_manifest, or @dbt_assets?"\n )\n\n return manifest_wrapper.manifest, dagster_dbt_translator\n\n\n###################\n# DEFAULT FUNCTIONS\n###################\n\n\ndef default_asset_key_fn(dbt_resource_props: Mapping[str, Any]) -> AssetKey:\n """Get the asset key for a dbt node.\n\n By default, if the dbt node has a Dagster asset key configured in its metadata, then that is\n parsed and used.\n\n Otherwise:\n dbt sources: a dbt source's key is the union of its source name and its table name\n dbt models: a dbt model's key is the union of its model name and any schema configured on\n the model itself.\n """\n dagster_metadata = dbt_resource_props.get("meta", {}).get("dagster", {})\n asset_key_config = dagster_metadata.get("asset_key", [])\n if asset_key_config:\n return AssetKey(asset_key_config)\n\n if dbt_resource_props["resource_type"] == "source":\n components = [dbt_resource_props["source_name"], dbt_resource_props["name"]]\n else:\n configured_schema = dbt_resource_props["config"].get("schema")\n if configured_schema is not None:\n components = [configured_schema, dbt_resource_props["name"]]\n else:\n components = [dbt_resource_props["name"]]\n\n return AssetKey(components)\n\n\n
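# A minimal sketch of the key-derivation rules implemented by default_asset_key_fn above.
# The node dictionaries are hypothetical, trimmed-down dbt resource props, not real manifest entries.
from dagster_dbt.asset_utils import default_asset_key_fn

source_props = {
    "resource_type": "source",
    "source_name": "jaffle_shop",
    "name": "orders",
    "meta": {},
    "config": {},
}
model_props = {
    "resource_type": "model",
    "name": "customers",
    "meta": {},
    "config": {"schema": "analytics"},
}
overridden_props = {
    "resource_type": "model",
    "name": "customers",
    "meta": {"dagster": {"asset_key": ["warehouse", "customers"]}},
    "config": {},
}

print(default_asset_key_fn(source_props))      # source name + table name
print(default_asset_key_fn(model_props))       # configured schema + model name
print(default_asset_key_fn(overridden_props))  # explicit key from meta.dagster.asset_key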
[docs]def default_metadata_from_dbt_resource_props(\n dbt_resource_props: Mapping[str, Any]\n) -> Mapping[str, Any]:\n metadata: Dict[str, Any] = {}\n columns = dbt_resource_props.get("columns", {})\n if len(columns) > 0:\n metadata["table_schema"] = MetadataValue.table_schema(\n TableSchema(\n columns=[\n TableColumn(\n name=column_name,\n type=column_info.get("data_type") or "?",\n description=column_info.get("description"),\n )\n for column_name, column_info in columns.items()\n ]\n )\n )\n return metadata
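# A minimal sketch (hypothetical columns dict) of the table-schema metadata that
# default_metadata_from_dbt_resource_props builds from a dbt node's column definitions.
from dagster_dbt.asset_utils import default_metadata_from_dbt_resource_props

dbt_resource_props = {
    "columns": {
        "id": {"data_type": "integer", "description": "Primary key"},
        "status": {"description": "Order status"},  # a missing data_type is rendered as "?"
    }
}
metadata = default_metadata_from_dbt_resource_props(dbt_resource_props)
print(metadata["table_schema"])  # MetadataValue.table_schema with one TableColumn per column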
\n\n\n
[docs]def default_group_from_dbt_resource_props(dbt_resource_props: Mapping[str, Any]) -> Optional[str]:\n """Get the group name for a dbt node.\n\n If a Dagster group is configured in the metadata for the node, use that.\n\n Otherwise, if a dbt group is configured for the node, use that.\n """\n dagster_metadata = dbt_resource_props.get("meta", {}).get("dagster", {})\n\n dagster_group = dagster_metadata.get("group")\n if dagster_group:\n return dagster_group\n\n dbt_group = dbt_resource_props.get("config", {}).get("group")\n if dbt_group:\n return dbt_group\n\n return None
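# A minimal sketch (hypothetical node props) of the precedence implemented by
# default_group_from_dbt_resource_props above: a Dagster group under meta.dagster.group
# wins over a dbt group under config.group, and None is returned when neither is set.
from dagster_dbt.asset_utils import default_group_from_dbt_resource_props

both = {"meta": {"dagster": {"group": "marketing"}}, "config": {"group": "finance"}}
dbt_only = {"meta": {}, "config": {"group": "finance"}}
neither = {"meta": {}, "config": {}}

print(default_group_from_dbt_resource_props(both))      # "marketing"
print(default_group_from_dbt_resource_props(dbt_only))  # "finance"
print(default_group_from_dbt_resource_props(neither))   # None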
\n\n\n
[docs]def group_from_dbt_resource_props_fallback_to_directory(\n dbt_resource_props: Mapping[str, Any]\n) -> Optional[str]:\n """Get the group name for a dbt node.\n\n Has the same behavior as the default_group_from_dbt_resource_props, except for that, if no group can be determined\n from config or metadata, falls back to using the subdirectory of the models directory that the\n source file is in.\n\n Args:\n dbt_resource_props (Mapping[str, Any]): A dictionary representing the dbt resource.\n\n Examples:\n .. code-block:: python\n\n from dagster_dbt import group_from_dbt_resource_props_fallback_to_directory\n\n dbt_assets = load_assets_from_dbt_manifest(\n manifest=manifest,\n node_info_to_group_fn=group_from_dbt_resource_props_fallback_to_directory,\n )\n """\n group_name = default_group_from_dbt_resource_props(dbt_resource_props)\n if group_name is not None:\n return group_name\n\n fqn = dbt_resource_props.get("fqn", [])\n # the first component is the package name, and the last component is the model name\n if len(fqn) < 3:\n return None\n return fqn[1]
\n\n\ndef default_freshness_policy_fn(dbt_resource_props: Mapping[str, Any]) -> Optional[FreshnessPolicy]:\n dagster_metadata = dbt_resource_props.get("meta", {}).get("dagster", {})\n freshness_policy_config = dagster_metadata.get("freshness_policy", {})\n\n freshness_policy = _legacy_freshness_policy_fn(freshness_policy_config)\n if freshness_policy:\n return freshness_policy\n\n legacy_freshness_policy_config = dbt_resource_props["config"].get(\n "dagster_freshness_policy", {}\n )\n legacy_freshness_policy = _legacy_freshness_policy_fn(legacy_freshness_policy_config)\n\n if legacy_freshness_policy:\n deprecation_warning(\n "dagster_freshness_policy",\n "0.21.0",\n "Instead, configure a Dagster freshness policy on a dbt model using"\n " +meta.dagster.freshness_policy.",\n )\n\n return legacy_freshness_policy\n\n\ndef _legacy_freshness_policy_fn(\n freshness_policy_config: Mapping[str, Any]\n) -> Optional[FreshnessPolicy]:\n if freshness_policy_config:\n return FreshnessPolicy(\n maximum_lag_minutes=float(freshness_policy_config["maximum_lag_minutes"]),\n cron_schedule=freshness_policy_config.get("cron_schedule"),\n cron_schedule_timezone=freshness_policy_config.get("cron_schedule_timezone"),\n )\n return None\n\n\ndef default_auto_materialize_policy_fn(\n dbt_resource_props: Mapping[str, Any]\n) -> Optional[AutoMaterializePolicy]:\n dagster_metadata = dbt_resource_props.get("meta", {}).get("dagster", {})\n auto_materialize_policy_config = dagster_metadata.get("auto_materialize_policy", {})\n\n auto_materialize_policy = _auto_materialize_policy_fn(auto_materialize_policy_config)\n if auto_materialize_policy:\n return auto_materialize_policy\n\n legacy_auto_materialize_policy_config = dbt_resource_props["config"].get(\n "dagster_auto_materialize_policy", {}\n )\n legacy_auto_materialize_policy = _auto_materialize_policy_fn(\n legacy_auto_materialize_policy_config\n )\n\n if legacy_auto_materialize_policy:\n deprecation_warning(\n "dagster_auto_materialize_policy",\n "0.21.0",\n "Instead, configure a Dagster auto-materialize policy on a dbt model using"\n " +meta.dagster.auto_materialize_policy.",\n )\n\n return legacy_auto_materialize_policy\n\n\ndef _auto_materialize_policy_fn(\n auto_materialize_policy_config: Mapping[str, Any]\n) -> Optional[AutoMaterializePolicy]:\n if auto_materialize_policy_config.get("type") == "eager":\n return AutoMaterializePolicy.eager()\n elif auto_materialize_policy_config.get("type") == "lazy":\n return AutoMaterializePolicy.lazy()\n return None\n\n\ndef default_description_fn(dbt_resource_props: Mapping[str, Any], display_raw_sql: bool = True):\n code_block = textwrap.indent(\n dbt_resource_props.get("raw_sql") or dbt_resource_props.get("raw_code", ""), " "\n )\n description_sections = [\n dbt_resource_props["description"]\n or f"dbt {dbt_resource_props['resource_type']} {dbt_resource_props['name']}",\n ]\n if display_raw_sql:\n description_sections.append(f"#### Raw SQL:\\n```\\n{code_block}\\n```")\n return "\\n\\n".join(filter(None, description_sections))\n\n\ndef is_generic_test_on_attached_node_from_dbt_resource_props(\n unique_id: str, dbt_resource_props: Mapping[str, Any]\n) -> bool:\n attached_node_unique_id = dbt_resource_props.get("attached_node")\n is_generic_test = bool(attached_node_unique_id)\n\n return is_generic_test and attached_node_unique_id == unique_id\n\n\ndef default_asset_check_fn(\n asset_key: AssetKey,\n unique_id: str,\n dagster_dbt_translator_settings: "DagsterDbtTranslatorSettings",\n dbt_resource_props: Mapping[str, Any],\n) -> 
Optional[AssetCheckSpec]:\n is_generic_test_on_attached_node = is_generic_test_on_attached_node_from_dbt_resource_props(\n unique_id, dbt_resource_props\n )\n\n if not all(\n [\n dagster_dbt_translator_settings.enable_asset_checks,\n is_generic_test_on_attached_node,\n ]\n ):\n return None\n\n return AssetCheckSpec(\n name=dbt_resource_props["name"],\n asset=asset_key,\n description=dbt_resource_props["description"],\n )\n\n\ndef default_code_version_fn(dbt_resource_props: Mapping[str, Any]) -> str:\n return hashlib.sha1(\n (dbt_resource_props.get("raw_sql") or dbt_resource_props.get("raw_code", "")).encode(\n "utf-8"\n )\n ).hexdigest()\n\n\n###################\n# DEPENDENCIES\n###################\n\n\ndef is_non_asset_node(dbt_resource_props: Mapping[str, Any]):\n # some nodes exist inside the dbt graph but are not assets\n resource_type = dbt_resource_props["resource_type"]\n if resource_type == "metric":\n return True\n if (\n resource_type == "model"\n and dbt_resource_props.get("config", {}).get("materialized") == "ephemeral"\n ):\n return True\n return False\n\n\ndef get_deps(\n dbt_nodes: Mapping[str, Any],\n selected_unique_ids: AbstractSet[str],\n asset_resource_types: List[str],\n) -> Mapping[str, FrozenSet[str]]:\n def _valid_parent_node(dbt_resource_props):\n # sources are valid parents, but not assets\n return dbt_resource_props["resource_type"] in asset_resource_types + ["source"]\n\n asset_deps: Dict[str, Set[str]] = {}\n for unique_id in selected_unique_ids:\n dbt_resource_props = dbt_nodes[unique_id]\n node_resource_type = dbt_resource_props["resource_type"]\n\n # skip non-assets, such as metrics, tests, and ephemeral models\n if is_non_asset_node(dbt_resource_props) or node_resource_type not in asset_resource_types:\n continue\n\n asset_deps[unique_id] = set()\n for parent_unique_id in dbt_resource_props.get("depends_on", {}).get("nodes", []):\n parent_node_info = dbt_nodes[parent_unique_id]\n # for metrics or ephemeral dbt models, BFS to find valid parents\n if is_non_asset_node(parent_node_info):\n visited = set()\n replaced_parent_ids = set()\n # make a copy to avoid mutating the actual dictionary\n queue = list(parent_node_info.get("depends_on", {}).get("nodes", []))\n while queue:\n candidate_parent_id = queue.pop()\n if candidate_parent_id in visited:\n continue\n visited.add(candidate_parent_id)\n\n candidate_parent_info = dbt_nodes[candidate_parent_id]\n if is_non_asset_node(candidate_parent_info):\n queue.extend(candidate_parent_info.get("depends_on", {}).get("nodes", []))\n elif _valid_parent_node(candidate_parent_info):\n replaced_parent_ids.add(candidate_parent_id)\n\n asset_deps[unique_id] |= replaced_parent_ids\n # ignore nodes which are not assets / sources\n elif _valid_parent_node(parent_node_info):\n asset_deps[unique_id].add(parent_unique_id)\n\n frozen_asset_deps = {\n unique_id: frozenset(parent_ids) for unique_id, parent_ids in asset_deps.items()\n }\n\n return frozen_asset_deps\n\n\ndef get_asset_deps(\n dbt_nodes,\n deps,\n io_manager_key,\n manifest: Optional[Mapping[str, Any]],\n dagster_dbt_translator: "DagsterDbtTranslator",\n) -> Tuple[\n Dict[AssetKey, Set[AssetKey]],\n Dict[AssetKey, Tuple[str, In]],\n Dict[AssetKey, Tuple[str, Out]],\n Dict[AssetKey, str],\n Dict[AssetKey, FreshnessPolicy],\n Dict[AssetKey, AutoMaterializePolicy],\n Dict[str, AssetCheckSpec],\n Dict[str, List[str]],\n Dict[str, Dict[str, Any]],\n]:\n from .dagster_dbt_translator import DbtManifestWrapper\n\n asset_deps: Dict[AssetKey, Set[AssetKey]] = {}\n asset_ins: 
Dict[AssetKey, Tuple[str, In]] = {}\n asset_outs: Dict[AssetKey, Tuple[str, Out]] = {}\n\n # These dicts could be refactored as a single dict, mapping from output name to arbitrary\n # metadata that we need to store for reference.\n group_names_by_key: Dict[AssetKey, str] = {}\n freshness_policies_by_key: Dict[AssetKey, FreshnessPolicy] = {}\n auto_materialize_policies_by_key: Dict[AssetKey, AutoMaterializePolicy] = {}\n check_specs: List[AssetCheckSpec] = []\n fqns_by_output_name: Dict[str, List[str]] = {}\n metadata_by_output_name: Dict[str, Dict[str, Any]] = {}\n\n for unique_id, parent_unique_ids in deps.items():\n dbt_resource_props = dbt_nodes[unique_id]\n\n output_name = output_name_fn(dbt_resource_props)\n fqns_by_output_name[output_name] = dbt_resource_props["fqn"]\n\n metadata_by_output_name[output_name] = {\n key: dbt_resource_props[key] for key in ["unique_id", "resource_type"]\n }\n\n asset_key = dagster_dbt_translator.get_asset_key(dbt_resource_props)\n\n asset_deps[asset_key] = set()\n\n metadata = merge_dicts(\n dagster_dbt_translator.get_metadata(dbt_resource_props),\n {\n MANIFEST_METADATA_KEY: DbtManifestWrapper(manifest=manifest) if manifest else None,\n DAGSTER_DBT_TRANSLATOR_METADATA_KEY: dagster_dbt_translator,\n },\n )\n asset_outs[asset_key] = (\n output_name,\n Out(\n io_manager_key=io_manager_key,\n description=dagster_dbt_translator.get_description(dbt_resource_props),\n metadata=metadata,\n is_required=False,\n dagster_type=Nothing,\n code_version=default_code_version_fn(dbt_resource_props),\n ),\n )\n\n group_name = dagster_dbt_translator.get_group_name(dbt_resource_props)\n if group_name is not None:\n group_names_by_key[asset_key] = group_name\n\n freshness_policy = dagster_dbt_translator.get_freshness_policy(dbt_resource_props)\n if freshness_policy is not None:\n freshness_policies_by_key[asset_key] = freshness_policy\n\n auto_materialize_policy = dagster_dbt_translator.get_auto_materialize_policy(\n dbt_resource_props\n )\n if auto_materialize_policy is not None:\n auto_materialize_policies_by_key[asset_key] = auto_materialize_policy\n\n test_unique_ids = []\n if manifest:\n test_unique_ids = [\n child_unique_id\n for child_unique_id in manifest["child_map"][unique_id]\n if child_unique_id.startswith("test")\n ]\n\n for test_unique_id in test_unique_ids:\n test_resource_props = manifest["nodes"][test_unique_id]\n check_spec = default_asset_check_fn(\n asset_key, unique_id, dagster_dbt_translator.settings, test_resource_props\n )\n\n if check_spec:\n check_specs.append(check_spec)\n\n for parent_unique_id in parent_unique_ids:\n parent_node_info = dbt_nodes[parent_unique_id]\n parent_asset_key = dagster_dbt_translator.get_asset_key(parent_node_info)\n\n asset_deps[asset_key].add(parent_asset_key)\n\n # if this parent is not one of the selected nodes, it's an input\n if parent_unique_id not in deps:\n input_name = input_name_fn(parent_node_info)\n asset_ins[parent_asset_key] = (input_name, In(Nothing))\n\n check_specs_by_output_name = cast(\n Dict[str, AssetCheckSpec],\n _validate_and_assign_output_names_to_check_specs(check_specs, list(asset_outs.keys())),\n )\n\n return (\n asset_deps,\n asset_ins,\n asset_outs,\n group_names_by_key,\n freshness_policies_by_key,\n auto_materialize_policies_by_key,\n check_specs_by_output_name,\n fqns_by_output_name,\n metadata_by_output_name,\n )\n
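# Two minimal sketches for helpers defined above; all node dictionaries are hypothetical,
# trimmed-down dbt resource props rather than real manifest entries.
#
# 1) default_freshness_policy_fn / default_auto_materialize_policy_fn read policy config from a
#    dbt resource's meta.dagster block (the legacy config.dagster_* keys emit deprecation warnings).
from dagster_dbt.asset_utils import (
    default_auto_materialize_policy_fn,
    default_freshness_policy_fn,
    get_deps,
)

policy_props = {
    "meta": {
        "dagster": {
            "freshness_policy": {"maximum_lag_minutes": 60, "cron_schedule": "0 9 * * *"},
            "auto_materialize_policy": {"type": "eager"},
        }
    },
    "config": {},  # no legacy dagster_freshness_policy / dagster_auto_materialize_policy
}
print(default_freshness_policy_fn(policy_props))         # FreshnessPolicy with maximum_lag_minutes=60.0, cron_schedule "0 9 * * *"
print(default_auto_materialize_policy_fn(policy_props))  # AutoMaterializePolicy.eager()

# 2) get_deps skips non-asset nodes (metrics, ephemeral models) and traverses through them, so an
#    asset model depending on an ephemeral model ends up depending on that model's own parents.
dbt_nodes = {
    "model.jaffle_shop.orders": {
        "resource_type": "model",
        "config": {"materialized": "table"},
        "depends_on": {"nodes": ["model.jaffle_shop.stg_orders"]},
    },
    "model.jaffle_shop.stg_orders": {
        "resource_type": "model",
        "config": {"materialized": "ephemeral"},
        "depends_on": {"nodes": ["source.jaffle_shop.raw.orders"]},
    },
    "source.jaffle_shop.raw.orders": {"resource_type": "source", "config": {}},
}
deps = get_deps(
    dbt_nodes,
    selected_unique_ids={"model.jaffle_shop.orders", "model.jaffle_shop.stg_orders"},
    asset_resource_types=["model"],  # illustrative; the library passes its own ASSET_RESOURCE_TYPES
)
print(deps)  # {"model.jaffle_shop.orders": frozenset({"source.jaffle_shop.raw.orders"})}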
", "current_page_name": "_modules/dagster_dbt/asset_utils", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dbt.asset_utils"}, "cloud": {"asset_defs": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dbt.cloud.asset_defs

\nimport json\nimport shlex\nfrom argparse import Namespace\nfrom contextlib import suppress\nfrom typing import (\n    Any,\n    Callable,\n    Dict,\n    FrozenSet,\n    List,\n    Mapping,\n    Optional,\n    Sequence,\n    Set,\n    Tuple,\n    Union,\n    cast,\n)\n\nimport dagster._check as check\nfrom dagster import (\n    AssetExecutionContext,\n    AssetKey,\n    AssetOut,\n    AssetsDefinition,\n    AutoMaterializePolicy,\n    FreshnessPolicy,\n    MetadataValue,\n    PartitionsDefinition,\n    ResourceDefinition,\n    multi_asset,\n    with_resources,\n)\nfrom dagster._annotations import experimental, experimental_param\nfrom dagster._core.definitions.cacheable_assets import (\n    AssetsDefinitionCacheableData,\n    CacheableAssetsDefinition,\n)\nfrom dagster._core.definitions.metadata import MetadataUserInput\nfrom dagster._core.execution.context.init import build_init_resource_context\n\nfrom dagster_dbt.asset_utils import (\n    default_asset_key_fn,\n    default_auto_materialize_policy_fn,\n    default_description_fn,\n    default_freshness_policy_fn,\n    default_group_from_dbt_resource_props,\n    get_asset_deps,\n    get_deps,\n)\nfrom dagster_dbt.dagster_dbt_translator import DagsterDbtTranslator\n\nfrom ..errors import DagsterDbtCloudJobInvariantViolationError\nfrom ..utils import ASSET_RESOURCE_TYPES, result_to_events\nfrom .resources import DbtCloudClient, DbtCloudClientResource, DbtCloudRunStatus\n\nDAGSTER_DBT_COMPILE_RUN_ID_ENV_VAR = "DBT_DAGSTER_COMPILE_RUN_ID"\n\n\nclass DbtCloudCacheableAssetsDefinition(CacheableAssetsDefinition):\n    def __init__(\n        self,\n        dbt_cloud_resource_def: Union[DbtCloudClientResource, ResourceDefinition],\n        job_id: int,\n        node_info_to_asset_key: Callable[[Mapping[str, Any]], AssetKey],\n        node_info_to_group_fn: Callable[[Mapping[str, Any]], Optional[str]],\n        node_info_to_freshness_policy_fn: Callable[[Mapping[str, Any]], Optional[FreshnessPolicy]],\n        node_info_to_auto_materialize_policy_fn: Callable[\n            [Mapping[str, Any]], Optional[AutoMaterializePolicy]\n        ],\n        partitions_def: Optional[PartitionsDefinition] = None,\n        partition_key_to_vars_fn: Optional[Callable[[str], Mapping[str, Any]]] = None,\n    ):\n        self._dbt_cloud_resource_def: ResourceDefinition = (\n            dbt_cloud_resource_def.get_resource_definition()\n            if isinstance(dbt_cloud_resource_def, DbtCloudClientResource)\n            else dbt_cloud_resource_def\n        )\n\n        self._dbt_cloud: DbtCloudClient = (\n            dbt_cloud_resource_def.process_config_and_initialize().get_dbt_client()\n            if isinstance(dbt_cloud_resource_def, DbtCloudClientResource)\n            else dbt_cloud_resource_def(build_init_resource_context())\n        )\n        self._job_id = job_id\n        self._project_id: int\n        self._has_generate_docs: bool\n        self._job_commands: List[str]\n        self._job_materialization_command_step: int\n        self._node_info_to_asset_key = node_info_to_asset_key\n        self._node_info_to_group_fn = node_info_to_group_fn\n        self._node_info_to_freshness_policy_fn = node_info_to_freshness_policy_fn\n        self._node_info_to_auto_materialize_policy_fn = node_info_to_auto_materialize_policy_fn\n        self._partitions_def = partitions_def\n        self._partition_key_to_vars_fn = partition_key_to_vars_fn\n\n        super().__init__(unique_id=f"dbt-cloud-{job_id}")\n\n    def compute_cacheable_data(self) -> 
Sequence[AssetsDefinitionCacheableData]:\n        dbt_nodes, dbt_dependencies = self._get_dbt_nodes_and_dependencies()\n        return [self._build_dbt_cloud_assets_cacheable_data(dbt_nodes, dbt_dependencies)]\n\n    def build_definitions(\n        self, data: Sequence[AssetsDefinitionCacheableData]\n    ) -> Sequence[AssetsDefinition]:\n        return with_resources(\n            [\n                self._build_dbt_cloud_assets_from_cacheable_data(assets_definition_metadata)\n                for assets_definition_metadata in data\n            ],\n            {"dbt_cloud": self._dbt_cloud_resource_def},\n        )\n\n    @staticmethod\n    def parse_dbt_command(dbt_command: str) -> Namespace:\n        args = shlex.split(dbt_command)[1:]\n        try:\n            from dbt.cli.flags import (\n                Flags,\n                args_to_context,\n            )\n\n            # nasty hack to get dbt to parse the args\n            # dbt >= 1.5.0 requires that profiles-dir is set to an existing directory\n            return Namespace(**vars(Flags(args_to_context(args + ["--profiles-dir", "."]))))\n        except ImportError:\n            # dbt < 1.5.0 compat\n            from dbt.main import parse_args  # type: ignore\n\n            return parse_args(args=args)\n\n    @staticmethod\n    def get_job_materialization_command_step(execute_steps: List[str]) -> int:\n        materialization_command_filter = [\n            DbtCloudCacheableAssetsDefinition.parse_dbt_command(command).which in ["run", "build"]\n            for command in execute_steps\n        ]\n\n        if sum(materialization_command_filter) != 1:\n            raise DagsterDbtCloudJobInvariantViolationError(\n                "The dbt Cloud job must have a single `dbt run` or `dbt build` in its commands. "\n                f"Received commands: {execute_steps}."\n            )\n\n        return materialization_command_filter.index(True)\n\n    @staticmethod\n    def get_compile_filters(parsed_args: Namespace) -> List[str]:\n        dbt_compile_options: List[str] = []\n\n        selected_models = parsed_args.select or []\n        if selected_models:\n            dbt_compile_options.append(f"--select {' '.join(selected_models)}")\n\n        excluded_models = parsed_args.exclude or []\n        if excluded_models:\n            dbt_compile_options.append(f"--exclude {' '.join(excluded_models)}")\n\n        selector = getattr(parsed_args, "selector_name", None) or getattr(\n            parsed_args, "selector", None\n        )\n        if selector:\n            dbt_compile_options.append(f"--selector {selector}")\n\n        return dbt_compile_options\n\n    def _get_cached_compile_dbt_cloud_job_run(self, compile_run_id: int) -> Tuple[int, int]:\n        # If the compile run is ongoing, allow it a grace period of 10 minutes to finish.\n        with suppress(Exception):\n            self._dbt_cloud.poll_run(run_id=compile_run_id, poll_timeout=600)\n\n        compile_run = self._dbt_cloud.get_run(\n            run_id=compile_run_id, include_related=["trigger", "run_steps"]\n        )\n\n        compile_run_status: str = compile_run["status_humanized"]\n        if compile_run_status != DbtCloudRunStatus.SUCCESS:\n            raise DagsterDbtCloudJobInvariantViolationError(\n                f"The cached dbt Cloud job run `{compile_run_id}` must have a status of"\n                f" `{DbtCloudRunStatus.SUCCESS}`. Received status: `{compile_run_status}. 
You can"\n                f" view the full status of your dbt Cloud run at {compile_run['href']}. Once it has"\n                " successfully completed, reload your Dagster definitions. If your run has failed,"\n                " you must manually refresh the cache using the `dagster-dbt"\n                " cache-compile-references` CLI."\n            )\n\n        compile_run_has_generate_docs = compile_run["trigger"]["generate_docs_override"]\n\n        compile_job_materialization_command_step = len(compile_run["run_steps"])\n        if compile_run_has_generate_docs:\n            compile_job_materialization_command_step -= 1\n\n        return compile_run_id, compile_job_materialization_command_step\n\n    def _compile_dbt_cloud_job(self, dbt_cloud_job: Mapping[str, Any]) -> Tuple[int, int]:\n        # Retrieve the filters options from the dbt Cloud job's materialization command.\n        #\n        # There are three filters: `--select`, `--exclude`, and `--selector`.\n        materialization_command = self._job_commands[self._job_materialization_command_step]\n        parsed_args = DbtCloudCacheableAssetsDefinition.parse_dbt_command(materialization_command)\n        dbt_compile_options = DbtCloudCacheableAssetsDefinition.get_compile_filters(\n            parsed_args=parsed_args\n        )\n\n        # Add the partition variable as a variable to the dbt Cloud job command.\n        #\n        # If existing variables passed through the dbt Cloud job's command, an error will be\n        # raised. Since these are static variables anyways, they can be moved to the\n        # `dbt_project.yml` without loss of functionality.\n        #\n        # Since we're only doing this to generate the dependency structure, just use an arbitrary\n        # partition key (e.g. the last one) to retrieve the partition variable.\n        if parsed_args.vars and parsed_args.vars != "{}":\n            raise DagsterDbtCloudJobInvariantViolationError(\n                f"The dbt Cloud job '{dbt_cloud_job['name']}' ({dbt_cloud_job['id']}) must not have"\n                " variables defined from `--vars` in its `dbt run` or `dbt build` command."\n                " Instead, declare the variables in the `dbt_project.yml` file. 
Received commands:"\n                f" {self._job_commands}."\n            )\n\n        if self._partitions_def and self._partition_key_to_vars_fn:\n            last_partition_key = self._partitions_def.get_last_partition_key()\n            if last_partition_key is None:\n                check.failed("PartitionsDefinition has no partitions")\n            partition_var = self._partition_key_to_vars_fn(last_partition_key)\n\n            dbt_compile_options.append(f"--vars '{json.dumps(partition_var)}'")\n\n        # We need to retrieve the dependency structure for the assets in the dbt Cloud project.\n        # However, we can't just use the dependency structure from the latest run, because\n        # this historical structure may not be up-to-date with the current state of the project.\n        #\n        # By always doing a compile step, we can always get the latest dependency structure.\n        # This incurs some latency, but at least it doesn't run through the entire materialization\n        # process.\n        dbt_compile_command = f"dbt compile {' '.join(dbt_compile_options)}"\n        compile_run_dbt_output = self._dbt_cloud.run_job_and_poll(\n            job_id=self._job_id,\n            cause="Generating software-defined assets for Dagster.",\n            steps_override=[dbt_compile_command],\n        )\n\n        # Target the compile execution step when retrieving run artifacts, rather than assuming\n        # that the last step is the correct target.\n        #\n        # Here, we ignore the `dbt docs generate` step.\n        compile_job_materialization_command_step = len(\n            compile_run_dbt_output.run_details.get("run_steps", [])\n        )\n        if self._has_generate_docs:\n            compile_job_materialization_command_step -= 1\n\n        return compile_run_dbt_output.run_id, compile_job_materialization_command_step\n\n    def _get_dbt_nodes_and_dependencies(\n        self,\n    ) -> Tuple[Mapping[str, Any], Mapping[str, FrozenSet[str]]]:\n        """For a given dbt Cloud job, fetch the latest run's dependency structure of executed nodes."""\n        # Fetch information about the job.\n        job = self._dbt_cloud.get_job(job_id=self._job_id)\n        self._project_id = job["project_id"]\n        self._has_generate_docs = job["generate_docs"]\n\n        # We constraint the kinds of dbt Cloud jobs that we support running.\n        #\n        # A simple constraint is that we only support jobs that run multiple steps,\n        # but it must contain one of either `dbt run` or `dbt build`.\n        #\n        # As a reminder, `dbt deps` is automatically run before the job's configured commands.\n        # And if the settings are enabled, `dbt docs generate` and `dbt source freshness` can\n        # automatically run after the job's configured commands.\n        #\n        # These commands that execute before and after the job's configured commands do not count\n        # towards the single command constraint.\n        self._job_commands = job["execute_steps"]\n        self._job_materialization_command_step = (\n            DbtCloudCacheableAssetsDefinition.get_job_materialization_command_step(\n                execute_steps=self._job_commands\n            )\n        )\n\n        # Determine whether to use a cached compile run. 
This should only be set up if the user is\n        # using a GitHub action along with their dbt project.\n        dbt_cloud_job_env_vars = self._dbt_cloud.get_job_environment_variables(\n            project_id=self._project_id, job_id=self._job_id\n        )\n        compile_run_id = (\n            dbt_cloud_job_env_vars.get(DAGSTER_DBT_COMPILE_RUN_ID_ENV_VAR, {})\n            .get("job", {})\n            .get("value")\n        )\n\n        compile_run_id, compile_job_materialization_command_step = (\n            # If a compile run is cached, then use it.\n            self._get_cached_compile_dbt_cloud_job_run(compile_run_id=int(compile_run_id))\n            if compile_run_id\n            # Otherwise, compile the dbt Cloud project in an ad-hoc manner.\n            else self._compile_dbt_cloud_job(dbt_cloud_job=job)\n        )\n\n        manifest_json = self._dbt_cloud.get_manifest(\n            run_id=compile_run_id, step=compile_job_materialization_command_step\n        )\n        run_results_json = self._dbt_cloud.get_run_results(\n            run_id=compile_run_id, step=compile_job_materialization_command_step\n        )\n\n        # Filter the manifest to only include the nodes that were executed.\n        dbt_nodes: Dict[str, Any] = {\n            **manifest_json.get("nodes", {}),\n            **manifest_json.get("sources", {}),\n            **manifest_json.get("metrics", {}),\n        }\n        executed_node_ids: Set[str] = set(\n            result["unique_id"] for result in run_results_json["results"]\n        )\n\n        # If there are no executed nodes, then there are no assets to generate.\n        # Inform the user to inspect their dbt Cloud job's command.\n        if not executed_node_ids:\n            raise DagsterDbtCloudJobInvariantViolationError(\n                f"The dbt Cloud job '{job['name']}' ({job['id']}) does not generate any "\n                "software-defined assets. Ensure that your dbt project has nodes to execute, "\n                "and that your dbt Cloud job's materialization command has the proper filter "\n                f"options applied. Received commands: {self._job_commands}."\n            )\n\n        # Generate the dependency structure for the executed nodes.\n        dbt_dependencies = get_deps(\n            dbt_nodes=dbt_nodes,\n            selected_unique_ids=executed_node_ids,\n            asset_resource_types=ASSET_RESOURCE_TYPES,\n        )\n\n        return dbt_nodes, dbt_dependencies\n\n    def _build_dbt_cloud_assets_cacheable_data(\n        self, dbt_nodes: Mapping[str, Any], dbt_dependencies: Mapping[str, FrozenSet[str]]\n    ) -> AssetsDefinitionCacheableData:\n        """Given all of the nodes and dependencies for a dbt Cloud job, build the cacheable\n        representation that generate the asset definition for the job.\n        """\n\n        class CustomDagsterDbtTranslator(DagsterDbtTranslator):\n            @classmethod\n            def get_asset_key(cls, dbt_resource_props):\n                return self._node_info_to_asset_key(dbt_resource_props)\n\n            @classmethod\n            def get_description(cls, dbt_resource_props):\n                # We shouldn't display the raw sql. 
Instead, inspect if dbt docs were generated,\n                # and attach metadata to link to the docs.\n                return default_description_fn(dbt_resource_props, display_raw_sql=False)\n\n            @classmethod\n            def get_group_name(cls, dbt_resource_props):\n                return self._node_info_to_group_fn(dbt_resource_props)\n\n            @classmethod\n            def get_freshness_policy(cls, dbt_resource_props):\n                return self._node_info_to_freshness_policy_fn(dbt_resource_props)\n\n            @classmethod\n            def get_auto_materialize_policy(cls, dbt_resource_props):\n                return self._node_info_to_auto_materialize_policy_fn(dbt_resource_props)\n\n        (\n            asset_deps,\n            asset_ins,\n            asset_outs,\n            group_names_by_key,\n            freshness_policies_by_key,\n            auto_materialize_policies_by_key,\n            _,\n            fqns_by_output_name,\n            metadata_by_output_name,\n        ) = get_asset_deps(\n            dbt_nodes=dbt_nodes,\n            deps=dbt_dependencies,\n            # TODO: In the future, allow the IO manager to be specified.\n            io_manager_key=None,\n            dagster_dbt_translator=CustomDagsterDbtTranslator(),\n            manifest=None,\n        )\n\n        return AssetsDefinitionCacheableData(\n            # TODO: In the future, we should allow additional upstream assets to be specified.\n            keys_by_input_name={\n                input_name: asset_key for asset_key, (input_name, _) in asset_ins.items()\n            },\n            keys_by_output_name={\n                output_name: asset_key for asset_key, (output_name, _) in asset_outs.items()\n            },\n            internal_asset_deps={\n                asset_outs[asset_key][0]: asset_deps for asset_key, asset_deps in asset_deps.items()\n            },\n            # We don't rely on a static group name. 
Instead, we map over the dbt metadata to\n            # determine the group name for each asset.\n            group_name=None,\n            metadata_by_output_name={\n                output_name: self._build_dbt_cloud_assets_metadata(dbt_metadata)\n                for output_name, dbt_metadata in metadata_by_output_name.items()\n            },\n            # TODO: In the future, we should allow the key prefix to be specified.\n            key_prefix=None,\n            can_subset=True,\n            extra_metadata={\n                "job_id": self._job_id,\n                "job_commands": self._job_commands,\n                "job_materialization_command_step": self._job_materialization_command_step,\n                "group_names_by_output_name": {\n                    asset_outs[asset_key][0]: group_name\n                    for asset_key, group_name in group_names_by_key.items()\n                },\n                "fqns_by_output_name": fqns_by_output_name,\n            },\n            freshness_policies_by_output_name={\n                asset_outs[asset_key][0]: freshness_policy\n                for asset_key, freshness_policy in freshness_policies_by_key.items()\n            },\n            auto_materialize_policies_by_output_name={\n                asset_outs[asset_key][0]: auto_materialize_policy\n                for asset_key, auto_materialize_policy in auto_materialize_policies_by_key.items()\n            },\n        )\n\n    def _build_dbt_cloud_assets_metadata(self, dbt_metadata: Dict[str, Any]) -> MetadataUserInput:\n        metadata = {\n            "dbt Cloud Job": MetadataValue.url(\n                self._dbt_cloud.build_url_for_job(\n                    project_id=self._project_id,\n                    job_id=self._job_id,\n                )\n            ),\n        }\n\n        if self._has_generate_docs:\n            metadata["dbt Cloud Documentation"] = MetadataValue.url(\n                self._dbt_cloud.build_url_for_cloud_docs(\n                    job_id=self._job_id,\n                    resource_type=dbt_metadata["resource_type"],\n                    unique_id=dbt_metadata["unique_id"],\n                )\n            )\n\n        return metadata\n\n    def _build_dbt_cloud_assets_from_cacheable_data(\n        self, assets_definition_cacheable_data: AssetsDefinitionCacheableData\n    ) -> AssetsDefinition:\n        metadata = cast(Mapping[str, Any], assets_definition_cacheable_data.extra_metadata)\n        job_id = cast(int, metadata["job_id"])\n        job_commands = cast(List[str], list(metadata["job_commands"]))\n        job_materialization_command_step = cast(int, metadata["job_materialization_command_step"])\n        group_names_by_output_name = cast(Mapping[str, str], metadata["group_names_by_output_name"])\n        fqns_by_output_name = cast(Mapping[str, List[str]], metadata["fqns_by_output_name"])\n\n        @multi_asset(\n            name=f"dbt_cloud_job_{job_id}",\n            deps=list((assets_definition_cacheable_data.keys_by_input_name or {}).values()),\n            outs={\n                output_name: AssetOut(\n                    key=asset_key,\n                    group_name=group_names_by_output_name.get(output_name),\n                    freshness_policy=(\n                        assets_definition_cacheable_data.freshness_policies_by_output_name or {}\n                    ).get(\n                        output_name,\n                    ),\n                    auto_materialize_policy=(\n                        
assets_definition_cacheable_data.auto_materialize_policies_by_output_name\n                        or {}\n                    ).get(\n                        output_name,\n                    ),\n                    metadata=(assets_definition_cacheable_data.metadata_by_output_name or {}).get(\n                        output_name\n                    ),\n                    is_required=False,\n                )\n                for output_name, asset_key in (\n                    assets_definition_cacheable_data.keys_by_output_name or {}\n                ).items()\n            },\n            internal_asset_deps={\n                output_name: set(asset_deps)\n                for output_name, asset_deps in (\n                    assets_definition_cacheable_data.internal_asset_deps or {}\n                ).items()\n            },\n            partitions_def=self._partitions_def,\n            can_subset=assets_definition_cacheable_data.can_subset,\n            required_resource_keys={"dbt_cloud"},\n            compute_kind="dbt",\n        )\n        def _assets(context: AssetExecutionContext):\n            dbt_cloud = cast(DbtCloudClient, context.resources.dbt_cloud)\n\n            # Add the partition variable as a variable to the dbt Cloud job command.\n            dbt_options: List[str] = []\n            if context.has_partition_key and self._partition_key_to_vars_fn:\n                partition_var = self._partition_key_to_vars_fn(context.partition_key)\n\n                dbt_options.append(f"--vars '{json.dumps(partition_var)}'")\n\n            # Prepare the materialization step to be overriden with the selection filter\n            materialization_command = job_commands[job_materialization_command_step]\n\n            # Map the selected outputs to dbt models that should be materialized.\n            #\n            # HACK: This selection filter works even if an existing `--select` is specified in the\n            # dbt Cloud job. 
We take advantage of the fact that the last `--select` will be used.\n            #\n            # This is not ideal, as the triggered run for the dbt Cloud job will still have both\n            # `--select` options when displayed in the UI, but parsing the command line argument\n            # to remove the initial select using argparse.\n            if len(context.selected_output_names) != len(\n                assets_definition_cacheable_data.keys_by_output_name or {}\n            ):\n                selected_models = [\n                    ".".join(fqns_by_output_name[output_name])\n                    for output_name in context.selected_output_names\n                ]\n\n                dbt_options.append(f"--select {' '.join(sorted(selected_models))}")\n\n                # If the `--selector` option is used, we need to remove it from the command, since\n                # it disables other selection options from being functional.\n                #\n                # See https://docs.getdbt.com/reference/node-selection/syntax for details.\n                split_materialization_command = shlex.split(materialization_command)\n                if "--selector" in split_materialization_command:\n                    idx = split_materialization_command.index("--selector")\n\n                    materialization_command = " ".join(\n                        split_materialization_command[:idx]\n                        + split_materialization_command[idx + 2 :]\n                    )\n\n            job_commands[job_materialization_command_step] = (\n                f"{materialization_command} {' '.join(dbt_options)}".strip()\n            )\n\n            # Run the dbt Cloud job to rematerialize the assets.\n            dbt_cloud_output = dbt_cloud.run_job_and_poll(\n                job_id=job_id,\n                cause=f"Materializing software-defined assets in Dagster run {context.run_id[:8]}",\n                steps_override=job_commands,\n            )\n\n            # Target the materialization step when retrieving run artifacts, rather than assuming\n            # that the last step is the correct target.\n            #\n            # We ignore the commands in front of the materialization command. 
And again, we ignore\n            # the `dbt docs generate` step.\n            materialization_command_step = len(dbt_cloud_output.run_details.get("run_steps", []))\n            materialization_command_step -= len(job_commands) - job_materialization_command_step - 1\n            if dbt_cloud_output.run_details.get("job", {}).get("generate_docs"):\n                materialization_command_step -= 1\n\n            # TODO: Assume the run completely fails or completely succeeds.\n            # In the future, we can relax this assumption.\n            manifest_json = dbt_cloud.get_manifest(\n                run_id=dbt_cloud_output.run_id, step=materialization_command_step\n            )\n            run_results_json = self._dbt_cloud.get_run_results(\n                run_id=dbt_cloud_output.run_id, step=materialization_command_step\n            )\n\n            for result in run_results_json.get("results", []):\n                yield from result_to_events(\n                    result=result,\n                    docs_url=dbt_cloud_output.docs_url,\n                    node_info_to_asset_key=self._node_info_to_asset_key,\n                    manifest_json=manifest_json,\n                    # TODO: In the future, allow arbitrary mappings to Dagster output metadata from\n                    # the dbt metadata.\n                    extra_metadata=None,\n                    generate_asset_outputs=True,\n                )\n\n        return _assets\n\n\n
[docs]@experimental\n@experimental_param(param="partitions_def")\n@experimental_param(param="partition_key_to_vars_fn")\ndef load_assets_from_dbt_cloud_job(\n dbt_cloud: ResourceDefinition,\n job_id: int,\n node_info_to_asset_key: Callable[[Mapping[str, Any]], AssetKey] = default_asset_key_fn,\n node_info_to_group_fn: Callable[\n [Mapping[str, Any]], Optional[str]\n ] = default_group_from_dbt_resource_props,\n node_info_to_freshness_policy_fn: Callable[\n [Mapping[str, Any]], Optional[FreshnessPolicy]\n ] = default_freshness_policy_fn,\n node_info_to_auto_materialize_policy_fn: Callable[\n [Mapping[str, Any]], Optional[AutoMaterializePolicy]\n ] = default_auto_materialize_policy_fn,\n partitions_def: Optional[PartitionsDefinition] = None,\n partition_key_to_vars_fn: Optional[Callable[[str], Mapping[str, Any]]] = None,\n) -> CacheableAssetsDefinition:\n """Loads a set of dbt models, managed by a dbt Cloud job, into Dagster assets. In order to\n determine the set of dbt models, the project is compiled to generate the necessary artifacts\n that define the dbt models and their dependencies.\n\n One Dagster asset is created for each dbt model.\n\n Args:\n dbt_cloud (ResourceDefinition): The dbt Cloud resource to use to connect to the dbt Cloud API.\n job_id (int): The ID of the dbt Cloud job to load assets from.\n node_info_to_asset_key: (Mapping[str, Any] -> AssetKey): A function that takes a dictionary\n of dbt metadata and returns the AssetKey that you want to represent a given model or\n source. By default: dbt model -> AssetKey([model_name]) and\n dbt source -> AssetKey([source_name, table_name])\n node_info_to_group_fn (Dict[str, Any] -> Optional[str]): A function that takes a\n dictionary of dbt node info and returns the group that this node should be assigned to.\n node_info_to_freshness_policy_fn (Dict[str, Any] -> Optional[FreshnessPolicy]): A function\n that takes a dictionary of dbt node info and optionally returns a FreshnessPolicy that\n should be applied to this node. By default, freshness policies will be created from\n config applied to dbt models, i.e.:\n `dagster_freshness_policy={"maximum_lag_minutes": 60, "cron_schedule": "0 9 * * *"}`\n will result in that model being assigned\n `FreshnessPolicy(maximum_lag_minutes=60, cron_schedule="0 9 * * *")`\n node_info_to_auto_materialize_policy_fn (Dict[str, Any] -> Optional[AutoMaterializePolicy]):\n A function that takes a dictionary of dbt node info and optionally returns a AutoMaterializePolicy\n that should be applied to this node. By default, AutoMaterializePolicies will be created from\n config applied to dbt models, i.e.:\n `dagster_auto_materialize_policy={"type": "lazy"}` will result in that model being assigned\n `AutoMaterializePolicy.lazy()`\n node_info_to_definition_metadata_fn (Dict[str, Any] -> Optional[Dict[str, MetadataUserInput]]):\n A function that takes a dictionary of dbt node info and optionally returns a dictionary\n of metadata to be attached to the corresponding definition. This is added to the default\n metadata assigned to the node, which consists of the node's schema (if present).\n partitions_def (Optional[PartitionsDefinition]): Defines the set of partition keys that\n compose the dbt assets.\n partition_key_to_vars_fn (Optional[str -> Dict[str, Any]]): A function to translate a given\n partition key (e.g. '2022-01-01') to a dictionary of vars to be passed into the dbt\n invocation (e.g. 
{"run_date": "2022-01-01"})\n\n Returns:\n CacheableAssetsDefinition: A definition for the loaded assets.\n\n Examples:\n .. code-block:: python\n\n from dagster import repository\n from dagster_dbt import dbt_cloud_resource, load_assets_from_dbt_cloud_job\n\n DBT_CLOUD_JOB_ID = 1234\n\n dbt_cloud = dbt_cloud_resource.configured(\n {\n "auth_token": {"env": "DBT_CLOUD_API_TOKEN"},\n "account_id": {"env": "DBT_CLOUD_ACCOUNT_ID"},\n }\n )\n\n dbt_cloud_assets = load_assets_from_dbt_cloud_job(\n dbt_cloud=dbt_cloud, job_id=DBT_CLOUD_JOB_ID\n )\n\n\n @repository\n def dbt_cloud_sandbox():\n return [dbt_cloud_assets]\n """\n if partition_key_to_vars_fn:\n check.invariant(\n partitions_def is not None,\n "Cannot supply a `partition_key_to_vars_fn` without a `partitions_def`.",\n )\n\n return DbtCloudCacheableAssetsDefinition(\n dbt_cloud_resource_def=dbt_cloud,\n job_id=job_id,\n node_info_to_asset_key=node_info_to_asset_key,\n node_info_to_group_fn=node_info_to_group_fn,\n node_info_to_freshness_policy_fn=node_info_to_freshness_policy_fn,\n node_info_to_auto_materialize_policy_fn=node_info_to_auto_materialize_policy_fn,\n partitions_def=partitions_def,\n partition_key_to_vars_fn=partition_key_to_vars_fn,\n )
\n
", "current_page_name": "_modules/dagster_dbt/cloud/asset_defs", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dbt.cloud.asset_defs"}, "ops": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dbt.cloud.ops

\nfrom typing import List, Optional\n\nfrom dagster import Config, In, Nothing, Out, Output, op\nfrom pydantic import Field\n\nfrom ..utils import generate_materializations\nfrom .resources import DEFAULT_POLL_INTERVAL\nfrom .types import DbtCloudOutput\n\n\nclass DbtCloudRunOpConfig(Config):\n    job_id: int = Field(\n        description=(\n            "The integer ID of the relevant dbt Cloud job. You can find this value by going to the"\n            " details page of your job in the dbt Cloud UI. It will be the final number in the url,"\n            " e.g.:    "\n            " https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/jobs/{job_id}/"\n        )\n    )\n    poll_interval: float = Field(\n        default=DEFAULT_POLL_INTERVAL,\n        description="The time (in seconds) that will be waited between successive polls.",\n    )\n    poll_timeout: Optional[float] = Field(\n        default=None,\n        description=(\n            "The maximum time that will be waited before this operation is timed out. By "\n            "default, this will never time out."\n        ),\n    )\n    yield_materializations: bool = Field(\n        default=True,\n        description=(\n            "If True, materializations corresponding to the results of the dbt operation will "\n            "be yielded when the op executes."\n        ),\n    )\n\n    asset_key_prefix: List[str] = Field(\n        default=["dbt"],\n        description=(\n            "If provided and yield_materializations is True, these components will be used to "\n            "prefix the generated asset keys."\n        ),\n    )\n\n\n
[docs]@op(\n required_resource_keys={"dbt_cloud"},\n ins={"start_after": In(Nothing)},\n out=Out(DbtCloudOutput, description="Parsed output from running the dbt Cloud job."),\n tags={"kind": "dbt_cloud"},\n)\ndef dbt_cloud_run_op(context, config: DbtCloudRunOpConfig):\n """Initiates a run for a dbt Cloud job, then polls until the run completes. If the job\n fails or is otherwise stopped before succeeding, a `dagster.Failure` exception will be raised,\n and this op will fail.\n\n It requires the use of a 'dbt_cloud' resource, which is used to connect to the dbt Cloud API.\n\n **Config Options:**\n\n job_id (int)\n The integer ID of the relevant dbt Cloud job. You can find this value by going to the details\n page of your job in the dbt Cloud UI. It will be the final number in the url, e.g.:\n ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/jobs/{job_id}/``\n poll_interval (float)\n The time (in seconds) that will be waited between successive polls. Defaults to ``10``.\n poll_timeout (float)\n The maximum time (in seconds) that will be waited before this operation is timed out. By\n default, this will never time out.\n yield_materializations (bool)\n If True, materializations corresponding to the results of the dbt operation will be\n yielded when the op executes. Defaults to ``True``.\n asset_key_prefix (List[str])\n If provided and yield_materializations is True, these components will be used to\n prefix the generated asset keys. Defaults to ["dbt"].\n\n **Examples:**\n\n .. code-block:: python\n\n from dagster import job\n from dagster_dbt import dbt_cloud_resource, dbt_cloud_run_op\n\n my_dbt_cloud_resource = dbt_cloud_resource.configured(\n {"auth_token": {"env": "DBT_CLOUD_AUTH_TOKEN"}, "account_id": 77777}\n )\n run_dbt_nightly_sync = dbt_cloud_run_op.configured(\n {"job_id": 54321}, name="run_dbt_nightly_sync"\n )\n\n @job(resource_defs={"dbt_cloud": my_dbt_cloud_resource})\n def dbt_cloud():\n run_dbt_nightly_sync()\n\n\n """\n dbt_output = context.resources.dbt_cloud.run_job_and_poll(\n config.job_id, poll_interval=config.poll_interval, poll_timeout=config.poll_timeout\n )\n if config.yield_materializations and "results" in dbt_output.result:\n yield from generate_materializations(dbt_output, asset_key_prefix=config.asset_key_prefix)\n yield Output(\n dbt_output,\n metadata={\n "created_at": dbt_output.run_details["created_at"],\n "started_at": dbt_output.run_details["started_at"],\n "finished_at": dbt_output.run_details["finished_at"],\n "total_duration": dbt_output.run_details["duration"],\n "run_duration": dbt_output.run_details["run_duration"],\n },\n )
\n
", "current_page_name": "_modules/dagster_dbt/cloud/ops", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dbt.cloud.ops"}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dbt.cloud.resources

\nimport datetime\nimport json\nimport logging\nimport time\nfrom enum import Enum\nfrom typing import Any, Mapping, Optional, Sequence, cast\nfrom urllib.parse import urlencode, urljoin\n\nimport requests\nfrom dagster import (\n    ConfigurableResource,\n    Failure,\n    IAttachDifferentObjectToOpContext,\n    MetadataValue,\n    __version__,\n    _check as check,\n    get_dagster_logger,\n    resource,\n)\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster._utils.merger import deep_merge_dicts\nfrom pydantic import Field\nfrom requests.exceptions import RequestException\n\nfrom .types import DbtCloudOutput\n\nDBT_DEFAULT_HOST = "https://cloud.getdbt.com/"\nDBT_API_V2_PATH = "api/v2/accounts/"\nDBT_API_V3_PATH = "api/v3/accounts/"\n\n# default polling interval (in seconds)\nDEFAULT_POLL_INTERVAL = 10\n\n\nclass DbtCloudRunStatus(str, Enum):\n    QUEUED = "Queued"\n    STARTING = "Starting"\n    RUNNING = "Running"\n    SUCCESS = "Success"\n    ERROR = "Error"\n    CANCELLED = "Cancelled"\n\n\n# TODO: This resource should be a wrapper over an existing client for a accessing dbt Cloud,\n# rather than using requests to the API directly.\nclass DbtCloudClient:\n    """This class exposes methods on top of the dbt Cloud REST API v2.\n\n    For a complete set of documentation on the dbt Cloud Administrative REST API, including expected\n    response JSON schemae, see the `dbt Cloud API Docs <https://docs.getdbt.com/dbt-cloud/api-v2>`_.\n    """\n\n    def __init__(\n        self,\n        auth_token: str,\n        account_id: int,\n        disable_schedule_on_trigger: bool = True,\n        request_max_retries: int = 3,\n        request_retry_delay: float = 0.25,\n        dbt_cloud_host: str = DBT_DEFAULT_HOST,\n        log: logging.Logger = get_dagster_logger(),\n        log_requests: bool = False,\n    ):\n        self._auth_token = auth_token\n        self._account_id = account_id\n        self._disable_schedule_on_trigger = disable_schedule_on_trigger\n\n        self._request_max_retries = request_max_retries\n        self._request_retry_delay = request_retry_delay\n\n        self._dbt_cloud_host = dbt_cloud_host\n        self._log = log\n        self._log_requests = log_requests\n\n    @property\n    def api_v2_base_url(self) -> str:\n        return urljoin(self._dbt_cloud_host, DBT_API_V2_PATH)\n\n    @property\n    def api_v3_base_url(self) -> str:\n        return urljoin(self._dbt_cloud_host, DBT_API_V3_PATH)\n\n    def build_url_for_job(self, project_id: int, job_id: int) -> str:\n        return urljoin(\n            self._dbt_cloud_host,\n            f"next/deploy/{self._account_id}/projects/{project_id}/jobs/{job_id}/",\n        )\n\n    def build_url_for_cloud_docs(self, job_id: int, resource_type: str, unique_id: str) -> str:\n        return urljoin(\n            self._dbt_cloud_host,\n            f"/accounts/{self._account_id}/jobs/{job_id}/docs/#!/{resource_type}/{unique_id}",\n        )\n\n    def make_request(\n        self,\n        method: str,\n        endpoint: str,\n        data: Optional[Mapping[str, Any]] = None,\n        params: Optional[Mapping[str, Any]] = None,\n        return_text: bool = False,\n        base_url: Optional[str] = None,\n    ) -> Any:\n        """Creates and sends a request to the desired dbt Cloud API endpoint.\n\n        Args:\n            method (str): The http method to use for this request (e.g. 
"POST", "GET", "PATCH").\n            endpoint (str): The dbt Cloud API endpoint to send this request to.\n            data (Optional[Mapping[str, Any]]): JSON-formatable data string to be included in the request.\n            params (Optional[Mapping[str, Any]]): Payload to add to query string of the request.\n            return_text (bool): Override default behavior and return unparsed {"text": response.text}\n                blob instead of json.\n\n        Returns:\n            Dict[str, Any]: Parsed json data from the response to this request\n        """\n        headers = {\n            "User-Agent": f"dagster-dbt/{__version__}",\n            "Content-Type": "application/json",\n            "Authorization": f"Bearer {self._auth_token}",\n        }\n        base_url = base_url or self.api_v2_base_url\n        url = urljoin(base_url, endpoint)\n\n        if self._log_requests:\n            self._log.debug(f"Making Request: method={method} url={url} data={data}")\n\n        num_retries = 0\n        while True:\n            try:\n                response = requests.request(\n                    method=method,\n                    url=url,\n                    headers=headers,\n                    data=json.dumps(data),\n                    params=params,\n                )\n                response.raise_for_status()\n                return {"text": response.text} if return_text else response.json()["data"]\n            except RequestException as e:\n                self._log.error("Request to dbt Cloud API failed: %s", e)\n                if num_retries == self._request_max_retries:\n                    break\n                num_retries += 1\n                time.sleep(self._request_retry_delay)\n\n        raise Failure(f"Max retries ({self._request_max_retries}) exceeded with url: {url}.")\n\n    def list_jobs(\n        self, project_id: int, order_by: Optional[str] = "-id"\n    ) -> Sequence[Mapping[str, Any]]:\n        """List all dbt jobs in a dbt Cloud project.\n\n        Args:\n            project_id (int): The ID of the relevant dbt Cloud project. You can find this value by\n                going to your account settings in the dbt Cloud UI. It will be the final\n                number in the url, e.g.: ``https://cloud.getdbt.com/next/settings/accounts/{account_id}/projects/{project_id}/``\n            order_by (Optional[str]): An identifier designated by dbt Cloud in which to sort the\n                results before returning them. Useful when combined with offset and limit to load\n                runs for a job. Defaults to "-id" where "-" designates reverse order and "id" is\n                the key to filter on.\n\n        Returns:\n            List[Dict[str, Any]]: Parsed json data from the response to this request\n        """\n        return self.make_request(\n            "GET",\n            f"{self._account_id}/jobs",\n            params={"project_id": project_id, "order_by": order_by},\n        )\n\n    def get_job(self, job_id: int) -> Mapping[str, Any]:\n        """Gets details about a given dbt job from the dbt Cloud API.\n\n        Args:\n            job_id (int): The ID of the relevant dbt Cloud job. You can find this value by going to\n                the details page of your job in the dbt Cloud UI. 
It will be the final number in the\n                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/jobs/{job_id}/``\n        Returns:\n            Dict[str, Any]: Parsed json data from the response to this request\n        """\n        return self.make_request("GET", f"{self._account_id}/jobs/{job_id}/")\n\n    def update_job(self, job_id: int, **kwargs) -> Mapping[str, Any]:\n        """Updates specific properties of a dbt job.\n\n        Documentation on the full set of potential parameters can be found here:\n        https://docs.getdbt.com/dbt-cloud/api-v2#operation/updateJobById.\n\n        Args:\n            job_id (int): The ID of the relevant dbt Cloud job. You can find this value by going to\n                the details page of your job in the dbt Cloud UI. It will be the final number in the\n                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/jobs/{job_id}/``\n            kwargs: Passed in as the properties to be changed.\n\n        Returns:\n            Dict[str, Any]: Parsed json data from the response to this request\n\n        Examples:\n        .. code-block:: python\n\n            # disable schedule for job with id=12345\n            my_dbt_cloud_resource.update_job(12345, triggers={"schedule": False})\n        """\n        # API requires you to supply a bunch of values, so we can just use the current state\n        # as the defaults\n        job_data = self.get_job(job_id)\n        return self.make_request(\n            "POST", f"{self._account_id}/jobs/{job_id}/", data=deep_merge_dicts(job_data, kwargs)\n        )\n\n    def run_job(self, job_id: int, **kwargs) -> Mapping[str, Any]:\n        """Initializes a run for a job.\n\n        Overrides for specific properties can be set by passing in values to the kwargs. A full list\n        of overridable properties can be found here:\n        https://docs.getdbt.com/dbt-cloud/api-v2#operation/triggerRun.\n\n        Args:\n            job_id (int): The ID of the relevant dbt Cloud job. You can find this value by going to\n                the details page of your job in the dbt Cloud UI. It will be the final number in the\n                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/jobs/{job_id}/``\n            kwargs: Passed in as the properties to be overridden.\n\n        Returns:\n            Dict[str, Any]: Parsed json data from the response to this request\n        """\n        self._log.info(f"Initializing run for job with job_id={job_id}")\n        if "cause" not in kwargs:\n            kwargs["cause"] = "Triggered via Dagster"\n        resp = self.make_request("POST", f"{self._account_id}/jobs/{job_id}/run/", data=kwargs)\n\n        has_schedule: bool = resp.get("job", {}).get("triggers", {}).get("schedule", False)\n        if has_schedule and self._disable_schedule_on_trigger:\n            self._log.info("Disabling dbt Cloud job schedule.")\n            self.update_job(job_id, triggers={"schedule": False})\n\n        self._log.info(\n            f"Run initialized with run_id={resp['id']}. 
View this run in "\n            f"the dbt Cloud UI: {resp['href']}"\n        )\n        return resp\n\n    def get_runs(\n        self,\n        include_related: Optional[Sequence[str]] = None,\n        job_id: Optional[int] = None,\n        order_by: Optional[str] = "-id",\n        offset: int = 0,\n        limit: int = 100,\n    ) -> Sequence[Mapping[str, object]]:\n        """Returns a list of runs from dbt Cloud. This can be optionally filtered to a specific job\n        using the job_definition_id. It supports pagination using offset and limit as well and\n        can be configured to load a variety of related information about the runs.\n\n        Args:\n            include_related (Optional[List[str]]): A list of resources to include in the response\n                from dbt Cloud. This is technically a required field according to the API, but it\n                can be passed with an empty list where it will only load the default run\n                information. Valid values are "trigger", "job", "repository", and "environment".\n            job_definition_id (Optional[int]): This method can be optionally filtered to only\n                load runs for a specific job id if it is included here. If omitted it will pull\n                runs for every job.\n            order_by (Optional[str]): An identifier designated by dbt Cloud in which to sort the\n                results before returning them. Useful when combined with offset and limit to load\n                runs for a job. Defaults to "-id" where "-" designates reverse order and "id" is\n                the key to filter on.\n            offset (int): An offset to apply when listing runs. Can be used to paginate results\n                when combined with order_by and limit. Defaults to 0.\n            limit (int): Limits the amount of rows returned by the API. Defaults to 100.\n\n        Returns:\n            List[Dict[str, Any]]: A list of dictionaries containing the runs and any included\n                related information.\n        """\n        query_dict = {\n            "include_related": include_related or [],\n            "order_by": order_by,\n            "offset": offset,\n            "limit": limit,\n        }\n        if job_id:\n            query_dict["job_definition_id"] = job_id\n        return self.make_request("GET", f"{self._account_id}/runs/?{urlencode(query_dict)}")\n\n    def get_run(\n        self, run_id: int, include_related: Optional[Sequence[str]] = None\n    ) -> Mapping[str, Any]:\n        """Gets details about a specific job run.\n\n        Args:\n            run_id (int): The ID of the relevant dbt Cloud run. You can find this value by going to\n                the details page of your run in the dbt Cloud UI. It will be the final number in the\n                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/runs/{run_id}/``\n            include_related (List[str]): List of related fields to pull with the run. 
Valid values\n                are "trigger", "job", and "debug_logs".\n\n        Returns:\n            Dict[str, Any]: A dictionary containing the parsed contents of the dbt Cloud run details.\n                See: https://docs.getdbt.com/dbt-cloud/api-v2#operation/getRunById for schema.\n        """\n        query_params = f"?include_related={','.join(include_related)}" if include_related else ""\n        return self.make_request(\n            "GET",\n            f"{self._account_id}/runs/{run_id}/{query_params}",\n        )\n\n    def get_run_steps(self, run_id: int) -> Sequence[str]:\n        """Gets the steps of an initialized dbt Cloud run.\n\n        Args:\n            run_id (int): The ID of the relevant dbt Cloud run. You can find this value by going to\n                the details page of your run in the dbt Cloud UI. It will be the final number in the\n                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/runs/{run_id}/``\n\n        Returns:\n            List[str, Any]: List of commands for each step of the run.\n        """\n        run_details = self.get_run(run_id, include_related=["trigger", "job"])\n        steps = run_details["job"]["execute_steps"]\n        steps_override = run_details["trigger"]["steps_override"]\n        return steps_override or steps\n\n    def cancel_run(self, run_id: int) -> Mapping[str, Any]:\n        """Cancels a dbt Cloud run.\n\n        Args:\n            run_id (int): The ID of the relevant dbt Cloud run. You can find this value by going to\n                the details page of your run in the dbt Cloud UI. It will be the final number in the\n                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/runs/{run_id}/``\n\n        Returns:\n            Dict[str, Any]: A dictionary containing the parsed contents of the dbt Cloud run details.\n                See: https://docs.getdbt.com/dbt-cloud/api-v2#operation/getRunById for schema.\n        """\n        self._log.info(f"Cancelling run with id '{run_id}'")\n        return self.make_request("POST", f"{self._account_id}/runs/{run_id}/cancel/")\n\n    def list_run_artifacts(self, run_id: int, step: Optional[int] = None) -> Sequence[str]:\n        """Lists the paths of the available run artifacts from a completed dbt Cloud run.\n\n        Args:\n            run_id (int): The ID of the relevant dbt Cloud run. You can find this value by going to\n                the details page of your run in the dbt Cloud UI. It will be the final number in the\n                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/runs/{run_id}/``\n            step (int): The index of the step in the run to query for artifacts. The first step in\n                the run has the index 1. 
If the step parameter is omitted, then this endpoint will\n                return the artifacts compiled for the last step in the run\n\n        Returns:\n            List[str]: List of the paths of the available run artifacts\n        """\n        query_params = f"?step={step}" if step else ""\n        return cast(\n            list,\n            self.make_request(\n                "GET",\n                f"{self._account_id}/runs/{run_id}/artifacts/{query_params}",\n                data={"step": step} if step else None,\n            ),\n        )\n\n    def get_run_artifact(self, run_id: int, path: str, step: Optional[int] = None) -> str:\n        """The string contents of a run artifact from a dbt Cloud run.\n\n        Args:\n            run_id (int): The ID of the relevant dbt Cloud run. You can find this value by going to\n                the details page of your run in the dbt Cloud UI. It will be the final number in the\n                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/runs/{run_id}/``\n            path (str): The path to this run artifact (e.g. 'run/my_new_project/models/example/my_first_dbt_model.sql')\n            step (int): The index of the step in the run to query for artifacts. The first step in\n                the run has the index 1. If the step parameter is omitted, then this endpoint will\n                return the artifacts compiled for the last step in the run.\n\n        Returns:\n            List[str]: List of the names of the available run artifacts\n        """\n        query_params = f"?step={step}" if step else ""\n        return self.make_request(\n            "GET",\n            f"{self._account_id}/runs/{run_id}/artifacts/{path}{query_params}",\n            data={"step": step} if step else None,\n            return_text=True,\n        )["text"]\n\n    def get_manifest(self, run_id: int, step: Optional[int] = None) -> Mapping[str, Any]:\n        """The parsed contents of a manifest.json file created by a completed run.\n\n        Args:\n            run_id (int): The ID of the relevant dbt Cloud run. You can find this value by going to\n                the details page of your run in the dbt Cloud UI. It will be the final number in the\n                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/runs/{run_id}/``\n            step (int): The index of the step in the run to query for artifacts. The first step in\n                the run has the index 1. If the step parameter is omitted, then this endpoint will\n                return the artifacts compiled for the last step in the run.\n\n        Returns:\n            Dict[str, Any]: Parsed contents of the manifest.json file\n        """\n        return json.loads(self.get_run_artifact(run_id, "manifest.json", step=step))\n\n    def get_run_results(self, run_id: int, step: Optional[int] = None) -> Mapping[str, Any]:\n        """The parsed contents of a run_results.json file created by a completed run.\n\n        Args:\n            run_id (int): The ID of the relevant dbt Cloud run. You can find this value by going to\n                the details page of your run in the dbt Cloud UI. It will be the final number in the\n                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/runs/{run_id}/``\n            step (int): The index of the step in the run to query for artifacts. The first step in\n                the run has the index 1. 
If the step parameter is omitted, then this endpoint will\n                return the artifacts compiled for the last step in the run.\n\n        Returns:\n            Dict[str, Any]: Parsed contents of the run_results.json file\n        """\n        return json.loads(self.get_run_artifact(run_id, "run_results.json", step=step))\n\n    def poll_run(\n        self,\n        run_id: int,\n        poll_interval: float = DEFAULT_POLL_INTERVAL,\n        poll_timeout: Optional[float] = None,\n        href: Optional[str] = None,\n    ) -> Mapping[str, Any]:\n        """Polls a dbt Cloud job run until it completes. Will raise a `dagster.Failure` exception if the\n        run does not complete successfully.\n\n        Args:\n            run_id (int): The ID of the relevant dbt Cloud run. You can find this value by going to\n                the details page of your run in the dbt Cloud UI. It will be the final number in the\n                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/runs/{run_id}/``\n            poll_interval (float): The time (in seconds) that should be waited between successive\n                polls of the dbt Cloud API.\n            poll_timeout (float): The maximum time (in seconds) that should be waited for this run\n                to complete. If this threshold is exceeded, the run will be cancelled and an\n                exception will be thrown. By default, this will poll forver.\n            href (str): For internal use, generally should not be set manually.\n\n        Returns:\n            Dict[str, Any]: A dictionary containing the parsed contents of the dbt Cloud run details.\n                See: https://docs.getdbt.com/dbt-cloud/api-v2#operation/getRunById for schema.\n        """\n        status: Optional[str] = None\n\n        if href is None:\n            href = self.get_run(run_id).get("href")\n        assert isinstance(href, str), "Run must have an href"\n\n        poll_start = datetime.datetime.now()\n        try:\n            while True:\n                run_details = self.get_run(run_id)\n                status = run_details["status_humanized"]\n                self._log.info(f"Polled run {run_id}. Status: [{status}]")\n\n                # completed successfully\n                if status == DbtCloudRunStatus.SUCCESS:\n                    return self.get_run(run_id, include_related=["job", "trigger", "run_steps"])\n                elif status in [DbtCloudRunStatus.ERROR, DbtCloudRunStatus.CANCELLED]:\n                    break\n                elif status not in [\n                    DbtCloudRunStatus.QUEUED,\n                    DbtCloudRunStatus.STARTING,\n                    DbtCloudRunStatus.RUNNING,\n                ]:\n                    check.failed(f"Received unexpected status '{status}'. This should never happen")\n\n                if poll_timeout and datetime.datetime.now() > poll_start + datetime.timedelta(\n                    seconds=poll_timeout\n                ):\n                    self.cancel_run(run_id)\n                    raise Failure(\n                        f"Run {run_id} timed out after "\n                        f"{datetime.datetime.now() - poll_start}. 
Attempted to cancel.",\n                        metadata={"run_page_url": MetadataValue.url(href)},\n                    )\n\n                # Sleep for the configured time interval before polling again.\n                time.sleep(poll_interval)\n        finally:\n            if status not in (\n                DbtCloudRunStatus.SUCCESS,\n                DbtCloudRunStatus.ERROR,\n                DbtCloudRunStatus.CANCELLED,\n            ):\n                self.cancel_run(run_id)\n\n        run_details = self.get_run(run_id, include_related=["trigger"])\n        raise Failure(\n            f"Run {run_id} failed. Status Message: {run_details['status_message']}",\n            metadata={\n                "run_details": MetadataValue.json(run_details),\n                "run_page_url": MetadataValue.url(href),\n            },\n        )\n\n    def run_job_and_poll(\n        self,\n        job_id: int,\n        poll_interval: float = DEFAULT_POLL_INTERVAL,\n        poll_timeout: Optional[float] = None,\n        **kwargs,\n    ) -> DbtCloudOutput:\n        """Runs a dbt Cloud job and polls until it completes. Will raise a `dagster.Failure` exception\n        if the run does not complete successfully.\n\n        Args:\n            job_id (int): The ID of the relevant dbt Cloud job. You can find this value by going to\n                the details page of your job in the dbt Cloud UI. It will be the final number in the\n                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/jobs/{job_id}/``\n            poll_interval (float): The time (in seconds) that should be waited between successive\n                polls of the dbt Cloud API.\n            poll_timeout (float): The maximum time (in seconds) that should be waited for this run\n                to complete. If this threshold is exceeded, the run will be cancelled and an\n                exception will be thrown. By default, this will poll forver.\n\n        Returns:\n            :py:class:`~DbtCloudOutput`: Class containing details about the specific job run and the\n                parsed run results.\n        """\n        run_details = self.run_job(job_id, **kwargs)\n        run_id = run_details["id"]\n        href = run_details["href"]\n        final_run_details = self.poll_run(\n            run_id, poll_interval=poll_interval, poll_timeout=poll_timeout, href=href\n        )\n        try:\n            run_results = self.get_run_results(run_id)\n        # if you fail to get run_results for this job, just leave it empty\n        except Failure:\n            self._log.info(\n                "run_results.json not available for this run. Defaulting to empty value."\n            )\n            run_results = {}\n        output = DbtCloudOutput(run_details=final_run_details, result=run_results)\n        if output.docs_url:\n            self._log.info(f"Docs for this run can be viewed here: {output.docs_url}")\n        return output\n\n    def get_job_environment_variables(self, project_id: int, job_id: int) -> Mapping[str, Any]:\n        """Get the dbt Cloud environment variables for a specific job.\n\n        Args:\n            project_id (int): The ID of the relevant dbt Cloud project. You can find this value by\n                going to your account settings in the dbt Cloud UI. It will be the final\n                number in the url, e.g.: ``https://cloud.getdbt.com/next/settings/accounts/{account_id}/projects/{project_id}/``\n            job_id (int): The ID of the relevant dbt Cloud job. 
You can find this value by going to\n                the details page of your job in the dbt Cloud UI. It will be the final number in the\n                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/jobs/{job_id}/``\n        """\n        return self.make_request(\n            "GET",\n            f"{self._account_id}/projects/{project_id}/environment-variables/job",\n            params={"job_definition_id": job_id},\n            base_url=self.api_v3_base_url,\n        )\n\n    def set_job_environment_variable(\n        self, project_id: int, job_id: int, environment_variable_id: int, name: str, value: str\n    ) -> Mapping[str, Any]:\n        """Set the dbt Cloud environment variables for a specific job.\n\n        Args:\n            project_id (int): The ID of the relevant dbt Cloud project. You can find this value by\n                going to your account settings in the dbt Cloud UI. It will be the final\n                number in the url, e.g.: ``https://cloud.getdbt.com/next/settings/accounts/{account_id}/projects/{project_id}/``\n            job_id (int): The ID of the relevant dbt Cloud job. You can find this value by going to\n                the details page of your job in the dbt Cloud UI. It will be the final number in the\n                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/jobs/{job_id}/``\n            name (str): The name of the environment variable to set.\n            value (str): The raw value of the environment variable.\n        """\n        return self.make_request(\n            "POST",\n            f"{self._account_id}/projects/{project_id}/environment-variables/{environment_variable_id}",\n            data={\n                "id": environment_variable_id,\n                "account_id": self._account_id,\n                "project_id": project_id,\n                "job_definition_id": job_id,\n                "type": "job",\n                "name": name,\n                "raw_value": value,\n            },\n            base_url=self.api_v3_base_url,\n        )\n\n\nclass DbtCloudResource(DbtCloudClient):\n    pass\n\n\n
[docs]class DbtCloudClientResource(ConfigurableResource, IAttachDifferentObjectToOpContext):\n """This resource helps interact with dbt Cloud connectors."""\n\n auth_token: str = Field(\n description=(\n "dbt Cloud API Token. User tokens can be found in the [dbt Cloud"\n " UI](https://cloud.getdbt.com/#/profile/api/), or see the [dbt Cloud"\n " Docs](https://docs.getdbt.com/docs/dbt-cloud/dbt-cloud-api/service-tokens) for"\n " instructions on creating a Service Account token."\n ),\n )\n account_id: int = Field(\n description=(\n "dbt Cloud Account ID. This value can be found in the url of a variety of views in"\n " the dbt Cloud UI, e.g."\n " https://cloud.getdbt.com/#/accounts/{account_id}/settings/."\n ),\n )\n disable_schedule_on_trigger: bool = Field(\n default=True,\n description=(\n "Specifies if you would like any job that is triggered using this "\n "resource to automatically disable its schedule."\n ),\n )\n request_max_retries: int = Field(\n default=3,\n description=(\n "The maximum number of times requests to the dbt Cloud API should be retried "\n "before failing."\n ),\n )\n request_retry_delay: float = Field(\n default=0.25,\n description="Time (in seconds) to wait between each request retry.",\n )\n dbt_cloud_host: str = Field(\n default=DBT_DEFAULT_HOST,\n description=(\n "The hostname where dbt cloud is being hosted (e.g. https://my_org.cloud.getdbt.com/)."\n ),\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def get_dbt_client(self) -> DbtCloudClient:\n context = self.get_resource_context()\n assert context.log\n\n return DbtCloudClient(\n auth_token=self.auth_token,\n account_id=self.account_id,\n disable_schedule_on_trigger=self.disable_schedule_on_trigger,\n request_max_retries=self.request_max_retries,\n request_retry_delay=self.request_retry_delay,\n log=context.log,\n dbt_cloud_host=self.dbt_cloud_host,\n )\n\n def get_object_to_set_on_execution_context(self) -> Any:\n return self.get_dbt_client()
\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n config_schema=DbtCloudClientResource.to_config_schema(),\n description="This resource helps interact with dbt Cloud connectors",\n)\ndef dbt_cloud_resource(context) -> DbtCloudResource:\n """This resource allows users to programmatically interface with the dbt Cloud Administrative REST\n API (v2) to launch jobs and monitor their progress. This currently implements only a subset of\n the functionality exposed by the API.\n\n For a complete set of documentation on the dbt Cloud Administrative REST API, including expected\n response JSON schemas, see the `dbt Cloud API Docs <https://docs.getdbt.com/dbt-cloud/api-v2>`_.\n\n To configure this resource, we recommend using the `configured\n <https://docs.dagster.io/concepts/configuration/configured>`_ method.\n\n **Examples:**\n\n .. code-block:: python\n\n from dagster import job\n from dagster_dbt import dbt_cloud_resource\n\n my_dbt_cloud_resource = dbt_cloud_resource.configured(\n {\n "auth_token": {"env": "DBT_CLOUD_AUTH_TOKEN"},\n "account_id": {"env": "DBT_CLOUD_ACCOUNT_ID"},\n }\n )\n\n @job(resource_defs={"dbt_cloud": my_dbt_cloud_resource})\n def my_dbt_cloud_job():\n ...\n """\n return DbtCloudResource(\n auth_token=context.resource_config["auth_token"],\n account_id=context.resource_config["account_id"],\n disable_schedule_on_trigger=context.resource_config["disable_schedule_on_trigger"],\n request_max_retries=context.resource_config["request_max_retries"],\n request_retry_delay=context.resource_config["request_retry_delay"],\n log=context.log,\n dbt_cloud_host=context.resource_config["dbt_cloud_host"],\n )
\n
", "current_page_name": "_modules/dagster_dbt/cloud/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dbt.cloud.resources"}}, "core": {"resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dbt.core.resources

\nfrom typing import Any, Iterator, Mapping, Optional, Sequence, Set\n\nimport dagster._check as check\nfrom dagster import resource\nfrom dagster._annotations import deprecated, public\nfrom dagster._config.pythonic_config import ConfigurableResource, IAttachDifferentObjectToOpContext\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster._utils.merger import merge_dicts\nfrom pydantic import Field\n\nfrom ..dbt_resource import DbtClient\nfrom .types import DbtCliOutput\nfrom .utils import (\n    DEFAULT_DBT_TARGET_PATH,\n    execute_cli,\n    execute_cli_stream,\n    parse_manifest,\n    parse_run_results,\n    remove_run_results,\n)\n\nDEFAULT_DBT_EXECUTABLE = "dbt"\n\n# The set of dbt cli commands that result in the creation of a run_results.json output file\n# https://docs.getdbt.com/reference/artifacts/run-results-json\nDBT_RUN_RESULTS_COMMANDS = ["run", "test", "seed", "snapshot", "docs generate", "build"]\n\n# The following config fields correspond to flags that apply to all dbt CLI commands. For details\n# on dbt CLI flags, see\n# https://github.com/fishtown-analytics/dbt/blob/1f8e29276e910c697588c43f08bc881379fff178/core/dbt/main.py#L260-L329\n\nCOMMON_OPTION_KEYS = {\n    "warn_error",\n    "dbt_executable",\n    "ignore_handled_error",\n    "target_path",\n    "docs_url",\n    "json_log_format",\n    "capture_logs",\n    "debug",\n}\n\n\nclass ConfigurableResourceWithCliFlags(ConfigurableResource):\n    project_dir: str = Field(\n        default=".",\n        description=(\n            "Which directory to look in for the dbt_project.yml file. Default is the current "\n            "working directory and its parents."\n        ),\n    )\n    profiles_dir: Optional[str] = Field(\n        default=None,\n        description=(\n            "Which directory to look in for the profiles.yml file. Default = $DBT_PROFILES_DIR or "\n            "$HOME/.dbt"\n        ),\n    )\n    profile: Optional[str] = Field(\n        default=None, description="Which profile to load. Overrides setting in dbt_project.yml."\n    )\n    target: Optional[str] = Field(\n        default=None, description="Which target to load for the given profile."\n    )\n    vars: Optional[Mapping[str, Any]] = Field(\n        default=None,\n        description=(\n            "Supply variables to the project. This argument overrides variables defined in your "\n            "dbt_project.yml file. This argument should be a dictionary, eg. "\n            "{'my_variable': 'my_value'}"\n        ),\n    )\n    bypass_cache: bool = Field(\n        default=False, description="If set, bypass the adapter-level cache of database state"\n    )\n    warn_error: bool = Field(\n        default=False,\n        description=(\n            "If dbt would normally warn, instead raise an exception. Examples include --models "\n            "that selects nothing, deprecations, configurations with no associated models, "\n            "invalid test configurations, and missing sources/refs in tests."\n        ),\n    )\n    dbt_executable: str = Field(\n        default=DEFAULT_DBT_EXECUTABLE,\n        description=f"Path to the dbt executable. Default is {DEFAULT_DBT_EXECUTABLE}",\n    )\n    ignore_handled_error: bool = Field(\n        default=False,\n        description=(\n            "When True, will not raise an exception when the dbt CLI returns error code 1. 
"\n            "Default is False."\n        ),\n    )\n    target_path: str = Field(\n        default=DEFAULT_DBT_TARGET_PATH,\n        description=(\n            "The directory path for target if different from the default `target-path` in "\n            "your dbt project configuration file."\n        ),\n    )\n    docs_url: Optional[str] = Field(\n        default=None, description="The url for where dbt docs are being served for this project."\n    )\n    json_log_format: bool = Field(\n        default=True,\n        description=(\n            "When True, dbt will invoked with the `--log-format json` flag, allowing "\n            "Dagster to parse the log messages and emit simpler log messages to the event log."\n        ),\n    )\n    capture_logs: bool = Field(\n        default=True,\n        description=(\n            "When True, dbt will invoked with the `--capture-output` flag, allowing "\n            "Dagster to capture the logs and emit them to the event log."\n        ),\n    )\n    debug: bool = Field(\n        default=False,\n        description=(\n            "When True, dbt will invoked with the `--debug` flag, which will print "\n            "additional debug information to the console."\n        ),\n    )\n\n\nclass DbtCliClient(DbtClient):\n    """A resource that allows you to execute dbt cli commands.\n\n    For the most up-to-date documentation on the specific parameters available to you for each\n    command, check out the dbt docs:\n\n    https://docs.getdbt.com/reference/commands/run\n\n    To use this as a dagster resource, we recommend using\n    :func:`dbt_cli_resource <dagster_dbt.dbt_cli_resource>`.\n    """\n\n    def __init__(\n        self,\n        executable: str,\n        default_flags: Mapping[str, Any],\n        warn_error: bool,\n        ignore_handled_error: bool,\n        target_path: str,\n        logger: Optional[Any] = None,\n        docs_url: Optional[str] = None,\n        json_log_format: bool = True,\n        capture_logs: bool = True,\n        debug: bool = False,\n    ):\n        self._default_flags = default_flags\n        self._executable = executable\n        self._warn_error = warn_error\n        self._ignore_handled_error = ignore_handled_error\n        self._target_path = target_path\n        self._docs_url = docs_url\n        self._json_log_format = json_log_format\n        self._capture_logs = capture_logs\n        self._debug = debug\n        super().__init__(logger)\n\n    @property\n    def default_flags(self) -> Mapping[str, Any]:\n        """A set of params populated from resource config that are passed as flags to each dbt CLI command."""\n        return self._format_params(self._default_flags, replace_underscores=True)\n\n    @property\n    def strict_flags(self) -> Set[str]:\n        """A set of flags that should not be auto-populated from the default flags unless they are\n        arguments to the associated function.\n        """\n        return {"models", "exclude", "select"}\n\n    def _get_flags_dict(self, kwargs) -> Mapping[str, Any]:\n        extra_flags = {} if kwargs is None else kwargs\n\n        # remove default flags that are declared as "strict" and not explicitly passed in\n        default_flags = {\n            k: v\n            for k, v in self.default_flags.items()\n            if not (k in self.strict_flags and k not in extra_flags)\n        }\n\n        return merge_dicts(\n            default_flags, self._format_params(extra_flags, replace_underscores=True)\n        )\n\n    @public\n    def cli(self, 
command: str, **kwargs) -> DbtCliOutput:\n        """Executes a dbt CLI command. Params passed in as keyword arguments will be merged with the\n            default flags that were configured on resource initialization (if any) overriding the\n            default values if necessary.\n\n        Args:\n            command (str): The command you wish to run (e.g. 'run', 'test', 'docs generate', etc.)\n\n        Returns:\n            DbtCliOutput: An instance of :class:`DbtCliOutput<dagster_dbt.DbtCliOutput>` containing\n                parsed log output as well as the contents of run_results.json (if applicable).\n        """\n        command = check.str_param(command, "command")\n        return execute_cli(\n            executable=self._executable,\n            command=command,\n            flags_dict=self._get_flags_dict(kwargs),\n            log=self.logger,\n            warn_error=self._warn_error,\n            ignore_handled_error=self._ignore_handled_error,\n            target_path=self._target_path,\n            docs_url=self._docs_url,\n            json_log_format=self._json_log_format,\n            capture_logs=self._capture_logs,\n            debug=self._debug,\n        )\n\n    def cli_stream_json(self, command: str, **kwargs) -> Iterator[Mapping[str, Any]]:\n        """Executes a dbt CLI command. Params passed in as keyword arguments will be merged with the\n            default flags that were configured on resource initialization (if any) overriding the\n            default values if necessary.\n\n        Args:\n            command (str): The command you wish to run (e.g. 'run', 'test', 'docs generate', etc.)\n        """\n        check.invariant(self._json_log_format, "Cannot stream JSON if json_log_format is False.")\n        for event in execute_cli_stream(\n            executable=self._executable,\n            command=command,\n            flags_dict=self._get_flags_dict(kwargs),\n            log=self.logger,\n            warn_error=self._warn_error,\n            ignore_handled_error=self._ignore_handled_error,\n            json_log_format=self._json_log_format,\n            capture_logs=self._capture_logs,\n            debug=self._debug,\n        ):\n            if event.parsed_json_line is not None:\n                yield event.parsed_json_line\n\n    @public\n    def compile(\n        self,\n        models: Optional[Sequence[str]] = None,\n        exclude: Optional[Sequence[str]] = None,\n        select: Optional[Sequence[str]] = None,\n        **kwargs,\n    ) -> DbtCliOutput:\n        """Run the ``compile`` command on a dbt project. kwargs are passed in as additional parameters.\n\n        Args:\n            models (List[str], optional): the models to include in compilation.\n            exclude (List[str]), optional): the models to exclude from compilation.\n            select (List[str], optional): the models to include in compilation.\n\n        Returns:\n            DbtCliOutput: An instance of :class:`DbtCliOutput<dagster_dbt.DbtCliOutput>` containing\n                parsed log output as well as the contents of run_results.json (if applicable).\n        """\n        return self.cli("compile", models=models, exclude=exclude, select=select, **kwargs)\n\n    @public\n    def run(\n        self,\n        models: Optional[Sequence[str]] = None,\n        exclude: Optional[Sequence[str]] = None,\n        select: Optional[Sequence[str]] = None,\n        **kwargs,\n    ) -> DbtCliOutput:\n        """Run the ``run`` command on a dbt project. 
kwargs are passed in as additional parameters.\n\n        Args:\n            models (List[str], optional): the models to include in the run.\n            exclude (List[str]), optional): the models to exclude from the run.\n            select (List[str], optional): the models to include in the run.\n\n        Returns:\n            DbtCliOutput: An instance of :class:`DbtCliOutput<dagster_dbt.DbtCliOutput>` containing\n                parsed log output as well as the contents of run_results.json (if applicable).\n        """\n        return self.cli("run", models=models, exclude=exclude, select=select, **kwargs)\n\n    @public\n    def snapshot(\n        self,\n        select: Optional[Sequence[str]] = None,\n        exclude: Optional[Sequence[str]] = None,\n        **kwargs,\n    ) -> DbtCliOutput:\n        """Run the ``snapshot`` command on a dbt project. kwargs are passed in as additional parameters.\n\n        Args:\n            select (List[str], optional): the snapshots to include in the run.\n            exclude (List[str], optional): the snapshots to exclude from the run.\n\n        Returns:\n            DbtCliOutput: An instance of :class:`DbtCliOutput<dagster_dbt.DbtCliOutput>` containing\n                parsed log output as well as the contents of run_results.json (if applicable).\n        """\n        return self.cli("snapshot", select=select, exclude=exclude, **kwargs)\n\n    @public\n    def test(\n        self,\n        models: Optional[Sequence[str]] = None,\n        exclude: Optional[Sequence[str]] = None,\n        data: bool = True,\n        schema: bool = True,\n        select: Optional[Sequence[str]] = None,\n        **kwargs,\n    ) -> DbtCliOutput:\n        """Run the ``test`` command on a dbt project. kwargs are passed in as additional parameters.\n\n        Args:\n            models (List[str], optional): the models to include in testing.\n            exclude (List[str], optional): the models to exclude from testing.\n            data (bool, optional): If ``True`` (default), then run data tests.\n            schema (bool, optional): If ``True`` (default), then run schema tests.\n            select (List[str], optional): the models to include in testing.\n\n        Returns:\n            DbtCliOutput: An instance of :class:`DbtCliOutput<dagster_dbt.DbtCliOutput>` containing\n                parsed log output as well as the contents of run_results.json (if applicable).\n        """\n        if data and schema:\n            # do not include these arguments if both are True, as these are deprecated in later\n            # versions of dbt, and for older versions the functionality is the same regardless of\n            # if both are set or neither are set.\n            return self.cli("test", models=models, exclude=exclude, select=select, **kwargs)\n        return self.cli(\n            "test",\n            models=models,\n            exclude=exclude,\n            data=data,\n            schema=schema,\n            select=select,\n            **kwargs,\n        )\n\n    @public\n    def seed(\n        self,\n        show: bool = False,\n        select: Optional[Sequence[str]] = None,\n        exclude: Optional[Sequence[str]] = None,\n        **kwargs,\n    ) -> DbtCliOutput:\n        """Run the ``seed`` command on a dbt project. kwargs are passed in as additional parameters.\n\n        Args:\n            show (bool, optional): If ``True``, then show a sample of the seeded data in the\n                response. 
Defaults to ``False``.\n            select (List[str], optional): the snapshots to include in the run.\n            exclude (List[str], optional): the snapshots to exclude from the run.\n\n        Returns:\n            DbtCliOutput: An instance of :class:`DbtCliOutput<dagster_dbt.DbtCliOutput>` containing\n                parsed log output as well as the contents of run_results.json (if applicable).\n        """\n        return self.cli("seed", show=show, select=select, exclude=exclude, **kwargs)\n\n    @public\n    def ls(\n        self,\n        select: Optional[Sequence[str]] = None,\n        models: Optional[Sequence[str]] = None,\n        exclude: Optional[Sequence[str]] = None,\n        **kwargs,\n    ) -> DbtCliOutput:\n        """Run the ``ls`` command on a dbt project. kwargs are passed in as additional parameters.\n\n        Args:\n            select (List[str], optional): the resources to include in the output.\n            models (List[str], optional): the models to include in the output.\n            exclude (List[str], optional): the resources to exclude from the output.\n\n        Returns:\n            DbtCliOutput: An instance of :class:`DbtCliOutput<dagster_dbt.DbtCliOutput>` containing\n                parsed log output as well as the contents of run_results.json (if applicable).\n        """\n        return self.cli("ls", select=select, models=models, exclude=exclude, **kwargs)\n\n    @public\n    def build(self, select: Optional[Sequence[str]] = None, **kwargs) -> DbtCliOutput:\n        """Run the ``build`` command on a dbt project. kwargs are passed in as additional parameters.\n\n        Args:\n            select (List[str], optional): the models/resources to include in the run.\n\n        Returns:\n            DbtCliOutput: An instance of :class:`DbtCliOutput<dagster_dbt.DbtCliOutput>` containing\n                parsed log output as well as the contents of run_results.json (if applicable).\n        """\n        return self.cli("build", select=select, **kwargs)\n\n    @public\n    def freshness(self, select: Optional[Sequence[str]] = None, **kwargs) -> DbtCliOutput:\n        """Run the ``source snapshot-freshness`` command on a dbt project. kwargs are passed in as additional parameters.\n\n        Args:\n            select (List[str], optional): the sources to include in the run.\n\n        Returns:\n            DbtCliOutput: An instance of :class:`DbtCliOutput<dagster_dbt.DbtCliOutput>` containing\n                parsed log output as well as the contents of run_results.json (if applicable).\n        """\n        return self.cli("source snapshot-freshness", select=select, **kwargs)\n\n    @public\n    def generate_docs(self, compile_project: bool = False, **kwargs) -> DbtCliOutput:\n        """Run the ``docs generate`` command on a dbt project. kwargs are passed in as additional parameters.\n\n        Args:\n            compile_project (bool, optional): If true, compile the project before generating a catalog.\n\n        Returns:\n            DbtCliOutput: An instance of :class:`DbtCliOutput<dagster_dbt.DbtCliOutput>` containing\n                parsed log output as well as the contents of run_results.json (if applicable).\n        """\n        return self.cli("docs generate", compile=compile_project, **kwargs)\n\n    @public\n    def run_operation(\n        self, macro: str, args: Optional[Mapping[str, Any]] = None, **kwargs\n    ) -> DbtCliOutput:\n        """Run the ``run-operation`` command on a dbt project. 
kwargs are passed in as additional parameters.\n\n        Args:\n            macro (str): the dbt macro to invoke.\n            args (Dict[str, Any], optional): the keyword arguments to be supplied to the macro.\n\n        Returns:\n            DbtCliOutput: An instance of :class:`DbtCliOutput<dagster_dbt.DbtCliOutput>` containing\n                parsed log output as well as the contents of run_results.json (if applicable).\n        """\n        return self.cli(f"run-operation {macro}", args=args, **kwargs)\n\n    @public\n    def get_run_results_json(self, **kwargs) -> Optional[Mapping[str, Any]]:\n        """Get a parsed version of the run_results.json file for the relevant dbt project.\n\n        Returns:\n            Dict[str, Any]: dictionary containing the parsed contents of the manifest json file\n                for this dbt project.\n        """\n        project_dir = kwargs.get("project_dir", self.default_flags["project-dir"])\n        target_path = kwargs.get("target_path", self._target_path)\n        return parse_run_results(project_dir, target_path)\n\n    @public\n    def remove_run_results_json(self, **kwargs):\n        """Remove the run_results.json file from previous runs (if it exists)."""\n        project_dir = kwargs.get("project_dir", self.default_flags["project-dir"])\n        target_path = kwargs.get("target_path", self._target_path)\n        remove_run_results(project_dir, target_path)\n\n    @public\n    def get_manifest_json(self, **kwargs) -> Optional[Mapping[str, Any]]:\n        """Get a parsed version of the manifest.json file for the relevant dbt project.\n\n        Returns:\n            Dict[str, Any]: dictionary containing the parsed contents of the manifest json file\n                for this dbt project.\n        """\n        project_dir = kwargs.get("project_dir", self.default_flags["project-dir"])\n        target_path = kwargs.get("target_path", self._target_path)\n        return parse_manifest(project_dir, target_path)\n\n\nclass DbtCliClientResource(ConfigurableResourceWithCliFlags, IAttachDifferentObjectToOpContext):\n    """Resource which issues dbt CLI commands against a configured dbt project."""\n\n    class Config:\n        extra = "allow"\n\n    @classmethod\n    def _is_dagster_maintained(cls) -> bool:\n        return True\n\n    def get_dbt_client(self) -> DbtCliClient:\n        context = self.get_resource_context()\n        default_flags = {\n            k: v\n            for k, v in self._get_non_none_public_field_values().items()\n            if k not in COMMON_OPTION_KEYS\n        }\n\n        return DbtCliClient(\n            executable=self.dbt_executable,\n            default_flags=default_flags,\n            warn_error=self.warn_error,\n            ignore_handled_error=self.ignore_handled_error,\n            target_path=self.target_path,\n            docs_url=self.docs_url,\n            logger=context.log,\n            json_log_format=self.json_log_format,\n            capture_logs=self.capture_logs,\n            debug=self.debug,\n        )\n\n    def get_object_to_set_on_execution_context(self) -> Any:\n        return self.get_dbt_client()\n\n\n
[docs]@deprecated(breaking_version="0.21", additional_warn_text="Use DbtCliResource instead.")\n@dagster_maintained_resource\n@resource(config_schema=DbtCliClientResource.to_config_schema())\ndef dbt_cli_resource(context) -> DbtCliClient:\n """This resource issues dbt CLI commands against a configured dbt project. It is deprecated\n in favor of :py:class:`~dagster_dbt.DbtCliResource`.\n """\n # all config options that are intended to be used as flags for dbt commands\n\n default_flags = {\n k: v for k, v in context.resource_config.items() if k not in COMMON_OPTION_KEYS\n }\n return DbtCliClient(\n executable=context.resource_config["dbt_executable"],\n default_flags=default_flags,\n warn_error=context.resource_config["warn_error"],\n ignore_handled_error=context.resource_config["ignore_handled_error"],\n target_path=context.resource_config["target_path"],\n logger=context.log,\n docs_url=context.resource_config.get("docs_url"),\n capture_logs=context.resource_config["capture_logs"],\n json_log_format=context.resource_config["json_log_format"],\n debug=context.resource_config["debug"],\n )
\n
", "current_page_name": "_modules/dagster_dbt/core/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dbt.core.resources"}, "resources_v2": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dbt.core.resources_v2

\nimport atexit\nimport contextlib\nimport os\nimport shutil\nimport subprocess\nimport sys\nimport uuid\nfrom contextlib import suppress\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import (\n    Any,\n    Dict,\n    Iterator,\n    List,\n    Mapping,\n    Optional,\n    Union,\n)\n\nimport dateutil.parser\nimport orjson\nfrom dagster import (\n    AssetCheckResult,\n    AssetCheckSeverity,\n    AssetObservation,\n    AssetsDefinition,\n    ConfigurableResource,\n    Output,\n    get_dagster_logger,\n)\nfrom dagster._annotations import public\nfrom dagster._core.errors import DagsterInvalidPropertyError\nfrom dagster._core.execution.context.compute import OpExecutionContext\nfrom dbt.contracts.results import NodeStatus, TestStatus\nfrom dbt.node_types import NodeType\nfrom dbt.version import __version__ as dbt_version\nfrom packaging import version\nfrom pydantic import Field, root_validator, validator\nfrom typing_extensions import Literal\n\nfrom ..asset_utils import (\n    get_manifest_and_translator_from_dbt_assets,\n    output_name_fn,\n)\nfrom ..dagster_dbt_translator import DagsterDbtTranslator\nfrom ..dbt_manifest import DbtManifestParam, validate_manifest\nfrom ..errors import DagsterDbtCliRuntimeError\nfrom ..utils import ASSET_RESOURCE_TYPES, get_dbt_resource_props_by_dbt_unique_id_from_manifest\n\nlogger = get_dagster_logger()\n\n\nDBT_PROJECT_YML_NAME = "dbt_project.yml"\nDBT_PROFILES_YML_NAME = "profiles.yml"\nPARTIAL_PARSE_FILE_NAME = "partial_parse.msgpack"\n\n\ndef _get_dbt_target_path() -> Path:\n    return Path(os.getenv("DBT_TARGET_PATH", "target"))\n\n\n
[docs]@dataclass\nclass DbtCliEventMessage:\n """The representation of a dbt CLI event.\n\n Args:\n raw_event (Dict[str, Any]): The raw event dictionary.\n See https://docs.getdbt.com/reference/events-logging#structured-logging for more\n information.\n """\n\n raw_event: Dict[str, Any]\n\n @classmethod\n def from_log(cls, log: str) -> "DbtCliEventMessage":\n """Parse an event according to https://docs.getdbt.com/reference/events-logging#structured-logging.\n\n We assume that the log format is json.\n """\n raw_event: Dict[str, Any] = orjson.loads(log)\n\n return cls(raw_event=raw_event)\n\n def __str__(self) -> str:\n return self.raw_event["info"]["msg"]\n\n
[docs] @public\n def to_default_asset_events(\n self,\n manifest: DbtManifestParam,\n dagster_dbt_translator: DagsterDbtTranslator = DagsterDbtTranslator(),\n ) -> Iterator[Union[Output, AssetObservation, AssetCheckResult]]:\n """Convert a dbt CLI event to a set of corresponding Dagster events.\n\n Args:\n manifest (Union[Mapping[str, Any], str, Path]): The dbt manifest blob.\n dagster_dbt_translator (DagsterDbtTranslator): Optionally, a custom translator for\n linking dbt nodes to Dagster assets.\n\n Returns:\n Iterator[Union[Output, AssetObservation, AssetCheckResult]]: A set of corresponding Dagster events.\n - Output for refables (e.g. models, seeds, snapshots.)\n - AssetObservation for dbt test results that are not enabled as asset checks.\n - AssetCheckResult for dbt test results that are enabled as asset checks.\n """\n if self.raw_event["info"]["level"] == "debug":\n return\n\n event_node_info: Dict[str, Any] = self.raw_event["data"].get("node_info")\n if not event_node_info:\n return\n\n manifest = validate_manifest(manifest)\n\n if not manifest:\n logger.info(\n "No dbt manifest was provided. Dagster events for dbt tests will not be created."\n )\n\n invocation_id: str = self.raw_event["info"]["invocation_id"]\n unique_id: str = event_node_info["unique_id"]\n node_resource_type: str = event_node_info["resource_type"]\n node_status: str = event_node_info["node_status"]\n\n is_node_successful = node_status == NodeStatus.Success\n is_node_finished = bool(event_node_info.get("node_finished_at"))\n if node_resource_type in NodeType.refable() and is_node_successful:\n started_at = dateutil.parser.isoparse(event_node_info["node_started_at"])\n finished_at = dateutil.parser.isoparse(event_node_info["node_finished_at"])\n duration_seconds = (finished_at - started_at).total_seconds()\n\n yield Output(\n value=None,\n output_name=output_name_fn(event_node_info),\n metadata={\n "unique_id": unique_id,\n "invocation_id": invocation_id,\n "Execution Duration": duration_seconds,\n },\n )\n elif manifest and node_resource_type == NodeType.Test and is_node_finished:\n upstream_unique_ids: List[str] = manifest["parent_map"][unique_id]\n test_resource_props = manifest["nodes"][unique_id]\n metadata = {\n "unique_id": unique_id,\n "invocation_id": invocation_id,\n "status": node_status,\n }\n\n is_asset_check = dagster_dbt_translator.settings.enable_asset_checks\n attached_node_unique_id = test_resource_props.get("attached_node")\n is_generic_test = bool(attached_node_unique_id)\n\n if is_asset_check and is_generic_test:\n is_test_successful = node_status == TestStatus.Pass\n severity = AssetCheckSeverity(test_resource_props["config"]["severity"].upper())\n\n attached_node_resource_props: Dict[str, Any] = manifest["nodes"].get(\n attached_node_unique_id\n ) or manifest["sources"].get(attached_node_unique_id)\n attached_node_asset_key = dagster_dbt_translator.get_asset_key(\n attached_node_resource_props\n )\n\n yield AssetCheckResult(\n passed=is_test_successful,\n asset_key=attached_node_asset_key,\n check_name=event_node_info["node_name"],\n metadata=metadata,\n severity=severity,\n )\n else:\n for upstream_unique_id in upstream_unique_ids:\n upstream_resource_props: Dict[str, Any] = manifest["nodes"].get(\n upstream_unique_id\n ) or manifest["sources"].get(upstream_unique_id)\n upstream_asset_key = dagster_dbt_translator.get_asset_key(\n upstream_resource_props\n )\n\n yield AssetObservation(\n asset_key=upstream_asset_key,\n metadata=metadata,\n )
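A small sketch of what ``stream`` does under the hood: parse one structured dbt log line and convert it into Dagster events. This assumes ``DbtCliEventMessage`` is importable from the top-level ``dagster_dbt`` package; the JSON payload and manifest path shown here are illustrative only.

.. code-block:: python

    from pathlib import Path

    from dagster_dbt import DbtCliEventMessage

    # Illustrative structured log line; real dbt logs carry many more fields.
    raw_log = '{"info": {"level": "info", "msg": "...", "invocation_id": "abc123"}, "data": {}}'

    event = DbtCliEventMessage.from_log(raw_log)

    # Convert the event into Dagster events, resolving asset keys via the manifest.
    for dagster_event in event.to_default_asset_events(manifest=Path("target", "manifest.json")):
        ...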
\n\n\n
[docs]@dataclass\nclass DbtCliInvocation:\n """The representation of an invoked dbt command.\n\n Args:\n process (subprocess.Popen): The process running the dbt command.\n manifest (Mapping[str, Any]): The dbt manifest blob.\n project_dir (Path): The path to the dbt project.\n target_path (Path): The path to the dbt target folder.\n raise_on_error (bool): Whether to raise an exception if the dbt command fails.\n """\n\n process: subprocess.Popen\n manifest: Mapping[str, Any]\n dagster_dbt_translator: DagsterDbtTranslator\n project_dir: Path\n target_path: Path\n raise_on_error: bool\n\n @classmethod\n def run(\n cls,\n args: List[str],\n env: Dict[str, str],\n manifest: Mapping[str, Any],\n dagster_dbt_translator: DagsterDbtTranslator,\n project_dir: Path,\n target_path: Path,\n raise_on_error: bool,\n ) -> "DbtCliInvocation":\n # Attempt to take advantage of partial parsing. If there is a `partial_parse.msgpack`\n # in the target folder, then copy it to the dynamic target path.\n #\n # This effectively allows us to skip the parsing of the manifest, which can be expensive.\n # See https://docs.getdbt.com/reference/programmatic-invocations#reusing-objects for more\n # details.\n current_target_path = _get_dbt_target_path()\n partial_parse_file_path = (\n current_target_path.joinpath(PARTIAL_PARSE_FILE_NAME)\n if current_target_path.is_absolute()\n else project_dir.joinpath(current_target_path, PARTIAL_PARSE_FILE_NAME)\n )\n partial_parse_destination_target_path = target_path.joinpath(PARTIAL_PARSE_FILE_NAME)\n\n if partial_parse_file_path.exists():\n logger.info(\n f"Copying `{partial_parse_file_path}` to `{partial_parse_destination_target_path}`"\n " to take advantage of partial parsing."\n )\n\n partial_parse_destination_target_path.parent.mkdir(parents=True, exist_ok=True)\n shutil.copy(partial_parse_file_path, partial_parse_destination_target_path)\n\n # Create a subprocess that runs the dbt CLI command.\n logger.info(f"Running dbt command: `{' '.join(args)}`.")\n process = subprocess.Popen(\n args=args,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n env=env,\n cwd=project_dir,\n )\n\n # Add handler to terminate child process if running.\n # See https://stackoverflow.com/a/18258391 for more details.\n def cleanup_dbt_subprocess(process: subprocess.Popen) -> None:\n if process.returncode is None:\n logger.info(\n "The main process is being terminated, but the dbt command has not yet"\n " completed. Terminating the execution of dbt command."\n )\n process.terminate()\n process.wait()\n\n atexit.register(cleanup_dbt_subprocess, process)\n\n return cls(\n process=process,\n manifest=manifest,\n dagster_dbt_translator=dagster_dbt_translator,\n project_dir=project_dir,\n target_path=target_path,\n raise_on_error=raise_on_error,\n )\n\n
[docs] @public\n def wait(self) -> "DbtCliInvocation":\n """Wait for the dbt CLI process to complete.\n\n Returns:\n DbtCliInvocation: The current representation of the dbt CLI invocation.\n\n Examples:\n .. code-block:: python\n\n from dagster_dbt import DbtCliResource\n\n dbt = DbtCliResource(project_dir="/path/to/dbt/project")\n\n dbt_cli_invocation = dbt.cli(["run"]).wait()\n """\n list(self.stream_raw_events())\n\n return self
\n\n
[docs] @public\n def is_successful(self) -> bool:\n """Return whether the dbt CLI process completed successfully.\n\n Returns:\n bool: True, if the dbt CLI process returns with a zero exit code, and False otherwise.\n\n Examples:\n .. code-block:: python\n\n from dagster_dbt import DbtCliResource\n\n dbt = DbtCliResource(project_dir="/path/to/dbt/project")\n\n dbt_cli_invocation = dbt.cli(["run"], raise_on_error=False)\n\n if dbt_cli_invocation.is_successful():\n ...\n """\n return self.process.wait() == 0
\n\n
[docs] @public\n def stream(self) -> Iterator[Union[Output, AssetObservation, AssetCheckResult]]:\n """Stream the events from the dbt CLI process and convert them to Dagster events.\n\n Returns:\n Iterator[Union[Output, AssetObservation, AssetCheckResult]]: A set of corresponding Dagster events.\n - Output for refables (e.g. models, seeds, snapshots.)\n - AssetObservation for dbt test results that are not enabled as asset checks.\n - AssetCheckResult for dbt test results that are enabled as asset checks.\n\n Examples:\n .. code-block:: python\n\n from pathlib import Path\n from dagster_dbt import DbtCliResource, dbt_assets\n\n @dbt_assets(manifest=Path("target", "manifest.json"))\n def my_dbt_assets(context, dbt: DbtCliResource):\n yield from dbt.cli(["run"], context=context).stream()\n """\n for event in self.stream_raw_events():\n yield from event.to_default_asset_events(\n manifest=self.manifest, dagster_dbt_translator=self.dagster_dbt_translator\n )
\n\n
[docs] @public\n def stream_raw_events(self) -> Iterator[DbtCliEventMessage]:\n """Stream the events from the dbt CLI process.\n\n Returns:\n Iterator[DbtCliEventMessage]: An iterator of events from the dbt CLI process.\n """\n with self.process.stdout or contextlib.nullcontext():\n for raw_line in self.process.stdout or []:\n log: str = raw_line.decode().strip()\n try:\n event = DbtCliEventMessage.from_log(log=log)\n\n # Re-emit the logs from dbt CLI process into stdout.\n sys.stdout.write(str(event) + "\\n")\n sys.stdout.flush()\n\n yield event\n except:\n # If we can't parse the log, then just emit it as a raw log.\n sys.stdout.write(log + "\\n")\n sys.stdout.flush()\n\n # Ensure that the dbt CLI process has completed.\n self._raise_on_error()
\n\n
[docs] @public\n def get_artifact(\n self,\n artifact: Union[\n Literal["manifest.json"],\n Literal["catalog.json"],\n Literal["run_results.json"],\n Literal["sources.json"],\n ],\n ) -> Dict[str, Any]:\n """Retrieve a dbt artifact from the target path.\n\n See https://docs.getdbt.com/reference/artifacts/dbt-artifacts for more information.\n\n Args:\n artifact (Union[Literal["manifest.json"], Literal["catalog.json"], Literal["run_results.json"], Literal["sources.json"]]): The name of the artifact to retrieve.\n\n Returns:\n Dict[str, Any]: The artifact as a dictionary.\n\n Examples:\n .. code-block:: python\n\n from dagster_dbt import DbtCliResource\n\n dbt = DbtCliResource(project_dir="/path/to/dbt/project")\n\n dbt_cli_invocation = dbt.cli(["run"]).wait()\n\n # Retrieve the run_results.json artifact.\n run_results = dbt_cli_invocation.get_artifact("run_results.json")\n """\n artifact_path = self.target_path.joinpath(artifact)\n\n return orjson.loads(artifact_path.read_bytes())
\n\n def _raise_on_error(self) -> None:\n """Ensure that the dbt CLI process has completed. If the process has not successfully\n completed, then optionally raise an error.\n """\n if not self.is_successful() and self.raise_on_error:\n raise DagsterDbtCliRuntimeError(\n description=(\n f"The dbt CLI process failed with exit code {self.process.returncode}. Check"\n " the Dagster compute logs for the full information about the error, or view"\n f" the dbt debug log file: {self.target_path.joinpath('dbt.log')}."\n )\n )
\n\n\n
[docs]class DbtCliResource(ConfigurableResource):\n """A resource used to execute dbt CLI commands.\n\n Attributes:\n project_dir (str): The path to the dbt project directory. This directory should contain a\n `dbt_project.yml`. See https://docs.getdbt.com/reference/dbt_project.yml for more\n information.\n global_config_flags (List[str]): A list of global flags configuration to pass to the dbt CLI\n invocation. See https://docs.getdbt.com/reference/global-configs for a full list of\n configuration.\n profiles_dir (Optional[str]): The path to the directory containing your dbt `profiles.yml`.\n By default, the current working directory is used, which is the dbt project directory.\n See https://docs.getdbt.com/docs/core/connect-data-platform/connection-profiles for more\n information.\n profile (Optional[str]): The profile from your dbt `profiles.yml` to use for execution. See\n https://docs.getdbt.com/docs/core/connect-data-platform/connection-profiles for more\n information.\n target (Optional[str]): The target from your dbt `profiles.yml` to use for execution. See\n https://docs.getdbt.com/docs/core/connect-data-platform/connection-profiles for more\n information.\n\n Examples:\n Creating a dbt resource with only a reference to ``project_dir``:\n\n .. code-block:: python\n\n from dagster_dbt import DbtCliResource\n\n dbt = DbtCliResource(project_dir="/path/to/dbt/project")\n\n Creating a dbt resource with a custom ``profiles_dir``:\n\n .. code-block:: python\n\n from dagster_dbt import DbtCliResource\n\n dbt = DbtCliResource(\n project_dir="/path/to/dbt/project",\n profiles_dir="/path/to/dbt/project/profiles",\n )\n\n Creating a dbt resource with a custom ``profile`` and ``target``:\n\n .. code-block:: python\n\n from dagster_dbt import DbtCliResource\n\n dbt = DbtCliResource(\n project_dir="/path/to/dbt/project",\n profiles_dir="/path/to/dbt/project/profiles",\n profile="jaffle_shop",\n target="dev",\n )\n\n Creating a dbt resource with global configs, e.g. disabling colored logs with ``--no-use-color``:\n\n .. code-block:: python\n\n from dagster_dbt import DbtCliResource\n\n dbt = DbtCliResource(\n project_dir="/path/to/dbt/project",\n global_config_flags=["--no-use-color"],\n )\n """\n\n project_dir: str = Field(\n ...,\n description=(\n "The path to your dbt project directory. This directory should contain a"\n " `dbt_project.yml`. See https://docs.getdbt.com/reference/dbt_project.yml for more"\n " information."\n ),\n )\n global_config_flags: List[str] = Field(\n default=[],\n description=(\n "A list of global flags configuration to pass to the dbt CLI invocation. See"\n " https://docs.getdbt.com/reference/global-configs for a full list of configuration."\n ),\n )\n profiles_dir: Optional[str] = Field(\n default=None,\n description=(\n "The path to the directory containing your dbt `profiles.yml`. By default, the current"\n " working directory is used, which is the dbt project directory."\n " See https://docs.getdbt.com/docs/core/connect-data-platform/connection-profiles for "\n " more information."\n ),\n )\n profile: Optional[str] = Field(\n default=None,\n description=(\n "The profile from your dbt `profiles.yml` to use for execution. See"\n " https://docs.getdbt.com/docs/core/connect-data-platform/connection-profiles for more"\n " information."\n ),\n )\n target: Optional[str] = Field(\n default=None,\n description=(\n "The target from your dbt `profiles.yml` to use for execution. 
See"\n " https://docs.getdbt.com/docs/core/connect-data-platform/connection-profiles for more"\n " information."\n ),\n )\n\n @classmethod\n def _validate_absolute_path_exists(cls, path: Union[str, Path]) -> Path:\n absolute_path = Path(path).absolute()\n try:\n resolved_path = absolute_path.resolve(strict=True)\n except FileNotFoundError:\n raise ValueError(f"The absolute path of '{path}' ('{absolute_path}') does not exist")\n\n return resolved_path\n\n @classmethod\n def _validate_path_contains_file(cls, path: Path, file_name: str, error_message: str):\n if not path.joinpath(file_name).exists():\n raise ValueError(error_message)\n\n @validator("project_dir", "profiles_dir", pre=True)\n def convert_path_to_str(cls, v: Any) -> Any:\n """Validate that the path is converted to a string."""\n if isinstance(v, Path):\n resolved_path = cls._validate_absolute_path_exists(v)\n\n absolute_path = Path(v).absolute()\n try:\n resolved_path = absolute_path.resolve(strict=True)\n except FileNotFoundError:\n raise ValueError(f"The absolute path of '{v}' ('{absolute_path}') does not exist")\n return os.fspath(resolved_path)\n\n return v\n\n @validator("project_dir")\n def validate_project_dir(cls, project_dir: str) -> str:\n resolved_project_dir = cls._validate_absolute_path_exists(project_dir)\n\n cls._validate_path_contains_file(\n path=resolved_project_dir,\n file_name=DBT_PROJECT_YML_NAME,\n error_message=(\n f"{resolved_project_dir} does not contain a {DBT_PROJECT_YML_NAME} file. Please"\n " specify a valid path to a dbt project."\n ),\n )\n\n return os.fspath(resolved_project_dir)\n\n @validator("profiles_dir")\n def validate_profiles_dir(cls, profiles_dir: str) -> str:\n resolved_project_dir = cls._validate_absolute_path_exists(profiles_dir)\n\n cls._validate_path_contains_file(\n path=resolved_project_dir,\n file_name=DBT_PROFILES_YML_NAME,\n error_message=(\n f"{resolved_project_dir} does not contain a {DBT_PROFILES_YML_NAME} file. Please"\n " specify a valid path to a dbt profile directory."\n ),\n )\n\n return os.fspath(resolved_project_dir)\n\n @root_validator(pre=True)\n def validate_dbt_version(cls, values: Dict[str, Any]) -> Dict[str, Any]:\n """Validate that the dbt version is supported."""\n if version.parse(dbt_version) < version.parse("1.4.0"):\n raise ValueError(\n "To use `dagster_dbt.DbtCliResource`, you must use `dbt-core>=1.4.0`. Currently,"\n f" you are using `dbt-core=={dbt_version}`. Please install a compatible dbt-core"\n " version."\n )\n\n return values\n\n def _get_unique_target_path(self, *, context: Optional[OpExecutionContext]) -> Path:\n """Get a unique target path for the dbt CLI invocation.\n\n Args:\n context (Optional[OpExecutionContext]): The execution context.\n\n Returns:\n str: A unique target path for the dbt CLI invocation.\n """\n unique_id = str(uuid.uuid4())[:7]\n path = unique_id\n if context:\n path = f"{context.op.name}-{context.run_id[:7]}-{unique_id}"\n\n current_target_path = _get_dbt_target_path()\n\n return current_target_path.joinpath(path)\n\n
[docs] @public\n def cli(\n self,\n args: List[str],\n *,\n raise_on_error: bool = True,\n manifest: Optional[DbtManifestParam] = None,\n dagster_dbt_translator: Optional[DagsterDbtTranslator] = None,\n context: Optional[OpExecutionContext] = None,\n target_path: Optional[Path] = None,\n ) -> DbtCliInvocation:\n """Create a subprocess to execute a dbt CLI command.\n\n Args:\n args (List[str]): The dbt CLI command to execute.\n raise_on_error (bool): Whether to raise an exception if the dbt CLI command fails.\n manifest (Optional[Union[Mapping[str, Any], str, Path]]): The dbt manifest blob. If an\n execution context from within `@dbt_assets` is provided to the context argument,\n then the manifest provided to `@dbt_assets` will be used.\n dagster_dbt_translator (Optional[DagsterDbtTranslator]): The translator to link dbt\n nodes to Dagster assets. If an execution context from within `@dbt_assets` is\n provided to the context argument, then the dagster_dbt_translator provided to\n `@dbt_assets` will be used.\n context (Optional[OpExecutionContext]): The execution context from within `@dbt_assets`.\n target_path (Optional[Path]): An explicit path to a target folder to use to store and\n retrieve dbt artifacts when running a dbt CLI command. If not provided, a unique\n target path will be generated.\n\n Returns:\n DbtCliInvocation: A invocation instance that can be used to retrieve the output of the\n dbt CLI command.\n\n Examples:\n Streaming Dagster events for dbt asset materializations and observations:\n\n .. code-block:: python\n\n from pathlib import Path\n\n from dagster import AssetExecutionContext\n from dagster_dbt import DbtCliResource, dbt_assets\n\n\n @dbt_assets(manifest=Path("target", "manifest.json"))\n def my_dbt_assets(context: AssetExecutionContext, dbt: DbtCliResource):\n yield from dbt.cli(["run"], context=context).stream()\n\n Retrieving a dbt artifact after streaming the Dagster events:\n\n .. code-block:: python\n\n from pathlib import Path\n\n from dagster import AssetExecutionContext\n from dagster_dbt import DbtCliResource, dbt_assets\n\n\n @dbt_assets(manifest=Path("target", "manifest.json"))\n def my_dbt_assets(context: AssetExecutionContext, dbt: DbtCliResource):\n dbt_run_invocation = dbt.cli(["run"], context=context)\n\n yield from dbt_run_invocation.stream()\n\n # Retrieve the `run_results.json` dbt artifact as a dictionary:\n run_results_json = dbt_run_invocation.get_artifact("run_results.json")\n\n # Retrieve the `run_results.json` dbt artifact as a file path:\n run_results_path = dbt_run_invocation.target_path.joinpath("run_results.json")\n\n Customizing the asset materialization metadata when streaming the Dagster events:\n\n .. code-block:: python\n\n from pathlib import Path\n\n from dagster import AssetExecutionContext\n from dagster_dbt import DbtCliResource, dbt_assets\n\n\n @dbt_assets(manifest=Path("target", "manifest.json"))\n def my_dbt_assets(context: AssetExecutionContext, dbt: DbtCliResource):\n dbt_cli_invocation = dbt.cli(["run"], context=context)\n\n for dbt_event in dbt_cli_invocation.stream_raw_events():\n for dagster_event in dbt_event.to_default_asset_events(manifest=dbt_cli_invocation.manifest):\n if isinstance(dagster_event, Output):\n context.add_output_metadata(\n metadata={\n "my_custom_metadata": "my_custom_metadata_value",\n },\n output_name=dagster_event.output_name,\n )\n\n yield dagster_event\n\n Suppressing exceptions from a dbt CLI command when a non-zero exit code is returned:\n\n .. 
code-block:: python\n\n from pathlib import Path\n\n from dagster import AssetExecutionContext\n from dagster_dbt import DbtCliResource, dbt_assets\n\n\n @dbt_assets(manifest=Path("target", "manifest.json"))\n def my_dbt_assets(context: AssetExecutionContext, dbt: DbtCliResource):\n dbt_run_invocation = dbt.cli(["run"], context=context, raise_on_error=False)\n\n if dbt_run_invocation.is_successful():\n yield from dbt_run_invocation.stream()\n else:\n ...\n\n Invoking a dbt CLI command in a custom asset or op:\n\n .. code-block:: python\n\n import json\n\n from dagster import asset, op\n from dagster_dbt import DbtCliResource\n\n\n @asset\n def my_dbt_asset(dbt: DbtCliResource):\n dbt_macro_args = {"key": "value"}\n dbt.cli(["run-operation", "my-macro", json.dumps(dbt_macro_args)]).wait()\n\n\n @op\n def my_dbt_op(dbt: DbtCliResource):\n dbt_macro_args = {"key": "value"}\n dbt.cli(["run-operation", "my-macro", json.dumps(dbt_macro_args)]).wait()\n """\n target_path = target_path or self._get_unique_target_path(context=context)\n env = {\n **os.environ.copy(),\n # Run dbt with unbuffered output.\n "PYTHONUNBUFFERED": "1",\n # Disable anonymous usage statistics for performance.\n "DBT_SEND_ANONYMOUS_USAGE_STATS": "false",\n # The DBT_LOG_FORMAT environment variable must be set to `json`. We use this\n # environment variable to ensure that the dbt CLI outputs structured logs.\n "DBT_LOG_FORMAT": "json",\n # The DBT_TARGET_PATH environment variable is set to a unique value for each dbt\n # invocation so that artifact paths are separated.\n # See https://discourse.getdbt.com/t/multiple-run-results-json-and-manifest-json-files/7555\n # for more information.\n "DBT_TARGET_PATH": os.fspath(target_path),\n # The DBT_LOG_PATH environment variable is set to the same value as DBT_TARGET_PATH\n # so that logs for each dbt invocation has separate log files.\n "DBT_LOG_PATH": os.fspath(target_path),\n # The DBT_PROFILES_DIR environment variable is set to the path containing the dbt\n # profiles.yml file.\n # See https://docs.getdbt.com/docs/core/connect-data-platform/connection-profiles#advanced-customizing-a-profile-directory\n # for more information.\n **({"DBT_PROFILES_DIR": self.profiles_dir} if self.profiles_dir else {}),\n }\n\n assets_def: Optional[AssetsDefinition] = None\n with suppress(DagsterInvalidPropertyError):\n assets_def = context.assets_def if context else None\n\n selection_args: List[str] = []\n dagster_dbt_translator = dagster_dbt_translator or DagsterDbtTranslator()\n if context and assets_def is not None:\n manifest, dagster_dbt_translator = get_manifest_and_translator_from_dbt_assets(\n [assets_def]\n )\n\n # When dbt is enabled with asset checks, we turn off any indirection with dbt selection.\n # This way, the Dagster context completely determines what is executed in a dbt\n # invocation with a subsetted selection.\n if (\n version.parse(dbt_version) >= version.parse("1.5.0")\n and dagster_dbt_translator.settings.enable_asset_checks\n ):\n env["DBT_INDIRECT_SELECTION"] = "empty"\n\n selection_args = get_subset_selection_for_context(\n context=context,\n manifest=manifest,\n select=context.op.tags.get("dagster-dbt/select"),\n exclude=context.op.tags.get("dagster-dbt/exclude"),\n )\n else:\n manifest = validate_manifest(manifest) if manifest else {}\n\n # TODO: verify that args does not have any selection flags if the context and manifest\n # are passed to this function.\n profile_args: List[str] = []\n if self.profile:\n profile_args = ["--profile", self.profile]\n\n if 
self.target:\n profile_args += ["--target", self.target]\n\n args = ["dbt"] + self.global_config_flags + args + profile_args + selection_args\n project_dir = Path(self.project_dir)\n\n if not target_path.is_absolute():\n target_path = project_dir.joinpath(target_path)\n\n return DbtCliInvocation.run(\n args=args,\n env=env,\n manifest=manifest,\n dagster_dbt_translator=dagster_dbt_translator,\n project_dir=project_dir,\n target_path=target_path,\n raise_on_error=raise_on_error,\n )
\n\n\ndef get_subset_selection_for_context(\n context: OpExecutionContext,\n manifest: Mapping[str, Any],\n select: Optional[str],\n exclude: Optional[str],\n) -> List[str]:\n """Generate a dbt selection string to materialize the selected resources in a subsetted execution context.\n\n See https://docs.getdbt.com/reference/node-selection/syntax#how-does-selection-work.\n\n Args:\n context (OpExecutionContext): The execution context for the current execution step.\n select (Optional[str]): A dbt selection string to select resources to materialize.\n exclude (Optional[str]): A dbt selection string to exclude resources from materializing.\n\n Returns:\n List[str]: dbt CLI arguments to materialize the selected resources in a\n subsetted execution context.\n\n If the current execution context is not performing a subsetted execution,\n return CLI arguments composed of the inputted selection and exclusion arguments.\n """\n default_dbt_selection = []\n if select:\n default_dbt_selection += ["--select", select]\n if exclude:\n default_dbt_selection += ["--exclude", exclude]\n\n dbt_resource_props_by_output_name = get_dbt_resource_props_by_output_name(manifest)\n dbt_resource_props_by_test_name = get_dbt_resource_props_by_test_name(manifest)\n\n # TODO: this should be a property on the context if this is a permanent indicator for\n # determining whether the current execution context is performing a subsetted execution.\n is_subsetted_execution = len(context.selected_output_names) != len(\n context.assets_def.node_keys_by_output_name\n )\n if not is_subsetted_execution:\n logger.info(\n "A dbt subsetted execution is not being performed. Using the default dbt selection"\n f" arguments `{default_dbt_selection}`."\n )\n return default_dbt_selection\n\n selected_dbt_resources = []\n for output_name in context.selected_output_names:\n dbt_resource_props = dbt_resource_props_by_output_name[output_name]\n\n # Explicitly select a dbt resource by its fully qualified name (FQN).\n # https://docs.getdbt.com/reference/node-selection/methods#the-file-or-fqn-method\n fqn_selector = f"fqn:{'.'.join(dbt_resource_props['fqn'])}"\n\n selected_dbt_resources.append(fqn_selector)\n\n for _, check_name in context.selected_asset_check_keys:\n test_resource_props = dbt_resource_props_by_test_name[check_name]\n\n # Explicitly select a dbt resource by its fully qualified name (FQN).\n # https://docs.getdbt.com/reference/node-selection/methods#the-file-or-fqn-method\n fqn_selector = f"fqn:{'.'.join(test_resource_props['fqn'])}"\n\n selected_dbt_resources.append(fqn_selector)\n\n # Take the union of all the selected resources.\n # https://docs.getdbt.com/reference/node-selection/set-operators#unions\n union_selected_dbt_resources = ["--select"] + [" ".join(selected_dbt_resources)]\n\n logger.info(\n "A dbt subsetted execution is being performed. 
Overriding default dbt selection"\n f" arguments `{default_dbt_selection}` with arguments: `{union_selected_dbt_resources}`"\n )\n\n return union_selected_dbt_resources\n\n\ndef get_dbt_resource_props_by_output_name(\n manifest: Mapping[str, Any]\n) -> Mapping[str, Mapping[str, Any]]:\n node_info_by_dbt_unique_id = get_dbt_resource_props_by_dbt_unique_id_from_manifest(manifest)\n\n return {\n output_name_fn(node): node\n for node in node_info_by_dbt_unique_id.values()\n if node["resource_type"] in ASSET_RESOURCE_TYPES\n }\n\n\ndef get_dbt_resource_props_by_test_name(\n manifest: Mapping[str, Any]\n) -> Mapping[str, Mapping[str, Any]]:\n return {\n dbt_resource_props["name"]: dbt_resource_props\n for unique_id, dbt_resource_props in manifest["nodes"].items()\n if unique_id.startswith("test")\n }\n
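To make the shape of the subsetted selection arguments concrete, a small sketch with hypothetical fqn values for two selected models; it mirrors the union logic in ``get_subset_selection_for_context`` above.

.. code-block:: python

    # Hypothetical fqn values for two selected models in a project named "jaffle_shop".
    selected_dbt_resources = ["fqn:jaffle_shop.stg_customers", "fqn:jaffle_shop.orders"]

    # The union of the selected resources is passed to dbt as a single
    # space-separated --select argument.
    union_selected_dbt_resources = ["--select"] + [" ".join(selected_dbt_resources)]

    assert union_selected_dbt_resources == [
        "--select",
        "fqn:jaffle_shop.stg_customers fqn:jaffle_shop.orders",
    ]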
", "current_page_name": "_modules/dagster_dbt/core/resources_v2", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dbt.core.resources_v2"}, "types": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dbt.core.types

\nfrom typing import Any, Mapping, Optional, Sequence\n\nimport dagster._check as check\n\nfrom ..types import DbtOutput\n\n\n
[docs]class DbtCliOutput(DbtOutput):\n """The results of executing a dbt command, along with additional metadata about the dbt CLI\n process that was run.\n\n This class is deprecated, because it's only produced by methods of the DbtCliClientResource class,\n which is deprecated in favor of DbtCliResource.\n\n Note that users should not construct instances of this class directly. This class is intended\n to be constructed from the JSON output of dbt commands.\n\n Attributes:\n command (str): The full shell command that was executed.\n return_code (int): The return code of the dbt CLI process.\n raw_output (str): The raw output (``stdout``) of the dbt CLI process.\n logs (List[Dict[str, Any]]): List of parsed JSON logs produced by the dbt command.\n result (Optional[Dict[str, Any]]): Dictionary containing dbt-reported result information\n contained in run_results.json. Some dbt commands do not produce results, and will\n therefore have result = None.\n docs_url (Optional[str]): Hostname where dbt docs are being served for this project.\n """\n\n def __init__(\n self,\n command: str,\n return_code: int,\n raw_output: str,\n logs: Sequence[Mapping[str, Any]],\n result: Mapping[str, Any],\n docs_url: Optional[str] = None,\n ):\n self._command = check.str_param(command, "command")\n self._return_code = check.int_param(return_code, "return_code")\n self._raw_output = check.str_param(raw_output, "raw_output")\n self._logs = check.sequence_param(logs, "logs", of_type=dict)\n self._docs_url = check.opt_str_param(docs_url, "docs_url")\n super().__init__(result)\n\n @property\n def command(self) -> str:\n return self._command\n\n @property\n def return_code(self) -> int:\n return self._return_code\n\n @property\n def raw_output(self) -> str:\n return self._raw_output\n\n @property\n def logs(self) -> Sequence[Mapping[str, Any]]:\n return self._logs\n\n @property\n def docs_url(self) -> Optional[str]:\n return self._docs_url
\n
", "current_page_name": "_modules/dagster_dbt/core/types", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dbt.core.types"}}, "dagster_dbt_translator": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dbt.dagster_dbt_translator

\nfrom dataclasses import dataclass\nfrom typing import Any, Mapping, Optional\n\nfrom dagster import AssetKey, AutoMaterializePolicy, FreshnessPolicy\nfrom dagster._annotations import public\nfrom dagster._core.definitions.events import (\n    CoercibleToAssetKeyPrefix,\n    check_opt_coercible_to_asset_key_prefix_param,\n)\n\nfrom .asset_utils import (\n    default_asset_key_fn,\n    default_auto_materialize_policy_fn,\n    default_description_fn,\n    default_freshness_policy_fn,\n    default_group_from_dbt_resource_props,\n    default_metadata_from_dbt_resource_props,\n)\n\n\n
[docs]@dataclass(frozen=True)\nclass DagsterDbtTranslatorSettings:\n """Settings to enable Dagster features for your dbt project.\n\n Args:\n enable_asset_checks (bool): Whether to load dbt tests as Dagster asset checks.\n Defaults to False.\n """\n\n enable_asset_checks: bool = False
\n\n\n
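A short sketch of opting in to asset checks via the settings object and passing the resulting translator to ``@dbt_assets``. It assumes ``DagsterDbtTranslatorSettings`` and ``dbt_assets`` are importable from the top-level ``dagster_dbt`` package; the manifest path is a placeholder.

.. code-block:: python

    from pathlib import Path

    from dagster_dbt import (
        DagsterDbtTranslator,
        DagsterDbtTranslatorSettings,
        DbtCliResource,
        dbt_assets,
    )

    # Load dbt tests as Dagster asset checks by enabling the setting.
    dagster_dbt_translator = DagsterDbtTranslator(
        settings=DagsterDbtTranslatorSettings(enable_asset_checks=True)
    )

    @dbt_assets(
        manifest=Path("target", "manifest.json"),
        dagster_dbt_translator=dagster_dbt_translator,
    )
    def my_dbt_assets(context, dbt: DbtCliResource):
        yield from dbt.cli(["build"], context=context).stream()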
[docs]class DagsterDbtTranslator:\n """Holds a set of methods that derive Dagster asset definition metadata given a representation\n of a dbt resource (models, tests, sources, etc.).\n\n This class is exposed so that methods can be overridden to customize how Dagster asset metadata\n is derived.\n """\n\n def __init__(self, settings: Optional[DagsterDbtTranslatorSettings] = None):\n """Initialize the translator.\n\n Args:\n settings (Optional[DagsterDbtTranslatorSettings]): Settings for the translator.\n """\n self._settings = settings or DagsterDbtTranslatorSettings()\n\n @property\n def settings(self) -> DagsterDbtTranslatorSettings:\n if not hasattr(self, "_settings"):\n self._settings = DagsterDbtTranslatorSettings()\n\n return self._settings\n\n
[docs] @classmethod\n @public\n def get_asset_key(cls, dbt_resource_props: Mapping[str, Any]) -> AssetKey:\n """A function that takes a dictionary representing properties of a dbt resource, and\n returns the Dagster asset key that represents that resource.\n\n Note that a dbt resource is unrelated to Dagster's resource concept, and simply represents\n a model, seed, snapshot or source in a given dbt project. You can learn more about dbt\n resources and the properties available in this dictionary here:\n https://docs.getdbt.com/reference/artifacts/manifest-json#resource-details\n\n This method can be overridden to provide a custom asset key for a dbt resource.\n\n Args:\n dbt_resource_props (Mapping[str, Any]): A dictionary representing the dbt resource.\n\n Returns:\n AssetKey: The Dagster asset key for the dbt resource.\n\n Examples:\n Adding a prefix to the default asset key generated for each dbt resource:\n\n .. code-block:: python\n\n from typing import Any, Mapping\n\n from dagster import AssetKey\n from dagster_dbt import DagsterDbtTranslator\n\n\n class CustomDagsterDbtTranslator(DagsterDbtTranslator):\n @classmethod\n def get_asset_key(cls, dbt_resource_props: Mapping[str, Any]) -> AssetKey:\n return super().get_asset_key(dbt_resource_props).with_prefix("prefix")\n\n Adding a prefix to the default asset key generated for each dbt resource, but only for dbt sources:\n\n .. code-block:: python\n\n from typing import Any, Mapping\n\n from dagster import AssetKey\n from dagster_dbt import DagsterDbtTranslator\n\n\n class CustomDagsterDbtTranslator(DagsterDbtTranslator):\n @classmethod\n def get_asset_key(cls, dbt_resource_props: Mapping[str, Any]) -> AssetKey:\n asset_key = super().get_asset_key(dbt_resource_props)\n\n if dbt_resource_props["resource_type"] == "source":\n asset_key = asset_key.with_prefix("my_prefix")\n\n return asset_key\n """\n return default_asset_key_fn(dbt_resource_props)
\n\n
[docs] @classmethod\n @public\n def get_description(cls, dbt_resource_props: Mapping[str, Any]) -> str:\n """A function that takes a dictionary representing properties of a dbt resource, and\n returns the Dagster description for that resource.\n\n Note that a dbt resource is unrelated to Dagster's resource concept, and simply represents\n a model, seed, snapshot or source in a given dbt project. You can learn more about dbt\n resources and the properties available in this dictionary here:\n https://docs.getdbt.com/reference/artifacts/manifest-json#resource-details\n\n This method can be overridden to provide a custom description for a dbt resource.\n\n Args:\n dbt_resource_props (Mapping[str, Any]): A dictionary representing the dbt resource.\n\n Returns:\n str: The description for the dbt resource.\n\n Examples:\n .. code-block:: python\n\n from typing import Any, Mapping\n\n from dagster_dbt import DagsterDbtTranslator\n\n\n class CustomDagsterDbtTranslator(DagsterDbtTranslator):\n @classmethod\n def get_description(cls, dbt_resource_props: Mapping[str, Any]) -> str:\n return "custom description"\n """\n return default_description_fn(dbt_resource_props)
\n\n
[docs] @classmethod\n @public\n def get_metadata(cls, dbt_resource_props: Mapping[str, Any]) -> Mapping[str, Any]:\n """A function that takes a dictionary representing properties of a dbt resource, and\n returns the Dagster metadata for that resource.\n\n Note that a dbt resource is unrelated to Dagster's resource concept, and simply represents\n a model, seed, snapshot or source in a given dbt project. You can learn more about dbt\n resources and the properties available in this dictionary here:\n https://docs.getdbt.com/reference/artifacts/manifest-json#resource-details\n\n This method can be overridden to provide a custom metadata for a dbt resource.\n\n Args:\n dbt_resource_props (Mapping[str, Any]): A dictionary representing the dbt resource.\n\n Returns:\n Mapping[str, Any]: A dictionary representing the Dagster metadata for the dbt resource.\n\n Examples:\n .. code-block:: python\n\n from typing import Any, Mapping\n\n from dagster_dbt import DagsterDbtTranslator\n\n\n class CustomDagsterDbtTranslator(DagsterDbtTranslator):\n @classmethod\n def get_metadata(cls, dbt_resource_props: Mapping[str, Any]) -> Mapping[str, Any]:\n return {"custom": "metadata"}\n """\n return default_metadata_from_dbt_resource_props(dbt_resource_props)
\n\n
[docs] @classmethod\n @public\n def get_group_name(cls, dbt_resource_props: Mapping[str, Any]) -> Optional[str]:\n """A function that takes a dictionary representing properties of a dbt resource, and\n returns the Dagster group name for that resource.\n\n Note that a dbt resource is unrelated to Dagster's resource concept, and simply represents\n a model, seed, snapshot or source in a given dbt project. You can learn more about dbt\n resources and the properties available in this dictionary here:\n https://docs.getdbt.com/reference/artifacts/manifest-json#resource-details\n\n This method can be overridden to provide a custom group name for a dbt resource.\n\n Args:\n dbt_resource_props (Mapping[str, Any]): A dictionary representing the dbt resource.\n\n Returns:\n Optional[str]: A Dagster group name.\n\n Examples:\n .. code-block:: python\n\n from typing import Any, Mapping\n\n from dagster_dbt import DagsterDbtTranslator\n\n\n class CustomDagsterDbtTranslator(DagsterDbtTranslator):\n @classmethod\n def get_group_name(cls, dbt_resource_props: Mapping[str, Any]) -> Optional[str]:\n return "custom_group_prefix" + dbt_resource_props.get("config", {}).get("group")\n """\n return default_group_from_dbt_resource_props(dbt_resource_props)
\n\n
[docs] @classmethod\n @public\n def get_freshness_policy(\n cls, dbt_resource_props: Mapping[str, Any]\n ) -> Optional[FreshnessPolicy]:\n """A function that takes a dictionary representing properties of a dbt resource, and\n returns the Dagster :py:class:`dagster.FreshnessPolicy` for that resource.\n\n Note that a dbt resource is unrelated to Dagster's resource concept, and simply represents\n a model, seed, snapshot or source in a given dbt project. You can learn more about dbt\n resources and the properties available in this dictionary here:\n https://docs.getdbt.com/reference/artifacts/manifest-json#resource-details\n\n This method can be overridden to provide a custom freshness policy for a dbt resource.\n\n Args:\n dbt_resource_props (Mapping[str, Any]): A dictionary representing the dbt resource.\n\n Returns:\n Optional[FreshnessPolicy]: A Dagster freshness policy.\n\n Examples:\n Set a custom freshness policy for all dbt resources:\n\n .. code-block:: python\n\n from typing import Any, Mapping\n\n from dagster_dbt import DagsterDbtTranslator\n\n\n class CustomDagsterDbtTranslator(DagsterDbtTranslator):\n @classmethod\n def get_freshness_policy(cls, dbt_resource_props: Mapping[str, Any]) -> Optional[FreshnessPolicy]:\n return FreshnessPolicy(maximum_lag_minutes=60)\n\n Set a custom freshness policy for dbt resources with a specific tag:\n\n .. code-block:: python\n\n from typing import Any, Mapping\n\n from dagster_dbt import DagsterDbtTranslator\n\n\n class CustomDagsterDbtTranslator(DagsterDbtTranslator):\n @classmethod\n def get_freshness_policy(cls, dbt_resource_props: Mapping[str, Any]) -> Optional[FreshnessPolicy]:\n freshness_policy = None\n if "my_custom_tag" in dbt_resource_props.get("tags", []):\n freshness_policy = FreshnessPolicy(maximum_lag_minutes=60)\n\n return freshness_policy\n """\n return default_freshness_policy_fn(dbt_resource_props)
\n\n
[docs] @classmethod\n @public\n def get_auto_materialize_policy(\n cls, dbt_resource_props: Mapping[str, Any]\n ) -> Optional[AutoMaterializePolicy]:\n """A function that takes a dictionary representing properties of a dbt resource, and\n returns the Dagster :py:class:`dagster.AutoMaterializePolicy` for that resource.\n\n Note that a dbt resource is unrelated to Dagster's resource concept, and simply represents\n a model, seed, snapshot or source in a given dbt project. You can learn more about dbt\n resources and the properties available in this dictionary here:\n https://docs.getdbt.com/reference/artifacts/manifest-json#resource-details\n\n This method can be overridden to provide a custom auto-materialize policy for a dbt resource.\n\n Args:\n dbt_resource_props (Mapping[str, Any]): A dictionary representing the dbt resource.\n\n Returns:\n Optional[AutoMaterializePolicy]: A Dagster auto-materialize policy.\n\n Examples:\n Set a custom auto-materialize policy for all dbt resources:\n\n .. code-block:: python\n\n from typing import Any, Mapping\n\n from dagster_dbt import DagsterDbtTranslator\n\n\n class CustomDagsterDbtTranslator(DagsterDbtTranslator):\n @classmethod\n def get_auto_materialize_policy(cls, dbt_resource_props: Mapping[str, Any]) -> Optional[AutoMaterializePolicy]:\n return AutoMaterializePolicy.eager()\n\n Set a custom auto-materialize policy for dbt resources with a specific tag:\n\n .. code-block:: python\n\n from typing import Any, Mapping\n\n from dagster_dbt import DagsterDbtTranslator\n\n\n class CustomDagsterDbtTranslator(DagsterDbtTranslator):\n @classmethod\n def get_auto_materialize_policy(cls, dbt_resource_props: Mapping[str, Any]) -> Optional[AutoMaterializePolicy]:\n auto_materialize_policy = None\n if "my_custom_tag" in dbt_resource_props.get("tags", []):\n auto_materialize_policy = AutoMaterializePolicy.eager()\n\n return auto_materialize_policy\n\n """\n return default_auto_materialize_policy_fn(dbt_resource_props)
\n\n\nclass KeyPrefixDagsterDbtTranslator(DagsterDbtTranslator):\n """A DagsterDbtTranslator that applies prefixes to the asset keys generated from dbt resources.\n\n Attributes:\n asset_key_prefix (Optional[Union[str, Sequence[str]]]): A prefix to apply to all dbt models,\n seeds, snapshots, etc. This will *not* apply to dbt sources.\n source_asset_key_prefix (Optional[Union[str, Sequence[str]]]): A prefix to apply to all dbt\n sources.\n """\n\n def __init__(\n self,\n asset_key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n source_asset_key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n *args,\n **kwargs,\n ):\n self._asset_key_prefix = (\n check_opt_coercible_to_asset_key_prefix_param(asset_key_prefix, "asset_key_prefix")\n or []\n )\n self._source_asset_key_prefix = (\n check_opt_coercible_to_asset_key_prefix_param(\n source_asset_key_prefix, "source_asset_key_prefix"\n )\n or []\n )\n\n super().__init__(*args, **kwargs)\n\n @public\n def get_asset_key(self, dbt_resource_props: Mapping[str, Any]) -> AssetKey:\n base_key = default_asset_key_fn(dbt_resource_props)\n if dbt_resource_props["resource_type"] == "source":\n return base_key.with_prefix(self._source_asset_key_prefix)\n else:\n return base_key.with_prefix(self._asset_key_prefix)\n\n\n@dataclass\nclass DbtManifestWrapper:\n manifest: Mapping[str, Any]\n
", "current_page_name": "_modules/dagster_dbt/dagster_dbt_translator", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dbt.dagster_dbt_translator"}, "dbt_manifest_asset_selection": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dbt.dbt_manifest_asset_selection

\nfrom typing import AbstractSet, Optional\n\nfrom dagster import (\n    AssetKey,\n    AssetSelection,\n    _check as check,\n)\nfrom dagster._core.definitions.asset_graph import AssetGraph\n\nfrom .asset_utils import is_non_asset_node\nfrom .dagster_dbt_translator import DagsterDbtTranslator\nfrom .dbt_manifest import DbtManifestParam, validate_manifest\nfrom .utils import (\n    ASSET_RESOURCE_TYPES,\n    get_dbt_resource_props_by_dbt_unique_id_from_manifest,\n    select_unique_ids_from_manifest,\n)\n\n\n
[docs]class DbtManifestAssetSelection(AssetSelection):\n """Defines a selection of assets from a dbt manifest wrapper and a dbt selection string.\n\n Args:\n manifest (Mapping[str, Any]): The dbt manifest blob.\n select (str): A dbt selection string to specify a set of dbt resources.\n exclude (Optional[str]): A dbt selection string to exclude a set of dbt resources.\n\n Examples:\n .. code-block:: python\n\n import json\n from pathlib import Path\n\n from dagster_dbt import DbtManifestAssetSelection\n\n manifest = json.loads(Path("path/to/manifest.json").read_text())\n\n # select the dbt assets that have the tag "foo".\n my_selection = DbtManifestAssetSelection(manifest=manifest, select="tag:foo")\n """\n\n def __init__(\n self,\n manifest: DbtManifestParam,\n select: str = "fqn:*",\n *,\n dagster_dbt_translator: Optional[DagsterDbtTranslator] = None,\n exclude: Optional[str] = None,\n ) -> None:\n self.manifest = validate_manifest(manifest)\n self.select = check.str_param(select, "select")\n self.exclude = check.opt_str_param(exclude, "exclude", default="")\n self.dagster_dbt_translator = check.opt_inst_param(\n dagster_dbt_translator,\n "dagster_dbt_translator",\n DagsterDbtTranslator,\n DagsterDbtTranslator(),\n )\n\n def resolve_inner(self, asset_graph: AssetGraph) -> AbstractSet[AssetKey]:\n dbt_nodes = get_dbt_resource_props_by_dbt_unique_id_from_manifest(self.manifest)\n\n keys = set()\n for unique_id in select_unique_ids_from_manifest(\n select=self.select,\n exclude=self.exclude,\n manifest_json=self.manifest,\n ):\n dbt_resource_props = dbt_nodes[unique_id]\n is_dbt_asset = dbt_resource_props["resource_type"] in ASSET_RESOURCE_TYPES\n if is_dbt_asset and not is_non_asset_node(dbt_resource_props):\n asset_key = self.dagster_dbt_translator.get_asset_key(dbt_resource_props)\n keys.add(asset_key)\n\n return keys
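Building on the example in the docstring above, a sketch that also uses the keyword-only ``exclude`` argument; the tag names and manifest path are illustrative.

.. code-block:: python

    import json
    from pathlib import Path

    from dagster_dbt import DbtManifestAssetSelection

    manifest = json.loads(Path("path/to/manifest.json").read_text())

    # Select dbt assets tagged "foo" while excluding those also tagged "deprecated".
    my_selection = DbtManifestAssetSelection(
        manifest=manifest,
        select="tag:foo",
        exclude="tag:deprecated",
    )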
\n
", "current_page_name": "_modules/dagster_dbt/dbt_manifest_asset_selection", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dbt.dbt_manifest_asset_selection"}, "dbt_resource": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dbt.dbt_resource

\nimport logging\nfrom abc import abstractmethod\nfrom typing import Any, Mapping, Optional, Sequence\n\nfrom dagster import get_dagster_logger\n\nfrom .types import DbtOutput\n\n\nclass DbtClient:\n    """Base class for a client allowing users to interface with dbt."""\n\n    def __init__(\n        self,\n        logger: Optional[logging.Logger] = None,\n    ):\n        """Constructor.\n\n        Args:\n            logger (Optional[logging.Logger]): A property for injecting a logger dependency.\n                Default is ``None``.\n        """\n        self._logger = logger or get_dagster_logger()\n\n    def _format_params(\n        self, flags: Mapping[str, Any], replace_underscores: bool = False\n    ) -> Mapping[str, Any]:\n        """Reformats arguments that are easier to express as a list into the format that dbt expects,\n        and deletes any keys with no value.\n        """\n        # remove any keys with a value of None\n        if replace_underscores:\n            flags = {k.replace("_", "-"): v for k, v in flags.items() if v is not None}\n        else:\n            flags = {k: v for k, v in flags.items() if v is not None}\n\n        for param in ["select", "exclude", "models"]:\n            if param in flags:\n                if isinstance(flags[param], list):\n                    # if it's a list, format as space-separated\n                    flags[param] = " ".join(set(flags[param]))\n\n        return flags\n\n    @property\n    def logger(self) -> logging.Logger:\n        """logging.Logger: A property for injecting a logger dependency."""\n        return self._logger\n\n    @abstractmethod\n    def compile(\n        self,\n        models: Optional[Sequence[str]] = None,\n        exclude: Optional[Sequence[str]] = None,\n        **kwargs,\n    ) -> DbtOutput:\n        """Run the ``compile`` command on a dbt project. kwargs are passed in as additional parameters.\n\n        Args:\n            models (List[str], optional): the models to include in compilation.\n            exclude (List[str], optional): the models to exclude from compilation.\n\n        Returns:\n            DbtOutput: object containing parsed output from dbt\n        """\n\n    @abstractmethod\n    def run(\n        self,\n        models: Optional[Sequence[str]] = None,\n        exclude: Optional[Sequence[str]] = None,\n        **kwargs,\n    ) -> DbtOutput:\n        """Run the ``run`` command on a dbt project. kwargs are passed in as additional parameters.\n\n        Args:\n            models (List[str], optional): the models to include in the run.\n            exclude (List[str], optional): the models to exclude from the run.\n\n        Returns:\n            DbtOutput: object containing parsed output from dbt\n        """\n\n    @abstractmethod\n    def snapshot(\n        self,\n        select: Optional[Sequence[str]] = None,\n        exclude: Optional[Sequence[str]] = None,\n        **kwargs,\n    ) -> DbtOutput:\n        """Run the ``snapshot`` command on a dbt project. 
kwargs are passed in as additional parameters.\n\n        Args:\n            select (List[str], optional): the snapshots to include in the run.\n            exclude (List[str], optional): the snapshots to exclude from the run.\n\n        Returns:\n            DbtOutput: object containing parsed output from dbt\n        """\n\n    @abstractmethod\n    def test(\n        self,\n        models: Optional[Sequence[str]] = None,\n        exclude: Optional[Sequence[str]] = None,\n        data: bool = True,\n        schema: bool = True,\n        **kwargs,\n    ) -> DbtOutput:\n        """Run the ``test`` command on a dbt project. kwargs are passed in as additional parameters.\n\n        Args:\n            models (List[str], optional): the models to include in testing.\n            exclude (List[str], optional): the models to exclude from testing.\n            data (bool, optional): If ``True`` (default), then run data tests.\n            schema (bool, optional): If ``True`` (default), then run schema tests.\n\n        Returns:\n            DbtOutput: object containing parsed output from dbt\n        """\n\n    @abstractmethod\n    def seed(\n        self,\n        show: bool = False,\n        select: Optional[Sequence[str]] = None,\n        exclude: Optional[Sequence[str]] = None,\n        **kwargs,\n    ) -> DbtOutput:\n        """Run the ``seed`` command on a dbt project. kwargs are passed in as additional parameters.\n\n        Args:\n            show (bool, optional): If ``True``, then show a sample of the seeded data in the\n                response. Defaults to ``False``.\n            select (List[str], optional): the snapshots to include in the run.\n            exclude (List[str], optional): the snapshots to exclude from the run.\n\n\n        Returns:\n            DbtOutput: object containing parsed output from dbt\n        """\n\n    @abstractmethod\n    def ls(\n        self,\n        select: Optional[Sequence[str]] = None,\n        models: Optional[Sequence[str]] = None,\n        exclude: Optional[Sequence[str]] = None,\n        **kwargs,\n    ) -> DbtOutput:\n        """Run the ``ls`` command on a dbt project. kwargs are passed in as additional parameters.\n\n        Args:\n            select (List[str], optional): the resources to include in the output.\n            models (List[str], optional): the models to include in the output.\n            exclude (List[str], optional): the resources to exclude from the output.\n\n\n        Returns:\n            DbtOutput: object containing parsed output from dbt\n        """\n\n    @abstractmethod\n    def build(self, select: Optional[Sequence[str]] = None, **kwargs) -> DbtOutput:\n        """Run the ``build`` command on a dbt project. kwargs are passed in as additional parameters.\n\n        Args:\n            select (List[str], optional): the models/resources to include in the run.\n\n        Returns:\n            DbtOutput: object containing parsed output from dbt\n        """\n        raise NotImplementedError()\n\n    @abstractmethod\n    def generate_docs(self, compile_project: bool = False, **kwargs) -> DbtOutput:\n        """Run the ``docs generate`` command on a dbt project. 
kwargs are passed in as additional parameters.\n\n        Args:\n            compile_project (bool, optional): If true, compile the project before generating a catalog.\n\n        Returns:\n            DbtOutput: object containing parsed output from dbt\n        """\n\n    @abstractmethod\n    def run_operation(\n        self, macro: str, args: Optional[Mapping[str, Any]] = None, **kwargs\n    ) -> DbtOutput:\n        """Run the ``run-operation`` command on a dbt project. kwargs are passed in as additional parameters.\n\n        Args:\n            macro (str): the dbt macro to invoke.\n            args (Dict[str, Any], optional): the keyword arguments to be supplied to the macro.\n\n        Returns:\n            DbtOutput: object containing parsed output from dbt\n        """\n\n    @abstractmethod\n    def get_run_results_json(self, **kwargs) -> Optional[Mapping[str, Any]]:\n        """Get a parsed version of the run_results.json file for the relevant dbt project.\n\n        Returns:\n            Dict[str, Any]: dictionary containing the parsed contents of the run_results json file\n                for this dbt project.\n        """\n\n    @abstractmethod\n    def get_manifest_json(self, **kwargs) -> Optional[Mapping[str, Any]]:\n        """Get a parsed version of the manifest.json file for the relevant dbt project.\n\n        Returns:\n            Dict[str, Any]: dictionary containing the parsed contents of the manifest json file\n                for this dbt project.\n        """\n\n\n
[docs]class DbtResource(DbtClient):\n pass
\n
", "current_page_name": "_modules/dagster_dbt/dbt_resource", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dbt.dbt_resource"}, "errors": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dbt.errors

\nimport warnings\nfrom abc import ABC\nfrom typing import Any, Mapping, Optional, Sequence\n\nfrom dagster import (\n    DagsterInvariantViolationError,\n    Failure,\n    MetadataValue,\n    _check as check,\n)\n\n\n
[docs]class DagsterDbtError(Failure, ABC):\n """The base exception of the ``dagster-dbt`` library."""
\n\n\n
[docs]class DagsterDbtCliUnexpectedOutputError(DagsterDbtError):\n """Represents an error when parsing the output of a dbt CLI command."""\n\n invalid_line_nos: Sequence[int]\n\n def __init__(self, invalid_line_nos: Sequence[int]):\n check.sequence_param(invalid_line_nos, "invalid_line_nos", int)\n line_nos_str = ", ".join(map(str, invalid_line_nos))\n description = f"dbt CLI emitted unexpected output on lines {line_nos_str}"\n metadata = {\n "Invalid CLI Output Line Numbers": MetadataValue.json({"line_nos": invalid_line_nos})\n }\n super().__init__(description, metadata=metadata)\n self.invalid_line_nos = invalid_line_nos
\n\n\n
[docs]class DagsterDbtCliRuntimeError(DagsterDbtError, ABC):\n """Represents an error while executing a dbt CLI command."""\n\n def __init__(\n self,\n description: str,\n logs: Optional[Sequence[Mapping[str, Any]]] = None,\n raw_output: Optional[str] = None,\n messages: Optional[Sequence[str]] = None,\n ):\n if logs is not None:\n warnings.warn(\n "`logs` is a deprecated argument to DagsterDbtCliRuntimeError and will be discarded"\n )\n if raw_output is not None:\n warnings.warn(\n "`raw_output` is a deprecated argument to DagsterDbtCliRuntimeError and will be"\n " discarded"\n )\n metadata = {"Parsed CLI Messages": "\\n".join(messages or [])}\n super().__init__(description, metadata=metadata)
\n\n\n
[docs]class DagsterDbtCliHandledRuntimeError(DagsterDbtCliRuntimeError):\n """Represents a model error reported by the dbt CLI at runtime (return code 1)."""\n\n def __init__(\n self,\n logs: Optional[Sequence[Mapping[str, Any]]] = None,\n raw_output: Optional[str] = None,\n messages: Optional[Sequence[str]] = None,\n ):\n super().__init__("Handled error in the dbt CLI (return code 1)", logs, raw_output, messages)
\n\n\n
[docs]class DagsterDbtCliFatalRuntimeError(DagsterDbtCliRuntimeError):\n """Represents a fatal error in the dbt CLI (return code 2)."""\n\n def __init__(\n self,\n logs: Optional[Sequence[Mapping[str, Any]]] = None,\n raw_output: Optional[str] = None,\n messages: Optional[Sequence[str]] = None,\n ):\n super().__init__(\n "Fatal error in the dbt CLI (return code 2): " + " ".join(messages or []),\n logs,\n raw_output,\n messages,\n )
\n\n\n
[docs]class DagsterDbtCliOutputsNotFoundError(DagsterDbtError):\n """Represents a problem in finding the ``target/run_results.json`` artifact when executing a dbt\n CLI command.\n\n For more details on ``target/run_results.json``, see\n https://docs.getdbt.com/reference/dbt-artifacts#run_resultsjson.\n """\n\n def __init__(self, path: str):\n super().__init__(f"Expected to find file at path {path}")
\n\n\nclass DagsterDbtCloudJobInvariantViolationError(DagsterDbtError, DagsterInvariantViolationError):\n """Represents an error when a dbt Cloud job is not supported by the ``dagster-dbt`` library."""\n
", "current_page_name": "_modules/dagster_dbt/errors", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dbt.errors"}, "ops": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dbt.ops

\nfrom typing import Any, Dict, List, Optional\n\nfrom dagster import Config, In, Nothing, Out, Output, op\nfrom pydantic import Field\n\nfrom .types import DbtOutput\nfrom .utils import generate_events, generate_materializations\n\n_DEFAULT_OP_PROPS: Dict[str, Any] = dict(\n    required_resource_keys={"dbt"},\n    ins={"start_after": In(Nothing)},\n    out=Out(DbtOutput, description="Parsed output from running the dbt command."),\n    tags={"kind": "dbt"},\n)\n\n\ndef _get_doc(op_name: str, dbt_command: str) -> str:\n    return f"""\nThis op executes a ``dbt {dbt_command}`` command. It requires the use of a dbt resource, which can be\nset to execute this command through the CLI (using the :py:class:`~dagster_dbt.dbt_cli_resource`).\n\nExamples:\n\n.. code-block:: python\n\n    from dagster import job\n    from dagster_dbt import {op_name}, dbt_cli_resource\n\n    @job(resource_defs={{"dbt":dbt_cli_resource}})\n    def my_dbt_cli_job():\n        {op_name}()\n    """\n\n\n# NOTE: mypy fails to properly track the type of `_DEFAULT_OP_PROPS` items when they are\n# double-splatted, so we type-ignore the below op declarations.\n\n\nclass DbtBuildOpConfig(Config):\n    yield_asset_events: bool = Field(\n        default=True,\n        description=(\n            "If True, materializations and asset observations corresponding to the results of "\n            "the dbt operation will be yielded when the op executes. Default: True"\n        ),\n    )\n    asset_key_prefix: List[str] = Field(\n        default=["dbt"],\n        description=(\n            "If provided and yield_materializations is True, these components will be used to "\n            "prefix the generated asset keys."\n        ),\n    )\n\n\n@op(**_DEFAULT_OP_PROPS)\ndef dbt_build_op(context, config: DbtBuildOpConfig) -> Any:\n    dbt_output = context.resources.dbt.build()\n    if config.yield_asset_events and "results" in dbt_output.result:\n        yield from generate_events(\n            dbt_output,\n            node_info_to_asset_key=lambda info: config.asset_key_prefix\n            + info["unique_id"].split("."),\n            manifest_json=context.resources.dbt.get_manifest_json(),\n        )\n    yield Output(dbt_output)\n\n\nclass DbtRunOpConfig(Config):\n    yield_materializations: bool = Field(\n        default=True,\n        description=(\n            "If True, materializations corresponding to the results of the dbt operation will "\n            "be yielded when the op executes. Default: True"\n        ),\n    )\n    asset_key_prefix: Optional[List[str]] = Field(\n        default=["dbt"],\n        description=(\n            "If provided and yield_materializations is True, these components will be used to "\n            "prefix the generated asset keys."\n        ),\n    )\n\n\n
[docs]@op(**_DEFAULT_OP_PROPS)\ndef dbt_run_op(context, config: DbtRunOpConfig):\n dbt_output = context.resources.dbt.run()\n if config.yield_materializations and "results" in dbt_output.result:\n yield from generate_materializations(dbt_output, asset_key_prefix=config.asset_key_prefix)\n yield Output(dbt_output)
\n\n\n
[docs]@op(**_DEFAULT_OP_PROPS)\ndef dbt_compile_op(context):\n return context.resources.dbt.compile()
\n\n\n
[docs]@op(**_DEFAULT_OP_PROPS)\ndef dbt_ls_op(context):\n return context.resources.dbt.ls()
\n\n\n
[docs]@op(**_DEFAULT_OP_PROPS)\ndef dbt_test_op(context):\n return context.resources.dbt.test()
\n\n\n
[docs]@op(**_DEFAULT_OP_PROPS)\ndef dbt_snapshot_op(context):\n return context.resources.dbt.snapshot()
\n\n\n
[docs]@op(**_DEFAULT_OP_PROPS)\ndef dbt_seed_op(context):\n return context.resources.dbt.seed()
\n\n\n
[docs]@op(**_DEFAULT_OP_PROPS)\ndef dbt_docs_generate_op(context):\n return context.resources.dbt.generate_docs()
\n\n\nfor dbt_op, cmd in [\n (dbt_build_op, "build"),\n (dbt_run_op, "run"),\n (dbt_compile_op, "compile"),\n (dbt_ls_op, "ls"),\n (dbt_test_op, "test"),\n (dbt_snapshot_op, "snapshot"),\n (dbt_seed_op, "seed"),\n (dbt_docs_generate_op, "docs generate"),\n]:\n dbt_op.__doc__ = _get_doc(dbt_op.name, cmd)\n
", "current_page_name": "_modules/dagster_dbt/ops", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dbt.ops"}, "types": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dbt.types

\nfrom typing import Any, Mapping, Optional\n\nimport dagster._check as check\n\n\n
[docs]class DbtOutput:\n """Base class for both DbtCliOutput and DbtRPCOutput. Contains a single field, `result`, which\n represents the dbt-formatted result of the command that was run (if any).\n\n Used internally, should not be instantiated directly by the user.\n """\n\n def __init__(self, result: Mapping[str, Any]):\n self._result = check.mapping_param(result, "result", key_type=str)\n\n @property\n def result(self) -> Mapping[str, Any]:\n return self._result\n\n @property\n def docs_url(self) -> Optional[str]:\n return None
\n
", "current_page_name": "_modules/dagster_dbt/types", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dbt.types"}, "utils": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_dbt.utils

\nfrom pathlib import Path\nfrom typing import (\n    AbstractSet,\n    Any,\n    Callable,\n    Dict,\n    Iterator,\n    Mapping,\n    Optional,\n    Sequence,\n    Union,\n    cast,\n)\n\nimport dateutil\nfrom dagster import (\n    AssetKey,\n    AssetMaterialization,\n    AssetObservation,\n    MetadataValue,\n    Output,\n    _check as check,\n)\nfrom dagster._core.definitions.metadata import RawMetadataValue\n\nfrom .types import DbtOutput\n\n# dbt resource types that may be considered assets\nASSET_RESOURCE_TYPES = ["model", "seed", "snapshot"]\n\n\ndef default_node_info_to_asset_key(node_info: Mapping[str, Any]) -> AssetKey:\n    return AssetKey(node_info["unique_id"].split("."))\n\n\ndef _resource_type(unique_id: str) -> str:\n    # returns the type of the node (e.g. model, test, snapshot)\n    return unique_id.split(".")[0]\n\n\ndef input_name_fn(dbt_resource_props: Mapping[str, Any]) -> str:\n    # * can be present when sources are sharded tables\n    return dbt_resource_props["unique_id"].replace(".", "_").replace("*", "_star")\n\n\ndef output_name_fn(dbt_resource_props: Mapping[str, Any]) -> str:\n    # hyphens are valid in dbt model names, but not in output names\n    return dbt_resource_props["unique_id"].split(".")[-1].replace("-", "_")\n\n\ndef _node_result_to_metadata(node_result: Mapping[str, Any]) -> Mapping[str, RawMetadataValue]:\n    return {\n        "Materialization Strategy": node_result["config"]["materialized"],\n        "Database": node_result["database"],\n        "Schema": node_result["schema"],\n        "Alias": node_result["alias"],\n        "Description": node_result["description"],\n    }\n\n\ndef _timing_to_metadata(timings: Sequence[Mapping[str, Any]]) -> Mapping[str, RawMetadataValue]:\n    metadata: Dict[str, RawMetadataValue] = {}\n    for timing in timings:\n        if timing["name"] == "execute":\n            desc = "Execution"\n        elif timing["name"] == "compile":\n            desc = "Compilation"\n        else:\n            continue\n\n        # dateutil does not properly expose its modules to static checkers\n        started_at = dateutil.parser.isoparse(timing["started_at"])  # type: ignore\n        completed_at = dateutil.parser.isoparse(timing["completed_at"])  # type: ignore\n        duration = completed_at - started_at\n        metadata.update(\n            {\n                f"{desc} Started At": started_at.isoformat(timespec="seconds"),\n                f"{desc} Completed At": started_at.isoformat(timespec="seconds"),\n                f"{desc} Duration": duration.total_seconds(),\n            }\n        )\n    return metadata\n\n\ndef result_to_events(\n    result: Mapping[str, Any],\n    docs_url: Optional[str] = None,\n    node_info_to_asset_key: Optional[Callable[[Mapping[str, Any]], AssetKey]] = None,\n    manifest_json: Optional[Mapping[str, Any]] = None,\n    extra_metadata: Optional[Mapping[str, RawMetadataValue]] = None,\n    generate_asset_outputs: bool = False,\n) -> Iterator[Union[AssetMaterialization, AssetObservation, Output]]:\n    """This is a hacky solution that attempts to consolidate parsing many of the potential formats\n    that dbt can provide its results in. 
This is known to work for CLI Outputs for dbt versions 0.18+,\n    as well as RPC responses for a similar time period, but as the RPC response schema is not documented\n    nor enforced, this can become out of date easily.\n    """\n    node_info_to_asset_key = check.opt_callable_param(\n        node_info_to_asset_key, "node_info_to_asset_key", default=default_node_info_to_asset_key\n    )\n\n    # status comes from set of fields rather than "status"\n    if "fail" in result:\n        status = (\n            "fail"\n            if result.get("fail")\n            else "skip" if result.get("skip") else "error" if result.get("error") else "success"\n        )\n    else:\n        status = result["status"]\n\n    # all versions represent timing the same way\n    metadata = {"Status": status, "Execution Time (seconds)": result["execution_time"]}\n    metadata.update(_timing_to_metadata(result["timing"]))\n\n    # working with a response that contains the node block (RPC and CLI 0.18.x)\n    if "node" in result:\n        unique_id = result["node"]["unique_id"]\n        metadata.update(_node_result_to_metadata(result["node"]))\n    else:\n        unique_id = result["unique_id"]\n\n    if docs_url:\n        metadata["docs_url"] = MetadataValue.url(f"{docs_url}#!/model/{unique_id}")\n\n    if extra_metadata:\n        metadata.update(extra_metadata)\n\n    # if you have a manifest available, get the full node info, otherwise just populate unique_id\n    dbt_resource_props = (\n        manifest_json["nodes"][unique_id] if manifest_json else {"unique_id": unique_id}\n    )\n\n    node_resource_type = _resource_type(unique_id)\n\n    if node_resource_type in ASSET_RESOURCE_TYPES and status == "success":\n        if generate_asset_outputs:\n            yield Output(\n                value=None,\n                output_name=output_name_fn(dbt_resource_props),\n                metadata=metadata,\n            )\n        else:\n            yield AssetMaterialization(\n                asset_key=node_info_to_asset_key(dbt_resource_props),\n                description=f"dbt node: {unique_id}",\n                metadata=metadata,\n            )\n    # can only associate tests with assets if we have manifest_json available\n    elif node_resource_type == "test" and manifest_json and status != "skipped":\n        upstream_unique_ids = manifest_json["nodes"][unique_id]["depends_on"]["nodes"]\n        # tests can apply to multiple asset keys\n        for upstream_id in upstream_unique_ids:\n            # the upstream id can reference a node or a source\n            dbt_resource_props = manifest_json["nodes"].get(upstream_id) or manifest_json[\n                "sources"\n            ].get(upstream_id)\n            if dbt_resource_props is None:\n                continue\n            upstream_asset_key = node_info_to_asset_key(dbt_resource_props)\n            yield AssetObservation(\n                asset_key=upstream_asset_key,\n                metadata={\n                    "Test ID": result["unique_id"],\n                    "Test Status": status,\n                    "Test Message": result.get("message") or "",\n                },\n            )\n\n\ndef generate_events(\n    dbt_output: DbtOutput,\n    node_info_to_asset_key: Optional[Callable[[Mapping[str, Any]], AssetKey]] = None,\n    manifest_json: Optional[Mapping[str, Any]] = None,\n) -> Iterator[Union[AssetMaterialization, AssetObservation]]:\n    """This function yields :py:class:`dagster.AssetMaterialization` events for each model updated by\n    a dbt 
command, and :py:class:`dagster.AssetObservation` events for each test run.\n\n    Information parsed from a :py:class:`~DbtOutput` object.\n    """\n    for result in dbt_output.result["results"]:\n        for event in result_to_events(\n            result,\n            docs_url=dbt_output.docs_url,\n            node_info_to_asset_key=node_info_to_asset_key,\n            manifest_json=manifest_json,\n        ):\n            yield check.inst(\n                cast(Union[AssetMaterialization, AssetObservation], event),\n                (AssetMaterialization, AssetObservation),\n            )\n\n\n
[docs]def generate_materializations(\n dbt_output: DbtOutput,\n asset_key_prefix: Optional[Sequence[str]] = None,\n) -> Iterator[AssetMaterialization]:\n """This function yields :py:class:`dagster.AssetMaterialization` events for each model updated by\n a dbt command.\n\n Information parsed from a :py:class:`~DbtOutput` object.\n\n Examples:\n .. code-block:: python\n\n from dagster import job, op, Output\n from dagster_dbt.utils import generate_materializations\n from dagster_dbt import dbt_cli_resource\n\n @op(required_resource_keys={"dbt"})\n def my_custom_dbt_run(context):\n dbt_output = context.resources.dbt.run()\n for materialization in generate_materializations(dbt_output):\n # you can modify the materialization object to add extra metadata, if desired\n yield materialization\n yield Output(dbt_output)\n\n @job(resource_defs={"dbt": dbt_cli_resource})\n def my_dbt_cli_job():\n my_custom_dbt_run()\n """\n asset_key_prefix = check.opt_sequence_param(asset_key_prefix, "asset_key_prefix", of_type=str)\n\n for event in generate_events(\n dbt_output,\n node_info_to_asset_key=lambda info: AssetKey(\n asset_key_prefix + info["unique_id"].split(".")\n ),\n ):\n yield check.inst(cast(AssetMaterialization, event), AssetMaterialization)
\n\n\ndef select_unique_ids_from_manifest(\n select: str,\n exclude: str,\n state_path: Optional[str] = None,\n manifest_json_path: Optional[str] = None,\n manifest_json: Optional[Mapping[str, Any]] = None,\n manifest_parsed: Optional[Any] = None,\n) -> AbstractSet[str]:\n """Method to apply a selection string to an existing manifest.json file."""\n import dbt.graph.cli as graph_cli\n import dbt.graph.selector as graph_selector\n from dbt.contracts.graph.manifest import Manifest, WritableManifest\n from dbt.contracts.state import PreviousState\n from dbt.graph.selector_spec import IndirectSelection, SelectionSpec\n from networkx import DiGraph\n\n if state_path is not None:\n previous_state = PreviousState(\n path=Path(state_path), # type: ignore # (unused path, slated for deletion)\n current_path=( # type: ignore # (unused path, slated for deletion)\n Path("/tmp/null") if manifest_json_path is None else Path(manifest_json_path)\n ),\n )\n else:\n previous_state = None\n\n if manifest_json_path is not None:\n manifest = WritableManifest.read_and_check_versions(manifest_json_path)\n child_map = manifest.child_map\n elif manifest_json is not None:\n\n class _DictShim(dict):\n """Shim to enable hydrating a dictionary into a dot-accessible object."""\n\n def __getattr__(self, item):\n ret = super().get(item)\n # allow recursive access e.g. foo.bar.baz\n return _DictShim(ret) if isinstance(ret, dict) else ret\n\n manifest = Manifest(\n # dbt expects dataclasses that can be accessed with dot notation, not bare dictionaries\n nodes={\n unique_id: _DictShim(info) for unique_id, info in manifest_json["nodes"].items() # type: ignore\n },\n sources={\n unique_id: _DictShim(info) for unique_id, info in manifest_json["sources"].items() # type: ignore\n },\n metrics={\n unique_id: _DictShim(info) for unique_id, info in manifest_json["metrics"].items() # type: ignore\n },\n exposures={\n unique_id: _DictShim(info) for unique_id, info in manifest_json["exposures"].items() # type: ignore\n },\n )\n child_map = manifest_json["child_map"]\n elif manifest_parsed is not None:\n manifest = manifest_parsed\n child_map = manifest.child_map\n else:\n check.failed("Must provide either a manifest_json_path, manifest_json, or manifest_parsed.")\n graph = graph_selector.Graph(DiGraph(incoming_graph_data=child_map))\n\n # create a parsed selection from the select string\n try:\n from dbt.flags import GLOBAL_FLAGS\n except ImportError:\n # dbt < 1.5.0 compat\n import dbt.flags as GLOBAL_FLAGS\n setattr(GLOBAL_FLAGS, "INDIRECT_SELECTION", IndirectSelection.Eager)\n setattr(GLOBAL_FLAGS, "WARN_ERROR", True)\n parsed_spec: SelectionSpec = graph_cli.parse_union([select], True)\n\n if exclude:\n parsed_spec = graph_cli.SelectionDifference(\n components=[parsed_spec, graph_cli.parse_union([exclude], True)]\n )\n\n # execute this selection against the graph\n selector = graph_selector.NodeSelector(graph, manifest, previous_state=previous_state)\n selected, _ = selector.select_nodes(parsed_spec)\n return selected\n\n\ndef get_dbt_resource_props_by_dbt_unique_id_from_manifest(\n manifest: Mapping[str, Any]\n) -> Mapping[str, Mapping[str, Any]]:\n """A mapping of a dbt node's unique id to the node's dictionary representation in the manifest."""\n return {\n **manifest["nodes"],\n **manifest["sources"],\n **manifest["exposures"],\n **manifest["metrics"],\n }\n
", "current_page_name": "_modules/dagster_dbt/utils", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_dbt.utils"}}, "dagster_docker": {"docker_executor": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_docker.docker_executor

\nfrom typing import Iterator, Optional, cast\n\nimport dagster._check as check\nimport docker\nimport docker.errors\nfrom dagster import Field, IntSource, executor\nfrom dagster._annotations import experimental\nfrom dagster._core.definitions.executor_definition import multiple_process_executor_requirements\nfrom dagster._core.events import DagsterEvent, EngineEventData\nfrom dagster._core.execution.retries import RetryMode, get_retries_config\nfrom dagster._core.execution.tags import get_tag_concurrency_limits_config\nfrom dagster._core.executor.base import Executor\nfrom dagster._core.executor.init import InitExecutorContext\nfrom dagster._core.executor.step_delegating import StepDelegatingExecutor\nfrom dagster._core.executor.step_delegating.step_handler.base import (\n    CheckStepHealthResult,\n    StepHandler,\n    StepHandlerContext,\n)\nfrom dagster._core.origin import JobPythonOrigin\nfrom dagster._core.utils import parse_env_var\nfrom dagster._grpc.types import ExecuteStepArgs\nfrom dagster._serdes.utils import hash_str\nfrom dagster._utils.merger import merge_dicts\n\nfrom dagster_docker.utils import DOCKER_CONFIG_SCHEMA, validate_docker_config, validate_docker_image\n\nfrom .container_context import DockerContainerContext\n\n\n
[docs]@executor(\n name="docker",\n config_schema=merge_dicts(\n DOCKER_CONFIG_SCHEMA,\n {\n "retries": get_retries_config(),\n "max_concurrent": Field(\n IntSource,\n is_required=False,\n description=(\n "Limit on the number of containers that will run concurrently within the scope "\n "of a Dagster run. Note that this limit is per run, not global."\n ),\n ),\n "tag_concurrency_limits": get_tag_concurrency_limits_config(),\n },\n ),\n requirements=multiple_process_executor_requirements(),\n)\n@experimental\ndef docker_executor(init_context: InitExecutorContext) -> Executor:\n """Executor which launches steps as Docker containers.\n\n To use the `docker_executor`, set it as the `executor_def` when defining a job:\n\n .. literalinclude:: ../../../../../../python_modules/libraries/dagster-docker/dagster_docker_tests/test_example_executor.py\n :start-after: start_marker\n :end-before: end_marker\n :language: python\n\n Then you can configure the executor with run config as follows:\n\n .. code-block:: YAML\n\n execution:\n config:\n registry: ...\n network: ...\n networks: ...\n container_kwargs: ...\n\n If you're using the DockerRunLauncher, configuration set on the containers created by the run\n launcher will also be set on the containers that are created for each step.\n """\n config = init_context.executor_config\n image = check.opt_str_elem(config, "image")\n registry = check.opt_dict_elem(config, "registry", key_type=str)\n env_vars = check.opt_list_elem(config, "env_vars", of_type=str)\n network = check.opt_str_elem(config, "network")\n networks = check.opt_list_elem(config, "networks", of_type=str)\n container_kwargs = check.opt_dict_elem(config, "container_kwargs", key_type=str)\n retries = check.dict_elem(config, "retries", key_type=str)\n max_concurrent = check.opt_int_elem(config, "max_concurrent")\n tag_concurrency_limits = check.opt_list_elem(config, "tag_concurrency_limits")\n\n validate_docker_config(network, networks, container_kwargs)\n\n if network and not networks:\n networks = [network]\n\n container_context = DockerContainerContext(\n registry=registry,\n env_vars=env_vars or [],\n networks=networks or [],\n container_kwargs=container_kwargs,\n )\n\n return StepDelegatingExecutor(\n DockerStepHandler(image, container_context),\n retries=check.not_none(RetryMode.from_config(retries)),\n max_concurrent=max_concurrent,\n tag_concurrency_limits=tag_concurrency_limits,\n )
\n\n\nclass DockerStepHandler(StepHandler):\n def __init__(\n self,\n image: Optional[str],\n container_context: DockerContainerContext,\n ):\n super().__init__()\n\n self._image = check.opt_str_param(image, "image")\n self._container_context = check.inst_param(\n container_context, "container_context", DockerContainerContext\n )\n\n def _get_image(self, step_handler_context: StepHandlerContext):\n from . import DockerRunLauncher\n\n image = cast(\n JobPythonOrigin, step_handler_context.dagster_run.job_code_origin\n ).repository_origin.container_image\n if not image:\n image = self._image\n\n run_launcher = step_handler_context.instance.run_launcher\n\n if not image and isinstance(run_launcher, DockerRunLauncher):\n image = run_launcher.image\n\n if not image:\n raise Exception("No docker image specified by the executor config or repository")\n\n return image\n\n def _get_docker_container_context(self, step_handler_context: StepHandlerContext):\n # This doesn't vary per step: would be good to have a hook where it can be set once\n # for the whole StepHandler but we need access to the DagsterRun for that\n\n from .docker_run_launcher import DockerRunLauncher\n\n run_launcher = step_handler_context.instance.run_launcher\n run_target = DockerContainerContext.create_for_run(\n step_handler_context.dagster_run,\n run_launcher if isinstance(run_launcher, DockerRunLauncher) else None,\n )\n\n merged_container_context = run_target.merge(self._container_context)\n\n validate_docker_config(\n network=None,\n networks=merged_container_context.networks,\n container_kwargs=merged_container_context.container_kwargs,\n )\n\n return merged_container_context\n\n @property\n def name(self) -> str:\n return "DockerStepHandler"\n\n def _get_client(self, docker_container_context: DockerContainerContext):\n client = docker.client.from_env()\n if docker_container_context.registry:\n client.login(\n registry=docker_container_context.registry["url"],\n username=docker_container_context.registry["username"],\n password=docker_container_context.registry["password"],\n )\n return client\n\n def _get_container_name(self, execute_step_args: ExecuteStepArgs):\n run_id = execute_step_args.run_id\n step_keys_to_execute = check.not_none(execute_step_args.step_keys_to_execute)\n assert len(step_keys_to_execute) == 1, "Launching multiple steps is not currently supported"\n step_key = step_keys_to_execute[0]\n\n step_name = f"dagster-step-{hash_str(run_id + step_key)}"\n\n if execute_step_args.known_state:\n retry_state = execute_step_args.known_state.get_retry_state()\n retry_number = retry_state.get_attempt_count(step_key)\n if retry_number:\n step_name = f"{step_name}-{retry_number}"\n\n return step_name\n\n def _create_step_container(\n self,\n client,\n container_context,\n step_image,\n step_handler_context: StepHandlerContext,\n ):\n execute_step_args = step_handler_context.execute_step_args\n step_keys_to_execute = check.not_none(execute_step_args.step_keys_to_execute)\n assert len(step_keys_to_execute) == 1, "Launching multiple steps is not currently supported"\n step_key = step_keys_to_execute[0]\n\n env_vars = dict([parse_env_var(env_var) for env_var in container_context.env_vars])\n env_vars["DAGSTER_RUN_JOB_NAME"] = step_handler_context.dagster_run.job_name\n env_vars["DAGSTER_RUN_STEP_KEY"] = step_key\n return client.containers.create(\n step_image,\n name=self._get_container_name(execute_step_args),\n detach=True,\n network=container_context.networks[0] if len(container_context.networks) else None,\n 
command=execute_step_args.get_command_args(),\n environment=env_vars,\n **container_context.container_kwargs,\n )\n\n def launch_step(self, step_handler_context: StepHandlerContext) -> Iterator[DagsterEvent]:\n container_context = self._get_docker_container_context(step_handler_context)\n\n client = self._get_client(container_context)\n\n step_image = self._get_image(step_handler_context)\n validate_docker_image(step_image)\n\n try:\n step_container = self._create_step_container(\n client, container_context, step_image, step_handler_context\n )\n except docker.errors.ImageNotFound:\n client.images.pull(step_image)\n step_container = self._create_step_container(\n client, container_context, step_image, step_handler_context\n )\n\n if len(container_context.networks) > 1:\n for network_name in container_context.networks[1:]:\n network = client.networks.get(network_name)\n network.connect(step_container)\n\n step_keys_to_execute = check.not_none(\n step_handler_context.execute_step_args.step_keys_to_execute\n )\n assert len(step_keys_to_execute) == 1, "Launching multiple steps is not currently supported"\n step_key = step_keys_to_execute[0]\n\n yield DagsterEvent.step_worker_starting(\n step_handler_context.get_step_context(step_key),\n message="Launching step in Docker container.",\n metadata={\n "Docker container id": step_container.id,\n },\n )\n step_container.start()\n\n def check_step_health(self, step_handler_context: StepHandlerContext) -> CheckStepHealthResult:\n container_context = self._get_docker_container_context(step_handler_context)\n\n client = self._get_client(container_context)\n\n container_name = self._get_container_name(step_handler_context.execute_step_args)\n\n container = client.containers.get(container_name)\n\n if container.status == "running":\n return CheckStepHealthResult.healthy()\n\n try:\n container_info = container.wait(timeout=0.1)\n except Exception as e:\n raise Exception(\n f"Container status is {container.status}. Raised exception attempting to get its"\n " return code."\n ) from e\n\n ret_code = container_info.get("StatusCode")\n if ret_code == 0:\n return CheckStepHealthResult.healthy()\n\n return CheckStepHealthResult.unhealthy(\n reason=f"Container status is {container.status}. Return code is {ret_code}."\n )\n\n def terminate_step(self, step_handler_context: StepHandlerContext) -> Iterator[DagsterEvent]:\n container_context = self._get_docker_container_context(step_handler_context)\n\n step_keys_to_execute = check.not_none(\n step_handler_context.execute_step_args.step_keys_to_execute\n )\n assert (\n len(step_keys_to_execute) == 1\n ), "Terminating multiple steps is not currently supported"\n step_key = step_keys_to_execute[0]\n\n container_name = self._get_container_name(step_handler_context.execute_step_args)\n\n yield DagsterEvent.engine_event(\n step_handler_context.get_step_context(step_key),\n message=f"Stopping Docker container {container_name} for step.",\n event_specific_data=EngineEventData(),\n )\n\n client = self._get_client(container_context)\n\n container = client.containers.get(container_name)\n\n container.stop()\n
", "current_page_name": "_modules/dagster_docker/docker_executor", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_docker.docker_executor"}, "docker_run_launcher": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_docker.docker_run_launcher

\nfrom typing import Any, Mapping, Optional\n\nimport dagster._check as check\nimport docker\nfrom dagster._core.launcher.base import (\n    CheckRunHealthResult,\n    LaunchRunContext,\n    ResumeRunContext,\n    RunLauncher,\n    WorkerStatus,\n)\nfrom dagster._core.storage.dagster_run import DagsterRun\nfrom dagster._core.storage.tags import DOCKER_IMAGE_TAG\nfrom dagster._core.utils import parse_env_var\nfrom dagster._grpc.types import ExecuteRunArgs, ResumeRunArgs\nfrom dagster._serdes import ConfigurableClass\nfrom dagster._serdes.config_class import ConfigurableClassData\nfrom typing_extensions import Self\n\nfrom dagster_docker.utils import DOCKER_CONFIG_SCHEMA, validate_docker_config, validate_docker_image\n\nfrom .container_context import DockerContainerContext\n\nDOCKER_CONTAINER_ID_TAG = "docker/container_id"\n\n\n
[docs]class DockerRunLauncher(RunLauncher, ConfigurableClass):\n """Launches runs in a Docker container."""\n\n def __init__(\n self,\n inst_data: Optional[ConfigurableClassData] = None,\n image=None,\n registry=None,\n env_vars=None,\n network=None,\n networks=None,\n container_kwargs=None,\n ):\n self._inst_data = inst_data\n self.image = image\n self.registry = registry\n self.env_vars = env_vars\n\n validate_docker_config(network, networks, container_kwargs)\n\n if network:\n self.networks = [network]\n elif networks:\n self.networks = networks\n else:\n self.networks = []\n\n self.container_kwargs = check.opt_dict_param(\n container_kwargs, "container_kwargs", key_type=str\n )\n\n super().__init__()\n\n @property\n def inst_data(self):\n return self._inst_data\n\n @classmethod\n def config_type(cls):\n return DOCKER_CONFIG_SCHEMA\n\n @classmethod\n def from_config_value(\n cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any]\n ) -> Self:\n return DockerRunLauncher(inst_data=inst_data, **config_value)\n\n def get_container_context(self, dagster_run: DagsterRun) -> DockerContainerContext:\n return DockerContainerContext.create_for_run(dagster_run, self)\n\n def _get_client(self, container_context: DockerContainerContext):\n client = docker.client.from_env()\n if container_context.registry:\n client.login(\n registry=container_context.registry["url"],\n username=container_context.registry["username"],\n password=container_context.registry["password"],\n )\n return client\n\n def _get_docker_image(self, job_code_origin):\n docker_image = job_code_origin.repository_origin.container_image\n\n if not docker_image:\n docker_image = self.image\n\n if not docker_image:\n raise Exception("No docker image specified by the instance config or repository")\n\n validate_docker_image(docker_image)\n return docker_image\n\n def _launch_container_with_command(self, run, docker_image, command):\n container_context = self.get_container_context(run)\n docker_env = dict([parse_env_var(env_var) for env_var in container_context.env_vars])\n docker_env["DAGSTER_RUN_JOB_NAME"] = run.job_name\n\n client = self._get_client(container_context)\n\n try:\n container = client.containers.create(\n image=docker_image,\n command=command,\n detach=True,\n environment=docker_env,\n network=container_context.networks[0] if len(container_context.networks) else None,\n **container_context.container_kwargs,\n )\n\n except docker.errors.ImageNotFound:\n client.images.pull(docker_image)\n container = client.containers.create(\n image=docker_image,\n command=command,\n detach=True,\n environment=docker_env,\n network=container_context.networks[0] if len(container_context.networks) else None,\n **container_context.container_kwargs,\n )\n\n if len(container_context.networks) > 1:\n for network_name in container_context.networks[1:]:\n network = client.networks.get(network_name)\n network.connect(container)\n\n self._instance.report_engine_event(\n message=f"Launching run in a new container {container.id} with image {docker_image}",\n dagster_run=run,\n cls=self.__class__,\n )\n\n self._instance.add_run_tags(\n run.run_id,\n {DOCKER_CONTAINER_ID_TAG: container.id, DOCKER_IMAGE_TAG: docker_image},\n )\n\n container.start()\n\n def launch_run(self, context: LaunchRunContext) -> None:\n run = context.dagster_run\n job_code_origin = check.not_none(context.job_code_origin)\n docker_image = self._get_docker_image(job_code_origin)\n\n command = ExecuteRunArgs(\n job_origin=job_code_origin,\n run_id=run.run_id,\n 
instance_ref=self._instance.get_ref(),\n ).get_command_args()\n\n self._launch_container_with_command(run, docker_image, command)\n\n @property\n def supports_resume_run(self):\n return True\n\n def resume_run(self, context: ResumeRunContext) -> None:\n run = context.dagster_run\n job_code_origin = check.not_none(context.job_code_origin)\n docker_image = self._get_docker_image(job_code_origin)\n\n command = ResumeRunArgs(\n job_origin=job_code_origin,\n run_id=run.run_id,\n instance_ref=self._instance.get_ref(),\n ).get_command_args()\n\n self._launch_container_with_command(run, docker_image, command)\n\n def _get_container(self, run):\n if not run or run.is_finished:\n return None\n\n container_id = run.tags.get(DOCKER_CONTAINER_ID_TAG)\n\n if not container_id:\n return None\n\n container_context = self.get_container_context(run)\n\n try:\n return self._get_client(container_context).containers.get(container_id)\n except Exception:\n return None\n\n def terminate(self, run_id):\n run = self._instance.get_run_by_id(run_id)\n\n if not run:\n return False\n\n self._instance.report_run_canceling(run)\n\n container = self._get_container(run)\n\n if not container:\n self._instance.report_engine_event(\n message="Unable to get docker container to send termination request to.",\n dagster_run=run,\n cls=self.__class__,\n )\n return False\n\n container.stop()\n\n return True\n\n @property\n def supports_check_run_worker_health(self):\n return True\n\n def check_run_worker_health(self, run: DagsterRun):\n container = self._get_container(run)\n if container is None:\n return CheckRunHealthResult(WorkerStatus.NOT_FOUND)\n if container.status == "running":\n return CheckRunHealthResult(WorkerStatus.RUNNING)\n return CheckRunHealthResult(\n WorkerStatus.FAILED, msg=f"Container status is {container.status}"\n )
\n
", "current_page_name": "_modules/dagster_docker/docker_run_launcher", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_docker.docker_run_launcher"}, "ops": {"docker_container_op": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_docker.ops.docker_container_op

\nfrom typing import Any, Mapping, Optional, Sequence\n\nimport docker\nfrom dagster import Field, In, Nothing, OpExecutionContext, StringSource, op\nfrom dagster._annotations import experimental\nfrom dagster._core.utils import parse_env_var\nfrom dagster._serdes.utils import hash_str\n\nfrom ..container_context import DockerContainerContext\nfrom ..docker_run_launcher import DockerRunLauncher\nfrom ..utils import DOCKER_CONFIG_SCHEMA, validate_docker_image\n\nDOCKER_CONTAINER_OP_CONFIG = {\n    **DOCKER_CONFIG_SCHEMA,\n    "image": Field(\n        StringSource,\n        is_required=True,\n        description="The image in which to run the Docker container.",\n    ),\n    "entrypoint": Field(\n        [str],\n        is_required=False,\n        description="The ENTRYPOINT for the Docker container",\n    ),\n    "command": Field(\n        [str],\n        is_required=False,\n        description="The command to run in the container within the launched Docker container.",\n    ),\n}\n\n\ndef _get_client(docker_container_context: DockerContainerContext):\n    client = docker.client.from_env()\n    if docker_container_context.registry:\n        client.login(\n            registry=docker_container_context.registry["url"],\n            username=docker_container_context.registry["username"],\n            password=docker_container_context.registry["password"],\n        )\n    return client\n\n\ndef _get_container_name(run_id, op_name, retry_number):\n    container_name = hash_str(run_id + op_name)\n\n    if retry_number > 0:\n        container_name = f"{container_name}-{retry_number}"\n\n    return container_name\n\n\ndef _create_container(\n    op_context: OpExecutionContext,\n    client,\n    container_context: DockerContainerContext,\n    image: str,\n    entrypoint: Optional[Sequence[str]],\n    command: Optional[Sequence[str]],\n):\n    env_vars = dict([parse_env_var(env_var) for env_var in container_context.env_vars])\n    return client.containers.create(\n        image,\n        name=_get_container_name(op_context.run_id, op_context.op.name, op_context.retry_number),\n        detach=True,\n        network=container_context.networks[0] if len(container_context.networks) else None,\n        entrypoint=entrypoint,\n        command=command,\n        environment=env_vars,\n        **container_context.container_kwargs,\n    )\n\n\n
[docs]@experimental\ndef execute_docker_container(\n context: OpExecutionContext,\n image: str,\n entrypoint: Optional[Sequence[str]] = None,\n command: Optional[Sequence[str]] = None,\n networks: Optional[Sequence[str]] = None,\n registry: Optional[Mapping[str, str]] = None,\n env_vars: Optional[Sequence[str]] = None,\n container_kwargs: Optional[Mapping[str, Any]] = None,\n):\n """This function is a utility for executing a Docker container from within a Dagster op.\n\n Args:\n image (str): The image to use for the launched Docker container.\n entrypoint (Optional[Sequence[str]]): The ENTRYPOINT to run in the launched Docker\n container. Default: None.\n command (Optional[Sequence[str]]): The CMD to run in the launched Docker container.\n Default: None.\n networks (Optional[Sequence[str]]): Names of the Docker networks to which to connect the\n launched container. Default: None.\n registry (Optional[Mapping[str, str]]): Information for using a non-local/public Docker\n registry. Can have "url", "username", or "password" keys.\n env_vars (Optional[Sequence[str]]): List of environment variables to include in the launched\n container. Each can be of the form KEY=VALUE or just KEY (in which case the value will be\n pulled from the calling environment).\n container_kwargs (Optional[Dict[str, Any]]): key-value pairs that can be passed into\n containers.create in the Docker Python API. See\n https://docker-py.readthedocs.io/en/stable/containers.html for the full list\n of available options.\n """\n run_container_context = DockerContainerContext.create_for_run(\n context.dagster_run,\n (\n context.instance.run_launcher\n if isinstance(context.instance.run_launcher, DockerRunLauncher)\n else None\n ),\n )\n\n validate_docker_image(image)\n\n op_container_context = DockerContainerContext(\n registry=registry, env_vars=env_vars, networks=networks, container_kwargs=container_kwargs\n )\n\n container_context = run_container_context.merge(op_container_context)\n\n client = _get_client(container_context)\n\n try:\n container = _create_container(\n context, client, container_context, image, entrypoint, command\n )\n except docker.errors.ImageNotFound:\n client.images.pull(image)\n container = _create_container(\n context, client, container_context, image, entrypoint, command\n )\n\n if len(container_context.networks) > 1:\n for network_name in container_context.networks[1:]:\n network = client.networks.get(network_name)\n network.connect(container)\n\n container.start()\n\n for line in container.logs(stdout=True, stderr=True, stream=True, follow=True):\n print(line) # noqa: T201\n\n exit_status = container.wait()["StatusCode"]\n\n if exit_status != 0:\n raise Exception(f"Docker container returned exit code {exit_status}")
\n\n\n
[docs]@op(ins={"start_after": In(Nothing)}, config_schema=DOCKER_CONTAINER_OP_CONFIG)\n@experimental\ndef docker_container_op(context):\n """An op that runs a Docker container using the docker Python API.\n\n Contrast with the `docker_executor`, which runs each Dagster op in a Dagster job in its\n own Docker container.\n\n This op may be useful when:\n - You need to orchestrate a command that isn't a Dagster op (or isn't written in Python)\n - You want to run the rest of a Dagster job using a specific executor, and only a single\n op in docker.\n\n For example:\n\n .. literalinclude:: ../../../../../../python_modules/libraries/dagster-docker/dagster_docker_tests/test_example_docker_container_op.py\n :start-after: start_marker\n :end-before: end_marker\n :language: python\n\n You can create your own op with the same implementation by calling the `execute_docker_container` function\n inside your own op.\n """\n execute_docker_container(context, **context.op_config)
\n
", "current_page_name": "_modules/dagster_docker/ops/docker_container_op", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_docker.ops.docker_container_op"}}, "pipes": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_docker.pipes

\nfrom contextlib import contextmanager\nfrom typing import Any, Iterator, Mapping, Optional, Sequence, Union\n\nimport docker\nfrom dagster import (\n    OpExecutionContext,\n    ResourceParam,\n    _check as check,\n)\nfrom dagster._annotations import experimental\nfrom dagster._core.pipes.client import (\n    PipesClient,\n    PipesClientCompletedInvocation,\n    PipesContextInjector,\n    PipesMessageReader,\n)\nfrom dagster._core.pipes.context import (\n    PipesMessageHandler,\n)\nfrom dagster._core.pipes.utils import (\n    PipesEnvContextInjector,\n    extract_message_or_forward_to_stdout,\n    open_pipes_session,\n)\nfrom dagster_pipes import (\n    DagsterPipesError,\n    PipesDefaultMessageWriter,\n    PipesExtras,\n    PipesParams,\n)\n\n\n
[docs]@experimental\nclass PipesDockerLogsMessageReader(PipesMessageReader):\n @contextmanager\n def read_messages(\n self,\n handler: PipesMessageHandler,\n ) -> Iterator[PipesParams]:\n self._handler = handler\n try:\n yield {PipesDefaultMessageWriter.STDIO_KEY: PipesDefaultMessageWriter.STDERR}\n finally:\n self._handler = None\n\n def consume_docker_logs(self, container) -> None:\n handler = check.not_none(\n self._handler, "Can only consume logs within context manager scope."\n )\n for log_line in container.logs(stdout=True, stderr=True, stream=True, follow=True):\n if isinstance(log_line, bytes):\n log_entry = log_line.decode("utf-8")\n elif isinstance(log_line, str):\n log_entry = log_line\n else:\n continue\n\n extract_message_or_forward_to_stdout(handler, log_entry)\n\n def no_messages_debug_text(self) -> str:\n return "Attempted to read messages by extracting them from docker logs directly."
\n\n\n@experimental\nclass _PipesDockerClient(PipesClient):\n """A pipes client that runs external processes in docker containers.\n\n By default context is injected via environment variables and messages are parsed out of the\n log stream, with other logs forwarded to stdout of the orchestration process.\n\n Args:\n env (Optional[Mapping[str, str]]): An optional dict of environment variables to pass to the\n container.\n register (Optional[Mapping[str, str]]): An optional dict of registry credentials to login to\n the docker client.\n context_injector (Optional[PipesContextInjector]): A context injector to use to inject\n context into the docker container process. Defaults to :py:class:`PipesEnvContextInjector`.\n message_reader (Optional[PipesContextInjector]): A message reader to use to read messages\n from the docker container process. Defaults to :py:class:`DockerLogsMessageReader`.\n """\n\n def __init__(\n self,\n env: Optional[Mapping[str, str]] = None,\n registry: Optional[Mapping[str, str]] = None,\n context_injector: Optional[PipesContextInjector] = None,\n message_reader: Optional[PipesMessageReader] = None,\n ):\n self.env = check.opt_mapping_param(env, "env", key_type=str, value_type=str)\n self.registry = check.opt_mapping_param(registry, "registry", key_type=str, value_type=str)\n self.context_injector = (\n check.opt_inst_param(\n context_injector,\n "context_injector",\n PipesContextInjector,\n )\n or PipesEnvContextInjector()\n )\n\n self.message_reader = (\n check.opt_inst_param(message_reader, "message_reader", PipesMessageReader)\n or PipesDockerLogsMessageReader()\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def run(\n self,\n *,\n context: OpExecutionContext,\n image: str,\n extras: Optional[PipesExtras] = None,\n command: Optional[Union[str, Sequence[str]]] = None,\n env: Optional[Mapping[str, str]] = None,\n registry: Optional[Mapping[str, str]] = None,\n container_kwargs: Optional[Mapping[str, Any]] = None,\n ) -> PipesClientCompletedInvocation:\n """Create a docker container and run it to completion, enriched with the pipes protocol.\n\n Args:\n image (str):\n The image for the container to use.\n command (Optional[Union[str, Sequence[str]]]):\n The command for the container use.\n env (Optional[Mapping[str,str]]):\n A mapping of environment variable names to values to set on the first\n container in the pod spec, on top of those configured on resource.\n registry (Optional[Mapping[str, str]]:\n A mapping containing url, username, and password to be used\n with docker client login.\n container_kwargs (Optional[Mapping[str, Any]]:\n Arguments to be forwarded to docker client containers.create.\n extras (Optional[PipesExtras]):\n Extra values to pass along as part of the ext protocol.\n context_injector (Optional[PipesContextInjector]):\n Override the default ext protocol context injection.\n message_reader (Optional[PipesMessageReader]):\n Override the default ext protocol message reader.\n\n Returns:\n PipesClientCompletedInvocation: Wrapper containing results reported by the external\n process.\n """\n with open_pipes_session(\n context=context,\n context_injector=self.context_injector,\n message_reader=self.message_reader,\n extras=extras,\n ) as pipes_session:\n client = docker.client.from_env()\n registry = registry or self.registry\n if registry:\n client.login(\n registry=registry["url"],\n username=registry["username"],\n password=registry["password"],\n )\n\n try:\n container = self._create_container(\n 
client=client,\n image=image,\n command=command,\n env=env,\n open_pipes_session_env=pipes_session.get_bootstrap_env_vars(),\n container_kwargs=container_kwargs,\n )\n except docker.errors.ImageNotFound:\n client.images.pull(image)\n container = self._create_container(\n client=client,\n image=image,\n command=command,\n env=env,\n open_pipes_session_env=pipes_session.get_bootstrap_env_vars(),\n container_kwargs=container_kwargs,\n )\n\n result = container.start()\n try:\n if isinstance(self.message_reader, PipesDockerLogsMessageReader):\n self.message_reader.consume_docker_logs(container)\n\n result = container.wait()\n if result["StatusCode"] != 0:\n raise DagsterPipesError(f"Container exited with non-zero status code: {result}")\n finally:\n container.stop()\n return PipesClientCompletedInvocation(tuple(pipes_session.get_results()))\n\n def _create_container(\n self,\n client,\n image: str,\n command: Optional[Union[str, Sequence[str]]],\n env: Optional[Mapping[str, str]],\n container_kwargs: Optional[Mapping[str, Any]],\n open_pipes_session_env: Mapping[str, str],\n ):\n kwargs = dict(container_kwargs or {})\n kwargs_env = kwargs.pop("environment", {})\n return client.containers.create(\n image=image,\n command=command,\n detach=True,\n environment={\n **open_pipes_session_env,\n **(self.env or {}),\n **(env or {}),\n **kwargs_env,\n },\n **kwargs,\n )\n\n\nPipesDockerClient = ResourceParam[_PipesDockerClient]\n
", "current_page_name": "_modules/dagster_docker/pipes", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_docker.pipes"}}, "dagster_duckdb": {"io_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_duckdb.io_manager

\nfrom abc import abstractmethod\nfrom contextlib import contextmanager\nfrom typing import Any, Dict, Optional, Sequence, Type, cast\n\nimport duckdb\nfrom dagster import IOManagerDefinition, OutputContext, io_manager\nfrom dagster._config.pythonic_config import ConfigurableIOManagerFactory\nfrom dagster._core.definitions.time_window_partitions import TimeWindow\nfrom dagster._core.storage.db_io_manager import (\n    DbClient,\n    DbIOManager,\n    DbTypeHandler,\n    TablePartitionDimension,\n    TableSlice,\n)\nfrom dagster._core.storage.io_manager import dagster_maintained_io_manager\nfrom dagster._utils.backoff import backoff\nfrom pydantic import Field\n\nDUCKDB_DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"\n\n\n
[docs]def build_duckdb_io_manager(\n type_handlers: Sequence[DbTypeHandler], default_load_type: Optional[Type] = None\n) -> IOManagerDefinition:\n """Builds an IO manager definition that reads inputs from and writes outputs to DuckDB.\n\n Args:\n type_handlers (Sequence[DbTypeHandler]): Each handler defines how to translate between\n DuckDB tables and an in-memory type - e.g. a Pandas DataFrame. If only\n one DbTypeHandler is provided, it will be used as the default_load_type.\n default_load_type (Type): When an input has no type annotation, load it as this type.\n\n Returns:\n IOManagerDefinition\n\n Examples:\n .. code-block:: python\n\n from dagster_duckdb import build_duckdb_io_manager\n from dagster_duckdb_pandas import DuckDBPandasTypeHandler\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in duckdb\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n duckdb_io_manager = build_duckdb_io_manager([DuckDBPandasTypeHandler()])\n\n @repository\n def my_repo():\n return with_resources(\n [my_table],\n {"io_manager": duckdb_io_manager.configured({"database": "my_db.duckdb"})}\n )\n\n If you do not provide a schema, Dagster will determine a schema based on the assets and ops using\n the IO Manager. For assets, the schema will be determined from the asset key. For ops, the schema can be\n specified by including a "schema" entry in output metadata. If none of these is provided, the schema will\n default to "public".\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_schema"})}\n )\n def make_my_table() -> pd.DataFrame:\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pd.DataFrame):\n # my_table will just contain the data from column "a"\n ...\n\n """\n\n @dagster_maintained_io_manager\n @io_manager(config_schema=DuckDBIOManager.to_config_schema())\n def duckdb_io_manager(init_context):\n """IO Manager for storing outputs in a DuckDB database.\n\n Assets will be stored in the schema and table name specified by their AssetKey.\n Subsequent materializations of an asset will overwrite previous materializations of that asset.\n Op outputs will be stored in the schema specified by output metadata (defaults to public) in a\n table of the name of the output.\n """\n return DbIOManager(\n type_handlers=type_handlers,\n db_client=DuckDbClient(),\n io_manager_name="DuckDBIOManager",\n database=init_context.resource_config["database"],\n schema=init_context.resource_config.get("schema"),\n default_load_type=default_load_type,\n )\n\n return duckdb_io_manager
\n\n\n
[docs]class DuckDBIOManager(ConfigurableIOManagerFactory):\n """Base class for an IO manager definition that reads inputs from and writes outputs to DuckDB.\n\n Examples:\n .. code-block:: python\n\n from dagster_duckdb import DuckDBIOManager\n from dagster_duckdb_pandas import DuckDBPandasTypeHandler\n\n class MyDuckDBIOManager(DuckDBIOManager):\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [DuckDBPandasTypeHandler()]\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in duckdb\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={"io_manager": MyDuckDBIOManager(database="my_db.duckdb")}\n )\n\n If you do not provide a schema, Dagster will determine a schema based on the assets and ops using\n the IO Manager. For assets, the schema will be determined from the asset key, as in the above example.\n For ops, the schema can be specified by including a "schema" entry in output metadata. If none\n of these is provided, the schema will default to "public".\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_schema"})}\n )\n def make_my_table() -> pd.DataFrame:\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pd.DataFrame):\n # my_table will just contain the data from column "a"\n ...\n\n Set DuckDB configuration options using the config field. See\n https://duckdb.org/docs/sql/configuration.html for all available settings.\n\n .. code-block:: python\n\n defs = Definitions(\n assets=[my_table],\n resources={"io_manager": MyDuckDBIOManager(database="my_db.duckdb",\n config={"arrow_large_buffer_size": True})}\n )\n\n """\n\n database: str = Field(description="Path to the DuckDB database.")\n config: Dict[str, Any] = Field(description="DuckDB configuration options.", default={})\n schema_: Optional[str] = Field(\n default=None, alias="schema", description="Name of the schema to use."\n ) # schema is a reserved word for pydantic\n\n @staticmethod\n @abstractmethod\n def type_handlers() -> Sequence[DbTypeHandler]: ...\n\n @staticmethod\n def default_load_type() -> Optional[Type]:\n return None\n\n def create_io_manager(self, context) -> DbIOManager:\n return DbIOManager(\n db_client=DuckDbClient(),\n database=self.database,\n schema=self.schema_,\n type_handlers=self.type_handlers(),\n default_load_type=self.default_load_type(),\n io_manager_name="DuckDBIOManager",\n )
\n\n\nclass DuckDbClient(DbClient):\n @staticmethod\n def delete_table_slice(context: OutputContext, table_slice: TableSlice, connection) -> None:\n try:\n connection.execute(_get_cleanup_statement(table_slice))\n except duckdb.CatalogException:\n # table doesn't exist yet, so ignore the error\n pass\n\n @staticmethod\n def ensure_schema_exists(context: OutputContext, table_slice: TableSlice, connection) -> None:\n connection.execute(f"create schema if not exists {table_slice.schema};")\n\n @staticmethod\n def get_select_statement(table_slice: TableSlice) -> str:\n col_str = ", ".join(table_slice.columns) if table_slice.columns else "*"\n\n if table_slice.partition_dimensions and len(table_slice.partition_dimensions) > 0:\n query = f"SELECT {col_str} FROM {table_slice.schema}.{table_slice.table} WHERE\\n"\n return query + _partition_where_clause(table_slice.partition_dimensions)\n else:\n return f"""SELECT {col_str} FROM {table_slice.schema}.{table_slice.table}"""\n\n @staticmethod\n @contextmanager\n def connect(context, _):\n conn = backoff(\n fn=duckdb.connect,\n retry_on=(RuntimeError, duckdb.IOException),\n kwargs={\n "database": context.resource_config["database"],\n "read_only": False,\n "config": context.resource_config["config"],\n },\n max_retries=10,\n )\n\n yield conn\n\n conn.close()\n\n\ndef _get_cleanup_statement(table_slice: TableSlice) -> str:\n """Returns a SQL statement that deletes data in the given table to make way for the output data\n being written.\n """\n if table_slice.partition_dimensions and len(table_slice.partition_dimensions) > 0:\n query = f"DELETE FROM {table_slice.schema}.{table_slice.table} WHERE\\n"\n return query + _partition_where_clause(table_slice.partition_dimensions)\n else:\n return f"DELETE FROM {table_slice.schema}.{table_slice.table}"\n\n\ndef _partition_where_clause(partition_dimensions: Sequence[TablePartitionDimension]) -> str:\n return " AND\\n".join(\n (\n _time_window_where_clause(partition_dimension)\n if isinstance(partition_dimension.partitions, TimeWindow)\n else _static_where_clause(partition_dimension)\n )\n for partition_dimension in partition_dimensions\n )\n\n\ndef _time_window_where_clause(table_partition: TablePartitionDimension) -> str:\n partition = cast(TimeWindow, table_partition.partitions)\n start_dt, end_dt = partition\n start_dt_str = start_dt.strftime(DUCKDB_DATETIME_FORMAT)\n end_dt_str = end_dt.strftime(DUCKDB_DATETIME_FORMAT)\n return f"""{table_partition.partition_expr} >= '{start_dt_str}' AND {table_partition.partition_expr} < '{end_dt_str}'"""\n\n\ndef _static_where_clause(table_partition: TablePartitionDimension) -> str:\n partitions = ", ".join(f"'{partition}'" for partition in table_partition.partitions)\n return f"""{table_partition.partition_expr} in ({partitions})"""\n
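An illustrative sketch (not part of the source above) of the SELECT statement these helpers build for a time-window partition. ``TableSlice``, ``TablePartitionDimension``, and ``TimeWindow`` live in private ``dagster._core`` modules, so the import paths are assumptions that may shift between releases, and the exact timestamp formatting comes from ``DUCKDB_DATETIME_FORMAT``.

.. code-block:: python

    from datetime import datetime

    from dagster._core.definitions.time_window_partitions import TimeWindow
    from dagster._core.storage.db_io_manager import TablePartitionDimension, TableSlice
    from dagster_duckdb.io_manager import DuckDbClient

    slice_ = TableSlice(
        table="daily_events",
        schema="my_schema",
        partition_dimensions=[
            TablePartitionDimension(
                partition_expr="event_date",
                partitions=TimeWindow(datetime(2023, 1, 1), datetime(2023, 1, 2)),
            )
        ],
    )

    # Roughly: SELECT * FROM my_schema.daily_events WHERE
    #          event_date >= '2023-01-01 ...' AND event_date < '2023-01-02 ...'
    print(DuckDbClient.get_select_statement(slice_))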
", "current_page_name": "_modules/dagster_duckdb/io_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_duckdb.io_manager"}, "resource": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_duckdb.resource

\nfrom contextlib import contextmanager\nfrom typing import Any, Dict\n\nimport duckdb\nfrom dagster import ConfigurableResource\nfrom dagster._utils.backoff import backoff\nfrom pydantic import Field\n\n\n
[docs]class DuckDBResource(ConfigurableResource):\n """Resource for interacting with a DuckDB database.\n\n Examples:\n .. code-block:: python\n\n from dagster import Definitions, asset\n from dagster_duckdb import DuckDBResource\n\n @asset\n def my_table(duckdb: DuckDBResource):\n with duckdb.get_connection() as conn:\n conn.execute("SELECT * from MY_SCHEMA.MY_TABLE")\n\n defs = Definitions(\n assets=[my_table],\n resources={"duckdb": DuckDBResource(database="path/to/db.duckdb")}\n )\n\n """\n\n database: str = Field(\n description=(\n "Path to the DuckDB database. Setting database=':memory:' will use an in-memory"\n " database "\n )\n )\n config: Dict[str, Any] = Field(description="DuckDB configuration options.", default={})\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @contextmanager\n def get_connection(self):\n conn = backoff(\n fn=duckdb.connect,\n retry_on=(RuntimeError, duckdb.IOException),\n kwargs={"database": self.database, "read_only": False, "config": self.config},\n max_retries=10,\n )\n\n yield conn\n\n conn.close()
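A minimal sketch showing the ``config`` field in use and a query result pulled back as a DataFrame; the table, schema, and setting names are illustrative, and it assumes pandas is installed (``fetchdf()`` is the standard DuckDB Python API for this).

.. code-block:: python

    from dagster import Definitions, asset
    from dagster_duckdb import DuckDBResource

    @asset
    def species_counts(duckdb: DuckDBResource) -> None:
        with duckdb.get_connection() as conn:
            # fetchdf() materializes the query result as a pandas DataFrame
            df = conn.execute(
                "SELECT species, count(*) AS n FROM iris.iris_data GROUP BY species"
            ).fetchdf()
            # DuckDB can scan the local DataFrame `df` directly by name
            conn.execute("CREATE OR REPLACE TABLE iris.species_counts AS SELECT * FROM df")

    defs = Definitions(
        assets=[species_counts],
        resources={
            "duckdb": DuckDBResource(
                database="path/to/db.duckdb",
                config={"memory_limit": "2GB"},  # any DuckDB setting from the configuration docs
            )
        },
    )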
\n
", "current_page_name": "_modules/dagster_duckdb/resource", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_duckdb.resource"}}, "dagster_duckdb_pandas": {"duckdb_pandas_type_handler": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_duckdb_pandas.duckdb_pandas_type_handler

\nfrom typing import Optional, Sequence, Type\n\nimport pandas as pd\nfrom dagster import InputContext, MetadataValue, OutputContext, TableColumn, TableSchema\nfrom dagster._core.storage.db_io_manager import DbTypeHandler, TableSlice\nfrom dagster_duckdb.io_manager import (\n    DuckDbClient,\n    DuckDBIOManager,\n    build_duckdb_io_manager,\n)\n\n\n
[docs]class DuckDBPandasTypeHandler(DbTypeHandler[pd.DataFrame]):\n """Stores and loads Pandas DataFrames in DuckDB.\n\n To use this type handler, return it from the ``type_handlers` method of an I/O manager that inherits from ``DuckDBIOManager``.\n\n Example:\n .. code-block:: python\n\n from dagster_duckdb import DuckDBIOManager\n from dagster_duckdb_pandas import DuckDBPandasTypeHandler\n\n class MyDuckDBIOManager(DuckDBIOManager):\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [DuckDBPandasTypeHandler()]\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in duckdb\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={"io_manager": MyDuckDBIOManager(database="my_db.duckdb")}\n )\n\n """\n\n def handle_output(\n self, context: OutputContext, table_slice: TableSlice, obj: pd.DataFrame, connection\n ):\n """Stores the pandas DataFrame in duckdb."""\n connection.execute(\n f"create table if not exists {table_slice.schema}.{table_slice.table} as select * from"\n " obj;"\n )\n if not connection.fetchall():\n # table was not created, therefore already exists. Insert the data\n connection.execute(\n f"insert into {table_slice.schema}.{table_slice.table} select * from obj"\n )\n\n context.add_output_metadata(\n {\n "row_count": obj.shape[0],\n "dataframe_columns": MetadataValue.table_schema(\n TableSchema(\n columns=[\n TableColumn(name=name, type=str(dtype)) # type: ignore # (bad stubs)\n for name, dtype in obj.dtypes.items()\n ]\n )\n ),\n }\n )\n\n def load_input(\n self, context: InputContext, table_slice: TableSlice, connection\n ) -> pd.DataFrame:\n """Loads the input as a Pandas DataFrame."""\n if table_slice.partition_dimensions and len(context.asset_partition_keys) == 0:\n return pd.DataFrame()\n return connection.execute(DuckDbClient.get_select_statement(table_slice)).fetchdf()\n\n @property\n def supported_types(self):\n return [pd.DataFrame]
\n\n\nduckdb_pandas_io_manager = build_duckdb_io_manager(\n [DuckDBPandasTypeHandler()], default_load_type=pd.DataFrame\n)\nduckdb_pandas_io_manager.__doc__ = """\nAn I/O manager definition that reads inputs from and writes Pandas DataFrames to DuckDB. When\nusing the duckdb_pandas_io_manager, any inputs and outputs without type annotations will be loaded\nas Pandas DataFrames.\n\nReturns:\n IOManagerDefinition\n\nExamples:\n\n .. code-block:: python\n\n from dagster_duckdb_pandas import duckdb_pandas_io_manager\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in DuckDB\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n @repository\n def my_repo():\n return with_resources(\n [my_table],\n {"io_manager": duckdb_pandas_io_manager.configured({"database": "my_db.duckdb"})}\n )\n\n If you do not provide a schema, Dagster will determine a schema based on the assets and ops using\n the I/O Manager. For assets, the schema will be determined from the asset key.\n For ops, the schema can be specified by including a "schema" entry in output metadata. If "schema" is not provided\n via config or on the asset/op, "public" will be used for the schema.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_schema"})}\n )\n def make_my_table() -> pd.DataFrame:\n # the returned value will be stored at my_schema.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pd.DataFrame) -> pd.DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n"""\n\n\n
[docs]class DuckDBPandasIOManager(DuckDBIOManager):\n """An I/O manager definition that reads inputs from and writes Pandas DataFrames to DuckDB. When\n using the DuckDBPandasIOManager, any inputs and outputs without type annotations will be loaded\n as Pandas DataFrames.\n\n Returns:\n IOManagerDefinition\n\n Examples:\n .. code-block:: python\n\n from dagster_duckdb_pandas import DuckDBPandasIOManager\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in DuckDB\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={"io_manager": DuckDBPandasIOManager(database="my_db.duckdb")}\n )\n\n If you do not provide a schema, Dagster will determine a schema based on the assets and ops using\n the I/O Manager. For assets, the schema will be determined from the asset key, as in the above example.\n For ops, the schema can be specified by including a "schema" entry in output metadata. If "schema" is not provided\n via config or on the asset/op, "public" will be used for the schema.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_schema"})}\n )\n def make_my_table() -> pd.DataFrame:\n # the returned value will be stored at my_schema.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pd.DataFrame) -> pd.DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n """\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [DuckDBPandasTypeHandler()]\n\n @staticmethod\n def default_load_type() -> Optional[Type]:\n return pd.DataFrame
\n
", "current_page_name": "_modules/dagster_duckdb_pandas/duckdb_pandas_type_handler", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_duckdb_pandas.duckdb_pandas_type_handler"}}, "dagster_duckdb_polars": {"duckdb_polars_type_handler": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_duckdb_polars.duckdb_polars_type_handler

\nfrom typing import Optional, Sequence, Type\n\nimport polars as pl\nfrom dagster import InputContext, MetadataValue, OutputContext, TableColumn, TableSchema\nfrom dagster._core.storage.db_io_manager import DbTypeHandler, TableSlice\nfrom dagster_duckdb.io_manager import DuckDbClient, DuckDBIOManager, build_duckdb_io_manager\n\n\n
[docs]class DuckDBPolarsTypeHandler(DbTypeHandler[pl.DataFrame]):\n """Stores and loads Polars DataFrames in DuckDB.\n\n To use this type handler, return it from the ``type_handlers` method of an I/O manager that inherits from ``DuckDBIOManager``.\n\n Example:\n .. code-block:: python\n\n from dagster_duckdb import DuckDBIOManager\n from dagster_duckdb_polars import DuckDBPolarsTypeHandler\n\n class MyDuckDBIOManager(DuckDBIOManager):\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [DuckDBPolarsTypeHandler()]\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in duckdb\n )\n def my_table() -> pl.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={"io_manager": MyDuckDBIOManager(database="my_db.duckdb")}\n )\n\n """\n\n def handle_output(\n self, context: OutputContext, table_slice: TableSlice, obj: pl.DataFrame, connection\n ):\n """Stores the polars DataFrame in duckdb."""\n obj_arrow = obj.to_arrow() # noqa: F841 # need obj_arrow symbol to exist for duckdb query\n connection.execute(f"create schema if not exists {table_slice.schema};")\n connection.execute(\n f"create table if not exists {table_slice.schema}.{table_slice.table} as select * from"\n " obj_arrow;"\n )\n if not connection.fetchall():\n # table was not created, therefore already exists. Insert the data\n connection.execute(\n f"insert into {table_slice.schema}.{table_slice.table} select * from obj_arrow"\n )\n\n context.add_output_metadata(\n {\n "row_count": obj.shape[0],\n "dataframe_columns": MetadataValue.table_schema(\n TableSchema(\n columns=[\n TableColumn(name=name, type=str(dtype))\n for name, dtype in zip(obj.columns, obj.dtypes)\n ]\n )\n ),\n }\n )\n\n def load_input(\n self, context: InputContext, table_slice: TableSlice, connection\n ) -> pl.DataFrame:\n """Loads the input as a Polars DataFrame."""\n if table_slice.partition_dimensions and len(context.asset_partition_keys) == 0:\n return pl.DataFrame()\n select_statement = connection.execute(\n DuckDbClient.get_select_statement(table_slice=table_slice)\n )\n duckdb_to_arrow = select_statement.arrow()\n return pl.DataFrame(duckdb_to_arrow)\n\n @property\n def supported_types(self):\n return [pl.DataFrame]
\n\n\nduckdb_polars_io_manager = build_duckdb_io_manager(\n [DuckDBPolarsTypeHandler()], default_load_type=pl.DataFrame\n)\nduckdb_polars_io_manager.__doc__ = """\nAn I/O manager definition that reads inputs from and writes polars dataframes to DuckDB. When\nusing the duckdb_polars_io_manager, any inputs and outputs without type annotations will be loaded\nas Polars DataFrames.\n\nReturns:\n IOManagerDefinition\n\nExamples:\n\n .. code-block:: python\n\n from dagster_duckdb_polars import duckdb_polars_io_manager\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in DuckDB\n )\n def my_table() -> pl.DataFrame: # the name of the asset will be the table name\n ...\n\n @repository\n def my_repo():\n return with_resources(\n [my_table],\n {"io_manager": duckdb_polars_io_manager.configured({"database": "my_db.duckdb"})}\n )\n\n If you do not provide a schema, Dagster will determine a schema based on the assets and ops using\n the I/O Manager. For assets, the schema will be determined from the asset key.\n For ops, the schema can be specified by including a "schema" entry in output metadata. If "schema" is not provided\n via config or on the asset/op, "public" will be used for the schema.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_schema"})}\n )\n def make_my_table() -> pl.DataFrame:\n # the returned value will be stored at my_schema.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pl.DataFrame) -> pl.DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n"""\n\n\n
[docs]class DuckDBPolarsIOManager(DuckDBIOManager):\n """An I/O manager definition that reads inputs from and writes Polars DataFrames to DuckDB. When\n using the DuckDBPolarsIOManager, any inputs and outputs without type annotations will be loaded\n as Polars DataFrames.\n\n Returns:\n IOManagerDefinition\n\n Examples:\n .. code-block:: python\n\n from dagster_duckdb_polars import DuckDBPolarsIOManager\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in DuckDB\n )\n def my_table() -> pl.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={"io_manager": DuckDBPolarsIOManager(database="my_db.duckdb")}\n )\n\n If you do not provide a schema, Dagster will determine a schema based on the assets and ops using\n the I/O Manager. For assets, the schema will be determined from the asset key, as in the above example.\n For ops, the schema can be specified by including a "schema" entry in output metadata. If "schema" is not provided\n via config or on the asset/op, "public" will be used for the schema.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_schema"})}\n )\n def make_my_table() -> pl.DataFrame:\n # the returned value will be stored at my_schema.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pl.DataFrame) -> pl.DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n """\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [DuckDBPolarsTypeHandler()]\n\n @staticmethod\n def default_load_type() -> Optional[Type]:\n return pl.DataFrame
\n
", "current_page_name": "_modules/dagster_duckdb_polars/duckdb_polars_type_handler", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_duckdb_polars.duckdb_polars_type_handler"}}, "dagster_duckdb_pyspark": {"duckdb_pyspark_type_handler": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_duckdb_pyspark.duckdb_pyspark_type_handler

\nfrom typing import Optional, Sequence, Type\n\nimport pyarrow as pa\nimport pyspark\nimport pyspark.sql\nfrom dagster import InputContext, MetadataValue, OutputContext, TableColumn, TableSchema\nfrom dagster._core.storage.db_io_manager import DbTypeHandler, TableSlice\nfrom dagster_duckdb.io_manager import (\n    DuckDbClient,\n    DuckDBIOManager,\n    build_duckdb_io_manager,\n)\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import StructType\n\n\ndef pyspark_df_to_arrow_table(df: pyspark.sql.DataFrame) -> pa.Table:\n    """Converts a PySpark DataFrame to a PyArrow Table."""\n    # `_collect_as_arrow` API call sourced from:\n    #   https://stackoverflow.com/questions/73203318/how-to-transform-spark-dataframe-to-polars-dataframe\n    return pa.Table.from_batches(df._collect_as_arrow())  # noqa: SLF001\n\n\n
[docs]class DuckDBPySparkTypeHandler(DbTypeHandler[pyspark.sql.DataFrame]):\n """Stores PySpark DataFrames in DuckDB.\n\n To use this type handler, return it from the ``type_handlers` method of an I/O manager that inherits from ``DuckDBIOManager``.\n\n Example:\n .. code-block:: python\n\n from dagster_duckdb import DuckDBIOManager\n from dagster_duckdb_pyspark import DuckDBPySparkTypeHandler\n\n class MyDuckDBIOManager(DuckDBIOManager):\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [DuckDBPySparkTypeHandler()]\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in duckdb\n )\n def my_table() -> pyspark.sql.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={"io_manager": MyDuckDBIOManager(database="my_db.duckdb")}\n )\n """\n\n def handle_output(\n self,\n context: OutputContext,\n table_slice: TableSlice,\n obj: pyspark.sql.DataFrame,\n connection,\n ):\n """Stores the given object at the provided filepath."""\n pa_df = pyspark_df_to_arrow_table(obj) # noqa: F841\n connection.execute(\n f"create table if not exists {table_slice.schema}.{table_slice.table} as select * from"\n " pa_df;"\n )\n if not connection.fetchall():\n # table was not created, therefore already exists. Insert the data\n connection.execute(\n f"insert into {table_slice.schema}.{table_slice.table} select * from pa_df;"\n )\n\n context.add_output_metadata(\n {\n "row_count": obj.count(),\n "dataframe_columns": MetadataValue.table_schema(\n TableSchema(\n columns=[\n TableColumn(name=name, type=str(dtype)) for name, dtype in obj.dtypes\n ]\n )\n ),\n }\n )\n\n def load_input(\n self, context: InputContext, table_slice: TableSlice, connection\n ) -> pyspark.sql.DataFrame:\n """Loads the return of the query as the correct type."""\n spark = SparkSession.builder.getOrCreate() # type: ignore\n if table_slice.partition_dimensions and len(context.asset_partition_keys) == 0:\n return spark.createDataFrame([], StructType([]))\n\n pd_df = connection.execute(DuckDbClient.get_select_statement(table_slice)).fetchdf()\n return spark.createDataFrame(pd_df)\n\n @property\n def supported_types(self):\n return [pyspark.sql.DataFrame]
\n\n\nduckdb_pyspark_io_manager = build_duckdb_io_manager(\n [DuckDBPySparkTypeHandler()], default_load_type=pyspark.sql.DataFrame\n)\nduckdb_pyspark_io_manager.__doc__ = """\nAn I/O manager definition that reads inputs from and writes PySpark DataFrames to DuckDB. When\nusing the duckdb_pyspark_io_manager, any inputs and outputs without type annotations will be loaded\nas PySpark DataFrames.\n\nReturns:\n IOManagerDefinition\n\nExamples:\n\n .. code-block:: python\n\n from dagster_duckdb_pyspark import duckdb_pyspark_io_manager\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in DuckDB\n )\n def my_table() -> pyspark.sql.DataFrame: # the name of the asset will be the table name\n ...\n\n @repository\n def my_repo():\n return with_resources(\n [my_table],\n {"io_manager": duckdb_pyspark_io_manager.configured({"database": "my_db.duckdb"})}\n )\n\n If you do not provide a schema, Dagster will determine a schema based on the assets and ops using\n the I/O Manager. For assets, the schema will be determined from the asset key.\n For ops, the schema can be specified by including a "schema" entry in output metadata. If "schema" is not provided\n via config or on the asset/op, "public" will be used for the schema.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_schema"})}\n )\n def make_my_table() -> pyspark.sql.DataFrame:\n # the returned value will be stored at my_schema.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pyspark.sql.DataFrame) -> pyspark.sql.DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n"""\n\n\n
[docs]class DuckDBPySparkIOManager(DuckDBIOManager):\n """An I/O manager definition that reads inputs from and writes PySpark DataFrames to DuckDB. When\n using the DuckDBPySparkIOManager, any inputs and outputs without type annotations will be loaded\n as PySpark DataFrames.\n\n Returns:\n IOManagerDefinition\n\n Examples:\n .. code-block:: python\n\n from dagster_duckdb_pyspark import DuckDBPySparkIOManager\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in DuckDB\n )\n def my_table() -> pyspark.sql.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={"io_manager": DuckDBPySparkIOManager(database="my_db.duckdb")}\n )\n\n If you do not provide a schema, Dagster will determine a schema based on the assets and ops using\n the I/O Manager. For assets, the schema will be determined from the asset key, as in the above example.\n For ops, the schema can be specified by including a "schema" entry in output metadata. If "schema" is not provided\n via config or on the asset/op, "public" will be used for the schema.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_schema"})}\n )\n def make_my_table() -> pyspark.sql.DataFrame:\n # the returned value will be stored at my_schema.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pyspark.sql.DataFrame) -> pyspark.sql.DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n """\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [DuckDBPySparkTypeHandler()]\n\n @staticmethod\n def default_load_type() -> Optional[Type]:\n return pyspark.sql.DataFrame
\n
", "current_page_name": "_modules/dagster_duckdb_pyspark/duckdb_pyspark_type_handler", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_duckdb_pyspark.duckdb_pyspark_type_handler"}}, "dagster_embedded_elt": {"sling": {"asset_defs": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_embedded_elt.sling.asset_defs

\nimport re\nfrom typing import Any, Dict, List, Optional, Union\n\nfrom dagster import (\n    AssetExecutionContext,\n    AssetsDefinition,\n    AssetSpec,\n    MaterializeResult,\n    multi_asset,\n)\nfrom dagster._annotations import experimental\n\nfrom dagster_embedded_elt.sling.resources import SlingMode, SlingResource\n\n\n
[docs]@experimental\ndef build_sling_asset(\n asset_spec: AssetSpec,\n source_stream: str,\n target_object: str,\n mode: SlingMode = SlingMode.FULL_REFRESH,\n primary_key: Optional[Union[str, List[str]]] = None,\n update_key: Optional[Union[str, List[str]]] = None,\n source_options: Optional[Dict[str, Any]] = None,\n target_options: Optional[Dict[str, Any]] = None,\n sling_resource_key: str = "sling",\n) -> AssetsDefinition:\n """Asset Factory for using Sling to sync data from a source stream to a target object.\n\n Args:\n asset_spec (AssetSpec): The AssetSpec to use to materialize this asset.\n source_stream (str): The source stream to sync from. This can be a table, a query, or a path.\n target_object (str): The target object to sync to. This can be a table, or a path.\n mode (SlingMode, optional): The sync mode to use when syncing. Defaults to SlingMode.FULL_REFRESH.\n primary_key (Optional[Union[str, List[str]]], optional): The optional primary key to use when syncing.\n update_key (Optional[Union[str, List[str]]], optional): The optional update key to use when syncing.\n source_options (Optional[Dict[str, Any]], optional): Any optional Sling source options to use when syncing.\n target_options (Optional[Dict[str, Any]], optional): Any optional target options to use when syncing.\n sling_resource_key (str, optional): The resource key for the SlingResource. Defaults to "sling".\n\n Examples:\n Creating a Sling asset that syncs from a file to a table:\n\n .. code-block:: python\n\n asset_spec = AssetSpec(key=["main", "dest_tbl"])\n asset_def = build_sling_asset(\n asset_spec=asset_spec,\n source_stream="file:///tmp/test.csv",\n target_object="main.dest_table",\n mode=SlingMode.INCREMENTAL,\n primary_key="id"\n )\n\n Creating a Sling asset that syncs from a table to a file with a full refresh:\n\n .. code-block:: python\n\n asset_spec = AssetSpec(key="test.csv")\n asset_def = build_sling_asset(\n asset_spec=asset_spec,\n source_stream="main.dest_table",\n target_object="file:///tmp/test.csv",\n mode=SlingMode.FULL_REFRESH\n )\n\n\n """\n if primary_key is not None and not isinstance(primary_key, list):\n primary_key = [primary_key]\n\n if update_key is not None and not isinstance(update_key, list):\n update_key = [update_key]\n\n @multi_asset(\n compute_kind="sling", specs=[asset_spec], required_resource_keys={sling_resource_key}\n )\n def sync(context: AssetExecutionContext) -> MaterializeResult:\n sling: SlingResource = getattr(context.resources, sling_resource_key)\n last_row_count_observed = None\n for stdout_line in sling.sync(\n source_stream=source_stream,\n target_object=target_object,\n mode=mode,\n primary_key=primary_key,\n update_key=update_key,\n source_options=source_options,\n target_options=target_options,\n ):\n match = re.search(r"(\\d+) rows", stdout_line)\n if match:\n last_row_count_observed = int(match.group(1))\n context.log.info(stdout_line)\n\n return MaterializeResult(\n metadata=(\n {} if last_row_count_observed is None else {"row_count": last_row_count_observed}\n )\n )\n\n return sync
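A hedged sketch of wiring the asset returned by ``build_sling_asset`` into a code location: the resource has to be bound under the key passed as ``sling_resource_key`` (``"sling"`` by default). It assumes these names are re-exported from ``dagster_embedded_elt.sling``, and the connection values are placeholders.

.. code-block:: python

    from dagster import AssetSpec, Definitions, EnvVar
    from dagster_embedded_elt.sling import (
        SlingMode,
        SlingResource,
        SlingSourceConnection,
        SlingTargetConnection,
        build_sling_asset,
    )

    sling_asset = build_sling_asset(
        asset_spec=AssetSpec(key=["main", "dest_tbl"]),
        source_stream="file:///tmp/test.csv",
        target_object="main.dest_tbl",
        mode=SlingMode.INCREMENTAL,
        primary_key="id",
    )

    defs = Definitions(
        assets=[sling_asset],
        resources={
            # default sling_resource_key
            "sling": SlingResource(
                source_connection=SlingSourceConnection(type="file"),
                target_connection=SlingTargetConnection(
                    type="postgres", connection_string=EnvVar("POSTGRES_URL")
                ),
            )
        },
    )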
\n
", "current_page_name": "_modules/dagster_embedded_elt/sling/asset_defs", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_embedded_elt.sling.asset_defs"}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_embedded_elt.sling.resources

\nimport contextlib\nimport json\nimport re\nfrom enum import Enum\nfrom subprocess import PIPE, STDOUT, Popen\nfrom typing import Any, Dict, Generator, List, Optional\n\nfrom dagster import ConfigurableResource, PermissiveConfig, get_dagster_logger\nfrom dagster._annotations import experimental\nfrom dagster._utils.env import environ\nfrom pydantic import Field\nfrom sling import Sling\n\nlogger = get_dagster_logger()\n\n\nclass SlingMode(str, Enum):\n    """The mode to use when syncing.\n\n    See the Sling docs for more information: https://docs.slingdata.io/sling-cli/running-tasks#modes.\n    """\n\n    INCREMENTAL = "incremental"\n    TRUNCATE = "truncate"\n    FULL_REFRESH = "full-refresh"\n    SNAPSHOT = "snapshot"\n\n\n
[docs]class SlingSourceConnection(PermissiveConfig):\n    """A Sling Source Connection defines the source connection used by :py:class:`~dagster_embedded_elt.sling.SlingResource`.\n\n    Examples:\n        Creating a Sling Source for a file, such as CSV or JSON:\n\n        .. code-block:: python\n\n            source = SlingSourceConnection(type="file")\n\n        Create a Sling Source for a Postgres database, using a connection string:\n\n        .. code-block:: python\n\n            source = SlingSourceConnection(type="postgres", connection_string=EnvVar("POSTGRES_CONNECTION_STRING"))\n            source = SlingSourceConnection(type="postgres", connection_string="postgresql://user:password@host:port/schema")\n\n        Create a Sling Source for a Postgres database, using keyword arguments, as described here:\n        https://docs.slingdata.io/connections/database-connections/postgres\n\n        .. code-block:: python\n\n            source = SlingSourceConnection(type="postgres", host="host", user="hunter42", password=EnvVar("POSTGRES_PASSWORD"))\n\n    """\n\n    type: str = Field(description="Type of the source connection. Use 'file' for local storage.")\n    connection_string: Optional[str] = Field(\n        description="The connection string for the source database."\n    )
\n\n\n
[docs]class SlingTargetConnection(PermissiveConfig):\n    """A Sling Target Connection defines the target connection used by :py:class:`~dagster_embedded_elt.sling.SlingResource`.\n\n    Examples:\n        Creating a Sling Target for a file, such as CSV or JSON:\n\n        .. code-block:: python\n\n            target = SlingTargetConnection(type="file")\n\n        Create a Sling Target for a Postgres database, using a connection string:\n\n        .. code-block:: python\n\n            target = SlingTargetConnection(type="postgres", connection_string="postgresql://user:password@host:port/schema")\n            target = SlingTargetConnection(type="postgres", connection_string=EnvVar("POSTGRES_CONNECTION_STRING"))\n\n        Create a Sling Target for a Postgres database, using keyword arguments, as described here:\n        https://docs.slingdata.io/connections/database-connections/postgres\n\n        .. code-block:: python\n\n            target = SlingTargetConnection(type="postgres", host="host", user="hunter42", password=EnvVar("POSTGRES_PASSWORD"))\n\n\n    """\n\n    type: str = Field(\n        description="Type of the destination connection. Use 'file' for local storage."\n    )\n    connection_string: Optional[str] = Field(\n        description="The connection string for the target database."\n    )
\n\n\n
[docs]@experimental\nclass SlingResource(ConfigurableResource):\n """Resource for interacting with the Sling package.\n\n Examples:\n .. code-block:: python\n\n from dagster_etl.sling import SlingResource\n sling_resource = SlingResource(\n source_connection=SlingSourceConnection(\n type="postgres", connection_string=EnvVar("POSTGRES_CONNECTION_STRING")\n ),\n target_connection=SlingTargetConnection(\n type="snowflake",\n host="host",\n user="user",\n database="database",\n password="password",\n role="role",\n ),\n )\n\n """\n\n source_connection: SlingSourceConnection\n target_connection: SlingTargetConnection\n\n @contextlib.contextmanager\n def _setup_config(self) -> Generator[None, None, None]:\n """Uses environment variables to set the Sling source and target connections."""\n sling_source = self.source_connection.dict()\n sling_target = self.target_connection.dict()\n if self.source_connection.connection_string:\n sling_source["url"] = self.source_connection.connection_string\n if self.target_connection.connection_string:\n sling_target["url"] = self.target_connection.connection_string\n with environ(\n {\n "SLING_SOURCE": json.dumps(sling_source),\n "SLING_TARGET": json.dumps(sling_target),\n }\n ):\n yield\n\n @staticmethod\n def _exec_sling_cmd(cmd, stdin=None, stdout=PIPE, stderr=STDOUT) -> Generator[str, None, None]:\n ansi_escape = re.compile(r"\\x1B(?:[@-Z\\\\-_]|\\[[0-?]*[ -/]*[@-~])")\n with Popen(cmd, shell=True, stdin=stdin, stdout=stdout, stderr=stderr) as proc:\n assert proc.stdout\n\n for line in proc.stdout:\n fmt_line = str(line, "utf-8")\n clean_line = ansi_escape.sub("", fmt_line).replace("INF", "")\n yield clean_line\n\n proc.wait()\n if proc.returncode != 0:\n raise Exception("Sling command failed with error code %s", proc.returncode)\n\n def _sync(\n self,\n source_stream: str,\n target_object: str,\n mode: SlingMode = SlingMode.FULL_REFRESH,\n primary_key: Optional[List[str]] = None,\n update_key: Optional[List[str]] = None,\n source_options: Optional[Dict[str, Any]] = None,\n target_options: Optional[Dict[str, Any]] = None,\n ) -> Generator[str, None, None]:\n """Runs a Sling sync from the given source table to the given destination table. 
Generates\n output lines from the Sling CLI.\n """\n if self.source_connection.type == "file" and not source_stream.startswith("file://"):\n source_stream = "file://" + source_stream\n\n if self.target_connection.type == "file" and not target_object.startswith("file://"):\n target_object = "file://" + target_object\n\n with self._setup_config():\n config = {\n "source": {\n "conn": "SLING_SOURCE",\n "stream": source_stream,\n "primary_key": primary_key,\n "update_key": update_key,\n "options": source_options,\n },\n "target": {\n "conn": "SLING_TARGET",\n "object": target_object,\n "options": target_options,\n },\n }\n config["source"] = {k: v for k, v in config["source"].items() if v is not None}\n config["target"] = {k: v for k, v in config["target"].items() if v is not None}\n\n sling_cli = Sling(**config)\n logger.info("Starting Sling sync with mode: %s", mode)\n cmd = sling_cli._prep_cmd() # noqa: SLF001\n\n yield from self._exec_sling_cmd(cmd)\n\n def sync(\n self,\n source_stream: str,\n target_object: str,\n mode: SlingMode,\n primary_key: Optional[List[str]] = None,\n update_key: Optional[List[str]] = None,\n source_options: Optional[Dict[str, Any]] = None,\n target_options: Optional[Dict[str, Any]] = None,\n ) -> Generator[str, None, None]:\n """Initiate a Sling Sync between a source stream and a target object.\n\n Args:\n source_stream (str): The source stream to read from. For database sources, the source stream can be either\n a table name, a SQL statement or a path to a SQL file e.g. `TABLE1` or `SCHEMA1.TABLE2` or\n `SELECT * FROM TABLE`. For file sources, the source stream is a path or an url to a file.\n For file targets, the target object is a path or a url to a file, e.g. file:///tmp/file.csv or\n s3://my_bucket/my_folder/file.csv\n target_object (str): The target object to write into. For database targets, the target object is a table\n name, e.g. TABLE1, SCHEMA1.TABLE2. For file targets, the target object is a path or an url to a file.\n mode (SlingMode): The Sling mode to use when syncing, i.e. incremental, full-refresh\n See the Sling docs for more information: https://docs.slingdata.io/sling-cli/running-tasks#modes.\n primary_key (str): For incremental syncs, a primary key is used during merge statements to update\n existing rows.\n update_key (str): For incremental syncs, an update key is used to stream records after max(update_key)\n source_options (Dict[str, Any]): Other source options to pass to Sling,\n see https://docs.slingdata.io/sling-cli/running-tasks#source-options-src-options-flag-source.options-key\n for details\n target_options (Dict[str, Any[): Other target options to pass to Sling,\n see https://docs.slingdata.io/sling-cli/running-tasks#target-options-tgt-options-flag-target.options-key\n for details\n\n Examples:\n Sync from a source file to a sqlite database:\n\n .. 
code-block:: python\n\n sqllite_path = "/path/to/sqlite.db"\n csv_path = "/path/to/file.csv"\n\n @asset\n def run_sync(context, sling: SlingResource):\n res = sling.sync(\n source_stream=csv_path,\n target_object="events",\n mode=SlingMode.FULL_REFRESH,\n )\n for stdout in res:\n context.log.debug(stdout)\n counts = sqlite3.connect(sqllitepath).execute("SELECT count(1) FROM events").fetchone()\n assert counts[0] == 3\n\n source = SlingSourceConnection(\n type="file",\n )\n target = SlingTargetConnection(type="sqlite", instance=sqllitepath)\n\n materialize(\n [run_sync],\n resources={\n "sling": SlingResource(\n source_connection=source,\n target_connection=target,\n mode=SlingMode.TRUNCATE,\n )\n },\n )\n\n """\n yield from self._sync(\n source_stream=source_stream,\n target_object=target_object,\n mode=mode,\n primary_key=primary_key,\n update_key=update_key,\n source_options=source_options,\n target_options=target_options,\n )
\n
", "current_page_name": "_modules/dagster_embedded_elt/sling/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_embedded_elt.sling.resources"}}}, "dagster_fivetran": {"asset_defs": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_fivetran.asset_defs

\nimport hashlib\nimport inspect\nimport re\nfrom functools import partial\nfrom typing import (\n    Any,\n    Callable,\n    Dict,\n    List,\n    Mapping,\n    NamedTuple,\n    Optional,\n    Sequence,\n    Set,\n    Union,\n    cast,\n)\n\nfrom dagster import (\n    AssetKey,\n    AssetOut,\n    AssetsDefinition,\n    Nothing,\n    OpExecutionContext,\n    Output,\n    _check as check,\n    multi_asset,\n)\nfrom dagster._core.definitions.cacheable_assets import (\n    AssetsDefinitionCacheableData,\n    CacheableAssetsDefinition,\n)\nfrom dagster._core.definitions.events import CoercibleToAssetKeyPrefix\nfrom dagster._core.definitions.metadata import MetadataUserInput\nfrom dagster._core.definitions.resource_definition import ResourceDefinition\nfrom dagster._core.errors import DagsterStepOutputNotFoundError\nfrom dagster._core.execution.context.init import build_init_resource_context\n\nfrom dagster_fivetran.resources import DEFAULT_POLL_INTERVAL, FivetranResource\nfrom dagster_fivetran.utils import (\n    generate_materializations,\n    get_fivetran_connector_url,\n    metadata_for_table,\n)\n\n\ndef _build_fivetran_assets(\n    connector_id: str,\n    destination_tables: Sequence[str],\n    poll_interval: float = DEFAULT_POLL_INTERVAL,\n    poll_timeout: Optional[float] = None,\n    io_manager_key: Optional[str] = None,\n    asset_key_prefix: Optional[Sequence[str]] = None,\n    metadata_by_table_name: Optional[Mapping[str, MetadataUserInput]] = None,\n    table_to_asset_key_map: Optional[Mapping[str, AssetKey]] = None,\n    resource_defs: Optional[Mapping[str, ResourceDefinition]] = None,\n    group_name: Optional[str] = None,\n    infer_missing_tables: bool = False,\n    op_tags: Optional[Mapping[str, Any]] = None,\n) -> Sequence[AssetsDefinition]:\n    asset_key_prefix = check.opt_sequence_param(asset_key_prefix, "asset_key_prefix", of_type=str)\n\n    tracked_asset_keys = {\n        table: AssetKey([*asset_key_prefix, *table.split(".")]) for table in destination_tables\n    }\n    user_facing_asset_keys = table_to_asset_key_map or tracked_asset_keys\n\n    _metadata_by_table_name = check.opt_mapping_param(\n        metadata_by_table_name, "metadata_by_table_name", key_type=str\n    )\n\n    @multi_asset(\n        name=f"fivetran_sync_{connector_id}",\n        outs={\n            "_".join(key.path): AssetOut(\n                io_manager_key=io_manager_key,\n                key=user_facing_asset_keys[table],\n                metadata=_metadata_by_table_name.get(table),\n                dagster_type=Nothing,\n            )\n            for table, key in tracked_asset_keys.items()\n        },\n        compute_kind="fivetran",\n        resource_defs=resource_defs,\n        group_name=group_name,\n        op_tags=op_tags,\n    )\n    def _assets(context: OpExecutionContext, fivetran: FivetranResource) -> Any:\n        fivetran_output = fivetran.sync_and_poll(\n            connector_id=connector_id,\n            poll_interval=poll_interval,\n            poll_timeout=poll_timeout,\n        )\n\n        materialized_asset_keys = set()\n        for materialization in generate_materializations(\n            fivetran_output, asset_key_prefix=asset_key_prefix\n        ):\n            # scan through all tables actually created, if it was expected then emit an Output.\n            # otherwise, emit a runtime AssetMaterialization\n            if materialization.asset_key in tracked_asset_keys.values():\n                yield Output(\n                    value=None,\n                    
output_name="_".join(materialization.asset_key.path),\n                    metadata=materialization.metadata,\n                )\n                materialized_asset_keys.add(materialization.asset_key)\n\n            else:\n                yield materialization\n\n        unmaterialized_asset_keys = set(tracked_asset_keys.values()) - materialized_asset_keys\n        if infer_missing_tables:\n            for asset_key in unmaterialized_asset_keys:\n                yield Output(\n                    value=None,\n                    output_name="_".join(asset_key.path),\n                )\n\n        else:\n            if unmaterialized_asset_keys:\n                asset_key = next(iter(unmaterialized_asset_keys))\n                output_name = "_".join(asset_key.path)\n                raise DagsterStepOutputNotFoundError(\n                    f"Core compute for {context.op_def.name} did not return an output for"\n                    f' non-optional output "{output_name}".',\n                    step_key=context.get_step_execution_context().step.key,\n                    output_name=output_name,\n                )\n\n    return [_assets]\n\n\n
[docs]def build_fivetran_assets(\n connector_id: str,\n destination_tables: Sequence[str],\n poll_interval: float = DEFAULT_POLL_INTERVAL,\n poll_timeout: Optional[float] = None,\n io_manager_key: Optional[str] = None,\n asset_key_prefix: Optional[Sequence[str]] = None,\n metadata_by_table_name: Optional[Mapping[str, MetadataUserInput]] = None,\n group_name: Optional[str] = None,\n infer_missing_tables: bool = False,\n op_tags: Optional[Mapping[str, Any]] = None,\n) -> Sequence[AssetsDefinition]:\n """Build a set of assets for a given Fivetran connector.\n\n Returns an AssetsDefinition which connects the specified ``asset_keys`` to the computation that\n will update them. Internally, executes a Fivetran sync for a given ``connector_id``, and\n polls until that sync completes, raising an error if it is unsuccessful. Requires the use of the\n :py:class:`~dagster_fivetran.fivetran_resource`, which allows it to communicate with the\n Fivetran API.\n\n Args:\n connector_id (str): The Fivetran Connector ID that this op will sync. You can retrieve this\n value from the "Setup" tab of a given connector in the Fivetran UI.\n destination_tables (List[str]): `schema_name.table_name` for each table that you want to be\n represented in the Dagster asset graph for this connection.\n poll_interval (float): The time (in seconds) that will be waited between successive polls.\n poll_timeout (Optional[float]): The maximum time that will waited before this operation is\n timed out. By default, this will never time out.\n io_manager_key (Optional[str]): The io_manager to be used to handle each of these assets.\n asset_key_prefix (Optional[List[str]]): A prefix for the asset keys inside this asset.\n If left blank, assets will have a key of `AssetKey([schema_name, table_name])`.\n metadata_by_table_name (Optional[Mapping[str, MetadataUserInput]]): A mapping from destination\n table name to user-supplied metadata that should be associated with the asset for that table.\n group_name (Optional[str]): A string name used to organize multiple assets into groups. This\n group name will be applied to all assets produced by this multi_asset.\n infer_missing_tables (bool): If True, will create asset materializations for tables specified\n in destination_tables even if they are not present in the Fivetran sync output. This is useful\n in cases where Fivetran does not sync any data for a table and therefore does not include it\n in the sync output API response.\n op_tags (Optional[Dict[str, Any]]):\n A dictionary of tags for the op that computes the asset. Frameworks may expect and\n require certain metadata to be attached to a op. Values that are not strings will be\n json encoded and must meet the criteria that json.loads(json.dumps(value)) == value.\n\n **Examples:**\n\n Basic example:\n\n .. code-block:: python\n\n from dagster import AssetKey, repository, with_resources\n\n from dagster_fivetran import fivetran_resource\n from dagster_fivetran.assets import build_fivetran_assets\n\n my_fivetran_resource = fivetran_resource.configured(\n {\n "api_key": {"env": "FIVETRAN_API_KEY"},\n "api_secret": {"env": "FIVETRAN_API_SECRET"},\n }\n )\n\n Attaching metadata:\n\n .. 
code-block:: python\n\n fivetran_assets = build_fivetran_assets(\n connector_id="foobar",\n table_names=["schema1.table1", "schema2.table2"],\n metadata_by_table_name={\n "schema1.table1": {\n "description": "This is a table that contains foo and bar",\n },\n "schema2.table2": {\n "description": "This is a table that contains baz and quux",\n },\n },\n )\n """\n return _build_fivetran_assets(\n connector_id=connector_id,\n destination_tables=destination_tables,\n poll_interval=poll_interval,\n poll_timeout=poll_timeout,\n io_manager_key=io_manager_key,\n asset_key_prefix=asset_key_prefix,\n metadata_by_table_name=metadata_by_table_name,\n group_name=group_name,\n infer_missing_tables=infer_missing_tables,\n op_tags=op_tags,\n )
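A hedged sketch of binding the assets produced by ``build_fivetran_assets`` to the Pythonic ``FivetranResource`` under the required ``"fivetran"`` resource key; the connector id and table names are placeholders, and the ``FivetranResource`` field names should be checked against the dagster-fivetran API docs.

.. code-block:: python

    from dagster import Definitions, EnvVar
    from dagster_fivetran import FivetranResource, build_fivetran_assets

    fivetran_assets = build_fivetran_assets(
        connector_id="foobar",
        destination_tables=["schema1.table1", "schema2.table2"],
    )

    defs = Definitions(
        assets=fivetran_assets,
        resources={
            "fivetran": FivetranResource(
                api_key=EnvVar("FIVETRAN_API_KEY"),
                api_secret=EnvVar("FIVETRAN_API_SECRET"),
            )
        },
    )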
\n\n\nclass FivetranConnectionMetadata(\n NamedTuple(\n "_FivetranConnectionMetadata",\n [\n ("name", str),\n ("connector_id", str),\n ("connector_url", str),\n ("schemas", Mapping[str, Any]),\n ],\n )\n):\n def build_asset_defn_metadata(\n self,\n key_prefix: Sequence[str],\n group_name: Optional[str],\n table_to_asset_key_fn: Callable[[str], AssetKey],\n io_manager_key: Optional[str] = None,\n ) -> AssetsDefinitionCacheableData:\n schema_table_meta: Dict[str, MetadataUserInput] = {}\n if "schemas" in self.schemas:\n schemas_inner = cast(Dict[str, Any], self.schemas["schemas"])\n for schema in schemas_inner.values():\n if schema["enabled"]:\n schema_name = schema["name_in_destination"]\n schema_tables = cast(Dict[str, Dict[str, Any]], schema["tables"])\n for table in schema_tables.values():\n if table["enabled"]:\n table_name = table["name_in_destination"]\n schema_table_meta[f"{schema_name}.{table_name}"] = metadata_for_table(\n table, self.connector_url\n )\n else:\n schema_table_meta[self.name] = {}\n\n outputs = {\n table: AssetKey([*key_prefix, *list(table_to_asset_key_fn(table).path)])\n for table in schema_table_meta.keys()\n }\n\n internal_deps: Dict[str, Set[AssetKey]] = {}\n\n return AssetsDefinitionCacheableData(\n keys_by_input_name={},\n keys_by_output_name=outputs,\n internal_asset_deps=internal_deps,\n group_name=group_name,\n key_prefix=key_prefix,\n can_subset=False,\n metadata_by_output_name=schema_table_meta,\n extra_metadata={\n "connector_id": self.connector_id,\n "io_manager_key": io_manager_key,\n },\n )\n\n\ndef _build_fivetran_assets_from_metadata(\n assets_defn_meta: AssetsDefinitionCacheableData,\n resource_defs: Mapping[str, ResourceDefinition],\n poll_interval: float,\n poll_timeout: Optional[float] = None,\n) -> AssetsDefinition:\n metadata = cast(Mapping[str, Any], assets_defn_meta.extra_metadata)\n connector_id = cast(str, metadata["connector_id"])\n io_manager_key = cast(Optional[str], metadata["io_manager_key"])\n\n return _build_fivetran_assets(\n connector_id=connector_id,\n destination_tables=list(\n assets_defn_meta.keys_by_output_name.keys()\n if assets_defn_meta.keys_by_output_name\n else []\n ),\n asset_key_prefix=list(assets_defn_meta.key_prefix or []),\n metadata_by_table_name=cast(\n Dict[str, MetadataUserInput], assets_defn_meta.metadata_by_output_name\n ),\n io_manager_key=io_manager_key,\n table_to_asset_key_map=assets_defn_meta.keys_by_output_name,\n resource_defs=resource_defs,\n group_name=assets_defn_meta.group_name,\n poll_interval=poll_interval,\n poll_timeout=poll_timeout,\n )[0]\n\n\nclass FivetranInstanceCacheableAssetsDefinition(CacheableAssetsDefinition):\n def __init__(\n self,\n fivetran_resource_def: Union[FivetranResource, ResourceDefinition],\n key_prefix: Sequence[str],\n connector_to_group_fn: Optional[Callable[[str], Optional[str]]],\n connector_filter: Optional[Callable[[FivetranConnectionMetadata], bool]],\n connector_to_io_manager_key_fn: Optional[Callable[[str], Optional[str]]],\n connector_to_asset_key_fn: Optional[Callable[[FivetranConnectionMetadata, str], AssetKey]],\n poll_interval: float,\n poll_timeout: Optional[float],\n ):\n self._fivetran_resource_def = fivetran_resource_def\n self._fivetran_instance: FivetranResource = (\n fivetran_resource_def.process_config_and_initialize()\n if isinstance(fivetran_resource_def, FivetranResource)\n else fivetran_resource_def(build_init_resource_context())\n )\n\n self._key_prefix = key_prefix\n self._connector_to_group_fn = connector_to_group_fn\n self._connection_filter = 
connector_filter\n self._connector_to_io_manager_key_fn = connector_to_io_manager_key_fn\n self._connector_to_asset_key_fn: Callable[[FivetranConnectionMetadata, str], AssetKey] = (\n connector_to_asset_key_fn or (lambda _, table: AssetKey(path=table.split(".")))\n )\n self._poll_interval = poll_interval\n self._poll_timeout = poll_timeout\n\n contents = hashlib.sha1()\n contents.update(",".join(key_prefix).encode("utf-8"))\n if connector_filter:\n contents.update(inspect.getsource(connector_filter).encode("utf-8"))\n\n super().__init__(unique_id=f"fivetran-{contents.hexdigest()}")\n\n def _get_connectors(self) -> Sequence[FivetranConnectionMetadata]:\n output_connectors: List[FivetranConnectionMetadata] = []\n\n groups = self._fivetran_instance.make_request("GET", "groups")["items"]\n\n for group in groups:\n group_id = group["id"]\n\n connectors = self._fivetran_instance.make_request(\n "GET", f"groups/{group_id}/connectors"\n )["items"]\n for connector in connectors:\n connector_id = connector["id"]\n\n connector_name = connector["schema"]\n\n setup_state = connector.get("status", {}).get("setup_state")\n if setup_state and setup_state in ("incomplete", "broken"):\n continue\n\n connector_url = get_fivetran_connector_url(connector)\n\n schemas = self._fivetran_instance.make_request(\n "GET", f"connectors/{connector_id}/schemas"\n )\n\n output_connectors.append(\n FivetranConnectionMetadata(\n name=connector_name,\n connector_id=connector_id,\n connector_url=connector_url,\n schemas=schemas,\n )\n )\n\n return output_connectors\n\n def compute_cacheable_data(self) -> Sequence[AssetsDefinitionCacheableData]:\n asset_defn_data: List[AssetsDefinitionCacheableData] = []\n for connector in self._get_connectors():\n if not self._connection_filter or self._connection_filter(connector):\n table_to_asset_key = partial(self._connector_to_asset_key_fn, connector)\n asset_defn_data.append(\n connector.build_asset_defn_metadata(\n key_prefix=self._key_prefix,\n group_name=(\n self._connector_to_group_fn(connector.name)\n if self._connector_to_group_fn\n else None\n ),\n io_manager_key=(\n self._connector_to_io_manager_key_fn(connector.name)\n if self._connector_to_io_manager_key_fn\n else None\n ),\n table_to_asset_key_fn=table_to_asset_key,\n )\n )\n\n return asset_defn_data\n\n def build_definitions(\n self, data: Sequence[AssetsDefinitionCacheableData]\n ) -> Sequence[AssetsDefinition]:\n return [\n _build_fivetran_assets_from_metadata(\n meta,\n {"fivetran": self._fivetran_instance.get_resource_definition()},\n poll_interval=self._poll_interval,\n poll_timeout=self._poll_timeout,\n )\n for meta in data\n ]\n\n\ndef _clean_name(name: str) -> str:\n """Cleans an input to be a valid Dagster asset name."""\n return re.sub(r"[^a-z0-9]+", "_", name.lower())\n\n\n
[docs]def load_assets_from_fivetran_instance(\n fivetran: Union[FivetranResource, ResourceDefinition],\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n connector_to_group_fn: Optional[Callable[[str], Optional[str]]] = _clean_name,\n io_manager_key: Optional[str] = None,\n connector_to_io_manager_key_fn: Optional[Callable[[str], Optional[str]]] = None,\n connector_filter: Optional[Callable[[FivetranConnectionMetadata], bool]] = None,\n connector_to_asset_key_fn: Optional[\n Callable[[FivetranConnectionMetadata, str], AssetKey]\n ] = None,\n poll_interval: float = DEFAULT_POLL_INTERVAL,\n poll_timeout: Optional[float] = None,\n) -> CacheableAssetsDefinition:\n """Loads Fivetran connector assets from a configured FivetranResource instance. This fetches information\n about defined connectors at initialization time, and will error on workspace load if the Fivetran\n instance is not reachable.\n\n Args:\n fivetran (ResourceDefinition): A FivetranResource configured with the appropriate connection\n details.\n key_prefix (Optional[CoercibleToAssetKeyPrefix]): A prefix for the asset keys created.\n connector_to_group_fn (Optional[Callable[[str], Optional[str]]]): Function which returns an asset\n group name for a given Fivetran connector name. If None, no groups will be created. Defaults\n to a basic sanitization function.\n io_manager_key (Optional[str]): The IO manager key to use for all assets. Defaults to "io_manager".\n Use this if all assets should be loaded from the same source, otherwise use connector_to_io_manager_key_fn.\n connector_to_io_manager_key_fn (Optional[Callable[[str], Optional[str]]]): Function which returns an\n IO manager key for a given Fivetran connector name. When other ops are downstream of the loaded assets,\n the IOManager specified determines how the inputs to those ops are loaded. Defaults to "io_manager".\n connector_filter (Optional[Callable[[FivetranConnectorMetadata], bool]]): Optional function which takes\n in connector metadata and returns False if the connector should be excluded from the output assets.\n connector_to_asset_key_fn (Optional[Callable[[FivetranConnectorMetadata, str], AssetKey]]): Optional function\n which takes in connector metadata and a table name and returns an AssetKey for that table. Defaults to\n a function that generates an AssetKey matching the table name, split by ".".\n poll_interval (float): The time (in seconds) that will be waited between successive polls.\n poll_timeout (Optional[float]): The maximum time that will waited before this operation is\n timed out. By default, this will never time out.\n\n **Examples:**\n\n Loading all Fivetran connectors as assets:\n\n .. code-block:: python\n\n from dagster_fivetran import fivetran_resource, load_assets_from_fivetran_instance\n\n fivetran_instance = fivetran_resource.configured(\n {\n "api_key": "some_key",\n "api_secret": "some_secret",\n }\n )\n fivetran_assets = load_assets_from_fivetran_instance(fivetran_instance)\n\n Filtering the set of loaded connectors:\n\n .. 
code-block:: python\n\n from dagster_fivetran import fivetran_resource, load_assets_from_fivetran_instance\n\n fivetran_instance = fivetran_resource.configured(\n {\n "api_key": "some_key",\n "api_secret": "some_secret",\n }\n )\n fivetran_assets = load_assets_from_fivetran_instance(\n fivetran_instance,\n connector_filter=lambda meta: "snowflake" in meta.name,\n )\n """\n if isinstance(key_prefix, str):\n key_prefix = [key_prefix]\n key_prefix = check.list_param(key_prefix or [], "key_prefix", of_type=str)\n\n check.invariant(\n not io_manager_key or not connector_to_io_manager_key_fn,\n "Cannot specify both io_manager_key and connector_to_io_manager_key_fn",\n )\n if not connector_to_io_manager_key_fn:\n connector_to_io_manager_key_fn = lambda _: io_manager_key\n\n return FivetranInstanceCacheableAssetsDefinition(\n fivetran_resource_def=fivetran,\n key_prefix=key_prefix,\n connector_to_group_fn=connector_to_group_fn,\n connector_to_io_manager_key_fn=connector_to_io_manager_key_fn,\n connector_filter=connector_filter,\n connector_to_asset_key_fn=connector_to_asset_key_fn,\n poll_interval=poll_interval,\n poll_timeout=poll_timeout,\n )
\n
", "current_page_name": "_modules/dagster_fivetran/asset_defs", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_fivetran.asset_defs"}, "ops": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_fivetran.ops

\nfrom typing import Any, Dict, List, Optional\n\nfrom dagster import (\n    AssetKey,\n    Config,\n    In,\n    Nothing,\n    Out,\n    Output,\n    op,\n)\nfrom pydantic import Field\n\nfrom dagster_fivetran.resources import DEFAULT_POLL_INTERVAL, FivetranResource\nfrom dagster_fivetran.types import FivetranOutput\nfrom dagster_fivetran.utils import generate_materializations\n\n\nclass SyncConfig(Config):\n    connector_id: str = Field(\n        description=(\n            "The Fivetran Connector ID that this op will sync. You can retrieve this "\n            'value from the "Setup" tab of a given connector in the Fivetran UI.'\n        ),\n    )\n    poll_interval: float = Field(\n        default=DEFAULT_POLL_INTERVAL,\n        description="The time (in seconds) that will be waited between successive polls.",\n    )\n    poll_timeout: Optional[float] = Field(\n        default=None,\n        description=(\n            "The maximum time that will waited before this operation is timed out. By "\n            "default, this will never time out."\n        ),\n    )\n    yield_materializations: bool = Field(\n        default=True,\n        description=(\n            "If True, materializations corresponding to the results of the Fivetran sync will "\n            "be yielded when the op executes."\n        ),\n    )\n    asset_key_prefix: List[str] = Field(\n        default=["fivetran"],\n        description=(\n            "If provided and yield_materializations is True, these components will be used to "\n            "prefix the generated asset keys."\n        ),\n    )\n\n\n
[docs]@op(\n ins={"start_after": In(Nothing)},\n out=Out(\n FivetranOutput,\n description=(\n "Parsed json dictionary representing the details of the Fivetran connector after the"\n " sync successfully completes. See the [Fivetran API"\n " Docs](https://fivetran.com/docs/rest-api/connectors#retrieveconnectordetails) to see"\n " detailed information on this response."\n ),\n ),\n tags={"kind": "fivetran"},\n)\ndef fivetran_sync_op(config: SyncConfig, fivetran: FivetranResource) -> Any:\n """Executes a Fivetran sync for a given ``connector_id``, and polls until that sync\n completes, raising an error if it is unsuccessful. It outputs a FivetranOutput which contains\n the details of the Fivetran connector after the sync successfully completes, as well as details\n about which tables the sync updates.\n\n It requires the use of the :py:class:`~dagster_fivetran.fivetran_resource`, which allows it to\n communicate with the Fivetran API.\n\n Examples:\n .. code-block:: python\n\n from dagster import job\n from dagster_fivetran import fivetran_resource, fivetran_sync_op\n\n my_fivetran_resource = fivetran_resource.configured(\n {\n "api_key": {"env": "FIVETRAN_API_KEY"},\n "api_secret": {"env": "FIVETRAN_API_SECRET"},\n }\n )\n\n sync_foobar = fivetran_sync_op.configured({"connector_id": "foobar"}, name="sync_foobar")\n\n @job(resource_defs={"fivetran": my_fivetran_resource})\n def my_simple_fivetran_job():\n sync_foobar()\n\n @job(resource_defs={"fivetran": my_fivetran_resource})\n def my_composed_fivetran_job():\n final_foobar_state = sync_foobar(start_after=some_op())\n other_op(final_foobar_state)\n """\n fivetran_output = fivetran.sync_and_poll(\n connector_id=config.connector_id,\n poll_interval=config.poll_interval,\n poll_timeout=config.poll_timeout,\n )\n if config.yield_materializations:\n yield from generate_materializations(\n fivetran_output, asset_key_prefix=config.asset_key_prefix\n )\n yield Output(fivetran_output)
\n\n\nclass FivetranResyncConfig(SyncConfig):\n resync_parameters: Optional[Dict[str, Any]] = Field(\n None,\n description=(\n "Optional resync parameters to send in the payload to the Fivetran API. You can"\n " find an example resync payload here:"\n " https://fivetran.com/docs/rest-api/connectors#request_7"\n ),\n )\n\n\n@op(\n ins={"start_after": In(Nothing)},\n out=Out(\n FivetranOutput,\n description=(\n "Parsed json dictionary representing the details of the Fivetran connector after the"\n " resync successfully completes. See the [Fivetran API"\n " Docs](https://fivetran.com/docs/rest-api/connectors#retrieveconnectordetails) to see"\n " detailed information on this response."\n ),\n ),\n tags={"kind": "fivetran"},\n)\ndef fivetran_resync_op(\n config: FivetranResyncConfig,\n fivetran: FivetranResource,\n) -> Any:\n """Executes a Fivetran historical resync for a given ``connector_id``, and polls until that resync\n completes, raising an error if it is unsuccessful. It outputs a FivetranOutput which contains\n the details of the Fivetran connector after the resync successfully completes, as well as details\n about which tables the resync updates.\n\n It requires the use of the :py:class:`~dagster_fivetran.fivetran_resource`, which allows it to\n communicate with the Fivetran API.\n\n Examples:\n .. code-block:: python\n\n from dagster import job\n from dagster_fivetran import fivetran_resource, fivetran_resync_op\n\n my_fivetran_resource = fivetran_resource.configured(\n {\n "api_key": {"env": "FIVETRAN_API_KEY"},\n "api_secret": {"env": "FIVETRAN_API_SECRET"},\n }\n )\n\n sync_foobar = fivetran_resync_op.configured(\n {\n "connector_id": "foobar",\n "resync_parameters": {\n "schema_a": ["table_a", "table_b"],\n "schema_b": ["table_c"]\n }\n },\n name="sync_foobar"\n )\n\n @job(resource_defs={"fivetran": my_fivetran_resource})\n def my_simple_fivetran_job():\n sync_foobar()\n\n @job(resource_defs={"fivetran": my_fivetran_resource})\n def my_composed_fivetran_job():\n final_foobar_state = sync_foobar(start_after=some_op())\n other_op(final_foobar_state)\n """\n fivetran_output = fivetran.resync_and_poll(\n connector_id=config.connector_id,\n resync_parameters=config.resync_parameters,\n poll_interval=config.poll_interval,\n poll_timeout=config.poll_timeout,\n )\n if config.yield_materializations:\n asset_key_filter = (\n [\n AssetKey(config.asset_key_prefix + [schema, table])\n for schema, tables in config.resync_parameters.items()\n for table in tables\n ]\n if config.resync_parameters is not None\n else None\n )\n for mat in generate_materializations(\n fivetran_output, asset_key_prefix=config.asset_key_prefix\n ):\n if asset_key_filter is None or mat.asset_key in asset_key_filter:\n yield mat\n\n yield Output(fivetran_output)\n
", "current_page_name": "_modules/dagster_fivetran/ops", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_fivetran.ops"}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_fivetran.resources

\nimport datetime\nimport json\nimport logging\nimport time\nfrom typing import Any, Mapping, Optional, Sequence, Tuple\nfrom urllib.parse import urljoin\n\nimport requests\nfrom dagster import (\n    Failure,\n    InitResourceContext,\n    MetadataValue,\n    __version__,\n    _check as check,\n    get_dagster_logger,\n    resource,\n)\nfrom dagster._config.pythonic_config import ConfigurableResource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster._utils.cached_method import cached_method\nfrom dateutil import parser\nfrom pydantic import Field\nfrom requests.auth import HTTPBasicAuth\nfrom requests.exceptions import RequestException\n\nfrom dagster_fivetran.types import FivetranOutput\nfrom dagster_fivetran.utils import get_fivetran_connector_url, get_fivetran_logs_url\n\nFIVETRAN_API_BASE = "https://api.fivetran.com"\nFIVETRAN_API_VERSION_PATH = "v1/"\nFIVETRAN_CONNECTOR_PATH = "connectors/"\n\n# default polling interval (in seconds)\nDEFAULT_POLL_INTERVAL = 10\n\n\n
[docs]class FivetranResource(ConfigurableResource):\n """This class exposes methods on top of the Fivetran REST API."""\n\n api_key: str = Field(description="The Fivetran API key to use for this resource.")\n api_secret: str = Field(description="The Fivetran API secret to use for this resource.")\n disable_schedule_on_trigger: bool = Field(\n default=True,\n description=(\n "Specifies if you would like any connector that is sync'd using this "\n "resource to be automatically taken off its Fivetran schedule."\n ),\n )\n request_max_retries: int = Field(\n default=3,\n description=(\n "The maximum number of times requests to the Fivetran API should be retried "\n "before failing."\n ),\n )\n request_retry_delay: float = Field(\n default=0.25,\n description="Time (in seconds) to wait between each request retry.",\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @property\n def _auth(self) -> HTTPBasicAuth:\n return HTTPBasicAuth(self.api_key, self.api_secret)\n\n @property\n @cached_method\n def _log(self) -> logging.Logger:\n return get_dagster_logger()\n\n @property\n def api_base_url(self) -> str:\n return urljoin(FIVETRAN_API_BASE, FIVETRAN_API_VERSION_PATH)\n\n @property\n def api_connector_url(self) -> str:\n return urljoin(self.api_base_url, FIVETRAN_CONNECTOR_PATH)\n\n def make_connector_request(\n self, method: str, endpoint: str, data: Optional[str] = None\n ) -> Mapping[str, Any]:\n return self.make_request(method, urljoin(FIVETRAN_CONNECTOR_PATH, endpoint), data)\n\n def make_request(\n self, method: str, endpoint: str, data: Optional[str] = None\n ) -> Mapping[str, Any]:\n """Creates and sends a request to the desired Fivetran Connector API endpoint.\n\n Args:\n method (str): The http method to use for this request (e.g. "POST", "GET", "PATCH").\n endpoint (str): The Fivetran API endpoint to send this request to.\n data (Optional[str]): JSON-formatted data string to be included in the request.\n\n Returns:\n Dict[str, Any]: Parsed json data from the response to this request\n """\n url = urljoin(self.api_base_url, endpoint)\n headers = {\n "User-Agent": f"dagster-fivetran/{__version__}",\n "Content-Type": "application/json;version=2",\n }\n\n num_retries = 0\n while True:\n try:\n response = requests.request(\n method=method,\n url=url,\n headers=headers,\n auth=self._auth,\n data=data,\n )\n response.raise_for_status()\n resp_dict = response.json()\n return resp_dict["data"] if "data" in resp_dict else resp_dict\n except RequestException as e:\n self._log.error("Request to Fivetran API failed: %s", e)\n if num_retries == self.request_max_retries:\n break\n num_retries += 1\n time.sleep(self.request_retry_delay)\n\n raise Failure(f"Max retries ({self.request_max_retries}) exceeded with url: {url}.")\n\n def get_connector_details(self, connector_id: str) -> Mapping[str, Any]:\n """Gets details about a given connector from the Fivetran Connector API.\n\n Args:\n connector_id (str): The Fivetran Connector ID. You can retrieve this value from the\n "Setup" tab of a given connector in the Fivetran UI.\n\n Returns:\n Dict[str, Any]: Parsed json data from the response to this request\n """\n return self.make_connector_request(method="GET", endpoint=connector_id)\n\n def _assert_syncable_connector(self, connector_id: str):\n """Confirms that a given connector is eligible to sync. Will raise a Failure in the event that\n the connector is either paused or not fully setup.\n\n Args:\n connector_id (str): The Fivetran Connector ID. 
You can retrieve this value from the\n "Setup" tab of a given connector in the Fivetran UI.\n """\n connector_details = self.get_connector_details(connector_id)\n if connector_details["paused"]:\n raise Failure(f"Connector '{connector_id}' cannot be synced as it is currently paused.")\n if connector_details["status"]["setup_state"] != "connected":\n raise Failure(f"Connector '{connector_id}' cannot be synced as it has not been setup")\n\n def get_connector_sync_status(self, connector_id: str) -> Tuple[datetime.datetime, bool, str]:\n """Gets details about the status of the most recent Fivetran sync operation for a given\n connector.\n\n Args:\n connector_id (str): The Fivetran Connector ID. You can retrieve this value from the\n "Setup" tab of a given connector in the Fivetran UI.\n\n Returns:\n Tuple[datetime.datetime, bool, str]:\n Tuple representing the timestamp of the last completeded sync, if it succeeded, and\n the currently reported sync status.\n """\n connector_details = self.get_connector_details(connector_id)\n\n min_time_str = "0001-01-01 00:00:00+00"\n succeeded_at = parser.parse(connector_details["succeeded_at"] or min_time_str)\n failed_at = parser.parse(connector_details["failed_at"] or min_time_str)\n\n return (\n max(succeeded_at, failed_at),\n succeeded_at > failed_at,\n connector_details["status"]["sync_state"],\n )\n\n def update_connector(\n self, connector_id: str, properties: Optional[Mapping[str, Any]] = None\n ) -> Mapping[str, Any]:\n """Updates properties of a Fivetran Connector.\n\n Args:\n connector_id (str): The Fivetran Connector ID. You can retrieve this value from the\n "Setup" tab of a given connector in the Fivetran UI.\n properties (Dict[str, Any]): The properties to be updated. For a comprehensive list of\n properties, see the [Fivetran docs](https://fivetran.com/docs/rest-api/connectors#modifyaconnector).\n\n Returns:\n Dict[str, Any]: Parsed json data representing the API response.\n """\n return self.make_connector_request(\n method="PATCH", endpoint=connector_id, data=json.dumps(properties)\n )\n\n def update_schedule_type(\n self, connector_id: str, schedule_type: Optional[str] = None\n ) -> Mapping[str, Any]:\n """Updates the schedule type property of the connector to either "auto" or "manual".\n\n Args:\n connector_id (str): The Fivetran Connector ID. You can retrieve this value from the\n "Setup" tab of a given connector in the Fivetran UI.\n schedule_type (Optional[str]): Either "auto" (to turn the schedule on) or "manual" (to\n turn it off).\n\n Returns:\n Dict[str, Any]: Parsed json data representing the API response.\n """\n if schedule_type not in ["auto", "manual"]:\n check.failed(f"schedule_type must be either 'auto' or 'manual': got '{schedule_type}'")\n return self.update_connector(connector_id, properties={"schedule_type": schedule_type})\n\n def get_connector_schema_config(self, connector_id: str) -> Mapping[str, Any]:\n return self.make_connector_request("GET", endpoint=f"{connector_id}/schemas")\n\n def start_sync(self, connector_id: str) -> Mapping[str, Any]:\n """Initiates a sync of a Fivetran connector.\n\n Args:\n connector_id (str): The Fivetran Connector ID. 
You can retrieve this value from the\n "Setup" tab of a given connector in the Fivetran UI.\n\n Returns:\n Dict[str, Any]: Parsed json data representing the connector details API response after\n the sync is started.\n """\n if self.disable_schedule_on_trigger:\n self._log.info("Disabling Fivetran sync schedule.")\n self.update_schedule_type(connector_id, "manual")\n self._assert_syncable_connector(connector_id)\n self.make_connector_request(method="POST", endpoint=f"{connector_id}/force")\n connector_details = self.get_connector_details(connector_id)\n self._log.info(\n f"Sync initialized for connector_id={connector_id}. View this sync in the Fivetran UI: "\n + get_fivetran_connector_url(connector_details)\n )\n return connector_details\n\n def start_resync(\n self, connector_id: str, resync_parameters: Optional[Mapping[str, Sequence[str]]] = None\n ) -> Mapping[str, Any]:\n """Initiates a historical sync of all data for multiple schema tables within a Fivetran connector.\n\n Args:\n connector_id (str): The Fivetran Connector ID. You can retrieve this value from the\n "Setup" tab of a given connector in the Fivetran UI.\n resync_parameters (Optional[Dict[str, List[str]]]): Optional resync parameters to send to the Fivetran API.\n An example payload can be found here: https://fivetran.com/docs/rest-api/connectors#request_7\n\n Returns:\n Dict[str, Any]: Parsed json data representing the connector details API response after\n the resync is started.\n """\n if self.disable_schedule_on_trigger:\n self._log.info("Disabling Fivetran sync schedule.")\n self.update_schedule_type(connector_id, "manual")\n self._assert_syncable_connector(connector_id)\n self.make_connector_request(\n method="POST",\n endpoint=(\n f"{connector_id}/schemas/tables/resync"\n if resync_parameters is not None\n else f"{connector_id}/resync"\n ),\n data=json.dumps(resync_parameters) if resync_parameters is not None else None,\n )\n connector_details = self.get_connector_details(connector_id)\n self._log.info(\n f"Sync initialized for connector_id={connector_id}. View this resync in the Fivetran"\n " UI: "\n + get_fivetran_connector_url(connector_details)\n )\n return connector_details\n\n def poll_sync(\n self,\n connector_id: str,\n initial_last_sync_completion: datetime.datetime,\n poll_interval: float = DEFAULT_POLL_INTERVAL,\n poll_timeout: Optional[float] = None,\n ) -> Mapping[str, Any]:\n """Given a Fivetran connector and the timestamp at which the previous sync completed, poll\n until the next sync completes.\n\n The previous sync completion time is necessary because the only way to tell when a sync\n completes is when this value changes.\n\n Args:\n connector_id (str): The Fivetran Connector ID. You can retrieve this value from the\n "Setup" tab of a given connector in the Fivetran UI.\n initial_last_sync_completion (datetime.datetime): The timestamp of the last completed sync\n (successful or otherwise) for this connector, prior to running this method.\n poll_interval (float): The time (in seconds) that will be waited between successive polls.\n poll_timeout (float): The maximum time that will waited before this operation is timed\n out. By default, this will never time out.\n\n Returns:\n Dict[str, Any]: Parsed json data representing the API response.\n """\n poll_start = datetime.datetime.now()\n while True:\n (\n curr_last_sync_completion,\n curr_last_sync_succeeded,\n curr_sync_state,\n ) = self.get_connector_sync_status(connector_id)\n self._log.info(f"Polled '{connector_id}'. 
Status: [{curr_sync_state}]")\n\n if curr_last_sync_completion > initial_last_sync_completion:\n break\n\n if poll_timeout and datetime.datetime.now() > poll_start + datetime.timedelta(\n seconds=poll_timeout\n ):\n raise Failure(\n f"Sync for connector '{connector_id}' timed out after "\n f"{datetime.datetime.now() - poll_start}."\n )\n\n # Sleep for the configured time interval before polling again.\n time.sleep(poll_interval)\n\n connector_details = self.get_connector_details(connector_id)\n if not curr_last_sync_succeeded:\n raise Failure(\n f"Sync for connector '{connector_id}' failed!",\n metadata={\n "connector_details": MetadataValue.json(connector_details),\n "log_url": MetadataValue.url(get_fivetran_logs_url(connector_details)),\n },\n )\n return connector_details\n\n def sync_and_poll(\n self,\n connector_id: str,\n poll_interval: float = DEFAULT_POLL_INTERVAL,\n poll_timeout: Optional[float] = None,\n ) -> FivetranOutput:\n """Initializes a sync operation for the given connector, and polls until it completes.\n\n Args:\n connector_id (str): The Fivetran Connector ID. You can retrieve this value from the\n "Setup" tab of a given connector in the Fivetran UI.\n poll_interval (float): The time (in seconds) that will be waited between successive polls.\n poll_timeout (float): The maximum time that will waited before this operation is timed\n out. By default, this will never time out.\n\n Returns:\n :py:class:`~FivetranOutput`:\n Object containing details about the connector and the tables it updates\n """\n schema_config = self.get_connector_schema_config(connector_id)\n init_last_sync_timestamp, _, _ = self.get_connector_sync_status(connector_id)\n self.start_sync(connector_id)\n final_details = self.poll_sync(\n connector_id,\n init_last_sync_timestamp,\n poll_interval=poll_interval,\n poll_timeout=poll_timeout,\n )\n return FivetranOutput(connector_details=final_details, schema_config=schema_config)\n\n def resync_and_poll(\n self,\n connector_id: str,\n poll_interval: float = DEFAULT_POLL_INTERVAL,\n poll_timeout: Optional[float] = None,\n resync_parameters: Optional[Mapping[str, Sequence[str]]] = None,\n ) -> FivetranOutput:\n """Initializes a historical resync operation for the given connector, and polls until it completes.\n\n Args:\n connector_id (str): The Fivetran Connector ID. You can retrieve this value from the\n "Setup" tab of a given connector in the Fivetran UI.\n resync_parameters (Dict[str, List[str]]): The payload to send to the Fivetran API.\n This should be a dictionary with schema names as the keys and a list of tables\n to resync as the values.\n poll_interval (float): The time (in seconds) that will be waited between successive polls.\n poll_timeout (float): The maximum time that will waited before this operation is timed\n out. By default, this will never time out.\n\n Returns:\n :py:class:`~FivetranOutput`:\n Object containing details about the connector and the tables it updates\n """\n schema_config = self.get_connector_schema_config(connector_id)\n init_last_sync_timestamp, _, _ = self.get_connector_sync_status(connector_id)\n self.start_resync(connector_id, resync_parameters)\n final_details = self.poll_sync(\n connector_id,\n init_last_sync_timestamp,\n poll_interval=poll_interval,\n poll_timeout=poll_timeout,\n )\n return FivetranOutput(connector_details=final_details, schema_config=schema_config)
\n\n\n
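A minimal sketch of calling ``sync_and_poll`` directly from an op rather than going through ``fivetran_sync_op`` (the connector ID and environment variable names below are placeholders):

.. code-block:: python

    from dagster import EnvVar, job, op
    from dagster_fivetran import FivetranResource

    # Placeholder credentials, resolved from environment variables at run time.
    fivetran_instance = FivetranResource(
        api_key=EnvVar("FIVETRAN_API_KEY"),
        api_secret=EnvVar("FIVETRAN_API_SECRET"),
    )

    @op
    def sync_my_connector(fivetran: FivetranResource):
        # "my_connector_id" is a placeholder Fivetran connector ID.
        output = fivetran.sync_and_poll("my_connector_id", poll_timeout=600)
        return output.connector_details

    @job(resource_defs={"fivetran": fivetran_instance})
    def manual_fivetran_sync_job():
        sync_my_connector()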
[docs]@dagster_maintained_resource\n@resource(config_schema=FivetranResource.to_config_schema())\ndef fivetran_resource(context: InitResourceContext) -> FivetranResource:\n """This resource allows users to programatically interface with the Fivetran REST API to launch\n syncs and monitor their progress. This currently implements only a subset of the functionality\n exposed by the API.\n\n For a complete set of documentation on the Fivetran REST API, including expected response JSON\n schemae, see the `Fivetran API Docs <https://fivetran.com/docs/rest-api/connectors>`_.\n\n To configure this resource, we recommend using the `configured\n <https://docs.dagster.io/concepts/configuration/configured>`_ method.\n\n **Examples:**\n\n .. code-block:: python\n\n from dagster import job\n from dagster_fivetran import fivetran_resource\n\n my_fivetran_resource = fivetran_resource.configured(\n {\n "api_key": {"env": "FIVETRAN_API_KEY"},\n "api_secret": {"env": "FIVETRAN_API_SECRET"},\n }\n )\n\n @job(resource_defs={"fivetran":my_fivetran_resource})\n def my_fivetran_job():\n ...\n\n """\n return FivetranResource.from_resource_context(context)
\n
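For Fivetran API endpoints that are not wrapped by a dedicated method, ``make_request`` can be called directly. A small sketch with placeholder credentials:

.. code-block:: python

    from dagster_fivetran import FivetranResource

    # Placeholder credentials; in practice these would come from the environment.
    fivetran = FivetranResource(api_key="some_key", api_secret="some_secret")

    # GET https://api.fivetran.com/v1/groups -- returns the parsed "data" payload,
    # the same call the asset loader above uses to enumerate connector groups.
    groups = fivetran.make_request("GET", "groups")["items"]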
", "current_page_name": "_modules/dagster_fivetran/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_fivetran.resources"}}, "dagster_gcp": {"bigquery": {"io_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_gcp.bigquery.io_manager

\nfrom abc import abstractmethod\nfrom contextlib import contextmanager\nfrom typing import Generator, Optional, Sequence, Type, cast\n\nfrom dagster import IOManagerDefinition, OutputContext, io_manager\nfrom dagster._annotations import experimental\nfrom dagster._config.pythonic_config import (\n    ConfigurableIOManagerFactory,\n)\nfrom dagster._core.storage.db_io_manager import (\n    DbClient,\n    DbIOManager,\n    DbTypeHandler,\n    TablePartitionDimension,\n    TableSlice,\n    TimeWindow,\n)\nfrom dagster._core.storage.io_manager import dagster_maintained_io_manager\nfrom google.api_core.exceptions import NotFound\nfrom google.cloud import bigquery\nfrom pydantic import Field\n\nfrom .utils import setup_gcp_creds\n\nBIGQUERY_DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"\n\n\n
[docs]@experimental\ndef build_bigquery_io_manager(\n type_handlers: Sequence[DbTypeHandler], default_load_type: Optional[Type] = None\n) -> IOManagerDefinition:\n """Builds an I/O manager definition that reads inputs from and writes outputs to BigQuery.\n\n Args:\n type_handlers (Sequence[DbTypeHandler]): Each handler defines how to translate between\n slices of BigQuery tables and an in-memory type - e.g. a Pandas DataFrame.\n If only one DbTypeHandler is provided, it will be used as the default_load_type.\n default_load_type (Type): When an input has no type annotation, load it as this type.\n\n Returns:\n IOManagerDefinition\n\n Examples:\n .. code-block:: python\n\n from dagster_gcp import build_bigquery_io_manager\n from dagster_bigquery_pandas import BigQueryPandasTypeHandler\n from dagster import Definitions\n\n @asset(\n key_prefix=["my_dataset"] # my_dataset will be used as the dataset in BigQuery\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n bigquery_io_manager = build_bigquery_io_manager([BigQueryPandasTypeHandler()])\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "io_manager": bigquery_io_manager.configured({\n "project" : {"env": "GCP_PROJECT"}\n })\n }\n )\n\n You can tell Dagster in which dataset to create tables by setting the ``dataset`` configuration value.\n If you do not provide a dataset as configuration to the I/O manager, Dagster will determine a dataset based\n on the assets and ops using the I/O Manager. For assets, the dataset will be determined from the asset key,\n as shown in the above example. The final prefix before the asset name will be used as the dataset. For example,\n if the asset ``my_table`` had the key prefix ``["gcp", "bigquery", "my_dataset"]``, the dataset ``my_dataset`` will be\n used. For ops, the dataset can be specified by including a `schema` entry in output metadata. If ``schema`` is\n not provided via config or on the asset/op, ``public`` will be used for the dataset.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_dataset"})}\n )\n def make_my_table() -> pd.DataFrame:\n # the returned value will be stored at my_dataset.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata ``columns`` to the\n :py:class:`~dagster.In` or :py:class:`~dagster.AssetIn`.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pd.DataFrame) -> pd.DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n If you cannot upload a file to your Dagster deployment, or otherwise cannot\n `authenticate with GCP <https://cloud.google.com/docs/authentication/provide-credentials-adc>`_\n via a standard method, you can provide a service account key as the ``gcp_credentials`` configuration.\n Dagster willstore this key in a temporary file and set ``GOOGLE_APPLICATION_CREDENTIALS`` to point to the file.\n After the run completes, the file will be deleted, and ``GOOGLE_APPLICATION_CREDENTIALS`` will be\n unset. The key must be base64 encoded to avoid issues with newlines in the keys. 
You can retrieve\n the base64 encoded with this shell command: ``cat $GOOGLE_APPLICATION_CREDENTIALS | base64``\n """\n\n @dagster_maintained_io_manager\n @io_manager(config_schema=BigQueryIOManager.to_config_schema())\n def bigquery_io_manager(init_context):\n """I/O Manager for storing outputs in a BigQuery database.\n\n Assets will be stored in the dataset and table name specified by their AssetKey.\n Subsequent materializations of an asset will overwrite previous materializations of that asset.\n Op outputs will be stored in the dataset specified by output metadata (defaults to public) in a\n table of the name of the output.\n\n Note that the BigQuery config is mapped to the DB IO manager table hierarchy as follows:\n BigQuery DB IO\n * project -> database\n * dataset -> schema\n * table -> table\n """\n mgr = DbIOManager(\n type_handlers=type_handlers,\n db_client=BigQueryClient(),\n io_manager_name="BigQueryIOManager",\n database=init_context.resource_config["project"],\n schema=init_context.resource_config.get("dataset"),\n default_load_type=default_load_type,\n )\n if init_context.resource_config.get("gcp_credentials"):\n with setup_gcp_creds(init_context.resource_config.get("gcp_credentials")):\n yield mgr\n else:\n yield mgr\n\n return bigquery_io_manager
\n\n\n
[docs]class BigQueryIOManager(ConfigurableIOManagerFactory):\n """Base class for an I/O manager definition that reads inputs from and writes outputs to BigQuery.\n\n Examples:\n .. code-block:: python\n\n from dagster_gcp import BigQueryIOManager\n from dagster_bigquery_pandas import BigQueryPandasTypeHandler\n from dagster import Definitions, EnvVar\n\n class MyBigQueryIOManager(BigQueryIOManager):\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [BigQueryPandasTypeHandler()]\n\n @asset(\n key_prefix=["my_dataset"] # my_dataset will be used as the dataset in BigQuery\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "io_manager": MyBigQueryIOManager(project=EnvVar("GCP_PROJECT"))\n }\n )\n\n You can tell Dagster in which dataset to create tables by setting the ``dataset`` configuration value.\n If you do not provide a dataset as configuration to the I/O manager, Dagster will determine a dataset based\n on the assets and ops using the I/O Manager. For assets, the dataset will be determined from the asset key,\n as shown in the above example. The final prefix before the asset name will be used as the dataset. For example,\n if the asset ``my_table`` had the key prefix ``["gcp", "bigquery", "my_dataset"]``, the dataset ``my_dataset`` will be\n used. For ops, the dataset can be specified by including a ``schema`` entry in output metadata. If ``schema`` is\n not provided via config or on the asset/op, ``public`` will be used for the dataset.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_dataset"})}\n )\n def make_my_table() -> pd.DataFrame:\n # the returned value will be stored at my_dataset.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata ``columns`` to the\n :py:class:`~dagster.In` or :py:class:`~dagster.AssetIn`.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pd.DataFrame) -> pd.DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n If you cannot upload a file to your Dagster deployment, or otherwise cannot\n `authenticate with GCP <https://cloud.google.com/docs/authentication/provide-credentials-adc>`_\n via a standard method, you can provide a service account key as the ``gcp_credentials`` configuration.\n Dagster will store this key in a temporary file and set ``GOOGLE_APPLICATION_CREDENTIALS`` to point to the file.\n After the run completes, the file will be deleted, and ``GOOGLE_APPLICATION_CREDENTIALS`` will be\n unset. The key must be base64 encoded to avoid issues with newlines in the keys. You can retrieve\n the base64 encoded with this shell command: ``cat $GOOGLE_APPLICATION_CREDENTIALS | base64``\n """\n\n project: str = Field(description="The GCP project to use.")\n dataset: Optional[str] = Field(\n default=None,\n description=(\n "Name of the BigQuery dataset to use. If not provided, the last prefix before"\n " the asset name will be used."\n ),\n )\n location: Optional[str] = Field(\n default=None,\n description=(\n "The GCP location. Note: When using PySpark DataFrames, the default"\n " location of the project will be used. A custom location can be specified in"\n " your SparkSession configuration."\n ),\n )\n gcp_credentials: Optional[str] = Field(\n default=None,\n description=(\n "GCP authentication credentials. 
If provided, a temporary file will be created"\n " with the credentials and ``GOOGLE_APPLICATION_CREDENTIALS`` will be set to the"\n " temporary file. To avoid issues with newlines in the keys, you must base64"\n " encode the key. You can retrieve the base64 encoded key with this shell"\n " command: ``cat $GOOGLE_AUTH_CREDENTIALS | base64``"\n ),\n )\n temporary_gcs_bucket: Optional[str] = Field(\n default=None,\n description=(\n "When using PySpark DataFrames, optionally specify a temporary GCS bucket to"\n " store data. If not provided, data will be directly written to BigQuery."\n ),\n )\n timeout: Optional[float] = Field(\n default=None,\n description=(\n "When using Pandas DataFrames, optionally specify a timeout for the BigQuery"\n " queries (loading and reading from tables)."\n ),\n )\n\n @staticmethod\n @abstractmethod\n def type_handlers() -> Sequence[DbTypeHandler]: ...\n\n @staticmethod\n def default_load_type() -> Optional[Type]:\n return None\n\n def create_io_manager(self, context) -> Generator:\n mgr = DbIOManager(\n db_client=BigQueryClient(),\n io_manager_name="BigQueryIOManager",\n database=self.project,\n schema=self.dataset,\n type_handlers=self.type_handlers(),\n default_load_type=self.default_load_type(),\n )\n if self.gcp_credentials:\n with setup_gcp_creds(self.gcp_credentials):\n yield mgr\n else:\n yield mgr
\n\n\nclass BigQueryClient(DbClient):\n @staticmethod\n def delete_table_slice(context: OutputContext, table_slice: TableSlice, connection) -> None:\n try:\n connection.query(_get_cleanup_statement(table_slice)).result()\n except NotFound:\n # table doesn't exist yet, so ignore the error\n pass\n\n @staticmethod\n def get_select_statement(table_slice: TableSlice) -> str:\n col_str = ", ".join(table_slice.columns) if table_slice.columns else "*"\n\n if table_slice.partition_dimensions and len(table_slice.partition_dimensions) > 0:\n query = (\n f"SELECT {col_str} FROM"\n f" `{table_slice.database}.{table_slice.schema}.{table_slice.table}` WHERE\\n"\n )\n return query + _partition_where_clause(table_slice.partition_dimensions)\n else:\n return f"""SELECT {col_str} FROM `{table_slice.database}.{table_slice.schema}.{table_slice.table}`"""\n\n @staticmethod\n def ensure_schema_exists(context: OutputContext, table_slice: TableSlice, connection) -> None:\n connection.query(f"CREATE SCHEMA IF NOT EXISTS {table_slice.schema}").result()\n\n @staticmethod\n @contextmanager\n def connect(context, _):\n conn = bigquery.Client(\n project=context.resource_config.get("project"),\n location=context.resource_config.get("location"),\n )\n\n yield conn\n\n\ndef _get_cleanup_statement(table_slice: TableSlice) -> str:\n """Returns a SQL statement that deletes data in the given table to make way for the output data\n being written.\n """\n if table_slice.partition_dimensions and len(table_slice.partition_dimensions) > 0:\n query = (\n f"DELETE FROM `{table_slice.database}.{table_slice.schema}.{table_slice.table}` WHERE\\n"\n )\n return query + _partition_where_clause(table_slice.partition_dimensions)\n else:\n return f"TRUNCATE TABLE `{table_slice.database}.{table_slice.schema}.{table_slice.table}`"\n\n\ndef _partition_where_clause(partition_dimensions: Sequence[TablePartitionDimension]) -> str:\n return " AND\\n".join(\n (\n _time_window_where_clause(partition_dimension)\n if isinstance(partition_dimension.partitions, TimeWindow)\n else _static_where_clause(partition_dimension)\n )\n for partition_dimension in partition_dimensions\n )\n\n\ndef _time_window_where_clause(table_partition: TablePartitionDimension) -> str:\n partition = cast(TimeWindow, table_partition.partitions)\n start_dt, end_dt = partition\n start_dt_str = start_dt.strftime(BIGQUERY_DATETIME_FORMAT)\n end_dt_str = end_dt.strftime(BIGQUERY_DATETIME_FORMAT)\n return f"""{table_partition.partition_expr} >= '{start_dt_str}' AND {table_partition.partition_expr} < '{end_dt_str}'"""\n\n\ndef _static_where_clause(table_partition: TablePartitionDimension) -> str:\n partitions = ", ".join(f"'{partition}'" for partition in table_partition.partitions)\n return f"""{table_partition.partition_expr} in ({partitions})"""\n
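To make the partitioning helpers above concrete, this is approximately the SQL they produce for a single time-window partition dimension (project, dataset, table, and column names are illustrative):

.. code-block:: python

    # Approximate output of _get_cleanup_statement and get_select_statement for a
    # TableSlice partitioned on an "event_time" column (illustrative names only).
    cleanup_sql = (
        "DELETE FROM `my_project.my_dataset.my_table` WHERE\n"
        "event_time >= '2023-01-01 00:00:00' AND event_time < '2023-01-02 00:00:00'"
    )
    select_sql = (
        "SELECT * FROM `my_project.my_dataset.my_table` WHERE\n"
        "event_time >= '2023-01-01 00:00:00' AND event_time < '2023-01-02 00:00:00'"
    )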
", "current_page_name": "_modules/dagster_gcp/bigquery/io_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_gcp.bigquery.io_manager"}, "ops": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_gcp.bigquery.ops

\nimport hashlib\n\nfrom dagster import (\n    In,\n    List,\n    Nothing,\n    Out,\n    _check as check,\n    op,\n)\nfrom dagster_pandas import DataFrame\nfrom google.cloud.bigquery.encryption_configuration import EncryptionConfiguration\nfrom google.cloud.bigquery.job import LoadJobConfig, QueryJobConfig\nfrom google.cloud.bigquery.table import TimePartitioning\n\nfrom .configs import (\n    define_bigquery_create_dataset_config,\n    define_bigquery_delete_dataset_config,\n    define_bigquery_load_config,\n    define_bigquery_query_config,\n)\nfrom .types import BigQueryLoadSource\n\n_START = "start"\n\n\ndef _preprocess_config(cfg):\n    destination_encryption_configuration = cfg.get("destination_encryption_configuration")\n    time_partitioning = cfg.get("time_partitioning")\n\n    if destination_encryption_configuration is not None:\n        cfg["destination_encryption_configuration"] = EncryptionConfiguration(\n            kms_key_name=destination_encryption_configuration\n        )\n\n    if time_partitioning is not None:\n        cfg["time_partitioning"] = TimePartitioning(**time_partitioning)\n\n    return cfg\n\n\n
[docs]def bq_op_for_queries(sql_queries):\n """Executes BigQuery SQL queries.\n\n Expects a BQ client to be provisioned in resources as context.resources.bigquery.\n """\n sql_queries = check.list_param(sql_queries, "sql queries", of_type=str)\n m = hashlib.sha1()\n for query in sql_queries:\n m.update(query.encode("utf-8"))\n hash_str = m.hexdigest()[:10]\n name = f"bq_op_{hash_str}"\n\n @op(\n name=name,\n ins={_START: In(Nothing)},\n out=Out(List[DataFrame]),\n config_schema=define_bigquery_query_config(),\n required_resource_keys={"bigquery"},\n tags={"kind": "sql", "sql": "\\n".join(sql_queries)},\n )\n def _bq_fn(context):\n query_job_config = _preprocess_config(context.op_config.get("query_job_config", {}))\n\n # Retrieve results as pandas DataFrames\n results = []\n for sql_query in sql_queries:\n # We need to construct a new QueryJobConfig for each query.\n # See: https://bit.ly/2VjD6sl\n cfg = QueryJobConfig(**query_job_config) if query_job_config else None\n context.log.info(\n "executing query %s with config: %s"\n % (sql_query, cfg.to_api_repr() if cfg else "(no config provided)")\n )\n results.append(\n context.resources.bigquery.query(sql_query, job_config=cfg).to_dataframe()\n )\n\n return results\n\n return _bq_fn
\n\n\nBIGQUERY_LOAD_CONFIG = define_bigquery_load_config()\n\n\n
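A minimal sketch of wiring an op produced by ``bq_op_for_queries`` into a job (the dataset and table names are placeholders; the op name is derived from a hash of the queries):

.. code-block:: python

    from dagster import job
    from dagster_gcp import bigquery_resource, bq_op_for_queries

    # Placeholder queries; each result is returned as a pandas DataFrame.
    run_queries = bq_op_for_queries(
        [
            "SELECT * FROM my_dataset.table_a",
            "SELECT * FROM my_dataset.table_b",
        ]
    )

    @job(resource_defs={"bigquery": bigquery_resource})
    def my_bigquery_job():
        run_queries()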
[docs]@op(\n ins={"paths": In(List[str])},\n out=Out(Nothing),\n config_schema=BIGQUERY_LOAD_CONFIG,\n required_resource_keys={"bigquery"},\n)\ndef import_gcs_paths_to_bq(context, paths):\n return _execute_load_in_source(context, paths, BigQueryLoadSource.GCS)
\n\n\n
[docs]@op(\n ins={"df": In(DataFrame)},\n out=Out(Nothing),\n config_schema=BIGQUERY_LOAD_CONFIG,\n required_resource_keys={"bigquery"},\n)\ndef import_df_to_bq(context, df):\n return _execute_load_in_source(context, df, BigQueryLoadSource.DataFrame)
\n\n\n
[docs]@op(\n ins={"path": In(str)},\n out=Out(Nothing),\n config_schema=BIGQUERY_LOAD_CONFIG,\n required_resource_keys={"bigquery"},\n)\ndef import_file_to_bq(context, path):\n return _execute_load_in_source(context, path, BigQueryLoadSource.File)
\n\n\ndef _execute_load_in_source(context, source, source_name):\n destination = context.op_config.get("destination")\n load_job_config = _preprocess_config(context.op_config.get("load_job_config", {}))\n cfg = LoadJobConfig(**load_job_config) if load_job_config else None\n\n context.log.info(\n "executing BQ load with config: %s for source %s"\n % (cfg.to_api_repr() if cfg else "(no config provided)", source)\n )\n\n if source_name == BigQueryLoadSource.DataFrame:\n context.resources.bigquery.load_table_from_dataframe(\n source, destination, job_config=cfg\n ).result()\n\n # Load from file. See: https://cloud.google.com/bigquery/docs/loading-data-local\n elif source_name == BigQueryLoadSource.File:\n with open(source, "rb") as file_obj:\n context.resources.bigquery.load_table_from_file(\n file_obj, destination, job_config=cfg\n ).result()\n\n # Load from GCS. See: https://cloud.google.com/bigquery/docs/loading-data-cloud-storage\n elif source_name == BigQueryLoadSource.GCS:\n context.resources.bigquery.load_table_from_uri(source, destination, job_config=cfg).result()\n\n\n
[docs]@op(\n ins={_START: In(Nothing)},\n config_schema=define_bigquery_create_dataset_config(),\n required_resource_keys={"bigquery"},\n)\ndef bq_create_dataset(context):\n """BigQuery Create Dataset.\n\n This op encapsulates creating a BigQuery dataset.\n\n Expects a BQ client to be provisioned in resources as context.resources.bigquery.\n """\n (dataset, exists_ok) = [context.op_config.get(k) for k in ("dataset", "exists_ok")]\n context.log.info("executing BQ create_dataset for dataset %s" % (dataset))\n context.resources.bigquery.create_dataset(dataset, exists_ok)
\n\n\n
[docs]@op(\n ins={_START: In(Nothing)},\n config_schema=define_bigquery_delete_dataset_config(),\n required_resource_keys={"bigquery"},\n)\ndef bq_delete_dataset(context):\n """BigQuery Delete Dataset.\n\n This op encapsulates deleting a BigQuery dataset.\n\n Expects a BQ client to be provisioned in resources as context.resources.bigquery.\n """\n (dataset, delete_contents, not_found_ok) = [\n context.op_config.get(k) for k in ("dataset", "delete_contents", "not_found_ok")\n ]\n\n context.log.info("executing BQ delete_dataset for dataset %s" % dataset)\n\n context.resources.bigquery.delete_dataset(\n dataset, delete_contents=delete_contents, not_found_ok=not_found_ok\n )
\n
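A minimal sketch of configuring ``bq_create_dataset`` for a job (the dataset name is a placeholder; ``bq_delete_dataset`` is configured the same way):

.. code-block:: python

    from dagster import job
    from dagster_gcp import bigquery_resource, bq_create_dataset

    # Placeholder dataset name supplied via op config.
    create_my_dataset = bq_create_dataset.configured(
        {"dataset": "my_dataset", "exists_ok": True}, name="create_my_dataset"
    )

    @job(resource_defs={"bigquery": bigquery_resource})
    def bootstrap_dataset_job():
        create_my_dataset()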
", "current_page_name": "_modules/dagster_gcp/bigquery/ops", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_gcp.bigquery.ops"}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_gcp.bigquery.resources

\nfrom contextlib import contextmanager\nfrom typing import Any, Iterator, Optional\n\nfrom dagster import ConfigurableResource, IAttachDifferentObjectToOpContext, resource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom google.cloud import bigquery\nfrom pydantic import Field\n\nfrom .utils import setup_gcp_creds\n\n\n
[docs]class BigQueryResource(ConfigurableResource, IAttachDifferentObjectToOpContext):\n """Resource for interacting with Google BigQuery.\n\n Examples:\n .. code-block:: python\n\n from dagster import Definitions, asset\n from dagster_gcp import BigQueryResource\n\n @asset\n def my_table(bigquery: BigQueryResource):\n with bigquery.get_client() as client:\n client.query("SELECT * FROM my_dataset.my_table")\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "bigquery": BigQueryResource(project="my-project")\n }\n )\n """\n\n project: Optional[str] = Field(\n default=None,\n description=(\n "Project ID for the project which the client acts on behalf of. Will be passed when"\n " creating a dataset / job. If not passed, falls back to the default inferred from the"\n " environment."\n ),\n )\n\n location: Optional[str] = Field(\n default=None,\n description="Default location for jobs / datasets / tables.",\n )\n\n gcp_credentials: Optional[str] = Field(\n default=None,\n description=(\n "GCP authentication credentials. If provided, a temporary file will be created"\n " with the credentials and ``GOOGLE_APPLICATION_CREDENTIALS`` will be set to the"\n " temporary file. To avoid issues with newlines in the keys, you must base64"\n " encode the key. You can retrieve the base64 encoded key with this shell"\n " command: ``cat $GOOGLE_AUTH_CREDENTIALS | base64``"\n ),\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @contextmanager\n def get_client(self) -> Iterator[bigquery.Client]:\n """Context manager to create a BigQuery Client.\n\n Examples:\n .. code-block:: python\n\n from dagster import asset\n from dagster_gcp import BigQueryResource\n\n @asset\n def my_table(bigquery: BigQueryResource):\n with bigquery.get_client() as client:\n client.query("SELECT * FROM my_dataset.my_table")\n """\n if self.gcp_credentials:\n with setup_gcp_creds(self.gcp_credentials):\n yield bigquery.Client(project=self.project, location=self.location)\n\n else:\n yield bigquery.Client(project=self.project, location=self.location)\n\n def get_object_to_set_on_execution_context(self) -> Any:\n with self.get_client() as client:\n yield client
\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n config_schema=BigQueryResource.to_config_schema(),\n description="Dagster resource for connecting to BigQuery",\n)\ndef bigquery_resource(context):\n bq_resource = BigQueryResource.from_resource_context(context)\n with bq_resource.get_client() as client:\n yield client
\n
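The Pythonic ``BigQueryResource`` examples above carry over to the legacy ``bigquery_resource`` as well; a brief sketch (the project ID is a placeholder):

.. code-block:: python

    from dagster import job, op
    from dagster_gcp import bigquery_resource

    @op(required_resource_keys={"bigquery"})
    def run_query(context):
        # The resource yields a google.cloud.bigquery.Client.
        return len(list(context.resources.bigquery.query("SELECT 1").result()))

    @job(resource_defs={"bigquery": bigquery_resource.configured({"project": "my-project"})})
    def legacy_bigquery_job():
        run_query()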
", "current_page_name": "_modules/dagster_gcp/bigquery/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_gcp.bigquery.resources"}, "types": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_gcp.bigquery.types

\nimport re\nfrom enum import Enum as PyEnum\n\nfrom dagster import Enum, EnumValue\nfrom dagster._config import ConfigScalar, ConfigScalarKind, PostProcessingError\nfrom google.cloud.bigquery.job import (\n    CreateDisposition,\n    Encoding,\n    QueryPriority,\n    SchemaUpdateOption,\n    SourceFormat,\n    WriteDisposition,\n)\n\n\nclass BigQueryLoadSource(PyEnum):\n    DataFrame = "DATA_FRAME"\n    GCS = "GCS"\n    File = "FILE"\n\n\nBQCreateDisposition = Enum(\n    name="BQCreateDisposition",\n    enum_values=[\n        EnumValue(CreateDisposition.CREATE_IF_NEEDED),\n        EnumValue(CreateDisposition.CREATE_NEVER),\n    ],\n)\n\nBQPriority = Enum(\n    name="BQPriority",\n    enum_values=[EnumValue(QueryPriority.BATCH), EnumValue(QueryPriority.INTERACTIVE)],\n)\n\nBQSchemaUpdateOption = Enum(\n    name="BQSchemaUpdateOption",\n    enum_values=[\n        EnumValue(\n            SchemaUpdateOption.ALLOW_FIELD_ADDITION,\n            description="Allow adding a nullable field to the schema.",\n        ),\n        EnumValue(\n            SchemaUpdateOption.ALLOW_FIELD_RELAXATION,\n            description="Allow relaxing a required field in the original schema to nullable.",\n        ),\n    ],\n)\n\nBQWriteDisposition = Enum(\n    name="BQWriteDisposition",\n    enum_values=[\n        EnumValue(WriteDisposition.WRITE_APPEND),\n        EnumValue(WriteDisposition.WRITE_EMPTY),\n        EnumValue(WriteDisposition.WRITE_TRUNCATE),\n    ],\n)\n\nBQEncoding = Enum(\n    name="BQEncoding", enum_values=[EnumValue(Encoding.ISO_8859_1), EnumValue(Encoding.UTF_8)]\n)\n\nBQSourceFormat = Enum(\n    name="BQSourceFormat",\n    enum_values=[\n        EnumValue(SourceFormat.AVRO),\n        EnumValue(SourceFormat.CSV),\n        EnumValue(SourceFormat.DATASTORE_BACKUP),\n        EnumValue(SourceFormat.NEWLINE_DELIMITED_JSON),\n        EnumValue(SourceFormat.ORC),\n        EnumValue(SourceFormat.PARQUET),\n    ],\n)\n\n\n# Project names are permitted to have alphanumeric, dashes and underscores, up to 1024 characters.\nRE_PROJECT = r"[\\w\\d\\-\\_]{1,1024}"\n\n# Datasets and tables are permitted to have alphanumeric or underscores, no dashes allowed, up to\n# 1024 characters\nRE_DS_TABLE = r"[\\w\\d\\_]{1,1024}"\n\n# BigQuery supports writes directly to date partitions with the syntax foo.bar$20190101\nRE_PARTITION_SUFFIX = r"(\\$\\d{8})?"\n\n\ndef _is_valid_dataset(config_value):\n    """Datasets must be of form "project.dataset" or "dataset"."""\n    return re.match(\n        # regex matches: project.dataset -- OR -- dataset\n        r"^" + RE_PROJECT + r"\\." + RE_DS_TABLE + r"$|^" + RE_DS_TABLE + r"$",\n        config_value,\n    )\n\n\ndef _is_valid_table(config_value):\n    """Tables must be of form "project.dataset.table" or "dataset.table" with optional\n    date-partition suffix.\n    """\n    return re.match(\n        r"^"\n        + RE_PROJECT  #          project\n        + r"\\."  #               .\n        + RE_DS_TABLE  #         dataset\n        + r"\\."  #               .\n        + RE_DS_TABLE  #         table\n        + RE_PARTITION_SUFFIX  # date partition suffix\n        + r"$|^"  #              -- OR --\n        + RE_DS_TABLE  #         dataset\n        + r"\\."  
#               .\n        + RE_DS_TABLE  #         table\n        + RE_PARTITION_SUFFIX  # date partition suffix\n        + r"$",\n        config_value,\n    )\n\n\nclass _Dataset(ConfigScalar):\n    def __init__(self):\n        super(_Dataset, self).__init__(\n            key=type(self).__name__,\n            given_name=type(self).__name__,\n            scalar_kind=ConfigScalarKind.STRING,\n        )\n\n    def post_process(self, value):\n        if not _is_valid_dataset(value):\n            raise PostProcessingError('Datasets must be of the form "project.dataset" or "dataset"')\n        return value\n\n\nclass _Table(ConfigScalar):\n    def __init__(self):\n        super(_Table, self).__init__(\n            key=type(self).__name__,\n            given_name=type(self).__name__,\n            scalar_kind=ConfigScalarKind.STRING,\n        )\n\n    def post_process(self, value):\n        if not _is_valid_table(value):\n            raise PostProcessingError(\n                'Tables must be of the form "project.dataset.table" or "dataset.table" '\n                "with optional date-partition suffix"\n            )\n\n        return value\n\n\n# https://github.com/dagster-io/dagster/issues/1971\nTable = _Table()\nDataset = _Dataset()\n\n\n
[docs]class BigQueryError(Exception):\n pass
\n
", "current_page_name": "_modules/dagster_gcp/bigquery/types", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_gcp.bigquery.types"}}, "dataproc": {"ops": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_gcp.dataproc.ops

\nfrom typing import Any, Dict\n\nfrom dagster import (\n    Bool,\n    Config,\n    Field as DagsterField,\n    Int,\n    op,\n)\nfrom dagster._seven import json\nfrom pydantic import Field\n\nfrom .configs import define_dataproc_submit_job_config\nfrom .resources import TWENTY_MINUTES, DataprocResource\n\n# maintain the old config schema because of the nested job_config schema\nDATAPROC_CONFIG_SCHEMA = {\n    "job_timeout_in_seconds": DagsterField(\n        Int,\n        description="""Optional. Maximum time in seconds to wait for the job being\n                    completed. Default is set to 1200 seconds (20 minutes).\n                    """,\n        is_required=False,\n        default_value=TWENTY_MINUTES,\n    ),\n    "job_config": define_dataproc_submit_job_config(),\n    "job_scoped_cluster": DagsterField(\n        Bool,\n        description="whether to create a cluster or use an existing cluster",\n        is_required=False,\n        default_value=True,\n    ),\n}\n\n\nclass DataprocOpConfig(Config):\n    job_timeout_in_seconds: int = Field(\n        default=TWENTY_MINUTES,\n        description=(\n            "Maximum time in seconds to wait for the job being completed. Default is set to 1200"\n            " seconds (20 minutes)."\n        ),\n    )\n    job_scoped_cluster: bool = Field(\n        default=True,\n        description="Whether to create a cluster or use an existing cluster. Defaults to True.",\n    )\n    project_id: str = Field(\n        description=(\n            "Required. Project ID for the project which the client acts on behalf of. Will be"\n            " passed when creating a dataset/job."\n        )\n    )\n    region: str = Field(description="The GCP region.")\n    job_config: Dict[str, Any] = Field(\n        description="Python dictionary containing configuration for the Dataproc Job."\n    )\n\n\ndef _dataproc_compute(context):\n    job_config = context.op_config["job_config"]\n    job_timeout = context.op_config["job_timeout_in_seconds"]\n\n    context.log.info(\n        "submitting job with config: %s and timeout of: %d seconds"\n        % (str(json.dumps(job_config)), job_timeout)\n    )\n\n    if context.op_config["job_scoped_cluster"]:\n        # Cluster context manager, creates and then deletes cluster\n        with context.resources.dataproc.cluster_context_manager() as cluster:\n            # Submit the job specified by this solid to the cluster defined by the associated resource\n            result = cluster.submit_job(job_config)\n\n            job_id = result["reference"]["jobId"]\n            context.log.info(f"Submitted job ID {job_id}")\n            cluster.wait_for_job(job_id, wait_timeout=job_timeout)\n\n    else:\n        # Submit to an existing cluster\n        # Submit the job specified by this solid to the cluster defined by the associated resource\n        result = context.resources.dataproc.submit_job(job_config)\n\n        job_id = result["reference"]["jobId"]\n        context.log.info(f"Submitted job ID {job_id}")\n        context.resources.dataproc.wait_for_job(job_id, wait_timeout=job_timeout)\n\n\n@op(required_resource_keys={"dataproc"}, config_schema=DATAPROC_CONFIG_SCHEMA)\ndef dataproc_solid(context):\n    return _dataproc_compute(context)\n\n\n
[docs]@op(required_resource_keys={"dataproc"}, config_schema=DATAPROC_CONFIG_SCHEMA)\ndef dataproc_op(context):\n return _dataproc_compute(context)
\n\n\n@op\ndef configurable_dataproc_op(context, dataproc: DataprocResource, config: DataprocOpConfig):\n job_config = {"projectId": config.project_id, "region": config.region, "job": config.job_config}\n job_timeout = config.job_timeout_in_seconds\n\n context.log.info(\n "submitting job with config: %s and timeout of: %d seconds"\n % (str(json.dumps(job_config)), job_timeout)\n )\n\n dataproc_client = dataproc.get_client()\n\n if config.job_scoped_cluster:\n # Cluster context manager, creates and then deletes cluster\n with dataproc_client.cluster_context_manager() as cluster:\n # Submit the job specified by this solid to the cluster defined by the associated resource\n result = cluster.submit_job(job_config)\n\n job_id = result["reference"]["jobId"]\n context.log.info(f"Submitted job ID {job_id}")\n cluster.wait_for_job(job_id, wait_timeout=job_timeout)\n\n else:\n # Submit to an existing cluster\n # Submit the job specified by this solid to the cluster defined by the associated resource\n result = dataproc_client.submit_job(job_config)\n\n job_id = result["reference"]["jobId"]\n context.log.info(f"Submitted job ID {job_id}")\n dataproc_client.wait_for_job(job_id, wait_timeout=job_timeout)\n
", "current_page_name": "_modules/dagster_gcp/dataproc/ops", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_gcp.dataproc.ops"}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_gcp.dataproc.resources

\nimport json\nimport time\nfrom contextlib import contextmanager\nfrom typing import Any, Dict, Mapping, Optional\n\nimport dagster._check as check\nimport yaml\nfrom dagster import ConfigurableResource, IAttachDifferentObjectToOpContext, resource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom googleapiclient.discovery import build\nfrom oauth2client.client import GoogleCredentials\nfrom pydantic import Field\n\nfrom .configs import define_dataproc_create_cluster_config\nfrom .types import DataprocError\n\nTWENTY_MINUTES = 20 * 60\nDEFAULT_ITER_TIME_SEC = 5\n\n\nclass DataprocClient:\n    """Builds a client to the dataproc API."""\n\n    def __init__(self, config):\n        # Use Application Default Credentials to check the\n        # GOOGLE_APPLICATION_CREDENTIALS environment variable\n        # for the location of the service account key file.\n        credentials = GoogleCredentials.get_application_default()\n\n        # See https://github.com/googleapis/google-api-python-client/issues/299 for the\n        # cache_discovery=False configuration below\n        self.dataproc = build("dataproc", "v1", credentials=credentials, cache_discovery=False)\n\n        self.config = config\n\n        (self.project_id, self.region, self.cluster_name, self.cluster_config) = (\n            self.config.get(k) for k in ("projectId", "region", "clusterName", "cluster_config")\n        )\n\n    @property\n    def dataproc_clusters(self):\n        return (\n            # Google APIs dynamically genned, so pylint pukes\n            self.dataproc.projects()\n            .regions()\n            .clusters()\n        )\n\n    @property\n    def dataproc_jobs(self):\n        return (\n            # Google APIs dynamically genned, so pylint pukes\n            self.dataproc.projects()\n            .regions()\n            .jobs()\n        )\n\n    def create_cluster(self):\n        (\n            self.dataproc_clusters.create(\n                projectId=self.project_id,\n                region=self.region,\n                body={\n                    "projectId": self.project_id,\n                    "clusterName": self.cluster_name,\n                    "config": self.cluster_config,\n                },\n            ).execute()\n        )\n\n        def iter_fn():\n            # TODO: Add logging\n            # See: https://bit.ly/2UW5JaN\n            cluster = self.get_cluster()\n            return cluster["status"]["state"] in {"RUNNING", "UPDATING"}\n\n        done = DataprocClient._iter_and_sleep_until_ready(iter_fn)\n        if not done:\n            cluster = self.get_cluster()\n            raise DataprocError(\n                "Could not provision cluster -- status: %s" % str(cluster["status"])\n            )\n\n    def get_cluster(self):\n        return self.dataproc_clusters.get(\n            projectId=self.project_id, region=self.region, clusterName=self.cluster_name\n        ).execute()\n\n    def delete_cluster(self):\n        return self.dataproc_clusters.delete(\n            projectId=self.project_id, region=self.region, clusterName=self.cluster_name\n        ).execute()\n\n    def submit_job(self, job_details):\n        return self.dataproc_jobs.submit(\n            projectId=self.project_id, region=self.region, body=job_details\n        ).execute()\n\n    def get_job(self, job_id):\n        return self.dataproc_jobs.get(\n            projectId=self.project_id, region=self.region, jobId=job_id\n        ).execute()\n\n    def wait_for_job(self, job_id, 
wait_timeout=TWENTY_MINUTES):\n        """This method polls job status every 5 seconds."""\n\n        # TODO: Add logging here print('Waiting for job ID {} to finish...'.format(job_id))\n        def iter_fn():\n            # See: https://bit.ly/2Lg2tHr\n            result = self.get_job(job_id)\n\n            # Handle exceptions\n            if result["status"]["state"] in {"CANCELLED", "ERROR"}:\n                raise DataprocError("Job error: %s" % str(result["status"]))\n\n            if result["status"]["state"] == "DONE":\n                return True\n\n            return False\n\n        done = DataprocClient._iter_and_sleep_until_ready(iter_fn, max_wait_time_sec=wait_timeout)\n        if not done:\n            job = self.get_job(job_id)\n            raise DataprocError("Job run timed out: %s" % str(job["status"]))\n\n    @staticmethod\n    def _iter_and_sleep_until_ready(\n        callable_fn, max_wait_time_sec=TWENTY_MINUTES, iter_time=DEFAULT_ITER_TIME_SEC\n    ):\n        """Iterates and sleeps until callable_fn returns true."""\n        # Wait for cluster ready state\n        ready, curr_iter = False, 0\n        max_iter = max_wait_time_sec / iter_time\n        while not ready and curr_iter < max_iter:\n            ready = callable_fn()\n            time.sleep(iter_time)\n            curr_iter += 1\n\n        # Will return false if ran up to max_iter without success\n        return ready\n\n    @contextmanager\n    def cluster_context_manager(self):\n        """Context manager allowing execution with a dataproc cluster.\n\n        Example:\n        .. code-block::\n            with context.resources.dataproc.cluster as cluster:\n                # do stuff...\n        """\n        self.create_cluster()\n        try:\n            yield self\n        finally:\n            self.delete_cluster()\n\n\n
[docs]class DataprocResource(ConfigurableResource, IAttachDifferentObjectToOpContext):\n """Resource for connecting to a Dataproc cluster.\n\n Example:\n .. code-block::\n\n @asset\n def my_asset(dataproc: DataprocResource):\n with dataproc.get_client() as client:\n # client is a dagster_gcp.DataprocClient\n ...\n """\n\n project_id: str = Field(\n description=(\n "Required. Project ID for the project which the client acts on behalf of. Will be"\n " passed when creating a dataset/job."\n )\n )\n region: str = Field(description="The GCP region.")\n cluster_name: str = Field(\n description=(\n "Required. The cluster name. Cluster names within a project must be unique. Names of"\n " deleted clusters can be reused."\n )\n )\n cluster_config_yaml_path: Optional[str] = Field(\n default=None,\n description=(\n "Full path to a YAML file containing cluster configuration. See"\n " https://cloud.google.com/dataproc/docs/reference/rest/v1/ClusterConfig for"\n " configuration options. Only one of cluster_config_yaml_path,"\n " cluster_config_json_path, or cluster_config_dict may be provided."\n ),\n )\n cluster_config_json_path: Optional[str] = Field(\n default=None,\n description=(\n "Full path to a JSON file containing cluster configuration. See"\n " https://cloud.google.com/dataproc/docs/reference/rest/v1/ClusterConfig for"\n " configuration options. Only one of cluster_config_yaml_path,"\n " cluster_config_json_path, or cluster_config_dict may be provided."\n ),\n )\n cluster_config_dict: Optional[Dict[str, Any]] = Field(\n default=None,\n description=(\n "Python dictionary containing cluster configuration. See"\n " https://cloud.google.com/dataproc/docs/reference/rest/v1/ClusterConfig for"\n " configuration options. Only one of cluster_config_yaml_path,"\n " cluster_config_json_path, or cluster_config_dict may be provided."\n ),\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def _read_yaml_config(self, path: str) -> Mapping[str, Any]:\n with open(path, "r", encoding="utf8") as f:\n return yaml.safe_load(f)\n\n def _read_json_config(self, path: str) -> Mapping[str, Any]:\n with open(path, "r", encoding="utf8") as f:\n return json.load(f)\n\n def _get_cluster_config(self) -> Optional[Mapping[str, Any]]:\n methods = 0\n methods += 1 if self.cluster_config_dict is not None else 0\n methods += 1 if self.cluster_config_json_path is not None else 0\n methods += 1 if self.cluster_config_yaml_path is not None else 0\n\n # ensure that at most 1 method is provided\n check.invariant(\n methods <= 1,\n "Dataproc Resource: Incorrect config: Cannot provide cluster config multiple ways."\n " Choose one of cluster_config_dict, cluster_config_json_path, or"\n " cluster_config_yaml_path",\n )\n\n cluster_config = None\n if self.cluster_config_json_path:\n cluster_config = self._read_json_config(self.cluster_config_json_path)\n elif self.cluster_config_yaml_path:\n cluster_config = self._read_yaml_config(self.cluster_config_yaml_path)\n elif self.cluster_config_dict:\n cluster_config = self.cluster_config_dict\n\n return cluster_config\n\n def get_client(self) -> DataprocClient:\n cluster_config = self._get_cluster_config()\n\n client_config_dict = {\n "projectId": self.project_id,\n "region": self.region,\n "clusterName": self.cluster_name,\n "cluster_config": cluster_config,\n }\n\n return DataprocClient(config=client_config_dict)\n\n def get_object_to_set_on_execution_context(self) -> Any:\n return self.get_client()
\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n    config_schema=define_dataproc_create_cluster_config(),\n    description="Manage a Dataproc cluster resource",\n)\ndef dataproc_resource(context):\n    return DataprocClient(context.resource_config)
\n
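For reference, here is a hedged sketch of using ``DataprocResource.get_client`` and the ``cluster_context_manager`` shown above from inside an asset. The job spec and all names below are illustrative placeholders, not a prescribed configuration.

.. code-block:: python

    from dagster import Definitions, asset
    from dagster_gcp import DataprocResource


    @asset
    def dataproc_backed_asset(dataproc: DataprocResource):
        client = dataproc.get_client()
        # Illustrative Dataproc job body; replace with a real job spec.
        job_spec = {
            "reference": {"projectId": "my-project"},
            "placement": {"clusterName": "my-cluster"},
            "pysparkJob": {"mainPythonFileUri": "gs://my-bucket/main.py"},
        }
        # Creates the cluster, runs the job, then tears the cluster down.
        with client.cluster_context_manager() as cluster:
            result = cluster.submit_job(job_spec)
            cluster.wait_for_job(result["reference"]["jobId"])


    defs = Definitions(
        assets=[dataproc_backed_asset],
        resources={
            "dataproc": DataprocResource(
                project_id="my-project",
                region="us-east1",
                cluster_name="my-cluster",
            )
        },
    )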
", "current_page_name": "_modules/dagster_gcp/dataproc/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_gcp.dataproc.resources"}}, "gcs": {"compute_log_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_gcp.gcs.compute_log_manager

\nimport datetime\nimport json\nimport os\nfrom typing import Any, Mapping, Optional, Sequence\n\nimport dagster._seven as seven\nfrom dagster import (\n    Field,\n    StringSource,\n    _check as check,\n)\nfrom dagster._config.config_type import Noneable\nfrom dagster._core.storage.cloud_storage_compute_log_manager import (\n    CloudStorageComputeLogManager,\n    PollingComputeLogSubscriptionManager,\n)\nfrom dagster._core.storage.compute_log_manager import ComputeIOType\nfrom dagster._core.storage.local_compute_log_manager import (\n    IO_TYPE_EXTENSION,\n    LocalComputeLogManager,\n)\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData\nfrom dagster._utils import ensure_dir, ensure_file\nfrom google.cloud import storage\nfrom typing_extensions import Self\n\n\n
[docs]class GCSComputeLogManager(CloudStorageComputeLogManager, ConfigurableClass):\n """Logs op compute function stdout and stderr to GCS.\n\n Users should not instantiate this class directly. Instead, use a YAML block in ``dagster.yaml``\n such as the following:\n\n .. code-block:: YAML\n\n compute_logs:\n module: dagster_gcp.gcs.compute_log_manager\n class: GCSComputeLogManager\n config:\n bucket: "mycorp-dagster-compute-logs"\n local_dir: "/tmp/cool"\n prefix: "dagster-test-"\n upload_interval: 30\n\n There are more configuration examples in the instance documentation guide: https://docs.dagster.io/deployment/dagster-instance#compute-log-storage\n\n Args:\n bucket (str): The name of the GCS bucket to which to log.\n local_dir (Optional[str]): Path to the local directory in which to stage logs. Default:\n ``dagster._seven.get_system_temp_directory()``.\n prefix (Optional[str]): Prefix for the log file keys.\n json_credentials_envvar (Optional[str]): Environment variable that contains the JSON with a private key\n and other credentials information. If this is set, ``GOOGLE_APPLICATION_CREDENTIALS`` will be ignored.\n Can be used when the private key cannot be used as a file.\n upload_interval: (Optional[int]): Interval in seconds to upload partial log files to GCS. By default, will only upload when the capture is complete.\n inst_data (Optional[ConfigurableClassData]): Serializable representation of the compute\n log manager when instantiated from config.\n """\n\n def __init__(\n self,\n bucket,\n local_dir=None,\n inst_data: Optional[ConfigurableClassData] = None,\n prefix="dagster",\n json_credentials_envvar=None,\n upload_interval=None,\n ):\n self._bucket_name = check.str_param(bucket, "bucket")\n self._prefix = self._clean_prefix(check.str_param(prefix, "prefix"))\n\n if json_credentials_envvar:\n json_info_str = os.environ.get(json_credentials_envvar)\n credentials_info = json.loads(json_info_str) # type: ignore # (possible none)\n self._bucket = (\n storage.Client()\n .from_service_account_info(credentials_info)\n .bucket(self._bucket_name)\n )\n else:\n self._bucket = storage.Client().bucket(self._bucket_name)\n\n # Check if the bucket exists\n check.invariant(self._bucket.exists())\n\n # proxy calls to local compute log manager (for subscriptions, etc)\n if not local_dir:\n local_dir = seven.get_system_temp_directory()\n\n self._upload_interval = check.opt_int_param(upload_interval, "upload_interval")\n self._local_manager = LocalComputeLogManager(local_dir)\n self._subscription_manager = PollingComputeLogSubscriptionManager(self)\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n\n @property\n def inst_data(self):\n return self._inst_data\n\n @classmethod\n def config_type(cls):\n return {\n "bucket": StringSource,\n "local_dir": Field(StringSource, is_required=False),\n "prefix": Field(StringSource, is_required=False, default_value="dagster"),\n "json_credentials_envvar": Field(StringSource, is_required=False),\n "upload_interval": Field(Noneable(int), is_required=False, default_value=None),\n }\n\n @classmethod\n def from_config_value(\n cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any]\n ) -> Self:\n return GCSComputeLogManager(inst_data=inst_data, **config_value)\n\n @property\n def local_manager(self) -> LocalComputeLogManager:\n return self._local_manager\n\n @property\n def upload_interval(self) -> Optional[int]:\n return self._upload_interval if self._upload_interval else None\n\n def _clean_prefix(self, 
prefix):\n parts = prefix.split("/")\n return "/".join([part for part in parts if part])\n\n def _gcs_key(self, log_key, io_type, partial=False):\n check.inst_param(io_type, "io_type", ComputeIOType)\n extension = IO_TYPE_EXTENSION[io_type]\n [*namespace, filebase] = log_key\n filename = f"{filebase}.{extension}"\n if partial:\n filename = f"{filename}.partial"\n paths = [self._prefix, "storage", *namespace, filename]\n return "/".join(paths)\n\n def delete_logs(\n self, log_key: Optional[Sequence[str]] = None, prefix: Optional[Sequence[str]] = None\n ):\n self._local_manager.delete_logs(log_key, prefix)\n if log_key:\n gcs_keys_to_remove = [\n self._gcs_key(log_key, ComputeIOType.STDOUT),\n self._gcs_key(log_key, ComputeIOType.STDERR),\n self._gcs_key(log_key, ComputeIOType.STDOUT, partial=True),\n self._gcs_key(log_key, ComputeIOType.STDERR, partial=True),\n ]\n # if the blob doesn't exist, do nothing instead of raising a not found exception\n self._bucket.delete_blobs(gcs_keys_to_remove, on_error=lambda _: None)\n elif prefix:\n # add the trailing '/' to make sure that ['a'] does not match ['apple']\n delete_prefix = "/".join([self._prefix, "storage", *prefix, ""])\n to_delete = self._bucket.list_blobs(prefix=delete_prefix)\n self._bucket.delete_blobs(list(to_delete))\n else:\n check.failed("Must pass in either `log_key` or `prefix` argument to delete_logs")\n\n def download_url_for_type(self, log_key: Sequence[str], io_type: ComputeIOType):\n if not self.is_capture_complete(log_key):\n return None\n\n gcs_key = self._gcs_key(log_key, io_type)\n try:\n return self._bucket.blob(gcs_key).generate_signed_url(\n expiration=datetime.timedelta(minutes=60)\n )\n except:\n # fallback to the local download url if the current credentials are insufficient to create\n # signed urls\n return self.local_manager.get_captured_log_download_url(log_key, io_type)\n\n def display_path_for_type(self, log_key: Sequence[str], io_type: ComputeIOType):\n if not self.is_capture_complete(log_key):\n return self.local_manager.get_captured_local_path(log_key, IO_TYPE_EXTENSION[io_type])\n gcs_key = self._gcs_key(log_key, io_type)\n return f"gs://{self._bucket_name}/{gcs_key}"\n\n def cloud_storage_has_logs(\n self, log_key: Sequence[str], io_type: ComputeIOType, partial: bool = False\n ) -> bool:\n gcs_key = self._gcs_key(log_key, io_type, partial)\n return self._bucket.blob(gcs_key).exists()\n\n def upload_to_cloud_storage(\n self, log_key: Sequence[str], io_type: ComputeIOType, partial=False\n ):\n path = self.local_manager.get_captured_local_path(log_key, IO_TYPE_EXTENSION[io_type])\n ensure_file(path)\n\n if partial and os.stat(path).st_size == 0:\n return\n\n gcs_key = self._gcs_key(log_key, io_type, partial=partial)\n with open(path, "rb") as data:\n self._bucket.blob(gcs_key).upload_from_file(data)\n\n def download_from_cloud_storage(\n self, log_key: Sequence[str], io_type: ComputeIOType, partial=False\n ):\n path = self.local_manager.get_captured_local_path(\n log_key, IO_TYPE_EXTENSION[io_type], partial=partial\n )\n ensure_dir(os.path.dirname(path))\n\n gcs_key = self._gcs_key(log_key, io_type, partial=partial)\n with open(path, "wb") as fileobj:\n self._bucket.blob(gcs_key).download_to_file(fileobj)\n\n def on_subscribe(self, subscription):\n self._subscription_manager.add_subscription(subscription)\n\n def on_unsubscribe(self, subscription):\n self._subscription_manager.remove_subscription(subscription)\n\n def dispose(self):\n self._subscription_manager.dispose()\n self._local_manager.dispose()
\n
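A small, hypothetical sketch of constructing ``GCSComputeLogManager`` directly, which can be handy in tests; in a real deployment the manager is configured through ``dagster.yaml`` as the docstring above shows, and the bucket name here is a placeholder.

.. code-block:: python

    from dagster_gcp.gcs.compute_log_manager import GCSComputeLogManager

    # Direct construction for local experimentation only; the bucket must already
    # exist and credentials must be available (e.g. GOOGLE_APPLICATION_CREDENTIALS).
    log_manager = GCSComputeLogManager(
        bucket="mycorp-dagster-compute-logs",
        prefix="dagster-test-",
        upload_interval=30,
    )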
", "current_page_name": "_modules/dagster_gcp/gcs/compute_log_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_gcp.gcs.compute_log_manager"}, "file_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_gcp.gcs.file_manager

\nimport io\nimport uuid\nfrom contextlib import contextmanager\nfrom typing import Optional\n\nimport dagster._check as check\nfrom dagster._core.storage.file_manager import (\n    FileHandle,\n    FileManager,\n    TempfileManager,\n    check_file_like_obj,\n)\nfrom google.cloud import storage\n\n\n
[docs]class GCSFileHandle(FileHandle):\n """A reference to a file on GCS."""\n\n def __init__(self, gcs_bucket: str, gcs_key: str):\n self._gcs_bucket = check.str_param(gcs_bucket, "gcs_bucket")\n self._gcs_key = check.str_param(gcs_key, "gcs_key")\n\n @property\n def gcs_bucket(self) -> str:\n """str: The name of the GCS bucket."""\n return self._gcs_bucket\n\n @property\n def gcs_key(self) -> str:\n """str: The GCS key."""\n return self._gcs_key\n\n @property\n def path_desc(self) -> str:\n """str: The file's GCS URL."""\n return self.gcs_path\n\n @property\n def gcs_path(self) -> str:\n """str: The file's GCS URL."""\n return f"gs://{self.gcs_bucket}/{self.gcs_key}"
\n\n\nclass GCSFileManager(FileManager):\n def __init__(self, client, gcs_bucket, gcs_base_key):\n self._client = check.inst_param(client, "client", storage.client.Client)\n self._gcs_bucket = check.str_param(gcs_bucket, "gcs_bucket")\n self._gcs_base_key = check.str_param(gcs_base_key, "gcs_base_key")\n self._local_handle_cache = {}\n self._temp_file_manager = TempfileManager()\n\n def copy_handle_to_local_temp(self, file_handle):\n self._download_if_not_cached(file_handle)\n return self._get_local_path(file_handle)\n\n def _download_if_not_cached(self, file_handle):\n if not self._file_handle_cached(file_handle):\n # instigate download\n temp_file_obj = self._temp_file_manager.tempfile()\n temp_name = temp_file_obj.name\n bucket_obj = self._client.bucket(file_handle.gcs_bucket)\n bucket_obj.blob(file_handle.gcs_key).download_to_file(temp_file_obj)\n self._local_handle_cache[file_handle.gcs_path] = temp_name\n\n return file_handle\n\n @contextmanager\n def read(self, file_handle, mode="rb"):\n check.inst_param(file_handle, "file_handle", GCSFileHandle)\n check.str_param(mode, "mode")\n check.param_invariant(mode in {"r", "rb"}, "mode")\n\n self._download_if_not_cached(file_handle)\n\n encoding = None if mode == "rb" else "utf-8"\n with open(self._get_local_path(file_handle), mode, encoding=encoding) as file_obj:\n yield file_obj\n\n def _file_handle_cached(self, file_handle):\n return file_handle.gcs_path in self._local_handle_cache\n\n def _get_local_path(self, file_handle):\n return self._local_handle_cache[file_handle.gcs_path]\n\n def read_data(self, file_handle):\n with self.read(file_handle, mode="rb") as file_obj:\n return file_obj.read()\n\n def write_data(self, data, ext=None, key: Optional[str] = None):\n key = check.opt_str_param(key, "key", default=str(uuid.uuid4()))\n check.inst_param(data, "data", bytes)\n return self.write(io.BytesIO(data), mode="wb", key=key, ext=ext)\n\n def write(self, file_obj, mode="wb", ext=None, key: Optional[str] = None):\n key = check.opt_str_param(key, "key", default=str(uuid.uuid4()))\n check_file_like_obj(file_obj)\n gcs_key = self.get_full_key(key + (("." + ext) if ext is not None else ""))\n bucket_obj = self._client.bucket(self._gcs_bucket)\n bucket_obj.blob(gcs_key).upload_from_file(file_obj)\n return GCSFileHandle(self._gcs_bucket, gcs_key)\n\n def get_full_key(self, file_key):\n return f"{self._gcs_base_key}/{file_key}"\n\n def delete_local_temp(self):\n self._temp_file_manager.close()\n
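To make the round trip above concrete, here is a hedged sketch of writing and reading bytes through ``GCSFileManager``; the bucket and base key are placeholders.

.. code-block:: python

    from google.cloud import storage

    from dagster_gcp.gcs.file_manager import GCSFileManager

    manager = GCSFileManager(
        client=storage.Client(),
        gcs_bucket="my-bucket",
        gcs_base_key="my/base/key",
    )

    handle = manager.write_data(b"hello")  # uploads the bytes, returns a GCSFileHandle
    print(handle.gcs_path)                 # e.g. gs://my-bucket/my/base/key/<uuid>
    assert manager.read_data(handle) == b"hello"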
", "current_page_name": "_modules/dagster_gcp/gcs/file_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_gcp.gcs.file_manager"}, "io_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_gcp.gcs.io_manager

\nimport pickle\nfrom typing import Any, Optional, Union\n\nfrom dagster import (\n    ConfigurableIOManager,\n    InputContext,\n    OutputContext,\n    ResourceDependency,\n    _check as check,\n    io_manager,\n)\nfrom dagster._annotations import deprecated\nfrom dagster._core.storage.io_manager import dagster_maintained_io_manager\nfrom dagster._core.storage.upath_io_manager import UPathIOManager\nfrom dagster._utils import PICKLE_PROTOCOL\nfrom dagster._utils.backoff import backoff\nfrom dagster._utils.cached_method import cached_method\nfrom google.api_core.exceptions import Forbidden, ServiceUnavailable, TooManyRequests\nfrom google.cloud import storage\nfrom pydantic import Field\nfrom upath import UPath\n\nfrom .resources import GCSResource\n\nDEFAULT_LEASE_DURATION = 60  # One minute\n\n\nclass PickledObjectGCSIOManager(UPathIOManager):\n    def __init__(self, bucket: str, client: Optional[Any] = None, prefix: str = "dagster"):\n        self.bucket = check.str_param(bucket, "bucket")\n        self.client = client or storage.Client()\n        self.bucket_obj = self.client.bucket(bucket)\n        check.invariant(self.bucket_obj.exists())\n        self.prefix = check.str_param(prefix, "prefix")\n        super().__init__(base_path=UPath(self.prefix))\n\n    def unlink(self, path: UPath) -> None:\n        key = str(path)\n        if self.bucket_obj.blob(key).exists():\n            self.bucket_obj.blob(key).delete()\n\n    def path_exists(self, path: UPath) -> bool:\n        key = str(path)\n        blobs = self.client.list_blobs(self.bucket, prefix=key)\n        return len(list(blobs)) > 0\n\n    def get_op_output_relative_path(self, context: Union[InputContext, OutputContext]) -> UPath:\n        parts = context.get_identifier()\n        run_id = parts[0]\n        output_parts = parts[1:]\n        return UPath("storage", run_id, "files", *output_parts)\n\n    def get_loading_input_log_message(self, path: UPath) -> str:\n        return f"Loading GCS object from: {self._uri_for_path(path)}"\n\n    def get_writing_output_log_message(self, path: UPath) -> str:\n        return f"Writing GCS object at: {self._uri_for_path(path)}"\n\n    def _uri_for_path(self, path: UPath) -> str:\n        return f"gs://{self.bucket}/{path}"\n\n    def make_directory(self, path: UPath) -> None:\n        # It is not necessary to create directories in GCP\n        return None\n\n    def load_from_path(self, context: InputContext, path: UPath) -> Any:\n        bytes_obj = self.bucket_obj.blob(str(path)).download_as_bytes()\n        return pickle.loads(bytes_obj)\n\n    def dump_to_path(self, context: OutputContext, obj: Any, path: UPath) -> None:\n        if self.path_exists(path):\n            context.log.warning(f"Removing existing GCS key: {path}")\n            self.unlink(path)\n\n        pickled_obj = pickle.dumps(obj, PICKLE_PROTOCOL)\n\n        backoff(\n            self.bucket_obj.blob(str(path)).upload_from_string,\n            args=[pickled_obj],\n            retry_on=(TooManyRequests, Forbidden, ServiceUnavailable),\n        )\n\n\n
[docs]class GCSPickleIOManager(ConfigurableIOManager):\n """Persistent IO manager using GCS for storage.\n\n Serializes objects via pickling. Suitable for objects storage for distributed executors, so long\n as each execution node has network connectivity and credentials for GCS and the backing bucket.\n\n Assigns each op output to a unique filepath containing run ID, step key, and output name.\n Assigns each asset to a single filesystem path, at ``<base_dir>/<asset_key>``. If the asset key\n has multiple components, the final component is used as the name of the file, and the preceding\n components as parent directories under the base_dir.\n\n Subsequent materializations of an asset will overwrite previous materializations of that asset.\n With a base directory of ``/my/base/path``, an asset with key\n ``AssetKey(["one", "two", "three"])`` would be stored in a file called ``three`` in a directory\n with path ``/my/base/path/one/two/``.\n\n Example usage:\n\n 1. Attach this IO manager to a set of assets.\n\n .. code-block:: python\n\n from dagster import asset, Definitions\n from dagster_gcp.gcs import GCSPickleIOManager, GCSResource\n\n @asset\n def asset1():\n # create df ...\n return df\n\n @asset\n def asset2(asset1):\n return asset1[:5]\n\n defs = Definitions(\n assets=[asset1, asset2],\n resources={\n "io_manager": GCSPickleIOManager(\n gcs_bucket="my-cool-bucket",\n gcs_prefix="my-cool-prefix"\n ),\n "gcs": GCSResource(project="my-cool-project")\n }\n )\n\n\n 2. Attach this IO manager to your job to make it available to your ops.\n\n .. code-block:: python\n\n from dagster import job\n from dagster_gcp.gcs import GCSPickleIOManager, GCSResource\n\n @job(\n resource_defs={\n "io_manager": GCSPickleIOManager(\n gcs=GCSResource(project="my-cool-project")\n gcs_bucket="my-cool-bucket",\n gcs_prefix="my-cool-prefix"\n ),\n }\n )\n def my_job():\n ...\n """\n\n gcs: ResourceDependency[GCSResource]\n gcs_bucket: str = Field(description="GCS bucket to store files")\n gcs_prefix: str = Field(default="dagster", description="Prefix to add to all file paths")\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @property\n @cached_method\n def _internal_io_manager(self) -> PickledObjectGCSIOManager:\n return PickledObjectGCSIOManager(\n bucket=self.gcs_bucket, client=self.gcs.get_client(), prefix=self.gcs_prefix\n )\n\n def load_input(self, context: InputContext) -> Any:\n return self._internal_io_manager.load_input(context)\n\n def handle_output(self, context: OutputContext, obj: Any) -> None:\n self._internal_io_manager.handle_output(context, obj)
\n\n\n
[docs]@deprecated(\n    breaking_version="2.0",\n    additional_warn_text="Please use GCSPickleIOManager instead.",\n)\nclass ConfigurablePickledObjectGCSIOManager(GCSPickleIOManager):\n    """Renamed to GCSPickleIOManager. See GCSPickleIOManager for documentation."""\n\n    pass
\n\n\n
[docs]@dagster_maintained_io_manager\n@io_manager(\n config_schema=GCSPickleIOManager.to_config_schema(),\n required_resource_keys={"gcs"},\n)\ndef gcs_pickle_io_manager(init_context):\n """Persistent IO manager using GCS for storage.\n\n Serializes objects via pickling. Suitable for objects storage for distributed executors, so long\n as each execution node has network connectivity and credentials for GCS and the backing bucket.\n\n Assigns each op output to a unique filepath containing run ID, step key, and output name.\n Assigns each asset to a single filesystem path, at ``<base_dir>/<asset_key>``. If the asset key\n has multiple components, the final component is used as the name of the file, and the preceding\n components as parent directories under the base_dir.\n\n Subsequent materializations of an asset will overwrite previous materializations of that asset.\n With a base directory of ``/my/base/path``, an asset with key\n ``AssetKey(["one", "two", "three"])`` would be stored in a file called ``three`` in a directory\n with path ``/my/base/path/one/two/``.\n\n Example usage:\n\n 1. Attach this IO manager to a set of assets.\n\n .. code-block:: python\n\n from dagster import Definitions, asset\n from dagster_gcp.gcs import gcs_pickle_io_manager, gcs_resource\n\n @asset\n def asset1():\n # create df ...\n return df\n\n @asset\n def asset2(asset1):\n return asset1[:5]\n\n defs = Definitions(\n assets=[asset1, asset2],\n resources={\n "io_manager": gcs_pickle_io_manager.configured(\n {"gcs_bucket": "my-cool-bucket", "gcs_prefix": "my-cool-prefix"}\n ),\n "gcs": gcs_resource.configured({"project": "my-cool-project"}),\n },\n )\n\n\n 2. Attach this IO manager to your job to make it available to your ops.\n\n .. code-block:: python\n\n from dagster import job\n from dagster_gcp.gcs import gcs_pickle_io_manager, gcs_resource\n\n @job(\n resource_defs={\n "io_manager": gcs_pickle_io_manager.configured(\n {"gcs_bucket": "my-cool-bucket", "gcs_prefix": "my-cool-prefix"}\n ),\n "gcs": gcs_resource.configured({"project": "my-cool-project"}),\n },\n )\n def my_job():\n ...\n """\n client = init_context.resources.gcs\n pickled_io_manager = PickledObjectGCSIOManager(\n bucket=init_context.resource_config["gcs_bucket"],\n client=client,\n prefix=init_context.resource_config["gcs_prefix"],\n )\n return pickled_io_manager
\n
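The path layout described in the docstrings above can be illustrated with a short, hypothetical example: an asset whose key has multiple components is pickled under ``<gcs_prefix>/<key components...>`` in the configured bucket. All names below are placeholders.

.. code-block:: python

    from dagster import Definitions, asset
    from dagster_gcp.gcs import GCSPickleIOManager, GCSResource


    # Asset key is ["one", "two", "three"]; the pickle lands at
    # gs://my-cool-bucket/my-cool-prefix/one/two/three.
    @asset(key_prefix=["one", "two"])
    def three():
        return [1, 2, 3]


    defs = Definitions(
        assets=[three],
        resources={
            "io_manager": GCSPickleIOManager(
                gcs=GCSResource(project="my-cool-project"),
                gcs_bucket="my-cool-bucket",
                gcs_prefix="my-cool-prefix",
            ),
        },
    )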
", "current_page_name": "_modules/dagster_gcp/gcs/io_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_gcp.gcs.io_manager"}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_gcp.gcs.resources

\nfrom typing import Any, Optional\n\nfrom dagster import ConfigurableResource, IAttachDifferentObjectToOpContext, resource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom google.cloud import storage\nfrom pydantic import Field\n\nfrom .file_manager import GCSFileManager\n\n\n
[docs]class GCSResource(ConfigurableResource, IAttachDifferentObjectToOpContext):\n """Resource for interacting with Google Cloud Storage.\n\n Example:\n .. code-block::\n\n @asset\n def my_asset(gcs: GCSResource):\n with gcs.get_client() as client:\n # client is a google.cloud.storage.Client\n ...\n """\n\n project: Optional[str] = Field(default=None, description="Project name")\n\n def get_client(self) -> storage.Client:\n """Creates a GCS Client.\n\n Returns: google.cloud.storage.Client\n """\n return _gcs_client_from_config(project=self.project)\n\n def get_object_to_set_on_execution_context(self) -> Any:\n return self.get_client()
\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n    config_schema=GCSResource.to_config_schema(),\n    description="This resource provides a GCS client",\n)\ndef gcs_resource(init_context) -> storage.Client:\n    return GCSResource.from_resource_context(init_context).get_client()
\n\n\n
[docs]class GCSFileManagerResource(ConfigurableResource, IAttachDifferentObjectToOpContext):\n """FileManager that provides abstract access to GCS."""\n\n project: Optional[str] = Field(default=None, description="Project name")\n gcs_bucket: str = Field(description="GCS bucket to store files")\n gcs_prefix: str = Field(default="dagster", description="Prefix to add to all file paths")\n\n def get_client(self) -> GCSFileManager:\n """Creates a :py:class:`~dagster_gcp.GCSFileManager` object that implements the\n :py:class:`~dagster._core.storage.file_manager.FileManager` API .\n\n Returns: GCSFileManager\n """\n gcs_client = _gcs_client_from_config(project=self.project)\n return GCSFileManager(\n client=gcs_client,\n gcs_bucket=self.gcs_bucket,\n gcs_base_key=self.gcs_prefix,\n )\n\n def get_object_to_set_on_execution_context(self) -> Any:\n return self.get_client()
\n\n\n
[docs]@dagster_maintained_resource\n@resource(config_schema=GCSFileManagerResource.to_config_schema())\ndef gcs_file_manager(context):\n    """FileManager that provides abstract access to GCS.\n\n    Implements the :py:class:`~dagster._core.storage.file_manager.FileManager` API.\n    """\n    return GCSFileManagerResource.from_resource_context(context).get_client()
\n\n\ndef _gcs_client_from_config(project: Optional[str]) -> storage.Client:\n """Creates a GCS Client.\n\n Args:\n project: The GCP project\n\n Returns: A GCS client.\n """\n return storage.client.Client(project=project)\n
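A hedged sketch of using ``GCSResource`` from an asset: ``get_client`` returns a plain ``google.cloud.storage.Client``, so the usual client API applies. The bucket and blob names below are placeholders.

.. code-block:: python

    from dagster import Definitions, asset
    from dagster_gcp.gcs import GCSResource


    @asset
    def uploaded_greeting(gcs: GCSResource):
        client = gcs.get_client()  # google.cloud.storage.Client
        client.bucket("my-bucket").blob("hello.txt").upload_from_string("hello")


    defs = Definitions(
        assets=[uploaded_greeting],
        resources={"gcs": GCSResource(project="my-project")},
    )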
", "current_page_name": "_modules/dagster_gcp/gcs/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_gcp.gcs.resources"}}}, "dagster_gcp_pandas": {"bigquery": {"bigquery_pandas_type_handler": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_gcp_pandas.bigquery.bigquery_pandas_type_handler

\nfrom typing import Optional, Sequence, Type\n\nimport pandas as pd\nfrom dagster import InputContext, MetadataValue, OutputContext, TableColumn, TableSchema\nfrom dagster._core.storage.db_io_manager import DbTypeHandler, TableSlice\nfrom dagster_gcp.bigquery.io_manager import (\n    BigQueryClient,\n    BigQueryIOManager,\n    build_bigquery_io_manager,\n)\n\n\n
[docs]class BigQueryPandasTypeHandler(DbTypeHandler[pd.DataFrame]):\n """Plugin for the BigQuery I/O Manager that can store and load Pandas DataFrames as BigQuery tables.\n\n Examples:\n .. code-block:: python\n\n from dagster_gcp import BigQueryIOManager\n from dagster_bigquery_pandas import BigQueryPandasTypeHandler\n from dagster import Definitions, EnvVar\n\n class MyBigQueryIOManager(BigQueryIOManager):\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [BigQueryPandasTypeHandler()]\n\n @asset(\n key_prefix=["my_dataset"] # my_dataset will be used as the dataset in BigQuery\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "io_manager": MyBigQueryIOManager(project=EnvVar("GCP_PROJECT"))\n }\n )\n\n """\n\n def handle_output(\n self, context: OutputContext, table_slice: TableSlice, obj: pd.DataFrame, connection\n ):\n """Stores the pandas DataFrame in BigQuery."""\n with_uppercase_cols = obj.rename(str.upper, copy=False, axis="columns")\n\n job = connection.load_table_from_dataframe(\n dataframe=with_uppercase_cols,\n destination=f"{table_slice.schema}.{table_slice.table}",\n project=table_slice.database,\n location=context.resource_config.get("location") if context.resource_config else None,\n timeout=context.resource_config.get("timeout") if context.resource_config else None,\n )\n job.result()\n\n context.add_output_metadata(\n {\n "row_count": obj.shape[0],\n "dataframe_columns": MetadataValue.table_schema(\n TableSchema(\n columns=[\n TableColumn(name=name, type=str(dtype)) # type: ignore # (bad stubs)\n for name, dtype in obj.dtypes.items()\n ]\n )\n ),\n }\n )\n\n def load_input(\n self, context: InputContext, table_slice: TableSlice, connection\n ) -> pd.DataFrame:\n """Loads the input as a Pandas DataFrame."""\n if table_slice.partition_dimensions and len(context.asset_partition_keys) == 0:\n return pd.DataFrame()\n result = connection.query(\n query=BigQueryClient.get_select_statement(table_slice),\n project=table_slice.database,\n location=context.resource_config.get("location") if context.resource_config else None,\n timeout=context.resource_config.get("timeout") if context.resource_config else None,\n ).to_dataframe()\n\n result.columns = map(str.lower, result.columns)\n return result\n\n @property\n def supported_types(self):\n return [pd.DataFrame]
\n\n\nbigquery_pandas_io_manager = build_bigquery_io_manager(\n [BigQueryPandasTypeHandler()], default_load_type=pd.DataFrame\n)\nbigquery_pandas_io_manager.__doc__ = """\nAn I/O manager definition that reads inputs from and writes pandas DataFrames to BigQuery.\n\nReturns:\n IOManagerDefinition\n\nExamples:\n\n .. code-block:: python\n\n from dagster_gcp_pandas import bigquery_pandas_io_manager\n from dagster import Definitions\n\n @asset(\n key_prefix=["my_dataset"] # will be used as the dataset in BigQuery\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "io_manager": bigquery_pandas_io_manager.configured({\n "project" : {"env": "GCP_PROJECT"}\n })\n }\n )\n\n You can tell Dagster in which dataset to create tables by setting the "dataset" configuration value.\n If you do not provide a dataset as configuration to the I/O manager, Dagster will determine a dataset based\n on the assets and ops using the I/O Manager. For assets, the dataset will be determined from the asset key,\n as shown in the above example. The final prefix before the asset name will be used as the dataset. For example,\n if the asset "my_table" had the key prefix ["gcp", "bigquery", "my_dataset"], the dataset "my_dataset" will be\n used. For ops, the dataset can be specified by including a "schema" entry in output metadata. If "schema" is not provided\n via config or on the asset/op, "public" will be used for the dataset.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_dataset"})}\n )\n def make_my_table() -> pd.DataFrame:\n # the returned value will be stored at my_dataset.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pd.DataFrame) -> pd.DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n If you cannot upload a file to your Dagster deployment, or otherwise cannot\n `authenticate with GCP <https://cloud.google.com/docs/authentication/provide-credentials-adc>`_\n via a standard method, you can provide a service account key as the "gcp_credentials" configuration.\n Dagster will store this key in a temporary file and set GOOGLE_APPLICATION_CREDENTIALS to point to the file.\n After the run completes, the file will be deleted, and GOOGLE_APPLICATION_CREDENTIALS will be\n unset. The key must be base64 encoded to avoid issues with newlines in the keys. You can retrieve\n the base64 encoded key with this shell command: cat $GOOGLE_APPLICATION_CREDENTIALS | base64\n\n"""\n\n\n
[docs]class BigQueryPandasIOManager(BigQueryIOManager):\n """An I/O manager definition that reads inputs from and writes pandas DataFrames to BigQuery.\n\n Returns:\n IOManagerDefinition\n\n Examples:\n .. code-block:: python\n\n from dagster_gcp_pandas import BigQueryPandasIOManager\n from dagster import Definitions, EnvVar\n\n @asset(\n key_prefix=["my_dataset"] # will be used as the dataset in BigQuery\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "io_manager": BigQueryPandasIOManager(project=EnvVar("GCP_PROJECT"))\n }\n )\n\n You can tell Dagster in which dataset to create tables by setting the "dataset" configuration value.\n If you do not provide a dataset as configuration to the I/O manager, Dagster will determine a dataset based\n on the assets and ops using the I/O Manager. For assets, the dataset will be determined from the asset key,\n as shown in the above example. The final prefix before the asset name will be used as the dataset. For example,\n if the asset "my_table" had the key prefix ["gcp", "bigquery", "my_dataset"], the dataset "my_dataset" will be\n used. For ops, the dataset can be specified by including a "schema" entry in output metadata. If "schema" is not provided\n via config or on the asset/op, "public" will be used for the dataset.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_dataset"})}\n )\n def make_my_table() -> pd.DataFrame:\n # the returned value will be stored at my_dataset.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pd.DataFrame) -> pd.DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n If you cannot upload a file to your Dagster deployment, or otherwise cannot\n `authenticate with GCP <https://cloud.google.com/docs/authentication/provide-credentials-adc>`_\n via a standard method, you can provide a service account key as the "gcp_credentials" configuration.\n Dagster will store this key in a temporary file and set GOOGLE_APPLICATION_CREDENTIALS to point to the file.\n After the run completes, the file will be deleted, and GOOGLE_APPLICATION_CREDENTIALS will be\n unset. The key must be base64 encoded to avoid issues with newlines in the keys. You can retrieve\n the base64 encoded key with this shell command: cat $GOOGLE_APPLICATION_CREDENTIALS | base64\n\n """\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [BigQueryPandasTypeHandler()]\n\n @staticmethod\n def default_load_type() -> Optional[Type]:\n return pd.DataFrame
\n
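As a compact, hypothetical end-to-end example of the I/O manager above: two assets share ``BigQueryPandasIOManager``, the upstream asset's return value is written to ``my_dataset.raw_users``, and the downstream asset receives it back as a DataFrame (columns are uppercased on write and lowercased on read, per the type handler). Dataset, project, and column names are placeholders.

.. code-block:: python

    import pandas as pd

    from dagster import Definitions, EnvVar, asset
    from dagster_gcp_pandas import BigQueryPandasIOManager


    @asset(key_prefix=["my_dataset"])  # stored as my_dataset.raw_users
    def raw_users() -> pd.DataFrame:
        return pd.DataFrame({"user_id": [1, 2, 3], "plan": ["free", "pro", "pro"]})


    @asset(key_prefix=["my_dataset"])  # loads my_dataset.raw_users back as a DataFrame
    def pro_users(raw_users: pd.DataFrame) -> pd.DataFrame:
        return raw_users[raw_users["plan"] == "pro"]


    defs = Definitions(
        assets=[raw_users, pro_users],
        resources={"io_manager": BigQueryPandasIOManager(project=EnvVar("GCP_PROJECT"))},
    )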
", "current_page_name": "_modules/dagster_gcp_pandas/bigquery/bigquery_pandas_type_handler", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_gcp_pandas.bigquery.bigquery_pandas_type_handler"}}}, "dagster_gcp_pyspark": {"bigquery": {"bigquery_pyspark_type_handler": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_gcp_pyspark.bigquery.bigquery_pyspark_type_handler

\nfrom typing import Any, Mapping, Optional, Sequence, Type\n\nfrom dagster import InputContext, MetadataValue, OutputContext, TableColumn, TableSchema\nfrom dagster._core.definitions.metadata import RawMetadataValue\nfrom dagster._core.storage.db_io_manager import DbTypeHandler, TableSlice\nfrom dagster_gcp import BigQueryIOManager, build_bigquery_io_manager\nfrom dagster_gcp.bigquery.io_manager import BigQueryClient\nfrom pyspark.sql import DataFrame, SparkSession\nfrom pyspark.sql.types import StructType\n\n\ndef _get_bigquery_write_options(\n    config: Optional[Mapping[str, Any]], table_slice: TableSlice\n) -> Mapping[str, str]:\n    conf = {\n        "table": f"{table_slice.database}.{table_slice.schema}.{table_slice.table}",\n    }\n    if config and config.get("temporary_gcs_bucket") is not None:\n        conf["temporaryGcsBucket"] = config["temporary_gcs_bucket"]\n    else:\n        conf["writeMethod"] = "direct"\n    return conf\n\n\ndef _get_bigquery_read_options(table_slice: TableSlice) -> Mapping[str, str]:\n    conf = {"viewsEnabled": "true", "materializationDataset": table_slice.schema}\n    return conf\n\n\n
[docs]class BigQueryPySparkTypeHandler(DbTypeHandler[DataFrame]):\n """Plugin for the BigQuery I/O Manager that can store and load PySpark DataFrames as BigQuery tables.\n\n Examples:\n .. code-block:: python\n\n from dagster_gcp import BigQueryIOManager\n from dagster_bigquery_pandas import BigQueryPySparkTypeHandler\n from dagster import Definitions, EnvVar\n\n class MyBigQueryIOManager(BigQueryIOManager):\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [BigQueryPySparkTypeHandler()]\n\n @asset(\n key_prefix=["my_dataset"] # my_dataset will be used as the dataset in BigQuery\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "io_manager": MyBigQueryIOManager(project=EnvVar("GCP_PROJECT"))\n }\n )\n\n """\n\n def handle_output(\n self, context: OutputContext, table_slice: TableSlice, obj: DataFrame, _\n ) -> Mapping[str, RawMetadataValue]:\n options = _get_bigquery_write_options(context.resource_config, table_slice)\n\n with_uppercase_cols = obj.toDF(*[c.upper() for c in obj.columns])\n\n with_uppercase_cols.write.format("bigquery").options(**options).mode("append").save()\n\n return {\n "dataframe_columns": MetadataValue.table_schema(\n TableSchema(\n columns=[\n TableColumn(name=field.name, type=field.dataType.typeName())\n for field in obj.schema.fields\n ]\n )\n ),\n }\n\n def load_input(self, context: InputContext, table_slice: TableSlice, _) -> DataFrame:\n options = _get_bigquery_read_options(table_slice)\n spark = SparkSession.builder.getOrCreate() # type: ignore\n\n if table_slice.partition_dimensions and len(context.asset_partition_keys) == 0:\n return spark.createDataFrame([], StructType([]))\n\n df = (\n spark.read.format("bigquery")\n .options(**options)\n .load(BigQueryClient.get_select_statement(table_slice))\n )\n\n return df.toDF(*[c.lower() for c in df.columns])\n\n @property\n def supported_types(self):\n return [DataFrame]
\n\n\nbigquery_pyspark_io_manager = build_bigquery_io_manager(\n [BigQueryPySparkTypeHandler()], default_load_type=DataFrame\n)\nbigquery_pyspark_io_manager.__doc__ = """\nAn I/O manager definition that reads inputs from and writes PySpark DataFrames to BigQuery.\n\nReturns:\n IOManagerDefinition\n\nExamples:\n\n .. code-block:: python\n\n from dagster_gcp_pyspark import bigquery_pyspark_io_manager\n from dagster import Definitions\n\n @asset(\n key_prefix=["my_dataset"] # will be used as the dataset in BigQuery\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "io_manager": bigquery_pyspark_io_manager.configured({\n "project" : {"env": "GCP_PROJECT"}\n })\n }\n )\n\n You can tell Dagster in which dataset to create tables by setting the "dataset" configuration value.\n If you do not provide a dataset as configuration to the I/O manager, Dagster will determine a dataset based\n on the assets and ops using the I/O Manager. For assets, the dataset will be determined from the asset key,\n as shown in the above example. The final prefix before the asset name will be used as the dataset. For example,\n if the asset "my_table" had the key prefix ["gcp", "bigquery", "my_dataset"], the dataset "my_dataset" will be\n used. For ops, the dataset can be specified by including a "schema" entry in output metadata. If "schema" is not provided\n via config or on the asset/op, "public" will be used for the dataset.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_dataset"})}\n )\n def make_my_table() -> pd.DataFrame:\n # the returned value will be stored at my_dataset.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pd.DataFrame) -> pd.DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n If you cannot upload a file to your Dagster deployment, or otherwise cannot\n `authenticate with GCP <https://cloud.google.com/docs/authentication/provide-credentials-adc>`_\n via a standard method, you can provide a service account key as the "gcp_credentials" configuration.\n Dagster will store this key in a temporary file and set GOOGLE_APPLICATION_CREDENTIALS to point to the file.\n After the run completes, the file will be deleted, and GOOGLE_APPLICATION_CREDENTIALS will be\n unset. The key must be base64 encoded to avoid issues with newlines in the keys. You can retrieve\n the base64 encoded key with this shell command: cat $GOOGLE_APPLICATION_CREDENTIALS | base64\n\n"""\n\n\n
[docs]class BigQueryPySparkIOManager(BigQueryIOManager):\n """An I/O manager definition that reads inputs from and writes PySpark DataFrames to BigQuery.\n\n Returns:\n IOManagerDefinition\n\n Examples:\n .. code-block:: python\n\n from dagster_gcp_pyspark import BigQueryPySparkIOManager\n from dagster import Definitions, EnvVar\n\n @asset(\n key_prefix=["my_dataset"] # will be used as the dataset in BigQuery\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "io_manager": BigQueryPySparkIOManager(project=EnvVar("GCP_PROJECT"))\n }\n )\n\n You can tell Dagster in which dataset to create tables by setting the "dataset" configuration value.\n If you do not provide a dataset as configuration to the I/O manager, Dagster will determine a dataset based\n on the assets and ops using the I/O Manager. For assets, the dataset will be determined from the asset key,\n as shown in the above example. The final prefix before the asset name will be used as the dataset. For example,\n if the asset "my_table" had the key prefix ["gcp", "bigquery", "my_dataset"], the dataset "my_dataset" will be\n used. For ops, the dataset can be specified by including a "schema" entry in output metadata. If "schema" is not provided\n via config or on the asset/op, "public" will be used for the dataset.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_dataset"})}\n )\n def make_my_table() -> pd.DataFrame:\n # the returned value will be stored at my_dataset.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pd.DataFrame) -> pd.DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n If you cannot upload a file to your Dagster deployment, or otherwise cannot\n `authenticate with GCP <https://cloud.google.com/docs/authentication/provide-credentials-adc>`_\n via a standard method, you can provide a service account key as the "gcp_credentials" configuration.\n Dagster will store this key in a temporary file and set GOOGLE_APPLICATION_CREDENTIALS to point to the file.\n After the run completes, the file will be deleted, and GOOGLE_APPLICATION_CREDENTIALS will be\n unset. The key must be base64 encoded to avoid issues with newlines in the keys. You can retrieve\n the base64 encoded key with this shell command: cat $GOOGLE_APPLICATION_CREDENTIALS | base64\n\n """\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [BigQueryPySparkTypeHandler()]\n\n @staticmethod\n def default_load_type() -> Optional[Type]:\n return DataFrame
\n
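A hedged sketch of the PySpark counterpart. It assumes the I/O manager exposes the ``temporary_gcs_bucket`` setting that ``_get_bigquery_write_options`` above reads; omitting it falls back to the direct write method. All names are placeholders.

.. code-block:: python

    from pyspark.sql import DataFrame, SparkSession

    from dagster import Definitions, EnvVar, asset
    from dagster_gcp_pyspark import BigQueryPySparkIOManager


    @asset(key_prefix=["my_dataset"])  # stored as my_dataset.my_table
    def my_table() -> DataFrame:
        spark = SparkSession.builder.getOrCreate()
        return spark.createDataFrame([(1, "a"), (2, "b")], ["id", "label"])


    defs = Definitions(
        assets=[my_table],
        resources={
            "io_manager": BigQueryPySparkIOManager(
                project=EnvVar("GCP_PROJECT"),
                temporary_gcs_bucket="my-staging-bucket",  # assumed field; omit for direct writes
            )
        },
    )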
", "current_page_name": "_modules/dagster_gcp_pyspark/bigquery/bigquery_pyspark_type_handler", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_gcp_pyspark.bigquery.bigquery_pyspark_type_handler"}}}, "dagster_ge": {"factory": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_ge.factory

\nimport datetime\nfrom typing import Any, Dict\n\nimport great_expectations as ge\nfrom dagster import (\n    ConfigurableResource,\n    ExpectationResult,\n    IAttachDifferentObjectToOpContext,\n    In,\n    MetadataValue,\n    OpExecutionContext,\n    Out,\n    Output,\n    _check as check,\n    op,\n    resource,\n)\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster_pandas import DataFrame\nfrom great_expectations.render.renderer import ValidationResultsPageRenderer\nfrom great_expectations.render.view import DefaultMarkdownPageView\nfrom pydantic import Field\n\ntry:\n    # ge < v0.13.0\n    from great_expectations.core import convert_to_json_serializable\nexcept ImportError:\n    # ge >= v0.13.0\n    from great_expectations.core.util import convert_to_json_serializable\n\n\nclass GEContextResource(ConfigurableResource, IAttachDifferentObjectToOpContext):\n    ge_root_dir: str = Field(\n        default=None,\n        description="The root directory for your Great Expectations project.",\n    )\n\n    def get_data_context(self):\n        if self.ge_root_dir is None:\n            return ge.data_context.DataContext()\n        return ge.data_context.DataContext(context_root_dir=self.ge_root_dir)\n\n    def get_object_to_set_on_execution_context(self):\n        return self.get_data_context()\n\n\n@dagster_maintained_resource\n@resource(config_schema=GEContextResource.to_config_schema())\ndef ge_data_context(context):\n    return GEContextResource.from_resource_context(context).get_data_context()\n\n\n
[docs]def ge_validation_op_factory(\n name,\n datasource_name,\n suite_name,\n validation_operator_name=None,\n input_dagster_type=DataFrame,\n batch_kwargs=None,\n):\n """Generates ops for interacting with GE.\n\n Args:\n name (str): the name of the op\n datasource_name (str): the name of your DataSource, see your great_expectations.yml\n suite_name (str): the name of your expectation suite, see your great_expectations.yml\n validation_operator_name (Optional[str]): what validation operator to run -- defaults to\n None, which generates an ephemeral validator. If you want to save data docs, use\n 'action_list_operator'.\n See https://legacy.docs.greatexpectations.io/en/0.12.1/reference/core_concepts/validation_operators_and_actions.html#\n input_dagster_type (DagsterType): the Dagster type used to type check the input to the op.\n Defaults to `dagster_pandas.DataFrame`.\n batch_kwargs (Optional[dict]): overrides the `batch_kwargs` parameter when calling the\n `ge_data_context`'s `get_batch` method. Defaults to `{"dataset": dataset}`, where\n `dataset` is the input to the generated op.\n\n Returns:\n An op that takes in a set of data and yields both an expectation with relevant metadata\n and an output with all the metadata (for user processing)\n """\n check.str_param(datasource_name, "datasource_name")\n check.str_param(suite_name, "suite_name")\n check.opt_str_param(validation_operator_name, "validation_operator_name")\n batch_kwargs = check.opt_dict_param(batch_kwargs, "batch_kwargs")\n\n @op(\n name=name,\n ins={"dataset": In(input_dagster_type)},\n out=Out(\n dict,\n description="""\n This op yields an expectationResult with a structured dict of metadata from\n the GE suite, as well as the full result in case a user wants to process it differently.\n The structured dict contains both summary stats from the suite as well as expectation by\n expectation results/details.\n """,\n ),\n required_resource_keys={"ge_data_context"},\n tags={"kind": "ge"},\n )\n def _ge_validation_fn(context: OpExecutionContext, dataset):\n data_context = context.resources.ge_data_context\n\n if validation_operator_name is not None:\n validation_operator = validation_operator_name\n else:\n data_context.add_validation_operator(\n "ephemeral_validation",\n {"class_name": "ActionListValidationOperator", "action_list": []},\n )\n validation_operator = "ephemeral_validation"\n suite = data_context.get_expectation_suite(suite_name)\n final_batch_kwargs = batch_kwargs or {"dataset": dataset}\n if "datasource" in final_batch_kwargs:\n context.log.warning(\n "`datasource` field of `batch_kwargs` will be ignored; use the `datasource_name` "\n "parameter of the op factory instead."\n )\n final_batch_kwargs["datasource"] = datasource_name\n batch = data_context.get_batch(final_batch_kwargs, suite)\n run_id = {\n "run_name": datasource_name + " run",\n "run_time": datetime.datetime.utcnow(),\n }\n results = data_context.run_validation_operator(\n validation_operator, assets_to_validate=[batch], run_id=run_id\n )\n res = convert_to_json_serializable(results.list_validation_results())[0]\n validation_results_page_renderer = ValidationResultsPageRenderer(run_info_at_end=True)\n rendered_document_content_list = (\n validation_results_page_renderer.render_validation_operator_result(results)\n )\n md_str = " ".join(DefaultMarkdownPageView().render(rendered_document_content_list))\n\n yield ExpectationResult(\n success=res["success"],\n metadata={"Expectation Results": MetadataValue.md(md_str)},\n )\n yield Output(res)\n\n 
return _ge_validation_fn
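The factory above only builds the op; below is a minimal usage sketch of wiring it into a job, assuming a pandas-loading step, a "getest" datasource, a "basic.warning" suite, and a local Great Expectations project directory (all placeholder names).

import pandas as pd
from dagster import job, op
from dagster_ge.factory import ge_data_context, ge_validation_op_factory

# Build a validation op from the factory above; datasource and suite names are placeholders.
payroll_expectations = ge_validation_op_factory(
    name="ge_validation_op", datasource_name="getest", suite_name="basic.warning"
)


@op
def load_payroll():
    # Placeholder loading step; the generated op expects a pandas DataFrame input.
    return pd.read_csv("payroll.csv")


@job(
    resource_defs={
        "ge_data_context": ge_data_context.configured(
            {"ge_root_dir": "./great_expectations"}  # placeholder project root
        )
    }
)
def payroll_validation_job():
    payroll_expectations(load_payroll())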
\n\n\ndef ge_validation_op_factory_v3(\n name,\n datasource_name,\n data_connector_name,\n data_asset_name,\n suite_name,\n batch_identifiers: dict,\n input_dagster_type=DataFrame,\n runtime_method_type="batch_data",\n extra_kwargs=None,\n):\n """Generates ops for interacting with GE (v3 API).\n\n Args:\n name (str): the name of the op\n datasource_name (str): the name of your DataSource, see your great_expectations.yml\n data_connector_name (str): the name of the data connector for this datasource. This should\n point to a RuntimeDataConnector. For information on how to set this up, see:\n https://docs.greatexpectations.io/docs/guides/connecting_to_your_data/how_to_create_a_batch_of_data_from_an_in_memory_spark_or_pandas_dataframe\n data_asset_name (str): the name of the data asset that this op will be validating.\n suite_name (str): the name of your expectation suite, see your great_expectations.yml\n batch_identifier_fn (dict): A dicitonary of batch identifiers to uniquely identify this\n batch of data. To learn more about batch identifiers, see:\n https://docs.greatexpectations.io/docs/reference/datasources#batches.\n input_dagster_type (DagsterType): the Dagster type used to type check the input to the op.\n Defaults to `dagster_pandas.DataFrame`.\n runtime_method_type (str): how GE should interperet the op input. One of ("batch_data",\n "path", "query"). Defaults to "batch_data", which will interperet the input as an\n in-memory object.\n extra_kwargs (Optional[dict]): adds extra kwargs to the invocation of `ge_data_context`'s\n `get_validator` method. If not set, input will be:\n {\n "datasource_name": datasource_name,\n "data_connector_name": data_connector_name,\n "data_asset_name": data_asset_name,\n "runtime_parameters": {\n "<runtime_method_type>": <op input>\n },\n "batch_identifiers": batch_identifiers,\n "expectation_suite_name": suite_name,\n }\n\n Returns:\n An op that takes in a set of data and yields both an expectation with relevant metadata and\n an output with all the metadata (for user processing)\n\n """\n check.str_param(datasource_name, "datasource_name")\n check.str_param(data_connector_name, "data_connector_name")\n check.str_param(suite_name, "suite_name")\n\n _extra_kwargs: Dict[Any, Any] = check.opt_dict_param(extra_kwargs, "extra_kwargs")\n\n @op(\n name=name,\n ins={"dataset": In(input_dagster_type)},\n out=Out(\n dict,\n description="""\n This op yields an ExpectationResult with a structured dict of metadata from\n the GE suite, as well as the full result in case a user wants to process it differently.\n The structured dict contains both summary stats from the suite as well as expectation by\n expectation results/details.\n """,\n ),\n required_resource_keys={"ge_data_context"},\n tags={"kind": "ge"},\n )\n def _ge_validation_fn(context: OpExecutionContext, dataset):\n data_context = context.resources.ge_data_context\n\n validator_kwargs = {\n "datasource_name": datasource_name,\n "data_connector_name": data_connector_name,\n "data_asset_name": datasource_name or data_asset_name,\n "runtime_parameters": {runtime_method_type: dataset},\n "batch_identifiers": batch_identifiers,\n "expectation_suite_name": suite_name,\n **_extra_kwargs,\n }\n validator = data_context.get_validator(**validator_kwargs)\n\n run_id = {\n "run_name": datasource_name + " run",\n "run_time": datetime.datetime.utcnow(),\n }\n results = validator.validate(run_id=run_id)\n\n validation_results_page_renderer = ValidationResultsPageRenderer(run_info_at_end=True)\n 
rendered_document_content_list = validation_results_page_renderer.render(\n validation_results=results\n )\n md_str = "".join(DefaultMarkdownPageView().render(rendered_document_content_list))\n\n yield ExpectationResult(\n success=bool(results["success"]),\n metadata={"Expectation Results": MetadataValue.md(md_str)},\n )\n yield Output(results.to_json_dict())\n\n return _ge_validation_fn\n
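For the v3 API, the factory call differs mainly in the data connector and asset arguments; a hedged sketch with placeholder names follows.

# Placeholder names throughout; batch_identifiers must line up with the RuntimeDataConnector's configuration.
validate_dataframe_v3 = ge_validation_op_factory_v3(
    name="ge_validation_op_v3",
    datasource_name="my_datasource",
    data_connector_name="my_runtime_data_connector",
    data_asset_name="my_dataframe_asset",
    suite_name="basic.warning",
    batch_identifiers={"run_key": "nightly"},
)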
", "current_page_name": "_modules/dagster_ge/factory", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_ge.factory"}}, "dagster_github": {"resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_github.resources

\nimport time\nfrom datetime import datetime\nfrom typing import Optional\n\nimport jwt\nimport requests\nfrom dagster import ConfigurableResource, resource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom pydantic import Field\n\n\ndef to_seconds(dt):\n    return (dt - datetime(1970, 1, 1)).total_seconds()\n\n\nclass GithubClient:\n    def __init__(\n        self, client, app_id, app_private_rsa_key, default_installation_id, hostname=None\n    ) -> None:\n        self.client = client\n        self.app_private_rsa_key = app_private_rsa_key\n        self.app_id = app_id\n        self.default_installation_id = default_installation_id\n        self.installation_tokens = {}\n        self.app_token = {}\n        self.hostname = hostname\n\n    def __set_app_token(self):\n        # from https://developer.github.com/apps/building-github-apps/authenticating-with-github-apps/\n        # needing to self-sign a JWT\n        now = int(time.time())\n        # JWT expiration time (10 minute maximum)\n        expires = now + (10 * 60)\n        encoded_token = jwt.encode(\n            {\n                # issued at time\n                "iat": now,\n                # JWT expiration time\n                "exp": expires,\n                # GitHub App's identifier\n                "iss": self.app_id,\n            },\n            self.app_private_rsa_key,\n            algorithm="RS256",\n        )\n        self.app_token = {\n            "value": encoded_token,\n            "expires": expires,\n        }\n\n    def __check_app_token(self):\n        if ("expires" not in self.app_token) or (\n            self.app_token["expires"] < (int(time.time()) + 60)\n        ):\n            self.__set_app_token()\n\n    def get_installations(self, headers=None):\n        if headers is None:\n            headers = {}\n        self.__check_app_token()\n        headers["Authorization"] = "Bearer {}".format(self.app_token["value"])\n        headers["Accept"] = "application/vnd.github.machine-man-preview+json"\n        request = self.client.get(\n            (\n                "https://api.github.com/app/installations"\n                if self.hostname is None\n                else f"https://{self.hostname}/api/v3/app/installations"\n            ),\n            headers=headers,\n        )\n        request.raise_for_status()\n        return request.json()\n\n    def __set_installation_token(self, installation_id, headers=None):\n        if headers is None:\n            headers = {}\n        self.__check_app_token()\n        headers["Authorization"] = "Bearer {}".format(self.app_token["value"])\n        headers["Accept"] = "application/vnd.github.machine-man-preview+json"\n        request = requests.post(\n            (\n                f"https://api.github.com/app/installations/{installation_id}/access_tokens"\n                if self.hostname is None\n                else "https://{}/api/v3/app/installations/{}/access_tokens".format(\n                    self.hostname, installation_id\n                )\n            ),\n            headers=headers,\n        )\n        request.raise_for_status()\n        auth = request.json()\n        self.installation_tokens[installation_id] = {\n            "value": auth["token"],\n            "expires": to_seconds(datetime.strptime(auth["expires_at"], "%Y-%m-%dT%H:%M:%SZ")),\n        }\n\n    def __check_installation_tokens(self, installation_id):\n        if (installation_id not in self.installation_tokens) or (\n            
self.installation_tokens[installation_id]["expires"] < (int(time.time()) + 60)\n        ):\n            self.__set_installation_token(installation_id)\n\n    def execute(self, query, variables, headers=None, installation_id=None):\n        if headers is None:\n            headers = {}\n        if installation_id is None:\n            installation_id = self.default_installation_id\n        self.__check_installation_tokens(installation_id)\n        headers["Authorization"] = "token {}".format(\n            self.installation_tokens[installation_id]["value"]\n        )\n        request = requests.post(\n            (\n                "https://api.github.com/graphql"\n                if self.hostname is None\n                else f"https://{self.hostname}/api/graphql"\n            ),\n            json={"query": query, "variables": variables},\n            headers=headers,\n        )\n        request.raise_for_status()\n        return request.json()\n\n    def create_issue(self, repo_name, repo_owner, title, body, installation_id=None):\n        if installation_id is None:\n            installation_id = self.default_installation_id\n        res = self.execute(\n            query="""\n            query get_repo_id($repo_name: String!, $repo_owner: String!) {\n                repository(name: $repo_name, owner: $repo_owner) {\n                    id\n                }\n            }\n            """,\n            variables={"repo_name": repo_name, "repo_owner": repo_owner},\n            installation_id=installation_id,\n        )\n\n        return self.execute(\n            query="""\n                mutation CreateIssue($id: ID!, $title: String!, $body: String!) {\n                createIssue(input: {\n                    repositoryId: $id,\n                    title: $title,\n                    body: $body\n                }) {\n                    clientMutationId,\n                    issue {\n                        body\n                        title\n                        url\n                    }\n                }\n                }\n            """,\n            variables={\n                "id": res["data"]["repository"]["id"],\n                "title": title,\n                "body": body,\n            },\n            installation_id=installation_id,\n        )\n\n\n
[docs]class GithubResource(ConfigurableResource):\n github_app_id: int = Field(\n description="Github Application ID, for more info see https://developer.github.com/apps/",\n )\n github_app_private_rsa_key: str = Field(\n description=(\n "Github Application Private RSA key text, for more info see"\n " https://developer.github.com/apps/"\n ),\n )\n github_installation_id: Optional[int] = Field(\n default=None,\n description=(\n "Github Application Installation ID, for more info see"\n " https://developer.github.com/apps/"\n ),\n )\n github_hostname: Optional[str] = Field(\n default=None,\n description=(\n "Github hostname. Defaults to `api.github.com`, for more info see"\n " https://developer.github.com/apps/"\n ),\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def get_client(self) -> GithubClient:\n return GithubClient(\n client=requests.Session(),\n app_id=self.github_app_id,\n app_private_rsa_key=self.github_app_private_rsa_key,\n default_installation_id=self.github_installation_id,\n hostname=self.github_hostname,\n )
\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n config_schema=GithubResource.to_config_schema(),\n description="This resource is for connecting to Github",\n)\ndef github_resource(context) -> GithubClient:\n return GithubResource(**context.resource_config).get_client()
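A sketch of how the resource above might be used from an op, with placeholder app credentials, repository, and issue text; the private key is read from an environment variable.

from dagster import Definitions, EnvVar, job, op
from dagster_github import GithubResource


@op
def file_failure_issue(github: GithubResource):
    # GithubClient.create_issue is defined above; repo owner/name and text are placeholders.
    github.get_client().create_issue(
        repo_name="my-repo",
        repo_owner="my-org",
        title="Nightly pipeline needs attention",
        body="Opened automatically from a Dagster op.",
    )


@job
def issue_job():
    file_failure_issue()


defs = Definitions(
    jobs=[issue_job],
    resources={
        "github": GithubResource(
            github_app_id=123456,  # placeholder app id
            github_app_private_rsa_key=EnvVar("GITHUB_APP_PRIVATE_KEY"),
            github_installation_id=654321,  # placeholder installation id
        )
    },
)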
\n
", "current_page_name": "_modules/dagster_github/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_github.resources"}}, "dagster_graphql": {"client": {"client": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_graphql.client.client

\nfrom itertools import chain\nfrom typing import Any, Dict, Iterable, List, Mapping, Optional, Sequence, Union\n\nimport dagster._check as check\nimport requests.exceptions\nfrom dagster import DagsterRunStatus\nfrom dagster._annotations import deprecated, public\nfrom dagster._core.definitions.run_config import RunConfig, convert_config_input\nfrom dagster._core.definitions.utils import validate_tags\nfrom gql import Client, gql\nfrom gql.transport import Transport\nfrom gql.transport.requests import RequestsHTTPTransport\n\nfrom .client_queries import (\n    CLIENT_GET_REPO_LOCATIONS_NAMES_AND_PIPELINES_QUERY,\n    CLIENT_SUBMIT_PIPELINE_RUN_MUTATION,\n    GET_PIPELINE_RUN_STATUS_QUERY,\n    RELOAD_REPOSITORY_LOCATION_MUTATION,\n    SHUTDOWN_REPOSITORY_LOCATION_MUTATION,\n    TERMINATE_RUN_JOB_MUTATION,\n)\nfrom .utils import (\n    DagsterGraphQLClientError,\n    InvalidOutputErrorInfo,\n    JobInfo,\n    ReloadRepositoryLocationInfo,\n    ReloadRepositoryLocationStatus,\n    ShutdownRepositoryLocationInfo,\n    ShutdownRepositoryLocationStatus,\n)\n\n\n
[docs]class DagsterGraphQLClient:\n """Official Dagster Python Client for GraphQL.\n\n Utilizes the gql library to dispatch queries over HTTP to a remote Dagster GraphQL Server\n\n As of now, all operations on this client are synchronous.\n\n Intended usage:\n\n .. code-block:: python\n\n client = DagsterGraphQLClient("localhost", port_number=3000)\n status = client.get_run_status(**SOME_RUN_ID**)\n\n Args:\n hostname (str): Hostname for the Dagster GraphQL API, like `localhost` or\n `dagster.YOUR_ORG_HERE`.\n port_number (Optional[int]): Port number to connect to on the host.\n Defaults to None.\n transport (Optional[Transport], optional): A custom transport to use to connect to the\n GraphQL API with (e.g. for custom auth). Defaults to None.\n use_https (bool, optional): Whether to use https in the URL connection string for the\n GraphQL API. Defaults to False.\n timeout (int): Number of seconds before requests should time out. Defaults to 60.\n headers (Optional[Dict[str, str]]): Additional headers to include in the request. To use\n this client in Dagster Cloud, set the "Dagster-Cloud-Api-Token" header to a user token\n generated in the Dagster Cloud UI.\n\n Raises:\n :py:class:`~requests.exceptions.ConnectionError`: if the client cannot connect to the host.\n """\n\n def __init__(\n self,\n hostname: str,\n port_number: Optional[int] = None,\n transport: Optional[Transport] = None,\n use_https: bool = False,\n timeout: int = 300,\n headers: Optional[Dict[str, str]] = None,\n ):\n self._hostname = check.str_param(hostname, "hostname")\n self._port_number = check.opt_int_param(port_number, "port_number")\n self._use_https = check.bool_param(use_https, "use_https")\n\n self._url = (\n ("https://" if self._use_https else "http://")\n + (f"{self._hostname}:{self._port_number}" if self._port_number else self._hostname)\n + "/graphql"\n )\n\n self._transport = check.opt_inst_param(\n transport,\n "transport",\n Transport,\n default=RequestsHTTPTransport(\n url=self._url, use_json=True, timeout=timeout, headers=headers\n ),\n )\n try:\n self._client = Client(transport=self._transport, fetch_schema_from_transport=True)\n except requests.exceptions.ConnectionError as exc:\n raise DagsterGraphQLClientError(\n f"Error when connecting to url {self._url}. 
"\n + f"Did you specify hostname: {self._hostname} "\n + (f"and port_number: {self._port_number} " if self._port_number else "")\n + "correctly?"\n ) from exc\n\n def _execute(self, query: str, variables: Optional[Dict[str, Any]] = None):\n try:\n return self._client.execute(gql(query), variable_values=variables)\n except Exception as exc: # catch generic Exception from the gql client\n raise DagsterGraphQLClientError(\n f"Exception occured during execution of query \\n{query}\\n with variables"\n f" \\n{variables}\\n"\n ) from exc\n\n def _get_repo_locations_and_names_with_pipeline(self, job_name: str) -> List[JobInfo]:\n res_data = self._execute(CLIENT_GET_REPO_LOCATIONS_NAMES_AND_PIPELINES_QUERY)\n query_res = res_data["repositoriesOrError"]\n repo_connection_status = query_res["__typename"]\n if repo_connection_status == "RepositoryConnection":\n valid_nodes: Iterable[JobInfo] = chain(*map(JobInfo.from_node, query_res["nodes"]))\n return [info for info in valid_nodes if info.job_name == job_name]\n else:\n raise DagsterGraphQLClientError(repo_connection_status, query_res["message"])\n\n def _core_submit_execution(\n self,\n pipeline_name: str,\n repository_location_name: Optional[str] = None,\n repository_name: Optional[str] = None,\n run_config: Optional[Union[RunConfig, Mapping[str, Any]]] = None,\n mode: str = "default",\n preset: Optional[str] = None,\n tags: Optional[Mapping[str, str]] = None,\n op_selection: Optional[Sequence[str]] = None,\n is_using_job_op_graph_apis: Optional[bool] = False,\n ):\n check.opt_str_param(repository_location_name, "repository_location_name")\n check.opt_str_param(repository_name, "repository_name")\n check.str_param(pipeline_name, "pipeline_name")\n check.opt_str_param(mode, "mode")\n check.opt_str_param(preset, "preset")\n run_config = check.opt_mapping_param(convert_config_input(run_config), "run_config")\n\n # The following invariant will never fail when a job is executed\n check.invariant(\n (mode is not None and run_config is not None) or preset is not None,\n "Either a mode and run_config or a preset must be specified in order to "\n f"submit the pipeline {pipeline_name} for execution",\n )\n tags = validate_tags(tags)\n\n pipeline_or_job = "Job" if is_using_job_op_graph_apis else "Pipeline"\n\n if not repository_location_name or not repository_name:\n job_info_lst = self._get_repo_locations_and_names_with_pipeline(pipeline_name)\n if len(job_info_lst) == 0:\n raise DagsterGraphQLClientError(\n f"{pipeline_or_job}NotFoundError",\n f"No {'jobs' if is_using_job_op_graph_apis else 'pipelines'} with the name"\n f" `{pipeline_name}` exist",\n )\n elif len(job_info_lst) == 1:\n job_info = job_info_lst[0]\n repository_location_name = job_info.repository_location_name\n repository_name = job_info.repository_name\n else:\n raise DagsterGraphQLClientError(\n "Must specify repository_location_name and repository_name since there are"\n f" multiple {'jobs' if is_using_job_op_graph_apis else 'pipelines'} with the"\n f" name {pipeline_name}.\\n\\tchoose one of: {job_info_lst}"\n )\n\n variables: Dict[str, Any] = {\n "executionParams": {\n "selector": {\n "repositoryLocationName": repository_location_name,\n "repositoryName": repository_name,\n "pipelineName": pipeline_name,\n "solidSelection": op_selection,\n }\n }\n }\n if preset is not None:\n variables["executionParams"]["preset"] = preset\n if mode is not None and run_config is not None:\n variables["executionParams"] = {\n **variables["executionParams"],\n "runConfigData": run_config,\n "mode": mode,\n 
"executionMetadata": (\n {"tags": [{"key": k, "value": v} for k, v in tags.items()]} if tags else {}\n ),\n }\n\n res_data: Dict[str, Any] = self._execute(CLIENT_SUBMIT_PIPELINE_RUN_MUTATION, variables)\n query_result = res_data["launchPipelineExecution"]\n query_result_type = query_result["__typename"]\n if (\n query_result_type == "LaunchRunSuccess"\n or query_result_type == "LaunchPipelineRunSuccess"\n ):\n return query_result["run"]["runId"]\n elif query_result_type == "InvalidStepError":\n raise DagsterGraphQLClientError(query_result_type, query_result["invalidStepKey"])\n elif query_result_type == "InvalidOutputError":\n error_info = InvalidOutputErrorInfo(\n step_key=query_result["stepKey"],\n invalid_output_name=query_result["invalidOutputName"],\n )\n raise DagsterGraphQLClientError(query_result_type, body=error_info)\n elif (\n query_result_type == "RunConfigValidationInvalid"\n or query_result_type == "PipelineConfigValidationInvalid"\n ):\n raise DagsterGraphQLClientError(query_result_type, query_result["errors"])\n else:\n # query_result_type is a ConflictingExecutionParamsError, a PresetNotFoundError\n # a PipelineNotFoundError, a RunConflict, or a PythonError\n raise DagsterGraphQLClientError(query_result_type, query_result["message"])\n\n
[docs] @public\n def submit_job_execution(\n self,\n job_name: str,\n repository_location_name: Optional[str] = None,\n repository_name: Optional[str] = None,\n run_config: Optional[Dict[str, Any]] = None,\n tags: Optional[Dict[str, Any]] = None,\n op_selection: Optional[Sequence[str]] = None,\n ) -> str:\n """Submits a job with attached configuration for execution.\n\n Args:\n job_name (str): The job's name\n repository_location_name (Optional[str]): The name of the repository location where\n the job is located. If omitted, the client will try to infer the repository location\n from the available options on the Dagster deployment. Defaults to None.\n repository_name (Optional[str]): The name of the repository where the job is located.\n If omitted, the client will try to infer the repository from the available options\n on the Dagster deployment. Defaults to None.\n run_config (Optional[Dict[str, Any]]): This is the run config to execute the job with.\n Note that runConfigData is any-typed in the GraphQL type system. This type is used when passing in\n an arbitrary object for run config. However, it must conform to the constraints of the config\n schema for this job. If it does not, the client will throw a DagsterGraphQLClientError with a message of\n JobConfigValidationInvalid. Defaults to None.\n tags (Optional[Dict[str, Any]]): A set of tags to add to the job execution.\n\n Raises:\n DagsterGraphQLClientError("InvalidStepError", invalid_step_key): the job has an invalid step\n DagsterGraphQLClientError("InvalidOutputError", body=error_object): some solid has an invalid output within the job.\n The error_object is of type dagster_graphql.InvalidOutputErrorInfo.\n DagsterGraphQLClientError("RunConflict", message): a `DagsterRunConflict` occured during execution.\n This indicates that a conflicting job run already exists in run storage.\n DagsterGraphQLClientError("PipelineConfigurationInvalid", invalid_step_key): the run_config is not in the expected format\n for the job\n DagsterGraphQLClientError("JobNotFoundError", message): the requested job does not exist\n DagsterGraphQLClientError("PythonError", message): an internal framework error occurred\n\n Returns:\n str: run id of the submitted pipeline run\n """\n return self._core_submit_execution(\n pipeline_name=job_name,\n repository_location_name=repository_location_name,\n repository_name=repository_name,\n run_config=run_config,\n mode="default",\n preset=None,\n tags=tags,\n op_selection=op_selection,\n is_using_job_op_graph_apis=True,\n )
\n\n
[docs] @public\n def get_run_status(self, run_id: str) -> DagsterRunStatus:\n """Get the status of a given Pipeline Run.\n\n Args:\n run_id (str): run id of the requested pipeline run.\n\n Raises:\n DagsterGraphQLClientError("PipelineNotFoundError", message): if the requested run id is not found\n DagsterGraphQLClientError("PythonError", message): on internal framework errors\n\n Returns:\n DagsterRunStatus: returns a status Enum describing the state of the requested pipeline run\n """\n check.str_param(run_id, "run_id")\n\n res_data: Dict[str, Dict[str, Any]] = self._execute(\n GET_PIPELINE_RUN_STATUS_QUERY, {"runId": run_id}\n )\n query_result: Dict[str, Any] = res_data["pipelineRunOrError"]\n query_result_type: str = query_result["__typename"]\n if query_result_type == "PipelineRun" or query_result_type == "Run":\n return DagsterRunStatus(query_result["status"])\n else:\n raise DagsterGraphQLClientError(query_result_type, query_result["message"])
\n\n
[docs] @public\n def reload_repository_location(\n self, repository_location_name: str\n ) -> ReloadRepositoryLocationInfo:\n """Reloads a Dagster Repository Location, which reloads all repositories in that repository location.\n\n This is useful in a variety of contexts, including refreshing the Dagster UI without restarting\n the server.\n\n Args:\n repository_location_name (str): The name of the repository location\n\n Returns:\n ReloadRepositoryLocationInfo: Object with information about the result of the reload request\n """\n check.str_param(repository_location_name, "repository_location_name")\n\n res_data: Dict[str, Dict[str, Any]] = self._execute(\n RELOAD_REPOSITORY_LOCATION_MUTATION,\n {"repositoryLocationName": repository_location_name},\n )\n\n query_result: Dict[str, Any] = res_data["reloadRepositoryLocation"]\n query_result_type: str = query_result["__typename"]\n if query_result_type == "WorkspaceLocationEntry":\n location_or_error_type = query_result["locationOrLoadError"]["__typename"]\n if location_or_error_type == "RepositoryLocation":\n return ReloadRepositoryLocationInfo(status=ReloadRepositoryLocationStatus.SUCCESS)\n else:\n return ReloadRepositoryLocationInfo(\n status=ReloadRepositoryLocationStatus.FAILURE,\n failure_type="PythonError",\n message=query_result["locationOrLoadError"]["message"],\n )\n else:\n # query_result_type is either ReloadNotSupported or RepositoryLocationNotFound\n return ReloadRepositoryLocationInfo(\n status=ReloadRepositoryLocationStatus.FAILURE,\n failure_type=query_result_type,\n message=query_result["message"],\n )
\n\n
[docs] @deprecated(breaking_version="2.0")\n @public\n def shutdown_repository_location(\n self, repository_location_name: str\n ) -> ShutdownRepositoryLocationInfo:\n """Shuts down the server that is serving metadata for the provided repository location.\n\n This is primarily useful when you want the server to be restarted by the compute environment\n in which it is running (for example, in Kubernetes, the pod in which the server is running\n will automatically restart when the server is shut down, and the repository metadata will\n be reloaded)\n\n Args:\n repository_location_name (str): The name of the repository location\n\n Returns:\n ShutdownRepositoryLocationInfo: Object with information about the result of the reload request\n """\n check.str_param(repository_location_name, "repository_location_name")\n\n res_data: Dict[str, Dict[str, Any]] = self._execute(\n SHUTDOWN_REPOSITORY_LOCATION_MUTATION,\n {"repositoryLocationName": repository_location_name},\n )\n\n query_result: Dict[str, Any] = res_data["shutdownRepositoryLocation"]\n query_result_type: str = query_result["__typename"]\n if query_result_type == "ShutdownRepositoryLocationSuccess":\n return ShutdownRepositoryLocationInfo(status=ShutdownRepositoryLocationStatus.SUCCESS)\n elif (\n query_result_type == "RepositoryLocationNotFound" or query_result_type == "PythonError"\n ):\n return ShutdownRepositoryLocationInfo(\n status=ShutdownRepositoryLocationStatus.FAILURE,\n message=query_result["message"],\n )\n else:\n raise Exception(f"Unexpected query result type {query_result_type}")
\n\n    def terminate_run(self, run_id: str):\n        """Terminates a pipeline run. This method is useful when you would like to stop a pipeline run\n        based on an external event.\n\n        Args:\n            run_id (str): The run id of the pipeline run to terminate\n        """\n        check.str_param(run_id, "run_id")\n\n        res_data: Dict[str, Dict[str, Any]] = self._execute(\n            TERMINATE_RUN_JOB_MUTATION, {"runId": run_id}\n        )\n\n        query_result: Dict[str, Any] = res_data["terminateRun"]\n        query_result_type: str = query_result["__typename"]\n        if query_result_type == "TerminateRunSuccess":\n            return\n\n        elif query_result_type == "RunNotFoundError":\n            raise DagsterGraphQLClientError("RunNotFoundError", f"Run Id {run_id} not found")\n        else:\n            raise DagsterGraphQLClientError(query_result_type, query_result["message"])
\n
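Building on the client documented above, a short end-to-end sketch: submit a job, then poll its status. The host, port, job name, and run config are placeholders.

from dagster import DagsterRunStatus
from dagster_graphql import DagsterGraphQLClient, DagsterGraphQLClientError

client = DagsterGraphQLClient("localhost", port_number=3000)
try:
    run_id = client.submit_job_execution(
        "my_job",
        run_config={"ops": {"my_op": {"config": {"date": "2023-10-12"}}}},
        tags={"triggered_by": "script"},
    )
    if client.get_run_status(run_id) == DagsterRunStatus.SUCCESS:
        print(f"Run {run_id} succeeded")
except DagsterGraphQLClientError as exc:
    # e.g. JobNotFoundError, RunConfigValidationInvalid, or a connection problem
    print(f"Submission failed: {exc}")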
", "current_page_name": "_modules/dagster_graphql/client/client", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_graphql.client.client"}, "utils": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_graphql.client.utils

\nfrom enum import Enum\nfrom typing import Any, Dict, List, NamedTuple, Optional\n\n\n
[docs]class DagsterGraphQLClientError(Exception):\n def __init__(self, *args, body=None):\n super().__init__(*args)\n self.body = body
\n\n\n
[docs]class ReloadRepositoryLocationStatus(Enum):\n """This enum describes the status of a GraphQL mutation to reload a Dagster repository location.\n\n Args:\n Enum (str): can be either `ReloadRepositoryLocationStatus.SUCCESS`\n or `ReloadRepositoryLocationStatus.FAILURE`.\n """\n\n SUCCESS = "SUCCESS"\n FAILURE = "FAILURE"
\n\n\nclass ShutdownRepositoryLocationStatus(Enum):\n SUCCESS = "SUCCESS"\n FAILURE = "FAILURE"\n\n\n
[docs]class ReloadRepositoryLocationInfo(NamedTuple):\n    """This class gives information about the result of reloading\n    a Dagster repository location with a GraphQL mutation.\n\n    Args:\n        status (ReloadRepositoryLocationStatus): The status of the reload repository location mutation\n        failure_type (Optional[str], optional): the failure type if `status == ReloadRepositoryLocationStatus.FAILURE`.\n            Can be one of `ReloadNotSupported`, `RepositoryLocationNotFound`, or `RepositoryLocationLoadFailure`. Defaults to None.\n        message (Optional[str], optional): the failure message/reason if\n            `status == ReloadRepositoryLocationStatus.FAILURE`. Defaults to None.\n    """\n\n    status: ReloadRepositoryLocationStatus\n    failure_type: Optional[str] = None\n    message: Optional[str] = None
\n\n\nclass ShutdownRepositoryLocationInfo(NamedTuple):\n    """This class gives information about the result of shutting down the server for\n    a Dagster repository location using a GraphQL mutation.\n\n    Args:\n        status (ShutdownRepositoryLocationStatus): Whether the shutdown succeeded or failed.\n        message (Optional[str], optional): the failure message/reason if\n            `status == ShutdownRepositoryLocationStatus.FAILURE`. Defaults to None.\n    """\n\n    status: ShutdownRepositoryLocationStatus\n    message: Optional[str] = None\n\n\nclass JobInfo(NamedTuple):\n    repository_location_name: str\n    repository_name: str\n    job_name: str\n\n    @staticmethod\n    def from_node(node: Dict[str, Any]) -> List["JobInfo"]:\n        repo_name = node["name"]\n        repo_location_name = node["location"]["name"]\n        return [\n            JobInfo(\n                repository_location_name=repo_location_name,\n                repository_name=repo_name,\n                job_name=job["name"],\n            )\n            for job in node["pipelines"]\n        ]
[docs]class InvalidOutputErrorInfo(NamedTuple):\n """This class gives information about an InvalidOutputError from submitting a pipeline for execution\n from GraphQL.\n\n Args:\n step_key (str): key of the step that failed\n invalid_output_name (str): the name of the invalid output from the given step\n """\n\n step_key: str\n invalid_output_name: str
\n
", "current_page_name": "_modules/dagster_graphql/client/utils", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_graphql.client.utils"}}}, "dagster_k8s": {"executor": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_k8s.executor

\nfrom typing import Iterator, List, Optional, cast\n\nimport kubernetes.config\nfrom dagster import (\n    Field,\n    IntSource,\n    Noneable,\n    StringSource,\n    _check as check,\n    executor,\n)\nfrom dagster._core.definitions.executor_definition import multiple_process_executor_requirements\nfrom dagster._core.definitions.metadata import MetadataValue\nfrom dagster._core.events import DagsterEvent, EngineEventData\nfrom dagster._core.execution.retries import RetryMode, get_retries_config\nfrom dagster._core.execution.tags import get_tag_concurrency_limits_config\nfrom dagster._core.executor.base import Executor\nfrom dagster._core.executor.init import InitExecutorContext\nfrom dagster._core.executor.step_delegating import (\n    CheckStepHealthResult,\n    StepDelegatingExecutor,\n    StepHandler,\n    StepHandlerContext,\n)\nfrom dagster._utils.merger import merge_dicts\n\nfrom dagster_k8s.launcher import K8sRunLauncher\n\nfrom .client import DagsterKubernetesClient\nfrom .container_context import K8sContainerContext\nfrom .job import (\n    USER_DEFINED_K8S_CONFIG_SCHEMA,\n    DagsterK8sJobConfig,\n    UserDefinedDagsterK8sConfig,\n    construct_dagster_k8s_job,\n    get_k8s_job_name,\n    get_user_defined_k8s_config,\n)\n\n_K8S_EXECUTOR_CONFIG_SCHEMA = merge_dicts(\n    DagsterK8sJobConfig.config_type_job(),\n    {\n        "load_incluster_config": Field(\n            bool,\n            is_required=False,\n            description="""Whether or not the executor is running within a k8s cluster already. If\n            the job is using the `K8sRunLauncher`, the default value of this parameter will be\n            the same as the corresponding value on the run launcher.\n            If ``True``, we assume the executor is running within the target cluster and load config\n            using ``kubernetes.config.load_incluster_config``. Otherwise, we will use the k8s config\n            specified in ``kubeconfig_file`` (using ``kubernetes.config.load_kube_config``) or fall\n            back to the default kubeconfig.""",\n        ),\n        "kubeconfig_file": Field(\n            Noneable(str),\n            is_required=False,\n            description="""Path to a kubeconfig file to use, if not using default kubeconfig. If\n            the job is using the `K8sRunLauncher`, the default value of this parameter will be\n            the same as the corresponding value on the run launcher.""",\n        ),\n        "job_namespace": Field(StringSource, is_required=False),\n        "retries": get_retries_config(),\n        "max_concurrent": Field(\n            IntSource,\n            is_required=False,\n            description=(\n                "Limit on the number of pods that will run concurrently within the scope "\n                "of a Dagster run. Note that this limit is per run, not global."\n            ),\n        ),\n        "tag_concurrency_limits": get_tag_concurrency_limits_config(),\n        "step_k8s_config": Field(\n            USER_DEFINED_K8S_CONFIG_SCHEMA,\n            is_required=False,\n            description="Raw Kubernetes configuration for each step launched by the executor.",\n        ),\n    },\n)\n\n\n
[docs]@executor(\n name="k8s",\n config_schema=_K8S_EXECUTOR_CONFIG_SCHEMA,\n requirements=multiple_process_executor_requirements(),\n)\ndef k8s_job_executor(init_context: InitExecutorContext) -> Executor:\n """Executor which launches steps as Kubernetes Jobs.\n\n To use the `k8s_job_executor`, set it as the `executor_def` when defining a job:\n\n .. literalinclude:: ../../../../../../python_modules/libraries/dagster-k8s/dagster_k8s_tests/unit_tests/test_example_executor_mode_def.py\n :start-after: start_marker\n :end-before: end_marker\n :language: python\n\n Then you can configure the executor with run config as follows:\n\n .. code-block:: YAML\n\n execution:\n config:\n job_namespace: 'some-namespace'\n image_pull_policy: ...\n image_pull_secrets: ...\n service_account_name: ...\n env_config_maps: ...\n env_secrets: ...\n env_vars: ...\n job_image: ... # leave out if using userDeployments\n max_concurrent: ...\n\n `max_concurrent` limits the number of pods that will execute concurrently for one run. By default\n there is no limit- it will maximally parallel as allowed by the DAG. Note that this is not a\n global limit.\n\n Configuration set on the Kubernetes Jobs and Pods created by the `K8sRunLauncher` will also be\n set on Kubernetes Jobs and Pods created by the `k8s_job_executor`.\n\n Configuration set using `tags` on a `@job` will only apply to the `run` level. For configuration\n to apply at each `step` it must be set using `tags` for each `@op`.\n """\n run_launcher = (\n init_context.instance.run_launcher\n if isinstance(init_context.instance.run_launcher, K8sRunLauncher)\n else None\n )\n\n exc_cfg = init_context.executor_config\n\n k8s_container_context = K8sContainerContext(\n image_pull_policy=exc_cfg.get("image_pull_policy"), # type: ignore\n image_pull_secrets=exc_cfg.get("image_pull_secrets"), # type: ignore\n service_account_name=exc_cfg.get("service_account_name"), # type: ignore\n env_config_maps=exc_cfg.get("env_config_maps"), # type: ignore\n env_secrets=exc_cfg.get("env_secrets"), # type: ignore\n env_vars=exc_cfg.get("env_vars"), # type: ignore\n volume_mounts=exc_cfg.get("volume_mounts"), # type: ignore\n volumes=exc_cfg.get("volumes"), # type: ignore\n labels=exc_cfg.get("labels"), # type: ignore\n namespace=exc_cfg.get("job_namespace"), # type: ignore\n resources=exc_cfg.get("resources"), # type: ignore\n scheduler_name=exc_cfg.get("scheduler_name"), # type: ignore\n # step_k8s_config feeds into the run_k8s_config field because it is merged\n # with any configuration for the run that was set on the run launcher or code location\n run_k8s_config=UserDefinedDagsterK8sConfig.from_dict(exc_cfg.get("step_k8s_config", {})),\n )\n\n if "load_incluster_config" in exc_cfg:\n load_incluster_config = cast(bool, exc_cfg["load_incluster_config"])\n else:\n load_incluster_config = run_launcher.load_incluster_config if run_launcher else True\n\n if "kubeconfig_file" in exc_cfg:\n kubeconfig_file = cast(Optional[str], exc_cfg["kubeconfig_file"])\n else:\n kubeconfig_file = run_launcher.kubeconfig_file if run_launcher else None\n\n return StepDelegatingExecutor(\n K8sStepHandler(\n image=exc_cfg.get("job_image"), # type: ignore\n container_context=k8s_container_context,\n load_incluster_config=load_incluster_config,\n kubeconfig_file=kubeconfig_file,\n ),\n retries=RetryMode.from_config(exc_cfg["retries"]), # type: ignore\n max_concurrent=check.opt_int_elem(exc_cfg, "max_concurrent"),\n tag_concurrency_limits=check.opt_list_elem(exc_cfg, "tag_concurrency_limits"),\n 
should_verify_step=True,\n )
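A minimal sketch of attaching this executor to a job in code rather than through run config; the namespace and concurrency values are placeholders.

from dagster import job, op
from dagster_k8s import k8s_job_executor


@op
def do_work():
    return 1


@job(
    executor_def=k8s_job_executor.configured(
        {"job_namespace": "data-pipelines", "max_concurrent": 4}
    )
)
def k8s_stepwise_job():
    # Each invocation of do_work runs in its own Kubernetes Job.
    do_work()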
\n\n\nclass K8sStepHandler(StepHandler):\n @property\n def name(self):\n return "K8sStepHandler"\n\n def __init__(\n self,\n image: Optional[str],\n container_context: K8sContainerContext,\n load_incluster_config: bool,\n kubeconfig_file: Optional[str],\n k8s_client_batch_api=None,\n ):\n super().__init__()\n\n self._executor_image = check.opt_str_param(image, "image")\n self._executor_container_context = check.inst_param(\n container_context, "container_context", K8sContainerContext\n )\n\n if load_incluster_config:\n check.invariant(\n kubeconfig_file is None,\n "`kubeconfig_file` is set but `load_incluster_config` is True.",\n )\n kubernetes.config.load_incluster_config()\n else:\n check.opt_str_param(kubeconfig_file, "kubeconfig_file")\n kubernetes.config.load_kube_config(kubeconfig_file)\n\n self._api_client = DagsterKubernetesClient.production_client(\n batch_api_override=k8s_client_batch_api\n )\n\n def _get_step_key(self, step_handler_context: StepHandlerContext) -> str:\n step_keys_to_execute = cast(\n List[str], step_handler_context.execute_step_args.step_keys_to_execute\n )\n assert len(step_keys_to_execute) == 1, "Launching multiple steps is not currently supported"\n return step_keys_to_execute[0]\n\n def _get_container_context(\n self, step_handler_context: StepHandlerContext\n ) -> K8sContainerContext:\n step_key = self._get_step_key(step_handler_context)\n\n context = K8sContainerContext.create_for_run(\n step_handler_context.dagster_run,\n cast(K8sRunLauncher, step_handler_context.instance.run_launcher),\n include_run_tags=False, # For now don't include job-level dagster-k8s/config tags in step pods\n )\n context = context.merge(self._executor_container_context)\n\n user_defined_k8s_config = get_user_defined_k8s_config(\n step_handler_context.step_tags[step_key]\n )\n return context.merge(K8sContainerContext(run_k8s_config=user_defined_k8s_config))\n\n def _get_k8s_step_job_name(self, step_handler_context: StepHandlerContext):\n step_key = self._get_step_key(step_handler_context)\n\n name_key = get_k8s_job_name(\n step_handler_context.execute_step_args.run_id,\n step_key,\n )\n\n if step_handler_context.execute_step_args.known_state:\n retry_state = step_handler_context.execute_step_args.known_state.get_retry_state()\n if retry_state.get_attempt_count(step_key):\n return "dagster-step-%s-%d" % (name_key, retry_state.get_attempt_count(step_key))\n\n return "dagster-step-%s" % (name_key)\n\n def launch_step(self, step_handler_context: StepHandlerContext) -> Iterator[DagsterEvent]:\n step_key = self._get_step_key(step_handler_context)\n\n job_name = self._get_k8s_step_job_name(step_handler_context)\n pod_name = job_name\n\n container_context = self._get_container_context(step_handler_context)\n\n job_config = container_context.get_k8s_job_config(\n self._executor_image, step_handler_context.instance.run_launcher\n )\n\n args = step_handler_context.execute_step_args.get_command_args(\n skip_serialized_namedtuple=True\n )\n\n if not job_config.job_image:\n job_config = job_config.with_image(\n step_handler_context.execute_step_args.job_origin.repository_origin.container_image\n )\n\n if not job_config.job_image:\n raise Exception("No image included in either executor config or the job")\n\n run = step_handler_context.dagster_run\n labels = {\n "dagster/job": run.job_name,\n "dagster/op": step_key,\n "dagster/run-id": step_handler_context.execute_step_args.run_id,\n }\n if run.external_job_origin:\n labels["dagster/code-location"] = (\n 
run.external_job_origin.external_repository_origin.code_location_origin.location_name\n )\n job = construct_dagster_k8s_job(\n job_config=job_config,\n args=args,\n job_name=job_name,\n pod_name=pod_name,\n component="step_worker",\n user_defined_k8s_config=container_context.run_k8s_config,\n labels=labels,\n env_vars=[\n *step_handler_context.execute_step_args.get_command_env(),\n {\n "name": "DAGSTER_RUN_JOB_NAME",\n "value": run.job_name,\n },\n {"name": "DAGSTER_RUN_STEP_KEY", "value": step_key},\n *container_context.env,\n ],\n )\n\n yield DagsterEvent.step_worker_starting(\n step_handler_context.get_step_context(step_key),\n message=f'Executing step "{step_key}" in Kubernetes job {job_name}.',\n metadata={\n "Kubernetes Job name": MetadataValue.text(job_name),\n },\n )\n\n namespace = check.not_none(container_context.namespace)\n self._api_client.create_namespaced_job_with_retries(body=job, namespace=namespace)\n\n def check_step_health(self, step_handler_context: StepHandlerContext) -> CheckStepHealthResult:\n step_key = self._get_step_key(step_handler_context)\n\n job_name = self._get_k8s_step_job_name(step_handler_context)\n\n container_context = self._get_container_context(step_handler_context)\n\n status = self._api_client.get_job_status(\n namespace=container_context.namespace,\n job_name=job_name,\n )\n if status.failed:\n return CheckStepHealthResult.unhealthy(\n reason=f"Discovered failed Kubernetes job {job_name} for step {step_key}.",\n )\n\n return CheckStepHealthResult.healthy()\n\n def terminate_step(self, step_handler_context: StepHandlerContext) -> Iterator[DagsterEvent]:\n step_key = self._get_step_key(step_handler_context)\n\n job_name = self._get_k8s_step_job_name(step_handler_context)\n container_context = self._get_container_context(step_handler_context)\n\n yield DagsterEvent.engine_event(\n step_handler_context.get_step_context(step_key),\n message=f"Deleting Kubernetes job {job_name} for step",\n event_specific_data=EngineEventData(),\n )\n\n self._api_client.delete_job(job_name=job_name, namespace=container_context.namespace)\n
", "current_page_name": "_modules/dagster_k8s/executor", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_k8s.executor"}, "launcher": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_k8s.launcher

\nimport logging\nimport sys\nfrom typing import Any, Mapping, Optional, Sequence\n\nimport kubernetes\nfrom dagster import (\n    _check as check,\n)\nfrom dagster._cli.api import ExecuteRunArgs\nfrom dagster._core.events import EngineEventData\nfrom dagster._core.launcher import LaunchRunContext, ResumeRunContext, RunLauncher\nfrom dagster._core.launcher.base import CheckRunHealthResult, WorkerStatus\nfrom dagster._core.storage.dagster_run import DagsterRun, DagsterRunStatus\nfrom dagster._core.storage.tags import DOCKER_IMAGE_TAG\nfrom dagster._grpc.types import ResumeRunArgs\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData\nfrom dagster._utils.error import serializable_error_info_from_exc_info\n\nfrom .client import DagsterKubernetesClient\nfrom .container_context import K8sContainerContext\nfrom .job import DagsterK8sJobConfig, construct_dagster_k8s_job, get_job_name_from_run_id\n\n\n
[docs]class K8sRunLauncher(RunLauncher, ConfigurableClass):\n """RunLauncher that starts a Kubernetes Job for each Dagster job run.\n\n Encapsulates each run in a separate, isolated invocation of ``dagster-graphql``.\n\n You can configure a Dagster instance to use this RunLauncher by adding a section to your\n ``dagster.yaml`` like the following:\n\n .. code-block:: yaml\n\n run_launcher:\n module: dagster_k8s.launcher\n class: K8sRunLauncher\n config:\n service_account_name: your_service_account\n job_image: my_project/dagster_image:latest\n instance_config_map: dagster-instance\n postgres_password_secret: dagster-postgresql-secret\n\n """\n\n def __init__(\n self,\n service_account_name,\n instance_config_map,\n postgres_password_secret=None,\n dagster_home=None,\n job_image=None,\n image_pull_policy=None,\n image_pull_secrets=None,\n load_incluster_config=True,\n kubeconfig_file=None,\n inst_data: Optional[ConfigurableClassData] = None,\n job_namespace="default",\n env_config_maps=None,\n env_secrets=None,\n env_vars=None,\n k8s_client_batch_api=None,\n volume_mounts=None,\n volumes=None,\n labels=None,\n fail_pod_on_run_failure=None,\n resources=None,\n scheduler_name=None,\n security_context=None,\n run_k8s_config=None,\n ):\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n self.job_namespace = check.str_param(job_namespace, "job_namespace")\n\n self.load_incluster_config = load_incluster_config\n self.kubeconfig_file = kubeconfig_file\n if load_incluster_config:\n check.invariant(\n kubeconfig_file is None,\n "`kubeconfig_file` is set but `load_incluster_config` is True.",\n )\n kubernetes.config.load_incluster_config()\n else:\n check.opt_str_param(kubeconfig_file, "kubeconfig_file")\n kubernetes.config.load_kube_config(kubeconfig_file)\n\n self._api_client = DagsterKubernetesClient.production_client(\n batch_api_override=k8s_client_batch_api\n )\n\n self._job_config = None\n self._job_image = check.opt_str_param(job_image, "job_image")\n self.dagster_home = check.str_param(dagster_home, "dagster_home")\n self._image_pull_policy = check.opt_str_param(\n image_pull_policy, "image_pull_policy", "IfNotPresent"\n )\n self._image_pull_secrets = check.opt_list_param(\n image_pull_secrets, "image_pull_secrets", of_type=dict\n )\n self._service_account_name = check.str_param(service_account_name, "service_account_name")\n self.instance_config_map = check.str_param(instance_config_map, "instance_config_map")\n self.postgres_password_secret = check.opt_str_param(\n postgres_password_secret, "postgres_password_secret"\n )\n self._env_config_maps = check.opt_list_param(\n env_config_maps, "env_config_maps", of_type=str\n )\n self._env_secrets = check.opt_list_param(env_secrets, "env_secrets", of_type=str)\n self._env_vars = check.opt_list_param(env_vars, "env_vars", of_type=str)\n self._volume_mounts = check.opt_list_param(volume_mounts, "volume_mounts")\n self._volumes = check.opt_list_param(volumes, "volumes")\n self._labels: Mapping[str, str] = check.opt_mapping_param(\n labels, "labels", key_type=str, value_type=str\n )\n self._fail_pod_on_run_failure = check.opt_bool_param(\n fail_pod_on_run_failure, "fail_pod_on_run_failure"\n )\n self._resources: Mapping[str, Any] = check.opt_mapping_param(resources, "resources")\n self._scheduler_name = check.opt_str_param(scheduler_name, "scheduler_name")\n self._security_context = check.opt_dict_param(security_context, "security_context")\n self._run_k8s_config = check.opt_dict_param(run_k8s_config, 
"run_k8s_config")\n super().__init__()\n\n @property\n def job_image(self):\n return self._job_image\n\n @property\n def image_pull_policy(self) -> str:\n return self._image_pull_policy\n\n @property\n def image_pull_secrets(self) -> Sequence[Mapping]:\n return self._image_pull_secrets\n\n @property\n def service_account_name(self) -> str:\n return self._service_account_name\n\n @property\n def env_config_maps(self) -> Sequence[str]:\n return self._env_config_maps\n\n @property\n def env_secrets(self) -> Sequence[str]:\n return self._env_secrets\n\n @property\n def volume_mounts(self) -> Sequence:\n return self._volume_mounts\n\n @property\n def volumes(self) -> Sequence:\n return self._volumes\n\n @property\n def resources(self) -> Mapping:\n return self._resources\n\n @property\n def scheduler_name(self) -> Optional[str]:\n return self._scheduler_name\n\n @property\n def security_context(self) -> Mapping[str, Any]:\n return self._security_context\n\n @property\n def env_vars(self) -> Sequence[str]:\n return self._env_vars\n\n @property\n def labels(self) -> Mapping[str, str]:\n return self._labels\n\n @property\n def run_k8s_config(self) -> Mapping[str, str]:\n return self._run_k8s_config\n\n @property\n def fail_pod_on_run_failure(self) -> Optional[bool]:\n return self._fail_pod_on_run_failure\n\n @classmethod\n def config_type(cls):\n """Include all arguments required for DagsterK8sJobConfig along with additional arguments\n needed for the RunLauncher itself.\n """\n return DagsterK8sJobConfig.config_type_run_launcher()\n\n @classmethod\n def from_config_value(cls, inst_data, config_value):\n return cls(inst_data=inst_data, **config_value)\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n def get_container_context_for_run(self, dagster_run: DagsterRun) -> K8sContainerContext:\n return K8sContainerContext.create_for_run(dagster_run, self, include_run_tags=True)\n\n def _launch_k8s_job_with_args(\n self, job_name: str, args: Optional[Sequence[str]], run: DagsterRun\n ) -> None:\n container_context = self.get_container_context_for_run(run)\n\n pod_name = job_name\n\n job_origin = check.not_none(run.job_code_origin)\n user_defined_k8s_config = container_context.run_k8s_config\n repository_origin = job_origin.repository_origin\n\n job_config = container_context.get_k8s_job_config(\n job_image=repository_origin.container_image, run_launcher=self\n )\n job_image = job_config.job_image\n if job_image: # expected to be set\n self._instance.add_run_tags(\n run.run_id,\n {DOCKER_IMAGE_TAG: job_image},\n )\n\n labels = {\n "dagster/job": job_origin.job_name,\n "dagster/run-id": run.run_id,\n }\n if run.external_job_origin:\n labels["dagster/code-location"] = (\n run.external_job_origin.external_repository_origin.code_location_origin.location_name\n )\n\n job = construct_dagster_k8s_job(\n job_config=job_config,\n args=args,\n job_name=job_name,\n pod_name=pod_name,\n component="run_worker",\n user_defined_k8s_config=user_defined_k8s_config,\n labels=labels,\n env_vars=[\n {\n "name": "DAGSTER_RUN_JOB_NAME",\n "value": job_origin.job_name,\n },\n *container_context.env,\n ],\n )\n\n namespace = check.not_none(container_context.namespace)\n\n self._instance.report_engine_event(\n "Creating Kubernetes run worker job",\n run,\n EngineEventData(\n {\n "Kubernetes Job name": job_name,\n "Kubernetes Namespace": namespace,\n "Run ID": run.run_id,\n }\n ),\n cls=self.__class__,\n )\n\n self._api_client.create_namespaced_job_with_retries(body=job, 
namespace=namespace)\n self._instance.report_engine_event(\n "Kubernetes run worker job created",\n run,\n cls=self.__class__,\n )\n\n def launch_run(self, context: LaunchRunContext) -> None:\n run = context.dagster_run\n job_name = get_job_name_from_run_id(run.run_id)\n job_origin = check.not_none(run.job_code_origin)\n\n args = ExecuteRunArgs(\n job_origin=job_origin,\n run_id=run.run_id,\n instance_ref=self._instance.get_ref(),\n set_exit_code_on_failure=self._fail_pod_on_run_failure,\n ).get_command_args()\n\n self._launch_k8s_job_with_args(job_name, args, run)\n\n @property\n def supports_resume_run(self):\n return True\n\n def resume_run(self, context: ResumeRunContext) -> None:\n run = context.dagster_run\n job_name = get_job_name_from_run_id(\n run.run_id, resume_attempt_number=context.resume_attempt_number\n )\n job_origin = check.not_none(run.job_code_origin)\n\n args = ResumeRunArgs(\n job_origin=job_origin,\n run_id=run.run_id,\n instance_ref=self._instance.get_ref(),\n set_exit_code_on_failure=self._fail_pod_on_run_failure,\n ).get_command_args()\n\n self._launch_k8s_job_with_args(job_name, args, run)\n\n def terminate(self, run_id):\n check.str_param(run_id, "run_id")\n run = self._instance.get_run_by_id(run_id)\n\n if not run:\n return False\n\n self._instance.report_run_canceling(run)\n\n container_context = self.get_container_context_for_run(run)\n\n job_name = get_job_name_from_run_id(\n run_id, resume_attempt_number=self._instance.count_resume_run_attempts(run.run_id)\n )\n\n try:\n termination_result = self._api_client.delete_job(\n job_name=job_name, namespace=container_context.namespace\n )\n if termination_result:\n self._instance.report_engine_event(\n message="Run was terminated successfully.",\n dagster_run=run,\n cls=self.__class__,\n )\n else:\n self._instance.report_engine_event(\n message="Run was not terminated successfully; delete_job returned {}".format(\n termination_result\n ),\n dagster_run=run,\n cls=self.__class__,\n )\n return termination_result\n except Exception:\n self._instance.report_engine_event(\n message="Run was not terminated successfully; encountered error in delete_job",\n dagster_run=run,\n engine_event_data=EngineEventData.engine_error(\n serializable_error_info_from_exc_info(sys.exc_info())\n ),\n cls=self.__class__,\n )\n\n @property\n def supports_check_run_worker_health(self):\n return True\n\n @property\n def supports_run_worker_crash_recovery(self):\n return True\n\n def get_run_worker_debug_info(self, run: DagsterRun) -> Optional[str]:\n container_context = self.get_container_context_for_run(run)\n if self.supports_run_worker_crash_recovery:\n resume_attempt_number = self._instance.count_resume_run_attempts(run.run_id)\n else:\n resume_attempt_number = None\n\n job_name = get_job_name_from_run_id(run.run_id, resume_attempt_number=resume_attempt_number)\n namespace = container_context.namespace\n user_defined_k8s_config = container_context.run_k8s_config\n container_name = user_defined_k8s_config.container_config.get("name", "dagster")\n pod_names = self._api_client.get_pod_names_in_job(job_name, namespace=namespace)\n full_msg = ""\n try:\n pod_debug_info = [\n self._api_client.get_pod_debug_info(\n pod_name, namespace, container_name=container_name\n )\n for pod_name in pod_names\n ]\n full_msg = "\\n".join(pod_debug_info)\n except Exception:\n logging.exception(\n f"Error trying to get debug information for failed k8s job {job_name}"\n )\n if pod_names:\n full_msg = (\n full_msg\n + "\\nFor more information about the failure, 
try running `kubectl describe pod"\n f" {pod_names[0]}`, `kubectl logs {pod_names[0]}`, or `kubectl describe job"\n f" {job_name}` in your cluster."\n )\n\n else:\n full_msg = (\n full_msg\n + "\\nFor more information about the failure, try running `kubectl describe job"\n f" {job_name}` in your cluster."\n )\n\n return full_msg\n\n def check_run_worker_health(self, run: DagsterRun):\n container_context = self.get_container_context_for_run(run)\n\n if self.supports_run_worker_crash_recovery:\n resume_attempt_number = self._instance.count_resume_run_attempts(run.run_id)\n else:\n resume_attempt_number = None\n\n job_name = get_job_name_from_run_id(run.run_id, resume_attempt_number=resume_attempt_number)\n try:\n status = self._api_client.get_job_status(\n namespace=container_context.namespace,\n job_name=job_name,\n )\n except Exception:\n return CheckRunHealthResult(\n WorkerStatus.UNKNOWN, str(serializable_error_info_from_exc_info(sys.exc_info()))\n )\n\n inactive_job_with_finished_pods = bool(\n (not status.active) and (status.failed or status.succeeded)\n )\n\n # If the run is in a non-terminal (and non-STARTING) state but the k8s job is not active,\n # something went wrong\n if (\n run.status in (DagsterRunStatus.STARTED, DagsterRunStatus.CANCELING)\n and inactive_job_with_finished_pods\n ):\n return CheckRunHealthResult(\n WorkerStatus.FAILED, "Run has not completed but K8s job has no active pods"\n )\n\n if status.failed:\n return CheckRunHealthResult(WorkerStatus.FAILED, "K8s job failed")\n if status.succeeded:\n return CheckRunHealthResult(WorkerStatus.SUCCESS)\n return CheckRunHealthResult(WorkerStatus.RUNNING)
\n
", "current_page_name": "_modules/dagster_k8s/launcher", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_k8s.launcher"}, "ops": {"k8s_job_op": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_k8s.ops.k8s_job_op

\nimport time\nfrom typing import Any, Dict, List, Optional\n\nimport kubernetes.config\nimport kubernetes.watch\nfrom dagster import (\n    Enum as DagsterEnum,\n    Field,\n    In,\n    Noneable,\n    Nothing,\n    OpExecutionContext,\n    Permissive,\n    StringSource,\n    op,\n)\nfrom dagster._annotations import experimental\nfrom dagster._utils.merger import merge_dicts\n\nfrom ..client import DEFAULT_JOB_POD_COUNT, DagsterKubernetesClient\nfrom ..container_context import K8sContainerContext\nfrom ..job import (\n    DagsterK8sJobConfig,\n    K8sConfigMergeBehavior,\n    UserDefinedDagsterK8sConfig,\n    construct_dagster_k8s_job,\n    get_k8s_job_name,\n)\nfrom ..launcher import K8sRunLauncher\n\nK8S_JOB_OP_CONFIG = merge_dicts(\n    DagsterK8sJobConfig.config_type_container(),\n    {\n        "image": Field(\n            StringSource,\n            is_required=True,\n            description="The image in which to launch the k8s job.",\n        ),\n        "command": Field(\n            [str],\n            is_required=False,\n            description="The command to run in the container within the launched k8s job.",\n        ),\n        "args": Field(\n            [str],\n            is_required=False,\n            description="The args for the command for the container.",\n        ),\n        "namespace": Field(StringSource, is_required=False),\n        "load_incluster_config": Field(\n            bool,\n            is_required=False,\n            default_value=True,\n            description="""Set this value if you are running the launcher\n            within a k8s cluster. If ``True``, we assume the launcher is running within the target\n            cluster and load config using ``kubernetes.config.load_incluster_config``. Otherwise,\n            we will use the k8s config specified in ``kubeconfig_file`` (using\n            ``kubernetes.config.load_kube_config``) or fall back to the default kubeconfig.""",\n        ),\n        "kubeconfig_file": Field(\n            Noneable(str),\n            is_required=False,\n            default_value=None,\n            description=(\n                "The kubeconfig file from which to load config. 
Defaults to using the default"\n                " kubeconfig."\n            ),\n        ),\n        "timeout": Field(\n            int,\n            is_required=False,\n            description="How long to wait for the job to succeed before raising an exception",\n        ),\n        "container_config": Field(\n            Permissive(),\n            is_required=False,\n            description=(\n                "Raw k8s config for the k8s pod's main container"\n                " (https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#container-v1-core)."\n                " Keys can either snake_case or camelCase."\n            ),\n        ),\n        "pod_template_spec_metadata": Field(\n            Permissive(),\n            is_required=False,\n            description=(\n                "Raw k8s config for the k8s pod's metadata"\n                " (https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/object-meta/#ObjectMeta)."\n                " Keys can either snake_case or camelCase."\n            ),\n        ),\n        "pod_spec_config": Field(\n            Permissive(),\n            is_required=False,\n            description=(\n                "Raw k8s config for the k8s pod's pod spec"\n                " (https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec)."\n                " Keys can either snake_case or camelCase."\n            ),\n        ),\n        "job_metadata": Field(\n            Permissive(),\n            is_required=False,\n            description=(\n                "Raw k8s config for the k8s job's metadata"\n                " (https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/object-meta/#ObjectMeta)."\n                " Keys can either snake_case or camelCase."\n            ),\n        ),\n        "job_spec_config": Field(\n            Permissive(),\n            is_required=False,\n            description=(\n                "Raw k8s config for the k8s job's job spec"\n                " (https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#jobspec-v1-batch)."\n                " Keys can either snake_case or camelCase."\n            ),\n        ),\n        "merge_behavior": Field(\n            DagsterEnum.from_python_enum(K8sConfigMergeBehavior),\n            is_required=False,\n            default_value=K8sConfigMergeBehavior.SHALLOW.value,\n            description=(\n                "How raw k8s config set on this op should be merged with any raw k8s config set on"\n                " the code location that launched the op. By default, the value is SHALLOW, meaning"\n                " that the two dictionaries are shallowly merged - any shared values in the "\n                " dictionaries will be replaced by the values set on this op. Setting it to DEEP"\n                " will recursively merge the two dictionaries, appending list fields together and"\n                " merging dictionary fields."\n            ),\n        ),\n    },\n)\n\n\n
[docs]@experimental\ndef execute_k8s_job(\n context: OpExecutionContext,\n image: str,\n command: Optional[List[str]] = None,\n args: Optional[List[str]] = None,\n namespace: Optional[str] = None,\n image_pull_policy: Optional[str] = None,\n image_pull_secrets: Optional[List[Dict[str, str]]] = None,\n service_account_name: Optional[str] = None,\n env_config_maps: Optional[List[str]] = None,\n env_secrets: Optional[List[str]] = None,\n env_vars: Optional[List[str]] = None,\n volume_mounts: Optional[List[Dict[str, Any]]] = None,\n volumes: Optional[List[Dict[str, Any]]] = None,\n labels: Optional[Dict[str, str]] = None,\n resources: Optional[Dict[str, Any]] = None,\n scheduler_name: Optional[str] = None,\n load_incluster_config: bool = True,\n kubeconfig_file: Optional[str] = None,\n timeout: Optional[int] = None,\n container_config: Optional[Dict[str, Any]] = None,\n pod_template_spec_metadata: Optional[Dict[str, Any]] = None,\n pod_spec_config: Optional[Dict[str, Any]] = None,\n job_metadata: Optional[Dict[str, Any]] = None,\n job_spec_config: Optional[Dict[str, Any]] = None,\n k8s_job_name: Optional[str] = None,\n merge_behavior: K8sConfigMergeBehavior = K8sConfigMergeBehavior.SHALLOW,\n):\n """This function is a utility for executing a Kubernetes job from within a Dagster op.\n\n Args:\n image (str): The image in which to launch the k8s job.\n command (Optional[List[str]]): The command to run in the container within the launched\n k8s job. Default: None.\n args (Optional[List[str]]): The args for the command for the container. Default: None.\n namespace (Optional[str]): Override the kubernetes namespace in which to run the k8s job.\n Default: None.\n image_pull_policy (Optional[str]): Allows the image pull policy to be overridden, e.g. to\n facilitate local testing with `kind <https://kind.sigs.k8s.io/>`_. Default:\n ``"Always"``. See:\n https://kubernetes.io/docs/concepts/containers/images/#updating-images.\n image_pull_secrets (Optional[List[Dict[str, str]]]): Optionally, a list of dicts, each of\n which corresponds to a Kubernetes ``LocalObjectReference`` (e.g.,\n ``{'name': 'myRegistryName'}``). This allows you to specify the ```imagePullSecrets`` on\n a pod basis. Typically, these will be provided through the service account, when needed,\n and you will not need to pass this argument. See:\n https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod\n and https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#podspec-v1-core\n service_account_name (Optional[str]): The name of the Kubernetes service account under which\n to run the Job. Defaults to "default" env_config_maps (Optional[List[str]]): A list of custom ConfigMapEnvSource names from which to\n draw environment variables (using ``envFrom``) for the Job. Default: ``[]``. See:\n https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/#define-an-environment-variable-for-a-container\n env_secrets (Optional[List[str]]): A list of custom Secret names from which to\n draw environment variables (using ``envFrom``) for the Job. Default: ``[]``. See:\n https://kubernetes.io/docs/tasks/inject-data-application/distribute-credentials-secure/#configure-all-key-value-pairs-in-a-secret-as-container-environment-variables\n env_vars (Optional[List[str]]): A list of environment variables to inject into the Job.\n Default: ``[]``. 
See: https://kubernetes.io/docs/tasks/inject-data-application/distribute-credentials-secure/#configure-all-key-value-pairs-in-a-secret-as-container-environment-variables\n volume_mounts (Optional[List[Permissive]]): A list of volume mounts to include in the job's\n container. Default: ``[]``. See:\n https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#volumemount-v1-core\n volumes (Optional[List[Permissive]]): A list of volumes to include in the Job's Pod. Default: ``[]``. See:\n https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#volume-v1-core\n labels (Optional[Dict[str, str]]): Additional labels that should be included in the Job's Pod. See:\n https://kubernetes.io/docs/concepts/overview/working-with-objects/labels\n resources (Optional[Dict[str, Any]]) Compute resource requirements for the container. See:\n https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/\n scheduler_name (Optional[str]): Use a custom Kubernetes scheduler for launched Pods. See:\n https://kubernetes.io/docs/tasks/extend-kubernetes/configure-multiple-schedulers/\n load_incluster_config (bool): Whether the op is running within a k8s cluster. If ``True``,\n we assume the launcher is running within the target cluster and load config using\n ``kubernetes.config.load_incluster_config``. Otherwise, we will use the k8s config\n specified in ``kubeconfig_file`` (using ``kubernetes.config.load_kube_config``) or fall\n back to the default kubeconfig. Default: True,\n kubeconfig_file (Optional[str]): The kubeconfig file from which to load config. Defaults to\n using the default kubeconfig. Default: None.\n timeout (Optional[int]): Raise an exception if the op takes longer than this timeout in\n seconds to execute. Default: None.\n container_config (Optional[Dict[str, Any]]): Raw k8s config for the k8s pod's main container\n (https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#container-v1-core).\n Keys can either snake_case or camelCase.Default: None.\n pod_template_spec_metadata (Optional[Dict[str, Any]]): Raw k8s config for the k8s pod's\n metadata (https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/object-meta/#ObjectMeta).\n Keys can either snake_case or camelCase. Default: None.\n pod_spec_config (Optional[Dict[str, Any]]): Raw k8s config for the k8s pod's pod spec\n (https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec).\n Keys can either snake_case or camelCase. Default: None.\n job_metadata (Optional[Dict[str, Any]]): Raw k8s config for the k8s job's metadata\n (https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/object-meta/#ObjectMeta).\n Keys can either snake_case or camelCase. Default: None.\n job_spec_config (Optional[Dict[str, Any]]): Raw k8s config for the k8s job's job spec\n (https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#jobspec-v1-batch).\n Keys can either snake_case or camelCase.Default: None.\n k8s_job_name (Optional[str]): Overrides the name of the the k8s job. If not set, will be set\n to a unique name based on the current run ID and the name of the calling op. If set,\n make sure that the passed in name is a valid Kubernetes job name that does not\n already exist in the cluster.\n merge_behavior (Optional[K8sConfigMergeBehavior]): How raw k8s config set on this op should\n be merged with any raw k8s config set on the code location that launched the op. 
By\n default, the value is K8sConfigMergeBehavior.SHALLOW, meaning that the two dictionaries\n are shallowly merged - any shared values in the dictionaries will be replaced by the\n values set on this op. Setting it to DEEP will recursively merge the two dictionaries,\n appending list fields together andmerging dictionary fields.\n """\n run_container_context = K8sContainerContext.create_for_run(\n context.dagster_run,\n (\n context.instance.run_launcher\n if isinstance(context.instance.run_launcher, K8sRunLauncher)\n else None\n ),\n include_run_tags=False,\n )\n\n container_config = container_config.copy() if container_config else {}\n if command:\n container_config["command"] = command\n\n op_container_context = K8sContainerContext(\n image_pull_policy=image_pull_policy,\n image_pull_secrets=image_pull_secrets,\n service_account_name=service_account_name,\n env_config_maps=env_config_maps,\n env_secrets=env_secrets,\n env_vars=env_vars,\n volume_mounts=volume_mounts,\n volumes=volumes,\n labels=labels,\n namespace=namespace,\n resources=resources,\n scheduler_name=scheduler_name,\n run_k8s_config=UserDefinedDagsterK8sConfig.from_dict(\n {\n "container_config": container_config,\n "pod_template_spec_metadata": pod_template_spec_metadata,\n "pod_spec_config": pod_spec_config,\n "job_metadata": job_metadata,\n "job_spec_config": job_spec_config,\n "merge_behavior": merge_behavior.value,\n }\n ),\n )\n\n container_context = run_container_context.merge(op_container_context)\n\n namespace = container_context.namespace\n\n user_defined_k8s_config = container_context.run_k8s_config\n\n k8s_job_config = DagsterK8sJobConfig(\n job_image=image,\n dagster_home=None,\n image_pull_policy=container_context.image_pull_policy,\n image_pull_secrets=container_context.image_pull_secrets,\n service_account_name=container_context.service_account_name,\n instance_config_map=None,\n postgres_password_secret=None,\n env_config_maps=container_context.env_config_maps,\n env_secrets=container_context.env_secrets,\n env_vars=container_context.env_vars,\n volume_mounts=container_context.volume_mounts,\n volumes=container_context.volumes,\n labels=container_context.labels,\n resources=container_context.resources,\n )\n\n job_name = k8s_job_name or get_k8s_job_name(\n context.run_id, context.get_step_execution_context().step.key\n )\n\n retry_number = context.retry_number\n if retry_number > 0:\n job_name = f"{job_name}-{retry_number}"\n\n labels = {\n "dagster/job": context.dagster_run.job_name,\n "dagster/op": context.op.name,\n "dagster/run-id": context.dagster_run.run_id,\n }\n if context.dagster_run.external_job_origin:\n labels["dagster/code-location"] = (\n context.dagster_run.external_job_origin.external_repository_origin.code_location_origin.location_name\n )\n\n job = construct_dagster_k8s_job(\n job_config=k8s_job_config,\n args=args,\n job_name=job_name,\n pod_name=job_name,\n component="k8s_job_op",\n user_defined_k8s_config=user_defined_k8s_config,\n labels=labels,\n )\n\n if load_incluster_config:\n kubernetes.config.load_incluster_config()\n else:\n kubernetes.config.load_kube_config(kubeconfig_file)\n\n # changing this to be able to be passed in will allow for unit testing\n api_client = DagsterKubernetesClient.production_client()\n\n context.log.info(f"Creating Kubernetes job {job_name} in namespace {namespace}...")\n\n start_time = time.time()\n\n api_client.batch_api.create_namespaced_job(namespace, job)\n\n context.log.info("Waiting for Kubernetes job to finish...")\n\n timeout = timeout or 0\n\n 
api_client.wait_for_job(\n job_name=job_name,\n namespace=namespace,\n wait_timeout=timeout,\n start_time=start_time,\n )\n\n restart_policy = user_defined_k8s_config.pod_spec_config.get("restart_policy", "Never")\n\n if restart_policy == "Never":\n container_name = container_config.get("name", "dagster")\n\n pods = api_client.wait_for_job_to_have_pods(\n job_name,\n namespace,\n wait_timeout=timeout,\n start_time=start_time,\n )\n\n pod_names = [p.metadata.name for p in pods]\n\n if not pod_names:\n raise Exception("No pod names in job after it started")\n\n pod_to_watch = pod_names[0]\n watch = kubernetes.watch.Watch() # consider moving in to api_client\n\n api_client.wait_for_pod(\n pod_to_watch, namespace, wait_timeout=timeout, start_time=start_time\n )\n\n log_stream = watch.stream(\n api_client.core_api.read_namespaced_pod_log,\n name=pod_to_watch,\n namespace=namespace,\n container=container_name,\n )\n\n while True:\n if timeout and time.time() - start_time > timeout:\n watch.stop()\n raise Exception("Timed out waiting for pod to finish")\n\n try:\n log_entry = next(log_stream)\n print(log_entry) # noqa: T201\n except StopIteration:\n break\n else:\n context.log.info("Pod logs are disabled, because restart_policy is not Never")\n\n if job_spec_config and job_spec_config.get("parallelism"):\n num_pods_to_wait_for = job_spec_config["parallelism"]\n else:\n num_pods_to_wait_for = DEFAULT_JOB_POD_COUNT\n api_client.wait_for_running_job_to_succeed(\n job_name=job_name,\n namespace=namespace,\n wait_timeout=timeout,\n start_time=start_time,\n num_pods_to_wait_for=num_pods_to_wait_for,\n )
\n\n\n
[docs]@op(ins={"start_after": In(Nothing)}, config_schema=K8S_JOB_OP_CONFIG)\n@experimental\ndef k8s_job_op(context):\n """An op that runs a Kubernetes job using the k8s API.\n\n Contrast with the `k8s_job_executor`, which runs each Dagster op in a Dagster job in its\n own k8s job.\n\n This op may be useful when:\n - You need to orchestrate a command that isn't a Dagster op (or isn't written in Python)\n - You want to run the rest of a Dagster job using a specific executor, and only a single\n op in k8s.\n\n For example:\n\n .. literalinclude:: ../../../../../../python_modules/libraries/dagster-k8s/dagster_k8s_tests/unit_tests/test_example_k8s_job_op.py\n :start-after: start_marker\n :end-before: end_marker\n :language: python\n\n You can create your own op with the same implementation by calling the `execute_k8s_job` function\n inside your own op.\n\n The service account that is used to run this job should have the following RBAC permissions:\n\n .. literalinclude:: ../../../../../../examples/docs_snippets/docs_snippets/deploying/kubernetes/k8s_job_op_rbac.yaml\n :language: YAML\n """\n if "merge_behavior" in context.op_config:\n merge_behavior = K8sConfigMergeBehavior(context.op_config.pop("merge_behavior"))\n else:\n merge_behavior = K8sConfigMergeBehavior.SHALLOW\n\n execute_k8s_job(context, merge_behavior=merge_behavior, **context.op_config)
\n
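As a concrete illustration of the two entry points documented above, here is a minimal sketch (the image, command, and op/job names are placeholders chosen for the example): the prebuilt ``k8s_job_op`` configured directly, next to a custom op that calls ``execute_k8s_job`` itself.

.. code-block:: python

    from dagster import OpExecutionContext, job, op
    from dagster_k8s import execute_k8s_job, k8s_job_op

    # Prebuilt op, configured with an illustrative image and command.
    echo_with_builtin_op = k8s_job_op.configured(
        {
            "image": "busybox",
            "command": ["/bin/sh", "-c"],
            "args": ["echo HELLO"],
        },
        name="echo_with_builtin_op",
    )


    @op
    def echo_with_custom_op(context: OpExecutionContext):
        # Same behavior, but wrapped in a user-defined op so extra Python
        # logic can run before or after the Kubernetes job is launched.
        execute_k8s_job(
            context,
            image="busybox",
            command=["/bin/sh", "-c"],
            args=["echo GOODBYE"],
        )


    @job
    def k8s_echo_job():
        echo_with_builtin_op()
        echo_with_custom_op()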
", "current_page_name": "_modules/dagster_k8s/ops/k8s_job_op", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_k8s.ops.k8s_job_op"}}, "pipes": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_k8s.pipes

\nimport random\nimport string\nfrom contextlib import contextmanager\nfrom typing import Any, Iterator, Mapping, Optional, Sequence, Union\n\nimport kubernetes\nfrom dagster import (\n    OpExecutionContext,\n    _check as check,\n)\nfrom dagster._annotations import experimental\nfrom dagster._core.definitions.resource_annotation import ResourceParam\nfrom dagster._core.errors import DagsterInvariantViolationError\nfrom dagster._core.pipes.client import (\n    PipesClient,\n    PipesClientCompletedInvocation,\n    PipesContextInjector,\n    PipesMessageReader,\n    PipesParams,\n)\nfrom dagster._core.pipes.context import (\n    PipesMessageHandler,\n)\nfrom dagster._core.pipes.utils import (\n    PipesEnvContextInjector,\n    extract_message_or_forward_to_stdout,\n    open_pipes_session,\n)\nfrom dagster_pipes import (\n    PipesDefaultMessageWriter,\n    PipesExtras,\n)\n\nfrom dagster_k8s.utils import get_common_labels\n\nfrom .client import DagsterKubernetesClient, WaitForPodState\nfrom .models import k8s_model_from_dict, k8s_snake_case_dict\n\n\ndef get_pod_name(run_id: str, op_name: str):\n    clean_op_name = op_name.replace("_", "-")\n    suffix = "".join(random.choice(string.digits) for i in range(10))\n    return f"dagster-{run_id[:18]}-{clean_op_name[:20]}-{suffix}"\n\n\nDEFAULT_CONTAINER_NAME = "dagster-pipes-execution"\n\n\n
[docs]@experimental\nclass PipesK8sPodLogsMessageReader(PipesMessageReader):\n """Message reader that reads messages from kubernetes pod logs."""\n\n @contextmanager\n def read_messages(\n self,\n handler: PipesMessageHandler,\n ) -> Iterator[PipesParams]:\n self._handler = handler\n try:\n yield {PipesDefaultMessageWriter.STDIO_KEY: PipesDefaultMessageWriter.STDERR}\n finally:\n self._handler = None\n\n def consume_pod_logs(\n self,\n core_api: kubernetes.client.CoreV1Api,\n pod_name: str,\n namespace: str,\n ):\n handler = check.not_none(\n self._handler, "can only consume logs within scope of context manager"\n )\n for line in core_api.read_namespaced_pod_log(\n pod_name,\n namespace,\n follow=True,\n _preload_content=False, # avoid JSON processing\n ).stream():\n log_chunk = line.decode("utf-8")\n for log_line in log_chunk.split("\\n"):\n extract_message_or_forward_to_stdout(handler, log_line)\n\n def no_messages_debug_text(self) -> str:\n return "Attempted to read messages by extracting them from kubernetes pod logs directly."
\n\n\n@experimental\nclass _PipesK8sClient(PipesClient):\n """A pipes client for launching kubernetes pods.\n\n By default context is injected via environment variables and messages are parsed out of\n the pod logs, with other logs forwarded to stdout of the orchestration process.\n\n The first container within the containers list of the pod spec is expected (or set) to be\n the container prepared for pipes protocol communication.\n\n Args:\n env (Optional[Mapping[str, str]]): An optional dict of environment variables to pass to the\n subprocess.\n context_injector (Optional[PipesContextInjector]): A context injector to use to inject\n context into the k8s container process. Defaults to :py:class:`PipesEnvContextInjector`.\n message_reader (Optional[PipesMessageReader]): A message reader to use to read messages\n from the k8s container process. Defaults to :py:class:`PipesK8sPodLogsMessageReader`.\n """\n\n def __init__(\n self,\n env: Optional[Mapping[str, str]] = None,\n context_injector: Optional[PipesContextInjector] = None,\n message_reader: Optional[PipesMessageReader] = None,\n ):\n self.env = check.opt_mapping_param(env, "env", key_type=str, value_type=str)\n self.context_injector = (\n check.opt_inst_param(\n context_injector,\n "context_injector",\n PipesContextInjector,\n )\n or PipesEnvContextInjector()\n )\n\n self.message_reader = (\n check.opt_inst_param(message_reader, "message_reader", PipesMessageReader)\n or PipesK8sPodLogsMessageReader()\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def run(\n self,\n *,\n context: OpExecutionContext,\n extras: Optional[PipesExtras] = None,\n image: Optional[str] = None,\n command: Optional[Union[str, Sequence[str]]] = None,\n namespace: Optional[str] = None,\n env: Optional[Mapping[str, str]] = None,\n base_pod_meta: Optional[Mapping[str, Any]] = None,\n base_pod_spec: Optional[Mapping[str, Any]] = None,\n ) -> PipesClientCompletedInvocation:\n """Publish a kubernetes pod and wait for it to complete, enriched with the pipes protocol.\n\n Args:\n image (Optional[str]):\n The image to set the first container in the pod spec to use.\n command (Optional[Union[str, Sequence[str]]]):\n The command to set the first container in the pod spec to use.\n namespace (Optional[str]):\n Which kubernetes namespace to use, defaults to "default"\n env (Optional[Mapping[str,str]]):\n A mapping of environment variable names to values to set on the first\n container in the pod spec, on top of those configured on resource.\n base_pod_meta (Optional[Mapping[str, Any]]:\n Raw k8s config for the k8s pod's metadata\n (https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/object-meta/#ObjectMeta)\n Keys can either snake_case or camelCase. 
The name value will be overridden.\n base_pod_spec (Optional[Mapping[str, Any]]:\n Raw k8s config for the k8s pod's pod spec\n (https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec).\n Keys can either snake_case or camelCase.\n extras (Optional[PipesExtras]):\n Extra values to pass along as part of the ext protocol.\n context_injector (Optional[PipesContextInjector]):\n Override the default ext protocol context injection.\n message_reader (Optional[PipesMessageReader]):\n Override the default ext protocol message reader.\n\n Returns:\n PipesClientCompletedInvocation: Wrapper containing results reported by the external\n process.\n """\n client = DagsterKubernetesClient.production_client()\n\n with open_pipes_session(\n context=context,\n extras=extras,\n context_injector=self.context_injector,\n message_reader=self.message_reader,\n ) as pipes_session:\n namespace = namespace or "default"\n pod_name = get_pod_name(context.run_id, context.op.name)\n pod_body = build_pod_body(\n pod_name=pod_name,\n image=image,\n command=command,\n env_vars={\n **pipes_session.get_bootstrap_env_vars(),\n **(self.env or {}),\n **(env or {}),\n },\n base_pod_meta=base_pod_meta,\n base_pod_spec=base_pod_spec,\n )\n client.core_api.create_namespaced_pod(namespace, pod_body)\n try:\n # if were doing direct pod reading, wait for pod to start and then stream logs out\n if isinstance(self.message_reader, PipesK8sPodLogsMessageReader):\n client.wait_for_pod(\n pod_name,\n namespace,\n wait_for_state=WaitForPodState.Ready,\n )\n self.message_reader.consume_pod_logs(\n core_api=client.core_api,\n pod_name=pod_name,\n namespace=namespace,\n )\n else:\n # if were not doing direct log reading, just wait for pod to finish\n client.wait_for_pod(\n pod_name,\n namespace,\n wait_for_state=WaitForPodState.Terminated,\n )\n finally:\n client.core_api.delete_namespaced_pod(pod_name, namespace)\n return PipesClientCompletedInvocation(tuple(pipes_session.get_results()))\n\n\ndef build_pod_body(\n pod_name: str,\n image: Optional[str],\n command: Optional[Union[str, Sequence[str]]],\n env_vars: Mapping[str, str],\n base_pod_meta: Optional[Mapping[str, Any]],\n base_pod_spec: Optional[Mapping[str, Any]],\n):\n meta = {\n **(k8s_snake_case_dict(kubernetes.client.V1ObjectMeta, base_pod_meta or {})),\n "name": pod_name,\n }\n if "labels" in meta:\n meta["labels"] = {**get_common_labels(), **meta["labels"]}\n else:\n meta["labels"] = get_common_labels()\n\n spec = {**k8s_snake_case_dict(kubernetes.client.V1PodSpec, base_pod_spec or {})}\n if "containers" not in spec:\n spec["containers"] = [{}]\n\n if "restart_policy" not in spec:\n spec["restart_policy"] = "Never"\n elif spec["restart_policy"] == "Always":\n raise DagsterInvariantViolationError(\n "A restart policy of Always is not allowed, computations are expected to complete."\n )\n\n if "image" not in spec["containers"][0] and not image:\n raise DagsterInvariantViolationError(\n "Must specify image property or provide base_pod_spec with one set."\n )\n\n if "name" not in spec["containers"][0]:\n spec["containers"][0]["name"] = DEFAULT_CONTAINER_NAME\n\n if image:\n spec["containers"][0]["image"] = image\n\n if command:\n spec["containers"][0]["command"] = command\n\n if "env" not in spec["containers"][0]:\n spec["containers"][0]["env"] = []\n\n spec["containers"][0]["env"].extend({"name": k, "value": v} for k, v in env_vars.items())\n\n return k8s_model_from_dict(\n kubernetes.client.V1Pod,\n {\n "metadata": meta,\n "spec": spec,\n },\n 
)\n\n\nPipesK8sClient = ResourceParam[_PipesK8sClient]\n
", "current_page_name": "_modules/dagster_k8s/pipes", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_k8s.pipes"}}, "dagster_mlflow": {"hooks": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_mlflow.hooks

\nfrom dagster._core.definitions.decorators.hook_decorator import event_list_hook\nfrom dagster._core.definitions.events import HookExecutionResult\nfrom mlflow.entities.run_status import RunStatus\n\n\ndef _create_mlflow_run_hook(name):\n    @event_list_hook(name=name, required_resource_keys={"mlflow"})\n    def _hook(context, event_list):\n        for event in event_list:\n            if event.is_step_success:\n                _cleanup_on_success(context)\n            elif event.is_step_failure:\n                mlf = context.resources.mlflow\n                mlf.end_run(status=RunStatus.to_string(RunStatus.FAILED))\n\n        return HookExecutionResult(hook_name=name, is_skipped=False)\n\n    return _hook\n\n\ndef _cleanup_on_success(context):\n    """Checks if the current solid in the context is the last solid in the job\n    and ends the mlflow run with a successful status when this is the case.\n    """\n    last_solid_name = context._step_execution_context.job_def.nodes_in_topological_order[  # noqa: SLF001  # fmt: skip\n        -1\n    ].name\n\n    if context.op.name == last_solid_name:\n        context.resources.mlflow.end_run()\n\n\nend_mlflow_on_run_finished = _create_mlflow_run_hook("end_mlflow_on_run_finished")\n
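For reference, a minimal sketch of attaching this hook to a job follows (the op, job, and parameter names are illustrative; the ``mlflow_tracking`` resource used here is defined in ``dagster_mlflow.resources`` below).

.. code-block:: python

    from dagster import job, op
    from dagster_mlflow import end_mlflow_on_run_finished, mlflow_tracking


    @op(required_resource_keys={"mlflow"})
    def train_op(context):
        # Illustrative call; any mlflow API is reachable through the resource.
        context.resources.mlflow.log_params({"learning_rate": 0.01})


    @end_mlflow_on_run_finished
    @job(resource_defs={"mlflow": mlflow_tracking})
    def training_job():
        train_op()

With the hook applied, the MLflow run opened by the resource is ended with a FAILED status when a step fails and closed normally after the final op succeeds.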
", "current_page_name": "_modules/dagster_mlflow/hooks", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_mlflow.hooks"}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_mlflow.resources

\n"""This module contains the mlflow resource provided by the MlFlow\nclass. This resource provides an easy way to configure mlflow for logging various\nthings from dagster runs.\n"""\nimport atexit\nimport sys\nfrom itertools import islice\nfrom os import environ\nfrom typing import Any, Optional\n\nimport mlflow\nfrom dagster import Field, Noneable, Permissive, StringSource, resource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom mlflow.entities.run_status import RunStatus\n\nCONFIG_SCHEMA = {\n    "experiment_name": Field(StringSource, is_required=True, description="MlFlow experiment name."),\n    "mlflow_tracking_uri": Field(\n        Noneable(StringSource),\n        default_value=None,\n        is_required=False,\n        description="MlFlow tracking server uri.",\n    ),\n    "parent_run_id": Field(\n        Noneable(str),\n        default_value=None,\n        is_required=False,\n        description="Mlflow run ID of parent run if this is a nested run.",\n    ),\n    "env": Field(Permissive(), description="Environment variables for mlflow setup."),\n    "env_to_tag": Field(\n        Noneable(list),\n        default_value=None,\n        is_required=False,\n        description="List of environment variables to log as tags in mlflow.",\n    ),\n    "extra_tags": Field(Permissive(), description="Any extra key-value tags to log to mlflow."),\n}\n\n\nclass MlflowMeta(type):\n    """Mlflow Metaclass to create methods that "inherit" all of Mlflow's\n    methods. If the class has a method defined it is excluded from the\n    attribute setting from mlflow.\n    """\n\n    def __new__(cls, name, bases, attrs):\n        class_cls = super(MlflowMeta, cls).__new__(cls, name, bases, attrs)\n        for attr in (attr for attr in dir(mlflow) if attr not in dir(class_cls)):\n            mlflow_attribute = getattr(mlflow, attr)\n            if callable(mlflow_attribute):\n                setattr(class_cls, attr, staticmethod(mlflow_attribute))\n            else:\n                setattr(class_cls, attr, mlflow_attribute)\n        return class_cls\n\n\nclass MlFlow(metaclass=MlflowMeta):\n    """Class for setting up an mlflow resource for dagster runs.\n    This takes care of all the configuration required to use mlflow tracking and the complexities of\n    mlflow tracking dagster parallel runs.\n    """\n\n    def __init__(self, context):\n        # Context associated attributes\n        self.log = context.log\n        self.run_name = context.dagster_run.job_name\n        self.dagster_run_id = context.run_id\n\n        # resource config attributes\n        resource_config = context.resource_config\n        self.tracking_uri = resource_config.get("mlflow_tracking_uri")\n        if self.tracking_uri:\n            mlflow.set_tracking_uri(self.tracking_uri)\n        self.parent_run_id = resource_config.get("parent_run_id")\n        self.experiment_name = resource_config["experiment_name"]\n        self.env_tags_to_log = resource_config.get("env_to_tag") or []\n        self.extra_tags = resource_config.get("extra_tags")\n\n        # Update env variables if any are given\n        self.env_vars = resource_config.get("env", {})\n        if self.env_vars:\n            environ.update(self.env_vars)\n\n        # If the experiment exists then the set won't do anything\n        mlflow.set_experiment(self.experiment_name)\n        self.experiment = mlflow.get_experiment_by_name(self.experiment_name)\n\n        # Get the client object\n        self.tracking_client = 
mlflow.tracking.MlflowClient()\n\n        # Set up the active run and tags\n        self._setup()\n\n    def _setup(self):\n        """Sets the active run and tags. If an Mlflow run_id exists then the\n        active run is set to it. This way a single Dagster run outputs data\n        to the same Mlflow run, even when multiprocess executors are used.\n        """\n        # Get the run id\n        run_id = self._get_current_run_id()\n        self._set_active_run(run_id=run_id)\n        self._set_all_tags()\n\n        # hack needed to stop mlflow from marking run as finished when\n        # a process exits in parallel runs\n        atexit.unregister(mlflow.end_run)\n\n    def _get_current_run_id(\n        self, experiment: Optional[Any] = None, dagster_run_id: Optional[str] = None\n    ):\n        """Gets the run id of a specific dagster run and experiment id.\n        If it doesn't exist then it returns a None.\n\n        Args:\n            experiment (optional): Mlflow experiment.\n            When none is passed it fetches the experiment object set in\n            the constructor.  Defaults to None.\n            dagster_run_id (optional): The Dagster run id.\n            When none is passed it fetches the dagster_run_id object set in\n            the constructor.  Defaults to None.\n\n        Returns:\n            run_id (str or None): run_id if it is found else None\n        """\n        experiment = experiment or self.experiment\n        dagster_run_id = dagster_run_id or self.dagster_run_id\n        if experiment:\n            # Check if a run with this dagster run id has already been started\n            # in mlflow, will get an empty dataframe if not\n            current_run_df = mlflow.search_runs(\n                experiment_ids=[experiment.experiment_id],\n                filter_string=f"tags.dagster_run_id='{dagster_run_id}'",\n            )\n            if not current_run_df.empty:\n                return current_run_df.run_id.values[0]\n\n    def _set_active_run(self, run_id=None):\n        """This method sets the active run to be that of the specified\n        run_id. If None is passed then a new run is started. The new run also\n        takes care of nested runs.\n\n        Args:\n            run_id (str, optional): Mlflow run_id. 
Defaults to None.\n        """\n        nested_run = False\n        if self.parent_run_id is not None:\n            self._start_run(run_id=self.parent_run_id, run_name=self.run_name)\n            nested_run = True\n        self._start_run(run_id=run_id, run_name=self.run_name, nested=nested_run)\n\n    def _start_run(self, **kwargs):\n        """Catches the Mlflow exception if a run is already active."""\n        try:\n            run = mlflow.start_run(**kwargs)\n            self.log.info(\n                f"Starting a new mlflow run with id {run.info.run_id} "\n                f"in experiment {self.experiment_name}"\n            )\n        except Exception as ex:\n            run = mlflow.active_run()\n            if "is already active" not in str(ex):\n                raise (ex)\n            self.log.info(f"Run with id {run.info.run_id} is already active.")\n\n    def _set_all_tags(self):\n        """Method collects dagster_run_id plus all env variables/tags that have been\n            specified by the user in the config_schema and logs them as tags in mlflow.\n\n        Returns:\n            tags [dict]: Dictionary of all the tags\n        """\n        tags = {tag: environ.get(tag) for tag in self.env_tags_to_log}\n        tags["dagster_run_id"] = self.dagster_run_id\n        if self.extra_tags:\n            tags.update(self.extra_tags)\n\n        mlflow.set_tags(tags)\n\n    def cleanup_on_error(self):\n        """Method ends mlflow run with correct exit status for failed runs. Note that\n        this method does not work when a job running in the webserver fails, it seems\n        that in this case a different process runs the job and when it fails\n        the stack trace is therefore not available. For this case we can use the\n        cleanup_on_failure hook defined below.\n        """\n        any_error = sys.exc_info()\n\n        if any_error[1]:\n            if isinstance(any_error[1], KeyboardInterrupt):\n                mlflow.end_run(status=RunStatus.to_string(RunStatus.KILLED))\n            else:\n                mlflow.end_run(status=RunStatus.to_string(RunStatus.FAILED))\n\n    @staticmethod\n    def log_params(params: dict):\n        """Overload of the mlflow.log_params. If len(params) >100 then\n        params is sent to mlflow in chunks.\n\n        Args:\n            params (dict): Parameters to be logged\n        """\n        for param_chunk in MlFlow.chunks(params, 100):\n            mlflow.log_params(param_chunk)\n\n    @staticmethod\n    def chunks(params: dict, size: int = 100):\n        """Method that chunks a dictionary into batches of size.\n\n        Args:\n            params (dict): Dictionary set to be batched\n            size (int, optional): Number of batches. Defaults to 100.\n\n        Yields:\n            (dict): Batch of dictionary\n        """\n        it = iter(params)\n        for _ in range(0, len(params), size):\n            yield {k: params[k] for k in islice(it, size)}\n\n\n
[docs]@dagster_maintained_resource\n@resource(config_schema=CONFIG_SCHEMA)\ndef mlflow_tracking(context):\n """This resource initializes an MLflow run that's used for all steps within a Dagster run.\n\n This resource provides access to all of mlflow's methods as well as the mlflow tracking client's\n methods.\n\n Usage:\n\n 1. Add the mlflow resource to any ops in which you want to invoke mlflow tracking APIs.\n 2. Add the `end_mlflow_on_run_finished` hook to your job to end the MLflow run\n when the Dagster run is finished.\n\n Examples:\n .. code-block:: python\n\n from dagster_mlflow import end_mlflow_on_run_finished, mlflow_tracking\n\n @op(required_resource_keys={"mlflow"})\n def mlflow_op(context):\n mlflow.log_params(some_params)\n mlflow.tracking.MlflowClient().create_registered_model(some_model_name)\n\n @end_mlflow_on_run_finished\n @job(resource_defs={"mlflow": mlflow_tracking})\n def mlf_example():\n mlflow_op()\n\n # example using an mlflow instance with s3 storage\n mlf_example.execute_in_process(run_config={\n "resources": {\n "mlflow": {\n "config": {\n "experiment_name": my_experiment,\n "mlflow_tracking_uri": "http://localhost:5000",\n\n # if want to run a nested run, provide parent_run_id\n "parent_run_id": an_existing_mlflow_run_id,\n\n # env variables to pass to mlflow\n "env": {\n "MLFLOW_S3_ENDPOINT_URL": my_s3_endpoint,\n "AWS_ACCESS_KEY_ID": my_aws_key_id,\n "AWS_SECRET_ACCESS_KEY": my_secret,\n },\n\n # env variables you want to log as mlflow tags\n "env_to_tag": ["DOCKER_IMAGE_TAG"],\n\n # key-value tags to add to your experiment\n "extra_tags": {"super": "experiment"},\n }\n }\n }\n })\n """\n mlf = MlFlow(context)\n yield mlf\n mlf.cleanup_on_error()
\n
", "current_page_name": "_modules/dagster_mlflow/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_mlflow.resources"}}, "dagster_msteams": {"hooks": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_msteams.hooks

\nfrom typing import Callable, Optional\n\nfrom dagster._annotations import deprecated_param\nfrom dagster._core.definitions import failure_hook, success_hook\nfrom dagster._core.execution.context.hook import HookContext\nfrom dagster._utils.warnings import normalize_renamed_param\n\nfrom dagster_msteams.card import Card\n\n\ndef _default_status_message(context: HookContext, status: str) -> str:\n    return f"Op {context.op.name} on job {context.job_name} {status}!\\nRun ID: {context.run_id}"\n\n\ndef _default_failure_message(context: HookContext) -> str:\n    return _default_status_message(context, status="failed")\n\n\ndef _default_success_message(context: HookContext) -> str:\n    return _default_status_message(context, status="succeeded")\n\n\n
[docs]@deprecated_param(\n param="dagit_base_url",\n breaking_version="2.0",\n additional_warn_text="Use `webserver_base_url` instead.",\n)\ndef teams_on_failure(\n message_fn: Callable[[HookContext], str] = _default_failure_message,\n dagit_base_url: Optional[str] = None,\n webserver_base_url: Optional[str] = None,\n):\n """Create a hook on step failure events that will message the given MS Teams webhook URL.\n\n Args:\n message_fn (Optional(Callable[[HookContext], str])): Function which takes in the\n HookContext outputs the message you want to send.\n dagit_base_url: (Optional[str]): The base url of your webserver instance. Specify this\n to allow messages to include deeplinks to the specific run that triggered\n the hook.\n webserver_base_url: (Optional[str]): The base url of your webserver instance. Specify this\n to allow messages to include deeplinks to the specific run that triggered\n the hook.\n\n Examples:\n .. code-block:: python\n\n @teams_on_failure(webserver_base_url="http://localhost:3000")\n @job(...)\n def my_job():\n pass\n\n .. code-block:: python\n\n def my_message_fn(context: HookContext) -> str:\n return f"Op {context.op.name} failed!"\n\n @op\n def a_op(context):\n pass\n\n @job(...)\n def my_job():\n a_op.with_hooks(hook_defs={teams_on_failure("#foo", my_message_fn)})\n\n """\n webserver_base_url = normalize_renamed_param(\n webserver_base_url, "webserver_base_url", dagit_base_url, "dagit_base_url"\n )\n\n @failure_hook(required_resource_keys={"msteams"})\n def _hook(context: HookContext):\n text = message_fn(context)\n if webserver_base_url:\n text += f"<a href='{webserver_base_url}/runs/{context.run_id}'>View in Dagster UI</a>"\n card = Card()\n card.add_attachment(text_message=text)\n context.resources.msteams.post_message(payload=card.payload)\n\n return _hook
\n\n\n
[docs]@deprecated_param(\n param="dagit_base_url",\n breaking_version="2.0",\n additional_warn_text="Use `webserver_base_url` instead.",\n)\ndef teams_on_success(\n message_fn: Callable[[HookContext], str] = _default_success_message,\n dagit_base_url: Optional[str] = None,\n webserver_base_url: Optional[str] = None,\n):\n """Create a hook on step success events that will message the given MS Teams webhook URL.\n\n Args:\n message_fn (Optional(Callable[[HookContext], str])): Function which takes in the\n HookContext outputs the message you want to send.\n dagit_base_url: (Optional[str]): The base url of your webserver instance. Specify this\n to allow messages to include deeplinks to the specific run that triggered\n the hook.\n\n Examples:\n .. code-block:: python\n\n @teams_on_success(webserver_base_url="http://localhost:3000")\n @job(...)\n def my_job():\n pass\n\n .. code-block:: python\n\n def my_message_fn(context: HookContext) -> str:\n return f"Op {context.op.name} failed!"\n\n @op\n def a_op(context):\n pass\n\n @job(...)\n def my_job():\n a_op.with_hooks(hook_defs={teams_on_success("#foo", my_message_fn)})\n\n """\n webserver_base_url = normalize_renamed_param(\n webserver_base_url, "webserver_base_url", dagit_base_url, "dagit_base_url"\n )\n\n @success_hook(required_resource_keys={"msteams"})\n def _hook(context: HookContext):\n text = message_fn(context)\n if webserver_base_url:\n text += f"<a href='{webserver_base_url}/runs/{context.run_id}'>View in webserver</a>"\n card = Card()\n card.add_attachment(text_message=text)\n context.resources.msteams.post_message(payload=card.payload)\n\n return _hook
\n
", "current_page_name": "_modules/dagster_msteams/hooks", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_msteams.hooks"}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_msteams.resources

\nfrom dagster import ConfigurableResource, resource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom pydantic import Field\n\nfrom dagster_msteams.client import TeamsClient\n\n\n
[docs]class MSTeamsResource(ConfigurableResource):\n """This resource is for connecting to Microsoft Teams.\n\n Provides a `dagster_msteams.TeamsClient` which can be used to\n interface with the MS Teams API.\n\n By configuring this resource, you can post messages to MS Teams from any Dagster op,\n asset, schedule, or sensor:\n\n Examples:\n .. code-block:: python\n\n import os\n\n from dagster import op, job, Definitions, EnvVar\n from dagster_msteams import Card, MSTeamsResource\n\n\n @op\n def teams_op(msteams: MSTeamsResource):\n card = Card()\n card.add_attachment(text_message="Hello There !!")\n msteams.get_client().post_message(payload=card.payload)\n\n\n @job\n def teams_job():\n teams_op()\n\n defs = Definitions(\n jobs=[teams_job],\n resources={\n "msteams": MSTeamsResource(\n hook_url=EnvVar("TEAMS_WEBHOOK_URL")\n )\n }\n )\n """\n\n hook_url: str = Field(\n default=None,\n description=(\n "To send messages to MS Teams channel, an incoming webhook has to be created. The"\n " incoming webhook url must be given as a part of the resource config to the"\n " MSTeamsResource in Dagster. For more information on how to create an incoming"\n " webhook, see"\n " https://docs.microsoft.com/en-us/microsoftteams/platform/webhooks-and-connectors/how-to/add-incoming-webhook"\n ),\n )\n http_proxy: str = Field(default=None, description="HTTP proxy URL")\n https_proxy: str = Field(default=None, description="HTTPS proxy URL")\n timeout: float = Field(default=60, description="Timeout for requests to MS Teams")\n verify: bool = Field(\n default=True, description="Whether to verify SSL certificates, defaults to True"\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def get_client(self) -> TeamsClient:\n return TeamsClient(\n hook_url=self.hook_url,\n http_proxy=self.http_proxy,\n https_proxy=self.https_proxy,\n timeout=self.timeout,\n verify=self.verify,\n )
\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n config_schema=MSTeamsResource.to_config_schema(),\n description="This resource is for connecting to MS Teams",\n)\ndef msteams_resource(context) -> TeamsClient:\n """This resource is for connecting to Microsoft Teams.\n\n The resource object is a `dagster_msteams.TeamsClient`.\n\n By configuring this resource, you can post messages to MS Teams from any Dagster solid:\n\n Examples:\n .. code-block:: python\n\n import os\n\n from dagster import op, job\n from dagster_msteams import Card, msteams_resource\n\n\n @op(required_resource_keys={"msteams"})\n def teams_op(context):\n card = Card()\n card.add_attachment(text_message="Hello There !!")\n context.resources.msteams.post_message(payload=card.payload)\n\n\n @job(resource_defs={"msteams": msteams_resource})\n def teams_job():\n teams_op()\n\n\n teams_job.execute_in_process(\n {"resources": {"msteams": {"config": {"hook_url": os.getenv("TEAMS_WEBHOOK_URL")}}}}\n )\n """\n return MSTeamsResource.from_resource_context(context).get_client()
\n
", "current_page_name": "_modules/dagster_msteams/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_msteams.resources"}, "sensors": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_msteams.sensors

\nfrom typing import TYPE_CHECKING, Callable, Optional, Sequence, Union\n\nfrom dagster import DefaultSensorStatus\nfrom dagster._annotations import deprecated_param\nfrom dagster._core.definitions import GraphDefinition, JobDefinition\nfrom dagster._core.definitions.run_status_sensor_definition import (\n    RunFailureSensorContext,\n    run_failure_sensor,\n)\nfrom dagster._core.definitions.unresolved_asset_job_definition import UnresolvedAssetJobDefinition\nfrom dagster._utils.warnings import normalize_renamed_param\n\nfrom dagster_msteams.card import Card\nfrom dagster_msteams.client import TeamsClient\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.selector import JobSelector, RepositorySelector\n\n\ndef _default_failure_message(context: RunFailureSensorContext) -> str:\n    return "\\n".join(\n        [\n            f"Job {context.dagster_run.job_name} failed!",\n            f"Run ID: {context.dagster_run.run_id}",\n            f"Error: {context.failure_event.message}",\n        ]\n    )\n\n\n
[docs]@deprecated_param(\n param="dagit_base_url",\n breaking_version="2.0",\n additional_warn_text="Use `webserver_base_url` instead.",\n)\ndef make_teams_on_run_failure_sensor(\n hook_url: str,\n message_fn: Callable[[RunFailureSensorContext], str] = _default_failure_message,\n http_proxy: Optional[str] = None,\n https_proxy: Optional[str] = None,\n timeout: Optional[float] = 60,\n verify: Optional[bool] = None,\n name: Optional[str] = None,\n dagit_base_url: Optional[str] = None,\n default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,\n monitored_jobs: Optional[\n Sequence[\n Union[\n JobDefinition,\n GraphDefinition,\n UnresolvedAssetJobDefinition,\n "RepositorySelector",\n "JobSelector",\n ]\n ]\n ] = None,\n monitor_all_repositories: bool = False,\n webserver_base_url: Optional[str] = None,\n):\n """Create a sensor on run failures that will message the given MS Teams webhook URL.\n\n Args:\n hook_url (str): MS Teams incoming webhook URL.\n message_fn (Optional(Callable[[RunFailureSensorContext], str])): Function which\n takes in the ``RunFailureSensorContext`` and outputs the message you want to send.\n Defaults to a text message that contains error message, job name, and run ID.\n http_proxy : (Optional[str]): Proxy for requests using http protocol.\n https_proxy : (Optional[str]): Proxy for requests using https protocol.\n timeout: (Optional[float]): Connection timeout in seconds. Defaults to 60.\n verify: (Optional[bool]): Whether to verify the servers TLS certificate.\n name: (Optional[str]): The name of the sensor. Defaults to "teams_on_run_failure".\n dagit_base_url: (Optional[str]): The base url of your webserver instance. Specify this to allow\n messages to include deeplinks to the failed run.\n default_status (DefaultSensorStatus): Whether the sensor starts as running or not. The default\n status can be overridden from Dagit or via the GraphQL API.\n monitored_jobs (Optional[List[Union[JobDefinition, GraphDefinition, UnresolvedAssetJobDefinition, RepositorySelector, JobSelector]]]):\n Jobs in the current repository that will be monitored by this sensor. Defaults to None,\n which means the alert will be sent when any job in the repository matches the requested\n run_status. To monitor jobs in external repositories, use RepositorySelector and JobSelector.\n monitor_all_repositories (bool): If set to True, the sensor will monitor all runs in the\n Dagster instance. If set to True, an error will be raised if you also specify\n monitored_jobs or job_selection. Defaults to False.\n webserver_base_url: (Optional[str]): The base url of your webserver instance. Specify this to allow\n messages to include deeplinks to the failed run.\n\n Examples:\n .. code-block:: python\n\n teams_on_run_failure = make_teams_on_run_failure_sensor(\n hook_url=os.getenv("TEAMS_WEBHOOK_URL")\n )\n\n @repository\n def my_repo():\n return [my_job + teams_on_run_failure]\n\n .. code-block:: python\n\n def my_message_fn(context: RunFailureSensorContext) -> str:\n return "Job {job_name} failed! 
Error: {error}".format(\n job_name=context.dagster_run.job_name,\n error=context.failure_event.message,\n )\n\n teams_on_run_failure = make_teams_on_run_failure_sensor(\n hook_url=os.getenv("TEAMS_WEBHOOK_URL"),\n message_fn=my_message_fn,\n webserver_base_url="http://localhost:3000",\n )\n\n\n """\n webserver_base_url = normalize_renamed_param(\n webserver_base_url, "webserver_base_url", dagit_base_url, "dagit_base_url"\n )\n\n teams_client = TeamsClient(\n hook_url=hook_url,\n http_proxy=http_proxy,\n https_proxy=https_proxy,\n timeout=timeout,\n verify=verify,\n )\n\n @run_failure_sensor(\n name=name,\n default_status=default_status,\n monitored_jobs=monitored_jobs,\n monitor_all_repositories=monitor_all_repositories,\n )\n def teams_on_run_failure(context: RunFailureSensorContext):\n text = message_fn(context)\n if webserver_base_url:\n text += "<a href='{base_url}/runs/{run_id}'>View in Dagit</a>".format(\n base_url=webserver_base_url,\n run_id=context.dagster_run.run_id,\n )\n card = Card()\n card.add_attachment(text_message=text)\n teams_client.post_message(payload=card.payload)\n\n return teams_on_run_failure
\n
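To complement the ``@repository`` example above, here is a minimal sketch that wires the sensor into a ``Definitions`` object (the job name and the ``TEAMS_WEBHOOK_URL`` environment variable are illustrative).

.. code-block:: python

    import os

    from dagster import Definitions, job, op
    from dagster_msteams import make_teams_on_run_failure_sensor


    @op
    def might_fail():
        ...


    @job
    def my_job():
        might_fail()


    # Hypothetical wiring; assumes TEAMS_WEBHOOK_URL is set in the environment.
    teams_on_run_failure = make_teams_on_run_failure_sensor(
        hook_url=os.getenv("TEAMS_WEBHOOK_URL"),
        webserver_base_url="http://localhost:3000",
    )

    defs = Definitions(jobs=[my_job], sensors=[teams_on_run_failure])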
", "current_page_name": "_modules/dagster_msteams/sensors", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_msteams.sensors"}}, "dagster_mysql": {"event_log": {"event_log": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_mysql.event_log.event_log

\nfrom typing import ContextManager, Optional, cast\n\nimport dagster._check as check\nimport sqlalchemy as db\nimport sqlalchemy.dialects as db_dialects\nimport sqlalchemy.exc as db_exc\nimport sqlalchemy.pool as db_pool\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._core.event_api import EventHandlerFn\nfrom dagster._core.events.log import EventLogEntry\nfrom dagster._core.storage.config import MySqlStorageConfig, mysql_config\nfrom dagster._core.storage.event_log import (\n    AssetKeyTable,\n    SqlEventLogStorage,\n    SqlEventLogStorageMetadata,\n    SqlPollingEventWatcher,\n)\nfrom dagster._core.storage.event_log.base import EventLogCursor\nfrom dagster._core.storage.event_log.migration import ASSET_KEY_INDEX_COLS\nfrom dagster._core.storage.sql import (\n    AlembicVersion,\n    check_alembic_revision,\n    create_engine,\n    run_alembic_upgrade,\n    stamp_alembic_rev,\n)\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData\nfrom sqlalchemy.engine import Connection\n\nfrom ..utils import (\n    create_mysql_connection,\n    mysql_alembic_config,\n    mysql_isolation_level,\n    mysql_url_from_config,\n    parse_mysql_version,\n    retry_mysql_connection_fn,\n    retry_mysql_creation_fn,\n)\n\nMINIMUM_MYSQL_INTERSECT_VERSION = "8.0.31"\n\n\n
[docs]class MySQLEventLogStorage(SqlEventLogStorage, ConfigurableClass):\n """MySQL-backed event log storage.\n\n Users should not directly instantiate this class; it is instantiated by internal machinery when\n ``dagster-webserver`` and ``dagster-graphql`` load, based on the values in the ``dagster.yaml`` file in\n ``$DAGSTER_HOME``. Configuration of this class should be done by setting values in that file.\n\n .. literalinclude:: ../../../../../../examples/docs_snippets/docs_snippets/deploying/dagster-mysql-legacy.yaml\n :caption: dagster.yaml\n :start-after: start_marker_event_log\n :end-before: end_marker_event_log\n :language: YAML\n\n Note that the fields in this config are :py:class:`~dagster.StringSource` and\n :py:class:`~dagster.IntSource` and can be configured from environment variables.\n\n """\n\n def __init__(self, mysql_url: str, inst_data: Optional[ConfigurableClassData] = None):\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n self.mysql_url = check.str_param(mysql_url, "mysql_url")\n self._disposed = False\n\n self._event_watcher = SqlPollingEventWatcher(self)\n\n # Default to not holding any connections open to prevent accumulating connections per DagsterInstance\n self._engine = create_engine(\n self.mysql_url,\n isolation_level=mysql_isolation_level(),\n poolclass=db_pool.NullPool,\n )\n self._secondary_index_cache = {}\n\n table_names = retry_mysql_connection_fn(db.inspect(self._engine).get_table_names)\n\n # Stamp and create tables if the main table does not exist (we can't check alembic\n # revision because alembic config may be shared with other storage classes)\n if "event_logs" not in table_names:\n retry_mysql_creation_fn(self._init_db)\n # mark all secondary indexes to be used\n self.reindex_events()\n self.reindex_assets()\n\n self._mysql_version = self.get_server_version()\n super().__init__()\n\n def _init_db(self) -> None:\n with self._connect() as conn:\n SqlEventLogStorageMetadata.create_all(conn)\n stamp_alembic_rev(mysql_alembic_config(__file__), conn)\n\n def optimize_for_webserver(self, statement_timeout: int, pool_recycle: int) -> None:\n # When running in dagster-webserver, hold an open connection\n # https://github.com/dagster-io/dagster/issues/3719\n self._engine = create_engine(\n self.mysql_url,\n isolation_level=mysql_isolation_level(),\n pool_size=1,\n pool_recycle=pool_recycle,\n )\n\n def upgrade(self) -> None:\n alembic_config = mysql_alembic_config(__file__)\n with self._connect() as conn:\n run_alembic_upgrade(alembic_config, conn)\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n @classmethod\n def config_type(cls) -> UserConfigSchema:\n return mysql_config()\n\n @classmethod\n def from_config_value(\n cls, inst_data: Optional[ConfigurableClassData], config_value: MySqlStorageConfig\n ) -> "MySQLEventLogStorage":\n return MySQLEventLogStorage(\n inst_data=inst_data, mysql_url=mysql_url_from_config(config_value)\n )\n\n @staticmethod\n def wipe_storage(mysql_url: str) -> None:\n engine = create_engine(\n mysql_url, isolation_level=mysql_isolation_level(), poolclass=db_pool.NullPool\n )\n try:\n SqlEventLogStorageMetadata.drop_all(engine)\n finally:\n engine.dispose()\n\n @staticmethod\n def create_clean_storage(conn_string: str) -> "MySQLEventLogStorage":\n MySQLEventLogStorage.wipe_storage(conn_string)\n return MySQLEventLogStorage(conn_string)\n\n def get_server_version(self) -> Optional[str]:\n with self.index_connection() as conn:\n row = 
conn.execute(db.text("select version()")).fetchone()\n\n if not row:\n return None\n\n return cast(str, row[0])\n\n def store_asset_event(self, event: EventLogEntry, event_id: int) -> None:\n # last_materialization_timestamp is updated upon observation, materialization, materialization_planned\n # See SqlEventLogStorage.store_asset_event method for more details\n\n values = self._get_asset_entry_values(\n event, event_id, self.has_secondary_index(ASSET_KEY_INDEX_COLS)\n )\n with self.index_connection() as conn:\n if values:\n conn.execute(\n db_dialects.mysql.insert(AssetKeyTable)\n .values(\n asset_key=event.dagster_event.asset_key.to_string(), # type: ignore # (possible none)\n **values,\n )\n .on_duplicate_key_update(\n **values,\n )\n )\n else:\n try:\n conn.execute(\n db_dialects.mysql.insert(AssetKeyTable).values(\n asset_key=event.dagster_event.asset_key.to_string(), # type: ignore # (possible none)\n )\n )\n except db_exc.IntegrityError:\n pass\n\n def _connect(self) -> ContextManager[Connection]:\n return create_mysql_connection(self._engine, __file__, "event log")\n\n def run_connection(self, run_id: Optional[str] = None) -> ContextManager[Connection]:\n return self._connect()\n\n def index_connection(self) -> ContextManager[Connection]:\n return self._connect()\n\n def has_table(self, table_name: str) -> bool:\n with self._connect() as conn:\n return table_name in db.inspect(conn).get_table_names()\n\n def has_secondary_index(self, name: str) -> bool:\n if name not in self._secondary_index_cache:\n self._secondary_index_cache[name] = super(\n MySQLEventLogStorage, self\n ).has_secondary_index(name)\n return self._secondary_index_cache[name]\n\n def enable_secondary_index(self, name: str) -> None:\n super(MySQLEventLogStorage, self).enable_secondary_index(name)\n if name in self._secondary_index_cache:\n del self._secondary_index_cache[name]\n\n def watch(self, run_id: str, cursor: Optional[str], callback: EventHandlerFn) -> None:\n if cursor and EventLogCursor.parse(cursor).is_offset_cursor():\n check.failed("Cannot call `watch` with an offset cursor")\n self._event_watcher.watch_run(run_id, cursor, callback)\n\n def end_watch(self, run_id: str, handler: EventHandlerFn) -> None:\n self._event_watcher.unwatch_run(run_id, handler)\n\n @property\n def supports_intersect(self) -> bool:\n return parse_mysql_version(self._mysql_version) >= parse_mysql_version( # type: ignore # (possible none)\n MINIMUM_MYSQL_INTERSECT_VERSION\n )\n\n @property\n def event_watcher(self) -> SqlPollingEventWatcher:\n return self._event_watcher\n\n def __del__(self) -> None:\n self.dispose()\n\n def dispose(self) -> None:\n if not self._disposed:\n self._disposed = True\n self._event_watcher.close()\n\n def alembic_version(self) -> AlembicVersion:\n alembic_config = mysql_alembic_config(__file__)\n with self._connect() as conn:\n return check_alembic_revision(alembic_config, conn)
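The event log storage above is normally instantiated from ``dagster.yaml``, but the ``create_clean_storage``/``wipe_storage`` helpers shown in the source can also stand it up directly (for example in tests). A minimal sketch, assuming ``dagster_mysql`` is installed and a MySQL server is reachable; the connection URL is a placeholder, not a real deployment value:

.. code-block:: python

    from dagster_mysql import MySQLEventLogStorage

    # Placeholder connection string -- substitute real credentials/host.
    MYSQL_URL = "mysql+mysqlconnector://user:password@localhost:3306/dagster_test"

    # Drops any existing event log tables and returns a freshly initialized
    # storage, mirroring the create_clean_storage staticmethod above.
    storage = MySQLEventLogStorage.create_clean_storage(MYSQL_URL)

    # The server version is read via `select version()`; None means no row came back.
    print(storage.get_server_version())

    # Closes the polling event watcher held by the storage.
    storage.dispose()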
\n
", "current_page_name": "_modules/dagster_mysql/event_log/event_log", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_mysql.event_log.event_log"}}, "run_storage": {"run_storage": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_mysql.run_storage.run_storage

\nfrom typing import ContextManager, Mapping, Optional, cast\n\nimport dagster._check as check\nimport sqlalchemy as db\nimport sqlalchemy.dialects as db_dialects\nimport sqlalchemy.pool as db_pool\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._core.storage.config import MySqlStorageConfig, mysql_config\nfrom dagster._core.storage.runs import (\n    DaemonHeartbeatsTable,\n    InstanceInfo,\n    RunStorageSqlMetadata,\n    SqlRunStorage,\n)\nfrom dagster._core.storage.runs.schema import KeyValueStoreTable\nfrom dagster._core.storage.sql import (\n    AlembicVersion,\n    check_alembic_revision,\n    create_engine,\n    run_alembic_upgrade,\n    stamp_alembic_rev,\n)\nfrom dagster._daemon.types import DaemonHeartbeat\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData, serialize_value\nfrom dagster._utils import utc_datetime_from_timestamp\nfrom sqlalchemy.engine import Connection\n\nfrom ..utils import (\n    create_mysql_connection,\n    mysql_alembic_config,\n    mysql_isolation_level,\n    mysql_url_from_config,\n    parse_mysql_version,\n    retry_mysql_connection_fn,\n    retry_mysql_creation_fn,\n)\n\nMINIMUM_MYSQL_BUCKET_VERSION = "8.0.0"\nMINIMUM_MYSQL_INTERSECT_VERSION = "8.0.31"\n\n\n
[docs]class MySQLRunStorage(SqlRunStorage, ConfigurableClass):\n """MySQL-backed run storage.\n\n Users should not directly instantiate this class; it is instantiated by internal machinery when\n ``dagster-webserver`` and ``dagster-graphql`` load, based on the values in the ``dagster.yaml`` file in\n ``$DAGSTER_HOME``. Configuration of this class should be done by setting values in that file.\n\n\n .. literalinclude:: ../../../../../../examples/docs_snippets/docs_snippets/deploying/dagster-mysql-legacy.yaml\n :caption: dagster.yaml\n :start-after: start_marker_runs\n :end-before: end_marker_runs\n :language: YAML\n\n Note that the fields in this config are :py:class:`~dagster.StringSource` and\n :py:class:`~dagster.IntSource` and can be configured from environment variables.\n """\n\n def __init__(self, mysql_url: str, inst_data: Optional[ConfigurableClassData] = None):\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n self.mysql_url = mysql_url\n\n # Default to not holding any connections open to prevent accumulating connections per DagsterInstance\n self._engine = create_engine(\n self.mysql_url,\n isolation_level=mysql_isolation_level(),\n poolclass=db_pool.NullPool,\n )\n\n self._index_migration_cache = {}\n table_names = retry_mysql_connection_fn(db.inspect(self._engine).get_table_names)\n\n # Stamp and create tables if the main table does not exist (we can't check alembic\n # revision because alembic config may be shared with other storage classes)\n if "runs" not in table_names:\n retry_mysql_creation_fn(self._init_db)\n self.migrate()\n self.optimize()\n\n elif "instance_info" not in table_names:\n InstanceInfo.create(self._engine)\n\n self._mysql_version = self.get_server_version()\n\n super().__init__()\n\n def _init_db(self) -> None:\n with self.connect() as conn:\n RunStorageSqlMetadata.create_all(conn)\n stamp_alembic_rev(mysql_alembic_config(__file__), conn)\n\n def optimize_for_webserver(self, statement_timeout: int, pool_recycle: int) -> None:\n # When running in dagster-webserver, hold 1 open connection\n # https://github.com/dagster-io/dagster/issues/3719\n self._engine = create_engine(\n self.mysql_url,\n isolation_level=mysql_isolation_level(),\n pool_size=1,\n pool_recycle=pool_recycle,\n )\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n @classmethod\n def config_type(cls) -> UserConfigSchema:\n return mysql_config()\n\n def get_server_version(self) -> Optional[str]:\n with self.connect() as conn:\n row = conn.execute(db.text("select version()")).fetchone()\n\n if not row:\n return None\n\n return cast(str, row[0])\n\n @classmethod\n def from_config_value(\n cls, inst_data: Optional[ConfigurableClassData], config_value: MySqlStorageConfig\n ) -> "MySQLRunStorage":\n return MySQLRunStorage(inst_data=inst_data, mysql_url=mysql_url_from_config(config_value))\n\n @staticmethod\n def wipe_storage(mysql_url: str) -> None:\n engine = create_engine(\n mysql_url, isolation_level=mysql_isolation_level(), poolclass=db_pool.NullPool\n )\n try:\n RunStorageSqlMetadata.drop_all(engine)\n finally:\n engine.dispose()\n\n @staticmethod\n def create_clean_storage(mysql_url: str) -> "MySQLRunStorage":\n MySQLRunStorage.wipe_storage(mysql_url)\n return MySQLRunStorage(mysql_url)\n\n def connect(self, run_id: Optional[str] = None) -> ContextManager[Connection]:\n return create_mysql_connection(self._engine, __file__, "run")\n\n def upgrade(self) -> None:\n alembic_config = 
mysql_alembic_config(__file__)\n with self.connect() as conn:\n run_alembic_upgrade(alembic_config, conn)\n\n def has_built_index(self, migration_name: str) -> None:\n if migration_name not in self._index_migration_cache:\n self._index_migration_cache[migration_name] = super(\n MySQLRunStorage, self\n ).has_built_index(migration_name)\n return self._index_migration_cache[migration_name]\n\n def mark_index_built(self, migration_name: str) -> None:\n super(MySQLRunStorage, self).mark_index_built(migration_name)\n if migration_name in self._index_migration_cache:\n del self._index_migration_cache[migration_name]\n\n @property\n def supports_intersect(self) -> bool:\n return parse_mysql_version(self._mysql_version) >= parse_mysql_version( # type: ignore\n MINIMUM_MYSQL_INTERSECT_VERSION\n )\n\n def add_daemon_heartbeat(self, daemon_heartbeat: DaemonHeartbeat) -> None:\n with self.connect() as conn:\n conn.execute(\n db_dialects.mysql.insert(DaemonHeartbeatsTable)\n .values(\n timestamp=utc_datetime_from_timestamp(daemon_heartbeat.timestamp),\n daemon_type=daemon_heartbeat.daemon_type,\n daemon_id=daemon_heartbeat.daemon_id,\n body=serialize_value(daemon_heartbeat),\n )\n .on_duplicate_key_update(\n timestamp=utc_datetime_from_timestamp(daemon_heartbeat.timestamp),\n daemon_id=daemon_heartbeat.daemon_id,\n body=serialize_value(daemon_heartbeat),\n )\n )\n\n def set_cursor_values(self, pairs: Mapping[str, str]) -> None:\n check.mapping_param(pairs, "pairs", key_type=str, value_type=str)\n db_values = [{"key": k, "value": v} for k, v in pairs.items()]\n\n with self.connect() as conn:\n insert_stmt = db_dialects.mysql.insert(KeyValueStoreTable).values(db_values)\n conn.execute(\n insert_stmt.on_duplicate_key_update(\n value=insert_stmt.inserted.value,\n )\n )\n\n def alembic_version(self) -> AlembicVersion:\n alembic_config = mysql_alembic_config(__file__)\n with self.connect() as conn:\n return check_alembic_revision(alembic_config, conn)
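One detail worth noting in the run storage above is that ``set_cursor_values`` and ``add_daemon_heartbeat`` both rely on MySQL's ``INSERT ... ON DUPLICATE KEY UPDATE`` to upsert rows. A small sketch of the cursor upsert, again with a placeholder connection URL:

.. code-block:: python

    from dagster_mysql import MySQLRunStorage

    # Placeholder URL; create_clean_storage wipes and recreates the run tables.
    run_storage = MySQLRunStorage.create_clean_storage(
        "mysql+mysqlconnector://user:password@localhost:3306/dagster_test"
    )

    # Each key/value pair becomes one row in the key-value store table;
    # re-running with a new value updates the existing row in place.
    run_storage.set_cursor_values({"my_sensor_cursor": "42"})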
\n
", "current_page_name": "_modules/dagster_mysql/run_storage/run_storage", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_mysql.run_storage.run_storage"}}, "schedule_storage": {"schedule_storage": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_mysql.schedule_storage.schedule_storage

\nfrom typing import ContextManager, Optional, cast\n\nimport dagster._check as check\nimport pendulum\nimport sqlalchemy as db\nimport sqlalchemy.dialects as db_dialects\nimport sqlalchemy.pool as db_pool\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._core.storage.config import MySqlStorageConfig, mysql_config\nfrom dagster._core.storage.schedules import ScheduleStorageSqlMetadata, SqlScheduleStorage\nfrom dagster._core.storage.schedules.schema import InstigatorsTable\nfrom dagster._core.storage.sql import (\n    AlembicVersion,\n    check_alembic_revision,\n    create_engine,\n    run_alembic_upgrade,\n    stamp_alembic_rev,\n)\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData, serialize_value\nfrom sqlalchemy.engine import Connection\n\nfrom ..utils import (\n    create_mysql_connection,\n    mysql_alembic_config,\n    mysql_isolation_level,\n    mysql_url_from_config,\n    parse_mysql_version,\n    retry_mysql_connection_fn,\n    retry_mysql_creation_fn,\n)\n\nMINIMUM_MYSQL_BATCH_VERSION = "8.0.0"\n\n\n
[docs]class MySQLScheduleStorage(SqlScheduleStorage, ConfigurableClass):\n """MySQL-backed schedule storage.\n\n Users should not directly instantiate this class; it is instantiated by internal machinery when\n ``dagster-webserver`` and ``dagster-graphql`` load, based on the values in the ``dagster.yaml`` file in\n ``$DAGSTER_HOME``. Configuration of this class should be done by setting values in that file.\n\n .. literalinclude:: ../../../../../../examples/docs_snippets/docs_snippets/deploying/dagster-mysql-legacy.yaml\n :caption: dagster.yaml\n :start-after: start_marker_schedules\n :end-before: end_marker_schedules\n :language: YAML\n\n Note that the fields in this config are :py:class:`~dagster.StringSource` and\n :py:class:`~dagster.IntSource` and can be configured from environment variables.\n """\n\n def __init__(self, mysql_url: str, inst_data: Optional[ConfigurableClassData] = None):\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n self.mysql_url = mysql_url\n\n # Default to not holding any connections open to prevent accumulating connections per DagsterInstance\n self._engine = create_engine(\n self.mysql_url,\n isolation_level=mysql_isolation_level(),\n poolclass=db_pool.NullPool,\n )\n\n # Stamp and create tables if the main table does not exist (we can't check alembic\n # revision because alembic config may be shared with other storage classes)\n table_names = retry_mysql_connection_fn(db.inspect(self._engine).get_table_names)\n if "jobs" not in table_names:\n retry_mysql_creation_fn(self._init_db)\n\n self._mysql_version = self.get_server_version()\n\n super().__init__()\n\n def _init_db(self) -> None:\n with self.connect() as conn:\n ScheduleStorageSqlMetadata.create_all(conn)\n stamp_alembic_rev(mysql_alembic_config(__file__), conn)\n\n # mark all the data migrations as applied\n self.migrate()\n self.optimize()\n\n def optimize_for_webserver(self, statement_timeout: int, pool_recycle: int) -> None:\n # When running in dagster-webserver, hold an open connection\n # https://github.com/dagster-io/dagster/issues/3719\n self._engine = create_engine(\n self.mysql_url,\n isolation_level=mysql_isolation_level(),\n pool_size=1,\n pool_recycle=pool_recycle,\n )\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n @classmethod\n def config_type(cls) -> UserConfigSchema:\n return mysql_config()\n\n @classmethod\n def from_config_value(\n cls, inst_data: Optional[ConfigurableClassData], config_value: MySqlStorageConfig\n ) -> "MySQLScheduleStorage":\n return MySQLScheduleStorage(\n inst_data=inst_data, mysql_url=mysql_url_from_config(config_value)\n )\n\n @staticmethod\n def wipe_storage(mysql_url: str) -> None:\n engine = create_engine(\n mysql_url, isolation_level=mysql_isolation_level(), poolclass=db_pool.NullPool\n )\n try:\n ScheduleStorageSqlMetadata.drop_all(engine)\n finally:\n engine.dispose()\n\n @staticmethod\n def create_clean_storage(mysql_url: str) -> "MySQLScheduleStorage":\n MySQLScheduleStorage.wipe_storage(mysql_url)\n return MySQLScheduleStorage(mysql_url)\n\n def connect(self) -> ContextManager[Connection]:\n return create_mysql_connection(self._engine, __file__, "schedule")\n\n @property\n def supports_batch_queries(self) -> bool:\n if not self._mysql_version:\n return False\n\n return parse_mysql_version(self._mysql_version) >= parse_mysql_version(\n MINIMUM_MYSQL_BATCH_VERSION\n )\n\n def get_server_version(self) -> Optional[str]:\n with self.connect() as conn:\n row = 
conn.execute(db.text("select version()")).fetchone()\n\n if not row:\n return None\n\n return cast(str, row[0])\n\n def upgrade(self) -> None:\n with self.connect() as conn:\n alembic_config = mysql_alembic_config(__file__)\n run_alembic_upgrade(alembic_config, conn)\n\n def _add_or_update_instigators_table(self, conn: Connection, state) -> None:\n selector_id = state.selector_id\n conn.execute(\n db_dialects.mysql.insert(InstigatorsTable)\n .values(\n selector_id=selector_id,\n repository_selector_id=state.repository_selector_id,\n status=state.status.value,\n instigator_type=state.instigator_type.value,\n instigator_body=serialize_value(state),\n )\n .on_duplicate_key_update(\n status=state.status.value,\n instigator_type=state.instigator_type.value,\n instigator_body=serialize_value(state),\n update_timestamp=pendulum.now("UTC"),\n )\n )\n\n def alembic_version(self) -> AlembicVersion:\n alembic_config = mysql_alembic_config(__file__)\n with self.connect() as conn:\n return check_alembic_revision(alembic_config, conn)
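The ``supports_batch_queries`` property above gates batch instigator queries on the reported server version (``MINIMUM_MYSQL_BATCH_VERSION`` is ``8.0.0``). The comparison below is a self-contained approximation of that check; ``parse_mysql_version`` in ``dagster_mysql``'s utils module is the real helper and may differ in detail:

.. code-block:: python

    MINIMUM_MYSQL_BATCH_VERSION = "8.0.0"

    def parse_version(version: str) -> tuple:
        # "8.0.31-0ubuntu0.22.04.1" -> (8, 0, 31); non-numeric suffixes are dropped
        parts = []
        for piece in version.split("-")[0].split("."):
            digits = "".join(ch for ch in piece if ch.isdigit())
            parts.append(int(digits) if digits else 0)
        return tuple(parts)

    def supports_batch_queries(server_version: str) -> bool:
        return parse_version(server_version) >= parse_version(MINIMUM_MYSQL_BATCH_VERSION)

    assert supports_batch_queries("8.0.31")
    assert not supports_batch_queries("5.7.44")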
\n
", "current_page_name": "_modules/dagster_mysql/schedule_storage/schedule_storage", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_mysql.schedule_storage.schedule_storage"}}}, "dagster_pagerduty": {"resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_pagerduty.resources

\nfrom typing import Dict, Optional, cast\n\nimport pypd\nfrom dagster import ConfigurableResource, resource\nfrom dagster._config.pythonic_config import infer_schema_from_config_class\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster._utils.warnings import suppress_dagster_warnings\nfrom pydantic import Field as PyField\n\n\n
[docs]class PagerDutyService(ConfigurableResource):\n """This resource is for posting events to PagerDuty."""\n\n """Integrates with PagerDuty via the pypd library.\n\n See:\n https://v2.developer.pagerduty.com/docs/events-api-v2\n https://v2.developer.pagerduty.com/docs/send-an-event-events-api-v2\n https://support.pagerduty.com/docs/services-and-integrations#section-events-api-v2\n https://github.com/PagerDuty/pagerduty-api-python-client\n\n for documentation and more information.\n """\n\n routing_key: str = PyField(\n ...,\n description=(\n "The routing key provisions access to your PagerDuty service. You"\n "will need to include the integration key for your new integration, as a"\n "routing_key in the event payload."\n ),\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def EventV2_create(\n self,\n summary: str,\n source: str,\n severity: str,\n event_action: str = "trigger",\n dedup_key: Optional[str] = None,\n timestamp: Optional[str] = None,\n component: Optional[str] = None,\n group: Optional[str] = None,\n event_class: Optional[str] = None,\n custom_details: Optional[object] = None,\n ) -> object:\n """Events API v2 enables you to add PagerDuty's advanced event and incident management\n functionality to any system that can make an outbound HTTP connection.\n\n Args:\n summary (str):\n A high-level, text summary message of the event. Will be used to construct an\n alert's description. Example:\n\n "PING OK - Packet loss = 0%, RTA = 1.41 ms" "Host\n 'acme-andromeda-sv1-c40 :: 179.21.24.50' is DOWN"\n\n source (str):\n Specific human-readable unique identifier, such as a hostname, for the system having\n the problem. Examples:\n\n "prod05.theseus.acme-widgets.com"\n "171.26.23.22"\n "aws:elasticache:us-east-1:852511987:cluster/api-stats-prod-003"\n "9c09acd49a25"\n\n severity (str):\n How impacted the affected system is. Displayed to users in lists and influences the\n priority of any created incidents. Must be one of {info, warning, error, critical}\n\n Keyword Args:\n event_action (str):\n There are three types of events that PagerDuty recognizes, and are used to represent\n different types of activity in your monitored systems. (default: 'trigger')\n\n * trigger: When PagerDuty receives a trigger event, it will either open a new alert,\n or add a new trigger log entry to an existing alert, depending on the\n provided dedup_key. Your monitoring tools should send PagerDuty a trigger\n when a new problem has been detected. You may send additional triggers\n when a previously detected problem has occurred again.\n\n * acknowledge: acknowledge events cause the referenced incident to enter the\n acknowledged state. While an incident is acknowledged, it won't\n generate any additional notifications, even if it receives new\n trigger events. Your monitoring tools should send PagerDuty an\n acknowledge event when they know someone is presently working on the\n problem.\n\n * resolve: resolve events cause the referenced incident to enter the resolved state.\n Once an incident is resolved, it won't generate any additional\n notifications. New trigger events with the same dedup_key as a resolved\n incident won't re-open the incident. Instead, a new incident will be\n created. Your monitoring tools should send PagerDuty a resolve event when\n the problem that caused the initial trigger event has been fixed.\n\n dedup_key (str):\n Deduplication key for correlating triggers and resolves. 
The maximum permitted\n length of this property is 255 characters.\n\n timestamp (str):\n Timestamp (ISO 8601). When the upstream system detected / created the event. This is\n useful if a system batches or holds events before sending them to PagerDuty. This\n will be auto-generated by PagerDuty if not provided. Example:\n\n 2015-07-17T08:42:58.315+0000\n\n component (str):\n The part or component of the affected system that is broken. Examples:\n\n "keepalive"\n "webping"\n "mysql"\n "wqueue"\n\n group (str):\n A cluster or grouping of sources. For example, sources "prod-datapipe-02" and\n "prod-datapipe-03" might both be part of "prod-datapipe". Examples:\n\n "prod-datapipe"\n "www"\n "web_stack"\n\n event_class (str):\n The class/type of the event. Examples:\n\n "High CPU"\n "Latency"\n "500 Error"\n\n custom_details (Dict[str, str]):\n Additional details about the event and affected system. Example:\n\n {"ping time": "1500ms", "load avg": 0.75 }\n """\n data = {\n "routing_key": self.routing_key,\n "event_action": event_action,\n "payload": {"summary": summary, "source": source, "severity": severity},\n }\n\n if dedup_key is not None:\n data["dedup_key"] = dedup_key\n\n payload: Dict[str, object] = cast(Dict[str, object], data["payload"])\n\n if timestamp is not None:\n payload["timestamp"] = timestamp\n\n if component is not None:\n payload["component"] = component\n\n if group is not None:\n payload["group"] = group\n\n if event_class is not None:\n payload["class"] = event_class\n\n if custom_details is not None:\n payload["custom_details"] = custom_details\n\n return pypd.EventV2.create(data=data)
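The docstring above spells out the Events API v2 payload fields; a short example call using only parameters from that signature (the routing key is a placeholder):

.. code-block:: python

    from dagster_pagerduty import PagerDutyService

    pagerduty = PagerDutyService(routing_key="0123456789abcdef0123456789abcdef")

    pagerduty.EventV2_create(
        summary="PING OK - Packet loss = 0%, RTA = 1.41 ms",
        source="prod05.theseus.acme-widgets.com",
        severity="error",          # one of: info, warning, error, critical
        event_action="trigger",    # open (or re-trigger) an alert
        dedup_key="ping-prod05",   # lets later acknowledge/resolve events correlate
        component="webping",
        group="prod-datapipe",
        event_class="Latency",
        custom_details={"ping time": "1500ms", "load avg": 0.75},
    )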
\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n config_schema=infer_schema_from_config_class(PagerDutyService),\n description="""This resource is for posting events to PagerDuty.""",\n)\n@suppress_dagster_warnings\ndef pagerduty_resource(context) -> PagerDutyService:\n """A resource for posting events (alerts) to PagerDuty.\n\n Example:\n .. code-block:: python\n\n @op\n def pagerduty_op(pagerduty: PagerDutyService):\n pagerduty.EventV2_create(\n summary='alert from dagster',\n source='localhost',\n severity='error',\n event_action='trigger',\n )\n\n @job(resource_defs={ 'pagerduty': pagerduty_resource })\n def pagerduty_test():\n pagerduty_op()\n\n pagerduty_test.execute_in_process(\n run_config={\n "resources": {\n 'pagerduty': {'config': {'routing_key': '0123456789abcdef0123456789abcdef'}}\n }\n }\n )\n """\n return PagerDutyService(**context.resource_config)
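The example in the docstring above wires up the legacy ``pagerduty_resource``; with the Pythonic ``PagerDutyService`` the resource can instead be passed to ``Definitions`` directly. A sketch assuming standard ``dagster`` APIs (``Definitions``, ``EnvVar``) and a routing key supplied via environment variable:

.. code-block:: python

    from dagster import Definitions, EnvVar, job, op
    from dagster_pagerduty import PagerDutyService

    @op
    def pagerduty_op(pagerduty: PagerDutyService):
        pagerduty.EventV2_create(
            summary="alert from dagster",
            source="localhost",
            severity="error",
        )

    @job
    def pagerduty_test():
        pagerduty_op()

    defs = Definitions(
        jobs=[pagerduty_test],
        resources={"pagerduty": PagerDutyService(routing_key=EnvVar("PAGERDUTY_ROUTING_KEY"))},
    )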
\n
", "current_page_name": "_modules/dagster_pagerduty/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_pagerduty.resources"}}, "dagster_pandas": {"constraints": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_pandas.constraints

\nimport sys\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom functools import wraps\n\nimport pandas as pd\nfrom dagster import (\n    DagsterType,\n    TypeCheck,\n    _check as check,\n)\nfrom dagster._annotations import experimental\nfrom pandas import DataFrame\nfrom typing_extensions import Final\n\nCONSTRAINT_METADATA_KEY: Final = "constraint_metadata"\n\n\nclass ConstraintViolationException(Exception):\n    """Indicates that a constraint has been violated."""\n\n\nclass ConstraintWithMetadataException(Exception):\n    """This class defines the response generated when a pandas DF fails validation -- it can be used to generate either a\n    failed typecheck or an exception.\n\n    Args:\n        constraint_name (str):  the name of the violated constraint\n        constraint_description (Optional[str]): the description of the violated constraint\n        expectation (Optional[Union[dict,list, str, set]]): what result was expected -- typically a jsonlike, though it can be a string\n        offending (Optional[Union[dict,list, str, set]]):  which pieces of the dataframe violated the expectation, typically list or string\n        actual (Optional[Union[dict,list, str, set]]): what those pieces of the dataframe actually were -- typically a jsonlike\n    """\n\n    def __init__(\n        self,\n        constraint_name,\n        constraint_description="",\n        expectation=None,\n        offending=None,\n        actual=None,\n    ):\n        self.constraint_name = constraint_name\n        self.constraint_description = constraint_description\n        self.expectation = check.opt_inst_param(expectation, "expectation", (dict, list, str, set))\n        self.offending = check.opt_inst_param(offending, "offending", (dict, list, str, set))\n        self.actual = check.opt_inst_param(actual, "actual", (dict, list, str, set))\n        super(ConstraintWithMetadataException, self).__init__(\n            "Violated {} - {}, {} was/were expected, but we received {} which was/were {}".format(\n                constraint_name,\n                constraint_description,\n                expectation,\n                offending,\n                actual,\n            )\n        )\n\n    def normalize_metadata_json_value(self, val):\n        if isinstance(val, set):\n            return list(val)\n        else:\n            return val\n\n    def convert_to_metadata(self):\n        return {\n            CONSTRAINT_METADATA_KEY: {\n                "constraint_name": self.constraint_name,\n                "constraint_description": self.constraint_description,\n                "expected": self.normalize_metadata_json_value(self.expectation),\n                "offending": self.normalize_metadata_json_value(self.offending),\n                "actual": self.normalize_metadata_json_value(self.actual),\n            },\n        }\n\n    def return_as_typecheck(self):\n        return TypeCheck(\n            success=False, description=self.args[0], metadata=self.convert_to_metadata()\n        )\n\n\nclass DataFrameConstraintViolationException(ConstraintViolationException):\n    """Indicates a dataframe level constraint has been violated."""\n\n    def __init__(self, constraint_name, constraint_description):\n        super(DataFrameConstraintViolationException, self).__init__(\n            f"Violated {constraint_name} - {constraint_description}"\n        )\n\n\nclass DataFrameWithMetadataException(ConstraintWithMetadataException):\n    def __init__(self, constraint_name, constraint_description, 
expectation, actual):\n        super(DataFrameWithMetadataException, self).__init__(\n            constraint_name, constraint_description, expectation, "a malformed dataframe", actual\n        )\n\n\nclass ColumnConstraintViolationException(ConstraintViolationException):\n    """Indicates that a column constraint has been violated."""\n\n    def __init__(self, constraint_name, constraint_description, column_name, offending_rows=None):\n        self.constraint_name = constraint_name\n        self.constraint_description = constraint_description\n        self.column_name = column_name\n        self.offending_rows = offending_rows\n        super(ColumnConstraintViolationException, self).__init__(self.construct_message())\n\n    def construct_message(self):\n        base_message = (\n            'Violated "{constraint_name}" for column "{column_name}" - {constraint_description}'\n            .format(\n                constraint_name=self.constraint_name,\n                constraint_description=self.constraint_description,\n                column_name=self.column_name,\n            )\n        )\n        if self.offending_rows is not None:\n            base_message += "The offending (index, row values) are the following: {}".format(\n                self.offending_rows\n            )\n        return base_message\n\n\nclass ColumnWithMetadataException(ConstraintWithMetadataException):\n    def __init__(self, constraint_name, constraint_description, expectation, offending, actual):\n        super(ColumnWithMetadataException, self).__init__(\n            "the column constraint " + constraint_name,\n            constraint_description,\n            expectation,\n            offending,\n            actual,\n        )\n\n\nclass Constraint:\n    """Base constraint object that all constraints inherit from.\n\n    Args:\n        error_description (Optional[str]): The plain string description that is output in the terminal if the constraint fails.\n        markdown_description (Optional[str]): A markdown supported description that is shown in the Dagster UI if the constraint fails.\n    """\n\n    def __init__(self, error_description=None, markdown_description=None):\n        self.name = self.__class__.__name__\n        self.markdown_description = check.str_param(markdown_description, "markdown_description")\n        self.error_description = check.str_param(error_description, "error_description")\n\n\n@experimental\nclass ConstraintWithMetadata:\n    """This class defines a base constraint over pandas DFs with organized metadata.\n\n    Args:\n        description (str): description of the constraint\n        validation_fn (Callable[[DataFrame], Tuple[bool, dict[str, Union[dict,list, str, set]]]]:\n                    the validation function to run over inputted data\n                    This function should return a tuple of a boolean for success or failure, and a dict containing\n                    metadata about the test -- this metadata will be passed to the resulting exception if validation\n                    fails.\n        resulting_exception (ConstraintWithMetadataException):  what response a failed typecheck should induce\n        raise_or_typecheck (Optional[bool]): whether to raise an exception (if set to True) or emit a failed typecheck event\n                    (if set to False) when validation fails\n        name (Optional[str]): what to call the constraint, defaults to the class name.\n    """\n\n    # TODO:  validation_fn returning metadata is sorta broken.  
maybe have it yield typecheck events and grab metadata?\n\n    def __init__(\n        self, description, validation_fn, resulting_exception, raise_or_typecheck=True, name=None\n    ):\n        if name is None:\n            self.name = self.__class__.__name__\n        else:\n            self.name = name\n        self.description = description\n        # should return a tuple of (bool, and either an empty dict or a dict of extra params)\n        self.validation_fn = validation_fn\n        self.resulting_exception = resulting_exception\n        self.raise_or_typecheck = raise_or_typecheck\n\n    def validate(self, data, *args, **kwargs):\n        res = self.validation_fn(data, *args, **kwargs)\n        if not res[0]:\n            exc = self.resulting_exception(\n                constraint_name=self.name, constraint_description=self.description, **res[1]\n            )\n\n            if self.raise_or_typecheck:\n                raise exc\n            else:\n                return exc.return_as_typecheck()\n\n        else:\n            if res[0]:\n                return TypeCheck(success=True)\n\n    # TODO:  composition of validations\n    def as_dagster_type(self, *args, **kwargs):\n        if self.raise_or_typecheck:\n            raise Exception(\n                "Dagster types can only be constructed from constraints that return typechecks"\n            )\n        return DagsterType(\n            name=self.name,\n            description=f"A Pandas DataFrame with the following validation: {self.description}",\n            type_check_fn=lambda x: self.validate(x, *args),\n            **kwargs,\n        )\n\n\nclass MultiConstraintWithMetadata(ConstraintWithMetadata):\n    """Use this class if you have multiple constraints to check over the entire dataframe.\n\n    Args:\n        description (str): description of the constraint\n        validation_fn_arr(List[Callable[[DataFrame], Tuple[bool, dict[str, Union[dict,list, str, set]]]]]):\n                    a list of the validation functions to run over inputted data\n                    Each function should return a tuple of a boolean for success or failure, and a dict containing\n                    metadata about the test -- this metadata will be passed to the resulting exception if validation\n                    fails.\n        resulting_exception (ConstraintWithMetadataException):  what response a failed typecheck should induce\n        raise_or_typecheck (Optional[bool]): whether to raise an exception (if set to True) or emit a failed typecheck event\n                    (if set to False) when validation fails\n        name (Optional[str]): what to call the constraint, defaults to the class name.\n    """\n\n    def __init__(\n        self,\n        description,\n        validation_fn_arr,\n        resulting_exception,\n        raise_or_typecheck=True,\n        name=None,\n    ):\n        validation_fn_arr = check.list_param(validation_fn_arr, "validation_fn_arr")\n\n        def validation_fn(data, *args, **kwargs):\n            results = [f(data, *args, **kwargs) for f in validation_fn_arr]\n            truthparam = all(item[0] for item in results)\n            metadict = defaultdict(dict)\n            for i, dicta in enumerate(item[1] for item in results):\n                if len(dicta.keys()) > 0:\n                    for key in dicta:\n                        metadict[key][validation_fn_arr[i].__name__] = dicta[key]\n            return (truthparam, metadict)\n\n        super(MultiConstraintWithMetadata, self).__init__(\n            
description,\n            validation_fn,\n            resulting_exception,\n            raise_or_typecheck=raise_or_typecheck,\n            name=name,\n        )\n\n\nclass StrictColumnsWithMetadata(ConstraintWithMetadata):\n    def __init__(self, column_list, enforce_ordering=False, raise_or_typecheck=True, name=None):\n        self.enforce_ordering = check.bool_param(enforce_ordering, "enforce_ordering")\n        self.column_list = check.list_param(column_list, "strict_column_list", of_type=str)\n\n        def validation_fcn(inframe):\n            if list(inframe.columns) == column_list:\n                return (True, {})\n            else:\n                if self.enforce_ordering:\n                    resdict = {"expectation": self.column_list, "actual": list(inframe.columns)}\n                    return (False, resdict)\n                else:\n                    if set(inframe.columns) == set(column_list):\n                        return (True, {})\n                    else:\n                        extra = [x for x in inframe.columns if x not in set(column_list)]\n                        missing = [x for x in set(column_list) if x not in inframe.columns]\n                        resdict = {\n                            "expectation": self.column_list,\n                            "actual": {"extra_columns": extra, "missing_columns": missing},\n                        }\n                        return (False, resdict)\n\n        basestr = f"ensuring that the right columns, {self.column_list} were present"\n        if enforce_ordering:\n            basestr += " in the right order"\n        super(StrictColumnsWithMetadata, self).__init__(\n            basestr,\n            validation_fcn,\n            DataFrameWithMetadataException,\n            raise_or_typecheck=raise_or_typecheck,\n            name=name,\n        )\n\n\nclass DataFrameConstraint(Constraint):\n    """Base constraint object that represent Dataframe shape constraints.\n\n    Args:\n        error_description (Optional[str]): The plain string description that is output in the terminal if the constraint fails.\n        markdown_description (Optional[str]): A markdown supported description that is shown in the Dagster UI if the constraint fails.\n    """\n\n    def __init__(self, error_description=None, markdown_description=None):\n        super(DataFrameConstraint, self).__init__(\n            error_description=error_description, markdown_description=markdown_description\n        )\n\n    def validate(self, dataframe):\n        raise NotImplementedError()\n\n\n
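The ``ConstraintWithMetadata`` contract described above boils down to a validation function that returns ``(success, metadata_kwargs)``, with the metadata forwarded to the resulting exception (or failed type check) on failure. A minimal sketch using only names defined in this module; the ``has_rows`` helper is hypothetical:

.. code-block:: python

    import pandas as pd
    from dagster_pandas.constraints import (
        ConstraintWithMetadata,
        ConstraintWithMetadataException,
    )

    def has_rows(df, *args, **kwargs):
        """returns whether the dataframe is non-empty"""
        if len(df) > 0:
            return True, {}
        return False, {"expectation": "at least one row", "actual": "0 rows"}

    non_empty = ConstraintWithMetadata(
        description="confirms the dataframe has at least one row",
        validation_fn=has_rows,
        resulting_exception=ConstraintWithMetadataException,
        raise_or_typecheck=False,  # emit a failed TypeCheck instead of raising
    )

    ok = non_empty.validate(pd.DataFrame({"foo": [1, 2]}))
    bad = non_empty.validate(pd.DataFrame({"foo": []}))
    print(ok.success, bad.success)  # True False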
[docs]class StrictColumnsConstraint(DataFrameConstraint):\n """A dataframe constraint that validates column existence and ordering.\n\n Args:\n strict_column_list (List[str]): The exact list of columns that your dataframe must have.\n enforce_ordering (Optional[bool]): If true, will enforce that the ordering of column names must match.\n Default is False.\n """\n\n def __init__(self, strict_column_list, enforce_ordering=False):\n self.enforce_ordering = check.bool_param(enforce_ordering, "enforce_ordering")\n self.strict_column_list = check.list_param(\n strict_column_list, "strict_column_list", of_type=str\n )\n description = f"No columns outside of {self.strict_column_list} allowed. "\n if enforce_ordering:\n description += "Columns must be in that order."\n super(StrictColumnsConstraint, self).__init__(\n error_description=description, markdown_description=description\n )\n\n def validate(self, dataframe):\n check.inst_param(dataframe, "dataframe", DataFrame)\n columns_received = list(dataframe.columns)\n if self.enforce_ordering:\n if self.strict_column_list != columns_received:\n raise DataFrameConstraintViolationException(\n constraint_name=self.name,\n constraint_description=(\n "Expected the following ordering of columns {expected}. Received:"\n " {received}".format(\n expected=self.strict_column_list, received=columns_received\n )\n ),\n )\n for column in columns_received:\n if column not in self.strict_column_list:\n raise DataFrameConstraintViolationException(\n constraint_name=self.name,\n constraint_description="Expected {}. Received {}.".format(\n self.strict_column_list, columns_received\n ),\n )
\n\n\n
[docs]class RowCountConstraint(DataFrameConstraint):\n """A dataframe constraint that validates the expected count of rows.\n\n Args:\n num_allowed_rows (int): The number of allowed rows in your dataframe.\n error_tolerance (Optional[int]): The acceptable threshold if you are not completely certain. Defaults to 0.\n """\n\n def __init__(self, num_allowed_rows, error_tolerance=0):\n self.num_allowed_rows = check.int_param(num_allowed_rows, "num_allowed_rows")\n self.error_tolerance = abs(check.int_param(error_tolerance, "error_tolerance"))\n if self.error_tolerance > self.num_allowed_rows:\n raise ValueError("Tolerance can't be greater than the number of rows you expect.")\n description = f"Dataframe must have {self.num_allowed_rows} +- {self.error_tolerance} rows."\n super(RowCountConstraint, self).__init__(\n error_description=description, markdown_description=description\n )\n\n def validate(self, dataframe):\n check.inst_param(dataframe, "dataframe", DataFrame)\n\n if not (\n self.num_allowed_rows - self.error_tolerance\n <= len(dataframe)\n <= self.num_allowed_rows + self.error_tolerance\n ):\n raise DataFrameConstraintViolationException(\n constraint_name=self.name,\n constraint_description=(\n "Expected {expected} +- {tolerance} rows. Got {received}".format(\n expected=self.num_allowed_rows,\n tolerance=self.error_tolerance,\n received=len(dataframe),\n )\n ),\n )
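Both dataframe-level constraints above raise ``DataFrameConstraintViolationException`` from ``validate`` when a frame does not satisfy them; a quick sketch exercising them directly:

.. code-block:: python

    import pandas as pd
    from dagster_pandas.constraints import (
        DataFrameConstraintViolationException,
        RowCountConstraint,
        StrictColumnsConstraint,
    )

    df = pd.DataFrame({"foo": [1, 2, 3], "bar": [9, 10, 10]})

    # Passes: exactly the expected columns, in the expected order.
    StrictColumnsConstraint(["foo", "bar"], enforce_ordering=True).validate(df)

    # Passes: 3 rows is within 2 +/- 1.
    RowCountConstraint(num_allowed_rows=2, error_tolerance=1).validate(df)

    try:
        StrictColumnsConstraint(["foo"]).validate(df)  # "bar" is not allowed
    except DataFrameConstraintViolationException as exc:
        print(exc)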
\n\n\ndef apply_ignore_missing_data_to_mask(mask, column):\n return mask & ~column.isnull()\n\n\nclass ColumnAggregateConstraintWithMetadata(ConstraintWithMetadata):\n """Similar to the base class, but now your validation functions should take in columns (pd.Series) not Dataframes.\n\n Args:\n description (str): description of the constraint\n validation_fn (Callable[[pd.Series], Tuple[bool, dict[str, Union[dict,list, str, set]]]]:\n the validation function to run over inputted data\n This function should return a tuple of a boolean for success or failure, and a dict containing\n metadata about the test -- this metadata will be passed to the resulting exception if validation\n fails.\n resulting_exception (ConstraintWithMetadataException): what response a failed typecheck should induce\n raise_or_typecheck (Optional[bool]): whether to raise an exception (if set to True) or emit a failed typecheck event\n (if set to False) when validation fails\n name (Optional[str]): what to call the constraint, defaults to the class name.\n """\n\n def validate(self, data, *columns, **kwargs):\n if len(columns) == 0:\n columns = data.columns\n columns = [column for column in columns if column in data.columns]\n relevant_data = data[list(columns)]\n\n offending_columns = set()\n offending_values = {}\n for column in columns:\n # TODO: grab extra metadata\n res = self.validation_fn(relevant_data[column])\n if not res[0]:\n offending_columns.add(column)\n if res[1].get("actual") is not None:\n offending_values[column] = [x.item() for x in res[1].get("actual").to_numpy()]\n else:\n offending_values[column] = [x.item() for x in relevant_data[column].to_numpy()]\n if len(offending_columns) == 0 and not self.raise_or_typecheck:\n return TypeCheck(success=True)\n elif len(offending_columns) > 0:\n metadict = {\n "expectation": self.description.replace("Confirms", ""),\n "actual": offending_values,\n "offending": offending_columns,\n }\n exc = self.resulting_exception(\n constraint_name=self.name, constraint_description=self.description, **metadict\n )\n\n if self.raise_or_typecheck:\n raise exc\n else:\n return exc.return_as_typecheck()\n\n\nclass ColumnConstraintWithMetadata(ConstraintWithMetadata):\n """This class is useful for constructing single constraints that you want to apply to multiple\n columns of your dataframe.\n\n The main difference from the base class in terms of construction is that now, your validation_fns should operate on\n individual values.\n\n Args:\n description (str): description of the constraint\n validation_fn (Callable[[Any], Tuple[bool, dict[str, Union[dict,list, str, set]]]]:\n the validation function to run over inputted data\n This function should return a tuple of a boolean for success or failure, and a dict containing\n metadata about the test -- this metadata will be passed to the resulting exception if validation\n fails.\n resulting_exception (ConstraintWithMetadataException): what response a failed typecheck should induce\n raise_or_typecheck (Optional[bool]): whether to raise an exception (if set to True) or emit a failed typecheck event\n (if set to False) when validation fails\n name (Optional[str]): what to call the constraint, defaults to the class name.\n """\n\n def validate(self, data, *columns, **kwargs):\n if len(columns) == 0:\n columns = data.columns\n\n columns = [column for column in columns if column in data.columns]\n relevant_data = data[list(columns)]\n offending = {}\n offending_values = {}\n # TODO: grab metadata from here\n inverse_validation = lambda x: 
not self.validation_fn(x)[0]\n for column in columns:\n results = relevant_data[relevant_data[column].apply(inverse_validation)]\n if len(results.index.tolist()) > 0:\n offending[column] = ["row " + str(i) for i in (results.index.tolist())]\n offending_values[column] = results[column].tolist()\n if len(offending) == 0:\n if not self.raise_or_typecheck:\n return TypeCheck(success=True)\n else:\n metadict = {\n "expectation": self.validation_fn.__doc__,\n "actual": offending_values,\n "offending": offending,\n }\n exc = self.resulting_exception(\n constraint_name=self.name, constraint_description=self.description, **metadict\n )\n\n if self.raise_or_typecheck:\n raise exc\n else:\n return exc.return_as_typecheck()\n\n\nclass MultiColumnConstraintWithMetadata(ColumnConstraintWithMetadata):\n """This class is useful for constructing more complicated relationships between columns\n and expectations -- i.e. you want some validations on column A, others on column B, etc.\n This lets you package up the metadata neatly, and also allows for cases like 'fail if any one of\n these constraints fails but still run all of them'.\n\n Args:\n description (str): description of the overall set of validations\n fn_and_columns_dict (Dict[str, List[Callable[[Any], Tuple[bool, dict[str, Union[dict,list, str, set]]]]]):\n while this is a relatively complex type,\n what it amounts to is 'a dict mapping columns to the functions to\n run on them'\n resulting_exception (type): the response to generate if validation fails. Subclass of\n ConstraintWithMetadataException\n raise_or_typecheck (Optional[bool]): whether to raise an exception (true) or a failed typecheck (false)\n type_for_internal (Optional[type]): what type to use for internal validators. Subclass of\n ConstraintWithMetadata\n name (Optional[str]): what to call the constraint, defaults to the class name.\n """\n\n def __init__(\n self,\n description,\n fn_and_columns_dict,\n resulting_exception,\n raise_or_typecheck=True,\n type_for_internal=ColumnConstraintWithMetadata,\n name=None,\n ):\n # TODO: support multiple descriptions\n self.column_to_fn_dict = check.dict_param(\n fn_and_columns_dict, "fn_and_columns_dict", key_type=str\n )\n\n def validation_fn(data, *args, **kwargs):\n metadict = defaultdict(dict)\n truthparam = True\n for column, fn_arr in self.column_to_fn_dict.items():\n if column not in data.columns:\n continue\n for fn in fn_arr:\n # TODO: do this more effectively\n new_validator = type_for_internal(\n fn.__doc__, fn, ColumnWithMetadataException, raise_or_typecheck=False\n )\n result = new_validator.validate(\n DataFrame(data[column]), column, *args, **kwargs\n )\n result_val = result.success\n if result_val:\n continue\n result_dict = result.metadata[CONSTRAINT_METADATA_KEY].data\n truthparam = truthparam and result_val\n for key in result_dict.keys():\n if "constraint" not in key:\n if key == "expected":\n new_key = "expectation"\n result_dict[key] = result_dict[key].replace("returns", "").strip()\n if column not in metadict[new_key] or new_key not in metadict:\n metadict[new_key][column] = dict()\n metadict[new_key][column][fn.__name__] = result_dict[key]\n else:\n if column not in metadict[key] or key not in metadict:\n metadict[key][column] = dict()\n if isinstance(result_dict[key], dict):\n metadict[key][column][fn.__name__] = result_dict[key][column]\n else:\n metadict[key][column][fn.__name__] = "a violation"\n return truthparam, metadict\n\n super(MultiColumnConstraintWithMetadata, self).__init__(\n description,\n validation_fn,\n 
resulting_exception,\n raise_or_typecheck=raise_or_typecheck,\n name=name,\n )\n\n def validate(self, data, *args, **kwargs):\n return ConstraintWithMetadata.validate(self, data, *args, **kwargs)\n\n\nclass MultiAggregateConstraintWithMetadata(MultiColumnConstraintWithMetadata):\n """This class is similar to multicolumn, but takes in functions that operate on the whole column at once\n rather than ones that operate on each value --\n consider this similar to the difference between apply-map and apply aggregate.\n\n Args:\n description (str): description of the overall set of validations (TODO: support multiple descriptions)\n fn_and_columns_dict (Dict[str, List[Callable[[pd.Series], Tuple[bool, dict[str, Union[dict,list, str, set]]]]]):\n while this is a relatively complex type,\n what it amounts to is a dict mapping columns to the functions to\n run on them'\n resulting_exception (type): the response to generate if validation fails. Subclass of\n ConstraintWithMetadataException\n raise_or_typecheck (Optional[bool]): whether to raise an exception (true) or a failed typecheck (false)\n type_for_internal (Optional[type]): what type to use for internal validators. Subclass of\n ConstraintWithMetadata\n name (Optional[str]): what to call the constraint, defaults to the class name.\n """\n\n def __init__(\n self,\n description,\n fn_and_columns_dict,\n resulting_exception,\n raise_or_typecheck=True,\n name=None,\n ):\n super(MultiAggregateConstraintWithMetadata, self).__init__(\n description,\n fn_and_columns_dict,\n resulting_exception,\n raise_or_typecheck=raise_or_typecheck,\n type_for_internal=ColumnAggregateConstraintWithMetadata,\n name=name,\n )\n\n\ndef non_null_validation(x):\n """Validates that a particular value in a column is not null.\n\n Usage:\n pass this as a column validator to\n :py:class:'~dagster_pandas.constraints.ColumnConstraintWithMetadata'\n or :py:class:'~dagster_pandas.constraints.MultiColumnConstraintWithMetadata'\n Generally, you should prefer to use nonnull as a decorator/wrapper rather than using this\n directly.\n """\n return not pd.isnull(x), {}\n\n\ndef all_unique_validator(column, ignore_missing_vals=False):\n """Validates that all values in an iterable are unique.\n\n Returns duplicated values as metadata.\n\n Usage:\n As a validation function for a\n :py:class:'~dagster_pandas.constraints.ColumnAggregateConstraintWithMetadata'\n or :py:class:'~dagster_pandas.constraints.MultiAggregateConstraintWithMetadata'\n Example:\n .. 
code-block:: python\n aggregate_validator = MultiAggregateConstraintWithMetadata(\n "confirms all values are unique",\n {'bar': [all_unique_validator]},\n ConstraintWithMetadataException,\n raise_or_typecheck=False,\n )\n ntype = create_structured_dataframe_type(\n "NumericType",\n columns_aggregate_validator=aggregate_validator\n )\n @op(out={'basic_dataframe': Out(dagster_type=ntype)})\n def create_dataframe(_):\n yield Output(\n DataFrame({'foo': [1, 2, 3], 'bar': [9, 10, 10]}),\n output_name='basic_dataframe',\n )\n #will fail with\n metadata['offending'] == {'bar': {'all_unique_validator': 'a violation'}}\n metadata['actual'] == {'bar': {'all_unique_validator': [10.0]}}\n """\n column = pd.Series(column)\n duplicated = column.duplicated()\n if ignore_missing_vals:\n duplicated = apply_ignore_missing_data_to_mask(duplicated, column)\n return not duplicated.any(), {"actual": column[duplicated]}\n\n\ndef nonnull(func):\n """Decorator for column validation functions to make them error on nulls.\n\n Usage:\n pass decorated functions as column validators to\n :py:class:'~dagster_pandas.constraints.ColumnConstraintWithMetadata'\n or :py:class:'~dagster_pandas.constraints.MultiColumnConstraintWithMetadata'\n Args:\n func (Callable[[Any], Tuple[bool, dict[str, Union[dict,list, str, set]]]]]):\n the column validator you want to error on nulls.\n """\n\n @wraps(func)\n def nvalidator(val):\n origval = func(val)\n nval = non_null_validation(val)\n return origval[0] and nval[0], {}\n\n nvalidator.__doc__ += " and ensures no values are null"\n\n return nvalidator\n\n\ndef column_range_validation_factory(minim=None, maxim=None, ignore_missing_vals=False):\n """Factory for validators testing if column values are within a range.\n\n Args:\n minim(Optional[Comparable]): the low end of the range\n maxim(Optional[Comparable]): the high end of the range\n ignore_missing_vals(Optional[bool]): whether to ignore nulls.\n\n Returns: a validation function for this constraint\n Usage:\n pass returned functions as column validators to\n :py:class:'~dagster_pandas.constraints.ColumnConstraintWithMetadata'\n or :py:class:'~dagster_pandas.constraints.MultiColumnConstraintWithMetadata'\n Examples:\n .. 
code-block:: python\n in_range_validator = column_range_validation_factory(1, 3, ignore_missing_vals=True)\n column_validator = MultiColumnConstraintWithMetadata(\n "confirms values are numbers in a range",\n {'foo': [in_range_validator]},\n ColumnWithMetadataException,\n raise_or_typecheck=False,\n )\n ntype = create_structured_dataframe_type(\n "NumericType",\n columns_validator=column_validator\n )\n @op(out={'basic_dataframe': Out(dagster_type=ntype)})\n def create_dataframe(_):\n yield Output(\n DataFrame({'foo': [1, 2, 7], 'bar': [9, 10, 10]}),\n output_name='basic_dataframe',\n )\n #will fail with\n metadata['offending'] == {'foo': {'in_range_validation_fn': ['row 2']}}\n metadata['actual'] == {'foo': {'in_range_validation_fn': [7]}}\n\n """\n if minim is None:\n if isinstance(maxim, datetime):\n minim = datetime.min\n else:\n minim = -1 * (sys.maxsize - 1)\n if maxim is None:\n if isinstance(minim, datetime):\n maxim = datetime.max\n else:\n maxim = sys.maxsize\n\n def in_range_validation_fn(x):\n if ignore_missing_vals and pd.isnull(x):\n return True, {}\n return (isinstance(x, (type(minim), type(maxim)))) and (x <= maxim) and (x >= minim), {}\n\n in_range_validation_fn.__doc__ = f"checks whether values are between {minim} and {maxim}"\n if ignore_missing_vals:\n in_range_validation_fn.__doc__ += ", ignoring nulls"\n\n return in_range_validation_fn\n\n\ndef categorical_column_validator_factory(categories, ignore_missing_vals=False):\n """Factory for validators testing if all values are in some set.\n\n Args:\n categories(Union[Sequence, set]): the set of allowed values\n ignore_missing_vals(Optional[bool]): whether to ignore nulls.\n\n Returns: a validation function for this constraint\n\n Usage:\n pass returned functions as column validators to\n :py:class:'~dagster_pandas.constraints.ColumnConstraintWithMetadata'\n or :py:class:'~dagster_pandas.constraints.MultiColumnConstraintWithMetadata'\n\n Example:\n .. 
code-block:: python\n categorical_validation_fn = categorical_column_validator_factory([1, 2])\n column_validator = MultiColumnConstraintWithMetadata(\n "confirms values are numbers in a range",\n {'foo': [categorical_validation_fn]},\n ColumnWithMetadataException,\n raise_or_typecheck=False,\n )\n ntype = create_structured_dataframe_type(\n "NumericType",\n columns_validator=column_validator\n )\n @op(out={'basic_dataframe': Out(dagster_type=ntype)})\n def create_dataframe(_):\n yield Output(\n DataFrame({'foo': [1, 2, 7], 'bar': [9, 10, 10]}),\n output_name='basic_dataframe',\n )\n #will fail with\n metadata['offending'] == {'foo': {'categorical_validation_fn': ['row 2']}}\n metadata['actual'] == {'foo': {'categorical_validation_fn': [7]}}\n\n """\n categories = set(categories)\n\n def categorical_validation_fn(x):\n if ignore_missing_vals and pd.isnull(x):\n return True, {}\n return (x in categories), {}\n\n categorical_validation_fn.__doc__ = (\n f"checks whether values are within this set of values: {categories}"\n )\n if ignore_missing_vals:\n categorical_validation_fn.__doc__ += ", ignoring nulls"\n\n return categorical_validation_fn\n\n\ndef dtype_in_set_validation_factory(datatypes, ignore_missing_vals=False):\n """Factory for testing if the dtype of a val falls within some allowed set.\n\n Args:\n datatypes(Union[set[type], type]): which datatype/datatypes are allowed\n ignore_missing_vals(Optional[bool]): whether to ignore nulls\n\n Returns: a validation function for this constraint\n\n Usage:\n pass returned functions as column validators to\n :py:class:'~dagster_pandas.constraints.ColumnConstraintWithMetadata'\n or :py:class:'~dagster_pandas.constraints.MultiColumnConstraintWithMetadata'\n\n Examples:\n .. code-block:: python\n dtype_is_num_validator = dtype_in_set_validation_factory((int, float, int64, float64))\n column_validator = MultiColumnConstraintWithMetadata(\n "confirms values are numbers in a range",\n {'foo': [dtype_is_num_validator]},\n ColumnWithMetadataException,\n raise_or_typecheck=False,\n )\n ntype = create_structured_dataframe_type(\n "NumericType",\n columns_validator=column_validator\n )\n @op(out={'basic_dataframe': Out(dagster_type=ntype)})\n def create_dataframe(_):\n yield Output(\n DataFrame({'foo': [1, 'a', 7], 'bar': [9, 10, 10]}),\n output_name='basic_dataframe',\n )\n #will fail with\n metadata['offending'] == {'foo': {'categorical_validation_fn': ['row 1']}}\n metadata['actual'] == {'foo': {'categorical_validation_fn': ['a']}}\n\n """\n\n def dtype_in_set_validation_fn(x):\n if ignore_missing_vals and pd.isnull(x):\n return True, {}\n return isinstance(x, datatypes), {}\n\n dtype_in_set_validation_fn.__doc__ = f"checks whether values are this type/types: {datatypes}"\n if ignore_missing_vals:\n dtype_in_set_validation_fn.__doc__ += ", ignoring nulls"\n\n return dtype_in_set_validation_fn\n\n\nclass ColumnRangeConstraintWithMetadata(ColumnConstraintWithMetadata):\n def __init__(self, minim=None, maxim=None, columns=None, raise_or_typecheck=True):\n self.name = self.__class__.__name__\n\n description = f"Confirms values are between {minim} and {maxim}"\n super(ColumnRangeConstraintWithMetadata, self).__init__(\n description=description,\n validation_fn=column_range_validation_factory(minim=minim, maxim=maxim),\n resulting_exception=ColumnWithMetadataException,\n raise_or_typecheck=raise_or_typecheck,\n )\n self.columns = columns\n\n def validate(self, data, *args, **kwargs):\n if self.columns is None:\n self.columns = list(data.columns)\n 
self.columns.extend(args)\n return super(ColumnRangeConstraintWithMetadata, self).validate(\n data, *self.columns, **kwargs\n )\n\n\nclass ColumnConstraint(Constraint):\n """Base constraint object that represent dataframe column shape constraints.\n\n Args:\n error_description (Optional[str]): The plain string description that is output in the terminal if the constraint fails.\n markdown_description (Optional[str]): A markdown supported description that is shown in the Dagster UI if the constraint fails.\n """\n\n def __init__(self, error_description=None, markdown_description=None):\n super(ColumnConstraint, self).__init__(\n error_description=error_description, markdown_description=markdown_description\n )\n\n def validate(self, dataframe, column_name):\n pass\n\n @staticmethod\n def get_offending_row_pairs(dataframe, column_name):\n return zip(dataframe.index.tolist(), dataframe[column_name].tolist())\n\n\nclass ColumnDTypeFnConstraint(ColumnConstraint):\n """A column constraint that applies a pandas dtype validation function to a columns dtype.\n\n Args:\n type_fn (Callable[[Set[str]], bool]): This is a function that takes the pandas columns dtypes and\n returns if those dtypes match the types it expects. See pandas.core.dtypes.common for examples.\n """\n\n def __init__(self, type_fn):\n self.type_fn = check.callable_param(type_fn, "type_fn")\n description = f'Dtype must satisfy "{self.type_fn.__name__}"'\n super(ColumnDTypeFnConstraint, self).__init__(\n error_description=description, markdown_description=description\n )\n\n def validate(self, dataframe, column_name):\n column_dtype = dataframe[column_name].dtype\n if not self.type_fn(column_dtype):\n raise ColumnConstraintViolationException(\n constraint_name=self.name,\n constraint_description=f'{self.error_description}, but was "{column_dtype}"',\n column_name=column_name,\n )\n\n\nclass ColumnDTypeInSetConstraint(ColumnConstraint):\n """A column constraint that validates the pandas column dtypes based on the expected set of dtypes.\n\n Args:\n expected_dtype_set (Set[str]): The set of pandas dtypes that the pandas column dtypes must match.\n """\n\n def __init__(self, expected_dtype_set):\n self.expected_dtype_set = check.set_param(expected_dtype_set, "expected_dtype_set")\n description = f"Column dtype must be in the following set {self.expected_dtype_set}."\n super(ColumnDTypeInSetConstraint, self).__init__(\n error_description=description, markdown_description=description\n )\n\n def validate(self, dataframe, column_name):\n received_dtypes = dataframe[column_name].dtype\n if str(received_dtypes) not in self.expected_dtype_set:\n raise ColumnConstraintViolationException(\n constraint_name=self.name,\n constraint_description=(\n f"{self.error_description}. 
DTypes received: {received_dtypes}"\n ),\n column_name=column_name,\n )\n\n\nclass NonNullableColumnConstraint(ColumnConstraint):\n """A column constraint that ensures all values in a pandas column are not null."""\n\n def __init__(self):\n description = "No Null values allowed."\n super(NonNullableColumnConstraint, self).__init__(\n error_description=description, markdown_description=description\n )\n\n def validate(self, dataframe, column_name):\n rows_with_null_columns = dataframe[dataframe[column_name].isna()]\n if not rows_with_null_columns.empty:\n raise ColumnConstraintViolationException(\n constraint_name=self.name,\n constraint_description=self.error_description,\n column_name=column_name,\n offending_rows=self.get_offending_row_pairs(rows_with_null_columns, column_name),\n )\n\n\nclass UniqueColumnConstraint(ColumnConstraint):\n """A column constraint that ensures all values in a pandas column are unique.\n\n Args:\n ignore_missing_vals (bool): If true, this constraint will enforce the constraint on non missing values.\n """\n\n def __init__(self, ignore_missing_vals):\n description = "Column must be unique."\n self.ignore_missing_vals = check.bool_param(ignore_missing_vals, "ignore_missing_vals")\n super(UniqueColumnConstraint, self).__init__(\n error_description=description, markdown_description=description\n )\n\n def validate(self, dataframe, column_name):\n invalid = dataframe[column_name].duplicated()\n if self.ignore_missing_vals:\n invalid = apply_ignore_missing_data_to_mask(invalid, dataframe[column_name])\n rows_with_duplicated_values = dataframe[invalid]\n if not rows_with_duplicated_values.empty:\n raise ColumnConstraintViolationException(\n constraint_name=self.name,\n constraint_description=self.error_description,\n column_name=column_name,\n offending_rows=rows_with_duplicated_values,\n )\n\n\nclass CategoricalColumnConstraint(ColumnConstraint):\n """A column constraint that ensures all values in a pandas column are a valid category.\n\n Args:\n categories (Set[str]): Set of categories that values in your pandas column must match.\n ignore_missing_vals (bool): If true, this constraint will enforce the constraint on non missing values.\n """\n\n def __init__(self, categories, ignore_missing_vals):\n self.categories = list(check.set_param(categories, "categories", of_type=str))\n self.ignore_missing_vals = check.bool_param(ignore_missing_vals, "ignore_missing_vals")\n super(CategoricalColumnConstraint, self).__init__(\n error_description=f"Expected Categories are {self.categories}",\n markdown_description=f"Category examples are {self.categories[:5]}...",\n )\n\n def validate(self, dataframe, column_name):\n invalid = ~dataframe[column_name].isin(self.categories)\n if self.ignore_missing_vals:\n invalid = apply_ignore_missing_data_to_mask(invalid, dataframe[column_name])\n rows_with_unexpected_buckets = dataframe[invalid]\n if not rows_with_unexpected_buckets.empty:\n raise ColumnConstraintViolationException(\n constraint_name=self.name,\n constraint_description=self.error_description,\n column_name=column_name,\n offending_rows=rows_with_unexpected_buckets,\n )\n\n\nclass MinValueColumnConstraint(ColumnConstraint):\n """A column constraint that ensures all values in a pandas column are greater than the provided\n lower bound [inclusive].\n\n Args:\n min_value (Union[int, float, datetime.datetime]): The lower bound.\n ignore_missing_vals (bool): If true, this constraint will enforce the constraint on non missing values.\n """\n\n def __init__(self, min_value, 
ignore_missing_vals):\n self.min_value = check.inst_param(min_value, "min_value", (int, float, datetime))\n self.ignore_missing_vals = check.bool_param(ignore_missing_vals, "ignore_missing_vals")\n super(MinValueColumnConstraint, self).__init__(\n markdown_description=f"values > {self.min_value}",\n error_description=f"Column must have values > {self.min_value}",\n )\n\n def validate(self, dataframe, column_name):\n invalid = dataframe[column_name] < self.min_value\n if self.ignore_missing_vals:\n invalid = apply_ignore_missing_data_to_mask(invalid, dataframe[column_name])\n out_of_bounds_rows = dataframe[invalid]\n if not out_of_bounds_rows.empty:\n raise ColumnConstraintViolationException(\n constraint_name=self.name,\n constraint_description=self.error_description,\n column_name=column_name,\n offending_rows=out_of_bounds_rows,\n )\n\n\nclass MaxValueColumnConstraint(ColumnConstraint):\n """A column constraint that ensures all values in a pandas column are less than the provided\n upper bound [inclusive].\n\n Args:\n max_value (Union[int, float, datetime.datetime]): The upper bound.\n ignore_missing_vals (bool): If true, this constraint will enforce the constraint on non missing values.\n """\n\n def __init__(self, max_value, ignore_missing_vals):\n self.max_value = check.inst_param(max_value, "max_value", (int, float, datetime))\n self.ignore_missing_vals = check.bool_param(ignore_missing_vals, "ignore_missing_vals")\n super(MaxValueColumnConstraint, self).__init__(\n markdown_description=f"values < {self.max_value}",\n error_description=f"Column must have values < {self.max_value}",\n )\n\n def validate(self, dataframe, column_name):\n invalid = dataframe[column_name] > self.max_value\n if self.ignore_missing_vals:\n invalid = apply_ignore_missing_data_to_mask(invalid, dataframe[column_name])\n out_of_bounds_rows = dataframe[invalid]\n if not out_of_bounds_rows.empty:\n raise ColumnConstraintViolationException(\n constraint_name=self.name,\n constraint_description=self.error_description,\n column_name=column_name,\n offending_rows=out_of_bounds_rows,\n )\n\n\nclass InRangeColumnConstraint(ColumnConstraint):\n """A column constraint that ensures all values in a pandas column are between the lower and upper\n bound [inclusive].\n\n Args:\n min_value (Union[int, float, datetime.datetime]): The lower bound.\n max_value (Union[int, float, datetime.datetime]): The upper bound.\n ignore_missing_vals (bool): If true, this constraint will enforce the constraint on non\n missing values.\n """\n\n def __init__(self, min_value, max_value, ignore_missing_vals):\n self.min_value = check.inst_param(min_value, "min_value", (int, float, datetime))\n self.max_value = check.inst_param(max_value, "max_value", (int, float, datetime))\n self.ignore_missing_vals = check.bool_param(ignore_missing_vals, "ignore_missing_vals")\n super(InRangeColumnConstraint, self).__init__(\n markdown_description=f"{self.min_value} < values < {self.max_value}",\n error_description="Column must have values between {} and {} inclusive.".format(\n self.min_value, self.max_value\n ),\n )\n\n def validate(self, dataframe, column_name):\n invalid = ~dataframe[column_name].between(self.min_value, self.max_value)\n if self.ignore_missing_vals:\n invalid = apply_ignore_missing_data_to_mask(invalid, dataframe[column_name])\n out_of_bounds_rows = dataframe[invalid]\n if not out_of_bounds_rows.empty:\n raise ColumnConstraintViolationException(\n constraint_name=self.name,\n constraint_description=self.error_description,\n 
column_name=column_name,\n offending_rows=out_of_bounds_rows,\n )\n
", "current_page_name": "_modules/dagster_pandas/constraints", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_pandas.constraints"}, "data_frame": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_pandas.data_frame

\nimport pandas as pd\nfrom dagster import (\n    DagsterInvariantViolationError,\n    DagsterType,\n    Field,\n    MetadataValue,\n    StringSource,\n    TableColumn,\n    TableSchema,\n    TableSchemaMetadataValue,\n    TypeCheck,\n    _check as check,\n    dagster_type_loader,\n)\nfrom dagster._annotations import experimental\nfrom dagster._config import Selector\nfrom dagster._core.definitions.metadata import normalize_metadata\nfrom dagster._utils import dict_without_keys\n\nfrom dagster_pandas.constraints import (\n    CONSTRAINT_METADATA_KEY,\n    ColumnDTypeFnConstraint,\n    ColumnDTypeInSetConstraint,\n    ConstraintViolationException,\n)\nfrom dagster_pandas.validation import PandasColumn, validate_constraints\n\nCONSTRAINT_BLACKLIST = {ColumnDTypeFnConstraint, ColumnDTypeInSetConstraint}\n\n\n@dagster_type_loader(\n    Selector(\n        {\n            "csv": {\n                "path": StringSource,\n                "sep": Field(StringSource, is_required=False, default_value=","),\n            },\n            "parquet": {"path": StringSource},\n            "table": {"path": StringSource},\n            "pickle": {"path": StringSource},\n        },\n    )\n)\ndef dataframe_loader(_context, config):\n    file_type, file_options = next(iter(config.items()))\n\n    if file_type == "csv":\n        path = file_options["path"]\n        return pd.read_csv(path, **dict_without_keys(file_options, "path"))\n    elif file_type == "parquet":\n        return pd.read_parquet(file_options["path"])\n    elif file_type == "table":\n        return pd.read_csv(file_options["path"], sep="\\t")\n    elif file_type == "pickle":\n        return pd.read_pickle(file_options["path"])\n    else:\n        raise DagsterInvariantViolationError(f"Unsupported file_type {file_type}")\n\n\ndef df_type_check(_, value):\n    if not isinstance(value, pd.DataFrame):\n        return TypeCheck(success=False)\n    return TypeCheck(\n        success=True,\n        metadata={\n            "row_count": str(len(value)),\n            # string cast columns since they may be things like datetime\n            "metadata": {"columns": list(map(str, value.columns))},\n        },\n    )\n\n\nDataFrame = DagsterType(\n    name="PandasDataFrame",\n    description="""Two-dimensional size-mutable, potentially heterogeneous\n    tabular data structure with labeled axes (rows and columns).\n    See http://pandas.pydata.org/""",\n    loader=dataframe_loader,\n    type_check_fn=df_type_check,\n    typing_type=pd.DataFrame,\n)\n\n\ndef _construct_constraint_list(constraints):\n    def add_bullet(constraint_list, constraint_description):\n        return constraint_list + f"+ {constraint_description}\\n"\n\n    constraint_list = ""\n    for constraint in constraints:\n        if constraint.__class__ not in CONSTRAINT_BLACKLIST:\n            constraint_list = add_bullet(constraint_list, constraint.markdown_description)\n    return constraint_list\n\n\ndef _build_column_header(column_name, constraints):\n    header = f"**{column_name}**"\n    for constraint in constraints:\n        if isinstance(constraint, ColumnDTypeInSetConstraint):\n            dtypes_tuple = tuple(constraint.expected_dtype_set)\n            return header + f": `{dtypes_tuple if len(dtypes_tuple) > 1 else dtypes_tuple[0]}`"\n        elif isinstance(constraint, ColumnDTypeFnConstraint):\n            return header + f": Validator `{constraint.type_fn.__name__}`"\n    return header\n\n\ndef create_dagster_pandas_dataframe_description(description, columns):\n    title = 
"\\n".join([description, "### Columns", ""])\n    buildme = title\n    for column in columns:\n        buildme += "{}\\n{}\\n".format(\n            _build_column_header(column.name, column.constraints),\n            _construct_constraint_list(column.constraints),\n        )\n    return buildme\n\n\ndef create_table_schema_metadata_from_dataframe(\n    pandas_df: pd.DataFrame,\n) -> TableSchemaMetadataValue:\n    """This function takes a pandas DataFrame and returns its metadata as a Dagster TableSchema.\n\n    Args:\n        pandas_df (pandas.DataFrame): A pandas DataFrame for which to create metadata.\n\n    Returns:\n        TableSchemaMetadataValue: returns an object with the TableSchema for the DataFrame.\n    """\n    check.inst(pandas_df, pd.DataFrame, "Input must be a pandas DataFrame object")\n    return MetadataValue.table_schema(\n        TableSchema(\n            columns=[\n                TableColumn(name=str(name), type=str(dtype))\n                for name, dtype in pandas_df.dtypes.items()\n            ]\n        )\n    )\n\n\n
[docs]def create_dagster_pandas_dataframe_type(\n name,\n description=None,\n columns=None,\n metadata_fn=None,\n dataframe_constraints=None,\n loader=None,\n):\n """Constructs a custom pandas dataframe dagster type.\n\n Args:\n name (str): Name of the dagster pandas type.\n description (Optional[str]): A markdown-formatted string, displayed in tooling.\n columns (Optional[List[PandasColumn]]): A list of :py:class:`~dagster.PandasColumn` objects\n which express dataframe column schemas and constraints.\n metadata_fn (Optional[Callable[[], Union[Dict[str, Union[str, float, int, Dict, MetadataValue]])\n A callable which takes your dataframe and returns a dict with string label keys and\n MetadataValue values.\n dataframe_constraints (Optional[List[DataFrameConstraint]]): A list of objects that inherit from\n :py:class:`~dagster.DataFrameConstraint`. This allows you to express dataframe-level constraints.\n loader (Optional[DagsterTypeLoader]): An instance of a class that\n inherits from :py:class:`~dagster.DagsterTypeLoader`. If None, we will default\n to using `dataframe_loader`.\n """\n # We allow for the plugging in of a dagster_type_loader so that users can load their custom\n # dataframes via configuration their own way if the default configs don't suffice. This is\n # purely optional.\n check.str_param(name, "name")\n metadata_fn = check.opt_callable_param(metadata_fn, "metadata_fn")\n description = create_dagster_pandas_dataframe_description(\n check.opt_str_param(description, "description", default=""),\n check.opt_list_param(columns, "columns", of_type=PandasColumn),\n )\n\n def _dagster_type_check(_, value):\n if not isinstance(value, pd.DataFrame):\n return TypeCheck(\n success=False,\n description=(\n f"Must be a pandas.DataFrame. Got value of type. {type(value).__name__}"\n ),\n )\n\n try:\n validate_constraints(\n value,\n pandas_columns=columns,\n dataframe_constraints=dataframe_constraints,\n )\n except ConstraintViolationException as e:\n return TypeCheck(success=False, description=str(e))\n\n return TypeCheck(\n success=True,\n metadata=_execute_summary_stats(name, value, metadata_fn) if metadata_fn else None,\n )\n\n return DagsterType(\n name=name,\n type_check_fn=_dagster_type_check,\n loader=loader if loader else dataframe_loader,\n description=description,\n typing_type=pd.DataFrame,\n )
\n\n\n@experimental\ndef create_structured_dataframe_type(\n name,\n description=None,\n columns_validator=None,\n columns_aggregate_validator=None,\n dataframe_validator=None,\n loader=None,\n):\n """Args:\n name (str): the name of the new type\n description (Optional[str]): the description of the new type\n columns_validator (Optional[Union[ColumnConstraintWithMetadata, MultiColumnConstraintWithMetadata]]):\n what column-level row by row validation you want to have applied.\n Leave empty for no column-level row by row validation.\n columns_aggregate_validator (Optional[Union[ColumnAggregateConstraintWithMetadata,\n MultiAggregateConstraintWithMetadata]]):\n what column-level aggregate validation you want to have applied,\n Leave empty for no column-level aggregate validation.\n dataframe_validator (Optional[Union[ConstraintWithMetadata, MultiConstraintWithMetadata]]):\n what dataframe-wide validation you want to have applied.\n Leave empty for no dataframe-wide validation.\n loader (Optional[DagsterTypeLoader]): An instance of a class that\n inherits from :py:class:`~dagster.DagsterTypeLoader`. If None, we will default\n to using `dataframe_loader`.\n\n Returns:\n a DagsterType with the corresponding name and packaged validation.\n\n """\n\n def _dagster_type_check(_, value):\n if not isinstance(value, pd.DataFrame):\n return TypeCheck(\n success=False,\n description=(\n f"Must be a pandas.DataFrame. Got value of type. {type(value).__name__}"\n ),\n )\n individual_result_dict = {}\n\n if dataframe_validator is not None:\n individual_result_dict["dataframe"] = dataframe_validator.validate(value)\n if columns_validator is not None:\n individual_result_dict["columns"] = columns_validator.validate(value)\n\n if columns_aggregate_validator is not None:\n individual_result_dict["column-aggregates"] = columns_aggregate_validator.validate(\n value\n )\n\n typechecks_succeeded = True\n metadata = {}\n overall_description = "Failed Constraints: {}"\n constraint_clauses = []\n for key, result in individual_result_dict.items():\n result_val = result.success\n if result_val:\n continue\n typechecks_succeeded = typechecks_succeeded and result_val\n result_dict = result.metadata[CONSTRAINT_METADATA_KEY].data\n metadata[f"{key}-constraint-metadata"] = MetadataValue.json(result_dict)\n constraint_clauses.append(f"{key} failing constraints, {result.description}")\n # returns aggregates, then column, then dataframe\n return TypeCheck(\n success=typechecks_succeeded,\n description=overall_description.format(constraint_clauses),\n metadata=metadata,\n )\n\n description = check.opt_str_param(description, "description", default="")\n return DagsterType(\n name=name,\n type_check_fn=_dagster_type_check,\n loader=loader if loader else dataframe_loader,\n description=description,\n )\n\n\ndef _execute_summary_stats(type_name, value, metadata_fn):\n if not metadata_fn:\n return []\n\n user_metadata = metadata_fn(value)\n try:\n return normalize_metadata(user_metadata)\n except:\n raise DagsterInvariantViolationError(\n "The return value of the user-defined summary_statistics function for pandas "\n f"data frame type {type_name} returned {value}. This function must return "\n "Dict[str, RawMetadataValue]."\n )\n
", "current_page_name": "_modules/dagster_pandas/data_frame", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_pandas.data_frame"}, "validation": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_pandas.validation

\nfrom dagster import (\n    DagsterInvariantViolationError,\n    _check as check,\n)\nfrom pandas import DataFrame, Timestamp\nfrom pandas.core.dtypes.common import (\n    is_bool_dtype,\n    is_float_dtype,\n    is_integer_dtype,\n    is_numeric_dtype,\n    is_string_dtype,\n)\n\nfrom dagster_pandas.constraints import (\n    CategoricalColumnConstraint,\n    ColumnDTypeFnConstraint,\n    ColumnDTypeInSetConstraint,\n    Constraint,\n    ConstraintViolationException,\n    DataFrameConstraint,\n    InRangeColumnConstraint,\n    NonNullableColumnConstraint,\n    UniqueColumnConstraint,\n)\n\nPANDAS_NUMERIC_TYPES = {"int64", "float"}\n\n\ndef _construct_keyword_constraints(non_nullable, unique, ignore_missing_vals):\n    non_nullable = check.bool_param(non_nullable, "exists")\n    unique = check.bool_param(unique, "unique")\n    ignore_missing_vals = check.bool_param(ignore_missing_vals, "ignore_missing_vals")\n    if non_nullable and ignore_missing_vals:\n        raise DagsterInvariantViolationError(\n            "PandasColumn cannot have a non-null constraint while also ignore missing values"\n        )\n    constraints = []\n    if non_nullable:\n        constraints.append(NonNullableColumnConstraint())\n    if unique:\n        constraints.append(UniqueColumnConstraint(ignore_missing_vals=ignore_missing_vals))\n    return constraints\n\n\n
[docs]class PandasColumn:\n """The main API for expressing column level schemas and constraints for your custom dataframe\n types.\n\n Args:\n name (str): Name of the column. This must match up with the column name in the dataframe you\n expect to receive.\n is_required (Optional[bool]): Flag indicating the optional/required presence of the column.\n If th column exists, the validate function will validate the column. Defaults to True.\n constraints (Optional[List[Constraint]]): List of constraint objects that indicate the\n validation rules for the pandas column.\n """\n\n def __init__(self, name, constraints=None, is_required=None):\n self.name = check.str_param(name, "name")\n self.is_required = check.opt_bool_param(is_required, "is_required", default=True)\n self.constraints = check.opt_list_param(constraints, "constraints", of_type=Constraint)\n\n def validate(self, dataframe):\n if self.name not in dataframe.columns:\n # Ignore validation if column is missing from dataframe and is not required\n if self.is_required:\n raise ConstraintViolationException(\n f"Required column {self.name} not in dataframe with columns {dataframe.columns}"\n )\n else:\n for constraint in self.constraints:\n constraint.validate(dataframe, self.name)\n\n @staticmethod\n def exists(name, non_nullable=False, unique=False, ignore_missing_vals=False, is_required=None):\n """Simple constructor for PandasColumns that expresses existence constraints.\n\n Args:\n name (str): Name of the column. This must match up with the column name in the dataframe you\n expect to receive.\n non_nullable (Optional[bool]): If true, this column will enforce a constraint that all values in the column\n ought to be non null values.\n unique (Optional[bool]): If true, this column will enforce a uniqueness constraint on the column values.\n ignore_missing_vals (Optional[bool]): A flag that is passed into most constraints. If true, the constraint will\n only evaluate non-null data. Ignore_missing_vals and non_nullable cannot both be True.\n is_required (Optional[bool]): Flag indicating the optional/required presence of the column.\n If the column exists the validate function will validate the column. Default to True.\n """\n return PandasColumn(\n name=check.str_param(name, "name"),\n constraints=_construct_keyword_constraints(\n non_nullable=non_nullable, unique=unique, ignore_missing_vals=ignore_missing_vals\n ),\n is_required=is_required,\n )\n\n @staticmethod\n def boolean_column(\n name, non_nullable=False, unique=False, ignore_missing_vals=False, is_required=None\n ):\n """Simple constructor for PandasColumns that expresses boolean constraints on boolean dtypes.\n\n Args:\n name (str): Name of the column. This must match up with the column name in the dataframe you\n expect to receive.\n non_nullable (Optional[bool]): If true, this column will enforce a constraint that all values in the column\n ought to be non null values.\n unique (Optional[bool]): If true, this column will enforce a uniqueness constraint on the column values.\n ignore_missing_vals (Optional[bool]): A flag that is passed into most constraints. If true, the constraint will\n only evaluate non-null data. Ignore_missing_vals and non_nullable cannot both be True.\n is_required (Optional[bool]): Flag indicating the optional/required presence of the column.\n If the column exists the validate function will validate the column. 
Default to True.\n """\n return PandasColumn(\n name=check.str_param(name, "name"),\n constraints=[ColumnDTypeFnConstraint(is_bool_dtype)]\n + _construct_keyword_constraints(\n non_nullable=non_nullable, unique=unique, ignore_missing_vals=ignore_missing_vals\n ),\n is_required=is_required,\n )\n\n @staticmethod\n def numeric_column(\n name,\n min_value=-float("inf"),\n max_value=float("inf"),\n non_nullable=False,\n unique=False,\n ignore_missing_vals=False,\n is_required=None,\n ):\n """Simple constructor for PandasColumns that expresses numeric constraints numeric dtypes.\n\n Args:\n name (str): Name of the column. This must match up with the column name in the dataframe you\n expect to receive.\n min_value (Optional[Union[int,float]]): The lower bound for values you expect in this column. Defaults to -float('inf')\n max_value (Optional[Union[int,float]]): The upper bound for values you expect in this column. Defaults to float('inf')\n non_nullable (Optional[bool]): If true, this column will enforce a constraint that all values in the column\n ought to be non null values.\n unique (Optional[bool]): If true, this column will enforce a uniqueness constraint on the column values.\n ignore_missing_vals (Optional[bool]): A flag that is passed into most constraints. If true, the constraint will\n only evaluate non-null data. Ignore_missing_vals and non_nullable cannot both be True.\n is_required (Optional[bool]): Flag indicating the optional/required presence of the column.\n If the column exists the validate function will validate the column. Default to True.\n """\n return PandasColumn(\n name=check.str_param(name, "name"),\n constraints=[\n ColumnDTypeFnConstraint(is_numeric_dtype),\n InRangeColumnConstraint(\n check.numeric_param(min_value, "min_value"),\n check.numeric_param(max_value, "max_value"),\n ignore_missing_vals=ignore_missing_vals,\n ),\n ]\n + _construct_keyword_constraints(\n non_nullable=non_nullable, unique=unique, ignore_missing_vals=ignore_missing_vals\n ),\n is_required=is_required,\n )\n\n @staticmethod\n def integer_column(\n name,\n min_value=-float("inf"),\n max_value=float("inf"),\n non_nullable=False,\n unique=False,\n ignore_missing_vals=False,\n is_required=None,\n ):\n """Simple constructor for PandasColumns that expresses numeric constraints on integer dtypes.\n\n Args:\n name (str): Name of the column. This must match up with the column name in the dataframe you\n expect to receive.\n min_value (Optional[Union[int,float]]): The lower bound for values you expect in this column. Defaults to -float('inf')\n max_value (Optional[Union[int,float]]): The upper bound for values you expect in this column. Defaults to float('inf')\n non_nullable (Optional[bool]): If true, this column will enforce a constraint that all values in the column\n ought to be non null values.\n unique (Optional[bool]): If true, this column will enforce a uniqueness constraint on the column values.\n ignore_missing_vals (Optional[bool]): A flag that is passed into most constraints. If true, the constraint will\n only evaluate non-null data. Ignore_missing_vals and non_nullable cannot both be True.\n is_required (Optional[bool]): Flag indicating the optional/required presence of the column.\n If the column exists the validate function will validate the column. 
Default to True.\n """\n return PandasColumn(\n name=check.str_param(name, "name"),\n constraints=[\n ColumnDTypeFnConstraint(is_integer_dtype),\n InRangeColumnConstraint(\n check.numeric_param(min_value, "min_value"),\n check.numeric_param(max_value, "max_value"),\n ignore_missing_vals=ignore_missing_vals,\n ),\n ]\n + _construct_keyword_constraints(\n non_nullable=non_nullable, unique=unique, ignore_missing_vals=ignore_missing_vals\n ),\n is_required=is_required,\n )\n\n @staticmethod\n def float_column(\n name,\n min_value=-float("inf"),\n max_value=float("inf"),\n non_nullable=False,\n unique=False,\n ignore_missing_vals=False,\n is_required=None,\n ):\n """Simple constructor for PandasColumns that expresses numeric constraints on float dtypes.\n\n Args:\n name (str): Name of the column. This must match up with the column name in the dataframe you\n expect to receive.\n min_value (Optional[Union[int,float]]): The lower bound for values you expect in this column. Defaults to -float('inf')\n max_value (Optional[Union[int,float]]): The upper bound for values you expect in this column. Defaults to float('inf')\n non_nullable (Optional[bool]): If true, this column will enforce a constraint that all values in the column\n ought to be non null values.\n unique (Optional[bool]): If true, this column will enforce a uniqueness constraint on the column values.\n ignore_missing_vals (Optional[bool]): A flag that is passed into most constraints. If true, the constraint will\n only evaluate non-null data. Ignore_missing_vals and non_nullable cannot both be True.\n is_required (Optional[bool]): Flag indicating the optional/required presence of the column.\n If the column exists the validate function will validate the column. Default to True.\n """\n return PandasColumn(\n name=check.str_param(name, "name"),\n constraints=[\n ColumnDTypeFnConstraint(is_float_dtype),\n InRangeColumnConstraint(\n check.numeric_param(min_value, "min_value"),\n check.numeric_param(max_value, "max_value"),\n ignore_missing_vals=ignore_missing_vals,\n ),\n ]\n + _construct_keyword_constraints(\n non_nullable=non_nullable, unique=unique, ignore_missing_vals=ignore_missing_vals\n ),\n is_required=is_required,\n )\n\n @staticmethod\n def datetime_column(\n name,\n min_datetime=Timestamp.min,\n max_datetime=Timestamp.max,\n non_nullable=False,\n unique=False,\n ignore_missing_vals=False,\n is_required=None,\n tz=None,\n ):\n """Simple constructor for PandasColumns that expresses datetime constraints on 'datetime64[ns]' dtypes.\n\n Args:\n name (str): Name of the column. This must match up with the column name in the dataframe you\n expect to receive.\n min_datetime (Optional[Union[int,float]]): The lower bound for values you expect in this column.\n Defaults to pandas.Timestamp.min.\n max_datetime (Optional[Union[int,float]]): The upper bound for values you expect in this column.\n Defaults to pandas.Timestamp.max.\n non_nullable (Optional[bool]): If true, this column will enforce a constraint that all values in the column\n ought to be non null values.\n unique (Optional[bool]): If true, this column will enforce a uniqueness constraint on the column values.\n ignore_missing_vals (Optional[bool]): A flag that is passed into most constraints. If true, the constraint will\n only evaluate non-null data. Ignore_missing_vals and non_nullable cannot both be True.\n is_required (Optional[bool]): Flag indicating the optional/required presence of the column.\n If the column exists the validate function will validate the column. 
Default to True.\n tz (Optional[str]): Required timezone for values eg: tz='UTC', tz='Europe/Dublin', tz='US/Eastern'.\n Defaults to None, meaning naive datetime values.\n """\n if tz is None:\n datetime_constraint = ColumnDTypeInSetConstraint({"datetime64[ns]"})\n else:\n datetime_constraint = ColumnDTypeInSetConstraint({f"datetime64[ns, {tz}]"})\n # One day more/less than absolute min/max to prevent OutOfBoundsDatetime errors when converting min/max to be tz aware\n if min_datetime.tz_localize(None) == Timestamp.min:\n min_datetime = Timestamp("1677-09-22 00:12:43.145225Z")\n if max_datetime.tz_localize(None) == Timestamp.max:\n max_datetime = Timestamp("2262-04-10 23:47:16.854775807Z")\n # Convert bounds to same tz\n if Timestamp(min_datetime).tz is None:\n min_datetime = Timestamp(min_datetime).tz_localize(tz)\n if Timestamp(max_datetime).tz is None:\n max_datetime = Timestamp(max_datetime).tz_localize(tz)\n\n return PandasColumn(\n name=check.str_param(name, "name"),\n constraints=[\n datetime_constraint,\n InRangeColumnConstraint(\n min_datetime, max_datetime, ignore_missing_vals=ignore_missing_vals\n ),\n ]\n + _construct_keyword_constraints(\n non_nullable=non_nullable, unique=unique, ignore_missing_vals=ignore_missing_vals\n ),\n is_required=is_required,\n )\n\n @staticmethod\n def string_column(\n name, non_nullable=False, unique=False, ignore_missing_vals=False, is_required=None\n ):\n """Simple constructor for PandasColumns that expresses constraints on string dtypes.\n\n Args:\n name (str): Name of the column. This must match up with the column name in the dataframe you\n expect to receive.\n non_nullable (Optional[bool]): If true, this column will enforce a constraint that all values in the column\n ought to be non null values.\n unique (Optional[bool]): If true, this column will enforce a uniqueness constraint on the column values.\n ignore_missing_vals (Optional[bool]): A flag that is passed into most constraints. If true, the constraint will\n only evaluate non-null data. Ignore_missing_vals and non_nullable cannot both be True.\n is_required (Optional[bool]): Flag indicating the optional/required presence of the column.\n If the column exists the validate function will validate the column. Default to True.\n """\n return PandasColumn(\n name=check.str_param(name, "name"),\n constraints=[ColumnDTypeFnConstraint(is_string_dtype)]\n + _construct_keyword_constraints(\n non_nullable=non_nullable, unique=unique, ignore_missing_vals=ignore_missing_vals\n ),\n is_required=is_required,\n )\n\n @staticmethod\n def categorical_column(\n name,\n categories,\n of_types=frozenset({"category", "object"}),\n non_nullable=False,\n unique=False,\n ignore_missing_vals=False,\n is_required=None,\n ):\n """Simple constructor for PandasColumns that expresses categorical constraints on specified dtypes.\n\n Args:\n name (str): Name of the column. This must match up with the column name in the dataframe you\n expect to receive.\n categories (List[Any]): The valid set of buckets that all values in the column must match.\n of_types (Optional[Union[str, Set[str]]]): The expected dtype[s] that your categories and values must\n abide by.\n non_nullable (Optional[bool]): If true, this column will enforce a constraint that all values in\n the column ought to be non null values.\n unique (Optional[bool]): If true, this column will enforce a uniqueness constraint on the column values.\n ignore_missing_vals (Optional[bool]): A flag that is passed into most constraints. 
If true, the\n constraint will only evaluate non-null data. Ignore_missing_vals and non_nullable cannot both be True.\n is_required (Optional[bool]): Flag indicating the optional/required presence of the column.\n If the column exists the validate function will validate the column. Default to True.\n """\n of_types = {of_types} if isinstance(of_types, str) else of_types\n return PandasColumn(\n name=check.str_param(name, "name"),\n constraints=[\n ColumnDTypeInSetConstraint(of_types),\n CategoricalColumnConstraint(categories, ignore_missing_vals=ignore_missing_vals),\n ]\n + _construct_keyword_constraints(\n non_nullable=non_nullable, unique=unique, ignore_missing_vals=ignore_missing_vals\n ),\n is_required=is_required,\n )
\n\n\ndef validate_constraints(dataframe, pandas_columns=None, dataframe_constraints=None):\n dataframe = check.inst_param(dataframe, "dataframe", DataFrame)\n pandas_columns = check.opt_list_param(\n pandas_columns, "column_constraints", of_type=PandasColumn\n )\n dataframe_constraints = check.opt_list_param(\n dataframe_constraints, "dataframe_constraints", of_type=DataFrameConstraint\n )\n\n if pandas_columns:\n for column in pandas_columns:\n column.validate(dataframe)\n\n if dataframe_constraints:\n for dataframe_constraint in dataframe_constraints:\n dataframe_constraint.validate(dataframe)\n
", "current_page_name": "_modules/dagster_pandas/validation", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_pandas.validation"}}, "dagster_postgres": {"event_log": {"event_log": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_postgres.event_log.event_log

\nfrom typing import Any, ContextManager, Mapping, Optional, Sequence\n\nimport dagster._check as check\nimport sqlalchemy as db\nimport sqlalchemy.dialects as db_dialects\nimport sqlalchemy.pool as db_pool\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._core.errors import DagsterInvariantViolationError\nfrom dagster._core.event_api import EventHandlerFn\nfrom dagster._core.events import ASSET_CHECK_EVENTS, ASSET_EVENTS\nfrom dagster._core.events.log import EventLogEntry\nfrom dagster._core.storage.config import pg_config\nfrom dagster._core.storage.event_log import (\n    AssetKeyTable,\n    DynamicPartitionsTable,\n    SqlEventLogStorage,\n    SqlEventLogStorageMetadata,\n    SqlEventLogStorageTable,\n)\nfrom dagster._core.storage.event_log.base import EventLogCursor\nfrom dagster._core.storage.event_log.migration import ASSET_KEY_INDEX_COLS\nfrom dagster._core.storage.event_log.polling_event_watcher import SqlPollingEventWatcher\nfrom dagster._core.storage.sql import (\n    AlembicVersion,\n    check_alembic_revision,\n    create_engine,\n    run_alembic_upgrade,\n    stamp_alembic_rev,\n)\nfrom dagster._core.storage.sqlalchemy_compat import db_select\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData, deserialize_value\nfrom sqlalchemy.engine import Connection\n\nfrom ..utils import (\n    create_pg_connection,\n    pg_alembic_config,\n    pg_statement_timeout,\n    pg_url_from_config,\n    retry_pg_connection_fn,\n    retry_pg_creation_fn,\n)\n\nCHANNEL_NAME = "run_events"\n\n\n
[docs]class PostgresEventLogStorage(SqlEventLogStorage, ConfigurableClass):\n """Postgres-backed event log storage.\n\n Users should not directly instantiate this class; it is instantiated by internal machinery when\n ``dagster-webserver`` and ``dagster-graphql`` load, based on the values in the ``dagster.yaml`` file in\n ``$DAGSTER_HOME``. Configuration of this class should be done by setting values in that file.\n\n To use Postgres for all of the components of your instance storage, you can add the following\n block to your ``dagster.yaml``:\n\n .. literalinclude:: ../../../../../../examples/docs_snippets/docs_snippets/deploying/dagster-pg.yaml\n :caption: dagster.yaml\n :lines: 1-8\n :language: YAML\n\n If you are configuring the different storage components separately and are specifically\n configuring your event log storage to use Postgres, you can add a block such as the following\n to your ``dagster.yaml``:\n\n .. literalinclude:: ../../../../../../examples/docs_snippets/docs_snippets/deploying/dagster-pg-legacy.yaml\n :caption: dagster.yaml\n :lines: 12-21\n :language: YAML\n\n Note that the fields in this config are :py:class:`~dagster.StringSource` and\n :py:class:`~dagster.IntSource` and can be configured from environment variables.\n\n """\n\n def __init__(\n self,\n postgres_url: str,\n should_autocreate_tables: bool = True,\n inst_data: Optional[ConfigurableClassData] = None,\n ):\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n self.postgres_url = check.str_param(postgres_url, "postgres_url")\n self.should_autocreate_tables = check.bool_param(\n should_autocreate_tables, "should_autocreate_tables"\n )\n\n self._disposed = False\n\n # Default to not holding any connections open to prevent accumulating connections per DagsterInstance\n self._engine = create_engine(\n self.postgres_url, isolation_level="AUTOCOMMIT", poolclass=db_pool.NullPool\n )\n\n self._event_watcher = SqlPollingEventWatcher(self)\n\n self._secondary_index_cache = {}\n\n # Stamp and create tables if the main table does not exist (we can't check alembic\n # revision because alembic config may be shared with other storage classes)\n if self.should_autocreate_tables:\n table_names = retry_pg_connection_fn(lambda: db.inspect(self._engine).get_table_names())\n if "event_logs" not in table_names:\n retry_pg_creation_fn(self._init_db)\n self.reindex_events()\n self.reindex_assets()\n\n super().__init__()\n\n def _init_db(self) -> None:\n with self._connect() as conn:\n with conn.begin():\n SqlEventLogStorageMetadata.create_all(conn)\n stamp_alembic_rev(pg_alembic_config(__file__), conn)\n\n def optimize_for_webserver(self, statement_timeout: int, pool_recycle: int) -> None:\n # When running in dagster-webserver, hold an open connection and set statement_timeout\n existing_options = self._engine.url.query.get("options")\n timeout_option = pg_statement_timeout(statement_timeout)\n if existing_options:\n options = f"{timeout_option} {existing_options}"\n else:\n options = timeout_option\n self._engine = create_engine(\n self.postgres_url,\n isolation_level="AUTOCOMMIT",\n pool_size=1,\n connect_args={"options": options},\n pool_recycle=pool_recycle,\n )\n\n def upgrade(self) -> None:\n alembic_config = pg_alembic_config(__file__)\n with self._connect() as conn:\n run_alembic_upgrade(alembic_config, conn)\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n @classmethod\n def config_type(cls) -> UserConfigSchema:\n return 
pg_config()\n\n @classmethod\n def from_config_value(\n cls, inst_data: Optional[ConfigurableClassData], config_value: Mapping[str, Any]\n ) -> "PostgresEventLogStorage":\n return PostgresEventLogStorage(\n inst_data=inst_data,\n postgres_url=pg_url_from_config(config_value),\n should_autocreate_tables=config_value.get("should_autocreate_tables", True),\n )\n\n @staticmethod\n def create_clean_storage(\n conn_string: str, should_autocreate_tables: bool = True\n ) -> "PostgresEventLogStorage":\n engine = create_engine(\n conn_string, isolation_level="AUTOCOMMIT", poolclass=db_pool.NullPool\n )\n try:\n SqlEventLogStorageMetadata.drop_all(engine)\n finally:\n engine.dispose()\n\n return PostgresEventLogStorage(conn_string, should_autocreate_tables)\n\n def store_event(self, event: EventLogEntry) -> None:\n """Store an event corresponding to a run.\n\n Args:\n event (EventLogEntry): The event to store.\n """\n check.inst_param(event, "event", EventLogEntry)\n insert_event_statement = self.prepare_insert_event(event) # from SqlEventLogStorage.py\n with self._connect() as conn:\n result = conn.execute(\n insert_event_statement.returning(\n SqlEventLogStorageTable.c.run_id, SqlEventLogStorageTable.c.id\n )\n )\n res = result.fetchone()\n result.close()\n\n # LISTEN/NOTIFY no longer used for pg event watch - preserved here to support version skew\n conn.execute(\n db.text(f"""NOTIFY {CHANNEL_NAME}, :notify_id; """),\n {"notify_id": res[0] + "_" + str(res[1])}, # type: ignore\n )\n event_id = int(res[1]) # type: ignore\n\n if (\n event.is_dagster_event\n and event.dagster_event_type in ASSET_EVENTS\n and event.dagster_event.asset_key # type: ignore\n ):\n self.store_asset_event(event, event_id)\n\n if event_id is None:\n raise DagsterInvariantViolationError(\n "Cannot store asset event tags for null event id."\n )\n\n self.store_asset_event_tags(event, event_id)\n\n if event.is_dagster_event and event.dagster_event_type in ASSET_CHECK_EVENTS:\n self.store_asset_check_event(event, event_id)\n\n def store_asset_event(self, event: EventLogEntry, event_id: int) -> None:\n check.inst_param(event, "event", EventLogEntry)\n if not (event.dagster_event and event.dagster_event.asset_key):\n return\n\n # We switched to storing the entire event record of the last materialization instead of just\n # the AssetMaterialization object, so that we have access to metadata like timestamp,\n # job, run_id, etc.\n #\n # This should make certain asset queries way more performant, without having to do extra\n # queries against the event log.\n #\n # This should be accompanied by a schema change in 0.12.0, renaming `last_materialization`\n # to `last_materialization_event`, for clarity. 
For now, we should do some back-compat.\n #\n # https://github.com/dagster-io/dagster/issues/3945\n\n # The AssetKeyTable contains a `last_materialization_timestamp` column that is exclusively\n # used to determine if an asset exists (last materialization timestamp > wipe timestamp).\n # This column is used nowhere else, and as of AssetObservation/AssetMaterializationPlanned\n # event creation, we want to extend this functionality to ensure that assets with any event\n # (observation, materialization, or materialization planned) yielded with timestamp\n # > wipe timestamp display in the Dagster UI.\n\n # As of the following PRs, we update last_materialization_timestamp to store the timestamp\n # of the latest asset observation, materialization, or materialization_planned that has occurred.\n # https://github.com/dagster-io/dagster/pull/6885\n # https://github.com/dagster-io/dagster/pull/7319\n\n # The AssetKeyTable also contains a `last_run_id` column that is updated upon asset\n # materialization. This column was not being used until the below PR. This new change\n # writes to the column upon `ASSET_MATERIALIZATION_PLANNED` events to fetch the last\n # run id for a set of assets in one roundtrip call to event log storage.\n # https://github.com/dagster-io/dagster/pull/7319\n\n values = self._get_asset_entry_values(\n event, event_id, self.has_secondary_index(ASSET_KEY_INDEX_COLS)\n )\n with self.index_connection() as conn:\n query = db_dialects.postgresql.insert(AssetKeyTable).values(\n asset_key=event.dagster_event.asset_key.to_string(),\n **values,\n )\n if values:\n query = query.on_conflict_do_update(\n index_elements=[AssetKeyTable.c.asset_key],\n set_=dict(**values),\n )\n else:\n query = query.on_conflict_do_nothing()\n conn.execute(query)\n\n def add_dynamic_partitions(\n self, partitions_def_name: str, partition_keys: Sequence[str]\n ) -> None:\n if not partition_keys:\n return\n\n # Overload base implementation to push upsert logic down into the db layer\n self._check_partitions_table()\n with self.index_connection() as conn:\n conn.execute(\n db_dialects.postgresql.insert(DynamicPartitionsTable)\n .values(\n [\n dict(partitions_def_name=partitions_def_name, partition=partition_key)\n for partition_key in partition_keys\n ]\n )\n .on_conflict_do_nothing(),\n )\n\n def _connect(self) -> ContextManager[Connection]:\n return create_pg_connection(self._engine)\n\n def run_connection(self, run_id: Optional[str] = None) -> ContextManager[Connection]:\n return self._connect()\n\n def index_connection(self) -> ContextManager[Connection]:\n return self._connect()\n\n def has_table(self, table_name: str) -> bool:\n return bool(self._engine.dialect.has_table(self._engine.connect(), table_name))\n\n def has_secondary_index(self, name: str) -> bool:\n if name not in self._secondary_index_cache:\n self._secondary_index_cache[name] = super(\n PostgresEventLogStorage, self\n ).has_secondary_index(name)\n return self._secondary_index_cache[name]\n\n def enable_secondary_index(self, name: str) -> None:\n super(PostgresEventLogStorage, self).enable_secondary_index(name)\n if name in self._secondary_index_cache:\n del self._secondary_index_cache[name]\n\n def watch(\n self,\n run_id: str,\n cursor: Optional[str],\n callback: EventHandlerFn,\n ) -> None:\n if cursor and EventLogCursor.parse(cursor).is_offset_cursor():\n check.failed("Cannot call `watch` with an offset cursor")\n\n self._event_watcher.watch_run(run_id, cursor, callback)\n\n def _gen_event_log_entry_from_cursor(self, cursor) -> 
EventLogEntry:\n with self._engine.connect() as conn:\n cursor_res = conn.execute(\n db_select([SqlEventLogStorageTable.c.event]).where(\n SqlEventLogStorageTable.c.id == cursor\n ),\n )\n return deserialize_value(cursor_res.scalar(), EventLogEntry) # type: ignore\n\n def end_watch(self, run_id: str, handler: EventHandlerFn) -> None:\n self._event_watcher.unwatch_run(run_id, handler)\n\n def __del__(self) -> None:\n # Keep the inherent limitations of __del__ in Python in mind!\n self.dispose()\n\n def dispose(self) -> None:\n if not self._disposed:\n self._disposed = True\n self._event_watcher.close()\n\n def alembic_version(self) -> AlembicVersion:\n alembic_config = pg_alembic_config(__file__)\n with self._connect() as conn:\n return check_alembic_revision(alembic_config, conn)
\n
", "current_page_name": "_modules/dagster_postgres/event_log/event_log", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_postgres.event_log.event_log"}}, "run_storage": {"run_storage": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_postgres.run_storage.run_storage

\nimport zlib\nfrom typing import ContextManager, Mapping, Optional\n\nimport dagster._check as check\nimport sqlalchemy as db\nimport sqlalchemy.dialects as db_dialects\nimport sqlalchemy.pool as db_pool\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._core.storage.config import PostgresStorageConfig, pg_config\nfrom dagster._core.storage.runs import (\n    DaemonHeartbeatsTable,\n    InstanceInfo,\n    RunStorageSqlMetadata,\n    SqlRunStorage,\n)\nfrom dagster._core.storage.runs.schema import KeyValueStoreTable, SnapshotsTable\nfrom dagster._core.storage.runs.sql_run_storage import SnapshotType\nfrom dagster._core.storage.sql import (\n    AlembicVersion,\n    check_alembic_revision,\n    create_engine,\n    run_alembic_upgrade,\n    stamp_alembic_rev,\n)\nfrom dagster._daemon.types import DaemonHeartbeat\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData, serialize_value\nfrom dagster._utils import utc_datetime_from_timestamp\nfrom sqlalchemy.engine import Connection\n\nfrom ..utils import (\n    create_pg_connection,\n    pg_alembic_config,\n    pg_statement_timeout,\n    pg_url_from_config,\n    retry_pg_connection_fn,\n    retry_pg_creation_fn,\n)\n\n\n
[docs]class PostgresRunStorage(SqlRunStorage, ConfigurableClass):\n """Postgres-backed run storage.\n\n Users should not directly instantiate this class; it is instantiated by internal machinery when\n ``dagster-webserver`` and ``dagster-graphql`` load, based on the values in the ``dagster.yaml`` file in\n ``$DAGSTER_HOME``. Configuration of this class should be done by setting values in that file.\n\n To use Postgres for all of the components of your instance storage, you can add the following\n block to your ``dagster.yaml``:\n\n .. literalinclude:: ../../../../../../examples/docs_snippets/docs_snippets/deploying/dagster-pg.yaml\n :caption: dagster.yaml\n :lines: 1-8\n :language: YAML\n\n If you are configuring the different storage components separately and are specifically\n configuring your run storage to use Postgres, you can add a block such as the following\n to your ``dagster.yaml``:\n\n .. literalinclude:: ../../../../../../examples/docs_snippets/docs_snippets/deploying/dagster-pg-legacy.yaml\n :caption: dagster.yaml\n :lines: 1-10\n :language: YAML\n\n Note that the fields in this config are :py:class:`~dagster.StringSource` and\n :py:class:`~dagster.IntSource` and can be configured from environment variables.\n """\n\n def __init__(\n self,\n postgres_url: str,\n should_autocreate_tables: bool = True,\n inst_data: Optional[ConfigurableClassData] = None,\n ):\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n self.postgres_url = postgres_url\n self.should_autocreate_tables = check.bool_param(\n should_autocreate_tables, "should_autocreate_tables"\n )\n\n # Default to not holding any connections open to prevent accumulating connections per DagsterInstance\n self._engine = create_engine(\n self.postgres_url,\n isolation_level="AUTOCOMMIT",\n poolclass=db_pool.NullPool,\n )\n\n self._index_migration_cache = {}\n\n # Stamp and create tables if the main table does not exist (we can't check alembic\n # revision because alembic config may be shared with other storage classes)\n if self.should_autocreate_tables:\n table_names = retry_pg_connection_fn(lambda: db.inspect(self._engine).get_table_names())\n if "runs" not in table_names:\n retry_pg_creation_fn(self._init_db)\n self.migrate()\n self.optimize()\n elif "instance_info" not in table_names:\n InstanceInfo.create(self._engine)\n\n super().__init__()\n\n def _init_db(self) -> None:\n with self.connect() as conn:\n with conn.begin():\n RunStorageSqlMetadata.create_all(conn)\n # This revision may be shared by any other dagster storage classes using the same DB\n stamp_alembic_rev(pg_alembic_config(__file__), conn)\n\n def optimize_for_webserver(self, statement_timeout: int, pool_recycle: int) -> None:\n # When running in dagster-webserver, hold 1 open connection and set statement_timeout\n existing_options = self._engine.url.query.get("options")\n timeout_option = pg_statement_timeout(statement_timeout)\n if existing_options:\n options = f"{timeout_option} {existing_options}"\n else:\n options = timeout_option\n self._engine = create_engine(\n self.postgres_url,\n isolation_level="AUTOCOMMIT",\n pool_size=1,\n connect_args={"options": options},\n pool_recycle=pool_recycle,\n )\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n @classmethod\n def config_type(cls) -> UserConfigSchema:\n return pg_config()\n\n @classmethod\n def from_config_value(\n cls, inst_data: Optional[ConfigurableClassData], config_value: PostgresStorageConfig\n ):\n return 
PostgresRunStorage(\n inst_data=inst_data,\n postgres_url=pg_url_from_config(config_value),\n should_autocreate_tables=config_value.get("should_autocreate_tables", True),\n )\n\n @staticmethod\n def create_clean_storage(\n postgres_url: str, should_autocreate_tables: bool = True\n ) -> "PostgresRunStorage":\n engine = create_engine(\n postgres_url, isolation_level="AUTOCOMMIT", poolclass=db_pool.NullPool\n )\n try:\n RunStorageSqlMetadata.drop_all(engine)\n finally:\n engine.dispose()\n return PostgresRunStorage(postgres_url, should_autocreate_tables)\n\n def connect(self) -> ContextManager[Connection]:\n return create_pg_connection(self._engine)\n\n def upgrade(self) -> None:\n with self.connect() as conn:\n run_alembic_upgrade(pg_alembic_config(__file__), conn)\n\n def has_built_index(self, migration_name: str) -> bool:\n if migration_name not in self._index_migration_cache:\n self._index_migration_cache[migration_name] = super(\n PostgresRunStorage, self\n ).has_built_index(migration_name)\n return self._index_migration_cache[migration_name]\n\n def mark_index_built(self, migration_name: str) -> None:\n super(PostgresRunStorage, self).mark_index_built(migration_name)\n if migration_name in self._index_migration_cache:\n del self._index_migration_cache[migration_name]\n\n def add_daemon_heartbeat(self, daemon_heartbeat: DaemonHeartbeat) -> None:\n with self.connect() as conn:\n # insert or update if already present, using postgres specific on_conflict\n conn.execute(\n db_dialects.postgresql.insert(DaemonHeartbeatsTable)\n .values(\n timestamp=utc_datetime_from_timestamp(daemon_heartbeat.timestamp),\n daemon_type=daemon_heartbeat.daemon_type,\n daemon_id=daemon_heartbeat.daemon_id,\n body=serialize_value(daemon_heartbeat),\n )\n .on_conflict_do_update(\n index_elements=[DaemonHeartbeatsTable.c.daemon_type],\n set_={\n "timestamp": utc_datetime_from_timestamp(daemon_heartbeat.timestamp),\n "daemon_id": daemon_heartbeat.daemon_id,\n "body": serialize_value(daemon_heartbeat),\n },\n )\n .returning(\n # required because sqlalchemy might by default return the declared primary key,\n # which might not exist\n DaemonHeartbeatsTable.c.daemon_type,\n )\n )\n\n def set_cursor_values(self, pairs: Mapping[str, str]) -> None:\n check.mapping_param(pairs, "pairs", key_type=str, value_type=str)\n\n # pg speciic on_conflict_do_update\n insert_stmt = db_dialects.postgresql.insert(KeyValueStoreTable).values(\n [{"key": k, "value": v} for k, v in pairs.items()]\n )\n upsert_stmt = insert_stmt.on_conflict_do_update(\n index_elements=[\n KeyValueStoreTable.c.key,\n ],\n set_={"value": insert_stmt.excluded.value},\n ).returning(\n # required because sqlalchemy might by default return the declared primary key,\n # which might not exist\n KeyValueStoreTable.c.key\n )\n\n with self.connect() as conn:\n conn.execute(upsert_stmt)\n\n def _add_snapshot(self, snapshot_id: str, snapshot_obj, snapshot_type: SnapshotType) -> str:\n with self.connect() as conn:\n snapshot_insert = (\n db_dialects.postgresql.insert(SnapshotsTable)\n .values(\n snapshot_id=snapshot_id,\n snapshot_body=zlib.compress(serialize_value(snapshot_obj).encode("utf-8")),\n snapshot_type=snapshot_type.value,\n )\n .on_conflict_do_nothing()\n )\n conn.execute(snapshot_insert)\n return snapshot_id\n\n def alembic_version(self) -> AlembicVersion:\n alembic_config = pg_alembic_config(__file__)\n with self.connect() as conn:\n return check_alembic_revision(alembic_config, conn)
\n
", "current_page_name": "_modules/dagster_postgres/run_storage/run_storage", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_postgres.run_storage.run_storage"}}, "schedule_storage": {"schedule_storage": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_postgres.schedule_storage.schedule_storage

\nfrom typing import ContextManager, Optional\n\nimport dagster._check as check\nimport pendulum\nimport sqlalchemy as db\nimport sqlalchemy.dialects as db_dialects\nimport sqlalchemy.pool as db_pool\nfrom dagster._config.config_schema import UserConfigSchema\nfrom dagster._core.scheduler.instigation import InstigatorState\nfrom dagster._core.storage.config import PostgresStorageConfig, pg_config\nfrom dagster._core.storage.schedules import ScheduleStorageSqlMetadata, SqlScheduleStorage\nfrom dagster._core.storage.schedules.schema import InstigatorsTable\nfrom dagster._core.storage.sql import (\n    AlembicVersion,\n    check_alembic_revision,\n    create_engine,\n    run_alembic_upgrade,\n    stamp_alembic_rev,\n)\nfrom dagster._serdes import ConfigurableClass, ConfigurableClassData, serialize_value\nfrom sqlalchemy.engine import Connection\n\nfrom ..utils import (\n    create_pg_connection,\n    pg_alembic_config,\n    pg_statement_timeout,\n    pg_url_from_config,\n    retry_pg_connection_fn,\n    retry_pg_creation_fn,\n)\n\n\n
[docs]class PostgresScheduleStorage(SqlScheduleStorage, ConfigurableClass):\n """Postgres-backed run storage.\n\n Users should not directly instantiate this class; it is instantiated by internal machinery when\n ``dagster-webserver`` and ``dagster-graphql`` load, based on the values in the ``dagster.yaml`` file in\n ``$DAGSTER_HOME``. Configuration of this class should be done by setting values in that file.\n\n To use Postgres for all of the components of your instance storage, you can add the following\n block to your ``dagster.yaml``:\n\n .. literalinclude:: ../../../../../../examples/docs_snippets/docs_snippets/deploying/dagster-pg.yaml\n :caption: dagster.yaml\n :lines: 1-8\n :language: YAML\n\n If you are configuring the different storage components separately and are specifically\n configuring your schedule storage to use Postgres, you can add a block such as the following\n to your ``dagster.yaml``:\n\n .. literalinclude:: ../../../../../../examples/docs_snippets/docs_snippets/deploying/dagster-pg-legacy.yaml\n :caption: dagster.yaml\n :lines: 23-32\n :language: YAML\n\n Note that the fields in this config are :py:class:`~dagster.StringSource` and\n :py:class:`~dagster.IntSource` and can be configured from environment variables.\n """\n\n def __init__(\n self,\n postgres_url: str,\n should_autocreate_tables: bool = True,\n inst_data: Optional[ConfigurableClassData] = None,\n ):\n self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)\n self.postgres_url = postgres_url\n self.should_autocreate_tables = check.bool_param(\n should_autocreate_tables, "should_autocreate_tables"\n )\n\n # Default to not holding any connections open to prevent accumulating connections per DagsterInstance\n self._engine = create_engine(\n self.postgres_url, isolation_level="AUTOCOMMIT", poolclass=db_pool.NullPool\n )\n\n # Stamp and create tables if the main table does not exist (we can't check alembic\n # revision because alembic config may be shared with other storage classes)\n if self.should_autocreate_tables:\n table_names = retry_pg_connection_fn(lambda: db.inspect(self._engine).get_table_names())\n missing_main_table = "schedules" not in table_names and "jobs" not in table_names\n if missing_main_table:\n retry_pg_creation_fn(self._init_db)\n\n super().__init__()\n\n def _init_db(self) -> None:\n with self.connect() as conn:\n with conn.begin():\n ScheduleStorageSqlMetadata.create_all(conn)\n stamp_alembic_rev(pg_alembic_config(__file__), conn)\n\n # mark all the data migrations as applied\n self.migrate()\n self.optimize()\n\n def optimize_for_webserver(self, statement_timeout: int, pool_recycle: int) -> None:\n # When running in dagster-webserver, hold an open connection and set statement_timeout\n existing_options = self._engine.url.query.get("options")\n timeout_option = pg_statement_timeout(statement_timeout)\n if existing_options:\n options = f"{timeout_option} {existing_options}"\n else:\n options = timeout_option\n self._engine = create_engine(\n self.postgres_url,\n isolation_level="AUTOCOMMIT",\n pool_size=1,\n connect_args={"options": options},\n pool_recycle=pool_recycle,\n )\n\n @property\n def inst_data(self) -> Optional[ConfigurableClassData]:\n return self._inst_data\n\n @classmethod\n def config_type(cls) -> UserConfigSchema:\n return pg_config()\n\n @classmethod\n def from_config_value(\n cls, inst_data: Optional[ConfigurableClassData], config_value: PostgresStorageConfig\n ) -> "PostgresScheduleStorage":\n return PostgresScheduleStorage(\n 
inst_data=inst_data,\n postgres_url=pg_url_from_config(config_value),\n should_autocreate_tables=config_value.get("should_autocreate_tables", True),\n )\n\n @staticmethod\n def create_clean_storage(\n postgres_url: str, should_autocreate_tables: bool = True\n ) -> "PostgresScheduleStorage":\n engine = create_engine(\n postgres_url, isolation_level="AUTOCOMMIT", poolclass=db_pool.NullPool\n )\n try:\n ScheduleStorageSqlMetadata.drop_all(engine)\n finally:\n engine.dispose()\n return PostgresScheduleStorage(postgres_url, should_autocreate_tables)\n\n def connect(self, run_id: Optional[str] = None) -> ContextManager[Connection]:\n return create_pg_connection(self._engine)\n\n def upgrade(self) -> None:\n alembic_config = pg_alembic_config(__file__)\n with self.connect() as conn:\n run_alembic_upgrade(alembic_config, conn)\n\n def _add_or_update_instigators_table(self, conn: Connection, state: InstigatorState) -> None:\n selector_id = state.selector_id\n conn.execute(\n db_dialects.postgresql.insert(InstigatorsTable)\n .values(\n selector_id=selector_id,\n repository_selector_id=state.repository_selector_id,\n status=state.status.value,\n instigator_type=state.instigator_type.value,\n instigator_body=serialize_value(state),\n )\n .on_conflict_do_update(\n index_elements=[InstigatorsTable.c.selector_id],\n set_={\n "status": state.status.value,\n "instigator_type": state.instigator_type.value,\n "instigator_body": serialize_value(state),\n "update_timestamp": pendulum.now("UTC"),\n },\n )\n )\n\n def alembic_version(self) -> AlembicVersion:\n alembic_config = pg_alembic_config(__file__)\n with self.connect() as conn:\n return check_alembic_revision(alembic_config, conn)
\n
", "current_page_name": "_modules/dagster_postgres/schedule_storage/schedule_storage", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_postgres.schedule_storage.schedule_storage"}}}, "dagster_prometheus": {"resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_prometheus.resources

\nimport prometheus_client\nfrom dagster import (\n    ConfigurableResource,\n    resource,\n)\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster._core.execution.context.init import InitResourceContext\nfrom prometheus_client.exposition import default_handler\nfrom pydantic import Field, PrivateAttr\n\n\n
[docs]class PrometheusClient:\n """Integrates with Prometheus via the prometheus_client library."""
\n\n\n
[docs]class PrometheusResource(ConfigurableResource):\n """This resource is used to send metrics to a Prometheus Pushgateway.\n\n **Example:**\n\n .. code-block:: python\n\n from dagster_prometheus import PrometheusResource\n from dagster import Definitions, job, op\n\n @op\n def example_prometheus_op(prometheus: PrometheusResource):\n prometheus.push_to_gateway(job="my_job")\n\n @job\n def my_job():\n example_prometheus_op()\n\n defs = Definitions(\n jobs=[my_job],\n resources={"prometheus": PrometheusResource(gateway="http://pushgateway.local")},\n )\n\n """\n\n gateway: str = Field(\n description=(\n "The url for your push gateway. Either of the"\n " form 'http://pushgateway.local', or 'pushgateway.local'."\n " Scheme defaults to 'http' if none is provided"\n )\n )\n timeout: int = Field(\n default=30,\n description="is how long delete will attempt to connect before giving up. Defaults to 30s.",\n )\n _registry: prometheus_client.CollectorRegistry = PrivateAttr(default=None)\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def setup_for_execution(self, context: InitResourceContext) -> None:\n self._registry = prometheus_client.CollectorRegistry()\n\n @property\n def registry(self) -> prometheus_client.CollectorRegistry:\n return self._registry\n\n def push_to_gateway(self, job, grouping_key=None, handler=default_handler) -> None:\n """Push metrics to the given pushgateway.\n `job` is the job label to be attached to all pushed metrics\n `grouping_key` please see the pushgateway documentation for details.\n Defaults to None\n `handler` is an optional function which can be provided to perform\n requests to the 'gateway'.\n Defaults to None, in which case an http or https request\n will be carried out by a default handler.\n If not None, the argument must be a function which accepts\n the following arguments:\n url, method, timeout, headers, and content\n May be used to implement additional functionality not\n supported by the built-in default handler (such as SSL\n client certicates, and HTTP authentication mechanisms).\n 'url' is the URL for the request, the 'gateway' argument\n described earlier will form the basis of this URL.\n 'method' is the HTTP method which should be used when\n carrying out the request.\n 'timeout' requests not successfully completed after this\n many seconds should be aborted. If timeout is None, then\n the handler should not set a timeout.\n 'headers' is a list of ("header-name","header-value") tuples\n which must be passed to the pushgateway in the form of HTTP\n request headers.\n The function should raise an exception (e.g. 
IOError) on\n failure.\n 'content' is the data which should be used to form the HTTP\n Message Body.\n This overwrites all metrics with the same job and grouping_key.\n This uses the PUT HTTP method.\n """\n prometheus_client.push_to_gateway(\n gateway=self.gateway,\n job=job,\n registry=self._registry,\n grouping_key=grouping_key,\n timeout=self.timeout,\n handler=handler,\n )\n\n def pushadd_to_gateway(self, job, grouping_key=None, handler=default_handler) -> None:\n """PushAdd metrics to the given pushgateway.\n `job` is the job label to be attached to all pushed metrics\n `registry` is an instance of CollectorRegistry\n `grouping_key` please see the pushgateway documentation for details.\n Defaults to None\n `handler` is an optional function which can be provided to perform\n requests to the 'gateway'.\n Defaults to None, in which case an http or https request\n will be carried out by a default handler.\n See the 'prometheus_client.push_to_gateway' documentation\n for implementation requirements.\n This replaces metrics with the same name, job and grouping_key.\n This uses the POST HTTP method.\n """\n prometheus_client.pushadd_to_gateway(\n gateway=self.gateway,\n job=job,\n registry=self._registry,\n grouping_key=grouping_key,\n timeout=self.timeout,\n handler=handler,\n )\n\n def delete_from_gateway(self, job, grouping_key=None, handler=default_handler) -> None:\n """Delete metrics from the given pushgateway.\n `job` is the job label to be attached to all pushed metrics\n `grouping_key` please see the pushgateway documentation for details.\n Defaults to None\n `handler` is an optional function which can be provided to perform\n requests to the 'gateway'.\n Defaults to None, in which case an http or https request\n will be carried out by a default handler.\n See the 'prometheus_client.push_to_gateway' documentation\n for implementation requirements.\n This deletes metrics with the given job and grouping_key.\n This uses the DELETE HTTP method.\n """\n prometheus_client.delete_from_gateway(\n gateway=self.gateway,\n job=job,\n grouping_key=grouping_key,\n timeout=self.timeout,\n handler=handler,\n )
\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n config_schema=PrometheusResource.to_config_schema(),\n description="""This resource is for sending metrics to a Prometheus Pushgateway.""",\n)\ndef prometheus_resource(context):\n return PrometheusResource(\n gateway=context.resource_config["gateway"], timeout=context.resource_config["timeout"]\n )
\n
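The ``push_to_gateway`` family of methods above pushes whatever has been registered on the resource's ``CollectorRegistry``. A minimal sketch of that flow, assuming a Pushgateway is reachable at the configured ``gateway`` URL (the metric name and job name here are illustrative, not part of the library):

.. code-block:: python

    from dagster import Definitions, job, op
    from dagster_prometheus import PrometheusResource
    from prometheus_client import Counter


    @op
    def record_run_metric(prometheus: PrometheusResource):
        # Register a counter on the resource's CollectorRegistry, increment it,
        # then push all collected metrics to the configured Pushgateway.
        runs = Counter(
            "my_job_runs_total",
            "Runs recorded by metrics_job",
            registry=prometheus.registry,
        )
        runs.inc()
        prometheus.push_to_gateway(job="my_job")


    @job
    def metrics_job():
        record_run_metric()


    defs = Definitions(
        jobs=[metrics_job],
        resources={"prometheus": PrometheusResource(gateway="http://pushgateway.local")},
    )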
", "current_page_name": "_modules/dagster_prometheus/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_prometheus.resources"}}, "dagster_pyspark": {"resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_pyspark.resources

\nfrom typing import Any, Dict\n\nimport dagster._check as check\nfrom dagster import ConfigurableResource, resource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster._core.execution.context.init import InitResourceContext\nfrom dagster_spark.configs_spark import spark_config\nfrom dagster_spark.utils import flatten_dict\nfrom pydantic import PrivateAttr\nfrom pyspark.sql import SparkSession\n\n\ndef spark_session_from_config(spark_conf=None):\n    spark_conf = check.opt_dict_param(spark_conf, "spark_conf")\n    builder = SparkSession.builder\n    flat = flatten_dict(spark_conf)\n    for key, value in flat:\n        builder = builder.config(key, value)\n\n    return builder.getOrCreate()\n\n\n
[docs]class PySparkResource(ConfigurableResource):\n """This resource provides access to a PySpark Session for executing PySpark code within Dagster.\n\n Example:\n .. code-block:: python\n\n @op\n def my_op(pyspark: PySparkResource)\n spark_session = pyspark.spark_session\n dataframe = spark_session.read.json("examples/src/main/resources/people.json")\n\n\n @job(\n resource_defs={\n "pyspark": PySparkResource(\n spark_config={\n "spark.executor.memory": "2g"\n }\n )\n }\n )\n def my_spark_job():\n my_op()\n """\n\n spark_config: Dict[str, Any]\n _spark_session = PrivateAttr(default=None)\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def setup_for_execution(self, context: InitResourceContext) -> None:\n self._spark_session = spark_session_from_config(self.spark_config)\n\n @property\n def spark_session(self) -> Any:\n return self._spark_session\n\n @property\n def spark_context(self) -> Any:\n return self.spark_session.sparkContext
\n\n\n
[docs]@dagster_maintained_resource\n@resource({"spark_conf": spark_config()})\ndef pyspark_resource(init_context) -> PySparkResource:\n """This resource provides access to a PySpark SparkSession for executing PySpark code within Dagster.\n\n Example:\n .. code-block:: python\n\n @op(required_resource_keys={"pyspark"})\n def my_op(context):\n spark_session = context.resources.pyspark.spark_session\n dataframe = spark_session.read.json("examples/src/main/resources/people.json")\n\n my_pyspark_resource = pyspark_resource.configured(\n {"spark_conf": {"spark.executor.memory": "2g"}}\n )\n\n @job(resource_defs={"pyspark": my_pyspark_resource})\n def my_spark_job():\n my_op()\n """\n context_updated_config = init_context.replace_config(\n {"spark_config": init_context.resource_config["spark_conf"]}\n )\n return PySparkResource.from_resource_context(context_updated_config)
\n\n\nclass LazyPySparkResource(ConfigurableResource):\n """This resource provides access to a lazily-created PySpark SparkSession for executing PySpark\n code within Dagster, avoiding the creation of a SparkSession object until the .spark_session attribute\n of the resource is accessed. This is helpful for avoiding the creation (and startup penalty) of a SparkSession\n until it is actually needed / accessed by an op or IOManager.\n\n Example:\n .. code-block:: python\n\n @op\n def my_op(lazy_pyspark: LazyPySparkResource)\n spark_session = lazy_pyspark.spark_session\n dataframe = spark_session.read.json("examples/src/main/resources/people.json")\n\n @job(\n resource_defs={\n "lazy_pyspark": LazyPySparkResource(\n spark_config={\n "spark.executor.memory": "2g"\n }\n )\n }\n )\n def my_spark_job():\n my_op()\n """\n\n spark_config: Dict[str, Any]\n _spark_session = PrivateAttr(default=None)\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def _init_session(self) -> None:\n if self._spark_session is None:\n self._spark_session = spark_session_from_config(self.spark_config)\n\n @property\n def spark_session(self) -> Any:\n self._init_session()\n return self._spark_session\n\n @property\n def spark_context(self) -> Any:\n self._init_session()\n return self._spark_session.sparkContext\n\n\n@dagster_maintained_resource\n@resource({"spark_conf": spark_config()})\ndef lazy_pyspark_resource(init_context: InitResourceContext) -> LazyPySparkResource:\n """This resource provides access to a lazily-created PySpark SparkSession for executing PySpark\n code within Dagster, avoiding the creation of a SparkSession object until the .spark_session attribute\n of the resource is accessed. This is helpful for avoiding the creation (and startup penalty) of a SparkSession\n until it is actually needed / accessed by an op or IOManager.\n\n Example:\n .. code-block:: python\n\n @op(required_resource_keys={"lazy_pyspark"})\n def my_op(context):\n spark_session = context.resources.lazy_pyspark.spark_session\n dataframe = spark_session.read.json("examples/src/main/resources/people.json")\n\n my_pyspark_resource = lazy_pyspark_resource.configured(\n {"spark_conf": {"spark.executor.memory": "2g"}}\n )\n\n @job(resource_defs={"lazy_pyspark": my_pyspark_resource})\n def my_spark_job():\n my_op()\n """\n context_updated_config = init_context.replace_config(\n {"spark_config": init_context.resource_config["spark_conf"]}\n )\n return LazyPySparkResource.from_resource_context(context_updated_config)\n
", "current_page_name": "_modules/dagster_pyspark/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_pyspark.resources"}}, "dagster_shell": {"ops": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_shell.ops

\nimport os\nfrom enum import Enum\nfrom typing import AbstractSet, Any, Dict, Mapping, Optional\n\nfrom dagster import (\n    Config,\n    Failure,\n    In,\n    Nothing,\n    OpExecutionContext,\n    Out,\n    _check as check,\n    op,\n)\nfrom dagster._core.definitions.op_definition import OpDefinition\nfrom pydantic import Field\n\nfrom .utils import execute, execute_script_file\n\n\nclass OutputType(Enum):\n    STREAM = "STREAM"\n    """Stream script stdout/stderr."""\n\n    BUFFER = "BUFFER"\n    """Buffer shell script stdout/stderr, then log upon completion."""\n\n    NONE = "NONE"\n    """No logging."""\n\n\nclass ShellOpConfig(Config):\n    env: Optional[Dict[str, str]] = Field(\n        default=None,\n        description="An optional dict of environment variables to pass to the subprocess.",\n    )\n    output_logging: OutputType = Field(\n        OutputType.BUFFER.value,\n    )\n    cwd: Optional[str] = Field(\n        default=None, description="Working directory in which to execute shell script"\n    )\n\n    def to_execute_params(self) -> Dict[str, Any]:\n        return {\n            "env": {**os.environ, **(self.env or {})},\n            "output_logging": self.output_logging.value,\n            "cwd": self.cwd,\n        }\n\n\n
[docs]@op(\n name="shell_op",\n description=(\n "This op executes a shell command it receives as input.\\n\\n"\n "This op is suitable for uses where the command to execute is generated dynamically by "\n "upstream ops. If you know the command to execute at job construction time, "\n "consider `shell_command_op` instead."\n ),\n ins={"shell_command": In(str)},\n out=Out(str),\n)\ndef shell_op(context: OpExecutionContext, shell_command: str, config: ShellOpConfig) -> str:\n """This op executes a shell command it receives as input.\n This op is suitable for uses where the command to execute is generated dynamically by\n upstream ops. If you know the command to execute at job construction time,\n consider ``shell_command_op`` instead.\n\n Args:\n shell_command: The shell command to be executed\n config (ShellOpConfig): A ShellOpConfig object specifying configuration options\n\n Examples:\n .. code-block:: python\n\n @op\n def create_shell_command():\n return "echo hello world!"\n\n @graph\n def echo_graph():\n shell_op(create_shell_command())\n """\n output, return_code = execute(\n shell_command=shell_command, log=context.log, **config.to_execute_params()\n )\n\n if return_code:\n raise Failure(description=f"Shell command execution failed with output: {output}")\n\n return output
\n\n\n
[docs]def create_shell_command_op(\n shell_command: str,\n name: str,\n description: Optional[str] = None,\n required_resource_keys: Optional[AbstractSet[str]] = None,\n tags: Optional[Mapping[str, str]] = None,\n) -> OpDefinition:\n """This function is a factory that constructs ops to execute a shell command.\n\n Note that you can only use ``shell_command_op`` if you know the command you'd like to execute\n at job construction time. If you'd like to construct shell commands dynamically during\n job execution and pass them between ops, you should use ``shell_op`` instead.\n\n The resulting op can take a single ``start`` argument that is a\n `Nothing dependency <https://docs.dagster.io/concepts/ops-jobs-graphs/graphs#defining-nothing-dependencies>`__\n to allow you to run ops before the shell op.\n\n Examples:\n .. literalinclude:: ../../../../../../python_modules/libraries/dagster-shell/dagster_shell_tests/example_shell_command_op.py\n :language: python\n\n .. code-block:: python\n\n @op\n def run_before_shell_op():\n do_some_work()\n\n @graph\n def my_graph():\n my_echo_op = create_shell_command_op("echo hello world!", name="echo_op")\n my_echo_op(start=run_before_shell_op())\n\n\n Args:\n shell_command (str): The shell command that the constructed op will execute.\n name (str): The name of the constructed op.\n description (Optional[str]): Human-readable description of this op.\n required_resource_keys (Optional[Set[str]]): Set of resource handles required by this op.\n Setting this ensures that resource spin up for the required resources will occur before\n the shell command is executed.\n tags (Optional[Dict[str, Any]]): Arbitrary metadata for the op. Frameworks may\n expect and require certain metadata to be attached to a op. Users should generally\n not set metadata directly. Values that are not strings will be json encoded and must meet\n the criteria that `json.loads(json.dumps(value)) == value`.\n\n Raises:\n Failure: Raised when the shell command returns a non-zero exit code.\n\n Returns:\n OpDefinition: Returns the constructed op definition.\n """\n\n @op(\n name=name,\n description=description,\n ins={"start": In(Nothing)},\n out=Out(str),\n required_resource_keys=required_resource_keys,\n tags=tags,\n )\n def _shell_fn(context, config: ShellOpConfig):\n output, return_code = execute(\n shell_command=shell_command, log=context.log, **config.to_execute_params()\n )\n\n if return_code:\n raise Failure(description=f"Shell command execution failed with output: {output}")\n\n return output\n\n return _shell_fn
\n\n\n
[docs]def create_shell_script_op(\n shell_script_path,\n name="create_shell_script_op",\n ins: Optional[Mapping[str, In]] = None,\n **kwargs: Any,\n) -> OpDefinition:\n """This function is a factory which constructs an op that will execute a shell command read\n from a script file.\n\n Any kwargs passed to this function will be passed along to the underlying :func:`@op\n <dagster.op>` decorator. However, note that overriding ``config`` or ``output_defs`` is not\n supported.\n\n You might consider using :func:`@graph <dagster.graph>` to wrap this op\n in the cases where you'd like to configure the shell op with different config fields.\n\n If no ``ins`` are passed then the resulting op can take a single ``start`` argument that is a\n `Nothing dependency <https://docs.dagster.io/concepts/ops-jobs-graphs/graphs#defining-nothing-dependencies>`__\n to allow you to run ops before the shell op.\n\n\n Examples:\n .. literalinclude:: ../../../../../../python_modules/libraries/dagster-shell/dagster_shell_tests/example_shell_script_op.py\n :language: python\n\n .. code-block:: python\n\n @op\n def run_before_shell_op():\n do_some_work()\n\n @graph\n def my_graph():\n my_echo_op = create_shell_script_op(file_relative_path(__file__, "hello_world.sh"), name="echo_op")\n my_echo_op(start=run_before_shell_op())\n\n\n Args:\n shell_script_path (str): The script file to execute.\n name (Optional[str]): The name of this op. Defaults to "create_shell_script_op".\n ins (Optional[Mapping[str, In]]): Ins for the op. Defaults to\n a single Nothing input.\n\n Raises:\n Failure: Raised when the shell command returns a non-zero exit code.\n\n Returns:\n OpDefinition: Returns the constructed op definition.\n """\n check.str_param(shell_script_path, "shell_script_path")\n name = check.str_param(name, "name")\n check.opt_mapping_param(ins, "ins", value_type=In)\n\n if "config" in kwargs:\n raise TypeError("Overriding config for shell op is not supported.")\n\n @op(\n name=name,\n description=kwargs.pop("description", "An op to invoke a shell command."),\n ins=ins or {"start": In(Nothing)},\n out=Out(str),\n **kwargs,\n )\n def _shell_script_fn(context, config: ShellOpConfig):\n output, return_code = execute_script_file(\n shell_script_path=shell_script_path, log=context.log, **config.to_execute_params()\n )\n\n if return_code:\n raise Failure(description=f"Shell command execution failed with output: {output}")\n\n return output\n\n return _shell_script_fn
\n
", "current_page_name": "_modules/dagster_shell/ops", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_shell.ops"}, "utils": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_shell.utils

\n#\n# NOTE: This file is based on the bash operator from Apache Airflow, which can be found here:\n# https://github.com/apache/airflow/blob/master/airflow/operators/bash.py\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport os\nimport signal\nfrom logging import Logger\nfrom subprocess import PIPE, STDOUT, Popen\nfrom typing import Mapping, Optional, Tuple\n\nimport dagster._check as check\nfrom dagster._utils import safe_tempfile_path\nfrom typing_extensions import Final\n\nOUTPUT_LOGGING_OPTIONS: Final = ["STREAM", "BUFFER", "NONE"]\n\n\ndef execute_script_file(\n    shell_script_path: str,\n    output_logging: str,\n    log: Logger,\n    cwd: Optional[str] = None,\n    env: Optional[Mapping[str, str]] = None,\n) -> Tuple[str, int]:\n    """Execute a shell script file specified by the argument ``shell_script_path``. The script will be\n    invoked via ``subprocess.Popen(['bash', shell_script_path], ...)``.\n\n    In the Popen invocation, ``stdout=PIPE, stderr=STDOUT`` is used, and the combined stdout/stderr\n    output is retrieved.\n\n    Examples:\n        .. literalinclude:: ../../../../../../python_modules/libraries/dagster-shell/dagster_shell_tests/example_shell_script_utility.py\n           :language: python\n\n    Args:\n        shell_script_path (str): The shell script to execute.\n        output_logging (str): The logging mode to use. Supports STREAM, BUFFER, and NONE.\n        log (Union[logging.Logger, DagsterLogManager]): Any logger which responds to .info()\n        cwd (str, optional): Working directory for the shell command to use. Defaults to the\n            temporary path where we store the shell command in a script file.\n        env (Dict[str, str], optional): Environment dictionary to pass to ``subprocess.Popen``.\n            Unused by default.\n\n    Raises:\n        Exception: When an invalid output_logging is selected. 
Unreachable from op-based\n            invocation since the config system will check output_logging against the config\n            enum.\n\n    Returns:\n        Tuple[str, int]: A tuple where the first element is the combined stdout/stderr output of running the shell\n        command and the second element is the return code.\n    """\n    check.str_param(shell_script_path, "shell_script_path")\n    check.str_param(output_logging, "output_logging")\n    check.opt_str_param(cwd, "cwd", default=os.path.dirname(shell_script_path))\n    env = check.opt_nullable_dict_param(env, "env", key_type=str, value_type=str)\n\n    if output_logging not in OUTPUT_LOGGING_OPTIONS:\n        raise Exception("Unrecognized output_logging %s" % output_logging)\n\n    def pre_exec():\n        # Restore default signal disposition and invoke setsid\n        for sig in ("SIGPIPE", "SIGXFZ", "SIGXFSZ"):\n            if hasattr(signal, sig):\n                signal.signal(getattr(signal, sig), signal.SIG_DFL)\n        os.setsid()\n\n    with open(shell_script_path, "rb") as f:\n        shell_command = f.read().decode("utf-8")\n\n    log.info(f"Running command:\\n{shell_command}")\n\n    sub_process = None\n    try:\n        stdout_pipe = PIPE\n        stderr_pipe = STDOUT\n        if output_logging == "NONE":\n            stdout_pipe = stderr_pipe = None\n\n        sub_process = Popen(\n            ["bash", shell_script_path],\n            stdout=stdout_pipe,\n            stderr=stderr_pipe,\n            cwd=cwd,\n            env=env,\n            preexec_fn=pre_exec,  # noqa: PLW1509\n            encoding="UTF-8",\n        )\n\n        log.info(f"Command pid: {sub_process.pid}")\n\n        output = ""\n        if output_logging == "STREAM":\n            assert sub_process.stdout is not None, "Setting stdout=PIPE should always set stdout."\n            # Stream back logs as they are emitted\n            lines = []\n            for line in sub_process.stdout:\n                log.info(line.rstrip())\n                lines.append(line)\n            output = "".join(lines)\n        elif output_logging == "BUFFER":\n            # Collect and buffer all logs, then emit\n            output, _ = sub_process.communicate()\n            log.info(output)\n\n        sub_process.wait()\n        log.info(f"Command exited with return code {sub_process.returncode}")\n\n        return output, sub_process.returncode\n    finally:\n        # Always terminate subprocess, including in cases where the run is terminated\n        if sub_process:\n            sub_process.terminate()\n\n\ndef execute(\n    shell_command: str,\n    output_logging: str,\n    log: Logger,\n    cwd: Optional[str] = None,\n    env: Optional[Mapping[str, str]] = None,\n) -> Tuple[str, int]:\n    """This function is a utility for executing shell commands from within a Dagster op (or from Python in general).\n    It can be used to execute shell commands on either op input data, or any data generated within a generic python op.\n\n    Internally, it executes a shell script specified by the argument ``shell_command``. The script will be written\n    to a temporary file first and invoked via ``subprocess.Popen(['bash', shell_script_path], ...)``.\n\n    In the Popen invocation, ``stdout=PIPE, stderr=STDOUT`` is used, and the combined stdout/stderr\n    output is retrieved.\n\n    Examples:\n        .. 
literalinclude:: ../../../../../../python_modules/libraries/dagster-shell/dagster_shell_tests/example_shell_command_utility.py\n           :language: python\n\n    Args:\n        shell_command (str): The shell command to execute\n        output_logging (str): The logging mode to use. Supports STREAM, BUFFER, and NONE.\n        log (Union[logging.Logger, DagsterLogManager]): Any logger which responds to .info()\n        cwd (str, optional): Working directory for the shell command to use. Defaults to the\n            temporary path where we store the shell command in a script file.\n        env (Dict[str, str], optional): Environment dictionary to pass to ``subprocess.Popen``.\n            Unused by default.\n\n    Returns:\n        Tuple[str, int]: A tuple where the first element is the combined stdout/stderr output of running the shell\n        command and the second element is the return code.\n    """\n    check.str_param(shell_command, "shell_command")\n    # other args checked in execute_file\n\n    with safe_tempfile_path() as tmp_file_path:\n        tmp_path = os.path.dirname(tmp_file_path)\n        log.info("Using temporary directory: %s" % tmp_path)\n\n        with open(tmp_file_path, "wb") as tmp_file:\n            tmp_file.write(shell_command.encode("utf-8"))\n            tmp_file.flush()\n            script_location = os.path.abspath(tmp_file.name)\n            log.info(f"Temporary script location: {script_location}")\n            return execute_script_file(\n                shell_script_path=tmp_file.name,\n                output_logging=output_logging,\n                log=log,\n                cwd=(cwd or tmp_path),\n                env=env,\n            )\n
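The literalinclude example referenced above is not rendered on this page; the following is a minimal sketch of calling ``execute`` directly, assuming a Unix-like environment with ``bash`` on the path (the logger name and shell command are illustrative):

.. code-block:: python

    import logging

    from dagster_shell.utils import execute

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger("shell_example")

    # Buffer combined stdout/stderr and log it once the command completes.
    output, return_code = execute(
        shell_command="echo hello from dagster-shell",
        output_logging="BUFFER",
        log=log,
    )
    assert return_code == 0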
", "current_page_name": "_modules/dagster_shell/utils", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_shell.utils"}}, "dagster_slack": {"hooks": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_slack.hooks

\nfrom typing import Callable, Optional\n\nfrom dagster._annotations import deprecated_param\nfrom dagster._core.definitions import failure_hook, success_hook\nfrom dagster._core.execution.context.hook import HookContext\nfrom dagster._utils.warnings import normalize_renamed_param\n\n\ndef _default_status_message(context: HookContext, status: str) -> str:\n    return f"Op {context.op.name} on job {context.job_name} {status}!\\nRun ID: {context.run_id}"\n\n\ndef _default_failure_message(context: HookContext) -> str:\n    return _default_status_message(context, status="failed")\n\n\ndef _default_success_message(context: HookContext) -> str:\n    return _default_status_message(context, status="succeeded")\n\n\n
[docs]@deprecated_param(\n param="dagit_base_url",\n breaking_version="2.0",\n additional_warn_text="Use `webserver_base_url` instead.",\n)\ndef slack_on_failure(\n channel: str,\n message_fn: Callable[[HookContext], str] = _default_failure_message,\n dagit_base_url: Optional[str] = None,\n webserver_base_url: Optional[str] = None,\n):\n """Create a hook on step failure events that will message the given Slack channel.\n\n Args:\n channel (str): The channel to send the message to (e.g. "#my_channel")\n message_fn (Optional(Callable[[HookContext], str])): Function which takes in the HookContext\n outputs the message you want to send.\n dagit_base_url: (Optional[str]): The base url of your webserver instance. Specify this to allow\n messages to include deeplinks to the specific run that triggered the hook.\n webserver_base_url: (Optional[str]): The base url of your webserver instance. Specify this to allow\n messages to include deeplinks to the specific run that triggered the hook.\n\n Examples:\n .. code-block:: python\n\n @slack_on_failure("#foo", webserver_base_url="http://localhost:3000")\n @job(...)\n def my_job():\n pass\n\n .. code-block:: python\n\n def my_message_fn(context: HookContext) -> str:\n return f"Op {context.op} failed!"\n\n @op\n def an_op(context):\n pass\n\n @job(...)\n def my_job():\n an_op.with_hooks(hook_defs={slack_on_failure("#foo", my_message_fn)})\n\n """\n webserver_base_url = normalize_renamed_param(\n webserver_base_url, "webserver_base_url", dagit_base_url, "dagit_base_url"\n )\n\n @failure_hook(required_resource_keys={"slack"})\n def _hook(context: HookContext):\n text = message_fn(context)\n if webserver_base_url:\n text += f"\\n<{webserver_base_url}/runs/{context.run_id}|View in Dagster UI>"\n\n context.resources.slack.chat_postMessage(channel=channel, text=text)\n\n return _hook
\n\n\n
[docs]@deprecated_param(\n param="dagit_base_url",\n breaking_version="2.0",\n additional_warn_text="Use `webserver_base_url` instead.",\n)\ndef slack_on_success(\n channel: str,\n message_fn: Callable[[HookContext], str] = _default_success_message,\n dagit_base_url: Optional[str] = None,\n webserver_base_url: Optional[str] = None,\n):\n """Create a hook on step success events that will message the given Slack channel.\n\n Args:\n channel (str): The channel to send the message to (e.g. "#my_channel")\n message_fn (Optional(Callable[[HookContext], str])): Function which takes in the HookContext\n outputs the message you want to send.\n dagit_base_url: (Optional[str]): The base url of your webserver instance. Specify this to allow\n messages to include deeplinks to the specific run that triggered the hook.\n webserver_base_url: (Optional[str]): The base url of your webserver instance. Specify this to allow\n messages to include deeplinks to the specific run that triggered the hook.\n\n Examples:\n .. code-block:: python\n\n @slack_on_success("#foo", webserver_base_url="http://localhost:3000")\n @job(...)\n def my_job():\n pass\n\n .. code-block:: python\n\n def my_message_fn(context: HookContext) -> str:\n return f"Op {context.op} worked!"\n\n @op\n def an_op(context):\n pass\n\n @job(...)\n def my_job():\n an_op.with_hooks(hook_defs={slack_on_success("#foo", my_message_fn)})\n\n """\n webserver_base_url = normalize_renamed_param(\n webserver_base_url, "webserver_base_url", dagit_base_url, "dagit_base_url"\n )\n\n @success_hook(required_resource_keys={"slack"})\n def _hook(context: HookContext):\n text = message_fn(context)\n if webserver_base_url:\n text += f"\\n<{webserver_base_url}/runs/{context.run_id}|View in Dagster UI>"\n\n context.resources.slack.chat_postMessage(channel=channel, text=text)\n\n return _hook
\n
", "current_page_name": "_modules/dagster_slack/hooks", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_slack.hooks"}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_slack.resources

\nfrom dagster import ConfigurableResource, resource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom pydantic import Field\nfrom slack_sdk.web.client import WebClient\n\n\n
[docs]class SlackResource(ConfigurableResource):\n """This resource is for connecting to Slack.\n\n By configuring this Slack resource, you can post messages to Slack from any Dagster op, asset, schedule or sensor.\n\n Examples:\n .. code-block:: python\n\n import os\n\n from dagster import EnvVar, job, op\n from dagster_slack import SlackResource\n\n\n @op\n def slack_op(slack: SlackResource):\n slack.get_client().chat_postMessage(channel='#noise', text=':wave: hey there!')\n\n @job\n def slack_job():\n slack_op()\n\n defs = Definitions(\n jobs=[slack_job],\n resources={\n "slack": SlackResource(token=EnvVar("MY_SLACK_TOKEN")),\n },\n )\n """\n\n token: str = Field(\n description=(\n "To configure access to the Slack API, you'll need an access"\n " token provisioned with access to your Slack workspace."\n " Tokens are typically either user tokens or bot tokens. For programmatic posting"\n " to Slack from this resource, you probably want to provision and use a bot token."\n " More in the Slack API documentation here: https://api.slack.com/docs/token-types"\n ),\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def get_client(self) -> WebClient:\n """Returns a ``slack_sdk.WebClient`` for interacting with the Slack API."""\n return WebClient(self.token)
\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n config_schema=SlackResource.to_config_schema(),\n)\ndef slack_resource(context) -> WebClient:\n """This resource is for connecting to Slack.\n\n The resource object is a `slack_sdk.WebClient`.\n\n By configuring this Slack resource, you can post messages to Slack from any Dagster op, asset, schedule or sensor.\n\n Examples:\n .. code-block:: python\n\n import os\n\n from dagster import job, op\n from dagster_slack import slack_resource\n\n\n @op(required_resource_keys={'slack'})\n def slack_op(context):\n context.resources.slack.chat_postMessage(channel='#noise', text=':wave: hey there!')\n\n @job(resource_defs={'slack': slack_resource})\n def slack_job():\n slack_op()\n\n slack_job.execute_in_process(\n run_config={'resources': {'slack': {'config': {'token': os.getenv('SLACK_TOKEN')}}}}\n )\n """\n return SlackResource.from_resource_context(context).get_client()
\n
", "current_page_name": "_modules/dagster_slack/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_slack.resources"}, "sensors": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_slack.sensors

\nfrom typing import (\n    TYPE_CHECKING,\n    Any,\n    Callable,\n    Dict,\n    List,\n    Optional,\n    Sequence,\n    Tuple,\n    TypeVar,\n    Union,\n)\n\nfrom dagster import (\n    AssetSelection,\n    DefaultSensorStatus,\n    FreshnessPolicySensorContext,\n    freshness_policy_sensor,\n)\nfrom dagster._annotations import deprecated_param, experimental\nfrom dagster._core.definitions import GraphDefinition, JobDefinition\nfrom dagster._core.definitions.run_status_sensor_definition import (\n    RunFailureSensorContext,\n    run_failure_sensor,\n)\nfrom dagster._core.definitions.unresolved_asset_job_definition import UnresolvedAssetJobDefinition\nfrom dagster._utils.warnings import normalize_renamed_param\nfrom slack_sdk.web.client import WebClient\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.selector import (\n        CodeLocationSelector,\n        JobSelector,\n        RepositorySelector,\n    )\n\nT = TypeVar("T", RunFailureSensorContext, FreshnessPolicySensorContext)\n\n\ndef _build_slack_blocks_and_text(\n    context: T,\n    text_fn: Callable[[T], str],\n    blocks_fn: Optional[Callable[[T], List[Dict[Any, Any]]]],\n    webserver_base_url: Optional[str],\n) -> Tuple[List[Dict[str, Any]], str]:\n    main_body_text = text_fn(context)\n    blocks: List[Dict[Any, Any]] = []\n    if blocks_fn:\n        blocks.extend(blocks_fn(context))\n    else:\n        if isinstance(context, RunFailureSensorContext):\n            text = (\n                f'*Job "{context.dagster_run.job_name}" failed.'\n                f' `{context.dagster_run.run_id.split("-")[0]}`*'\n            )\n        else:\n            text = (\n                f'*Asset "{context.asset_key.to_user_string()}" is now'\n                f' {"on time" if context.minutes_overdue == 0 else f"{context.minutes_overdue:.2f} minutes late.*"}'\n            )\n\n        blocks.extend(\n            [\n                {\n                    "type": "section",\n                    "text": {\n                        "type": "mrkdwn",\n                        "text": text,\n                    },\n                },\n                {\n                    "type": "section",\n                    "text": {"type": "mrkdwn", "text": main_body_text},\n                },\n            ]\n        )\n\n    if webserver_base_url:\n        if isinstance(context, RunFailureSensorContext):\n            url = f"{webserver_base_url}/runs/{context.dagster_run.run_id}"\n        else:\n            url = f"{webserver_base_url}/assets/{'/'.join(context.asset_key.path)}"\n        blocks.append(\n            {\n                "type": "actions",\n                "elements": [\n                    {\n                        "type": "button",\n                        "text": {"type": "plain_text", "text": "View in Dagster UI"},\n                        "url": url,\n                    }\n                ],\n            }\n        )\n    return blocks, main_body_text\n\n\ndef _default_failure_message_text_fn(context: RunFailureSensorContext) -> str:\n    return f"Error: ```{context.failure_event.message}```"\n\n\n
[docs]@deprecated_param(\n param="dagit_base_url",\n breaking_version="2.0",\n additional_warn_text="Use `webserver_base_url` instead.",\n)\n@deprecated_param(\n param="job_selection",\n breaking_version="2.0",\n additional_warn_text="Use `monitored_jobs` instead.",\n)\ndef make_slack_on_run_failure_sensor(\n channel: str,\n slack_token: str,\n text_fn: Callable[[RunFailureSensorContext], str] = _default_failure_message_text_fn,\n blocks_fn: Optional[Callable[[RunFailureSensorContext], List[Dict[Any, Any]]]] = None,\n name: Optional[str] = None,\n dagit_base_url: Optional[str] = None,\n minimum_interval_seconds: Optional[int] = None,\n monitored_jobs: Optional[\n Sequence[\n Union[\n JobDefinition,\n GraphDefinition,\n UnresolvedAssetJobDefinition,\n "RepositorySelector",\n "JobSelector",\n "CodeLocationSelector",\n ]\n ]\n ] = None,\n job_selection: Optional[\n Sequence[\n Union[\n JobDefinition,\n GraphDefinition,\n UnresolvedAssetJobDefinition,\n "RepositorySelector",\n "JobSelector",\n "CodeLocationSelector",\n ]\n ]\n ] = None,\n monitor_all_repositories: bool = False,\n default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,\n webserver_base_url: Optional[str] = None,\n):\n """Create a sensor on job failures that will message the given Slack channel.\n\n Args:\n channel (str): The channel to send the message to (e.g. "#my_channel")\n slack_token (str): The slack token.\n Tokens are typically either user tokens or bot tokens. More in the Slack API\n documentation here: https://api.slack.com/docs/token-types\n text_fn (Optional(Callable[[RunFailureSensorContext], str])): Function which\n takes in the ``RunFailureSensorContext`` and outputs the message you want to send.\n Defaults to a text message that contains error message, job name, and run ID.\n The usage of the `text_fn` changes depending on whether you're using `blocks_fn`. If you\n are using `blocks_fn`, this is used as a fallback string to display in notifications. If\n you aren't, this is the main body text of the message. It can be formatted as plain text,\n or with markdown.\n See more details in https://api.slack.com/methods/chat.postMessage#text_usage\n blocks_fn (Callable[[RunFailureSensorContext], List[Dict]]): Function which takes in\n the ``RunFailureSensorContext`` and outputs the message blocks you want to send.\n See information about Blocks in https://api.slack.com/reference/block-kit/blocks\n name: (Optional[str]): The name of the sensor. Defaults to "slack_on_run_failure".\n dagit_base_url: (Optional[str]): The base url of your Dagit instance. Specify this to allow\n messages to include deeplinks to the failed job run.\n minimum_interval_seconds: (Optional[int]): The minimum number of seconds that will elapse\n between sensor evaluations.\n monitored_jobs (Optional[List[Union[JobDefinition, GraphDefinition, RepositorySelector, JobSelector, CodeLocationSensor]]]): The jobs in the\n current repository that will be monitored by this failure sensor. Defaults to None, which\n means the alert will be sent when any job in the repository fails. To monitor jobs in external repositories, use RepositorySelector and JobSelector\n job_selection (Optional[List[Union[JobDefinition, GraphDefinition, RepositorySelector, JobSelector, CodeLocationSensor]]]): (deprecated in favor of monitored_jobs)\n The jobs in the current repository that will be monitored by this failure sensor. 
Defaults to None, which means the alert will\n be sent when any job in the repository fails.\n monitor_all_repositories (bool): If set to True, the sensor will monitor all runs in the\n Dagster instance. If set to True, an error will be raised if you also specify\n monitored_jobs or job_selection. Defaults to False.\n default_status (DefaultSensorStatus): Whether the sensor starts as running or not. The default\n status can be overridden from Dagit or via the GraphQL API.\n webserver_base_url: (Optional[str]): The base url of your webserver instance. Specify this to allow\n messages to include deeplinks to the failed job run.\n\n Examples:\n .. code-block:: python\n\n slack_on_run_failure = make_slack_on_run_failure_sensor(\n "#my_channel",\n os.getenv("MY_SLACK_TOKEN")\n )\n\n @repository\n def my_repo():\n return [my_job + slack_on_run_failure]\n\n .. code-block:: python\n\n def my_message_fn(context: RunFailureSensorContext) -> str:\n return (\n f"Job {context.dagster_run.job_name} failed!"\n f"Error: {context.failure_event.message}"\n )\n\n slack_on_run_failure = make_slack_on_run_failure_sensor(\n channel="#my_channel",\n slack_token=os.getenv("MY_SLACK_TOKEN"),\n text_fn=my_message_fn,\n webserver_base_url="http://mycoolsite.com",\n )\n\n\n """\n webserver_base_url = normalize_renamed_param(\n webserver_base_url, "webserver_base_url", dagit_base_url, "dagit_base_url"\n )\n slack_client = WebClient(token=slack_token)\n jobs = monitored_jobs if monitored_jobs else job_selection\n\n @run_failure_sensor(\n name=name,\n minimum_interval_seconds=minimum_interval_seconds,\n monitored_jobs=jobs,\n monitor_all_repositories=monitor_all_repositories,\n default_status=default_status,\n )\n def slack_on_run_failure(context: RunFailureSensorContext):\n blocks, main_body_text = _build_slack_blocks_and_text(\n context=context,\n text_fn=text_fn,\n blocks_fn=blocks_fn,\n webserver_base_url=webserver_base_url,\n )\n\n slack_client.chat_postMessage(channel=channel, blocks=blocks, text=main_body_text)\n\n return slack_on_run_failure
\n\n\ndef _default_freshness_message_text_fn(context: FreshnessPolicySensorContext) -> str:\n return (\n f"Asset `{context.asset_key.to_user_string()}` is now {context.minutes_overdue:.2f} minutes"\n " late."\n )\n\n\n
[docs]@deprecated_param(\n param="dagit_base_url",\n breaking_version="2.0",\n additional_warn_text="Use `webserver_base_url` instead.",\n)\n@experimental\ndef make_slack_on_freshness_policy_status_change_sensor(\n channel: str,\n slack_token: str,\n asset_selection: AssetSelection,\n warn_after_minutes_overdue: float = 0,\n notify_when_back_on_time: bool = False,\n text_fn: Callable[[FreshnessPolicySensorContext], str] = _default_freshness_message_text_fn,\n blocks_fn: Optional[Callable[[FreshnessPolicySensorContext], List[Dict[Any, Any]]]] = None,\n name: Optional[str] = None,\n dagit_base_url: Optional[str] = None,\n default_status: DefaultSensorStatus = DefaultSensorStatus.STOPPED,\n webserver_base_url: Optional[str] = None,\n):\n """Create a sensor that will message the given Slack channel whenever an asset in the provided\n AssetSelection becomes out of date. Messages are only fired when the state changes, meaning\n only a single slack message will be sent (when the asset begins to be out of date). If\n `notify_when_back_on_time` is set to `True`, a second slack message will be sent once the asset\n is on time again.\n\n Args:\n channel (str): The channel to send the message to (e.g. "#my_channel")\n slack_token (str): The slack token.\n Tokens are typically either user tokens or bot tokens. More in the Slack API\n documentation here: https://api.slack.com/docs/token-types\n asset_selection (AssetSelection): The selection of assets which this sensor will monitor.\n Alerts will only be fired for assets that have a FreshnessPolicy defined.\n warn_after_minutes_overdue (float): How many minutes past the specified FreshnessPolicy this\n sensor will wait before firing an alert (by default, an alert will be fired as soon as\n the policy is violated).\n notify_when_back_on_time (bool): If a success message should be sent when the asset becomes on\n time again.\n text_fn (Optional(Callable[[RunFailureSensorContext], str])): Function which\n takes in the ``FreshnessPolicySensorContext`` and outputs the message you want to send.\n Defaults to a text message that contains the relevant asset key, and the number of\n minutes past its defined freshness policy it currently is.\n The usage of the `text_fn` changes depending on whether you're using `blocks_fn`. If you\n are using `blocks_fn`, this is used as a fallback string to display in notifications. If\n you aren't, this is the main body text of the message. It can be formatted as plain text,\n or with markdown.\n See more details in https://api.slack.com/methods/chat.postMessage#text_usage\n blocks_fn (Callable[[FreshnessPolicySensorContext], List[Dict]]): Function which takes in\n the ``FreshnessPolicySensorContext`` and outputs the message blocks you want to send.\n See information about Blocks in https://api.slack.com/reference/block-kit/blocks\n name: (Optional[str]): The name of the sensor. Defaults to "slack_on_freshness_policy".\n dagit_base_url: (Optional[str]): The base url of your Dagit instance. Specify this to allow\n messages to include deeplinks to the relevant asset page.\n default_status (DefaultSensorStatus): Whether the sensor starts as running or not. The default\n status can be overridden from Dagit or via the GraphQL API.\n webserver_base_url: (Optional[str]): The base url of your Dagit instance. Specify this to allow\n messages to include deeplinks to the relevant asset page.\n\n Examples:\n .. 
code-block:: python\n\n slack_on_freshness_policy = make_slack_on_freshness_policy_status_change_sensor(\n "#my_channel",\n os.getenv("MY_SLACK_TOKEN"),\n )\n\n .. code-block:: python\n\n def my_message_fn(context: FreshnessPolicySensorContext) -> str:\n if context.minutes_overdue == 0:\n return f"Asset {context.asset_key} is currently on time :)"\n return (\n f"Asset {context.asset_key} is currently {context.minutes_overdue} minutes late!!"\n )\n\n slack_on_run_failure = make_slack_on_run_failure_sensor(\n channel="#my_channel",\n slack_token=os.getenv("MY_SLACK_TOKEN"),\n text_fn=my_message_fn,\n webserver_base_url="http://mycoolsite.com",\n )\n\n\n """\n webserver_base_url = normalize_renamed_param(\n webserver_base_url, "webserver_base_url", dagit_base_url, "dagit_base_url"\n )\n slack_client = WebClient(token=slack_token)\n\n @freshness_policy_sensor(\n name=name, asset_selection=asset_selection, default_status=default_status\n )\n def slack_on_freshness_policy(context: FreshnessPolicySensorContext):\n if context.minutes_overdue is None or context.previous_minutes_overdue is None:\n return\n\n if (\n context.minutes_overdue > warn_after_minutes_overdue\n and context.previous_minutes_overdue <= warn_after_minutes_overdue\n ) or (\n notify_when_back_on_time\n and context.minutes_overdue == 0\n and context.previous_minutes_overdue != 0\n ):\n blocks, main_body_text = _build_slack_blocks_and_text(\n context=context,\n text_fn=text_fn,\n blocks_fn=blocks_fn,\n webserver_base_url=webserver_base_url,\n )\n\n slack_client.chat_postMessage(channel=channel, blocks=blocks, text=main_body_text)\n\n return slack_on_freshness_policy
\n
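As a complement to the examples above, a minimal wiring sketch using ``Definitions`` rather than ``@repository`` (assumes ``MY_SLACK_TOKEN`` is set in the environment; the job body is a placeholder):

.. code-block:: python

    import os

    from dagster import Definitions, job, op
    from dagster_slack import make_slack_on_run_failure_sensor


    @op
    def do_work():
        ...


    @job
    def my_job():
        do_work()


    # With no monitored_jobs argument, a failure of any job in this code location
    # triggers the Slack alert.
    slack_on_run_failure = make_slack_on_run_failure_sensor(
        channel="#my_channel",
        slack_token=os.getenv("MY_SLACK_TOKEN", ""),
    )

    defs = Definitions(jobs=[my_job], sensors=[slack_on_run_failure])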
", "current_page_name": "_modules/dagster_slack/sensors", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_slack.sensors"}}, "dagster_snowflake": {"ops": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_snowflake.ops

\nfrom dagster import (\n    Nothing,\n    _check as check,\n    op,\n)\nfrom dagster._core.definitions.input import In\n\n\ndef _core_create_snowflake_command(dagster_decorator, decorator_name, sql, parameters=None):\n    check.str_param(sql, "sql")\n    check.opt_dict_param(parameters, "parameters")\n\n    @dagster_decorator(\n        name=f"snowflake_{decorator_name}",\n        ins={"start": In(Nothing)},\n        required_resource_keys={"snowflake"},\n        tags={"kind": "sql", "sql": sql},\n    )\n    def snowflake_fn(context):\n        context.resources.snowflake.execute_query(sql=sql, parameters=parameters)\n\n    return snowflake_fn\n\n\ndef snowflake_solid_for_query(sql, parameters=None):\n    """This function is a solid factory that constructs solids to execute a snowflake query.\n\n    Note that you can only use `snowflake_solid_for_query` if you know the query you'd like to\n    execute at job construction time. If you'd like to execute queries dynamically during\n    job execution, you should manually execute those queries in your custom solid using the\n    snowflake resource.\n\n    Args:\n        sql (str): The sql query that will execute against the provided snowflake resource.\n        parameters (dict): The parameters for the sql query.\n\n    Returns:\n        SolidDefinition: Returns the constructed solid definition.\n    """\n    return _core_create_snowflake_command(op, "solid", sql, parameters)\n\n\n
[docs]def snowflake_op_for_query(sql, parameters=None):\n """This function is an op factory that constructs an op to execute a snowflake query.\n\n Note that you can only use `snowflake_op_for_query` if you know the query you'd like to\n execute at graph construction time. If you'd like to execute queries dynamically during\n job execution, you should manually execute those queries in your custom op using the\n snowflake resource.\n\n Args:\n sql (str): The sql query that will execute against the provided snowflake resource.\n parameters (dict): The parameters for the sql query.\n\n Returns:\n OpDefinition: Returns the constructed op definition.\n """\n return _core_create_snowflake_command(op, "op", sql, parameters)
\n
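As a rough sketch (the table name and job wiring are illustrative), an op built by ``snowflake_op_for_query`` can be attached to a job by supplying a resource under the ``snowflake`` key, for example via ``snowflake_resource.configured``:

.. code-block:: python

    from dagster import job
    from dagster_snowflake import snowflake_op_for_query, snowflake_resource

    # The SQL is fixed at job-construction time; the generated op requires the
    # "snowflake" resource key.
    count_orders = snowflake_op_for_query("SELECT COUNT(*) FROM ORDERS")

    @job(
        resource_defs={
            "snowflake": snowflake_resource.configured(
                {
                    "account": {"env": "SNOWFLAKE_ACCOUNT"},
                    "user": {"env": "SNOWFLAKE_USER"},
                    "password": {"env": "SNOWFLAKE_PASSWORD"},
                    "database": "MY_DATABASE",
                    "warehouse": "MY_WAREHOUSE",
                }
            )
        }
    )
    def count_orders_job():
        count_orders()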
", "current_page_name": "_modules/dagster_snowflake/ops", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_snowflake.ops"}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_snowflake.resources

\nimport base64\nimport sys\nimport warnings\nfrom contextlib import closing, contextmanager\nfrom typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Union\n\nimport dagster._check as check\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import serialization\nfrom dagster import (\n    ConfigurableResource,\n    IAttachDifferentObjectToOpContext,\n    get_dagster_logger,\n    resource,\n)\nfrom dagster._annotations import public\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster._core.storage.event_log.sql_event_log import SqlDbConnection\nfrom dagster._utils.cached_method import cached_method\nfrom pydantic import Field, root_validator, validator\n\ntry:\n    import snowflake.connector\nexcept ImportError:\n    msg = (\n        "Could not import snowflake.connector. This could mean you have an incompatible version "\n        "of azure-storage-blob installed. dagster-snowflake requires azure-storage-blob<12.0.0; "\n        "this conflicts with dagster-azure which requires azure-storage-blob~=12.0.0 and is "\n        "incompatible with dagster-snowflake. Please uninstall dagster-azure and reinstall "\n        "dagster-snowflake to fix this error."\n    )\n    warnings.warn(msg)\n    raise\n\n\n
[docs]class SnowflakeResource(ConfigurableResource, IAttachDifferentObjectToOpContext):\n """A resource for connecting to the Snowflake data warehouse.\n\n If connector configuration is not set, SnowflakeResource.get_connection() will return a\n `snowflake.connector.Connection <https://docs.snowflake.com/en/developer-guide/python-connector/python-connector-api#object-connection>`__\n object. If connector="sqlalchemy" configuration is set, then SnowflakeResource.get_connection() will\n return a `SQLAlchemy Connection <https://docs.sqlalchemy.org/en/20/core/connections.html#sqlalchemy.engine.Connection>`__\n or a `SQLAlchemy raw connection <https://docs.sqlalchemy.org/en/20/core/connections.html#sqlalchemy.engine.Engine.raw_connection>`__.\n\n A simple example of loading data into Snowflake and subsequently querying that data is shown below:\n\n Examples:\n .. code-block:: python\n\n from dagster import job, op\n from dagster_snowflake import SnowflakeResource\n\n @op\n def get_one(snowflake_resource: SnowflakeResource):\n with snowflake_resource.get_connection() as conn:\n # conn is a snowflake.connector.Connection object\n conn.cursor().execute("SELECT 1")\n\n @job\n def my_snowflake_job():\n get_one()\n\n my_snowflake_job.execute_in_process(\n resources={\n 'snowflake_resource': SnowflakeResource(\n account=EnvVar("SNOWFLAKE_ACCOUNT"),\n user=EnvVar("SNOWFLAKE_USER"),\n password=EnvVar("SNOWFLAKE_PASSWORD")\n database="MY_DATABASE",\n schema="MY_SCHEMA",\n warehouse="MY_WAREHOUSE"\n )\n }\n )\n """\n\n account: Optional[str] = Field(\n default=None,\n description=(\n "Your Snowflake account name. For more details, see the `Snowflake documentation."\n " <https://docs.snowflake.com/developer-guide/python-connector/python-connector-api>`__"\n ),\n )\n\n user: str = Field(description="User login name.")\n\n password: Optional[str] = Field(default=None, description="User password.")\n\n database: Optional[str] = Field(\n default=None,\n description=(\n "Name of the default database to use. After login, you can use ``USE DATABASE`` "\n " to change the database."\n ),\n )\n\n schema_: Optional[str] = Field(\n default=None,\n description=(\n "Name of the default schema to use. After login, you can use ``USE SCHEMA`` to "\n "change the schema."\n ),\n alias="schema",\n ) # schema is a reserved word for pydantic\n\n role: Optional[str] = Field(\n default=None,\n description=(\n "Name of the default role to use. After login, you can use ``USE ROLE`` to change "\n " the role."\n ),\n )\n\n warehouse: Optional[str] = Field(\n default=None,\n description=(\n "Name of the default warehouse to use. After login, you can use ``USE WAREHOUSE`` "\n "to change the role."\n ),\n )\n\n private_key: Optional[str] = Field(\n default=None,\n description=(\n "Raw private key to use. See the `Snowflake documentation"\n " <https://docs.snowflake.com/en/user-guide/key-pair-auth.html>`__ for details."\n " Alternately, set private_key_path and private_key_password. To avoid issues with"\n " newlines in the keys, you can base64 encode the key. You can retrieve the base64"\n " encoded key with this shell command: ``cat rsa_key.p8 | base64``"\n ),\n )\n\n private_key_password: Optional[str] = Field(\n default=None,\n description=(\n "Raw private key password to use. See the `Snowflake documentation"\n " <https://docs.snowflake.com/en/user-guide/key-pair-auth.html>`__ for details."\n " Required for both ``private_key`` and ``private_key_path`` if the private key is"\n " encrypted. 
For unencrypted keys, this config can be omitted or set to None."\n ),\n )\n\n private_key_path: Optional[str] = Field(\n default=None,\n description=(\n "Raw private key path to use. See the `Snowflake documentation"\n " <https://docs.snowflake.com/en/user-guide/key-pair-auth.html>`__ for details."\n " Alternately, set the raw private key as ``private_key``."\n ),\n )\n\n autocommit: Optional[bool] = Field(\n default=None,\n description=(\n "None by default, which honors the Snowflake parameter AUTOCOMMIT. Set to True "\n "or False to enable or disable autocommit mode in the session, respectively."\n ),\n )\n\n client_prefetch_threads: Optional[int] = Field(\n default=None,\n description=(\n "Number of threads used to download the results sets (4 by default). "\n "Increasing the value improves fetch performance but requires more memory."\n ),\n )\n\n client_session_keep_alive: Optional[bool] = Field(\n default=None,\n description=(\n "False by default. Set this to True to keep the session active indefinitely, "\n "even if there is no activity from the user. Make certain to call the close method to "\n "terminate the thread properly or the process may hang."\n ),\n )\n\n login_timeout: Optional[int] = Field(\n default=None,\n description=(\n "Timeout in seconds for login. By default, 60 seconds. The login request gives "\n 'up after the timeout length if the HTTP response is "success".'\n ),\n )\n\n network_timeout: Optional[int] = Field(\n default=None,\n description=(\n "Timeout in seconds for all other operations. By default, none/infinite. A general"\n " request gives up after the timeout length if the HTTP response is not 'success'."\n ),\n )\n\n ocsp_response_cache_filename: Optional[str] = Field(\n default=None,\n description=(\n "URI for the OCSP response cache file. By default, the OCSP response cache "\n "file is created in the cache directory."\n ),\n )\n\n validate_default_parameters: Optional[bool] = Field(\n default=None,\n description=(\n "If True, raise an exception if the warehouse, database, or schema doesn't exist."\n " Defaults to False."\n ),\n )\n\n paramstyle: Optional[str] = Field(\n default=None,\n description=(\n "pyformat by default for client side binding. Specify qmark or numeric to "\n "change bind variable formats for server side binding."\n ),\n )\n\n timezone: Optional[str] = Field(\n default=None,\n description=(\n "None by default, which honors the Snowflake parameter TIMEZONE. Set to a "\n "valid time zone (e.g. America/Los_Angeles) to set the session time zone."\n ),\n )\n\n connector: Optional[str] = Field(\n default=None,\n description=(\n "Indicate alternative database connection engine. Permissible option is "\n "'sqlalchemy' otherwise defaults to use the Snowflake Connector for Python."\n ),\n is_required=False,\n )\n\n cache_column_metadata: Optional[str] = Field(\n default=None,\n description=(\n "Optional parameter when connector is set to sqlalchemy. Snowflake SQLAlchemy takes a"\n " flag ``cache_column_metadata=True`` such that all of column metadata for all tables"\n ' are "cached"'\n ),\n )\n\n numpy: Optional[bool] = Field(\n default=None,\n description=(\n "Optional parameter when connector is set to sqlalchemy. 
To enable fetching "\n "NumPy data types, add numpy=True to the connection parameters."\n ),\n )\n\n authenticator: Optional[str] = Field(\n default=None,\n description="Optional parameter to specify the authentication mechanism to use.",\n )\n\n @validator("paramstyle")\n def validate_paramstyle(cls, v: Optional[str]) -> Optional[str]:\n valid_config = ["pyformat", "qmark", "numeric"]\n if v is not None and v not in valid_config:\n raise ValueError(\n "Snowflake Resource: 'paramstyle' configuration value must be one of:"\n f" {','.join(valid_config)}."\n )\n return v\n\n @validator("connector")\n def validate_connector(cls, v: Optional[str]) -> Optional[str]:\n if v is not None and v != "sqlalchemy":\n raise ValueError(\n "Snowflake Resource: 'connector' configuration value must be None or sqlalchemy."\n )\n return v\n\n @root_validator\n def validate_authentication(cls, values):\n auths_set = 0\n auths_set += 1 if values.get("password") is not None else 0\n auths_set += 1 if values.get("private_key") is not None else 0\n auths_set += 1 if values.get("private_key_path") is not None else 0\n\n # if authenticator is set, there can be 0 or 1 additional auth method;\n # otherwise, ensure at least 1 method is provided\n check.invariant(\n auths_set > 0 or values.get("authenticator") is not None,\n "Missing config: Password, private key, or authenticator authentication required"\n " for Snowflake resource.",\n )\n\n # ensure that only 1 non-authenticator method is provided\n check.invariant(\n auths_set <= 1,\n "Incorrect config: Cannot provide both password and private key authentication to"\n " Snowflake Resource.",\n )\n\n return values\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @property\n @cached_method\n def _connection_args(self) -> Mapping[str, Any]:\n conn_args = {\n k: self._resolved_config_dict.get(k)\n for k in (\n "account",\n "user",\n "password",\n "database",\n "schema",\n "role",\n "warehouse",\n "autocommit",\n "client_prefetch_threads",\n "client_session_keep_alive",\n "login_timeout",\n "network_timeout",\n "ocsp_response_cache_filename",\n "validate_default_parameters",\n "paramstyle",\n "timezone",\n "authenticator",\n )\n if self._resolved_config_dict.get(k) is not None\n }\n if (\n self._resolved_config_dict.get("private_key", None) is not None\n or self._resolved_config_dict.get("private_key_path", None) is not None\n ):\n conn_args["private_key"] = self._snowflake_private_key(self._resolved_config_dict)\n\n return conn_args\n\n @property\n @cached_method\n def _sqlalchemy_connection_args(self) -> Mapping[str, Any]:\n conn_args: Dict[str, Any] = {\n k: self._resolved_config_dict.get(k)\n for k in (\n "account",\n "user",\n "password",\n "database",\n "schema",\n "role",\n "warehouse",\n "cache_column_metadata",\n "numpy",\n )\n if self._resolved_config_dict.get(k) is not None\n }\n\n return conn_args\n\n @property\n @cached_method\n def _sqlalchemy_engine_args(self) -> Mapping[str, Any]:\n config = self._resolved_config_dict\n sqlalchemy_engine_args = {}\n if (\n config.get("private_key", None) is not None\n or config.get("private_key_path", None) is not None\n ):\n # sqlalchemy passes private key args separately, so store them in a new dict\n sqlalchemy_engine_args["private_key"] = self._snowflake_private_key(config)\n if config.get("authenticator", None) is not None:\n sqlalchemy_engine_args["authenticator"] = config["authenticator"]\n\n return sqlalchemy_engine_args\n\n def _snowflake_private_key(self, config) -> bytes:\n # If the 
user has defined a path to a private key, we will use that.\n if config.get("private_key_path", None) is not None:\n # read the file from the path.\n with open(config.get("private_key_path"), "rb") as key:\n private_key = key.read()\n else:\n private_key = config.get("private_key", None)\n\n kwargs = {}\n if config.get("private_key_password", None) is not None:\n kwargs["password"] = config["private_key_password"].encode()\n else:\n kwargs["password"] = None\n\n try:\n p_key = serialization.load_pem_private_key(\n private_key, backend=default_backend(), **kwargs\n )\n except TypeError:\n try:\n private_key = base64.b64decode(private_key)\n p_key = serialization.load_pem_private_key(\n private_key, backend=default_backend(), **kwargs\n )\n except ValueError:\n raise ValueError(\n "Unable to load private key. You may need to base64 encode your private key."\n " You can retrieve the base64 encoded key with this shell command: cat"\n " rsa_key.p8 | base64"\n )\n\n pkb = p_key.private_bytes(\n encoding=serialization.Encoding.DER,\n format=serialization.PrivateFormat.PKCS8,\n encryption_algorithm=serialization.NoEncryption(),\n )\n\n return pkb\n\n @public\n @contextmanager\n def get_connection(\n self, raw_conn: bool = True\n ) -> Iterator[Union[SqlDbConnection, snowflake.connector.SnowflakeConnection]]:\n """Gets a connection to Snowflake as a context manager.\n\n If connector configuration is not set, SnowflakeResource.get_connection() will return a\n `snowflake.connector.Connection <https://docs.snowflake.com/en/developer-guide/python-connector/python-connector-api#object-connection>`__\n If connector="sqlalchemy" configuration is set, then SnowflakeResource.get_connection() will\n return a `SQLAlchemy Connection <https://docs.sqlalchemy.org/en/20/core/connections.html#sqlalchemy.engine.Connection>`__\n or a `SQLAlchemy raw connection <https://docs.sqlalchemy.org/en/20/core/connections.html#sqlalchemy.engine.Engine.raw_connection>`__\n if raw_conn=True.\n\n\n Args:\n raw_conn (bool): If using the sqlalchemy connector, you can set raw_conn to True to create a raw\n connection. Defaults to True.\n\n Examples:\n .. code-block:: python\n\n @op\n def get_query_status(snowflake: SnowflakeResource, query_id):\n with snowflake.get_connection() as conn:\n # conn is a Snowflake Connection object or a SQLAlchemy Connection if\n # sqlalchemy is specified as the connector in the Snowflake Resource config\n\n return conn.get_query_status(query_id)\n\n """\n if self.connector == "sqlalchemy":\n from snowflake.sqlalchemy import URL\n from sqlalchemy import create_engine\n\n engine = create_engine(\n URL(**self._sqlalchemy_connection_args), connect_args=self._sqlalchemy_engine_args\n )\n conn = engine.raw_connection() if raw_conn else engine.connect()\n\n yield conn\n conn.close()\n engine.dispose()\n else:\n conn = snowflake.connector.connect(**self._connection_args)\n\n yield conn\n if not self.autocommit:\n conn.commit()\n conn.close()\n\n def get_object_to_set_on_execution_context(self) -> Any:\n # Directly create a SnowflakeConnection here for backcompat since the SnowflakeConnection\n # has methods this resource does not have\n return SnowflakeConnection(\n config=self._resolved_config_dict,\n log=get_dagster_logger(),\n snowflake_connection_resource=self,\n )
\n\n\n
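A sketch of key-pair authentication with the Pythonic resource, consistent with the validators above (exactly one of ``password``, ``private_key``, or ``private_key_path``); the asset, table, and environment variable names are hypothetical:

.. code-block:: python

    from dagster import Definitions, EnvVar, asset
    from dagster_snowflake import SnowflakeResource

    @asset
    def orders_row_count(snowflake: SnowflakeResource) -> None:
        # Without connector="sqlalchemy", conn is a snowflake.connector connection.
        with snowflake.get_connection() as conn:
            conn.cursor().execute("SELECT COUNT(*) FROM ORDERS")

    defs = Definitions(
        assets=[orders_row_count],
        resources={
            "snowflake": SnowflakeResource(
                account=EnvVar("SNOWFLAKE_ACCOUNT"),
                user=EnvVar("SNOWFLAKE_USER"),
                private_key_path=EnvVar("SNOWFLAKE_PRIVATE_KEY_PATH"),
                private_key_password=EnvVar("SNOWFLAKE_PRIVATE_KEY_PASSWORD"),
                warehouse="MY_WAREHOUSE",
            )
        },
    )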
[docs]class SnowflakeConnection:\n """A connection to Snowflake that can execute queries. In general this class should not be\n directly instantiated, but rather used as a resource in an op or asset via the\n :py:func:`snowflake_resource`.\n\n Note that the SnowflakeConnection is only used by the snowflake_resource. The Pythonic SnowflakeResource does\n not use this SnowflakeConnection class.\n """\n\n def __init__(\n self, config: Mapping[str, str], log, snowflake_connection_resource: SnowflakeResource\n ):\n self.snowflake_connection_resource = snowflake_connection_resource\n self.log = log\n\n
[docs] @public\n @contextmanager\n def get_connection(\n self, raw_conn: bool = True\n ) -> Iterator[Union[SqlDbConnection, snowflake.connector.SnowflakeConnection]]:\n """Gets a connection to Snowflake as a context manager.\n\n If using the execute_query, execute_queries, or load_table_from_local_parquet methods,\n you do not need to create a connection using this context manager.\n\n Args:\n raw_conn (bool): If using the sqlalchemy connector, you can set raw_conn to True to create a raw\n connection. Defaults to True.\n\n Examples:\n .. code-block:: python\n\n @op(\n required_resource_keys={"snowflake"}\n )\n def get_query_status(query_id):\n with context.resources.snowflake.get_connection() as conn:\n # conn is a Snowflake Connection object or a SQLAlchemy Connection if\n # sqlalchemy is specified as the connector in the Snowflake Resource config\n\n return conn.get_query_status(query_id)\n\n """\n with self.snowflake_connection_resource.get_connection(raw_conn=raw_conn) as conn:\n yield conn
\n\n
[docs] @public\n def execute_query(\n self,\n sql: str,\n parameters: Optional[Union[Sequence[Any], Mapping[Any, Any]]] = None,\n fetch_results: bool = False,\n use_pandas_result: bool = False,\n ):\n """Execute a query in Snowflake.\n\n Args:\n sql (str): the query to be executed\n parameters (Optional[Union[Sequence[Any], Mapping[Any, Any]]]): Parameters to be passed to the query. See the\n `Snowflake documentation <https://docs.snowflake.com/en/user-guide/python-connector-example.html#binding-data>`__\n for more information.\n fetch_results (bool): If True, will return the result of the query. Defaults to False. If True\n and use_pandas_result is also True, results will be returned as a Pandas DataFrame.\n use_pandas_result (bool): If True, will return the result of the query as a Pandas DataFrame.\n Defaults to False. If fetch_results is False and use_pandas_result is True, an error will be\n raised.\n\n Returns:\n The result of the query if fetch_results or use_pandas_result is True, otherwise returns None\n\n Examples:\n .. code-block:: python\n\n @op\n def drop_database(snowflake: SnowflakeResource):\n snowflake.execute_query(\n "DROP DATABASE IF EXISTS MY_DATABASE"\n )\n """\n check.str_param(sql, "sql")\n check.opt_inst_param(parameters, "parameters", (list, dict))\n check.bool_param(fetch_results, "fetch_results")\n if not fetch_results and use_pandas_result:\n check.failed("If use_pandas_result is True, fetch_results must also be True.")\n\n with self.get_connection() as conn:\n with closing(conn.cursor()) as cursor:\n if sys.version_info[0] < 3:\n sql = sql.encode("utf-8")\n\n self.log.info("Executing query: " + sql)\n parameters = dict(parameters) if isinstance(parameters, Mapping) else parameters\n cursor.execute(sql, parameters)\n if use_pandas_result:\n return cursor.fetch_pandas_all()\n if fetch_results:\n return cursor.fetchall()
\n\n
[docs] @public\n def execute_queries(\n self,\n sql_queries: Sequence[str],\n parameters: Optional[Union[Sequence[Any], Mapping[Any, Any]]] = None,\n fetch_results: bool = False,\n use_pandas_result: bool = False,\n ) -> Optional[Sequence[Any]]:\n """Execute multiple queries in Snowflake.\n\n Args:\n sql_queries (str): List of queries to be executed in series\n parameters (Optional[Union[Sequence[Any], Mapping[Any, Any]]]): Parameters to be passed to every query. See the\n `Snowflake documentation <https://docs.snowflake.com/en/user-guide/python-connector-example.html#binding-data>`__\n for more information.\n fetch_results (bool): If True, will return the results of the queries as a list. Defaults to False. If True\n and use_pandas_result is also True, results will be returned as Pandas DataFrames.\n use_pandas_result (bool): If True, will return the results of the queries as a list of a Pandas DataFrames.\n Defaults to False. If fetch_results is False and use_pandas_result is True, an error will be\n raised.\n\n Returns:\n The results of the queries as a list if fetch_results or use_pandas_result is True,\n otherwise returns None\n\n Examples:\n .. code-block:: python\n\n @op\n def create_fresh_database(snowflake: SnowflakeResource):\n queries = ["DROP DATABASE IF EXISTS MY_DATABASE", "CREATE DATABASE MY_DATABASE"]\n snowflake.execute_queries(\n sql_queries=queries\n )\n\n """\n check.sequence_param(sql_queries, "sql_queries", of_type=str)\n check.opt_inst_param(parameters, "parameters", (list, dict))\n check.bool_param(fetch_results, "fetch_results")\n if not fetch_results and use_pandas_result:\n check.failed("If use_pandas_result is True, fetch_results must also be True.")\n\n results: List[Any] = []\n with self.get_connection() as conn:\n with closing(conn.cursor()) as cursor:\n for raw_sql in sql_queries:\n sql = raw_sql.encode("utf-8") if sys.version_info[0] < 3 else raw_sql\n self.log.info("Executing query: " + sql)\n parameters = dict(parameters) if isinstance(parameters, Mapping) else parameters\n cursor.execute(sql, parameters)\n if use_pandas_result:\n results = results.append(cursor.fetch_pandas_all()) # type: ignore\n elif fetch_results:\n results.append(cursor.fetchall())\n\n return results if len(results) > 0 else None
\n\n
[docs] @public\n def load_table_from_local_parquet(self, src: str, table: str):\n """Stores the content of a parquet file to a Snowflake table.\n\n Args:\n src (str): the name of the file to store in Snowflake\n table (str): the name of the table to store the data. If the table does not exist, it will\n be created. Otherwise the contents of the table will be replaced with the data in src\n\n Examples:\n .. code-block:: python\n\n import pandas as pd\n import pyarrow as pa\n import pyarrow.parquet as pq\n\n @op\n def write_parquet_file(snowflake: SnowflakeResource):\n df = pd.DataFrame({"one": [1, 2, 3], "ten": [11, 12, 13]})\n table = pa.Table.from_pandas(df)\n pq.write_table(table, "example.parquet')\n snowflake.load_table_from_local_parquet(\n src="example.parquet",\n table="MY_TABLE"\n )\n\n """\n check.str_param(src, "src")\n check.str_param(table, "table")\n\n sql_queries = [\n f"CREATE OR REPLACE TABLE {table} ( data VARIANT DEFAULT NULL);",\n "CREATE OR REPLACE FILE FORMAT parquet_format TYPE = 'parquet';",\n f"PUT {src} @%{table};",\n f"COPY INTO {table} FROM @%{table} FILE_FORMAT = (FORMAT_NAME = 'parquet_format');",\n ]\n\n self.execute_queries(sql_queries)
\n\n\n
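A sketch of calling ``execute_query`` with parameter binding and ``fetch_results`` from an op that uses the legacy ``snowflake_resource``; the table, column, and cutoff value are illustrative:

.. code-block:: python

    from dagster import op

    @op(required_resource_keys={"snowflake"})
    def recent_orders(context):
        # fetch_results=True returns the rows; pass use_pandas_result=True as well
        # to get a Pandas DataFrame instead of a list of tuples.
        rows = context.resources.snowflake.execute_query(
            "SELECT * FROM ORDERS WHERE ORDER_DATE > %(cutoff)s",
            parameters={"cutoff": "2023-01-01"},
            fetch_results=True,
        )
        context.log.info(f"Fetched {len(rows)} rows")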
[docs]@dagster_maintained_resource\n@resource(\n config_schema=SnowflakeResource.to_config_schema(),\n description="This resource is for connecting to the Snowflake data warehouse",\n)\ndef snowflake_resource(context) -> SnowflakeConnection:\n """A resource for connecting to the Snowflake data warehouse. The returned resource object is an\n instance of :py:class:`SnowflakeConnection`.\n\n A simple example of loading data into Snowflake and subsequently querying that data is shown below:\n\n Examples:\n .. code-block:: python\n\n from dagster import job, op\n from dagster_snowflake import snowflake_resource\n\n @op(required_resource_keys={'snowflake'})\n def get_one(context):\n context.resources.snowflake.execute_query('SELECT 1')\n\n @job(resource_defs={'snowflake': snowflake_resource})\n def my_snowflake_job():\n get_one()\n\n my_snowflake_job.execute_in_process(\n run_config={\n 'resources': {\n 'snowflake': {\n 'config': {\n 'account': {'env': 'SNOWFLAKE_ACCOUNT'},\n 'user': {'env': 'SNOWFLAKE_USER'},\n 'password': {'env': 'SNOWFLAKE_PASSWORD'},\n 'database': {'env': 'SNOWFLAKE_DATABASE'},\n 'schema': {'env': 'SNOWFLAKE_SCHEMA'},\n 'warehouse': {'env': 'SNOWFLAKE_WAREHOUSE'},\n }\n }\n }\n }\n )\n """\n snowflake_resource = SnowflakeResource.from_resource_context(context)\n return SnowflakeConnection(\n config=context, log=context.log, snowflake_connection_resource=snowflake_resource\n )
\n
", "current_page_name": "_modules/dagster_snowflake/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_snowflake.resources"}, "snowflake_io_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_snowflake.snowflake_io_manager

\nfrom abc import abstractmethod\nfrom contextlib import contextmanager\nfrom typing import Optional, Sequence, Type, cast\n\nfrom dagster import IOManagerDefinition, OutputContext, io_manager\nfrom dagster._config.pythonic_config import (\n    ConfigurableIOManagerFactory,\n)\nfrom dagster._core.definitions.time_window_partitions import TimeWindow\nfrom dagster._core.storage.db_io_manager import (\n    DbClient,\n    DbIOManager,\n    DbTypeHandler,\n    TablePartitionDimension,\n    TableSlice,\n)\nfrom dagster._core.storage.io_manager import dagster_maintained_io_manager\nfrom pydantic import Field\nfrom sqlalchemy.exc import ProgrammingError\n\nfrom .resources import SnowflakeResource\n\nSNOWFLAKE_DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"\n\n\n
[docs]def build_snowflake_io_manager(\n type_handlers: Sequence[DbTypeHandler], default_load_type: Optional[Type] = None\n) -> IOManagerDefinition:\n """Builds an IO manager definition that reads inputs from and writes outputs to Snowflake.\n\n Args:\n type_handlers (Sequence[DbTypeHandler]): Each handler defines how to translate between\n slices of Snowflake tables and an in-memory type - e.g. a Pandas DataFrame. If only\n one DbTypeHandler is provided, it will be used as teh default_load_type.\n default_load_type (Type): When an input has no type annotation, load it as this type.\n\n Returns:\n IOManagerDefinition\n\n Examples:\n .. code-block:: python\n\n from dagster_snowflake import build_snowflake_io_manager\n from dagster_snowflake_pandas import SnowflakePandasTypeHandler\n from dagster_snowflake_pyspark import SnowflakePySparkTypeHandler\n from dagster import Definitions\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in snowflake\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n snowflake_io_manager = build_snowflake_io_manager([SnowflakePandasTypeHandler(), SnowflakePySparkTypeHandler()])\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "io_manager": snowflake_io_manager.configured({\n "database": "my_database",\n "account" : {"env": "SNOWFLAKE_ACCOUNT"}\n ...\n })\n }\n )\n\n If you do not provide a schema, Dagster will determine a schema based on the assets and ops using\n the IO Manager. For assets, the schema will be determined from the asset key,\n as shown in the above example. The final prefix before the asset name will be used as the schema. For example,\n if the asset ``my_table`` had the key prefix ``["snowflake", "my_schema"]``, the schema ``my_schema`` will be\n used. For ops, the schema can be specified by including a ``schema`` entry in output metadata. If ``schema`` is not provided\n via config or on the asset/op, ``public`` will be used for the schema.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_schema"})}\n )\n def make_my_table() -> pd.DataFrame:\n # the returned value will be stored at my_schema.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata ``columns`` to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pd.DataFrame) -> pd.DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n """\n\n @dagster_maintained_io_manager\n @io_manager(config_schema=SnowflakeIOManager.to_config_schema())\n def snowflake_io_manager(init_context):\n return DbIOManager(\n type_handlers=type_handlers,\n db_client=SnowflakeDbClient(),\n io_manager_name="SnowflakeIOManager",\n database=init_context.resource_config["database"],\n schema=init_context.resource_config.get("schema"),\n default_load_type=default_load_type,\n )\n\n return snowflake_io_manager
\n\n\n
[docs]class SnowflakeIOManager(ConfigurableIOManagerFactory):\n """Base class for an IO manager definition that reads inputs from and writes outputs to Snowflake.\n\n Examples:\n .. code-block:: python\n\n from dagster_snowflake import SnowflakeIOManager\n from dagster_snowflake_pandas import SnowflakePandasTypeHandler\n from dagster_snowflake_pyspark import SnowflakePySparkTypeHandler\n from dagster import Definitions, EnvVar\n\n class MySnowflakeIOManager(SnowflakeIOManager):\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [SnowflakePandasTypeHandler(), SnowflakePySparkTypeHandler()]\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in snowflake\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "io_manager": MySnowflakeIOManager(database="MY_DATABASE", account=EnvVar("SNOWFLAKE_ACCOUNT"), ...)\n }\n )\n\n If you do not provide a schema, Dagster will determine a schema based on the assets and ops using\n the IO Manager. For assets, the schema will be determined from the asset key,\n as shown in the above example. The final prefix before the asset name will be used as the schema. For example,\n if the asset ``my_table`` had the key prefix ``["snowflake", "my_schema"]``, the schema ``my_schema`` will be\n used. For ops, the schema can be specified by including a ``schema`` entry in output metadata. If ``schema`` is not provided\n via config or on the asset/op, ``public`` will be used for the schema.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_schema"})}\n )\n def make_my_table() -> pd.DataFrame:\n # the returned value will be stored at my_schema.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata ``columns`` to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pd.DataFrame) -> pd.DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n """\n\n database: str = Field(description="Name of the database to use.")\n account: str = Field(\n description=(\n "Your Snowflake account name. For more details, see the `Snowflake documentation."\n " <https://docs.snowflake.com/developer-guide/python-connector/python-connector-api>`__"\n ),\n )\n user: str = Field(description="User login name.")\n schema_: Optional[str] = Field(\n default=None, alias="schema", description="Name of the schema to use."\n ) # schema is a reserved word for pydantic\n password: Optional[str] = Field(default=None, description="User password.")\n warehouse: Optional[str] = Field(default=None, description="Name of the warehouse to use.")\n role: Optional[str] = Field(default=None, description="Name of the role to use.")\n private_key: Optional[str] = Field(\n default=None,\n description=(\n "Raw private key to use. See the `Snowflake documentation"\n " <https://docs.snowflake.com/en/user-guide/key-pair-auth.html>`__ for details. To"\n " avoid issues with newlines in the keys, you can base64 encode the key. You can"\n " retrieve the base64 encoded key with this shell command: cat rsa_key.p8 | base64"\n ),\n )\n private_key_path: Optional[str] = Field(\n default=None,\n description=(\n "Path to the private key. 
See the `Snowflake documentation"\n " <https://docs.snowflake.com/en/user-guide/key-pair-auth.html>`__ for details."\n ),\n )\n private_key_password: Optional[str] = Field(\n default=None,\n description=(\n "The password of the private key. See the `Snowflake documentation"\n " <https://docs.snowflake.com/en/user-guide/key-pair-auth.html>`__ for details."\n " Required for both private_key and private_key_path if the private key is encrypted."\n " For unencrypted keys, this config can be omitted or set to None."\n ),\n )\n store_timestamps_as_strings: bool = Field(\n default=False,\n description=(\n "If using Pandas DataFrames, whether to convert time data to strings. If True, time"\n " data will be converted to strings when storing the DataFrame and converted back to"\n " time data when loading the DataFrame. If False, time data without a timezone will be"\n " set to UTC timezone to avoid a Snowflake bug. Defaults to False."\n ),\n )\n authenticator: Optional[str] = Field(\n default=None,\n description="Optional parameter to specify the authentication mechanism to use.",\n )\n\n @staticmethod\n @abstractmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n """type_handlers should return a list of the TypeHandlers that the I/O manager can use.\n\n .. code-block:: python\n\n from dagster_snowflake import SnowflakeIOManager\n from dagster_snowflake_pandas import SnowflakePandasTypeHandler\n from dagster_snowflake_pyspark import SnowflakePySparkTypeHandler\n from dagster import Definitions, EnvVar\n\n class MySnowflakeIOManager(SnowflakeIOManager):\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [SnowflakePandasTypeHandler(), SnowflakePySparkTypeHandler()]\n """\n ...\n\n @staticmethod\n def default_load_type() -> Optional[Type]:\n """If an asset or op is not annotated with an return type, default_load_type will be used to\n determine which TypeHandler to use to store and load the output.\n\n If left unimplemented, default_load_type will return None. In that case, if there is only\n one TypeHandler, the I/O manager will default to loading unannotated outputs with that\n TypeHandler.\n\n .. code-block:: python\n\n from dagster_snowflake import SnowflakeIOManager\n from dagster_snowflake_pandas import SnowflakePandasTypeHandler\n from dagster_snowflake_pyspark import SnowflakePySparkTypeHandler\n from dagster import Definitions, EnvVar\n import pandas as pd\n\n class MySnowflakeIOManager(SnowflakeIOManager):\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [SnowflakePandasTypeHandler(), SnowflakePySparkTypeHandler()]\n\n @staticmethod\n def default_load_type() -> Optional[Type]:\n return pd.DataFrame\n """\n return None\n\n def create_io_manager(self, context) -> DbIOManager:\n return DbIOManager(\n db_client=SnowflakeDbClient(),\n io_manager_name="SnowflakeIOManager",\n database=self.database,\n schema=self.schema_,\n type_handlers=self.type_handlers(),\n default_load_type=self.default_load_type(),\n )
\n\n\nclass SnowflakeDbClient(DbClient):\n @staticmethod\n @contextmanager\n def connect(context, table_slice):\n no_schema_config = (\n {k: v for k, v in context.resource_config.items() if k != "schema"}\n if context.resource_config\n else {}\n )\n with SnowflakeResource(\n schema=table_slice.schema, connector="sqlalchemy", **no_schema_config\n ).get_connection(raw_conn=False) as conn:\n yield conn\n\n @staticmethod\n def ensure_schema_exists(context: OutputContext, table_slice: TableSlice, connection) -> None:\n schemas = connection.execute(\n f"show schemas like '{table_slice.schema}' in database {table_slice.database}"\n ).fetchall()\n if len(schemas) == 0:\n connection.execute(f"create schema {table_slice.schema};")\n\n @staticmethod\n def delete_table_slice(context: OutputContext, table_slice: TableSlice, connection) -> None:\n try:\n connection.execute(_get_cleanup_statement(table_slice))\n except ProgrammingError:\n # table doesn't exist yet, so ignore the error\n pass\n\n @staticmethod\n def get_select_statement(table_slice: TableSlice) -> str:\n col_str = ", ".join(table_slice.columns) if table_slice.columns else "*"\n if table_slice.partition_dimensions and len(table_slice.partition_dimensions) > 0:\n query = (\n f"SELECT {col_str} FROM"\n f" {table_slice.database}.{table_slice.schema}.{table_slice.table} WHERE\\n"\n )\n return query + _partition_where_clause(table_slice.partition_dimensions)\n else:\n return f"""SELECT {col_str} FROM {table_slice.database}.{table_slice.schema}.{table_slice.table}"""\n\n\ndef _get_cleanup_statement(table_slice: TableSlice) -> str:\n """Returns a SQL statement that deletes data in the given table to make way for the output data\n being written.\n """\n if table_slice.partition_dimensions and len(table_slice.partition_dimensions) > 0:\n query = (\n f"DELETE FROM {table_slice.database}.{table_slice.schema}.{table_slice.table} WHERE\\n"\n )\n return query + _partition_where_clause(table_slice.partition_dimensions)\n else:\n return f"DELETE FROM {table_slice.database}.{table_slice.schema}.{table_slice.table}"\n\n\ndef _partition_where_clause(partition_dimensions: Sequence[TablePartitionDimension]) -> str:\n return " AND\\n".join(\n (\n _time_window_where_clause(partition_dimension)\n if isinstance(partition_dimension.partitions, TimeWindow)\n else _static_where_clause(partition_dimension)\n )\n for partition_dimension in partition_dimensions\n )\n\n\ndef _time_window_where_clause(table_partition: TablePartitionDimension) -> str:\n partition = cast(TimeWindow, table_partition.partitions)\n start_dt, end_dt = partition\n start_dt_str = start_dt.strftime(SNOWFLAKE_DATETIME_FORMAT)\n end_dt_str = end_dt.strftime(SNOWFLAKE_DATETIME_FORMAT)\n # Snowflake BETWEEN is inclusive; start <= partition expr <= end. We don't want to remove the next partition so we instead\n # write this as start <= partition expr < end.\n return f"""{table_partition.partition_expr} >= '{start_dt_str}' AND {table_partition.partition_expr} < '{end_dt_str}'"""\n\n\ndef _static_where_clause(table_partition: TablePartitionDimension) -> str:\n partitions = ", ".join(f"'{partition}'" for partition in table_partition.partitions)\n return f"""{table_partition.partition_expr} in ({partitions})"""\n
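The partition handling above can be exercised from an asset by pointing the I/O manager at the column used in the generated ``WHERE`` clause. A sketch, assuming the ``partition_expr`` metadata key and the Pandas-backed ``SnowflakePandasIOManager`` documented below; the asset and column names are hypothetical:

.. code-block:: python

    import pandas as pd
    from dagster import DailyPartitionsDefinition, Definitions, EnvVar, asset
    from dagster_snowflake_pandas import SnowflakePandasIOManager

    @asset(
        partitions_def=DailyPartitionsDefinition(start_date="2023-01-01"),
        key_prefix=["my_schema"],
        metadata={"partition_expr": "ORDER_DATE"},  # column used in the WHERE clause
    )
    def daily_orders(context) -> pd.DataFrame:
        # Only the time window for the partition being materialized is deleted
        # and rewritten (see _get_cleanup_statement above).
        return pd.DataFrame({"ORDER_DATE": [context.partition_key], "AMOUNT": [100]})

    defs = Definitions(
        assets=[daily_orders],
        resources={
            "io_manager": SnowflakePandasIOManager(
                database="MY_DATABASE",
                account=EnvVar("SNOWFLAKE_ACCOUNT"),
                user=EnvVar("SNOWFLAKE_USER"),
                password=EnvVar("SNOWFLAKE_PASSWORD"),
                warehouse="MY_WAREHOUSE",
            )
        },
    )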
", "current_page_name": "_modules/dagster_snowflake/snowflake_io_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_snowflake.snowflake_io_manager"}}, "dagster_snowflake_pandas": {"snowflake_pandas_type_handler": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_snowflake_pandas.snowflake_pandas_type_handler

\nfrom typing import Mapping, Optional, Sequence, Type\n\nimport pandas as pd\nimport pandas.core.dtypes.common as pd_core_dtypes_common\nfrom dagster import InputContext, MetadataValue, OutputContext, TableColumn, TableSchema\nfrom dagster._core.definitions.metadata import RawMetadataValue\nfrom dagster._core.errors import DagsterInvariantViolationError\nfrom dagster._core.storage.db_io_manager import DbTypeHandler, TableSlice\nfrom dagster_snowflake import build_snowflake_io_manager\nfrom dagster_snowflake.snowflake_io_manager import SnowflakeDbClient, SnowflakeIOManager\nfrom snowflake.connector.pandas_tools import pd_writer\n\n\ndef _table_exists(table_slice: TableSlice, connection):\n    tables = connection.execute(\n        f"SHOW TABLES LIKE '{table_slice.table}' IN SCHEMA"\n        f" {table_slice.database}.{table_slice.schema}"\n    ).fetchall()\n    return len(tables) > 0\n\n\ndef _get_table_column_types(table_slice: TableSlice, connection) -> Optional[Mapping[str, str]]:\n    if _table_exists(table_slice, connection):\n        schema_list = connection.execute(f"DESCRIBE TABLE {table_slice.table}").fetchall()\n        return {item[0]: item[1] for item in schema_list}\n\n\ndef _convert_timestamp_to_string(\n    s: pd.Series, column_types: Optional[Mapping[str, str]], table_name: str\n) -> pd.Series:\n    """Converts columns of data of type pd.Timestamp to string so that it can be stored in\n    snowflake.\n    """\n    column_name = str(s.name)\n    if pd_core_dtypes_common.is_datetime_or_timedelta_dtype(s):  # type: ignore  # (bad stubs)\n        if column_types:\n            if "VARCHAR" not in column_types[column_name]:\n                raise DagsterInvariantViolationError(\n                    "Snowflake I/O manager: Snowflake I/O manager configured to convert time data"\n                    f" in DataFrame column {column_name} to strings, but the corresponding"\n                    f" {column_name.upper()} column in table {table_name} is not of type VARCHAR,"\n                    f" it is of type {column_types[column_name]}. 
Please set"\n                    " store_timestamps_as_strings=False in the Snowflake I/O manager configuration"\n                    " to store time data as TIMESTAMP types."\n                )\n        return s.dt.strftime("%Y-%m-%d %H:%M:%S.%f %z")\n    else:\n        return s\n\n\ndef _convert_string_to_timestamp(s: pd.Series) -> pd.Series:\n    """Converts columns of strings in Timestamp format to pd.Timestamp to undo the conversion in\n    _convert_timestamp_to_string.\n\n    This will not convert non-timestamp strings into timestamps (pd.to_datetime will raise an\n    exception if the string cannot be converted)\n    """\n    if isinstance(s[0], str):\n        try:\n            return pd.to_datetime(s.values)  # type: ignore  # (bad stubs)\n        except ValueError:\n            return s\n    else:\n        return s\n\n\ndef _add_missing_timezone(\n    s: pd.Series, column_types: Optional[Mapping[str, str]], table_name: str\n) -> pd.Series:\n    column_name = str(s.name)\n    if pd_core_dtypes_common.is_datetime_or_timedelta_dtype(s):  # type: ignore  # (bad stubs)\n        if column_types:\n            if "VARCHAR" in column_types[column_name]:\n                raise DagsterInvariantViolationError(\n                    f"Snowflake I/O manager: The Snowflake column {column_name.upper()} in table"\n                    f" {table_name} is of type {column_types[column_name]} and should be of type"\n                    f" TIMESTAMP to store the time data in dataframe column {column_name}. Please"\n                    " migrate this column to be of time TIMESTAMP_NTZ(9) to store time data."\n                )\n        return s.dt.tz_localize("UTC")\n    return s\n\n\n
[docs]class SnowflakePandasTypeHandler(DbTypeHandler[pd.DataFrame]):\n """Plugin for the Snowflake I/O Manager that can store and load Pandas DataFrames as Snowflake tables.\n\n Examples:\n .. code-block:: python\n\n from dagster_snowflake import SnowflakeIOManager\n from dagster_snowflake_pandas import SnowflakePandasTypeHandler\n from dagster_snowflake_pyspark import SnowflakePySparkTypeHandler\n from dagster import Definitions, EnvVar\n\n class MySnowflakeIOManager(SnowflakeIOManager):\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [SnowflakePandasTypeHandler(), SnowflakePySparkTypeHandler()]\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in snowflake\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "io_manager": MySnowflakeIOManager(database="MY_DATABASE", account=EnvVar("SNOWFLAKE_ACCOUNT"), ...)\n }\n )\n """\n\n def handle_output(\n self, context: OutputContext, table_slice: TableSlice, obj: pd.DataFrame, connection\n ) -> Mapping[str, RawMetadataValue]:\n from snowflake import connector\n\n connector.paramstyle = "pyformat"\n with_uppercase_cols = obj.rename(str.upper, copy=False, axis="columns")\n column_types = _get_table_column_types(table_slice, connection)\n if context.resource_config and context.resource_config.get(\n "store_timestamps_as_strings", False\n ):\n with_uppercase_cols = with_uppercase_cols.apply(\n lambda x: _convert_timestamp_to_string(x, column_types, table_slice.table),\n axis="index",\n )\n else:\n with_uppercase_cols = with_uppercase_cols.apply(\n lambda x: _add_missing_timezone(x, column_types, table_slice.table), axis="index"\n )\n with_uppercase_cols.to_sql(\n table_slice.table,\n con=connection.engine,\n if_exists="append",\n index=False,\n method=pd_writer,\n )\n\n return {\n "row_count": obj.shape[0],\n "dataframe_columns": MetadataValue.table_schema(\n TableSchema(\n columns=[\n TableColumn(name=str(name), type=str(dtype))\n for name, dtype in obj.dtypes.items()\n ]\n )\n ),\n }\n\n def load_input(\n self, context: InputContext, table_slice: TableSlice, connection\n ) -> pd.DataFrame:\n if table_slice.partition_dimensions and len(context.asset_partition_keys) == 0:\n return pd.DataFrame()\n result = pd.read_sql(\n sql=SnowflakeDbClient.get_select_statement(table_slice), con=connection\n )\n if context.resource_config and context.resource_config.get(\n "store_timestamps_as_strings", False\n ):\n result = result.apply(_convert_string_to_timestamp, axis="index")\n result.columns = map(str.lower, result.columns) # type: ignore # (bad stubs)\n return result\n\n @property\n def supported_types(self):\n return [pd.DataFrame]
\n\n\nsnowflake_pandas_io_manager = build_snowflake_io_manager(\n [SnowflakePandasTypeHandler()], default_load_type=pd.DataFrame\n)\nsnowflake_pandas_io_manager.__doc__ = """\nAn I/O manager definition that reads inputs from and writes Pandas DataFrames to Snowflake. When\nusing the snowflake_pandas_io_manager, any inputs and outputs without type annotations will be loaded\nas Pandas DataFrames.\n\n\nReturns:\n IOManagerDefinition\n\nExamples:\n\n .. code-block:: python\n\n from dagster_snowflake_pandas import snowflake_pandas_io_manager\n from dagster import asset, Definitions\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in snowflake\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "io_manager": snowflake_pandas_io_manager.configured({\n "database": "my_database",\n "account" : {"env": "SNOWFLAKE_ACCOUNT"}\n ...\n })\n }\n )\n\n If you do not provide a schema, Dagster will determine a schema based on the assets and ops using\n the I/O Manager. For assets, the schema will be determined from the asset key.\n For ops, the schema can be specified by including a "schema" entry in output metadata. If "schema" is not provided\n via config or on the asset/op, "public" will be used for the schema.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_schema"})}\n )\n def make_my_table() -> pd.DataFrame:\n # the returned value will be stored at my_schema.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pd.DataFrame) -> pd.DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n"""\n\n\n
[docs]class SnowflakePandasIOManager(SnowflakeIOManager):\n """An I/O manager definition that reads inputs from and writes Pandas DataFrames to Snowflake. When\n using the SnowflakePandasIOManager, any inputs and outputs without type annotations will be loaded\n as Pandas DataFrames.\n\n\n Returns:\n IOManagerDefinition\n\n Examples:\n .. code-block:: python\n\n from dagster_snowflake_pandas import SnowflakePandasIOManager\n from dagster import asset, Definitions, EnvVar\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in snowflake\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "io_manager": SnowflakePandasIOManager(database="MY_DATABASE", account=EnvVar("SNOWFLAKE_ACCOUNT"), ...)\n }\n )\n\n If you do not provide a schema, Dagster will determine a schema based on the assets and ops using\n the I/O Manager. For assets, the schema will be determined from the asset key, as in the above example.\n For ops, the schema can be specified by including a "schema" entry in output metadata. If "schema" is not provided\n via config or on the asset/op, "public" will be used for the schema.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_schema"})}\n )\n def make_my_table() -> pd.DataFrame:\n # the returned value will be stored at my_schema.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: pd.DataFrame) -> pd.DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n """\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [SnowflakePandasTypeHandler()]\n\n @staticmethod\n def default_load_type() -> Optional[Type]:\n return pd.DataFrame
\n
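A sketch of the ``store_timestamps_as_strings`` option handled by ``_convert_timestamp_to_string`` and ``_convert_string_to_timestamp`` above; the asset and column names are hypothetical:

.. code-block:: python

    import pandas as pd
    from dagster import Definitions, EnvVar, asset
    from dagster_snowflake_pandas import SnowflakePandasIOManager

    @asset(key_prefix=["my_schema"])
    def events() -> pd.DataFrame:
        # Timestamp columns are written as strings and converted back to
        # pd.Timestamp when loaded downstream.
        return pd.DataFrame(
            {"EVENT_TIME": [pd.Timestamp("2023-01-01 12:00:00")], "N": [1]}
        )

    defs = Definitions(
        assets=[events],
        resources={
            "io_manager": SnowflakePandasIOManager(
                database="MY_DATABASE",
                account=EnvVar("SNOWFLAKE_ACCOUNT"),
                user=EnvVar("SNOWFLAKE_USER"),
                password=EnvVar("SNOWFLAKE_PASSWORD"),
                store_timestamps_as_strings=True,
            )
        },
    )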
", "current_page_name": "_modules/dagster_snowflake_pandas/snowflake_pandas_type_handler", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_snowflake_pandas.snowflake_pandas_type_handler"}}, "dagster_snowflake_pyspark": {"snowflake_pyspark_type_handler": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_snowflake_pyspark.snowflake_pyspark_type_handler

\nfrom typing import Mapping, Optional, Sequence, Type\n\nimport dagster._check as check\nfrom dagster import InputContext, MetadataValue, OutputContext, TableColumn, TableSchema\nfrom dagster._core.definitions.metadata import RawMetadataValue\nfrom dagster._core.storage.db_io_manager import DbTypeHandler, TableSlice\nfrom dagster_snowflake import SnowflakeIOManager, build_snowflake_io_manager\nfrom dagster_snowflake.snowflake_io_manager import SnowflakeDbClient\nfrom pyspark.sql import DataFrame, SparkSession\nfrom pyspark.sql.types import StructType\n\nSNOWFLAKE_CONNECTOR = "net.snowflake.spark.snowflake"\n\n\ndef _get_snowflake_options(config, table_slice: TableSlice) -> Mapping[str, str]:\n    check.invariant(\n        config.get("warehouse", None) is not None,\n        "Missing config: Warehouse is required when using PySpark with the Snowflake I/O manager.",\n    )\n\n    conf = {\n        "sfURL": f"{config['account']}.snowflakecomputing.com",\n        "sfUser": config["user"],\n        "sfPassword": config["password"],\n        "sfDatabase": config["database"],\n        "sfSchema": table_slice.schema,\n        "sfWarehouse": config["warehouse"],\n    }\n\n    return conf\n\n\n
[docs]class SnowflakePySparkTypeHandler(DbTypeHandler[DataFrame]):\n """Plugin for the Snowflake I/O Manager that can store and load PySpark DataFrames as Snowflake tables.\n\n Examples:\n .. code-block:: python\n\n from dagster_snowflake import SnowflakeIOManager\n from dagster_snowflake_pandas import SnowflakePandasTypeHandler\n from dagster_snowflake_pyspark import SnowflakePySparkTypeHandler\n from dagster import Definitions, EnvVar\n\n class MySnowflakeIOManager(SnowflakeIOManager):\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [SnowflakePandasTypeHandler(), SnowflakePySparkTypeHandler()]\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in snowflake\n )\n def my_table() -> pd.DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "io_manager": MySnowflakeIOManager(database="MY_DATABASE", account=EnvVar("SNOWFLAKE_ACCOUNT"), warehouse="my_warehouse", ...)\n }\n )\n\n """\n\n def handle_output(\n self, context: OutputContext, table_slice: TableSlice, obj: DataFrame, _\n ) -> Mapping[str, RawMetadataValue]:\n options = _get_snowflake_options(context.resource_config, table_slice)\n\n with_uppercase_cols = obj.toDF(*[c.upper() for c in obj.columns])\n\n with_uppercase_cols.write.format(SNOWFLAKE_CONNECTOR).options(**options).option(\n "dbtable", table_slice.table\n ).mode("append").save()\n\n return {\n "dataframe_columns": MetadataValue.table_schema(\n TableSchema(\n columns=[\n TableColumn(name=field.name, type=field.dataType.typeName())\n for field in obj.schema.fields\n ]\n )\n ),\n }\n\n def load_input(self, context: InputContext, table_slice: TableSlice, _) -> DataFrame:\n options = _get_snowflake_options(context.resource_config, table_slice)\n\n spark = SparkSession.builder.getOrCreate() # type: ignore\n if table_slice.partition_dimensions and len(context.asset_partition_keys) == 0:\n return spark.createDataFrame([], StructType([]))\n\n df = (\n spark.read.format(SNOWFLAKE_CONNECTOR)\n .options(**options)\n .option("query", SnowflakeDbClient.get_select_statement(table_slice))\n .load()\n )\n return df.toDF(*[c.lower() for c in df.columns])\n\n @property\n def supported_types(self):\n return [DataFrame]
\n\n\nsnowflake_pyspark_io_manager = build_snowflake_io_manager(\n [SnowflakePySparkTypeHandler()], default_load_type=DataFrame\n)\nsnowflake_pyspark_io_manager.__doc__ = """\nAn I/O manager definition that reads inputs from and writes PySpark DataFrames to Snowflake. When\nusing the snowflake_pyspark_io_manager, any inputs and outputs without type annotations will be loaded\nas PySpark DataFrames.\n\nReturns:\n IOManagerDefinition\n\nExamples:\n\n .. code-block:: python\n\n from dagster_snowflake_pyspark import snowflake_pyspark_io_manager\n from pyspark.sql import DataFrame\n from dagster import Definitions\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in snowflake\n )\n def my_table() -> DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "io_manager": snowflake_pyspark_io_manager.configured({\n "database": "my_database",\n "warehouse": "my_warehouse", # required for snowflake_pyspark_io_manager\n "account" : {"env": "SNOWFLAKE_ACCOUNT"},\n "password": {"env": "SNOWFLAKE_PASSWORD"},\n ...\n })\n }\n )\n\n Note that the warehouse configuration value is required when using the snowflake_pyspark_io_manager\n\n If you do not provide a schema, Dagster will determine a schema based on the assets and ops using\n the I/O Manager. For assets, the schema will be determined from the asset key.\n For ops, the schema can be specified by including a "schema" entry in output metadata. If "schema" is not provided\n via config or on the asset/op, "public" will be used for the schema.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_schema"})}\n )\n def make_my_table() -> DataFrame:\n # the returned value will be stored at my_schema.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: DataFrame) -> DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n"""\n\n\n
[docs]class SnowflakePySparkIOManager(SnowflakeIOManager):\n """An I/O manager definition that reads inputs from and writes PySpark DataFrames to Snowflake. When\n using the SnowflakePySparkIOManager, any inputs and outputs without type annotations will be loaded\n as PySpark DataFrames.\n\n Returns:\n IOManagerDefinition\n\n Examples:\n .. code-block:: python\n\n from dagster_snowflake_pyspark import SnowflakePySparkIOManager\n from pyspark.sql import DataFrame\n from dagster import Definitions, EnvVar\n\n @asset(\n key_prefix=["my_schema"] # will be used as the schema in snowflake\n )\n def my_table() -> DataFrame: # the name of the asset will be the table name\n ...\n\n defs = Definitions(\n assets=[my_table],\n resources={\n "io_manager": SnowflakePySparkIOManager(\n database="my_database",\n warehouse="my_warehouse", # required for SnowflakePySparkIOManager\n account=EnvVar("SNOWFLAKE_ACCOUNT"),\n password=EnvVar("SNOWFLAKE_PASSWORD"),\n ...\n )\n }\n )\n\n Note that the warehouse configuration value is required when using the SnowflakePySparkIOManager\n\n If you do not provide a schema, Dagster will determine a schema based on the assets and ops using\n the I/O Manager. For assets, the schema will be determined from the asset key, as in the above example.\n For ops, the schema can be specified by including a "schema" entry in output metadata. If "schema" is not provided\n via config or on the asset/op, "public" will be used for the schema.\n\n .. code-block:: python\n\n @op(\n out={"my_table": Out(metadata={"schema": "my_schema"})}\n )\n def make_my_table() -> DataFrame:\n # the returned value will be stored at my_schema.my_table\n ...\n\n To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the\n In or AssetIn.\n\n .. code-block:: python\n\n @asset(\n ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}\n )\n def my_table_a(my_table: DataFrame) -> DataFrame:\n # my_table will just contain the data from column "a"\n ...\n\n """\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n @staticmethod\n def type_handlers() -> Sequence[DbTypeHandler]:\n return [SnowflakePySparkTypeHandler()]\n\n @staticmethod\n def default_load_type() -> Optional[Type]:\n return DataFrame
\n
", "current_page_name": "_modules/dagster_snowflake_pyspark/snowflake_pyspark_type_handler", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_snowflake_pyspark.snowflake_pyspark_type_handler"}}, "dagster_spark": {"configs": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_spark.configs

\n"""Spark Configuration.\n\nIn this file we define the key configuration parameters for submitting Spark jobs. Spark can be run\nin a variety of deployment contexts. See the Spark documentation at\nhttps://spark.apache.org/docs/latest/submitting-applications.html for a more in-depth summary of\nSpark deployment contexts and configuration.\n"""\nfrom dagster import Field, StringSource\n\nfrom .configs_spark import spark_config\nfrom .types import SparkDeployMode\n\n\n
[docs]def define_spark_config():\n """Spark configuration.\n\n See the Spark documentation for reference:\n https://spark.apache.org/docs/latest/submitting-applications.html\n """\n master_url = Field(\n StringSource,\n description="The master URL for the cluster (e.g. spark://23.195.26.187:7077)",\n is_required=True,\n )\n\n deploy_mode = Field(\n SparkDeployMode,\n description="""Whether to deploy your driver on the worker nodes (cluster) or locally as an\n external client (client) (default: client). A common deployment strategy is to submit your\n application from a gateway machine that is physically co-located with your worker machines\n (e.g. Master node in a standalone EC2 cluster). In this setup, client mode is appropriate.\n In client mode, the driver is launched directly within the spark-submit process which acts\n as a client to the cluster. The input and output of the application is attached to the\n console. Thus, this mode is especially suitable for applications that involve the REPL (e.g.\n Spark shell).""",\n is_required=False,\n )\n\n application_jar = Field(\n StringSource,\n description="""Path to a bundled jar including your application and all\n dependencies. The URL must be globally visible inside of your cluster, for\n instance, an hdfs:// path or a file:// path that is present on all nodes.\n """,\n is_required=True,\n )\n\n application_arguments = Field(\n StringSource,\n description="Arguments passed to the main method of your main class, if any",\n is_required=False,\n )\n\n spark_home = Field(\n StringSource,\n description=(\n "The path to your spark installation. Defaults to $SPARK_HOME at runtime if not"\n " provided."\n ),\n is_required=False,\n )\n\n return {\n "master_url": master_url,\n "deploy_mode": deploy_mode,\n "application_jar": application_jar,\n "spark_conf": spark_config(),\n "spark_home": spark_home,\n "application_arguments": application_arguments,\n }
\n
", "current_page_name": "_modules/dagster_spark/configs", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_spark.configs"}, "ops": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_spark.ops

\nfrom dagster import (\n    In,\n    Nothing,\n    Out,\n    _check as check,\n    op,\n)\n\nfrom .configs import define_spark_config\n\n\n
[docs]def create_spark_op(\n name, main_class, description=None, required_resource_keys=frozenset(["spark"])\n):\n check.str_param(name, "name")\n check.str_param(main_class, "main_class")\n check.opt_str_param(description, "description", "A parameterized Spark job.")\n check.set_param(required_resource_keys, "required_resource_keys")\n\n @op(\n name=name,\n description=description,\n config_schema=define_spark_config(),\n ins={"start": In(Nothing)},\n out=Out(Nothing),\n tags={"kind": "spark", "main_class": main_class},\n required_resource_keys=required_resource_keys,\n )\n def spark_op(context):\n context.resources.spark.run_spark_job(context.op_config, main_class)\n\n return spark_op
\n
", "current_page_name": "_modules/dagster_spark/ops", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_spark.ops"}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_spark.resources

\nimport os\nimport subprocess\n\nimport dagster._check as check\nfrom dagster import resource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster._core.log_manager import DagsterLogManager\n\nfrom .types import SparkOpError\nfrom .utils import construct_spark_shell_command\n\n\nclass SparkResource:\n    def __init__(self, logger):\n        self.logger = check.inst_param(logger, "logger", DagsterLogManager)\n\n    def run_spark_job(self, config, main_class):\n        check.dict_param(config, "config")\n        check.str_param(main_class, "main_class")\n\n        # Extract parameters from config\n        (\n            master_url,\n            deploy_mode,\n            application_jar,\n            spark_conf,\n            application_arguments,\n            spark_home,\n        ) = [\n            config.get(k)\n            for k in (\n                "master_url",\n                "deploy_mode",\n                "application_jar",\n                "spark_conf",\n                "application_arguments",\n                "spark_home",\n            )\n        ]\n\n        if not os.path.exists(application_jar):\n            raise SparkOpError(\n                f"Application jar {application_jar} does not exist. A valid jar must be "\n                "built before running this op."\n            )\n\n        spark_shell_cmd = construct_spark_shell_command(\n            application_jar=application_jar,\n            main_class=main_class,\n            master_url=master_url,\n            spark_conf=spark_conf,\n            deploy_mode=deploy_mode,\n            application_arguments=application_arguments,\n            spark_home=spark_home,\n        )\n        self.logger.info("Running spark-submit: " + " ".join(spark_shell_cmd))\n\n        retcode = subprocess.call(" ".join(spark_shell_cmd), shell=True)\n\n        if retcode != 0:\n            raise SparkOpError("Spark job failed. Please consult your logs.")\n\n\n
[docs]@dagster_maintained_resource\n@resource\ndef spark_resource(context):\n return SparkResource(context.log)
\n
", "current_page_name": "_modules/dagster_spark/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_spark.resources"}, "types": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_spark.types

\nfrom dagster import Enum, EnumValue\n\nSparkDeployModeCluster = EnumValue("cluster")\nSparkDeployModeClient = EnumValue("client")\nSparkDeployMode = Enum(\n    name="SparkDeployMode", enum_values=[SparkDeployModeCluster, SparkDeployModeClient]\n)\n\n\n
[docs]class SparkOpError(Exception):\n pass
\n
", "current_page_name": "_modules/dagster_spark/types", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_spark.types"}, "utils": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_spark.utils

\nimport itertools\nimport os\n\nimport dagster._check as check\n\nfrom .types import SparkOpError\n\n\ndef flatten_dict(d):\n    def _flatten_dict(d, result, key_path=None):\n        """Iterates an arbitrarily nested dictionary and yield dot-notation key:value tuples.\n\n        {'foo': {'bar': 3, 'baz': 1}, {'other': {'key': 1}} =>\n            [('foo.bar', 3), ('foo.baz', 1), ('other.key', 1)]\n\n        """\n        for k, v in d.items():\n            new_key_path = (key_path or []) + [k]\n            if isinstance(v, dict):\n                _flatten_dict(v, result, new_key_path)\n            else:\n                result.append((".".join(new_key_path), v))\n\n    result = []\n    if d is not None:\n        _flatten_dict(d, result)\n    return result\n\n\ndef parse_spark_config(spark_conf):\n    """Convert spark conf dict to list of CLI arguments.\n\n    For each key-value pair in spark conf, we need to pass to CLI in format:\n\n    --conf "key=value"\n    """\n    spark_conf_list = flatten_dict(spark_conf)\n    return format_for_cli(spark_conf_list)\n\n\ndef format_for_cli(spark_conf_list):\n    return list(\n        itertools.chain.from_iterable([("--conf", "{}={}".format(*c)) for c in spark_conf_list])\n    )\n\n\n
[docs]def construct_spark_shell_command(\n application_jar,\n main_class,\n master_url=None,\n spark_conf=None,\n deploy_mode=None,\n application_arguments=None,\n spark_home=None,\n):\n """Constructs the spark-submit command for a Spark job."""\n check.opt_str_param(master_url, "master_url")\n check.str_param(application_jar, "application_jar")\n spark_conf = check.opt_dict_param(spark_conf, "spark_conf")\n check.opt_str_param(deploy_mode, "deploy_mode")\n check.opt_str_param(application_arguments, "application_arguments")\n check.opt_str_param(spark_home, "spark_home")\n\n spark_home = spark_home if spark_home else os.environ.get("SPARK_HOME")\n if spark_home is None:\n raise SparkOpError(\n "No spark home set. You must either pass spark_home in config or "\n "set $SPARK_HOME in your environment (got None)."\n )\n\n master_url = ["--master", master_url] if master_url else []\n deploy_mode = ["--deploy-mode", deploy_mode] if deploy_mode else []\n\n spark_shell_cmd = (\n [f"{spark_home}/bin/spark-submit", "--class", main_class]\n + master_url\n + deploy_mode\n + parse_spark_config(spark_conf)\n + [application_jar]\n + [application_arguments]\n )\n return spark_shell_cmd
\n
", "current_page_name": "_modules/dagster_spark/utils", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_spark.utils"}}, "dagster_ssh": {"resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_ssh.resources

\nimport getpass\nimport os\nfrom io import StringIO\n\nimport paramiko\nfrom dagster import (\n    BoolSource,\n    Field,\n    IntSource,\n    StringSource,\n    _check as check,\n    resource,\n)\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster._utils import mkdir_p\nfrom dagster._utils.merger import merge_dicts\nfrom paramiko.config import SSH_PORT\nfrom sshtunnel import SSHTunnelForwarder\n\n\ndef key_from_str(key_str):\n    """Creates a paramiko SSH key from a string."""\n    check.str_param(key_str, "key_str")\n\n    # py2 StringIO doesn't support with\n    key_file = StringIO(key_str)\n    result = paramiko.RSAKey.from_private_key(key_file)\n    key_file.close()\n    return result\n\n\nclass SSHResource:\n    """Resource for ssh remote execution using Paramiko.\n\n    ref: https://github.com/paramiko/paramiko\n    """\n\n    def __init__(\n        self,\n        remote_host,\n        remote_port,\n        username=None,\n        password=None,\n        key_file=None,\n        key_string=None,\n        timeout=10,\n        keepalive_interval=30,\n        compress=True,\n        no_host_key_check=True,\n        allow_host_key_change=False,\n        logger=None,\n    ):\n        self.remote_host = check.str_param(remote_host, "remote_host")\n        self.remote_port = check.opt_int_param(remote_port, "remote_port")\n        self.username = check.opt_str_param(username, "username")\n        self.password = check.opt_str_param(password, "password")\n        self.key_file = check.opt_str_param(key_file, "key_file")\n        self.timeout = check.opt_int_param(timeout, "timeout")\n        self.keepalive_interval = check.opt_int_param(keepalive_interval, "keepalive_interval")\n        self.compress = check.opt_bool_param(compress, "compress")\n        self.no_host_key_check = check.opt_bool_param(no_host_key_check, "no_host_key_check")\n        self.log = logger\n\n        self.host_proxy = None\n\n        # Create RSAKey object from private key string\n        self.key_obj = key_from_str(key_string) if key_string is not None else None\n\n        # Auto detecting username values from system\n        if not self.username:\n            logger.debug(\n                "username to ssh to host: %s is not specified. Using system's default provided by"\n                " getpass.getuser()"\n                % self.remote_host\n            )\n            self.username = getpass.getuser()\n\n        user_ssh_config_filename = os.path.expanduser("~/.ssh/config")\n        if os.path.isfile(user_ssh_config_filename):\n            ssh_conf = paramiko.SSHConfig()\n            ssh_conf.parse(open(user_ssh_config_filename, encoding="utf8"))\n            host_info = ssh_conf.lookup(self.remote_host)\n            if host_info and host_info.get("proxycommand"):\n                self.host_proxy = paramiko.ProxyCommand(host_info.get("proxycommand"))\n\n            if not (self.password or self.key_file):\n                if host_info and host_info.get("identityfile"):\n                    self.key_file = host_info.get("identityfile")[0]\n\n    def get_connection(self):\n        """Opens a SSH connection to the remote host.\n\n        :rtype: paramiko.client.SSHClient\n        """\n        client = paramiko.SSHClient()\n        client.load_system_host_keys()\n        if self.no_host_key_check:\n            self.log.warning(\n                "No Host Key Verification. 
This won't protect against Man-In-The-Middle attacks"\n            )\n            # Default is RejectPolicy\n            client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n        if self.password and self.password.strip():\n            client.connect(\n                hostname=self.remote_host,\n                username=self.username,\n                password=self.password,\n                key_filename=self.key_file,\n                pkey=self.key_obj,\n                timeout=self.timeout,\n                compress=self.compress,\n                port=self.remote_port,\n                sock=self.host_proxy,\n                look_for_keys=False,\n            )\n        else:\n            client.connect(\n                hostname=self.remote_host,\n                username=self.username,\n                key_filename=self.key_file,\n                pkey=self.key_obj,\n                timeout=self.timeout,\n                compress=self.compress,\n                port=self.remote_port,\n                sock=self.host_proxy,\n            )\n\n        if self.keepalive_interval:\n            client.get_transport().set_keepalive(self.keepalive_interval)\n\n        return client\n\n    def get_tunnel(self, remote_port, remote_host="localhost", local_port=None):\n        check.int_param(remote_port, "remote_port")\n        check.str_param(remote_host, "remote_host")\n        check.opt_int_param(local_port, "local_port")\n\n        if local_port is not None:\n            local_bind_address = ("localhost", local_port)\n        else:\n            local_bind_address = ("localhost",)\n\n        # Will prefer key string if specified, otherwise use the key file\n        pkey = self.key_obj if self.key_obj else self.key_file\n\n        if self.password and self.password.strip():\n            client = SSHTunnelForwarder(\n                self.remote_host,\n                ssh_port=self.remote_port,\n                ssh_username=self.username,\n                ssh_password=self.password,\n                ssh_pkey=pkey,\n                ssh_proxy=self.host_proxy,\n                local_bind_address=local_bind_address,\n                remote_bind_address=(remote_host, remote_port),\n                logger=self.log,\n            )\n        else:\n            client = SSHTunnelForwarder(\n                self.remote_host,\n                ssh_port=self.remote_port,\n                ssh_username=self.username,\n                ssh_pkey=pkey,\n                ssh_proxy=self.host_proxy,\n                local_bind_address=local_bind_address,\n                remote_bind_address=(remote_host, remote_port),\n                host_pkey_directories=[],\n                logger=self.log,\n            )\n\n        return client\n\n    def sftp_get(self, remote_filepath, local_filepath):\n        check.str_param(remote_filepath, "remote_filepath")\n        check.str_param(local_filepath, "local_filepath")\n        conn = self.get_connection()\n        with conn.open_sftp() as sftp_client:\n            local_folder = os.path.dirname(local_filepath)\n\n            # Create intermediate directories if they don't exist\n            mkdir_p(local_folder)\n\n            self.log.info(f"Starting to transfer from {remote_filepath} to {local_filepath}")\n\n            sftp_client.get(remote_filepath, local_filepath)\n\n        conn.close()\n        return local_filepath\n\n    def sftp_put(self, remote_filepath, local_filepath, confirm=True):\n        check.str_param(remote_filepath, "remote_filepath")\n        
check.str_param(local_filepath, "local_filepath")\n        conn = self.get_connection()\n        with conn.open_sftp() as sftp_client:\n            self.log.info(f"Starting to transfer file from {local_filepath} to {remote_filepath}")\n\n            sftp_client.put(local_filepath, remote_filepath, confirm=confirm)\n\n        conn.close()\n        return local_filepath\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n config_schema={\n "remote_host": Field(\n StringSource, description="remote host to connect to", is_required=True\n ),\n "remote_port": Field(\n IntSource,\n description="port of remote host to connect (Default is paramiko SSH_PORT)",\n is_required=False,\n default_value=SSH_PORT,\n ),\n "username": Field(\n StringSource, description="username to connect to the remote_host", is_required=False\n ),\n "password": Field(\n StringSource,\n description="password of the username to connect to the remote_host",\n is_required=False,\n ),\n "key_file": Field(\n StringSource,\n description="key file to use to connect to the remote_host.",\n is_required=False,\n ),\n "key_string": Field(\n StringSource,\n description="key string to use to connect to remote_host",\n is_required=False,\n ),\n "timeout": Field(\n IntSource,\n description="timeout for the attempt to connect to the remote_host.",\n is_required=False,\n default_value=10,\n ),\n "keepalive_interval": Field(\n IntSource,\n description="send a keepalive packet to remote host every keepalive_interval seconds",\n is_required=False,\n default_value=30,\n ),\n "compress": Field(BoolSource, is_required=False, default_value=True),\n "no_host_key_check": Field(BoolSource, is_required=False, default_value=True),\n "allow_host_key_change": Field(\n BoolSource, description="[Deprecated]", is_required=False, default_value=False\n ),\n }\n)\ndef ssh_resource(init_context):\n args = init_context.resource_config\n args = merge_dicts(init_context.resource_config, {"logger": init_context.log})\n return SSHResource(**args)
\n
", "current_page_name": "_modules/dagster_ssh/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_ssh.resources"}}, "dagster_twilio": {"resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_twilio.resources

\nfrom dagster import ConfigurableResource, resource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom dagster._core.execution.context.init import InitResourceContext\nfrom pydantic import Field\nfrom twilio.rest import Client\n\n\n
[docs]class TwilioResource(ConfigurableResource):\n """This resource is for connecting to Twilio."""\n\n account_sid: str = Field(\n description=(\n "Twilio Account SID, created with your Twilio account. This can be found on your Twilio"\n " dashboard, see"\n " https://www.twilio.com/blog/twilio-access-tokens-python"\n ),\n )\n auth_token: str = Field(\n description=(\n "Twilio Authentication Token, created with your Twilio account. This can be found on"\n " your Twilio dashboard, see https://www.twilio.com/blog/twilio-access-tokens-python"\n ),\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def create_client(self) -> Client:\n return Client(self.account_sid, self.auth_token)
\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n config_schema=TwilioResource.to_config_schema(),\n description="This resource is for connecting to Twilio",\n)\ndef twilio_resource(context: InitResourceContext) -> Client:\n return TwilioResource.from_resource_context(context).create_client()
\n
", "current_page_name": "_modules/dagster_twilio/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_twilio.resources"}}, "dagster_wandb": {"io_manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_wandb.io_manager

\nimport datetime\nimport os\nimport pickle\nimport platform\nimport shutil\nimport sys\nimport time\nimport uuid\nfrom contextlib import contextmanager\nfrom typing import List, Optional\n\nfrom dagster import (\n    Field,\n    InitResourceContext,\n    InputContext,\n    Int,\n    IOManager,\n    MetadataValue,\n    OutputContext,\n    String,\n    io_manager,\n)\nfrom dagster._core.storage.io_manager import dagster_maintained_io_manager\nfrom wandb import Artifact\nfrom wandb.data_types import WBValue\n\nfrom .resources import WANDB_CLOUD_HOST\nfrom .utils.errors import (\n    WandbArtifactsIOManagerError,\n    raise_on_empty_configuration,\n    raise_on_unknown_partition_keys,\n    raise_on_unknown_read_configuration_keys,\n    raise_on_unknown_write_configuration_keys,\n)\nfrom .utils.pickling import (\n    ACCEPTED_SERIALIZATION_MODULES,\n    pickle_artifact_content,\n    unpickle_artifact_content,\n)\nfrom .version import __version__\n\nif sys.version_info >= (3, 8):\n    from typing import TypedDict\nelse:\n    from typing_extensions import TypedDict\n\n\nclass Config(TypedDict):\n    dagster_run_id: str\n    wandb_host: str\n    wandb_entity: str\n    wandb_project: str\n    wandb_run_name: Optional[str]\n    wandb_run_id: Optional[str]\n    wandb_run_tags: Optional[List[str]]\n    base_dir: str\n    cache_duration_in_minutes: Optional[int]\n\n\nclass ArtifactsIOManager(IOManager):\n    """IO Manager to handle Artifacts in Weights & Biases (W&B) .\n\n    It handles 3 different inputs:\n    - Pickable objects (the serialization module is configurable)\n    - W&B Objects (Audio, Table, Image, etc)\n    - W&B Artifacts\n    """\n\n    def __init__(self, wandb_client, config: Config):\n        self.wandb = wandb_client\n\n        dagster_run_id = config["dagster_run_id"]\n        self.dagster_run_id = dagster_run_id\n        self.wandb_host = config["wandb_host"]\n        self.wandb_entity = config["wandb_entity"]\n        self.wandb_project = config["wandb_project"]\n        self.wandb_run_id = config.get("wandb_run_id") or dagster_run_id\n        self.wandb_run_name = config.get("wandb_run_name") or f"dagster-run-{dagster_run_id[0:8]}"\n        # augments the run tags\n        wandb_run_tags = config["wandb_run_tags"] or []\n        if "dagster_wandb" not in wandb_run_tags:\n            wandb_run_tags = [*wandb_run_tags, "dagster_wandb"]\n        self.wandb_run_tags = wandb_run_tags\n\n        self.base_dir = config["base_dir"]\n        cache_duration_in_minutes = config["cache_duration_in_minutes"]\n        default_cache_expiration_in_minutes = 60 * 24 * 30  # 60 minutes * 24 hours * 30 days\n        self.cache_duration_in_minutes = (\n            cache_duration_in_minutes\n            if cache_duration_in_minutes is not None\n            else default_cache_expiration_in_minutes\n        )\n\n    def _get_local_storage_path(self):\n        path = self.base_dir\n        if os.path.basename(path) != "storage":\n            path = os.path.join(path, "storage")\n        path = os.path.join(path, "wandb_artifacts_manager")\n        os.makedirs(path, exist_ok=True)\n        return path\n\n    def _get_artifacts_path(self, name, version):\n        local_storage_path = self._get_local_storage_path()\n        path = os.path.join(local_storage_path, "artifacts", f"{name}.{version}")\n        os.makedirs(path, exist_ok=True)\n        return path\n\n    def _get_wandb_logs_path(self):\n        local_storage_path = self._get_local_storage_path()\n        # Adding a random uuid to avoid 
collisions in multi-process context\n        path = os.path.join(local_storage_path, "runs", self.dagster_run_id, str(uuid.uuid4()))\n        os.makedirs(path, exist_ok=True)\n        return path\n\n    def _clean_local_storage_path(self):\n        local_storage_path = self._get_local_storage_path()\n        cache_duration_in_minutes = self.cache_duration_in_minutes\n        current_timestamp = int(time.time())\n        expiration_timestamp = current_timestamp - (\n            cache_duration_in_minutes * 60  # convert to seconds\n        )\n\n        for root, dirs, files in os.walk(local_storage_path, topdown=False):\n            for name in files:\n                current_file_path = os.path.join(root, name)\n                most_recent_access = os.lstat(current_file_path).st_atime\n                if most_recent_access <= expiration_timestamp or cache_duration_in_minutes == 0:\n                    os.remove(current_file_path)\n            for name in dirs:\n                current_dir_path = os.path.join(root, name)\n                if not os.path.islink(current_dir_path):\n                    if len(os.listdir(current_dir_path)) == 0 or cache_duration_in_minutes == 0:\n                        shutil.rmtree(current_dir_path)\n\n    @contextmanager\n    def wandb_run(self):\n        self.wandb.init(\n            id=self.wandb_run_id,\n            name=self.wandb_run_name,\n            project=self.wandb_project,\n            entity=self.wandb_entity,\n            dir=self._get_wandb_logs_path(),\n            tags=self.wandb_run_tags,\n            anonymous="never",\n            resume="allow",\n        )\n        try:\n            yield self.wandb.run\n        finally:\n            self.wandb.finish()\n            self._clean_local_storage_path()\n\n    def _upload_artifact(self, context: OutputContext, obj):\n        if not context.has_partition_key and context.has_asset_partitions:\n            raise WandbArtifactsIOManagerError(\n                "Sorry, but the Weights & Biases (W&B) IO Manager can't handle processing several"\n                " partitions at the same time within a single run. Please process each partition"\n                " separately. If you think this might be an error, don't hesitate to reach out to"\n                " Weights & Biases Support."\n            )\n\n        with self.wandb_run() as run:\n            parameters = {}\n            if context.metadata is not None:\n                parameters = context.metadata.get("wandb_artifact_configuration", {})\n\n            raise_on_unknown_write_configuration_keys(parameters)\n\n            serialization_module = parameters.get("serialization_module", {})\n            serialization_module_name = serialization_module.get("name", "pickle")\n\n            if serialization_module_name not in ACCEPTED_SERIALIZATION_MODULES:\n                raise WandbArtifactsIOManagerError(\n                    f"Oops! It looks like the value you provided, '{serialization_module_name}',"\n                    " isn't recognized as a valid serialization module. 
Here are the ones we do"\n                    f" support: {ACCEPTED_SERIALIZATION_MODULES}."\n                )\n\n            serialization_module_parameters = serialization_module.get("parameters", {})\n            serialization_module_parameters_with_protocol = {\n                "protocol": (\n                    pickle.HIGHEST_PROTOCOL\n                ),  # we use the highest available protocol if we don't pass one\n                **serialization_module_parameters,\n            }\n\n            artifact_type = parameters.get("type", "artifact")\n            artifact_description = parameters.get("description")\n            artifact_metadata = {\n                "source_integration": "dagster_wandb",\n                "source_integration_version": __version__,\n                "source_dagster_run_id": self.dagster_run_id,\n                "source_created_at": datetime.datetime.now(datetime.timezone.utc).isoformat(),\n                "source_python_version": platform.python_version(),\n            }\n            if isinstance(obj, Artifact):\n                if parameters.get("name") is not None:\n                    raise WandbArtifactsIOManagerError(\n                        "You've provided a 'name' property in the 'wandb_artifact_configuration'"\n                        " settings. However, this 'name' property should only be used when the"\n                        " output isn't already an Artifact object."\n                    )\n\n                if parameters.get("type") is not None:\n                    raise WandbArtifactsIOManagerError(\n                        "You've provided a 'type' property in the 'wandb_artifact_configuration'"\n                        " settings. However, this 'type' property should only be used when the"\n                        " output isn't already an Artifact object."\n                    )\n\n                if obj.name is None:\n                    raise WandbArtifactsIOManagerError(\n                        "The Weights & Biases (W&B) Artifact you provided is missing a name."\n                        " Please, assign a name to your Artifact."\n                    )\n\n                if context.has_asset_key and obj.name != context.get_asset_identifier()[0]:\n                    asset_identifier = context.get_asset_identifier()[0]\n                    context.log.warning(\n                        f"Please note, the name '{obj.name}' of your Artifact is overwritten by the"\n                        f" name derived from the AssetKey '{asset_identifier}'. For consistency and"\n                        " to avoid confusion, we advise sharing a constant for both your asset's"\n                        " name and the artifact's name."\n                    )\n                    obj._name = asset_identifier  # noqa: SLF001\n\n                if context.has_partition_key:\n                    artifact_name = f"{obj.name}.{context.partition_key}"\n                    # The Artifact provided is produced in a partitioned execution we add the\n                    # partition as a suffix to the Artifact name\n                    obj._name = artifact_name  # noqa: SLF001\n\n                if len(serialization_module) != 0:  # not an empty dict\n                    context.log.warning(\n                        "You've included a 'serialization_module' in the"\n                        " 'wandb_artifact_configuration' settings. 
However, this doesn't have any"\n                        " impact when the output is already an Artifact object."\n                    )\n\n                # The obj is already an Artifact we augment its metadata\n                artifact = obj\n\n                artifact.metadata = {**artifact.metadata, **artifact_metadata}\n\n                if artifact.description is not None and artifact_description is not None:\n                    raise WandbArtifactsIOManagerError(\n                        "You've given a 'description' in the 'wandb_artifact_configuration'"\n                        " settings for an existing Artifact that already has a description. Please,"\n                        " either set the description using 'wandb_artifact_argument' or when"\n                        " creating your Artifact."\n                    )\n                if artifact_description is not None:\n                    artifact.description = artifact_description\n            else:\n                if context.has_asset_key:\n                    if parameters.get("name") is not None:\n                        raise WandbArtifactsIOManagerError(\n                            "You've included a 'name' property in the"\n                            " 'wandb_artifact_configuration' settings. But, a 'name' is only needed"\n                            " when there's no 'AssetKey'. When an Artifact is created from an"\n                            " @asset, it uses the asset name. When it's created from an @op with an"\n                            " 'asset_key' for the output, that value is used. Please remove the"\n                            " 'name' property."\n                        )\n                    artifact_name = context.get_asset_identifier()[0]  # name of asset\n                else:\n                    name_parameter = parameters.get("name")\n                    if name_parameter is None:\n                        raise WandbArtifactsIOManagerError(\n                            "The 'name' property is missing in the 'wandb_artifact_configuration'"\n                            " settings. For Artifacts created from an @op, a 'name' property is"\n                            " needed. You could also use an @asset as an alternative."\n                        )\n                    assert name_parameter is not None\n                    artifact_name = name_parameter\n\n                if context.has_partition_key:\n                    artifact_name = f"{artifact_name}.{context.partition_key}"\n\n                # We replace the | character with - because it is not allowed in artifact names\n                # The | character is used in multi-dimensional partition keys\n                artifact_name = str(artifact_name).replace("|", "-")\n\n                # Creates an artifact to hold the obj\n                artifact = self.wandb.Artifact(\n                    name=artifact_name,\n                    type=artifact_type,\n                    description=artifact_description,\n                    metadata=artifact_metadata,\n                )\n                if isinstance(obj, WBValue):\n                    if len(serialization_module) != 0:  # not an empty dict\n                        context.log.warning(\n                            "You've included a 'serialization_module' in the"\n                            " 'wandb_artifact_configuration' settings. 
However, this doesn't have"\n                            " any impact when the output is already an W&B object like e.g Table or"\n                            " Image."\n                        )\n                    # Adds the WBValue object using the class name as the name for the file\n                    artifact.add(obj, obj.__class__.__name__)\n                elif obj is not None:\n                    # The output is not a native wandb Object, we serialize it\n                    pickle_artifact_content(\n                        context,\n                        serialization_module_name,\n                        serialization_module_parameters_with_protocol,\n                        artifact,\n                        obj,\n                    )\n\n            # Add any files: https://docs.wandb.ai/ref/python/artifact#add_file\n            add_files = parameters.get("add_files")\n            if add_files is not None and len(add_files) > 0:\n                for add_file in add_files:\n                    artifact.add_file(**add_file)\n\n            # Add any dirs: https://docs.wandb.ai/ref/python/artifact#add_dir\n            add_dirs = parameters.get("add_dirs")\n            if add_dirs is not None and len(add_dirs) > 0:\n                for add_dir in add_dirs:\n                    artifact.add_dir(**add_dir)\n\n            # Add any reference: https://docs.wandb.ai/ref/python/artifact#add_reference\n            add_references = parameters.get("add_references")\n            if add_references is not None and len(add_references) > 0:\n                for add_reference in add_references:\n                    artifact.add_reference(**add_reference)\n\n            # Augments the aliases\n            aliases = parameters.get("aliases", [])\n            aliases.append(f"dagster-run-{self.dagster_run_id[0:8]}")\n            if "latest" not in aliases:\n                aliases.append("latest")\n\n            # Logs the artifact\n            self.wandb.log_artifact(artifact, aliases=aliases)\n            artifact.wait()\n\n            # Adds useful metadata to the output or Asset\n            artifacts_base_url = (\n                "https://wandb.ai"\n                if self.wandb_host == WANDB_CLOUD_HOST\n                else self.wandb_host.rstrip("/")\n            )\n            assert artifact.id is not None\n            output_metadata = {\n                "dagster_run_id": MetadataValue.dagster_run(self.dagster_run_id),\n                "wandb_artifact_id": MetadataValue.text(artifact.id),\n                "wandb_artifact_type": MetadataValue.text(artifact.type),\n                "wandb_artifact_version": MetadataValue.text(artifact.version),\n                "wandb_artifact_size": MetadataValue.int(artifact.size),\n                "wandb_artifact_url": MetadataValue.url(\n                    f"{artifacts_base_url}/{run.entity}/{run.project}/artifacts/{artifact.type}/{'/'.join(artifact.name.rsplit(':', 1))}"\n                ),\n                "wandb_entity": MetadataValue.text(run.entity),\n                "wandb_project": MetadataValue.text(run.project),\n                "wandb_run_id": MetadataValue.text(run.id),\n                "wandb_run_name": MetadataValue.text(run.name),\n                "wandb_run_path": MetadataValue.text(run.path),\n                "wandb_run_url": MetadataValue.url(run.url),\n            }\n            context.add_output_metadata(output_metadata)\n\n    def _download_artifact(self, context: InputContext):\n        with self.wandb_run() as run:\n           
 parameters = {}\n            if context.metadata is not None:\n                parameters = context.metadata.get("wandb_artifact_configuration", {})\n\n            raise_on_unknown_read_configuration_keys(parameters)\n\n            partitions_configuration = parameters.get("partitions", {})\n\n            if not context.has_asset_partitions and len(partitions_configuration) > 0:\n                raise WandbArtifactsIOManagerError(\n                    "You've included a 'partitions' value in the 'wandb_artifact_configuration'"\n                    " settings but it's not within a partitioned execution. Please only use"\n                    " 'partitions' within a partitioned context."\n                )\n\n            if context.has_asset_partitions:\n                # Note: this is currently impossible to unit test with current Dagster APIs but was\n                # tested thoroughly manually\n                name = parameters.get("get")\n                path = parameters.get("get_path")\n                if name is not None or path is not None:\n                    raise WandbArtifactsIOManagerError(\n                        "You've given a value for 'get' and/or 'get_path' in the"\n                        " 'wandb_artifact_configuration' settings during a partitioned execution."\n                        " Please use the 'partitions' property to set 'get' or 'get_path' for each"\n                        " individual partition. To set a default value for all partitions, use '*'."\n                    )\n\n                artifact_name = parameters.get("name")\n                if artifact_name is None:\n                    artifact_name = context.asset_key[0][0]  # name of asset\n\n                partitions = [\n                    (key, f"{artifact_name}.{ str(key).replace('|', '-')}")\n                    for key in context.asset_partition_keys\n                ]\n\n                output = {}\n\n                for key, artifact_name in partitions:\n                    context.log.info(f"Handling partition with key '{key}'")\n                    partition_configuration = partitions_configuration.get(\n                        key, partitions_configuration.get("*")\n                    )\n\n                    raise_on_empty_configuration(key, partition_configuration)\n                    raise_on_unknown_partition_keys(key, partition_configuration)\n\n                    partition_version = None\n                    partition_alias = None\n                    if partition_configuration and partition_configuration is not None:\n                        partition_version = partition_configuration.get("version")\n                        partition_alias = partition_configuration.get("alias")\n                        if partition_version is not None and partition_alias is not None:\n                            raise WandbArtifactsIOManagerError(\n                                "You've provided both 'version' and 'alias' for the partition with"\n                                " key '{key}'. You should only use one of these properties at a"\n                                " time. If you choose not to use any, the latest version will be"\n                                " used by default. 
If this partition is configured with the '*'"\n                                " key, please correct the wildcard configuration."\n                            )\n                    partition_identifier = partition_version or partition_alias or "latest"\n\n                    artifact_uri = (\n                        f"{run.entity}/{run.project}/{artifact_name}:{partition_identifier}"\n                    )\n                    try:\n                        api = self.wandb.Api()\n                        api.artifact(artifact_uri)\n                    except Exception as exception:\n                        raise WandbArtifactsIOManagerError(\n                            "The artifact you're attempting to download might not exist, or you"\n                            " might have forgotten to include the 'name' property in the"\n                            " 'wandb_artifact_configuration' settings."\n                        ) from exception\n\n                    artifact = run.use_artifact(artifact_uri)\n\n                    artifacts_path = self._get_artifacts_path(artifact_name, artifact.version)\n                    if partition_configuration and partition_configuration is not None:\n                        partition_name = partition_configuration.get("get")\n                        partition_path = partition_configuration.get("get_path")\n                        if partition_name is not None and partition_path is not None:\n                            raise WandbArtifactsIOManagerError(\n                                "You've provided both 'get' and 'get_path' in the"\n                                " 'wandb_artifact_configuration' settings for the partition with"\n                                " key '{key}'. Only one of these properties should be used. If you"\n                                " choose not to use any, the whole Artifact will be returned. 
If"\n                                " this partition is configured with the '*' key, please correct the"\n                                " wildcard configuration."\n                            )\n\n                        if partition_name is not None:\n                            wandb_object = artifact.get(partition_name)\n                            if wandb_object is not None:\n                                output[key] = wandb_object\n                                continue\n\n                        if partition_path is not None:\n                            path = artifact.get_path(partition_path)\n                            download_path = path.download(root=artifacts_path)\n                            if download_path is not None:\n                                output[key] = download_path\n                                continue\n\n                    artifact_dir = artifact.download(root=artifacts_path, recursive=True)\n                    unpickled_content = unpickle_artifact_content(artifact_dir)\n                    if unpickled_content is not None:\n                        output[key] = unpickled_content\n                        continue\n\n                    artifact.verify(root=artifacts_path)\n                    output[key] = artifact\n\n                if len(output) == 1:\n                    # If there's only one partition, return the value directly\n                    return next(iter(output.values()))\n\n                return output\n\n            elif context.has_asset_key:\n                # Input is an asset\n                if parameters.get("name") is not None:\n                    raise WandbArtifactsIOManagerError(\n                        "A conflict has been detected in the provided configuration settings. The"\n                        " 'name' parameter appears to be specified twice - once in the"\n                        " 'wandb_artifact_configuration' metadata dictionary, and again as an"\n                        " AssetKey. Kindly avoid setting the name directly, since the AssetKey will"\n                        " be used for this purpose."\n                    )\n                artifact_name = context.get_asset_identifier()[0]  # name of asset\n            else:\n                artifact_name = parameters.get("name")\n                if artifact_name is None:\n                    raise WandbArtifactsIOManagerError(\n                        "The 'name' property is missing in the 'wandb_artifact_configuration'"\n                        " settings. For Artifacts used in an @op, a 'name' property is required."\n                        " You could use an @asset as an alternative."\n                    )\n\n            if context.has_partition_key:\n                artifact_name = f"{artifact_name}.{context.partition_key}"\n\n            artifact_alias = parameters.get("alias")\n            artifact_version = parameters.get("version")\n\n            if artifact_alias is not None and artifact_version is not None:\n                raise WandbArtifactsIOManagerError(\n                    "You've provided both 'version' and 'alias' in the"\n                    " 'wandb_artifact_configuration' settings. 
Only one should be used at a time."\n                    " If you decide not to use any, the latest version will be applied"\n                    " automatically."\n                )\n\n            artifact_identifier = artifact_alias or artifact_version or "latest"\n            artifact_uri = f"{run.entity}/{run.project}/{artifact_name}:{artifact_identifier}"\n\n            # This try/except block is a workaround for a bug in the W&B SDK, this should be removed\n            # once the bug is fixed.\n            try:\n                artifact = run.use_artifact(artifact_uri)\n            except Exception:\n                api = self.wandb.Api()\n                artifact = api.artifact(artifact_uri)\n\n            name = parameters.get("get")\n            path = parameters.get("get_path")\n            if name is not None and path is not None:\n                raise WandbArtifactsIOManagerError(\n                    "You've provided both 'get' and 'get_path' in the"\n                    " 'wandb_artifact_configuration' settings. Only one should be used at a time."\n                    " If you decide not to use any, the entire Artifact will be returned."\n                )\n\n            if name is not None:\n                return artifact.get(name)\n\n            artifacts_path = self._get_artifacts_path(artifact_name, artifact.version)\n            if path is not None:\n                path = artifact.get_path(path)\n                return path.download(root=artifacts_path)\n\n            artifact_dir = artifact.download(root=artifacts_path, recursive=True)\n\n            unpickled_content = unpickle_artifact_content(artifact_dir)\n            if unpickled_content is not None:\n                return unpickled_content\n\n            artifact.verify(root=artifacts_path)\n            return artifact\n\n    def handle_output(self, context: OutputContext, obj) -> None:\n        if obj is None:\n            context.log.warning(\n                "The output value given to the Weights & Biases (W&B) IO Manager is empty. If this"\n                " was intended, you can disregard this warning."\n            )\n        else:\n            try:\n                self._upload_artifact(context, obj)\n            except WandbArtifactsIOManagerError as exception:\n                raise exception\n            except Exception as exception:\n                raise WandbArtifactsIOManagerError() from exception\n\n    def load_input(self, context: InputContext):\n        try:\n            return self._download_artifact(context)\n        except WandbArtifactsIOManagerError as exception:\n            raise exception\n        except Exception as exception:\n            raise WandbArtifactsIOManagerError() from exception\n\n\n
[docs]@dagster_maintained_io_manager\n@io_manager(\n required_resource_keys={"wandb_resource", "wandb_config"},\n description="IO manager to read and write W&B Artifacts",\n config_schema={\n "run_name": Field(\n String,\n is_required=False,\n description=(\n "Short display name for this run, which is how you'll identify this run in the UI."\n " By default, it`s set to a string with the following format dagster-run-[8 first"\n " characters of the Dagster Run ID] e.g. dagster-run-7e4df022."\n ),\n ),\n "run_id": Field(\n String,\n is_required=False,\n description=(\n "Unique ID for this run, used for resuming. It must be unique in the project, and"\n " if you delete a run you can't reuse the ID. Use the name field for a short"\n " descriptive name, or config for saving hyperparameters to compare across runs."\n r" The ID cannot contain the following special characters: /\\#?%:.. You need to set"\n " the Run ID when you are doing experiment tracking inside Dagster to allow the IO"\n " Manager to resume the run. By default it`s set to the Dagster Run ID e.g "\n " 7e4df022-1bf2-44b5-a383-bb852df4077e."\n ),\n ),\n "run_tags": Field(\n [String],\n is_required=False,\n description=(\n "A list of strings, which will populate the list of tags on this run in the UI."\n " Tags are useful for organizing runs together, or applying temporary labels like"\n " 'baseline' or 'production'. It's easy to add and remove tags in the UI, or filter"\n " down to just runs with a specific tag. Any W&B Run used by the integration will"\n " have the dagster_wandb tag."\n ),\n ),\n "base_dir": Field(\n String,\n is_required=False,\n description=(\n "Base directory used for local storage and caching. W&B Artifacts and W&B Run logs"\n " will be written and read from that directory. By default, it`s using the"\n " DAGSTER_HOME directory."\n ),\n ),\n "cache_duration_in_minutes": Field(\n Int,\n is_required=False,\n description=(\n "Defines the amount of time W&B Artifacts and W&B Run logs should be kept in the"\n " local storage. Only files and directories that were not opened for that amount of"\n " time are removed from the cache. Cache purging happens at the end of an IO"\n " Manager execution. You can set it to 0, if you want to disable caching"\n " completely. Caching improves speed when an Artifact is reused between jobs"\n " running on the same machine. It defaults to 30 days."\n ),\n ),\n },\n)\ndef wandb_artifacts_io_manager(context: InitResourceContext):\n """Dagster IO Manager to create and consume W&B Artifacts.\n\n It allows any Dagster @op or @asset to create and consume W&B Artifacts natively.\n\n For a complete set of documentation, see `Dagster integration <https://docs.wandb.ai/guides/integrations/dagster>`_.\n\n **Example:**\n\n .. 
code-block:: python\n\n @repository\n def my_repository():\n return [\n *with_resources(\n load_assets_from_current_module(),\n resource_defs={\n "wandb_config": make_values_resource(\n entity=str,\n project=str,\n ),\n "wandb_resource": wandb_resource.configured(\n {"api_key": {"env": "WANDB_API_KEY"}}\n ),\n "wandb_artifacts_manager": wandb_artifacts_io_manager.configured(\n {"cache_duration_in_minutes": 60} # only cache files for one hour\n ),\n },\n resource_config_by_key={\n "wandb_config": {\n "config": {\n "entity": "my_entity",\n "project": "my_project"\n }\n }\n },\n ),\n ]\n\n\n @asset(\n name="my_artifact",\n metadata={\n "wandb_artifact_configuration": {\n "type": "dataset",\n }\n },\n io_manager_key="wandb_artifacts_manager",\n )\n def create_dataset():\n return [1, 2, 3]\n\n """\n wandb_client = context.resources.wandb_resource["sdk"]\n wandb_host = context.resources.wandb_resource["host"]\n wandb_entity = context.resources.wandb_config["entity"]\n wandb_project = context.resources.wandb_config["project"]\n\n wandb_run_name = None\n wandb_run_id = None\n wandb_run_tags = None\n base_dir = (\n context.instance.storage_directory() if context.instance else os.environ["DAGSTER_HOME"]\n )\n cache_duration_in_minutes = None\n if context.resource_config is not None:\n wandb_run_name = context.resource_config.get("run_name")\n wandb_run_id = context.resource_config.get("run_id")\n wandb_run_tags = context.resource_config.get("run_tags")\n base_dir = context.resource_config.get("base_dir", base_dir)\n cache_duration_in_minutes = context.resource_config.get("cache_duration_in_minutes")\n\n if "PYTEST_CURRENT_TEST" in os.environ:\n dagster_run_id = "unit-testing"\n else:\n dagster_run_id = context.run_id\n\n assert dagster_run_id is not None\n\n config: Config = {\n "dagster_run_id": dagster_run_id,\n "wandb_host": wandb_host,\n "wandb_entity": wandb_entity,\n "wandb_project": wandb_project,\n "wandb_run_name": wandb_run_name,\n "wandb_run_id": wandb_run_id,\n "wandb_run_tags": wandb_run_tags,\n "base_dir": base_dir,\n "cache_duration_in_minutes": cache_duration_in_minutes,\n }\n return ArtifactsIOManager(wandb_client, config)
\n
", "current_page_name": "_modules/dagster_wandb/io_manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_wandb.io_manager"}, "launch": {"ops": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_wandb.launch.ops

\nfrom dagster import OpExecutionContext, op\nfrom wandb.sdk.launch import launch\nfrom wandb.sdk.launch.launch_add import launch_add\n\nfrom .configs import launch_agent_config, launch_config\n\n\ndef raise_on_invalid_config(context: OpExecutionContext):\n    entity = context.resources.wandb_config["entity"]\n    if entity == "":\n        raise RuntimeError(\n            "(dagster_wandb) An empty string was provided for the 'entity' property of the"\n            " 'wandb_config'."\n        )\n\n    project = context.resources.wandb_config["project"]\n    if project == "":\n        raise RuntimeError(\n            "(dagster_wandb) An empty string was provided for the 'project' property of the"\n            " 'wandb_config'."\n        )\n\n\n
[docs]@op(\n required_resource_keys={"wandb_resource", "wandb_config"},\n config_schema=launch_agent_config(),\n)\ndef run_launch_agent(context: OpExecutionContext):\n """It starts a Launch Agent and runs it as a long running process until stopped manually.\n\n Agents are processes that poll launch queues and execute the jobs (or dispatch them to external\n services to be executed) in order.\n\n **Example:**\n\n .. code-block:: YAML\n\n # config.yaml\n\n resources:\n wandb_config:\n config:\n entity: my_entity\n project: my_project\n ops:\n run_launch_agent:\n config:\n max_jobs: -1\n queues:\n - my_dagster_queue\n\n .. code-block:: python\n\n from dagster_wandb.launch.ops import run_launch_agent\n from dagster_wandb.resources import wandb_resource\n\n from dagster import job, make_values_resource\n\n\n @job(\n resource_defs={\n "wandb_config": make_values_resource(\n entity=str,\n project=str,\n ),\n "wandb_resource": wandb_resource.configured(\n {"api_key": {"env": "WANDB_API_KEY"}}\n ),\n },\n )\n def run_launch_agent_example():\n run_launch_agent()\n\n """\n raise_on_invalid_config(context)\n config = {\n "entity": context.resources.wandb_config["entity"],\n "project": context.resources.wandb_config["project"],\n **context.op_config,\n }\n context.log.info(f"Launch agent configuration: {config}")\n context.log.info("Running Launch agent...")\n launch.create_and_run_agent(api=context.resources.wandb_resource["api"], config=config)
\n\n\n
[docs]@op(\n required_resource_keys={\n "wandb_resource",\n "wandb_config",\n },\n config_schema=launch_config(),\n)\ndef run_launch_job(context: OpExecutionContext):\n """Executes a Launch job.\n\n A Launch job is assigned to a queue in order to be executed. You can create a queue or use the\n default one. Make sure you have an active agent listening to that queue. You can run an agent\n inside your Dagster instance but can also consider using a deployable agent in Kubernetes.\n\n **Example:**\n\n .. code-block:: YAML\n\n # config.yaml\n\n resources:\n wandb_config:\n config:\n entity: my_entity\n project: my_project\n ops:\n my_launched_job:\n config:\n entry_point:\n - python\n - train.py\n queue: my_dagster_queue\n uri: https://github.com/wandb/example-dagster-integration-with-launch\n\n .. code-block:: python\n\n from dagster_wandb.launch.ops import run_launch_job\n from dagster_wandb.resources import wandb_resource\n\n from dagster import job, make_values_resource\n\n\n @job(\n resource_defs={\n "wandb_config": make_values_resource(\n entity=str,\n project=str,\n ),\n "wandb_resource": wandb_resource.configured(\n {"api_key": {"env": "WANDB_API_KEY"}}\n ),\n },\n )\n def run_launch_job_example():\n run_launch_job.alias("my_launched_job")() # we rename the job with an alias\n\n """\n raise_on_invalid_config(context)\n config = {\n "entity": context.resources.wandb_config["entity"],\n "project": context.resources.wandb_config["project"],\n **context.op_config,\n }\n context.log.info(f"Launch job configuration: {config}")\n\n queue = context.op_config.get("queue")\n if queue is None:\n context.log.info("No queue provided, running Launch job locally")\n launch.run(api=context.resources.wandb_resource["api"], config=config)\n else:\n synchronous = config.get("synchronous", True)\n config.pop("synchronous", None)\n queued_run = launch_add(**config)\n if synchronous is True:\n context.log.info(\n f"Synchronous Launch job added to queue with name={queue}. Waiting for"\n " completion..."\n )\n queued_run.wait_until_finished()\n else:\n context.log.info(f"Asynchronous Launch job added to queue with name={queue}")
\n
", "current_page_name": "_modules/dagster_wandb/launch/ops", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_wandb.launch.ops"}}, "resources": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_wandb.resources

\nfrom typing import Any, Dict\n\nimport wandb\nfrom dagster import Field, InitResourceContext, String, StringSource, resource\nfrom dagster._core.definitions.resource_definition import dagster_maintained_resource\nfrom wandb.sdk.internal.internal_api import Api\n\nWANDB_CLOUD_HOST: str = "https://api.wandb.ai"\n\n\n
[docs]@dagster_maintained_resource\n@resource(\n config_schema={\n "api_key": Field(\n StringSource,\n description="W&B API key necessary to communicate with the W&B API.",\n is_required=True,\n ),\n "host": Field(\n String,\n description=(\n "API host server you wish to use. Only required if you are using W&B Server."\n ),\n is_required=False,\n default_value=WANDB_CLOUD_HOST,\n ),\n },\n description="Resource for interacting with Weights & Biases",\n)\ndef wandb_resource(context: InitResourceContext) -> Dict[str, Any]:\n """Dagster resource used to communicate with the W&B API. It's useful when you want to use the\n wandb client within your ops and assets. It's a required resource if you are using the W&B IO\n Manager.\n\n It automatically authenticates using the provided API key.\n\n For a complete set of documentation, see `Dagster integration <https://docs.wandb.ai/guides/integrations/dagster>`_.\n\n To configure this resource, we recommend using the `configured\n <https://docs.dagster.io/concepts/configuration/configured>`_ method.\n\n **Example:**\n\n .. code-block:: python\n\n from dagster import job\n from dagster_wandb import wandb_resource\n\n my_wandb_resource = wandb_resource.configured({"api_key": {"env": "WANDB_API_KEY"}})\n\n @job(resource_defs={"wandb_resource": my_wandb_resource})\n def my_wandb_job():\n ...\n\n """\n api_key = context.resource_config["api_key"]\n host = context.resource_config["host"]\n wandb.login(\n key=api_key,\n host=host,\n anonymous="never",\n )\n client_settings = wandb.Settings(\n api_key=api_key,\n base_url=host,\n anonymous="never",\n launch=True,\n )\n api = Api(default_settings=client_settings, load_settings=False)\n return {"sdk": wandb, "api": api, "host": host}
\n
", "current_page_name": "_modules/dagster_wandb/resources", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_wandb.resources"}, "types": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_wandb.types

\nimport sys\n\nif sys.version_info >= (3, 8):\n    from typing import TypedDict\nelse:\n    from typing_extensions import TypedDict\n\nfrom typing import Any, Dict, List\n\n\n
[docs]class SerializationModule(TypedDict, total=False):\n """W&B Artifacts IO Manager configuration of the serialization module. Useful for type checking."""\n\n name: str\n parameters: Dict[str, Any]
\n\n\n
[docs]class WandbArtifactConfiguration(TypedDict, total=False):\n """W&B Artifacts IO Manager configuration. Useful for type checking."""\n\n name: str\n type: str\n description: str\n aliases: List[str]\n add_dirs: List[Dict[str, Any]]\n add_files: List[Dict[str, Any]]\n add_references: List[Dict[str, Any]]\n serialization_module: SerializationModule\n partitions: Dict[str, Dict[str, Any]]
\n
", "current_page_name": "_modules/dagster_wandb/types", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_wandb.types"}, "utils": {"errors": {"alabaster_version": "0.7.13", "body": "

Source code for dagster_wandb.utils.errors

\n
[docs]class WandbArtifactsIOManagerError(Exception):\n """Represents an execution error of the W&B Artifacts IO Manager."""\n\n def __init__(self, message="A W&B Artifacts IO Manager error occurred."):\n self.message = message\n super().__init__(self.message)
\n\n\nSUPPORTED_READ_CONFIG_KEYS = [\n "alias",\n "get_path",\n "get",\n "name",\n "partitions",\n "version",\n]\nSUPPORTED_WRITE_CONFIG_KEYS = [\n "add_dirs",\n "add_files",\n "add_references",\n "aliases",\n "description",\n "name",\n "partitions",\n "serialization_module",\n "type",\n]\nSUPPORTED_PARTITION_CONFIG_KEYS = ["get", "get_path", "version", "alias"]\n\n\ndef raise_on_empty_configuration(partition_key, dictionary):\n if dictionary is not None and len(dictionary) == 0:\n raise WandbArtifactsIOManagerError(\n f"The configuration is empty for the partition identified by the key '{partition_key}'."\n " This happened within the 'wandb_artifact_configuration' metadata dictionary."\n )\n\n\ndef raise_on_unknown_keys(supported_config_keys, dictionary, is_read_config):\n if dictionary is None:\n return\n\n unsupported_keys = [key for key in dictionary.keys() if key not in supported_config_keys]\n if len(unsupported_keys) > 0:\n if is_read_config:\n raise WandbArtifactsIOManagerError(\n f"The configuration keys '{unsupported_keys}' you are trying to use are not"\n " supported within the 'wandb_artifact_configuration' metadata dictionary when"\n " reading an Artifact."\n )\n else:\n raise WandbArtifactsIOManagerError(\n f"The configuration keys '{unsupported_keys}' you are trying to use are not"\n " supported within the 'wandb_artifact_configuration' metadata dictionary when"\n " writing an Artifact."\n )\n\n\ndef raise_on_unknown_write_configuration_keys(dictionary):\n raise_on_unknown_keys(SUPPORTED_WRITE_CONFIG_KEYS, dictionary, False)\n\n\ndef raise_on_unknown_read_configuration_keys(dictionary):\n raise_on_unknown_keys(SUPPORTED_READ_CONFIG_KEYS, dictionary, True)\n\n\ndef raise_on_unknown_partition_keys(partition_key, dictionary):\n if dictionary is None:\n return\n\n unsupported_keys = [\n key for key in dictionary.keys() if key not in SUPPORTED_PARTITION_CONFIG_KEYS\n ]\n if len(unsupported_keys) > 0:\n raise WandbArtifactsIOManagerError(\n f"The configuration keys '{unsupported_keys}' you are trying to use are not supported"\n f" for the partition identified by the key '{partition_key}'. This happened within the"\n " 'wandb_artifact_configuration' metadata dictionary."\n )\n
", "current_page_name": "_modules/dagster_wandb/utils/errors", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagster_wandb.utils.errors"}}}, "dagstermill": {"asset_factory": {"alabaster_version": "0.7.13", "body": "

Source code for dagstermill.asset_factory

\nimport pickle\nimport tempfile\nfrom typing import Any, Callable, Iterable, Mapping, Optional, Set, Type, Union, cast\n\nimport dagster._check as check\nfrom dagster import (\n    AssetIn,\n    AssetKey,\n    AssetsDefinition,\n    Failure,\n    Output,\n    PartitionsDefinition,\n    ResourceDefinition,\n    RetryPolicy,\n    RetryRequested,\n    SourceAsset,\n    asset,\n)\nfrom dagster._config.pythonic_config import Config, infer_schema_from_config_class\nfrom dagster._config.pythonic_config.type_check_utils import safe_is_subclass\nfrom dagster._core.definitions.events import CoercibleToAssetKey, CoercibleToAssetKeyPrefix\nfrom dagster._core.definitions.utils import validate_tags\nfrom dagster._core.execution.context.compute import OpExecutionContext\n\nfrom dagstermill.factory import _clean_path_for_windows, execute_notebook\n\n\ndef _make_dagstermill_asset_compute_fn(\n    name: str,\n    notebook_path: str,\n    save_notebook_on_failure: bool,\n) -> Callable:\n    def _t_fn(context: OpExecutionContext, **inputs) -> Iterable:\n        check.param_invariant(\n            isinstance(context.run_config, dict),\n            "context",\n            "StepExecutionContext must have valid run_config",\n        )\n\n        with tempfile.TemporaryDirectory() as output_notebook_dir:\n            executed_notebook_path = execute_notebook(\n                context.get_step_execution_context(),\n                name=name,\n                inputs=inputs,\n                save_notebook_on_failure=save_notebook_on_failure,\n                notebook_path=notebook_path,\n                output_notebook_dir=output_notebook_dir,\n            )\n\n            with open(executed_notebook_path, "rb") as fd:\n                yield Output(fd.read())\n\n            # deferred import for perf\n            import scrapbook\n\n            output_nb = scrapbook.read_notebook(executed_notebook_path)\n\n            for key, value in output_nb.scraps.items():\n                if key.startswith("event-"):\n                    with open(value.data, "rb") as fd:\n                        event = pickle.loads(fd.read())\n                        if isinstance(event, (Failure, RetryRequested)):\n                            raise event\n                        else:\n                            yield event\n\n    return _t_fn\n\n\n
[docs]def define_dagstermill_asset(\n name: str,\n notebook_path: str,\n key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,\n ins: Optional[Mapping[str, AssetIn]] = None,\n deps: Optional[Iterable[Union[CoercibleToAssetKey, AssetsDefinition, SourceAsset]]] = None,\n metadata: Optional[Mapping[str, Any]] = None,\n config_schema: Optional[Union[Any, Mapping[str, Any]]] = None,\n required_resource_keys: Optional[Set[str]] = None,\n resource_defs: Optional[Mapping[str, ResourceDefinition]] = None,\n description: Optional[str] = None,\n partitions_def: Optional[PartitionsDefinition] = None,\n op_tags: Optional[Mapping[str, Any]] = None,\n group_name: Optional[str] = None,\n io_manager_key: Optional[str] = None,\n retry_policy: Optional[RetryPolicy] = None,\n save_notebook_on_failure: bool = False,\n non_argument_deps: Optional[Union[Set[AssetKey], Set[str]]] = None,\n) -> AssetsDefinition:\n """Creates a Dagster asset for a Jupyter notebook.\n\n Arguments:\n name (str): The name for the asset\n notebook_path (str): Path to the backing notebook\n key_prefix (Optional[Union[str, Sequence[str]]]): If provided, the asset's key is the\n concatenation of the key_prefix and the asset's name, which defaults to the name of\n the decorated function. Each item in key_prefix must be a valid name in dagster (ie only\n contains letters, numbers, and _) and may not contain python reserved keywords.\n ins (Optional[Mapping[str, AssetIn]]): A dictionary that maps input names to information\n about the input.\n deps (Optional[Sequence[Union[AssetsDefinition, SourceAsset, AssetKey, str]]]): The assets\n that are upstream dependencies, but do not pass an input value to the notebook.\n config_schema (Optional[ConfigSchema): The configuration schema for the asset's underlying\n op. If set, Dagster will check that config provided for the op matches this schema and fail\n if it does not. If not set, Dagster will accept any config provided for the op.\n metadata (Optional[Dict[str, Any]]): A dict of metadata entries for the asset.\n required_resource_keys (Optional[Set[str]]): Set of resource handles required by the notebook.\n description (Optional[str]): Description of the asset to display in the Dagster UI.\n partitions_def (Optional[PartitionsDefinition]): Defines the set of partition keys that\n compose the asset.\n op_tags (Optional[Dict[str, Any]]): A dictionary of tags for the op that computes the asset.\n Frameworks may expect and require certain metadata to be attached to a op. Values that\n are not strings will be json encoded and must meet the criteria that\n `json.loads(json.dumps(value)) == value`.\n group_name (Optional[str]): A string name used to organize multiple assets into groups. If not provided,\n the name "default" is used.\n resource_defs (Optional[Mapping[str, ResourceDefinition]]):\n (Experimental) A mapping of resource keys to resource definitions. These resources\n will be initialized during execution, and can be accessed from the\n context within the notebook.\n io_manager_key (Optional[str]): A string key for the IO manager used to store the output notebook.\n If not provided, the default key output_notebook_io_manager will be used.\n retry_policy (Optional[RetryPolicy]): The retry policy for the op that computes the asset.\n save_notebook_on_failure (bool): If True and the notebook fails during execution, the failed notebook will be\n written to the Dagster storage directory. 
The location of the file will be printed in the Dagster logs.\n Defaults to False.\n non_argument_deps (Optional[Union[Set[AssetKey], Set[str]]]): Deprecated, use deps instead. Set of asset keys that are\n upstream dependencies, but do not pass an input to the asset.\n\n Examples:\n .. code-block:: python\n\n from dagstermill import define_dagstermill_asset\n from dagster import asset, AssetIn, AssetKey\n from sklearn import datasets\n import pandas as pd\n import numpy as np\n\n @asset\n def iris_dataset():\n sk_iris = datasets.load_iris()\n return pd.DataFrame(\n data=np.c_[sk_iris["data"], sk_iris["target"]],\n columns=sk_iris["feature_names"] + ["target"],\n )\n\n iris_kmeans_notebook = define_dagstermill_asset(\n name="iris_kmeans_notebook",\n notebook_path="/path/to/iris_kmeans.ipynb",\n ins={\n "iris": AssetIn(key=AssetKey("iris_dataset"))\n }\n )\n """\n check.str_param(name, "name")\n check.str_param(notebook_path, "notebook_path")\n check.bool_param(save_notebook_on_failure, "save_notebook_on_failure")\n\n required_resource_keys = set(\n check.opt_set_param(required_resource_keys, "required_resource_keys", of_type=str)\n )\n ins = check.opt_mapping_param(ins, "ins", key_type=str, value_type=AssetIn)\n\n if isinstance(key_prefix, str):\n key_prefix = [key_prefix]\n\n key_prefix = check.opt_list_param(key_prefix, "key_prefix", of_type=str)\n\n default_description = f"This asset is backed by the notebook at {notebook_path}"\n description = check.opt_str_param(description, "description", default=default_description)\n\n io_mgr_key = check.opt_str_param(\n io_manager_key, "io_manager_key", default="output_notebook_io_manager"\n )\n\n user_tags = validate_tags(op_tags)\n if op_tags is not None:\n check.invariant(\n "notebook_path" not in op_tags,\n "user-defined op tags contains the `notebook_path` key, but the `notebook_path` key"\n " is reserved for use by Dagster",\n )\n check.invariant(\n "kind" not in op_tags,\n "user-defined op tags contains the `kind` key, but the `kind` key is reserved for"\n " use by Dagster",\n )\n\n default_tags = {"notebook_path": _clean_path_for_windows(notebook_path), "kind": "ipynb"}\n\n if safe_is_subclass(config_schema, Config):\n config_schema = infer_schema_from_config_class(cast(Type[Config], config_schema))\n\n return asset(\n name=name,\n key_prefix=key_prefix,\n ins=ins,\n deps=deps,\n metadata=metadata,\n description=description,\n config_schema=config_schema,\n required_resource_keys=required_resource_keys,\n resource_defs=resource_defs,\n partitions_def=partitions_def,\n op_tags={**user_tags, **default_tags},\n group_name=group_name,\n output_required=False,\n io_manager_key=io_mgr_key,\n retry_policy=retry_policy,\n non_argument_deps=non_argument_deps,\n )(\n _make_dagstermill_asset_compute_fn(\n name=name,\n notebook_path=notebook_path,\n save_notebook_on_failure=save_notebook_on_failure,\n )\n )
\n
", "current_page_name": "_modules/dagstermill/asset_factory", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagstermill.asset_factory"}, "context": {"alabaster_version": "0.7.13", "body": "

Source code for dagstermill.context

\nfrom typing import AbstractSet, Any, Mapping, Optional, cast\n\nfrom dagster import (\n    DagsterRun,\n    JobDefinition,\n    OpDefinition,\n    _check as check,\n)\nfrom dagster._annotations import public\nfrom dagster._core.definitions.dependency import Node, NodeHandle\nfrom dagster._core.execution.context.compute import AbstractComputeExecutionContext\nfrom dagster._core.execution.context.system import PlanExecutionContext, StepExecutionContext\nfrom dagster._core.log_manager import DagsterLogManager\nfrom dagster._core.system_config.objects import ResolvedRunConfig\n\n\n
[docs]class DagstermillExecutionContext(AbstractComputeExecutionContext):\n """Dagstermill-specific execution context.\n\n Do not initialize directly: use :func:`dagstermill.get_context`.\n """\n\n def __init__(\n self,\n job_context: PlanExecutionContext,\n job_def: JobDefinition,\n resource_keys_to_init: AbstractSet[str],\n op_name: str,\n node_handle: NodeHandle,\n op_config: Any = None,\n ):\n self._job_context = check.inst_param(job_context, "job_context", PlanExecutionContext)\n self._job_def = check.inst_param(job_def, "job_def", JobDefinition)\n self._resource_keys_to_init = check.set_param(\n resource_keys_to_init, "resource_keys_to_init", of_type=str\n )\n self.op_name = check.str_param(op_name, "op_name")\n self.node_handle = check.inst_param(node_handle, "node_handle", NodeHandle)\n self._op_config = op_config\n\n def has_tag(self, key: str) -> bool:\n """Check if a logging tag is defined on the context.\n\n Args:\n key (str): The key to check.\n\n Returns:\n bool\n """\n check.str_param(key, "key")\n return self._job_context.has_tag(key)\n\n def get_tag(self, key: str) -> Optional[str]:\n """Get a logging tag defined on the context.\n\n Args:\n key (str): The key to get.\n\n Returns:\n str\n """\n check.str_param(key, "key")\n return self._job_context.get_tag(key)\n\n @public\n @property\n def run_id(self) -> str:\n """str: The run_id for the context."""\n return self._job_context.run_id\n\n @public\n @property\n def run_config(self) -> Mapping[str, Any]:\n """dict: The run_config for the context."""\n return self._job_context.run_config\n\n @property\n def resolved_run_config(self) -> ResolvedRunConfig:\n """:class:`dagster.ResolvedRunConfig`: The resolved_run_config for the context."""\n return self._job_context.resolved_run_config\n\n @public\n @property\n def logging_tags(self) -> Mapping[str, str]:\n """dict: The logging tags for the context."""\n return self._job_context.logging_tags\n\n @public\n @property\n def job_name(self) -> str:\n """str: The name of the executing job."""\n return self._job_context.job_name\n\n @public\n @property\n def job_def(self) -> JobDefinition:\n """:class:`dagster.JobDefinition`: The job definition for the context.\n\n This will be a dagstermill-specific shim.\n """\n return self._job_def\n\n @property\n def resources(self) -> Any:\n """collections.namedtuple: A dynamically-created type whose properties allow access to\n resources.\n """\n return self._job_context.scoped_resources_builder.build(\n required_resource_keys=self._resource_keys_to_init,\n )\n\n @public\n @property\n def run(self) -> DagsterRun:\n """:class:`dagster.DagsterRun`: The job run for the context."""\n return cast(DagsterRun, self._job_context.dagster_run)\n\n @property\n def log(self) -> DagsterLogManager:\n """:class:`dagster.DagsterLogManager`: The log manager for the context.\n\n Call, e.g., ``log.info()`` to log messages through the Dagster machinery.\n """\n return self._job_context.log\n\n @public\n @property\n def op_def(self) -> OpDefinition:\n """:class:`dagster.OpDefinition`: The op definition for the context.\n\n In interactive contexts, this may be a dagstermill-specific shim, depending whether an\n op definition was passed to ``dagstermill.get_context``.\n """\n return cast(OpDefinition, self._job_def.node_def_named(self.op_name))\n\n @property\n def node(self) -> Node:\n """:class:`dagster.Node`: The node for the context.\n\n In interactive contexts, this may be a dagstermill-specific shim, depending whether an\n op definition was passed to 
``dagstermill.get_context``.\n """\n return self.job_def.get_node(self.node_handle)\n\n @public\n @property\n def op_config(self) -> Any:\n """collections.namedtuple: A dynamically-created type whose properties allow access to\n op-specific config.\n """\n if self._op_config:\n return self._op_config\n\n op_config = self.resolved_run_config.ops.get(self.op_name)\n return op_config.config if op_config else None
\n\n\nclass DagstermillRuntimeExecutionContext(DagstermillExecutionContext):\n def __init__(\n self,\n job_context: PlanExecutionContext,\n job_def: JobDefinition,\n resource_keys_to_init: AbstractSet[str],\n op_name: str,\n step_context: StepExecutionContext,\n node_handle: NodeHandle,\n op_config: Any = None,\n ):\n self._step_context = check.inst_param(step_context, "step_context", StepExecutionContext)\n super().__init__(\n job_context,\n job_def,\n resource_keys_to_init,\n op_name,\n node_handle,\n op_config,\n )\n\n @property\n def step_context(self) -> StepExecutionContext:\n return self._step_context\n
", "current_page_name": "_modules/dagstermill/context", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagstermill.context"}, "errors": {"alabaster_version": "0.7.13", "body": "

Source code for dagstermill.errors

\nfrom dagster._core.errors import DagsterError\n\n\n
[docs]class DagstermillError(DagsterError):\n """Base class for errors raised by dagstermill."""
\n
", "current_page_name": "_modules/dagstermill/errors", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagstermill.errors"}, "factory": {"alabaster_version": "0.7.13", "body": "

Source code for dagstermill.factory

\nimport copy\nimport os\nimport pickle\nimport sys\nimport tempfile\nimport uuid\nfrom typing import Any, Callable, Iterable, Mapping, Optional, Sequence, Set, Type, Union, cast\n\nimport nbformat\nimport papermill\nfrom dagster import (\n    In,\n    OpDefinition,\n    Out,\n    Output,\n    _check as check,\n    _seven,\n)\nfrom dagster._config.pythonic_config import Config, infer_schema_from_config_class\nfrom dagster._config.pythonic_config.type_check_utils import safe_is_subclass\nfrom dagster._core.definitions.events import AssetMaterialization, Failure, RetryRequested\nfrom dagster._core.definitions.metadata import MetadataValue\nfrom dagster._core.definitions.reconstruct import ReconstructableJob\nfrom dagster._core.definitions.utils import validate_tags\nfrom dagster._core.execution.context.compute import OpExecutionContext\nfrom dagster._core.execution.context.input import build_input_context\nfrom dagster._core.execution.context.system import StepExecutionContext\nfrom dagster._core.execution.plan.outputs import StepOutputHandle\nfrom dagster._serdes import pack_value\nfrom dagster._seven import get_system_temp_directory\nfrom dagster._utils import mkdir_p, safe_tempfile_path\nfrom dagster._utils.error import serializable_error_info_from_exc_info\nfrom papermill.engines import papermill_engines\nfrom papermill.iorw import load_notebook_node, write_ipynb\n\nfrom .compat import ExecutionError\nfrom .engine import DagstermillEngine\nfrom .errors import DagstermillError\nfrom .translator import DagsterTranslator\n\n\ndef _clean_path_for_windows(notebook_path: str) -> str:\n    """In windows, the notebook can't render in the Dagster UI unless the C: prefix is removed.\n    os.path.splitdrive will split the path into (drive, tail), so just return the tail.\n    """\n    return os.path.splitdrive(notebook_path)[1]\n\n\n# https://github.com/nteract/papermill/blob/17d4bbb3960c30c263bca835e48baf34322a3530/papermill/parameterize.py\ndef _find_first_tagged_cell_index(nb, tag):\n    parameters_indices = []\n    for idx, cell in enumerate(nb.cells):\n        if tag in cell.metadata.tags:\n            parameters_indices.append(idx)\n    if not parameters_indices:\n        return -1\n    return parameters_indices[0]\n\n\n# This is based on papermill.parameterize.parameterize_notebook\n# Typically, papermill injects the injected-parameters cell *below* the parameters cell\n# but we want to *replace* the parameters cell, which is what this function does.\ndef replace_parameters(context, nb, parameters):\n    """Assigned parameters into the appropriate place in the input notebook.\n\n    Args:\n        nb (NotebookNode): Executable notebook object\n        parameters (dict): Arbitrary keyword arguments to pass to the notebook parameters.\n    """\n    check.dict_param(parameters, "parameters")\n\n    # Copy the nb object to avoid polluting the input\n    nb = copy.deepcopy(nb)\n\n    # papermill method chooses translator based on kernel_name and language, but we just call the\n    # DagsterTranslator to generate parameter content based on the kernel_name\n    param_content = DagsterTranslator.codify(parameters)\n\n    newcell = nbformat.v4.new_code_cell(source=param_content)\n    newcell.metadata["tags"] = ["injected-parameters"]\n\n    param_cell_index = _find_first_tagged_cell_index(nb, "parameters")\n    injected_cell_index = _find_first_tagged_cell_index(nb, "injected-parameters")\n    if injected_cell_index >= 0:\n        # Replace the injected cell with a new version\n        before = 
nb.cells[:injected_cell_index]\n        after = nb.cells[injected_cell_index + 1 :]\n        check.int_value_param(param_cell_index, -1, "param_cell_index")\n        # We should have blown away the parameters cell if there is an injected-parameters cell\n    elif param_cell_index >= 0:\n        # Replace the parameter cell with the injected-parameters cell\n        before = nb.cells[:param_cell_index]\n        after = nb.cells[param_cell_index + 1 :]\n    else:\n        # Inject to the top of the notebook, presumably first cell includes dagstermill import\n        context.log.debug(\n            "Executing notebook with no tagged parameters cell: injecting boilerplate in first "\n            "cell."\n        )\n        before = []\n        after = nb.cells\n\n    nb.cells = before + [newcell] + after\n    nb.metadata.papermill["parameters"] = _seven.json.dumps(parameters)\n\n    return nb\n\n\ndef get_papermill_parameters(\n    step_context: StepExecutionContext,\n    inputs: Mapping[str, object],\n    output_log_path: str,\n    compute_descriptor: str,\n) -> Mapping[str, object]:\n    check.param_invariant(\n        isinstance(step_context.run_config, dict),\n        "step_context",\n        "StepExecutionContext must have valid run_config",\n    )\n\n    run_id = step_context.run_id\n    temp_dir = get_system_temp_directory()\n    marshal_dir = os.path.normpath(os.path.join(temp_dir, "dagstermill", str(run_id), "marshal"))\n    mkdir_p(marshal_dir)\n\n    if not isinstance(step_context.job, ReconstructableJob):\n        if compute_descriptor == "asset":\n            raise DagstermillError(\n                "Can't execute a dagstermill asset that is not reconstructable. "\n                "Use the reconstructable() function if executing from python"\n            )\n        else:\n            raise DagstermillError(\n                "Can't execute a dagstermill op from a job that is not reconstructable. 
"\n                "Use the reconstructable() function if executing from python"\n            )\n\n    dm_executable_dict = step_context.job.to_dict()\n\n    dm_context_dict = {\n        "output_log_path": output_log_path,\n        "marshal_dir": marshal_dir,\n        "run_config": step_context.run_config,\n    }\n\n    dm_node_handle_kwargs = step_context.node_handle._asdict()\n    dm_step_key = step_context.step.key\n\n    parameters = {}\n\n    parameters["__dm_context"] = dm_context_dict\n    parameters["__dm_executable_dict"] = dm_executable_dict\n    parameters["__dm_pipeline_run_dict"] = pack_value(step_context.dagster_run)\n    parameters["__dm_node_handle_kwargs"] = dm_node_handle_kwargs\n    parameters["__dm_instance_ref_dict"] = pack_value(step_context.instance.get_ref())\n    parameters["__dm_step_key"] = dm_step_key\n    parameters["__dm_input_names"] = list(inputs.keys())\n\n    return parameters\n\n\ndef execute_notebook(\n    step_context: StepExecutionContext,\n    name: str,\n    save_notebook_on_failure: bool,\n    notebook_path: str,\n    output_notebook_dir: str,\n    inputs: Mapping[str, object],\n) -> str:\n    with safe_tempfile_path() as output_log_path:\n        prefix = str(uuid.uuid4())\n        parameterized_notebook_path = os.path.join(output_notebook_dir, f"{prefix}-inter.ipynb")\n\n        executed_notebook_path = os.path.join(output_notebook_dir, f"{prefix}-out.ipynb")\n\n        # Scaffold the registration here\n        nb = load_notebook_node(notebook_path)\n        compute_descriptor = "op"\n        nb_no_parameters = replace_parameters(\n            step_context,\n            nb,\n            get_papermill_parameters(\n                step_context,\n                inputs,\n                output_log_path,\n                compute_descriptor,\n            ),\n        )\n        write_ipynb(nb_no_parameters, parameterized_notebook_path)\n\n        try:\n            papermill_engines.register("dagstermill", DagstermillEngine)\n            papermill.execute_notebook(\n                input_path=parameterized_notebook_path,\n                output_path=executed_notebook_path,\n                engine_name="dagstermill",\n                log_output=True,\n            )\n\n        except Exception as ex:\n            step_context.log.warn(\n                "Error when attempting to materialize executed notebook: {exc}".format(\n                    exc=str(serializable_error_info_from_exc_info(sys.exc_info()))\n                )\n            )\n\n            if isinstance(ex, ExecutionError):\n                exception_name = ex.ename  # type: ignore\n                if exception_name in ["RetryRequested", "Failure"]:\n                    step_context.log.warn(\n                        f"Encountered raised {exception_name} in notebook. 
Use"\n                        " dagstermill.yield_event with RetryRequested or Failure to trigger"\n                        " their behavior."\n                    )\n\n            if save_notebook_on_failure:\n                storage_dir = step_context.instance.storage_directory()\n                storage_path = os.path.join(storage_dir, f"{prefix}-out.ipynb")\n                with open(storage_path, "wb") as dest_file_obj:\n                    with open(executed_notebook_path, "rb") as obj:\n                        dest_file_obj.write(obj.read())\n\n                step_context.log.info(f"Failed notebook written to {storage_path}")\n\n            raise\n\n    step_context.log.debug(f"Notebook execution complete for {name} at {executed_notebook_path}.")\n\n    return executed_notebook_path\n\n\ndef _handle_events_from_notebook(\n    step_context: StepExecutionContext, executed_notebook_path: str\n) -> Iterable:\n    # deferred import for perf\n    import scrapbook\n\n    output_nb = scrapbook.read_notebook(executed_notebook_path)\n\n    for output_name in step_context.op_def.output_dict.keys():\n        data_dict = output_nb.scraps.data_dict\n        if output_name in data_dict:\n            # read outputs that were passed out of process via io manager from `yield_result`\n            step_output_handle = StepOutputHandle(\n                step_key=step_context.step.key,\n                output_name=output_name,\n            )\n            output_context = step_context.get_output_context(step_output_handle)\n            io_manager = step_context.get_io_manager(step_output_handle)\n            value = io_manager.load_input(\n                build_input_context(\n                    upstream_output=output_context, dagster_type=output_context.dagster_type\n                )\n            )\n\n            yield Output(value, output_name)\n\n    for key, value in output_nb.scraps.items():\n        if key.startswith("event-"):\n            with open(value.data, "rb") as fd:\n                event = pickle.loads(fd.read())\n                if isinstance(event, (Failure, RetryRequested)):\n                    raise event\n                else:\n                    yield event\n\n\ndef _make_dagstermill_compute_fn(\n    dagster_factory_name: str,\n    name: str,\n    notebook_path: str,\n    output_notebook_name: Optional[str] = None,\n    asset_key_prefix: Optional[Sequence[str]] = None,\n    output_notebook: Optional[str] = None,\n    save_notebook_on_failure: bool = False,\n) -> Callable:\n    def _t_fn(op_context: OpExecutionContext, inputs: Mapping[str, object]) -> Iterable:\n        check.param_invariant(\n            isinstance(op_context.run_config, dict),\n            "context",\n            "StepExecutionContext must have valid run_config",\n        )\n\n        step_context = op_context.get_step_execution_context()\n\n        with tempfile.TemporaryDirectory() as output_notebook_dir:\n            executed_notebook_path = execute_notebook(\n                step_context,\n                name=name,\n                inputs=inputs,\n                save_notebook_on_failure=save_notebook_on_failure,\n                notebook_path=notebook_path,\n                output_notebook_dir=output_notebook_dir,\n            )\n\n            if output_notebook_name is not None:\n                # yield output notebook binary stream as an op output\n                with open(executed_notebook_path, "rb") as fd:\n                    yield Output(fd.read(), output_notebook_name)\n\n            else:\n      
          # backcompat\n                executed_notebook_file_handle = None\n                try:\n                    # use binary mode when when moving the file since certain file_managers such as S3\n                    # may try to hash the contents\n                    with open(executed_notebook_path, "rb") as fd:\n                        executed_notebook_file_handle = op_context.resources.file_manager.write(\n                            fd, mode="wb", ext="ipynb"\n                        )\n                        executed_notebook_materialization_path = (\n                            executed_notebook_file_handle.path_desc\n                        )\n\n                    yield AssetMaterialization(\n                        asset_key=[*(asset_key_prefix or []), f"{name}_output_notebook"],\n                        description="Location of output notebook in file manager",\n                        metadata={\n                            "path": MetadataValue.path(executed_notebook_materialization_path),\n                        },\n                    )\n\n                except Exception:\n                    # if file manager writing errors, e.g. file manager is not provided, we throw a warning\n                    # and fall back to the previously stored temp executed notebook.\n                    op_context.log.warning(\n                        "Error when attempting to materialize executed notebook using file"\n                        " manager:"\n                        f" {serializable_error_info_from_exc_info(sys.exc_info())}\\nNow"\n                        " falling back to local: notebook execution was temporarily materialized"\n                        f" at {executed_notebook_path}\\nIf you have supplied a file manager and"\n                        " expect to use it for materializing the notebook, please include"\n                        ' "file_manager" in the `required_resource_keys` argument to'\n                        f" `{dagster_factory_name}`"\n                    )\n\n                if output_notebook is not None:\n                    yield Output(executed_notebook_file_handle, output_notebook)\n\n            yield from _handle_events_from_notebook(step_context, executed_notebook_path)\n\n    return _t_fn\n\n\n
[docs]def define_dagstermill_op(\n name: str,\n notebook_path: str,\n ins: Optional[Mapping[str, In]] = None,\n outs: Optional[Mapping[str, Out]] = None,\n config_schema: Optional[Union[Any, Mapping[str, Any]]] = None,\n required_resource_keys: Optional[Set[str]] = None,\n output_notebook_name: Optional[str] = None,\n asset_key_prefix: Optional[Union[Sequence[str], str]] = None,\n description: Optional[str] = None,\n tags: Optional[Mapping[str, Any]] = None,\n io_manager_key: Optional[str] = None,\n save_notebook_on_failure: bool = False,\n) -> OpDefinition:\n """Wrap a Jupyter notebook in an op.\n\n Arguments:\n name (str): The name of the op.\n notebook_path (str): Path to the backing notebook.\n ins (Optional[Mapping[str, In]]): The op's inputs.\n outs (Optional[Mapping[str, Out]]): The op's outputs. Your notebook should\n call :py:func:`~dagstermill.yield_result` to yield each of these outputs.\n required_resource_keys (Optional[Set[str]]): The string names of any required resources.\n output_notebook_name (Optional[str]): If set, will be used as the name of an injected output\n of type :py:class:`~dagster.BufferedIOBase` that is the file object of the executed\n notebook (in addition to the :py:class:`~dagster.AssetMaterialization` that is always\n created). It allows the downstream ops to access the executed notebook via a file\n object.\n asset_key_prefix (Optional[Union[List[str], str]]): If set, will be used to prefix the\n asset keys for materialized notebooks.\n description (Optional[str]): If set, description used for op.\n tags (Optional[Dict[str, str]]): If set, additional tags used to annotate op.\n Dagster uses the tag keys `notebook_path` and `kind`, which cannot be\n overwritten by the user.\n io_manager_key (Optional[str]): If using output_notebook_name, you can additionally provide\n a string key for the IO manager used to store the output notebook.\n If not provided, the default key output_notebook_io_manager will be used.\n save_notebook_on_failure (bool): If True and the notebook fails during execution, the failed notebook will be\n written to the Dagster storage directory. 
The location of the file will be printed in the Dagster logs.\n Defaults to False.\n\n Returns:\n :py:class:`~dagster.OpDefinition`\n """\n check.str_param(name, "name")\n check.str_param(notebook_path, "notebook_path")\n check.bool_param(save_notebook_on_failure, "save_notebook_on_failure")\n\n required_resource_keys = set(\n check.opt_set_param(required_resource_keys, "required_resource_keys", of_type=str)\n )\n outs = check.opt_mapping_param(outs, "outs", key_type=str, value_type=Out)\n ins = check.opt_mapping_param(ins, "ins", key_type=str, value_type=In)\n\n if output_notebook_name is not None:\n io_mgr_key = check.opt_str_param(\n io_manager_key, "io_manager_key", default="output_notebook_io_manager"\n )\n required_resource_keys.add(io_mgr_key)\n outs = {\n **outs,\n cast(str, output_notebook_name): Out(io_manager_key=io_mgr_key),\n }\n\n if isinstance(asset_key_prefix, str):\n asset_key_prefix = [asset_key_prefix]\n\n asset_key_prefix = check.opt_list_param(asset_key_prefix, "asset_key_prefix", of_type=str)\n\n default_description = f"This op is backed by the notebook at {notebook_path}"\n description = check.opt_str_param(description, "description", default=default_description)\n\n user_tags = validate_tags(tags)\n if tags is not None:\n check.invariant(\n "notebook_path" not in tags,\n "user-defined op tags contains the `notebook_path` key, but the `notebook_path` key"\n " is reserved for use by Dagster",\n )\n check.invariant(\n "kind" not in tags,\n "user-defined op tags contains the `kind` key, but the `kind` key is reserved for"\n " use by Dagster",\n )\n default_tags = {"notebook_path": _clean_path_for_windows(notebook_path), "kind": "ipynb"}\n\n if safe_is_subclass(config_schema, Config):\n config_schema = infer_schema_from_config_class(cast(Type[Config], config_schema))\n\n return OpDefinition(\n name=name,\n compute_fn=_make_dagstermill_compute_fn(\n "define_dagstermill_op",\n name,\n notebook_path,\n output_notebook_name,\n asset_key_prefix=asset_key_prefix,\n save_notebook_on_failure=save_notebook_on_failure,\n ),\n ins=ins,\n outs=outs,\n config_schema=config_schema,\n required_resource_keys=required_resource_keys,\n description=description,\n tags={**user_tags, **default_tags},\n )
\n
", "current_page_name": "_modules/dagstermill/factory", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagstermill.factory"}, "io_managers": {"alabaster_version": "0.7.13", "body": "

Source code for dagstermill.io_managers

\nimport os\nfrom pathlib import Path\nfrom typing import Any, List, Optional, Sequence\n\nimport dagster._check as check\nfrom dagster import (\n    AssetKey,\n    AssetMaterialization,\n    ConfigurableIOManagerFactory,\n    InitResourceContext,\n    IOManager,\n)\nfrom dagster._core.definitions.metadata import MetadataValue\nfrom dagster._core.execution.context.input import InputContext\nfrom dagster._core.execution.context.output import OutputContext\nfrom dagster._core.storage.io_manager import dagster_maintained_io_manager, io_manager\nfrom dagster._utils import mkdir_p\nfrom pydantic import Field\n\nfrom dagstermill.factory import _clean_path_for_windows\n\n\nclass OutputNotebookIOManager(IOManager):\n    def __init__(self, asset_key_prefix: Optional[Sequence[str]] = None):\n        self.asset_key_prefix = asset_key_prefix if asset_key_prefix else []\n\n    def handle_output(self, context: OutputContext, obj: bytes):\n        raise NotImplementedError\n\n    def load_input(self, context: InputContext) -> Any:\n        raise NotImplementedError\n\n\nclass LocalOutputNotebookIOManager(OutputNotebookIOManager):\n    def __init__(self, base_dir: str, asset_key_prefix: Optional[Sequence[str]] = None):\n        super(LocalOutputNotebookIOManager, self).__init__(asset_key_prefix=asset_key_prefix)\n        self.base_dir = base_dir\n        self.write_mode = "wb"\n        self.read_mode = "rb"\n\n    def _get_path(self, context: OutputContext) -> str:\n        """Automatically construct filepath."""\n        if context.has_asset_key:\n            keys = context.get_asset_identifier()\n        else:\n            keys = context.get_run_scoped_output_identifier()\n        return str(Path(self.base_dir, *keys).with_suffix(".ipynb"))\n\n    def handle_output(self, context: OutputContext, obj: bytes):\n        """obj: bytes."""\n        check.inst_param(context, "context", OutputContext)\n\n        # the output notebook itself is stored at output_file_path\n        output_notebook_path = self._get_path(context)\n        mkdir_p(os.path.dirname(output_notebook_path))\n        with open(output_notebook_path, self.write_mode) as dest_file_obj:\n            dest_file_obj.write(obj)\n\n        metadata = {\n            "Executed notebook": MetadataValue.notebook(\n                _clean_path_for_windows(output_notebook_path)\n            )\n        }\n\n        if context.has_asset_key:\n            context.add_output_metadata(metadata)\n        else:\n            context.log_event(\n                AssetMaterialization(\n                    asset_key=AssetKey(\n                        [*self.asset_key_prefix, f"{context.step_key}_output_notebook"]\n                    ),\n                    metadata=metadata,\n                )\n            )\n\n    def load_input(self, context: InputContext) -> bytes:\n        check.inst_param(context, "context", InputContext)\n        # pass output notebook to downstream ops as File Object\n        output_context = check.not_none(context.upstream_output)\n        with open(self._get_path(output_context), self.read_mode) as file_obj:\n            return file_obj.read()\n\n\n
[docs]class ConfigurableLocalOutputNotebookIOManager(ConfigurableIOManagerFactory):\n """Built-in IO Manager for handling output notebook."""\n\n base_dir: Optional[str] = Field(\n default=None,\n description=(\n "Base directory to use for output notebooks. Defaults to the Dagster instance storage"\n " directory if not provided."\n ),\n )\n asset_key_prefix: List[str] = Field(\n default=[],\n description=(\n "Asset key prefix to apply to assets materialized for output notebooks. Defaults to no"\n " prefix."\n ),\n )\n\n @classmethod\n def _is_dagster_maintained(cls) -> bool:\n return True\n\n def create_io_manager(self, context: InitResourceContext) -> "LocalOutputNotebookIOManager":\n return LocalOutputNotebookIOManager(\n base_dir=self.base_dir or check.not_none(context.instance).storage_directory(),\n asset_key_prefix=self.asset_key_prefix,\n )
\n\n\n@dagster_maintained_io_manager\n@io_manager(config_schema=ConfigurableLocalOutputNotebookIOManager.to_config_schema())\ndef local_output_notebook_io_manager(init_context) -> LocalOutputNotebookIOManager:\n """Built-in IO Manager that handles output notebooks."""\n return ConfigurableLocalOutputNotebookIOManager.from_resource_context(init_context)\n
", "current_page_name": "_modules/dagstermill/io_managers", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagstermill.io_managers"}, "manager": {"alabaster_version": "0.7.13", "body": "

Source code for dagstermill.manager

\nimport os\nimport pickle\nimport uuid\nfrom typing import TYPE_CHECKING, AbstractSet, Any, Mapping, Optional, cast\n\nfrom dagster import (\n    AssetMaterialization,\n    AssetObservation,\n    ExpectationResult,\n    Failure,\n    LoggerDefinition,\n    ResourceDefinition,\n    StepExecutionContext,\n    TypeCheck,\n    _check as check,\n)\nfrom dagster._core.definitions.dependency import NodeHandle\nfrom dagster._core.definitions.events import RetryRequested\nfrom dagster._core.definitions.graph_definition import GraphDefinition\nfrom dagster._core.definitions.job_base import InMemoryJob\nfrom dagster._core.definitions.job_definition import JobDefinition\nfrom dagster._core.definitions.op_definition import OpDefinition\nfrom dagster._core.definitions.reconstruct import ReconstructableJob\nfrom dagster._core.definitions.resource_definition import ScopedResourcesBuilder\nfrom dagster._core.events import DagsterEvent\nfrom dagster._core.execution.api import create_execution_plan, scoped_job_context\nfrom dagster._core.execution.plan.outputs import StepOutputHandle\nfrom dagster._core.execution.plan.plan import ExecutionPlan\nfrom dagster._core.execution.plan.state import KnownExecutionState\nfrom dagster._core.execution.plan.step import ExecutionStep\nfrom dagster._core.execution.resources_init import (\n    get_required_resource_keys_to_init,\n    resource_initialization_event_generator,\n)\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._core.instance.ref import InstanceRef\nfrom dagster._core.log_manager import DagsterLogManager\nfrom dagster._core.storage.dagster_run import DagsterRun, DagsterRunStatus\nfrom dagster._core.system_config.objects import ResolvedRunConfig, ResourceConfig\nfrom dagster._core.utils import make_new_run_id\nfrom dagster._loggers import colored_console_logger\nfrom dagster._serdes import unpack_value\nfrom dagster._utils import EventGenerationManager\n\nfrom .context import DagstermillExecutionContext, DagstermillRuntimeExecutionContext\nfrom .errors import DagstermillError\nfrom .serialize import PICKLE_PROTOCOL\n\nif TYPE_CHECKING:\n    from dagster._core.definitions.node_definition import NodeDefinition\n\n\nclass DagstermillResourceEventGenerationManager(EventGenerationManager):\n    """Utility class to explicitly manage setup/teardown of resource events. 
Overrides the default\n    `generate_teardown_events` method so that teardown is deferred until explicitly called by the\n    dagstermill Manager.\n    """\n\n    def generate_teardown_events(self):\n        return iter(())\n\n    def teardown(self):\n        return [\n            teardown_event\n            for teardown_event in super(\n                DagstermillResourceEventGenerationManager, self\n            ).generate_teardown_events()\n        ]\n\n\nclass Manager:\n    def __init__(self):\n        self.job = None\n        self.op_def: Optional[NodeDefinition] = None\n        self.in_job: bool = False\n        self.marshal_dir: Optional[str] = None\n        self.context = None\n        self.resource_manager = None\n\n    def _setup_resources(\n        self,\n        resource_defs: Mapping[str, ResourceDefinition],\n        resource_configs: Mapping[str, ResourceConfig],\n        log_manager: DagsterLogManager,\n        execution_plan: Optional[ExecutionPlan],\n        dagster_run: Optional[DagsterRun],\n        resource_keys_to_init: Optional[AbstractSet[str]],\n        instance: Optional[DagsterInstance],\n        emit_persistent_events: Optional[bool],\n    ):\n        """Drop-in replacement for\n        `dagster._core.execution.resources_init.resource_initialization_manager`.  It uses a\n        `DagstermillResourceEventGenerationManager` and explicitly calls `teardown` on it.\n        """\n        generator = resource_initialization_event_generator(\n            resource_defs=resource_defs,\n            resource_configs=resource_configs,\n            log_manager=log_manager,\n            execution_plan=execution_plan,\n            dagster_run=dagster_run,\n            resource_keys_to_init=resource_keys_to_init,\n            instance=instance,\n            emit_persistent_events=emit_persistent_events,\n        )\n        self.resource_manager = DagstermillResourceEventGenerationManager(\n            generator, ScopedResourcesBuilder\n        )\n        return self.resource_manager\n\n    def reconstitute_job_context(\n        self,\n        executable_dict: Mapping[str, Any],\n        job_run_dict: Mapping[str, Any],\n        node_handle_kwargs: Mapping[str, Any],\n        instance_ref_dict: Mapping[str, Any],\n        step_key: str,\n        output_log_path: Optional[str] = None,\n        marshal_dir: Optional[str] = None,\n        run_config: Optional[Mapping[str, Any]] = None,\n    ):\n        """Reconstitutes a context for dagstermill-managed execution.\n\n        You'll see this function called to reconstruct a job context within the ``injected\n        parameters`` cell of a dagstermill output notebook. Users should not call this function\n        interactively except when debugging output notebooks.\n\n        Use :func:`dagstermill.get_context` in the ``parameters`` cell of your notebook to define a\n        context for interactive exploration and development. 
This call will be replaced by one to\n        :func:`dagstermill.reconstitute_job_context` when the notebook is executed by\n        dagstermill.\n        """\n        check.opt_str_param(output_log_path, "output_log_path")\n        check.opt_str_param(marshal_dir, "marshal_dir")\n        run_config = check.opt_mapping_param(run_config, "run_config", key_type=str)\n        check.mapping_param(job_run_dict, "job_run_dict")\n        check.mapping_param(executable_dict, "executable_dict")\n        check.mapping_param(node_handle_kwargs, "node_handle_kwargs")\n        check.mapping_param(instance_ref_dict, "instance_ref_dict")\n        check.str_param(step_key, "step_key")\n\n        job = ReconstructableJob.from_dict(executable_dict)\n        job_def = job.get_definition()\n\n        try:\n            instance_ref = unpack_value(instance_ref_dict, InstanceRef)\n            instance = DagsterInstance.from_ref(instance_ref)\n        except Exception as err:\n            raise DagstermillError(\n                "Error when attempting to resolve DagsterInstance from serialized InstanceRef"\n            ) from err\n\n        dagster_run = unpack_value(job_run_dict, DagsterRun)\n\n        node_handle = NodeHandle.from_dict(node_handle_kwargs)\n        op = job_def.get_node(node_handle)\n        op_def = op.definition\n\n        self.marshal_dir = marshal_dir\n        self.in_job = True\n        self.op_def = op_def\n        self.job = job\n\n        ResolvedRunConfig.build(job_def, run_config)\n\n        execution_plan = create_execution_plan(\n            self.job,\n            run_config,\n            step_keys_to_execute=dagster_run.step_keys_to_execute,\n        )\n\n        with scoped_job_context(\n            execution_plan,\n            job,\n            run_config,\n            dagster_run,\n            instance,\n            scoped_resources_builder_cm=self._setup_resources,\n            # Set this flag even though we're not in test for clearer error reporting\n            raise_on_error=True,\n        ) as job_context:\n            known_state = None\n            if dagster_run.parent_run_id:\n                known_state = KnownExecutionState.build_for_reexecution(\n                    instance=instance,\n                    parent_run=check.not_none(instance.get_run_by_id(dagster_run.parent_run_id)),\n                )\n            self.context = DagstermillRuntimeExecutionContext(\n                job_context=job_context,\n                job_def=job_def,\n                op_config=run_config.get("ops", {}).get(op.name, {}).get("config"),\n                resource_keys_to_init=get_required_resource_keys_to_init(\n                    execution_plan,\n                    job_def,\n                ),\n                op_name=op.name,\n                node_handle=node_handle,\n                step_context=cast(\n                    StepExecutionContext,\n                    job_context.for_step(\n                        cast(ExecutionStep, execution_plan.get_step_by_key(step_key)),\n                        known_state=known_state,\n                    ),\n                ),\n            )\n\n        return self.context\n\n    def get_context(\n        self,\n        op_config: Any = None,\n        resource_defs: Optional[Mapping[str, ResourceDefinition]] = None,\n        logger_defs: Optional[Mapping[str, LoggerDefinition]] = None,\n        run_config: Optional[dict] = None,\n    ) -> DagstermillExecutionContext:\n        """Get a dagstermill execution context for interactive exploration 
and development.\n\n        Args:\n            op_config (Optional[Any]): If specified, this value will be made available on the\n                context as its ``op_config`` property.\n            resource_defs (Optional[Mapping[str, ResourceDefinition]]): Specifies resources to provide to context.\n            logger_defs (Optional[Mapping[str, LoggerDefinition]]): Specifies loggers to provide to context.\n            run_config(Optional[dict]): The config dict with which to construct\n                the context.\n\n        Returns:\n            :py:class:`~dagstermill.DagstermillExecutionContext`\n        """\n        run_config = check.opt_dict_param(run_config, "run_config", key_type=str)\n\n        # If we are running non-interactively, and there is already a context reconstituted, return\n        # that context rather than overwriting it.\n        if self.context is not None and isinstance(\n            self.context, DagstermillRuntimeExecutionContext\n        ):\n            return self.context\n\n        if not logger_defs:\n            logger_defs = {"dagstermill": colored_console_logger}\n            run_config["loggers"] = {"dagstermill": {}}\n        logger_defs = check.opt_mapping_param(logger_defs, "logger_defs")\n        resource_defs = check.opt_mapping_param(resource_defs, "resource_defs")\n\n        op_def = OpDefinition(\n            name="this_op",\n            compute_fn=lambda *args, **kwargs: None,\n            description="Ephemeral op constructed by dagstermill.get_context()",\n            required_resource_keys=set(resource_defs.keys()),\n        )\n\n        job_def = JobDefinition(\n            graph_def=GraphDefinition(name="ephemeral_dagstermill_pipeline", node_defs=[op_def]),\n            logger_defs=logger_defs,\n            resource_defs=resource_defs,\n        )\n\n        run_id = make_new_run_id()\n\n        # construct stubbed DagsterRun for notebook exploration...\n        # The actual dagster run during job execution will be serialized and reconstituted\n        # in the `reconstitute_job_context` call\n        dagster_run = DagsterRun(\n            job_name=job_def.name,\n            run_id=run_id,\n            run_config=run_config,\n            step_keys_to_execute=None,\n            status=DagsterRunStatus.NOT_STARTED,\n            tags=None,\n        )\n\n        self.in_job = False\n        self.op_def = op_def\n        self.job = job_def\n\n        job = InMemoryJob(job_def)\n        execution_plan = create_execution_plan(job, run_config)\n\n        with scoped_job_context(\n            execution_plan,\n            job,\n            run_config,\n            dagster_run,\n            DagsterInstance.ephemeral(),\n            scoped_resources_builder_cm=self._setup_resources,\n        ) as job_context:\n            self.context = DagstermillExecutionContext(\n                job_context=job_context,\n                job_def=job_def,\n                op_config=op_config,\n                resource_keys_to_init=get_required_resource_keys_to_init(\n                    execution_plan,\n                    job_def,\n                ),\n                op_name=op_def.name,\n                node_handle=NodeHandle(op_def.name, parent=None),\n            )\n\n        return self.context\n\n    def yield_result(self, value, output_name="result"):\n        """Yield a result directly from notebook code.\n\n        When called interactively or in development, returns its input.\n\n        Args:\n            value (Any): The value to yield.\n            
output_name (Optional[str]): The name of the result to yield (default: ``'result'``).\n        """\n        if not self.in_job:\n            return value\n\n        # deferred import for perf\n        import scrapbook\n\n        if not self.op_def.has_output(output_name):\n            raise DagstermillError(\n                f"Op {self.op_def.name} does not have output named {output_name}.Expected one of"\n                f" {[str(output_def.name) for output_def in self.op_def.output_defs]}"\n            )\n\n        # pass output value cross process boundary using io manager\n        step_context = self.context._step_context  # noqa: SLF001\n        # Note: yield_result currently does not support DynamicOutput\n\n        # dagstermill assets do not support yielding additional results within the notebook:\n        if len(step_context.job_def.asset_layer.asset_keys) > 0:\n            raise DagstermillError(\n                "dagstermill assets do not currently support dagstermill.yield_result"\n            )\n\n        step_output_handle = StepOutputHandle(\n            step_key=step_context.step.key, output_name=output_name\n        )\n        output_context = step_context.get_output_context(step_output_handle)\n        io_manager = step_context.get_io_manager(step_output_handle)\n\n        # Note that we assume io manager is symmetric, i.e handle_input(handle_output(X)) == X\n        io_manager.handle_output(output_context, value)\n\n        # record that the output has been yielded\n        scrapbook.glue(output_name, "")\n\n    def yield_event(self, dagster_event):\n        """Yield a dagster event directly from notebook code.\n\n        When called interactively or in development, returns its input.\n\n        Args:\n            dagster_event (Union[:class:`dagster.AssetMaterialization`, :class:`dagster.ExpectationResult`, :class:`dagster.TypeCheck`, :class:`dagster.Failure`, :class:`dagster.RetryRequested`]):\n                An event to yield back to Dagster.\n        """\n        valid_types = (\n            AssetMaterialization,\n            AssetObservation,\n            ExpectationResult,\n            TypeCheck,\n            Failure,\n            RetryRequested,\n        )\n        if not isinstance(dagster_event, valid_types):\n            raise DagstermillError(\n                f"Received invalid type {dagster_event} in yield_event. 
Expected a Dagster event"\n                f" type, one of {valid_types}."\n            )\n\n        if not self.in_job:\n            return dagster_event\n\n        # deferred import for perf\n        import scrapbook\n\n        event_id = f"event-{uuid.uuid4()}"\n        out_file_path = os.path.join(self.marshal_dir, event_id)\n        with open(out_file_path, "wb") as fd:\n            fd.write(pickle.dumps(dagster_event, PICKLE_PROTOCOL))\n\n        scrapbook.glue(event_id, out_file_path)\n\n    def teardown_resources(self):\n        if self.resource_manager is not None:\n            self.resource_manager.teardown()\n\n    def load_input_parameter(self, input_name: str):\n        # load input from source\n        dm_context = check.not_none(self.context)\n        if not isinstance(dm_context, DagstermillRuntimeExecutionContext):\n            check.failed("Expected DagstermillRuntimeExecutionContext")\n        step_context = dm_context.step_context\n        step_input = step_context.step.step_input_named(input_name)\n        input_def = step_context.op_def.input_def_named(input_name)\n        for event_or_input_value in step_input.source.load_input_object(step_context, input_def):\n            if isinstance(event_or_input_value, DagsterEvent):\n                continue\n            else:\n                return event_or_input_value\n\n\nMANAGER_FOR_NOTEBOOK_INSTANCE = Manager()\n
", "current_page_name": "_modules/dagstermill/manager", "customsidebar": null, "favicon_url": null, "logo_url": null, "parents": [{"link": "../../", "title": "Module code"}], "sidebars": ["about.html", "navigation.html", "relations.html", "searchbox.html", "donate.html"], "title": "dagstermill.manager"}}} \ No newline at end of file diff --git a/docs/content/concepts.mdx b/docs/content/concepts.mdx index 72155dc186335..22b2d01b2e173 100644 --- a/docs/content/concepts.mdx +++ b/docs/content/concepts.mdx @@ -45,6 +45,10 @@ An asset is an object in persistent storage, such as a table, file, or persisted title="Asset checks (Experimental)" href="/concepts/assets/asset-checks" > + --- diff --git a/docs/content/concepts/assets/external-assets.mdx b/docs/content/concepts/assets/external-assets.mdx new file mode 100644 index 0000000000000..393f8643e6787 --- /dev/null +++ b/docs/content/concepts/assets/external-assets.mdx @@ -0,0 +1,334 @@ +--- +title: External Assets | Dagster +description: External assets model assets in Dagster that are not scheduled or materialized in Dagster. +--- + +# External Assets (Experimental) + +An **external asset** is an asset that is not materialized by Dagster, but is tracked in the asset graph and asset catalog. This allows you to model assets in Dagster, attach metadata and events to those assets, but without scheduling their materialization with Dagster. + +**External assets are a good fit when data is**: + +- Landed by an external source (e.g. an external file landing daily; Kafka landing data into Amazon S3) +- Created and processed using manual processes +- Materialized by existing pipelines with their own scheduling and infrastructure that you do not want to or need to migrate en masse + +**With an external asset, you can:** + +- Attach metadata to its definition for documentation, tracking ownership, and so on +- Track its data quality and version in Dagster +- Use [asset sensors](/concepts/partitions-schedules-sensors/asset-sensors) or auto-materialize policies to update downstream assets based on updates to external assets + +**You cannot, however:** + +- Schedule an external asset's materialization +- Backfill an external asset using Dagster +- Use the [Dagster UI](/concepts/webserver/ui) or [GraphQL API](/concepts/webserver/graphql) to instigate ad hoc materializations + + + What about Source Assets? A common use case for external + assets is modeling data produced by a process not under Dagster's control. For + example, a daily file drop from a third party into Amazon S3. In most systems, + these are described as sources. This includes Dagster, which + includes . As + external assets are a superset of Source Asset functionality,{" "} + + source assets will be supplanted by external assets in the near future + + . + + +--- + +## Relevant APIs + +| Name | Description | +| ------------------------------------------------ | ------------------------------------------------------------------------------------------- | +| | Create list of objects that represent external assets | +| | An object that represents the metadata of a particular asset | + +--- + +## Defining external assets + +The following code declares a single external asset that represents a file in S3 and passes it to a object: + + + + +Click the **Asset in the Dagster UI** tab to see how this asset would be rendered in the Dagster UI. 
+
+```python file=/concepts/assets/external_assets/single_declaration.py
+from dagster import AssetSpec, Definitions, external_asset_from_spec
+
+defs = Definitions(assets=[external_asset_from_spec(AssetSpec("file_in_s3"))])
+```
+
+---
+
+Click the **Asset definition** tab to view how this asset is defined.
+
+---
+
+### External assets with dependencies
+
+External assets can depend only on other external assets.
+
+Dependencies are defined by using the `deps` argument of `AssetSpec`. This enables Dagster to model entire graphs of assets scheduled and orchestrated by other systems.
+
+In the following example, we have two assets: `raw_logs` and `processed_logs`. The `processed_logs` asset is produced by a scheduled computation in another orchestration system. Using external assets allows you to model both assets in Dagster.
+
+Click the **Assets in the Dagster UI** tab to see how these assets would be rendered in the Dagster UI.
+
+```python file=/concepts/assets/external_assets/external_asset_deps.py
+from dagster import AssetSpec, Definitions, external_assets_from_specs
+
+raw_logs = AssetSpec("raw_logs")
+processed_logs = AssetSpec("processed_logs", deps=[raw_logs])
+
+defs = Definitions(assets=external_assets_from_specs([raw_logs, processed_logs]))
+```
+
+---
+
+Click the **Asset definitions** tab to view how these assets are defined.
+
+---
+
+### Fully-managed assets with external asset dependencies
+
+Fully-managed assets can depend on external assets. In this example, the `aggregated_logs` asset depends on `processed_logs`, which is an external asset:
+
+Click the **Assets in the Dagster UI** tab to see how these assets would be rendered in the Dagster UI.
+
+```python file=/concepts/assets/external_assets/normal_asset_depending_on_external.py
+from dagster import AssetSpec, Definitions, asset, external_assets_from_specs
+
+raw_logs = AssetSpec("raw_logs")
+processed_logs = AssetSpec("processed_logs", deps=[raw_logs])
+
+
+@asset(deps=[processed_logs])
+def aggregated_logs() -> None:
+    # Loads "processed_logs" into memory and performs some aggregation
+    ...
+
+
+defs = Definitions(
+    assets=[aggregated_logs, *external_assets_from_specs([raw_logs, processed_logs])]
+)
+```
+
+Click the **Asset definitions** tab to view how these assets are defined.
+
+---
+
+## Updating external asset metadata
+
+As Dagster doesn't control scheduling or materializing external assets, it's up to you to keep their metadata updated. This also means that materialization for external assets will be disabled in the Dagster UI.
+
+To keep your external assets updated, you can use any of the following approaches:
+
+- [The REST API](#using-the-rest-api)
+- [Sensors](#using-sensors)
+- [The Python API](#using-the-python-api)
+- [Logging events in ops](#logging-events-in-unrelated-ops)
+
+### Using the REST API
+
+Dagster OSS exposes a REST endpoint for reporting asset materializations. Refer to the following tabs for examples of calling it with a `curl` command and of invoking it from Python.
+The following demonstrates how to use a `curl` command in a shell script to communicate with the API:
+
+```bash
+curl --request POST \
+    --url https://path/to/instance/report_asset_materialization/{asset_key} \
+    --header 'Content-Type: application/json' \
+    --data '{
+  "metadata" : {
+    "source": "From curl command"
+  }
+}'
+```
+
+The following demonstrates how to invoke the API in Python using the `requests` library:
+
+```python
+import requests
+
+asset_key = "file_in_s3"  # the key of the external asset being reported
+url = f"https://path/to/instance/report_asset_materialization/{asset_key}"
+payload = { "metadata": { "source": "From python script" } }
+headers = { "Content-Type": "application/json" }
+
+response = requests.request("POST", url, json=payload, headers=headers)
+```
+
+The API also has endpoints for reporting [asset observations](/concepts/assets/asset-observations) and [asset check evaluations](/concepts/assets/asset-checks).
+
+### Using sensors
+
+By using the `asset_events` parameter of `SensorResult`, you can generate events to attach to external assets directly from a sensor. For example:
+
+```python file=/concepts/assets/external_assets/external_asset_using_sensor.py
+import datetime
+
+from dagster import (
+    AssetMaterialization,
+    AssetSpec,
+    Definitions,
+    SensorEvaluationContext,
+    SensorResult,
+    external_asset_from_spec,
+    sensor,
+)
+
+
+def utc_now_str() -> str:
+    return datetime.datetime.now(datetime.timezone.utc).strftime("%Y-%m-%d, %H:%M:%S")
+
+
+@sensor()
+def keep_external_asset_a_up_to_date(context: SensorEvaluationContext) -> SensorResult:
+    # Materialization happened in external system, but is recorded here
+    return SensorResult(
+        asset_events=[
+            AssetMaterialization(
+                asset_key="external_asset_a",
+                metadata={
+                    "source": f'From sensor "{context.sensor_name}" at UTC time "{utc_now_str()}"'
+                },
+            )
+        ]
+    )
+
+
+defs = Definitions(
+    assets=[external_asset_from_spec(AssetSpec("external_asset_a"))],
+    sensors=[keep_external_asset_a_up_to_date],
+)
+```
+
+### Using the Python API
+
+You can insert events to attach to external assets directly from Dagster's Python API. Specifically, the API is `report_runless_asset_event` on `DagsterInstance`.
+
+For example, this would be useful when writing a hand-rolled Python script to backfill metadata:
+
+```python file=/concepts/assets/external_assets/external_asset_events_using_python_api.py startafter=start_python_api_marker endbefore=end_python_api_marker dedent=4
+from dagster import AssetMaterialization
+
+# instance is a DagsterInstance. Get using DagsterInstance.get()
+instance.report_runless_asset_event(
+    AssetMaterialization(
+        "asset_one", metadata={"nrows": 10, "source": "From this script."}
+    )
+)
+```
+
+### Logging events in unrelated ops
+
+You can log an `AssetMaterialization` from a bare op. In this case, use the `log_event` method of `OpExecutionContext` to report an asset materialization of an external asset.
+For example:
+
+```python file=/concepts/assets/external_assets/update_external_asset_via_op.py
+from dagster import (
+    AssetMaterialization,
+    AssetSpec,
+    Definitions,
+    OpExecutionContext,
+    external_asset_from_spec,
+    job,
+    op,
+)
+
+
+@op
+def an_op(context: OpExecutionContext) -> None:
+    context.log_event(AssetMaterialization(asset_key="external_asset"))
+
+
+@job
+def a_job() -> None:
+    an_op()
+
+
+defs = Definitions(
+    assets=[external_asset_from_spec(AssetSpec("external_asset"))], jobs=[a_job]
+)
+```
+
+---
+
+## Related
+
+
diff --git a/docs/next/public/images/concepts/assets/external-asset.png b/docs/next/public/images/concepts/assets/external-asset.png
new file mode 100644
index 0000000000000000000000000000000000000000..68b7deb75d8d36915e53e2bb322ef17ab6a01e2c
GIT binary patch
literal 327433
z%SBskr=|wiS+UA}(}h{F9;4PfRHQa5L(4D~l$;1ou2=G9zv$7uP8*3H!z=(&hyL4Z zqnmIccShgKv1tmxxr{^ z9XFrTWY9ybLzWjR)C#H3vAl$l-Q97E?51wlzda)$uYR{h90|8GH6@pP)@Sa7g5sGO zgN2066comY-ps90zU2AEwJ5=Ea};Q5B?qeekV=O>U54m&3i_3fj|;xT)(pU}@^?DK zc#^}h@}o?laSxbC`p1m(aR1JtbQcBUI_srZ5&qmgItcTgZbKkJ1kYjtMvQ-L6@K^%(mdddIe)Gb`JNyW~H-{ zUb}Wrd^FM5glR>v6NV#=R66FTW|3`!k~OB^!zGj98`Z7Pu(-@mIK;6kW56h1 zs}swXd&a{%2tcBxsJHYZK4N(e+Pd9;)&fXAvs%`8^wcTH6_`>4&T4CjTD{EoH)l7I z^U0U`bE{X1vAA4X(YNPEr2yuH+VWDUDM;(`3ybJ_?CcU0ApS3moiWFZ__Q?bF7Dl)cIzMI6lVY}h@TnpCmDYGe#`>ENMGE! z-o0XA8C+PC=9;!Pd++*2Lmc=`>yGkDUTKu0c{KQw&9)~7S9izu-^(BuS){i_vqbLY;-NTJd`rA3MfBi{piewE4(4L3(gDAWhq;i}Y z-ZNa6r%YB0LM0uqEMoqxdZ2(C5`ZvAp98K=$Z=93PEc_Uh`jNV^?JiE!{y_JxB4}E z`}5^;zvHXML2lqQ;=&#=Ot#Wqnw{w;>8(McES&;P_k zKQe+>>8-d>6%ypS!{J;e(Q$(Vk^mbh&KYXZnTE-y00%NrMbR_S9*fI@GN1+~E88l(;W?Ht;_77C3p z?iiSz{=2?tJK9qiUHVnyTi1V{!0ap7n=u2}KFN^ScgVowO=+{dB;D`O#tp6sw{Zie zJR&+-l$0d%ZtV`=m0Q@Y0eL;YyGzNIw^e3=4>o`5Vj=o3$zy`RMh(e|cCdR5hl}f% z_Y76yN){p*&cWeb=YW4JJ0_5D0Wo*49L!%6@dTl?iVHLCQLsn7D-{1Wd%8#)ya;<#0h3v}a_aAsVC ziiIzEB{=?Kvo!Z*eyd=BtO4~qCWxss$f{O;93nitN8hdv-^hQd zB{xp+a-+oWoIh{HrON%wD-e9q$|_IS;PS_>lL13`!TQR{1b+o@Yk1}DEjrUQgYqG_ z$5S{yUm{ADU)xiHYBq*>6wnQ@3d8 z=pz({7II2q&9oA`0A8(Z0DWWAE9t>mH?IhC+xL}CY`Eh0!GRHk8D0uJ%^6C{A7KzonM9`^uD=e&7fl{Mr z<2t3pnnb+t`~m1p<$04u0k;Wz^zc4L?w!NN+ln++*-jz& zTdH(9O<0yUHxZwJpz!vDD`;y!I?uvK3}ey^0La@(H@A+g)26KE{D?cMs$kLvOeJ%_ zapU~De!n#Ne+y?esmbmawtXpCY7XAT+tSUk4M9fUJi9uzS-VSNjlr}$JZIhj5i0(r zJnzc*m};734EK!p-s(V0f#d$3NXVTRv$5+FuAZDG{S-7b#LNBGVo{uXDgohmw^$*u zcsAVjMi?k40JVzszP$0+Fy8KR_@i+cnD=O%W)m*xfOEWxi&>>(N#ImfSN)mqI8v0y)A{-t%& zw+UiN&GpB7yCO67toIYB79@cmUyn=fEH5ttU958v54k5WqWAcM3Fa$aJ-uXtN&kzA zJO42rX+AHw6wJ<~jXV`=;E&Va;rz#%r2`Md9AUTdo*q&#-w{g?Z3gNp@; zcoLAJLX`=U3|07O;lRQfSdKT;M@>96)wShJZ-j_;QYq}Ltjta_hTSky_>M+ZgVf7p zH!d-8jCx_0?ixu4#cc8RGaA{VWD>ONy4c6OOifX+P;0inxB|o}&E2!_O{})#%Mb!Ej`^V~O5i_o4a6Pen!%VMn0)OLJv#)F zumO}3$7#dgF^BK=v65*C`91AQ?c-6aI3eUP3ae@gfe#^`se&cgS=vo9o|$F8wzv09 zCKU@az%8W5@=zm`&I_c94)=YgPxsg1&1byt^6{NK2OUvHAL#WnzC9^GxiwL7`Y`ag zPsxkpX(2WimS?VI>pC-N84jxPYU9Y90cZ=pZ{Lc4(>VkKA1kigzp*bZwx?uZ5YFsr zZ%0BxLi?`n8gQ$Cg_Tt{AT}E2G(%O#iS*zQF4j#3O=tN=1U*MhFyrH~lha#B9kIt06ckS!IE|IB)z^PB@)L{)Lpy4Fl0c`Ou;ts^ zBQ#{mN@-u}10E*3ca^lgI)bW)2duKc*7?JYqP+wKm@&m)i z^W1HG%=f7vLN?a78$i6UoF$bKu)i_ITjp{K`}FD8)Q;>T?s-t-QB+zf(U5{E z>QGJ?jdCYL*9+f{ECc#Xdvo9QL^S;L7l`NSHX!%VXLGfBNAqH)OV#3xka8|NXC9yH zN9u)7Hs4|TbfV%-)$j|_SNl(E&M5LUD&NqSoCTxXZy8{9`*@4$ zo~B|KS>2I5gMYIn3tf!chp}Wg(|zmCKQUD<^V%uxDL>u`8~K!A4ha2e*ZKt&S&*aP z;(nX)74Q}Z#y}57#_&goQYoLSw~HT8HGX8GPGQN)(YCXrst@4x zFd50Dq@q&Ga9!-~KUgg+Fc_5OU!nJy{qnFKBKrJ#7~}F=QkMg|Y8zsK6%P`A9gkDN z6Iu9{Fl)(9@z*)@N-=jeCyH~iRSFnl(@pTJ54CIeYWn)5G60BN@i%KDnd{7^?iO=; z&HI_^$M|#2bvLf}+<~|r=J3|P<$is<-`7AuKCRcbl5O?UQTf?iKi5JL?9m&%N}-90 z(pOuv&iMt!mPERZ!AESPG0H4MEz9sX72dHN?*q%P;D zyzC~o@MMEreWNgrc81<8bU62pUlU&)QL1hZy{&DKTs)}pFmvHHuPK;fs{Tg9vDU{m z;O0FQ18b8zyC0>NB8fPSNehh4#Y=6l2D7IeF$gyZ0*NbpIjCY@*^`qsjBZK&J@d~R$s9*1skqSO>kp=fLb62xws9oEv5oU1&~p#BoqPANdQQ3q z$92?QDvY)2t=A0}md>>`=-VUXulhiR*1~Tuv;h;blqx&~425ou;6Q%*v_I!I(3kng ze7}nS|J$UxbnhT~D8NJVf4FUq{o;kg=F#A0fTEdt@eYRN!lw^{cl#yE!n@ISVgDlYO)8MNEL9;t zG_*ks{}_}b&T?=+mxOQq81kxoT8*|i2Fy)@`=&9)7;}pZ%UlE?zR%6Zj^wBLf|)nt zR0)eBA8)`q+`FfH^DBX=rt=GQvvtU5`SvyKDctVo+hH*VJ@pzhw!#nlvxp@}gS!(B z(fTm4r;!4VcBPsN#i*S~V}rI%RL@r4^k?A3X!4oFJ&v+2ayk9DqWk=FkV^i;$)Vi+ z)F|@#HY0!Q<;-TI`(V`A781pr0MN|hnQGoe=BLF=-T>xr<(rWba{$uEO$zv(kMZr6 zmjyfS#tavi25!fGoAx<}gPci)lbDDvQL9++Nl(LIX>pw+(U~8rIDJqMX#$IRuVb0Z zP0PX(ltQHpy|-vs-Z<4qQA(a0-S_q~=TUPRWb0T9u9c&-VXrS>lCDQ^HlJYydZTRk 
z*M#~DZP?v#yjzz1VItRzDWBU|?G(2G+eN`K<2fz-hcp9}LgaY6EgB3YZysO2*Qq%! zCulnfaYxg|XGIRQl7W={X6st7n*+UZ)=zEkDP)wml(yS~X=w^xFEiDOZ1=GONqJL`kh1{;Hb7Zf&)vY_8S{hVL+y2vUAk?6&i#IU zd1)y@H!1Vuc zYfUh@q@l-gxjE-+f0 z%(ty>3}}|;&*Q@G+tpiGw$VVRdTU=y>Z+b^sDt*Z_+xQuDjhCFG4-rZbCI8 zAtt6<|5jeP?BMOm;oj;uRF4%}Alc|8b-_CaV+(6Tonbh#=G%DTp!XP0mXdt{4o_D7Tpe9&z{H#?+bNeJq8v0?D=I z!ilHvWf)~0-rx}np23tmn20DVBVZ9jV<($XQi@~h4k#d+c~FJoFz4f=c=H)H70r-l zZ{Q;NM=6U|Y1h`T8Is!*gbzT6t}7h($plE+1}Jeao8CQ1XGHIMjvfXR=-AAINxkgAIx0{E3lL}L-DLbDi%X8S> zKR7$4YZ_17fr6XqkZkejGGr0#2{K|b8X(h27n(|_J-SoYIS`bRzP3*4wOQ+=FlDnj zoD=Xgn-=GoE7!i~WMe&FYiPVv7CB^oR)U+?xi6I97F}w|@wB^Ct005&vt{ij6)EU> zrNeQbRQzC3GwGSjJGMLly-380)J&4>^@&wRQEMmj4@~-Q-2S@f+G4Hdkiz?HvrJg5 z^|Qr?a$V#FPtH7#ry@A(iR8>~73S=UMs598tW zDuK9!mKU|LzoAQ>s-DwMHyHAWkMDIRxL1=T!`epx&{I7&@%qk%oo5?`jK^|r*4Ni( z=jGI-4{d835S9kfhcQ+Iq@L%l@bK`)V`oZwIGfQ|+|9pvl$3W*zZEBFS&z9ok`-=b zZE=JMTqsyQYRGc`@~g#eU8n7U+j}by2q7|bt&Y1(=y?X;p4=FE$lyx{mzJTDQCoRl z0RdwDo0%qQ=xI-qK(dOp`w19#PoAupmb}D|8*S zRLlTKS3=K=B7piV4G>f!`;U1Hdxg?9UB!^jWul2OBpPZ^kNi@nC!7nVk=p9sMWX?b z+l;!o7MagOUSBNN@D*AytQ z(=yD`H%~~%i6hun&QGN;u}+KBTD>dHCmPN~G`GCJ7T}X%qM;bon+XChwyPxpr^o9$ zM>{u?HO4rr57pHt;+|$qoL4Hz&xYW9XmE#Tg7(qRZ?ovP7ZS3Gp1vfU-UU8py!5BK zEmVYQU(z^i3=FSNV`^Y%dc1zeEfBvH@!<9)5DyY~oo~e4((%?V$BXTA);Dvc@vE~Hf{osC%?-(QfPAW+Ot$JnJMbs%yzuZX+&u^ zyr^CIF_9um`2dV%-kCJN71MC}p5OOu(uvgih}`ejzO>YPuhbEdSIcee`+GmGj^GWR zA(0|DnCUK#Xx^Y0t+bBCyzanCe(oBGsJ)sqIO#YdZQn_$;pNb^mX#5ktuL{{GmQ*& zy*(R1(4k8=_MZUgH`ixxdBbBJ$~@PPBMcAuUD!oN0#VO%VxPr++d=CDPOlIWG*lvg ziDP3KYHrx__U&6yC02*uPM(zs z*f{C-IVv&n*N&bm2}!3184y<{I9v3S!}U@;qDzenA|4{_`N|=`yf(KBySJD70-`2w zr@}4`ChF;YcQGEe!rxsA1%u;H@jeX)-Yx`4R7??(x2F2i7E>QiCwS(l9As5;R?jv) zR@6;bvqzk1ApKBnj(bMJs)G&Y4hEGw>cSr^wDSv<+8E8*1(7^{iav8^j;dd^V8!_6 zEmF46u=A}iLk+giPS0y5T+itz1~?(-ONuZns|$@p=-1HM=w%LSu}91|CU1iw#wbde z7)nt-z?>I8TkH6mOxMs5yHf6`k*sR_#>swt+{K`q%3iTUwLr}uxO!U9PNWEa)iH`M zuns1f@^Z9g*2{es=Y0H0X&G7aNo`!#&G=s)KAWl!kTb-e54m;R&nV+`y3027e5}M2 zI$E5=DgVx-d)lEL4c1|k9TmG{k%c3G(I!b0=l-MX*&nIWa?cO-Wx!A~xw()V%jd)e zT$TBK2%@6m+5?#a23-kdss3BJlasOt1WXu;$1%RVnOiA0r}GbBkIEg_6U-(|wrB~N zH6F>2DdW0o#KfE>%W4NY9iAL&15CUrO!in+DV53&7-toK3%*}^Q8@tLDH988U;4wUY+dIpZ^tRYlb2p~w!AE%B-2?z?J5%j3NbDo7Ub3*a`VB!m|S@S>2pMXsvK|k z=C!5WCIeh0mP?W$!F|a30+A)9+ZME!5<9Z+fgPoro$Ky<>>_iHJFOGhDYV{i*<09R z^8iQtBLeUn0x&+oB4e7Am5!S$?m@^`8Z#x>V#@{rQx;YX5D9I4QS;`1Cx<|^|8qP9 zl+x+c`URi7OZ-N@Efqf2RnoWA(N4hua*YSGO%30y7P+s}zQ+b~NfG@Eld27B$er8b zYoql!7RBGbIl!`a38ui5d)C77xvl|FgD9Pv*LU7z&NbnIapUz0k**tB6lgczo}PlC zV|TNkH$79xWDw33Osj`>YU4@Ax{%S245TTkA$5z<>>X~uWMT6=RNr`wD)CLH4v~xJ zM%_@Zh=5UwF^;W(YQc${%_<$$u+q(00?a(jPI@!K&b;x1yTf^lC1u0 zkZI*fpv0Yu3J#mJqHeu1HvB%e?+h@}Xyj^yImW(7G3#v2Qf@79n$T%OI~|gr-P=P& ze3L=X#YK!y*9xE$C6rYcswT(f(O6jnofH(Ue2?VByry;mG5l=r$>F&31TLAuxs<1q(J>!lAh-*n;MpGeEbn5_<*Lc7I|i5TIgRIcA1om!8w%I?!)uUPnJM}Xvpz05Jt|mjcS3^^ z(?L18K;#aMNSdPNnnr>yNblBWELZ0QBqvm!LSUgp-xZRA9DN}!K{i>6piu$|Ix|5P zx9agQl%r~t`Y_)lW^J|m9>jI(al-FMyOw><6>^CN`m6M2})duT`E6%PxPaw3xf8QlLgXGEu~O>qRpa16vaD z%E{qfgAtpI6S)zXaujcwSQiXEy*nEl5n;1sU|7C8@b?=zrKKsg%`_o>8aLz22HIg& z3#?=;qP;Q0XZB36cUGRkw}jsvl>+uQm%|RM=)pl(Vj-CNCGzT3#BjtSZX7lmS}ibO zUfH+=B4dwFF3OYIIl6x6JDefegx}HDerGV|zzBMyz8c=i{00*0aYCNm27Re~7IP1k zwbcO_;UJ>m-k3XxLnSp6-CRsm$=As-YYCf^OL-y`5cblXU*8Krsl}kkeQ6dhDs|MI z{ytB5@9Vo}3dVkBBs>{AoLHjG*C?B(GxK{l%ruTE`6wQ?@tcCO#&4~H8c)A!mKDJR zl+bt3U8V|BH2i3)k$UV-U`3(oxAPsW*uFQ|m~TTqJPnPGlFmDO|3C;Zsk~b~(-kIt zbHSu%ut%#=f>TwRr0<EpY)QW{LX%RI9l@I`IUFl*Bv2sCL(2%sbO8|O;fRPHPV#{h|@ ze>bURu*{qv4(?jz%bEfMxf4EPDr1vAdKj<+jna1WD@6f+Z3yJ}K}MUmE|Od2CDd%r zhStkWb>)uCn=?WAo5$;`vreOkR=Mx8xVl79H5{HDGjiFi2vnRFTq`hLjW=8d 
z)(U`wsRJgy>@u^dGKka@_oWedMUC)|R<4K6<3q)E&pzIhX>2|)$JW&aopeKu=c4(q z%)f)?y#de=nQc5I2E-JeOl-ZtOO3L-!kb`pYfW!To1zSruVY}TZF#m<_04FWU0Lf8 zpDky(F4v~_dxrR{JxdN}Pe2=CKA0sG8?RY;+wN3_UnVXq$^RO4MrXG6OXKkshJ7l) zccZP^x_H~pTg6GjzMNiYkE@ep$Wh^Tnu#8~->&N|cjQP{8=~b_@BRX+{TFMRg)Yw& z(hl;00;ljhKKqDM9oA>5{wodS_V0L=YL?bFvuaNI(LOMeZlhF1ll+W z?#htVj-Ap93k&x?)qVNW%gaBkYLIJB&JGl-)OuS=UK%QYlcRTMpbo}|-=Tl1R{t8+ zM@##i)Kja0o}Rjup`>qG)wIvJ&)Pq+=D?MGr39G0{rz|^LJwkHQhh*1(=Z>Kle75U z-Jqj&Z6IC(R!7X@Y#K+RZQcHg&9dqHjYg2uV$>oOs$PEhpu zSgCG4Phn1%>v_4b=l^5xt;3?;*8X8Y5CIVo0R;q6UIO1t}2*28NODjvCo#w$2+v=FAm>84DpFI8h^!kmh5qbi79A(Ga)*=;OZ9I-?y#Tj96Q~k3ZAZL2jo6 zDxB=MU-I1DmUVbFn9K1x7Q8fG7$}O`ldU-3X1P*s$q_wx(uQ(bR>zS^coP^P@!$&E zTMAHQvQ%fTM+-yyyZraNh;T;O(Z6_bd zO1Gz2UOYITq1hNSz(Xy(n30=;$W{MeY!T zh@H}oO!(6NQ^G!i7e5M5fJFRU@VV?s<(hZ`;ny9sFAvl9RMG<%o|^SxOm&%MFgZY z)A%Aj&C*9)-^-@F1722t6`j?7V$hrS}Hgq!sA_EYV=%diqcP<>goUfAJ>`Hy@JtsNeK*h6^Ny?ztj|Nh5o zH|WH-I;T26n8gdNy+5))sxr-1;;P7SGgZm-L9Wtw#wETA`EqM{{Fpw0k0YE_g&aKR zN@di37d)aTbJ;{1T=Sp?51ucomt+`)s4OM#*2zpO3A{g*6~K7hhJawfJ-c|_P0h2el`swEZXgaoEPyx zdWvd8zKyRpa#aXaiLYIDM6SAB;Yi?DpaKC!>`uM9`13qv!&oMr7?9|j`#&e|aQ1V* zkZHyuC7~e+#3M~GS{{B>x*~dVU%VrNJ9gJ~Wif!9qtl8yQzmhz%B`P620ZdXSrDbN zW;MuqZDlG5iN3tRL4`x?YKhGS&&H)%hGe49UM}XOI)_A9&?9_n_qS=T|(O z-2&$0SNGBz1!14~pFFRQ*dM{|O?i={8%dSsy3ciSprhmb6amFxN1fm98 zyG#+=%{<8Xv<9>#+16{{d&B>zEKVKRfa(?x#_ked$C)luZh}4X^CHK~gPRS_anxgN zH#9VWB&>6w!Xqg9+rp2ytCq*fHMBJCbj^od_r0b^WJ-PC>}b4{SQtG}3hDCtOS_G9 zs5snz&emx#{*@n?{%3yRM~lT**5Qt^D|hU4wJuU#v7)A-!7Fin z`!<1K_IR@k(|Y(PHV+S}o7*!b!enwNxR_PHX&O82Js)%Mjm55q82X~|#H&0#c4EV0 zM|$c@bZev4n+N-$q=`n~;^PxJTBBh*m6qj~;El|O%rwcBI)fD($Z~%pI<=Am2j1qINzm3+5yQ@ zFbb~n4rNeBo?Shde(tvn?f@wdUJ#AslXEa5+`iGWFew1Jf5GbQK_0AE&ouP%hiw&6 zqInd#cSH21SqEkNRep{M_!`PvVIHyaDpxLx#=WoFb6M}Mcj8Q>m72d{$I%Ei;?qmQ zW+_QbE&f*3Pzt$|#LBxaG{?2|fsK0Ks5?if3}@}gEtiDc8Lm>P9^{DcPIgxTBNVmn zJ?^%3kB?d}y_BTE!odH|%+AIS4)J?ONV{TK6|m_$h;R(1a@F%Z^b#L0Rmri{xiDS{ zH*Hkw{_qE`#%b_Vv`oYUSh}&)tVZ*_Eip-94Goyjo*B-92L+zwYmsibiP?WkY`oC+ zwDo@BdhUP;dnjpy9V}nadzr$MzrnoBYvi(ajoX7kpC9uj*}B5J3a@HUV4B{uQ0ba9+X(V5^Tpy@&nLR-}iy z5DFH9zcpSn6Ka=`A9Z6kU4lviH71|U#klEc;DKb=2k4z_%u&#`c0tFg9Eh# za%m%^V`rxTDt_W~&>RWU)4Dru3DF2IvZi_{d%=m)`1Y-___6&f-yot&J1dT?RurjXT0oHGTpMsaHkeV90nCwxZ1BWHHu;8(8=IQ9jB$=|In#3uI5w9IM$SK- zr(!q?K5w(jA`BAC`mA`oyEV(XH{<$CEr5_;Uw?k_Acq!|MWh=I6M}_PaQWervE5+3 zbD0ylHnCr3maC+xscHE2du-o8c)`ty#YO)GCFVTX{Nf@=fy0*!CvVH{4;6KgPjoIm zz21H%eg2NZ-2tVbd{JNe{VWH};jABhRZP{=i10$i7QXXrOh{Pwm$ZcorGd1LC#8&J zvt-x#?HVE`T99A3ABHq&@kxcXyVO!SiAwBZ&6)i?68S9t`G6 z()qUnI5{XTPV`_qCr~D_u#@jA9>C6t{Y|@7TR3%~`Khlp`8mrMXp)4a?N&R=t#{B;7&Aibtr6n{ z1zMwd_q`zJ6iTNrx;LZCFam^n6Ou&JgYQNr`t3m-P z;&KAF?uiJa*ozTK=e^NNJpjZByB+Nn+%FQCR7)!Y`4ClAb^bXyB;Yy12#=kxu0)bn zUg!Nyo^J`x_^zvNAitD?Ml|KUhhj6Bd4K&Oxu7YuP|sj_xIOpz_Eg8X!1E@YGP6`M zknB0zYZvTy&MHqw6u={YVi~>#1vo?b3;v29*o{MkTy1x5yBSj0nu5y;hwiGd@Fk1g zWxM_oBSw(E^tQ{%ajX5?J7$v^Hv_JSJpgkj>@AmDmXkktknXV7<~)%0zykR76Ts=; z-1<>sq>^-hAA?r(yj5NEWKXghv0OZ{T(cW ze~&GjB!AK)yFnq%Dg7bfPo-4(_~Cg7d#_6466VbrU~)E<(4 zOA;Y_@SucW$XqioTa!zCe`shiQaL;%r0E9>>Be+Pq9#8*J-s!o^!iQt-v6RLj$Q&9 zsupX)Dql!^_bOV{%*9vto564~WVp0Vqo<=!p4Qej288017?5*V+Zkoq2f}vaPX+~U z{$OX7YINJsC5tLl%DS?}?`)VMo5)yRPy?o#F2W1VYJHVUDT`U8-@Jf3vh zbJv1h{kgOW1M5+E*fB^3{Q?E<0DVAP#&>Vc^Yd^xVybqHFT2OcJT3UUN&d!mPGy~i z*o*uuS)0%trN~ZqH1I;_Tm&%tnpZAM&|x`3kg}Wm{`lg=7(iaCJ6+d;Bcj8Bl_JG@Q=G#8Z9Ew~%Jh{~Rf#-qc4VUQf3eN1v*#epV zNiEWO^IOBHRU3x49NQgJQU)O}L#n6LsV_x5+Osdd2GZTz5B1Pzi)I8DJ`O&MeCbQf23teyz?4e|J|}!%!6jw-`|Olbm#`nDJUpBe}3yFNVUpP zEy$?yIdKjHdUq^iasTBtX8YyraLqK>w{O_>p3tVfjaQ`gIcPN&r$?zj`%Ou`Y&J~u 
zdBd}7Rxhs&U?<;W^rmm#U8%fn1LLmqq|hpsl}iuzK3)Rl`%LHIp3Jf-b*UdQJxW|? zQ{}GS*5~Kcb$hWMGJM+|m?r8qpUyp#+37EyP^^{&X3He68k8TnF-};uzz0^yd}PnA z`GZ_Cd{(Bo?dC%O%9G{mLawExG~^;B&GnLM{AdaZ3p!d0ttF^Q~J_cDN^eTb*^(eTPguO$g-UI zT7L-U!AtpB!7~t9?9$~goZy*Bn=XKnP8@Set2**`e+d69d8H33cR(xg#_A$bWP7+7 zNytlVe6oItLT%X2K>OqSc==Mb&!oqp$kfHlsUL;yL&z_a( zn(HQ(XE`T+`SKXN8{gqcYj^c3Wya&^jL@K2QlZM!)a?9qy!|aLapm;6)7LNl65KwU zB)Zj)O97m!NXfogApZ7(4O^~PquWFp!OZgoQiqK};B9w@@I|F7owd!@=V1XW zVK~Y$F)Cq)tw?Eu0qF0|uHIKEx-eso)?pm{XRm>3%rm5QwE=f&UduuD0NiNOrJ14M z+{$w^mFGz>@>-gloKD1VxJeqoO$5ybB-c@)TG%vIFVe@MkBUDjGN>Q%ATPYUcG)qw zQs5r)OMon#RAiTHF5CXYfAXDvI|eaJbO5?Qe|8&`_w#M^xd6P{ljMz$GEi>ARvN)( zemhei`y2D$qGvQ%q8u>oTO^`yh9Zn_q%5uvsu9fK0mT-~B-8o+$XFwr`P{aKIgh}v z+T&5z!PURsOSOCZyx2I8CgvK(!TXzve4Lu%wDj$>k_MykLc8?znHzn)SX65JEmIm* zmQC(YITT8_Hfwb}2Sa;E?#Vz6PFo41W2b zf+}QaIQV6JVD=rKpKD^v@Ewu~gZ|JIMLj*-dY?N|1FG%UxH(e(3kwQOgr#Xs%>l^U z4%6OsJ|`^wBN`xV-Tn%$P-(ltUWFwaiZ$|7?EbaM4qDgd!9lkEz-yHA21CnyS8u1O zR%qS_H#jYYs(+Q^joL?Fbw$w_Z(kgDlbjq*r>on(5FBTQ?S+t{Yb>X+4qt4kh!?Sh z@{5;T_P8d@k@7E02GubR@fB1kFrYqpGoI&E>_?aOm<$s$FkI)s`wQyw%Y>fI;{>Y2 zRIKD-SDJ&aG-FBQl02~Z?n$Cx5jH+2+|_E$HzMN_V|^Jl=U$~Y%wpzkX8KxYe2Wn4 z0-Z0$fBHwvA8&cAy5nFc(Q+WG=#xoT#W1+}kgxbbQ8i0eGEb-0U&m3#kxj3`+kxTjwALuv(H@~=-u2d|T!pPtvt%+5zLPBqui-5QPinrW#=tyyL#24>0wy-G z7zX&~iw-8%cJ)%~1<#W?+A;|unHMSGdhfq}_s1Xn`+xi%lFPgC%h5FG%T9A=*x|-C zH9N>$P7|%_v5Fu+9Xq5Nd@Y`|?A$+(ciQF4N6^Lka|z|yu82Jc6HL1HzwOjtw=gOj z3wxf_*?6Afbd0F?pe4rX=RDtO%YWYWKgYuL1M>*bj}m-%I(NDhFvwI+cf&vK&FNai z=q`h)N1KaEm z&O_`z7^Fif5%OQ}sjnCV*i`y{Lyfb~`muvS%$~HK9rs@e^ViP(`YGxu7{n1T1%C6i zEr(UiS42hX?UeXGF5}N#quWPsz{yny?6cK~;}NIHWlyB>oVNV)Onx$p`KAnDo|2sy zT`rheX6M)T|HIjYp}T~9{rIcPw+nracl|6rE*jAh)wAdy*ohyV4SV)#7<3?h}$R{HC+8eov#*C9B+R`*Xo z(Zzv5<}Tl_l{`I5MHOERNNxmsN8sP*{qMI)Y?AHphdo)d$E5xsE!oy5b_sZX** zEn^KRrdzl}6hN5oFS^V6C7jiGm8l)ee|`!4)tg|7jRLheM;K%{q@0p>nNgSi`d+H$sqe=alX8=9fqx95d^XDEgM z^zL0{JP0}M#}Ex#7%16m!cJTMHqvu25~=OudE(zkdLIw^U7?b{gnPQ?FE(KA)xucI zw5Kh9YNE6M(!B$$$-Fh+@A^AKJwA`#z1z~;B&UD)_$Fw1a#!7n?6l=?BTb-Lb1bUw z#$QI_2K`w0(x@+;&NONi%-zuwdsy_elZj#HM&WgRIE$vQFyFp8!dG&TRE}`UV)W&?Jkd);7_{g zeGWGIa_2X4W61RtuT7xVEY>$*)2=Ee<5Kv3iA}8spMvvsw$3G35xmxAq^QxhGFix{ zO!Q>01$DF~cX+r4sk4*IRk-gaq?IOy=}Jh>q54fHfbr>@PoE`UFHw%|%%xULRbF3p zofGvwfgi8dfww$gT>YWE4zIK;Ixan;(5ke3Wiwva249-)G9$&a59`OnCJssW9nV`7 z0l8OrY-+2w^R%mTuRCumycPBFySmEDm&hbWOQlxvS7G4>m`sr3RIy-UXRtmmJ`ZXo zjaXCh=U0N*5DY}g$yAj1f__nz*iC}zF1042v2^jk{AVK$F2e>h`A+XjCko%Pb=e>)1}m&btVb(sE5PfGbJDao z;TCuHZQ(V8vp%9wa!&2X>LTC$Oo37LSOYU~ap^~n(T=RSzDXC;?$Y}cI>7ZKO0UoT zZpZpzlK_Q8ckTiI>011hNd$(mwmn_eD##ky%VZMys-(9pL6$?MVOyBi#7#+^woLFU zQne2>j(0)xCqik4;itf9XCSyWL7nN7ih57vaG@ybxymaL4&-;O%YCNTAkHYX`6aVJ zKV2NpBn`P@1uX={LDb$I6sgucyT+#UF)0?@xWOmmoE)z-bL&l!mjXW7ZHlm=_bc#P z)F*`ZVRwbis5Q8H=y-R3YpyaZHdp;arwW|(=H;9PJewtGK?&g!6xIMayu3^KOjXZM zHhJ?^_d6b;=%C~VG%4dTgH&>QD|?5o7fWizmq^m&!VEHEej`e{s8+NFaa*6icPcxh zLa=a^e7(280a>4P&P+_`*KU(3OH~Ku5HCSuV zG8nzb8MX8kLBJ(iKG>4eD)Wdf(5da*NwXetSf!SGW4WDsuLxxWuMuP^YJP}R83Z|X z?TI3pX@6Kh#vJm6+U|#XwF8$MD1|W(@;hEL{yHA;lV1|BuzEvjmVTKD#>4{ZSlejI z)qBfT2CqH&Bpw*1{y|zL|8ajt&r}~kG6{NVJ+jw&Z{VW31Luwmt7 z7}mR-9IiF4o~XR?p5f|R=@s><{2Uk#Q;<@t+$3Y2UF^3RDIHN|*PG3qreM<@vmDB| zVU&K`IVe)%h;7KYJ{qBCvoR^Td)Sk_`qjMVyrbvMuD9KZV{1J=M?GO25Gd_Nh6Hg3Cfmtti?mc3#EZd%yI^F~4_>%SOYz&`C2{H5oKRs58R`qoD3N6C-Y z>j^N7FZ8nAT|1c*!|^COs7u3Jez@ag^c&I`O7}bstG*`V`9dRlE##cV99n!DUlmRA zWs|4!0mIs_u>#K{JbSpPkOc5NypJY4DKfZpW^d1LYN)^+dJe%1PRZu=G0T)lwm6(Q$Nem=9F<*Eo4 zA+#`6R7o>vc{DUR{T9#F?Y}e}{gf~D*}+RkytUWsR}hVe2$7nPWGtFL0;kE|_U5hE z`ER*z!P0eS_9ZUe$c`ChX86q(|3Zct_|$cIZ*aCJWn!EeDqPqQ%c8}{2^ndCByz-y 
zLQ$wgJF6d6IkC4iSUC?(m@hCSG}#GxmyeWv)B;Zo6|8RT6oBWD!K>_O9ZTyIZ+5u5 z@^r!KcBL11lio4_;ls~q^i>uuECM9cJR`#ktdqv`f~=%{MYLJo{;oeTu}jgum#1z1 zFT99YoG$j zh;dw~;{#iMH?UmPh$=O*9I60<<=yAaT!Er>uW_&aF@y?4S7yTFS3Cs+3%9nZQ1s-J z1p;D(gMXzo=v9Jx`sY9+=~7Q9XQGf<(8@?zhpgq$cYF$#%LB(H_SlzonrhE)&2h~a zHmmEA!VpO#JT{1ifGNtCm1_+U&y_*7^YKi#`u~_%26I}kLs7%5W>;#X*fw863kOMB z?+v=VUv!PuYZ^eF&s{ll-iSkT#d6Qjs}*R2ItD0fwC2u?D4g)$#Q(-7kjkYe(v^ok z3hcQ|555xaGi~LkOdQYe`Vp{fx8-1=RVTbvJaOVORj` z>&>4!xlsi;r?~(D9YZ2LtSW_<2W)9yLiO)Md{7I_RHCS@_=p=+Xbbbt zPwUUrBESQ^hJWwZ`GXa(prI{E+_x=f8pYu$oyxgwg4y1(?nHjgwlJ0D;!QI4-t^#u z&s(`-Yk_k)_fxgZXbyk5m5-JE2J=*5-x)+FAnL7zg zU*wOmSsM>nqI{XPW>s{lSlDxyp({s96u>8b?=Rm{FHX~O-YNe-OquokJQDae_?yx9m7tyCDNX+6|?ySA*bEntE2Wn9tu)ejV@uBkG8|6)b1G-L$-xRc{He^NKc z<+NTV>b&34CJZbCAXA7)!`goB)@RbkZ5Z_tUf)}Lulr(c7an*ucokyRqqOu2iF@$Y zj`Ug1sLC%usmW6mKI>!FdWBB+Fm&NTu|2jS9dVHrJ@_A{LImHnqABla;kp- zqF=OsejKl4xCowK`0w~>@Z2`@54$AtwBA%i;Jiw+9NE0lpy{*JSw3ON!QH3txt<$3 z*ZpBp{R?`aeFA<~}~zay)q4Og6bj_5I`MXL4mesQr%ZYaY%1I0fDTK^iHq z9Zh>n2lLc$bP6@gTM7&^+cj+Iqsc?I_egWrwsC$_`+tM&0E~tywu4)5i4_r{ zYF$^}tX{l!-}*)J@b>#FLRJE2=qQGtI)N7wUb*1XpRci{zIF$`JN3u9Cc`>Iz@EtuW&=D8L`qMF{nu+g8a{wolb~Nhx?Ux54@%m`F$*Kz`hwoK&V3XjKk#@aupp z)EwQ#TX#%4hap7w4P|7lDNBur#-JTcvX4DItg|?ER%W$UziiXdxX)uAdMz_Qh!dRp z=D6H|OA{t4erfmW!p5Aw8ea8c;|a=2tHQbhj8tPNb@z-OFtKj}$BZ_Vyz4hBo#pv$ zY+xsjH)0*%2&R7h;i(IGAH~!$7ESN?dkZD1JJ!Z7d>!{rEnHtx^+xY{LTZ6N=~z}1 zsIDi6-kq#@_w=SCSA@420+J=V(6TjtgY6+AUxTN1Wp~u5khti%Mcpo`<82pTu{SuA z&Ni)9q6)KLH)bcD$3kh+>|opP&tV=+RJg3bsB26A5C`MHW13=XaG1fb-T$~`J<$HD z;W`DY@-XVaDitkHRuS7HhrZBpP{>L4OP`>Mp4~uAHE`&l$NB0dlYF+5akk0w9I;s2 zBZ8>Ci09V3AE#*UO+yYByPoWJZO(ui$((3$G?=f_MS0iSP?&3JCWYqNZzc#G8xY1C zcpHvqd*>H|ed#SmJu9#W<9cy=49U{ghIX(L1tA{iPfXbMFW<|Ec^!xrJj7U_Y-2HS z!ZfNKX2w#Lt!iD@Dx8;ws-d}?GhJE}p97Uvhy_!qFWf?3WW6kOxLj6X()Ctw_;Mao zh8V&urKACp8k%iY33u^)7O=g6AJ=XrN-|M;8w>AG&V7RM-AN||YR~N9J5G>Ns8bun zx*h8B`a9s93us(L{h5N+JS@+11i3X^v!Wt&^xSiMBWoo)Gs4B^q^s`EPsYUC-bsg^cU03N zxA!vn+^RpN10EZpCxY7%_-GTC;u2EvRxqb}_k@AU9#89yD^JM<1L$?2g*6_9d7qKo zlbcbSmR!_@-HmaZFfmSCZfG1dAAnS92f-_ zOhwOKVOivQ3sLV-O+EmcO>>jB0U-#5Wk%fVlzz*O_w*$WfQ5|YM2|nQ7<(@jRBDct zni$n}?AdsO%a44A8oN^WvhAe>*Qjus|^Ys|{hDB+4si?8yp7Wx5@TKeDovp#? z6gp^utX6pHm@Qz(G9uBVJ{?kFkL}0ro?uYM0ZxS_@LFj*t3q--r=z7bL50;RRYsY^ zRcKKv8M}JlYTcG>gL=$A% zyIa|`DT|J=%QVU@V`biLzADhIhgH~(+JZsL7I`hz1I`oXiwEMrbBLhg?*VyXuC8|A zv|a8ljF4lo_z-%?jm#fD(?co~7Mbii5f)6rsWR=uT9?L2*DxTltSX;#8Hzf$@C8(J z6bCzUdYOKFq}@AtoD@#ZVyKd*;+S`|KbN}NKxx6In_s0;s5nvpoMeQ}O3e?>NAjP{ z6@;c|k$fU1vJuBIj0(8o<+Ld>X-yz3VGx>&4M7 z)6o`2tL0x~8lD$%@mdYxkI<^*0xIR=@IiSE)k$_mJvfx7a$5}TSkC0m9E*(M7E2wN zirg2Ia>u0)A}X6pg}`jepN*6b)i_hB-4$OWYmYqqhB{c{LYBQP(5xjECj3q4e*+%7 zqxUal{-MgxG`v2FTR;7`qtDOpn7B4lpbme!tQV8KJHK!oXZQV64&gPvJJW6pT?-U? 
zZbNG2B1y&dC+$I(u3K)+c6SZ!_hl>4FO5|@Rt%XiViVs+sxNx3R(B|9JA7s2ye}1A z&}5bhnH8VL*C@9`h*SuLPYt0h43N6-9J@2PIorL2HIjRK{G$vNzs3~maIF%8`x)1D z%wDSI>$wEwG=0}vmz8`VOzfB4*LCXmXA>*jHVlz15!Z!aq0A}qOOQ?rT!7vguGMWt z01Q&F<>U4z-vK#MxF(>f0r-n@6qQ}w1e0cx3(R8mD2=gFtT=aTLOHf~)g#)n?GO6f zqdn;rJ+YP(-hc6mR&*S0b}o?#vAgX44*sLIGL}>4-YuQ7m;@MCEZVDu6gvc~HfVaO0R9Ropg9A+vp` zuI+1~UAPZwr2Q2YRk?SC#g=HE=a_wkOE)DIujLE7+LAG|ANNxAsqjtS>BFCeteU^} zMAA^yHq?RJ*cA{@*jiPIExHZcyp{oXw znP3`GoH4ZRVBlxM##7|~QF`0*UEti#)}oa1*W#6YtOFF=%j-fzS5SD?_!=Iu3n~`6 z556^hEH3)&dKcT_>CdcG?^1)7plUc9bw07>opR+1eR;1BW8_2@hPi6yZ|3RNdwjms z3}|3ECo9F_hLnuNdKv} z#uqlh1$>u56}1}tua-cpmMsoiRegN|P+I$RH{lV90^_fWyI-?X)n8F|7z>O7w`c<= zoNPuDkpi&&S|_@_3F+X!4ej zi$lkK@3FZGul>^O6*lF0p|^)GHRnjJB|@pwpw5mKyk9^132if|3 zc4ioA=L$#R7OGS63A;U>5{@%(!XNpfiXex}(Cdww)ec_|-(U!{09&qGwPM+TL~WyY zq3L*|gSKL;5|Ix|2nc%L(?u3i42O|5R&mzkG!Ir@A8v-vD)6dL=IaV?`Ur- za(A_Mt%erVyqH?ZC1MQF%*CFkb^D3g0nbz85}J*FInng+S~wml(=dgemw^qI@($ zexOpvJ|B(j;J&;t-o<{qu{Rq}@J58g++J7?R=TZ79VXm!4s%}p@m1!F;_y_8=Ye`0 zr_NX_W6^PiQE)x?I-zH^kB_Tpp5lkFSI1sSv1~|0yoCm^`Gm{VxwQS zAGirCWNU1hMu_`B?^r?(KOQ$iw|Tgn5ucSv%Y3wRDaJKP=3F+g&u63hy2%Xh`_7p+++ z^~2;$)7R8jcr+n$pH>(gd>i!ruv`Xtp@OB{nNlOUDtQjEkr)um^YhNO5VxOC$bF7b zwKii_&B$`~aXQr7Um19ce{K zj%TU(Y+%ZVCi@P^TJL_twtSbBdoI9gD)5MUQZ}*6k~dT8d32|7S8UK{Won<|pw^Ip z3+eNfNiM-0*rKJc8J}v#kS4-p_z|Wbq;Y&aR8@}&=H>uE`{{vQBY<}}cam{60nFCv z;?OlTbes_mx3Ye}vYA3LA{S0xGk8Gc1_xI2l?5hsnlshl2VIg@!&=D8MX(!tQdq0E4bT;*%kT}ApQIuYkuC~m?btCc6^Lc+`NlY=FfrD2q{ zt8Mm$ma0%ajs6VD7Tr{wUjWG^5#T}LBH|rePvx4}GoTyIdh#qcd9uvB@?A|IAoE%K zJSX;=nY)fffxSa%M-djjiy^&F@yDVN9jp6Hue{^P^% z4^BR&<|NDklK?(7P9~uILHGFC$#q+$YzttruLd|4=z(f~*^uXj9bLA=#m$hfX$`sJ zk-5m(t~f4pr2q9y@*f8doFGD+$48yd>QvRS)TsUbm|!fpUCF52Vt@(Czt2MVRz(Rz zS>;uEp}3(#zkMLp{1_;#TA&0n}nCJ{ha}yE`WStE!Lz2Lf{T;sxBof~Q>d zKlslBAN!3yC*DdsyeiL#D)B2D zt_)l~0!mml;~}2f22Awf>k$FgE8sSWWo2Q+6QeW{+|kM-=cAgBxI{F|e50i%PxCa& zJ|5b=N=}c#A0~e`D`r+eZE@J1@4I0NHN4__eW*?6q zO{Kiwat40U>TQvOd3x9J?3|1$sFR+mB!Bt09_aIP+tF3i#>-?Ep-aA+`p!;k0^26p z)PrbaT8%{k84W~OjSmqrA@oz~ojxnyLLL?Cf$Bml<&*3A3puzR$phtYG~|y1h(wA7 z9cQbcwM&Ih^Dy420ROF4cz-&^wgjMRq8ZR?68cBO`5I-V!V5R^8aB3u3HH=dd-DzJ zu(t6#@*n~ppYpy_=kIgH4VFez>#$3dqmF@@nc}&s;dzjgHFo&DKeI!9;5L(S{oW+O zZtGNJEs|x+M?GrQ`6j3q!m85~&)uS0pt*3V*0ftw6&TRPh_!iC)$9k92Nkp2`YPkbrDNGGI-q$J4vh)LY0wT}c2^(TW`8`KOA;)z>0abj z%`nExQ(00+(mYDf(&x}75_bQ2xW5HwiV#|@dWHyCDDUw+2x#!L6us-vqI3i$)4pfyT_k6R^;cN~HnT zzO~UxC=$i1ZpQYeNdJR$TtPDm5=`K6CCER%em9GE&Pw&qOb%YIiPO9Pw^N3Bsck*8w(O>4`0$#LPbF|_YVfl9%_-$n1Vr++1?Q?9=j>LG>rp~UC=LqXXd#5?yKfnd^ zMHUmHM9Jorc*H~vjs&PgS9x4*7c^Cgb!1WlY4p}!IAz&)5Y&JRrX2pdVXg@Gyfq6d zs-Yqz&d3x=+Q~68dFX=@?&!u+QK8+d#3b{qB-oIS#MW$2xt!>=sqx^&vJgMJdaLq? 
zg6lmi3>hXs-2#ygvHgH-;m|6t2qH`r=#UekSs|#fx@cQ}+!J!}cJMPAklUS0n&3G= zSY&Cf7%ibaX}FmsX?tY(Aa07ULYAzK=W(~|=L1eLQdEl95ebO&R4j>Cu2PKd@bC8I z3LR1Zrfxw}vQJbh2DQj3^iPaV8`B8)&|~n_1K2|(5&3%}vNScxO0?e^I9tC{$7t-(0`NRfkVma&2+nT`kxJpnfPTpzXCogpn9 zeYd`}2iF?jE2T{*Iri8a#D9YUBugji2Z63f*J4+=X?H?(Zo`2gy^u_-2eVwtGFGr( ztb@~7u6aRchTnseCZji?l4*fXOYdR3dh9 z80v6#+J@elrC0!W#_J1YzKaQGWCthSXJB8wNLLu`(tnYUKlq40+uUQL;|Y>)dWj8H z_hkGK^1vQcbs5sI(EH3RzO|~7i-N4xBTZq9IOmyVljc{|P%<(2vZBXMRZZWY9{dO|$G-Jl06DD|a^Vy@H*OJ4EsgU!&ORFuT=R)A9#S~jV*cU44|j@5 ze3<@rWS;x=sxE$}hM{^}mV^7rnoECtK z9KOGDTCzM$h3vkAAmOgp{CO#r;CZDUuR8O2)*22A7jh^Q+?-a*p+SRdN!asb{(}%L zzgS@UGfW@f$d4$NAH(@vAW{?&NR}ofC1C5TW9aEZVNVQRn$mB z9>C1wyMeFjY;MK7TlA(3W%bry8*f~!-^SbdKJMzpV-l8F>~;b{?qj(@5zhnQo@)?; z>y4FBJ#8Y$F(c9AZKax>D{qucyk&eh5p&!f?`1lbYFCD|Rsa%MHruT=oJ}Npu^N%9 z!YVWucd+BS2Ej`7c0oXpu)Z4-ck1j0W3cavBoJR6{=C!Y89cQ(WKa6i6WK7bT64G> z1%WvJ6k8gYqv3aPoc&fF7RxF*eQovgMh%}~dFt|bT(%hP>a|UTgD#o-2`_z)co9Y| z6Xx9eecX9wnFMqE>r2AJTIUeOI<*qP-Q{vPzRrl@VuN~uY+%1IgE!F)9B}xk_$-YB z@yTSNCiRtiC6g410tQ|+hYvRl+Y07p>cwfsKfa;@I_1VSZ_YT?0=e$n)#~6C4HPKB zvNC!RX=c+(pv|RI+G=c%+NV$#Yz%$Ch4k!4jW;5$_%|GdeL_eEs4VsNlz8Bhux=-h zh{)Z`dB-bQVxgYp$HaQkYpVQW7de~eH(8s}Atw=R?}m&}eLaYv;OGMnxTBm%b!_5G zGJy%zRJfO*yW*G5?~CTL9gZsrynL3*>&!yNuA?;irnXf4w#D{F9WXnglSAsoVIwQ8 z+5vGjw-g@Fw=&?0zUADf%5?dT96ZKU@6D3q9jM6C_>^Jgp66PF(P*Vve$@3LJn{)~ z<+Mm*cHHnYZ<;(JM`Pf8z0h4PEhXM9iqy5Gp;uDA4Qm6DqrsWq!-lknS8mo$d`ukJ zIV_VC1(v;g)A49Y`O*St0&v_@2pPxc+33EN8VS(1KC!__qr z(PN8#afI1jn-gfys^`z>C`sGT}B9) z_p}*nTbI#Oh=&mKL{=5lGQ0MfrOSX#go7tv;NuSXDg^Ow8PqW z7~G}uK-=$0U@@74T8O+}ljjXpNEHIWE>RPTI4{m$y!!L-wN|I(< zAT2U@rs9QJ0OukhPx2bFeE`zpn66QDGYN|yz>hBX3{H&d<}<>rr@urOM&g&VR=v+% z(v8<0RovPi+Zl$AwMoT>}_FwE{^U8~=6vxItu@n!au5*-V#+12f(SOs3tPrH%)!wef8l}4LGT_QQ696V8{Gd5OZC_I~CcQ^uM`Fn{hSEv*5Vv!>$iA;SD|g3o zF%#>jB|d+ip-sl!WW-`wk3@20dIFQ8xqp!B{Tf+bnwm>>z7@Hxi{3K`CtCRsUA?`W zPPd-|ELRJBKWP@})_chw)V9lzhSuS~sT^|%&qJlEY%x(=2%cCqd*b_|bYf`h@NNkL zFAvI?XLMm_3)svxQ1i>-s~>7y=Xb=7ZcynEfM`K6C;?$wN!75U?5-qvDy`yQ#qfUxQf7dRlAZlzLY79$=ajv(3W-^B zUZ|Ribd9}d)|;?`Woo$sOEz6=h~rMn#Q&D?CEK#o(&LrgkE)VbF12xT_Pu6dyeBJ* zc||3-v>vzM>3g{CWUOlQ+C^#>a>hB+<7>LE$2$hJ4NP=8kU^CO5vxgsn$;Ou?n`>MN{fI)3=^ z@%_TuT}=SpzV{U6g|vT*dSsUB{4tR@_en7AMIxGy7pVkdaHi~S%#kH9xhh}5ma@7L$u`}?~tf4J7wg+2GUKkoP2 z>3)*L8a|ird}m??1U(z?r&Z_x7H`BFq3HFlJryD`Df#Z0q*FY-x*NNlbZyHc->}Rq zn}g}m0l`Hn%-NCF%AJW0=H)Igs1vmfBTO%f6OH_!u4OgkAV0XGcvZ#^kg8#2E>I?z zelXqi2Yhc7ycXw@ZswB~N|P5eVaps)8R3;J|C8d){$q4N;8o-HKg%S^KKiEUnp!L8 z`J44*FS*16HKO)ROclhzT04%)+bg+s9xl4QqkUUUqr=v)KKs9!-(lD!_~}dcuqGl; z$(bY+KPS#eIlwV04wDDuP0z)MHm?bFRNRZ}7Ck{>U}PPQ%T(JW1#VmR zJXGg2m-!|A=%un4!ANDQzskdSCM#R)9Ll_=gZtQ((GGjdQ^luQgE@;|`e>|&!TTZu zTFE4u85#N#R_z&n)neVcx5M69tNwOM;4jm<9G?ZC@RM^HA~SOxEvd3QsL_I+j(@bq zOXWyX(8~P_GXY;Eliu(2yF*26)BUM2ZeI}VUAAgs89ocW-;FS^DY6RFGui*$U6h#~ zCoM(6%1)k?C>Q?S5srK(-sry&{Lvca(C6@gvSphV=oAfENhgeGwR$Z|j zvy4x04*iP<<{Kk$DvlzDbo>Rg@iHezzlVwNGADoM3hg=dSJQJu>m=xVFH3(qec->H z@P`vSW(B#!ulmUJDK7>(W%w@iIZXGgKb5+i`|cRX_C&xprs6FtAI14VHxx})#X~7N z-@!Qcv*K+~rG?cQiI@2enLj#_?)ev@8}?=0FLM=Rx~lFQGLU$OO66?cZi!ae@+}v?G&6bP>&bM6sP1ny4!c`_ z44#a3Wop)NaTPz%SZ+vnB-J6D0nz0>{GxequYkn|m)F0)YD8FQ8J*p|x55jtE+2_o zU!EY+v$@2FLh%A2uQlJF7(WfK zt#;5H^Le@=IWZqfQO9h0M5xKohK+nooqODDA7`3qdJvdS@I~sxYTRtCaSvh!wCXQr zijl33;IBPW&Pf}&Bcl72>vRsvw^sX1btszmAG`v}WUszOYulbbU;ActzqDt8e$FW22$r zfyALQIRvxxE$+LP(GqVb!+&-!fK;C4lhQMi9&L7{SKLr4Y9n~Zii%ZVOI?g~Jsd#P zbMBFQ_HyfA$h}l7i@o$SAW7EaZnc=ckl*Bk?9M81z14jgO+9jKpqf$n`_E@cn+-0} zzS?-dBhxZn2k)|2m#?~FY^Cc>)$cnDnLMI6{yTHNt)#=7@k8TaP-!@ySQg>E01QvP z!IYkj2rqxuGJZ(^XDdoDKrdQW5$M`~)%>66cF`m8!TGS7*PO5(N?+AXnNRHAV~<4` 
z8@7^4+qhR^eX}0i>{{RDql>HIXM;&PjAKaDT0yue-0&65n18aj(ihjznkH3$idQ$v z;A|MLW~7!+ora@Q@LsY+ht|LBxDl5E`Ecu`gv^uS8?>BPPV0vYyGUEO9KBD zg9dcf0-zwifpHCrI-m`lP6 zODC9?qo^Lj`XWK|^ouO7s=^w7T}e~qqdQ6d6Lb!_>7Tp{X+di_{y!YS)V)w7mIj(e zcbL}MuZN4S{kK-XRPEj|R92tO{P`CrehQpq(H-mS`)7AoI$_ZbM*@bEq6+|{1yR9 za`wSBxHYEGHcc7v+Itls5$+j|&2O~m%$SqHyS-2rwd%C*u%J>) zB8%U!Pxk9O25j{FfU;L0xh`{6gYOwlbGyLdeTt&bVbkw0#%=25vm@S6SyR5YUfR$E zbcz;;E_}cJ=F;|-9TDIh{sGmZd+%f#2I;*wl@E9Y@QeOq?#2h@4Uk^F$4<8+!RW1W z8lr&mxAYL`8|ILp^XMFrmpnhwjq1PBzoEcg+ctg*moH_1JWantsgGL z`HdN?1JwyYs6iNkuj)#e7Jq6_k;gFvGaoYn;p-9CnN(r-#Mt7+^H4gyu6m1$exDI< zY~|m+cOJV_j9Fp&%IqryzI{wuRr3hY!Vyn32hfU=+5`t={wX7bSR$S@+3hFOOJmk% zQYhsjPQ|a7nT%Zu+cWJ+lo5}^LThT}O0TW)A>_?fJsBOAMTE7_nz9zK5 zO&i-c6HLGOnSXZoZra6n@r4nRRlOmDDuSySRD0Deb~g?V^RD8TFdCk2OMU(@N3RG_ zIadKP(MO`+$y4%M8EOol2o7DrDNs%W2SjN8Yem0qfs-eGaCQ2SJG+oVft5AQQKz?F zEhsMa9@&nxH=Ite{RX+{v|gA|{$yewDH^*}y*0L4lvcGAQ25ytT*b|T3*i1=^a^a& zb9svYC=<&+4+9sIhy!l_=E)z@j{Q*ThY|@6t zN7+35ZQ^p(yu7por%b4@-?nxlR+r`X3ZwFh%eF3!qPQEoTr!wBQl_%YYHFt2ovt@o z#%0b>Vt>34x2xt+L#T#J2-l-7<&uU%2JLmjXeCo1au-4`>TDthvrRmVVv=(|;b~gH zO1L7`Ys+6>)u7NaR;pLBc1bbLXOXHxNZ_AqR$d?7EpQxPwrFLk zc#@O0rS?QuxwGonK;8D1|EU|#-X;yN0Ebw}Ayvv&2J3C2=+8?5lO)=mz0jigE*|;e z(bq2&@cbVR%c@ib) z>A4oTwP7oNQY-e%xx8y4l~&t$W2!ATpCx?uTY^z4KK4?!ZUkyBA2vgS3!r9Gg8-~s zEn@%Ut!Pwd%&W`G( z|2Vi1B5>}YwT8lx*+sP zyoWj|5X-PtML=;gLEed~4FBH8W>cG97|CQN$nKS?>GDL*mm=}~Yklx2p=P>??0Nl}O zJ=T%nCS>%v5dBTYerPyttUOv zc?dY2`bukW5i2i-SiNrM7zs>Kt7QGT;imI2NzgnU>Wtp9L&aeX&i+)k7D~q-rr$|2 zwwkpi1+11~hOqd5i046n$9yX$OoqHnJ(B*i&MY2HT*yw!+#ZBC1~+p5w;2KkQwgA# zcoG&G_175Cav8kBoR}8L(x(9@04$CO9fO!0!RHU3nz!8=TLJ2jN^W{aqQLz=96LDQ zW2U2QM@{y)=m$0dx&>7=dex~FJf09)81)1Ig_4iw#r;4E`|CE?s>-i6Sb_E(n5@H#(D_8VD?zEU7mE>y=3G~-i}yO4?^$N@=kkV>iRV@Fx@{{^xmvJ$Dlpj;f-kU z>=5+KLwx#T2Diy9*M-El$LGE7TciHaL0f!l>#7?T$npju*z+c<_1?ErS_f|__lILu zvp5)9@eE#6C|@OR?(M%*Vk&$^dxxhPe)!&|gDExnB~Hyo1yFSL8`C2IFFN7~@79*H zx&NyncUz7k6qF3CoGr|Hr3MBq7RAh(vb~??B0ubg-I2M2fvjO?Fsn!mdK@sB`=I}8 zQ;Vtd4Kj{inv^Y;4Iz@t@KzP89S7O4=KLkh^Gu<1l_G6xbD#;`k#BmlgnZs8eV%HK zH(#}}9~<{utpmpAZgG#ha~aykI|3L=lrZEMaG14K;J_mvXt`>(hizK^p~U2}Fg~>U zNCf*1EBr*u{HaLr`6C?+)%>Z>!7OvcSB5KfIRMlfelHr?Eq-5xydb~eQ;#eg!U(*B zl{tKiJeeY=CR)S6aD5rF}Lz%g5?+Bz4a*{NGkeSgzSPNnnCm}K&&;~R~kDws!k#Yv;r zJeT~-eMohl&5uxa*K1J5W}TkBzyDbr=I)yl_DRljh2LvUS91+PFlC(vew!sHd%GHi zZ1{Q13^3`G5(|~YiIX5UbGDp*@})p$%FQRWSItVVwN7wZocT|g$ip3xTu3mwcD_VR zoIJxLC{r41p3Pqiz<)WjJ>=lD{*QZp2RomQ*~O*+Pm-D|uV<1y;!hsXGWTnk0=pVY z2;oV8i)nE)eml9&cUhGxlm_Gd6@AK)uQ_OO(UNJ==or5?|J)dcaIQS&QU!=)Ykj59 zYm+(E%|fmi-qXjrm{;Ea4FGU+1M`zg<*%g7zpk~XlAt~WjJqLAiM_1j6>6@7fl^Ag z^0d09+92K_*FmcrG2FDFR)FI;0b*BjSMJLK^-Kq(TfRdFH#bnX3<}CRd_q42l?3RM z2DuU*wfF9)L_$8P^-20-vZt^PbKlFQtU@mIibd83smg#W zYC&4DIh_t5ENW*)aEhlP+2KR=XIj=SM8>fb-f_XANZ;r$@gF;0M5etaBn}@z5j}7a zmu^iXeAHT=hh@7;Ev8-!%t_mU*$ri{@vn5D6SCT35(n89jH^|bKNYxnKkj+9Pxodg z`>yUxMXLCi%JP|EM}^_3ooOgrrxHv|+bS!B_8Nnxm#U0pWs91J^c<_h-#JkRR52u|=b?&_)n}n@6;1BT z{7bxz@{aw($J}wKf6}8z)(^p1&OlA0NW4(SFR@)CGWm=*gOeuUz3>zcBpD5XHo9Xj zSpCY`>=ADUgz?Pe5!JFek~Mt%s)b!d{FUn=3|ino{qy7L#xJeZw&rDY-zZUsJ%*81 z2sYS_7H9N8&Lm06KE{jkS&mC~h=b_>&A*w6Qlh77#vboeKB4@B(I~#X3sEV&&9X8# z2PO>f<)-+7p8cZd9BOb%>{1BJ$x0qAZPW3m>!I$Cld&dy2*C^T$64XDE&eFY()jSE z7yBvJr2w-lFFBQEoO|o)O%ZHo?e;S0<0*{J)Ofl()x)Oai%U;7V#Zt}P4UvFd=fu2 z1qPk#1MjYcFO2;K>@2QYF)F`rDgpa?CzRna$kQqB&^wSQ^2Ij`cl`#ye=<~Gah5+} z!%ry(d~xkE7^62hhw!E27K`pmr~*#dQ86QMH-j~||7YmX=c^~vEy&@>0YiLpeFJl)6R4uS15QSE) zjqXYS%3-?@s&opiBHX4kj`HS_i+JBu78o9_D5;x%;w`RT|N050#HOM;3^)7D4>^3_o=%sRr zTUGyqrm7lh9RZ<@_TvR|m-~AJ=u89HljqQt zN0p-ToHT%D`&<#M*%2CVZmc4{TTyM9?q+IZeELe)FC~aO=_lN13ejOp>}%2F*aWKf 
z?1Q}dLs;a9=8%kypdv;?LKU;10Y79Wwliv^>WvCv&}a9KJj#RdQ`iJRv^jNWX=lI7 zc?Hb_`abHfhWe-F>dzP>{;pZKA6r&?zJL~EKZ8_;!P@ol2z0A4%Z|}u{w&A*5%E`_;U2BDkhL3tA&vT(4MH!hJ1*x z49DHR5ov$!dp6Bfm6KO`V~=v;aoV7UPt{OP_=#mwX1|8>sZ@_7qehJgeUy)9;F&7sv-vPZ$(DfHz>Ew@Vg6pf}O-99&LDUIOUOlyK zxt!GTty`ei(zR2Z`~5pjaHU&^jq->uEwu9Hq`B5ER}+1x*hMVmwTJ){Ch}+u7PnY} z3>E1bwJmm2Lh~#rA7?G|c9Oga3*f85HbR52oVb|O@_Ve|51@R z6lwG4C}p&^V)ZcxoIT%|Emk$R!AzkS$dlZtt-Xn|u1_LQ$-m%3@5jn^Zrw7#%9ZTG zs`ok%l}khS^|L$cXMKR&PHu23XpQhuQ#cNQ7)W=t!_3FhorInW61=D49%vqKLHdD$ zIQmw0cFK!s)5S_rptn$wG&%A~BX$V~!Q8&^%w@2GblOfah;(udp7jhWB~JF3bH%$;dr=L_(08EH&k zqO5UcPH-;We?!)BAoqoWif)#p`_rEE?8Id0^q?WzAYFY28yxn7QDw;go8WT|5Ln3q z|Lgc403m^haE^GH7j@`zYsl@8N$eR!Y_{$Ku;ykT9JH==?I8v;-1h#?<3#U z{~Zxt|J1JJroBQKe=x)+Y>{CzknlSg9XR5TUXSe7@AJ`>tb*6;Imt1{&$XkbE~ru= zx9$?~5b=Dm%4dB7fx=d=wA2?r8o5PXNEYdM&0VF$Ti?7S7^!-g+d>M- zt~tqYq)^c2GJ2&7NzT7KjQ-#cD%H-2CymqcH*2-ER?(N|l3GevW>4}_`Y7%U9Jncs z^DJ5-aXM^fPST{eZy7S1(H75`dk&FWIQco}hr*?X+gf=P5pEeA)irg!Tep=inKQI?y%Kk z*g{FKrEkLHB%xkfIZA1iMJ$4>W#!Q;?rb^0zfOq3I%7O0_Cb76jOyu%q5?8k3FP zp?job(}I{SzoPZ?uK>lcar*XrPqZ_ueBnve!n$)9m4PlaS~AySg8-0sP^85m$U~`i z{n~?sN;tQXc@a8HW2@*j-)T<*lMVgla6UBXF$Ir_`1Bn#9hxO+Kf`#+cuw^w=N#yF z=*@j%n;N%969iYd`c8?E_-~282I|*u4M`xz7Ui6`ZF!Y9eXDVo%6N^FpKZ?ck8K`2 zS9kop<=KDBMXbKkQ=nsOT&&EGPVri+Iw@MzQmI+dQ9d3&wP-66q%iX5(^ZP~M&O7= z%`!880XU;P_{f_{QB))>9vDyai7Hk;w_PbC=WE4S8gm^h|0^1KXF=01`|D%o--y4X zcY$YU{dR^_8k-OyrQ|4`qp#0!O@Jow8byw5A6!U+la2=3-Y~tP2oj52jmn;uDN<{D z5FZN~Fr0$`{0^DbDQuV)NjL6;LovL4kIrsl86zGlbyJ_s?^_*1`o+DYleqWfA2pCk zQvba}I1?tn#lsxH73>AynsIhkVb% z544=$%Cm=sh;f0~;#&07q{unO>)uo=fG8GJ3_lItUr&YU*mNWcNmjU4wZw{oofrbu z_4TX~|9&*lUajJVT62_~*W9Ns0CJmjbQ{fkfKz59dr^hqF^L`aDZvh}IGFy_1?$}; z-=A}M*T-qmrvzvwwA4zk{LhQF;2&cXQ^}$bFYJ;!P~?{W`Sh`Leh;~f9RUPQfI2nF z(Mv&DZO1Q+g$b8(?A}+Y{Zk}p#r}fj@3rE8Ws=Dw@$S`uMoUEsiw*2w3aS{kS8e*k z(0eqb640}}MV%PNl?7I|h7LW>-G%+ELYenK!RdnkHFNsKwbOxf`xSj(Ksu;)MqN2*HoT{fo9>>f^E^)rukx+Js+{*FzyQfOeg>LVhv@yj3$?9C&Q10cQ zDkmHW<<84jr4d6i=GD)NSgZ${BF^pRSrA=l!Tp4vJIxxS8)lAmh_md?j9{0!rn#nc zSXS5IhG-k9i{>Ixrx!(wJ!{h=i0%ZTlj`IDSfvL_Oir$QD5`?pMckAZPsB)X{sR|W zHBT8&UHgh#hQ57D%N>&o3LKgd2fB4$wrQ24*u>OB>0Tx9YCXMQuf6_f?uoGirDY}hEXXweKO*-6U^AHWJYhj6MuHB4 zxgR^FUI327?a;_JSlDL&AHA2bd=(INkljGH&qNNdEsDAyh5{=-sP8qsAxc;d8zrAwTXA;GgFYXFz$iC|HCkDM(h{%JwtR?*!Fah z@H~Qw6ow3C>n2Sz^SF+{{t~bU#TFDzw#!g;2Ob^lm*3vSejaZ*Elv@A+Vf19C}CFa zWWS@`43%W+cUA%JbC@mhsC8cX>-BDvS>WuY=%94yIkz z9e7(fSeXeioY5$x+);VBZi%@)lj@Hyv-@boqdVp&m7Lml;rh1N#l}odtZ3+EC*%<~ zTQo?hE)N@54CE$&u$ zhvb=9*U=_l$Y6}r9Med=r>UlxPnCGm3J%dbs1{yFvb-aikr@-A_H^_)F}m7l%X z+o^MaV^WbC$tU09QheFGn3d+C8B_ajD&+E?>nQfU=yn;=U3SLxJRfSbSnjEyPNy6C?eC9t@ zd(C|@=mXgo;sE3EChGpke&%C&3XN)B%R9xTD_E>i#)G5a9>VET zk11aej(7o6sA1NYy+*hbPFlT$i5)YQVCrpdDR%V*dC}c0)dCljbF~Tm)0b4Oavz_y zCnzJM`RY!ISU33)CJ0JHMj&k?5zvzpp>&(0hV!~&mmE%51c`N)+n*A0hSztsbQD!g z?jt|^iUF$y&H037?2u!ADeDQH{dqkPZOXTZZ0&z4Pok2Lv`p^ z9x1#0n>*B)X7Mh##23K067ICYVE4pLBpiM2697^O-@jOD2uy$|&bjigm4nGNK^t3r z^Yy|#dN~KvRND6KK1<6NdMb39w2gYxtKF)twq8UDWwXj-GB!Fak);)2&X|FHKogUL z%;Kb-@9Awr0IyP?5U|p6PdaJybV^xPKaw%YlGwI0m~42pd!=DF@0W)6*mgNp42HrzRc+`u?+kSR zH=d~XX5wVOTg>xR>C#ar7mmm0b|6OVcK970k$Af>7-@dZcpbCRfj|*%V9{eD4OL?Z%scS&O@)87=I5 zxToBae7?xsotZ9i0kP%RE1nJVP2`+Qi9de)cs5-5!bZcMIDW8GQK5o91BU=)rawUY znEy9;ifRTvB^59D8h=e~zCAI(5ZD$=;Mg)DMeqA{%sB}@CRllgN6?31B{q>DWaX}c z&*JIxrP*T$o@B@0FpR|2=8zuJzNW z{E5B9qcjj%5r+T5obBN(b>Sh3d3yjy+(nC{1OXx`iIc`0$-tAqd)$s(s#XY?*TuZx z>7?{$YVvxdnkC7aYLRYxavIKm$Y0FCkAlQLht${U)a%SrPJhNze|#j`sldFztn8?4 z`0vC=<4z_jmnVK-NAnl*ggW@bZqwG}cshetmc^#OjUeEPhlXk#;%t0EbcoPs1#ikw zlhPASm7lfj4#kzt)olo|$@>s(NVs!5l8h@{<77mEU#`w1--f3Pca$;g5xpt1V9N)O 
zzyhNlZVxTrs6aIHZnT`lYW;783;+b?E-3(yKEI6(cd>eD(1Fpth zVf;p(ZCh4roW$qm1Ky(Ys%b|Nx(+Yv?nWjWVgh@qA;8#oJ%6`i$?U_sg2O)b8%|0y z(WHfamOa_p=c84+9uz;vNrH6bkFrInE96|^G8aNQc2C%wn-+PNVy$Mc@B=e(63x<8 z&ndPqSt53J)r)rgpm9C;+d5k1$yyh*QudCk^K5r^+>BK~OYKa7N!@+G(2`?3h7#>E zL!`c_7}H!I6jeC%E#A=QER+M=myUuRJ*Q*%&fq5=o>Rq4y$MPAIF=T8A*cYxZ_D|p z>0aDao^AC#HuFVV0J#lutqu{>6QsYNTo71bR;3=8RuJKbdIxJE!F8#)K4jmAI@gOE$R#V z8<3&Fo?aq6pVwx4G-<7m3A#T?z^8X3pG0{Pt+evi&SZdFP)IqzBwIht1%`)Kg3wTk zrxvuntsh!brY~sFs~U-3{FYQ6#AecOfJ`7hcrW7L>ey>y?21?YsJu5FOpu0!kJb1} zyYXv_L`Ne7JX~gStM{S4Mvr-hCHqxqj_h!Ob(WQ{<4u1Mt_1wky%Rpx7z>OM2*=3e z`ZFg`rU25HKg;T0>C~+s?&9zEdiusi&<<3R>G(54Has-SJA~k3e>lJ3y>6*y*`?98 z&&kSmY(e$DReh|WoReLLB~j)OddwSQ+^ep8)C`C~r+T#afu9v?QRRNpV__SFe!dS2 zouF@4@QlvU!H)R{`BgwGPdx3ad#0`117gdQ0V1Z1R(drpB%4abLFOZ6QG$%aA?3F&E}OJ3b?T)@OCKk?joE_cien_B7$ zklwrRmwTYaUS&>1AQn;f1EY8H$)OXhE2hPbsIB~&uvF=_4&Ia_8GJWNNkhshZ4UwS zR)Sfx3jQ<1DvJbm0&BUxJBrZiYztud))>C4GFjqn#dFq#)X6pMSy#MdnO?^Xx_y5DDDQ;6U)q)wq|4Zj-%609e=Z@qCy> zex{QE_W=4tTC!WNgt5K1ab>S1o$NOECQ(z^<@pH3s|OcGtoDij6PN#m;mYIgGE%F3 zH(HP%i_45IkHg%Jd?(G-2HMD0Vx4mm89C29X(EhcLvCJ>BI?5-9V|5= zh!M`L2rcsL(Z1auWX>{WWy#d$vz*KDgJ=BRS++IY*per{1Oq2K^7VwRR-CrW-2f0% z(aUXO&Y!Fj)PXV(^7lBF4ifhIs_E6RI?wQAWyHO$Di2N0-JIX-ioSMNYD=%)(x>u( zYYgOg#Z=ShlC&FxB+V{0M+wO?4zsbHL!MBocQoWlpnOfhM$8rS>N`7M2y)3iq^znvi%#G2G(lTnpTh8r4=k$d39R_?d#j+2nVQiGh&oeiO#V17VKwYD zFGN({m3U*KKq8f7o)b0M5{rNTQ{F22`b>!8FX^h8cjZO)CDVp=0edIP-KX4dRc^nu zsB<4oZU)GA{4d?aMACI!Do^a?Sh1$-j>PE)T@-y>-`>@k1l&LHKnhjy^7Hf#g(r8K zuisb+MZwDc+>hy`+}{q(lnBJTx}{lQ-8Eisv1^WSFfSpjKyQWUf^S5@N%)o|W^X7s znfJbP?wvrvUA@L4aAB>Hzu(@WYi%E=*TWEeUhnh0^=8*72%{kRo2QQ3jHu|uNnWk; zXts3v`5=e}%b{X3nxnJ^on({_^exN^***ZNh{0_|-> zmSx`~S!C`$(DIz1I1CAn<8|vV^`61>XPng_%53K5!$1Z@--cRGU(xX^A3$KRhDhdj zO}6@gLw>n-{Xpjus%AHBD0i|{KN^3e3>^rTfB$}H+|>Irt_%oMaO(v0BIe0?%Y;EQ z?EQ8xuNOkQTP;#RMYMTsRV2^;aXYxHf8#60@ql>{dcr0$6FptPf$&dqzD9ss8k^#| zj&(S-EClOybb7#y=c2H)w>DQ=rCLY72kWMRmew)Xg7q{ZKUiT-K)c%B?_`1SQZMcP(Y zYLW3A{&9w0Vd)afXs?WH|`vuUl@{}1rkKRuSor%X%zrlxJVK*}-MME~Q|z5d#wDyUovo4f{&9)~oYb3hlzb ziU3c6&qBBCT`hcS0*oM-DzdzBfG;2#+av(=m|jQCXY};%lB8&CQ)5WK!Pk9~UOk25 zpYl{1tF{AVoW2CC4d!$~k6$t{;N$bYFNvU?J202a*XLPAYjk45 zGRFd2VJgj5m2NQi$=@d-97fww8Tr#7H|eHc7y2}A@>2m%A9RvX>TD~@X_B-Rc^@ZB zX>ourwx`NIl-y*&oCs=cYGSvxVZLgb!htqr} z2J72Di76=ABQL6od;>vS*Nv-v771BT*e8N`-Lm803zFtl{Rx(qb@~pj^{uZ!ar!G$ z=-cswc-_7m{1CuSft)Qnr7|Ue}c@mwD34$NG;+njA zpq^f`mYPYCVnh}#@ejNWSOmaA6CLf^+&l{Zz)z9R_k83Bo+p1BVg_`=M< zvjadvZ~;>aj6eGlq-~=%juMAxtwb;?YXRfi^FN<1Pyf@KdZ5$MKW0cdU#hyhYG3YC z%xL!5OAvyg1E#Wh&|SVn#~ze9{cLa56%QdPp^5>CW`dgO7`<-$$qd2LCN09&!3~*D`m47`oPMbpFj}+1~6Sd z%NIoMDF>0Z_(9N5q$gl?>plDXh+a)WY{TK9UIPlovZkDT;mvwENMudAA>-y$GrR8i zTc@>i<+QWlk$y4IrROHWN(66IlmjyU4@d!QyTivbKlkvklf0#dm2nR~y-wM=!8^n^ z21K(U~Bcu<6VDuX4=9)ZK8&0rUv zXqiCfdaj)pYi%JhY6qQuV`Q#3_ab~DvgXIPMPzgNHWOBWr{k@gzqnN_@O_af%vPHZ zM6k5ud_`SELF>YlxuX)WZd>f#o_LemwJ=m6FH!A?SSwEp+(QkO+ZqW4jqiE&~TU&5v8X zKYjU>+$N*yp-Rpn;gwe#haOZKeD?)?+^>p>=2ieMHv6UF8)ksR(MI(BudTM&rd+d;ejX=y31Sro)})`Jt5f zvF2zI`7rxkCL%M*Uv!Omdd?K?Lwa4j#g6G+Ywc?M#P4s#Bb0jc#gBj|=(8srWigYV z5@MP~D-a5zfo0*-RO}c!3~8p3;9Hcc>9%Zyv5HqGMZa0<`a+@6o!$wrc~8#Z z1?0PqQnmVIJVmeGGa3@eT=(Hw7z~|oZtb-eB1)CI3OZ;x`8%~*Rk?q&>$KRdQvC;^ zo`k-Wciaq<8m*Af5SO%O#ftyrfsb2wXIU;S3E%bw-ebPLD?HJTc3se`^Q<7Jb20R{{C3A}U}9EW1eHpD92*girb9Tl|ZbWj^S|rGmL3Zgr+3VqfRV z6fSKXAFoVT7aJq9oD#af@s<6cE>C3Ga2kv#`F#WNzb$YT-NghH!+@uUROh85Zk%_N zZTc`MOzV=WYhM8aOb<~fc5BA&A9u_NobJ-joQsikR*+tnx^(kqm8FMoK&)uwbC4xy z6EjSp)>4Y$^ZfH@$Dfw;fwc$J=lRA3Ta3DLGdLq0e(%9OWsUP0-`$PtiD2f#{pmoT z#ylYo1ch}z_UPrNWpFL~ZF*UDDeI2}tl(5CbU{b@&g`jRcD>JYobZJ)Jo;W^z5c!; 
zpV}?h@ngrF0!dx`eNvXSm$nd64$KfQ!;{ z)!aA(Urc9UA_OE8uZQ2FpWPC)xRu&+Rq+f0zM7g%jb^e?bWauAcA2*#Ua7DsykgbD zBi88X5$zdmis%aJva3Ui9?r|koS3Z;Q z5B*swbQ_ibUdp>Twn1<_RmECY|L?6_0TzTrr`^g}s`V;ew51-jt2p$O+h_$yc)!HG zlQ2eK0XDy=if5W&yP?xzjUDsCUDaTmNZ@+L2IHVq?U|+s0nJ|;oOP$uef~)>f7cg& ziFsikPp%tJWytVC+|3ekkNLY*@c+W6g#CTd;`i?Zi9-?jp|3E!FvNbP`+2Lb2`MG1 z2F>HsH=(C!JNNH1@yRBYvj)(2G2b`VjnMi|gY!Gv9VXKYN`_>zsfay(6-(x9GH+U7uOXTyN`}2)) z#?=}s^9Src1Ojcdw{aq#SP(bs1%(@9dgsw@c{OumZ#+_7V)l_mT>1=wY}ytri|7un zkp~bfxax?3Tzxa>K!1;yu*jiuM=3xk$w}aV2b+u<^V)M{=`K)|jocJ3{r|7mbBhFj zE56e3?`qi53Q+fic(H-6HYfQO+7Cy__rag)4T88f4Tk7Zy0+j{I;} z*PMzJl~VAqYBd{@wqX9#wAmBaz)B{%@FlbV-

L>o0dk+Y@)H|B}(Zu>iDO*SuYS z2W`oMRpqQT&rxyT(~oiDr(@5NbnG`${9M}Cqr?!PbIfA!26)e!^(GTV=> zp)8GG9<;=V*32y*_$siT9s0O>OX!S@OfMny?b=S@xE|&X@Nw~(UP&B-yVNg&nuR0jt=`1c026dzF7c? zO95}g2XSaV93{0_EjJ8}ZayRQXdWGy19Ws1kvt1K3xJO9&|=rk=Kn6y{!e~-mJ9fh z<@s;gLH_II9tH2uiS&8|tNe<2u>lAGKHrBVUFDeV(RmztXqsf-^DS1!zT200v-8aV z3d^2a4OJCQDVPi{z{O>tvZ^)lIjhQ&GUid;ntS=SjlBWaRWYi9oRvTU{|ISldEKD&; z^*-XPqIcV+@V9TZuI30Y>@3wWj8pyf{^YmjRVlb`5{Rp4H|RX_|GY0pmUb2bDJv9u z{#{kR^l7Ie;J3=|p<&o7-&&16jHW;4`^9zO!2ieIn}u-s#?C5 zHRX{L_7?gyK~k0{bFx2&gv6;;Ob&5duYWi`H~!_Bqqp<%(oSqZy83ui^gE7IWlmCT zMDW3-%YL9xSRo)t#X~9he=udZ?GUGVOI+BKgMo5>Po4ob4w3RrQH)Z?EF*r8KU{cV zI5Hhtoo9=-Vbl5eB7(!PJ@pP~8W}P6UN+S!dPObe5pPtF%;@2i)1=uqkU?%Z~saQW``aUNBu?=arP=BnwspfHdCG4X9sk0u^plK(&X``l{qV5hVU-4!zLn!DUsnEBwnBK|7 zkbH_f>_U~mJMseQ^WQ0lCF!_AK=k=aQsn#({OOPoDNrYxqB{10a$&LuN!eBw8QZYO zV!@eKQGxmh+-PE_>FxnnMG|!p$#X8ZT^W(WxM|=C4ybQCQE6s(iV1J*h`8S@GM-+uAXv+&#FfY9kx4O z>Ov#7Q0DN>p|9B9-%3<|2C!UT=;5x9s_L`dwj<6JV3N+aZuih4Q1Lqbt}1 zeFuWxI~4=}g!S&v+eWiA0GYTzw>I(zF&I+Ug|ot(rZ-ZwT>?oev`-v;voA=&zA^z! zbM!5LdF{Pr+5WY(MpAJ$AYPcOG|AL$L$f|!6sGY}Lk2)R*Jiv$*^zY|+FdOPY4DNh zj3!aIt-$Ce;43L6GkjtE=G?&rGlM1h4iAP604z!v<&xGXy~x&iMtVwi=QQ61fp_Ab zIT4(i_Z{4(&x*m2BLoOgy=0&zhbro^^L_bz_$Z50Cr^&HsJ|{HP78Xn4pf^LSYzLf zH%vsgW>DTZm8c^30=dr>z_Z-;bJ)#%Ke&lQ>l5jPI)TR^F9M`x)h06<8JUl_`Qcflp z1u8v1l&BZ^?T}v+8_wvytd{uT2F&!L6uR)%t?Iqa2OC=@OQfkfsg6C|geqO~ZDuJL z%E-K>u01(m;`=kl=amoMUs6kv8E}}dU6K6S z+-!O&#kE=q!9zy+FA2Tvw;UGd@2#o<&qS(SXp)zW>t^(ggNvtLK#or~MApWQCERzH zaH(`=_Nb7J6Zd4%H1J0)W^%Oi=>2Wj5+}Tntsdk-T!{u(2&;;LMMpt2GF!}c6r4$S z8vUF7xH|DGL?C?HW2cn%`r507`$`_1cGkO;Z5VY~_C#ZP_f zyLUrO8scQg$jEe}B?RKX7VI~6uX<0s)8muPu@2(fF?yv=@-6YAm#11~l&&g4B`Rn* zH8cQVIVU~8=?c3Hevt93$DN=6#!<&I?Kj8X$bXXZLOyu#AQ5K$Oxws;Wbj)`CYP|Q1NptLNO4~ql5WO2~F+^pMO z_Co1dh)yrPup^gjT}XUh6;_3`1w0*Z}18`vNR3Obw%B+(2jd}4d}XhR}-O{YZBQ|)z$HOF3@E9 z?I~tOxd#sp(e=#6BmwzY6R8Fk?)UYuNef1yyezJm5Z$Se`l0iO9U=8D>2|Q50fc+NFE~jyzvxQZM1Y7HOgO@$QQ|(OU(; zUh@tce}8Lg99DUPJ{h;j0Ty(~{f)MzWgSRvGF=?hPfB?Ip6-fm!o1pt$BgOsQxhX# zAv^-k-;8v9pcU%eh9%sZ>~tXtkw-VAp{I^~rB1}YH2OgOV+r1%`3fqS0-C*d;@LzS z?TBW>-aXx4`_E5Hbfh4ZJ6vq9_==X{Y83agMt=R-hL5VjqT&$@qIW`~`Gw>^Ke@Pq zMY%Ft_g~BN#;caVKC^y!y}ju_QS})c&)pj9>NOskn`ie~x$`RwF?&_N)WiFsRPfY? zh7@_LJFy1dCGU^nJ{Ag5d*}~<&IK&~hnl10sdvJxkn@uEGw3M95nV3RD&G$VLZ{^|LTD3tw7Dz0(slWiPa1k}>cl}opCCgmPRR?mo^OxlC}<_B zG(569_VdoN@c_Owp~QA_*Fsvd0@v4DeA{Wi-Nz4Jy8r7!G#LU^7Y9Wd#IOtp8M1x; zd~bXy^v0<(?<8)dHb!MDwkI8!0H{AJTXRaLZ6j|d-__OWs=*i2Kto$f_l3e5J~z2c z@?D{mx~)p85)os+@JEGi~ir znchA3vD=qTk=fxZ9gS31M9C38KJN-37^^-wy1>HWfs_ZEIyaPtJMLD?bN$c?PnB4C z=F*w3ASX(WH<`|g?cwcK-$_c|0ZLSMiD)Y}K#{AFoxgo0m z5-0VDihDxyv$cs-60x*4=JkPnGWbpnT4itg0amw9^}m0T>L8k!J4nNT6xHg{L#F)& zj2(HJ-^l$~l=WC!vF{$Q4-{MYM({v+YIuqF1OTJ3=qOwEk9MTa0f)sQ*x~e@jj`J^ z{S%(l+X!b|5y(+zh~^!0x9BOqX?aOWLl-2NT<*&i*YLpFu%M|tuBz7dFM6%d3#6+; zK0bY>FF5KJKfT=1h?Pfp^(DKOInObH2H-flBAx@-*$J#P)r@r!^82QYWTCmY3`s9td@)_5dQNMy- zE3qo4z8%C=dF9PH2)ArdTqMx6Vl@c|$U0I#9wFUIu{UeMyb}JKm!pl9CIPtu&e2-z z`jH=m?iT^VjL?px2%@F)W6m2Tz_BDLNjklUu}>6czb{8BkK7AngqzfP(erIk)km=1 zJ;6lJ2^wrTO*_qYnX+lFaOKZ#vO){O={yBrC7tPejR%=_7 z(1kyLm3@=(RM@=?ZJznAO}i}&ZC~!#(>}OravpC`Uj;5A?z+AF%49uDBs<;TF#Sqz z^`1|{oD@_~<=@1}q&iG&hjHuQPn%VElorm)qd23cN~qlKxYc5KIBF^({t7++p@z|F z|0|w%KRkW)H%wM~5HK?3h90i>VGUZOfeO(5c@K}(=))8 z>a+8Kk}-3vcge4{$$MG+{f*X5F5RHU&;I5zkd=v}VIFG=Uv>IWMSRwQ>oDwGWy7!! 
z^cqhyp~4))O(k}>=2Hlhr{ZIAkMewuHrd;#F!1X7(OXl}JG!ke`?06PM@E_`Med2t z574pb7Cmj~%gI!G|Ipjfz@(MYSu%I=IIy$ApB(g$W7LN*q_8^2v;JG{#qrKA8eaAf?QDE_1oBDEiHV-j&vX4e9kkgsu*Z0;-d zF(*P1+YZr~1%7?9109V7fB!4MW6ry-boGZJ8dr}^_c+um=arKM}G7u^9#?tM!x z9XUqP*Vl%1anyaKYagAa?clF>iqzwf0yKsNbtJ&I85D6fRMutw{3AHX@vXRA8*CU7 zWoUD?TEcRntA43dB{rpzpPJ};b>P}wJyVOB-wgM{F0ki&>^wz7!)!n@t8IP}NXLq8 z$C)CO3P(y*d#whrDds^)YDWPE`ML z&wBzMx5F}4!yO(AC%nwH4ZS2hvD!MIm_JE7|JHu8iFRhC=?ZXVs>C7DfUVF+g)uw! zi3_rgd1IyzvT3g~1mM$Eliz*P>1(v;)Hc|>hoFbwJaYP2sD9b7i)EhgX`3wX;SbHg zIcFJlX3ZiIGT$4?x1BFJG2h3*YoG|_&MI)rdF3UZ?N9zKruQyl5NBrUDLc^T2$h>` z@@wZ|Eo9`n@ZL%dOD}4cdW=B?sOu#($?{-EA>2S(w^1<#xt@Aq;4@UF5_)je_3L$s z3y}Nt>#Dl$glAjr3uk!0+q-AW>ZjE<0~!IntmqPa93SxKLgGmvdC#By%2tr$1?-UR zw)~BKHUYYjC#>#^%71?HOF6m$()VP}3Y}%k)JB60Gxi4C$~C(e8kHz>pky1lFTH06 zsY^p+^HFzouqEbc4deAzP4@JB>v#(^Vn6h$C5K}FD^{VGmqWj8c z3dbw1i}NhkvyEH^nvay|N)F_%TIU}@EEHKy$*13Q{J7a(z6q1_Ka?6LLLYa$jpCp= zpOJ^PaaLKNd~Y;Io^vDb%W`+cYj?!mEw$Y@M%k6SXC9@+2n5t;$JeIf^4Cnq>(}H^Q^W=>UvIA=z4L>M)U`z_PXq3gC4R-oD2mzhuGzgx z1Q5yRPbJ3wjZB^%o0F5t z5|K6V<)CQtKxc}@H=ltJSqVb%8})?Jhn33L+Cn^AqZsYBik}{_MJ}F;Uac*f9%9$v zyb83uhyxT~Oz>N`CIKiwL|9*C<%MXzgVN|U#nRYOAzGt@z@?57 zZ|T~LWNZ*)GnMp~B75!A{>f5C$Vh==;)7i{JJaWwLFvJC{hE%$RLfaeyB)&c>1ysF zo0}2oL#ax!#YRgTUrL$0i|?X*Y>Vzycw>55CqiY}Wnp<)Ml+1!-XZRe@K}oo4;=@szu|Xy26n^_n<82k`Vh84% zBFvCm9#!a1v4dyp}kDcw}6Pbol-)HfH_YBfGdHS?n?kICjzHaR`IrEawy8lr9UuP~D z*E)v4n%Q7-d%hRc>79Ea$5Y1aRy+Qhuk)*r; z5iJ^{l-~jbAe<>aMfRsY^1JAfr(2pJ!A~e6Q+0CTSAeN3fiI{iUwVkYewWEBN9qvO z))09c6$2xU@w3NP=CVdZ-I-MK;Wd|=*jL98?g|NBd1i4d)zz0%-Sc54)z5F)YkfhE zDMWc_E5{XIu&wM=K)EH$W0tq>FF&+xjOLF@QHm`ysd?QD6kFP8L9WRon^Z+-m=&WI zLu;8}%)?64^O%<-cR->t&O^_9rYl`5&OtzKsAxfFb#=%S6`3TnKru*dd3RWXf1;6x zP7#O?Q)~h}xf&fB3cs_%qgTv+VAzuFz@ck}x?Sq=l>mmS?<#}HAlF#s0*yIS{MWyi znumeIcg4)XzmTkS>n-PUP@IUf>g_k)Y63D;}6^-EO<8^!^kwXy11K~8GChePcU z`_&Rsj{(Xv=L9iPM`%}Bzs8TuS@oQM3$lXi-@J(BTb~Q!WjU`y;X42K4YQWPD?_Eo zC?3QMkB7bslOT*yvUa_cg~u=`q$3p-3WDB^71+072ag(tIn8v`SQx_$DjVLlD>iQy zJ>HyC%Qp+1hIr$hGo3sf$~Sk6mMps-~bNURaBUEFPRs?1(*lCHVU@U2wX!1W-O?0=_!?SkXEi#}P_4 zBBw7ES&MQPqUl*SR}RMJp)&-ffwgX;N&_jM^7X;j3^@KS{`m+m-; zdvTO)a}9V_<>TQXd*>?~wkONyigpmnY|UDh1IyZVhAY|RC0yxnABuhct!;gny9qAH z&n3{R(=4tD3fEVNYT<~5AImwnQRJF;6r250b!W>ZEIMqDF z94sMHQ!rQ8PBkxK^m#^^`C;v6P{>5V)D&vj{i<%Z(xX!etWNiDkRDeVIAM#5@~4Xg*mN6Q3O zz6Yvu^p#zfvdy|`JMbK|bCN@y?`@)2UMGf0iHw~JoE3J7iYB7Ie0?S@a3af(eDKn^~_wdDq0Us_u(wT`z(-dLip10Ff7yxGnHUmHd+Y;f- z_bS0o)P?4+EP7HY4kO;gXAktZyF8xHN<>(8-^l+`X%kdq{`CbjU*gWO{MO6C8sPx8 zPHM3NUaW5Ij%l~&vt4u{ay{_`TksD?H@0o3A*b-V>}~uY=uXLu8-Gtx`FHcSC)!i3 z*zScoUU^n|OFH09?9EhH^bFBCz;^e7^g5FpkFtF|(96}q>E+crs+)GC2!NtP#=hK6 zLdwz1&nl7vs@?>5e-N0H614HUT*}1!s?s{)&XQj6A-;#wq5*n6SLlX5Gk~TQVc%9J zd}QGPOI~WNCP6>ryH)()ki^H7vv%*1Z!7TnVf)^v8t*>&owNRUX7eKwu)sdG%L)r- z&*9DL>q31Lyg1_KT7tfmUi=Je1SywF?m)~rT)*$S_<}NKP7sGp==iAGHx02JEM|Uo z^!VQ#?^O~ylo%le8Sc$T8t+SbE)*J$&*e6z0%TI+zQ;feAZv8=*fj@>-kkDnLhaF) zBD&yuu@>ZcPbdHfk{|!dFC*$@hM{Xw!ikZqA7Pnaqd0Jgd(7p!*sYasbPHo$C+XcA zb+Ya`Uf2>~2;5pztjAMDM84(XP$+u5&U$)&n2AN{Y;Z(p8c(#bbN5O^9ce1((pn9HRvizG_rg!UEI(@R1U036#dF`C4>cgWZkTbd_tCPlg zj#Kjd?qeOPOJ~Z`04zc}kxMe!2;4r+)z8eOs~Kw1Sz0@{JQ{Emc&*G7?d(nq+dF*h z!$N%)azxR>gqyOrOB$0M_cUJy2)1w!SYADaPC72#K$n73XI!$N^tS?eZl zTq8Ausz@l8oW5B9j`u=Ku6e6L_znNb28^L#{pPB+ad`rVwxBz@LaQUyYGE3hHeSps zB;~XGu$ZQf$yvgOpTIWLn`@TTLh30x_w;E358(;`(QnbaT%;=}rv4itd(0Jtbm+H&R+ASaF z`dw}YCO=srvg>R;S4EIE;*aZ4>>M9h=4ie+ij-xUyt}@OQ%Ofmh&jpmZBMQdfT@Hs zluhv7^Su7^_N6@al_Z~r+Z>PdwDK`2?)kL9s+mC(k*bKT_vb?V{k|{03>EtDo5S^S zCz_t-lO4JsM4$wWrm0yGf%BWIrcv`Zq94wSy03V+FZRi|`;80x%tyE^)F{M@q?)ut 
zVecRU#rhu~_>{P>zCAB2BHa>yG|EzwA{M#6jZdIqJM~=y-=mzcr-fT4^^2KWx>|I8 zP$;jR?{`t# zU8jRyLnsK^Xl4LzkI7{dSOhH2%6~Bn|MKyWhX#Oe=H6UR<5u>ZwVVXrY_Pwa9R>F` zr^iHG-d*4~x8d7?O&Q|WyHm}wlGO4{l!43iv3GVjEl|}%KaoHj=Iou6ifr(OobM`c z#R$MS`OU=6!D>fttd9FrP0F>z^^NOHhfG6O#}};<4lhr}Qq{*Eh!y4>+gMgqEA7(V zi4o&&U9($c9Qjz)6rGg4mOV6EA7v;piP7mF2n9y(#`QBlg~0pBawPO zVF^M9qP=-UCkwkK_m#PxH6^DZaC$;E*VMFND4v!6Y~C(t@@? zi^^C{?I<*U%PCsW+FuwbeNhtxoT8}n_M(2;crvH>C3!XY%uiga{m@km zxE!HVg%03Hn7_&{s5G66RaQzN>J<^3%SC$SZc3n)?k1&vWEE9m^~123=<@9*M$NKWdJ#?ARF7py9nm?E$|()r8c1ffq_Am#Mm zD@6bER_$4ugi9klqyT`8t}*t2nZo;ePWU=JiphBLb;69aOKCi*Ssb@F_%j=0ahDoTMGl$aVyZkw&V*f4<-lG z3p=Ejx1+Uz{$CU)0+Bh*HOOXL-hLD1ohFl{qOEdhnu% zNqdUwm>ds|>O>eY$-nmQjMq1Y@fZr05w`C4b=bz<-Kb@PhbQm|yl{L>*zN?crpYo|GVIs-zdw?9*FUo>(Dm-f6Cz<9n({-5*8ne4}ej zF&v(m^l|=j`Kb(xk+d8^q zD=3n&=P(2@dJ(4}la{y}Z+XznwCuMxzoHV{wnaA=;LVe@khwSG6ZIDqbh4+ zQG3ji8}uJUJ3M6HwGw_4X+Ndkk`bv7=+hBW>XZ&|Vn^$jb%%rFxR&X{Bq1i3M`8NP z^(n(MXCWIg7OFvPI;DB#io)ehyWYz#jWYx0A6_$6s+|?!W>rsfi3DxSYx-IRN0`of z7_3Yl5AzQ|1)qG5%AYFGsI+=PPGKJU;zcaTNnqp9S`R%daD;X1*qVv2cU{OPpUDza z*u}r4PB744`TRM0%z7u+m@Y-H&$ao=QxypdR-X0OM?OP=qdGcUf-zg$yE1kXk8nReq0z0xz>99jrQh9J0gT?E^?@xDcBX7vwJ!l$=xV2)q__uMr% zPg7}y&UD5b-jfvUnO5Z(Uqku`O4=Z^p99}ua&`S#fYPN}>Kgkf+yC|(_PY6DXfXF; zQ#6O#r|HkG&Dq3 zd>_0lvGpjl9M@voUJIPd7_Jw;B1uc4#5kMKxYIAbO8%E!7BORYOKmT1{?7n}ukFzd zMPCEiyGYBf6;f~MR^zbHA_i58hOEKK1;6;J3ig+I9a>7N)bz2 z#gOI2WLPUkp9s^i&$(w*X`0E-XC34CdHI79A|#RnTd(g*8>yLT7`)goj`iXkfHM${ zKM=uemBSxOz0;<@pq<^zgRT&gEqW}>F6?L)UM+h_zSK!IobJn)Y=xOHTgCBaUA0sN zqfN^$YO_Kgi2eao;AuP;#AAn@j$1D>fC-`Ky`8Rj4Cta*6^TuCl(*H}=(^?Zk0$^I zODf3JaeMn4>EL3 z4`DK6b&AGn<;4UreC;>KJ7t^4j|r3j9t~7g4iK5Ugv^$Cx7yc!1ameIv}ROM60h(S z3SVsP;SIligy!%oejE8LR)m$qK+#x7%Ca2D=qW9*)KroOIBT;6QI*Q%JaPM{+36Pna)zONK@ofI#0yI>Ksm|> z|AQ;+v5PS75fxC14k@#WLMP{%HMto89U)m5 zZ7Ea^PVa2Zg6mjTEv|BhU;(md^j?=1G({N_+CM#p>E69-2FyYo7qrC53d3&-2PbNo z%H-?AO1>BHmyCHYjobslb-VnIl0tz;ceN&)(K8V*7nD5`D%x$|dTma8ClAg7afUn5 z{W7m;C|~BI2Zcpb9j7QyF?v^*zZJil4_woie9#gW2?Zd9Cj>-56eV`NLaRwCic)Xj zNAoN?N2gbu^^~FXEPj47&8=a&DmRfS%D#^V%nAm}*Mfl`?Di457B+gml&H%At@R+D z^5Hm97#dVC+D)gLiuHKMxp&-}&$_qTz-Rx^oK1I9yu@w!{;NaN)aR?D7eRn2^uc`4 z4};S;5X4f;4!?c4XTJCVY*;kxvP$LNlYbtu+RZ8`8D?}e6fiWY5)XkdH0^$#WA?~wKoQQj=ssZv6fJ{=)KjM0m;~UlVPVDP5xpEz8iX5^*$3sIx zIPGKBYSE%85FD(z)d#=kZh^WqN=dT5HJMI=SL9J5%9M!tKK}aA%l^#9H6&@uV@K(- z!Mz8om@G(X)p_bmaAgpHK znf1Peg}T2bp@JoxB`eZeHyC99jH>0{^iIE*`LV!8|J!R-zim7)%b(WG{nD@=5F_E2 z_SCDxDWJ}Ei)JF_?|+`zp1mqby_7~$kr|++%?C;q%g>9td?sH7phv7gdg|+QP3(Y7 zybfn!r|I&z<7Jmp6{bRyWNw{`D|j#(FXny^G;QD+9Tfu6U9vC^?ZNt^L5aXiCSd2i zu_Rm%mGpcS#}Bu`69a=0ZEb;JLbifzTAXPqDo{bysfKTXqh`-`e0`nN($&1{L#y4X zGm{43JD~?CS`>Od(WglHT_{@vD$cU=Jg^XI)@$DAsuessLG)OKv0m-%N#7Nz@kZi=*dK9%f>A$+gez?*`Kc`xOi~$qB zWLX}$&`TFj1P_;CUu$HF^p;Cl^#pdLSd?5re)Ida-y=Apv(4xA%*EYNN3_6;Z6-eO z{p*f1fz%DO@c{)_(2?mL84xnzQ69?S&yKn_qJ1zCWoT|oczF;5OI=fREXc`{>MKK6 zNQ)O~}6QX=E78Bjb7`f`(rYUNvWT#HrtGg^-2N#Aj)U=TxD1e>qmOeyf z5&0!6@%ru-3xJg(1d}j(^-W7TV2i1!NV@;k3fmyIpn{%N76HYP@_+RfuBM2sVWf!$ zS#>f$`JuWtD&59ao-UP}$ItId76J%>qlRzC5%v|)(>Y|A?Hj}2+GvPnR{PZc+ozFm zGIM2UP&*F+wHRJ~Ck^a%C>h*#=W;kpc6eVU8br*L8%J*$g>iYBrR(JSYJAd=5^>I8 z?#`rBHc9`*41rzw9}WU;qJQU-&Z}LY!tM$Da>s&-g`-5Binqz>6e)QaTcY064Bv;J+ z=g#b%xHkYJM)5q4R^Fo$@Ks-8(z@qg(2PCNWk}q!z+?sJ@9*1xGx#(}w0rbBy7sGg ziSC8}J2KdRnb=h@y8nj8-d1lUfju)kuetSfck}m_^lPmn+<=}I!#*(XZU%9>OWNQe zL1YDUZgw|%Yn3cqLH-mV%BQWCV&f_yeyGJx~2@J&jBvt31 zJF`bHmBd6)y6QWudlU@5TICN05`Ur6y*s)u4{^`FsivyzKKTz~S|qY9Zj0I7u+fh< z{x2)~jR1WAr>qG2$h&?w7vT4+@cZ8XFI)Oww)AKH_y1dLshC~L2h@{LQc*?43fU^e zT(h_WDwz4qTR3MrQdeba&OnmTuSKWCkfKIaygghTE<9C&DnV{0kPu%tIF(H97Ca{2f?Zrv75~0F57gMthvaTP 
[Base85-encoded git binary patch data for the added PNG image(s) — payload omitted; not human-readable content.]
z5XsT4OCYr%5SBISy){4c#jSxQ_2C!hcVAwTf;wFZWv`M_p6!X_uVtRtA#%VQrw)Bj zI;QF%yGgYTayeQMt3f9Y z&G2)pqBR0mRdZRWu_19S^E`{?N}DWSD~cHFyotEWAO0lSH)K3`rLp_iXx0hQXfE73 zk+NoI&)y_|%O3d#lob2A+2Ka7|EvX=I_puzuFO5^-J9?I8TbdHQ{xe|JU$&!*o8>? zF--aWt`R;*Opf$h!tTUf*U#+xlCYT-@ zH#!=@egzAeU)ESUM0mr~>Oy zymrfL;c-7{%IZ=_v!qkAcZlL${Wo9A_NsVGhwqDOs2hA14yxcNx=zu6a@$e_>5@e^$89*zyvt0 zip0-52i*@rLRK%O3mi2H#f)0On_Cs@4Fu%c-azZj)Q^l*Fjw6+!*Phn z8+%a~^?_1^!WC3|UA(mtM}6lVC*#>@y}RD-2n^o`+SFMB8BBd5hiK}gZvRd^LkYD6rno-QTYHabMI z@>Mx9gg|O*pTnTY9nOYrxQya0?TYuFl3&LFw^;v00?3^FB@!nAdsE_;{}$1l+yx9v ztr7|N0{oLkr{x|)jr?IbZl%%kJ-5(3iN|?KQL2oOSNN|*Y@kV`V~83-clp!cOmy;{ zZU`Ha=QhN3tC)G5CJm{bGYHs((*m1GvF|Q3GMO;G39v(2QD<1n3lp@S6|fdvi16^` z`bH!!N@MiJtV#saQeKOLge^rMe{(ClVHKOAbS6q|Ep>J!8H$&C1F@}F5qKjJbD~ig zU)jACu=iRL8x9yPHVWA8l2i_S^s@p|~9;wrB)yT>7^3C=U z`zBH8N4A_jw*xvatJGj|wmWAjbwpQILoANXdgsbH7OkOi;NZvG zOH?UY>_Irk>c~*g^dU!kJCk11INJg2Q7!CJrO;=O(mu&JUJU>!h}%!k(qgvB{^^R= z&Rv?3AAR1b9|a&=l8G*XlUnB-+^M}a6B)|jR{?Rl@Yf8qrSIa-#pcsB_94nEby><6?e{jtpo1z}#P(JH#Vw7W z9TAaHdJ2O_1D9qGamt-9F0F65s*U^5UF{qi^6BBW2H+1{Io8kQ_Z1Tdb>S_$bZ zyZPG(1+-FbxtH#B`U5{U=vrbCtm15J8lcYGHD_IiU@~%FD#sLNW}SsMX_m}u0x%~^;5i0X-6+=mxgVc zq*RTSDvhU)p~MT&BNMaskK^6==CB3_?sHaU4@805ll-ex36a+g_hWYSeyvs>pFbA$ zOiz)skfF@O2z&k!wzwAzF48qr{Ut>4posa|PXYUP+-Y(57gZ&j+|2c`^Xac8+mUSJ zjfFf zE-h0zUv=tJ?87OER2yGiEEaeNqj`$N4{N>vkY$g;<}B)tnrtNaF-1`AUJGw700U|9 zVRseh!IK8r12*YiamK9G;SEdAz~t5X16J-rE_o>Tyod&(Ox0nVAoBQIlp{`Q^2M=t z9>f5bSw%LM6B(ngq>^)=nJ8WMwHEle+8bymM4`sd4^abVSCKHgtPH;1Kl!A>5DA?Os5BUn(iX6P5}8OnZ0ZTkVbPN8PKN=&5*q5YD-r zi0tk*vpF!8@u)6JDf6r_gnkq#jjVF7&l%q;yyKMYRw`m`r@;+3oY4_EFlGNI&5bwf zj{5-8KZx9I2CR(C)ANrGgeUv;W5TEbj=LhDgjMLNimr~rUIKHx=o87)?dw``+6jmA z^ZNtUXix2mUXZ>-Yc^e=ef2?$ga~lT2By37SJU_?(TVQmGTSauDw#GwFN{0WBubL2 zr;eBi+qlt7BpZ*B-(D!5LLHKG`0UkOupd2JsuMY8^l7Q@{muxF>iGSM?$!3WW$dt6 zX8M?c>iD0t#6j0GyR|_2^s+%hgg}z2+BVb7ywY{BKpt_-=9tpDD7Dg_c2Wz7I0^0g zfwmX0%(}M1MM?RbcI1p2m1b1rO#T@&%b}z4*3%-BqX9;O4h2#p&%inCZ|@tEEXL;n zCOxTu500=Td4X*(VEbNAAsRT?=?&Ed*d^!Es7$v=`QB%FrTzI{2(<~QjH2Z;c`$Z? z^OV|lD247^G5#jn)-hSgyX7WzJd3spkyVq;ue@`yl#-2IGf4_Cwav|T-vMZJ>bZ~*y_ZadpXu|zL1^6O)*Y{vG-$zLVZ@bt{*5s_7-bP1~?rkkW-Vd_mWV;7! 
zoJ3xa7sle;Y(Hv8 z0nDg;$N6U*O$!tg2>(ldf5J(v3Sc$`E~|JvG0fLOR2kaO?w`{;Qni0_=>B^>yM^O# znj}?`&fNy;tLAEp?T^kF5UUru;~+hPsRbWG@tC0KQ)`u9jz(POx}@td5^7wWUd=o>3HA*o!YKejC)-C9V9Mz=CT5{7oe)Dr>2(u14~+ zA|Ys_Q%d)5PsBf50LH?>G)oQUQZsD-CK|uIW>j)qDYB-&&LEmn&6L?D>i%&={LH;H z#NC>DpFErf)n{#{VWw!z=nMPPqg@4se-fcMyyuLxqC;#Hw9gZ1e)n7bogNE^EC>8+ zUu;FM>7M8McCTuwCQ^K6!8uw9293f{Lt2VUev|HXRyBytj{&saCR|EbDl7c_O4q^U zYv-GZ&pUdpV=z@OOEoFb8wc`pE7j@W$1ILhi}QfVM}^LX^71)QkF_>R>D<~ob^OD* z0)1tAOH;+GC4&bvE{F}1I2t(?DCByuS-R}bMWp`1oGJ&@_LWNqd7f;IIH&2ntsS{~ zfAT!jA@*Zud1jnbhu|xlKdzFl4{|jH_3r!-%D}!c!&8qxUuu0hs7f4?on%hDCWryjy!bZei)qgM^vaB9cU}b*EwOBzB^EQ63|sC;EndpFBw&5Y zs`HIy;M8pYi)yD~P@Nia0^zU54T3yB7CAD0jD2&)Ur~HMX@J437ia)6Cd034sCBUA zIs8~{M95HCe&JP@b_0+HX^I9-O@desxwN10GM`mBB4@WzZF`1(>($r zn6!68)l3m{3hfH6cy%HX+YmCf3LR#{qxNSSxb!4|F^@tXjKtZD9Gaq5;IpZC$CRkz zxe(xl&aMDITT+43`lcjBj`#B}jr&AbnC?ussg*|@FS&D@>(pwB4MA7-!;nuWJ(lZl9acDDRTe5E83b&)H0%{f4%*l-rLqgAj8piYXL+t zYpDq%YdcNb+COJKP(hT6oW%evZMeqsTYa3_j9K?O)>m!ZQ(l2n!8zBhtrp6=p(6F< z{kWP<#|8`k0M4uKorAs)|8`T`LpBT>Fo_)nEFj{nh#azQ%|G6JEx{D_JI> zZ!mClL1(PeB|m$+Pv*SgRwHD1u9NPAl~^;D$t};-<0YFWjXuyHdgHnn|J#bHxDtY?73(Qc~-U+2oM`+(l0L6p0^%Z2w!?NLu3b@Kfo}s zs*Kuy?z(Nw)fjPmEJz}N1X{}M@rR=T-ds${UP8wGAsLN;vvVQ*tSdF2_g~Ak%&R&L z5sjG1uPE}WPLJaaT%JyTd`YU5y>0tb{PGaGN0K#_z|G-FH6AxfqjVw-o{KO|OfgDs z-Z=aTb}^3KuPah-75LJwZ|a2Ek{m3>=Is^YsrI3~tll^IQ{VSRw@xspU2;XNJkYQb z-K&|vcQ8)>bjK~0+9|9@ESjg35wq1~9=}H#Al`@=ow@9iU*hgs*sW0FN9{@U2xrb= zau!iGn83zmBJPV#;1w(21gs+3AXOQ0nuf>8KCJpdVCy5bcWSCcsm*?JZQ*JFJ5VEs zSZIH=Y@%850h3d{rqUj+p&EEOG%NN6&i{N6=T8TmeL#V6YID3MP<~$~M5_e0uBIZWI^Ns=^ z#N}$u%KpB5H(g?&=a&aeQ`1KUIjueh=^jgcBW2jcDJMz=v+bZCqrHg)sc$QZ9P1f$ z6$3JOBT`_&!S7+%#z~!9Ax^h~zd?%pJTPN~*^0gwHa3%$<+7WnvhP-TSV^$yR zd_~9!L3352(?;amtmL(bFG_o?Jy=TY&uOt}g(a16(rxWFCq%~{K#-DL*tPHPD5@43 zuswSS)D0i~@1<}QpatI1Dv@iL^|1QS*K3-LYm|7uNtGIp1U=?Aiz55=ov{g-sz2!2 zUk?PZlPxihZlU&K!DP+oPd22I}qk6NR0&Lga1Jwp(VENKc zZBJM~E-}mfm53?`rFxRcW%Vuv)y8B&L*9M_dzMs4_EUA>K~sa=2w}fS@=UrWkzfKg6TuyB9!C%pk zxXz@-7~JK@OOo_>%OUmo_&vc&Ww+d7~8mW^Vo-|=*ZIx}g28Nk7 zYVX3Etv=KlP{gzswqMZvT74!B&f#@X5tj(%Aa-ef2!444Yh@#QC4(7b_ zfJte!8MtEdt#c_+x-_b!*IJWVQ4NqZY0v}t8NAQ~2AZqpCCc&ZGp2YGxQ?@7Dz-Ea z;PEL-K~*tC&{BLkcZu%l^gG97rWnX+uwZENm`${^G0D$rt+(v93Na} z#aUFI1Qo5kY@{rlIESI-iA^1U1Lsh9Uu3i9dJ7=Hhq%0e)?7PdC_i6W+5mL8>Qp?Y zlTwCD;R)cIBnB-f_XQwDP|T2wBT|jZBx`fZ{c2iAL?O-wZO_cZw1FYVUy&xFF^-`c z@(Dd(X)V|hhKpU-2ZL;DtF5-c6j&BE=c9473J5*5t0DH746a)GFl^hkZt1-9+NL(g zO9w+5%{?=Y&{g(R+QH88v*L*7(6AlXv`3SuoPlBQbPpw^@2RXJ7EXoq5TtlaP>7jr z`N)d5NgYJ`{72ft zT#swwBurBaRC>l5A|qw7YiBE4&*&WZoaEbMA>_LuW9#U9jYILggA)|2R>?Ff()q)i zWX-kVM<7>tqnlQ~?}?jc3udV(n z`S`KXX)x_JK*;1}X*qs>ts2-vMR9v}YS6x{d^Ldg94_)1A|YT0rV`h6tX+rc+hW7q zomyU*Wor;oW(B#|ytJbYMhM0{L7?Qf@YDJYMFOV;QF=*AM;f9;gorSvo9TJY$Dn8@ zBYX-1qno5V&rAtzYEvSoJIg-k>NVXuiwNjhEt%^Xa>=0#`NA|{44SJNTh;v*4Rmfz zB*NM8ZFQr5h96W!5DoTiDe6M_P}EHnP6O=FVFSPKU}O>p2C*Jo*?dgwF6} zEk?*5HCfi*G)1^(*S*&&#XHkqqsJFB#rqn#KeEiys5B%(zVn7JaLq|rb0rrAyth-H z-ZH$B)M>8h;F7b7f9o>)lo@qMPF#cI>XROnsuQh@Dzvyi{Ap7`mPjFvJd)7q?G0|Q zzw91RST|^{-&r?Gc1;lkR~K~psp_nx9t6)enZ8_w!ACD$4!7?CFh{laWX0v53YHH< zPAFQB#tpapyFBM_n6n+kAJ^Luc8(i9s6J#Q8>ek_54!l>6%#_HX>Nq$XRUP^vJoLN(Q&@8?}Iuzz!J0;S=!#i-N?bdyQ#Y@Q= zS=8*-!a>oXR^vPnFXK!1`}!kzb2}64Q1Q*byu5tyV}+OV^Z#~-l1;f;1Wz54RN1Gm z19=&xZ-?LOzy*)VV3evkBg{T1=aI{fZ^-OLWOCr0VETBRg)fNnsFg~~lw88Cf`sko zR!-%SO|mH0*=mx8bYQ-r+ggNd!{_2GBcUOTKB{Feiz(Ya%p#iyuFq3 za$i`x)Xs$z&L5S#1gNwz%19mVz288Cpi}($pEXV|azLCnKS>bO!~fTt*Fy6HOSfH* zuv4w!+VX<^kjJ!AGd60le2mqWTRP8xZhwD} zRyvBG(UmFjnJrlkp{E#zR@)p^<*vEbGvG+aebJiU^!H0o#ef54aWC)xANJk^s;R7L 
z7j_=76>U@$1Z-_ZMWGd$1=8&-Dz*qH2&gC^2#Cy60BuJmXH*2@h{zC`=ime=2mzVL zCy{*1lB)r&&=Ff*zrpaCK7zaK`BZ6a7XWaQ z%)U>45FyeWte5qmDj8c1;D(>$&i#{raAzIh z&CI?Q)%^}Mgr({mM{GE;Ytzt6&0Z+5$iD-#_*ZYUd0cwy736*LX04rT6O+F8@!ae{ z_^s5=Po>+XgM4zh+dy{|Q35a~t3V;q{+`vl!6KC#O$j=@+ua#026Yyl56};$%((dw zrgL@Bb1i5%B$&|JeUJs7-5YlAyVS+566Ab8BGG(Cpn2S2Lk2eZkj3361AE(cA0R6_`b;N!>6+(+%=vN7+QGAA{2_iq{WacF+DyZ3+!>(9QOwI zE}@Fwj3P9?>OHn?LEEflYf3`eXFsjOeJ$w@kmO%#eP?WB`@%s9Thw|y_Iyv0hi5_@ zCU2Ad^aX8&4AYP1W0nQ|*2oZ8G(<9yJ=B8LjO?>%V7{8?Y1)03SUEPHtaMhU5+vY?gowD>(I%t^COK*x17fCt)T-TGKifj7h||yt*QQ zO{S6s+ULQN#(${~j0&|%&juS@f;bG0vwFg}PZ%ZGZ^JyRSU4R#O3%Cx1isE*Hou(l zG`GK9ubH!0`*=a(huYN^p|Ltnce5Rxx@*^EI5Xy%WH`Ool7wX@I$(>yauvK#n~5Xf z#9A0--Z^q7M!;?4zR%RR#m&IDH6(FoBJTElmL%sa6q5}IUadztd4Nv8^T@Nmt3u=U zVe9s7QdQ+)Oc!`a#}3sXJZEQ%!Oc52vN7&Ykyr&T4q;U$CLzKP&;{unnn?1ep+5ih zz0jp1|M5<-&=uv6wo?JiNR;-bcUQ%@bZDsBPoKzUzx9R|D8fq7&HfAH});ltr#12|w} zLI-TAAqc}^HKlDh3WVSGbCJ7hd3prlAXH2}SL6lD7~1&Id&*SU{dZ*{wk_3=;p|_d z?piz;s%2}mJA9Mc%H4}(%=3v0n)8TI+$Z~CBy-51SHs+aUX-6iqr_D|d!_QO6aTxu|GPSE1 zQ%TCe9j4G*{e~&vo*JGW$)!ic{^?G!q&C8G?7x-Vm>v3;+R>3AH#h;-9FBUa2-3aP z7GmbG<3?>vCn;;mnp$xZ)!A($p22<+FrzD{Vq75nJD(%?(@+kLsHi2Slwc&oDXP-G zeeg_s8)#A@oA*Xk`)#Ow)tBSn@tr=HSi`-6-&ddpk&P;~o*}DPtJ^$d!?9KD^-KM$Y{rhiBbiCiNFd|2$i$7jW@k^kYd; zuX+)_7UFORlZmkj9o9&L7Ixd{gr!E!adtNFrEHD#%fl@jOXq^vOvFa;?_?G|A0ZYQ z6sEz6oK~V4Uh;H|Mvx)2il6~cARm=qt22DT-4p_X&A@Al0nHdPdF#CYp-uOu$r^sRIJ&7eg zZGN6j7iEagx`yuAkY(V6CE%`8qZ9ASx-gZn95rJ-sSfqX3Eqy|-oy)^i&WnXJS5_B zPocza`~z;xJLm#EivBr7!t{k!)~65q5?jy76KH<+A|bd1l5i2$c*fi}UzTBXN^OT{ zH3!_1`~Jfmr`!W@de^n-$7+*K-ikEsuCm!mmp8fRxO%phOKo{nZ}lTlgC1JCBmE%c zdL_NQY&DcC9>m=cDN(UeT>=L)VKP6#&ja+hb47v>Vn#5ZVzh>!4IB46o7~R~%IJSI z?0Cw5RvK%ZdItq79NnePn0c%8v}SP{jH-SKVN5eia&A<&4@)gJrb}iWcX=lj%dB+o z%z~RV;m2k@og;+`TA>XJkfQ(n93wQT8Oo9t-&naPW|IYk884e=d#XGSeA=^V%p}Q) zqNVPqLoFeDY~?lff=qkOfzx$_gcG7?fv>KfE{2KlE@RRP2w-V%*?Z@=2h-|dgZDF> zH-4qxN5{F$wb|6!E|l5hYGcwq^B^7Q8$Ro6dEfGId+gWq(udFuAQ*0-TQl7#hCY;> zzK@PIOI4uWytR{@M~?`OyT3H&pj-SRRjUUEcYj;1&0E#z$?7g}gi;Cq!DCUciquml zcqa~h%UlC+9_^ja$U@6+0p&MS2s%^RWcS`b*xl(BE#lH$tCZo&Fim*I<~3KTO0v%S zsxgQPxl^e|ma%f-Rh8;CVaahxQ(_Q(Bf*ox9l{-+B)=B-Fc)yU&@;^5+Lw3ZnoNEs zBTN6XifaS~Do*5*3N{S)fj5yBZ)Ol~0~IX6P&Z>;nGQxhm;Q}mNXmd+O7snlU&iEt zlr+_78A02nt6IJEV-<;I_k=uX>wm3`Ra+70+36LQtVYY2)Z@zXjQ340Hmb^q-q(4* z2xv{L9jUkALgD-+;a1Z+kyH9ChIbiPsiW9 ztxlfc-N`HZd7Wqq)9S@a$y#CDUoRkzyL}>>%dO(|%GUK`+Hyl6gOZvT zjQW$%)P_5>a06`i`tq=~+Eyg?Mbnzn=<^f@C(QI}(3*7CN-?n}ku(SOVaW_EwL-Fa#EJoh~P58bU(+Vmu;THNEi?`@3lGia|#bd7oO zu^^pqPidFjYv$H-7iuHCTGHX@ti_uRhEK?+0?J)JKtc%Dg9VNd4Nh{ZM6wv>D-~=; z6TMUZJROp2KO=B!fz%!9jB)PnRVW>a zdq2k0SmtnSRCoLef&DR~bdLD*quVHz>XI8zy0g*Fr8ZhYUn2!t3Yx!vGEF5aJ7bPV z^MIlG2l}IE@2vh$Ee{!4+%syy{}H`UU`ziXG6XNb<4d>dB=;%#2wEC|%hi)iY+`gG?dgamC7-Iyh z={YcWu3slQ4P){*xDuA|#%*s0gzFFff^=d^pGZ*7&`9EX;z*jcICSn}9CW*u`_Pcl zxF=%>3V@KwkP?#(BD;7o8Xfcd@#;pid1^vzYB&Sey~j!4YrU6Ez%bp zgOUdOea5pJRx4b*!UUk_fkq;pwO04QG?djZ5tc&ky9i!zl+X+QMh^g?rl=bJN56g6 z;I6=3{R^%`#9R`=Mqp70vTMl5Jr$;5DYGX+dEH9g@Fij^1A^ntbFMg;=fo_~)(p9yoK^$WXVKTTKk~2=I;N4WA06U!)gh!Vw-%h;G zwT4v0`r*)>z1^ud%sX7mj-_$$B4@jYHZtcme__+1RPGae0PQ`*m++ zC=W51S6uf%l|Wk)cKT=hOKx5RDOx-`B@S~Oj6k$0N;kv4A0f3>KrzU)?SR~PItZ?H z)EOv-#?~gVVkbc_Lk5RB;N)e2tdc5VV8&tfGjZY~RGXzGD}fJF2eYKE$dyp*$Y!Ad zR4^ zS^992cV_m~!}u+=)BzCAb93$&V!m$@3T_RmZ-~PrWSk^4m|-wP;+qh+v}o?nmBy!V z3dCW7Dls9QKdAb_s~>VDFj^=C`+q=P*r2mGjZqO5Pv$RsC7mvqn z?u`t$>35hTTp^GJ^#(IJFmo7Mvy8ri^e5a1Y)Vl4u9;Mifo0%mTb~Oee^S5H;6m5W z;{@z70DE-&qCr099NMBtl5$Tm9C`ZB$N5z*M0EmQ+%+ZQTht-B4EqnQMrdRpQO5kB zxHm9g#&a!)J4b`3mC|6xvF4!qO+ffus|>F!-ah_7!eW$NeYLc688!kXw8-k!rDIf# 
z69{)oTwr`tIe=)2o#btz=*0JCUDmM6T)`RgJvZlaHb2WO*~D^pgfw4@Pb;7ev@7a5 zfs3)tQEcVzOMuI_$YE5*dO86l5G9gPJBTjV#NrJ$ACy@2HB{J___G>4RoE=Er5=C* zC52kD``f|A|8u=veKkY}_C4-=fm?)2;(=6+!J%}IvH2xrqO6PS=BBgXCw`e=eiim3 z#K?9s!K~^_v;sjJdet60dH&V!1c%96;#~Q`F%Kv`WHWlO5|7hD_nN2 zG|_JKVKf}{2~$d>&4%-}-M)U#Z%)$Fy$rP&n;^0#o%msc&RR{k)2EULT;dz%O<16gq=n(ZM!mnC`bwqYtvs5vq^?2@bfS6iWZdYO(;cla~!tDQP{ z-@Wz4;Z1Y2zFbuu`n37X$>{h%P$`2CJfWN@g3qDy`*&wxpR`~GDv*ZVw3*5-XF5L{xiZu3T)^ znNvx6=lSdcsZ1d9!b$6XqPu z{m1O!eHT&#at`{c{XWH~y21Jv*2*;J1r-t9sgp(XsXjda0;x))X2afIAFTqO zchP`K?k>66?}>2|BwfC#dxd^;K{VMPkjix^=b5;tGMLrquEJqKuL7CKaMKyIU>R*K zgM$|DyXfxhVlkD5Guzj`chqwE%1<>%oqzYu6YC;kySHe-Gp7D5`f`BjxOHr04xE7&E-dWVC2TGO&GK z_il%_H`|UcoG#%UWwIDD!J5OTKXqRF&5wjBfPa^p1lM@cJ(S@lYqglUx3pAS zGxsEz$tTvIacIu5Dx&479!goZsJeNc&@jmpkD?Q_H-yBt;d^$t}# z*eovVkHO3u3ms0DOqhmikRw%1u2`EMQ{X*aA#r!0=hsB)wtR7O$~&Ry$oczRh-_TB zg=YGi`F^u6WrgMW6wC~qfHbk$g>!L*LUqDj+4Cxl~=ST4lj*}w1EARG(FpJ-7sSX4vn#U$rUA|>_ zP?^H#aiHj8Yjd%u^fQ-zP@E*vkTY1BSqcr@tT_SF+>t)yR;ov6^;~tA+bV$l=*_z*#o5DA>iGy1|^>`_n72jkNDz@&b-eTm)zuX zUMvr~(lA&ZUE3c88ZA?QnrR!~l~AZ(M+fleW$o^Go59T7ki#47GMmG@=+kA=vBalG*k?&sf!DKMS>F~~Ec`|H zZg3)gTPnboaJJA+-I(1}RAm+ny?Z3ig_y{PyLUwGsA!S#?5nwB&l`@GI*X5C6IxTpH$^ zLzuj;K$2ldZW#vI#AU|kg&hL8 zc9dRZ7xT$<(V#%(nzc_M#_3v5)X8EMXVx$*BeHc4dvc3v=nLu~2_|J|rnZbjAK`NH zd)&iaNf5MS;VBU^j*lqJ2zP`GcWGoK7 zlr%O9O>4<|+h!$+FL8Mbo5(A)vR*TTFm&*nJGKEdED#s zQ1dI5!OD1T+^cK>-9pQo{HcFjUBRv=&#RF6E?wpF0=Zs0D%X4mixFaeH#7hUe@85R zqCE0I8R0 zr_%Lf#k}(R+Z~E)-ZV#4nTWv>7+Y^y9PXHBonxvPq{;iMt;|xoP?!?~;gv#2waIht z=?F#K5^cmcsmEc!tiNr>ECnrvTXS2-G|+WTWTJ3+P?UGq=oE5PQKeT zfjdhfJG@&3ssM+mK3W08^ayRp2bGd^cI0~v*0c@v6)HU9k8mH+yqHRDsv3B|k_Z!8 z$EZ%*Fvd8-ML=OOuD)q~h>0yuG{7mg66|rrNvLG^fLw`+%WG(jY8l^HYlB+e6wKk) ztq93CvLWl)C^}DVll0un!P&`u_rUx-QH%4y?9tx=S9tZFG9&3ZsK1{Eax|iKo1tJG z+yqYr=yEi?NX6yU;OCF$rswC_WhWL7LoW2Wj^B;(fwi4RhR*KYVJxpp@mF1HMGqHLV9Ten;8X2>D?sw~wG#hb+UAw{ps)C8|cVJ+aQ+N0sc-ZC;G#6d{{e z7#rXH;DjK*gObjv7c0_Q;$A`eM}g(cYY~qqvU`zSSwq@tw-F`_&N=kotq?fwmo43w z2LnH`X($8Bzgx}pgZ3ej{JCmS^Cl6ZQMfj73)wV#8JD)dp}!%`Fsx3H>NfP4BuFe$ zmVR1HUu@_h=k;LwSf-HyH_)v~;f#t1205$$u=2Y}{jT(mueo>DSJ+8UWPdc)>Bsdo z^80(p^XC1ryzm|UaI$S@ZL*CMqcQu-5;^t9jJ#~OGmyJx?LYiA#u+kn4Z&lCBu&}W zJo@jtFFlj)c~Y}MB)d0&84!J^3O2x(r!&M!3c1O9BZLwGp-B`1{wQUvi3PEfE~7W+ zLtA}IGnDb|X~^8eu9c9jVAb9V%v7UEu*_^C-f(+}3JYIU9_lpml(G(FqO@COhRjsR zOAl8bbT#HI8RJ9*kW|pQSe9Ux#+ce>%puRAV=qFYLtuiF6-uXD358=B;(f5Biac@X z^F^ar9j^8}ls!%T>ZfK8>0NCRx4c_+ZEweteb;>fRA6?6f9|XxS1-udpA_djvkilh z*0aDw4TS^>!NoA_eFeyFZOUL{P&KQ`=!HJqfhvUPHuBW#Fgq8ASlkrKY^XbO!X`{F zgVw<4VZq>Fh?R%jjah4PgWJ?>1eF)+mP}wXGeC}Ec^`r$BQR9@-R8A;1NBY^kYEZs zyM-EaeA2pkq=@zA+jieqU5^||>w>6b<{)SfA%DCgnhZpAd{L%1FrH*okMj2{8ZC&m z=0vLM*gsb)>!WAJ0*^09-qxp1ZJYHoj4z|MU6@z_#e1>_)c5)FXfsTGm4Aj*wRguZ z5vuOp4J#lDaC;yIK<)_AaeWTe@TUh9Nms{4u4#dS$e~VqzcnW64emQu>EdQ!2s zli}_|<#;yIn7nZN<~qbLfKf zS^{m~YRqHL~ESx0R1pF6q zzbLNt0rkIt#!b8JRyAM<4|o%)0;JqHu=X;hcP+obgh)5Xd_PeP*71WM+VZ*Df5Wa5 zur?W28VYg13CNCS{!(=mQy!s-g^Pj%0$x2DHNl1b)5>gdUo%1ucdnHZ2K-s23ykVp z;+l;)Gk8XkM$k^kJRCWLEv^df>v8Xi1y7ik(O8VB(<*nwahYRMxZw-8RKA2!&scw| zhuF7Hk`Y2mtvHapbL_jT%;7N`q`geOmjWWiAbQ?}rEYeZ%1*d2WAFol9M`NAc1U$I~XU55$^K0x|SO;^zy|4Kp+n2Yoq));nnUnBh8qRI0R zqEUp80%JA|@+_VQ%@dkXptQwNw;iiLKm^93z^Hn0@gLY2C1H#Si>;-;hv*`G5Gx1-e@brf3H+$d$ zvxGXNfq|Kc*n|EN&Ba@=rTg{>WQsg0Qw?tT4}a7WU@G%JSW5u<{gV^&E<@hWevC!F zkoF}(D<1FAQVmQMNq_fSabgLgmn)F+IzvH95xR8Xttv`-(L1BC%pjo#<*&K(Cw%*a z(WxffMG`(uT~LS@@ha>E zhK@NMOEHqJOW@%IyJ7__)L{ z8xIv1@`4aANCyv&78fxZzwuKjp!dTb)|l1WHwb^9(4IO0n{>>wBR=gzIsNl~#mJN1 zknZPnL->jbe#$7;ujnC*+1Z;6?UsJCUKYWe z)&(?P#?0B1)v#XPYkqF{y$8)*CCDk&Q^w4aC;m%6%n~M{Kg^QASp5sL#HTif`PS&) 
z9G=X(cXh7%AFn00Oj%O@=bv-Sm%dxp{7L!YvKb;j&A7B|(#o_C&lEe-R-9Waa%JhV z8EfXK2B`-H1YDU?^ZVA*k_)HKygp&(ufIt0>Ge#;Rju|C74ObQ&)caH`O`N?7)N%* zJ||1`JrcSAB9Eb4f1T@OZ~3Na9^cyKlVJ49#w|{X8)$Hz!uNG);}-A-BHGI5*pckt zP1~>=XO`K>2C8@{UIXqHesJiA41IUS!fBZTeH!}>!$%x8 z&4HK2Wb@`Ic>SrS5rto$l2(A?Exd72xIoq-RGVRe<98MBSqh=!liQzfX7{IGl58uC z*VP&cW(3RdsV90<$?VJ_da!pw^WFxB{O;cPV4mLYgZ6(!XRk|fJCko~Ep!*atr|ot zr(>TGc1j2ZU&Qtg_wkJCxHB?w(gJ|EQDuKi-YjKL;=f|$h(~(aS;S~Na8(&2{>KWo zNya)18%eRr47OTQ;ID>g&3MuyLk`>Nc>K^$jxRD?g|r*kTbk6x5n zyj{wB_kWY99Z1?+w#wQjB14vDw7QcHrOX<%z~Q49GaW3|*EP##L0* z4Qg+b@lA5mUa%dxyack<-Pz3F;6 za}^rhL;mD^!ug6*K7wl-=}<0$-FwLg=A_OSP9OfF1a)xm3fI#p@14fhpWF}0*1cZc zXYKQ7r&1d(NuF3m;MW|Ad|T*Rmn|3CewOU5R6O+hWc;gXl6j&E9Gbx;?<8{hM0S_A zV^6k9UaW?^a)VQ1sVRellZ{^?darvaYBgrd{bo$`faV7ndH9Q>xrk%W(-Ft&X7|_J zadCgI(|oh6c#*@S{$&K8;*gyA^!wZ=Zh49k3!o{&pT?3Yl7Lq>?fM0KIz09|IR3J- z6-&q7K&qM4AX1`WoU(SI{NY79A@a5jQxu6KLyMLy32Dg4gMHUFmX$>miO0LrC?Wm&2!}%Pl$9jir-RXG|JsFP% zvKc-MZ9^^Qy()@bHPcwrIaIa(#T!GLM~mR>WUD{#^OMPW(7|#;PpG5)CjL~7w|1F~ z#bja;oEkPl;b$}Q_NbWV)Us&Acc0@2l=eEX?rX`%q3`f&?fcD z(BFP3-qg(O?KI;jC_QEHzvd_NH1Dv(_HWyQiDwC_*~#1=j%@_yTIrBL;D(v>RJ(AQ zb~%ptXDIZtj<(nLdRJZK>Cj-r9Q0gZQ)A_A&{?jY+~Y9O*)&Wh*218dGbUMg1v8YZ zcq&Otqxrr=aT=7~dMy|k+Axt_YTfGNMZZ4uAyRAJAzxlIpWW_O7|PRos^|QBZ zF3(==baj5jDnf)wievw^i@7GQjcQGDiF4T@l#zx~;MC5a5$-FJTz0kE#9gI}lqu>-GZ3SnzyNi5ZMlZGc*a1aR%$0{yLsa=t zVX@l>+aawG2YDy<)Ti4laUR_62VvbOU)Gp2!sCNaSvJNF$1|Sy5kC@BY5tR}!;B0Y zuP6)eKD-CHtaQ401tQ+I3jyr_H*c6D^Z|4smhZcWysw!Yb7|{-`izAcI_R!cY z{-xQ<)u&G^w*eb*DB;&eX3wK$!<;NNWtx|D*6_ONdEuTt4eWYd;u&`@PLuuH`<~3u zPr95Dar5kfO3BLjI=f6IY10>^0hSi~vuN&Q(GNCU-s^m=-u9zZ&6DS5H`P#y#n$H} zL`oPbbCi)|W(udrKar}g7{|-ihl-Y=p#pca;O-PpwzciZ zFxBUZyCXfB$7{;Rpx;*nKozL$4cJvRgrW+9^?wS0vQ<=@1W$w(YGIO=<9M;>bu^buM z?GXobakKIQX63xBZWT5wC+4GBY0Pr(tu_x1RISh8U$`G1%4&L@+SG__zZnT1JmGen_urk^_R$rmf#IK3PS_PyKYl5*>kg3TZGmI_Jn1}KuI+3H|+Q6TWxmf)vH&xB8two zO_v&Kt&IK3KmAst>D{Bg(r3{ z@WvVRL}bt{tEw@B?u`sO@zvR>u|A(QKN>dhXSkWGFmj#ki*!3SpXe-->vrWd5Pc34 zV)Z&b^xQo;3&DhdEg#{^b7L2*uMF7NAB} zo>MAntn!MS*^}Y$YO8pDrLldPwbkPZTq)Oj>FH8mpH{LWsY6GydW$&0Lmzc0-VPt` z4M$b7A-xY~RxNx-l}1Q*#;o+o^do8KLXh6AWgSSaOSMA{O&=P1G<_eNmtNz=X zp{s)R+#h^JL%#{!wyj=JW;iHIm3DsnAa)ZgZ0x_Ax8C>qiD zJW;-^a;;VKJTvyo$;>7tA67vnD?##8V(6#sZ1vW*X#`FFZ;_qE)W&Xx0 zwvsDhum~q%D&49kkvyPBq2a6Q*3Wj^@yHo8izEvD|uGYvwJgFwC%wf#^ zA*fBwm`9>+yLX;0zRqYqa&~5WjSTh1YTrvL{yh9(v9b%(uBeV-ZK6iaI*<)64dRa> zT~ELKOv_?K+Ft(akYkbJj7`VT8rD}jM3xz`JeARI(c8e_v&1N0 z2gvUF^*($vbDCj}yw*sm&*I*iH=>@#^)8+EP0t?D7F?9lI`fV)^ys1ZNODRgsy?2<_#~t1EuZJambtMs3h4R%4AE zrQY|h6mOQJ-(LV-H{G(BCi!zXrmfB|Luqs8P40ap!%rHJ8Rm?KQ1>C+i#jzf9@Ad- z(vsK--ZB@OZjEysQ)6`J>y$}RfUY+LleR2WH_T|*IqfMl_kZETF4^fY9L$#u=LJ~i z8RobgT>@@Pewb_hyofO8U0GhK4aVl$yfoKbB}4u)m&TSA4jt{5S`YF)X^y)Odlps1 z7wVWwMBcwmGcp@d@4A|r#~^a*R0q?qpjkg*5e0fD2nMJaDwNJjK9`5V*H?RDl z>kMbtp~X5rUrM`)gPMG*a?7tx?$6eFv#H&+__h{^Cn zL}5?VxR_kas^~5C;cYRNmd0`91aM>xHJl<0sUm#j>z$=5aPIavN1T3#=*{*$-KS~y z)!tY{_n-snC>o6(pF)qdUtiJdC+htxw8gi96KgL1F<5sh)An$Gkxuw&C7GeyT%)63 zvj>SM&?+U>TeDnL`&Ya4ma5)xR`?=&%{E)x?x5$>+Y*20(Oq-IBG^^I$$6pN)oNn0 zaWei3*PY^eATL%nqHh>2b^pCFEOkDeRlQXO=G0lV#L|3Z(hsRzX*S6JvVJ_-(E@u#PX)=wSqS`Ygw%x1e z&Z9ZOq4t#3jok3&=1-|oA1iyFdH51*5>8xeDE4WdU6tQRu8(w{%m-we407G79rDAP zi|pNX;JHVP7vg8d8D`}2*_(DaV*(w3Z0ZM7K z2vzE-l=C5O{oqFPG;eUO3M>O()}{LM`B=-uxx92bQH|(XXWKihSS&F(49Y?VhtlD- zC#^xE49;GWp?VBDHtp2)*bkNMtDo+!QB|&SDy#Il{{kFX@}P|9DqOmu0|`{>VkK#8 zS&4wra5hae{a&_sf}iwE6#dQYp2j}>c(n@XjFd=JUD>PwUY~InPNgM*(gG}O$=J6F`Q{TJg4)0z~e{04&LlvtPtWs1N{_(D~=DnKF ze>KGc>R1qe%2r_sTVuuoOKdc=r{NQR!kC8NE%>(DR_lvT8wuJ;FMt7I4h_jw0l<>* 
z_kZ-8sd4-rij)jR>jE>o`mU>vNHLZuZPM<40Bue3w7pMn8);QZef#l=j7gBwUh-?+i++g$Z$?$l(LgDa_+wkMI&P7z8vfu1+M7ri+b<`WH6E%^#O$fxUWi6&lrj$q zZy+GdIuYzo;k=`ZjhoR5@~joK;qC1HqH5iSB`x&5yiQP`*>2{ewPjrSW zrTxNCFaL0*$AGN1rs80=Bcr^bWW05tABrf3SgdjSYmBw~(796{A0GXQdo`Azh2B;h z{mP*lnNv@Q@K5t2Cf#b|bkC?Ka`?H>NP7t!K(lA9amuN;!JD{?sR56Ih=#}?syq4d zfA7{p;#^HZ2+|eidC-MlIVfAYmGyL$l1S62T4Vhqypdsu>TowmyB#sz^qakDOlsrX z>SKkn`8Oi6PC#U{%>2la3FEj>zglF5_|* zZI1I)N?11MJ@FOg&JZ0ZcYct-mOqj%Q0IM?W%ONP>4z3>p=Tyz2`?wqE;Ey2gopJAWu0(n%6>R;p!tj-1IJE?apU56M3RSRIp5vcjNQ8l zc{a+gVh<#R>lOZ@EdhjV&i&d~v-?d<)9&G3Na{sInT_Y>|CZbvmjV<)JFx&Qd-ijCheW2v2 zzY6bMhv&-vtM+ZX4XZC4(fcTx-t_e9uY?zl^g8zI36 z2(DL5J}22$7N;j{K8gq-fuC%@2NReXzru5M|5cmBn+C{UsP01^ryWV5dtnK_o#Gk< zc`Usw1icXs!ManZuE@sl>Np;ODkK5H>6QXjFo_i67Z4$YKcUabJ>j=^z;h#R{tF28 zs#ui7aUg;7wY8pLR=bXbhgu&%mHQ9HBJ0Lv#xDUl9^!0)pIw~dAQ5q-qLi*~=3XJn z0}?uAFpT5UtMFW{)sf$HXA!b{pOL_-5E=wlg_2}U5EkkJ8PA@9bZnK~n<@-g z0ml_OKeS<5hy`>Cf6yBsfgeNTsQ*FRZ%1vnMNQqH=gyCs%vj$Av z0l|d`&!;Y@CwtdA|E=Q!%xUaQY#@G*;ki5iRa?@Jf>iwUj?wk-Ny8*{4C#ZUaw|MZ zPTe5`>vXXS^~FA9xh+Si&-jb0P|c&o+KtR*Qs)6IP9z|E70jvecoT@7@Z1~!sx$Z# zfnl0cK5CeBGN(G>(u#gp;7Nu~O-@3?WB?pbC=MXvW>+Yegr%`SaL-+2m`+wj3H$vb z-9XE6>PjK~KMd0k!vt&M55qK#{lhTf3=Yof{xD2qhV_5eW6D@SGD|siXzk&&-)_sB zNWd-&1@guK0Jm-}JA`uS#>~M?qmY=#w-bID&A7NbQ*O4X9f~X;jgeOjA77Dtb2g;5 zyvjO90;|j#YC}C4iS~wr&n9>mL(A4%El>W$mPD9S2~-gJ>Ai~6*gb;E6cn2$F7JDA zQQ9ar6Jae-7w&|d%7Mb_BF=N0x_ha#iVXHl$X{fsN6b=%EYCpn*WpWJ1cW)DY#MjU zea0Iq3r@g%s`P$RKmS(wXNsER1zV`aVpsgU9OtQ$a$s5J4zOG>0=f|a5s*shY#qKv zfn5C}NM1h=YcBDH;fsr>0tlii{u3^p8qFN|`7|U?P@a1C%i$g`a#j27;5^7fNg6>R zR{Gmu1u+uYJXUcfZrmLxCqYYB5|(e830?~w4<+)ibJw@&J>A`2c+j(Vzq!nz8sK_% zwNEzjA|w#Sr$Wj+uFCnDI74qdd(PO}&^@3r#sbBOmWLCIRUu_gn^nSMN6um3+#6ql zB@8%hcz7eGQH0-7w%(oDnhP;W<%iAvm~1n$|XvyQ#LzG8XM(;lCnGRBgm zvI7qi23l|7@}${OS=rwH0?*UC?j9UU0#Z!f9?R>LGGQqdTBQ`WUsAh}L|p-WbB($K@SC*^@QJFNEdsH7C9-~Y zQ`!!&$$D%Ce>Z#|*|dt408!p>S&QsLsTv-DTPt z{awk#a27Kwg3jHH1ol!stDa;*qDo4&uY{wJK= z&bYZ5yx}EGk~p@nsfD;lmBpIbSlnaW=|g_GF6{9ln2%l3?2GRo?U~fC{7umn+G0fF zp#Oj0IFG$UFtt{f(01!u5pqZ2q&DT7N3k*%7+48903!TP&v|1~Kw1n!UUbLu(RT}z zfFYD=f&fq>&Jy?%T)`dA?2N{Gj1Zb3ekG~WG7XC>;Tc8PEZ9T@jPCwuC9^H6anw4Y zh?Gr|1h$9Rh;X=fnRey4?+7=(8+E~7k6;S$yp5(CutO}6AbWtBpc&gksBLs_R*uf> z7}PuE`@QGPwGxDw4~_2r8ZTf!5^QYIHauw@tiKUGs8?sgSXklPNt+FY#8g>zv}c!0 z@cRj0(Le#sVg)w(V_V1p@ZQPvEQx<{(1u{|H#kPnw>1|3_X$e9l*PcX0FD2z5%+_j z?}+%7|EJ~Y$CUk;GQkr1|0N^&L#TfU^$(#Q_kjO@;v4-?`yXomL+$Zu-{#`W)Ah-a zllJV2Uw;y?wQo_u(H*VK6q#Kp)sh@Gk&0!p&{q3MH|!p(iVMqxg7gS>%Errtf8sB! 
zE(i+3y2(CVo3%zsr+44!Bx^4Mz)sEB3ziacQWQVAj}tUyMogIR1a7UnyYTSLBlG?2 zrnZHwGhR4LTt485PRKSF$C4p9!SnR#-vGX1$WdF50b8?mvPYvT2!LLtk3iF-aSrF$ zufu5&N79OBs+jsX+h}%WQz<5gzUtgwds)q~$E4G^Es?S(EMqG)q0Y}fg3I{~)i!|=kEv&w0CR*&g}iJL zs-^dQ5}9VIvJA(rj&%@lV+<>)kKH3s5eWG2alVdWRnLJx5Y#-`0uaF2&$Y9?f{h{p zvL7q}Oe=KSlN+CmLVzP?qS#}$x~s>qv>+HJUJAWx6j%cXX)6fY25J%C?7w|L8aT~8 zv17CDSBp^F+O9qIN**VC<`3Omt!LeNvTM0+tUFW-q_qRSJ$n@@6|gqRo+60r`5Z>G z6m58(x_u0T95HiaZ-5w_RtaZ&OX<3r<6;@4Vu*h@hntvVH1uOZzS!R2)>w}HuEiYl z<#ETDrnHxV5ad%?=X9*dm|5xNc-BA=f2(8M_4T>kOWoi-WT<4~^F%NQHiOXGDS`@( zHLHRp;i%5hn1mp_@b`Rc+bDcL{`mXr^Bg*P+g}0*T|wTlnaL7@U;< zm>`aZ_tcs@R($eurD)KWLsJNSm@+ckZX-@p%F(G?JjYXiVSEh#JNmu)U_vMd9qIw0 zJxwWd1v87TR_}@d!F0yJbcP6~GaVJeuo4Cb`%K0PVNgU-ff{B?DHJHf%o5R9kYiH= zGX{>Yz63goo=RE^Pn$flKoH|=P5e6YaI62Z_K#0uQv;z3?zHLCZKT3wjwy|*oHI}K ze7U;ieP#nynHa}Zv~A$~6;PSN5T6-i7b>aSLuOW)dFtETV1U{UG#c7MM~6NxdxS@J%!mP$rTsXD>OV+%Bu$oj zVjd+mr7b$+XCZC|WYzCE*k8jFPy4ZW);@Gv@|E2;@rT=v%QR^-!cxr1EK3cJbOCH% z-2o4*{|e#;@&oPPZIQ$}kdO>PFtV=?VrbBDexOnJ8zT5h&ZEVgI>fPHZ+WlrZqD%4 zZXhl7(U--T!SYLiP-NZHzNOz~*_w5N84W0hqgM1yW(p6$0F5N*gPn=qpvT5~emuJb zQ@eHLF8p!1VRdodhQsYY@GLq!M7FyE{eQn8xo4= zaZ|WhC>6{NX$|~P|L$ro7TrC7kXp;co`m9sS~$wKeTeyQdiuMY5nUYRTSBBD;@D;& zOL9Z$1&r1eLQC@j~F=02W? z{qfZEx)>GcjU*``&E@7M;g3sFfMs(FFE$tLd+0F)Gj-OM-wjja}iwA6uceTefRN@thDc zjr4KS=2yMDM3)_C2Zm5oSqQDnoKNu%)*(%lKEhKpMFSk!0vhc3dVQ2 z6NzX3iGQ^r*U8Yw9u7Fh{I`jdS{4F?jWyCHJQ(f_ ztiZQIH%Q_97~$&S`|{3|{nPTRoR}j?Co}VsPL>_3jVm+q{y+@V4D~Jw*Yte*(tGg3 ziV#!vr|Sb-Hm{aSU_nLmiG^N5KLb6h8#t0`-?9^I}8db{E zuU;Cd?_K+94(MGiIQmR`Qh$`iHVr&a7aamjbb*is{|*y<<s?IlGDh`QJC-sS8*WxK56}= zuxV}nM_GIAB4qwxY+L1%G_$+>K{Z9PZMzuD8xssPSSQpf>~4fE#)uj7L0Hxa;mY6e zW8-F0*(W!ZN&Pk5%tM)*%~snb%h)PwIIl{9CI$D>7{X^D=MX_LibuSJVJXs5@OyTS ztENzr;Jb@~-2Bu#8NxJOz@EVj}hqb690+AI`Y{Y6C|52Qu2O=Vr&hg-?WviBIo4t0D}@f%~E19h6VVg z8wX$_97zNG{eu4(f4}!!vwgjN&O5sR8TOiKZTTC%j+r1s;>KBDEW2Hp(Rvq;(EwTq}oML|L8{(?Jp3bny&w$2P;f#Q%v4#7ogCn6_M-; znwz1uI81%M#wr4BU^R0RMj%L^ggnUS*QrN_Fnf;*n4-2O<1L0zNd3-;Jf0CGZXZD1 z=JEas1U@CXwM5(X&dj4S?Y{=(C^Pnce$l~!c;<$Rxn0pYl(M(oh3dP<}7S+jY z5*kVTMafTo^I!E|vwJ{ACG?aAa!er1?bg96{TvSmj@KY%{+#GRV_!J|FMY z{&CTwMVETvuwN()4S+7|C;5fdcj7}`Z5oM&odpNO$_93#t{DirscWcLSbZG}3Y+D1 z+>g_R$QZ3(-3QJ`PE{^%^}fT)~7pG zsb232{yk#DiCr?=YC{brIPZ#E8}2cR=5%!%cVS`S?ASlD-9vGaY5O(y_OyrNk?DKR zdCtoT5wVZpKp}Q#%n13B_ny!co5N??pUBC`L4CNrlj0Vz-nBmsYvDOl0DES$xZx7X zF;yhA2;$@__NJQ#tIe0!mUC6U2!OB@B{LfBuM`?Ys<{pm0W}#tBAUDWC&5fX{y=OL z5)05+2vDjgWGvB-<5aDwvO4?D8O0gjd ziQ|-Yp5S&@WR9a_A-`Zq->X1t79>5O-Hx*cfwoh#^TZ`nU&@WUeZmS>#F&1Blw-9v zVga0I$plyk3qurwp}|!?G&8$Ub3%o+2QbRhrcJZ9u}0D6Tw6#UIInYomP?V9Et3Qh z<|IN|WCIEF(Hc-4b$IM;>~H*+q1Q5s$=U_QhW^4vQLcu=GyfZFD0-A=lJVH1OlQ{O zJStK~@$T_P6;`NV)nqjl?{WzFNUBAEx9pDxmA24Mx@mPfqV0=m>=b-Nixh<@ z4RdeCoCznW{if)=xc9wA^S*@ls07wiEen!o-p*&3Bl~;=f!{ML-0)R^djQsH-xMQ> zoe@g%hnJYVtq_97g?mPHvFle9kAC{Gt+mY_q3zd@0a{y`jA~ zCP;WGLoT6q`pEWp=x0=IPW3)kpV6soUUL3Vd9#$Y#J-%6+%$*gW5L-x<;#8(yno&n zh=0hDKzPx@>6khDcSV2}{1MA|tJ3AY2_|Oq&Ye5=q}x(OBjxnx*J&LE5qa!R-76;* zXqxY%p+@dE>u=!AHmGJzRY>>0saZSSkZhQ{SyXG0M)%0aI~z_s`Z$;vD&_h0)$}H| zjnBLrk-IO8Ge#1ACI3v)ef?}6&Ig(igX&S6gdX$1*?YZ=%^7yC|6lFBc|4Ts`v;Dc zXrUw}TXiB7Wy!vjN*l_)Q%I5BSjOI_M3hQ)S+k6NXN*!*ma*?MLt-!(%#2|$^Sh^W zBfM}4S>b>5>^_3~Ay& zKU!irnN&bTVkY82!|1dKEeDLG%oYARO0fqR;=j^u#o$bRCM`_ANaM#|+yh8NJO{ke{z**0(kw0Ig^GL0Cbm5ue1|*1# zZ}c7gsp|flnD%Q9qZ-gb&TB@0e&YLQ7{}To()a9)Qwq{45i!<_xc3O#8z*<#qK43P z3s`Yqu^K@t@>$z{S4XG7)&=PM|Cvm^533vUA$;#Kbplq2u;zeZV)Yuqy8dgFSSL+* zYGO5nKNK^xkmLR1#a7tds(Tsc@R@$^(Y!Ftpjxq9p5HZa-Id^Ac`|zC0Q)+#&^`Mz zNeVKMs74Im= z`F)??U*r)4nDP^9szg$w>Z&-^uXjc)P}w$B^H!ARnQ$Qb`#6PixpA6rIYxWT%k`{{ 
zYwsB=RoPrVVZ3lhX=!*1y%**v&O2%{k?!?^Cv>};qv1a0^A~c1LZ3);Rql^{ueaws-1EYXJv9cA*=j*; z%pzcmWP3qYU=tF0RD7f|njgk|jX5Q?oK%WFC%H1cFzCDZ)!(m}g^`&@ zZRrZXe{JIjYJ!)TC0Qo;P_-^2_n$ZNbpp@rl?SCnCs!AZ{IaSPnFr8IIaVLeu}K(R z-*X}K{rRUlqBn|OH6zW>)>85p5R*zu?9FDp0<$`+vPk;m` zGclszzH&N$dtb1no3g`}|GU*X>Hd#a|Hll!1o;0C%rMJDx+F{M0(b`vldHw>A`8fj+QaVexv8e^GWrInmFU!_~$a)+Ti`J*;RJd%aNm+Z8h;M$bLfTa+cVIf zyF~DMZdWFyH{DZ^iFkr(W05f3y1i}i+hyBL5zx~gPya_9W|-MIA1Ew(&*^k}O~ z1RQT-Z=Zd}dEkA02({!K=u;(a*q-vaCV%3H7w{hwUa{@jU@J9-U|``-7;|jAXBiv# z*6XNmzu6Rmg;{>4J&Wa*j z8wL-3oj=IDy{%YqfIBWG{QB3rzA=WMXN(dB)+=Xb`inc+Udi6=Ir2j*5vjyD z{s%>-Pz=VtQx67xl->RQE{=E4{~T{Hn9B*{eH(ZDr?(2)F9(@QGk_W0CKX=pd=nZL z$~HkA=&pRYRNvx3D+~>J#?cR)_Z;In_)q^ z2(Y+L>9B5M@L>aBOg;NA*3SYa)AD;ANi2(5(C~q59g+a!pmw0=J$xdH;=f{`|K7;B z6xUV!zxAvfL)i6Ow`yXg9kafGzE(4ts)}zo1oCBa4BvatAD3}Ncf82}dgJ4zV;j_> z&zUKG&UXJdbGBMz^Fl|bHM>QIPp`o0EP^a0bi1;Wk$zSpFW@oJJ6ORo&*?xpQv<#+#zGU%YrB8TfeWb6*UZ=nyA^ zTuFe76Pf=URwuJ}!+h8dIG>8+`DwzRhtlZJYs!p?_=8D!MDDXvq5fG0xcuGPtJu~6 zV>Rmb4$fq9lirQLt@*NmO#)KWZ>=+mpflSN$fmq~dnZ8&KP9U7J~DKl$ljv9umm4u zO@%R`2BYp;Z=Fv+R!oEZ6TzWU@^d_+2OFhYx6k=um-$V!%S+nl!bT5bje0@S*>bvT zd1AtVN7Ca70cJ~e66dTcr<(Cwuf?Fgk~~8uSldFrYc;;zAP0w<;446(kcDZ-opHLtAP}iui|1?wOYxW(Q(?>{_s`il@9xN7h9am*_olC4gGw{NO2`q-&S6EV7;h}6y+dWIg}Zp+IA1! zHVKrP)pm|#_AsAh?)A%RU+5zW1T&tVkx?bhMhc9yo^JNF*db6bZcNBsuHmepk0*hw zh$q)@6#Pjv`sa?B2kd{&~!w0*n+8v^;+L^bN29wc+FO=1BtJ zXSN+XcJx2B0wkh*Zi&UN@n^@;X9vtxECY}yAW;*n^uaYsJqLsFO2oe-dt5!Vhd!+5 zrH(*8^u1LSs$M_d$#;Zc=V@43WGh+YH!=6(S?$cVC+LThotE;urZ|%k#}&8!&Poiy zL7-Ag;Ckm-yYMi=rNP$aa{$M;QO->rNw z9zp`faS8q$bN2x)aI38W%uo9y&so3 z(60!`)bO)^K(z-wWUez2RkT%QH=8KY=LKz;h*3gX9iJ{oDP821c1hm0n`gq+=eO96 zx6p7t1KbX#%eHQ-ZLIr$o(YySyYcXPK`1q{nIBg6Cd;15162)=cXk6^?Vr+=r9OT! z0@#8h8n)lwkg_a?Hro<{!zT1WycRQ@hdw9I4B|-2!JhT+)I+4bhZ&Cj@yVh2nMBUb z)QgPWpFy|tJLsWGgCvvkyQwdaUEQWf8-fUdnSRE7vdF>-^X>si6z)H@w%b70lEE<7 zD33caP;0E|b#G3-roN5MD~WEGh+RyDz??G4(27a9-4dth;Fn8dC@+okjg@h@sgBbF zdiw)N@u>9O4x?_S)w<*{Fx9gDkd{l7XTUo?LG?1g55jWhYV$MOuw93XRT!a|o}*ry zO}s?5ACnCJP5V!hmbsIB?71Sxs4G2(V&wvAy36C7D=&-+dk3%uI_;eJ^5SS!2Bf6B zD?~~~zcWMAJ++^=ywGvb0Oi?q(m)yGr)t=pt>5ID;Hl4%n^7BvAghxd`jJ;*rW#@V zuvwJvlFlyj4>hYPhkW}1hkRR8bp&e{fY~nwDp6i+eFHU59V`_Vj@m*y95YQ@?U_!z>49XtswoK`PZzV}`kA z?`Bc6E(*JNJ6~As@&!u*O)^&Q zX8i5eRXq}}w^tD#FkJLOzSwT^5jIi3@XICt#pM(Fxkg`Ue5qTn*2VKXYsFde9l{kp zIeH1d9VP476UG2-8(RM09+3tBSXCfzyVfdb8GL)@iBX~x? 
z`P>yjoaOqnJ~pw1l=63V{x;>fooRm9rct_dAMLKZ=2J0{o19Y<^K&Nbc< zBRT%fPXR81+m7N)whKNS=>=9MEUMD(#|Ah4eijKNaW5}e9WN5$a47dG^(P5~#1y}>vY`TJ;oW0N2< zQ|-3K&X5t!9PS-ecH+*nr_Yj!nT^#e{hy~={q_R6!S^zHZ8lUs2u0d~fN+YaVcz#n zznG{ART|i|ZkF^T7{G^DBpGc5rBeAc|$vvhH0(2kS}E#)y#-m_;9KiOfl!Lq~w>y)74Z&K>eYcQh# z(R+XWa5UN19x5c?Z=$iXIIDl|-2E%jBB>dgaq`$^{V`k*>_sI$R4gaQtkMm1&PSB^ z5b{kbZ#}y5>iE0CYEPt_DKJ*c?xAzxzT~O7k^0aoB^?N^OP9nWW40LB5^I#8=xdOw z5#EuPpa_%1*N_v7?>&11dnIA1g2OjH=kH#eK^W8cP}!Pgi3n=dSw&i3`oo8u1{hoy zEdR=D(Mq`RnlCoHK~t~7f;Omyc`j1!jCaCmw3wc^TTxjf)G^MW6K?nC`doF`wF2$VyV2s4BRt}xF=C;N zcS}^lFCAorTG|D#aBL5tt0vVDP`mW6WqI!Wu+Fv#<;f%w_d*7L3MWp}$Gr&7+V1PV zJT;uRODmatF{x-17Xse%QBcF5G^Et6)%yFw;cF>lqtE#|BR_0`6}XM(0CD7zb-MLN z&BycR6qZT|t6f@kHwXJ*#GabhSC+Sl9H+@?i-=$ox;4)4>meMyn9~U>ubN3>y%FK> zT2i}oFQYY4Ir`lYEJ4xX$NSSf$_rcVOrtClrcR7r?pqoD@Bm8fA2BKO$hvkwajiRB z(?OX$w0-aau`tKrF7f>y;I}RAoOylR_?&Jg{N0lyk0665%Z>8=32SEhIg4KJ<<@p3 zHNSm#w=-j6sjy-vX`h(IHSL|eDg0jO$(;|XnLK+VK*9o-5s1lsakSBf)zGrJi+R|t zdFc|qX3a7|F5rlIaR%D6*CL185_$pPc#$3HI9m5vUoU)nF*4$M{o`HTndB`B{9Z$% zy~#JX_%D9GhgH6vH{O!q5FXgLh$-%}RUw|XZ#KehVVun0-D`1_bIH>ShMCK8Z^i@= zRCnGp&SO@m>0Jp=^UJQP1t(99EH zE64O|sE60K{%0B&vu>n59;o$?+s&tBQ)*mbRe`3tJRZB*C_`(kzo3#vT( z;PF7g&D6b|-y!|BTV-x_J%dud9yV|X5!+(K>bd%mUG7y~d8UmDkbrs$skLgb6k*Vb zS;}v6iqoCN&$@tQ}=1 z_q@FDEMY9C`+c5Csl5XTKMYjdOeGN-3@u?^=oP^x5%Tjo^3gTQWRtPw#FvqyAeQum z>ht+^_VvR19mpA9d@an-0{XRIE2hY{EA6sw!jp*Jf$l;DFYybG>+E_A;O>gxzEni#lSiYN&$O3&0P4dl8s5#db4kuQ3?A*as)eMa=vq(u>oI#~x z$}$qnEHf?4{gtdU2oHTUQYZO@wPUjFXj0cMXGZ0A(lmzyaBo_AmJ%%RwfKd!rRkou zdP3ICdQx}1MCnX9MlV?{K*p)#6Et5@YgiUCgofo@j94D*^m9qCFLi{G&qn8&%Zz_U zSYE|s_<#))`kN^(O`H$5W#Q{1D63~K2U1phW)zjO8_m7ZL(Qy2{yjR|gCjxAOyFhb zx8!w4;BN{Jgzzd4Cd-EVa;A{~P;#F714P&^ZqkAaV2>K8#Q0DWBLx_?21)*uEUx%K z0<2t$`MK8&MIA2Sr^d1<5XpK*RmCc~xHpk`LX!c-j1s;UAftYXT8=53XxHHH`LHGM z+rux04S6jo^Zm`*#Ea*`8+MxzYp||R+uPFGN*tN!&&O$(6)Ka%}e(10T zZp(IdC1NDMcR~4;Hz)4CmKCl;tSy?xE)F>KU>a)|LY=B%S04ww^}(S_iy;G*@>%}G zXA;>4FojmhacQcCz^voed1qY51#3&_=Tv#x+yk<-XY+xadlA*M9rY1JU9i(p0$JR+ z=K31H&v~JEWw+)75S*vR;{bUk%=m-7fCe3DhM5!DIeD#aq0|h`Ll8T%Q_oTh%b|;5 zys#-5dpR7)P)9(~sCu@iv}#@9@I$Jnn_h(*2F%=RrG zghBe!jCFCQ`@g*s8O_B;lW^VT+RixjJ-sCknRh!e9xEf91pd2z5Kj0R$0|aaPcgrc z)`3hfOu%5a`%*D78>>u)mJ)drFNB^qI!v>p5(FT#9o_v6vnWLwpN{h_D>w7>@k>uR zAq0e?{NSk>8o5Q*wzl1BIn?M;NQm8CT2B6LhrTFn6ZaCUd2Lb4+8RUuYnFY$vLzCi zr#})hEa>jxR@o!}oD6vpl+Q?Uv!zkJlsntr(Nz1sMO{<|>n~5DS4ngf^{s**#6=c; zG(;-E!868#HjaC`$M$b5U&Jk_hX2eq51jpngM($y%kI~uHiYT-I-!b)_G8};oHVJp zK9dJGXm5ulfGnw+D)*qH6VN`RTOU%ISjXae2B+qfMf$dE> zBJV--*Wnlm&QG;?bfO7dvwjD6Sp*XaSAr)pD8y~LQ6 zUFNJvlPgcSrQVcIOx!psJ(?PkD@bZgYSv55$@`}BrR>;_gkxi#7beW((N*dTH=*A!_}3vdFq;UAX*86Z?9*o%|!49J9mzkt2Q?k z@OMN@W&S|MC|`Bx%_~$nMc?bz&NY61K7>cvEsgXpQeeCVxd7GLLL?JwbRBLPn$}|c z$o`M#YElaYuxrVTYb&OKe3UNT9u_`cLujK|j$vo6v3pVcDd!%w@yLIDGPwm6N3xp94SX#buBbB3PkZB0=0AncMgqB*KC zwAHGIoFJ0}CIWEMwEZVdr{L@=3)z8_J2Khp!L&t$n`dR|X4ni`6py!%jA)E1*AM6f#gPgqAvtEnrf~CTkG5T z7Z)QA+Ex>Pvf#AbzKP%Yct!wMHJP4}mxi_86$qUeAw0u4WFJEgn90?1E6U zbKbUHhZM9ckWQ`2u0D?6Y~U)8Z2MkVYQKAJtIM;abHHeKfP)xcq8_#^Og^1fYoQD~ zRbdfcu=dHRX=ap#RlWpt&EO_(^e#r`t=8+_d zL|eNuH)(#M4&pnsLdiro?^w6l@sq|?-Xzrx8xftJsX5R z9tSEXQx)?Qd^HaU{QXDN30nb8dU3G0#qdiYo*~;FSHVr#$1EY%*lh6 zoekN9HFi}4!v2#(J+x)Jq$%*P{+Q@%(&+uq+%m3)fQs-cYSm&-6Q_`NkEZO}prbh+ zzEWo4GkFY0YG$O2AeCVKfP$Ahg(!C#n(wW2X6RqV#cj3Vmpz-{1n(AI={78unf@va zqa`BlJQdZataJrxZ^MG^Il=zgpl}~3NB(rx0L(j(Of?c7q-zMg28Gl)x@8-*BjJQC z@IP|+5VX8N))^6B+gXc;*R2EFt}|^6UNHma7Cw5M*n0v{a2-ern+@ zK2yKaOxj_{j|P#zT42DAv38GPQK<=lvkVm zu31(=X_?qrMGFpzx8)PC5dqf0n|!l`k+_{;H%#@TiGdb-_B<-Rcj4aS-kPcRX~4bf 
znCdHWez`O}8qu8_vqDZQhv@Qy3^1@|K)etecHnPj&|iNpWY8^iMJD1!CI2Vr=((CK z(7OR`RvpIk`}Y^hep*BrU=b--d*NoQeUHyhoX*i$UKrDDF_GymF?KJ#>mF`$QNvXD zgI%o>Yn!Y?yP1HWT>Z=?T6ZCQai%|qx@Fr$d%#-4*z63X{FY%}N4P8sM0GN4#2|(d zv-GGyBO_%OVl{;yRc!*fc=5t*`2;0@YrEFOQnRqHl<>Hr#HE13EHmKpL(x8&`qMaP zgdy<1htLZI*;9D)a4zYuU;s~7Q@{HUkGQeCfsX>kj~HvIm$ zyu2|XaP)F44juO zuRV5aHSKcM^0;YyKFZyT9ps5GmV3_TMqBV>^)Zs%&w;x4^<69pxCVmqujoPfmmtx! z^KWba`k%oyhh4I^5HI#WoM@4;nYf%b#2FxNE{K4|((3y{zT3^u& zre1{jI($v}%+E3reZn|RgF>7?rwW_+_*pgU+~;X^+^nc3X8c>X11RhmD_aAmS4wMw zf^ojx?l!L(H<0Kb_@R$kw<@c!ucI5M;9lq(r99oFmU2~Z{RokGRD5Py+C=|IJ+YMOY_3^1GzCI4Db%{y$A$9PE~L)V6i4Aq$kffH4jyF zx9e@;!EY*y6ZIw)R>65QZ5H|3flbhvws-11<#KP?#muHnd6kT9KyhUvvDZ8ygTL{! z$YSCb-EF~bbe|-a@a_F(O8;7ygPZALyBRqWDz6_DFGT-A`dfq!pheNoDi^$h(Cx1Y zSfoS!W0JvJ&#`Z>O%8I7Dz&dXJ^P@GJfe{RU$hINj?}_Qo!>v(cjRWNP?kFHrSoRm zS<>Syt5aRi7A4kLiMknD-LJc|eWW?{vscx(vhACDe{cNUp)>ccM>v(6swOTkpOCrN z^~F({Fy7kD8KHU`K0C_{0 zZfnsYD^GW-H!UDm)j1L3JDQWG2fe4b_U{J~x-;3P(c%UNtG5uHX=F{!1F~9=c5rtL zSO%01xO>4&Ds{-DD~iTzarl7irBmH7T5Ai#8bT4$0m9(L=H;-IINVa99BFpY@WD1a zB|dBFst>frQuR>?cUuuxp{PYo1=<(S2bc8B{brb({ODQ0x1PFA*=sXAck$|k!&?Y9 zw^)j6@QtHkgI-HITgie!YTM~a!r;p`+c$;3Hp0B1xX5$sgr;uDZh`sa3oFlFiN!7A zE1ow+2LS8JNjhH5nWm(BbkkZLX#vWxxN(QenTRpfC8K1FjLn>y79Y);ZW^MlLO39gt0S&g0d~ILNd^pAaIe;uxd$#8vk~KNJ=7sjh zRPuMqRzOMiq^cDyoQ5aM1Y&JGkVnD0K%cyN;i_Ji&arZj=LHDX{(Lid4_lJuG(4)d z$hnq!MSJ(b6abOAOK=`kjyYl%^H<#L25g{I8XJhIf|EZzCXr-^47`vi<&xVZo}aeRx>u5a(!BN@aI`&5wQt=@ zkt&!uZu7CjF~!vAEIIS>u3mpK2ERZ}OZ%f`)#EW~AY2xmyTB!CqlnG1w(icsnj6IU z?AV_L0@UI-|KVB-lRccH?m=i`^-Z{f0Zk+PU``&&EdQBohIVI8lu$a!?k|HV5ExQf z5vr@#NmISO+?G|;z;p8V7lc-8Z3pqKE1ic_)|#cN$h9m(t-;nc;XIb<+I9gWC!*to z+blhe&#qOtJRKyH4c%+I3NjOmD2u3}%Bt_7xtK4f+&;NeJQ{@U5Ar#}E%WewA4FBj zja#!R5rIF5u#p-}?&0D$&10HRRQy3isA-Z=rpd~6nBx_QEYb@TUt&N{$urJpVzj>pegO0{*{=|_Ls4MXf7AeItfsB_=-93}vK5y-MSGWK{ z4hHF#SLwTX$(I-yc=4CdpVdozv}6Cod?UX;3QCd=_?WhnaiMXA+_~yQ7^;f>hV~nU zXeP?t_#DdJiEJ{E8+74beXi(v4VG$Zw04Ph_!*IJaIQ&Ws+*vdpcwrfFh!RaA)MCH zs_JRqH#af`aqeMf`1JX+v4b5Z5pF=pV!9vySo@yCb-YL3r%mJ zn`! zLFgz3MCKiN$d@6Igb}b3?+s>5 zc;umBQ(akhTWvLuLqUuT>e_qXIL3gtxJ`LweV)+BUuE2K4|X zE*~PGW9fyWrg`zeK(#AACD2`Z z@pEBMd`#q8n6_$Do3nd%K!jq|R0PKB{khMfJkmB#21+U$6kt>U370%f1z{p1hZMVf z3m$3`Rw~CYx_AQ!5d^IAjuevNuE24T8FcBOeWMxR zK%r(#s;?oAg!W&_)`+Nng-~O35a%`YWTZq1Yft4VR-0mOx!9J9k-lG*Fm<;Qci0Wq zrlX0z1|V_@Sw6|)$vn{#@&~^!@h0}bwq32>Jdf&&?^PBl_)Qcz;d}JG(EXj1>z`wO z*qk(9EJnwxn8(RF7TqTa_(`j4#>u$jW|y{Y)d=Rk_`HixxuX_R*DB^Ni0*PcH$U#? 
z_vAD4Op6TU+dk}5(sMWYwd7|B*6+lbIV$J~5s7d`*k*@(W7(=a8QD2pk;AbvE$_Y$ z2YaII4LKtZU(MoJIIr9LSH%+3kmk(%xswqW!LN zHb(T`UnaoW+Ty-l8?HjCv){SUj5+JXd0h7Jg0)73$xMvZJe-Wm1d9&YwM-IyTWevK z(DuNf0(B$sQ79LvE@#By!8+ALl*Qns=>XIBrMCWv6Fq;*ec_i;vYIu0<37D ziI8tA#dtZCi`Zj|e{VvXGNw3bb>n+r&mb@1mDk$fI%D4k5Y9=2^EA(y{O%>zDE+ic zd$4Q$Ez2)I?;w5R+~RR&IZVIyPGJfN(BAi?&WGsRJ=HnZoz)40(okOR`;svr>dfyq zY%zl=$Jh%i)db~L6vcuFo$b~h!dO$L@U~q=J&!8QfRXnF?%OMF;63CT!>*HSjA*9_ z-mfg$(WcmQJ7TsR2NE&{K2o(!5(qOEcHRP&AuDlfImp1FH_nu%=fAAETzm5be*E^f zxVhyXm}$wK59e}nm+QWM{dB(Ro7wGfc76ptc9kySCWXtiEsf=S-zFW*Nk5&F%Wl3j z>c*9VFj9w`ja?Y@?(Gc>K zjzqrQlY^jwsp*6Z`zGBtT&ebj82|<2fuAnqqq4yJ>aWVBw-0#CFjd~|N8qRZv(Gs7 zy?qxXFH(alosqfM=K_pXxq64nJ;}1WV%1j2m_UaFY5%3U!}JOhg$pZ>4>=Vv)vT>3 zf7b`jyeB0fEe^=a#T3`f~=TU~)2HFf-?N#7L6wrgp|pE|n(3rfXaIt^53 zyIO}Q$@Q}ZS#~N6*mr`8lA7D+h@F;6*`*FWHWNw94CgaVoulMmC>=TS(Gf#WCR!vY z-FUWN++(qRU!GZdk<+9{4uq{{^6F!n-PadKTWkI0Ms;Nz2c+w9LXlLEJ(coX`nd0j z6Y9!QjqvuGO4U0FwJK>YJ7@@kid-GJK?|wgj(xgC8UA39l0~ZiNAqzx~ z`CHExA626bI-3$*lF$mC#J7ouPMx+p>A56khQ?y5}hKpOxR;`pmQ zn`@;u^hX}^^!De~3cJw2FF8l_w9J5kQrxE180xpLlY|U@Mcxi_2m>4?rR~4*8*rv} zF*p*`bas4-waN#9n&oRR?R#=zdyidVUY+Bwpjp6-YRzOZZqFLD=Vl)46iP!lV^sJQ zyxSBmtcHr^>%BYos3>eLGig6=1PzBv9z5e$0Y&)+zVX$HmwN}iipkYSJF-D3NZC#D zG>>%Vn1Gy%TBdeV7nh-dVWrzx&6MT2b2W>zs8rodtt`)(s?gBTg{c6;N1U8{K_8ck z02tH8c6abbW{xL4Xa_SbJAU}Ig-mv@-ZC3k&1|(iUmzZh3Vm`s8?={!`Xeeogn1#s z1=SZ;%Z6Dd`qAc|G1?Fx_ozu%E78E`28Jd~JgP>>jx7T96 zLuTuN8>UcU3C|7@3&>BHG`o zc?)J=;F6p;mV^LVD@Hqz-KQ!C%N|Ssmu`{-4OHLyBU~>REx?k;MZ)- zijO(Csf8sYe;DTBvY1}(TgR_GFcW&DPPr>HVjH_p#{(08iCe&WAzQczGjVgfa3v#_ z%xACGkj6bUV^4vy`qy2^tJben1F7+Pjc{=1AkbL3Z6*9o=)cw|^s>tWDY;qk^UfM| zs3b@nYhmYyE&x(aej}f7_P3;JFb6#XazDzxH?sM1zw@=EjLFW7ukphHUPue^9O_ak zyY%g@vGBQ4^4@`Css3NK1x!qqh2GgCX|P;j-~eBiv`FsRl{20hW^8{i8s*|^1X^T(Te+K+1J7kP-ZSQhkA z4swH0bG)aUosioT^glKa_|HGB^6A6wte!qc9EWhG$r^g(Oq8gmni>bj7SxRLs5<#Q zM%eL}O59wX{y~2kXS_oCuY&5IQy^Fj2-9=EjA^`teE3098z?C#KG~b@V5qz@v;$Os zbEIq+oTD+x?`o4Uta=iTxaCI1!l|B9)Hnu>sZg#jL z)tY-pJJyB)WEfl@&${bqQ+#LWL+Wk5{W>ENiy7LFHy<1*b&vMM_dB9= zXTI5IgCxR0nPcW5r<(Ru=<$bY7JI#K_vtElOhhVp&E}dapvFmUhIwJ_&DyFrY76|b zb<6VyXj!%2 zuk1nal_O@i0T|Wtt|sP5>sz<>aTX8doJmPF6;V(y0p*aSDHUh7xcNgYQURk@jUckH z`@+;ORq)ed8v+R!JIDru^ofFqpn;aC`sN4z`Vo`CF!tE9S1;WR9P=iVK_SQZ6;;i* zXfvVfNsD$S@6iv(_b-O*<4qyg&9U=oa8#l#Z|CIv}@c8AZ)iuY+L}@meFE5(6NsZYI-1RGx=P^ z0I=ip@4ybgzNYe7JO2%lw;Kh+OwUXrx2)*FFFNXHQ6hcQwZz_4H*NdRZrG2T+xcrsa2NRAo?7RAk?qWoJqPu>aIwLhvWc)un?;+Q03WxCOz0 zT5}vrliU3R(JMZ;bu#b(EwZNljs@LU=;N535)>7~~gZJD`h7LFw9U z!)`#!L&vx_n=j%ZM^Sug!xZAce`d@%zZ9r2aWXdlIA~-lMuHeX(3MA3BUOKcGH&qBuWch=|+9r$Wpm z1qTBGpqPXYt^i<{)|E%UtDPXVZ9uV{*uFu#e}au(-yW|3(ZTiP>Rh=J%9WCDnt%37 z!g*({L`==yRIWsJ0E8M9YSAybb9xX9tht#qy(hmfciU}1<00hz|Ki=k)y#%m9jTB6 zt`ynK;@7BX7P129KX!>nw8J-mOLcTzDHG?<{*rmd}zja0=A zPUrw8)_sw3L0>RX1rhf2e`OGL%oBXQf1#AIn5(Z&zyL%-L{jn!FbQ&Xhw49mWU{{p z*s5o88MN5Aa_Nbo+0bde2*s&s>oHMU7gx8v98C+DNEs2W8}Cj(tSmE5?p=}c{cHI2P3u05u6f_)m3qs~HZdf543Uh*)F-pu1xx@b=pVNwFr`O2jd z4{joTBoH6A|6F{|9*vY&!%r+dB48oYeK(GMN@~lBdaB?qbQ$37*ed{BuZjC#L(yA7 zo3&))SdYy!{{=K;N!Bl7|AUy6p##>19CT&SdU&vk(j zM1sHu(p|ST=-=3_jLi3dLF#-|NdIqog^ndY@t+fB`wy^-2GQr<$Ags3Vu2~8LBMnFcIbWi5GwK5%WVdY0`9C0JLAoF8cR^7)GqRKW_vv<{ zI|g}Cor3FB;}`(iS=~3_Am93xzgwO`fndNZz0m`k3CKju{q4s8pp_Vye867A{`_$+ zk2irJD(Rh9dcWS~SijYFN4g!Qj0pR8)}>k<0t|Q!cXeii7W6Y<)^4b=e_C=Ff|clq zqXv>nkQAa#=(~gud*1dsiGea)j=vwQYO- zsV~N>M+xt>Sk;D}T8FgK|EsPa^e5tv4jg?nwL9sI_@4+@+6;j{|1=wq-|6Wx3Fh3A zBA?8%epT%fD?mRwQ%{Q@K79C@Zmr)kNZN}K?A}`dsHirgj1Cp;$DPyu3nQvYe_Tb| z;7I4iPb$bikDoZdiZ;&SHC+hA^`ePSV+i37+GBfQWt4Q zK;H#;Ts7k<&v9kKfK>}2fc^8X2!MQi4xe>q{P|YPZF`6R!WGILts^90eeWovIxg1# 
z9Dy87WfVx@_tvkQA*hi+;8{BEwQ>KFRA%PYJ;@Qzp1F%z*Rm;zii$G-Fk=rctoFQ# zHYU7V%E11Dp8`%e2o?##^HO}&|9_w$ftqLaO!6b2~xKQj7xbOT26 zcSQrhic|oPxuU5F=|^6tX@~&0C$=;&$gZUrAWeOQJwEXG@Sj`!yYI=QhzoEzrbo4vZy11zU)(ix7?}FTVF1qxo)oyFptrDhYX|k7q2)SSI-G$H*7Q_1WXj@Cbo~>OJbKXZ(uL5z z74WJ8m$Vfqn>sSRI}W67pWW-zzf^7X^}nM>U&M4`j~+b=68@g78q=7JgNlU2fJcP@ zti9sZSm{w(42DtIoBos9mQ@LTh?8`x(+>h2fs7GuNzX}G=k--O$pa@BJxzYE>js4| zU>wfPQmMbdt!|oN9MDc5&O~i3EdyXV$-LQsh@QR+c-)T8Xk+Ybg}w7(qsy)MGpiPj z_>#&+RRi%G>Z>Y9JmIg8z^7-1&FtT-18@aal)wTo@agWhUqlC*VIh~UofSztdDpMb zMbJU()X9^jzw3Yvo%XNAgRZO}(hIxV;&d78dyLPGWEyP-p^B z_is8OGdEihx@$VX! q1gTkpHJ?3qIazkoouEuKmYaR}7*>|P_YB~l%NMlH=bf{<|Nj7_)`aQ+ literal 0 HcmV?d00001 diff --git a/docs/next/public/images/concepts/assets/external-assets-normal-dep-on-external.png b/docs/next/public/images/concepts/assets/external-assets-normal-dep-on-external.png new file mode 100644 index 0000000000000000000000000000000000000000..64c918cea3cd32970060bd36f577a8a3406952e3 GIT binary patch literal 356464 zcmeFZWn5JK`aKLNAOfNyAdONY-5n}OgCLzMog>{0h=d{{Al)rF^Z)~ligf1yLr8bd zPy-Cl=05j%-u=$GkFWo4@S`7R@6C5#v97h&H6LDRD3ac!xru{=L#nJKuZ@F4Jcxrs z;CX`p_?Oe$kW=6fTsLjSr#R*Pbn7@cOgPH&PjtM^wq^+3@9EOE?x!FwhWL<^I zE+^uKQsP5(U&zyM3*i%5MR@NX3A?|rd#9x3sKoo^BYO71=!$XLf%^~*Uo zsn{7wubp#i&uZn2GmR>|Q&-tLGf{o!LPKMleOh!yLj zWBIc@!NK({|AW-LdhB492Xa9&%4ycO?*7>ofb>TES#tir^!|V8{lCTgReDSDyr^61=cK;m(d|Akc=g*Fc7eY$%i`yWlwH{;;%EBbuKIVW zq8~BtU45;cI2&Vor;7hiuK6CwtqfIW6+D_0#KwVYhNcf>!`?Ky2%@V zlyemnKyNv`&4EkW@0T;|pXd4bAx#0h5~HCohWQWrMNrvQ{UC&1UwACGD<=L9?VQdd z@!%g{NDl9|Qq03pZ=d|HUpZc?+`QmbxT%~W)xe^f(0dXc2Gj-?>tJ@xj0o)U$-adT zdM!rkCQ;0nFSmV8_biJFW$wGBMJouj$6}h|QO7Y#Q z+W8vXMcVlS?@&U2m17%R(qUQeJvoMHI4>E4)cYH_(Kl~r9Y3;Jpea@N1KT0XK~2(+ zNQVgj3LMa9sGH@R|Dk*b>BSu%X>b;eT3VE)&{e%XZ`SDWCSU1ZgXQmf8jW}QG6jO@Nqvq z(XEzeUt~5uIdc3Jx?9&(63QPrAy3(p_L@8n-mf)$`R6U1NruR_%?HHD2)sHU{}f8M zCjjcX+w^_0BYIimZG~1$MwpxqfJzh#uzk;u@!oJq;A0dRUXXD&NL_YTz3p>&lDuH(F?eF@=&AyU8Lo=?u zs#w6{d5qsNm!j|X)S@k;>+c}+MT@lp8I=a?RP?M%lYq0Sb;LB;^!nt7wrlz#R4EZVt0)n4mO z7Hlvm83>1tY$Ak}7!Pk>Yz6HL*o@a3Nb-2T|K~SW)@L%sxW}@or#!-y@sK?gh2>>d4(+^%VeG_JjZ!Mb zJKmXzOBHoy8*g2#mwF@@G#ipc zNXq`^a4j=X!Yc3B3)>X<;*&2c1{ce1Ik3|gGs{c+zhKZiMDle9O?s(tztzvv4Q`@2 z+02JcP%k4O>(q5IN<~A|Kr)oz#IKQWCLd0_+{rplvwBBU^miR8?Gw-0K!JCs?0WNE zldC-4hWDDn)^bN~3@Iu`JN@$<{2m_!$%J_O`N5*x*u@N-=Wcnzz&a8b3?>2^t*Iko zMGG7+lNJ&1go@5{jp^S$e+b!61B&;6AzM<+8%Ze$-)sCeVX?Vynq-o&AM_GEK^msL z9p&a=E=0M4v^-iFj++ReTX!cbT`^%JN{7+s0w`L_{467fL zZA@=!(*6j^%=Gs0QSf?Hr+8H}YQC4HiZ1WW_z_|5C;9gRQ&coH!drUd>U{+Sio3BT z3faZNra^H~zfN;WZi=2+=&cM7h zooDOU^~^QBR1#&<^OWP-M_L6KfNcAcb^TqrS^SqTibVGPY9cH6HwHVJT$-kyndoHd z<8t!$sx)qbG4JwFiPd{L`(QtD$lGt}BGAStCKRHE3FV9LB4|bG9e(3tgQT63I zzJO&f6Hw(o?se`?H!wp%GoLHW)janOQ4}E1ZqhWq1#^%b-rc?Z{TLxTZPo6t9IS>_ zmX!i1pjj#@DMd`fyv*N)gjiT*)=B#8{SG#PdrFTV_uN{90;TaJAW%vMR{u=~LKwqu z;o9}(K@DEB)7a@_EA9uUd&Pr_qH>VMm!Y$ZiR9P*@euhX?|M`72=GG9-*?b4{zQ!{ z-_42Mb05MDmgoKQP~j$UmuBEqx2;!R#X-3_{6REBo3p|RP{+E`CA}8W6O%sYcbViE z;r)uA`q|2h2R5JNE5}WLdkS)zEYXSBGa&R@6zaiQ&O+Z zN&UPS#{D1g(A#^1_I=6KmI+H;&)i;=f8CjZv5SJHKj{ap4;RJ>*$w|H5$C7Aemg=s z7$~Q0KHjswH8eSJU*Shbx6I_Pil08jgrY1=^O+nF5A4~SOu+MUR$2rF_M(dfTvuw| z9!k?^A=3M9-RA}1n(9-V^k@G(Yj8Dhoct?q>z~>2p^XNGHMjZ~Ei8M#^3Ptkl@5GM z5`lyF`sT^WNknEQsOfH_$7bDiLc)%Xw4LjOJ=@4i*nokwoF^p*FcHTrsMMz~R$-cS zxh>k4VjqN99p+mEwYaWV*B4$M`fxg!=wu@{Q2IRP%EP*A zs$aR*54y270~zw}Z7{D#Ks$3&*=v3tZOn3oqEU;T$%5!3g-z@fM;q~1&yVhAQjdvvIipF5527KmKt80V?6^8z(2;k}TOizu}kN>m!9r`->eJ6AJI>nH4{$X;7*|Gd+g zA#MCV68z@OR;V{{e<6Qjw(R=cc=xf_n*jL!>u^x5Gj|!X#!Wo_M+kwEHt`U93eH6g zxy?fBp;dqOE3DMMz09UaT7*98L$76x>%|Q0;qi{oO32HCJ%>l63rl 
zbS;hWa`QaneccLkpVPMYd{!$--|=0SIvIGQfBy`PtdxNOfSst3=HBL66qzImubixx zMo@36q^OLV;P?uFm%_d+z6@&lyCIU$+x?`gdtQ4^bvmZpRoZt+sdqO?`o9Un@N9t$A+=iU9fZ)eAF%+=|3hnSiHtgwHGiX1hWSvSdkqu>KN34e~OYWm{s#wdrl z>q!LwU-Vs9x>@s86NTTMr5Gk19I_bJybYFYWowkVzD7R75p_%`A2f@-XA1XJXOQx~ zzdMjx$ZkuUh4_e?dj6Tl;v77XF(Y6($fUU|U>2{aC5uY*rp%sy4=(AvrYA3S_T$x0RqQ<-KFA<%>-kW z8N%Po*Q=X&c3z@KssdXxSMELcKZxl~I&w16_z>{l=2p486o2?Au`j zDZ;5FH0Pgf2hETbm!$EsmnSnXUn=n}0-%zw@bQq^SDfaHq9(UhjdzcqQcn_B%6G-) zgg&N6Qsq8Z?uyNi3U@$M-K#Zewoc!_tiVIRj-r?pZ|lHbU$a;%{Z)u_(MYN=k4Ev+0UvR z*R>nKm%+vtJ#G<(0|euJWdba0a_bhSTJPZ&WlF zh0jjIYMQ6V*^$$!>vZA=44q|X(i04N-Gbo^sF7A9bE$7aq{olu?`OgNPpeFbtNc4P z*Wp1&jV(Y;Hc#|ZfC_*a9G<7H{@jQq^B zbR}~p4z2#$Uvx#U-2u+2+!&=1#};+(v5A4}YCm^i;QG?W80X!3Kd6?8V5we?ys~mS zTWy+@i#CsWi>1yeuSs6SSMIYW6L_F$pi%AcazWciX-=O^-|kLpdU1b)TZ}R+$GdZ0 zlL*})vuf1=sh4sYoMt9GW=%|IM>`aK9)q{{-pi?7Uwa8qMdWN638O_LN||OeIquEv zz$erpW(_HiBH>1y-Qiv{69fxR*N*3%1ic4w5_PwwT>{$2?q6S9RFO$IGKM@+T;G(P zE^D4?W_H~y2naG0?M)I-_V#HImVdXQlWT;%EC+sM`i(Mr6Oz!`Hp=>+4DQhNrc3I7W|YjZRfv5P6!?psvFnrFT*~Sw zQnKtGuc1nJ0yq1%gu4LcOT$#X9MH^y`Vf++9r{=5-@!xgZ~F6H&6NN5EEqgNt`x^s zo2Qy7(@QVoi|)MfrV;5_(ye$e1NzyHHV0vUxNQXW^zxcN8VSWMzgeYVwFhAXbF->^ zjfIOkf6d+{`PhBi6WKKMt+EC*H^i|7$v!BW(}R-yVxGZKKoJMGx5(KVt8?g8sEXBK6DE0(joGnFaS+H8Pe`>#fcD$R&Fe8DC(r;9w zUPlg?XZRFk%`Ezwo*(o`Pz&1UX~oagi4&~Vx`GqSrGE_!3~U`Ob&*e^kLLu=o4pjO z8HBFqFyAJ}#n1?wCwgv6b{DAAS`Gi&*Ge%nAy@;=);~DjX)s&5n;-IlkXGSd7@`~H zw%Y4L{ocz4`5+|ca+ZNCsh0zoY&C6|308*xMTa5EVi=`vH!v*GWevn~1B}Yf_-&%% z5u8iPDx2e#>%yxQY{rdhc17m!meZQRwG>RMr!0kPk)2w&rkU{s{mx)w$qwA8@v4D+A5iog!R$ZWK;{(c_vmhk%t;Z%vn>E zJtyUZN^xs)-_N;DILEK&u@dkYQfcrTy*}cK8MW(5XNQ_DU+n$--dJ{+VAKdQQ3)CJMR#^)d zuQ{Atwnx#npSHMMgc&0{JPH?BKdRod_kQ;rN z16g)Jly@nJf(rBCI-Lp9usgrUjUUW3`Mmt;2X;x*w47BG63UQPytNo^U@Bzk)Dg{? z7)2{eW4_^l81qrHU7fDPYqkiQ9j~;ld{gym|1Ru!`?6WmfU=LQOz-zv`Kl?X>nkrf z5CDU@l{uGt+0W+n%6Uy{A}sq-`tEQ1xHz*u3_(*5tA4nfznuJTpmSL@(X!+7wkyY{ zP4+5W3`w$p%KESsi+;IOd*DF2By%wA+Op9X6mRiFHfim@y#NB%-CP4Q$E7~s050H* zV4()gpUAQ&L;WTN&l?%9tu?L1i9_jp#|En2j^k}4C*ty~B~^6prFN0FmAEhZ+Z9}6 z2%peTc`=eRAj4_ZEyYX4n~^9T3|l_P@43@_J!*AKVIbFR4#QBmx^VJ=T;r_bRsy%8 zCx_0r3h|?YDB8Z=W}l31P72PJNBZR^vD?VH?rr1#F=BUc18OP8y3bf;#y2~YzWp;b zYcR~qXjaj4*R`R{m{6YmKb8~R>kAD*5J-D!fFQRYf4fQ-fx$atGRgxk-RA|*}$zNI&s%uC&<<60VAy5`9OxWKo;1%@bUgQ zPQdq4YHg@<0JGp-Iy$aGjdVWj^E2$;ajy&Uy+@DC)id7cq$3J^zJ(ByDV3WvM$pq1N!7RQcn$q5oFw*6X0(v&J;GaCf-5iU-)ae-KsrumE3(@G6(Z;H z?{%E!mCx;;Qf)1Ru%hl2wmT0WbMst7d%X+kYqHc?VVJ9PA^DCd*L{ZOcS<>PA?Z(i zWH{{4Tf-13BjP%Gzmjwc%y0h{fHG5Su@l9E>GVntZN&^#gB_@QZa{3bkz7gb3-{Ch9e}&`^cfjPGHdLFJnjVGGy={1VFvKR<}P zZc6IvQ}!#7(749&LFi4xz(*qve!Xi$!=*lM951qh_LH!`g*|)k6A@LKSG<-42lVOq z6n)ew(rz;edUFQg-qmGFsYJezf#9?Tze09x6PT@qnu z^3{ntrRTOV3Nw?z?sSq)JB;efpD4CpZ z#QWRJMCajVMPArrB8-7EPqdr9Yv5S7$tNEt~Kf*Ir+En7xGsZ7kE@xo}NC;d) z8quW7dFuX43AKN>?WRh@0magyrmSc&n?Wki<1^Zl2J>oqTuYTxFRSik}{6GrAFTLs15K0<>^V~Bnt3D%Hx&Q_EH zk)NTIg{ayhUxxHbp+x?G6-Q>Za@_@+zBC@$a)TY60O_27uN=mD&!bO=Pml-i@KKgS z?@ph)$U4y+fhIS&?dG!M_75X8WfO#DA$7D>>#2Qt1IUX{9SOX~536kmJL7w@@%tX@ z2WrV&Y!NT+w&k{*sOgoT4>|ie1eslWmQ2Qy%`&n9DAwcg-QqUJLVwA=M?7A8b6~3_ zD&|jIv9MDMkehLbGnRo*jGFIg_>_6ZaKQ0Y*?h56I>R9WD+b9c=EMJYM&kn)uGC700$ ztX70x2&MA+@;uRnXd1z42uPOyXn8`Yf|2$K=GWbgTZn8KSPNH4v|H7GQ`Rj zW+0i%tvPjWtNmSQyOAic!{IXSHNuo9@?~flhZU|uenEg&55S=L84-_RoM02Xz_Jn@ zNzW}?{q>2lUv*AMHdzREa)+aX@32_6WGO$Y&urfl^h`@pV8;Lsx zraz8PB%zU-aGsXMo;(Y@-1dT*qPNCINOrJ3AYs|HEmfkY^!D+^yiHurDcMqrtN&Dv>!;?-gQuaWkCb;)y<0lmTS2 z+3&b}m{qlHpXc6&`>RXbM14g7_I&!@T%y2NM{9fHIh3QILHwqs8#=rH6wE{dNf&c@ zO4*yadv7Uv<4no&8k*=dO;H4>gs)f9d=7?Hb^Bgw*QzKC!Ne!(63u-S3N%syp|5}h 
zHM&lA5JGqq$C$_q-CVq%g+Sg46j(s+FX#x2HGKkMUEy`B2E3=ih_UVEC- zT|<*@A`%)2L2W8Ld)LzpA`<4`?!6Awn>tuo)=TKe8lYy zQ|&ZabB!L=HD?fs3d#FMTuw(OJu_|66j=y(!$x|i^q7!NN$A))mU!8LzkeGErFnhn zgshXeFr9LN8|k26j0x1p%bE>EpkWP|Xq18AxWo0zp=Zz7*&PbcPM z^r;PV+Phte--?<3k9AA*1mA61JddtIh6~F<=8ZgB-p)7mh=00pZIZe?Cw^WJcUZmk z{LrUkJlleuc?)78Y&R+=WRue;A_w=}9ynO-g-RppQ;`L}uYR_-{=Cnoj7jLu47H%Z zRsItFoxf^H=(u}lyLEzf&*q3I8v;0Me%gKfN@J#O9kwPdMGBh-JO|vJ9v-?59iNh8 zKj;@HrG<9>tx6}&D06+nss-CDl|g3(j_=*S|A;dI^44J>gn&!{)SCBW_fF<#KB&WB zMex_hy4@#6`Z}L!CH3Bg>`p`(XTQNOGOR8*O>!DT*Vv6Hg@2~+`wk7`B4d={qKQ?{ zlxe=lp)K#uT2R}UDQo_jLEJuY`+mVV*=XZOoz)zQw!@I+N{fyqi0q|6-8#yXQEio~ps16SqqYp@$+n*lEWpaA)*dgo6sPV?hYJ8;{_n2(@)3j@B zwf0YUJxwJLK(!PB?JwN_GFM|4S85KOP0J)e8Q0odiJF&LGxBkM_g4cE7Cf3p&AZci0!0EwqAI=uwfw0=oY2)XjtDUDWIpj5KGPs5Hdx1VI z^1`bi*sM*#dPTzB8scK}Ra9ETvd6j(r*x`0^~>+SHZz@g)0~b3L^#Xo2^UXU|CX6! zr$u2!a5#Y=@`94&$BU+{S%A@~Z*z5O#W5VBT7pVRe5M072Fd zX?tK#0ET8`iDPl@s&uf*$y%;m=BYv%3OtU>)2%M121C{IRT2}#QB|G5sglqV^ z?>>yagq4{t7W7#ztyQj|OeLX-=7{hX(M@uNNQ$`l5=3%9J6K$gEzoYV-6asy^92z9 zRDbW7=FEi`Hyh2AKWveKKtjKK2|wuAh0{_zHY>+`oZ+1+$H!L=J+SH%-v}JG>1CR` zsF)^M+1ym)BzBxdL+@-s%yPr-aoINXysx{pJsT?vCNm9M2CAgb+Ohcs?$Pfb;C#6s zPqiD}sbOyWKlWY=c-2|WPpj&d(7@3?)jp~)oJ)WO=Lv>+QU_4-B-u>c1zlOA{%>Rw zB=_dtAgmx<-*JB^OR+>0xDjT?V;V#Vq<#?0g98T~D#^r4>W)em77@GWuI&V}8C`($3*!$aS2 z7?Z~+?&?&HRCQrom<)*Yx_4qIz{3|Ys{@QLGP@e|2iAcVVJ3X!Tue+dcJ(|}o zie?XIT(@KSOKv;TeaXx{KTmpd1*-EnYLikg2-}^{esEHGvE_de`vI!iJ8(Mrn_O+AK+0k|!tE_>{H;3w488Eg< zEmfk=zZ~N*mhZaU?OBH*^&w^ zBbCNlI=<)5#QH&6xCggn*iZQB3T=b?P}N7on8%_)pR8BLbbI=pQgz78zWHZS_&B38 z0SVo!_W`x*+qIOv=C3+zO{H>vydqs4NaJsn(yyG~-jSKA1+i;NTM^yfH1QaUI20bQ z(pBX!sL0w+!!pSN(@)|UG*uX6&E-8r35?irEtu{d;Us38TkFI5k^Ess8|n1@8R8D; zfedDt`}WjW+72)L{2>@rbD2FL(GvD9nnC(;PXdqNnc{JPJfUmT%l5ws@vR%=81ZP8 zpq3xBpw0dM(o zP#!NejOx(C)|+4s*R5ngv{_Iy3s9~9%Q_ILDU()%3I$P8^>>p`Il5Cl@E1nTCNSqu zc}uOX*&H0p(b`c=Y;Y-_G=B`pbAB+mo=n<(rlk=<=?jo@PXPw22++!z)J47g|3npO36NDlV zughp4o$HsB&X%2DwZ_XXO#vFn^2p~fo@#mYL);2tPA!#V%)I2ae_8wiveu465(*N{ zM1-=9Tw9@zyjznl?q+1K3$bs}pudK03{s;ao(S859j>v4gpmk$jGF6_qQS`i9J^}D zgVWBT6^kg^af*v4AOC9^q8gM;*_?}SwNKem(lGBtj5h%iqr zDMnv&mAwqKW;(x80RjHM8UpCax$kNZ3Eu;pgK?nYW#oQSbTm|pdj?}-@Whrh9dP3z z>D>M%Ru&yPnA~5y_~I{p>W~Q49mY0JK^3D1?YlR}%i{V{pTeI|aTv*`2s>EsIQ)1A z$ohMCP@8*rXrN)ni_ZCvY|-j@H>*twXhY1Tb2AC<$zE=`xutgV>LqN9mC%}Riq2^< zE2hl1Yotpm17z5z)l!O)!#RLaiUTMN>S#ofv0?@o>$5mV^M*l^t;XI}epOr~0}2RW zSS+T;)dt7OC|%09&M}r*@fOU!SJZj>3#dSipZ-k_Ui0D`{LM58&y7b7ZmS}z-{xI5 z=_%1u419b-e4E7*uyQVh0(Cj1+pjRFpPyeT94Ky zTTReeh(eyPE+*ocZ0h!j^Ji}cDB}v$Q&UDfO8Fp~-*Q{nN`C}8*l0FIuNmwBg8RQp z$lIZZ2O|Rs$Bs_s>L=PQbPhy{2>%O@WGPm3A`a5tbhGhst(l*0 zIeuu^`JszxeYN!kD3&#HnM#ITGwsU;dUMp2XU-w(%-P@Lf~! 
zGWC*GQ9E0oXfI*evd-CT^)>p}HJYhM+shY#TnjaX9@(r9)mC!{lU(ju0B+_G06^oP zswE5d6qHFSl*ri9?sq8)SWH1Zk(*Sf8U4I)#~T52wWLr|AoI5 zvLD1uz%^b|5M+oBpHqpRjcN5xt~d+RnCF=^s%sp5|uR)N?R zEYOS(G88mr2;3BL%2P!!+@Tea?x$|EX-)fp&@B8e!#b%b5mN z`uD8U2bz|l7b*!g*RR#8r-^BmU+SOg!^^d*!*oh*4fkp8-hF74jh%134P^XYnG?m{ z>PK02K)n#@NSF4jGik9by1g^ui{7%+^E7YqZ`dNLDb;Z$c?p_pG1a@Qk(lNzQ$_GR zS29KdVhhVFy`V|>lU4%lVjYe}2U1^PBJ1b*#Ap`O;@sS*UFT@%##OekU2KQ{8ri)9 zG}FXXQ7%sT!|*8QnWvo0y#LZ9xoD)uyH6S?evg+KSJ{oQT}^V8QQJA!k+)G9(tehw z0;-{lC&R!5f2U71q_|P;H=yxYBY?>WG}EX%e!kVzU9Y{YaxmopNj|%EfNMp+&} zp7Th7Z96>6Y;s?ZUE9>+M9l$4mT3zQsX~lQ&J!%4M~QnKmAA(p{N0jtrgUKXJc?aSjJ?H_G1c1!Nz!@ZdUJ*mQO*jE{bd$@w0AsqZ$7RlTaTdnY zSv506mD`~bSCCEo&CI7LHcdQWYVE+N4`I~}&5p!RY0}cf^D^G4Td0pFlfBR8X5A+c z?wK|Rh~}2(Vm)NE<9am){Dut@?)bZd4?>n;3P)Zw-}IUr)zTR-7sdL~JXO}UZFy!P zqDwtQ*@$GXG(vh{@ zyIrJ(@?ic3Le>nQWrq+hzwW$Nq_f`kmUA$iDrYU#u0hz(5Z+c{g*#IRTM-h|OJG&KuVkS&SZ!aw-`&F6jewlw>z@ z=3EAG7q;i^Q5#wNta9dmi~8$)4ALrWP-m$GLNa-@+dHVHz|c)J|K!7B8ThLo9aXMv zBo;SOlyqLySzdI^76q0I@sI7RHMnqnJ8lku&-cU6vB+KLZ1?SP-71>_=c5b9UpHWK z4xiJ+^HdWD$%-PWcoRY{@tLl$ap~Y_!Rs{+b4~sZ7ZP@uF!i0udY9aDOb_I_msjg( zorQj>j+Xss#k84xE7(Z)ygwcGTrA-Gcib!I)nWrcNp%gBfa_BAym2v<+{Q}TPeZ4r zNvh`niD7vyCU*s4G&DCb;41l;v>Rd$HM>d0!PS>4D$0Ot_JiI@*`2H|+&={Rt|g{s z{kZ~|RKge;%c^P#@SQOknrhCs0hNiM7Jj)SS@>~zCj+(GBh3rQ!2Dz#p8H`i3#qf| z>ft=)wHa~un2s)T_C9`ks)&gxV7OA($dKY7yhX>ib9?Wk(yc;_t!#!KW_;}Yd>QNn zI5ZOZb34RzKSP`A4uw(6iROT`cmyTVdTDwdBVeUcXu3IbTO(73n~;n_DDXQV`n|SF zh?w9ptE88k5&8t)n&598_B|$YVEk7zCs-ptxS9uMD+L;zeou)gZSNcwTsU$T0z>KH zXMEO=6kmZWNstzfU0yq;7uYqZ3Y?)?pUsc^D@D7s^DVm$bXUABIzGjAk$1q-oD+AX ztvV%lQwD^=)iGNWb$usByFnE`ep!HdarVUbAn${K=O|S>+_+`zxeOL9xHuR6^l6@U z=ql@)y=~}O#N(w+PfTRGu538=`^4_{ED$mRFrOBk+Tvc4BpVrP+N|B_vhXHBG&n|_ z^!Xg9C4(!s<^Jn)B)=TvI6b}$!+t}JBLyaszGI;3`WkZp&BXuh&$zPnFvZ-k>`NFs zPc0~U*66?PnpeVy)XPTjx87`XaH*LE!JYK1j@`Y@1ciP5D`#dM$4z{X*BzGbZ;Y37 zc~|1)YwTIn^oFE3-)g>Yz5dKW2xp?C=bR+LNft#{C(sJhBfgX$4>~&?oLxH@gSpkqrxoX_)ofx z)FN<~AU;ZrbXzPe8Z=rm$ew@`Fk5(_0u0mYoTODeW;J3zM{+p1=BP2B4FilokwW%M zj|#Vak2W}x1#Ltapn5Wz$pWET!lwF;_GbV@w^}r(?Q=3O!xlH4yqSPf-E?p@O7>lk z&2JysA;@;>I5xT>Zuay_T$AIzc&x+6a5f*# zXnEOkF$^^~S5D+f-)|ke(m|8)@biFmrss^vP~1D)vLcyYB!inzaU4oa8meY=0#BEz zPW>#_P~+?~(yNtsvJm2Somk-ap<>Le(5EtO^43b=F{Rm+QCla20_zh{H-tk+q$%y=C#4Gn*gGqIqOC!G5Nz$a*Rd^WJ zerA?m9`3PeJ=w^L1|2PP>hOi2#xLxG%+BO>DraY2e)B3DNNbBF{;1KD#1VHT#NS^4 zoaiaRCl&3ZZ!C?R%#5RVOs|;w2VGB|&Bpj1(%f%Iq3nA~%-kp`3Ic%)YkkXHS^#S& zZybB*>^{=-6JS+R>d-LFq053`jYz4n*zoo*+fy~MfW@_5QX!WS7o+4dGgj}3DPQB_bZx}JSdZo}5sUTZcr}6Nmx8fU{!Fgi zkG7bw<4&rU=M#fBk>qR_ENvM9;~$PEVgwHsqXm~j4}|ky1>d}@hwn=|wJGCqCny>c zAU7(p_P2QO{_V}y^xUw?S_vOO+KLMFEy*TYWS6h661FK%8*Xs3adRYv1YXV@Yn=H4 zL9i6B&xQNcbh}8ml$e6YQcoSx0eEbb2wAyVv&2ub_NiUKIs|NSEIZ%b z2`i#!pKerl@WHGXMunF}64{TIKA_;#6Ji+Im^W8<$G)WlV6RW)igx|m*KYboxU=b= zM_Yysf!mFfV@J-o<8S?8N83>^ssg30(YAnfF=MpEToYK>v2cXi4n6b%SpB6?Dl@*8 zaef;$=sC`v%oL{}fbjlnn0NCvdfhmH1gRG5Bp?-Rh0zc;4WXS|uaI;Dr@+>SC90rL zBiyj%sZWt^Ny4eOnobtc|vWw6uYRZ%Q@vePS5ODIe zd+d6b2qBLjTxBZDL-H&HmT3Pkfw1G7?hx^%7gfyf-$?EGQ|*<7P*CAOEZ{@7&UjXq zvZFyvGR;w_nIX_P1@zTT0oKT_vebn5^Ez&eg5D}8o2=78%@VzCWHVQ!>`&rvvc^sI zr3Tc#>@mvZ9X<8@{gL0@(`ExM@-x9}HlGTjHhOkUvm;Z{i5dU__NsqAKJjtaSXN1u z1;PwVH|V zyJ0SeA}QaQa-LC%{MgXQ43#YZ`~Xu?R~fchYY!9A)XhP_mu)(ehSH0|kbvE)NdOeg z%9#U(n>L9Fi7wc;0_I|s#PS3!WJ@FbVhIA@s8tl}~e0v>0bn|AjYW<>E zOG}F@VC02`1C~7IMRpAV28YQ7b#brpmhT0%s0kPIZq#zOqPQHyOWeHfO?-^sja~1N z0-z(!*;;OSwnE?$l=h>5HFGMF?wY(dQzNdgtgI}x?CUD2zyqb(of*}gX`;_dBqm?`HNijMEhr=GH0wGQl!dzzTg!tZ4%!abV2N9c<%3Tw)&>Wq;ZyBX|vub>QWC zI@`+qaWYQJOanah%~!^gCh*xNpXyR$7*B89%5uE6ggc>ZyOB1aYOjo}0o4d>@Q2_a~2L5uL` 
zK@aW|o!wRhKSTvKXT)`mz@U>YFNvx&K8yR6`fZVrrjywJ* zO$3CPz(ecZjvd$n2mkySkL6%{Lpij&A{zBPg+i^e1O`vrI;rSjrq+;aI9opQ-@-** zbI=9IopXeomiuS-#C7&g+K+h`sL&UPeu|@aOk}4~{8s*>$*h?_bN_=m2aG_PhJ{Lj z5ld6S4k4=t<>t8MJ#jQft=j6b5kx?=7=W5mo68vls=69G5O&~iG0DrvbetLXJ()P8 z{x~raOKl@~M-#AwB2C@nqJHt`+5oI&&z5!ls@B)M=$|8x_ zU_F{%_h^<|gqrkeB~sdDU+>PB*z|%y@Pma?aP?l++(_dy>x0aWP%FS>KC=Ar*>%nm zOiJZCy$L|h$wzqKo|V`|JR)GxoE4feezbLMhPePyt#OFa{^k+eL%^#j$7YXVe z)E%UgQGCZj?(K^`SUYO|In-Kexkln-w|~i$g0_xLO7)*&ZuKXi`DS~RUt;|(?ShK0 za4t1)6xVWmYVM^9v0g&YbKl*);*<)SH*w^RJku>8A_7!r@*qBaMPx3;vImmqiW#Cv zfTyn60P7gmjY{ADtvDSQmynPUy|x+mw|d)!mR!w>d?`#_!zbfXcEs&ntKwuR?U^zn zFbwz8BjTxuO+`JCpECr9fdpQpY!YYQVm(NERd@p4Ewv~oTI4VDYD^Im=L(||b|m*V zu|q6hoB+P1{M<|IZB7vQlE{VBgBI6E$j&`6w3#<%Us#rU%vLKXp<@}YLf$uo;H|TF z54;7PaS`}>{vuX<6!yH9#mBxFuc>5NMXmC68)_{wAvEO~w#R^&YST~)*3HsBDa~G> zKaoSaVUR0QMIHgUn6KY5WS#-Zl4C7eB~K}D{UmX=(USl${wWK2X<@~&F|2X6*MY7l z5r71eZF9u7SG1M0Bbs>JQ)*ryJ0)mnXQul`5U)X(-QF?i{}63vv*;&F0`s{_J=y0_5v>8cVtdhL|5X2+(bsd-}I zo+K_Ni-{YzgZ4?J1C5$az=gR8JjVzH)1Zn$#OW5hcK56MA z-%Aqtxj}OK1eA51Nw}h-yzl6Pmi;F8k+Bu}(IBi1G%<^=AxjMS?We%qeFf0(dnT{U z_VN(>?kdg&pdu!y{@H`a@wPXfg(8kYuefa?F3er6?}-p_sZ1?Wt}ASW4BDY|)m2Z^ z(>N@t9vttXLHt&GQvv}~9<<=yrTBZ~6}z=lYj!ZXbGFu*>kF+?H8Z1Lqd%EQV1{vz z>dm2`>mmFrIyI#MBX)VD^%GPQ$E~qee8=k4@7CE z7gHN>LgRUU3`tziqR`~wX>{wWt13}Skm?0$Q2U4i0emDWbd9JR>%;Q{qo$J`yI1kM zW^=uBQrwy)aRZ9Y4Hc(9K`Ap&^$`}8tg?IiB%3#dY4HsA+ct!gtx;CmepPtrPTeWF zY+^YcYqb!LCr^=F8jn*lw|awb+~sEw{V?^a7l2p^Gzv{wU(zNWcW!-rEDrE z*2)h8$VIiZa2lKQo5FMJB542Dx9x9_cg9*jZ>-U=xyKH6XM%P*7w9n(T^vKO^B&}~ zhxKhQ_h`OO=Un~U3hT;_GV`Oj^${B%DC6K~ooCxKrLB5(+^UunHcHgF^cwZ(+NOzO zLFmMFxBNlyNLY&{a5`yhw}}zf?Tqb2G&rnK&~VCV zd$OU&sNqn+8pQ9y5;1g-dvbG+K?)DbRic;aSYT2ODmXOdOse&iyR&tERW^|M>q|73 zi$#25evkBwdP`2b9}f+&ZPbn|jce)BO?e_ogj)RVGVQ=f2S>LY2&-v#h`4OxXGx5a zX!_Y9FST>6M6`)wU9kaT9ob}Zyxt7`&lzTb6^O>H$_QqoNfmW&WZ&zX%YsL}_iewr z2gGA8Q!VW#p4FWEbaygOuxiHJPN*$K!zUNC)h?*34b$Y->f;MAaaC2L1R!Op4&P?5r=wjg);>Ko{D@PSp- z%~ALQDp;tYDsv6)fzSOOu^A{Ep#g{n!)_piKfud9B@LzYDlup5YY-ij7IdeXjk$0UWyoIHU2j_o7& zeQ2)@!#my-bZ+_TQ!nTS^_XcyUgeECr`WkeVtR$y(2MX-Cd-lsyk~HyMIh%wwba>F zbJT>W4D?+9DCkI(m2p4-9;Y2Jii}>(CzrAd(Oi8Id2rcxI+Q?dKAc6za0wbhk;c#> zP~PwEOjWSuy#-I^CH#P(?c7L4vHkXjS#wYoeI)s7Pk%j}maW1x!`^7d-ZO43&ylvh z?^y%b*t}g)we8(b(J^_Jajxj;`yrIbicbUcvTRWc$8>}v6WHwD8+gM1ju9|>BJ~r43&EEp6Nm{1 zbJ%_dHo!yWiK{<5WXISgaLAu0%rUnN7&@Ayvdjs$6)1hk+ilzU_U5hNAYR1{KxYOK z@m0QUTxhfaU?)ek^72aHI;^=IU=y$|5&vkIN%|6rw->@Z8jX?I@Ra^URbcAeV$ta) z&A#62`a!NzMLuuQ!crhwwcAd92z#T+iA9CbAp|6$9~iOmS$D_zq#osqT%M}eIfYL} z20kJw-o;2%zuleXTc0hc!i*hJEz&hyd^k~JqX&xZTC+>kvZ-M$??ZE)Y;{J0j{s|; z%Cb;jH;4e-DvLBmsD3DZ8gCRq7%kL5aR^(2G@72hc}OBCbA7qeqjrA0ktqfvaF*2U z_aL8jU1k~#j(=QwcI7H`RrxlXE^VM}27w-FU9Zu_H~&>Z?xnyP-{JBZ_YO97s@!P9 zG*ic#uSlS$O!5vaUAkV?u5J8J3Z%I$C0StBrl=?ac~WO5Nq2cO7f&}-aGs<99;3ZIo#q42BOlDaZp5?cE=G>NJNU{s|1+Z zB|e1_7$Fw&L3BxNj$RD|mgpGp9h1#eru8s8^x(of?O?rGgaXdF+#Xt3L!(t(PiGJCP(Q!kIBVD2 zJ}=5Pcp8$ab*3AH$r0j4(mWt4Y-6KZcUuem_JPspr{Dk|hUwodcN>aEV7}rx6A#6i zHHmEvc-Rux7qIt~%`|D(9vapY$JkL$`=E2eiHJAzxkzhwR`Ke@=Daf|1kKz7&HSdg z5C2X5z(L(mIKjjiB#K+nz~d9CZ#((Sm=_K)`b{OY-834c`$%=`&wtSVP^9WVo~!p- zZScB&vMwp}+3TB8Tsu^=kv5uYzE#4{Fd(zTxlG8}u{WWAi1lQ{PV7;y)pCQFRzxU|0f)Ai38S1phxW`?w{cZ1qeqv17|+EG}?bErV>I2 z%;{Cfa4!1d>C{N#Ghf!j#TTRi`#~WrNRI%kQrMPtSG%V*PVxHZ@#E?U9^ z;x)s9t2v~xH_`Yr+0-@^so|MMT>~-bx^jmg9XaUDtNu+!ebo zn6KrhIAflyhn>S`>0l6l_%y=`bUQGJhZ!e0z^i}N_hkv;qOi25?|Qc=W*xz4IWB|5 z%brQm1?sVcT6en z5lR=MSPIR2d&pbD99c7z5OkdZ_wekAJpeYI3Lo*-l#uW_$;={b*e;qcX&S_M3RVsJ zitkabY}9Ia36ryKO=pYcOoYndcc)_P%o_g_fC%6sXYx638fi0)$15@m*bN&EM)l5E 
zA+0CG4bYryjd4JvFYw#vv@@sd5!Up!#;uuc2T7wB@ilI8c!lUpaWt!K7or$-G*<>& zOa+R!APtv3QPbwB*n>yJ&6UhD3?KxkAc5(dz^NHWJ?#&e9m`HF}j+Lo+mO zT&6r``}$)gpgBnkdyq|_{gK0}GHfW*8FPpviGYWhhgWuVvwtv_Vv(j}G#ya*rb*vS zx(jzJ%2A2+@+OB!+%2meQ$>$k+b7_vq%)V@yolgW0bfWgdJ$Yj|{|A zOO#+Di3LPASXWqF)@=)lGTHi>ga}TLQX8P3H*u*6)s7H2BjzDD-&fVX7-2*^$3~O~ zUS8@U$fQLQcCQ7qv)-hp(-|ZGhD+=V0w}S2TFofUe{wpAzUZ>T@W&C4N;nSq?YAfj zc_`AkB70Y5bhV05&EVbQq{x^kut&C$&OxpwB$YNI8!5T+n$6&_B;_MBgsSq?SSnWE{U)XytvFtIXAgltZOcIGrbg@TRo=45%@=J ziyB)0p&XxTI5!HIY$rg*@r`Z^H?w|7Svex}#eg68L+ z6Ca19+B-WXiZTHoT_ShphesuLsJ4vjnP<~m1vTd>?#-YfU&bpe1cxCp5l$O7u44!v zMqpZmjAVj=VrO$g;7k-bxrTab+LEW3zrk;*m;Dbk`JEu^ow}u`+A(5bvm@1aL}XwbV#d3$!5$D z;w{db%;n5@mxuA6OSD5tgf{K(fnblK?8x&(&(pVqNj!)r_u~YXhw$j^kcQpC#G=2a%#14x(Q=$0< zA{J+1=>=D@o*Ma~^WNO+>kjKpdypX4&D^gSD;R%yB~8F%fVbM8xPT_n+95lXWo}O? zAJww_gd$8njr0DqIEDjivr6jfaa?wXHnS_0Dq;^*k{5opLW!fzdnOH+sKPrWTe<7Y ziHdE271Xv+yAeBQK*u4;=Vn6JbMTv^R$xwMw&Y$fA-YL~$`5zbJD^*&IUA*1a$EfS zf#xvndV@(NP$KL!QG_v1qFCzA?IxP~mT1U(9@DL8Y03x&yjUfA`!4+-=o@oQFEgo1)(QvH&Fy?iQn zM5&?pBDA7l)2r^Ps-mN7xp)=Limbno6f12<-7biYqUbN77?vp!H$~ARSp#ZP%2DRDP8HP1>3~fh6{m z5Kf%WnDVtm*gHDboc$;Roo{vlJV$Kb@TUiOH)Iy*wJMW@9@_U>BiR(4bn1BOv%is! zf)Fiqv0EXm;oYuYt!dl0{d7L0KqwZ9NMqYcM~rp72sD&Hcz~>y!V=P5vx`?T>v2V+ zo_YQPB)32H)sLT|oQ5yHHl5C7x&^aqepcIMVp6dFXb{LG^`}>7TFcDId{K70omOT) z0iX+8!BJh4Z~7@LXLjtqI;LFuSMJulshQyA6W`|QAzc#<#+=)R$Z5=pRRJTx0;hdWF8trQp zbVnATK@S=Q1y5^>i<2alS4}-SSX-G@Ri4 ztcO+yk}E{&=<*T&>=L+bd6#5+CAb5Z8e!ZvMYE|^TIc%`9tL=62yXW*ye2gF+;-<7UK2>ws_z7Y0@$ANTsDp-d`ppfwzT(c-(gl_+v`VCFkS41x02jH)4c3&hXkxR-%UxM`kKWLe z-A%lx>TwNZ^4rsfUuz@)fK;1wdRRA`-pcPKh71v=)>Gw*QEb);0Iy*8HOqnw9T~|?5e(^+3kQKhAR;I$pkcJG56LlGuxGOK z8S-DW3}Hzo)2gM#4bcPN%TK4y$rod zCp6gsxJjyjXUG~;YTlYks#o&}NQL|;lItorzB2+Je;a`brX`XY356-9e}wFF!d-g$ zXkB{lKTQcRWb-iK1gcPDJ0m7;^D18smP)HR><*8?QJ`0gL7KctsO6FsBve7r_b^S+ z^969shRV&A>mFgjH0X$ayz6V_!c5n9zB)hH64C0HdEV;VKESKKLYGgBo7`E27!F- z{U6x-v2oESFooHAyz}YT_eGo+Q zO_!Yq^lIl|&IUS*fhfql@tARq`;G(5IgI2-$}UWOhpWfWRKE`l444oA=ABRw4^Z6A z->zip=iCN4g&!9mF44~@ShnEnet2PJ}_R*-!|{%WrgMe#bi|1Xyf(RWZ1 zT-j&4(d>agHnvad3pPN_7-9yJm|^sjZ^@L}g2>a)<_SM${PEoH&WFbr6xIxG_3ALl zUch5BY7S@mpl(n5p^*rtOO`5u7g{f|^|9r>uq`?1)?iuYifm(zy-|A)?ts8F8LiHp z0!Dv{6y!r3g#U~)BISxtv=5X619KN*{%fHyVql%D7{A2q-<8h1B+Wuzk0cZf;u`ILJPKar`PiHGyHtuO)!yANX$&x|0w4HnbgwGibimPq zXRp{wn}YGTRD=N4XiWasL088LOFkY<8N|9Kk?EV0TdPgqS*xnHCLTcWZZ?KYU9AXm}Nh9OJ zHV69@iVQ@=ab283EBI7zD_hElJ`Vr$j$*)SeA6~Lz7H-?h>Si2oo^PR1G#vyLS~-!}|C1a3wNZLZZr2vvfmA`U z4jGMDkpgPa#>U6J=lq~_sqJ~cc51lOUbmOzS^20g+EaNw%)AGfeJG%&6^4)4$Hd--rCpUL*c>w{L(_yMGKGcRR}*1*q4+ zeKYC_ULVSMh5>+^v%H>J|8=zUk>Bg8Y(Ozs&>xKiqvmK?WSK^h|7GwAk7_;#mit~U1be;>C0cQshxq%QQQ zBuRc`=NUHbT7Jxq%XGfbzXQIY1YaD>rRZ+NtB}LP0!{JyUNcWT1M~U6veAt5lJy@+ zvyBlp1L#7tQFu56G)HW;uTs>oP<=L)VTHdXah@3{LB6n;nEz(Cei2S^zaXVVSx@8l z@-H)$TkyWUBw;n}^94>msnyS~=c`FJqa-YU|2w4cGt%=bp=;buqX-MFfojyxL9fkp z!((;Yor@z=?#nYNkE?SG*TYqbu=vt?r4q1#cjPb{fvH5X-xu8v7VoRjsZEuoME@4; z55i_RgA$>{k48*-pEtjFFEY~=>Lb|pD9=Iiw`=$NF-gL^CBXe9FuPCDKi)s4bcF2D z@6I-agU&TFiDG^_WC4o7P|zoQB>5-s>h52;a|-~txMfSNHgt=bXc64bX=qYozXE#W3EzY(!)D7iB(;fI?t> zsLVNt&<^OjuGg`0NOzC@Un%#03ybM17$bjgqw$YNIwMP(g|dF@dP{6NklfMpX#j)k zjfYYH$47GkNSUI?27m1ie5j86KG`I{u+VxLL_|#tM8v-7U8Aa*)9>I?tW`o zPUOL?Gg8!3EmVZJQkZ1u-+ryzSbm>f*_1q88 zh1xs%1c2lnWeFiUK!@4|nd(EGw0)=qv&hnmW#&2Pr`xSVXh z#~d>4Pt-}hgfphhQnJAUZ&YLPdGH#l`~qk%rh~P2*A6a*PqYbx47L9cHh~2T<>~> zJd`0J0X4u4GM=8|py$3cU}y*qs`vhiKI>JLjjPYe4QbS-*Ck_faEX>si@ zUMRn3v-@Zy8ApGgTq1apwqB|D&szkN8U?=|?{+yZ3-}O~Pnh1!oJ%IsyQYh!as~EE z9$U>tQ%85qjd3k%I? 
zf0+&n(qz1c^_)6l!Xz!Y#SZd+9ly$5;F?S5v{C-YYWO8c*^$6oxy(Gc8h>83oi(p2N}4q)KMD(Tq{{df8B+?_2&P+WyU~R=vjsCLla( zDa(0~GYY^m6t6QVC1bd0$w8!>1qhpG`DW_e3C{Lrn>!;(Rwjic!z0)`G+@7>raWEM zhR*-^)Ii8$g}B<6u%aFKwBnyt;vxAzfblF}A5y5&6&bZ0L!*1+CIDQAXWN=i=gKrJh-AZaJUBxt?u%Mafg3gBG3@RLFDMPN|bHE z1SEj+AB|OUtq8`19SWAZun{fca;uwqEix+la3v+maeWdAJ^7sV$#u)c(O0Jvn_m_S zMf|s+Wriiw5Kbd~{UU}@8P&KaR^m>3{_L9EjR3+kW-D#_*Z=zh>CT1!=o|Ou5B>jL z3;sockM7dt%`6|k`we!9USrF2mCgR?j+V#G6-JogALZlUzL2l5KOzQ*V~j+@aUKmB zwu$!Qv0(7LjM0C;HalJAa&UNGx52{~0KAscydl6opf%^C4gVaJz4OO9g(&wu!Sy@s zT)|0rptcSG#XZyLH&LP+k_#zQmblv(X*?DpEi4rDjEEhZ4Mp+FT4c#45l?L5f9abUDblxDa-5;=XI|gB9o;u7+Ey=SCQWFi{}Ib~oM7V! zI|otYe`qJtpBRWcz)W)K59|MVw}0yJKb*hsbiN;`(!HU_kDRuv)1g}F!Y{LA@H-cV zKsP;E1W8lTuv*^7`v8}u0^^^^r4`_L$(U&~hio>?-qgBsf5qSZOjmqOuHXZu6g|Bb z1fUk~H;$a0Y{`SU(~j?p)Jw}Hlexte#PN)#PY+kzt)^EI4(sjLcp5`Vh&F~hB~Gn^ zqga&RkxsuceNyQ@E>4L#;XMRRYc zMJe^VMxvA>M8Rcx)CSrB{4ynMKC2Iv=7;$s87^*CG^2jPjloha;6&lO<<3duw$>D2 zx5hb-fGN^q==}IpFkhC4FRm?jXv~kbg><5FZD7uz>jRPcw{vDlV}-{%?d%_uEG`P! z`b=2^agKP!+O9#3*L0cAd*7bFp@$tNl@(4S6M2D6A2X$gp`3HGzE|dXSqx|u3-CCt zLdWQy!g>meiyZ#i(2ECmQXgg)qx;AHNW0L1iu!<$Y##K7iuzv;f^44r?(V50P>EDu z-_*ke;jiKXFjF;_*?kus}7#3Mk zd68+@Reuq#m?Os+=}_X`#1PMSb`MPbj0jJfa>Cuv&alf{_!5*Nq{qtCTRGS;lG3C-)8l%Ljx50!LZQQz>-XrZlwIB z0@Vuf6}p5SIluN$&!I0TO0m1=!H}+a7GJ5<9Qip?x#V1@9wa#=AYG9(isbM}QBbhQ zp~4OFr&l*uO}BRuacixkwL-DSmwEruyvo%ct|z7gGtaButT4MsKFvAY#1e8n#Hq0l z|Gv~&62qMuK}^JpBk~S@D7+g4y|~X*2|EYLwCpZVl|>OrPzg9IK+}ymtaeF7*lcZ4 z>#@0e6q6p^)rA^v)u#biG2z?C1q=uL${;U8p{#)XYsc^?cL#veN*Rs~`yXX5ybr1* z(0Ml1VWmIL>JI_?>u|nv-UZ^Ok!422A(j)wpT)zq)oTlbXU!FAh97QE+7t8GQ*}0r zH2suiLYk3Iw}?&nS9O zJ}cg^nTAWuY-!n+z9e~cCS8ZeD?-}U%x|*EVWB|wId?Y*^&7@JU`TaT8>vNJpA zD3SjoTyYi#6`EKP*aKW9$ii}WUA65Ej6r)u*zrb_NPn^vVg5vdohbntas8JcMM|!T zP(FzWBRtmf5UBIRP$_3NpDg6K z$ylu84tpr|f%E|~uADLEq^=(Rbjyw7+gB2=IBzTx+5?u(arGCPsD1b6$MDUu1C>LAtJa+BsV0 zu-xXB0H;?dH{*;+dla!RiJ`sYQO?1_618D17)fX_yv&=c#Z@J8GWnAIbqn$FMgwuF zfml0`?ifTqvBO4}+Z&26@+H6?9!(eeUAdo31@V;KwB!ujCT7XT(W6qy=7+ysVbh>r z8p@n7n=(Q=-EElPb8Ga-8?DZgBbqRP_=H^+X$?e?UEeFX?oQ#me@e2x$vh&LqH4IC zd<`4{oU8sb=PnJO_m)F@-sfc0aSS#VN+kDp%1ns9`-AqQnIL@vkc*;3INqQO32fFW<| z%I8cGuD;PMR}TTC;k9L#ql3+(JudI1n-m2F3!&^W&v zfQr<4g40cZ=T9JxgKQH?VW9kz-^GMG?dY2>0X*a9HZoaa|528Iro#TR7XHHD5&czQ z1k7U?Iq(Bfb)IjqCY0J4S{C?(U=v3!b=ssGWLv41eR<#HaN1T29%$!qwjiCJKjMPI zSA7e9ufqpx{g79S&Qn2ALh1Hvm{i7SMBj*718v0WU4~+9lL0c`oZNY*tZWThKy&VP z#OF-ndKjiaVm&v6Bo_L`&WxO!EmhLRnb}PbzKUz>W$X~JRw5BGefjNUf&5`o2&#bSh`0|cYX?}D{w8X{F7^4zKaFB zh_p8TEMmgG2aYJ*{W=x8e;m{QS5a~C5thUmWQyPOJKg#8?DN^!mIwxuu8KE&iMDH) zl}2Fhz$XS9UlOls6Y{40{Nd}9vr$uH_)`p4s+jXPnO^TY?& zkIhFq*eZbv=c7@vEr;Iv`prf?Q&WmQt*|P6*b_&@;$vud%RnL%3>(qTr}NE zkKDi@O8~eF93Fi4{+{tPTvqhZ^rkU%s$y+%yeJ2Nr$$cJEm&%hhqOd`ri@#&wTYsjX7IGm#@_Fn7U&IsZK5ai3zogIgv= z%htqg`g1?AmxG|f!D}@QA)fQP9Fuw;3Xxm5nwrx}5KU}y*ucnFOUi2-Z*Y4h+H|ub zyXkB^Q}M2xW`DLewsoE?Hrd2w4 zA$r~nexkt!c+u9=ZgSx$?U&vw(lA^us2NE3&VJLB~J)I8bPV38KBx3$%<3yW$IZvf&#>+A942e~S=Y#E{~ zq62`9g+L`5VGtLCFKE&*v~~`-l4h ze@BpumbgS-^viMi-+Zj!4p%buT|%%#w-K<+Uhjh%Kg%0!>vC(gHj?y(*4yucWJ$62 zuA$*%R2bhX+Q!>zf!2{o7{+=PbHQUz@Y(B{|K^AH*)GM8Pe}oWoyC`-{sSK7BM;je z+y{odg&!YpYjnh?O(=a?kCceiT?{07YcN|IovZy6aEr2rf6yy!aRB3_?~mq1nlssWiIDFqLx}zl?ABnIJCf-=3xVeJo6mX(r}`<$#6AHXw2i%a*nAhnmU1{Z?7p$+z)y*dMgoL z92`LudeL*czhJrC!}l`9R`@8u2ZDc4KkFL@|If=urvvp@L6wJP4oZI{k|1}$^u5x! 
z#J`Qk|Jj@jU?O(Ix_$hunBQA=#6-RDiGbUOx&0L9syJ2SFkrue?v?%KdD`J3%GbkNF* zuItk$t*lBpnc(UW{H;iTeKe!+RUv6KH)IQcbcu|m{~+ZL3RES>@N~%)w#c* z1%N87guF9-dU8NZMW~n-5L;TlZ4X&8gIGl+BA59XU2c{ihl(`e|~^aq66w%x2OZ2CK$;C)(DIe!FOk=4LVUm z-pHZrMZu12rcd5&O^|?qKV*Hnq<-vmoyP^sH&cR)V!cLIc;LG-f4Mg7bX6%4gu^bF zYvh9kRduC_j__$eFVBfg#Z1_Eo&rWzV?r=ZH+4QTMMt69mM+%hAMFdU$ta;(&H)r| z$3dv_MBubEzESy>*q{{{oAff{aoGqqe}Dc8&}W$kZjEDZc6KPipZFn^-D_Nxf*6@2d@GG%5>Vp$+a# znM&=n`3WFf@?tovlyK8o80$bZaEW?1U8}0%E>94Q+1?Uc;CJ97j~B%m zc4Lxcy?jgpZqKQ9@X1W|kU^oY=RygzQO{sp)--DdjO{|~kL>*jSy?`{oB zvv!T3@_ZoGI84R;rWPiedM}^|aZScbwvgc2Dic6jMbj~fg;c9bY&G4c`>&SECQWCm zZ2ZV%U@DMU;D_OSdI+8DTE8W;>8W50 zeF8>ORk@=@CD5W(xEey1ai6Q*PB5S+C1JV^KX_jWVOi$Dsbg)jz4Qn;_Inkx|4V1Z zOxo4KZV{tN1IGmSwDe9-WZbg<;4X5K)SK3BDe3=B|MhS*4^bJ|-PP4@8V z4lU$=%%AUvyN5<*ubr*-hu4J2erF`vkCjUQxZM9uZSH%Y?!w80KyOIS1M4!K+aKhr zfAwiV2-hh+BX+;Q&+KT^HR#0AGT3hD#B?9U3x8aSftu80q<6&6583JU=-E1&orIfX`(*5EfHp|2os)X#qV2vDTDqB^vea;9DrnA&^utN- zUZi!JZ9O*{U2~V=&p+V)hz2@nk_HLS93Z{P0MizJ+ zLhxpWsUtFtP=_xK1B|jC;E%nTG=0#K>JGu)DjZ>eHhLMj-4s%P*K=Kg*!7Zl)hQTu zJNpQ_{y6_Aof?O4zct>s#&-i?WTv_#0;~CEka~mErH@IJAE}4tJSUP4Rz6Xz{+!DY zE8{-wl3jd<;v5Em zse(04f-q3%sj`lbS9Ewu$l2U2Rm4T>s@+PgJN@8N<0T|KGoqfa!Omdjyh$vYN;c!$ z+m{ZS4y%`Lrv#e$mS(~2@}YK1RGgANUav#kXYA10cdAZg<@1tV`dX4V)nVZg(P_wG zV4&o5U-HmNg?;Y9z``Rx7xrmH&)0tle)#*VzyBc23ICEz__i5AWQmL5nq$T7sA$9> zoTIj_hshg*&+0akot2%vNPInIE&U?9gG;YS<;L(%zHX5R(Hy< z#(fHLp9{X_+f0=qy-3JG=+Jea#cpsb!ehGHi`*!cvaPfH#AdKLnt$%pCB|Z*Vq92Z zlIAdHnwCb=LqAfgdOg5PGY13Zd*jU(A#i&0{>hQ&83}&fUSkLBz=hHLzRUGV$>PZb zLZ`C4Cmt5HoK)nT+ZOS`vT{?&=?s`x)y}u~Fxcz5F+@%GhFsU(2C(UXtrAa#3 zZF4-qzU$9hAH04od%25dbraj zHe&Cq>*GtK>z#%(-cks(rvv#J{SzYI@|Sw@Z&bOD!a6q}ruw}_?&Ui)goGqU5>urm_ zJ-KA1`_Z#lvnH=Te_l0pxan|<%2$kW3!R9nNVPn4xQLc_qa=1)pFK`CO%)L2oU3;) z>c2gXCRrS8-3^Wuykbn`vU$?l*hHyWVG);_R8(t90oYe#nK~xDUHLy!@1UK}7i!K* zve+$-s=vOlH|UK=vRe$E7EKFvl3U|=Gn2LcZFEDC!`dy`wq@L|JG-uTV)LW)4FRt1 z{vI#FdfAY?<6gI6q$l%k_4SFm+lJWE(epLNuj6H2N3DsLj951H7h5*b>fYBq2J@Y* z7WK3XO#_}!>UNw&raI?tsD$+jxZ?co?jZg7efV{P7%kI(FKH1MT++>xB(neIl4u_L zDj^N(d7gyYjVDA(OcaN>4sq4oshh{iSl9Ylhk5C^rVcBB&G+@# zLCqsOd=cJY3x(m~GVu#B%L#d2a<9$o;JVeUvI*awOk`%5Mjnl@=(+FQ*qviEY0RIS zzHdw7C<&!I^X)vShbZUORVP|fnM2Of$j=sqw=K|GyJe&~lPK7Zc3=}SH zPj^!b;Y{eADHYbesd6dW3qIN&5WjnrX>OaDKb_OGh6U$bjz-n(`-+cJ0J;~;pooJh zn8f?)shogg9rxZ95o@Q8OSAVPuo}UtMTwmSC8>ALl zyn%PG>MaAQaGK@o9jxEl^rZ}Vlx@gwYX8qKP&hs4{>!JrY4CY-wuwl;vId-_#Y^SXG?R z+1*|TN{ER~@^|(T@00Kzuc;dZpB{`&;Pzq!+-#R&l=e^IiUpz;0qvXzI(lNjVbGTD z)=D!SBlns5++>8NhK544ma>WL z&v^@RR&IG(dt9spS-|yn*E}m>KayI$b={3D-T3mX{|Y)Ozqp>c7g=gZw^Y$cZQOED zhPM2#0n@Cs!kK&2z;qONKrs62q*&5 zrAHAF5rQJUL_vBJkxnQgqEr>6Lr|nQ={;CzN(s_JkJ5V$Eg|GvK>|n5d&c;F+SG=N~u?zetwuOq`z~)r7|SJsNUH|cdd`YvC1%b35o^)PQ*#> z?DFfzQQxA*m!i21+EQ$D2K*GSW00Ipc7dytFxZR5Cb)oI&@RF?*wdIzS4wmGGz7D& zu9WOWL*IDrf?rVU(LL}Vj@6br^p>Y|>Hx0uV{l{Pi!Q@f9S2%Syk zbl{blVvi&%_A@wd+V0aXSX}`c8fe7j^BXQ*iy$l@D($XMjxE#!);1ej8douR)%!*g z^cbQ5oMZ=m+H7Z&p5f*WwwAjHcTPUJ@ajaI!aS|H#U7~Dn$B$M{+Fsh3}~} zy+>|6RvSm*c=w|g*%g-M{j(n$+C+Ni7FNf;&c0WQX0Sy``PSHo>haT*axTMRc!4Gv zVkFb$`!_F@n)a-Y^iU_1yNxtb%WPbcF3oN3g0sG(aT?_zhuKmZtbr(Zr-=kW=%{=lLnPL`B%HglfAH;R@COpgwQxYoXwU%F*; za=f>MAZed&Aw%r~bzlAUvD0|c!l1YKWzJ%gMYWp7>rNG%0tRS|@TYdQRW2XsXX+VZ}f~j&{ zfIl4?4>fPK7^FA9h^JgxafLr}Tke{}MtpyXZcFFkB$*D#ApMbeoALs|$6Kx&a`oW; zX2yqFhy1pD_wV@aA8tLx8**XKSV*!Bn|r6;iNxAdcsRB{OFqz?`rsr7C#-F>qeTK?X!E~%d#hfpoK#(366dCH!bs~FWO<><~y&? 
zAN~L>gcbsXiQH%t>p&~C62*eHE8v~~v|GOFAor6flbEGUJfQsXXe$?c+=oT2o}}n3 z7GQNcwIdtcRdSn@Wu{*iFLyIqNP;JqRfF)50Yr|rafS%E@qLyj&9Qg1#Kij0S@3k= z!GpkQxL;c_`5#vYrVWV%bZ{k+;A6fU`%P1XFx@`CfyX@ThE50=3>p2%(2~Px!||!( zQjxv8az1W@2GdMhv~cg0Exp{B*FOJ?KlBpR#^~F1k*u#-)m=o2!t;q&l8jm z+u2kU=}65W@$s6@iHo3Ge?W_-8Lj!rM-t-ce&#cavga?Rzk5am-7m7Sk>s2p+zF$5)SL^5ZBFH zxjrzTEMpak4x(g@d7YJ*Cl0N^uW2fNs`0Vgi-D*Q8u| zPk@x-yM0kNkw|G+s~iCKkQQ^t+3IF=!sj&|x`KrJ^Dk4|tj@szPzI5Z``A1K=`uRj zX4W6gnC9Jdqc7ysnkXi~FEPwK{H?I6ny?h;B&3t|4SIP#%#^vWl;P69GKH$d3{`{Ms@i(49dDoFP_|R2agkQJq`Q zd#s+P$TZeQC87c`q=1iQkU9^)MzBY-9`R*G{De)!e#*rS&vRkp>@QF>O zsq+DkXAEoF zUjO6muYZ34JFa;mHMuSHsrI+2{|xc^I`~s|CI~2P`f}(< z(E2~5ED`kaD~IcfN3W=<~(qokTR`-*C!zP^6qahlyG?;-lYs4jlVQr%(>WS-`S%|Aa;X{-&z+x@}A8EmFDUc$#IYyk8hI}VkFkh*H8)6pBW415+FERd?7$iaaU&;7KdiMVnF;Xl%-4}+wfh_z( z3iY`-MKTs3EzNIXVF5&x&iR&IxfJsKm-Qk|zFrZ%&S&0rG_i;4m`z{l}r7NQM_$InkT4-8>T{bV>J-BqqM~q{# zT9SD}uk|uv=zSbG>04EzVut{xH-VXW-qyjmWk=$vs3wlZLpQFvg0|hoF?I?Z*sFy; zFv^($D%GCrWG*)8@uqZdNry<`VP^Ch^t+luV?CR56=M0C?uGU=7vr>DY#7k4s#5KF z{2Olf`?QNXtbQF_jYOf`e9FYr*|f4cGIzlZb!lpDlMNN*{%2s})%TMP8AN>#7y)Q1 zw!{sstyzKdh5~YPb2EbYVS$k**K^ubdHj>ZzAZtd+67?<>ZNgXX@T`*p8JA*mc5ST zq&n%UJ42MS+@-L}?hFK`bD?&} zlAB6`p#&zY-?&9Kv&Sjz4>W_V;nTWHz-AZH7cX8sS>0=K@`Lk@2>nIh}Vo@=_{!hzpPdE)HT!>UykWvCm=AJf-kRNKIi zahG`82+|t5z6b5CcujTxAk4Y{JHkL?uJB*v<{piNfwLXoTlEwM8uS;O_&^qx$Z|&# zZR}|H@(nX|CJUQ89$M8H_A#JJ)#?XQ6A?aV*DLZwH0HiY)Y#YAr_8AvHJ-}_qmI&0 zc)68u zx^v5pO#+XFye0)|W)GD7$>B@<2QU}K(sd5#)?Dh4;1SD81>}h6UxOKv2k(@baUn-t z!1F1BndnZKM{hk;Pr-GTj zX6VS@4i|5M z^~$8r_7owqVEUpT3X$a5od;!A7^R$u+o2 z7tR7XWps$$x#h3nh=J8I6h#^bnFJt-*8(zw+S4qxMZx$^FhT#!_am}f6a4E5RJSw$ zC!Sc?jBL}Yni~wiyg*ox%-+9pknM?+Kb+nNL`V$M&c4kxFG;}8(UM)IcrM~PxFCCI6pvN_;dU_GYXQY2{ zC;kb^U==MJgKFRJ*ZeD${W~xGlN8`bkAW&6cHdLlk>I*Kz{WW0cvrG?<$zR?QAe(E zb1CYN0sMY#XcqXvct?N3HpTr&BI{&fVN7OgIf>*xdddGW5l;v$Py##8GzBs}DJY$R z8ARDpJRuunE)hU4gY=ZOe@)@{ki1$*9O&zg79&&qU#a-tiT}TtTjvF4LaQRYUc$1JO2YM5I-NE;GHCbJXt@D8BT=iiZhVsuNU&XY)Hut!vn zEKpe_@5GY<{|^#E;@?sE@k3;uj5*Fv2|`WwhwE6Ebh~h#v`qzkJzl_kg;x9Ltlrcah7q zQe%!kRthxU!)s~hD;Z(_V2{;~^laVO7^@K7z{Tg2*@4_d7qff!LTfV}d+DOr9a?qm z=~w#gG;V`H)XHPFVuh!(d-xFK>n7LZ>qy2sBR+|2yz7d91R=TOJ^pKMr*$Z+nc_W` z+BF8|ygkCtn70-xmCrj6O}lc3DZ9Y7@3EbZ*?pr|MnbN_VVRnVgxky( znH9>RZ{N13H~NUJy$y%?ouifFk~%-W61%}BhhLs%OnNC6L|E{@n$stulay_*`N6Op zzac-4rsD_*EbB5mhNK{0N1vL~Z z6xPb5S4cVZ@k_7&^v3qc9+nZPhe(=F-ryU&t>pa zttl~=&qzJ2p5c}&bC_Nbp#AL>y=r(t+}g=)x=ywQlq3`X@hKB1k^Z4M+uEpNvI!u$ z!l>usJXMC-pnf7g53Q{Z36_zr$ z8t%+@Fx%98_C3YG8RuqR5tYpgK?u1tVzT^tY|6ws^lS*l^CLY8q*D?n(z7^WF1Co` zr4i=V)?i6o#1%q7-iYbN9u9j}6Q$9Lxpj}hUc07%AXf1jOm@W?bs+Ik%BhPR*A6t# z7S=D%&-)76d@o)KDl=TgEOLI2O|vjH>0AO15OCO2M0AS>6cO;Y~~C!Nk`xg z<+x-)o8;Gr!}BHiuDg zoa^l__?P)HI%w@li!3o%WVB7W^oGlL_2QI{il_TjXU_Z*8YUOrk(mIt?*0L`t#@uB zu8rQ|f3W_n)1Xy963$oA!3U>=`!n!>+J50;ObAty{Q8{iA+`tj#u&oU$TIr0(6}*59th!x z>rH{+GqT7)e#4sUqlx0R1B#dUgB4rkThNJ&Fs5J`oN0HHwd>4@WW;dzvJw4uADcR?>A@dpk}y zR$u=;VeqW~rAN~bCo3)+W5-gW)ZZ?*BjLUcHh1cLoRwzx;WwQQ1P*M>WWuN}SzWxG z!9REolE40~M)8_dw=O^`NBi+$ISeKS9e?v_jUjr;@0~0%+62=}fn(qAhN*C@r9tb_ zos$n}V1eDabE;>ySe3=&%gdbg1@4FX7*j&QKr)+0)a>If!)*9KwuY{7Et2z;$EB=_ zk%?7YdbU7zk$Jnoa=#7n%J_#?fp=3{62UUdV@}KC$*nfo6~9IzG__8I>veNZ=hd30 zSBDTcpl#TyRrW{j!^0kdwWroJPD&#R3TB(7m-|;dugt8zEjEf83_SaeHGX<3eJQ}7w?wic=bcE;)RNT7Q|ZW( zk@wkq_KYz)^r519G+;DAc_?uyX_jUg4(0Wwy?$AWe5~?pc9v5fpS(^yrwfu1iF2QL zxt9B6yjxg*96v(&Eq7l3rg-OrvP^?!6GSq+502UW!(vdWCEMN@tf*X~tUeVFb0jPoR=r?rdF1BL&^WkM)E~S&-GjP=)PTXy z)5@$mj$4$?AYCQrI}IZVdHcpf;t5wPdqn%E(AnJ!e1)q9^K%K%BxoSC(C0uiC&jWV zbi)ou1*&9{dG2w;~>nczAwL-;GyS1O_ 
z8IL=yzUCZFsh*;kB>5a^+?co1Y^>wRiIubdyeAKDOy+1>mke8!2effO5Shp{5=5$x zlg@Q`IDBALW*v4c1Yp2iRu=?8W-s)IF&^^J%@!vy^bgXU30t&*rNGs0t8tJJ1QYWW3sRw)Mow}i~Dj^Ixid^Wi8 z%4{)e(mDWaIDO;T3z&WTsqsmG?MXRz*neD2?MJSW{}o)sU<)UVIQR9iEl}`k9h>sk zr;5HROH>P^`y1ZD5ljP@@6{R*}1pY7vISzfDJRpS4e^_&c4)XH%n-VUvGuVoS3b| zTN{^`&6YUox(~N6IyDfkS%I?3M0)8ixLyZbAb9t34|l)2-H7U7QO^V4D+6^2BtJJl z!Od6gGa4JacajdH0rTL7^SOc8_onsOm5Di7fkTxNW3M{h{CQee>TiGnL$Yy5Mq^?9 zXyBL|EOd9#lDw=}`GJS$t|L%$Zj_u5MA2}9;g;!!*pf<6CVGuKWk<5EZ)H~RAs48| zh_(c!oNqTK<<#2qHLcf%0d^MF7%F~1&Ib9-rsX5{R(b{tR0eR>B-gtmiAkamLGPo3WJocQ@jXWY`C< zpIt?)MLps)?wno&IT}KQ`22HbAep}oL6C+yCT=|e4`jogFZS(&9`?I`bcD56O z7Cp|j{qy6WyvB<(2!!rVBXpktp0}bH+Uv5|z)Uk@U6dqdiE*u89ay|NULwz8|0Kq? zqbt(EV{JIR)4GR806HZz`^A6Yleh6+B+y1q490#|#&A%{JzA^?wP&c%hWU)7%ra0?uL4PQtbOm#tZihpsSiVDR6Gq zl-RX4wCI+3kKa$Ku6#^Mx=yz5ThSsp{k?TB{x~J}o9Y*n!{~lHqW5@6UC1=6AW#OZ@pzk4^`Av=~JylQS<1f!NT?0>N;(RjZZS_yP1*P*El*4Wlx5#N`KW85J(UH%Acb9G;H*4h4)fMx*$X_RUoCZ9E<^8@I?K z7J7B<#@b=7zh>zHo2nViN;xQ@6s@UN=R&TIx6U!)Po(s$PdC+Ao{@ft4`Gu59$ux-#;DIoBI*t_k9rW8D9GebAtiQc zF&yN??yqo}>Xvr=J%Cojx=PWC-^b*4^h|%0)paE_zJBH=I$q$uer2In=u+S8^u`Zh zv`+5EBH+trSl0@hL%8)tCV`>oRqMJ}OdEt1yNm?XJ`d*M;o)%$;XPv*m&L% zCYW7j!H8A}DO}FpdsrDJGc@;gaLSJNW@VhzefV+-BWPG@bUU~)rK3cFJ0WQNVete= z5}AZdRSq#P>$wrye=?saE@xvv?y5edz!E_2~v8F8p#b>q=GfV3-AN)H!gQQ*4J3ihIchk&FncUz6- z&sx{>F!GgSwQ);RcHK#iv&FxXdPY)YAA47Q=e`u}Jl#jZCJ)w-7@P>HN}kd>@b=^J zuuglAj>zO1?-dLp5+*0-oY9gXpHa|JnlsN4U1cNM=Y|wL@(`>Ip;xX6bpGa53!5~l zB=2>D{o~~`xN8ScoaG?9F@1gFBAHqIpu8md{Njmo{}~%Glko1GXXZ1D)~(#|OQLtw z?*L?K1?y}DUYUjHo1mIhvE0SSOPHj8&7#pqHzrS9w_|Yy>EyDGO`~j2pe5Cb};3d+a)!#6K$ai154ju3~#E_l%ij zSF876gQBe?1*s5N;m1RDV+EhZ%__fz49T3ps@=PHuiLe1YF~(<4mO|YXKb>dPV>;aHVT0?TAL)OCJAZXWYNgvGNDm~?oRvxg@!p4HlTlID z916-QB<741xVK8YBDtRhUX|!0@H0N6be924hL3qSS@e&GgLufcz18x+;-L$_!N>DF zq8O%dy+Sif5yth_55h9dya!D^9FGD5i<>pG3JJ2Zxok#e6u&k!8z(9a`f z;zM6n`$qB7iOKA|8^#xSZhLN!qMtMSW)N{bNlHD%6SG9kpKqo>43Up{m=5*asAcmA zuqx^nqEm*(75BTAu^PNpbFI3?oymgm*f3*R^pH;?JmiMRfaYm~@Yq8CJ2HgOLRXxY z2>|CdmcM*3Grd_vIV^D{Qz6$1IUJVrjU&3+udhsApo2N9Ok}2<(D4KY>{@{{C7if1 zTiahY`)0Xp>T%zI2j7xXWgOuEw(FoKdIbFW9Y~-F87@I5q{KldkdM=Bh2wzt24OG{ zDifQ~=7?E=V7r|{8Z2n&<83hS;N_zCe@aeTr}Ky?*yJk|cGZUY<9O!+tvN|Td_3mU z(`#5ChB)XPSYNX~aN-DA)@?!IUUUX`g1mn4FZVE)z6sgoG_xuHQOa-h{rSOkSuz%& z&9tvn>2}2B#qr_v<*5@;DS|G}-X0)_zl>CBc}!y?*q!hL+=TaV{PKyqStRo@o%KSE z5j;o*f|*5mJG2tyyH}>Xrn)m)aUd=sw9(Bc4+blhjTe05u&vcwkppXxNj)A?>-S^a zzblNd09k`Z3Y=fYm;|rh|Kyq9XjRn1n^kFRyvyCa2i7Dh1$S}ICCo!PlRS(u%F^S{ zoYr503jcKK_k+6xetceMVNvP5&N$CR;@AW3&|to+3YpfXV*qs(YZ(C zWfrj^gYWj8y4f>X*!y^XRTcFC+M8`H0i5;VFeB`?Bh0QZFz0KKgb{6KMn*tpQ78M2 zAf`y9h@vA}{FEOxR!tN7w_uLZ&!T{k6RzNmjQXsc#n zva$CWNM0^ANw2JPadRtT5GDwFZOK)XhjA2Gyr!=pHB6~Irp)slqZXD#9GCM%4uvHh zATPN*1`9g~9atv$`@c*b4X27Z1rF>&$4%R5f;#kQfgLB>@6M-PU>%E82ThjRYHAsI zBj%CG9f&6r<2gMuu2pRpR0m5&;{v&P%h*9M5N77MybHbnS~RQ}5pL_YrN9}82JjU1 zi-6_`(Hz`Kix1gSgxTZ)l!v4U%{(}^O(+OQ$oY|ut}*+#Qi&pP&L9_;^9cYoQ{$h# z?5`ZlpomIWGA%CsxavtkFwE|AQ0`Iau2@gAF!)L%>PIT^N;zWai7uwbol;#B%<;rg z!b(k-NnD)SY*Dlo19w(&izIc0eY6NbC8OLX2p|z<3x_$_;+DRrXB-=iaeR=`onS)B zd$3Dd&|K&;iWTftPK~PlA%6<=#Yrk`%$*BXZlsldGGYO$la-{xX(xv5tqbQnkiGW_ z7j`+|nWWe8PD8);4KAXj>jTCShow$}lS>sV`D#G^g@zjxm}S!lLLbY_{ggEv)pH00 zd+EznqVYALPSedu-yl5 zVds+&QUSlAM1g686rhpT4VdDg%1+qVzJvK8&<3hcqo*Kfqp%=fHfI3_9m|oxL*LYa zzTz_QT{fO6S#z#a1yEfan!ANr&e&?UoS?ow;|Vu(QBxZ!P*c|v-XKghV4kb=sYkC| zdptubkHMF_d1}@rUE-3I#ikp^(6a?jgaUL6oDwUHG-BP%YxaN@SlFj(ej&2GH6&qR z1JneK|I6Uscc)Bb0>N05!~if*ieejUbLYTD3aCSkPeEFQwAgUB@3rk|+>EJW*h4GO z()j8~4>3*`t9`4^Op|L)zq3L$1w3B=iF{b3J6=38BMCs*UD^& ziQpP*a`3zoAF_wfK@iyJXR%;!H{0=6JRR|&V%#nF{ymJQSl4>R< 
zpx$fc8Mo`#SuE;D*e}Slxr1=6n-f>LVqH3;G2JBy`sR|aj2x~7!Phh&WOi_KjoR-T zSk&eEZhDNAIFZU4U%)~;UyL@>f&bDG6&WuBP=SMf7|9EP%#td⋘Ktc`C2vo42Qp zgcwVJ9yrAN+*|>1zIL(i+!se&XTXlrgTBAMCRmjf<}?V@-*9$byS9|`ai4~oT6O$Y zL^8#&bzhjjpLtEe$7$oqRrWDrH6yHQz@P7wB2T+mh3&gS9f6_;vc5NbgIHUWQG8P( z230Rewr8`XKiw@140XjvrQ_A}OIj<2wLwZWAtLI!Ij%zBoECMCw{#rW7xXm^zMTi{ zdtn9QCJ$(%1dMLoW|EN>=gqDA5;L;d@P4|P3uK4Pa*>C~i}%e=R1+YTm2`f5J4)!n z8;=i)8?W=67e~a_eB;~{qfC(8uPQd$1zf{?n%}rET%*qw-MiEfXLKh2L!6I_&s5-5 zsXUX&*|0VN41Jw7aUxO==-ml_=Rp|G}Zox0J$%XPtqoXEJE`kdO#s$Oo8?tR1Fp)slMiOiz6J+&rk4jJRfKy1Q#Y zmoD`}1NQ_sEOpqY@Mr0PW9mCHwHcs1giH5`2p_-Rzu`TLDUO_)MV0gLMFXj(oB%jj1#Td_7;5|CWN6$-G97A zXwgsek_Yq-bg2Pt;s(brl?;gA!w824FDgFh3;V(Xcfd_$OfSiLA`9;Zk!#9FP6JT^ zV}oV}klPP{Fp~Pv}Bi#Cj{Oq z0*N3mCUcYcwTi0qtVx*Jag%qE8d(Nwu6|(1N{DKoQp~8|q{_Rj#yx@Q$2+V5h|eS$ z)zq!;K{aNM+ zkZ~?U%cAh3`orlPA?fzq5SWCAJQ+^4|A^^r(#mD3b`2RsWJ*PX$8xsbBFV5d==(Y% z3#dd|3Jtv9QNZ?|EW_g`hqQI@=}r$=$ASFb)8x)|NXYqz{JJxw_0gBl1wCec%zCk0 zO>6N52`)OKc2`qgGq1z)L1tZdAO|B!cpa95wt_0RL%i?c)@t&0wH5AD466V5SZZsjnyOa^F^Da(p6I6{W8ovhjZHMc2=>ssR zeYTGvnMEKyOarO-JN24ka(o(iFCOgkaYNkb>wf_asr>E%PT!=q(B87&6qM&dTRE&{ zSk#&9beBPrlV_9mU$8jHX&JzsHCr_Ij&kdQKxy=@Mi1F390Puv@ALcU-_1 z2|B3mf>o`vJQ-%&JP*wWb5RJ#43VeT1vW20AyXO1-?zt0e#bfqO;oFwy>sF51rW9? z>RFN1{gw)tE0w?X_nlkFl-m4H{7q7IHUHxd&`_lXOzpg$dk7h~^S2${nZnkj765!2 z!RA=8v!q@}YTe?sFCZ`cb6o{^}U-ni3g_ z_6iOjF%@n8`dRzzAC4BqM9yF}KP(~#5QU?wGP?PUCIJQbL$;>WS19X$M>(d_>U z#!hMd{~5u!41zkDgytPJp`h#{VPTV!3b#eznrA-%kN(Mt?Tdb(oeQaX(d^WY{JH~K zDLT1*M=rn(Bh-`wx7Fxx`E))WApxmeJ*GHhD^&Uqqi~uTh)0)7+x9v#y(k)2!qWR4#c@MP88{KgJX zsKzqMn!C#@16V?nx2lFam$#X<`2lhYykPM~w#AO~6$3p6sG}LiU5)uS_tQ}Rl7TFD@0!oIP4>yuQ^nXoa|8WEu^L%?-zz%`;H|y-)CY2UY71YJ# zn%|ebZg5<7Ucj!dC7ba(OkpE5#dwl#zXXM;8|R{Zer;NBuc-q?%GifevzlLgogq_{@M33wN`~b8Y8If*b{reV(J>@z1 z+kx+7lWL&nlCN#?%>2k$v8fzD7inE;PrSfQp7Uq$bVj$Ahc-*_n*;xJ4g8BwD3We_ zmJFZ;#0I{_?L_v=4h{KWp;KXg5vrCbWM%hTnY50YqTih@&F+NPa5%EvIsOiC>?lKf=oU+ysrJG}E+9`xIwJ)X9jwEo6^g9N za=Ef2jtVBp-;S0BdtM`6pOP^g@vz*aWjxoODE2(_W2F5N2&(!FcZ4$)ZZnBW-3Uzc zJBon@nzfD)E>Gp&UK}fN@qaJGf8jS-Cu{_B)J+ zMht!svHG4+dy*#s?z<2nDR!U<>YsKoI1+Y6OP-{ukPG|Sui{1;>%s1!3jm3EPI}|f z-2hf8`=He9mOP_Nt(jUE9z|@ek2T*Uy+`5Qp^)vde7Xi=Dn6<(*=-)01N?P%SKZMq zSspx|5+#04-^0ia_i#;1r;L_9r^$sUq5QqV?WlY7AsmF#Y}=-ml+B+*X7(8S0DxQl z0KxI1VOy}>^D)u>a!_~C3&VSjkqMwe4Ge+yX|ZS9gMgd^w#DY*9zinH4f2kp6)rvj zTNtR6^iQJ*x}I3TQLwIdYtAg1shy+0LZh z5-6rUNy@QD`DA2xjlszpk^K`EMM3AoE(L$T^=r)_qeP74j=}Wb9D%2+D;Z8jn9a{b z(~XRML?>A#DCt1oq2Tn`Epyfev>6uiYI7a>_rFe(?Al;@i3`VBp#MP=?m2p$fKn*Rs%Jc%*VDzd< zPB=H#E3vmEd`DZ(HljQ=KS0?Gk{q1K)en0l)taV&zIC{ixiCmhLQSac2$l>QhycAQ6+TzVt`8Ew3Mk;8on%Y~b^JB1A;|qUsM>tL z4xIpxqgImh6CI=^II%a*yI<&cOTsPHc<04jCVn>;U}`SxZnkSw|B-0X+Cs&@6;;IF z2XCNGI=+`|^?{YQAbI43HZuP`At3IwVcp_>HOJ)z-&Fpe72g~3OEa4#8DP1Wz#_$+ z?qm-#HO|$kAY}ug)prsR++Ft8v?A}#_=?K1X_KqMFY2o@M(<3$QSA%mLN0+$3T1 zLFqs<@gi8xp&zUz3LLi8(sItX(5R&}PQTwx@`he(2XDuQ>vYc6l#ZkB*Xp;XC|FTODpU_kh|yu<~;G zS%XBu7%7O%T*z!LM{izjiezigN+`9*W;8wL+`cxhSI?4Wi>F5-M69%@h-Kb}*A+DYDqP7W6QOAi#~3bw2uvBZhrtB(AuX zuid~1g_eH}Dl@)ew-|`7fyxk=c(n5aWMZAn^(UpRwABusS~n6eq}R|ji_2k$ekayq zvi;a^Vwfn2)OQw~X$f=Q8}qZt#?q$^Qk(P)A``;{X;~Rx+M{(Ur5hR6OR|v*cd_ui zqN?v?EWq@zrqxn`LAahH-|GCy(m;x) z%%jH?HcZBczNrd$6IgY2QMZoTx`m_A^ z7~_fVoy|;_e^zMPx3?RV3~Qt~$}E}P|MF~pekGj`8hvH0O)x<#&v{g8ZZH_^SY%@e z7F7M@7^m+cW)9+d<=LDszrmdR*gatZc3+**YBp>f`3dS=si~Viw;-$DOP(&(Us?4Y zlIHD_tNU^!NX&)EgzFOpmh@vA6`0#&py~*2t?4c4S~|+*vkN|u*U>#$O)D{@%LBIl z&lcK*B}i#FTHAxBV5}Ni1mL8zEqVf{%e5*`oCZLqGq}55AnQzQ83Fd)>p6&{ZJ129 z_fEJ4GIQj^TUPHkEoS^fs>k&4GRvJZCXI8YlUd3=+8zWhk7%58GPvRk| zY55nrZvXF4?7~43Mj5E#%XhMs{s1RdC5_+URi8q(xYd_G0?08^{{~k!S1`a=7{j~3 
z@&!Mj`%w_kM%wgz+wk*C0{iCOnM8&pmq24QPh{#8y#%{S?1|F|2fgP>RmDd2>IJ7- zGj@kyaz)MCQh=3fyyn{Bpua}ljCwIYSerZ|-1Sft0wW4*s|gE(uwEL&`Ab7%=phY= zAb7QltXYkWsP#>#wYfz5=7O1oCz3l|F}vTpuDrF-RsE*PsjeK*Vby(IOG_)T)@0H1 zVbeEUb}G_tMa+I86kBu&tR#u&0W5O#TX420nEhXQ;6L}oNK3m#e0^!nK7>bSLk8?h zospVO+`X!C=g9Rizk|i}^fC7F#Vh?~*6CJO7Vt6)M|?*Iy8D5SL`K<6#Y(^HY$n%! zT9xCB6T~&vjhD~Z*P}( zg^n|b?M{&mpBhL;?Z<*7oy(sz@xctN`twgR^2W$jy;ZD>Y^C2r&2Y^r_Bj^}>DPp$ z9))Vaj&$r#_ZKjd4GbC_d+mMY$s^HnxZ95`xMMnhqASB`?0eO;51GTZ<2hKY4VR zmaRWnkrDL^9C|r7KQDLF^t_OH?=d9OIn(&lb1n$Q!Ok*>HYIj>PW0Lhr^hiT-o@CC z-j@<=5Si;B8WxW^Vu`1r5L90OY{NH1EW%o*nhWLOrq(mu<$j{)?T`tp;vJX@JW?os76WW z^reh29Pf1Uk8|zsY|6zB#W^pG#ftI_zVQy+GbT(()l5C8oo|^bh1!6-=D7En=jr#d zxvyW?)#uOpp|GdO>c+z%Vc|IT=w2?^JAp42==Qas%Q%9eEjvZ;)1N12{z@Z`Be5HcUG|Oi6=@|M1IgD6`fz^#bqSZVa!1AKb(T4D9+FDd zGLkMBZDEdQTR}rLOCM%MeMNYz?cQLKI4Np#W3s`lvV3iB5JmTj4^io|UiS;mgzq{0 zBB0#h(Tqm+oqY(m<%(tM29ylz-UEI`vz@!W_a0WQtz-N^I)B}r_;hYzK|Q!JiXZGH zPw@7mPdBKl!(=X(-cs~*ug+atT@3!Q7e$zuoPBRqmgRz)?y8`{1g~D7t?vz$c#gJy z@eARe%t~CfHT>w-CFwo-=Ew%OH|9pY`ueqIb2=}YJD!vdJol|V<2S2@XGeMAfp;LVo=xWb!;KZNShsuR#NiCV)-ZOSS zUc!FCNz!Q!!a6w~xeu%o*9_JMx6ST3$NfUsw?+lv_ zWH09Sdfc^6ppJLMP-y0wzB^dt>gt-VmQHtUCVkZCtxX9Rgl?fdM(0aO2xpM$XZ^0n zBMqs%rs19imd74hV{)jkUDJQ|Q6o$9X?-}anRi7l0s(9BnSGmnPp(nBpg=anb1^?P zhu$C@LZSElf7O=Pw(R3i1L{eUjiqh zC@pn;O?iz;8+=ad3N`ucjSW!j=4Xf(U0&_4D9dqJIvA1|DwijHa>J*|Jzayxn`%xt zIvat|X4O+|_55@iJJAQ_X*(U=_Hd(D>(#E3bv%2t?u*m@w%<6G>}?fFdpz6jHT2kz z1uP76PsE5;*a@vkW@wI|uVdXm+c<=tpX1ix6wKIforu8atW<0_tN32vx~0OeCN zd`o@nA-8ruQ|Zc7X2mM|+9yX#jGq!S+6+H7Ihu~U#JKx7w>~8W#nsLnIQ4EUhY7VK zNy#7-c2rF9qVg~CJ8E_?g6CD-E&|y5IzL_UgXC+s)zBYB$Anxzn)SXOe1^OHSD%NP4c)m-Xm`j}(2|TX`@4BrWrMk`?2m9N9azR-{t1!Sir( z{4=0fVVq&1DdxmTreMHp1oim}ah7309i5KW6O`E8gN4sRIfIDIPy|O>dzn%7{E&`0 zh06gEXr$07Ticxd2U0yJ9ta<0VoFgjtb8(mw$QTQGbQC_D3{j3zS0!|yep;BBdw=x zx$oV4gY@p?-ZQ(&wKmn&s#*B(HE!0XgsG~%ohG(yS;es?gqRc_kN-}uc$C?d+35ST zcsn$sV@Zg+QY7|p7A*K0(rxWWzLw(g3)=U5-p^K5KNxMwe5Ll$Zm7;{MSAsmpP1Mh z2Zgz_^rcHIfzcwObI?Y+281YcGBoztv(L<;*3Uo1e*uBQfPSR|O;fByruxvS^@-?< z<=I_AUiRbu(b3%6MdP0DtNq3^I;}22Y`zt@oQ!8Q6hpk@k(Soq(=}}1XWXch`W7qP zlN!SL@&1P8QhF}|9NG7M;)@S(Z36>7)S88sM`ti)zAj75@09y+;*ItHhrPEBi>m$l z#R-v8P!JJGK?Ffc1f)SE1S#o8lx~o&0R&VeEI>L$x;uuH5QG8g?vRclhkzzWI#%%6N(1PL}P7I39uQ;RQurEx7mm+l-|(LTUkL@ps8h#oImuovVZS z-XR2u#8k-2P9e^nz4i&8)ndEXYKp5zJAH(5{ZaHNR|9}I$6D&M&4$OoVm0)OwrF}1pOV7KP;)!< zd6aHB?PFITv;|~Vv9>mG-nE_e!>GUzXD^;4Kz=pBOGx|wKC3x8kIE*wi@(d>*uwnC zU`55$+r|;9q}cz{-s1K;8}JdI$xcG1syhbVmaQ<|-Q+z*mxw=doJF6`g|^18ER5r8 zhm3a8WA*4rvIWL;k*8Z&JA;E0(og%|1T2m_0*vO?XThQDh=p{J+ZZrd%~f~U=C}ki z@JUFfj`7smOGrXrCUa6l3v=)0QQ+ap96nI>^gcMyDVZJ+B_%i0=n1a%!1Z!3Q8)M+ zdL2#P7ErB;^Eeb`{bNtF$Q^&n)$LhZt(;R0^Cz`K5!9^p1?pWc0&;c}9$emsm;_37 z@i;#&c5T}xy+!M^_B#gdJQubWZn5gF;r^jxdW}JpT-M#`ew%4&X?9xCxy2^Cybt*t1@Q(e4Fl3+py4^;!1bTVB;7 z)s2W7DOrEHMfT#PN}Kd~ymQoTPP&1dpv*u233&KWKRgF|8n1R zG4bv@LcS+9%LBR10J|$?s#3gI-Yo3wi2$ACpHsdsJnfm!Q4$`)!HgzS_$)*-kSmp* z?;et`J?w2c|0bKrpjMm#zG*X7h#wyxzeGgn5Q*hj2IS(v^+VL>1aq=tD^lRgXn(H$ zYq*C?h8@ah94%927`S2@g5mYKaIULDju^geGn!8XaK7ZP$-9fgtCC)Gj?D>DK6z`} zm-VY?uHUn=>atjl2wAc}zt|CXdo=76=sh7dgvD?BpMgHGnVDF>@Vi=?nke*A8(LbV z*ZtMAobO{(&fLe8&S0|qb-(0e8AktyogZ~R1li6kr>L*V=4BGAb>4Y$Brq@NG#F`c^2(yG(W%pCagxUK*!+D5rLEVih#Y~; z1moRIU4Dt0ySbJQ7Vi5yThc_1x%Ab7ysJU)uHwrA}$y=-xoQ6Ruj5-OA`YnYc}Q z>9Rczn|5iFoLbhe6$=)zTGwtNDpBVvtAa0-SwB$@6HyDn$lA$MTsKJ;hq4l!$>qcK zsG54ZOn_DI^yKAbx5QZ-$khsfdpKuGo-en)Osv?tyrnXlMw>?j`hDoH!&&9UmPJqXFvRnQQ`p4ZWzyTWGH#x>m&VET*k~8 zxj208^=rkxoIcbJtsnc?;q+?RA#i&|?pc#DV>cKCqd^P)_BLL&PSxWQPRljzxKBHC zTofv0>>)P{26@Z|sn)$VJ`KCHf92VK+8epCyIWt5zujwHVO-_LeI6U-q&kE875SQ( 
zT2~>>rSJh<_o3avmfKD_l?FpL(Gs6oU}R?hJygVQ#PEhIB!pf1e@7m)|3~C;sdubs zpQ8Gg3qDZNV5zOXX-Zy;H`-H9SKb$3fX-MwGK)`DC0-ldrUI;5Oaq@jwJ^!=>)7(_ z_p&77v$qp$+E(w-!u1#!@-q4g=b(_dVtjjVdS-Jus(Z=)SIp=Q#Nh7icT%2RL3{gf z=Q7)oWc5Vbu6+Ck%Vlo`BNnlf0ovMWF!z6P#3#J6-9Kq{K7=xg{}~JL7Ec#PIQ+gv z(|M$}sdZO|Oam?9+FjvA?`FT zm(B}29^J`JN;&TG0GqM&RhSWJ10YwyDZ$>wdC%oTnXqa9g;+tC>(;~0VokCfYY)vs zFwo1k)j90RG8IitP19i$KFY*CN8cld5jC^)*cF3`&%YbKSRZO<_N_Ky*T0Xzfb)NN z`5U&w9$}telqa#r{U=!VEyja_sYXH zFX^oZZJ`TzhCpIX!q+^rfJ2|C}V;X!^ez?`zm14l7 zsDvG^$wl5#x?!S)Ggd1mo`QJ@UI%9cLPBPx{7d>2$V%lW_zqz-JP-6fva<%a6U=<& zmA4xjKoCoech%~|r(zz0uy%T}o8i@~@|WdF{;7pl z*!@dc9uGD$qElIATS>rYcH_n!ok}GRF%ASE2r&gqZM!=;Do7syZVbfv5@u#NPM0OM zA#Xj?g7%GyGJCPQdsh}Wl4}^*d~rG7u|?e}2&baQ&!k8Srmr3usWz`-A#w<1{)tnf7}Ju`DA-t3y($j zUf&wyty|M0wj$q-y#)JYm3Nl-B>OB%CYP6SUG_$oHYs{qvUuO)F8WYz3-@R1`$>i9 zBpKFYlpqkb3eDsR7aP$Y;0+9UpIy;?YppYLArwXGTAx}0p_tYfGz}OW3$pwQLSE41 z#KQXBWqI`4ma(hZLEAO{B}Hpm5GKD|m%cEt?09^H!(}wP&aB$H!M4JK+sa}ARJdVY ze%WqmR=LCiZe@h3m!8n<^9uni%$0l%-{NzN2Yd-|buKh|+UHr?L?=zybK%tjmS4-H zmY+rVJcQruJT@?n#K$QJtAehvi4j-i zdaj3tFEUrm_7@co53w;7XoQ+`qu47*k+wS?r2t&3gokfHcK<}S8>?#W)|2S*#3zJZ zmTR!C?YSCe{p2s*&Sf@3FLQKwlOHs(n2oGScHCtUt2H`!LM2d+cn&724EGJfJ#ev+ z=Fx1UpHH#hTYOG2M1}Yt{enViHB31$(|Hw-GUcyhRwse^|E`h^?+K=O9S>VBvK*A?XqN#Et-KUA$?Bjvpg`>Ol3~x5PWUA+^!3qEha}Gcmh{W&xFRCn=Ud)i(Ra(V zv5xl)e*3m=y*Bh)H^`tio;;4tW4}Ks1qCkMI3Nsmc|jcd8w`WN zW`8m+{IV}Qq^oP|JT?;WDSqdi$3{mt#&I7dkX;+L8GThR>Jt_aByDrFFh<dOo=*MJSYc=+w&*{k!N=h4i`fl7{m0W;ktfCa^q9#kf{n9aZbYm}NzS;P`XC#M>5 z@B6G#Ij@ceE-o%FVTEF1q$o z81ry9PO0VrBYb8q6jStg173+YS#m?-9e7`ENmzy;L#s%$dZK8)5AAHj$0w<6i8^7C zA-itZ`Px_AS zPZyn}Z}Zv=Uq5kM;EofKp-s&z1l5dYs(H#f=xVnfi?=j8WDS;lbsLIvgb%lQD>TJ; zE9~gzoOf4lR|{If-1oC=y`QiiIYNp*xrlY4OVB-lU*xyt73;qODTwOwQAJUdux0e2 z?Y>k<%u{q_U*@at8i|n;KG%gwNC;@QnmP9mDQaFF0cH~L-oD>qR$E#|EakD|80bo# ztb9|1!bdjBHwt={6tPjjZzQQ-{0*)K`i7WF$QF%TDjZGH`<6#amtPT(v8I_%>yRXt zJ=gtpv1@(2M52SR0%lalnpcnD$m`y0qdG80o~UjN4>sWdO6c2>TOk&l5Talebbjgi zj{oGhu%MAend5g^o@XRveT!Z-ZPg=O>a|W}pXYRuV-u_W6TQr}YdokTihFzSfXe4B zwLkEyYGt$Jcke`rQl=@ybKc)e<@!8wx5c;!%tBz%lYDrky$0Ky2 z`RGe^n@0Lu=T*Vz_9pZnhjktY0Q_&i8PHQP=4S%6QJ;JM>`ZY6eZhr8$*8-(h=0VO z^4zCapC%-lZ=4TSOiah>~V zo|&nkiWLH0zC@LK?1k>;bBIs}rl)6Z>~sVvi{2V@&@TmfzHq^^w8H~1aA*I;zLHFr zL1{c(Up)9KbvT;`Z?=BPHGs^mZX|u$n7CYPp^>Ij=}Od$Mi;rW(ZRYhx1A``{54vJOS-?H9|hx3gpI8uuNNco8px6D&-K*G4cBceStQ5iJF#w|?o{YdI1jW0d`_E=l1Y)KrQyJ_*A&|eRf}a{Fs8OgE9(8yT!m@saEBR6ivT90 z|9}cebz8ICkZ88S@0Pp{Ge?KXZ>@lc0uWzyH0epy?5=Qlz#Ub-Hk&cYL7Y&Tkp51Wu_h1HVky@ zx=7QmHRUA8IC7ih{XE}m+_a;{N}j>dFwZ_{X6IB|{5S}CesPWp$`7MK+Wo^JmIm)H z4cFB`*PR`g$>l5$K?kk4;+X4WY>In*goRMJ?-HY{c;*gl=JsdEaw44JnZypdb&}VJ*h7dhiVysRb zm^$?YZ8?ye39Fu;-uj5?KEyQLX8a>cL=ES~oDnjyA)qgA$!1ih(r&bUgc6M+4m6Dy zGErn!sh;e^(f7o8R zs~q2eb^MVz*A7ReXckOcOaM4<^lKVtY5S^{Z;ecMvTJnIE3kqL!xwWb98+EO*izWY z=)Ue$HvaLBj*AjO@GP|FcTHeWPkrzRNRmXd>%+3|a0DCETS zt#(dlYzKE469nJ*tD;N7Jcp<$+|*VnFJ1alq#gU!my$1;%S?=E|8Tx>|IxNn^Oqsh z<7)FLA^STZ+5P$;n0NU=1kkfTJ6ClIw~kam4v2t;rlMns!39bNPRc;g?FkS6)jvV= z3?6V$s_h*yyoRXKjwMqGaME?v)hEjBsdJ?n$37|WR>)MVX52OF{gOeDnP$vQrmJ8J z(?6tpx(#y7GriwQEf{S&)RUjz26G*+0;(U1GQ0DYRmegWE5_iCxWVLzmcCbbWH*V} zRj%q)?)DDJiy#Key{S86`7_{R2F|T&bj8!L9cY9DBs{Q$SxE(qORV&c{6`C}lz@7CZH~o$tfE{QhzydWRBStH>H3vlFuG@M#_u|a0YXs6Mlj#|L2-LnDOj9th z=&$J?ex#mp0usTkAhkVn>jzJ{!|}9a5vTX)wJUe7z4E!$S=65akx*ioK_I0uJ|)|R z`Nz&sJ*ZR0MwZ@D04~a6&MA_LA8;L(TSgu_yG^PiB=KRdU#n+1piro^iae6XphIhq_jGXoS79+3CYE;5mG9Eyl~ok5GUp?Xx&?tFESdx;4wX8RK{i zSW>(1Q^{)%QT=S(^Xa1nVhxLh;eJ1Ow8TXjc=3O>H&GFjkbEo&3$u;Ro|#$Clw|y= zO)J-YD=%xL%$8yEJE@cX%R935A;04$Lu(+Y9JaRIKS%RQBJ2Ypr`Y-|DXj5#dXnIN 
zA`F~(kG(aR38R~i62B2(muzc2_&V>fYoTkT5xyp?mN{`ys=dyex;HW*0E-~S2HuQE zz~ZM{G1?dT!Bl~^rTJo3udJgNidcNW#w*9kqErHk>ZX7jaw&iBAoj;N1YH6mz0uYx z{;#?PB{6A$E)?*7AFfjyl10ENW~-*a3&qxujfYX=X>yC@QG}lRWnG|_M`IIgGV~SS zmo27$GBTl7t*n{1xT3r~(Kss<@;N&;9SXXqUv#|z_%2iIGqbb{=r6didVm*ns?U@O zW(-cZ&U{PCX&n07qdWlz9C7=M#DJyX57LXRn@Ld-+pX&hmD-NL92XQAc6ZrYbG~jb zaKQXYQXM;?s`|W`VozYE$e6 z|9+6kQOJn8p!wbZwa+bAzzVpQ+m!#DFaTU0gF`3Mv*>1Y_A#rQVADbjhwl8oGhG1W zY)HD8jV9UdpYpTcyBYyVDBI7pVPF5*XPMJ2Id}H>p|`-Vrl@_K`JFBLKOqHJ z?n17pwBjEP1V1zx1+BnDkDul2(U`-{!6nn{!ita26g33Mvh4DmnAG2Q{sQzpUd8Wh zFFc_2(quJ={9P|}NbUl`v2D+EpREa~{C{joH*}1O-#7@Je#9^j+;zm#c#z}wgTNf2 z13kL4u=p<(rcwbbXvt=kmi)VdyYev1!^bHLug^Y~{|*R`*Uz}<_nm#OfZuhLJv*9) zVbE=!sSPsyT`!QXCL}mxZ=vY3H4!NN$CliO^et3fIW*|>BP(j)K|FFUp#D6FoefxS zT-7m^@Y!B~^Zg7Q`1L5Kx%4w_N)-DF_AtR5Da3X5F*&J_s+ixb81k&fcp|@sT2Lnpg=48Y>MFiy9`ROrPecT>SqQ8qG@8! z1P%;JIoAKo?EXLVhx7l;A7I!2pZSAf@sa@FA(d|x@!L#CL!(3BUX+0L^EVjxr1+WjR>UCi!W$;Y z=^EmI@8rN*Z=IR^{7PU})R~O^&ukHNv(^{TFT>BR{Nu^(?m+j?il@4Gc7Iq77E%t> z0!;Tj1nEz5OkeSgC$vKO#N-^YlAj_oQ(Rv^`K%pLk`)P>t=Y9pe!T*f%ElH3+4ft5 zgCpVEt^JjHOtbBg6W2SgTQj5}Ond6{?IFRcLjbES(JJ)Jf@c9-XiFx=`{3UuvI^Rm z+_w78Kh_n1m~4(Z(59PrL;5!y0S+z~dd8kK1u-KddLbc)6t9+CJ10kO*;+@GdMs$_ z;Avb=emoTZn5{ecpvZnffvR$61>40X?D1+akAgQKvVyPvXa;mockVL{R}Ylts1Os5o{m#K#F-^sV<*M z@k*;IBZm{U@Ual@dnCeiMDgXh|%>eB~+_#MX3(dB1o&e%h^u5QEzm66X^DYZeGPrsm z`X5-Xr3#?R9qs$hF=wJrfgYu*vT_=w6gMT5NHvZ_zuI~q zT0>{F>Dh19d4dZaWz+HhUVJMHP*2lOvIPl$uhIoWLA@kFlg-(~8J5Cwu#r2IyiF1E z%i*8~uy}p&B29(k@=k{;>=;Ziyzeo8#V9#t0}$!1uCDZ)%y^8l5rM(M!9&mbR`vna zmn$tfHQa~yt$ExpFM;+AIY-FR@v-JTAKtam*Aiv%UXyEUNdwLxn!N9{bZ*+nc50}| z6tGSZ)iJehP-iA;GybZ+?-gsg)AlWr1QGFgp>aHR9ioG+IX>67#U8(=-5%;zyaX*S zCABL$)z5F2S5*9>BDJ5p%dRIB^rU_cgr~udrU<&NC9-l*01SFuGQG;I$6i@sSPWRF z3-cb>gU+NUHQfUYpKOyJ_~Kn)N!ERDTToDDztSqMd*4R^W%v61BXY@)d73yiHIGn~ z0txdl@@G|kFHxI7dnIa)a4zY7r5YU@+Yx%d$L-_oqg(Uh@<0kbR426s0;2}WH}nSH zC-)GTG|)+)Zp5vt3oex{ZxI#(8_Pmy=PtFZ5RxA?#pC3)v89>OsOYBgZ_+ z2`&jfzRiE>?9#D8T|egG8pmE^0Yi}8(csf71bCnl)*88@LI_I7U5MKw*}nO|Jm;hf zI$<`B4jgwJ$2||al8pstL1)&Adz-~RYXrOMjf2@$V?;c)(=uOk61GRk`9(&%TJJFy zA)npHc^vh|ZOJCC!hN-~BT$ONaKfg~6d%>T{rdH53v&E&Tr&3LqS|vm26FXtA57nF zfx0s;Qu0A2Z}=IgAN?|%M{_k4RHmkf*I4O~k1q~Z1Rc5!f81O;@z|sM;-U+>1YfR~f$5j0n#YP}Bno+L0c5#N{z!1{kNv3$P9la5-;SIU4{kEu3`6jmt(OL4fus*b;ZaUQ1yKg&?L$!Ce|Mb-MC71_-xbd zfh9<`@o#3<dA9s zH;STcv569C+w$^RC9m#tKjx?kD^KLq8hhWYyq_5;j?spVb1Tvax$i55JW_W=#E(hl zM1WFJ{njQzrzPHp-+$H%#4aG>>Py)uKN)D|47>LRb}|WCk5%jE=yN=E;pe5}ykuWs zZ&8ANCQV%5(?bgS1YcIQ1b;f=nSm-aX7CB~veajMw)VM*sE318m_kAGG1X#!QC?*` zCLbj4-8;Y(3Tp&;{i&6>p9c(gr;bNx?AeI{@uEF|*Dj;O4$uEFUMkSRy5b!F@$6%i z1kWGBL^N`B%e_!nBRI4Y{no5UTzt#I!kkqBbu3tFT|dOlq+I>byw|czMp|5*+n~TI zN!xx*iFxAE&@UCHe9IPfP^luBRZ6WEC46uzE32JlxzK5O2zPCb!*4Wr9??9uUu9-w zSh8K^vYuWb>~Tz`SGDt`-hGH@Wg<8wh&WNU?|Qms#+%NHn_t+;uYiI&ZJY_?D%UBh zQY&p@TU**q1TcfAL^y+X1LUpm9RW4ssy(S-Ba@RFpp0a;xD|K5yd<`4rGP5`(!K_|=Zi&yzqR62d40v*%uuddV2b#oE+4;L{m;n6hhKaQAd;*)@mSc$D7R# zol-T{Da%sixkdSqRWJ&n?Y>Uw%3^m`!me)gCK;PL$;InD-OKZxt1nY5^C%`wsYeiYG4gLZiZ6 zsU9331VHzLT2_sm*ht4}lk9vnwy_!tqzT2;c@=3ABm1TY^7Qa&4MbHlkKWgB0}q7> z?2TB?hCA@33ag~#l)c2K?N zzv4&LsCEF;mQ|)w5cFKgYiXg7BLbZI}O&O1_UlhD=1|z|G<8C#>kg-(kA{I)?AN z-qE^84jh-+P^It|D4M;N7*<0QpvD@JDAR3ZHdtQK{f;A+kI-d(+&BBldW4+ll{|i> zfSI(nL0MTbodgTFDfKyRZ$UkN_+7Lj<+MRWYus|8#SbJ^v-IVvY*lA@Rz_=ZHaA^L z!rW#%Lr559v+Am{Vq)BN5Ba>7&H=AWD(tbtx2#+E0n}cys9A#EAeQrpvt40yEJNdq`R3Jf9Qp=bPG zE-D51E=PyII2@eDx=o;tF4fTv5C3H>p|7cW$N#mdlz7iy!1M#{c7K?!43<91xjPO} z_R1KX6jWe|0q4xD!uv2}t26HYooG=?sNFJHef4dqXFu{g%iD=8gxK@;cwx09e(tpC zsm7qizCE(?^73n=F3q*1o1pMH4pd%z+6M1mP4bj%eCjCw3_yU5?e?sJ-H@nj4<+>_ 
zU%a?-Q4eVVU!O7JN5gQ@WyAU|KK2aOwR+N_I{5#em-r3HH;mf!B zjs@~>kj>$8qfrrTWjO-r`Fijd7rYUIfja>r8Rfx4INST5<-T)S;1#ZGkwtYYPZ zHmnBt`+Jr{{gV~54S}RBVTs<2Uzyc~%q3liev&3mIgE-|eLEX|Z=w2C~h2n&9}sUy?aqGUHPBqjwM zJ@*t*&qWL(>cL=#q&a1V?9xYZ{vbQlI)#mgpYv+zX$f*y)EUXSyFQjm1I(Hacqzzp zV|P$W@nUh5IyX1hy+u}ZY;0`m!yPJbCUzn|n};dSGYp+>h;7LiReFGkLOD;IVW)S> zA7#HdDe{pp19qLKai`NBq@V%PCjybn!-8qXzU{AA^M#7QKj3GAHtCB{4}9!8XsFUG zv}0czs|r%kbEglBjNEmT2d_G}qj))Bo;aW=u^{LGo?SNQ?OQg`@vfeunapk#PDphx zCS#LJ%+_Jp5tO*sg@ThLy}%|_>#xaxswq-3;S{6x8T2|;d| z`M@KlS!!x8>MRX*Ng{IcVym9|#mxEdjEr41p1-h!+_uhz#FRW#Qo6j_Q`mO&#yFVn zsoM~!VxDv9uvh_@HCZdT-_HJ#_6@z#7(OG_eVtOGxJpY@#q)06Z{sx~nSGYYo&mDM z=mb%pP~fU3LFyDQPlYzs- zR%cRZ_{caTy+9Ri`*xw953n=g!XD0jUy-&W5f@pqp*qeE?cztBuRuwb(a`ikCNV&D z%03gfCN9J&vWLrDACb6c@e?Z`_(~tm)?_`lS|n<34K!H>GHVHFqGF-#$df=A05ujQ zmyv@S(udr*dIp92xkC({YXq0g7jR@H_B7k?85;ZQX(5p0uR-T zrRU1Nz~jyNd*Dl?j@}eopPG6n$%w4#_@~L{iI!qNH|w!PqgzbDHZM0e@~)_h)rema z=&Z|#zAMHN7AN3FRpnx%!dfoZB?yX`LBqIO{R@0gKAT|@K31S z%&N?Ie6|n4E4Qwdw83ANgW>;4|_2R4s}xK%Ts zTm%jEeHp2z2WGc-Ar!zSYP=}Q#D$3m6sWTBoV4@H-`$7ZLx+;ROry#`8-zFexsq2G zMg95E?DZIMbB47uvIk>h)@(|GuJ$X!ZqwI5O#2EK zAAgvF6>}vrGa#eOL%VEyp%a=pVZq}vbn}$~6jx;GX#@-~ev5a&aUW8Au!`DU`v%JM z=+OOEAl$G~U0qzBC_54=4lR<^WDm$wR#x$TgY(V=3gF z-vYx4Lb;cPa}S($p)(Q(;``vF>~FPhGulvrfpwK32=)eYi78@|R*%shgO%pOMn=hf zP1q$C0|3}LSZGKr^b}tva|0k3bQ=wujEy?n_~I!EIyWTORlhd3V0dYpitYjK>#j=O zD2DLK)waRf04rf8O~wA6e#akIR|OlZodUVG9Fx9i1f_VIShl<06|)U?c~W{7W1mX? z2ZX&G(QeLv&&;G9)CoCJxkFF&k>uOR2qizLXTf6%CAItbXZnTXQ(-Mc_I}uf1?%Qc z9UYv-9O1p^0bBbXn-+7_RDF>^Fc?;x)ve@y`s@G>XI;&^@S0nCr=>NSDmkAU^xH1{XoY{29>$U0vU&%d? zV0y+Q!mx;#z-$69UENsioZhbanVAn?^CN`)9b#cAy&s@XlQxf2_1>2H4#~0 z@YA$z>4Hd%_Rv^7s7~Zp%OT_J427|A6VPQ~jY({_iG0Rw@@b_0P+QqJQY{ zAW|7cQ0=hwSVpF+$PN@i#S2B`#9AoUfgYH%Cp!lnyq;RJ`Q3nR-e(JJ*vR?L$1THQ zAibK&Bn?8o=NB;@v5`kQ%+F4)7VfNoy;RgeaOo7UU)b)aXl_LWB(7a;Moh}r$^rIx z_?5@Mp%z^hl;-SXX+A!q9ZyX&c&NaenC=Hpq>8@d?txa`)(EzkxUl=yV>9=u)8eyJ zXL{eSi+kqDj|f(vS2|8x!Hi(z-t%5kyDu>h-}cUfu^yNNH}Ov`$i@++NLG!>KA&eZ z`eMRm$8Qu%zb-tRt-XE3*A%-)2(Y2@w8;OqgW3>|$#E|z`+K!+?X*+KSNKn-5b(p! 
zqWwtc@R`SXv1sZR?O_A?JHIY3o4<}nQ@6XH{!iIt0PKTYW~5UX9EAOy@glE(8n-0@ zI$q0#ZUqYceDB;0kH8QXm2Q0yz*Cg<8O@jQd92u#FSN(#L6t?_*>i$k_Ci_HY++l9 z#4=f}!M}wS(9^BRg5bM~1y}HI@QqVg17ieJrbXRDAq)ncN=#`e^L{z2jq5Z>f(a9R z;SsY9duEt$hUoqaJ{)`#02Hc!0hIr`+}v5|oHG;)gGxjwDU-&`jr+$Uv^qd?tazWA z)d+>KpvUksiMIaO?V*T~*E20jV9*1gY=^e)T`h}yck8N4#Kpg=;d>7Xkfk`+&X6Qn zML3ioiM?^QCI5Y%UHU?o0sOizsN}OG)Tws02V;9ivmYqN-Ngs>NI%yEH$K(d+1qn1 zYkG{`77?Kdq3o?+)hSa-F_Hff#e+!BZEk8dS{oh83<3F)B*ls2;oZZ%Ujf!Se2pUm zxXXN20bzN0k_&}>_5HjBYNA8zkf5Ohx68okjwA*z1tVTblep#HfZ-K!n@mvluO})D z1PjqhQCML-`^G$|V6d$r+dC~@1w{t*fOuha7y6Uy62sKNzi$NdZjb1=Hdq!J@q;^z z$$5<*&&6d=m11vwBzR3vU}?p1`Ug&VWu^AFla(QEDgjrq5h+kfc>}Z;m-$- zp2p#xa!{+=9*q!zt?hd8y`-Xb;F7wM%^7y0!V)-ksqbfc*Z7>!WU8`+nZAC#v{!rl z*>E?2@YmYgci4GgI1?Dd)K>%|z=wv0;hphd+0G|LPdw5T;vP1jQJL+jvbc@y?X0X$ z1|tJMyyHwB-E)RwQ`iUmD|aTt_C)vT+FveGB)Syhw@v+X-gThGTRts4Bb3CXz=eeF zX(8HE@!W?hX*#vtLjUAK07Kh(DKV1Z?X?5!%Du(U7F^?Kgbo<(^R80_G{!RByqRbu zlV3WP!i0#vu&v|RFC_o?8>PhgXM3G`py3H)-0MUlIGXQjVx*K0dXDLFEGH2d9Mxhl zbBo@zGxlm6D;ziMXieuW?OvpSXP2eK#^#2iQ*>eJBX`sKG_q(J4@UrH{7|0yHT606g~LV_V@hMPY>NwJc6pm;3?vfs3XB`mJY;92|6Ax%7!EFIrE9LRoId<>bBHE?aWLri+nvA~3GRaqU4nAM zO$PuUz8kWEnsHpIkRyfl3edgjvNl=|O1dXM3-V!M`A<$wCAl1}j+EJ*jCq-aylwQ%Hp{Ef6hQ=PGNl%ZV)uqivOwKAcgGl~K$ za=!=?GS@uybj$7Z;pf4$x!gFPubw^I0-tz0+=w|}ti58@2;aY_yrax+@owNPQoOWR zKcz5%ijtB%YiWIb-S$(k(c^=S-rbCg zc&fjXP_>kx>is)2~3~6rZ1NHk-)-`{;oL z`^^TN)fpO^wZ-8SCNSTS!J1iTd$x4SM~*PYcJ=z+ zH=86f$D2XQn)iL;2@))MZE0;^tRCMTzkXP%&?Q>3n^~)hUvNTuPQStgTNW2LYhL7d z!|O*3cl+#eiha-gJ}LoU0Hs&qNnzl1aNF5tB9KXuqy$06rhfsAdjZTFLzjeg#ByCs z@YyQp-nJUAA@ubXza{?#Goy*NHmv@@aiJI_)ZSss$M80_G>S@3(M!JQHA=(o?M?RSUlx^)>4PH%q z6FJA;uaL6w!!T$4Bi?z_w+H;@p#$k!q}(PQJ%SyxuSqvn`3y2!+9Ozlj9CXP_8zrY z1YYza1uPiv{kNd^!fL!!ZlR(vh!`}CULJ%yrgbiVRe&GvZ!Oy@m)kAHbL=ez6OU5| zqH&2PwT$SGw&o@GXgFrz#3D~tLk=hK)QT3- z%yFB3@DVg7Yz~L>b#Kp&-)d>4KJ}k<4!)p)!Mrh zYz&}9Bj$!b?qqjLixi3pbD9{mC&!nm!@Q5H`S{RAggiB6poIo zY%SN{gcowt`W*60h3@CM-#3PNgI2O@hs{|sGOmU%F4$h64s@A7)y;ZNpLS6(8-9i* zVZe*c@@}|;iq9;*|3CGqb=}xVFwaN~9T>uO=rfV^z~`_@;~J-3xI(u9U%jO)RVUp@OM#KesoPlrHq>}9toI@5c+ z#+r3{+K6i= z6Eoe}ZiG7?po%7%&pJ^rr^;z$cd@GgVuHiSSRTO6aSLnVj~$zwwpZ z9ap>8Jpc7`-*w+~X+e9c@|Op>GXd0 zmrE|63yrxPJF$SP%$WFqTa!xkB&<`lg4H$T$gI~+_K@)Jr&;D<;F|G<#mqg*XkU19dA z-Q2ig^Yy_lxT)ej+ivZHY|VlzMW#QLP~S6mvifaQTdr>*w{`=wA8A#p^oRZJ9_{DY{NV znvm@JnnbheX>jQx$r~G`vHs^(q( zI;QwwAP>y$0j~kBzSeI$`qhj5#tlp|3BQtBT46VJN|Jt|!AeO|vZDyPmHkI5V8$T! 
z>M@9w9w2Pu>}EHf?hN`hwW|&4mCjPVo(m^P$5nnt*m$y*#OfQg0ovu9``#mhaB%wD*HylD^lB-i!94Va z+1E3DH6%+0aOad8SL0e{*D$WrR%|4zE%Od9gn?;ipSiy#gE<{wU_t6J(@5|44;vjC zkGvGz>YLs-1#hh7m-RY6EwOld_@iQY_opK}f4M|e#PFvn<26~B$iZ&MK*F=7FOQaz zZrBnvdC+=3M;^?wPBZO&>_bMf8H9i|QE$QJ%Nkwj`qrz|ShEVGMGIgQ=QrXL6phab zzxt|9qLR4P#KH75&a=gN;9pTRN_oyvyc-im8nj0)4}ana7migrk>*76K{=tmb>J!X z)|yf*Qn!}SkJNi6O4jK>6JMIJ^OWBKG)}D&FWI=gpIJLQ>N|haZwE_<#sYm#scIgC z-P~dc$J*Qfrnj*Hs&mvni^C_)Adsgm$xn0Ya`QEyO0{3>mL~XI4=JcUPWC?Dim_F@HmnH~DfwJzK6C!U^)8L7_RoYG#ZPTj>PKIq z_LNhNUJ-ncAjGf#NoI*u;4%LB@Mv$Dqj%;6ZFAGTW>g0q>!}Me-pz}&EsNAK+LL30 zQ8#r(4};dZ))1n7?0pu!b=mxcy@-B-E+1sQhXtekkj2Y@sfk}jQ|}pqv^ZLWh-r<) zCL#-55HuwxI*sG?KI3kR9#&wu#NPUZ#3zCq;hXJo7`xO0M0|XFaUx&>E_H&}`zu(A z-etbCzUv>!m0N4Th&$;4h1EWLhZ}Y8NKJb!_A{*E?#_MVUSe45242Egss0AKZ=Wt| z;y(`>NC&Tr)m1Ino4h>xgvvrZgl=NfXP|@4xu<+$&nd#v!sSGmG0)BqXZwTlK{Fs>&{qBk9`xCsO zK1W8VXa2Jawvk4jD$C*f@@06n=`Gv-JLWO+yhCy5PX720KgKKOwyUb}H;0CFoqNX1 zZ1aXoY@Wt*ix^L$iJ$qeZwj+&*&JOx(Bv4I0bNkaD3s1{cF4jr zS!r2B&&OQFC7#KxRJqDDsZWCy`zXs61CNg&DO0s3lCYW49Ol+2L@p}k!^$H2sV(yE z#L+ViQhI~w&ahGCJ*G`PSZmM{iq4s73H^o4 zP;u@@5uY%Cc{JO)vh}0#lzfZt)`*95^VH$)ew6`%flIAZjq?XB37${#;3C8Si?Z*I zr+WV%N3;}`w1i3sp%Th2p^`n0kx^FWA>-JGN=im0J1cvWJ&q%VlD&?3tnBSL#yJkZ z*Wnzu@BKW!fIOo25pV#ZT_j6Ok(`&2nLjrc4ArJN~2%dN{4z#;nN97ygV?}I- zycA=^vJ>R!X6nayifl#(hegGyR+hfEZqWWgRrnxz;9EbWzYPx$>*TpdtZqV!>6k?l z!S;YjP`*-T$5t(bG&^vlW9YPRcO^_niWMGv8Eg8QS+pp_GG%pKt497)%hwRghMduy zG?@%doOADya*;FMtIw0>zee|!85KXDXo{Y-ci5^hSgFS3!Tf_n{gqU%t=`Y-#dcK4 z(3XpStSfXGS&}O-?c~Pv@mEHqxh+^{rlwvQP(%3nmq#9;Hf~L}hI8Gz)!mhoZ^V8& zPFTVuPABPWnTtUEdCok9^(?YP2?7c=UYxqBA6X?Zf17D=cqxY7av)L*KP6D9-fr4x zP0tjouZ(n3z_^Vz%^e70QR?b(zLqp$zDcKwIhX6yXn~#X`Cd_BoTiHNaIw-GVV&v9 zZXFG(t&F(n>4gTC%3^$^fMsY_DH0i556MiO?p)A(`TD3MGy$*b&L->kE`<^hlb4q? z6&=l_!MUfeCkKWvNG;H57}Ajl#LLTv8qC#C4Zli5qOV7Qf~=@_Y3+tA*f&snOW-i^ zEr8e(6|2C~5!w62F70mE=2m25xm1oGQoFCnrq#;~A8Dw^I-Pw8su0@BmnPa`Q0A=C zjGajUJ+JIibWLY7TQcH{FlmQc;DpZ8=fsxoJ z>*<=h?}dIQhUv=@G_Q^_T_mi2cMh&KLd10Z{w^DC|3=hv%RKw_5te73_(>zgO^X$+ z8v5tOW!s(#g!Oi6!fgS~=ajCa3LMrtg7(f(CRoE@o?A|j#1_+ues-ya21}_;rK_Ou z*n8svZ84~NVOF0RS7>N2J}?=er0ZZc|Imyk#h?z;4vl`Y{r{ zxs_sYI^Cks;@AGkawnS{Ff$`+j`rl$I51fy0`-YoPxc~>w0efAhAxdNLtGGi&74hY zvkA}2q5I2yV>xQ{oE%sa0QgJow`qXYu)}EJOqNcu%EnFj$`w zC$PPhK{ux}Q@w#PMdtWBD+!>U{M2y8@dSv`vA6m;sT%)VeA4}YLMz^rO`^`Cl zLaR;@m9>t*kjGx=gK$XJWBpc@JXoCY{K?qV~ zcfH)93Mtb=uYAlpLwQ?^h7YxvX|kU=O=9N}T4yycbcvh8%Q6_xjpgL$e-5@`XYyeD zIm&4t$N4uKH$?!iGCje#7FvW`KObvKV$A3Q&dy_@dJuj~`-Q+-52kQPv7S4Z9;v|- zX;epVEc#Wg{Y0F>wQb1JbBmvEKO($7imibNnDn=b&95){-gY(l`EI;;>Udm`zkkPf zdhxhuQKxW^BC7!%R*w@*UP`mhj_W+*dc^^w8+TqdZjZ3?BW@`p8=@^^9_COW@;EKj z)2^`1#())X)&onDU|D;kO~1APvUP^-b;N{-_@WmF663v5$OGnL;Xg~l6?5oVlTDEP z*IBtWH~F&GjiUAPz4yhmS8pIAC&qanf2s6{>6lVW`6K{QwC*ia)Bb8~XZ(&UDeog~ z3)8+c(D3ejx8mDfWeR=jxrTRqYZZP8B!K97<^ET`z8S7#%i{y19`5c1&l_DCd0kVY zzv(j6nd9nMqb5EcZW#`0kuzHBB4`br zZqOhLT?(oP2am;>S?;hikk6e&8JzVc^8gWPReCfBTgoj>!cXtAGMaZtfzmah?PbhD z4h_aLj|bPi#L$VTV;e6`q*^K8!pTRA7F_T9#H}Wa;mQ{C z2J6&D3HppclZ}E0UMzl0J}h#gjZI2&GbC(l*O80A2zpy*A?viMmW&0zzSb}fLO&n>oW@DH|QepMosqj*pHX07}ZX-#sozf zq?}w=oMWEk*RO~JgXyMcd-BJl#3i;zbjN;0p@hOVFrKZn&1=F)P`2^;6tA{TaV_KT zu&JpO*NBNoA!s(^@6ZSXnA`f305|)OdVK}{Si`Wu-b(AJp1~i#7Eg2RrHjf}>HzqIJg{R&?CDQlQI=wTwDIG}a2+$-Yx>U5+W8Gox7F#S4+&2tpS;|+ zgLgq9XAcO9K>=;R8YSW&WD!th2`IwNdP@7&E=7bgg1uxtZC|hLJRl4f`7M~zJP{#L zm*HV&T@Y3pSD0Q>KCO!l5N(BaS19z@|GC!8&KQSe1VQso?!I}Q?U5EpM;c~d*;yAg zB%w$vn6uIx{X}H`4QrfmQw@?9HYeiPQQ#uqnWm>$jC#%l7#ojkd4wDb^JXEFP@jWIQ<>El|2zN zWdv9dAIRMKWsD%=f%`PRbxjVxHkaQx1;!(*f+!&12vI5MO3zB=ev#ERNz?7x6l$?0 
zW7ZIE4ITJasloz6^Z{oevoI^udiA?=*A{6+n}H$NnWmu!;nOP;m9%dPGL42xerC4Z z>}~}c%ce|WU}QxnTaZNZ2>ZiylZ>u$ViY;8?`5>(#t1tGv>cSBYGmLUPpE2vqO308 z1!O`7Bs(%lA$BP#Qpj@ty_qYp5$*)L#Uzgj8qP&_P804a$;i}H%2Mw)RhuirHW+SJ zio1Q{hfDND-53GSilb6JnK6#PZn1b|hV{ZGQ-_8=>mY5iIQ&_yXY9My?5&}h50oD( z?pks0s^%zNVRX$*v>Z~gd?qOjWJ06xz6GqmSsK#kT(HO2HACYQcITg!WPnewtRt9R zBciIDrh-=rs$OoAUn}x_Jy&~uUYw;9h$gG33u!?A8ve0lyNQX)IWcf8AnImo^3fT` zQQ21;>vNR2Aj!xt#e+EL`Gm$fm~bp%;3`7I>e)x8Hq@X&PLTU|r#kzDC%}e~y?_L1 zo+7FBxqZbp?XvQxcyB1B2ucm6%P=rYIA5#b)J2QVw~SkhHaf`#v8aS8&RGplub*Z* z1vXr@FDD@*QlRuL2IZ&HIReg2gCW#Kzx_pF(1ASN(&u9jg`}X57u)+;SC-{pT7Mhs zbKi7aMD4#mnv=wUzPm9DR*7T@950}d}# zN_IX`f>2MYx$fZh+onOI(vdf`-|Ki+0^K)Oy8ghh>NzpvY3snS#IHjyKd7Jg&Zal7 z)(vhH?zi2=gW?5X{11G`yr!abMa|K;?s$Gg*RVp>ivj@&jh12Cu@w!?0Ww4R_}@tr ztv*|@Aa=z@{Ydf)k{u~T2|WUA@v+mcZf*D4hss&U)1THn6bb^2Q)|Rpv7KEReq9fB zHzxvhCVn=X-VwD3F@O+&6|w0W?y|K?fv}s;S4xR^(p{nqn=5k^v|J5xDqot29iI*k zO6m4#%r`x%JeJ3<%6NKqiOz9_jKkk!L*X(mpGCP0mStdy_U@ zH^*B9_RJ{ew?75q`uqnrUu<>NRxeP^yv|nrW9}W3cH_~Foc>HkO$)@|ZYl}h8oINu zz#bo4RPInW;2~Zr+EPc^jK`i@~qqDKji>rx;dP_VJS)rbuo@E)%cH)xHa(fFG!nw(K z#tK$-!z@vmtvIZuvHM-Aq=yWm37<8-BmN-bY`PwiM?U>%;Vo%Xq1CBkymu!aJW@%zcEXF&u^kc;pfnDoa5N*~hBErWpM zVOmTh30 z6(3)bbY6@*eDX@%U7y38t_QG**}`2;%Rdf{H3ipI;N2~P1k9QyL7`+lOSqoQyHgsg z=Wna_$&?k))6)x290w+*YSKflD_3sScoMB<9l?xVsu|j_t7@goV`>#m5qbWK-?I%s zXo~jrt1H*8U2C<=)<;-WmVR?)Ayf=T1$<@Bk+_faye-&)?0Pz`jh)Ko(lwg~Cfw87 zlyNI=&1{f`2rc>5P+@saR|wh$mX{S+qWA)0uc^TLKQz-B)*o1yLclnpCZ;1>jU(WO`?QiR=l+fW@b1HSQX*u;_^_33ha|iU z0Q!}pSRuR8z{|@L+@riUMF05DtKJ}@V-BJRWoOT(W$6}m&r3*-{)Tp}S8_xM^5(F> zCgopZ+k*z9_7fFZKH)&RyU$^f)#gE+(x(xpu4=_ire47;6%XQ-!5WHH3;t69$-3NUVJp#(yGVr1h*QF$pXeI|ZuLYxt_>QGA&ka85@qDk?eSFad93HI)*My3D z^Gn9Y1H-PDH`6UhPS9zSdWN@4TGL@^ghqUM@uf(~w$w*~RqPt-{YCzawf5$pp!Pvp zv-ai=82Yt=5EO;@mhL&sELXq}?R!`Ii3Fw#+TfS0j_u;>KSLZKtnohC{`Z0a6K~D$M*jWXLsUE@3ht1onjO>{f1v|SAlAjBh&JL+E$_+6XVGFVcT4RjRBUZ+Yw4Pv?YvYUY=}l%K5V>8x+8_R zKUnk{d%ek@Ebh;8N_k0)W}ZDnLE!Q_D@sfCimR6ih-tSu0};J3 z9kHyD0M;A$2hCg2F&wvJjNnnH=p0o=){B4o$M=J34Le53ycKT>g|9dQk+`9NW@{ zV&ai)wOGPEr13ckbM4>vPO?2a);Htf#1gj~9GV}yGCMf+mfe0hfM=r6_Pz{_Z-?^l zR%GeZgE&BQ!l!^wP+RlFVi$_ec>r_JH1(ihdk#zdr53+ftoU%r&1r}F4g!Pos0 z58A_l6B9t!p0RyXf$#0RLoMZZLnMwbo(FJ^QR6WMWC4!kB@9HK=BKX|D5!n49x{Rx8?TX0^8N``HeCy94xc5Yu#j}MXE|ddb?{_3=jCs`AV@b@;i(~qa?hdl zE#_Iq$nnuMqo$)M7=wnb&782gIooSC3O0~tBazD=3`0^T_6P1C%3#Z)=(BxK6 zh|r6|94MNM4M0xgBH({wd3~QV@@9MtaJzC1T78>!-THNQOn**2{SlQv*xw?R=9NwJ zeQ|d+qSD>VJ%4raZxt3_s&;Unc^WNQqJ4XLFzIdx6IY@R0|vJ!Z|2~NzY*Psd?3u0 z%>(^4&D}S`wQ;~ysbG6ietViobYmTNWM0KUiau{>r0S?Kra2-v_fc7+KKJ9I6t@c> zZ(D7)iA_wfW`0?7xQ?tWM@20!VeQj?7xDZ4l9Nd9v2=goc&lvxXf+5!yv4FY*8Ute za@Lp^xqK(MWARUYiLZ46w|KwD zOdNSdK{Zd572?xXS4p~~3J4!@Q7SS@0A6MQH5BVBXdmC>yEG!-g~Wa%^BoNlO<+|a zl5TS~jc!vy`yyj++PAo~PVq3K2ft`0zw>WpFhZTW0(g8IJDDi|gil_<%xoq_Whf4E z)kpH=Y!D&_EDKg&^405Be26@2|b)Ulv&77b$oKbs6wP=C&mAY+8b)gklljHy?}iF(&!h-b5#ZGp7wCiXa*2>BQ8Z z@=kcJYi5T!=LKXk&^90r)bnuh0#z3=*f7xh^~NrDh!NE*CnTtpWK|GZ=zR&0B~JUv zi1Cst12R$xqA0TCcmUi9AN%=+0s%Lq%Q}zJhA7&oU~Lvs)$`@%G-JSS3~sT8BOeTI zmp&NNMPz;{?Bg!6HP@3UlXp92?a3NWsJSO+xzyrndI5p$&Z<|53m>?3 zn%){!U#!Cg`5_4=%L%CCfl8>$A^-N)p|^5Tv`ufZq|+lwL*wrW|sTnK?fB zdsq+6{OH|9N^c+aICrBEi!QZNzfS`AhL3D!Uqb@Qg>(SC1ipG9nlPv{7mj&-HZ`YZ-ce(=Wl*TrM;dO?`NwH|4XCB~ z>dr{+D~Yd~Si3VlXeHWu_Q|W4Ojc+4T7&{CRiK5=R4A+Op`~t~B65TUqa~i9(b;`UbkzIHJ_OJn;>}4#u78 z8dms7Zin-R#U;AJYz3}uY-Q_f76YP_5XMxkW_Nc~5Htc-p3`AhP+#m(J0BT67|DIJuLAnwXyXC9I&bz^bCl9|johcA zL@htJ88V7@aA>3ly!=IvqW_ug@Q@JU`jqDwSC^E60inkx)7k);jzlR!5N4+7wvDHp zf=b)iK@Yxw<$VP&iL{Z~@Y_1R5CxSboYK_1KD0OTcKKFgY-Q@GZly|EEkT3u0X6qJ z;2PvURrh+#RzQ`b}ti^YA9m9lyAure(aD;p{N2Ry6q?J?K9B 
[GIT binary patch data omitted]
z8z_K^%o6um#A3_$*vYURL6ng@tj^$T9>#K>7eJOXa$>a5rg$sXQ-P4Ry=&N3Tfng~ zf6a6w&yhi~mKY^Ix(1s|JM#LiJo^0T)Q z=7_GFZ0(hH8egw-@aUf)7Z;id3YrJD2o*=*?aB;ZlfTQP)4uE2^xqNYlT{rqFe?~0 zcCZ?lidNy3YE6xFu~no{g^NDu(TZZej&4ii| zUvxh8&3Jn{NkM@kON=$vTTzH>$cdxOcMmw-5SWZ0u4}dJXP@7gZltJ~W1KnFQK`JC zC6@yhk%V)#x0|hZNvo@ICysvA$~1esgm$cbo)Rj2&zU&WtV@+LDZbr4y>9N(CzGzDlgaf3T|Sy?8#Hy&ns%>sE{$oA^NZY$$**a-j@hK-eP&>X<| zmmXswAH4?VJb^a0HTmk`=L`6TH;y{z<}=fE=d}wr<7UqdK+o*sv;M@Rec1fj(Tuid zV#SSO!``{rIdXF8xTZI}&}VD4EI-%=IIn`>IL-%?z; zr0tvWua`b!@4?B>-?phd6L}x6IqUJQyLOo8OY-he*@D_`+7U$?QtfDT4Pm`4St*)A zGt8qV_gy6eyE2o>_hVLPcFU0{*3EW>`9VI!j!lh#v9$wMw9{cY?q zF4sQc>vuVs$c(f~UoZ`O+tc-Gw)3_-QGqblnbAo#E6l-bdU8ih`M<|vN52o+49m=O zf?=ib=Mv-0^9|wop5m3xj%CKymjrC5*bjG6YUL_?sf4-0{cTUiO=52Q@sH)|W_fRl z!act`%GRT5UQym1$?t%j?EPl;&T(?(do zJrtfBg|nm7IO+q|DLpL6(#oT%*V2eJSkf4veHai%P{y0%3p*Hnbf z^&81$YlP#?!3lCQVNJ7h57zYnSI+k{hpn=yZ(RLC=f{yWUo+Dh@?u?~ zW`+Frmz4@48q-xTOhS@yrCrlM6e2iiw9vZ=`Pawm;6{FfcLydv_2SX^rwvPJOY7%F ztfu=65gk$FMIz_+gztB>=Jly_9i{sEPL>Y)=n=+zH@B}@tyqO+p-5U{xi@SN1GfDz zh!a5$q$?>1h(sG>9Ax-VxwPH|GjsV{4D1Bs99IV)QAf#mP6 z<%*u-jQ8Cv?-VAeE6%*LvuR$Ig2~;< z!@k|QL-@l*EpJUP&yKV>7rY%L`vl@UK5yu5e(Q8O;pR=71miJGf+59vm%a;rS7yIi ztvQy~OQqW!)HJRr+>26Z%W9JN6p--F{Cn1gD&d=2lsMXgQugo-^e@p6oy48X-9ng% zP>Oe-4J3U3uz{M=F@-ItjlB7Sqo;V%(?-%Ad@ohpF@ZaguGj7r$l=pWKYXOA4GD{f z4+B9qGC{546Uc4m!%J~SzuirLiL7Fwtn3VVG0z_u?(AaGnxSayQglSJyJ49xPZZXv zc#TbMG)M7s6|?$hZClOo4kL}(u2bb1GYYQU)STIbZ{&`excs?(;yfvX+$d4_12R#CnT~dx_f{`gsh5tvF2xQeb91U^h~)-ubFR3I?L(A?`il*@hi^|lHXGUyTBpos zD4Qsx2hjH((0;{+ho2!fs9s^Gu%mhcVK$%-Tk&mXMnbr59+Wmz9kQt{*7P)5JCm}1 z)-qDSMl;cQtU9A7w~`}DxJJC<2%9J1pV*v;IC>N7MsWm)%XebHg``%zC6wq#B{C84 zJGx!Xgo3sLgePG$?oS8D*skw+;J*ZaXLQayAL=U6o&+g7%ll3s;T8HbuW>{J=RN69M@dxySXMYI$FS8JO*eT2zScV94x9G;Lm z7|$3ER(%L5`aB(38OFDc)}@f#29x9dEraIgxk>wmya6EFlHbXprt_J7l z;Yw|}Swh|8H7OogFc~>h^KI}5&g(X50d5CUxF8v*|#wY>r&0#<}goNCkfKr z-XlFPn&m9!{cZecA90{TIIlgkx^;TLdbiL(`0?r#w*&}77;g0=df{}hbeSET- zmlGPYX+I`rKC|nus3Mas@2_f|OXwL2-bkGLItV9=^d(xUaE8(z>d2{Dtn*~duwUza zoj>yD<8fhvWZJBk4SLF0+QzP{k~8>tbLj-4Q)g!%yKdrmS>a<+nYaC=vPsM-><^zI zuXyDtv*d}S(zbi^!%fLGEXAw&=d(1!H~KY?;aMoGN{0T2e!Uj+>_IlD%EzVann|0e zx7aJM_01)>hBY2^BP5I^EehKMVGy?2iO(lc_GZH7tvnLCi;mMQU(a(;#GHlld806` zoYQ8|%vdg?k-0rpW~ribSfP5?p-D-bqCKOJr1SY}y7uto*njjCYm3vIbxd&TsJEFm z%;U=zaB6!H2SrGExWfrIZ`e5$2l&LBj3pSg{k;w=tM>AGi;P$6`qH7yEmQJy6L4Nv z3S2L52@DsOo9~8@cfob{5q;fv)1R2Y)+27tyWFyV*$=rS1EKoa($I@Dx9j!l&k0B6 z_kST4;^z9xz72j#FL;lm6ki}DUgzy)qcDuu!8fXHW9uS$`0JDfR3j+iLHVwO5}e*s zwLN(^N4Qlyv2#tNg6F-J-oX03FUx{e_7^GzMheT}sV%jxk&^RK`{$ZZD#ima9}FjR z318AVL`aX(8J&*-AHt_*$MHwx3bzGC$hQ2g9&GU3R#TMWn~dRkWVfyeeXDOzz;G>{ z;2glZjtjT7n&)ZR5!Y4e5e6el=S8h3Q&+2{FE}Zhq;nUAwXD5286QyeQO;-+m@ga0 z$p?1$x{&pb4m6<#T_1T0-+3UpQkRd&*Zlh#fxm{xX{ zT7XZsDgXG9bs}CkS7HzC`>E>m%lI;wwYyC{MD|xtD!Om%t>hl$wIqX0(+~g%ObM}Uvrnlgf)77$ON8-&1 z2ff0$Vk=LY=G?L7>+khhC+^_C?DT$Dhd9O7(^WzhVilE3Abyb0robW8We9GlBOu7T z>31tjUI27uX#1n^dSl`$r@y?ho2GntaMd)HK)BcL{=koGW$Unik=^`5Zy4_&sjF+@ASQ z<`-$6*&YCd)$cZPIov$$L)N8$ z)+iXIsa^*FZ-0Q2K)>Fe`>~bZ4s5Fo33c-9y9uh*Q|6?wXHwEVxbpc$tl%(|(v1JZ zXngv1GNG_t(xh1FF5CNb8PZ)&&A(hH3OjFf33RTvV23sd>7_kg)=Laf{+>Zj=*_8X_+p(wKP{fY^}mbz__5XU?`t2Tld1ia>Z zf*z*E?njqS@XK1#6bLVIT=__0EGdm*~h+4;*rCs`=93l4iLB*R-s@GVO7?#i3jaUdz%yG>}W{ zl~#=?m5wUWN3;63m*7j&65LUZ%C`K58c=I8u?>~hO*&Bz!AMW2~fF55WiJHA7x8TBd3vnjchufaL_GJXLu*{(OBS+JaDM4fsbLW zf;04l`%%^jaovPh5sE5bkRq$kva_?-2lRPwKcO=DW`Ayi;<*VOmGS9qCZse|(#opt z(c1HLSFqNf?#X}jL^@;s>SS|LQ5_k7q&Vmr`NMFWH&4eoiV@B-oB063Sz@(>qlmKx z(kh``;0ms(`?qfOs$*`1hBud=8I*?|kalu*UUeb1;$HNGj)YfD>F19}R3@eUa8gs2 
z=2L?~ODBYbIF|{14J=MHVR)xSYFNoFB#vQx{o~u_ik)uaBeixi?lpd8T^n?R-L?vuTl+FW-T;fyJ~-%vp%Drnfe72rKrlW1;A1l9VZw zyZPNi4(DAV+P-@^!T6DXxI&uU^lDN_<%mg*rt2AAivC?buW8%OFo}LVG6{s&n)3oO zTbLUeZZsNzc{*e*?LxQ9CN*RZS$6#w2OCpA{<%#fj7QqEY`gaZ*cJ@>*YN=Tih0%F z(i5A~1C8Hqq4ZyOdc4i9c-|SV9sa~{A)>2^gHXD!44pCVYOBsU9WMw}A$#PPO(l^B z*N4iko+suB%zapq>^Ls3qWbBETyft1NJ~RPf05&Tg{+jLyM-1Gs0rh*YW+ zx{rEOd-X%><@hR})+u@{F=Wx)O8XuWH^M()(a z=MRxD6*eBC#6~)9+Ws+bHw2tHX>N!jDjokrz}Tg1^!OYo3}rvfE9R)K0>`@@INlW& zW+4)bk!GPhUnR-(Yhj{T+^Iti>m8cY2!AVNx1~g$m7ne>QrfSMaubKIp8q4Ac?}d@ zG2790k4U)C`8|EE?}=H#lh}_$m%5?H?FXo8w~jG5;!Q`Dj>9!zxbrBgvM^CM;Seh9 zkM%@lLJ&M{!`t&q7&_K{8@q1efLA|p=Hv14r627a917*~CN3qMOTKJ(Bc$hmdyQi4&F%BxX^m`<#zIWYbDYBFtY?pMZJ+&9pV$(Wnp-KnA1 zv9G`DtGB4sh3Y6Qukt(7YPV?Rjfp}aO{B5@4(3i1;Rb3muRK1&#Ka6YWDJGKBs+DE zoS$t>-GHUezIm^}y{qT`I*Ab8$JVmdIPR9#rzVOd@#&<{P5nAWLP5m%)yo-|{u>(5BF!dFt8aeG-FHEBFcq4@R zkW8&>n9n8bL-`ISyYlQ>UfX16ZP`N`?aMG36=o%<3QBKnrA+ORe5!X(*->;geNpak z_w*0@;3D%k6GmEOZl_wwWsA*?St4%CA2F4~S=5ei=8?zmIGhrpzyAtyG;a$;j%J^s zV`P*B0HTt>bhCFhxyVwnw}R*qv)L zUT)YX9*Gq^8EC<){XMi*R_vuZ{{fNw^XbQv$$5{xmPK!&uhs+P^>>32cCC<$eToV= zD80Wx>52NhUX3p5&jDb-f_iMOylx^N-vGZ>!`o!>youhYn<<~kJD=urXoPSr6PDlH zRXOZg@|f~0K6ER6F58J1SJ#4~H5>etghoR{Ws3_{&T)tbkk>4A{Ak!!;LR)Q(JiLd z=gTdqq~s*FAD@?_aJ&;)ti4?Hn3@9&^pSSf{+antbQ%JkO|)crl+Gbga<=tStC$+( z=!UatH*9D7xXq7FB0VgKEuT}QS$0f}j&n&d!{V$e`54lNs5E*Ip2Aif8hwX32)k}o}X)t4@Jo=Q|b}MxWjKD-Yid6z7XY-bvqbGF2!P zN_ytIOa+a5OYd~2_cGnjZ9Wmi-pxOb>0ouY(;bJfoxL8m)gYhbcv`+07ap`mwv}<2_8bN6}aOm z!3+l`3l@%nOe8qaj=C#?G39S}--)inc{aJG?|~+mQze;undigkiW_!E258MYM$639 z1xP+gOxcX1tAzxHT4-JMQ*`nitmQcAFemfuVw2hmIzHC^>}tALK#f56HT;yeq(c6- zr_1Eh&fhpp*Y)n#SC%!|92yu6E0MG`a5%tnnxWe^B2e;r1WHzZ-?i_jXh3g>Su{)7 z9B`P)#ZTEV^;U%|G{!Q&JIk)Si?nfFbGoFE3C^0KErne*|xOm=(*#+=RPW{W*GtoiEw$(toof zK%!}*VcLXWr))i)0q)u>fq`WqHT()?gFcSqVnv``U)<1QMCe{Y;z!hFko7W5HuO2Z zAQ4e1h$@|wL&=D;hUYuxv9}l!;%>7t!MK*&yw2i2FcSdWxt_on;h3mW`d#M=Yi_QQ zq>Z+NT6i62lV!cm+p({ViAsqg^TY-*qd?07(izwQeQImK#+6g+UH}l?AA&j>T*a8> z0%hojDgsDUh$Et1(p@D&8J9l#=8gveFx+k7!u%no1co_TXWhv>(-*fPeJ+^)JW%qt zk(Rhm!3^rl=za&9bA$KdzZ>j^Q~cV6cJ$~vu?gWX3T)v2IAZ&knIS_) z(Lk1D#*yG++`O@@DC7Oe!JrHLOE>s7%_PD75$v(Rgdd+5ufXUY5W%>J_dP{p0pO=< zjOeaT1~4L>+_IY)J`5azYICd;>Z6(Hy%`e)SYB(yKk$mFF$N(ho)}DzcBhix@G=w; zLgZTm;AQQHB2=wCLBN4aYxGdS1r<#uk^gyM_Tns=Lte>;eOj)F?9-3;| zHPBGq(qgpS-0tY67m-f-VaB>bdk{{zJWl?Nm}|u3Xcs~~&^nY|RHxVF*ni+jajrRu z%Z$`_S=o692J3u%?f%t}b40Tc$3zx8{P}N@J)q)pX@F1G-C$|6qc|Myi;Vp>2)RJ1 zh*$N^-@+?i~Z}b^c(-8RFtL9|nooku}(X+~c(5L7OkCs>T;|pzHH7I?( zPc40oE7daDKvH1uG1vSX;wZ{px;UbS3m(^nEgFgj;$YqyV4p3Rw_=DVU}mf{8A#fY zXj&e^sidTI$NIF{U#BRsfrY)7JAWXuQY^;{#H8X<#I^3$oAF+o;C(v9C;qwH0if%F zJaY^D<%sl%!;a0_Q%Z z$%6RV!Nv>yLHlhkmIX{0jUf#nc(8fM+``mZHqu$wEiaAKzjUIU08GIR9B5-?JJ9VK zedLCF-5~)@{(W#5@z(fDiGx#K`15L)GlVE5JY}M%ukZ8I4?QP!={eQ#RF~)h(iL;HKxDaZ&++95ePLkeAoSVn~O$O>FlI7z}7A((T!(e$|6NyBM=GJ|rni`$6m+iP>QW05o`hzn*5Rq2Sk-SIs z6+SsbQWGhwWt=p3)b(q_7ACSrq&p6tb5s$%+1i)_IP~Wfgag!H5NP7He=DcIBGA9v zOWPlUp_bSXD+pJ%i7nbY7p%>z$R)1k+8y)~*9gchNyseaA8bITwGp!l$@b`19(hMo zcb7OjlJIg!xW613w|M6;3V4BHo`KAON=;${>UDgu3O4w;wxqTsDX z_jZqk3l3%mh%BwQ9vM288*q9tKhOoZ z(ndae2fdl40fd+C-*jnF2mCTp)K27{U)jgFjpafYBGlsRfcvQOywYhF<}~P8+QHX6 zeoD_qzl!_@a~xP|?xTU^8$HT#d$OQ+wRQqU+Np-gC`EP1-LduC>VHL%F_I-qaAAU9 zsf+CXpTMu5$w7F20?FStzd?sJJJ_GAq4aMF-c#y zb_IH-wi8ha9SDRHZlIN}>ma3+vB;m06w;TC7bO4rFJLp<{egnm z#zZE{?uO|{5G|a=AOC#SAbS_tZ7O*TRE^w-hAaVU;|dhalC{{GM7paXsAZfW7m8Y< z++j{`D`-Qaj68xfoE&4--iFEvCKge@wN{0wXc@p%NlqrxC{uo$p}P~^?J&yB-l-Qt z2l^kgcTnD;+~0m5c#&9*=t%4%?gvt++_0ShSQi^-FAlZ9>1?Tj`z580Kh%9eLVo@5 zm3|=j4Kr2J)FQ?IdzEAWwW~+oFN3VO{LSS_Nl7=mq`CLq7`}-+f#|7G`*)LzLh%L> 
z3Yw7qW+n?n^kCI9uLUl!a*+^(ncyHt^5^secIj{IuS>K%X8!p}Z1Tso3e52#tFLu! z1`{<$zCQ)BkwOv`N2R9&$%fJ!e1i(luUuHa{T{NRluC(pq=;@beToyo`aNIi!K8mP_wwh|4P25kkw4j zzqL>Lr9C8u*Kj{u{N^VF9}md7up1S6R!~OVQTI>$ChX`xH?M%Py4QY>WsiZeD*H8F zTP<$1XK<`E2`+HsIB@&%bw23az4(=de`H(keH>017q!QxBhg547G8@tTT!+_CIC_# z<2WXL;dEY&B%O->y8a6T1HJ}=(1bl0!i)H=V|u+#Eq?RTaaiP&Ew7m_05=Q^?9WwI ze{Egl!q1b%3?V{gyKLm^Eo7{2JF&A&A1UzP9dS7iq5&F9+>xlU;|JC(y6}J9l0|-+M!^vzR=V5;=>d6H$q<=8k2*;ps&2!$I?WqtE_D872@kP2qpezS&W7XioUk)Se z`>B}4!apD%stAZ8oYH$>T7(G1Rmw%^41~@=AMvtj%Oya=aQYv$mH=I>`%bM`_{J|E z9w;DVHM1zDen-Y~Y+1o5)k6Q$`5O@GkepkP& zHhuzu#mcwU(-tZ#u&}DbqRFl3u}3L&e~Peg(f5V&kwNyAd_<#A#g9pwAOf)uFUiR3 zLS6{fD&V|{<*#F8iGhpV4d`MCdBui~{p(nt!jZ9tmP8m>BV*+ZvGp%Tv+T`+e6yB? zDi#kQT0wb7%zKoZzwip<_AUsK`Qm+7|ByHVF)5Mnw;7^w0zlU`ShP^K8>oBB?VT19 zDYNJ@SlEX24{x!E@krx*9xRCnL`WVFx@4e$17Pc=d|hLXUOaz^cKpZv*r5SqDLO3C z+`S(e%h=%-qs9iE5j@Bhc4%R#j)Cfpd{Xn1=lGY&o>xO;-Xvt-ZSEI{<~sj)_fJ0V z$FOMXtXcHxE6C5zzXJ9NF0k)ckq3MVwsq^=$}eR>h(M@gz0oBDVLAyz>G$1DMQ5`T z3H5Q?w)0<{wbzj&AVR?{h6{;WS0*aFXT)dGdFj0e^tuB&((*q+2Wo!4y7!;UgUH@( zh|Ei>hrIXF2F+#mx+LJIfY;-&U$(KdFtSr%j(b5QT74JT_uGd-#Ja_~vR^e-Lj>Zb z#8Gs~z);!>L+!Hr$S9rygt8-*Y%m=Dzp)ABe`hsmmw6q+Y(T*Ut3ms;hNTfma1mq&x|x zFF3B&J&pYRVXvn#e66=GDzbN1%HBtWe|*rQ2eN(k-L4%DV$o|XIVgDb-H!){{f>>9 ztzn%Q9E@yOplSKr!*Cc0e(pkf^yC;Luys2U*s2!G3~Zf11M&`xcs!3d7?JAeoH7bf z$mgbr$sC_P?AqO*%?vA}+9tl3r<^6KBN2xL<=0H0OL@F=T$+TjR?Jq2&lOidFf|TY zDpit&GBs6hvB<7Qs4qcUd)XG1i4%V!z)hhiXYJ!|RZ0l-yu9Jyzf+JJ3 z{w6utkA*)S%~S%Qi;tufo?Qb3E2LBTLK`B=yCXQGh=XLx5`0SJ3|kMsvu}pif=@D43RcwdyAaLXI>kXnlM7rvqysjLK^a1aR9jJEHa*I^RmXEO z>nmWu@owFg#tM3^mCGa2r!0inJ;xFd4&+joER-*A4W2T0q&t$jpI+RsYjKAzEIjhzdoOb8IM%BF zA1J<`tEl{gCgphyhy8TWw({>s5R*;%+oh8WVlGnk91MlWZdRzTeLWAHg*a7INBlyO zTO+2c6O9ExqA>n@H~7EgZZOGt&Y>l>7He>$HM(zp?JV`f9aY_V{ErWJpxo&l$&ukf z?pNiA!IGyFzVH9IO5N{*?Yj$ z3RjfTtq#TdeuJ%k2a(WYl_{^h8xUaaNor5d_{HkzalW|M8=nvVbq|WdEx{iibl_O9 zf*}kateh+*E)<{q6bbua&dId!ccK|TdEkoJNA(ddV?mnX!s?fTAs}oe+l+T!Xqa%P7ssL7KLoWaeOy;iqxg_F04tssugDn6=EXlimYqr-c%vi~i zkbosB&2(Idsn*kYDqP895*|O7wP~%Uu#Q_^wMNEI*rGjhz%=oTFB~v%byp6wOu6ze z=+DG>exC-nl&Y|^OS4qHk%Q&wLlc_waesGH$FzOcSY;bZSHY>`}u00TGxfmEsG z3&A|w-@^0LruhB;;vKIrDep%g9vY5`BpeMR6!ES(qo~c3FBnrL*RO?5_eb3(KV3VtgO zw3hXbFYQTO-ieEUS}y+;f#59fQ_-*hxQxw$v~&~AbWwN1CxjC(n@nu}sj!G>X}JTf ziOZ|>G$M)K3M2k)r>T~yM12p+D0@%()*qzLk-34)@BS$&_*jsXt$Zuf1kC9+EqUVuj`G6hiD?VWAyE-&)?E#HAa=MS-*w zyr2wb{qA;IU?Jl}$6rwmP*)T)2YYC}KS%qrJR~P(s6}_LiVQAg?PULkD9x75?Ta+_ z{@ zZ4=TJSK`{>WuB7hsZpkaeZ}06c$Kl3c(wj_W%du+4rL&7i?eeg!ymxsZHyM4K4D|v zB7SJ5=^7ST)V`_;ce0s`k_q`Zny^D%l~T~)0nAHS6X_NqFLVo|T7x0H?zdI~i;$e% zppXNSd2^5se$!8R>SpO=D6l08(f|!jQC|lLwI!nN6*5<#?<)k~u7tho8B|OEMt8%e zV;cmBDw28Hy<~|Dk)1Kseq{5xoXo>kSGReqL}cphSHvr`P)2*QQS=&c1ZBVx#;e8R z7l2%didU9Pd;=Bu&<*$VhwTsOIwz(|@54A?t!MLhn(UItojN^7?!^v527nZHrAG|W z5R%5j9RwFO^DG#5-7$K)sQE{)Ah)OS(!-y}UYw5BUiZ`QM~`%eb9-@x%c+^|68j`) zi;J^=ex2tmY=gK8o}b*G_6QPEvq~J5%3mt${>p!Zpbj;W%ht20Vv&jiY%?oWJNHrU ztm4+atfA8G{oXt&dnzkqDC@JzVK~=BEb4p5{b#i0PZz-jsbSUopTZlx4?uBq7FHQchNCf_fkyu{32x7hF1fZ$u ze}~hpQ!`jmt(V8OJB~J-mdCY6Q7X^%J^SszTBycDx^kIrM$ppa|0BgWui{K*5j;|x zZ!>%Fki?2y2bSJjUzd2goi#qfvg(#+?v`VmY=vvTuov8ozQ?&~)s~$*qyLV(x9{L{ zhvk1x?3}k1n>W&tEeSaOMtY{=z_9xzv!Q0{OiQp4HX;-4s=bF3TBk;Y?bv4e^deW$ z_HEmWoJwXbWI2NTe0_CqlwKLGyzi%O$w~EwbgDrBa9(h){&+NVXpuO^s8V@_D^5B1S3!qQo75~zqyG+ zbB63U39DEB#>It4kus#+v{{F>wA5d?^rb8m@30kb8J)xF-N`U@(898k&!NJT{s{le z&;_i~{A4M!H95kbUnkD?;VhlZbu!NmG05x*#!yj`V1!F~KxUa-_d-WMw|Ioiten+r z|6wfV&?0JdGK==&@AOc~!clWe|6s#AWeY;rRHKz^7Zw`YQCJ}DC@in`@Sq|Etyn=E z68Lk`{;$D2kvFy+KF_m3tNGb_z4^| zJt;?mdH+P;^!u`5f{;eKm1RGI*XK4I(syjF!6Fo!b*X(n_cJ>n!G%(0)3poz8zv3R 
zgGkF0`1u&}*?j_Pd0u$F0Ua2G?E@Td__O}ED;D-*1$1*Bp5>Eb{pnl3 zie&eSUD&i^qc%2K&yU+4Il^hvy2PT#=3Mh;TAD@jmS@plGmfv~H~15STeZb!VOZ#7 z0x((6#{`iZ3xZ&s1$#6`@4=ktznaTNoGiNg4F5 z_Rv4rvld;T?>F4fFP(_d2>(*@Sj=u}K)i@` zWWL=&x7hG)j@rz4w?nJQp`Ovv_TtnlKPq)sO-fiLoXfo4Fos)`fDt})YX7E1k$2w< zno{`Xou`H<7u^{bb-j1MY2u3K=Fla%p>yj9MZFPD+9%0hzck;3US>DiocD|e zIF{zD!MYDv<4rvz$9NCcEiIoVpPg-MWW3#m|hUZS2bpSb~4R{$uY?BKP4e z2qq7=N791+*4VUs8%9`I7?-FxY%Z?-{NA37@2q0l&wHfXpEPT0Uh9o82wCM1{1eM7 z*^3KKFKZg%{Is^-%$`Rya>K?X$#fS&OvG1d7n01EAzrfK&|!_R>@q`gnc=nGW&oj&V(wjL7`6CJ@zmo7cuvSmxyod+jyyfGaC46dF_`uf;OepLM~ z@bq%)T_VOlLpkLm&y4p2YoE-{aRrcxAJn9W_Ww_NZypY1`^FDTizw71c@Qc^B}uZc zC2g`~Eo%}&4B3Y3K-c_kG^uJ&xb^ zuix?Que)*2eP453=XGA^^*KN1=fac>+!wk()MtY$wZrLY+I7tn+2QuRolPfnlV6;& zZ@c#K{^$b)0UH%!p%FyIi0 zuk>24Z?u+|(c0mmA%??3Y<(UQqSyRdK2dVX9r{h~D5@jUq%=KdEI&1=qx~u5Orz)Sx0W75d%-n+iqxr0iZM~BjH#5c%u)}& zZJ6VI#k_C!4^f=n$_zxE(tB^jxx3h(2&gQvk^{J9%I9~k|3LC4X zoch66O!TK*qV6k8bNjY7CMu%XxKE5c52UqfZWydtDd<|N>n_eRb1-Lm-zP-*yFc+E zB>8BH_v>q_?7Fhzl`bSZd%EutZ4V{o^y`>O&!=D%w7gyhb#pY{9LIF)i|ROZ=WCpS z$M1n9mXgg;f1q#r_&4EIk+XhDi$)6eARfJ5^8XBs?1&t7b&k{Vh6rv4ATQcxW(fS{XA|8^X5JOCQIP>Dm6x}Xz=Llscj@3K5s~_fZ<2ogsgZ-+$YFgJ z>q(HqB+-n4qwKJBG&+82CRzFe5F5$6YM6B#zPhriwQ#z(LFxMk8_0t7FuTl~gv z{dLFtOI6z@lzUbAW$)G%H{lR(L-y%^xLw0-`0mO5@#eQmAKk?4E(9ry7H3Ag*nr2=?w?A^WY0Iv%EG! z;S<}!O-mN&h6gFRIM|W`PLl@5x#{ce*c1!=zTOY21%BBAZKre9g*yTd&{LpjMfOa0 zPHl)XuDNO$CDDC?3gQHl>(tE==NNEJ7(cgWa75W(9aLfhf|u-uhQVJJ`j4vvhhHeE z5hoWvx{bS^UJT$6R7xJM3!8jiR8PASHxS$&1HzZl=0xL<_s5M^nYSAh-gO1O+-!_e zA;Y}z!$6t3=@+PZGh8E)+M>AL^;NEnS#5kq;rV)t{<#_to)7F`UO06iOZo`&f-~~u z@UK0NT9rSDKNl0)4zFAB3nU0p?FjQy-s>;;=hz0G2L2ABb7*n8KjB@<%hapg?Wd8O z->DV@A}-7ssP;ySO7uY=DH7^4{?RkP{`(qmS8++uy~(J$2%}#g@7XB|+V{hN{Z}89 zsDWqg3i-Nc(_O!;j&XH>5tk(W@yGw3#h4NQ>#zUW#Q#lm=mijw>gz60aJf}EG|VwM z5mBw+GhfmSBzj5Xm|*NHw~?rO3wA7oED4q3vaN3k}tD}i0k#Ss+IRg>yL72bEY@tJb{lj@+Qjpo6XI+hJ-37 zKm0&Lo9F0$2+6XzQB4D-FD_Yvu+2NHzYoTo*fsTGXNnZQG=$t6BVl=k`-G7mShNnv4}mq!o>q+P`Cy%mT|58iuwj9bxuY}`JoM!8 z#7z%9s0ljPj#W`~BSPvE0}dNqWOHY||NmnpYu@`LR5?RB+aG1*Dq$vfRobCb?~Xx~ z+m06(b%uwB3a#i00xAdWkV%&57Q&!|&Z^fILx{K|(=jIvPpCJmZyxP~FMPm?DTRrb z+K42&0p$!y0gz$6U1KwTD2-Q64 z?4n~;q;)R9S_e+KEN2hwZEcgm`&%hh{8z_!f{~7FJGj2if|0&?3Sc7*nrbW?2I2)4 zPA_9+UiM9Q{njS|q-`uoj;R22+}S%CgzDbyrCF!5BjD}<&}c{TMkSPpb0zidudCo&;N&1X2R$s9HBH~%>< zdLmZB{GRe@-Ap}0^BZr|kVQw;AY!7S8(j%-2?q33{bsj2^ZJx=0U0YOrQ>D9hE=b( zV|}3IGzTYIg-!A&jI-NA6AE>~*cR;Aa|gm0TfgqZgb7=5lgwM6&Yvrenm-LX@_O5z zOt=Q_X~&-P;lQ-B z1r&?3?tNs-GAwi6DSlKriV5u$bv9_dr2tU+4j#^#px3g2`&0>lU)+EF(#61Ecl~}f zcrviS4&(4?#YX|YT5fW|GHYc!o17!lp~c0e57s983e@E9kF=yZ^@xG3?U(&r9HId2 z5&jMBF=F&;)&ch&8@&pEO6Yjki@pEet2(zZGv+1*(A1FqVkwV9`4k~cG7H`{#0HPk zo|X6xr{p%9`kt1= zncJD0*lUQsC~lZz(EU_>=SNI$v2*JHe`n_39r|@TZpV#4-2RKdp*=Yaw8v0>V)y!J z4f+9J)SP!n`1inWV|altN~+2~`pkXF^qj|J=MYV4QuHBJBzp%q{B zK1BnV%E7js>yHI<&I%+%R2tdNdu==`9R$=~(Cw3|o9_DERJgY9Z(O@>*Zf+K(9m<- zYo?D^CFs?wN1gH)!}+F_W5>N(Tg8zB8ka7a!WV^3AFQTXYr9Yv?CJ9)&iZJv@Ka9h zoiT3t%>Nx+QfRyh3lJ)7G2O5}Lio~S_FD@X*F&B@jSxLaBtE^Cf!l_cI&%$D3pvoZ z6#7trACQap*>C+dR)Mo1Hy~s|PL^H&kpaVuv8@Q&{kzB8)b&+?%&LBCp;&^av036F zwCtzl*0-0s+dS7+x1Kq9nE}j0O|NSIpM!-U%Q3U}v{;kZOviC`wKIo*KAO7wxN_*G z1j>t4FcAu(5Rn@o=M1n`VnzI!!<#(b#_%#e3!LR)Bv3BC#$}PfSYhF6aEY+x?t3Ec z5cAX`uncmx^PoVnYJiyKhGXUGl8kRzz#kHanl@)t(8JlRim@)-Weil5IiD!R87c$ zS8h{?8Tg*@>WcG^Lbj+w1}ek{t>q4`Y}Zaw%7lzJ7b)~)c{qaD=H!Z^7AK7DUQglm z>{T83*8M0Y(`UE|7}KX+2+R< z#CJePFo5&y7r}c0xZSnJ-m2dXC)7PIhiL{R?b@6xqp;o~fYV*Z#23Cj5AQd|-cxx4 zj;Ei@H)@UfTu4zhh!oMv|MI0@gZ zFd3$l8Jq>u?4zDJ1b>@u_T~<&_>#x z%&D7i?Auj$Djs8AurmHbVbd!9`@DU@zU@3~Snu9joFJU3=W+-b&>zx0;HSj}h+l(4 
zR!@FSq;Bo1=q=PjqZrFV?+9n@!qWQ#DzJ`p2;!cxw=Cx?CNvByplBPVLY+{@W*h53 z(DYmwJFWYVEl|~zl|FHB@Bje+I#S!oAmMVc-vgtJR~>X!HN7J%%XWwCLA==BQG3TI zTsFH&!nCx5XyyB0A*lrxnWn|sLPOSt{c6WQuS!f0Mh$w8YbWmGDrng?g29tS+=cqj zm@w9+VLfney{Gm5+Kjb;E%y7XrtGLcy04W1|HSRmdfs)`;|%k{me_!;o1DW=-@to3 z$+FzMvYxMN!}xhzTpL5k;wEQBU4-77jR}HqQiaHq1UG6=*V3my3X!gTGP(+oTYJVKfght!F zl}`UT3*js=FyB^sW6{cUa5jn=i*+M1flaPCU`@#e((NKwQIgN$TN zK3Ss+7cOMcwQ<4lHM%hnmu?%mDEHYzg1D(kPDAk1M0Zj0NKcle(`B#EP4y z(P@+{AJScr`8!qPbDQ%(3wWc)!uP${3=4fS|5;rCtG9Xd6?E`)SZZab18s3y!FT3$ea!Ln{o*D!U?jU2;y%?><6>ud569d`*>-sz6k_;HKi$`cw(ZpI!iA##Gx}M z^>)oOni6q-RMUgnaa?(=&#kB2KC%d7s-F&}IN{#dWUWqe9O>pr*Fh8;>EVifeX!weA)AHv>yRxC7d|4LgwEJU?lLeY%oc@((7-J zEmok*kS4FOs(79h>x))Vfii!t^d|i$9lPPi==Wy!J?}j2R=+6u@bPpkDKD9PY&uqnW zXqS(5p#{1SAM4^MN^W^)DBdKC+cISXLJu{Zk8R+lJs9M!W=9|0#&k#p5!G^dAS_!r`zFcoO@9gS4ROI%a8FrAmd3pp$ZOiuv zP-*$&C`TV-DQl^2?^~m0CylOOF^AfzAGcy`Qo?o243{COgp76uY;1gHFqP4Q4#qWuY@oL8Px-%~CLLe=;? z^K|ZC<$7^owJqa_RsR>oS9?fbVl6u#AmG}PX4b9nDm6N>C+xUILk1HahxR2?9rGsI zd8OQ`3voxr#dQx>I#2s2P#5`MIDGF33ZI8O%h>R8+mUP66PE|rnKzPY?cTTnw+ZMh{Y*fcQ+GNqB%kAF} zyy&^X#(nBt9d*ID%M?OhZuz9Ij%azVqtY5}Y?&%)ZP{713M13K21Gt4&5vW!tuRV? zVO^Gp6X`w&hbOIc(n>6@B?haIqe^i-%1un@aVoJpJ63SAyTLbUq>I)IL1!9^R=#sg>YVj#yi0`D0Ij4_kA?b?E+h-z9|NXs_f7`zsDG5`F+?zx z68f=9_oSx29iozBEnk-Nyb>@ec1}%Cf{JLoz#VX##tMMUC2PNXgE(~YBahx|Do8M2 zLl1M*WL6s*<40NZZqT;5_4j(v>BWAT7t5C(fMCjoJc^?cjVo8TA}f2ztAAc&KQr{| zi`X}oKawz~#99(qjyI}buyu~})l5?I$zgfjS1hSbF^FR1Y;BJ)vdq^5dTzPHHM&3uz(oRK&%zu_58lP@ntX);^!(RiXs#6M~NH zg(zG2itdLT*W$oq;^hirP4PCS=Z!ZNM#`O{?}XFV%Pgj9`)ozO-&MNLirpP zUb&WL*G>0SLJ$sJnU34PG&u|>9W1#iK#Pn+r0(3Gw=&G>QAyrU^cP9S75calR^T}| zQq);EZ+&VI&ss&98R$}9f-L3yTB|KSc!t@~4qoHfQ= zxp0Uwz=gNW$%sci>3qsnXY;&qD+hpmJ7O)HuE*uu=)dmDxiNqyh=b&93EAZ1{HQ^Z zgEaEST$wDBdFee~*j&v$?y%hCC|zb-8@^<)OXGKb92S#)h%Y)o$6bN z<@9383*fXf{Fd3MPRnN+zBR5jCzjn^`ToIv$Ap$;<VSP(Hbs&F9EQHbfONC_EsCwOjFI}MD&PEk_tF^BcTBvYVOJN6E*#+{7jMQYa`@#j*^7&+) zvp@>PQHU#-fF)UphdjOhP46g~Bq4dFHexTM*u>{DsbUS6|L0<(ROuMJ%g5MBHt?49 z+880vl)kh`6Vl6JgEbin6#BG_n7fWfLsyr1B7?AS>f9yePQqTXyUI5Bjw9Kr34^f8 zI>7?w1}+hFRt(a_Cox<=F|7_tDLg+Dnjq6!){fKB%LXy(56z?^Jj27T*C5NjnpAE=9I&IE#@>L$o zj26%&hIZTV%`V+C_qO`%C+p_X++{hLnW7#orsJHb;CcqDRup%Mt2W&D!Ub*gX}@<0 z?h}LNys9!XM(R6Ma}nbh6!RzoE^QhuW=!@xIj1Dad3OYmrU-{3Y?~5H-I_Ycm}oI+ z_la9{66GSuKwYx*SC*}j6*+mc{00TrUtKCgte{x*!!WQ;S&3KEBTu>F&y2F8&$zxJ zlV&4?J*wFgBL}^id&W#80~aKREw%r6xkktTHW1R7&m|&Z)d|KsFWW6jirWx%ze#Ju(?zAT>m~G^riYb*ys9yl~`&X zhq&$}1&C_A(x@}}=9^}BluMUe13X<<2Ok$f9OmtoA&T_{pSkaEW6;}eg#+Zf(mAx$ zcP7}R-qA?2Q@)EcB-;F(=mjFg8|T)hOSSHqmGfp^Nbw&NRwGi}Z*6A1Fv!~~0eUtU z1901J*V%b7)lNayCHDga+5AWqNimD(6vUH#Z0`>_@r4%MW@Hrli zohIkZxRfxFqTybuI+t6wJ+gz7kN`3!>eqo;ErIi)^h5nxxQ;IpTrYWvo0or~qFEs# zmWR5xDfOZptsgxQ@>{YJdqL>F7O>hNVLO>1@74jY8ZIrds@^x9u;*@_W8Mej3>*4R?7m7SkZ253Cji2h=edfuhfa~?(w$HRt4{aMsMpv+t*gT61@-r1;WGE z)&ix4K%P&eICW+|m>R`@n_26uGD>5DzOI7J?xL-0u1&0th+zTPazgU{ME|F=N zZB+WE+VINxLazaAHJfx-`NU!L+K;*uy2)xG!ebjb9!nVV>(`8ZFn$1_P*S#82I2?&P1AbIJPOW_S3HJ2FZ)M4~=wnWF zQI_vGKcr)qMA&85^5q&gj0%!7O(*uIhTrM~%km$dqJc{LJKWh zS+iX+G<*M-jwBj!EXx=Z#J4CU*J4tmylk8Ml#o|;(KUD|CziqY9Cn`QyIFnd`L)*7 zKspVZLLW6+qSx#d?#LvCURnIO7%zr|q@AYZVwz9_`%b-n6Z({^)^Yw_5ai=~F*QgX zOtOQz^m@hVPdbo=AoZ16=WW7?s>k=TwS%XGogn zGIuWd#8w}EK-r|rqx=)J-wootR{)6`h#M;qT{WY4TEGFFw~a-pNf%e{16!;k;r30@ ztWQgS&9MF>_y?V}MZ&!FMx@BK%LR%6kkQ9n^0hOFedtU9>?y)`!0cJ>f9#{9>GbUR zb6^GIUJtEGg@18Fd$)s-*NR#kVT)pLX~&$F*zN=qq!gb=V0N;0y)sp))YrlDRuK8@ zDbH!*ltZG;>vPgpya2?5Gluq$CbqI;5*9&pC0DDF1^8QLbl$CGiDd>OWrM1 zu%{=0fOB~Yn-36*I;Z}euG=vu3yf;!?x_dwZcCT4HTgvAb{k9MJ0m5=V@avZMplbx 
zC*>?oD|+|N2JMSv361m;aSd3l4!2v{UE=$B6=YG`=eglfV=bM&&WD_R=?FOWN|%{0 zvi7-vklN8djC3KosABT?NEfbU)TtL5j#5-3Lh77}I70v&bR~NAr`EA>&IuMfV6Ot3 zdq#E}I+(3Gt>QAEY%)v27@3bZ(wm7`uh4S|o#IHB<-eg$J!SK<28jZre8JhJ;ZFSI zQ(5?f!qw-(b1EWBz8=9rKC(ZejbG*zCwZ&t_kulO7TR}_y4!%$*C9h?c|us)3D;-> zQ!Sb%-9$Ld4u^{^n_W}VF){BkfoR9YJMbsDczrs*ox8w04v+(ErfK9Z%S<>zr4zrP z8IyN;o=JVE_IaMGOz(4yihh4787<+<++(XjNv%ZAE_<}r4h=zzklZ7YaO;OR>9FRh z?#_pQ6jUtLj~Cf$FV0;OnwU&a%5Ubs7qCh`uJ1mX>g1=;@9HZnZklhX5HeOyhbf-DbNBpL>E#%?paefdFP)vG z4O+7JWgfr02h;X>h7DsO{cyE@=Q1rF6>=fvY|@J%^>C#(7HHbL6OnRil@8OED%I5C z+OSUkJaih^w@j4zs4m~jwnX6KN#w55a&uEHe=>XJ39APlip3Ccf}bVScoeqyh{wHM z3*{Y-nPQ@^(rGz^D}!}3)9fhA-VbjthOR=2mVlaCmQz9}fVEzPL?6rbok8oEm-dag zanFqGgjAB#E}|D+k_n3f3KO|yL?iH}_97|(C#3J*f{)g7r`S~HU7Oe1`i*mO+k04( z05JVYPmyj8DZ9p&NcA12m2_#_43iwm)bjlum7sC?zHZK?GkdcaLZ3qjJfVLprh1po%Izl*CS_j*BE5g*(kVVn=11F=*FQZcyx9uy}4in&%on#I8LsY|wn0w5ar@|5e$z*K}^!(WWTC^3Nt2RU9;ff`EfQp$+A5TN^>efn;4!;ea)Go*Ad0a^J2v;wYB19^q=h)$L7L(xsT}{toZ;bU(Kow<+Y~crVyR` zg@p0|(uE(o$WH!oN$TYkUgw2Ofacd8GWJ{KS*sQAzeD&oCBKD?*oDpn2ypGP8K#;5 zz`o-3d>qqYVl{9NqLr965ySdpu~KPe68#hwgP~nmtrk@PNb~mRp!dXisroW zCn|e6{E(t!jZUWL(eR&i^C5Ap4&N{U%hfG1RJ(yQbRk~_%1&=^AMf8QkQw#^Y(gB|U15Y*;9NL+bK=;$v6P)8mmpTZgh7;@uT9%Ff>rFB znrHS_jW5pPhuM6cqri?>fqmq{KphdQx3{HfifTRJ4Tt$$2t(sPky2Bf#Qi6AN~sF?jKaZfFF@! zUQ^y*;S~q!Cdl9HwlRW^pEo2inIWt<| zCmxrfcmFqL(a!*6KFAS-_f34#8Fry}KEJ!d2yyMh@J~O5J9efcvaS@Q#V_Jma4)5wMgtm+&L-lHfE4oAKh`dG}YE`C4&2M^K&LNtDOq)gMe*MbV*vF1|Q^@wyWvTh2^^?H}f9?CvRnPSVXdnL8 zj>1bjOf+=60SoLY)0bX(#G|(@iwm=?)~pHwUw42-bsoA>k$H>C;?TBG$)fiXW$&Py zx2R#XgBi6x9U?`v+sDYPcPzXY6IItPDZ8)Hw+K1pn!B-! z4m3WzY;4imNiE({piEw9FD>eY2SP1HuM$g9bgJ)~y0m@U5v(MQgd6$7L>CTM$w~@e z+l z6&eJMxM`zA1rHNOAlhUODKhHK1Itl;j5vYL*>{m0kN@&Cl2zn&G{zgZ|Ui*6A9L_5|+6_<1nH*U8U4e6HQ($!}W9JsjZRwKKeJ!KY7xZb>qYB zm$3F-ZB(pFej?6qSUOZd*~Eu5)(U~=Ty;A-1wgs48* z_vXTJVwa>SM~f(Yr?8JT@WmZ#xmUeUk2aywvJ449hEvJ_WDnj_tMh8mSO}CG<@|B< zDwM)5ojmP2(VmBxfe)q_&|E<7SVLBG=W)9+wgvrxq&E@57mpo2obGS=(FBxeT`da# zJL>NNG=DY@1?h;g3=FrPw^xp43~Tn>xIw?*zHAEnwk#nOSvZQ!@H1gH*I- z_OV+4j?2Gt^)dh1+anWaFJW`8SPO)O>xY-kbR>ULN7Ei_2KfKIOk~|kc|g6tD0>2& zK^-#piu+TO`c}Y5Yno-EKiEzpSD`5;uvaTR{H5hiricoLtBpBn@!B~+W%*O($s5Tk zXkYyI55uMJT&=a%28AnaM*&7{^B~vF7E`>4P;iQmmklpoUB$*C9B-|yb`HQ_SE||6 z$B?s|MiiI1-mw?{kWg7Od$>bnVY-(H*9F-}J*T0t-ZJ~{m#HdSu>!ewv!_i_nfkG( zpsq`rfu6$O*cQC{eA0KQ&J7g3DE|QXD_CnZ)TDCt_ToKkhHC_wA{xBAvyoTIAlFa6 zi6%LTr>}Y*>j+rnp6R+6HfC^sqf)ejWY*3{KygDI~P4j-FLUub0!%Fz9B-lWt| z<0Y)o`)brNE2KZdPb#knom14fb^F8M-K_%3Gx?rRm0i?gi=6H0{*6m>rDO4q`U3bL z)xm}(E>?&<-v@;r1Z?|rovmxCB7V{i)V?$i%|unW$`wQ;kzYtCebK3RVj|)#FdvLE ztx)Bain(F7A$CfBfVv>*l%xXdQ7{GxIK2{!&J#ZZA3k$1e;0>+be6mG30*KKnCS)xQWwBzDood>R{^7oU)@ZSXtTt;kOXcxN^43e|2RU=4ti0 zVR!*A#yTJu0RqF}y6|ipl1L}&=sR*i`106OXH;Jfpy_t+Peu*@bSWR}jtq>KI)YP_ z+2Mr0_5&*@sNX!j5q0ww)Y9eCb`>2`%R`F&YPf2qd-s`mQj-@W2vw%eR(DoF(4;W1 zA>0n@@AaXldLb6a(Z}Z=vJhdj=i&!O_FmJPQmIvHye;S;X?pMw^$SR2+$+gdflvXK zICE3ocM}fs54y=ai9J_clzhDVUyJ)xDZkyi@U53P01u#Y{`q3HD)J`-CUP$*s@?XV zMYYKnejG@)Eb~M<&$NAGyJk6%8F{h%?1`GMVE}k%;8}`ZQwGayqtcWzp_khpGa{Oc z^qUdTzVEHj;i9e_lHXdjAEKU4UFdwBSJ^o_-QzHy4e_ST1%2x+g5+I^wA6IX;T^U{ z3t`KN6$KUz6HIhjx3LVOHQGS#%++TxC&|1qsHZE%9zyyC(~l;ZlwemsDMr_b~;tG2&bSa z*utAqkX}wz_ey{2k4xO*7T1LQm!4vxag?dhMFdvCX9UnWr~d`B21(Dvqm*zkLy&F$CHPw(h7#)!f zB$z@fy5mV+IFDFCA*C;NvX3j5$^j;$u7Pq$W#tm}9rqhz5pN7s_9eFoEBmJ29Zna5 zcT{uY5G_PO<%KUVMA5X@z@n#nEYcpb+{RA4)LiK7@QRj&;PyGKoQ5Pp$$mEgPXs$5 zIsCHYFmQ%#M>RVD;Mpm!IN<%(!rS#i2r18kp6-lZ77=^xT-*p4O(X`<0iqLkJCT-| zj_5@8J8QPpegA&LW3uDAFJ(%r2p8BaSx_lJsnI2HU8_{4M81}HbLb0VEgcFG(?4rH 
zMsJe!0JLpIEWNlFOKQbkS&XAK$>Q4;fBfKZ6OyLr7xPkt}wX`FasVPONoS|8D6)2lqI*D505rFYcJ2v(C~qxw6#f0lSN0oV2;D#w9F3 zZ0kbYr%D)2##Com$6FcA?P=HiDg`Vu#ZD9GLp4ZOCy*Q&t_v;rOtJ-gR~z>L zl~bp*#S>()VB6`qJ&K4Xsz3&P*H&Ti^21zOf2!i}dEKHu0XQka_AUkpeMxX~lwB8s zTM56`f(`0u@=p3w!2mVjn(pP?-!f-{Tq8&Ti3uHR%cNvmtprnPW!IsgYfPlkx#2?R zCKC*HPutb z0z*cA=yJ(TgA`2XDn)rVpwx3Z?e*ThfMH~CnFODXmo8ndZ8lm$n~-;jM`}uxHB!fd z$b_zMjiZwQW;kEN)>Gl+n zyk)!%)BR=mr8zE#?$T&M=+a(n9{d9Xu}q|ZOosi^vuXf#H?wtDg5<%KFqCj|;*!-v z-$57u>3%D^T)*DAtqayGs~_B_INl#)|V z=ojaJBMov=pYb&KJ*zhCTG6p=jJN4!TNpD-!SPII=p^$zo8yy9M%coaQyCfjLpmy{ zow3E&L!jVThU_#`RsfaIpHcvad#Stm1{{!@G)M3S9Oxl zMRSOj_raQi{`>lN^pEp8jHIDz<62Ge9@xBExjcfkCZUO>7W5I5;M5kxisL3I(~iRL z9&Yi8X{CX{@~&0SzlR|QQ`Is6+4|5T#v19@mYMf$;s)1Rg)bq^Nuv$fP$|QgIFsVx*mX4St%#i8SmRBN z3nLwlkn&VJgg^{lmhYV>okps9E}!Y+T^EGa>c_>&C_jp1{h9zmsxFTqfyS+eg)fD- z=b9@r2x7f_fGWT`T5HEM#Ur`V34^Y3RZ+z}X{A>i{~Uha+VM-wO$#F8CWT zcJ090J!Jwryap1pC~4?~cc(U`#v*`?XyGr_?gL@LrTmN4cSkmPXoj z4D&1=zR26cr6i>4{M;zdWYno)fdKFF=y;!wzMP{v>C;o3h7B% z^;glOLf*5795w8@WVFkVaVvjnYd)vk?w$g_%lSC7C{` znJdQK%@a&$yFmc17FL|QT^A;B!l2|mz5|)<>C|i5^~tv#_w5A}THfnDMWYPQ)mU@e z!GXCG;jf?%$^(28PE$ewvUL}bnUE~||9}9^3;pok(whT!B?sr~JSfVPRJo$K0a-IR zQpS17l&3Qz_a*{vqr4}|d)G)QYMAQS_U=m7h1GJ|RPw4fzTe0I zaTBQi70NK|f}}I$&|Aze8Tp&$#NKF8hy~yPUpcB7vpY|l+RD$RG_ZhC?J4!h23+Ph zfL72D)cLU68Q*sNimoDB=Gv^!?)NY5q<2Y8IMtR(j7z6LeE)`?jpxn z&*{EQAdN(LV^Z`oM-I_B7tASC?BHO;FS~!VK3a1xn+{>=?WJ&s-eRXS*)L=BbhWv~ zcG%ieE1Y`wk76u!Q?Ew1{fz`pvde!X6jZF}@t7MZ)=1V`_bwt&m>tstg}m4Hj*-WY zh@8H6zB^5YXQVNO59^raa_3G~un1?1FLSx5&g+&Q562uP0Q(pko)^Hb(Ri{gb*q9(N21PaIlv&76`Rq`0qk*Zmo;@|__4am)t)<{LW)L5ug0`I zu;jU^6$d?gPgf$@c4NzO%@)T4UtJbe_O`5_g_>;TRl4(JW+Gz#9A5!`WpTQ<$d2L+ zN*NgM(g&&Zt!G$0L0%ZE zdIGkc+_ZciB+X@Zwzx@e-WRGtK{-}C*N{io&hOmGUhJ2jZ2cgmCUo2Qfr3&I6HGVM zRkxIR>=QFd?V*p$k)kFNavzz$qpMCzrNFa5N)6#G>eTnCnF$@*|K|9%Gg_b^I_vB)a`)q^EG*HR3&HdWNX;7i{_Ktorjqc?}9%(t`fTilO z3rXrRV80LTS%zXBy-0Hp>H=@SPnS8=Z+4HPP(l7Ol}up9-%C0F$S~JzIUwwV?>G={ zvZpN~w@9028Spwj@{!rr$B?mMH@vg^U%OkIi=h~4($ZhDjq9C`6L$GluO?E3p=RjS z6Rif{w76Wh1U*E(I0Z_6Pwiuf?`gn1=8rI^W$G z2I{%?l2mCbIi0n^x48A~MbTyXWwi&a!|9K#7y^Or>4J)ztYdR=zaqe^ds8Oh{EYN) zuRgA$yI@>TIRg;s`PSx-L?1WpTzlLNv>-q0fnRtd=8^WM+V0==2};a>&Ry}q&-U3vgn49uWa(h%#32#Jv@p(Ye~N! 
zbpd)^Kv=FlcZ|{Y&)~Fs*q_`Fb2oM5@2|PTANbFD+|b@_lh(IygDR}k$!-@ep$yz# z$|ZL&F>#1oxp2hS7BC|4(5-F!!|c4$}X1ALa67aTuTVo-$-ZD9Wv}5o2 z@Ngl;G!4mJQNouv%zM(%Kq=t--RVko*8hr98x=iIurnIKfa;4Mn&i*Pa2B&&~ ziH4c~qhxd$jS>5rT)G*=v~`OFLvf1Zb4trS(4C){we$4=An#MPJ;s zfPU2j6Ypj{=G(?+4BWxXQj2%*|A&id2I6)M)g7uww#Ufja_Inuxt_1N`qv~mG6~c) z0CtrPjMjl%she^cn`3Caoo&Cth2~vn?p*sXH3Cqp3!MIzLluQhW2$xoj65e{zD0h$*MZ0o zkOwf9f^G<@ZH&<0u6Iy{Az>$_P{2q3ONWRf&ZU$~mTO+ZJgvMwI`1x00pSf;%U?p; z?AD(O@>Kai`WDt%;%2n|8BluxVtJ+}J0AaTS-;GA@L=G`7N|}aCQ&(iE6bvo%FK%} zLHQiOT_ylxiWN_mtkv@WyX=xtAQSqbi0CFkM74v@7$S47TjZ42hk8&3Sc7x+vEfab z^$9O|1N0cDTV~+%kyV;um7NfW(-Xc&7%`6o9_B; zy#hZp=mc2h49G990pqmfttLx!Jg`@RX$+a@6!$6ly$ZP`v0IVZP%6v{yjwLq1lS2m6|`;RBE zEzE3unJG{Se7pRb*1G2WF>c*tiBXjNhARC(E>2%p3-mvnmip_dXZPxY!zqKT*Twer zw(XnHe|dJ}V z7{h#NLVZCk44XS|cGR>WR|$MPb)tbA$duFy&x5;BX-b4Cf8WlMtte zNju^SW@vY7n#JY~k}V^mq}LBO*|xKT?O00}wRipK&^@BFR|8jl4wKM#g9_3ifN@ZoTpZieE*^56&J=es+lj)`gxB-|WGn2#FYuPRi6 znnC*Z8duq=AeO1PW;o&=7Hyb3Fy!R2>7>Cm9^GWGUndQwv6bs3SDKx&TqCUv_$Ok< zKlDzgC4F#^bPJ1k$#}n?3g^giOcv#{BCQlPGp$HT$2<1-zV;PsczqJKp9{0h=d{{Al)rF^Z)~ligf1yLr8bd zPy-Cl=05j%-u=$GkFWo4@S`7R@6C5#v97h&H6LDRD3ac!xru{=L#nJKuZ@F4Jcxrs z;CX`p_?Oe$kW=6fTsLjSr#R*Pbn7@cOgPH&PjtM^wq^+3@9EOE?x!FwhWL<^I zE+^uKQsP5(U&zyM3*i%5MR@NX3A?|rd#9x3sKoo^BYO71=!$XLf%^~*Uo zsn{7wubp#i&uZn2GmR>|Q&-tLGf{o!LPKMleOh!yLj zWBIc@!NK({|AW-LdhB492Xa9&%4ycO?*7>ofb>TES#tir^!|V8{lCTgReDSDyr^61=cK;m(d|Akc=g*Fc7eY$%i`yWlwH{;;%EBbuKIVW zq8~BtU45;cI2&Vor;7hiuK6CwtqfIW6+D_0#KwVYhNcf>!`?Ky2%@V zlyemnKyNv`&4EkW@0T;|pXd4bAx#0h5~HCohWQWrMNrvQ{UC&1UwACGD<=L9?VQdd z@!%g{NDl9|Qq03pZ=d|HUpZc?+`QmbxT%~W)xe^f(0dXc2Gj-?>tJ@xj0o)U$-adT zdM!rkCQ;0nFSmV8_biJFW$wGBMJouj$6}h|QO7Y#Q z+W8vXMcVlS?@&U2m17%R(qUQeJvoMHI4>E4)cYH_(Kl~r9Y3;Jpea@N1KT0XK~2(+ zNQVgj3LMa9sGH@R|Dk*b>BSu%X>b;eT3VE)&{e%XZ`SDWCSU1ZgXQmf8jW}QG6jO@Nqvq z(XEzeUt~5uIdc3Jx?9&(63QPrAy3(p_L@8n-mf)$`R6U1NruR_%?HHD2)sHU{}f8M zCjjcX+w^_0BYIimZG~1$MwpxqfJzh#uzk;u@!oJq;A0dRUXXD&NL_YTz3p>&lDuH(F?eF@=&AyU8Lo=?u zs#w6{d5qsNm!j|X)S@k;>+c}+MT@lp8I=a?RP?M%lYq0Sb;LB;^!nt7wrlz#R4EZVt0)n4mO z7Hlvm83>1tY$Ak}7!Pk>Yz6HL*o@a3Nb-2T|K~SW)@L%sxW}@or#!-y@sK?gh2>>d4(+^%VeG_JjZ!Mb zJKmXzOBHoy8*g2#mwF@@G#ipc zNXq`^a4j=X!Yc3B3)>X<;*&2c1{ce1Ik3|gGs{c+zhKZiMDle9O?s(tztzvv4Q`@2 z+02JcP%k4O>(q5IN<~A|Kr)oz#IKQWCLd0_+{rplvwBBU^miR8?Gw-0K!JCs?0WNE zldC-4hWDDn)^bN~3@Iu`JN@$<{2m_!$%J_O`N5*x*u@N-=Wcnzz&a8b3?>2^t*Iko zMGG7+lNJ&1go@5{jp^S$e+b!61B&;6AzM<+8%Ze$-)sCeVX?Vynq-o&AM_GEK^msL z9p&a=E=0M4v^-iFj++ReTX!cbT`^%JN{7+s0w`L_{467fL zZA@=!(*6j^%=Gs0QSf?Hr+8H}YQC4HiZ1WW_z_|5C;9gRQ&coH!drUd>U{+Sio3BT z3faZNra^H~zfN;WZi=2+=&cM7h zooDOU^~^QBR1#&<^OWP-M_L6KfNcAcb^TqrS^SqTibVGPY9cH6HwHVJT$-kyndoHd z<8t!$sx)qbG4JwFiPd{L`(QtD$lGt}BGAStCKRHE3FV9LB4|bG9e(3tgQT63I zzJO&f6Hw(o?se`?H!wp%GoLHW)janOQ4}E1ZqhWq1#^%b-rc?Z{TLxTZPo6t9IS>_ zmX!i1pjj#@DMd`fyv*N)gjiT*)=B#8{SG#PdrFTV_uN{90;TaJAW%vMR{u=~LKwqu z;o9}(K@DEB)7a@_EA9uUd&Pr_qH>VMm!Y$ZiR9P*@euhX?|M`72=GG9-*?b4{zQ!{ z-_42Mb05MDmgoKQP~j$UmuBEqx2;!R#X-3_{6REBo3p|RP{+E`CA}8W6O%sYcbViE z;r)uA`q|2h2R5JNE5}WLdkS)zEYXSBGa&R@6zaiQ&O+Z zN&UPS#{D1g(A#^1_I=6KmI+H;&)i;=f8CjZv5SJHKj{ap4;RJ>*$w|H5$C7Aemg=s z7$~Q0KHjswH8eSJU*Shbx6I_Pil08jgrY1=^O+nF5A4~SOu+MUR$2rF_M(dfTvuw| z9!k?^A=3M9-RA}1n(9-V^k@G(Yj8Dhoct?q>z~>2p^XNGHMjZ~Ei8M#^3Ptkl@5GM z5`lyF`sT^WNknEQsOfH_$7bDiLc)%Xw4LjOJ=@4i*nokwoF^p*FcHTrsMMz~R$-cS zxh>k4VjqN99p+mEwYaWV*B4$M`fxg!=wu@{Q2IRP%EP*A zs$aR*54y270~zw}Z7{D#Ks$3&*=v3tZOn3oqEU;T$%5!3g-z@fM;q~1&yVhAQjdvvIipF5527KmKt80V?6^8z(2;k}TOizu}kN>m!9r`->eJ6AJI>nH4{$X;7*|Gd+g zA#MCV68z@OR;V{{e<6Qjw(R=cc=xf_n*jL!>u^x5Gj|!X#!Wo_M+kwEHt`U93eH6g zxy?fBp;dqOE3DMMz09UaT7*98L$76x>%|Q0;qi{oO32HCJ%>l63rl 
zbS;hWa`QaneccLkpVPMYd{!$--|=0SIvIGQfBy`PtdxNOfSst3=HBL66qzImubixx zMo@36q^OLV;P?uFm%_d+z6@&lyCIU$+x?`gdtQ4^bvmZpRoZt+sdqO?`o9Un@N9t$A+=iU9fZ)eAF%+=|3hnSiHtgwHGiX1hWSvSdkqu>KN34e~OYWm{s#wdrl z>q!LwU-Vs9x>@s86NTTMr5Gk19I_bJybYFYWowkVzD7R75p_%`A2f@-XA1XJXOQx~ zzdMjx$ZkuUh4_e?dj6Tl;v77XF(Y6($fUU|U>2{aC5uY*rp%sy4=(AvrYA3S_T$x0RqQ<-KFA<%>-kW z8N%Po*Q=X&c3z@KssdXxSMELcKZxl~I&w16_z>{l=2p486o2?Au`j zDZ;5FH0Pgf2hETbm!$EsmnSnXUn=n}0-%zw@bQq^SDfaHq9(UhjdzcqQcn_B%6G-) zgg&N6Qsq8Z?uyNi3U@$M-K#Zewoc!_tiVIRj-r?pZ|lHbU$a;%{Z)u_(MYN=k4Ev+0UvR z*R>nKm%+vtJ#G<(0|euJWdba0a_bhSTJPZ&WlF zh0jjIYMQ6V*^$$!>vZA=44q|X(i04N-Gbo^sF7A9bE$7aq{olu?`OgNPpeFbtNc4P z*Wp1&jV(Y;Hc#|ZfC_*a9G<7H{@jQq^B zbR}~p4z2#$Uvx#U-2u+2+!&=1#};+(v5A4}YCm^i;QG?W80X!3Kd6?8V5we?ys~mS zTWy+@i#CsWi>1yeuSs6SSMIYW6L_F$pi%AcazWciX-=O^-|kLpdU1b)TZ}R+$GdZ0 zlL*})vuf1=sh4sYoMt9GW=%|IM>`aK9)q{{-pi?7Uwa8qMdWN638O_LN||OeIquEv zz$erpW(_HiBH>1y-Qiv{69fxR*N*3%1ic4w5_PwwT>{$2?q6S9RFO$IGKM@+T;G(P zE^D4?W_H~y2naG0?M)I-_V#HImVdXQlWT;%EC+sM`i(Mr6Oz!`Hp=>+4DQhNrc3I7W|YjZRfv5P6!?psvFnrFT*~Sw zQnKtGuc1nJ0yq1%gu4LcOT$#X9MH^y`Vf++9r{=5-@!xgZ~F6H&6NN5EEqgNt`x^s zo2Qy7(@QVoi|)MfrV;5_(ye$e1NzyHHV0vUxNQXW^zxcN8VSWMzgeYVwFhAXbF->^ zjfIOkf6d+{`PhBi6WKKMt+EC*H^i|7$v!BW(}R-yVxGZKKoJMGx5(KVt8?g8sEXBK6DE0(joGnFaS+H8Pe`>#fcD$R&Fe8DC(r;9w zUPlg?XZRFk%`Ezwo*(o`Pz&1UX~oagi4&~Vx`GqSrGE_!3~U`Ob&*e^kLLu=o4pjO z8HBFqFyAJ}#n1?wCwgv6b{DAAS`Gi&*Ge%nAy@;=);~DjX)s&5n;-IlkXGSd7@`~H zw%Y4L{ocz4`5+|ca+ZNCsh0zoY&C6|308*xMTa5EVi=`vH!v*GWevn~1B}Yf_-&%% z5u8iPDx2e#>%yxQY{rdhc17m!meZQRwG>RMr!0kPk)2w&rkU{s{mx)w$qwA8@v4D+A5iog!R$ZWK;{(c_vmhk%t;Z%vn>E zJtyUZN^xs)-_N;DILEK&u@dkYQfcrTy*}cK8MW(5XNQ_DU+n$--dJ{+VAKdQQ3)CJMR#^)d zuQ{Atwnx#npSHMMgc&0{JPH?BKdRod_kQ;rN z16g)Jly@nJf(rBCI-Lp9usgrUjUUW3`Mmt;2X;x*w47BG63UQPytNo^U@Bzk)Dg{? z7)2{eW4_^l81qrHU7fDPYqkiQ9j~;ld{gym|1Ru!`?6WmfU=LQOz-zv`Kl?X>nkrf z5CDU@l{uGt+0W+n%6Uy{A}sq-`tEQ1xHz*u3_(*5tA4nfznuJTpmSL@(X!+7wkyY{ zP4+5W3`w$p%KESsi+;IOd*DF2By%wA+Op9X6mRiFHfim@y#NB%-CP4Q$E7~s050H* zV4()gpUAQ&L;WTN&l?%9tu?L1i9_jp#|En2j^k}4C*ty~B~^6prFN0FmAEhZ+Z9}6 z2%peTc`=eRAj4_ZEyYX4n~^9T3|l_P@43@_J!*AKVIbFR4#QBmx^VJ=T;r_bRsy%8 zCx_0r3h|?YDB8Z=W}l31P72PJNBZR^vD?VH?rr1#F=BUc18OP8y3bf;#y2~YzWp;b zYcR~qXjaj4*R`R{m{6YmKb8~R>kAD*5J-D!fFQRYf4fQ-fx$atGRgxk-RA|*}$zNI&s%uC&<<60VAy5`9OxWKo;1%@bUgQ zPQdq4YHg@<0JGp-Iy$aGjdVWj^E2$;ajy&Uy+@DC)id7cq$3J^zJ(ByDV3WvM$pq1N!7RQcn$q5oFw*6X0(v&J;GaCf-5iU-)ae-KsrumE3(@G6(Z;H z?{%E!mCx;;Qf)1Ru%hl2wmT0WbMst7d%X+kYqHc?VVJ9PA^DCd*L{ZOcS<>PA?Z(i zWH{{4Tf-13BjP%Gzmjwc%y0h{fHG5Su@l9E>GVntZN&^#gB_@QZa{3bkz7gb3-{Ch9e}&`^cfjPGHdLFJnjVGGy={1VFvKR<}P zZc6IvQ}!#7(749&LFi4xz(*qve!Xi$!=*lM951qh_LH!`g*|)k6A@LKSG<-42lVOq z6n)ew(rz;edUFQg-qmGFsYJezf#9?Tze09x6PT@qnu z^3{ntrRTOV3Nw?z?sSq)JB;efpD4CpZ z#QWRJMCajVMPArrB8-7EPqdr9Yv5S7$tNEt~Kf*Ir+En7xGsZ7kE@xo}NC;d) z8quW7dFuX43AKN>?WRh@0magyrmSc&n?Wki<1^Zl2J>oqTuYTxFRSik}{6GrAFTLs15K0<>^V~Bnt3D%Hx&Q_EH zk)NTIg{ayhUxxHbp+x?G6-Q>Za@_@+zBC@$a)TY60O_27uN=mD&!bO=Pml-i@KKgS z?@ph)$U4y+fhIS&?dG!M_75X8WfO#DA$7D>>#2Qt1IUX{9SOX~536kmJL7w@@%tX@ z2WrV&Y!NT+w&k{*sOgoT4>|ie1eslWmQ2Qy%`&n9DAwcg-QqUJLVwA=M?7A8b6~3_ zD&|jIv9MDMkehLbGnRo*jGFIg_>_6ZaKQ0Y*?h56I>R9WD+b9c=EMJYM&kn)uGC700$ ztX70x2&MA+@;uRnXd1z42uPOyXn8`Yf|2$K=GWbgTZn8KSPNH4v|H7GQ`Rj zW+0i%tvPjWtNmSQyOAic!{IXSHNuo9@?~flhZU|uenEg&55S=L84-_RoM02Xz_Jn@ zNzW}?{q>2lUv*AMHdzREa)+aX@32_6WGO$Y&urfl^h`@pV8;Lsx zraz8PB%zU-aGsXMo;(Y@-1dT*qPNCINOrJ3AYs|HEmfkY^!D+^yiHurDcMqrtN&Dv>!;?-gQuaWkCb;)y<0lmTS2 z+3&b}m{qlHpXc6&`>RXbM14g7_I&!@T%y2NM{9fHIh3QILHwqs8#=rH6wE{dNf&c@ zO4*yadv7Uv<4no&8k*=dO;H4>gs)f9d=7?Hb^Bgw*QzKC!Ne!(63u-S3N%syp|5}h 
zHM&lA5JGqq$C$_q-CVq%g+Sg46j(s+FX#x2HGKkMUEy`B2E3=ih_UVEC- zT|<*@A`%)2L2W8Ld)LzpA`<4`?!6Awn>tuo)=TKe8lYy zQ|&ZabB!L=HD?fs3d#FMTuw(OJu_|66j=y(!$x|i^q7!NN$A))mU!8LzkeGErFnhn zgshXeFr9LN8|k26j0x1p%bE>EpkWP|Xq18AxWo0zp=Zz7*&PbcPM z^r;PV+Phte--?<3k9AA*1mA61JddtIh6~F<=8ZgB-p)7mh=00pZIZe?Cw^WJcUZmk z{LrUkJlleuc?)78Y&R+=WRue;A_w=}9ynO-g-RppQ;`L}uYR_-{=Cnoj7jLu47H%Z zRsItFoxf^H=(u}lyLEzf&*q3I8v;0Me%gKfN@J#O9kwPdMGBh-JO|vJ9v-?59iNh8 zKj;@HrG<9>tx6}&D06+nss-CDl|g3(j_=*S|A;dI^44J>gn&!{)SCBW_fF<#KB&WB zMex_hy4@#6`Z}L!CH3Bg>`p`(XTQNOGOR8*O>!DT*Vv6Hg@2~+`wk7`B4d={qKQ?{ zlxe=lp)K#uT2R}UDQo_jLEJuY`+mVV*=XZOoz)zQw!@I+N{fyqi0q|6-8#yXQEio~ps16SqqYp@$+n*lEWpaA)*dgo6sPV?hYJ8;{_n2(@)3j@B zwf0YUJxwJLK(!PB?JwN_GFM|4S85KOP0J)e8Q0odiJF&LGxBkM_g4cE7Cf3p&AZci0!0EwqAI=uwfw0=oY2)XjtDUDWIpj5KGPs5Hdx1VI z^1`bi*sM*#dPTzB8scK}Ra9ETvd6j(r*x`0^~>+SHZz@g)0~b3L^#Xo2^UXU|CX6! zr$u2!a5#Y=@`94&$BU+{S%A@~Z*z5O#W5VBT7pVRe5M072Fd zX?tK#0ET8`iDPl@s&uf*$y%;m=BYv%3OtU>)2%M121C{IRT2}#QB|G5sglqV^ z?>>yagq4{t7W7#ztyQj|OeLX-=7{hX(M@uNNQ$`l5=3%9J6K$gEzoYV-6asy^92z9 zRDbW7=FEi`Hyh2AKWveKKtjKK2|wuAh0{_zHY>+`oZ+1+$H!L=J+SH%-v}JG>1CR` zsF)^M+1ym)BzBxdL+@-s%yPr-aoINXysx{pJsT?vCNm9M2CAgb+Ohcs?$Pfb;C#6s zPqiD}sbOyWKlWY=c-2|WPpj&d(7@3?)jp~)oJ)WO=Lv>+QU_4-B-u>c1zlOA{%>Rw zB=_dtAgmx<-*JB^OR+>0xDjT?V;V#Vq<#?0g98T~D#^r4>W)em77@GWuI&V}8C`($3*!$aS2 z7?Z~+?&?&HRCQrom<)*Yx_4qIz{3|Ys{@QLGP@e|2iAcVVJ3X!Tue+dcJ(|}o zie?XIT(@KSOKv;TeaXx{KTmpd1*-EnYLikg2-}^{esEHGvE_de`vI!iJ8(Mrn_O+AK+0k|!tE_>{H;3w488Eg< zEmfk=zZ~N*mhZaU?OBH*^&w^ zBbCNlI=<)5#QH&6xCggn*iZQB3T=b?P}N7on8%_)pR8BLbbI=pQgz78zWHZS_&B38 z0SVo!_W`x*+qIOv=C3+zO{H>vydqs4NaJsn(yyG~-jSKA1+i;NTM^yfH1QaUI20bQ z(pBX!sL0w+!!pSN(@)|UG*uX6&E-8r35?irEtu{d;Us38TkFI5k^Ess8|n1@8R8D; zfedDt`}WjW+72)L{2>@rbD2FL(GvD9nnC(;PXdqNnc{JPJfUmT%l5ws@vR%=81ZP8 zpq3xBpw0dM(o zP#!NejOx(C)|+4s*R5ngv{_Iy3s9~9%Q_ILDU()%3I$P8^>>p`Il5Cl@E1nTCNSqu zc}uOX*&H0p(b`c=Y;Y-_G=B`pbAB+mo=n<(rlk=<=?jo@PXPw22++!z)J47g|3npO36NDlV zughp4o$HsB&X%2DwZ_XXO#vFn^2p~fo@#mYL);2tPA!#V%)I2ae_8wiveu465(*N{ zM1-=9Tw9@zyjznl?q+1K3$bs}pudK03{s;ao(S859j>v4gpmk$jGF6_qQS`i9J^}D zgVWBT6^kg^af*v4AOC9^q8gM;*_?}SwNKem(lGBtj5h%iqr zDMnv&mAwqKW;(x80RjHM8UpCax$kNZ3Eu;pgK?nYW#oQSbTm|pdj?}-@Whrh9dP3z z>D>M%Ru&yPnA~5y_~I{p>W~Q49mY0JK^3D1?YlR}%i{V{pTeI|aTv*`2s>EsIQ)1A z$ohMCP@8*rXrN)ni_ZCvY|-j@H>*twXhY1Tb2AC<$zE=`xutgV>LqN9mC%}Riq2^< zE2hl1Yotpm17z5z)l!O)!#RLaiUTMN>S#ofv0?@o>$5mV^M*l^t;XI}epOr~0}2RW zSS+T;)dt7OC|%09&M}r*@fOU!SJZj>3#dSipZ-k_Ui0D`{LM58&y7b7ZmS}z-{xI5 z=_%1u419b-e4E7*uyQVh0(Cj1+pjRFpPyeT94Ky zTTReeh(eyPE+*ocZ0h!j^Ji}cDB}v$Q&UDfO8Fp~-*Q{nN`C}8*l0FIuNmwBg8RQp z$lIZZ2O|Rs$Bs_s>L=PQbPhy{2>%O@WGPm3A`a5tbhGhst(l*0 zIeuu^`JszxeYN!kD3&#HnM#ITGwsU;dUMp2XU-w(%-P@Lf~! 
zGWC*GQ9E0oXfI*evd-CT^)>p}HJYhM+shY#TnjaX9@(r9)mC!{lU(ju0B+_G06^oP zswE5d6qHFSl*ri9?sq8)SWH1Zk(*Sf8U4I)#~T52wWLr|AoI5 zvLD1uz%^b|5M+oBpHqpRjcN5xt~d+RnCF=^s%sp5|uR)N?R zEYOS(G88mr2;3BL%2P!!+@Tea?x$|EX-)fp&@B8e!#b%b5mN z`uD8U2bz|l7b*!g*RR#8r-^BmU+SOg!^^d*!*oh*4fkp8-hF74jh%134P^XYnG?m{ z>PK02K)n#@NSF4jGik9by1g^ui{7%+^E7YqZ`dNLDb;Z$c?p_pG1a@Qk(lNzQ$_GR zS29KdVhhVFy`V|>lU4%lVjYe}2U1^PBJ1b*#Ap`O;@sS*UFT@%##OekU2KQ{8ri)9 zG}FXXQ7%sT!|*8QnWvo0y#LZ9xoD)uyH6S?evg+KSJ{oQT}^V8QQJA!k+)G9(tehw z0;-{lC&R!5f2U71q_|P;H=yxYBY?>WG}EX%e!kVzU9Y{YaxmopNj|%EfNMp+&} zp7Th7Z96>6Y;s?ZUE9>+M9l$4mT3zQsX~lQ&J!%4M~QnKmAA(p{N0jtrgUKXJc?aSjJ?H_G1c1!Nz!@ZdUJ*mQO*jE{bd$@w0AsqZ$7RlTaTdnY zSv506mD`~bSCCEo&CI7LHcdQWYVE+N4`I~}&5p!RY0}cf^D^G4Td0pFlfBR8X5A+c z?wK|Rh~}2(Vm)NE<9am){Dut@?)bZd4?>n;3P)Zw-}IUr)zTR-7sdL~JXO}UZFy!P zqDwtQ*@$GXG(vh{@ zyIrJ(@?ic3Le>nQWrq+hzwW$Nq_f`kmUA$iDrYU#u0hz(5Z+c{g*#IRTM-h|OJG&KuVkS&SZ!aw-`&F6jewlw>z@ z=3EAG7q;i^Q5#wNta9dmi~8$)4ALrWP-m$GLNa-@+dHVHz|c)J|K!7B8ThLo9aXMv zBo;SOlyqLySzdI^76q0I@sI7RHMnqnJ8lku&-cU6vB+KLZ1?SP-71>_=c5b9UpHWK z4xiJ+^HdWD$%-PWcoRY{@tLl$ap~Y_!Rs{+b4~sZ7ZP@uF!i0udY9aDOb_I_msjg( zorQj>j+Xss#k84xE7(Z)ygwcGTrA-Gcib!I)nWrcNp%gBfa_BAym2v<+{Q}TPeZ4r zNvh`niD7vyCU*s4G&DCb;41l;v>Rd$HM>d0!PS>4D$0Ot_JiI@*`2H|+&={Rt|g{s z{kZ~|RKge;%c^P#@SQOknrhCs0hNiM7Jj)SS@>~zCj+(GBh3rQ!2Dz#p8H`i3#qf| z>ft=)wHa~un2s)T_C9`ks)&gxV7OA($dKY7yhX>ib9?Wk(yc;_t!#!KW_;}Yd>QNn zI5ZOZb34RzKSP`A4uw(6iROT`cmyTVdTDwdBVeUcXu3IbTO(73n~;n_DDXQV`n|SF zh?w9ptE88k5&8t)n&598_B|$YVEk7zCs-ptxS9uMD+L;zeou)gZSNcwTsU$T0z>KH zXMEO=6kmZWNstzfU0yq;7uYqZ3Y?)?pUsc^D@D7s^DVm$bXUABIzGjAk$1q-oD+AX ztvV%lQwD^=)iGNWb$usByFnE`ep!HdarVUbAn${K=O|S>+_+`zxeOL9xHuR6^l6@U z=ql@)y=~}O#N(w+PfTRGu538=`^4_{ED$mRFrOBk+Tvc4BpVrP+N|B_vhXHBG&n|_ z^!Xg9C4(!s<^Jn)B)=TvI6b}$!+t}JBLyaszGI;3`WkZp&BXuh&$zPnFvZ-k>`NFs zPc0~U*66?PnpeVy)XPTjx87`XaH*LE!JYK1j@`Y@1ciP5D`#dM$4z{X*BzGbZ;Y37 zc~|1)YwTIn^oFE3-)g>Yz5dKW2xp?C=bR+LNft#{C(sJhBfgX$4>~&?oLxH@gSpkqrxoX_)ofx z)FN<~AU;ZrbXzPe8Z=rm$ew@`Fk5(_0u0mYoTODeW;J3zM{+p1=BP2B4FilokwW%M zj|#Vak2W}x1#Ltapn5Wz$pWET!lwF;_GbV@w^}r(?Q=3O!xlH4yqSPf-E?p@O7>lk z&2JysA;@;>I5xT>Zuay_T$AIzc&x+6a5f*# zXnEOkF$^^~S5D+f-)|ke(m|8)@biFmrss^vP~1D)vLcyYB!inzaU4oa8meY=0#BEz zPW>#_P~+?~(yNtsvJm2Somk-ap<>Le(5EtO^43b=F{Rm+QCla20_zh{H-tk+q$%y=C#4Gn*gGqIqOC!G5Nz$a*Rd^WJ zerA?m9`3PeJ=w^L1|2PP>hOi2#xLxG%+BO>DraY2e)B3DNNbBF{;1KD#1VHT#NS^4 zoaiaRCl&3ZZ!C?R%#5RVOs|;w2VGB|&Bpj1(%f%Iq3nA~%-kp`3Ic%)YkkXHS^#S& zZybB*>^{=-6JS+R>d-LFq053`jYz4n*zoo*+fy~MfW@_5QX!WS7o+4dGgj}3DPQB_bZx}JSdZo}5sUTZcr}6Nmx8fU{!Fgi zkG7bw<4&rU=M#fBk>qR_ENvM9;~$PEVgwHsqXm~j4}|ky1>d}@hwn=|wJGCqCny>c zAU7(p_P2QO{_V}y^xUw?S_vOO+KLMFEy*TYWS6h661FK%8*Xs3adRYv1YXV@Yn=H4 zL9i6B&xQNcbh}8ml$e6YQcoSx0eEbb2wAyVv&2ub_NiUKIs|NSEIZ%b z2`i#!pKerl@WHGXMunF}64{TIKA_;#6Ji+Im^W8<$G)WlV6RW)igx|m*KYboxU=b= zM_Yysf!mFfV@J-o<8S?8N83>^ssg30(YAnfF=MpEToYK>v2cXi4n6b%SpB6?Dl@*8 zaef;$=sC`v%oL{}fbjlnn0NCvdfhmH1gRG5Bp?-Rh0zc;4WXS|uaI;Dr@+>SC90rL zBiyj%sZWt^Ny4eOnobtc|vWw6uYRZ%Q@vePS5ODIe zd+d6b2qBLjTxBZDL-H&HmT3Pkfw1G7?hx^%7gfyf-$?EGQ|*<7P*CAOEZ{@7&UjXq zvZFyvGR;w_nIX_P1@zTT0oKT_vebn5^Ez&eg5D}8o2=78%@VzCWHVQ!>`&rvvc^sI zr3Tc#>@mvZ9X<8@{gL0@(`ExM@-x9}HlGTjHhOkUvm;Z{i5dU__NsqAKJjtaSXN1u z1;PwVH|V zyJ0SeA}QaQa-LC%{MgXQ43#YZ`~Xu?R~fchYY!9A)XhP_mu)(ehSH0|kbvE)NdOeg z%9#U(n>L9Fi7wc;0_I|s#PS3!WJ@FbVhIA@s8tl}~e0v>0bn|AjYW<>E zOG}F@VC02`1C~7IMRpAV28YQ7b#brpmhT0%s0kPIZq#zOqPQHyOWeHfO?-^sja~1N z0-z(!*;;OSwnE?$l=h>5HFGMF?wY(dQzNdgtgI}x?CUD2zyqb(of*}gX`;_dBqm?`HNijMEhr=GH0wGQl!dzzTg!tZ4%!abV2N9c<%3Tw)&>Wq;ZyBX|vub>QWC zI@`+qaWYQJOanah%~!^gCh*xNpXyR$7*B89%5uE6ggc>ZyOB1aYOjo}0o4d>@Q2_a~2L5uL` 
zDn`XJNc*XTkx+l?z@99UonAoa(pT;RS*bB2ad3_rphemaUPKPaCWezed*~rGC9a+_ z`g}RiVQhD~Cy`dSxptCZ(JABzV43*w(1z7U&WVR-yYL=H_~o7TfDSYPK(u?z83i{1 z3dxEPx*jmfJBXIKua79twInd2YXFT`GvWMqlfjQ86FYbk$)IH8I+={ZN6Hv|^8j4| zt8Pn~3&7(9*iZ@Iq)cw_pEb>NG_91;+SSkKx{*|BrU-RylZ6A`IFbGm9=-~|gm4AT zpM~_jR+sSD(9te-w*x?{DDid)v9h~MDEq9Uds8RSCl|z{9fJZ-2pLt8NA$hVOdFR7 zsYhkewZ}t+naU;5srv()+XpICyl=R6KB&2o48kY#vZ>eV=sWnE82H@6{Lkm#=Bj$* zdUX80NXgV!h3uG`XzcfVh|4nz=!#@L2K-IEkVEOyXgE}TF zB64U4pEY@VDI9^TAXIS{h0j$U_)CU%S{EQ<}_Fp z&%MDc@S^BOYRAF{oMvnp%yDuRSUu?h^ikzPAA2qNfIz_8**@1yZ`|U4%Idb#TjhZ0 zzd>BeB$4fld?(l-Q|94^cvNioI+Vm2h ze2+C8kc}QC_jt`r&ESYZQp(Gs6Wah0Uk zPVx*7q7r~L;gN+avHWz{3RdYK5bz5Q2BRul!^wJn;uA+f4n;g0y%p#W4|h(?vI88c zzO5mQeqhfVPzxW9@6ZqM%kA3VQF$u68bdUAyfvbaA-ivx0a1MLU^veuU=p^9Ojf@ zYPXSR(L$yA$n`_MlXS0qhvSU)#Z>IHi|loisrd%BMoJW^46A)>_iM2aoqLYtP<|=fc-N2(YcsYfP)F`W zUh}W0)q$>kz_fA1t(qSS?E66NXMgP+#c0S$U44_re-gzX0*fk=0N9gZwB=B9b z6=nfB(WH1eex}&W1$GijBZUj^%v}4X4-dXt_2U1e_c>F_E@x|^rS$}AO>nT(#|fxY zapg04*?3@Cp$6$O|5M@9Q;R7@s@uJ{l2|Vp#9pBhF2F#>+U$EA517cP zADqoz&MZ$$bOQMob>1I*?acD$32&5B?OK*~dUqm(=~)yPzt_Q3aQj6P{?l1quM=fo3B@|!O{NlWwjuTJ}pyv$~ zCI!8VFfPaUDYy3nK>zMv+r$|~d5nzZX?IywyF0ZkJ{1{U+s7DO5$Bd&!vesD%cAti z6k6QrbF|&1ba2A-dQ*d&)nUru)BbsU)MCgJ%T-{*v+4~|4P4J*Ac-UW=B}68`THvw?x@TDkTSoh-Oz4u zwQ_60`~V@!P4C{nZ|gszvc7DklLmQ_{cit9-&9QwW^1x~8CW!U&EU{L)vdfemGC|L z2&(r2aT{G@D}@S-$MtGKFhXSoL2p?=RV0I z9w|MGz_u!cG3o4R(}7^pWBFT(T_<8n2t5m!b-NOIGxxs)-pAT6$QP;{>Y5e5p<07F z%et+oiMvfz(deaa=iqNz03=B~Kw16j(apSrzjk=v5yE^ir5k#x2db?qvBnO_UUmZ7 z^zu$REN84A(A*sS>!D77sS-JhD|cKSaOP=v5y#qV?*tO^eRk!-38Wv-`N(Ik!+-K? z^Z#S-tK*`~_WuP%MZ_RP8kCThmPQbyK{^MOZs{Dt00T(@0VyS!t|)il`*5ReO7Y0VEd?@=V3 zzj{jp@OB@dKx)Z%WypHa6d#2|3!5T+IC!DNW4fh0CYfbJg{E*CxJQ%9hG_X~djI_H zV$H_QQs3}RkKx1Jg3BYeOZ`y#eynYg*H#4M;&zfG)NrvY*{t&ZjkFZk))QEi4ZNcy zL+a{!wpK>e(#yCv0~Q@r4_t|%6BWf~^6m34<~HcGN~MGygb{w1Y5a_)ZTB-x+NODu zQvCQGVz)a99KcMD)iW(HPZk3b3VQ$xcNs{OxB;$TL;CJ^tXfj7qSP_oVnaDbI)g%q zy4UX2jnwIn?TLK0UAJ5h02FATT+6rGXW`}cTixuGoecr+2IJ+m*#Oxy?e7VQ-a#%_ z-1aolestV)(lt%LbuWJ*9PaNr&hmbL<#a#jEnbbzREaSx3<*98sXqwgEHmiNFZ{rW zxDXwd#7X{r_W;t2DYrPDu5W=QpX<~6sI9oj^}WaKEv6f`^VKRPcz^mc>0hlhU>#z* zg4g{t{h&}9po0evLvtU7@wy)-pYONoi5ffftzlg}08%u3_h&AF4o{58J3)jAQI$31H~D>9FJ8?QQCp&op1OQi`Ddu7zx;5GaBJf*mF+0|5sT;qK?@8jn1 z?q_&SO_g_$7xs}-W=|Id@0J6!-$0#Qt~JP7Q^s89Bb*xIEwvk9nXu2GUJJ-0#+kX? 
zm%e}hfZoXY zg3QahC#Hq0=|@ncMqZHc=o3&HNM3QZUA_#wWWhIzSBr1^9MQ2Eg?f!3_W5UI*zl5jpF6c$6-_N*gnNYli?LjB3T>b0ASJPxTpwI3ZrQp$DVpjv zsd&N}mf-HoCwqU<|;qp0zqT zdWDb}aMYljZOBUY>EJ#Ko%w{fe^iU;E`Cp%%A;BKT)=Lm%LW2yK`~s3vU*129%sRV zfThXpTjRB6I0o>rmem@3f^%;lC~q|S?AnesIMfc(0J1h)lGudagOQ8-;B-Iw`v#;U346L1}Lm6!A1!ko4#p zy@}v{kYrT4*qsivwYYngkypAi9?gDUbOlhOIFPkbRvHcxT=%(4VD}x;I|yU}<~uBu z0}Tq%YDuDFTs0tRPT!VHjahN$4d3NHKMvof3g0|>gjY1DB2e0jux8KTAte3t$1{R-iQGkjmXruN8gJ)EsiGp=x3$~KiXLY za!>iZMfQEA6{~RK*X@Zb)ac)&kl?pciB^}wFsLM_j2vEj<^VY}z71%gn%OcU-uAUM zTaA||<5O7iQ2_HAF>L2An8T_+0dyQfR}t!TQn3{5+7b8!oQ-7td&t$4bkFdDMci~> zqh<5JA`7Ry2W`=|CdlEqq${D-zO(}u|Aq$pojWyv6qf4kr7AlHByM!Shpm|H+<89T z$&o>MxHs=dM9$7>QtKWq^X%p5>%w^a?lGD}&&IB|ye2)#gj@nEw#QWOohB-tyy4X8 z*t8*@FElANY`|K+T!tT;2zE=?iLFDkXkv7C=_OW)yfX|1SNcmm%fOE1)Gg_@Iw=Rq z+bJ=rSe+azEpeQpiWhVZHepey+{P8rIZ#M?>%(q%qZ7S|+YJGR!h`Rq(q2%2jk7>+ zCh>i0OfezwNkX>u&xNj!JO%B-6%yr0N!%>A1yFtkvli6|PJ@z;h}+nO$?8%%xtEbd;}VCoNTD|D8d#kePK9vbBvDX$4_Tr>Y@(kU`XF)P1aC;OfdRMsya2@j1*UL-&+ zz1T1TDCLV%1~&+Zw4dL+Lq99+=;8HUdfu=v`lhdx*CQ>_veM(gImcaZ+9933qfbZJ z9gjlMMM*(vI4q!QkOtq=DKhA&drCa&>0q}@;`;=T;jHF_`soqtuP?~~f!e?FLXlkO z70r(tjI|A6!|AkVeAwYy17$W^{YF=w!2~pF&D%+;p=%XmTp-25HsN##JZ7QTOGgV; z&JU(N0REz0O@-qlEJvr)hB0Wa7~o%JM-J;)x}1tmp1x2-WF^K;*SB3Z?$a7ZyAcej zu#Z11U81k-(X;n-0RVSbpMvkM1Autz>cgt`?q*KVgwTZRtQSq9u+x=#NP_T1DAq?* zy6~VYl0T%(VPb@BX6+U|@yOcR45oaoSas7SfZ*Jf2;F=cJ%Tmb8=&udd3KVfxARv{ zREr0w)<+6COi+0ioh`Jv=>x!UAQ9)Sbqph4ZKje%2yG4K=zue*Y%O=cH+}Jibn?aB z8du{}>7K%)C^X9lBN>UM7GJIXosvh}QD=Kg%b##Rz6yXICG);8qSQR>c-b05^R%z2 zX1WUH<|uNhJ2G6#t9LW*HUQz~-!BZ>wBff55s95Ha9o}V@@N!z#F|<_dSE|q?Tu`l z7`C#gonONhB{o1Ez8o&|HKLHPF zc7Et1CJkU5^8?RbEo$K=Um&6Z-C(dX?Ewc6GeIZ#!rn85yMMzAB#8)I)Yl9~R%&4r zs~t9(g)VT_F#4EEnbsaEsdO0-mD&iRe9;I%JPX;#DXOiNqLn?)L~Lwu0U*tr&N!ax z+U)Q9`;?v+V4v?Ba%ym>wQ3K(x2|?sxVJ|VIK-=2_hhZP6-{w0J97gORb)I_Y@Y3!mz}5GWQ`4VB`Fz9`({ys& zzRhubn@i*j4udeg->Td6YI+=QvG9^|k>>NcUb~Pnyg7@WJm<@MuWcKzK6(Bj$aSHI z8Fx%sO7`tpX|C0MJzZ`WXWb^oM?Xn>vcgahy8f1zgoNaXofBJRlA%P-cJKXDtyGB{ zHxw$I;M_))_U=(H0Rr^QPg0zo)T!P?F`h2|Z{W+qx)U8S9Mr=#4$V~<)!l&%<@e;+ zaQf0l!8dbiaBEcX>;s2iTU2B2IDxqHGy;7+D)mpEAinhLkRLftffVe9NsUKw?o0z9 z&jFE>=hY|Oq2-v|mWvvO#fFIxC!c)#@9s~Wg#+ILl#C>fwG+dx1B4KjUc|-4iAb4* zrg>`Ja$Al#teCCzDktgR9(O4tUgNa2Si$jq5EPi^{$%rqB?t_|xPyJ~E6Y_fZByLHAa+m7E636_Bn2Sm=+*~F#y8^n66Gwas z9N+0hy);@vkBo(m_Ok}7K?N?(r{E)RFQ9!!SpmH69ylwM`H3yq)pp>zY+#!+H4zw2 zbJg?Cb*FAmM6=aazmxEl2k6x6T3P;cB`@n7tgvz|hyaI*TKU9Z&>po77O7%Lq&C1F z#RCfN%Jhdyz?OeuBIsj6oL@Nv`t{P5z3*1a6aa|YC+RLgfMpm}LZU$RAsUg?baLU~ zE79Bnnw90*U!R-!7oB`ATG{_4(wx zCxV_bUK&1uWxXy><>~T$3;O{4!x&C)Z%nW}_pm>@b^9}=21mc$+3Di3;Dvnh1-(K8 zE4NbO;R#2I5&dFsao%zsd=xJewdqOJ*W~;g&C^ErY1~zwEl&~l{^-%F!FA-kH)gKB zgtdNNHTq!aU(0)4t6wsh``Im8UD@{-JV`+BYO!smh81>B)uO+dt5H2t0g`T)F@rN1 zM|}|yPC@U3=1`P;SBH0eCZl8X{cI7-e+b0q$})?1Dk z%u=hKY%DkcSiijROX5EB&&wRgx@cux?dUNgH6Og30h{7uHW^?t-mha7uG)P}=#~jL z{aEfuN7VR;my5ey@UK)Ka0owPyzUqC?TNzuYj@@ED7c|w1n567-+|{VvyPRB_+M7G zI(hEgxi{}F5TFM9W{fZ4of=SKUxI&{Z0?@V$QL>(>Z&)Uhf|?0Be7<`_IwzegTCOSD2F#ATj`>m=~XK(IPH81zZ1~+ zH+Khmz2t3iQYll>wX`~A5$2=&MQ%+w@YJBrS#R3=NBH}yv*JK&)hk3=2_JQZ5>OPY z{joK7QwOd~7e0GlzMLgo(PL7h5LYmjqy`QF!qb-_qDx0Sp#XA9UzrG1dIBn%kSE9j zzt<{`UXmiiOSIv;d#gx@j~%}QZX*mj&k$jNj}o*UegjfTB|zd^h|xQPlU@yln43Q> zv+sY}u}V1ud3g3LgQRtz{tqm`UslJ=$%1BP7zhE%xPSJ}T{%T_b8UIbl0oO?=%pu* zpYWruQqtg`tZvU;?{}*`BFPmmJX2!`l@w{cK+|{I zV`FrgO%wC4e!-_T0zTzH|8c$2^o6PXZ~3q)Uiyx#6u&EH?~PNJw%$ntepl4t_(FBA7-Aow5M36##suo~ zAV^SXeSeG|$z=4gazGcDK<-B68)rY9_GR^&auG|}YC|t7?Yj_5WIAIp7As@*x^+j< z=X!nLV-9<06V3Yqvm}J{MCE@)-b8j?O1*ggd_=z3o)X|{=P73fNMpQq?!@!iJf5uc 
zj?Vad$;$Op#@W+!q@>q@AX!(0i#KwCTuQ3(yok_guy7}nwgGNi@WJ{TtfQ!iC^@k# zI+?#&OJ;!VODB(f;B=?ciHVh3F@?gWefofRWQ73e%FjHz|E0&dT(iDZL$dP6)2z}< zH9e+Czcf;U%rYx8**-)}efaf4J29@P%Hw<%WEA zjzWt5BgmQM&8<4K8BKzrUY^@^bvVvyHQK zPsBSCGO{;U;=WOV(cLEC(hDV15kUl?5`F#X=twc_=Jlt`guSfQDl~zCdS}ghlTCoc8c;;? zUlBDW?hOesjiMHIOdq?2LjcTDG!L%(7~gOO%BsCIYx9x!4DmO_JRJqFy~(^y8Ee)> z5F?q}-mPlJggt8_VCD4L{7&dfN(1|ZM{5oXtJ=2vfT>efZoB=v$M?Ho{n-)|A%kZM zUylN^CviOuKD#-_4Q^u@W>ZiN(2ZbXVE8mOH9(Jl*8Cw@f9?~WZDRzT@F*^$3W4}` zAUMV(B%}*487%ki-Lr8~op@z@mMD63w2WvyYyVXB{g*&fI_o(XGUbg%-(-@`xCwDj zZf5CLU4x|KXHP=iJWm?ChYx%`dpMAVlKg)I(fJ)rE@21P~Q3PlFSX_1{~E+PS*>-NY@qrc6BxgXO26Wx z;>4m2RA&H-$c61tQ#bJn(zz4}Bnv;W%-kH{xRatG=cU&&clEjYI*nD#D*qtnrtaSy zOn;Y2w9(+V=ihM>ZRS|%J#foqO@Bs@$ibQQVP=3X!B7+0z zJqJnGNvcqw_dcKz*nzZpp484G7bqFclPVUBPlGYKSKh&1RZ&f=J9TxLI@Sf`k>=@3n+(_sINi z3HiHmOfp>pGvanr)3>>~xlFp1)3{%dCcwpL|Gj%8v`~Uer!<+sLzlCE_l72D{^=qY zmPK1mvUnBwbSb)&o#xeV^X~6HCI6BO0exV_`x+6;y*9r*Z#If55O!djiv9S}x<6&` zl0r6kKxfKxtbrC9-t;I(t1c@Fmi`c^VbpO8(;& z`L{nwc?URLoHlsgC4Qc`AC@!RCi*N%Ot_#?XOGG(%c2@bj z=0d;yrT^&@A;^rDM*ewt{~-*w&VdIGDen5`9vy%9b!p&-g#LLezrDwQ@(HAVz@g-& zg=N(Lbi2R(Ffgz=^Uo*PAJ)1$nZOhEdYt^xe}9ts)4u>5X(ay1J^sm~nIQ)T81)b1 z6@Pr?f4mSrYVvRBVZR0CzkPxLPo!y%*m%W% zd{Fx1V*^wBZ^)_t?Ok2^|5N+_6fXaNYX8Zi^~a$6wE1$U*VvvV2 z0?zvzkSF*M;QwxcRY(pD4-W@^HP;&&#+ZHg);(D7%F8F30-Us%can?1|F9#^JixOH zJX-8FA^`^zUfyE52BxXswqA2)JSi#DZ6uPE9wdH-rQ%=b*S6o*J2cNl6}{%(uTChtSg&m|j^*(DpN$1G#9*lu(<5L?de-2b;F;4l6= z!-hwi+UT)SlP#WdJ*3f^;NI`8OAF2|qa!icbbtD6xH?Iyr%u@1KU(j8S=|`&2}(>F z+_pzzIABaSZ-!)xs>}Yi>@ig18@`_2k;phQHJYxgj-DWH{q6bv+w1+4|I(4Sc`&XY za?#V%v-kSGiSPNbp9JOcq{aY6;X0dS`}Ef*pd+TPmcaSrFCM|~B1_tmwC#$=(PVyL zAWgi(WGKbY`O9$f95{NbJ~VTB8+kdkH<0K{#6Nx#TP{KNvlKj&J4ocN%jwMUpQD+% zfR`^BurcmFNRH>@`F!BaKO82wK$RAQ(U%3~NQZ&gZ|A5j%^(o>if$-!eqWGR z`H6G>>VYB~u8<`=%r5X1l z3{rur;@und6UNn`p{P|V$8{i_8wjx*0|Kp3C z9mS=}9>49Fk8(j$Qt2{4`Ohy=bq{2Y)VwRdNz6+>Jt5)wKNN4xwQ&P?x48QA?jsbz z0mW+Inc3)nzJfo09A~|Pz{q*@%`<<_uJjCW94;GLH2lZcU;uq9d{le?H~pV$V-OS0 z97ypC-v4kBHC$K`CU^b}O#>b9u5?Wd7XPM4``fKaFH(T0+J{9@{tPQhC2)~o!waYX z#+~7}i=4n00wbxW{cUBKWCzJt{yVq7(fs-SB^*I`BjiT8vIoWOrEPFVm{QUgx1em`81gH;rr{;bO$-n({ z7re8{pIA<5swKOfO;s26;r<^A3OG;cr7OQw=Ww?r&XlUNBq$z$TvWYi z!Z_;)ikK?TB_(qD!G)0vi;w?fRR8s<&%6i2rAWJV{>Q?q5EdA`C@5aYh(Xy>CBrV& z2d@m4TuxP2b}0Dq8veo6+b@GCq~QwtN&hjv)Ys>yqM}k0yu{T!bg4ci#WX?pJ*zb0 zv+8HYe{jKHeENb6p~6_`?cZ>Vzg**%xcrvfuKYEkrmxS}y`=915O z{di*xmwzcvInT)l^LHdm)PkC9W_uMVdl2$d{Cokw&5Zx@DToZbF?Bqute;`ta|(nl zGbl0k7Tp598_?$v0!iG7`qmfKzn?b$b>!_NeGdZ-{eCenU+}~5 z6v7yBFSpkS0Kv{lfCFOHRD62kFIB}FLe|X?562_fA8AV=NgGQITt~Qm^&-eS3KJxv zf4-+b-VT}&Cx8Q7G&nC`zDz<(&J21nB-FlBhrUe2AYc@T@UWE^Oicp_FImjX$%7N2!UyK=Eon1Cnvv~yhT}R#upXxn~-e#MLP4_>y{9ne+*Z3)8>yZfk6_y7} zy&XW&nE&|cTd<#)vjqhPo@h5sjUa1FF?CsLk1lZS)WpMt(Ta5f0|yg)=>9?+VSg>t zLj4&ah@%_JWBTC4%PU;hKradf3FYKNo;-7*t->K7eoQ;xP!3Q5W8^Gb1VAI>`dRA} zLM$vS*UvNe^MbOycU=@S(CQDMzWM1qLuxqt^;;=;R~|F|D+nSKPYK-V{Sd?7{*YJY zN4WjrRt9cMH}7Prr^Fts(ZzuhDgR@@<^}rc&v0yBlB;B#2^6XNcSSib#NGL%Oi=bQ z)iS%_&fRvP0>H$%o(CFa{8V6xak*Pe0$2c-`!%3GWt^m`K#Wvl{Ksj{wpjou5;#cqQG|P26hLl8S^ue5X{_hV{<0)gnT>Y_-gB zMs8vFm@^v0Uo26XY1CMjq3G8q>RH`a#7c>l=`Zct5roTkt#h=0|5p~P;ERV1;llbS z2h1q>`WF1$V1LdD5bVCOwpw9~+CQb=bK# z|I{-t=seRS#`VWIjjU_exD`QL5m+tg085RHCm$)FKTQMo&vL1X%>UjV0?v9Fa10d` z9D&vt>><= z<2Vy&m(&A6MYp^NEJq%7f9w6S80nR?deQkt| zE^*_9Vg3Ld4(Eb04-w+|gzpS^vyd~0rzlc{J^0r#CW4XRY`;~T#RH8$U76C019g+7 zfKeG?FQ9vA=L~=RO3FFtMO+0SBk9J{u4KNRh&OSe$? 
zAi;5|YhSceCk+TG9EUMVBc|0a1B{8!0C?->;z0m;U+tiAj76uMr{0UTA1!J^jpYna z@d3_G})6z(;c=SA7%7x`6QD!y0#l`Pfsa*(R|5C51Mn?<}4;~(#&HE_klc?mg zRZin&w?#eHA71%NiWMu^8_Azw5)jbl4h~U%wzB}ppzJQKw4S%>*#zFw(2_e$=jsD~kouZ8Kn%T|hb~)0PfQ93j@plv42Oz&78oxc0ne2Ztz$)BAM(tM>a5fDy~6@&2FetpnOfIZ@u<`ekiAmO0;mYku~MF@aSzP}TO?B!K2&Mb zsZ(MtrP(w=(7`sC=JI96iAtxq`h%m`#{@(QQ=;w}D)9r{s;uQ}^In%HdQE}X3)77o z8f6dnz~kt?x?7r>;wk8Aj}aLbEGw%;IL6OFsVK>r6NMc-R`lZ-Zk-KMxi%99%rWu~ zH*3B5Jl6GJgoVX|y{sLu!~w1;$~W-AwTOs7e|T17;o#3r((~Lh+#R4Urjkqsex5;o z?rpy;bLJ#Lz%{b1_(KRYlOA_AJoSaLcc}`WU@)lOLReD0v$+ys%3l^bfvqGZo5^^& zpM`BanAA7k>f&MG0mrfsdQ32&u}f{s?|nB52UdvmN|D#()~8E}352aROm#VruTMKY zm*O(M0b)sTDTYT+%}tUD5`ZdhwHPablWH$dR!93Cjb&|5r-*yM3_Z4fOsF{_L_E`g zSv^51u%(mLwCB57tv&*9sv-*rsISF#6!XsBM6;ad7aM9R|97ahiPl5$TUOj_USJdA zriThN84r?tf(hdaHQOKLp%F7Q0_l>d*TOh+idUYYj@Oo_4h+2QMiaNY#3@sOpxiTq zCFsH4SL$fTw_dKJ)I0N}>P)w?vQrLILTN-a-yc;ve|AYdYF=r+D8U3cDzWe7UWrOb z2`)iE@^PA&?i!#+leeZ?1C=UsRLF<x+h%_Uo^h`^SdB-}cMmpzs6JYhn7UMAyKv5vlLyKWF-tHBG;|8@0k^%V z)Kmsaaj&Aus>>X}N<$JP3yqlbmx7)Wpf38*ScTv8n$#Z8hW7YN~K z2VwPO1Bfh?Ctp*Op*wooxfZ=SLR|<=_p~|q%LJ(3JYKwR$7aUj2wXh2@|G(!-gSoi%$cZ zt_6E(OP+uUbA<;IwLC6`MQQP2J8eeH2H2%jhM)!o_6C44Q5x#EWWrzZ6d$Fy`(cU2 zEdVHe!6`*#FsO~V)EM0~Q_Mu6W}ZH8WxlhfX^*{B0K7Ztx`fXj$rRaao{c-k8{QpL zi)fg@>_spZzHt21xA%>xw=;x(+W&o^1AVx}(Nj*7nyxxj|J}KCr<5vHUV&33HNb%t zr7ktq*54j;S?C%hwzj!0 zy}MuLyvY8qUDqr3gwArk`$D|%pVU_Zg^6fH-4xW4Z?SxR6&IzXljFlr;0gfQoJ>W> z2&l`qFZbAV>(bwF==IF;UzbK)D+a7GL*{y&wF;M6Kn7k0QfmH+oAqeA?0fY!_S1E8 z4H(;CK>?FeOI1=eSkjf)%+@m7DfzO`7B5J?S{#^8fHG)borM44-JQLy%Zp32qT*as z#+7A7MK?)PX@LrXO5$FIclF)yO+M5vtgBb>OPnxbgVq*F!FdzUu|74Q? z+%`G6jHa2h)KX$ot{gIbkfd?QPcMi$pcb;fztIc4HS%x4(`{T|ym-Mo+ywDik*hvF zI_R3L!0yIbFKhyBFKn-*!jYKwXU8O%(RG1l&Vi zbvSvZft~7GMcL3hY8aehA5g7SJw2xljFQ)2>V}n$%=h~HfvlSnrR2l(-IWt?KYk-R z@9z&>^G+Y^X5-b|P6rY}i-N?Z$2-c#mrN06ty%1&Qv%$F4*_T0lJrGJja_uh>+bsL z9376aa^JW^)SyhfFv6FR@IvhKk2eNCl%9avBEVLei!N_$OfiUqNcbiQUTAa!4r=>j z+8H=6NXjSvEdk&Clc&yW)vCR49`io}>4XFOoYa8|55FY#{P7`@htF=rUIc|`9PwQ* zbTGOjylFkr01XkaoBn(#p2m7+c@nT4>@d0^0T*e@Cfa5{ySSXikJtS~i-!St2E`BZ zl_{G5zk-~)K*9aA9k?{DpN8aUQ`W+J2OsKEjURb++}VeY@x6Hdyr?$a|KiSWrE3Nd zd$i#tt2CAk2$%8^*z^@u?u-*|oG%~aT#hC3P9?=Q{BCX3ls-OG{c)Q8x6BT4sZV8c z(?m=qa=Jy;P(kBSk*CT#gKL4(u@uk4UPb{!kmr`cc0+j%-O|3c-$0W5X4eg;7K14X zIQHjH2+vZ91v$g$Yn9vpyYKoZ8^-0sk2D}&4UJR0JoSe!{q`5!kmFmG@F+&$=mkO} z-7;TH^|`^5EgU=Xwn3|q$NDWw-W^)xPLVJ?B2Csb+%q(I#BR7@@KjHENu6$QMgG%5 zR%?bv_e2ktu6**MkvxdS=aE=T>Y3q3+wY%AjnzB(-c!D6U}`s=7tg7`^8<)? 
zH)30J*s|{X4<9&=Ko*jCO9;z7m#t{laYv@Imq#;k0`yGkir&!B{+%{9?WmBYLU>1v z^oJXl14hbaznLi~8;83d*ja!Og7#mNwhqHMDq%oj@I)7X+C?KL?Mwc;XsAo|y9wurdWAsIPnl8i@5b&qhV)jCD9 z|2PM5Y3BerAmK~OPYcyG57)iLl^%5MmSvbcOV*?(vwT|+`J&VUN1q-T`6>5l-?G$l z{DT;95@YMIa)Nm3)B7C1(hvSWLgb`#KLd>*8v!^?+A%zFNJ+_y?>fB<1#tsAm;}CZ zb0ftO4JNkmw+wnlMum$w=K4zGGRDcSq~SxWc@C3f5IjQm!8gRh-nHjkvJ zw<$$2EIWo)AKoOgwqC(R12-eUa);(!6sMO|+^F`qDDilnf>+N~`S)khfO|jV8W}69in_QG z#6@|xKzwP^PrYp;1q7TP8;H1Ml=S#AK;eP)E_(2$3zr*;fS+M)f4#kpJwk!YW%EN@ z^p2P5kh4aIn5a}p%bR4G5xnkPT{|{Wkt;YLZSBTgP(3k z69&>5BnGT6BniLoqM~I|aRiDeI8m%q$`*+Q8mgc&NG;d?VtXa)U4F3xH2$G|b9H8| ziOXhvTEKdL`@&^lO5?nK-ZJaAD_Y^a3hVnXTaOM*4?jhXa zHJaD7WZ1eNOVe|0xL1 zW~!QBM!~eyz0Oc-Df`yW^*yOdXKK>^V`D3jTI$Q<2q&+qER*HJxq;uN4@P$~NGP`D zG9`U@9IWxTzmRzjGRXJD_u4Tv|3RiFlJZ%+PA$^r3n?m*V(f({@Ks9&H6Ww_!~#_m zk7He`k{_CAX{9SM-9*4!U#-$@3lWz$wfBz6jb^groV%aEDaS{S4N7-VQ#;dHkR;RAr9VU&(6!dZRp0LyvGlcGq>m8fi4FUcGXg=ww6D}!$4?VfpkR->Py%Gq1 zh^1DJ>Ffra{VhR3;2gkt_aY_eEbIAy02?QNJuc;h5O=UdgkPd$+&?A3$=z`UQe;%Q zyWk|L9Q7DaP};%&Of1&z#M7V!EF}cBO8SW__n$v_=@K9U?LnI@-Z2rVN60+~8%ICU zb8?Nx{7|>U3e^4n8)+UnDA&GZr2W|^+$KrV7{zHi$AHvBtp`qqa-+mh* zb0QzXUJ23z;Uz2L8hKhc2bNeV;7boVlP3EoBdvsh)6kaa!9KfW3{NFUP z2G17!cnSh=O7WRew)*7sSx|MTc#M5#H{k5gT*CMp5aDslfcuX$%(Q!8;|Nxbh>uTl zMGP$Q@KHMQ^6@7p?DPAOYEVKCLM@HT)INE_|Dj|#J%rl?;Pg}P@=Kqv;~`k$D1}%E z|G(=XNpKoDNk%7<8@1#=1k=x@|36{$|Af*1FTx0fFF}JxnDxfIBC7rv%ukd+6$z*p znd05J0Z-OSZa21iM@C3$`K9s+7sVs+!mBd0(zN{fdbIbj^}uuQ{1)ai&M*bu5hQMp zYD;1{XqdKC);i91iluJUu^L?Y54^`A(chPh4uG>bYo5(hy<~ZN41_-1h^$s8M8Nb^ zcf5jFN-^x_Xf7K;@2l9kb>8Q(iwJpV&V!fG*J8o!tG`}`6uzYbmhNMRHDDb=6Yl(D z{LPTS<5g(x2;~=#vVe-0giemLSKI?wyx?jM`|_O#9SqV-jy!_yJ8SyDb6F;|ca4Zu z{MD;h|978$xbFmt?Ly`EhD&Wgm@Q)8&D8Q8*-O;!|2W?LgVWvDX*;|7J!q$U59gn2 zp@^v5aC_`AN&qQ3D{z5!V+Mk9%6{=wdX5R!458BUJE-Sl8I+kXU%9fjms@+s{+(4# z?dHoF*D>%C-`;GrtNr;JUwr-G*O`}q%@^O7#r$>R@SmHX#Ne*C*}r!I%~=^a@JGQK zu6QXB+`&V+v?nUb<2)MZHk=5GR6xf^nWc4jGiC9JeH6?O3p-cn=b`f)AG*Nlp?m*v z=-mG}bjtt9q3;X0Y@v}U78Y6UOMCj`g5+D|XvP~~1-iT4<+kH@=@Tw}aAtyj_-oYU z{No#s(bE3&vfa@(UZ9p^eW@>@9GUPubhVZvXp&_k=X{hHq|or6xy zwxH8j(f+3a)i^?oS+nnke`XKVxB)e{Ib?MTJ3WZd3^8XFgK~3o@$m5}B_t#o7*>*3 zDn9i*N)jLQcs-}^%~6NQG<5&MtK^<@N$;+tK9mBF=2Arrm z{I?JKSfhSH1z8X5Nl$r6N3@0j%~>e_^5x5c(b0^ao}LpkGc)-cHFuJcfNt)~_kYv< z@s#e`vvkW|IiUQLOdlYF%FKA%KNRs!~)V89Hp5kk(K_$2S3;%7`78czPN*yxcgIFNNyF1BMz zev%#!ARHU^>GDZYMbMw3qyN<6SfZ_(o|}^s&wN>wg2+%&pTivHT1k=4EaDudW$13Vd)b zx))N9JmIE)>;83)nyo_h@PV zb&FRX04CvAvMl??uQ%e6GWdoKgp22xf}d<9=n?zi zl)uKFfrnvF!Szm==^yX-X#kTnUXgJ)OH|Dipj7j0lA!}CytusE&GQFioHE5Zv>6oP z*(`=9m2y9~4hWr#r+fH+UFg__w* zRw(mEe|r?G!?G#F7qrrnfVrLbo+0VlqEoFl_xW249zwtXl^A{Zjkee-LrodAvQkyGXwvapJb_?T0g*L?JXixkH(I`6Jmqedh z%P51sO2uyX*irirRsjO;9-76qt{lyB(Tf`{E#fyKX%#rkMyLU@Q5vu}g@K06_L7C2 zFfkca0eSwnnBZWla)+gdv5Eb!ao$%~yN7l}M7&0I%a_HO&A@rSWIj|V`?ao)&E|u` z9yyP*@L+*ywstEC(VZcOL(#g>gi?8N~zK{0#M?^sGj{=^yChRp_n;|X6Dqjf{Tv4hW@Y2kFiL& z0vg72@8~Q(n){v~ruaF4g`cIGStHz-LJSO!dHFI)G{pUD(;hZ?`~5Zxcaa{7h;aYg zo!BII)j7;U-OgWZX?mWcH-`*GN6sf{?<3fD4$GU@RwxUnqMBh+zM$%+H?$Jd?_dQI z!MZ~N*&h2;r`Kj;k;EO6D7{$O;q9wlWtvuT(7%8e!1;v#$T$H+mzfiV7TsW)=t#05 z0p`7}6yD$m`t!C%gN#!zuul31)&!wcl=Vi*iO8?PLJ8^@&r7KOm0Z8;z_kSFoA1;A zZsD$$+1gIRne?L4i<+64zmI9v7GD{{DU*FGtpe*}i77J{d4iI`Ld(qI8~h z77^J3y(~JCNb#?&lO5j!4;`{-3^?UE0~BImJQW`QeD_5gIB$@WuWA$O<-mBze3hW; zh}&`A2lq!@`oTO63s`6nt*rQ~{{Ee7G4>Q&VHRr*^d~d#88B+Kh`DxaTm6ZX9|Hd*vS zG=IHt73sunKX+GGYp?iFbwiU#uOI%k{wf|G>D~Gdgh9ilLhV{{pUgH=205u5%qz8@ zJo$^1%T-)5lwN;TMT#b6Ls3`6hp3P%kxBhLvdDX{;NZFL#9{=S^?JWFU8b?}7oD;} zqTRvO<|0%V5XgtzN&Y^DF3fFAabdlrzTC;rF5^HL1^S1_4_(3OG}*s-!7$v=R$%2A zDLeY1P(>Ow--(Sm`~+ww^Xm 
zMx{vNl^>502}p;%*LJ_7%&h%jnF2^PKY{NuZ~bo3>9+*4S83(gGlA9!kpthqmzp^2 z=JZfpir}fV9`Wr*4IJOc#6{gMpHV?}J+ui*0eyL;b;YP;$pee1V z-mbixkP(9r<32bPb`W-A3=3b)0L#pe18VICM04f?6`kcm^f3 zc`T|vY41V5ykK=~Vq2S^4ajp^rzv&d6cu>-ELlx)XDrcbxAvksN6&86!SY^16?Y<& z3EFO(uZs{&Ci)SxMb!h*&E-+*t>uIDb0WDD_4>14W|EW<{GxBK*Tn_ZS2MA;$`zAmnAI-eb7Wh(*lNEx=PZ9QvS!|M!g?BSYb%L|^1dHxNq_ zpfAkjuQ317`{(RY*POpl!6$fRxio0Kvqnwk+Zny7gJ3ADCY5;z?(cLUVR$sGR>1*_WZWoqrS<9`15; zZ)b7kI_uih_g#qC#o47&!fS*^QXZqvr-`u7ozzAX37AKFQwVh2!AlhN3w@p_cepe@ z?{ls)quvPT7*MXDED9|xow9$ot`yKLaJrz`@?wz>P3}wz9E$T)t@iVoqzHNO zGBJcQea}hLvGL1}@kT^loF#0FV*tf5fo?4Bp8IQUU&iHf9-jx@e!e`7+ztVCvf|ay zTpL58qXEL*8LS0pbCx=%{@r;!d&${L!5;p5>Y*|nxv@51R0_V8ux{e5G&r4IKm!2JZ= z!YryIba>7C27R1G9PRiOz5Mtcm1Rf;vSBRhRTbV&OR(LUT`Xcr(7Ypa^TkeH^4c() z5Y>YR&-V`wL?H4WWhp=}G(`{O!@;DlT@3;T#Xwb?o1isK_~8g>l{_Cm|2=*(iT{*DX!C+EXznbM)%l zAi(Ah!A@__dyxIUae`ML-4%|D8F%W#1X#u3RC)h*OcL!&UG7{j0w1Pty#on+l5?#3 zdIK$m4fGk$Y{QC0B!9@;?wpdVvTd1PHD<%=V$oa+TO7{Sl!@U6aE|B_%W9$o zd*1!grtW+1HbdUc*IS2_^3{7vN27_#EI=*>hX^cP?ushUnwXeK5-ax2v*^@1*!+!` zZaGw1Pks0tGA0|xO${7VYs-5S#uRm%aQv*PG0*e6&`$m%$w7zNSFRR7_qGN%)8P;& z@mYvM94qW>xSY4&@o&!E5t=IG9DNwO)e*zfoubG#`QZRjvE_-Ji^ALH00In_dhk$+ zn8Uf(ad8h(MJeDaAaa!b1*67=P?>ltvu6=e=zO_?dC7DU{$ zQQxgUmeKxq{X=jo)$Ft`%;wur3dY6k7Mw=;J^?k&!g!`v*jY)9vg9lyJXkPh`{>70 z#s?`3JNg*{GM1|2e8O_7x8n|XmKyS4ke)&|`rtgfHdf>O@i^rQKxb43V#RBySo?75i;HaIM&I$fLX zpa`u8Bw~2o>#ZucU-mbT;hCN6eSOa5h(AHOuDEF6^pfs<`x_%oEiKdSm53Px^P2xm zYzpV__Hix%#iwe%+llSh(RRTh6aiO=er&)6#BcwNpfQ@u6XY^(ta2rADZM%|ws8MD zE+qh}4^n(ck}MG9Wo2LpQAiHW)0+fqX1(!68kN?HUdf#1BLJ*^BDy#W8s6L^;miyJ zJqtk^aVi{z=T^VIQJ|bzS?$5Y#f{f)y(d_$zfR+nZpb;g%5Ksp$$16kWdF80xw~=r z`eJjGMP-IN2bN0p&SrVHyG{%Jc8a!9t}t;*3ot8p#a9j$c9v#z(^0k$=l{bIOAiKUDTxd6=; zqLDZUF)&W42X8&fq&K$k7`2U7aSne0y6AQnP6m}N$t6p*t|=}c!7w)CfwID<(MZj7 z?$4N1^r1{fu4a472Z5XxS{ZD*i-^DmX^^^c;r26Rcyq$1Ei7s)JQR`p!6M+T2b)-V(-0!np)fTQ2~)H zD%b##Ceow`3JOYBq$9mZ*G*A6A{_!)QR!Vo2-1rr6zPN_N>z{&kd6WZp_fn+63(+! 
zqVD~3=KIc^^ZxOhch>9~T|2Jztf$=dy02S#L>-McroNVt^xkL`)}Ay3bp*ZW@%0s2 z`l7IG4QxPvx+^!qEYSr0cfk=o)o`M2f0-4#O|;P2Xil;ZA&iuX3miP0XU?3_3m9Gue`J%{owG*Q5`Ct6JPY_^ z{e?%9OFag2o%U`_-CrM{jF*{qGEl0_#$_k_#36=sP!o@Lp>U#4stJ42>_k!nmAlEl zO{?R|)%MUIreU~BZR|mm#KTGSyCcar5B>~g{!e0a8!7>2G1!6Tk)}qrY9#;GmnJP> z>nikB-A*OKe)x8|WH>}(QZ(3>(}aEWe5dA;X0yVe7>=ao6j?Sw^T60tvs!>(Jq{GW zYY4&YZ{1?uoLv#h3w^V-xo^rH9SaT?I@cgwFQ}oxxx(l+$}OP;)CSUJYrafjmiEEhCa!Fg`%XZ)=yng3LvBzM<@a}+2_dEA$M?X3$nf@@7zFW3R=b3^=z{iSGp^l5XmSOMc03^Qojmgl=M>>rGb-!rA>TxFE*A|9Prf}Rmm zF8KSM4$BCxAj1VTw;=RCP}WHUk&EnM_#!s0@CLz%*Cg$+>q3VyraL=#q+JCFSb1V5 zZY^%RYW-4eY|1W)ht$IBthoNSmRv*54H%C&0< zkbOVE)6eJ;0&oD0&(BBl$04VBJNHb~_W`ahdR)(oMGoAbiMPk4w zVp#`${9k0$9rBR6{%?sKHnf+rjbW90Hm{fzmM*7N(nbxsUGQcjgR z4xL0*dYmQCjhPa27-DBHzcT}^>X9Rd$6r222j}+GY_%Ml{*+zxw((tSB$t27Mwm?G z+qYN7ysiQU+;?kp{Y@iyz_kIINArJRv*QBI&EhcUZKrzaw}}Z=t8eazzfBOiNyE7O z{th5=kz<)Ptr!=IC)Tgyw%FP03SH(Dn&CA8(rvLH^V9P;=7%cN>>7EnUfo`eu(gNX zdBz%`;~$TVI-GmuYubY$bS!!o;Z?j$a{@B#Y|=sp_ti9$a_Z?l-t!+dy_V9W^%i)p zzBn-^P1M{rDbVKfnn_$wbBW1W2JQ#WtzKmfLwHSwqic<8{#ZwWEwJUic=t7VSHSWl=ECv1MYQ_p%{V`1OLa8%hpQm_B}0& z>^3gGb&@BPt(S7DuQIG^`|~x6vtZ59G)^^}kKS+Ge%|-yyiJcV1( zL)q1VlI31D|Eaj5_ z7yjLMm|)fqf&8P-*$pZ=4wpGf>OcHkx-cvR0xkMr(*;njOt$2vDjcU6wGm3b_T-DO z2N9{AEZcH_zwJuHE|`7;YMSC;EcFNorZM81JqgK=Zw!>U1xcbS139@kkD2G{mKSaj zZ?=@OU+cFoXXq~AQD=^uH_6}aXurq)XRO4-POBIh&`9JoZjdP4^>h#x7HoWiZb>(#y@ zOS!r?R+5unXy++h^_rRUNVhGEkJuC^okHL)yY-e?51M9sM`UMTOHWrGBxn5{$)nQ%a}+5CxixC@yMJiy*=6&!~- zv!IXr+&P51w8fevs-^n;5sF1*5PeIpIV>rMp08}A!p+Xh>OB)+r+uPZyx8+?6>!!M zo1?g!?!xfF`giY=X=j*RWE8f1=8EY=&%Uh!Q4udfafi$NdqYzXH`>{L4 zbi(WmbCuJA*(UHgftYlcn&&sj;|nWw;Hgi7nc>#wrTRl<`nsR=6^So)oDxt@bq(G^;wnUdOKe>e|`8 z&5064nTt;eK+!vmROcN)Uu=ot7XrK#`OTalr=zug`6U%7tQ)HgUiQ@H>Z$zZHM`-LJ{g-JKvOeN*$-hwmc)SGIfO1z0FJ|3#Lcw%yOce*2Si zGWZ46-!6Q=y{R@B+AhJWq1Z`<*|=!$RC@E=B3l%14? 
zKYMm+eSRpatn7lax_WGFtt?1d0=gs@Pcg^m!=J;+?@`cXl zjL`U4!ed@ZTU)!LM@~+T>(r?x9?U&Cm3V*zU43*DfCi7zPRo#&3zdDl1PEi#Ht+tp z>Tqz{XFTzw%K^Y|i=+Ykb|9sFCr0@lobrDVr6S~QG#?ce6;%O3!qCUx-~Y+u$BR!Y zM#HtOnVrv|J~Pq=Q#Hup!er<7eZN$%v|GbQeA7@{+?*o!~s_2JD z_aj4%0WCD}6&=dy|FO{y=9_|a@SOfHXyfbmp$XqX#u|Voy#nm) z?4l;=lM7&{9e@VlX#HcSjr<%yYuqn8ZDh~y1K=N)wVjzOvx)v`6CwNx}yK z)qANp^#9TMUK9mZ*08=7?zg#}htO+7d~dL%ak4_7$NGIu6ThqcU&d#d3n1wB6PdSv z{rp8T@OWuW%L)=$vP>I{k-yl6)OraT_)jkeu;>o}fQpZ?{Pb_)r7QugwMXt>y`*t^ zLgqHD(22M|R$hkZ0ei712HrqI!+;mnQbG7z*@)?rJ9gZt3K%1zI|FL=@sF|m_^T6y zN_C)1{|(zb>Ij}bly}mEG|th_;Eq&PfXNPogEV_P(bV|8AuJZlwQ8q#cm~DTx0{q~A2j{|~BdA2~3tBBP^E zUAk2M!tp4;832sqi{Vg?<4VA_c=oI^Qovh4O)#S0K>`FsAI(v>R@qJ_*a z0o>*gv7%gdN6nnb0}8Bx)YmfLe-cgzDBUw9=G1?p7hxKeA|KRppH?dJh90mtxn_D0 z9eAk@1Zrb*)5>$d#05p!hZ&iEVuqklDZN*Rkmk8`*wAt6V_nU}r-D@Nlq<@h^=Ryp zuRo#un*01GzKK?YNJlAet8$i`NhdB|1jUL;r$7(r82Yf{$O`Q73q|GjQl;LS*ReTt z07fJ}<~B}qAcjF9cp2lf()3vlSlBms^ zLt-*;*REf8F66uz`P7qo1dtS`;Xx3=BLu_!!n-onG3Xrp8e1Agc29{afDNsWT{7pjaK5=cb{i21OpH?LfHga)q51|AhbM=FOCQ zt4Rkp+EqH%=KHv6wo5eODb5io+9{w57SkbKS{ZE#{!BN#MK8jTYo<3Z))sB2jHgK) zBzlvU{QqRJLndtaC(7S;?r&zuKJsC}oH?X_X)!Z!e-x%b3D?^* zc5dVE4*de*u0;WBs?tEJ?D)>Et{np_?&G_6`d=pN&VtK|Qg2)iFk9;JUOZ%je;Y-g zzW3v{T+|S#<{G2{!)?3OI((-eWS7lK=ur| z>Tr^2#{)sjPYP!KF{ZcZ4I%nK8s#zo81Vz|0JNdj(MI@E7M=fK5+H3juP_o5P`j%+ zhX2!Ni*X{s2eczq1!wksk}-5ix zwQ2__RzYzCAE;G^jyDS{oA7F%?p-|!6lgt|Yk+KOz|H_tPnF<)_bux|-^0ZP0-*Gw+Y9=g0VIm*3Vj}@q$JYO37ZoG zkS}M;y|Fo2_5_NDL4XfS1CS5Pe-KS2UojSSibVN%D7)%*Z!e7Tq^rb4in}qT`8>N` zS9Q^4_R}GYk!~`+EMy4~NJ>!|A9RXS+iF1czOV9TCg`0uwH|%D&xJ0{aHY}pu{Z=S zPysDEQ>8~hJC>G0kTHC6q|tR(`43T`tfUm)-VlZk1ifwl+WR4j$FwqjI!^o(fRH&$ zowt=f<-qFy=2IKgoBG`{m*`MDGT2M*inh#bvTfxJ!f`+*RW+~i^r~aFJ}UOyt&J=M z@>RMfx09~Uy=bJ;dK~BWA_h$RCs}eLG^Pjl=**@`1 zw!@>+(kuZqJzN)IqGw*3?y!zvDfC!yEccy`%(`CYy&)IU4)A=+eT5SaF1oY_eF+oW ztJRP2t=%u*S>#Q8R?De*iyaWJp>T=m8!hjS5sJPgO*gPX+*wl3(S_<7- z6#z$Tg|t&r{uu!3D3t6ejIqB~quzrw&TE9Eo4t$04%^D&e&9wWY4IRP0ieZW;0~(} zpWx9yNjFdv;ma;NlCNbEk0V%?yKF{+o}|hZ?lUon!;F+uda@okTAgtKg2H_QojpPB zi0Ah00^Is#+M}@mIHL@zVH^<4OJj;4Aq{ukRqYaEE1u7C_;AYoN*Lj|HM5Cw9tat*Tw3okz9Hh(FPoTcw zG1aY?qr1uTfH28c(=x4-84P}6ZfPlv^3S7Ui}l7b9mi*ox5-?E=TLmHZH?iN8*hr& zNtqu!OB^otaRRv+OFmk;aFlu?&INGn1464{8YwEO$;dq+&y^Czv96xDsDg!=u}5K^ zh9LDQl2o7Mmwo><<GEHVWHCy<$aL-)G4LyR~V*);mZybITjcwDx!oufXY^Bl?IWP%_9?>v40zk`6leM|+*ISY+iVjgi=F&q3 zSlK82DzO4wQr_6J(!PK9FNHAmG-LEyLwb)LVtk1-l-jSED|}2`uf$(Bdv&4eJ&dxFKOLaq(5z>-0}B2w;H|2KJNs0z@QtNcY|-d3;rMM~x6<)=?ndOu6in8z?l-m9_86J1D7rwbkkle*<= zz^JRfOi6G!$QSH4S}UzJB5?5FsF0KoHYPG%CDXR8U+%9*EF#n&sVFBRZPB@^W89~M zN4S&w_10fKw|Fk52@%ym!8(mHqrC1yytyPA%qDl8{oa`lGLp_>Q%;& zgTp_F0#~5Wl-Y-}ze(0k9u%o@^D4WrbLU?XBryhnKSlA?$9`bK@-6TMOK+IhyXN=h z8}9qKE$okAmMk-;ruUZJLxVG@s4X{2_h}j5>y34$(?^h*;u@Ua`C$6Stcpto3?}kHbGyh2NEPVGy`8=#b>)~*E!L3 z>%W}8t3i9URNNB)QB5U09vkJE<-HpAuRVyet)9*>$lP9{Dt9KnknAsu2mfU+Imm^Z z<^{-DPTdh7g}WCWO>S4I5oSs*)s?l%z`4)QG6Dj?99PVs6dHlK^b}yKZH{0^75W|C z6>=3?wJPu$AW~cx`XtZ;hI>EWv;#1`iwOIh_F7|bno1KNV$iYLt|5FrAmx;7!aLt z#s_hjqesfFe_}oanl5G+x=#0tm{#WM3&ve#>Q&yPrb{e*O zY|P{$qmEZ^OOoOMW{Ebd_eO+nrS;Kd^ViNv%%5sB#9e1|4EV%K=Z1@Qh}Wu=RN@3V zP)Qi4!FLBO4MJPG2CaEzomx*@W}Amne-tvt#n}&3D4*n5X-=+_`+zv?G$*c+D9J8z zQw^$nvwEwVsx$W-lp^XkWZ47|A0A3w&6%<(#m@9hIT>Z?=ViQJ_|&(t_GN?tL$#-y z&|Nxm!dS(!OS*@A^%2#D?$YzCQ+c+5RGy zEz@7fYxzO*;vpfXfhwT))i&KzO7v(epW$Pv&&$9zH#5Bv?6OW>XKwtkFC-!UCdHVb zAQDl*AVk@K?|sa_1Kj-A5p*_(%HeYPo+0Q*UU{izk0eJ3hNka=>2|ZQ*dExwKe_`+ zlt8QWWa#PfhiMQTwJXr0d3wVKP4xNS%HX$xrrlA%%FD_}+Tvr*I%W9Ci1VpbZ}?XE znVK4d+ED>f1~xV%-VaD=t;wN$zdWBkscK27Nc}HOM)T@^ODcZAi6ycT+AB6@&5gp;$F!Nquzt80dHlP@d6!(sM&a 
zs(#xJFOTw@UB+Sk`^(xvp|;ihi+J2RpjT`L*06GX;l!&@dE<|-0NCn2BZln29gE)8 z!8?9Qyp5qOvF;k~(Z|oTovQ{QWr9e1buwFuQsq#En+~@xR)(FR_{Gz%|Dh5xH^;H!|_E&y_54Lr8XFY(seJ*1H1NwEuBqqVVThl z$+%<8Lm_B%^<&6W*%r zuZUWH1pHo|8VBAcM{j!t0CLC00B~QioxwLel(e*Zzf_+x=wEDAP<8gKhZQiDfw~<@ z(Ps?{3AtK|4MqYnLO2jIMYY)vrde zs!GJkfs*i(Sd3WY*V9HuRbMAmw5pu`q5zri?ufqQ!S*&cJ+*%6)~`nIT) zcTZKf$?l(y)yVrwZ2q*)#mlRlO(?r?s8d4K7ZCy`$!7vMK!Bs``C>+0COYry85 zTQJZkUj;_LxFk|P9BDXn$t%TFF$kt#H*YAC5oC1L%z>Sq9Y+@O`0-x=zL`vXM2?kJ zkd;lY!Wm(l=<+=%aj!x!lFAOb4(ua#UW zvF}p~kk(0knD~8{|G#yE&dag^MxhaPqS3s9BCvE0q=SMzYbbqg!a(=w05x@FoMMe* z!95U@i*VwBod%Bo#JdROC*A4bd#!7Groxz|Iaj{69WFlXImZ2AJfhz%guj+a@crVh zOW-M(jrMIZ^~wW`xEFrA*hP%Nq#8~`Q@bY@cuCCxm*lQId~@1Cr#dmqAW_I9E$g*c zRpL=W6HYYZ(d{oyKod0HQW(>8I{|9VZFoSn5Km(mOVo#ip4N%hxK#>f3aSGURGrtZ zF^K^kfVoSL{U{q0>yYL+grnE6$TFT0og0Td)pe0QQ{PM(y9^04m_=>om~IxMCq(!2re_+i<*&#ih zrfI@W%H;7vcha>@e$rt5Q*%pDZ`cNyp}%YtqzdCF;cwg9HJ+E=;^L1L>fjPKy=z7w z_H7iTT&}B6VqrfN5nu(+~s zCQ;byp4s`T9pOJ`;XvtKJT`by1dG9Hlq_(f?YdjK^V?Jcr1=SjVo(8*f|Ou4#Zd}4 zD_=y!rofs(NJM*s|Ev4Gv8;j@e5Zh%%hXS%4L30N;YXLxn%7i!lG#@C(aSYc z1toiy{0u)VrGNk`r~^sB$=gqMLU{o^5C=?}?d`msJO7yPUF7rJqphGgc%R%glPi=) zQEpxU%y1z0avX4=yw@-sym(^K5Rk{ZCkSnc1D$2ccWhQ~9Z6_;q2=5+yo*qn?%xc| zr!+Hy0%%g$>e>q$v7gr>vnr0?_mL?Ssa^csb+-HT&=OM*GUj})!4%y){3^>cM>ik0 zS|ZG6ygU1dc7Zk>uX%OK5;J6srkk(rp3;CzbBXvr=WcFR?krcO&DJe{imn*0^w1!# z$5Vd{3ih6GYmVT!SwUbR2%FO|f3gFiyTi3?xnXAyI3^^$E%WyN=20IQGX^dWko)Mz z<-1^j9sqczz(Tvo1xo6NXX`2NSht5maQ>)_(`h$S8$^c~Y#gfC|V$JwParDApPoHd+j~X$f5ug>FdikKPrbjA)sOyFCD|vo#pU+1n#tF9WnSx zC-#%g=G&h}lgO)Clf%NoqIgHt0k4PSv3=2^vi@`ZdH^Hu;Z29zbAU-Tu>#A*$$4eL zA9$~Z)w4n>mn6Pzie^PaiszZ7>9iS7P898s8k?>`tpRxkf>6^LlqsBDZN($E-z5 z58_TCYr^*5R}g4$MMe2$51r~m#e7@9no-K_(fFjRQ`}N!~N@J)g6msk5 zJA3w0DC@q zGws_icaC1bP3TUr`0bdTJ3FgSiUiN|j?RF?7_&0jJ{?d+U6^y)0az;TYwv9UM|1_v zR89;sh)zgwePJjH#C)eL-n^-|stsU2{G2E3!^7{w&nz#RFt(a4-jrA3ySc1dcln{~ zsHxPcaIuq@mmf{W=wzxt2T<)%eA*N9CXjS+J7p*(pjmT?Bik5xx4?<-nzRGvD4=4@ zCt=bf<|&rS%4$mC;4@~fyMlmRBq(7s+W(PBU(sMvyOD~L5;ztd*k?Kkz}VMs zfHYU)fOY@~;2cTOk2m0dm3zjsBSjuy@UDRnA};_k}oA9x*0rYWQ_G&TG(gRK5yE9%Lz2=z7I9mjrJqJV;3^iI#=!jBQU2xW_v@= zeQxmOzya!(^!wWkLMGup>wl+Rue+>WY;=I-3lDH;@*35@FOO$5bV&Ia7)XRwWe|dH z9eK!r1ePH!+>aFMEE%W_ChlkK*?J$Imv(>Ncs&3j$W|RG zn=bRShAf;2&4NhRxgLJBqb-)8#~<4o!9nGAeK3EbEzuYw=BaDjo6XbG=-T>CtF_n% z_ze#lx{NQfC$!icPZkS4V&y;C<}kLzCoohc&rcYAR~l6v8xSDtg)J8QkmTPQd$rJ} z-U#FyIu>YIh1H-K$hQ4yS*L|#A8s+{tQY~uZ5{G=?evl102!ZNAq4zY<$8sXbR%px zf9=4%=UM?~i5wu=rJrZQ=i|Q6L(6ka6nikpsICUlEXJ1Rpc67Z(Hh$lQBO6oHfM~x zM2U5vF1G89It-j3MYgR*UNY}u;cbu&wCZh}Yp~O&RRrulX8j(soRIecR4FI$nNQsK zK?*4ev9ATv1-Ftc=s^g8S84Eg=8vRg&Ns=N4t$j8lEk#Jpq9O5uP0x*as`k*JQhi4 z?JU>xB(}t~dh5*t6U-WCq<7{bAD_ybH*Y{uX<(n-lMs{;2%|=SD!V{9c$fO4giot& z%?lU|v(=%c<$azgdE-XPoAC_(rGZZ3oN+1Vi8Hd77?$=K%gQ$AevndTo zU*h=?+(Dk=NF>e4KPRh+8@#-4w?@&gLmd?X9}LUrb|qy@oI}8adVgtK{SUPr5a0n4 zpX+TEo&3Lknll@C`n=9Q1c^gtks91F(^3%sV;x5LRq*nny-C(2cO5*j7HWEfSIhp{ zC42NZco5=t2Hmgk`|UFqAkbl=Te`t-pC%2SK9ossvUg|ZM(Lo{O$l}-G2f7$64XST z*x?2EF>xmnz(8&uk^DfyHiD-Iv_mQ{Ub{mtDgf^e zlEnYM)7Ai9vvees!%0{>&mWXMSAQwpYkjI7wI9Y5!a9 zJ>OVrSN|t0wS8nbCNW1P7neeL{NkeX-u?Smas2>q4(OK8p!#<3<}yF==4XDzn`iz5 zZ{GC3#G8W{St*8ZDK zljQeSPu^P%J)y*K<>Ba1MaiZUZ(6G#pPzbE-_g(IjdfN zXL4n*Jjlymf@G_b;LezK!W|N#?APBz+OZ6D&+#_MJ@peR4h_~CDvvFB`jJHO zUHb(*=(L$^#g3)(&*z*th=g7h1CRQ3%mGY5<#^mbd507Pku?DtI84={?cpEo7+dfl z)4Y_QP;qFS&!A#}KEBKTB-0=WQX`&Z(|^W~S3$2b@KYz1*F&G?cmgcJfv4}eNUHHR zpi+<)EWY>y67;Vx3kb-CY7UB@x{zuPps`8`c{%r~Ff+Y|;B^*ccWZtHO)h+A(YVyYi{olQjZh*R+e#YO|?XR+AFii{s zd~x4<$$}Yun+c|AqkD4jkJF?N)t6zZJAXv_zwezhmEb`|29JrPLqQL1_kw#JzmYSO 
z>?`PB>Fj-Q;A3Wcc%3dGRphtV1)zbEYmBcI{jRiTU=KA^2H*uG|P|&+iy+J9+c>(2kp>OY#5p4cFbk#?(ii zAdu=N)F82AN%``(1~bssBaJW?^u%$>cIo)d?&?An8b3gK}l3ub7Ww{~QQu zghk*~iupX8V*BOce_exoKQzD6HtBn#z=;wwyfpP&S0K`8z_ITLCmU%Dzx}Cp7EJ30Gj`l>?1lf@7`2w*XIJ$PR**_&ejN=o zRo8jJw8ru=B$8Y*=!N@$j48d|F(th7z1OWFv4Eg~{n5|;_x^*v_rKd{XH!Gd`rmEz z-)-a~UtR=o!yvxK$g7zj&Z!iBqGj&FozZ3)K<#Z(&Sle?);0?Qb8pj?t4uk6a*7{3 z1H}_`qVN8Ua3G-IiJNWn&$!Hv0ND|OZ&3mKd!`^*JbDBcUe ziJfK#=2K}RPidXZ3_U-i4%$~nx6rk;fDmB%m2B<8TT4P67!0b?!)m&dt0Kg*IZ_#9 zN`u$>U)!=vc-S9V=teeuJuNXF)195_hWI3YA>-=bx$xIN4aDo_*S9uqcjg*6wJrjI zqLw{%iX>1D0D;nzbp%C9PybWUoymm84qPhkIiu1uwJ2@x=6P5km)G+6A^OjwcL|+< z7h4nmLel3_B&YJ>yf2g>@AZEFf$B2CZ+!T zoC7_(UD&*-CHg7d%e5y@cA?;K_))>|##`$f*>@ryZCVr(+W|(fOP$4XVP2}U;;Qj< zN3O=}=N?Cs4MNm6y;FR}JYtXfWR|^uhE3!IWX495Pce0=iZpQ-i>#;W2SC)5fTP`y z)>TWmQVd5pn+)}%C*Br}3TeHMFO}uf-A{CRh>qQ;1{r^O|L;ogu@f0xvO$pEnG_Rf-IeF5< z{zf|TRd$BeFYop#ho89gy3ybYV2ANkOXLw}2@#NG>*DU1&P9iD{b~li;yrjc*#K2~ z`Ek-N)cHn66sZhAjUSwo*Xb{R>n8J?p+J8S3kdSd{drvC+kiK>qRwv0dvGa_CWI8 z)tT*wet8H}#;RA%#WTHcManZRKg8;3r!5u^)P"Y4(EKgzr62e@2aKz7IrfQ|wi z6}}!WB=qDNyI@P60QD}*H^!)4*8Zk$w3C1#|fGDjL9@IIelWWz{n~l^A&i155!_~iTBHok$!me2UIKjXHdQm`|{c4}T$%(Gas}aUxPK&UTl?@cYGN>u@)}+s6=$O-gD`*XpwFEV^mV z5zSIqU^f|B=r?CXaL23~F28Np0Bx%g?S{#k?1shG_phwI^`uiuI^+0it74XcMVi*I z%6utS;p%hMI0XD5HBPbHY^gM;X=t049Jvh(8gGkI6zfH52yISGzKaN5Z_$HUew{6U zxmLNlJ|YYVru$HKnUX&Ax9okS6y0sQN>HzDhZ?S}ccrjdeGKrUX)ki4mB8)#~G0>GpkvTJs|Wp$$Z}Z%37$>U3v%QI$ALPH+xgEg|LWR?w$RLWx^5oZM0j%k&jxbiZNGI2*T^rt!-rNJO<__$7I}J+xM>~Y{8a< zP~tHje2nKjpG6DkskFohX7_q!`pMI7B8= zV1RRe;_C+{ab~!Hp>|*Kdbp>LPhd_T;8IUBouT(OP4vcEP9STJ4>(Y)^4%KAkw4Nm zHZzm|b-FV%D!tWUIV+oN3Jx-E5f5F}r{+K0tI zZLiua9%)M4`*EboO9z9_W-oOdboIQ-_gKom?j*t0r}(WEfYD^=1eSbRU>nW%=aU_* zbn-_@WstA7`fVEGu=CUT^oHiG+^TVT)Oz~{cEJ+d=O#9mQeB+4iOvN5x&^M9ExwB` zRfnbrZ%?#X+4NNv3<-HWU^dBdwTWLRTvmQ@p*$}1tkg(1sdEBuockOb6 zGvFgkmupn_-j0*%ZQVW9{zCdRfc-`V(h~9a9?iYW^||x)#in+~tWN_bKZ%NFHs(lU zXrKs_*k?OJ?p|{ZW;VwJ?+Uzzj8K%v)40bU`}^?c`0mIyJ@LVAyx-7!ke@g$<{Tc( zdR7fd!<0O-R+HC>?FxI>zVM{pZsWC`7-&S|kv8_^NBMy>7nu*4w;Rm*K*y0}kMxel zV$zCngSvjFdnZ3q{z(gxwNjUP2UL4LIjmj%w-uv|bu^-*Rwz(uz9i(2X)$^;`Op}CL-lcD}d8R#CK&rrQVyH?nuB1`+Ye| z@a}D6aakBPo$i^?N&UbJ2t$|0o8>9LtV{|e_vVJ%2d+6_ql!Tv4{jC-w}?)wmX;7E zG0V?kBTO1|h^0CzVJpo3)YAh1G55wRlJiNr$-}3BFW66cewvp>n#lR4)(37TWw|LN zGqXm`zzB8sl&zT=B3Ns=W+zL zKKob@N<}7{@Y3j2JmBM5$~LO=H1hahV9nGAdqKCgHECv?jpquCELE?7WO4n=oGe~ zy%5_oqgRH~BJ?*1iJk*ohrT)JZt0nxyjZRv`e{P9&WxD-U`|-^WPHnjaktJCx>q&! zv?*v-ZidG5+F@IFZ|t?sG1d`C@R&Q^+7<&d}7^*TikUc3iqMbZ!KA}rK6gv(LU zHktbqxY&vpPP|i%KDVaxaz;b^b=QJG!x?QI9lpe<4t?7Ey zlNLZN*%*9<7zAJRUK)-_ZB%-G4tcyW^w?U=`veK&8iSXUhf*7+d$j=8xl)XroIFiA zwZdg<>MW`p)~Mcz+?KWI1>Ihscpt8!W&l-ZO&>pV6l*kT=VpYxz>U!odPHRr!(pr! zO}@UTCZ=5aHEF?p3KMsYCQ?u_PU@1a+AbKJOZp^Z`ROm4KE4aGj(~r0 zp=Nt2Q&U5iP1^PG`)$Y2VS|S_|A8_zt7?`TrkCupZ#RRGZTD8~CqF=HyJ^!w-u&!r zQM3RSL4+I8zlZb41@2zIILjai5EJZD6>c2^D%E&|z?MyRhna3$It{|OvSP{7^SnEt zw0392p-&i|aY`~nXWucH?yE%B&y{GrEye0;beR{ei8V)wA*I)rCsehwO&G@*$h(EE z3K-%>gcO6BL{zNR;Th{6!g*6?`w1g)49qVj?!Xn?Xigbqq#fFD>Pd;&&va<(A#3qq zH38YZn0>Nzn_vJ=*mDFaDw@_@WXAbWg4?F>Mep%^;^8Y;%=roI zY##@U&JQNcfZV^X01v14R>?sr5cOyDGNIb>u&y5lUM4s1?jaC-`9JT-{ImcGz|61n z#^L@~83Cgd_!UH~@{z{Y*SzL4z4rrY05QKJ8SXLlR!yF98f>7aLGY%KiSV1@2+BJI zR+-pRYKD*OeOr{&O&GVDKne8vN_Er?b69pSEz?D#=jWZW%?Hd3hV~q=+$eGA&^{oGYqTf=i|83p{S7_bgrV!a!xFUu2n)O0)p4jDX3>Cz6scD#DVa zVSEn8VsQlg+BBik6>n1EKiqXeG=~h=7{=CY>_mUijev>UMZ>%)7s-9~c#cL8!|7t>$Sel{WTa+HpJrpU? 
z?r+|Z@9~P-GEPuSEsO`7{*(|Hru^mY6D$JmwyE2bXP9mt#w?MD^sJH3Y~*-!x-%0_CLbxTZ$FX-MDI4U<=C{yWFqGk46pT zxG`DavvqjV^|9**?A&A&O0KxW72K;*FOCimRW7#~JMCG2S1%XJEWw+Ul=SlXZu3O1 z9V`Ieao(Qs;w(R^4cIA?XEdq5wr1CCrN-(z^easY4y_{33sWxt(t+!MHl1N$3}qmASR3xtZ^Dl?mxlYdp-h)eefu3hhkx~9H z!ezF%bP%SMCmGtec{SiTCIbGu*;2YBokQk5aQn+t>wW-+h<)T{H+{#dJwgt2n|ld1 ztBxKUTfFog$1L$IS)bt-NTwMY=8Npes%4c2D<3d={NpR__u$9a3bV|$G%eCRpLl@o ze>pFclvoQj2560&LLCNH2fSniqSuG{g`h4V>K$V8QfNu#$hU>-qi2=lh0{A6b=&Cx zZi~<&8*$HTyst=`)!u#a{IdSFKh*Ev2m8S>27R1cZQ-AYd~xjQWg;7S>bo0uAU2h*spQ z`>KJkrlNJJSc}QkkBKjuoF)K+{s#OFOXu|~ch^$^thf}BINp-JNM(y+>GEb;!E z7;z+Xl~`h~bVfBmn(t1)oDhALaYisP{b7D9+&?MGR*w&Z2AMKUFTFy@G43D+LYBYY ziP@BS3hYEYz#mJ@z5ju5F1sgdNrp4yrf%nV0%)W&n6f=PBEI$tl=jfwUqJ$6krwC2 zs$IVb9q`;Y?~~5UH3Tyw?7C5kLVZPz25TEQ%zCQP3!4dlHEQ+0o?i7rA5Ppw00)p`!9eil zgD&)uURVw~+;?-IQj(wFuVd>)% z^5e}>nUTDAtouG!YE&PWRPx)9-UwnCX?wx3fY&XWAr?+PJtd2LGxBiXL{xI^D9^`S zo_C6`vxLZ^`OWKi+c!MPVY**;VdWFK56n#4QD@6vyZ<3URHYGk7npo9TsRC0@_>%_ zk@usEpJLi$n zaWMxzu%kR$Vt?@dJ5xg5fCvD7ivjEFn41X&$@)48Szmm6Oi5URomj_?F_!-V3a{zi zuJG@X?|Ef(C`Y|sF*G9MgYlGGDETT>U*uhEHwAO~+IG6eVeoiP$immQ*ogXS;hw&4 z%^WnVO+YgTFdc&|g0eF@>Gxi2uV|o#hP&!&64ydz`^!yb>MCwWeTBG=P)@D@SEMoS z)Pa%PwEgs0v$wlI&KPQjV#aWU3`a)rZf*}^m{}{FY+~ovM;^hxg(<4y-SZ~=jZ3CJ z=wy5dKYm{3j5`*18GLlDEqMv0s}*2}?;UGsTD7UuSVs0HM+7IF;qA5|N_yz$=;fvL zq9VDAnJ>5weli=%jC>(+N55uA-CGO*Y+@P?PxsB3aFcr!5+0i;{q6A9%pRFq7%V>x zOtx6yL`)0$_=+tkLJ`xBICQRl(GSyC~1yH@~C8} z#RE;a+?i%0k|}8+F3Z*jJDzy4)?JMbHgkG&DKou+L4*br{(VR|FPhZp1l*8;YPgI+ z7--Ga)f$~G;`*12CF6zUFag8YN;MAXjFA1Thqv}I34E8fW%mP6yHJMpD>vcW3$}}x} zsT-FB!zmAWV9>Np>|w`)^pR?yhB$~qT5;ZZ^I#e8#AI2wwcv^e(OIijcXQ?chrPEBt7=>Oh6NEt z1XM&6q@+ba>5x!R8UzVxm6Gmekt!*kIf6XO3DDrinKPrA&ef{n{7L z<*D#EuP;kR@66l}(jE%6#AED#*c@Zh)&dGRgiEfHKc{F%g$P{d_rK4Lx0K(d-dLyx zeuv>AZzO;r>J+F8AW;GIgINf$@I`T(h5*5td7ZxC$>Eaa^ft9J94%sbch{bV@sxFX zvJdoc;F(XvGHbOpbG7T}TEkdCz9UBpL(R=h1>I7*wJ(?ocnEEhL@iAzbqD>3ENz^Z z!?|97-YNxP{tVi3B=jmQn*GC-u(2nVcDwgN44yrU54o+SxHMivY_$KGwr-ZsTQkuq z7xdCv?5*|ZrVDl3ZUIN7utnanfGt?&ob3$ADP>6df}QP`=C^D~IWAJf%?I7p(dV^z z7U#n5n0T-J5ZDlK+Se*(YJ6kt_=w+`75vC1?vVlh03h{&Wla+EwAajAu=Gg0Q&^sV zwl^0{PYbG94dHAir;$eALzy%cI;4yY2P=JhGd0B&Ql)8uuUN}$polLQ@qYF9?H4lT zzY32a!^Bt4k;ZddJ7}$fTEwUOO|)qI%PNF`65QbBK8{D|_2OtD0qBGgATTh$;X1Wu z^>fcrvl2v}mX*LHUp78KJs`s-AYs6D+a5D(ZdJ)sdFwd#f6VYhWEaKMBvB)4}@uCAM76m!$CdC|?a< zC4j(Z5>4$@SO<==K-K6?vU{0ZRX|aMBYoH7Qf{fhQ&q+6r>b1$doob<(G5A9pm)5v@em}_xuDZQz8Q2?&*%F#~K4pY=WmpIb??Q&rufnk54ll7yJ9Nh=fq=lWHZat`XmQ;EPaM(*3gXx>AD~cw&zKEf& z5w(~NlxJMn{w2kznfdXa<6;`z8*Bn>0VA*X`9Q#9`*#W{x?R7tANXczhqDG>?@4=e zK8ju@QuI6#%S~;3@>^$cYpxp|f)qeW#GgVasJ6zrDNKfxmhwl08NcIsxt+zLj+EXi zy<{x}iIb)aC?PnqYVfee*)QCrmy5pR**MQQBl5!cW{ zrlp?Qb_}Bt#7KdN3(Wp4>b1M)nemWPijwf7e&@!<@1dM0z6C6Jl@EAaNA9exh|`!) 
zw$PIC*#tS^k?>_RlK!>UTPS%1oT?%Eyp9?-_oomzcOY~9>hS{yHJKN}fF^Q*vin2= z@gLhM_u=#9FWd!7eKH;thaViM3(4>|lJbHU?0Sl<AxV* zXq0MEWSlC2YY`K1__aNJ?bs13kaW6ZuxQ5OT9aR!plEODc~l&~y=-@y9BTxdUg<8D ztyp80ah225`|2RR5?9n-D+{;zSjYlA@122_9LR^V98$$s3uV4G9mvJr>)Lk!n|DYq z0Q7nv4J`qDM9TK?Yu|d*5MC6Gd96zzIdQ0U#v@Q3y2@$CuKS&#Ihel8ITqA}XIZ(^ zcNq-YHG(({yTx}2DfmS}ShUAK=PWRfvu|sB#!dC%zGOZAILwZ9=U{)==|VbimkCwB z3_M{dIHC#vN}HaXOFsq68>@MEW;Q1uz$zYu&Qc$TxMx%#<+^(7s^;I|Tz?Gs#XB(| zo9+pHg->-+2b1~@j21A)`w>J8isH&SgBNv7dji%mcefCsHO_Xes}qfSle2f%=Cce% z$nM#;z}Ggut~zNp+mDC77Gk!owcPv*y-_c7roo@WAY!HZ7BC3pCQH}gI=-Nb>R>CT zBNb+c2Fd+-F;H1<ZOWb&&W07kEs@5O%{HxOZ_w0){=v7{8-HFZ>+RM8 zg^b)>CB4Ca4o*&zpx!p|<9wU-`5T_b9;fk3?P-6z5}Z5a{dVL^@N-r>|HPHx(jm7X znx#Rrp3Lrs=jRKF^S46?`uDg5F5SIE>L?h*t5 zh>wrA*xy`R3rdy9i|m-G7u{&E+% zXpK34XkYEL@gCHmj6eZN?>nco)bTc64LrqEW;`-+*4F1VAdRqyw(domMC$dcLWF7}+?9Cz zIH2pa{rL`W4G#QIm%+*TiS7ah$_MWiiuRL}e|e4Opiuo1LfnyKL;vutyJ3E9fLK-r zlmB2@{a^3A{9LDZG#7Y_wW=Mc$jQlrDDPvI<($R;l+uWCc38swx7Wg}!>5x+UJDjT z0{!W;M?ca z(A9rQA^# z;0}Qx)w7=+n)UP~=Kko@B?R?$h(hU(C`#O+mxjmGo0 ze9^ShbJakz1{MD6g#mgo^JoIAoEY6#JM<^V?Hx4g<_0b(;<)Z^ns(>f;!Y&~$>qR* z?R9ojD)`OD1BCV3;u#5mj4$@?1eWrX)4rsYF&D1015Ievq|wwOJQtLN^A79_cFk_U zb7~-wOb{c``_mQRy)T1i=EkN*#kx2!vrQb;H{t9ou@+OUEHXJ`5dz?t50Tb?t3=0Zbt2&)J-%X(UXPVpcBy(X-J0Uie5b7$p-Wvzg_(s$0ECX zl;1vCI^)IViz~N#1%cyg@TGX5NYusXZ-&0f)teBTcYyaq47U;*nX*!i>wn7zPKM51 zcoDZ;%3Vu>MY}>7fI(jn+xCAj*IS$o{8{?&N_?IDR;~uHnFYe5bi88@BKPjy`%HF- zoO|L3D%&59C<$e7(@tU3#^HlHSwMb}_;@E_bOPZ2Sy??jX{5rk*T61b77rWX_jv6$ zKLFAFD_W>)264i9D3U?mBW6X4Sq=#hE4Ljzn6)HC63!CRO4DTYBo_?J84UT&Vv4k9 zZG!SaD2+^{;PKQo}Tmrk}t$z7%qe^!PAzU&b6LMxo63hz0r2(#(8&|GKNFH|SB ztv#lBtecMh%mU|Z@C;XJvDiNcP=ZjwYSh`D_DSmZAVHXu_r7gr=OUWQ`+s3qeus=0 z_Ba#0|M8$s{y0r!k#-8^^+-nddpXX}_p5*dlkv2+Sfj#*cZACs0|kH|O#@xjfZ{XS za6nG}F4?4Fjcq&YCsZ?TK*e3%XYj&CTJxMLMGPgKUZe6^&W&Mn8F;p{zQ`+|un>>} zpvG&4aI>{B=P5*KiG7C|(EW;B90p*0!43G&#loqU=C2atO;8zb3y&0OdDNS{(AUr! 
zU8D|gFSOf`tb#QKab}qeSJ1bZirIt@y!p{6v#qu^*~T3YjevpStXYJENnAZx6; zv*g^!9+zp}1X|a-%R(qz{DuB({0&?z)grxM2F}G=Zz)i8mDg^bv2;DB93E-Rs<5J6m>r(po8%*0sDA40o5J= zPuCKtsB!t$0`*jVIaXS!x=S^8A3r1GccF>mK1hW#?@Xx}9>LSy09(#bv91(IPD+`Z zcV|azD`qjiyG$EoyP(_(NF7M04Vq0n(lSYaMxPog$x3{%mGtOBC{RRPK=4}6gt_k5 zb>@vTYd!+eA~h~qE63;z&IOm;;U{wRc|eQ|lv$byaBI0y;dh_V4OTeUb=5&t*qlf~ zt92A!4U2WyZBYA#HHY&1n~ax2m;%~)oLwrpB7iIvn~J7Cdt9(OF#iA{_KLv z29Tc~fXEw?ep+)t62Mpv?>sMY^Gg;nJ3TDleOSLC}pZgu%67H*nFR*B$2Z z@myDas|Y^hGo4k5q* zoYy<^!dcaC!JJ%kwVj>l!uuAuS{4S2NE=-R3_Yb|?ln6Al!5;|uab_fu}U~#Z!Fl& zd{>V@jVzV287j5M9eVRaY1ZY_=WmamY9sP6kpRkk7hg%6?Ho=D|M^aHU{F$NgL+Ca z5!x-qwO7lj7y#SMzRspUX;C9!8>ymqn2_{nciso@HSXlyF$y7D*|W1EVW4U(S#a|; zvRpEN(qoa=;;9ylGpAr5KGixo8~6p~(w7Clpv3MB@8CPmbm~XXBoZd7$i=7t$p#JZ za-|8*plOF1kHLD4do@(*Bru^9qQ4%^+usl55V>wufG2;L|MH#dT9q1|%njC_RF@oW z0&+eLwNg~)bN5=;m`JO|S^53m6@bb0DJ!#2=ZgW7^MdKhN_knTS$@(7`jG_!g{NSm zXH?`|wlWm7$OT-I?RQtx;(S-iO` zz6=6AcQACJdjrW%w-V87mSvdlVy&6VDH3#diCF3~35S6xizu-?c zi~(ltg*j2suKn^HOgU#)=KK|gYq>JR7pqMz#=*J^)@Hrlx6cb5y8PGncGfLSol}rO zK<8Z2>y2*=xyq5cDm2+Be@VX`lLlA?c1+|44fIHPTq6Qhy`*T4H^(#bsL_c2;}3nz zCBR13ixDt>^Wi$LhDy1!zgD4vf8OHiM&ELHo(2=pK)1XWukw8M^Xs+iKQ~paXT*3E zKx&)}Ts;~D0dl?3_c|?@j6J&$D#55PgVmgvi4Lg6he1Hn;5W8Yz>CJI%GABqF&Mkh zd|9i;buXDwp-gMvVn0(mEp=7b1u*UuJ2GT1s=5pEnGSoEfJ|etSJi0?L19(~@C+@5 zDN;z3S=?4g?x-LIB zp?cl4v(Kr^aYwn9qlbjD1@qGD*|w{(D#Q}*f-Nc2Xp6&B7|u{OM*Pnl>}141HU`j} z_RAxIY$@+0gzMQQ0zXTU3%Mr$TqC~CkOeI_nblYTYbjoGh*}pM&=_P*mSg`LWQJKjXzL@ zHI;#|oV|x8#2dZaTav`#fb%=or(U)zSqj(c$pdKInxsz&u5Ix>^b~X2B9EmeQf2fe zXIEwYY3)u_4$fW8Cr;$nanRZbJtdHI^*@Vx5LnW%>KH89f(A53-{Y7OOLVT zFLnss&TtWtk|Lc(u86e_(dH1wVBl7y1!%AOCUJMnK1j!H`nqZ#aoy5&aEY6kU0vm% zrd^@)Ar%C7E7%91v}C(BqXuV?1LU>32960-EtrDcc^FX^w|H!|qc2SC!mi|WTa0P- zU+YL_!6Ur+rk`GCZL4$|o;az`p{r64%RlhpRk5Q>zma@BsZvZ7y_rlE>yx2}rlT}bu7{eJ>7*Iv4d;yteoyhtm}4*> zSJB_Ve4bth(8~W3Y(laYR*}+FoW}5=vawrq0B_f1z5st$=jJZho91L2!L?t&5~+Sz|89>0U+(U<@c^*^xoJ^6jM|Mo=#at*`$Pc^l;z5yBwf zJ*HQ5V&7GdgF%$2A_Z_SF$4Urc2vR?LQBy^$FGltDgXkosH?GDIR4}xsg^J$J{AP_ z6Ccmh&|--e%Cn8)GG_!Y5LYra!1~1iX8}6qD9%8T1M+3d=J46-c2626lxFQSg)uSN zFYgh*Qjwz(vY31Y$kwexoLPVeQ+Z8}qfCHDCsi87INj)nErbE+xp`xUJ2Sk7fHnY# z77QA3W49HaT1|h4+y&snV3q6QodK%!DG%4_02%$p9dr(YvIS}FSz}U9^YMb<+gjU6 zqa;@VCMWFnzETjqoEgUi*bUl2z#|UxZeiJ&dvp1*M@jhWVZr_347TTsZaQL!XS=Of z>}hUH6Ad3etg!qR9U$k|a%=WLqv?g~Z9#ffWoBBzcD$ZZYC*s|Tz>k03mO89&p&{7 zOxwr6yGRf^`}`mt5zfS;R&s6a37iRNvj;CS2=A!e3Lr4*E8yqL)vCD(3S*&3-UQd> zD74Ebi5XQ3L&n@d;ZbiT{PZN?-#4r-hcHl4Kk^L)RF!7n)3Sle2egwVp!2deI-*)= zJcCb`EC&*h&|6AfKNkTJxEWf#3Q93hv$57?%PiMB>&jeFHC9rQ9Pe%jx~f5%Fo*>l z+-qD|+SQ^GbAvTmfW51>oLQ^;xc0?E0N*1IqS9%2>@5bUgZu}eGQsB5YoS`FU$T0| zxkPZmcrf`(V+^uqgNzwK2Coo>5bV4VJ;Drr7zGBe%tiXHOl~)X%(B=2j(w|alKy-` zh&?AtBkUf#dZM^{F$pY(Iw8Rg|JasQ;TzVvP2A@ zEr9B3)L1>#l88|gf(b4rVvefhY6QZyoi!EGBqkK2sQ3j4E`YKvOOtV4-I9%4Z$TGt z+W17H6)1O+X_Ud%oWL6A2%i?}F%&S9!YhvO6(GYxFzg$Yk$Gb_o z-#<$9=3(&^LHtYz+%PR2K^y2TRxYwMUdIadi#rMn_q^IOCf|Y$66yI9No#h!`yu`BV#>WlE~)4EpTTklb9ZQzz;1i}hzV{rfXx zP^v0Zt{EvQJH5LF;H2F1t;?fjbsQ$WUo{!a%}$Q{|5JcXSOaS|4zT0_mNC%X{S<^$ z0!Y%gX=!Od6KFt}q+porh9?Y2sBX&Y{FYGtDdc>tj(lT3E94LO%Awhx>c}?Jtkngp zj=+cFy5r^v)DD{1Xfz#qseL9!Sqf-ghaVhPet3XY{WX78cz8G{&h|3Tdx}^@K<9sB zaCCmmV+q$A`x77kIKL3Z-{CXf(*r1=&b<6V@E_{T@<1|?!j|9fBoE7{{u3y+dju34 zO=RK03cn7)#2k)bV$=aAFfqSBVPbz5NM?6?`8V9!$t1rBDHqC@O&q}iq3OtvSwI$F zXFcOr?Xdbep{A^?3_O8y7gLLKEO+sF2F4cu5@+6Pq&OC5PI9|;BF-$j_jiS4gM+3U z{~<(ecN1}L^ht^&EyNW1e~C{6u-41huFZA|%0zNNc%|?>Jp50z4FInc6?24F+MYPU zE9v}+SF-sBUMWIn;Wu6h^*_NYnVg-C6L6*jV8wi6z?8LGMIvo{DgiK09|Gq6{7aL1 z+h6zun8$OXN&R-a;~&7hg#Qqjrxnu;YKFj=mxh6%Y1V`P^^6D$iT5)caPpY<03>#{ 
zL1df2;N;{#KkwBa{Q!)b-Jep$F*`ySFD8E9`P%>F&cXXGB1?Mh8UtVy1AZp~kks-R z?mzAQqkF>L3qe!w04XJZcDrq*ZSnhu2X&Tx{q)g~$X7RoXJ`R*-B_Dx|Bkymzh838 z|H(_vavFK%U*h0Npi_Yi;*5x<&@wnsAg(t7LAOo$5(!K-KXSeNC}aeRL(1wOINXsh_g;U|C8fHZltltLE%7Qu=j zX;1ZsvjoR0(=$u(MORrVN6~tuCtvZ`Z*Nt=zgTVQX#Vq>>+!&vRCPbFLiG4LDOsR$ zR|?n6zbzA5aDp$&RhG{^dwdsANc|20>`aIfgwTuGH)t8HcDF%mwnG2x&#U@zI8a5f zlAek5pD?F9^MFCZUHHLrjYQtfnsee}55Kac4-naF+8<^aFW;0T#JF?@D>Ro;sk@Wj|d&PA%k;D4BBv2_LbY?z(u ztz#^}SOesj1`nwV_?T6*+H zD~RB*3j3EQ9U3TZo&nKNHG!D-NErjH&E8-E+kI@%c?E>TE(s{f?&fbzrsHwLk1~I{pOucm)Q| zS353NeDQeYT44g8xb|ssK&8?ZMh}s*=Ag*Pjnf81COWVEb?ly0{4>RXE3B7jUY|d^ zqw>X&8NEsaiQL$q&3mJr;)w@t#r=rCUgg?W3;M_zx1`T7oMlvKUD&=vA}ICQ^G1YLsA zxY=XnYWRhd4d*sTJSlTYlP^AQ$rpwQzf+)6$S}&iZ7UmnjK9@|hDEi`pI7A1!D`X~ zpGGWnbRD1B<2F55JX>uU=^+S8vQK>%HMibys_|utxB!-oCul^^vrA3ktdBE$n`nWo zA{M$dw1Nu_b(ahW8bts-s=cRRxudWrmq9OgF7n_L*3I*#P{!gS^{iv*>0r@9r8(#O z{-d!H4!hbENG)vC+synr$+HmYXk~Tu00KC{z!>KjJWRbMM8mGYGS2iU*vrW^MiiH9S`L)%_?u z`o=NYbzJZD1yp~a&OUrg>Wnkdcj@ftB3fH!cP~8~i7VP6MX2!au)JAK{KNYH8 zrf?{D^UfV%5)Pc;d(U|bU*pmr`#&Cl$uf3``fwGU7=Yv5-N0>cVi4N*`qYiBb&B!V$9;*tsp0! zT47PyjB$qB#xXnw#~Ga}u-Jly%}L50Px{|e!9@|wDu25RZRk9B52W-pb(=R3P)ogA z?pa$4lhr1Y+57A{{B|mxIzgtHj&Hmpsa7Q;3aKt{ue?7WQoUmh;u}T~lf64ADTucV z2f8DT1v!syP?!#fhUV=^C9T!OBTtR0NJ@BKL+k7HRnS_O87!HoyfI||<#{U!2W~L7 zj8$_EDGq*F*D`a9Qd6jeLF04u z0h1*lvmS&j-Qj6t@Qo{b^vltOyN))i&*plW>o}^I{9nC315CnKt=9EDM=FU}S6BVM zSlAq!Wa-LyMDr5Tcbqvh1z2{`K+QpBXQHf?O=sSbuux9Rs23+R%=AIq7J;AS#Iz&r zZS(o8J_8w<_G7~|7szjcbqnA0q$v!jAH&mBqW-%?OvaF zCY)wdt$GwV8chbsf9YsRS!YySCa zi-Xf_IQD}5`FGwOSpT;xJZ);zq4OnPWQj!YaKcDRth~^6Yuk_oV%$7F$?@d)J+A*B zY5DkNp7O&tiv#lx8#|m#xviOcDnT5TU(cweTHDlO?4zZG zO@xwCSd@O!@U0+l*ZUr0srtsvh3(;!C9-QQXFikV0f}K%t>!3yrCIhQ0^Vas;Q^4> zcQmcgV`!e5faI9zahB@3QLNHrk311iZk%;mkt`BjMgZE^6%oF5c_@3oML5FqG_g?td)x^yjhTNiLR>9g) zVU&wBXAnbg{xq&M0kt@wTii0&lSWHN_XxN)E+|fXe!X6}-SJSr3IKb=-P_}nw?l*< zJgELaa#uq(hL1K!tt=Gu@OBk<3!mtOv8vxRXpasA99xOi%d|y38M~7eW`l(i`g?3` z(zhKK-=%>G@s)mfj@37k-8nT|y-KnrJddUZg)HK724X+}M^}{s?7bC|^m90P$y$pt zANfWqBcUKMSgJPzJV+_xmMtV^+KH#!Lf)?Qsv zZH(VM5*ACo<_Q3>ck7G?>fGW+$}ch9?fM3C!-iVCNjLKYb0KL5AdddDR_E$kp&UP zZ@uoCt;SO?GYRuLOCY|FjYrE?7{5+d_~R<*afHbUxsFE<`mol~efo60Zpgt2q`p6^ z*__Ig`*^i0ZREjx2d3PCR$`;5>o5}4B?o|W3&SJrmz-+pgKXjZ*Qtf$#8)>~I)I#4 zPj^;FjY6%#Kn$PO z&~#>L2beVoR~bC6R(z|N8`;{};WvHsHVE5YYQo2GLbR%tEJ!xW+Vd|e1H(zZue*ni zsYqZ3mH^eFkB=hW%Y5b)7@JC@#Dr?Gk;o;%jTr@>lB&BdqDg~roz%|<(>%cQTVC` zdeZ!9uijv{cpvgJ`pk@Ly%xM9KKYL73_`Xax0RT|H)?8iss5xRpByNxOwdWps`UV$ zLXZXr2dBHL0|sfiR#`2bnI)WTnPLiJzmqoE63XxE^vjNu-_NTNT#c zeD^5ucHg{tBPlG5AKszbF@-8wxE#hUEsCiz)=~I#$9e|1R!0YMYYTF3$t)%XXjIVL z={)$V%*|A6<_dhq!9qrhfw9>kCPZ-Ig4II#(D-~5SOF%R0xR?A6O3~Xf%>U_b?PP^ zwlf_cOEH0oN~JwLWSPzR4!qKUj4WZh=wPLF6US<2O|Pf6w;{I*hLCj|$WmYC=EILJ zX)dz9TH(0*mdyGzPL;4Q1T$$VEJHx`&UuTRT# zt!Bd`{I1*k@B_82#^aa+`DH6qwUhPJWt0@{hV%c_H*YM?3Ejm&6+7z>se5SUac{H` zhM@!+wA`Gn1(La37e~2AubboGtb*?JbsSt=>uuPZvI*5V8~}@#-5~$v@PW1ourO#j zdTYi_9og|WYSU&+oWBbZ?^8)#Wd#oN#uDMVTje6(OV8=P2f2>)^O^YLcqrTt@KJHL zyJ76mOxGadfz5|ytJ{8CnkHtYX?oRVkP|mIUR--v5<(+htx;p-_RUGfzt!&eaQE|* zO_wO2KldnfmC4>)j%y5xzqe~dwcbzaoMP*}_lt)3G?Z2S&RhbhFjWPF$BPGJE)x^e zw}vxwB>-bz3SI7%DM{e=oDp1T()?nu!n#T|RLKWh*SI&G);E&FmS4Qoe3?9AZ({2* z85#4I&`JPUveZgVM{k2>HW%StT(O4|Q!PpWI%*s`W4rMnu3 z3-l;6B|PUmcb%}?_4CcevHb}OGbUliHYxd z?opJK13}@g_<28s`V?5SL8PIb>r0CpH27S8*9ti+Kj&i4odMjX-#ZoiL)c@mPIACP zsKsO%70dHO7@x>SmX-J9m3BkW8}8NHph;I6AQ_rPGr0V9 zRc8$`(A8gHu=Dr}N-frHwT~uYe|wu}u>84KwpPgq*4+|UW;M5*A$#!}6dsu@X1OJ` zGPugyq}3tc6VmGsG*+rlW=S#r)UP;XVahlTg*WxYGZ%`w&K^PO}~)71s2fn zjtd)#g}T>)xq)U7&bg)NBE9ch^P?TouG`jKn7qJ$F_1 
zgwJ?_$XVZwRh>p4@AY;3%fiRJ@%X^rtnqrpySqz%t#PS)3~u`+Qkn2{Ab4YEH;_YJ zM@~1>9upj9U&5s`tiaJ!ZS~D+u-LFhqgGe-ExoX0=q=g}QjSvoj20{ZZ26+n;^od} zweTXK3o3-O*zh4JHq|yxv5*SDA15MsbC|Ct^zDshQMS{JgV1?ZjvE zvTjhjj(|ZcJGt0wSaxmN_Fh+8lx4XYf<(2%Dq>?7C+4wA!UH|S(bODA*-Pp%La7iD%!B^4#J~5D)a}t{AD`-hCq` z+7#^dW`K;GWLvA)qE<0OL9^0!*>JSsGN$Q3dPX#_wIp~7hL{DFBsp)aN_w-E)YXb# z(oWQpF@vQqU}fc8g~d-#PM(ZL*dho*r{{oXPY8$?5l$Qzb8Xc&woJ=bVlh}A%MvNLfl1pyS;`i8KkvKck9dYBxDB73V z7_KqJ2|w2-0bta0t*4jbp^7~G@}*0LsL}4T0HhPTB;c%ix`pIX44-3=YKE3qq?(O_ zxVZS+9diLcJx(jl)%{`fN6o7f?}A-8n=%q?$r{5@%jv_yLnX@}6E#U#_Mdm#1Js+g z+t%}VM=y75`VtP~F$FLIjUW7q;j?Gg`BrK&T>8*8mg`&f#(kgMt2eIP&>hvr+a+)q zF#6T*Xb2;@(~m!mY@dPy=yLoH5TY#qg7aCwO61cHWk$cXq7ka z?SEhIZx(Qpdo0vS;|I)q7#~xPY^&$`E;hR4L)9t$>)6bgIa=aejqkH4K8BD<>b3}$Ivrk zFaF{3H^=qq(DfO@-N$hgsOnKbrhLDfs;AMHH!ki+#($H~@L{9X(nv+JWzSWG!(|+* zSU-bSfn@b9Er6+}nC5*pwiv`6R{|qRu1gbRWjE|79q=n~-NFkneByQo9z15hF__78 zXxwEGEr`1j5mN2Ij&J+m^JgK~!>X23IR59j;jaa?B|}-LySl!8r{PeJx3}Axvet%> zMN9(s`Hxe7kJ}Jc(Z_}+4T4uWP~{stXD^ONmA{u?W83$ZvO17Vf$?@`?1TCnMli-V!pbi2AsR50E#njMy`*v`)cdcMZ&`L)uEMs7LK z29RMQ+tiCWJeuz&)n3K3n4h1jEX+5R0I)(2Jv?gTi?Uy4H%K@_K7PDLC^}YW9*0w6 zznOO`uTGnKQsQMSHDm%0A2zFO4 zgmF)9lByECG+V1i!8MA%6|0TkX8vZ1R5)kw`QFDO8|7_^#_*|l39GyELI=kXwhBR|7KcBA_g3haONHnX*J5Rmm)kPf+k)l<3; zhYdVqeU&ka`@0&pM0%PD4h?xd(!=~V)#{)W5v8kLvWJ}5YJ+RJ?3B_wWb7A<;ZXK#sr_tH$03T+p9(dxnM!#q;OQ6Uy4Tq* z=(EcrYpo4h`x(dReoAVqHB@VnzQ!fyf+eOpkLKJG97uQJkVdiUb7{{@XaSYKi16%!%42On2ERz{zZtJ%0z5GP^&}Q zG>TR@PwV;f=YZKK){}=C5wKev7&m`6NVkJ~CBTC~AnaG$omee`cHmeM%ec^=%eK|F ziQtb4T`?c$R%rovSZPYFxT5CesIb*k9d;U;0T-=8#SEG)JM)I3De}Y)OIYsgH70f0 ze5k5g=b95yoJweoB5(k<8@{Fd4K~usnHs7~CW8$wa`AeH2Qaq%v}XMmavLNi zQ%fwP-HWPa){S-%d{bxF_xeJ&vlMfU=o)7WkgD-pgfaIL2zeuvDvq%LLqIT#G{I$w z3G7{ADfBPV$isk#Q1>CwZ;}IwA*@)XW|eee_@nsE!^#^|jRv=Ic_LVaR!yE8S{blZ zSW;WkGM`6hCCZoy3A|H1_U$e2Y0Ji4dlF?nD(1wNljyD~B-xQOUY2ne%-PF0hG^U) zZh}PM)hb>@s6NyX(eb~xnT#{0CTWC$GJHbcx`|Lvwotr8*wjOrWZI?lMa*}*`_-FK z)xg2aB=E|`*IgS6<9=kcV)zt6{iZ|oswD!7r-k3ZgM`-fA(x7Bt_B_0$c2o-mP_C{cD2)vZUfjKnUph4U{5>| z_Ya<~RNdAt9YA`?Di<*4B2)CfFG!B_t%@Roh{0ySJXb z@Y7SZKusmdF2#Y5#h@dS3Rr7~Whxnq!=0UI_!T`}4hF@n=6*fw>YL0`&M^~&H!}b& zg&@tU(x_&M(kAF1?nVW{Q3eA0yaWOwkWk8zwmtf~?WJ z&ho3J``G$jgrpE3Zf6>t{mHOCPN(sJ^N%n7DIcp7m?jtYWlkS(8jn(ew5T99h#Rcs zEZziU9OlMdyZUvz1}bX>gye0&TsoO2tV0a zsjaQ`giz=;`sL`q7^e2eD|9vCZ@9et9EBY>yk$CS0SIbr6Y#mzQGbEe5ShMMf03B< z&91ZNQSY36qd3C()k`J2roXwdgnV;5cwQ^(J{HOpZt4Q6u{EqnOjC3H^qO7m*v(9SdH#*go^R$-cmRJxV z>Qi6e>n&2q8$$>^t0<->JuY;mp2Gs|5l}*J{p|QI?*hC#-|%B^hPjZic8-3!C^2qJ zNU7E1 zC81HlrPlHbU(xv*l1;ZCR0}ht{du9P>~4WamAGNs?mjbbx19_e<+{CjAyJV%wc9gE zS?7edK4;PKnB@Vcg*s|B!JC4c%E@^O=n_~hOv~W-zb($a7&|mt-%V#avCf)m66}|> z+~B73`4wx;8Ug#y`qEjW?JwRiqlN({5$!|gZx(l%D%>`v3=%zN>9GYS@^L`jIYFb+ zrt4KW=T{TP+*i3Vqf=LzHJ*)O$D1q8_SdA@TDS7#YE(py^dyCk8gabJ48C1VL*U8B zpPTEt_sRK?Jz=Ub&PDAeHE!yhBVxQvh0b*=2V5VWNUU?Ss`dAoF{HO?d9iU>>@?ox zW}n=0A=i;>%~9NO_XH+i(@<1(hlkxA&zFBvk)=dhH8VNu`lF3AgYb^)&JI1{fSZ$e zx5N#WPfq!PNtPMGZ*ABDYvq3q&S=Y8<&fQT5C*EhI!o7wW=OK_NZH*Y8|vhM1L zIWya{5RuVXiUsT)U)}~R)uQ1MnDbWC7GYzAMGba0^EYVg91do2>mJA)o>gM^B8a5o z2CB@}8lqg?62=W2)Nc5KyjOHwt8=_v_nN?1iLuI5u>TmlcRzR3Y)P2Q2iv)!FV*jE zI(C)0x}chOr&ZI;tvf@vN_ z)3xKJr5_rAr3>K=Rja*t1;8|}KZ9Y9iOj3h6qO8>seJGe*IT@>IsA)Y(DN^XcJ|!< zrSOaU1LkhXm)dqm-BgbcM4duc4(=QTeczw*+^7dd-BJP`lB3nlfi3`0@NVf=skyXP z4T&c;$lyf-#vArW&iE9j$U0<;P;mA~Q>I)d9XEEAF@KgX{}Qp(tdfd|+w>qY!tAB$ zAm`4lPQy#_dGbYmHnmm8?5TH_duL?@V1a#+zUFwfj9d=H&}@) zTbtUx)P%W`uVyrelFjPt9jN4^Dxs>P*#ANxPM7I6ecl%>_pxph^kH=1>ADLAB*e{hzX;r8+=H1pS zao%}rFZ6|yk;(&jGgDumi*1Po*&)4bY(O7Vc+_D(7qV&#V_~{(@19Jl$rz=5i4hZ^ 
zH5H7%#=%)Ts4{>5##0lgQ@hSpraexGb*3XGc(lsCd0J*crsg!c3r(~7`Fp&qIbtyz z8aChNd`aOx*!>&Km+~&`hf9gnM;+wj-C58*7jq@B8dP~@YxjN;u1~3GJ5x(s>aM?vyTQYC;I9h@9gIJsWFY&ydb~Rv-%$F#9#TZwfEZ zmzQD98uw?K=F*Ej9Jfjgc1Jq(Ehzhd$IPuW6zVF)EF2g6S0-q_oI@E_x-fi3@L+|E z{k~KjPM8GIm&jxWoOUvA^+<@Ws!x_m8RIh0__J(J(Y zC;xz68t9F3scny_K;Rikxla7!h)wAOI3yi6$(jwJs;}0$a>97G^@&6U4mwLECON4` z*s~?R>SbhW#TkpoFUs=53&5t$z~Zb0&(XK7h*MUbnI$p=^t-hMR-uT1 z9PD~rF(Sam9S3z{pgkaw`2I?$H(54*wyjYB!fR{nvgRcvmHYif`7uN)4_2bM%mS|0 z$+9yA7^k->Xbh@+N$|PaE?3Y}o@9}pVkUR`x9|@ky&pfn>B3CS?if8tK<*scO}W_c z1D{AKij-2%qE_F+!A{kvdw89Pg64kNYE!6TW>OB5o1pDlQu;l#v&1ah!-`A@vW)_C zWhsr3oGTZaPa~m1BPh~_2*Q@bfl>>+AY3SB^%IM8k>bu~eZOXFi&eCO=0!*QQ}<0=Vf~b%G3K=BfFP3{4?0 z{nUVB4FC6E4QCMZVAhx?8{W*-g;>7LqDP5|GXkjDgCjk)8>*feip`FhoGZ2 zAb1&#y-6_tCawj!L;z8hAQA7)D2W?R{<2!oCC#BtHsxP!?b=?~ zm#*rL%K(vhJtD)j2y6C<^YB|U1JyHb_4Q)3bg3VCna%UgigATClkxqOrjO;l#n&uL zKg@f+6ApX<`0ONkGulc8<-iuBB4SSlw|SU%r}15(GhEgXY=(QY;PozxEdsJ-&NFU@ zci~Auc0S6I$G-ixYL$LU^5!(2Kg?fKr@c~ zJT&9Bd=eG%ErtL!+jZBgH*01)e26?YNHIrYq>>54Y3mBGkz`>C+c&gNtK1*~ErGVt ztbm-q%90i$&wiT2b@!Tz`UL>~+oLh{xPM`YIe1q%Z$L<9M;`3CZoNKF6b9BV(1O3m z2sMsTpSUKCRPS@9KD8`*QJR#Pb**lzJ&I14Ydd3x#?1p$cDGkmSer)yEJZiI@;t%woEMctU=qJDhR;_ep+kG z>vB1aQ!OfhvrD4NZAmL!2lCvA)m9F6@)FR+;LgJI_7;-_MQM#{2l)eyV#;so*thR@ z3hpn37wVCnoo)PplJuhx3cGnO)BGy%d&lB+;jxszsA(&7EA#D@bN(SsP2!C5g(1>A ztIm-9=K!|4t-4CPl**5TgDOjXb>@VG1bs~Da)5rA#=FS(^WpfTm6pyB^E3G~i!yHI zms#4rgCGS@{x;t-)tWdV;nmgXM;=&S&cmdG;M)L}Nq+h%<|^;gKzQsAo$!Voyo=aQ z4h{kBN4>GmJ9E3M`Yr?{*-Xo42RNwjPq#;yJ}6f!_I-VxIPC4)bMM|VFg#33dN{t4 z;PHI;ai!V9(s|_A2WKhwTHB~ih4E=Vxup#A{Z41i|HIy!hf}$J@xv$yNri;aND_)N zPbJBmc_wp+Z63B^Q)y5LnKEXcWgfS!9GPdf*(S4Xo^3Yo{U~+L_jg_I_5S((@jHK< zYoF)r?RlR2UiTV4Ypu^(JMSXhWyaB57w{wQP-V*fRhhX|1Ikg=UtkCAB{zl+swI?v zF1@3%uoLN9%aaqH>#vAYO?%RC`HQJg8_{}fpm~!+fka(JvU5!N=|Miz)h)Bm4+|!z zH5To(zkt)sH{QNlOaK6&9aKvw@IUg1knFUagWIp5Qkj_(^nV~J6Gq4RGjZEs)uNj!Kk z;Tx{+(3e_nS@L@Pfy7#UK^tmb86!mg%<8(rvGuXDfRrI1CL8damwe&_qccblhUN z_+@u4rhS%w(@aP=_ne-E7dAy-x-Tv7=w#b5NaHtT?!jo#6Z{iMGQ7_qfeBbCq*+ zC1;1X^xs&cN$&~HnTNr4i=j{{uJQG8ppk)qbYMcLD>vBgy^p9zo# zj+gzR!ofXlP!NIThF<_Z42uuxfAv|4^HSI6fC`2=5yV0mGmHF3Z*Kw+I_N|m?CSCm z0RittAlAPL*@INQn|=Ek5FvdPlHr97+(7NGhID-mUubP58Ys^OPR{ik7oiR) z>4v5mC4&Y)RCFzoXgyP%GaQ46x(ZOh;8qvRIJ#%T&sY5!mw4{w5lRd~%E&mk$yia? 
zR9(A7!^qgU#c!TIp+_w%hQ{1_^{#lXXM$%UjIF(drc@`Ic*bR6z);<-Erc~tcy0c@ zGb=STFBYApe>u77Q>fRL=u_+Oev`7M-}c-iU$kqiW zL%$r@ze7ju+g{iG90|gl9BFgcu!i&Hx@XOF51#L*ys5a)yo=7Dkm!k|K7DP4E7Z>foEB&5F}B{Jl?c$^3> zx&V?zZWU1DVm9aS{K#cgy(C03(<7(B-xI3fl&no*Y$44AJXu@ql;;9qI`lrO1j&(; zIc#}m4WjGI5s#`2j3zF0f(R~Ny>i_dMIJP?MDf)+OByxZIagzTwMD>G)qCXJ88GXp zM0k0$#`)zusNV|qC7+Uw%r^z)Z9-X@Ar(enrp?5}xmNN2*a3zYCaRN%q14ym$)@m$ zCN9x?J}MNdmR~!y?9eR0>ntZ)Az4CN4|HzC#wr(CZcxj2w0Xsfs`*jB3)3!|i=Y|i zxPIsHe!$N3xS}XWP!Ms*JWMOSPwA2dmT6_ z5qO(3mtEH$EPT>Z>w1MEYB%|L|0-Ln6~4TP+a1j{g#~_TYb$o3$ZEAgiaws!ZDq0- zG&#;%{={v>rOJn9>+qG)Kz?T>MZ1vDboY-+j-152xt4on03qowa!6H8S8rLopa-DR zNJrIjN4i-T41sxg2px~0Gr-2f8ev0kLqWxsfz7ZV53C$C4XpgUo^;^-M&%+V|7K-k z{6-{Cs{9$<_a%_{AOVuS(9Wkk6U7!x+w_W-qSeZKMERn>7>N730;tb6?Ksc4mUQQ+ z@YQn8ZZvlv(*nY(9`T0Rjl1?AQjsk%_^uJmJxQ|cp$7}egx+)mDkp$ zm0h3`stXEZnx^Deo7%)8NsrSnXKCi;J8}EuB#D`$<*1HRHbrT~x~|d{SXNont@O|) zjOX94Vd>0pin-f(kA724jGRoTb$Go}8X7zSJln zM88%%ez_=bZuXu9>HJh{jLr$sy;3^Lu!J!a&n6!QdP~fgg6^_J{naMcS`?BYyd-mX zayoqq*HHp{R$pwKngf_oVyNp1Dh(elNp5W*ZZ{;+A)OsNTaw??ws?)4nBgFNrg$jr zR1X}7HWwXoso8U2W+!ilJYUx}2bc#d;ne4RioR6d{YqqA68a0M4}oUhy|3~VXKlI` z*BlJxeSp%-t-A&4gd7*p5_i|LC6_kdALu@1shutLv>i2O8=)Vc-57oD4G8tn>mdrF z8OlW&k>aYCgdad-r$FZ5yl2g8FIYT=$2Fe__$~eRlpR)&See%69O5GD6C_dQeWiv- zU<8}wgt0x-`fM6GFE$+Y%Iao+Yyy{RNXH2Sip&~e4(5^Agh3Hxl7}zZRBSF4&F1`$ zY>LAEH(|bwi}S=YPCpv}%#ozML2rYisnbHN2@u7C3ezps0Q^xlnDF>KxMKbkiq}q> zn0b3GKix9Oo0kcsVA#xaD##u@U6xNi$PW%tTl*mzUH=%I9lG?Eyx)J}QZ}GJ^44bz zyeROTXqCRPGZ@-=A;)EJxjmcce`<%h2?Z$l^#QRIsE@YT1PxRu;qB2xmgRX%yYua6 zxH5S==qrB!K+3D-L?AzW58K!bXQF3$|Mu-iFkCx-c(Kdlz;^D0mFO|d;2aZ zlBg#WT@tZ;#wtxi0tF@upF^imPA`0OIz9LI+yvZymXb=Vr)$dt=jjsjv=|yBIV?a ze8ho1PscZa-SxX!50`6s;Q_cC(Mw?cF8KQOMKp(i{lr^RCCiWS8l002$&APXbZkJa zO>1jEaDG#JlN72t?BW-C6ME%MOfCy}Y{$PR#B+DE zaSP*VwV%!|OX;>)dyVSetw9US2j(sRNZT)xb+%(^HwwUrxrm!*m^g`7X>U_m0VCwj zv&cb+19{T=;M(`iNym{eOP!6f#NNcPDUQY9losu4V@}}Ilz~$dNV4I^Z)0LA|qF<-> zzGSELPnO-kOZ6d`Hm13!K*K<*@R`r7iXssH4^Yj78rFHLEYWZu%!7<9`RAFt;}yU2 z_)0LS)5iZ!(+{AEV@Gs2Hs(ykJssYikNHTWk`-}L2(Ai7d*AdWmy+(Wyf+a@j7mP) zjxCVU_*x>@2jAS$$f9D_o0ToTe+da-jnwd48-F^C`c*Go1?v4}Xw`XTL|KNIBbMuy zlPMiu($V=Q|FunCKt7?r^??L)fijXVzU}R_Hy6-Yezv#zLRpc|br2>x2LV^FAEcYFvCW6`vHYm3^8Hf5~c^B7|g zGuUpl_5~Caqg#}0xsP(*uUBxMY-b(d)GieV1&b#8gZke3s4C!x?TiOvCb#qc!I(em zxNTo%PcxZ=ww4p`iurjfKHDkze0*=46EU1Z*~(kUC4TvtKJ z!aKX9wy$~HfY(|V=0$77R_+H6&A8X>H(Sgef}PIMoN;db{!h8g5-9e}9)k@_1*+w% zvdy;Vy(e|3&<2(g9Fk&VFpNFlep1=$u@)-DN0s6=wsGX_*`U)=750<%T2n|=H2-C&f{fd=NgCsYSGg!r(^Y;Y@Abq6%vgJU z$;hSK`Vz;IC5K9?Ts}j5?*c%*SwT2`ODb^ADY!KE^(+=`bqPTC*9|;2gGj|rU)WCj zr|-B8hrg|}KAf>NdZmDrUdL~9MK62cv-C*_vIOlEjUXfCQk0oJST@MqO4O{K2Z|qU zLIwnc7}j!h#bP1Y5zuX#jD5$L4x+PJ(w?)?S7mD#y`EL1V6RZAqzi{%WDGR$*bmYY z(Pw=z)7Ty07h4h#$~ah}^|G)$J7D)T<5Cy^OhUj++c}T;&quWYGQSfV**%)-pL$8@ z%D*F%`ElikA@*|jl!h`W_RIrTUbxnxoO^$Wu`6dYSwXnn zwqSCB0~C3Egl{k$)m>f}hcZj`=_;ucy#`(~_6ObWx=rfV%{n5=477`)iMmc5SUpx! 
zQ1emup)5htI7ued((L<>Z98M()9u(55k{!8GDefvbAVa^{*X27TELwa5kLl{O3!Qg zv*SWn3@|C!>DT}XuP`?c zuG5f{BXt`~M#%1vl_1iy5N{)d+{-`Ur(_GRNUsK?#7ox~dBhhNJDbY$d(a#VlRe>a zx1&UN_prcut5anT!65jwS-kifpYwSAlmo?ZD{7e*TEe95Tsq}Xs&K0k0xpY=!6`GD z4|IwauYr1p{wyQY+~@uI`ul4?$jyHGSa*os9_i0Vg7uM6h!ZswwH!(C2Qbos7GGBb zJn@mol>&450v#7u*ZXA$K2jjO=CqK0sCO6`THF{_@UD{a*9~T!zbOV=bts|p+4Bf; zz5Ej_2}k51BTdpC%XMVV`z-o+#TwX}vTE`-c?#0$Q>Q+Gs_=#wp}QiK)5^o;FS1S~ zgVqcOi`@@>j|Y5Zlaa)ai@hNsU6>jR zouZb35de@EM@r9HjXaG6p{0g`bGZlnFC zSmWSywy_Sa%UVj(bOgFYUUOvm3svKM=(?BL9CCENpXbo~S9R$Qn6O@3hc6d$Jpz2t zU|3~1r(T6@(cC?e>>qnW^R2GMe56tsd;7TwQozhz;-=%TASko+69h!dd!Xcf67ap5 zB}8PBGtRk{<6eJQ7e&lTiGw0wp3{GYw^Q{J5MR(^$BRn(slN(0DQ-}pE&}A}!$sk*y^D#N=DGXa zPe(6d&0(kwFdq^3W&=Zip;zp~%$!Q`L4XAy5m6LDY%6L^6GpqkS)O@s{XHXNW&w08ZYvb)zgHv0L83^-b#8HI-MM(u=Q%VIGm4Tx;Sa&gV=JF^pvmOS znljlIibN3md-`oSMXQkYG|D#*)u4^3x&UnqsK=^W8_W@h^=ItFSSOpe?VnT0dUa9E z`QZ;wAzfex*%MJaVC-ZClBfp|jZTDy$9)XG-dLZgC=AnoM|a`EHGqzMGDtbNq+N2} zb7Rxo-~-==h1>d~2LU^6xn+ZZ3Ft0)P6_L>< zBzjt&`!Jp)&pLNyRmbgiLQq*9`*WjCuxuUyJAKd4j-$M@GKW8Ra-rlkpcp1k7Wa8f zCuaA0XJFu7(2BP?g(iUw{I9hcdJd2KW~SQRgZ^DZnZ(6Zk=?66kn8SW^o^Gwg2O>TP*$-}NKLXF!_71m?D+AoIjo?oe z@|njf&xu1d*BpHA z87*?0ACic@xSp`HBumRxkMtajI#}$2#SdW)@~+A6xZ$qR-MeBZxqmj1MYdzcNLsX4 z4Y!0HyuYrN3kL3h%IBB%F1;fbXoJ}ovdM>Hg& zH);G5`vQA~BL~c|^)UX!WaP?uhSo(E6iRbK5iJ9sk%}rc0F*&SDD1nEq_EiAH}2=+ zx6T9%pLt9I!unZnnb1GLuk-oGOCXWen8{PK5IR)m5Y?My`af>Z;qun=iW_7&y6t%M z@s}{`l*1cwPVAGp!$K1i4dYXYyu9AYp|S!%7I0Z5Zh>qSq=P~A z2KpY0XF)k+=JDebXFT<0Qw=_ZaGN$Z)f{uLZ=jMgt**Y`S)hR^8Juc7opWMlx&<#O zda!P-V!CoJd9o>&WgvTm_oTrWBek97aV7LZEPT}qRHp@g0tP{J+w~LaPn|~8{!W(N zhqt$GU;++$Y=#AQp8kBLlrW|3DEWE7iT?3>AsyMoA1+Yhhds5K_izCs4;>u)qBEov z6-FgRpi{m61vuTbCl6<`KHjMxEgvm^oQZuw;K|~3tl`U18%@(mU6hv|2o*q$WAs_Q zyR7+7??bI1-)m}GFjCx!GdkABu%VB@b0MW@389l8jW~H}xddb=UlSdb%zBG|CQEYh z)@K0!05TQ*(CuwktHFXU=G0rP{t^ktA*REBfPJ`gXt-UM9h~E=YF>LjuUcG~)~-Wa zzq{vT}i8ZdeC~Ji{LAGNgj`t4m z%ZY7|cQ=QVU`xDa*KrXM=OEKLdr&>znW%x7?a221r?$31>|i73?z6t39<#Ed7$E=e zF#f?yCKRhmj1bwRq`l}N8Y-ds6@e@-Y z%N)Fdc2KN2+L)B#;nn7ky%%F^R!zxGx4%*XIq8xv?l5@HXEid=mwx>aoUP^~2C7~o zlkR4a@6R2#khp@ZmyvN?=hz^9f4c3^NGp4_g)l*?2D3L3tp2;W@fX9yf3g>cZjr|W zD3%YSLONl?w9Z0fU;(qa$Im^U#IyX@G7eq(I@kK4Lzn(TDP^h}=o0JYLg5U;qxL{m zdgC?>#XX3XnJcpF3GXy*k_F?Tn`LI)_XpU@Ei1x;uQxT)*05?~t4d(*pr7$Yq2;jB z=aOevVU_#Qg{1{CpKfRkbZg})--BHLCS%I2ySpCuR;Av(V_UQz9O`fHAwVwdptE~5 zN4DGEI8zn?6Y7RV<$i7>lag$jfu(4dbn&UoSnj%SE#_Ciisqaq{1J#tW%i)^l-g1G z+vgrx(~%(q)8PS{sYqis$b2IXwywSlP+;bAdES3WoPH_U8|0&BjPCDw7}nHp^+fJ^ z7Uk-70Y&n1A;RRB1zqa2aD%L8QR=0IziJm^!1zCi=1lU%K_wB-j;eq#B*Naud_cCh zH?z0iCKFVk+8m0dl-cnaK5K3E1J?~Uhtr^1BdBC#U)S#ut*m{HRqw5E!~xcAtq9ml z#3@g0;MiO>ZBh#!aGGoS#9Y^)(pGsBbYz6n#=7@aW~`Ud>W2r4ZUbpL(eTLv5%JLr zPOWil9iVx^0L+el`sC}=PaZrr<-=2QpZOnEMcwZ7P*1j$ciY1MBYi>}f>^ka+Yw;g z!aymQ2J&65id{dUuscW3;KV79PB~HAR6tIlb^c(dVvm1TN5E;Ww7*DcTxhZgQNG1X z=n~NI%W;e)ccqWt+Iu^H#0zuMl?$jtcL~csHM@u6rVMzF+P42*d#nQVtVN@=DYR4u zWDw#yUn2Gx3HMhiRm%=86)Y`@vto=q8-HNdObbejmzo8H_hxuH7h=8GTJ8F1ml~qA zx|(}IzlK!co?MFJu0CkpPc})n2L(L5bayQCAJV)FbKfnbqVUzo(o}O^9Qtg8DP(#D zR3~7roi`z6mn%@ViW>|T@pPBD|} zmx^F^4P}Jd_BhqK<_vtR3}Cv@va8bD&2i>Kx~_nVZi9u@3DS@yM09vI=IwZMNE=Xu zKFtsrkJjo%+N1t;s0GQNY4;DFcBo=oL8v0cXLQ6K0hWjl;BFN@wNR0d_<1PPSZbV# zzpM1)18(SfdzPD(+fL6vH0T%_JuQ8Wh+gjPSq)HaU?c4;;4$MZEK-UBeD+-)B5vOM zfy4!qiIn?o<%%XIhIB&v=4(p_NC8#APw)(%HL15SxkHPwMPCaPbGav;OX7-WmM{U4 z$CRdNqxvUGQF=CDg3-nY!k)n&=vUHrN^+ueFYl&Zk!(SYkB=w21gf87aM!c!E|p+% zU6y9ks0wak$70F0L6vYZqy{be(j@)XkZx-!r%F);pl(>n$VjGM<0@jg?-k>rq3Bcu zE+wvCndi68Pl|)MwF;8WekV?UiOc^P%Yk1V5utFzB4TG+1JN_aH|v^?oqli5UU4bnTk*1rTsfOwuxkQq=`U=7#1zkp!LA 
z_sM8Ejnu;o3YYnKEG_~54uBpY05UwtJ+tCI31*s0SDn9j@d{{-2h%sc%Zns8h4HcD z26JQ(<3A|e_B!;zR1E0?82*Gj=ok-EPIzcRO5Vf{xa5P`LRbMcKDK$f996FLOKpQC zLoRKQavbp-O@jr9VaOx?V>Tg$Kk5%mDQvy(Nb&$7l`pt*;@7u;Q*K<{MSxFNVKu4m zIyTW+OMaR?R~;E}uRTUsoEdb&Bg;#u_3r64bxFLI8FbcbMlcK5KaO=REGZf(8QJ-s z)fcNiiFY};j^M<#B+o6*dgThQ*8`&k8h_qa?tLG(-`}Hq;{-}lC1?A}r2N`k7Kd24 zM7O?n8H3^WHy z;&{?#x*KsGdnr3vpe%NECwx5Q`PMwoG1E2!q2}I4A3oobOnk2;#>^j|=~T1<&bYa{ zmSW~l&^A42w_Gi{<2)ap5AWfYe*CtCmm@_}dUkl45LB7eQMZ7QHj8wGS;)CB>ykmX zHiv-MUK;R=hO^L!vF~mIj<&f_v%q|2=GXxyD(+gk`S|M|P%hJ!Z#j7mG!XGNm#wiME~6A%(0sJt**2G=7rto0ZAu6qM?{+j`wlLo!@D&8Xe@qs&^A*~D5!j?+>93Pj|c-edI%$Z9* zK0Y?RPPlbx@B(i^bB@yYt7Ovs`82v2)97rS;Ex}l280VQFU;`h;qj?#R6{o7pkwUH z*jSLw()4o@J)eV_ysP&30h)I=&s|kglJrkRu5Ri`nD%%2c4o-4x^0#r28JKKT<1bA zM*xIHV|AMA5>-{4wTqBm&_3V@0MQWJn%gV@{(lnyVj#BEUC)Q)NA6}a5bj6+Jpv!A zpjYh;ANc(p0-xs)z1^Ub0P6>EM3T;cm-l?+_y(jXK0GtfW-{I__Fr9nuUM)#xtz-? zo%i$l@CX-&%M3tpAVUb>^b$WIyq#2&Vc_oc_4!Dlf*&mxhk=*K9gZmRUtpjp5Y-V9 zK_8ai;E4Aic!FUT<3HqwKu|&UfCGr8ja}ZqNa}gOmDX3=7s3Co#}Uyn#v35vk(%le ziZDIBZ(i-A(_Oo_MO6+K^#&)i531&z-ABA!BB=d2r#8JT%4!_iPJKe{4_v-{xhx5W z@&U8tWC8U8;AX#=z`htK=%#Dd8gUcU&YPSb+=#7OtxuHb%qQ*kJya9Nz!Q4LQ$0I! zx=W$cWzFsU-_tb&?AX$IT86(&Z~PEzq1@X;E0@Lfu>4$ndP$^fc93}X+{KH*yd?o0f}?y4!E<$9X;hkfv{yl~V9Y&d0m-%;HHSXLnG@JENOdg8#zQj)5Ut^Prg zY1%kJnr~M_4-Thdp0uW>{;ad2*lcHZASqhq@DBO0;EZJTrbeIrbw>U}0QF}X{ z{1Qa5-}WM=kqvUQZNPuHP?>?g}N1RwJYT3gVV0uIY*=Rkm%UU zPW<-8Lm{}d228wptccXnxVJXo1i0JO#UARsqknlr9f*2LM*o@!5DvaqAy@kvat*?O zYY=Bz5k~b(2~;mZRmghb9Y-`uXU-C>pMnxrhqwH7Ur+G+)LgEjKU>+a0E}rd)?VQP zkiN%&qA>RUQGsX`=#L)JfWL4)l4l`Eo;!?m{_`iRA)=zisJuU2lHTGs!T0Ex4&MTN zIQIK>M*p{;vJ|jujxcf*|Dh^8e9|H4Y**2_|Gp`7RKat^jvYP~_;BY?0KmWg@8kX7 z)%$;yy~X1*V)SP(kBvqA3592`-gbb$klR^|E?RvpUz4POKT|=Hsm`OC@SZqXQ6lZn zm=IeD;ASysKL5iscqoppkcK~Y*ZFTGXbZ@&V>=FI69DM@NJHWz@oo9`qci8@cs7Ec z_sy+nmj-QMXdDewe&M#@s2PJ}V{uBx>FBUS`q*2X8(cj_`saZ8+=b3~O0+68DsDU3 zTL?skC|Wie_JYn#1LwQ`ztl%Hm%+`8CUw; zF|b}W4UO9T%_?WTip9Bsug8dZk0(hlA-7_mbh@vKOq=nT)L%1s$`Tp5x3o52z`Zr3 z;x#=)N;TsCdIj3(R^VN^jC^4K{VLTWkm4Jc$^N~Dqxz7_`xgc9p>74EaIOAo+UjKR z8@=E0tp-@FtwxK)u3vv(I|etmfJRPbu%@_&_U zDa4y9E07LNzFL+6@1-wb?MM$fDg=aa)Ib%7bT!8RT8CvfpRQx;PIKsuojd>gs8^sz zP2#owb2Bw@uun7*Ca}nKQc__z;FpAN^1QPd+7GnZs1{B zqwM~Ab>ehjNwtpbiq;>y0ukYgn6mtt5X2AJ6*`W_{|U4IyQg5M8lhsY)x(DXALx&s zMiKHr*m&-XL2pgVsiS{X3jjS79rRGol>QFhgLd2&n&q=*rh8;KPhNWR8tib`FOx_J zNB=&OCtU&68C)@v6nFH^RU&3g^JPz>KV*`M|B;RY*IAbhwD;KPYu z`vH13z-9MFxpvCm-?RlO8YGYQr~g{iXvm^g(rNtN^iPnTpFFat_kP`$fG8F+_$p>E zyncVvn+JN3j)YrB;ltq-bY(Rc`%y2%`0@qwC8MF>)+7)%vmD zSE8Yn{czs&$Rq!Cc^Zh05bOud-!(7=t77hU_z>U&#nIEGLItj$BIB8Ve{=K$co6f# z8-Gy#!!Pnf@RJ))&e3)rKXU`jRh9>B1n*99X z(}c<7gb91;qqh$pJZN;1m!xTIYEtQJpi*~GDoU7~p4L#3*U%OMSOt{(>?yYvs{Qdp z?U@)E8CjuyEq8Kia+*Up6Bz}0X8Wea;e5O54X~|xFd6@&b@T%Jw8)o2{72rv#-t5^ zFJ*4-@%$lJubzZ{rXb@Be&*dvstORh)_J;t0{8F?joh5PaZZ&M`&TmvyxZr~%z|mf z3qol+TO&-9F`pcjRjLjK=DK=%hm)$S=a_oLG+bDO*kuhs1N-XQ8qL|W>2~3|V{PDQ^V3q%ilbP#|d`LB6!mGK@Kbz16|vK}br>Ix!2PIDB10Ykp8 zUX@lF0}_Gu&uUFH%$Ua#Zvq`PQT_Ol<1;T`ywn>Jr8!h`s^CcC{Pmu zBW$jY?;|NT4r@5a>)Fy=|HQ=P6CeP}Tz-4<`t_LZPfMuc3>2acDw$qW^pn>tQvg#Bi zB+@Xd43>{{*gZWEO-@cuw;6`}z5Vd1-*mrvA1NcppYX^(iY>>h(n(9H3{W-o_yy#t zIqhxVxz2L!#w}spygWN@p;~4@T=ec8DY&(&rsgY9kChc4A1qYIEa)oE#>S>yU>Vty zBLLrGe1=E((|@xB)Fs(5);cAclm;NN^!T1-w=Ms9o)N$&a6>>q%gxP=jDkW9zx^70 z5bjLfGgvCE(!|ToE(Pwe9tgS;cupRIgWmn`TdOjw$&3YG_EeC3fe%T=njHqHIa5`lUTN zLO6hPc6a5HQ&K*qCHA|u^khrHh)Kdvy@>j_-AL0^=(Uh>t~b)#1x5IB;l*Svd1#Lr zU=ippIe1EZ;>1AD?qIIi92n*}*OQ9cxLxy|*ui?bMRU+)#spuMraR3!-19|+RwvTQ zO5JMG?>aZ(IujD7?O=pc+B<*%F=3KGqw$xlAev^hr+k$M!&`+!0Yrpd^Z<{vuT>bKi%jB 
zI(B2Ge=7@pj`q;s;Rl{rTjP=3!yvE8zu-~azs+2A6lAKqo_A;*DcNdr=*qWi9nhhz zZXt?27r!KCn^p`POc0xD%X5CmECA1{5qH!ggcnq7e~3%f5^9SQoa;`{EX7e|F%HUK z7&<=s^5x5WxbG{;iwu0l38H>*ZDGZxw5>9LiG zj58`K%Io8W4dC)u=h|EYG5w&RBFBmP%T_K9U8uKjAM5ciDywCxx8$%pAP(d!TozP| zED8PmvtbJaaRwh@3uG`NJV*@C3+;3JzUnTl$obkLt$rZaKB7xYM`NuQZoB?g4P7WO z_rgw{QBq3j_K1g2^@xZN2OrgwTsry;_{JQ2)qRP~nMk7blxT~=SJR2cM%2uh`T@0( zSDpSZBU8Uj2j*8X#?Ray+nGoT(MDR9(FE{Gh3EKEs&=1DPhb07@->u$T^u&m_-du4 zhQ!PTcY-2H(|D-DB|KOmws7vY0~#rnd#+1g6}B4?17wT7C!032+{rlGes^mr&-0jg zUF&dnx`PHECRr7-6sk}oteSZa-EWYo3Jl#G2{#ML`|Wug>Mw6r$cg_)>x<%%QuU?< zF6GwGhD0p5;{E=(Fq=5+wJNnTB{_M?i;2{w5Bl2PBd?O=>xW+DN?4-1Bgnbb0fnJF z8yA;`gIP=A_qMUIv-F-{NI#H->)1`MCTo{ChtfRfGh72KFoQ)Y3vLc(+JsvP@t$7Ylwi;Sj*Jt}D zR&>*1u48o9>h{V8@;u-7xbXB|JY3lv$V;S`viu##`1k=UWso=0S{xP~ot3S_%YOHk z9GL!BWPQmNjPs7+cl!?a|7%!*ViN{l0MfjT68T55RRWL;*(*=S2!*S1Ez!F-Q<_@cc zQOqXT-IPTf=-vM3~-M!|GG^7>mGrfxwJS~Ayr_$Z{&8Gj`sltJ#TS;*!g&aLfhD4 zn~|ha+zQ%eI++nrA$Mlb$o}>zm2w1xy!Il1`luK)@EmSQ)-N#YAAc z%nT@ny6()wrLSf9$RL%TtnDb|@wmYVsz36}~z>%c_ z=rNld-et=v-L5jtECwdYAiXyXuy#-O@ZG4t?}||^H{Mm}h}f!_>&{M9GUf!+hr#6L z+1Xj#W@cMf`6Gkd3Psp%r@n(1dmf%EVoN?R-=qVcm128ILR>5om#!97d^$I7eIZ;% zS{m(rzot8BHr*=bMam&*wKo_H0zZHJcR=Qv=$%V4vo$}G{f4PjLpJ=D9lwa-m?3V> z+GQps3RbkRsNcW#jF2P9jd*5#fXRQLAJR3xFXqP?>XkJ_a*4Mxi(BLd|4`C1mt+bTw zQSqCp2l#e467Bq~9sHZ6?BQL-gGVnC2#`C)-sKxcjQacI-O@60a!=Yu@ZA|j;~$YK zGt`GL*BP+#gnw6_i)4nDOFM0F-{ZYpWndNzojE_g)nBll8byv^ zK1+W-<6yUG!LlC&Ud!UzCqojVY=rAw@);DHV_*Z>L>2F#^1%nBGdsbO*yZ9@7@P z{?Oi=*OI2_rmY)(5)V|`y`<4z8bmxzp9CCIIm|2ws~9dOfaNK6l^A$>{c!f9o#{@l z02|#;4<0<9?(FJZU+DF<*0B3NK^!@oBFf1oi%igt!(5RlA-Gb29k)HRFZPh_~g zS5H1MlPZVPmS$DV&{*2H>@Sy3cTf_ZuyKCGCzKR7s|os&(b-z*AZmEl{&6h%ycm4q zYqP@k)D5Qiw)*;ZJ_9dHU~duPH5n_aee^-Y)&bF*M((`!hP!PyL4-Imx*P=$0kdTi z9cs@g2DLh8<+LTTrG3CiM`!9uu5V_RvO(cJb&`xsT~l+GfpUJZLZi&B%yOvo9ZfNx zC$}fhn|^CdKc-A|v@?nAU)$&bfnaTN*N+`puj9fHm~Y;E3<%%(%D*inhjx-{{82i0I8SIMXHs6oI$ ze|J25Hbd#^!*Bzq7(IlgVR${7pnbRt5hS2sxHLy-QUt57QD$GMpN$qaV z7d2Mx4WRDY?A1C0vac+(ESGR9Xsa4Pj-<=d4m&L2=ekNJmh8)2X49w+$TMJl`4SOW0(?ejQ<3uq8Hn3E zd};NR=X7uBRX&O0%_(rUWEPl8uy2X%51QYFv1lJ$up;U=Gv}77OYmJQbXsG_CIJp$Odd1dGAN%qPoSH zBAybSAR)16d7X{sTu){M;~L_#b&54p0i{;9K61Cu&t{-dNkh*oPgzBoQJ;xgRgW35 znG9dQaVnXXvG&+HtZJ=lv3#6&Fkrp5Y1nlw8$OZ8niUSD6}1$LvS>__KE1mO2u0`O z+C;5mu8JGn*}9PZSC2*`tDCrxN#O=wv-9jXoF_f0SZ1nr$;4Py*YA5*CDd9_9%T)bLSni4~!j8>4jCZy+1-Xcizm zpG7y^u_8$`{30M!*}J{NfOXu8_3Tcu-aRK~evT?&E|nSa+-qA}X zmRB`W0fuhS-!*YDXBp*HuOS>w-Chg!A z_oc7T)oG@?|9m&5)G)?*p)Un)w=y@7E%goyLeK6`(gr$&2Ti%$LOHZ2_DYUI`9bD8 z@GKCy>--#`X$pIWFYn3F;ZnaU6<8GGV(L92ob_~o56|wEbdA+&juO*wd_>53F$K)k z8i+AK+4!s9(s}NQ#7WW`EZXMCke9dN@q7y-MJfOuF1H;r&Co}}-(g!MNT$YKe*E~k zeBIA#9w+5pHB@qf7&`gkWc&>AxeVC0X?aoee)Od{F5&StUs()w>lbeHF{}?p>{F#r zDc6vT0VFVSZ6b*X#B!`J`irrYJZ`M60Io4o;*!f*VUl%@>RdKram} zY&2e?S%4?>&W1cU~ALqTbfDR z+b0IVWSvdmvq~|0JpoL!`K=|^7=4!myeJk`;f&2$US8&QiAU^aBW8=j4GMca_urnS zciunA2quV#$?cD1pk`cD0TyYgWqV|t@>Q=;Tg ze;FM|buPg2TV;P^YooHddYD?vEFoNFaZ3xO?X&Rtp2WRp^*eq6>DJhq2CBMyh{flO zVqS6pwt>K$Vh72J!YBe<`#4lF9|z-z;SXdd_1n~VL{ zB9&;w=o71HPcZ}{v$;4ss>q~+2#a^Cj{joWxh_0dltb1YMKaag8(RY4?FB){e4Jsx zt3;`660(5nESeQvfmKhVi4@#RTZNaOR(V!`a9?g8^Mke5mlwO9K^4YVxb=;tT7-lK z2WM@cpy(-+b-@mjHV1`$&H#)wmiPg+40R6itB>^J2U10J#X&FR9*zI~E8gBDWc*a| zu&&FR_awS@DjTM4L}BME!gr!41>y>X$2h_^s^Ozsx34cfbQz`vW(hUDD0J7Uk(zwP zFEgjJpe=4VAXHIwKTULZ!ZfpkEBNuM0~9P9dQ$lb zG;_|^Na<-#^#+7*@|f3@7im{L4{{oKRVq&)%5krDUU30#fdK(|If^oZ7QQPjr#GepFBl{B9V4m^aBFuN#niV03+~_l=9jLg z!y3S>0*9AxX#0OWL&3!T0K}_#ST7r!9IqXV3>wvvfm38)5J(4i1=`$%T7NLXlwo6T 
[GIT binary patch data (base85-encoded image content) omitted]
z3BK4c+Wv9b+*h)n*Mp|pw#Nov`pkSLL=XL<7{5EJsTkL8S7I>Kl8We}6m;VO<|f-p zTr!GX{Bk^(MxKXWXv4xe*oBgnN{GJ`XCj$3FCl=_ofT*kR)yva%Pu z&dQD{D$PviJoWc?mfCOz0Ba_M$?x=H-3P;brz9hcO|866PbKz;be*!zU@-5(r=0@I z5m7yDiHYa76fOvEDI5~an7W2fH{m7spOY}nJtv|WRc&t?Y-=KI)zo846qu;Hi&R1+ z<4?d*LlTHF&nQjY8B>vwwj!MXKhG;F62|=}$^xGA#&yZe&WOP%6R*YGdV_rBGgZXY z6oO*JhrB*a3?h5{4jSdB;$0_!NI7CT|mWCvC>giO;GSHI7Yko zHugq>hTEO$odquz)sb#PgimLN%1;QPNJaQI^>dW3-wuCsBu^mFCN8gtB8oWn>9S3Z zf=sHQXsfJ0wTO#ebJ2-<5aC+4#UGewE}A8E1Uzx^<5VALAT=*GDZl24tziFlKY3>( zjJ3a}wh`&x{bREnJ_W9+9u|=dN0C)6eHv0P0KpJrS2ka3gCAd5-xX8j*q3OY78WMw z9_5I4cJym?T?{kT?+=(kT{ZhI7VQy#?ABLNF|qR;N;~cHO6C$&HGI59@deNpc+aN_hZHY)C z9!NRmy-AL=3pA)U8$7B|+-<)g(N^FRtxFK|<1@NpsmwCbp4|O9)c#B#Ge@-NQe56M z?9fEhcaiCzzpGjTzUrr<{RQL=rz~a$b=!(vpUq62&nQc0bDMQ?=dIS5LAt@m7GY{m zmr`rxhc4N46rL<`8Og5=c(MV%Aap5f(4Kz`L-gPF=1}d?40)%)#3xTS481-{R6$GC zjxUX@FH?L&S)2;C#mpZ_I6pthY1N*N$+zizH`1POq#kl@4s={eI4zcRY&TIExlk;v zG?OBE+~L!PAwpl#k?xPzoFm~gLmBeUhrW?q1Ez3|)*0Ad=d>G9in7;Z%c5q3@^VT- z8*SS2at49c!A9bZ9Xs>f9EKZhZ9+_bj+K)2$fR$QE!Mgzy1EIq_Fuo8!zXupHhRS} zCtoRg6^~LljTmg1@ve}sl!j5hB-Zni-ZQu0Kqxa!C;Wu&_3Q7X?W#GA_Unz6QQjKn zS1*}waP*k?Y{4t-5PR#`Sf#h=82RH=&rA!SU7~iJsKkYjuz^i*!G;L39miE=ReR|7>+)Qr(4?i6n}g8M}C|EBE|;<^Fi? zb!$B(VClfdSnY+Grx#{NnrHB)Hr}rkTn65$srhO5R3sQShYBR|EY8I|R=gXX=kAnY zonEig6IFG&s5Os_4K_~9%`NA>OYG!T+^4Y9&RmZ;#&}c;*vwx%L;CuS zlH$jAB9UU^V)>=cOi*`|QOsVL9a9l#veq0TQaPv+Y}&V@5CJq)Q^#zCv)g#Cri~rx2uoyo$j~p>)D&o3#JX@_0-K)U~+=SZ!~mO`D2J0$W? z;c=SxE?Y#o=1ZaNPLW-<^jmn(@yS@e%Sb>G?zGoeM9AeFzx@L?RFKk=>ge7XP$^lN zWZq#rjEvNyuoKPS6DpEapXs%d6&%hy^E5=8mH$c;#?0TZ%FV#) zUcDMJLwbAKr%56`22?*7Iwrs=C5r6&xTz8&$Fa7d^hT~5mE2C}(uVjM-QX_KIB`Wg zP@WD?{Q2WL%clK65NmlDKVR$A(TMl;POk^+`>pZGp)mm^6cn>GTz>T8lP>&tX^*FO zQf2OCEmk{nXNd@*AP4U>UtkM=E(^-vR_#zw0b~dKN6+dPu=V|8g=9Smlk77!M!A{J zPXeAkbx66>QXT4ctq#+ItI_DOh(hT3I~qTn?5+)od%T73;QsvvJn(y7+OKq+C_g^v zz36ECc@dD4v^>*v-x3Zx_wF!e`dC0-;rf0}rL?57MTf9<_NfDl$WCcYu5%5qNoz`Q zl&86ee}1)XS7SW#*2o=7QzOo8vj9i29lEbNEN;l1*wS+YC|yh^=<3sSgXCt5mmTRY zz-~w(Xw7D+iI&k+s#VYgj(nt~Lf@IRw!GXQ3Ht>x(ZE;{XFyebrc`mFQ9U#I zGr!m*uPQv#oS&AGwr=@2;>NWV`?Gi`DC|}0htob*?@frmWks|YNpd+BR&|(G+^NO! zpT#-d3<4Z>?Mcrc$o&8;42xco|E- ze(Kfu^IRf*txi7v0tnDFV8Ow?7WgRPppK?}^99F$acimn!d`6Jeq7e{`+GOj-EOGz zsmJ-5Gm!0=xed+Huu;>1dLKljTS4=Apz^=M*7d8`6{r;qn|xFWne=TdAPSzs{3so5 z@$W7d^r&l-XrDQPhn(He%vo2ShzxzWyN;%!vZ0V>=sWY8@SzmTZdsS$+Rl+hrfKJ< z{tb0%XZ4yUKfy6Azj)Eu92Dv^hkIR*s2o$lft!?0VylA3f@7)Tgepef@|9s!*9Dyf zb)0=tCt#_;NxgtwcxQ`(%WiIuyGuNTv5CWkCY=64Yu%} zs7sZb!uX%|%nQStFi#K3*iQCt1Bmw$2ANl(PY+L9(xZ?jT|hm7A7tpkJ5z$UZCmPO zxHFh*`SjV)Fa29lr>56!#D&XWQt~fvG8>#Ah$cQPa0f-RYSE5sy>JSktp&3ki40^J zI4aHOaiutrn&K#fUg<~H7Z5By8AYRpN^&j8%3<~J69a?dl8sq63|eYCxS~*9gFh4C z9xwOKg_pF?uj?O4tOsrF6HFA4(-R4%y7dfBg7$fQaDp z5Ue`w!){p<*31j^8fgfpo@|g3zvJ+QFZRgG>-&*Ogi%HS-w#aI{?nKN^O>T79n!b! 
zgu9g~OA7}(?~dfP_()Z=t|QF|p`RTt{xsyn!@p&;E$<@1Wz3vBOXg74uGEMu?HZcG zDcAR_3FVz?${{R;^ppbnaF1wLb$1@C-y+kMSSg75Xl>KSw<#S%Iyf}b!AN?^p%R+} z_XIv<`#`x^7a^u7-zd1YMTS99YRT2azBj0LL9Xw7yJKxuncT;Ad*C+{B9QTr7yOe$ zDYkvApP)F?>C)9OXAza<***2{nbU%ROQKD?^Sr-}rwEX=-I0n0kqYjp@(FK3!ZBQq?%CD zY1T2v!`OVNTJ0$uxRw(EjNEkAKOKF5(ARD#k|Dt8)~JHtiahyoDt-%ydL7j%TupB* ziv{Clu@C?N)a}tDZBd6YC6~Hv&^&+sa@*VEkDb9_AK8dgtq>uk#OInG1H1I zvm>1;z&W#g-|1pK{^A~i%+E-os=BFNC5Z&rwtPgmZAlNi(p+76tz&?ILcEdfXiT@~ zeDl1NPQWfBE;lUNLQ_>$bv{e`R`eh*8lHPIDH)&mGtY0J+are6XG@+bWxk*plvX-L zEmRj~xlu4grwct5B(oNN0#2Z05ERXv%^{EzNi#zWCNV?n#Ptp}rpeqIYtRG+$7|!~ zFfC9+#qrb(y<6k;@ziqjp&w6azZeB6?Q>vCFkG;6f~rzm{DAY)Ym#PW|GE>Moyeb5W^o0NtO% zEEblTJ_{uK54>LN|H4UfCOKK-?eOI+Uo`EI#d|)Mr&AXC^GR8*iBP?nnlB&kADczpr=1qYL>|CCskmOwS%}O>Y$)6&y z?Wo2pF|VJK7OAN+*9a>bkY(aTh$T560O zO7+S-9@!RV1w7s?@P<&J^Lx|>Xn5tTSynaj8A6#y@DfIE(Cv0rOTr%?s9wAREO!e> z)MAzfCcW?@iL4LT)KAe%OQom`*1shE%E5WGedy-70g6W@qq^FTkI1q+XAxDS9Jg)& z`wB_#3mXp7V_+2*!dve^lD$1^sKjIkh!Vew64=0 z)P(&S6r&q+R-I7f&p^JVZ~S%whk0SaXYxya@cq)h8SKzu@y%l+N4j6X0O2fOU*9kl z7k_{Ls$31fp*GdAQiaw8`I_@!7Pv4e%`JS=lWrpk(UD6 zw9MI^2RhCtiB-8xCM7u4-GYQWtC}gL54D~@XS*IXfNViuJG1}2uJcepz99&@KXo>aZy5(FTNF2^7 z9!yTbP;q<^h5}v@;tsxLkZ9x5@s?G{?fVTZ9NBvdv)FKvk)y8tvZg85^~Pw_LC^kc zHgi23s%IWq&wqtqd3*h{2LURLoT>K-Oyl4T>51}UmhNBr&_ar}r>BeN8jP$-5c%nBOD!1${+T-&k!_u3f2X&Kw>!{C1 z9v|opp8onx(kOxZCJ*VOX*3*J3et~d(8i(8r&9DlUzM2*Uat;;Own93#;t1j%$RJv zMtFQ-;V>%3iNC0P#5?b+Km!i5P+EjdD({Xt!X7mWN+P;Be|W56&G#yh$98YsL0EJy z3JWRmb!E}02~`uffm^02pkPkd$tkxI-`LPF-AyqDk^9+X5DOdVJ)bd{l%?5UUKN;j zZTNX;&XqJn=>RNPveM?Z-vt2Hokp$N#kLqN02dg<+hU*&AR!b|iY?h*0bi9HkGnRF680xguHGU&coPMgyX&s8#yX{Im{|R8>iu4Idzl z^#%_W5k%&$1Z0SWawvvaRPZl7>NhE$>N9kXChA5^{U@^5d!s*rJtMrVm%We`9Dz|n zo|`j!D%3}AkHyUMRD9402dOQacpVOU>HR1SlpcUaaaxRSIv**cyU;>}8vW+M=IJVt zXejYv8#K~|F$yLV_UNXWnv@H9oKtc)eccmrG$5csL`PZg=1oW<)$!mbm8gPi)TAiEs*VS~;{Ui&HkDmF7X zs=o{34k$#w;+d*hs^Hk6!})D3$fAI-^=p$ofGBcPM=Q-f&$p_+P)6+QQ68FfD*&Jj z+8-7Ob^ztyD5Qjkh(2iVPHnm;T0Xcs12A}9kB_HxW-Aq~_eq+0yEy}1o8NDC{Uu)C zs*l_YbgM1eDarxe6|<~CF?%j-pvU;=%hNL}AVis? 
z=jc(OZQ(cA{Uh3Baa>K%Cr24moB$kS;)EYrU)@2 zgFWQh`7GY5z%nlXWm552Hu#UCtrn;i0J2OVs|?U8*(^GDIz7tPE6!m^284pjg{W5S z;Epm^BFO#quTuk)Qc7?trwmetm2)q=c_p|g;U25K4`8qrlQHj0hT_%z&L%7HFE>eG z)P{FTv3jZ*IqblR>g=>U+qfrbTVRWgJjUBtYZtf9aSBm0;^bH2R7yXGFHaUaBjXTx ze7`eOjctj0`U%50fC%R0Q6G6zzckz?1N0pNd(gc0FB0K_E!H{10<0gnqT+u`mnxXL z5}VCdy^Y)q)JRb^Lm)zkCwC&S{pc z;Cw_yrSGx8Cu~xdk6~=FyZ-OUX7SX(C*l~_o(^<>uwM(JXR=YitEhHCM^?7v>Nmrn zVN4GMwn1rf)Kaf@_NTskdn57O5EBJ$r%^SUTB!2pbsFwqYxqkq;TVt z_Ep#J^7<_Xe)=6qTH-++ulq|UjpXRVXTl+{ZZZiYgKpy4Fod~cIiBFI3uL%VWRa=5cezUwMoTOTBW1)6_f#&$Qb=` z@`{L)?LnuWOQi0+@}crv2q0V@UJ!zfM{udP`q*o_os8wV5*At$KR5rDVNptM_@+53 zv-zPJs1m%k)&sY=-ZwRmYS?EvxfU=WeIGDMq~+CO!}-rIj4j%F1xHWw0GzTDo=m+K z_YpuDE*(+@BcBhdo;uZw&qfYMadrnDuJ6y7$i^23m-!kbX)L6%2)Bh*%=iEbsgJ*H zc9*pN(jo)%D1$JF(_Mgpe&Rt4B6xJOD5?k<#s`}`7zkF!of8R}FSNbeH=y(FZ8*bZ z04~oWtk0)2-;_WxssTt0JBJil6GC(@HQs6Ujb0SH%}biu9-thMqgb(&tTywx+Ab+c zCohZCnVYK!zNT*olh#R!6q%{~SD5-Ko;8J0JK3{gnx>eHphf4A|kiJ?O^Tsm}udZSUyVWh=8Na9rc` zaP$hyT`hI?$eGlEhAEZQ)ZGQ5MxYAw`0dumu&qST=6;e>*YPpL&DcLU1l3LX45j_F1=oSgvgM8JCNsAG6(W9(M z$*IXHZ8((^7R64~G}?yiQvSLRHKdA)Ka^*WQO12v;?ka1nhlBl?L-Iujmn~<*1S{P z`L*vHZsPtp#~b+P@^1MDzkTzB;VMcU{e2W2M+>ScCO|)xB9pZ2q_rX{99oaBQYjdQ zASu{vWgf%IgO{JaL~||vc6f0yT^0lJL)ELRa&EkVIAg3PQdNG&S}J3f^-VB7oaj@{ z){b46Jhb-Y(W7UKbDEJ-`IUO7qjs5IzMNqjm_K0V4X7*mnQU?LNFrQG{k7wmff`c*65z)v2*>xU&;LUf%^ahc*?% zumvb7|J~|j5RVgxP;q~23d@53a^cz%=bDZ{dOst9&m|=%lkA3CJn>|KPSd)m=dY(F zMP8^Sr)235Ry)P=1O3alpyV$v+f%Eb1K%#lao9O-zpDLE%^0bQQK$AkC(jVPR(##` zlhy2`ZAzMb^c_jPKtka^^Ev&F=!7e=GBd{_Z_-pBBq%_mlZ;sjA@g^2mbl1aM-zGx z#0yx!^Z{|Zw!VR?IY^JbP7vcBobR{B#e>C2`}Y3H8S2_vdFanfx#~A$Yt&(VyVY%cz>RGZX9L>`ARVJh=%%5 zb1+AFatFuWjKxw? zh$dm~?A0;`SR0E&QO6CsEG9jPkgkV5W>=w9{cFRmdNE7a< z35%HbpH-f<$%QoY=LU+;#M7e`X;(p`c=HqkXwqse2zzScImSQwpr}O$_p&UiWK-?bnqr_?FaLJi}cR~pK8YhHQqKvt#YZ3mZHz-f;Ze->m zaVeh>6Kx$_4ieY;%c_+CZBmIZP9dfHWhTFZve*yvjBS~Fcd9^7u4ff9P%vSBSrwMx z9)mo)Vm)BtYwPtOFhP!%sUG-V!~vI7121HjYfttst1WZ+%9V-dOumpy8W9Jxyk~5z zY3p`?{hWG;)j94^lO)+IexC7Yb`Fl>5rto2qItmtHZ#vsV5QliYiEJq&)lfTHNZ!p zyWrEpw5{1sXHe-VC|`0`_y2TN1HQaj7wo^GkoF(HG=8yn4038h6&paE3?bE^PKv9; z9OgAZ(15kF3?oxR{B2jS=pPKb@;^!fSLxdj@AXf&$8P*MY6$~w)*@CvkMC3&6wS7* zJJ9n^h*0J_i?#y-N%M+bDUhMZ0TqlfA}1|oasvr37~cFRRT@C5*1DL>*2e34{q=A3 z^M7}fC5RB!XP#`ng~52p%RDMlU}|cL_)^bSRs;ak3_x?Qn0nq)<&B<7uiMVj6a5G@ z(?RX^dSRn)&5r~4y(>4|-Y>ZVv0`uO?eA~C#Uu0#4Ndd#h%U++@XFGOpdF#THKul?QD9*>K-~12zC6EAv{&a&uk1z8jc})ug4N^hbF5{h!jgpp6MJ zKEUMs2r@Z`Z&6s&>pyQa1*^&TlHsrT&dsl;;qrfJ}NM8hkb zF#FJo12^$c%>@9a)w%$^#Kf4g<%uneBhNFy)Lg6|>)seh8WQY`${MJ_x7(9Y8DIVD z>+1pYa+Jr#cb+ZUeHY|@KEC08Ms2#EFVef8ZMV9g;|1TfWySU$q>PVS$-5CvK<%c3 z-$Fg~t`#YGS`~D5UY1fpQTRN}v+nHp4&EZ=?ju>hEnDy#R-mJIbM4BcZI;B0543^e zeHj@l)=!^3%R^PNk{P0>_B4Xtbx!%&yAwEC@6I;R_pa||?^?e?$R1U>z&A00FIx7> z6w)i?&juDq{*&$*O)p;TT`u*Y4AlWT^meJ&z{M{3n`?J~J2LNu-ou3ZA;*6>vSsd> zMn2uLk}(33Lq;<~JDc`Kb$@iZm{%*l6GY#a=c4F_$RfZ;in>!;yaoO#pm ztVnNoKE^>aJYTNq*5yu9)G;$lpc3bwmwK$>dX2hVt2ck6^?JpiOqx}e?)2&2Wd%p0a>5d#>{U1(6Sv}dI8(1ZM}x*$0@cW?SybH-(cY6-)e60#psqO2R+dnOL8s)w1zY zp~z=+ADFumGuoe8Y_vX&krfPHFIHJOxrd;h52!hzq^)Oqo!IUoSR@8YJRn>y|12S3 zJ^1GD#h(ka1J6M@13g$mj;KyYUJ4;X+RUr= zJK+3sj1HPSl+=+rTpE*OS&)9s(NXwBZSzCUrj-XE)p@HPWwgcKoB-v1VnIt1!W-d? 
zRW1A}Ya1+RdWmKK92bAhf;m?El&sQax8bk|k}nPftK%Yl>xaJ3njB!(&GPezu3VQw zBkt3Vj>N^_>D9R#fRp|5WNRHP9+0mgYo=fC-MFvD+XxLHNa5SI@&0H~7mef70)F2_h%f*1oXvo6vzP|J~lVAck$hn#W zpR_kVO_P8!!Ayn4|K`uz%_jK<&W5ZQPP=nJe0 z;PN#J|IqE935&3UdO0$9MKKk zLt3vv$-NQON3NoJD7pvMIPe)Xl!b89h{o+0)q)!)e%+q2mFJ^-!0?Np#vT0SYM&r| zqxHhU!eMf}$&cm+l+XhYr=8tWMSnxO>uhKIXE1?&`DZZwGnoE~O#fSCqH?`@m{`Jd Sv{8H;_|eriIFqmCaN}QYJZT01 literal 0 HcmV?d00001 diff --git a/examples/docs_snippets/docs_snippets/concepts/assets/external_assets/__init__.py b/examples/docs_snippets/docs_snippets/concepts/assets/external_assets/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/examples/docs_snippets/docs_snippets/concepts/assets/external_assets/external_asset_deps.py b/examples/docs_snippets/docs_snippets/concepts/assets/external_assets/external_asset_deps.py new file mode 100644 index 0000000000000..07f60b132c38e --- /dev/null +++ b/examples/docs_snippets/docs_snippets/concepts/assets/external_assets/external_asset_deps.py @@ -0,0 +1,6 @@ +from dagster import AssetSpec, Definitions, external_assets_from_specs + +raw_logs = AssetSpec("raw_logs") +processed_logs = AssetSpec("processed_logs", deps=[raw_logs]) + +defs = Definitions(assets=external_assets_from_specs([raw_logs, processed_logs])) diff --git a/examples/docs_snippets/docs_snippets/concepts/assets/external_assets/external_asset_events_using_python_api.py b/examples/docs_snippets/docs_snippets/concepts/assets/external_assets/external_asset_events_using_python_api.py new file mode 100644 index 0000000000000..5b67ec1613111 --- /dev/null +++ b/examples/docs_snippets/docs_snippets/concepts/assets/external_assets/external_asset_events_using_python_api.py @@ -0,0 +1,21 @@ +from dagster import AssetSpec, Definitions, external_assets_from_specs + +asset_one = AssetSpec("asset_one") +asset_two = AssetSpec("asset_two", deps=[asset_one]) + +defs = Definitions(assets=external_assets_from_specs([asset_one, asset_two])) + + +def do_report_runless_asset_event(instance) -> None: + # start_python_api_marker + from dagster import AssetMaterialization + + # instance is a DagsterInstance. 
+    instance.report_runless_asset_event(
+        AssetMaterialization(
+            "asset_one", metadata={"nrows": 10, "source": "From this script."}
+        )
+    )
+
+
+# end_python_api_marker
diff --git a/examples/docs_snippets/docs_snippets/concepts/assets/external_assets/external_asset_using_sensor.py b/examples/docs_snippets/docs_snippets/concepts/assets/external_assets/external_asset_using_sensor.py
new file mode 100644
index 0000000000000..ee5bbd8394702
--- /dev/null
+++ b/examples/docs_snippets/docs_snippets/concepts/assets/external_assets/external_asset_using_sensor.py
@@ -0,0 +1,36 @@
+import datetime
+
+from dagster import (
+    AssetMaterialization,
+    AssetSpec,
+    Definitions,
+    SensorEvaluationContext,
+    SensorResult,
+    external_asset_from_spec,
+    sensor,
+)
+
+
+def utc_now_str() -> str:
+    return datetime.datetime.now(datetime.timezone.utc).strftime("%Y-%m-%d, %H:%M:%S")
+
+
+@sensor()
+def keep_external_asset_a_up_to_date(context: SensorEvaluationContext) -> SensorResult:
+    # The materialization happened in an external system, but it is recorded here
+    return SensorResult(
+        asset_events=[
+            AssetMaterialization(
+                asset_key="external_asset_a",
+                metadata={
+                    "source": f'From sensor "{context.sensor_name}" at UTC time "{utc_now_str()}"'
+                },
+            )
+        ]
+    )
+
+
+defs = Definitions(
+    assets=[external_asset_from_spec(AssetSpec("external_asset_a"))],
+    sensors=[keep_external_asset_a_up_to_date],
+)
diff --git a/examples/docs_snippets/docs_snippets/concepts/assets/external_assets/normal_asset_depending_on_external.py b/examples/docs_snippets/docs_snippets/concepts/assets/external_assets/normal_asset_depending_on_external.py
new file mode 100644
index 0000000000000..45637c74705ad
--- /dev/null
+++ b/examples/docs_snippets/docs_snippets/concepts/assets/external_assets/normal_asset_depending_on_external.py
@@ -0,0 +1,15 @@
+from dagster import AssetSpec, Definitions, asset, external_assets_from_specs
+
+raw_logs = AssetSpec("raw_logs")
+processed_logs = AssetSpec("processed_logs", deps=[raw_logs])
+
+
+@asset(deps=[processed_logs])
+def aggregated_logs() -> None:
+    # Loads "processed_logs" into memory and performs some aggregation
+    ...
+ + +defs = Definitions( + assets=[aggregated_logs, *external_assets_from_specs([raw_logs, processed_logs])] +) diff --git a/examples/docs_snippets/docs_snippets/concepts/assets/external_assets/single_declaration.py b/examples/docs_snippets/docs_snippets/concepts/assets/external_assets/single_declaration.py new file mode 100644 index 0000000000000..695a4927f4e1a --- /dev/null +++ b/examples/docs_snippets/docs_snippets/concepts/assets/external_assets/single_declaration.py @@ -0,0 +1,3 @@ +from dagster import AssetSpec, Definitions, external_asset_from_spec + +defs = Definitions(assets=[external_asset_from_spec(AssetSpec("file_in_s3"))]) diff --git a/examples/docs_snippets/docs_snippets/concepts/assets/external_assets/update_external_asset_via_op.py b/examples/docs_snippets/docs_snippets/concepts/assets/external_assets/update_external_asset_via_op.py new file mode 100644 index 0000000000000..df119e9a7b30e --- /dev/null +++ b/examples/docs_snippets/docs_snippets/concepts/assets/external_assets/update_external_asset_via_op.py @@ -0,0 +1,24 @@ +from dagster import ( + AssetMaterialization, + AssetSpec, + Definitions, + OpExecutionContext, + external_asset_from_spec, + job, + op, +) + + +@op +def an_op(context: OpExecutionContext) -> None: + context.log_event(AssetMaterialization(asset_key="external_asset")) + + +@job +def a_job() -> None: + an_op() + + +defs = Definitions( + assets=[external_asset_from_spec(AssetSpec("external_asset"))], jobs=[a_job] +) diff --git a/examples/docs_snippets/docs_snippets_tests/concepts_tests/external_asset_tests/__init__.py b/examples/docs_snippets/docs_snippets_tests/concepts_tests/external_asset_tests/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/examples/docs_snippets/docs_snippets_tests/concepts_tests/external_asset_tests/test_external_asset_python_api.py b/examples/docs_snippets/docs_snippets_tests/concepts_tests/external_asset_tests/test_external_asset_python_api.py new file mode 100644 index 0000000000000..45ad282af64e5 --- /dev/null +++ b/examples/docs_snippets/docs_snippets_tests/concepts_tests/external_asset_tests/test_external_asset_python_api.py @@ -0,0 +1,11 @@ +from dagster import AssetKey, DagsterInstance +from docs_snippets.concepts.assets.external_assets.external_asset_events_using_python_api import ( + do_report_runless_asset_event, +) + + +def test_do_report_runless_asset_event() -> None: + instance = DagsterInstance.ephemeral() + do_report_runless_asset_event(instance) + + assert instance.get_latest_materialization_event(AssetKey("asset_one")) diff --git a/examples/docs_snippets/docs_snippets_tests/concepts_tests/external_asset_tests/test_external_asset_sensor.py b/examples/docs_snippets/docs_snippets_tests/concepts_tests/external_asset_tests/test_external_asset_sensor.py new file mode 100644 index 0000000000000..f668d5ea61eab --- /dev/null +++ b/examples/docs_snippets/docs_snippets_tests/concepts_tests/external_asset_tests/test_external_asset_sensor.py @@ -0,0 +1,23 @@ +from dagster import ( + AssetKey, + AssetMaterialization, + DagsterInstance, + SensorResult, + build_sensor_context, +) +from docs_snippets.concepts.assets.external_assets.external_asset_using_sensor import ( + keep_external_asset_a_up_to_date, +) + + +def test_keep_external_asset_a_up_to_date() -> None: + instance = DagsterInstance.ephemeral() + result = keep_external_asset_a_up_to_date( + build_sensor_context( + instance=instance, sensor_name="keep_external_asset_a_up_to_date" + ) + ) + assert isinstance(result, SensorResult) + assert 
len(result.asset_events) == 1 + assert isinstance(result.asset_events[0], AssetMaterialization) + assert result.asset_events[0].asset_key == AssetKey("external_asset_a") diff --git a/examples/docs_snippets/docs_snippets_tests/concepts_tests/external_asset_tests/test_external_assets_decls.py b/examples/docs_snippets/docs_snippets_tests/concepts_tests/external_asset_tests/test_external_assets_decls.py new file mode 100644 index 0000000000000..85c870482bb25 --- /dev/null +++ b/examples/docs_snippets/docs_snippets_tests/concepts_tests/external_asset_tests/test_external_assets_decls.py @@ -0,0 +1,45 @@ +import docs_snippets.concepts.assets.external_assets.external_asset_deps +import docs_snippets.concepts.assets.external_assets.normal_asset_depending_on_external +import docs_snippets.concepts.assets.external_assets.single_declaration +from dagster import AssetKey, Definitions + + +def test_docs_snippets_concepts_external_asset_single_decl() -> None: + single_decl_defs: Definitions = ( + docs_snippets.concepts.assets.external_assets.single_declaration.defs + ) + assert single_decl_defs.get_assets_def("file_in_s3") + + +def test_docs_snippets_concepts_external_asset_external_asset_deps() -> None: + defs_with_deps: Definitions = ( + docs_snippets.concepts.assets.external_assets.external_asset_deps.defs + ) + assert defs_with_deps.get_assets_def("raw_logs") + assert defs_with_deps.get_assets_def("processed_logs") + assert defs_with_deps.get_assets_def("processed_logs").asset_deps[ + AssetKey("processed_logs") + ] == {AssetKey("raw_logs")} + + +def test_docs_snippets_normal_assets_dep_on_external() -> None: + defs: Definitions = ( + docs_snippets.concepts.assets.external_assets.normal_asset_depending_on_external.defs + ) + + from docs_snippets.concepts.assets.external_assets.normal_asset_depending_on_external import ( + aggregated_logs, + ) + + al_key = aggregated_logs.key + + assert defs.get_assets_def(al_key) + assert defs.get_assets_def(al_key).asset_deps[al_key] == { + AssetKey("processed_logs") + } + + assert ( + defs.get_implicit_global_asset_job_def() + .execute_in_process(asset_selection=[al_key]) + .success + ) diff --git a/examples/docs_snippets/docs_snippets_tests/concepts_tests/external_asset_tests/test_external_assets_with_ops.py b/examples/docs_snippets/docs_snippets_tests/concepts_tests/external_asset_tests/test_external_assets_with_ops.py new file mode 100644 index 0000000000000..66e6b4c5620d0 --- /dev/null +++ b/examples/docs_snippets/docs_snippets_tests/concepts_tests/external_asset_tests/test_external_assets_with_ops.py @@ -0,0 +1,14 @@ +import docs_snippets.concepts.assets.external_assets.update_external_asset_via_op +from dagster import AssetKey, DagsterInstance, Definitions + + +def test_external_assets_update_external_asset_via_op_0() -> None: + defs: Definitions = ( + docs_snippets.concepts.assets.external_assets.update_external_asset_via_op.defs + ) + a_job_def = defs.get_job_def("a_job") + instance = DagsterInstance.ephemeral() + result = a_job_def.execute_in_process(instance=instance) + assert result.success + + assert instance.get_latest_materialization_event(AssetKey("external_asset")) diff --git a/python_modules/dagster/dagster/__init__.py b/python_modules/dagster/dagster/__init__.py index d4e60a04402da..7ec2ae028f70b 100644 --- a/python_modules/dagster/dagster/__init__.py +++ b/python_modules/dagster/dagster/__init__.py @@ -199,6 +199,10 @@ multiple_process_executor_requirements as multiple_process_executor_requirements, multiprocess_executor as multiprocess_executor, 
) +from dagster._core.definitions.external_asset import ( + external_asset_from_spec as external_asset_from_spec, + external_assets_from_specs as external_assets_from_specs, +) from dagster._core.definitions.freshness_policy import FreshnessPolicy as FreshnessPolicy from dagster._core.definitions.freshness_policy_sensor_definition import ( FreshnessPolicySensorContext as FreshnessPolicySensorContext, diff --git a/python_modules/dagster/dagster/_core/definitions/external_asset.py b/python_modules/dagster/dagster/_core/definitions/external_asset.py index e248cfcd40a6a..b44b681c77896 100644 --- a/python_modules/dagster/dagster/_core/definitions/external_asset.py +++ b/python_modules/dagster/dagster/_core/definitions/external_asset.py @@ -16,6 +16,10 @@ from dagster._core.execution.context.compute import AssetExecutionContext +def external_asset_from_spec(spec: AssetSpec) -> AssetsDefinition: + return external_assets_from_specs([spec])[0] + + def external_assets_from_specs(specs: Sequence[AssetSpec]) -> List[AssetsDefinition]: """Create an external assets definition from a sequence of asset specs. diff --git a/python_modules/dagster/dagster/_core/definitions/sensor_definition.py b/python_modules/dagster/dagster/_core/definitions/sensor_definition.py index bba4703b30947..6e4507b96ab2e 100644 --- a/python_modules/dagster/dagster/_core/definitions/sensor_definition.py +++ b/python_modules/dagster/dagster/_core/definitions/sensor_definition.py @@ -196,6 +196,10 @@ def __exit__(self, *exc) -> None: def resource_defs(self) -> Optional[Mapping[str, "ResourceDefinition"]]: return self._resource_defs + @property + def sensor_name(self) -> str: + return check.not_none(self._sensor_name, "Only valid when sensor name provided") + def merge_resources(self, resources_dict: Mapping[str, Any]) -> "SensorEvaluationContext": """Merge the specified resources into this context. 
diff --git a/python_modules/dagster/dagster_tests/definitions_tests/test_external_assets.py b/python_modules/dagster/dagster_tests/definitions_tests/test_external_assets.py index 03b04a3f1079a..2b4644deba243 100644 --- a/python_modules/dagster/dagster_tests/definitions_tests/test_external_assets.py +++ b/python_modules/dagster/dagster_tests/definitions_tests/test_external_assets.py @@ -17,6 +17,7 @@ observable_source_asset, ) from dagster._core.definitions.asset_spec import AssetSpec +from dagster._core.definitions.decorators.asset_decorator import multi_asset from dagster._core.definitions.external_asset import ( create_external_asset_from_source_asset, external_assets_from_specs, @@ -248,3 +249,51 @@ def an_observable_source_asset() -> DataVersion: all_materializations = result.get_asset_materialization_events() assert len(all_materializations) == 0 + + +def test_external_assets_with_dependencies_manual_construction() -> None: + upstream_asset = AssetSpec("upstream_asset") + downstream_asset = AssetSpec("downstream_asset", deps=[upstream_asset]) + + @multi_asset(name="_generated_asset_def_1", specs=[upstream_asset]) + def _upstream_def(context: AssetExecutionContext) -> None: + raise Exception("do not execute") + + @multi_asset(name="_generated_asset_def_2", specs=[downstream_asset]) + def _downstream_asset(context: AssetExecutionContext) -> None: + raise Exception("do not execute") + + defs = Definitions(assets=[_upstream_def, _downstream_asset]) + assert defs + + assert defs.get_implicit_global_asset_job_def().asset_layer.asset_deps[ + AssetKey("downstream_asset") + ] == {AssetKey("upstream_asset")} + + +def test_external_asset_multi_asset() -> None: + upstream_asset = AssetSpec("upstream_asset") + downstream_asset = AssetSpec("downstream_asset", deps=[upstream_asset]) + + @multi_asset(specs=[downstream_asset, upstream_asset]) + def _generated_asset_def(context: AssetExecutionContext): + raise Exception("do not execute") + + defs = Definitions(assets=[_generated_asset_def]) + assert defs + + assert defs.get_implicit_global_asset_job_def().asset_layer.asset_deps[ + AssetKey("downstream_asset") + ] == {AssetKey("upstream_asset")} + + +def test_external_assets_with_dependencies() -> None: + upstream_asset = AssetSpec("upstream_asset") + downstream_asset = AssetSpec("downstream_asset", deps=[upstream_asset]) + + defs = Definitions(assets=external_assets_from_specs([upstream_asset, downstream_asset])) + assert defs + + assert defs.get_implicit_global_asset_job_def().asset_layer.asset_deps[ + AssetKey("downstream_asset") + ] == {AssetKey("upstream_asset")}
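Taken together, the snippets and APIs added above support a simple end-to-end workflow: declare an external asset, record a materialization for it without launching a run, and query the event back. The following is a minimal sketch of that workflow and is not part of the patch itself; it assumes only the public names introduced in this diff (external_asset_from_spec, DagsterInstance.report_runless_asset_event) plus existing Dagster APIs, and the asset key "warehouse_table" is a hypothetical example.

# Hypothetical end-to-end sketch (not part of the patch) exercising the APIs above.
from dagster import (
    AssetKey,
    AssetMaterialization,
    AssetSpec,
    DagsterInstance,
    Definitions,
    external_asset_from_spec,
)

# Declare an asset that is produced and updated outside of Dagster.
defs = Definitions(assets=[external_asset_from_spec(AssetSpec("warehouse_table"))])

if __name__ == "__main__":
    # An ephemeral instance is used here; DagsterInstance.get() would target a real deployment.
    instance = DagsterInstance.ephemeral()

    # Record a materialization for the external asset without launching a run.
    instance.report_runless_asset_event(
        AssetMaterialization("warehouse_table", metadata={"source": "external ETL"})
    )

    # The event is immediately queryable from the instance, as the tests above demonstrate.
    assert instance.get_latest_materialization_event(AssetKey("warehouse_table"))

Against a persistent instance (for example, one obtained with DagsterInstance.get() in a deployed environment), the same report_runless_asset_event call is what surfaces an external asset's materialization history in the UI.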