diff --git a/python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_auto_materialize_asset_evaluations.py b/python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_auto_materialize_asset_evaluations.py index 89a84e5bd6607..0b70f0a235293 100644 --- a/python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_auto_materialize_asset_evaluations.py +++ b/python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_auto_materialize_asset_evaluations.py @@ -26,6 +26,7 @@ FIXED_AUTO_MATERIALIZATION_SELECTOR_ID, ) from dagster._serdes import deserialize_value +from dagster._serdes.serdes import serialize_value from dagster_graphql.test.utils import execute_dagster_graphql, infer_repository from dagster_graphql_tests.graphql.graphql_context_test_suite import ( @@ -332,7 +333,7 @@ def test_automation_policy_sensor(self, graphql_context: WorkspaceRequestContext InstigatorType.SENSOR, status=InstigatorStatus.RUNNING, instigator_data=SensorInstigatorData( - cursor=AssetDaemonCursor.empty()._replace(evaluation_id=12345).serialize() + cursor=serialize_value(AssetDaemonCursor.empty(12345)) ), ) ) @@ -684,7 +685,7 @@ def _test_get_evaluations_with_partitions(self, graphql_context: WorkspaceReques def _test_current_evaluation_id(self, graphql_context: WorkspaceRequestContext): graphql_context.instance.daemon_cursor_storage.set_cursor_values( - {CURSOR_KEY: AssetDaemonCursor.empty().serialize()} + {CURSOR_KEY: serialize_value(AssetDaemonCursor.empty(0))} ) results = execute_dagster_graphql( @@ -703,13 +704,7 @@ def _test_current_evaluation_id(self, graphql_context: WorkspaceRequestContext): } graphql_context.instance.daemon_cursor_storage.set_cursor_values( - { - CURSOR_KEY: ( - AssetDaemonCursor.empty() - .with_updates(0, set(), set(), set(), {}, 42, None, [], 0) # type: ignore - .serialize() - ) - } + {CURSOR_KEY: serialize_value(AssetDaemonCursor.empty(0).with_updates(0, 1.0, [], []))} ) results = execute_dagster_graphql( diff --git a/python_modules/dagster/dagster/_core/definitions/asset_condition.py b/python_modules/dagster/dagster/_core/definitions/asset_condition.py index 592e541770548..6d8716c87e79c 100644 --- a/python_modules/dagster/dagster/_core/definitions/asset_condition.py +++ b/python_modules/dagster/dagster/_core/definitions/asset_condition.py @@ -1,6 +1,6 @@ import functools import hashlib -from abc import ABC, abstractmethod +from abc import ABC, abstractmethod, abstractproperty from typing import ( TYPE_CHECKING, AbstractSet, @@ -76,6 +76,10 @@ def equivalent_to_stored_evaluation(self, other: Optional["AssetConditionEvaluat ) def discarded_subset(self, condition: "AssetCondition") -> Optional[AssetSubset]: + """Returns the AssetSubset representing asset partitions that were discarded during this + evaluation. Note that 'discarding' is a deprecated concept that is only used for backwards + compatibility.
+ """ not_discard_condition = condition.not_discard_condition if not not_discard_condition or len(self.child_evaluations) != 3: return None @@ -88,7 +92,8 @@ def get_requested_or_discarded_subset(self, condition: "AssetCondition") -> Asse discarded_subset = self.discarded_subset(condition) if discarded_subset is None: return self.true_subset - return self.true_subset | discarded_subset + else: + return self.true_subset | discarded_subset def for_child(self, child_condition: "AssetCondition") -> Optional["AssetConditionEvaluation"]: """Returns the evaluation of a given child condition by finding the child evaluation that @@ -143,6 +148,10 @@ def evaluate( ) -> Tuple[AssetConditionEvaluation, Sequence[AssetConditionCursorExtras]]: raise NotImplementedError() + @abstractproperty + def description(self) -> str: + raise NotImplementedError() + def __and__(self, other: "AssetCondition") -> "AssetCondition": # group AndAutomationConditions together if isinstance(self, AndAssetCondition): @@ -187,7 +196,7 @@ def snapshot(self) -> AssetConditionSnapshot: """Returns a snapshot of this condition that can be used for serialization.""" return AssetConditionSnapshot( class_name=self.__class__.__name__, - description=str(self), + description=self.description, unique_id=self.unique_id, ) @@ -200,25 +209,29 @@ class RuleCondition( @property def unique_id(self) -> str: - parts = [self.rule.__class__.__name__, self.rule.description] + parts = [self.rule.__class__.__name__, self.description] return hashlib.md5("".join(parts).encode()).hexdigest() + @property + def description(self) -> str: + return self.rule.description + def evaluate( self, context: AssetConditionEvaluationContext ) -> Tuple[AssetConditionEvaluation, Sequence[AssetConditionCursorExtras]]: context.root_context.daemon_context._verbose_log_fn( # noqa f"Evaluating rule: {self.rule.to_snapshot()}" ) - true_subset, subsets_with_metadata = self.rule.evaluate_for_asset(context) + true_subset, subsets_with_metadata, extras = self.rule.evaluate_for_asset(context) context.root_context.daemon_context._verbose_log_fn( # noqa - f"Rule returned {true_subset.size} partitions" + f"Rule returned {true_subset.size} partitions: {true_subset}" ) return AssetConditionEvaluation( condition_snapshot=self.snapshot, true_subset=true_subset, candidate_subset=context.candidate_subset, subsets_with_metadata=subsets_with_metadata, - ), [AssetConditionCursorExtras(condition_snapshot=self.snapshot, extras={})] + ), [AssetConditionCursorExtras(condition_snapshot=self.snapshot, extras=extras)] class AndAssetCondition( @@ -227,6 +240,10 @@ class AndAssetCondition( ): """This class represents the condition that all of its children evaluate to true.""" + @property + def description(self) -> str: + return "All of" + def evaluate( self, context: AssetConditionEvaluationContext ) -> Tuple[AssetConditionEvaluation, Sequence[AssetConditionCursorExtras]]: @@ -253,6 +270,10 @@ class OrAssetCondition( ): """This class represents the condition that any of its children evaluate to true.""" + @property + def description(self) -> str: + return "Any of" + def evaluate( self, context: AssetConditionEvaluationContext ) -> Tuple[AssetConditionEvaluation, Sequence[AssetConditionCursorExtras]]: @@ -285,6 +306,10 @@ def __new__(cls, children: Sequence[AssetCondition]): check.invariant(len(children) == 1) return super().__new__(cls, children) + @property + def description(self) -> str: + return "Not" + @property def child(self) -> AssetCondition: return self.children[0] diff --git
a/python_modules/dagster/dagster/_core/definitions/asset_condition_evaluation_context.py b/python_modules/dagster/dagster/_core/definitions/asset_condition_evaluation_context.py index b5852fb37f245..3db7b7190c533 100644 --- a/python_modules/dagster/dagster/_core/definitions/asset_condition_evaluation_context.py +++ b/python_modules/dagster/dagster/_core/definitions/asset_condition_evaluation_context.py @@ -151,7 +151,18 @@ def parent_will_update_subset(self) -> AssetSubset: subset |= parent_subset._replace(asset_key=self.asset_key) return subset - @property + @functools.cached_property + @root_property + def new_max_storage_id(self) -> Optional[int]: + """Returns the new max storage ID for this asset, if any.""" + # TODO: This is not a good way of doing this, as it opens us up to potential race conditions, + # but in the interest of keeping this PR simple, I'm leaving this logic as is. In the next + # PR, I'll update the code to return a "maximum observed record id" from inside the + # `get_asset_partitions_updated_after_cursor` call. + return self.instance_queryer.instance.event_log_storage.get_maximum_record_id() + + @functools.cached_property + @root_property def materialized_since_previous_tick_subset(self) -> AssetSubset: """Returns the set of asset partitions that were materialized since the previous tick.""" return AssetSubset.from_asset_partitions_set( @@ -160,44 +171,28 @@ def materialized_since_previous_tick_subset(self) -> AssetSubset: self.instance_queryer.get_asset_partitions_updated_after_cursor( self.asset_key, asset_partitions=None, - after_cursor=self.cursor.previous_max_storage_id if self.cursor else None, + after_cursor=self.cursor.previous_max_storage_id, respect_materialization_data_versions=False, ), ) @property - def previous_tick_requested_or_discarded_subset(self) -> AssetSubset: - if not self.cursor.previous_evaluation: + @root_property + def previous_tick_requested_subset(self) -> AssetSubset: + """The set of asset partitions that were requested (or discarded) on the previous tick.""" + previous_evaluation = self.cursor.previous_evaluation + if previous_evaluation is None: return self.empty_subset() - return self.cursor.previous_evaluation.get_requested_or_discarded_subset( - self.root_context.condition - ) + + return previous_evaluation.get_requested_or_discarded_subset(self.condition) @property def materialized_requested_or_discarded_since_previous_tick_subset(self) -> AssetSubset: """Returns the set of asset partitions that were materialized since the previous tick.""" - return ( - self.materialized_since_previous_tick_subset - | self.previous_tick_requested_or_discarded_subset - ) + return self.materialized_since_previous_tick_subset | self.previous_tick_requested_subset - @property - def never_materialized_requested_or_discarded_root_subset(self) -> AssetSubset: - if self.asset_key not in self.asset_graph.root_materializable_or_observable_asset_keys: - return self.empty_subset() - - handled_subset = ( - self.cursor.get_extras_value(self.condition, "handled_subset", AssetSubset) - or self.empty_subset() - ) - unhandled_subset = handled_subset.inverse( - self.partitions_def, - dynamic_partitions_store=self.instance_queryer, - current_time=self.evaluation_time, - ) - return unhandled_subset - self.materialized_since_previous_tick_subset - - @property + @functools.cached_property + @root_property def parent_has_updated_subset(self) -> AssetSubset: """Returns the set of asset partitions whose parents have updated since the last time this condition was evaluated. 
@@ -206,7 +201,7 @@ def parent_has_updated_subset(self) -> AssetSubset: self.asset_key, self.partitions_def, self.root_context.instance_queryer.asset_partitions_with_newly_updated_parents( - latest_storage_id=self.previous_max_storage_id, + latest_storage_id=self.cursor.previous_max_storage_id, child_asset_key=self.root_context.asset_key, map_old_time_partitions=False, ), diff --git a/python_modules/dagster/dagster/_core/definitions/asset_daemon_context.py b/python_modules/dagster/dagster/_core/definitions/asset_daemon_context.py index c6ebc6c22addd..41e7a08dc3ee5 100644 --- a/python_modules/dagster/dagster/_core/definitions/asset_daemon_context.py +++ b/python_modules/dagster/dagster/_core/definitions/asset_daemon_context.py @@ -31,7 +31,6 @@ get_time_partitions_def, ) from dagster._core.instance import DynamicPartitionsStore -from dagster._utils.cached_method import cached_method from ... import PartitionKeyRange from ..storage.tags import ASSET_PARTITION_RANGE_END_TAG, ASSET_PARTITION_RANGE_START_TAG @@ -134,10 +133,6 @@ def cursor(self) -> AssetDaemonCursor: def asset_graph(self) -> AssetGraph: return self.instance_queryer.asset_graph - @property - def latest_storage_id(self) -> Optional[int]: - return self.cursor.latest_storage_id - @property def auto_materialize_asset_keys(self) -> AbstractSet[AssetKey]: return self._auto_materialize_asset_keys @@ -177,46 +172,6 @@ def prefetch(self) -> None: ) self.instance_queryer.prefetch_asset_records(self.asset_records_to_prefetch) self._verbose_log_fn("Done prefetching asset records.") - self._verbose_log_fn( - f"Calculated a new latest_storage_id value of {self.get_new_latest_storage_id()}.\n" - f"Precalculating updated parents for {len(self.auto_materialize_asset_keys)} assets using previous " - f"latest_storage_id of {self.latest_storage_id}." - ) - for asset_key in self.auto_materialize_asset_keys: - self.instance_queryer.asset_partitions_with_newly_updated_parents( - latest_storage_id=self.latest_storage_id, child_asset_key=asset_key - ) - self._verbose_log_fn("Done precalculating updated parents.") - - @cached_method - def get_new_latest_storage_id(self) -> Optional[int]: - """Get the latest storage id across all cached asset records. We use this method as it uses - identical data to what is used to calculate assets with updated parents, and therefore - avoids certain classes of race conditions. 
- """ - new_latest_storage_id = self.latest_storage_id - for asset_key in self.auto_materialize_asset_keys_and_parents: - # ignore non-observable sources - if self.asset_graph.is_source(asset_key) and not self.asset_graph.is_observable( - asset_key - ): - continue - # ignore cases where we know for sure there's no new event - if not self.instance_queryer.asset_partition_has_materialization_or_observation( - AssetKeyPartitionKey(asset_key), after_cursor=self.latest_storage_id - ): - continue - # get the latest overall storage id for this asset key - asset_latest_storage_id = ( - self.instance_queryer.get_latest_materialization_or_observation_storage_id( - AssetKeyPartitionKey(asset_key) - ) - ) - new_latest_storage_id = max( - filter(None, [new_latest_storage_id, asset_latest_storage_id]), default=None - ) - - return new_latest_storage_id def evaluate_asset( self, @@ -241,11 +196,11 @@ def evaluate_asset( self.asset_graph.auto_materialize_policies_by_key.get(asset_key) ).to_asset_condition() - asset_cursor = self.cursor.asset_cursor_for_key(asset_key, self.asset_graph) + asset_cursor = self.cursor.get_asset_cursor(asset_key) context = AssetConditionEvaluationContext.create( asset_key=asset_key, - cursor=self.cursor.asset_cursor_for_key(asset_key, self.asset_graph), + cursor=asset_cursor, condition=asset_condition, instance_queryer=self.instance_queryer, data_time_resolver=self.data_time_resolver, @@ -254,9 +209,15 @@ def evaluate_asset( expected_data_time_mapping=expected_data_time_mapping, ) - evaluation, condition_cursor = asset_condition.evaluate(context) + evaluation, extras = asset_condition.evaluate(context) - new_asset_cursor = asset_cursor.with_updates(context, evaluation) + new_asset_cursor = AssetConditionCursor( + asset_key=asset_key, + previous_max_storage_id=context.new_max_storage_id, + previous_evaluation_timestamp=context.evaluation_time.timestamp(), + previous_evaluation=evaluation, + extras=extras, + ) expected_data_time = get_expected_data_time_for_asset_key( context, will_materialize=evaluation.true_subset.size > 0 @@ -365,24 +326,21 @@ def evaluate( return ( run_requests, self.cursor.with_updates( - latest_storage_id=self.get_new_latest_storage_id(), evaluation_id=self._evaluation_id, + asset_cursors=asset_cursors, newly_observe_requested_asset_keys=[ asset_key for run_request in auto_observe_run_requests for asset_key in cast(Sequence[AssetKey], run_request.asset_selection) ], - observe_request_timestamp=observe_request_timestamp, - evaluations=evaluations, - evaluation_time=self.instance_queryer.evaluation_time, - asset_cursors=asset_cursors, + evaluation_timestamp=self.instance_queryer.evaluation_time.timestamp(), ), # only record evaluations where something changed [ evaluation for evaluation in evaluations if not evaluation.equivalent_to_stored_evaluation( - self.cursor.latest_evaluation_by_asset_key.get(evaluation.asset_key) + self.cursor.get_previous_evaluation(evaluation.asset_key) ) ], ) diff --git a/python_modules/dagster/dagster/_core/definitions/asset_daemon_cursor.py b/python_modules/dagster/dagster/_core/definitions/asset_daemon_cursor.py index 67d584d8ce069..57418da970d9f 100644 --- a/python_modules/dagster/dagster/_core/definitions/asset_daemon_cursor.py +++ b/python_modules/dagster/dagster/_core/definitions/asset_daemon_cursor.py @@ -1,8 +1,10 @@ -import datetime +import base64 +import binascii +import functools +import gzip import json from typing import ( TYPE_CHECKING, - AbstractSet, Mapping, NamedTuple, Optional, @@ -11,52 +13,44 @@ TypeVar, ) 
-import dagster._check as check +from dagster._core.definitions.asset_graph_subset import AssetGraphSubset +from dagster._core.definitions.asset_subset import AssetSubset +from dagster._core.definitions.auto_materialize_rule_evaluation import ( + BackcompatAutoMaterializeAssetEvaluationSerializer, +) from dagster._core.definitions.events import AssetKey -from dagster._core.definitions.time_window_partitions import ( - TimeWindowPartitionsDefinition, - TimeWindowPartitionsSubset, +from dagster._core.definitions.partition import PartitionsDefinition +from dagster._serdes.serdes import ( + _WHITELIST_MAP, + PackableValue, + WhitelistMap, + deserialize_value, + serialize_value, + whitelist_for_serdes, ) -from dagster._serdes.serdes import PackableValue, deserialize_value, serialize_value from .asset_graph import AssetGraph -from .asset_subset import AssetSubset -from .partition import PartitionsDefinition, PartitionsSubset if TYPE_CHECKING: from .asset_condition import AssetCondition, AssetConditionEvaluation, AssetConditionSnapshot - from .asset_condition_evaluation_context import AssetConditionEvaluationContext - -ExtrasDict = Mapping[str, PackableValue] T = TypeVar("T") -def _get_placeholder_missing_condition() -> "AssetCondition": - """Temporary hard-coding of the hash of the "materialize on missing" condition. This will - no longer be necessary once we start serializing the AssetDaemonCursor. - """ - from .asset_condition import RuleCondition - from .auto_materialize_rule import MaterializeOnMissingRule - - return RuleCondition(MaterializeOnMissingRule()) - - -_PLACEHOLDER_HANDLED_SUBSET_KEY = "handled_subset" - - +@whitelist_for_serdes class AssetConditionCursorExtras(NamedTuple): - """Class to represent additional unstructured information that may be tracked by a particular - asset condition. + """Represents additional state that may be optionally saved by an AssetCondition between + evaluations. """ condition_snapshot: "AssetConditionSnapshot" - extras: ExtrasDict + extras: Mapping[str, PackableValue] +@whitelist_for_serdes class AssetConditionCursor(NamedTuple): - """Convenience class to represent the state of an individual asset being handled by the daemon. - In the future, this will be serialized as part of the cursor. + """Represents the evaluated state of an AssetCondition at a certain point in time. This + information can be used to make future evaluations more efficient.
""" asset_key: AssetKey @@ -97,281 +91,219 @@ def get_previous_requested_or_discarded_subset( return AssetSubset.empty(self.asset_key, partitions_def) return self.previous_evaluation.get_requested_or_discarded_subset(condition) - @property - def handled_subset(self) -> Optional[AssetSubset]: - return self.get_extras_value( - condition=_get_placeholder_missing_condition(), - key=_PLACEHOLDER_HANDLED_SUBSET_KEY, - as_type=AssetSubset, - ) - - def with_updates( - self, context: "AssetConditionEvaluationContext", evaluation: "AssetConditionEvaluation" - ) -> "AssetConditionCursor": - newly_materialized_requested_or_discarded_subset = ( - context.materialized_since_previous_tick_subset - | evaluation.get_requested_or_discarded_subset(context.condition) - ) - - handled_subset = ( - self.handled_subset or context.empty_subset() - ) | newly_materialized_requested_or_discarded_subset - - # for now, hard-code the materialized_requested_or_discarded_subset location - return self._replace( - previous_evaluation=evaluation, - extras=[ - AssetConditionCursorExtras( - condition_snapshot=_get_placeholder_missing_condition().snapshot, - extras={_PLACEHOLDER_HANDLED_SUBSET_KEY: handled_subset}, - ) - ], - ) - +@whitelist_for_serdes class AssetDaemonCursor(NamedTuple): - """State that's saved between reconciliation evaluations. + """State that's stored between daemon evaluations. Attributes: - latest_storage_id: - The latest observed storage ID across all assets. Useful for finding out what has - happened since the last tick. - handled_root_asset_keys: - Every entry is a non-partitioned asset with no parents that has been requested by this - sensor, discarded by this sensor, or has been materialized (even if not by this sensor). - handled_root_partitions_by_asset_key: - Every key is a partitioned root asset. Every value is the set of that asset's partitions - that have been requested by this sensor, discarded by this sensor, - or have been materialized (even if not by this sensor). - last_observe_request_timestamp_by_asset_key: - Every key is an observable source asset that has been auto-observed. The value is the - timestamp of the tick that requested the observation. + evaluation_id (int): The ID of the evaluation that produced this cursor. + asset_cursors (Sequence[AssetConditionCursor]): The state of each asset that the daemon + is responsible for handling. 
""" - latest_storage_id: Optional[int] - handled_root_asset_keys: AbstractSet[AssetKey] - handled_root_partitions_by_asset_key: Mapping[AssetKey, PartitionsSubset] evaluation_id: int + asset_cursors: Sequence[AssetConditionCursor] + + # to be deprecated once auto-observation is tied into the rest of the asset condition system last_observe_request_timestamp_by_asset_key: Mapping[AssetKey, float] - latest_evaluation_by_asset_key: Mapping[AssetKey, "AssetConditionEvaluation"] - latest_evaluation_timestamp: Optional[float] - - def was_previously_handled(self, asset_key: AssetKey) -> bool: - return asset_key in self.handled_root_asset_keys - - def asset_cursor_for_key( - self, asset_key: AssetKey, asset_graph: AssetGraph - ) -> AssetConditionCursor: - partitions_def = asset_graph.get_partitions_def(asset_key) - handled_partitions_subset = self.handled_root_partitions_by_asset_key.get(asset_key) - if handled_partitions_subset is not None: - handled_subset = AssetSubset(asset_key=asset_key, value=handled_partitions_subset) - elif asset_key in self.handled_root_asset_keys: - handled_subset = AssetSubset(asset_key=asset_key, value=True) - else: - handled_subset = AssetSubset.empty(asset_key, partitions_def) - - previous_evaluation = self.latest_evaluation_by_asset_key.get(asset_key) - return AssetConditionCursor( - asset_key=asset_key, - previous_evaluation=previous_evaluation, - previous_max_storage_id=self.latest_storage_id, - previous_evaluation_timestamp=self.latest_evaluation_timestamp, - extras=[ - AssetConditionCursorExtras( - condition_snapshot=_get_placeholder_missing_condition().snapshot, - extras={"handled_subset": handled_subset}, - ) - ], + + @staticmethod + def empty(evaluation_id: int) -> "AssetDaemonCursor": + return AssetDaemonCursor( + evaluation_id=evaluation_id, + asset_cursors=[], + last_observe_request_timestamp_by_asset_key={}, ) - def with_updates( - self, - latest_storage_id: Optional[int], - evaluation_id: int, - newly_observe_requested_asset_keys: Sequence[AssetKey], - observe_request_timestamp: float, - evaluations: Sequence["AssetConditionEvaluation"], - evaluation_time: datetime.datetime, - asset_cursors: Sequence[AssetConditionCursor], + @staticmethod + def from_serialized( + raw_cursor: Optional[str], asset_graph: Optional[AssetGraph], default_evaluation_id: int = 0 ) -> "AssetDaemonCursor": - """Returns a cursor that represents this cursor plus the updates that have happened within the - tick. + """Deserializes an AssetDaemonCursor from a string. Provides a backcompat layer for the old + manually-serialized cursor format. 
""" - result_last_observe_request_timestamp_by_asset_key = { - **self.last_observe_request_timestamp_by_asset_key - } - for asset_key in newly_observe_requested_asset_keys: - result_last_observe_request_timestamp_by_asset_key[ - asset_key - ] = observe_request_timestamp - - if latest_storage_id and self.latest_storage_id: - check.invariant( - latest_storage_id >= self.latest_storage_id, - "Latest storage ID should be >= previous latest storage ID", + if raw_cursor is None: + return AssetDaemonCursor.empty(default_evaluation_id) + try: + decoded = base64.b64decode(raw_cursor) + unzipped = gzip.decompress(decoded).decode("utf-8") + except (binascii.Error, gzip.BadGzipFile): + # this cursor was serialized with the old format + return backcompat_deserialize_asset_daemon_cursor_str( + raw_cursor, asset_graph, default_evaluation_id ) + return deserialize_value(unzipped, AssetDaemonCursor) - latest_evaluation_by_asset_key = { - evaluation.asset_key: evaluation for evaluation in evaluations - } - - return AssetDaemonCursor( - latest_storage_id=latest_storage_id or self.latest_storage_id, - handled_root_asset_keys={ - cursor.asset_key - for cursor in asset_cursors - if cursor.handled_subset is not None - and not cursor.handled_subset.is_partitioned - and cursor.handled_subset.bool_value - }, - handled_root_partitions_by_asset_key={ - cursor.asset_key: cursor.handled_subset.subset_value - for cursor in asset_cursors - if cursor.handled_subset is not None and cursor.handled_subset.is_partitioned - }, - evaluation_id=evaluation_id, - last_observe_request_timestamp_by_asset_key=result_last_observe_request_timestamp_by_asset_key, - latest_evaluation_by_asset_key=latest_evaluation_by_asset_key, - latest_evaluation_timestamp=evaluation_time.timestamp(), + def serialize(self) -> str: + """Serializes the cursor into a string. To do so, the cursor is first serialized using the + traditional Dagster serialization process, then gzipped, then base64 encoded, and finally + converted to a string. + """ + return base64.b64encode(gzip.compress(serialize_value(self).encode("utf-8"))).decode( + "utf-8" ) - @classmethod - def empty(cls) -> "AssetDaemonCursor": - return AssetDaemonCursor( - latest_storage_id=None, - handled_root_partitions_by_asset_key={}, - handled_root_asset_keys=set(), - evaluation_id=0, - last_observe_request_timestamp_by_asset_key={}, - latest_evaluation_by_asset_key={}, - latest_evaluation_timestamp=None, - ) + @property + @functools.lru_cache(maxsize=1) + def asset_cursors_by_key(self) -> Mapping[AssetKey, AssetConditionCursor]: + """Efficient lookup of asset cursors by asset key.""" + return {cursor.asset_key: cursor for cursor in self.asset_cursors} + + def get_asset_cursor(self, asset_key: AssetKey) -> AssetConditionCursor: + """Returns the AssetConditionCursor associated with the given asset key. If no stored + cursor exists, returns an empty cursor. 
+ """ + return self.asset_cursors_by_key.get(asset_key) or AssetConditionCursor.empty(asset_key) - @classmethod - def from_serialized(cls, cursor: str, asset_graph: AssetGraph) -> "AssetDaemonCursor": - from .asset_condition import AssetConditionEvaluationWithRunIds - - data = json.loads(cursor) - - if isinstance(data, list): # backcompat - check.invariant(len(data) in [3, 4], "Invalid serialized cursor") - ( - latest_storage_id, - serialized_handled_root_asset_keys, - serialized_handled_root_partitions_by_asset_key, - ) = data[:3] - - evaluation_id = data[3] if len(data) == 4 else 0 - serialized_last_observe_request_timestamp_by_asset_key = {} - serialized_latest_evaluation_by_asset_key = {} - latest_evaluation_timestamp = 0 - else: - latest_storage_id = data["latest_storage_id"] - serialized_handled_root_asset_keys = data["handled_root_asset_keys"] - serialized_handled_root_partitions_by_asset_key = data[ - "handled_root_partitions_by_asset_key" - ] - evaluation_id = data["evaluation_id"] - serialized_last_observe_request_timestamp_by_asset_key = data.get( - "last_observe_request_timestamp_by_asset_key", {} - ) - serialized_latest_evaluation_by_asset_key = data.get( - "latest_evaluation_by_asset_key", {} - ) - latest_evaluation_timestamp = data.get("latest_evaluation_timestamp", 0) - - handled_root_partitions_by_asset_key = {} - for ( - key_str, - serialized_subset, - ) in serialized_handled_root_partitions_by_asset_key.items(): - key = AssetKey.from_user_string(key_str) - if key not in asset_graph.materializable_asset_keys: - continue - - partitions_def = asset_graph.get_partitions_def(key) - if partitions_def is None: - continue - - try: - # in the case that the partitions def has changed, we may not be able to deserialize - # the corresponding subset. in this case, we just use an empty subset - subset = partitions_def.deserialize_subset(serialized_subset) - # this covers the case in which the start date has changed for a time-partitioned - # asset. in reality, we should be using the can_deserialize method but because we - # are not storing the serializable unique id, we can't do that. 
- if ( - isinstance(subset, TimeWindowPartitionsSubset) - and isinstance(partitions_def, TimeWindowPartitionsDefinition) - and any( - time_window.start < partitions_def.start - for time_window in subset.included_time_windows - ) - ): - subset = partitions_def.empty_subset() - except: - subset = partitions_def.empty_subset() - handled_root_partitions_by_asset_key[key] = subset - - latest_evaluation_by_asset_key = {} - for key_str, serialized_evaluation in serialized_latest_evaluation_by_asset_key.items(): - key = AssetKey.from_user_string(key_str) - deserialized_evaluation = deserialize_value(serialized_evaluation) - if isinstance(deserialized_evaluation, AssetConditionEvaluationWithRunIds): - evaluation = deserialized_evaluation.evaluation - else: - evaluation = deserialized_evaluation - latest_evaluation_by_asset_key[key] = evaluation - - return cls( - latest_storage_id=latest_storage_id, - handled_root_asset_keys={ - AssetKey.from_user_string(key_str) for key_str in serialized_handled_root_asset_keys - }, - handled_root_partitions_by_asset_key=handled_root_partitions_by_asset_key, + def get_previous_evaluation(self, asset_key: AssetKey) -> Optional["AssetConditionEvaluation"]: + """Returns the previous AssetConditionEvaluation for a given asset key, if it exists.""" + cursor = self.get_asset_cursor(asset_key) + return cursor.previous_evaluation if cursor else None + + def with_updates( + self, + evaluation_id: int, + evaluation_timestamp: float, + newly_observe_requested_asset_keys: Sequence[AssetKey], + asset_cursors: Sequence[AssetConditionCursor], + ) -> "AssetDaemonCursor": + return self._replace( evaluation_id=evaluation_id, + asset_cursors=asset_cursors, last_observe_request_timestamp_by_asset_key={ - AssetKey.from_user_string(key_str): timestamp - for key_str, timestamp in serialized_last_observe_request_timestamp_by_asset_key.items() + **self.last_observe_request_timestamp_by_asset_key, + **{ + asset_key: evaluation_timestamp + for asset_key in newly_observe_requested_asset_keys + }, }, - latest_evaluation_by_asset_key=latest_evaluation_by_asset_key, - latest_evaluation_timestamp=latest_evaluation_timestamp, ) - @classmethod - def get_evaluation_id_from_serialized(cls, cursor: str) -> Optional[int]: - data = json.loads(cursor) - if isinstance(data, list): # backcompat - check.invariant(len(data) in [3, 4], "Invalid serialized cursor") - return data[3] if len(data) == 4 else None - else: - return data["evaluation_id"] + def __hash__(self) -> int: + return hash(id(self)) + + +# BACKCOMPAT + + +def get_backcompat_asset_condition_cursor( + asset_key: AssetKey, + latest_storage_id: Optional[int], + latest_timestamp: Optional[float], + latest_evaluation: Optional["AssetConditionEvaluation"], + handled_root_subset: Optional[AssetSubset], +) -> AssetConditionCursor: + """Generates an AssetDaemonCursor from information available on the old cursor format.""" + from dagster._core.definitions.asset_condition import RuleCondition + from dagster._core.definitions.auto_materialize_rule import MaterializeOnMissingRule + + return AssetConditionCursor( + asset_key=asset_key, + previous_evaluation=latest_evaluation, + previous_evaluation_timestamp=latest_timestamp, + previous_max_storage_id=latest_storage_id, + extras=[ + # the only information we need to preserve from the previous cursor is the handled + # subset + AssetConditionCursorExtras( + condition_snapshot=RuleCondition(MaterializeOnMissingRule()).snapshot, + extras={MaterializeOnMissingRule.HANDLED_SUBSET_KEY: handled_root_subset}, + ) + 
], + ) - def serialize(self) -> str: - serializable_handled_root_partitions_by_asset_key = { - key.to_user_string(): subset.serialize() - for key, subset in self.handled_root_partitions_by_asset_key.items() - } - serialized = json.dumps( - { - "latest_storage_id": self.latest_storage_id, - "handled_root_asset_keys": [ - key.to_user_string() for key in self.handled_root_asset_keys - ], - "handled_root_partitions_by_asset_key": ( - serializable_handled_root_partitions_by_asset_key + +def backcompat_deserialize_asset_daemon_cursor_str( + cursor_str: str, asset_graph: Optional[AssetGraph], default_evaluation_id: int +) -> AssetDaemonCursor: + """This serves as a backcompat layer for deserializing the old cursor format. Some information + is impossible to fully recover, but this will recover enough to continue operating as normal. + """ + from .asset_condition import AssetConditionEvaluationWithRunIds + + data = json.loads(cursor_str) + + if isinstance(data, list): + return AssetDaemonCursor.empty(data[-1]) + elif not isinstance(data, dict): + return AssetDaemonCursor.empty(default_evaluation_id) + + serialized_last_observe_request_timestamp_by_asset_key = data.get( + "last_observe_request_timestamp_by_asset_key", {} + ) + last_observe_request_timestamp_by_asset_key = { + AssetKey.from_user_string(key_str): timestamp + for key_str, timestamp in serialized_last_observe_request_timestamp_by_asset_key.items() + } + + partition_subsets_by_asset_key = {} + for key_str, serialized_str in data.get("handled_root_partitions_by_asset_key", {}).items(): + asset_key = AssetKey.from_user_string(key_str) + partitions_def = asset_graph.get_partitions_def(asset_key) if asset_graph else None + if not partitions_def: + continue + try: + partition_subsets_by_asset_key[asset_key] = partitions_def.deserialize_subset( + serialized_str + ) + except: + continue + + handled_root_asset_graph_subset = AssetGraphSubset( + non_partitioned_asset_keys={ + AssetKey.from_user_string(key_str) + for key_str in data.get("handled_root_asset_keys", set()) + }, + partitions_subsets_by_asset_key=partition_subsets_by_asset_key, + ) + + serialized_latest_evaluation_by_asset_key = data.get("latest_evaluation_by_asset_key", {}) + latest_evaluation_by_asset_key = {} + for key_str, serialized_evaluation in serialized_latest_evaluation_by_asset_key.items(): + key = AssetKey.from_user_string(key_str) + + class BackcompatDeserializer(BackcompatAutoMaterializeAssetEvaluationSerializer): + @property + def partitions_def(self) -> Optional[PartitionsDefinition]: + return asset_graph.get_partitions_def(key) if asset_graph else None + + # create a new WhitelistMap that can deserialize SerializedPartitionsSubset objects stored + # on the old cursor format + whitelist_map = WhitelistMap( + object_serializers=_WHITELIST_MAP.object_serializers, + object_deserializers={ + **_WHITELIST_MAP.object_deserializers, + "AutoMaterializeAssetEvaluation": BackcompatDeserializer( + klass=AssetConditionEvaluationWithRunIds ), - "evaluation_id": self.evaluation_id, - "last_observe_request_timestamp_by_asset_key": { - key.to_user_string(): timestamp - for key, timestamp in self.last_observe_request_timestamp_by_asset_key.items() - }, - "latest_evaluation_by_asset_key": { - key.to_user_string(): serialize_value(evaluation) - for key, evaluation in self.latest_evaluation_by_asset_key.items() - }, - "latest_evaluation_timestamp": self.latest_evaluation_timestamp, - } + }, + enum_serializers=_WHITELIST_MAP.enum_serializers, ) - return serialized + + # these string cursors
will contain AutoMaterializeAssetEvaluation objects, which get + # deserialized into AssetConditionEvaluationWithRunIds, not AssetConditionEvaluation + evaluation = deserialize_value( + serialized_evaluation, AssetConditionEvaluationWithRunIds, whitelist_map=whitelist_map + ).evaluation + latest_evaluation_by_asset_key[key] = evaluation + + asset_cursors = [] + for asset_key, latest_evaluation in latest_evaluation_by_asset_key.items(): + asset_cursors.append( + get_backcompat_asset_condition_cursor( + asset_key, + data.get("latest_storage_id"), + data.get("latest_timestamp"), + latest_evaluation, + handled_root_asset_graph_subset.get_asset_subset(asset_key, asset_graph) + if asset_graph + else None, + ) + ) + + return AssetDaemonCursor( + evaluation_id=default_evaluation_id, + asset_cursors=asset_cursors, + last_observe_request_timestamp_by_asset_key=last_observe_request_timestamp_by_asset_key, + ) diff --git a/python_modules/dagster/dagster/_core/definitions/asset_subset.py b/python_modules/dagster/dagster/_core/definitions/asset_subset.py index d41ad43a43dac..2f59f002b59a6 100644 --- a/python_modules/dagster/dagster/_core/definitions/asset_subset.py +++ b/python_modules/dagster/dagster/_core/definitions/asset_subset.py @@ -17,6 +17,10 @@ PartitionsDefinition, PartitionsSubset, ) +from dagster._core.definitions.time_window_partitions import ( + TimeWindowPartitionsDefinition, + TimeWindowPartitionsSubset, +) from dagster._serdes.serdes import NamedTupleSerializer, whitelist_for_serdes if TYPE_CHECKING: @@ -120,12 +124,47 @@ def from_asset_partitions_set( ), ) + def valid_subset(self, partitions_def: Optional[PartitionsDefinition]) -> "AssetSubset": + """Returns the asset subset representing only the asset partitions which are valid for the + current partitions def. Because AssetSubsets may be serialized at a point in time when the + set of valid partitions is different from the current set of valid partitions, this check + is sometimes necessary. 
+ """ + if self.is_partitioned: + if partitions_def is None: + return AssetSubset.empty(self.asset_key, partitions_def) + # handle the case where a TimeWindowPartitionsDefinition has changed between + # serialization time and now + elif isinstance(self.value, TimeWindowPartitionsSubset): + if ( + not isinstance(partitions_def, TimeWindowPartitionsDefinition) + or partitions_def.cron_schedule != self.value.partitions_def.cron_schedule + or any( + time_window.start < partitions_def.start + for time_window in self.value.included_time_windows + ) + ): + return AssetSubset.empty(self.asset_key, partitions_def) + else: + return self + # TODO: handle more cases here + else: + return self + else: + return ( + self + if partitions_def is None + # was previously unpartitioned, is now partitioned + else AssetSubset.empty(self.asset_key, partitions_def) + ) + def inverse( self, partitions_def: Optional[PartitionsDefinition], current_time: Optional[datetime.datetime] = None, dynamic_partitions_store: Optional["DynamicPartitionsStore"] = None, ) -> "AssetSubset": + """Returns the AssetSubset containing all asset partitions which are not in this AssetSubset.""" if partitions_def is None: return self._replace(value=not self.bool_value) else: diff --git a/python_modules/dagster/dagster/_core/definitions/auto_materialize_rule.py b/python_modules/dagster/dagster/_core/definitions/auto_materialize_rule.py index 012dc7727ec4e..26ab44fb39901 100644 --- a/python_modules/dagster/dagster/_core/definitions/auto_materialize_rule.py +++ b/python_modules/dagster/dagster/_core/definitions/auto_materialize_rule.py @@ -32,7 +32,9 @@ freshness_evaluation_results_for_asset_key, ) from dagster._core.definitions.multi_dimensional_partitions import MultiPartitionsDefinition -from dagster._core.definitions.time_window_partitions import get_time_partitions_def +from dagster._core.definitions.time_window_partitions import ( + get_time_partitions_def, +) from dagster._core.storage.dagster_run import RunsFilter from dagster._core.storage.tags import AUTO_MATERIALIZE_TAG from dagster._serdes.serdes import ( @@ -109,7 +111,7 @@ def add_evaluation_data_from_previous_tick( # we've explicitly said to ignore it ignore_subset = has_metadata_subset | ignore_subset - for elt in context.previous_subsets_with_metadata or []: + for elt in context.previous_subsets_with_metadata: carry_forward_subset = elt.subset - ignore_subset if carry_forward_subset.size > 0: mapping[elt.frozen_metadata] |= carry_forward_subset @@ -117,11 +119,12 @@ def add_evaluation_data_from_previous_tick( # for now, an asset is in the "true" subset if and only if we have some metadata for it true_subset = reduce(operator.or_, mapping.values(), context.empty_subset()) return ( - true_subset, + context.candidate_subset & true_subset, [ AssetSubsetWithMetadata(subset, dict(metadata)) for metadata, subset in mapping.items() ], + {}, ) @abstractmethod @@ -308,7 +311,7 @@ def missed_cron_ticks( self, context: AssetConditionEvaluationContext ) -> Sequence[datetime.datetime]: """Returns the cron ticks which have been missed since the previous cursor was generated.""" - if not context.previous_evaluation_timestamp: + if not context.cursor.previous_evaluation_timestamp: previous_dt = next( reverse_cron_string_iterator( end_timestamp=context.evaluation_time.timestamp(), @@ -319,7 +322,7 @@ def missed_cron_ticks( return [previous_dt] missed_ticks = [] for dt in cron_string_iterator( - start_timestamp=context.previous_evaluation_timestamp, +
start_timestamp=context.cursor.previous_evaluation_timestamp, cron_string=self.cron_schedule, execution_timezone=self.timezone, ): @@ -400,7 +403,7 @@ def evaluate_for_asset(self, context: AssetConditionEvaluationContext) -> RuleEv - context.materialized_requested_or_discarded_since_previous_tick_subset ) - return asset_subset_to_request, [] + return asset_subset_to_request, [], {} @whitelist_for_serdes @@ -598,6 +601,8 @@ def evaluate_for_asset(self, context: AssetConditionEvaluationContext) -> RuleEv @whitelist_for_serdes class MaterializeOnMissingRule(AutoMaterializeRule, NamedTuple("_MaterializeOnMissingRule", [])): + HANDLED_SUBSET_KEY: str = "handled_subset" + @property def decision_type(self) -> AutoMaterializeDecisionType: return AutoMaterializeDecisionType.MATERIALIZE @@ -608,28 +613,26 @@ def description(self) -> str: def evaluate_for_asset(self, context: AssetConditionEvaluationContext) -> RuleEvaluationResults: """Evaluates the set of asset partitions for this asset which are missing and were not - previously discarded. Currently only applies to root asset partitions and asset partitions - with updated parents. + previously discarded. """ - missing_asset_partitions = set( - context.never_materialized_requested_or_discarded_root_subset.asset_partitions + handled_subset = ( + ( + context.cursor.get_extras_value( + context.condition, self.HANDLED_SUBSET_KEY, AssetSubset + ) + or context.empty_subset() + ).valid_subset(context.partitions_def) + | context.previous_tick_requested_subset + | context.materialized_since_previous_tick_subset ) - # in addition to missing root asset partitions, check any asset partitions with updated - # parents to see if they're missing - for candidate in context.candidate_parent_has_or_will_update_subset.asset_partitions: - if not context.instance_queryer.asset_partition_has_materialization_or_observation( - candidate - ): - missing_asset_partitions |= {candidate} - - newly_missing_subset = AssetSubset.from_asset_partitions_set( - context.asset_key, context.partitions_def, missing_asset_partitions + unhandled_subset = handled_subset.inverse( + context.partitions_def, context.evaluation_time, context.instance_queryer ) - missing_subset = newly_missing_subset | ( - context.previous_true_subset - - context.materialized_requested_or_discarded_since_previous_tick_subset + return ( + context.candidate_subset & unhandled_subset, + [], + {self.HANDLED_SUBSET_KEY: handled_subset}, ) - return missing_subset, [] @whitelist_for_serdes @@ -870,14 +873,14 @@ def evaluate_for_asset(self, context: AssetConditionEvaluationContext) -> RuleEv ).get_asset_subset(context.asset_key, context.asset_graph) if backfilling_subset.size == 0: - return context.empty_subset(), [] + return context.empty_subset(), [], {} if self.all_partitions: true_subset = context.candidate_subset else: true_subset = context.candidate_subset & backfilling_subset - return true_subset, [] + return true_subset, [], {} @whitelist_for_serdes @@ -901,6 +904,10 @@ def evaluate_for_asset(self, context: AssetConditionEvaluationContext) -> RuleEv )[self.limit :] ) - return AssetSubset.from_asset_partitions_set( - context.asset_key, context.partitions_def, rate_limited_asset_partitions - ), [] + return ( + AssetSubset.from_asset_partitions_set( + context.asset_key, context.partitions_def, rate_limited_asset_partitions + ), + [], + {}, + ) diff --git a/python_modules/dagster/dagster/_core/definitions/auto_materialize_rule_evaluation.py 
b/python_modules/dagster/dagster/_core/definitions/auto_materialize_rule_evaluation.py index 8d79a567fe617..1601b639c7f3b 100644 --- a/python_modules/dagster/dagster/_core/definitions/auto_materialize_rule_evaluation.py +++ b/python_modules/dagster/dagster/_core/definitions/auto_materialize_rule_evaluation.py @@ -9,10 +9,12 @@ AbstractSet, Dict, FrozenSet, + Mapping, NamedTuple, Optional, Sequence, Tuple, + Union, cast, ) @@ -22,13 +24,14 @@ from dagster._core.definitions.metadata import MetadataMapping, MetadataValue from dagster._serdes.serdes import ( NamedTupleSerializer, + PackableValue, UnpackContext, UnpackedValue, WhitelistMap, whitelist_for_serdes, ) -from .partition import DefaultPartitionsSubset, SerializedPartitionsSubset +from .partition import DefaultPartitionsSubset, PartitionsDefinition, SerializedPartitionsSubset if TYPE_CHECKING: from dagster._core.definitions.asset_condition import AssetSubsetWithMetadata @@ -124,7 +127,9 @@ def metadata(self) -> MetadataMapping: } -RuleEvaluationResults = Tuple[AssetSubset, Sequence["AssetSubsetWithMetadata"]] +RuleEvaluationResults = Tuple[ + AssetSubset, Sequence["AssetSubsetWithMetadata"], Mapping[str, PackableValue] +] @whitelist_for_serdes @@ -141,6 +146,41 @@ class BackcompatAutoMaterializeAssetEvaluationSerializer(NamedTupleSerializer): AssetConditionEvaluationWithRunIds objects. """ + @property + def partitions_def(self) -> Optional[PartitionsDefinition]: + """We may override this property in subclasses that are created at a point in time where + we know what the current partitions definition is. If we don't know, then we will be unable + to deserialize any SerializedPartitionsSubset objects. + """ + return None + + def _get_empty_subset(self, asset_key: AssetKey, is_partitioned: bool) -> AssetSubset: + # We know this asset is partitioned, but we don't know what its partitions def is, so we + # just use a DefaultPartitionsSubset + if is_partitioned and self.partitions_def is None: + return AssetSubset(asset_key, DefaultPartitionsSubset(set())) + else: + return AssetSubset.empty(asset_key, self.partitions_def) + + def deserialize_serialized_partitions_subset_or_none( + self, + asset_key: AssetKey, + is_partitioned: bool, + serialized: Union[None, SerializedPartitionsSubset], + ) -> AssetSubset: + if serialized is None: + # Confusingly, we used `None` to indicate "all of an unpartitioned asset" in the old + # serialization scheme + return AssetSubset(asset_key, True) + elif self.partitions_def is None or not serialized.can_deserialize(self.partitions_def): + # If we don't know the partitions def, then we can't deserialize the partitions subset, + # so we just use an empty one instead. 
+ return self._get_empty_subset(asset_key, is_partitioned) + else: + # We are in an instance of this class that knows the partitions def, so we can + # deserialize the partitions subset + return AssetSubset(asset_key, serialized.deserialize(self.partitions_def)) + def _asset_condition_snapshot_from_rule_snapshot( self, rule_snapshot: AutoMaterializeRuleSnapshot ) -> "AssetConditionSnapshot": @@ -171,28 +211,28 @@ def _get_child_rule_evaluation( condition_snapshot = self._asset_condition_snapshot_from_rule_snapshot(rule_snapshot) - if is_partitioned: - # for partitioned assets, we can't deserialize SerializedPartitionsSubset into an - # AssetSubset, so we just return a dummy empty default partition subset - value = DefaultPartitionsSubset(set()) - else: - value = len(partition_subsets_by_condition) > 0 + subsets_with_metadata = [ + AssetSubsetWithMetadata( + subset=self.deserialize_serialized_partitions_subset_or_none( + asset_key, is_partitioned, serialized + ), + metadata=rule_evaluation.evaluation_data.metadata, + ) + for rule_evaluation, serialized in partition_subsets_by_condition + if rule_evaluation.evaluation_data + ] - true_subset = AssetSubset(asset_key, value) + true_subset = self._get_empty_subset(asset_key, is_partitioned) + for _, serialized in partition_subsets_by_condition: + true_subset |= self.deserialize_serialized_partitions_subset_or_none( + asset_key, is_partitioned, serialized + ) return AssetConditionEvaluation( condition_snapshot=condition_snapshot, true_subset=true_subset, candidate_subset=None, - subsets_with_metadata=[] - if is_partitioned - else [ - AssetSubsetWithMetadata( - subset=true_subset, metadata=rule_evaluation.evaluation_data.metadata - ) - for rule_evaluation, _ in partition_subsets_by_condition - if rule_evaluation.evaluation_data - ], + subsets_with_metadata=subsets_with_metadata, ) def _get_child_decision_type_evaluation( @@ -239,7 +279,7 @@ def _get_child_decision_type_evaluation( ] unique_id = hashlib.md5("".join(unique_id_parts).encode()).hexdigest() decision_type_snapshot = AssetConditionSnapshot( - class_name=OrAssetCondition.__name__, description="", unique_id=unique_id + class_name=OrAssetCondition.__name__, description="Any of", unique_id=unique_id ) initial = ( AssetSubset(asset_key, DefaultPartitionsSubset(set())) @@ -267,7 +307,7 @@ def _get_child_decision_type_evaluation( unique_id = hashlib.md5("".join(unique_id_parts).encode()).hexdigest() return AssetConditionEvaluation( condition_snapshot=AssetConditionSnapshot( - class_name=NotAssetCondition.__name__, description="", unique_id=unique_id + class_name=NotAssetCondition.__name__, description="Not", unique_id=unique_id ), # for partitioned assets, we don't bother calculating the true subset, as we can't # properly deserialize the inner results @@ -337,7 +377,7 @@ def unpack( ] unique_id = hashlib.md5("".join(unique_id_parts).encode()).hexdigest() condition_snapshot = AssetConditionSnapshot( - class_name=AndAssetCondition.__name__, description="", unique_id=unique_id + class_name=AndAssetCondition.__name__, description="All of", unique_id=unique_id ) return AssetConditionEvaluation( diff --git a/python_modules/dagster/dagster/_core/definitions/freshness_based_auto_materialize.py b/python_modules/dagster/dagster/_core/definitions/freshness_based_auto_materialize.py index 97c9a6fa86808..99ff47c7a4b39 100644 --- a/python_modules/dagster/dagster/_core/definitions/freshness_based_auto_materialize.py +++ b/python_modules/dagster/dagster/_core/definitions/freshness_based_auto_materialize.py @@ 
-168,7 +168,7 @@ def freshness_evaluation_results_for_asset_key( if not context.asset_graph.get_downstream_freshness_policies( asset_key=asset_key ) or context.asset_graph.is_partitioned(asset_key): - return context.empty_subset(), [] + return context.empty_subset(), [], {} # figure out the current contents of this asset current_data_time = context.data_time_resolver.get_current_data_time(asset_key, current_time) @@ -181,7 +181,7 @@ def freshness_evaluation_results_for_asset_key( # if executing the asset on this tick would not change its data time, then return if current_data_time == expected_data_time: - return context.empty_subset(), [] + return context.empty_subset(), [], {} # calculate the data times you would expect after all currently-executing runs # were to successfully complete @@ -220,8 +220,10 @@ def freshness_evaluation_results_for_asset_key( and evaluation_data is not None ): all_subset = AssetSubset.all(asset_key, None) - return AssetSubset.all(asset_key, None), [ - AssetSubsetWithMetadata(all_subset, evaluation_data.metadata) - ] + return ( + AssetSubset.all(asset_key, None), + [AssetSubsetWithMetadata(all_subset, evaluation_data.metadata)], + {}, + ) else: - return context.empty_subset(), [] + return context.empty_subset(), [], {} diff --git a/python_modules/dagster/dagster/_daemon/asset_daemon.py b/python_modules/dagster/dagster/_daemon/asset_daemon.py index 4ab51b0f92ff5..b72755595c9d6 100644 --- a/python_modules/dagster/dagster/_daemon/asset_daemon.py +++ b/python_modules/dagster/dagster/_daemon/asset_daemon.py @@ -97,7 +97,7 @@ def get_current_evaluation_id( else None ) - return AssetDaemonCursor.get_evaluation_id_from_serialized(raw_cursor) if raw_cursor else None + return AssetDaemonCursor.from_serialized(raw_cursor, None).evaluation_id if raw_cursor else None class AutoMaterializeLaunchContext: @@ -272,11 +272,7 @@ def _run_iteration_impl( ) raw_cursor = _get_raw_cursor(instance) - stored_cursor = ( - AssetDaemonCursor.from_serialized(raw_cursor, asset_graph) - if raw_cursor - else AssetDaemonCursor.empty() - ) + stored_cursor = AssetDaemonCursor.from_serialized(raw_cursor, asset_graph) tick_retention_settings = instance.get_tick_retention_settings( InstigatorType.AUTO_MATERIALIZE diff --git a/python_modules/dagster/dagster_tests/definitions_tests/auto_materialize_tests/asset_daemon_scenario.py b/python_modules/dagster/dagster_tests/definitions_tests/auto_materialize_tests/asset_daemon_scenario.py index 11e1641b221a9..95a141341e5ee 100644 --- a/python_modules/dagster/dagster_tests/definitions_tests/auto_materialize_tests/asset_daemon_scenario.py +++ b/python_modules/dagster/dagster_tests/definitions_tests/auto_materialize_tests/asset_daemon_scenario.py @@ -187,7 +187,7 @@ class AssetDaemonScenarioState(NamedTuple): asset_specs: Sequence[Union[AssetSpec, AssetSpecWithPartitionsDef]] current_time: datetime.datetime = pendulum.now() run_requests: Sequence[RunRequest] = [] - serialized_cursor: str = AssetDaemonCursor.empty().serialize() + serialized_cursor: str = AssetDaemonCursor.empty(0).serialize() evaluations: Sequence[AssetConditionEvaluation] = [] logger: logging.Logger = logging.getLogger("dagster.amp") tick_index: int = 1 @@ -254,6 +254,9 @@ def with_asset_properties( new_asset_specs.append(spec) return self._replace(asset_specs=new_asset_specs) + def with_serialized_cursor(self, serialized_cursor: str) -> "AssetDaemonScenarioState": + return self._replace(serialized_cursor=serialized_cursor) + def with_all_eager( self, max_materializations_per_minute: int = 1 ) 
-> "AssetDaemonScenarioState": @@ -378,9 +381,7 @@ def _evaluate_tick_daemon( ) ) new_cursor = AssetDaemonCursor.from_serialized( - self.instance.daemon_cursor_storage.get_cursor_values({CURSOR_KEY}).get( - CURSOR_KEY, AssetDaemonCursor.empty().serialize() - ), + self.instance.daemon_cursor_storage.get_cursor_values({CURSOR_KEY}).get(CURSOR_KEY), self.asset_graph, ) new_run_requests = [ @@ -402,9 +403,9 @@ def _evaluate_tick_daemon( ] return new_run_requests, new_cursor, new_evaluations - def evaluate_tick(self) -> "AssetDaemonScenarioState": + def evaluate_tick(self, label: Optional[str] = None) -> "AssetDaemonScenarioState": self.logger.critical("********************************") - self.logger.critical(f"EVALUATING TICK {self.tick_index}") + self.logger.critical(f"EVALUATING TICK {label or self.tick_index}") self.logger.critical("********************************") with pendulum.test(self.current_time): if self.is_daemon: diff --git a/python_modules/dagster/dagster_tests/definitions_tests/auto_materialize_tests/test_scenarios.py b/python_modules/dagster/dagster_tests/definitions_tests/auto_materialize_tests/test_scenarios.py index 331bb7354a520..2177e17644516 100644 --- a/python_modules/dagster/dagster_tests/definitions_tests/auto_materialize_tests/test_scenarios.py +++ b/python_modules/dagster/dagster_tests/definitions_tests/auto_materialize_tests/test_scenarios.py @@ -3,6 +3,7 @@ from .asset_daemon_scenario import AssetDaemonScenario from .updated_scenarios.basic_scenarios import basic_scenarios from .updated_scenarios.cron_scenarios import cron_scenarios +from .updated_scenarios.cursor_migration_scenarios import cursor_migration_scenarios from .updated_scenarios.freshness_policy_scenarios import freshness_policy_scenarios from .updated_scenarios.latest_materialization_run_tag_scenarios import ( latest_materialization_run_tag_scenarios, @@ -15,6 +16,7 @@ + freshness_policy_scenarios + partition_scenarios + latest_materialization_run_tag_scenarios + + cursor_migration_scenarios ) diff --git a/python_modules/dagster/dagster_tests/definitions_tests/auto_materialize_tests/updated_scenarios/basic_scenarios.py b/python_modules/dagster/dagster_tests/definitions_tests/auto_materialize_tests/updated_scenarios/basic_scenarios.py index 293412db388ab..0c6071c76a225 100644 --- a/python_modules/dagster/dagster_tests/definitions_tests/auto_materialize_tests/updated_scenarios/basic_scenarios.py +++ b/python_modules/dagster/dagster_tests/definitions_tests/auto_materialize_tests/updated_scenarios/basic_scenarios.py @@ -166,7 +166,7 @@ .evaluate_tick() .assert_requested_runs() .with_runs(run_request(["A"])) - .evaluate_tick() + .evaluate_tick("a") .assert_requested_runs(run_request(["C"])) .assert_evaluation( "C", diff --git a/python_modules/dagster/dagster_tests/definitions_tests/auto_materialize_tests/updated_scenarios/cursor_migration_scenarios.py b/python_modules/dagster/dagster_tests/definitions_tests/auto_materialize_tests/updated_scenarios/cursor_migration_scenarios.py new file mode 100644 index 0000000000000..91a8fe9be3be0 --- /dev/null +++ b/python_modules/dagster/dagster_tests/definitions_tests/auto_materialize_tests/updated_scenarios/cursor_migration_scenarios.py @@ -0,0 +1,54 @@ +from dagster import ( + AutoMaterializeRule, +) +from dagster._core.definitions.auto_materialize_rule import DiscardOnMaxMaterializationsExceededRule + +from ..asset_daemon_scenario import ( + AssetDaemonScenario, + AssetRuleEvaluationSpec, + day_partition_key, +) +from ..base_scenario import ( + run_request, +) 
+from .asset_daemon_scenario_states import (
+    daily_partitions_def,
+    one_asset,
+    time_partitions_start_str,
+)
+
+cursor_migration_scenarios = [
+    AssetDaemonScenario(
+        id="one_asset_daily_partitions_never_materialized_respect_discards_migrate_after_discard",
+        initial_state=one_asset.with_asset_properties(partitions_def=daily_partitions_def)
+        .with_current_time(time_partitions_start_str)
+        .with_current_time_advanced(days=30, hours=4)
+        .with_all_eager(),
+        execution_fn=lambda state: state.evaluate_tick()
+        .assert_requested_runs(
+            run_request(asset_keys=["A"], partition_key=day_partition_key(state.current_time))
+        )
+        .assert_evaluation(
+            "A",
+            [
+                AssetRuleEvaluationSpec(
+                    AutoMaterializeRule.materialize_on_missing(),
+                    [day_partition_key(state.current_time, delta=-i) for i in range(30)],
+                ),
+                AssetRuleEvaluationSpec(
+                    DiscardOnMaxMaterializationsExceededRule(limit=1),
+                    [day_partition_key(state.current_time, delta=-i) for i in range(1, 30)],
+                ),
+            ],
+            num_requested=1,
+        )
+        .with_serialized_cursor(
+            # this cursor was generated by running the above scenario before the cursor format changes in this diff
+            """{"latest_storage_id": null, "handled_root_asset_keys": [], "handled_root_partitions_by_asset_key": {"A": "{\\"version\\": 1, \\"time_windows\\": [[1357344000.0, 1359936000.0]], \\"num_partitions\\": 30}"}, "evaluation_id": 1, "last_observe_request_timestamp_by_asset_key": {}, "latest_evaluation_by_asset_key": {"A": "{\\"__class__\\": \\"AutoMaterializeAssetEvaluation\\", \\"asset_key\\": {\\"__class__\\": \\"AssetKey\\", \\"path\\": [\\"A\\"]}, \\"num_discarded\\": 29, \\"num_requested\\": 1, \\"num_skipped\\": 0, \\"partition_subsets_by_condition\\": [[{\\"__class__\\": \\"AutoMaterializeRuleEvaluation\\", \\"evaluation_data\\": null, \\"rule_snapshot\\": {\\"__class__\\": \\"AutoMaterializeRuleSnapshot\\", \\"class_name\\": \\"MaterializeOnMissingRule\\", \\"decision_type\\": {\\"__enum__\\": \\"AutoMaterializeDecisionType.MATERIALIZE\\"}, \\"description\\": \\"materialization is missing\\"}}, {\\"__class__\\": \\"SerializedPartitionsSubset\\", \\"serialized_partitions_def_class_name\\": \\"DailyPartitionsDefinition\\", \\"serialized_partitions_def_unique_id\\": \\"809725ad60ffac0302d5c81f6e45865e21ec0b85\\", \\"serialized_subset\\": \\"{\\\\\\"version\\\\\\": 1, \\\\\\"time_windows\\\\\\": [[1357344000.0, 1359936000.0]], \\\\\\"num_partitions\\\\\\": 30}\\"}], [{\\"__class__\\": \\"AutoMaterializeRuleEvaluation\\", \\"evaluation_data\\": null, \\"rule_snapshot\\": {\\"__class__\\": \\"AutoMaterializeRuleSnapshot\\", \\"class_name\\": \\"DiscardOnMaxMaterializationsExceededRule\\", \\"decision_type\\": {\\"__enum__\\": \\"AutoMaterializeDecisionType.DISCARD\\"}, \\"description\\": \\"exceeds 1 materialization(s) per minute\\"}}, {\\"__class__\\": \\"SerializedPartitionsSubset\\", \\"serialized_partitions_def_class_name\\": \\"DailyPartitionsDefinition\\", \\"serialized_partitions_def_unique_id\\": \\"809725ad60ffac0302d5c81f6e45865e21ec0b85\\", \\"serialized_subset\\": \\"{\\\\\\"version\\\\\\": 1, \\\\\\"time_windows\\\\\\": [[1357344000.0, 1359849600.0]], \\\\\\"num_partitions\\\\\\": 29}\\"}]], \\"rule_snapshots\\": [{\\"__class__\\": \\"AutoMaterializeRuleSnapshot\\", \\"class_name\\": \\"MaterializeOnMissingRule\\", \\"decision_type\\": {\\"__enum__\\": \\"AutoMaterializeDecisionType.MATERIALIZE\\"}, \\"description\\": \\"materialization is missing\\"}, {\\"__class__\\": \\"AutoMaterializeRuleSnapshot\\", \\"class_name\\": \\"SkipOnParentMissingRule\\", \\"decision_type\\": {\\"__enum__\\": \\"AutoMaterializeDecisionType.SKIP\\"}, \\"description\\": \\"waiting on upstream data to be present\\"}, {\\"__class__\\": \\"AutoMaterializeRuleSnapshot\\", \\"class_name\\": \\"MaterializeOnRequiredForFreshnessRule\\", \\"decision_type\\": {\\"__enum__\\": \\"AutoMaterializeDecisionType.MATERIALIZE\\"}, \\"description\\": \\"required to meet this or downstream asset's freshness policy\\"}, {\\"__class__\\": \\"AutoMaterializeRuleSnapshot\\", \\"class_name\\": \\"SkipOnParentOutdatedRule\\", \\"decision_type\\": {\\"__enum__\\": \\"AutoMaterializeDecisionType.SKIP\\"}, \\"description\\": \\"waiting on upstream data to be up to date\\"}, {\\"__class__\\": \\"AutoMaterializeRuleSnapshot\\", \\"class_name\\": \\"SkipOnRequiredButNonexistentParentsRule\\", \\"decision_type\\": {\\"__enum__\\": \\"AutoMaterializeDecisionType.SKIP\\"}, \\"description\\": \\"required parent partitions do not exist\\"}, {\\"__class__\\": \\"AutoMaterializeRuleSnapshot\\", \\"class_name\\": \\"MaterializeOnParentUpdatedRule\\", \\"decision_type\\": {\\"__enum__\\": \\"AutoMaterializeDecisionType.MATERIALIZE\\"}, \\"description\\": \\"upstream data has changed since latest materialization\\"}, {\\"__class__\\": \\"AutoMaterializeRuleSnapshot\\", \\"class_name\\": \\"SkipOnBackfillInProgressRule\\", \\"decision_type\\": {\\"__enum__\\": \\"AutoMaterializeDecisionType.SKIP\\"}, \\"description\\": \\"targeted by an in-progress backfill\\"}], \\"run_ids\\": {\\"__set__\\": []}}"}, "latest_evaluation_timestamp": 1359950400.0}
+            """
+        )
+        .evaluate_tick("a")
+        # the migrated cursor "remembers" that these partitions were discarded, so no new runs are requested
+        .assert_requested_runs(),
+    ),
+]
diff --git a/python_modules/dagster/dagster_tests/definitions_tests/auto_materialize_tests/updated_scenarios/partition_scenarios.py b/python_modules/dagster/dagster_tests/definitions_tests/auto_materialize_tests/updated_scenarios/partition_scenarios.py
index 7a6b50b3043e8..0a178a3a1db39 100644
--- a/python_modules/dagster/dagster_tests/definitions_tests/auto_materialize_tests/updated_scenarios/partition_scenarios.py
+++ b/python_modules/dagster/dagster_tests/definitions_tests/auto_materialize_tests/updated_scenarios/partition_scenarios.py
@@ -508,7 +508,7 @@
                 ["A"], partition_key=hour_partition_key(time_partitions_start_datetime, delta=1)
             )
         )
-        .evaluate_tick()
+        .evaluate_tick("FOO")
         .assert_requested_runs()
         .with_not_started_runs()
         # now the start date is updated, request the new first partition key
@@ -518,7 +518,7 @@
                 start=time_partitions_start_datetime + datetime.timedelta(days=5)
             )
         )
-        .evaluate_tick()
+        .evaluate_tick("BAR")
         .assert_requested_runs(
             run_request(
                 ["A"],
@@ -656,7 +656,7 @@
                 ["B"], partition_key=day_partition_key(time_partitions_start_datetime, delta=1)
             )
         )
-        .evaluate_tick()
+        .evaluate_tick("THIS ONE")
         .assert_requested_runs(
             run_request(
                 ["C"],