Coverage for python/lsst/daf/butler/_quantum_backed.py: 25%
193 statements
# This file is part of daf_butler.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

__all__ = ("QuantumBackedButler", "QuantumProvenanceData")

import itertools
import logging
import uuid
from collections import defaultdict
from collections.abc import Iterable, Mapping
from typing import TYPE_CHECKING, Any

from deprecated.sphinx import deprecated
from lsst.resources import ResourcePathExpression
from pydantic import BaseModel

from ._butlerConfig import ButlerConfig
from ._deferredDatasetHandle import DeferredDatasetHandle
from ._limited_butler import LimitedButler
from .core import (
    Config,
    DatasetId,
    DatasetRef,
    DatasetType,
    Datastore,
    DatastoreRecordData,
    DimensionUniverse,
    Quantum,
    SerializedDatastoreRecordData,
    StorageClass,
    StorageClassFactory,
    ddl,
)
from .registry.bridge.monolithic import MonolithicDatastoreRegistryBridgeManager
from .registry.databases.sqlite import SqliteDatabase
from .registry.interfaces import DatastoreRegistryBridgeManager, OpaqueTableStorageManager
from .registry.opaque import ByNameOpaqueTableStorageManager

if TYPE_CHECKING:
    from ._butler import Butler

_LOG = logging.getLogger(__name__)

class _DatasetRecordStorageManagerDatastoreConstructionMimic:
    """A partial implementation of `DatasetRecordStorageManager` that exists
    only to allow a `DatastoreRegistryBridgeManager` (and hence a `Datastore`)
    to be constructed without a full `Registry`.

    Notes
    -----
    The interface implemented by this class should probably be its own ABC,
    and that ABC should probably be used in the definition of
    `DatastoreRegistryBridgeManager`, but while prototyping I'm trying to keep
    changes minimal.
    """

    @classmethod
    def getIdColumnType(cls) -> type:
        # Docstring inherited.
        return ddl.GUID

    @classmethod
    def addDatasetForeignKey(
        cls,
        tableSpec: ddl.TableSpec,
        *,
        name: str = "dataset",
        constraint: bool = True,
        onDelete: str | None = None,
        **kwargs: Any,
    ) -> ddl.FieldSpec:
        # Docstring inherited.
        idFieldSpec = ddl.FieldSpec(f"{name}_id", dtype=ddl.GUID, **kwargs)
        tableSpec.fields.add(idFieldSpec)
        return idFieldSpec

class QuantumBackedButler(LimitedButler):
    """An implementation of `LimitedButler` intended to back execution of a
    single `Quantum`.

    Parameters
    ----------
    predicted_inputs : `~collections.abc.Iterable` [`DatasetId`]
        Dataset IDs for datasets that can be read from this butler.
    predicted_outputs : `~collections.abc.Iterable` [`DatasetId`]
        Dataset IDs for datasets that can be stored in this butler.
    dimensions : `DimensionUniverse`
        Object managing all dimension definitions.
    datastore : `Datastore`
        Datastore to use for all dataset I/O and existence checks.
    storageClasses : `StorageClassFactory`
        Object managing all storage class definitions.
    dataset_types : `~collections.abc.Mapping` [`str`, `DatasetType`], optional
        Mapping of the dataset type name to its registry definition.

    Notes
    -----
    Most callers should use the `initialize` `classmethod` to construct new
    instances instead of calling the constructor directly.

    `QuantumBackedButler` uses a SQLite database internally, in order to reuse
    existing `DatastoreRegistryBridge` and `OpaqueTableStorage`
    implementations that rely on SQLAlchemy. If implementations are added in
    the future that don't rely on SQLAlchemy, it should be possible to swap
    them in by overriding the type arguments to `initialize` (though at
    present, `QuantumBackedButler` would still create at least an in-memory
    SQLite database that would then go unused).

    We imagine `QuantumBackedButler` being used during (at least) batch
    execution to capture `Datastore` records and save them to per-quantum
    files, which are also a convenient place to store provenance for eventual
    upload to a SQL-backed `Registry` (once `Registry` has tables to store
    provenance, that is).

    These per-quantum files can be written in two ways:

    - The SQLite file used internally by `QuantumBackedButler` can be used
      directly by customizing the ``filename`` argument to ``initialize``, and
      then transferring that file to the object store after execution
      completes (or fails; a ``try/finally`` pattern probably makes sense
      here).

    - A JSON or YAML file can be written by calling `extract_provenance_data`,
      and using ``pydantic`` methods to write the returned
      `QuantumProvenanceData` to a file.

    Note that at present, the SQLite file only contains datastore records, not
    provenance, but that should be easy to address (if desired) after we
    actually design a `Registry` schema for provenance. I also suspect that
    we'll want to explicitly close the SQLite file somehow before trying to
    transfer it. But I'm guessing we'd prefer to write the per-quantum files
    as JSON anyway.
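
    Examples
    --------
    A minimal sketch of the intended batch-execution pattern; the repository
    path and output file name are illustrative, and ``quantum`` is assumed to
    be a `Quantum` with fully resolved `DatasetRef` instances (e.g. one node
    of a QuantumGraph)::

        from lsst.daf.butler import DimensionUniverse, QuantumBackedButler

        universe = DimensionUniverse()
        butler = QuantumBackedButler.initialize(
            config="/repo/butler.yaml", quantum=quantum, dimensions=universe
        )
        # Read predicted inputs and write predicted outputs with butler.get()
        # and butler.put(), then capture provenance and datastore records.
        provenance = butler.extract_provenance_data()
        with open("provenance.json", "w") as stream:
            stream.write(provenance.json())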
    """

    def __init__(
        self,
        predicted_inputs: Iterable[DatasetId],
        predicted_outputs: Iterable[DatasetId],
        dimensions: DimensionUniverse,
        datastore: Datastore,
        storageClasses: StorageClassFactory,
        dataset_types: Mapping[str, DatasetType] | None = None,
    ):
        self._dimensions = dimensions
        self._predicted_inputs = set(predicted_inputs)
        self._predicted_outputs = set(predicted_outputs)
        self._available_inputs: set[DatasetId] = set()
        self._unavailable_inputs: set[DatasetId] = set()
        self._actual_inputs: set[DatasetId] = set()
        self._actual_output_refs: set[DatasetRef] = set()
        self.datastore = datastore
        self.storageClasses = storageClasses
        self._dataset_types: Mapping[str, DatasetType] = {}
        if dataset_types is not None:
            self._dataset_types = dataset_types
        self.datastore.set_retrieve_dataset_type_method(self._retrieve_dataset_type)

    @classmethod
    def initialize(
        cls,
        config: Config | ResourcePathExpression,
        quantum: Quantum,
        dimensions: DimensionUniverse,
        filename: str = ":memory:",
        OpaqueManagerClass: type[OpaqueTableStorageManager] = ByNameOpaqueTableStorageManager,
        BridgeManagerClass: type[DatastoreRegistryBridgeManager] = MonolithicDatastoreRegistryBridgeManager,
        search_paths: list[str] | None = None,
        dataset_types: Mapping[str, DatasetType] | None = None,
    ) -> QuantumBackedButler:
        """Construct a new `QuantumBackedButler` from repository configuration
        and helper types.

        Parameters
        ----------
        config : `Config` or `~lsst.resources.ResourcePathExpression`
            A butler repository root, configuration filename, or configuration
            instance.
        quantum : `Quantum`
            Object describing the predicted input and output datasets relevant
            to this butler. This must have resolved `DatasetRef` instances for
            all inputs and outputs.
        dimensions : `DimensionUniverse`
            Object managing all dimension definitions.
        filename : `str`, optional
            Name for the SQLite database that will back this butler; defaults
            to an in-memory database.
        OpaqueManagerClass : `type`, optional
            A subclass of `OpaqueTableStorageManager` to use for datastore
            opaque records. Default is a SQL-backed implementation.
        BridgeManagerClass : `type`, optional
            A subclass of `DatastoreRegistryBridgeManager` to use for
            datastore location records. Default is a SQL-backed
            implementation.
        search_paths : `list` of `str`, optional
            Additional search paths for butler configuration.
        dataset_types : `~collections.abc.Mapping` [`str`, `DatasetType`], \
                optional
            Mapping of the dataset type name to its registry definition.
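
        Examples
        --------
        A short, illustrative sketch; the paths are hypothetical and
        ``quantum`` is assumed to come from a QuantumGraph node::

            qbb = QuantumBackedButler.initialize(
                config="/repo/butler.yaml",
                quantum=quantum,
                dimensions=DimensionUniverse(),
                # Keep the backing SQLite file so it can be transferred later.
                filename="/scratch/quantum_records.sqlite3",
            )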
        """
        predicted_inputs = [ref.id for ref in itertools.chain.from_iterable(quantum.inputs.values())]
        predicted_inputs += [ref.id for ref in quantum.initInputs.values()]
        predicted_outputs = [ref.id for ref in itertools.chain.from_iterable(quantum.outputs.values())]
        return cls._initialize(
            config=config,
            predicted_inputs=predicted_inputs,
            predicted_outputs=predicted_outputs,
            dimensions=dimensions,
            filename=filename,
            datastore_records=quantum.datastore_records,
            OpaqueManagerClass=OpaqueManagerClass,
            BridgeManagerClass=BridgeManagerClass,
            search_paths=search_paths,
            dataset_types=dataset_types,
        )

    @classmethod
    def from_predicted(
        cls,
        config: Config | ResourcePathExpression,
        predicted_inputs: Iterable[DatasetId],
        predicted_outputs: Iterable[DatasetId],
        dimensions: DimensionUniverse,
        datastore_records: Mapping[str, DatastoreRecordData],
        filename: str = ":memory:",
        OpaqueManagerClass: type[OpaqueTableStorageManager] = ByNameOpaqueTableStorageManager,
        BridgeManagerClass: type[DatastoreRegistryBridgeManager] = MonolithicDatastoreRegistryBridgeManager,
        search_paths: list[str] | None = None,
        dataset_types: Mapping[str, DatasetType] | None = None,
    ) -> QuantumBackedButler:
        """Construct a new `QuantumBackedButler` from sets of input and output
        dataset IDs.

        Parameters
        ----------
        config : `Config` or `~lsst.resources.ResourcePathExpression`
            A butler repository root, configuration filename, or configuration
            instance.
        predicted_inputs : `~collections.abc.Iterable` [`DatasetId`]
            Dataset IDs for datasets that can be read from this butler.
        predicted_outputs : `~collections.abc.Iterable` [`DatasetId`]
            Dataset IDs for datasets that can be stored in this butler; these
            must be fully resolved.
        dimensions : `DimensionUniverse`
            Object managing all dimension definitions.
        datastore_records : `dict` [`str`, `DatastoreRecordData`] or `None`
            Datastore records to import into a datastore.
        filename : `str`, optional
            Name for the SQLite database that will back this butler; defaults
            to an in-memory database.
        OpaqueManagerClass : `type`, optional
            A subclass of `OpaqueTableStorageManager` to use for datastore
            opaque records. Default is a SQL-backed implementation.
        BridgeManagerClass : `type`, optional
            A subclass of `DatastoreRegistryBridgeManager` to use for
            datastore location records. Default is a SQL-backed
            implementation.
        search_paths : `list` of `str`, optional
            Additional search paths for butler configuration.
        dataset_types : `~collections.abc.Mapping` [`str`, `DatasetType`], \
                optional
            Mapping of the dataset type name to its registry definition.
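
        Examples
        --------
        A hedged sketch, assuming the predicted input/output UUIDs and
        exported datastore records (``input_ids``, ``output_ids``,
        ``records``) are already in hand, e.g. read from a per-quantum file::

            qbb = QuantumBackedButler.from_predicted(
                config="/repo/butler.yaml",
                predicted_inputs=input_ids,
                predicted_outputs=output_ids,
                dimensions=DimensionUniverse(),
                datastore_records=records,
            )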
        """
        return cls._initialize(
            config=config,
            predicted_inputs=predicted_inputs,
            predicted_outputs=predicted_outputs,
            dimensions=dimensions,
            filename=filename,
            datastore_records=datastore_records,
            OpaqueManagerClass=OpaqueManagerClass,
            BridgeManagerClass=BridgeManagerClass,
            search_paths=search_paths,
            dataset_types=dataset_types,
        )

    @classmethod
    def _initialize(
        cls,
        *,
        config: Config | ResourcePathExpression,
        predicted_inputs: Iterable[DatasetId],
        predicted_outputs: Iterable[DatasetId],
        dimensions: DimensionUniverse,
        filename: str = ":memory:",
        datastore_records: Mapping[str, DatastoreRecordData] | None = None,
        OpaqueManagerClass: type[OpaqueTableStorageManager] = ByNameOpaqueTableStorageManager,
        BridgeManagerClass: type[DatastoreRegistryBridgeManager] = MonolithicDatastoreRegistryBridgeManager,
        search_paths: list[str] | None = None,
        dataset_types: Mapping[str, DatasetType] | None = None,
    ) -> QuantumBackedButler:
        """Internal method with common implementation used by `initialize`
        and `from_predicted`.

        Parameters
        ----------
        config : `Config` or `~lsst.resources.ResourcePathExpression`
            A butler repository root, configuration filename, or configuration
            instance.
        predicted_inputs : `~collections.abc.Iterable` [`DatasetId`]
            Dataset IDs for datasets that can be read from this butler.
        predicted_outputs : `~collections.abc.Iterable` [`DatasetId`]
            Dataset IDs for datasets that can be stored in this butler.
        dimensions : `DimensionUniverse`
            Object managing all dimension definitions.
        filename : `str`, optional
            Name for the SQLite database that will back this butler; defaults
            to an in-memory database.
        datastore_records : `dict` [`str`, `DatastoreRecordData`] or `None`
            Datastore records to import into a datastore.
        OpaqueManagerClass : `type`, optional
            A subclass of `OpaqueTableStorageManager` to use for datastore
            opaque records. Default is a SQL-backed implementation.
        BridgeManagerClass : `type`, optional
            A subclass of `DatastoreRegistryBridgeManager` to use for
            datastore location records. Default is a SQL-backed
            implementation.
        search_paths : `list` of `str`, optional
            Additional search paths for butler configuration.
        dataset_types : `~collections.abc.Mapping` [`str`, `DatasetType`]
            Mapping of the dataset type name to its registry definition.
        """
        butler_config = ButlerConfig(config, searchPaths=search_paths)
        if "root" in butler_config:
            butler_root = butler_config["root"]
        else:
            butler_root = butler_config.configDir
        db = SqliteDatabase.fromUri(f"sqlite:///{filename}", origin=0)
        with db.declareStaticTables(create=True) as context:
            opaque_manager = OpaqueManagerClass.initialize(db, context)
            bridge_manager = BridgeManagerClass.initialize(
                db,
                context,
                opaque=opaque_manager,
                # MyPy can tell it's a fake, but we know it shouldn't care.
                datasets=_DatasetRecordStorageManagerDatastoreConstructionMimic,  # type: ignore
                universe=dimensions,
            )
        # TODO: We need to inform `Datastore` here that it needs to support
        # predictive reads; right now that's a configuration option, but after
        # execution butler is retired it could just be a kwarg we pass here.
        # For now just force this option as we cannot work without it.
        butler_config["datastore", "trust_get_request"] = True
        datastore = Datastore.fromConfig(butler_config, bridge_manager, butler_root)
        if datastore_records is not None:
            datastore.import_records(datastore_records)
        storageClasses = StorageClassFactory()
        storageClasses.addFromConfig(butler_config)
        return cls(
            predicted_inputs,
            predicted_outputs,
            dimensions,
            datastore,
            storageClasses=storageClasses,
            dataset_types=dataset_types,
        )

    def _retrieve_dataset_type(self, name: str) -> DatasetType | None:
        """Return DatasetType defined in registry given dataset type name."""
        return self._dataset_types.get(name)

    def isWriteable(self) -> bool:
        # Docstring inherited.
        return True

    @deprecated(
        reason="Butler.get() now behaves like Butler.getDirect() when given a DatasetRef."
        " Please use Butler.get(). Will be removed after v27.0.",
        version="v26.0",
        category=FutureWarning,
    )
    def getDirect(
        self,
        ref: DatasetRef,
        *,
        parameters: dict[str, Any] | None = None,
        storageClass: str | StorageClass | None = None,
    ) -> Any:
        # Docstring inherited.
        return self.get(ref, parameters=parameters, storageClass=storageClass)

    def get(
        self,
        ref: DatasetRef,
        /,
        *,
        parameters: dict[str, Any] | None = None,
        storageClass: StorageClass | str | None = None,
    ) -> Any:
        try:
            obj = super().get(
                ref,
                parameters=parameters,
                storageClass=storageClass,
            )
        except (LookupError, FileNotFoundError, OSError):
            self._unavailable_inputs.add(ref.id)
            raise
        if ref.id in self._predicted_inputs:
            # Do this after delegating to super in case that raises.
            self._actual_inputs.add(ref.id)
            self._available_inputs.add(ref.id)
        return obj

    @deprecated(
        reason="Butler.getDeferred() now behaves like getDirectDeferred() when given a DatasetRef. "
        "Please use Butler.getDeferred(). Will be removed after v27.0.",
        version="v26.0",
        category=FutureWarning,
    )
    def getDirectDeferred(
        self,
        ref: DatasetRef,
        *,
        parameters: dict[str, Any] | None = None,
        storageClass: str | StorageClass | None = None,
    ) -> DeferredDatasetHandle:
        # Docstring inherited.
        return self.getDeferred(ref, parameters=parameters, storageClass=storageClass)

    def getDeferred(
        self,
        ref: DatasetRef,
        /,
        *,
        parameters: dict[str, Any] | None = None,
        storageClass: str | StorageClass | None = None,
    ) -> DeferredDatasetHandle:
        if ref.id in self._predicted_inputs:
            # Unfortunately, we can't do this after the handle succeeds in
            # loading, so it's conceivable here that we're marking an input
            # as "actual" even when it's not even available.
            self._actual_inputs.add(ref.id)
        return super().getDeferred(ref, parameters=parameters, storageClass=storageClass)

    def stored(self, ref: DatasetRef) -> bool:
        # Docstring inherited.
        stored = super().stored(ref)
        if ref.id in self._predicted_inputs:
            if stored:
                self._available_inputs.add(ref.id)
            else:
                self._unavailable_inputs.add(ref.id)
        return stored

    def stored_many(
        self,
        refs: Iterable[DatasetRef],
    ) -> dict[DatasetRef, bool]:
        # Docstring inherited.
        existence = super().stored_many(refs)

        for ref, stored in existence.items():
            if ref.id in self._predicted_inputs:
                if stored:
                    self._available_inputs.add(ref.id)
                else:
                    self._unavailable_inputs.add(ref.id)
        return existence

    def markInputUnused(self, ref: DatasetRef) -> None:
        # Docstring inherited.
        self._actual_inputs.discard(ref.id)

    @property
    def dimensions(self) -> DimensionUniverse:
        # Docstring inherited.
        return self._dimensions

    def put(self, obj: Any, ref: DatasetRef, /) -> DatasetRef:
        # Docstring inherited.
        if ref.id not in self._predicted_outputs:
            raise RuntimeError("Cannot `put` dataset that was not predicted as an output.")
        self.datastore.put(obj, ref)
        self._actual_output_refs.add(ref)
        return ref

    def pruneDatasets(
        self,
        refs: Iterable[DatasetRef],
        *,
        disassociate: bool = True,
        unstore: bool = False,
        tags: Iterable[str] = (),
        purge: bool = False,
    ) -> None:
        # Docstring inherited from LimitedButler.

        if purge:
            if not disassociate:
                raise TypeError("Cannot pass purge=True without disassociate=True.")
            if not unstore:
                raise TypeError("Cannot pass purge=True without unstore=True.")
        elif disassociate:
            # No tagged collections for this butler.
            raise TypeError("Cannot pass disassociate=True without purge=True.")

        refs = list(refs)

        # Pruning a component of a DatasetRef makes no sense.
        for ref in refs:
            if ref.datasetType.component():
                raise ValueError(f"Can not prune a component of a dataset (ref={ref})")

        if unstore:
            self.datastore.trash(refs)
        if purge:
            for ref in refs:
                # We only care about removing them from actual output refs.
                self._actual_output_refs.discard(ref)

        if unstore:
            # Point of no return for removing artifacts.
            self.datastore.emptyTrash()

    def extract_provenance_data(self) -> QuantumProvenanceData:
        """Extract provenance information and datastore records from this
        butler.

        Returns
        -------
        provenance : `QuantumProvenanceData`
            A serializable struct containing input/output dataset IDs and
            datastore records. This assumes all dataset IDs are UUIDs (just to
            make it easier for `pydantic` to reason about the struct's types);
            the rest of this class makes no such assumption, but the approach
            to processing in which it's useful effectively requires UUIDs
            anyway.

        Notes
        -----
        `QuantumBackedButler` records this provenance information when its
        methods are used, which mostly saves `~lsst.pipe.base.PipelineTask`
        authors from having to worry about it while still recording very
        detailed information. But it has two small weaknesses:

        - Calling `getDirectDeferred` or `getDirect` is enough to mark a
          dataset as an "actual input", which may mark some datasets that
          aren't actually used. We rely on task authors to use
          `markInputUnused` to address this.

        - We assume that the execution system will call ``datasetExistsDirect``
          on all predicted inputs prior to execution, in order to populate the
          "available inputs" set. This is what I envision
          `~lsst.ctrl.mpexec.SingleQuantumExecutor` doing after we update it
          to use this class, but it feels fragile for this class to make such
          a strong assumption about how it will be used, even if I can't think
          of any other executor behavior that would make sense.
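
        Examples
        --------
        A minimal sketch of the intended end-of-quantum bookkeeping; the
        output file name is illustrative::

            provenance = butler.extract_provenance_data()
            # QuantumProvenanceData is a pydantic model, so it can be written
            # out with the usual pydantic serialization methods.
            with open("quantum_provenance.json", "w") as stream:
                stream.write(provenance.json())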
        """
        if not self._actual_inputs.isdisjoint(self._unavailable_inputs):
            _LOG.warning(
                "Inputs %s were marked as actually used (probably because a DeferredDatasetHandle "
                "was obtained), but did not actually exist. This task should be using markInputUnused "
                "directly to clarify its provenance.",
                self._actual_inputs & self._unavailable_inputs,
            )
            self._actual_inputs -= self._unavailable_inputs
        checked_inputs = self._available_inputs | self._unavailable_inputs
        if not self._predicted_inputs == checked_inputs:
            _LOG.warning(
                "Execution harness did not check predicted inputs %s for existence; available inputs "
                "recorded in provenance may be incomplete.",
                self._predicted_inputs - checked_inputs,
            )
        datastore_records = self.datastore.export_records(self._actual_output_refs)
        provenance_records = {
            datastore_name: records.to_simple() for datastore_name, records in datastore_records.items()
        }

        return QuantumProvenanceData(
            predicted_inputs=self._predicted_inputs,
            available_inputs=self._available_inputs,
            actual_inputs=self._actual_inputs,
            predicted_outputs=self._predicted_outputs,
            actual_outputs={ref.id for ref in self._actual_output_refs},
            datastore_records=provenance_records,
        )

class QuantumProvenanceData(BaseModel):
    """A serializable struct for per-quantum provenance information and
    datastore records.

    Notes
    -----
    This class slightly duplicates information from the `Quantum` class itself
    (the `predicted_inputs` and `predicted_outputs` sets should have the same
    IDs present in `Quantum.inputs` and `Quantum.outputs`), but overall it
    assumes the original `Quantum` is also available to reconstruct the
    complete provenance (e.g. by associating dataset IDs with data IDs,
    dataset types, and `~CollectionType.RUN` names).

    Note that the ``pydantic`` method ``parse_raw()`` is not going to work
    correctly for this class; use the `direct` method instead.
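
    A hedged sketch of reading an instance back from JSON, assuming the file
    was written with pydantic's ``json()`` method::

        import json

        with open("quantum_provenance.json") as stream:
            provenance = QuantumProvenanceData.direct(**json.loads(stream.read()))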
    """

    # This class probably should have information about its execution
    # environment (anything not controlled and recorded at the
    # `~CollectionType.RUN` level, such as the compute node ID), but adding it
    # now is out of scope for this prototype.

    predicted_inputs: set[uuid.UUID]
    """Unique IDs of datasets that were predicted as inputs to this quantum
    when the QuantumGraph was built.
    """

    available_inputs: set[uuid.UUID]
    """Unique IDs of input datasets that were actually present in the
    datastore when this quantum was executed.

    This is a subset of `predicted_inputs`, with the difference generally
    being datasets that were `predicted_outputs` but not `actual_outputs` of
    some upstream task.
    """

    actual_inputs: set[uuid.UUID]
    """Unique IDs of datasets that were actually used as inputs by this task.

    This is a subset of `available_inputs`.

    Notes
    -----
    The criterion for marking an input as used is that rerunning the quantum
    with only these `actual_inputs` available must yield identical outputs.
    This means that (for example) even just using an input to help determine
    an output rejection criterion and then rejecting it as an outlier
    qualifies that input as actually used.
    """

    predicted_outputs: set[uuid.UUID]
    """Unique IDs of datasets that were predicted as outputs of this quantum
    when the QuantumGraph was built.
    """

    actual_outputs: set[uuid.UUID]
    """Unique IDs of datasets that were actually written when this quantum
    was executed.
    """

    datastore_records: dict[str, SerializedDatastoreRecordData]
    """Datastore records indexed by datastore name."""

    @staticmethod
    def collect_and_transfer(
        butler: Butler, quanta: Iterable[Quantum], provenance: Iterable[QuantumProvenanceData]
    ) -> None:
        """Transfer output datasets from multiple quanta to a more permanent
        `Butler` repository.

        Parameters
        ----------
        butler : `Butler`
            Full butler representing the data repository to transfer datasets
            to.
        quanta : `~collections.abc.Iterable` [ `Quantum` ]
            Iterable of `Quantum` objects that carry information about
            predicted outputs. May be a single-pass iterator.
        provenance : `~collections.abc.Iterable` [ `QuantumProvenanceData` ]
            Provenance and datastore data for each of the given quanta, in the
            same order. May be a single-pass iterator.

        Notes
        -----
        Input-output provenance data is not actually transferred yet, because
        `Registry` has no place to store it.

        This method probably works most efficiently if run on all quanta for a
        single task label at once, because this will gather all datasets of
        a particular type together into a single vectorized `Registry` import.
        It should still behave correctly if run on smaller groups of quanta
        or even quanta from multiple tasks.

        Currently this method transfers datastore record data unchanged, with
        no possibility of actually moving (e.g.) files. Datastores that are
        present only in execution or only in the more permanent butler are
        ignored.
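
        Examples
        --------
        A hedged sketch of harvesting per-quantum provenance files back into a
        full butler; ``butler``, ``quanta``, and the ``provenance_paths``
        file-naming scheme are assumed to be provided by the caller::

            import json

            provenance = [
                QuantumProvenanceData.direct(**json.loads(path.read_text()))
                for path in provenance_paths
            ]
            QuantumProvenanceData.collect_and_transfer(butler, quanta, provenance)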
        """
        grouped_refs = defaultdict(list)
        summary_records: dict[str, DatastoreRecordData] = {}
        for quantum, provenance_for_quantum in zip(quanta, provenance):
            quantum_refs_by_id = {
                ref.id: ref
                for ref in itertools.chain.from_iterable(quantum.outputs.values())
                if ref.id in provenance_for_quantum.actual_outputs
            }
            for ref in quantum_refs_by_id.values():
                grouped_refs[ref.datasetType, ref.run].append(ref)

            # Merge datastore records into a summary structure.
            for datastore_name, serialized_records in provenance_for_quantum.datastore_records.items():
                quantum_records = DatastoreRecordData.from_simple(serialized_records)
                if (records := summary_records.get(datastore_name)) is not None:
                    records.update(quantum_records)
                else:
                    summary_records[datastore_name] = quantum_records

        for refs in grouped_refs.values():
            butler.registry._importDatasets(refs)
        butler.datastore.import_records(summary_records)

    @classmethod
    def parse_raw(cls, *args: Any, **kwargs: Any) -> QuantumProvenanceData:
        raise NotImplementedError("parse_raw() is not usable for this class, use direct() instead.")

    @classmethod
    def direct(
        cls,
        *,
        predicted_inputs: Iterable[str | uuid.UUID],
        available_inputs: Iterable[str | uuid.UUID],
        actual_inputs: Iterable[str | uuid.UUID],
        predicted_outputs: Iterable[str | uuid.UUID],
        actual_outputs: Iterable[str | uuid.UUID],
        datastore_records: Mapping[str, Mapping],
    ) -> QuantumProvenanceData:
        """Construct an instance directly without validators.

        This differs from the pydantic "construct" method in that the
        arguments are explicitly what the model requires, and it will recurse
        through members, constructing them from their corresponding `direct`
        methods.

        This method should only be called when the inputs are trusted.
        """

        def _to_uuid_set(uuids: Iterable[str | uuid.UUID]) -> set[uuid.UUID]:
            """Convert input UUIDs, which could be in string representation,
            to a set of `UUID` instances.
            """
            return {uuid.UUID(id) if isinstance(id, str) else id for id in uuids}

        data = QuantumProvenanceData.__new__(cls)
        setter = object.__setattr__
        setter(data, "predicted_inputs", _to_uuid_set(predicted_inputs))
        setter(data, "available_inputs", _to_uuid_set(available_inputs))
        setter(data, "actual_inputs", _to_uuid_set(actual_inputs))
        setter(data, "predicted_outputs", _to_uuid_set(predicted_outputs))
        setter(data, "actual_outputs", _to_uuid_set(actual_outputs))
        setter(
            data,
            "datastore_records",
            {
                key: SerializedDatastoreRecordData.direct(**records)
                for key, records in datastore_records.items()
            },
        )
        return data