# This file is part of pipe_base.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""Module defining GraphBuilder class and related methods.
"""

from __future__ import annotations

__all__ = ['GraphBuilder']

# -------------------------------
#  Imports of standard modules --
# -------------------------------
import itertools
from collections import ChainMap
from contextlib import contextmanager
from dataclasses import dataclass
from typing import Dict, Iterable, Iterator, List, Set
import logging

# -----------------------------
#  Imports for other modules --
# -----------------------------
from .connections import iterConnections
from .pipeline import PipelineDatasetTypes, TaskDatasetTypes, TaskDef, Pipeline
from .graph import QuantumGraph
from lsst.daf.butler import (
    DataCoordinate,
    DatasetRef,
    DatasetType,
    DimensionGraph,
    DimensionUniverse,
    NamedKeyDict,
    Quantum,
)
from lsst.daf.butler.registry.queries.exprParser import ParseError, ParserYacc, TreeVisitor
from lsst.utils import doImport

# ----------------------------------
#  Local non-exported definitions --
# ----------------------------------

_LOG = logging.getLogger(__name__.partition(".")[2])


class _DatasetDict(NamedKeyDict[DatasetType, Dict[DataCoordinate, DatasetRef]]):
    """A custom dictionary that maps `DatasetType` to a nested dictionary of
    the known `DatasetRef` instances of that type.

    Parameters
    ----------
    args
        Positional arguments are forwarded to the `dict` constructor.
    universe : `DimensionUniverse`
        Universe of all possible dimensions.
    """
    def __init__(self, *args, universe: DimensionUniverse):
        super().__init__(*args)
        self.universe = universe

    @classmethod
    def fromDatasetTypes(cls, datasetTypes: Iterable[DatasetType], *,
                         universe: DimensionUniverse) -> _DatasetDict:
        """Construct a dictionary from a flat iterable of `DatasetType` keys.

        Parameters
        ----------
        datasetTypes : `iterable` of `DatasetType`
            DatasetTypes to use as keys for the dict.  Values will be empty
            dictionaries.
        universe : `DimensionUniverse`
            Universe of all possible dimensions.

        Returns
        -------
        dictionary : `_DatasetDict`
            A new `_DatasetDict` instance.
        """
        return cls({datasetType: {} for datasetType in datasetTypes}, universe=universe)

    @classmethod
    def fromSubset(cls, datasetTypes: Iterable[DatasetType], first: _DatasetDict, *rest: _DatasetDict
                   ) -> _DatasetDict:
        """Return a new dictionary by extracting items corresponding to the
        given keys from one or more existing dictionaries.

        Parameters
        ----------
        datasetTypes : `iterable` of `DatasetType`
            DatasetTypes to use as keys for the dict.  Values will be obtained
            by lookups against ``first`` and ``rest``.
        first : `_DatasetDict`
            Another dictionary from which to extract values.
        rest
            Additional dictionaries from which to extract values.

        Returns
        -------
        dictionary : `_DatasetDict`
            A new dictionary instance.
        """
        combined = ChainMap(first, *rest)
        return cls({datasetType: combined[datasetType] for datasetType in datasetTypes},
                   universe=first.universe)

    @property
    def dimensions(self) -> DimensionGraph:
        """The union of all dimensions used by all dataset types in this
        dictionary, including implied dependencies (`DimensionGraph`).
        """
        base = self.universe.empty
        if len(self) == 0:
            return base
        return base.union(*[datasetType.dimensions for datasetType in self.keys()])

    def unpackSingleRefs(self) -> NamedKeyDict[DatasetType, DatasetRef]:
        """Unpack nested single-element `DatasetRef` dicts into a new
        mapping with `DatasetType` keys and `DatasetRef` values.

        This method assumes that each nested dict contains exactly one item,
        as is the case for all "init" datasets.

        Returns
        -------
        dictionary : `NamedKeyDict`
            Dictionary mapping `DatasetType` to `DatasetRef`, with both
            `DatasetType` instances and string names usable as keys.
        """
        def getOne(refs: Dict[DataCoordinate, DatasetRef]) -> DatasetRef:
            ref, = refs.values()
            return ref
        return NamedKeyDict({datasetType: getOne(refs) for datasetType, refs in self.items()})

    def unpackMultiRefs(self) -> NamedKeyDict[DatasetType, List[DatasetRef]]:
        """Unpack nested multi-element `DatasetRef` dicts into a new
        mapping with `DatasetType` keys and `list` of `DatasetRef` values.

        Returns
        -------
        dictionary : `NamedKeyDict`
            Dictionary mapping `DatasetType` to `list` of `DatasetRef`, with
            both `DatasetType` instances and string names usable as keys.
        """
        return NamedKeyDict({datasetType: list(refs.values()) for datasetType, refs in self.items()})

    def extract(self, datasetType: DatasetType, dataIds: Iterable[DataCoordinate]
                ) -> Iterator[DatasetRef]:
        """Iterate over the contained `DatasetRef` instances that match the
        given `DatasetType` and data IDs.

        Parameters
        ----------
        datasetType : `DatasetType`
            Dataset type to match.
        dataIds : `Iterable` [ `DataCoordinate` ]
            Data IDs to match.

        Returns
        -------
        refs : `Iterator` [ `DatasetRef` ]
            DatasetRef instances for which ``ref.datasetType == datasetType``
            and ``ref.dataId`` is in ``dataIds``.
        """
        refs = self[datasetType]
        return (refs[dataId] for dataId in dataIds)
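
# Illustrative sketch (not executed): how a _DatasetDict nests its refs.
# `rawType` and `dataId` are hypothetical stand-ins for a real DatasetType
# and an expanded DataCoordinate from a butler registry:
#
#     dd = _DatasetDict.fromDatasetTypes([rawType], universe=registry.dimensions)
#     dd[rawType][dataId] = DatasetRef(rawType, dataId)
#     list(dd.extract(rawType, [dataId]))  # the ref we just inserted
#     dd.unpackSingleRefs()[rawType]       # same ref, since the nest has one item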


class _QuantumScaffolding:
    """Helper class aggregating information about a `Quantum`, used when
    constructing a `QuantumGraph`.

    See `_PipelineScaffolding` for a top-down description of the full
    scaffolding data structure.

    Parameters
    ----------
    task : _TaskScaffolding
        Back-reference to the helper object for the `PipelineTask` this
        quantum represents an execution of.
    dataId : `DataCoordinate`
        Data ID for this quantum.
    """
    def __init__(self, task: _TaskScaffolding, dataId: DataCoordinate):
        self.task = task
        self.dataId = dataId
        self.inputs = _DatasetDict.fromDatasetTypes(task.inputs.keys(), universe=dataId.universe)
        self.outputs = _DatasetDict.fromDatasetTypes(task.outputs.keys(), universe=dataId.universe)
        self.prerequisites = _DatasetDict.fromDatasetTypes(task.prerequisites.keys(),
                                                           universe=dataId.universe)

    __slots__ = ("task", "dataId", "inputs", "outputs", "prerequisites")

    def __repr__(self):
        return f"_QuantumScaffolding(taskDef={self.task.taskDef}, dataId={self.dataId}, ...)"

    task: _TaskScaffolding
    """Back-reference to the helper object for the `PipelineTask` this quantum
    represents an execution of.
    """

    dataId: DataCoordinate
    """Data ID for this quantum.
    """

    inputs: _DatasetDict
    """Nested dictionary containing `DatasetRef` inputs to this quantum.

    This is initialized to map each `DatasetType` to an empty dictionary at
    construction.  Those nested dictionaries are populated (with data IDs as
    keys) with unresolved `DatasetRef` instances in
    `_PipelineScaffolding.connectDataIds`.
    """

    outputs: _DatasetDict
    """Nested dictionary containing `DatasetRef` outputs of this quantum.
    """

    prerequisites: _DatasetDict
    """Nested dictionary containing `DatasetRef` prerequisite inputs to this
    quantum.
    """

    def makeQuantum(self) -> Quantum:
        """Transform the scaffolding object into a true `Quantum` instance.

        Returns
        -------
        quantum : `Quantum`
            An actual `Quantum` instance.
        """
        allInputs = self.inputs.unpackMultiRefs()
        allInputs.update(self.prerequisites.unpackMultiRefs())
        # Give the task's Connections class an opportunity to remove some
        # inputs, or complain if they are unacceptable.  This will raise if
        # one of the check conditions is not met, which is the intended
        # behavior.
        allInputs = self.task.taskDef.connections.adjustQuantum(allInputs)
        return Quantum(
            taskName=self.task.taskDef.taskName,
            taskClass=self.task.taskDef.taskClass,
            dataId=self.dataId,
            initInputs=self.task.initInputs.unpackSingleRefs(),
            inputs=allInputs,
            outputs=self.outputs.unpackMultiRefs(),
        )
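
# Sketch of how a populated scaffolding node becomes a real Quantum (the
# variable `qs` is hypothetical):
#
#     quantum = qs.makeQuantum()
#
# makeQuantum merges regular inputs with prerequisites, then lets the task's
# connections class trim or veto them (via adjustQuantum) before the Quantum
# is constructed.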


@dataclass
class _TaskScaffolding:
    """Helper class aggregating information about a `PipelineTask`, used when
    constructing a `QuantumGraph`.

    See `_PipelineScaffolding` for a top-down description of the full
    scaffolding data structure.

    Parameters
    ----------
    taskDef : `TaskDef`
        Data structure that identifies the task class and its config.
    parent : `_PipelineScaffolding`
        The parent data structure that will hold the instance being
        constructed.
    datasetTypes : `TaskDatasetTypes`
        Data structure that categorizes the dataset types used by this task.
    """
    def __init__(self, taskDef: TaskDef, parent: _PipelineScaffolding, datasetTypes: TaskDatasetTypes):
        universe = parent.dimensions.universe
        self.taskDef = taskDef
        self.dimensions = DimensionGraph(universe, names=taskDef.connections.dimensions)
        assert self.dimensions.issubset(parent.dimensions)
        # Initialize _DatasetDicts as subsets of the one or two
        # corresponding dicts in the parent _PipelineScaffolding.
        self.initInputs = _DatasetDict.fromSubset(datasetTypes.initInputs, parent.initInputs,
                                                  parent.initIntermediates)
        self.initOutputs = _DatasetDict.fromSubset(datasetTypes.initOutputs, parent.initIntermediates,
                                                   parent.initOutputs)
        self.inputs = _DatasetDict.fromSubset(datasetTypes.inputs, parent.inputs, parent.intermediates)
        self.outputs = _DatasetDict.fromSubset(datasetTypes.outputs, parent.intermediates, parent.outputs)
        self.prerequisites = _DatasetDict.fromSubset(datasetTypes.prerequisites, parent.prerequisites)
        self.dataIds = set()
        self.quanta = {}

    def __repr__(self):
        # The default dataclass-injected __repr__ gets caught in an infinite
        # loop because of back-references.
        return f"_TaskScaffolding(taskDef={self.taskDef}, ...)"

    taskDef: TaskDef
    """Data structure that identifies the task class and its config
    (`TaskDef`).
    """

    dimensions: DimensionGraph
    """The dimensions of a single `Quantum` of this task (`DimensionGraph`).
    """

    initInputs: _DatasetDict
    """Dictionary containing information about datasets used to construct this
    task (`_DatasetDict`).
    """

    initOutputs: _DatasetDict
    """Dictionary containing information about datasets produced as a
    side-effect of constructing this task (`_DatasetDict`).
    """

    inputs: _DatasetDict
    """Dictionary containing information about datasets used as regular,
    graph-constraining inputs to this task (`_DatasetDict`).
    """

    outputs: _DatasetDict
    """Dictionary containing information about datasets produced by this task
    (`_DatasetDict`).
    """

    prerequisites: _DatasetDict
    """Dictionary containing information about input datasets that must be
    present in the repository before any Pipeline containing this task is run
    (`_DatasetDict`).
    """

    quanta: Dict[DataCoordinate, _QuantumScaffolding]
    """Dictionary mapping data ID to a scaffolding object for the Quantum of
    this task with that data ID.
    """

    def makeQuantumSet(self) -> Set[Quantum]:
        """Create a `set` of `Quantum` from the information in ``self``.

        Returns
        -------
        nodes : `set` of `Quantum`
            The `Quantum` elements corresponding to this task.
        """
        return set(q.makeQuantum() for q in self.quanta.values())
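
# Note (illustrative): because `_DatasetDict.fromSubset` extracts the nested
# dict objects themselves, a task's view aliases the parent's.  For a dataset
# type `dsType` that one task produces and another consumes:
#
#     consumer.inputs[dsType] is parent.intermediates[dsType]  # -> True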


@dataclass
class _PipelineScaffolding:
    """A helper data structure that organizes the information involved in
    constructing a `QuantumGraph` for a `Pipeline`.

    Parameters
    ----------
    pipeline : `Pipeline`
        Sequence of tasks from which a graph is to be constructed.  Must
        have nested task classes already imported.
    registry : `lsst.daf.butler.Registry`
        Registry for the data repository; used for all data ID queries.

    Notes
    -----
    The scaffolding data structure contains nested data structures for both
    tasks (`_TaskScaffolding`) and datasets (`_DatasetDict`).  The dataset
    data structures are shared between the pipeline-level structure (which
    aggregates all datasets and categorizes them from the perspective of the
    complete pipeline) and the individual tasks that use them as inputs and
    outputs.

    `QuantumGraph` construction proceeds in four steps, with each
    corresponding to a different `_PipelineScaffolding` method:

    1. When `_PipelineScaffolding` is constructed, we extract and categorize
       the DatasetTypes used by the pipeline (delegating to
       `PipelineDatasetTypes.fromPipeline`), then use these to construct the
       nested `_TaskScaffolding` and `_DatasetDict` objects.

    2. In `connectDataIds`, we construct and run the "Big Join Query", which
       returns related tuples of all dimensions used to identify any regular
       input, output, and intermediate datasets (not prerequisites).  We then
       iterate over these tuples of related dimensions, identifying the
       subsets that correspond to distinct data IDs for each task and dataset
       type, and then create `_QuantumScaffolding` objects.

    3. In `resolveDatasetRefs`, we run follow-up queries against all of the
       dataset data IDs previously identified, transforming unresolved
       DatasetRefs into resolved DatasetRefs where appropriate.  We then look
       up prerequisite datasets for all quanta.

    4. In `makeQuantumGraph`, we construct a `QuantumGraph` from the lists of
       per-task `_QuantumScaffolding` objects.
    """

    def __init__(self, pipeline, *, registry):
        _LOG.debug("Initializing data structures for QuantumGraph generation.")
        self.tasks = []
        # Aggregate and categorize the DatasetTypes in the Pipeline.
        datasetTypes = PipelineDatasetTypes.fromPipeline(pipeline, registry=registry)
        # Construct dictionaries that map those DatasetTypes to structures
        # that will (later) hold additional information about them.
        for attr in ("initInputs", "initIntermediates", "initOutputs",
                     "inputs", "intermediates", "outputs", "prerequisites"):
            setattr(self, attr, _DatasetDict.fromDatasetTypes(getattr(datasetTypes, attr),
                                                              universe=registry.dimensions))
        # Aggregate all dimensions for all non-init, non-prerequisite
        # DatasetTypes.  These are the ones we'll include in the big join
        # query.
        self.dimensions = self.inputs.dimensions.union(self.intermediates.dimensions,
                                                       self.outputs.dimensions)
        # Construct scaffolding nodes for each Task, and add backreferences
        # to the Task from each DatasetScaffolding node.
        # Note that there's only one scaffolding node for each DatasetType,
        # shared by _PipelineScaffolding and all _TaskScaffoldings that
        # reference it.
        if isinstance(pipeline, Pipeline):
            pipeline = pipeline.toExpandedPipeline()
        self.tasks = [_TaskScaffolding(taskDef=taskDef, parent=self, datasetTypes=taskDatasetTypes)
                      for taskDef, taskDatasetTypes in zip(pipeline,
                                                           datasetTypes.byTask.values())]

    def __repr__(self):
        # The default dataclass-injected __repr__ gets caught in an infinite
        # loop because of back-references.
        return f"_PipelineScaffolding(tasks={self.tasks}, ...)"

    tasks: List[_TaskScaffolding]
    """Scaffolding data structures for each task in the pipeline
    (`list` of `_TaskScaffolding`).
    """

    initInputs: _DatasetDict
    """Datasets consumed but not produced when constructing the tasks in this
    pipeline (`_DatasetDict`).
    """

    initIntermediates: _DatasetDict
    """Datasets that are both consumed and produced when constructing the
    tasks in this pipeline (`_DatasetDict`).
    """

    initOutputs: _DatasetDict
    """Datasets produced but not consumed when constructing the tasks in this
    pipeline (`_DatasetDict`).
    """

    inputs: _DatasetDict
    """Datasets that are consumed but not produced when running this pipeline
    (`_DatasetDict`).
    """

    intermediates: _DatasetDict
    """Datasets that are both produced and consumed when running this pipeline
    (`_DatasetDict`).
    """

    outputs: _DatasetDict
    """Datasets produced but not consumed when running this pipeline
    (`_DatasetDict`).
    """

    prerequisites: _DatasetDict
    """Datasets that are consumed when running this pipeline and looked up
    per-Quantum when generating the graph (`_DatasetDict`).
    """

    dimensions: DimensionGraph
    """All dimensions used by any regular input, intermediate, or output
    (not prerequisite) dataset; the set of dimensions used in the "Big Join
    Query" (`DimensionGraph`).

    This is required to be a superset of all task quantum dimensions.
    """

    @contextmanager
    def connectDataIds(self, registry, collections, userQuery):
        """Query for the data IDs that connect nodes in the `QuantumGraph`.

        This method populates the nested data ID dictionaries of the regular
        input, intermediate, and output `_DatasetDict` attributes and creates
        the `_QuantumScaffolding` objects in `_TaskScaffolding.quanta`
        (prerequisites are handled later, in `resolveDatasetRefs`).

        Parameters
        ----------
        registry : `lsst.daf.butler.Registry`
            Registry for the data repository; used for all data ID queries.
        collections : `lsst.daf.butler.CollectionSearch`
            Object representing the collections to search for input datasets.
        userQuery : `str` or `None`
            User-provided expression to limit the data IDs processed.

        Returns
        -------
        commonDataIds : \
                `lsst.daf.butler.registry.queries.DataCoordinateQueryResults`
            An interface to a database temporary table containing all data IDs
            that will appear in this `QuantumGraph`.  Returned inside a
            context manager, which will drop the temporary table at the end of
            the `with` block in which this method is called.
        """
        _LOG.debug("Building query for data IDs.")
        # Initialization datasets always have empty data IDs.
        emptyDataId = DataCoordinate.makeEmpty(registry.dimensions)
        for datasetType, refs in itertools.chain(self.initInputs.items(),
                                                 self.initIntermediates.items(),
                                                 self.initOutputs.items()):
            refs[emptyDataId] = DatasetRef(datasetType, emptyDataId)
        # Run one big query for the data IDs for task dimensions and regular
        # inputs and outputs.  We limit the query to only dimensions that are
        # associated with the input dataset types, but don't (yet) try to
        # obtain the dataset_ids for those inputs.
        _LOG.debug("Submitting data ID query and materializing results.")
        with registry.queryDataIds(self.dimensions,
                                   datasets=list(self.inputs),
                                   collections=collections,
                                   where=userQuery,
                                   ).materialize() as commonDataIds:
            _LOG.debug("Expanding data IDs.")
            commonDataIds = commonDataIds.expanded()
            _LOG.debug("Iterating over query results to associate quanta with datasets.")
            # Iterate over query results, populating data IDs for datasets
            # and quanta and then connecting them to each other.
            n = -1
            for n, commonDataId in enumerate(commonDataIds):
                # Create DatasetRefs for all DatasetTypes from this result
                # row, noting that we might have created some already.  We
                # remember both those that already existed and those that we
                # create now.
                refsForRow = {}
                for datasetType, refs in itertools.chain(self.inputs.items(),
                                                         self.intermediates.items(),
                                                         self.outputs.items()):
                    datasetDataId = commonDataId.subset(datasetType.dimensions)
                    ref = refs.get(datasetDataId)
                    if ref is None:
                        ref = DatasetRef(datasetType, datasetDataId)
                        refs[datasetDataId] = ref
                    refsForRow[datasetType.name] = ref
                # Create _QuantumScaffolding objects for all tasks from this
                # result row, noting that we might have created some already.
                for task in self.tasks:
                    quantumDataId = commonDataId.subset(task.dimensions)
                    quantum = task.quanta.get(quantumDataId)
                    if quantum is None:
                        quantum = _QuantumScaffolding(task=task, dataId=quantumDataId)
                        task.quanta[quantumDataId] = quantum
                    # Whether this is a new quantum or an existing one, we can
                    # now associate the DatasetRefs for this row with it.  The
                    # fact that a Quantum data ID and a dataset data ID both
                    # came from the same result row is what tells us they
                    # should be associated.  Many of these associations will
                    # be duplicates (because another query row that differed
                    # from this one only in irrelevant dimensions already
                    # added them); keying the nested dicts by data ID makes
                    # those duplicates harmless.
                    for datasetType in task.inputs:
                        ref = refsForRow[datasetType.name]
                        quantum.inputs[datasetType.name][ref.dataId] = ref
                    for datasetType in task.outputs:
                        ref = refsForRow[datasetType.name]
                        quantum.outputs[datasetType.name][ref.dataId] = ref
            _LOG.debug("Finished processing %d rows from data ID query.", n + 1)
            yield commonDataIds
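
    # Because connectDataIds is a @contextmanager generator, callers must use
    # it in a `with` statement (sketch); the temporary table behind
    # commonDataIds is dropped when the block exits, so all follow-up queries
    # must happen inside it:
    #
    #     with scaffolding.connectDataIds(registry, collections, query) as ids:
    #         scaffolding.resolveDatasetRefs(registry, collections, run, ids)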

    def resolveDatasetRefs(self, registry, collections, run, commonDataIds, *, skipExisting=True):
        """Perform follow-up queries for each dataset data ID produced in
        `connectDataIds`.

        This method resolves the `DatasetRef` instances previously stored in
        the nested `_DatasetDict` dictionaries (except for those in
        `prerequisites`), and looks up prerequisite datasets for each quantum.

        Parameters
        ----------
        registry : `lsst.daf.butler.Registry`
            Registry for the data repository; used for all data ID queries.
        collections : `lsst.daf.butler.CollectionSearch`
            Object representing the collections to search for input datasets.
        run : `str`, optional
            Name of the `~lsst.daf.butler.CollectionType.RUN` collection for
            output datasets, if it already exists.
        commonDataIds : \
                `lsst.daf.butler.registry.queries.DataCoordinateQueryResults`
            Result of a previous call to `connectDataIds`.
        skipExisting : `bool`, optional
            If `True` (default), a Quantum is not created if all its outputs
            already exist in ``run``.  Ignored if ``run`` is `None`.

        Raises
        ------
        OutputExistsError
            Raised if an output dataset already exists in the output run
            and ``skipExisting`` is `False`.  The case where some but not all
            of a quantum's outputs are present and ``skipExisting`` is `True`
            cannot be identified at this stage; it is handled later in this
            method, when each quantum's outputs are checked.
        """
        # Look up [init] intermediate and output datasets in the output
        # collection, if there is an output collection.
        if run is not None:
            for datasetType, refs in itertools.chain(self.initIntermediates.items(),
                                                     self.initOutputs.items(),
                                                     self.intermediates.items(),
                                                     self.outputs.items()):
                _LOG.debug("Resolving %d datasets for intermediate and/or output dataset %s.",
                           len(refs), datasetType.name)
                isInit = datasetType in self.initIntermediates or datasetType in self.initOutputs
                resolvedRefQueryResults = commonDataIds.subset(
                    datasetType.dimensions,
                    unique=True
                ).findDatasets(
                    datasetType,
                    collections=run,
                    deduplicate=True
                )
                for resolvedRef in resolvedRefQueryResults:
                    # TODO: we could easily support per-DatasetType
                    # skipExisting, and I could imagine that being useful;
                    # it's probably required in order to support writing
                    # initOutputs before QuantumGraph generation.
                    assert resolvedRef.dataId in refs
                    if skipExisting or isInit:
                        refs[resolvedRef.dataId] = resolvedRef
                    else:
                        raise OutputExistsError(f"Output dataset {datasetType.name} already exists in "
                                                f"output RUN collection '{run}' with data ID"
                                                f" {resolvedRef.dataId}.")
        # Look up input and initInput datasets in the input collection(s).
        for datasetType, refs in itertools.chain(self.initInputs.items(), self.inputs.items()):
            _LOG.debug("Resolving %d datasets for input dataset %s.", len(refs), datasetType.name)
            resolvedRefQueryResults = commonDataIds.subset(
                datasetType.dimensions,
                unique=True
            ).findDatasets(
                datasetType,
                collections=collections,
                deduplicate=True
            )
            dataIdsNotFoundYet = set(refs.keys())
            for resolvedRef in resolvedRefQueryResults:
                dataIdsNotFoundYet.discard(resolvedRef.dataId)
                refs[resolvedRef.dataId] = resolvedRef
            if dataIdsNotFoundYet:
                raise RuntimeError(
                    f"{len(dataIdsNotFoundYet)} dataset(s) of type "
                    f"'{datasetType.name}' was/were present in a previous "
                    f"query, but could not be found now.  This is either a "
                    f"logic bug in QuantumGraph generation or the input "
                    f"collections have been modified since QuantumGraph "
                    f"generation began."
                )
        # Copy the resolved DatasetRefs to the _QuantumScaffolding objects,
        # replacing the unresolved refs there, and then look up prerequisites.
        for task in self.tasks:
            _LOG.debug(
                "Applying resolutions and finding prerequisites for %d quanta of task with label '%s'.",
                len(task.quanta),
                task.taskDef.label
            )
            lookupFunctions = {
                c.name: c.lookupFunction
                for c in iterConnections(task.taskDef.connections, "prerequisiteInputs")
                if c.lookupFunction is not None
            }
            dataIdsToSkip = []
            for quantum in task.quanta.values():
                # Process output datasets only if there is a run to look for
                # outputs in and skipExisting is True.  Note that if
                # skipExisting is False, any output datasets that already
                # exist would have already caused an exception to be raised.
                # We never update the DatasetRefs in the quantum because
                # those should never be resolved.
                if run is not None and skipExisting:
                    resolvedRefs = []
                    unresolvedRefs = []
                    for datasetType, originalRefs in quantum.outputs.items():
                        for ref in task.outputs.extract(datasetType, originalRefs.keys()):
                            if ref.id is not None:
                                resolvedRefs.append(ref)
                            else:
                                unresolvedRefs.append(ref)
                    if resolvedRefs:
                        if unresolvedRefs:
                            raise OutputExistsError(
                                f"Quantum {quantum.dataId} of task with label "
                                f"'{quantum.task.taskDef.label}' has some outputs that exist "
                                f"({resolvedRefs}) "
                                f"and others that don't ({unresolvedRefs})."
                            )
                        else:
                            # All outputs are already present; skip this
                            # quantum and continue to the next.
                            dataIdsToSkip.append(quantum.dataId)
                            continue
                # Update the input DatasetRefs to the resolved ones we
                # already searched for.
                for datasetType, refs in quantum.inputs.items():
                    for ref in task.inputs.extract(datasetType, refs.keys()):
                        refs[ref.dataId] = ref
                # Look up prerequisite datasets in the input collection(s).
                # These may have dimensions that extend beyond those we
                # queried for originally, because we want to permit those
                # data ID values to differ across quanta and dataset types.
                for datasetType in task.prerequisites:
                    lookupFunction = lookupFunctions.get(datasetType.name)
                    if lookupFunction is not None:
                        # PipelineTask has provided its own function to do the
                        # lookup.  This always takes precedence.
                        refs = list(
                            lookupFunction(datasetType, registry, quantum.dataId, collections)
                        )
                    elif (datasetType.isCalibration()
                            and datasetType.dimensions <= quantum.dataId.graph
                            and quantum.dataId.graph.temporal):
                        # This is a master calibration lookup, which we have
                        # to handle specially because the query system can't
                        # do a temporal join on a non-dimension-based timespan
                        # yet.
                        timespan = quantum.dataId.timespan
                        try:
                            refs = [registry.findDataset(datasetType, quantum.dataId,
                                                         collections=collections,
                                                         timespan=timespan)]
                        except KeyError:
                            # This dataset type is not present in the
                            # registry, which just means there are no
                            # datasets here.
                            refs = []
                    else:
                        # Most general case.
                        refs = list(registry.queryDatasets(datasetType,
                                                           collections=collections,
                                                           dataId=quantum.dataId,
                                                           deduplicate=True).expanded())
                    quantum.prerequisites[datasetType].update({ref.dataId: ref for ref in refs
                                                               if ref is not None})
            # Actually remove any quanta that we decided to skip above.
            if dataIdsToSkip:
                _LOG.debug("Pruning %d quanta for task with label '%s' because all of their "
                           "outputs exist.",
                           len(dataIdsToSkip), task.taskDef.label)
                for dataId in dataIdsToSkip:
                    del task.quanta[dataId]
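
    # Sketch of a connection-provided prerequisite lookup function; the name
    # `lookupRefCats` is hypothetical, but the call signature matches the
    # `lookupFunction(datasetType, registry, quantum.dataId, collections)`
    # invocation above:
    #
    #     def lookupRefCats(datasetType, registry, quantumDataId, collections):
    #         return registry.queryDatasets(datasetType, collections=collections,
    #                                       dataId=quantumDataId, deduplicate=True)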

    def makeQuantumGraph(self):
        """Create a `QuantumGraph` from the quanta already present in
        the scaffolding data structure.

        Returns
        -------
        graph : `QuantumGraph`
            The full `QuantumGraph`.
        """
        graph = QuantumGraph({task.taskDef: task.makeQuantumSet() for task in self.tasks})
        return graph


class _InstrumentFinder(TreeVisitor):
    """Implementation of TreeVisitor that looks for instrument names in a
    query expression.

    An instrument should be specified as a boolean expression of the form

        instrument = 'string'
        'string' = instrument

    so we only need to find a binary operator where the operator is "=",
    one side is a string literal, and the other side is an identifier.
    All visit methods return a tuple of (type, value); non-useful nodes
    return None for both type and value.
    """
    def __init__(self):
        self.instruments = []

    def visitNumericLiteral(self, value, node):
        # do not care about numbers
        return (None, None)

    def visitStringLiteral(self, value, node):
        # return type and value
        return ("str", value)

    def visitTimeLiteral(self, value, node):
        # do not care about these
        return (None, None)

    def visitRangeLiteral(self, start, stop, stride, node):
        # do not care about these
        return (None, None)

    def visitIdentifier(self, name, node):
        if name.lower() == "instrument":
            return ("id", "instrument")
        return (None, None)

    def visitUnaryOp(self, operator, operand, node):
        # do not care about these
        return (None, None)

    def visitBinaryOp(self, operator, lhs, rhs, node):
        if operator == "=":
            if lhs == ("id", "instrument") and rhs[0] == "str":
                self.instruments.append(rhs[1])
            elif rhs == ("id", "instrument") and lhs[0] == "str":
                self.instruments.append(lhs[1])
        return (None, None)

    def visitIsIn(self, lhs, values, not_in, node):
        # do not care about these
        return (None, None)

    def visitParens(self, expression, node):
        # do not care about these
        return (None, None)


def _findInstruments(queryStr):
    """Get the names of any instrument named in the query string by searching
    for "instrument = <value>" and similar patterns.

    Parameters
    ----------
    queryStr : `str` or `None`
        The query string to search, or `None` if there is no query.

    Returns
    -------
    instruments : `list` [`str`]
        The list of instrument names found in the query.

    Raises
    ------
    ValueError
        If the query expression cannot be parsed.
    """
    if not queryStr:
        return []
    parser = ParserYacc()
    finder = _InstrumentFinder()
    try:
        tree = parser.parse(queryStr)
    except ParseError as exc:
        raise ValueError(f"failed to parse query expression: {queryStr}") from exc
    tree.visit(finder)
    return finder.instruments
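
# Example (illustrative; requires the butler expression parser):
#
#     >>> _findInstruments("instrument = 'HSC' AND visit > 100")
#     ['HSC']
#     >>> _findInstruments("visit > 100")
#     []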


# ------------------------
#  Exported definitions --
# ------------------------


class GraphBuilderError(Exception):
    """Base class for exceptions generated by graph builder.
    """
    pass


class OutputExistsError(GraphBuilderError):
    """Exception generated when output datasets already exist.
    """
    pass


class PrerequisiteMissingError(GraphBuilderError):
    """Exception generated when a prerequisite dataset does not exist.
    """
    pass


class GraphBuilder(object):
    """GraphBuilder class is responsible for building a task execution graph
    from a Pipeline.

    Parameters
    ----------
    registry : `~lsst.daf.butler.Registry`
        Registry for the data repository; used for all data ID queries.
    skipExisting : `bool`, optional
        If `True` (default), a Quantum is not created if all its outputs
        already exist.
    """

    def __init__(self, registry, skipExisting=True):
        self.registry = registry
        self.dimensions = registry.dimensions
        self.skipExisting = skipExisting

    def makeGraph(self, pipeline, collections, run, userQuery):
        """Create an execution graph for a pipeline.

        Parameters
        ----------
        pipeline : `Pipeline`
            Pipeline definition, task names/classes and their configs.
        collections : `lsst.daf.butler.CollectionSearch`
            Object representing the collections to search for input datasets.
        run : `str`, optional
            Name of the `~lsst.daf.butler.CollectionType.RUN` collection for
            output datasets, if it already exists.
        userQuery : `str`
            String that defines a user-provided selection for the registry;
            should be empty or `None` if there are no restrictions on data
            selection.

        Returns
        -------
        graph : `QuantumGraph`
            The constructed `QuantumGraph`.

        Raises
        ------
        UserExpressionError
            Raised when the user expression cannot be parsed.
        OutputExistsError
            Raised when output datasets already exist.
        Exception
            Other exception types may be raised by underlying registry
            classes.
        """
        scaffolding = _PipelineScaffolding(pipeline, registry=self.registry)

        instrument = pipeline.getInstrument()
        if isinstance(instrument, str):
            instrument = doImport(instrument)
        instrumentName = instrument.getName() if instrument else None
        userQuery = self._verifyInstrumentRestriction(instrumentName, userQuery)

        with scaffolding.connectDataIds(self.registry, collections, userQuery) as commonDataIds:
            scaffolding.resolveDatasetRefs(self.registry, collections, run, commonDataIds,
                                           skipExisting=self.skipExisting)
        return scaffolding.makeQuantumGraph()
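
    # Typical use (sketch; the butler registry, expanded pipeline, input
    # collections, and run collection name are assumed to exist already):
    #
    #     builder = GraphBuilder(butler.registry, skipExisting=True)
    #     qgraph = builder.makeGraph(pipeline, collections,
    #                                run="u/someone/run",
    #                                userQuery="visit = 12345")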

    @staticmethod
    def _verifyInstrumentRestriction(instrumentName, query):
        """Add an instrument restriction to the query if it does not have
        one, and verify that any instrument restriction already in the query
        matches the given instrument name.

        Parameters
        ----------
        instrumentName : `str` or `None`
            The name of the instrument that should appear in the query, or
            `None`/empty if the pipeline does not name an instrument.
        query : `str`
            The query string.

        Returns
        -------
        query : `str`
            The query string with the instrument added to it if needed.

        Raises
        ------
        RuntimeError
            If the pipeline names an instrument and the query contains more
            than one instrument, or the name of the instrument in the query
            does not match the instrument named by the pipeline.
        """
        if not instrumentName:
            return query
        queryInstruments = _findInstruments(query)
        if len(queryInstruments) > 1:
            raise RuntimeError(f"When the pipeline has an instrument (\"{instrumentName}\") the query "
                               "must have zero instruments or one instrument that matches the pipeline. "
                               f"Found these instruments in the query: {queryInstruments}.")
        if not queryInstruments:
            # There is not an instrument in the query, add it:
            restriction = f"instrument = '{instrumentName}'"
            _LOG.debug(f"Adding restriction \"{restriction}\" to query.")
            query = f"{restriction} AND ({query})" if query else restriction  # (there may not be a query)
        elif queryInstruments[0] != instrumentName:
            # Since there is an instrument in the query, it should match
            # the instrument in the pipeline.
            raise RuntimeError(f"The instrument named in the query (\"{queryInstruments[0]}\") does not "
                               f"match the instrument named by the pipeline (\"{instrumentName}\")")
        return query
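
    # Examples (illustrative):
    #
    #     >>> GraphBuilder._verifyInstrumentRestriction("HSC", "visit > 100")
    #     "instrument = 'HSC' AND (visit > 100)"
    #     >>> GraphBuilder._verifyInstrumentRestriction("HSC", "")
    #     "instrument = 'HSC'"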