Coverage for python/lsst/ctrl/mpexec/singleQuantumExecutor.py: 9%
265 statements
« prev ^ index » next coverage.py v7.2.5, created at 2023-05-05 10:48 +0000
« prev ^ index » next coverage.py v7.2.5, created at 2023-05-05 10:48 +0000
1# This file is part of ctrl_mpexec.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (http://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <http://www.gnu.org/licenses/>.
22__all__ = ["SingleQuantumExecutor"]
24# -------------------------------
25# Imports of standard modules --
26# -------------------------------
27import logging
28import os
29import sys
30import time
31import warnings
32from collections import defaultdict
33from collections.abc import Callable
34from itertools import chain
35from typing import Any, Optional, Union
37from lsst.daf.butler import (
38 Butler,
39 DatasetRef,
40 DatasetType,
41 LimitedButler,
42 NamedKeyDict,
43 Quantum,
44 UnresolvedRefWarning,
45)
46from lsst.pipe.base import (
47 AdjustQuantumHelper,
48 ButlerQuantumContext,
49 Instrument,
50 InvalidQuantumError,
51 NoWorkFound,
52 PipelineTask,
53 RepeatableQuantumError,
54 TaskDef,
55 TaskFactory,
56)
57from lsst.pipe.base.configOverrides import ConfigOverrides
59# During metadata transition phase, determine metadata class by
60# asking pipe_base
61from lsst.pipe.base.task import _TASK_FULL_METADATA_TYPE, _TASK_METADATA_TYPE
62from lsst.utils.timer import logInfo
64# -----------------------------
65# Imports for other modules --
66# -----------------------------
67from .cli.utils import _PipelineAction
68from .log_capture import LogCapture
69from .mock_task import MockButlerQuantumContext, MockPipelineTask
70from .quantumGraphExecutor import QuantumExecutor
71from .reports import QuantumReport
73# ----------------------------------
74# Local non-exported definitions --
75# ----------------------------------
77_LOG = logging.getLogger(__name__)
class SingleQuantumExecutor(QuantumExecutor):
    """Executor class which runs one Quantum at a time.

    Parameters
    ----------
    butler : `~lsst.daf.butler.Butler` or `None`
        Data butler, `None` means that Quantum-backed butler should be used
        instead.
    taskFactory : `~lsst.pipe.base.TaskFactory`
        Instance of a task factory.
    skipExistingIn : `list` [ `str` ], optional
        Accepts list of collections, if all Quantum outputs already exist in
        the specified list of collections then that Quantum will not be rerun.
        If `None` then butler output RUN collection is searched for existing
        datasets. If empty list then there no check for existing outputs (which
        could result in conflicts when datasets are stored).
    clobberOutputs : `bool`, optional
        If `True`, then existing quantum outputs in output run collection will
        be removed prior to executing a quantum. If ``skipExistingIn`` is
        defined, only partial outputs from failed quanta will be overwritten
        (see notes). Only used when ``butler`` is not `None`.
    enableLsstDebug : `bool`, optional
        Enable debugging with ``lsstDebug`` facility for a task.
    exitOnKnownError : `bool`, optional
        If `True`, call `sys.exit` with the appropriate exit code for special
        known exceptions, after printing a traceback, instead of letting the
        exception propagate up to calling. This is always the behavior for
        InvalidQuantumError.
    mock : `bool`, optional
        If `True` then mock task execution.
    mock_configs : `list` [ `_PipelineAction` ], optional
        Optional config overrides for mock tasks.
    limited_butler_factory : `Callable`, optional
        A method that creates a `~lsst.daf.butler.LimitedButler` instance
        for a given Quantum. This parameter must be defined if ``butler`` is
        `None`. If ``butler`` is not `None` then this parameter is ignored.

    Notes
    -----
    There is a non-trivial interaction between ``skipExistingIn`` and
    ``clobberOutputs`` arguments. Here is how they work together:

    - If ``skipExistingIn`` is specified (or `None`) then those collections
      are searched for quantum output datasets. If all outputs are found, then
      quantum is not executed and `run` completes successfully.
    - Otherwise if ``clobberOutputs`` is `True` then butler output RUN
      collection is checked for existing quantum outputs. If full or partial
      outputs are found, they are pruned and quantum is executed.
    - Otherwise if ``clobberOutputs`` is `False` then butler output RUN
      collection is checked for existing quantum outputs. If any output
      dataset is found an exception is raised.

    This leaves the case when partial quantum outputs may be found in
    ``skipExistingIn`` but that list does not include butler RUN collection.
    Those partial outputs are not pruned.
    """
    def __init__(
        self,
        butler: Butler | None,
        taskFactory: TaskFactory,
        skipExistingIn: list[str] | None = None,
        clobberOutputs: bool = False,
        enableLsstDebug: bool = False,
        exitOnKnownError: bool = False,
        mock: bool = False,
        mock_configs: list[_PipelineAction] | None = None,
        limited_butler_factory: Callable[[Quantum], LimitedButler] | None = None,
    ):
        # Parameters are documented in the class docstring.
        self.butler = butler
        self.taskFactory = taskFactory
        self.skipExistingIn = skipExistingIn
        self.enableLsstDebug = enableLsstDebug
        self.clobberOutputs = clobberOutputs
        self.exitOnKnownError = exitOnKnownError
        self.mock = mock
        # Normalize to an empty list so later iteration needs no None check.
        self.mock_configs = mock_configs if mock_configs is not None else []
        self.limited_butler_factory = limited_butler_factory
        # Set by execute(); returned by getReport().
        self.report: Optional[QuantumReport] = None

        if self.butler is None:
            # Developer sanity checks (note: asserts are stripped under -O).
            assert not self.mock, "Mock execution only possible with full butler"
            assert limited_butler_factory is not None, "limited_butler_factory is needed when butler is None"
164 def execute(self, taskDef: TaskDef, quantum: Quantum) -> Quantum:
165 # Docstring inherited from QuantumExecutor.execute
166 assert quantum.dataId is not None, "Quantum DataId cannot be None"
168 if self.butler is not None:
169 self.butler.registry.refresh()
171 # Catch any exception and make a report based on that.
172 try:
173 result = self._execute(taskDef, quantum)
174 self.report = QuantumReport(dataId=quantum.dataId, taskLabel=taskDef.label)
175 return result
176 except Exception as exc:
177 self.report = QuantumReport.from_exception(
178 exception=exc,
179 dataId=quantum.dataId,
180 taskLabel=taskDef.label,
181 )
182 raise
184 def _resolve_ref(self, ref: DatasetRef, collections: Any = None) -> DatasetRef | None:
185 """Return resolved reference.
187 Parameters
188 ----------
189 ref : `DatasetRef`
190 Input reference, can be either resolved or unresolved.
191 collections :
192 Collections to search for the existing reference, only used when
193 running with full butler.
195 Notes
196 -----
197 When running with Quantum-backed butler it assumes that reference is
198 already resolved and returns input references without any checks. When
199 running with full butler, it always searches registry fof a reference
200 in specified collections, even if reference is already resolved.
201 """
202 if self.butler is not None:
203 # If running with full butler, need to re-resolve it in case
204 # collections are different.
205 with warnings.catch_warnings():
206 warnings.simplefilter("ignore", category=UnresolvedRefWarning)
207 ref = ref.unresolved()
208 return self.butler.registry.findDataset(ref.datasetType, ref.dataId, collections=collections)
209 else:
210 # In case of QBB all refs must be resolved already, do not check.
211 return ref
    def _execute(self, taskDef: TaskDef, quantum: Quantum) -> Quantum:
        """Internal implementation of execute().

        Handles skip/clobber checks, input re-resolution, task construction,
        metadata bookkeeping, and log capture around the actual task run.
        Returns the (possibly updated) quantum that was executed.
        """
        startTime = time.time()

        # Make a limited butler instance if needed (which should be QBB if full
        # butler is not defined).
        limited_butler: LimitedButler
        if self.butler is not None:
            limited_butler = self.butler
        else:
            # We check this in constructor, but mypy needs this check here.
            assert self.limited_butler_factory is not None
            limited_butler = self.limited_butler_factory(quantum)

        # Route captured task logs to whichever butler flavor we have.
        if self.butler is not None:
            log_capture = LogCapture.from_full(self.butler)
        else:
            log_capture = LogCapture.from_limited(limited_butler)
        with log_capture.capture_logging(taskDef, quantum) as captureLog:
            # Save detailed resource usage before task start to metadata.
            quantumMetadata = _TASK_METADATA_TYPE()
            logInfo(None, "prep", metadata=quantumMetadata)  # type: ignore[arg-type]

            # Check whether to skip or delete old outputs; if it returns True
            # or raises an exception do not try to store logs, as they may be
            # already in butler.
            captureLog.store = False
            if self.checkExistingOutputs(quantum, taskDef, limited_butler):
                _LOG.info(
                    "Skipping already-successful quantum for label=%s dataId=%s.",
                    taskDef.label,
                    quantum.dataId,
                )
                return quantum
            captureLog.store = True

            try:
                quantum = self.updatedQuantumInputs(quantum, taskDef, limited_butler)
            except NoWorkFound as exc:
                _LOG.info(
                    "Nothing to do for task '%s' on quantum %s; saving metadata and skipping: %s",
                    taskDef.label,
                    quantum.dataId,
                    str(exc),
                )
                # Make empty metadata that looks something like what a
                # do-nothing task would write (but we don't bother with empty
                # nested PropertySets for subtasks). This is slightly
                # duplicative with logic in pipe_base that we can't easily call
                # from here; we'll fix this on DM-29761.
                logInfo(None, "end", metadata=quantumMetadata)  # type: ignore[arg-type]
                fullMetadata = _TASK_FULL_METADATA_TYPE()
                fullMetadata[taskDef.label] = _TASK_METADATA_TYPE()
                fullMetadata["quantum"] = quantumMetadata
                self.writeMetadata(quantum, fullMetadata, taskDef, limited_butler)
                return quantum

            # Enable lsstDebug debugging; importing the user-supplied debug
            # module is all that is needed to activate it.
            if self.enableLsstDebug:
                try:
                    _LOG.debug("Will try to import debug.py")
                    import debug  # type: ignore # noqa:F401
                except ImportError:
                    _LOG.warn("No 'debug' module found.")

            # Initialize global state (e.g. per-instrument singletons).
            self.initGlobals(quantum)

            # Ensure that we are executing a frozen config.
            taskDef.config.freeze()
            logInfo(None, "init", metadata=quantumMetadata)  # type: ignore[arg-type]
            # Init-inputs must be resolvable before the task can be built.
            init_input_refs = []
            for ref in quantum.initInputs.values():
                resolved = self._resolve_ref(ref)
                if resolved is None:
                    raise ValueError(f"Failed to resolve init input reference {ref}")
                init_input_refs.append(resolved)
            task = self.taskFactory.makeTask(taskDef, limited_butler, init_input_refs)
            logInfo(None, "start", metadata=quantumMetadata)  # type: ignore[arg-type]
            try:
                if self.mock:
                    # Use mock task instance to execute method.
                    runTask = self._makeMockTask(taskDef)
                else:
                    runTask = task
                self.runQuantum(runTask, quantum, taskDef, limited_butler)
            except Exception as e:
                _LOG.error(
                    "Execution of task '%s' on quantum %s failed. Exception %s: %s",
                    taskDef.label,
                    quantum.dataId,
                    e.__class__.__name__,
                    str(e),
                )
                raise
            logInfo(None, "end", metadata=quantumMetadata)  # type: ignore[arg-type]
            # Metadata is always taken from the real task, even in mock mode.
            fullMetadata = task.getFullMetadata()
            fullMetadata["quantum"] = quantumMetadata
            self.writeMetadata(quantum, fullMetadata, taskDef, limited_butler)
        stopTime = time.time()
        _LOG.info(
            "Execution of task '%s' on quantum %s took %.3f seconds",
            taskDef.label,
            quantum.dataId,
            stopTime - startTime,
        )
        return quantum
321 def _makeMockTask(self, taskDef: TaskDef) -> PipelineTask:
322 """Make an instance of mock task for given TaskDef."""
323 # Make config instance and apply overrides
324 overrides = ConfigOverrides()
325 for action in self.mock_configs:
326 if action.label == taskDef.label + "-mock":
327 if action.action == "config":
328 key, _, value = action.value.partition("=")
329 overrides.addValueOverride(key, value)
330 elif action.action == "configfile":
331 overrides.addFileOverride(os.path.expandvars(action.value))
332 else:
333 raise ValueError(f"Unexpected action for mock task config overrides: {action}")
334 config = MockPipelineTask.ConfigClass()
335 overrides.applyTo(config)
337 task = MockPipelineTask(config=config, name=taskDef.label)
338 return task
    def checkExistingOutputs(self, quantum: Quantum, taskDef: TaskDef, limited_butler: LimitedButler) -> bool:
        """Decide whether this quantum needs to be executed.

        If only partial outputs exist then they are removed if
        ``clobberOutputs`` is True, otherwise an exception is raised.

        Parameters
        ----------
        quantum : `~lsst.daf.butler.Quantum`
            Quantum to check for existing outputs
        taskDef : `~lsst.pipe.base.TaskDef`
            Task definition structure.
        limited_butler : `~lsst.daf.butler.LimitedButler`
            Butler used to check datastore existence of outputs.

        Returns
        -------
        exist : `bool`
            `True` if ``self.skipExistingIn`` is defined, and a previous
            execution of this quanta appears to have completed successfully
            (either because metadata was written or all datasets were written).
            `False` otherwise.

        Raises
        ------
        RuntimeError
            Raised if some outputs exist and some not.
        """
        if self.skipExistingIn and taskDef.metadataDatasetName is not None:
            # Metadata output exists; this is sufficient to assume the previous
            # run was successful and should be skipped.
            [metadata_ref] = quantum.outputs[taskDef.metadataDatasetName]
            ref = self._resolve_ref(metadata_ref, self.skipExistingIn)
            if ref is not None:
                if limited_butler.datastore.exists(ref):
                    return True

        # Previously we always checked for existing outputs in `butler.run`,
        # now logic gets more complicated as we only want to skip quantum
        # whose outputs exist in `self.skipExistingIn` but pruning should only
        # be done for outputs existing in `butler.run`.

        def findOutputs(
            collections: Optional[Union[str, list[str]]]
        ) -> tuple[list[DatasetRef], list[DatasetRef]]:
            """Find quantum outputs in specified collections.

            Returns a pair of lists: refs that exist in the datastore and refs
            (in their original Quantum form) that do not.
            """
            existingRefs = []
            missingRefs = []
            for datasetRefs in quantum.outputs.values():
                checkRefs: list[DatasetRef] = []
                # Map registry-resolved refs back to the Quantum's own refs so
                # missing outputs are reported in terms of the Quantum.
                registryRefToQuantumRef: dict[DatasetRef, DatasetRef] = {}
                for datasetRef in datasetRefs:
                    ref = self._resolve_ref(datasetRef, collections)
                    if ref is None:
                        missingRefs.append(datasetRef)
                    else:
                        checkRefs.append(ref)
                        registryRefToQuantumRef[ref] = datasetRef

                # More efficient to ask the datastore in bulk for ref
                # existence rather than one at a time.
                existence = limited_butler.datastore.mexists(checkRefs)
                for ref, exists in existence.items():
                    if exists:
                        existingRefs.append(ref)
                    else:
                        missingRefs.append(registryRefToQuantumRef[ref])
            return existingRefs, missingRefs

        # If skipExistingIn is None this will search in butler.run.
        existingRefs, missingRefs = findOutputs(self.skipExistingIn)
        if self.skipExistingIn:
            if existingRefs and not missingRefs:
                # Everything is already there, and we do not clobber complete
                # outputs if skipExistingIn is specified.
                return True

        # If we are to re-run quantum then prune datasets that exists in
        # output run collection, only if `self.clobberOutputs` is set,
        # that only works when we have full butler.
        if self.butler is not None:
            # Look at butler run instead of skipExistingIn collections.
            existingRefs, missingRefs = findOutputs(self.butler.run)
            if existingRefs and missingRefs:
                _LOG.debug(
                    "Partial outputs exist for task %s dataId=%s collection=%s "
                    "existingRefs=%s missingRefs=%s",
                    taskDef,
                    quantum.dataId,
                    self.butler.run,
                    existingRefs,
                    missingRefs,
                )
                if self.clobberOutputs:
                    # only prune
                    _LOG.info("Removing partial outputs for task %s: %s", taskDef, existingRefs)
                    self.butler.pruneDatasets(existingRefs, disassociate=True, unstore=True, purge=True)
                    return False
                else:
                    raise RuntimeError(
                        "Registry inconsistency while checking for existing outputs:"
                        f" collection={self.butler.run} existingRefs={existingRefs}"
                        f" missingRefs={missingRefs}"
                    )
            elif existingRefs and self.clobberOutputs and not self.skipExistingIn:
                # Clobber complete outputs if skipExistingIn is not specified.
                _LOG.info("Removing complete outputs for task %s: %s", taskDef, existingRefs)
                self.butler.pruneDatasets(existingRefs, disassociate=True, unstore=True, purge=True)
                return False

        # need to re-run
        return False
    def updatedQuantumInputs(
        self, quantum: Quantum, taskDef: TaskDef, limited_butler: LimitedButler
    ) -> Quantum:
        """Update quantum with extra information, returns a new updated
        Quantum.

        Some methods may require input DatasetRefs to have non-None
        ``dataset_id``, but in case of intermediate dataset it may not be
        filled during QuantumGraph construction. This method will retrieve
        missing info from registry.

        Parameters
        ----------
        quantum : `~lsst.daf.butler.Quantum`
            Single Quantum instance.
        taskDef : `~lsst.pipe.base.TaskDef`
            Task definition structure.
        limited_butler : `~lsst.daf.butler.LimitedButler`
            Butler used to check datastore existence of inputs.

        Returns
        -------
        update : `~lsst.daf.butler.Quantum`
            Updated Quantum instance

        Raises
        ------
        NoWorkFound
            May be raised (propagated from ``adjust_in_place``) if removed
            inputs leave the task with nothing to do.
        """
        anyChanges = False
        updatedInputs: defaultdict[DatasetType, list] = defaultdict(list)
        for key, refsForDatasetType in quantum.inputs.items():
            newRefsForDatasetType = updatedInputs[key]
            for ref in refsForDatasetType:
                # Inputs may already be resolved even if they do not exist, but
                # we have to re-resolve them because IDs are ignored on output.
                # Check datastore for existence first to cover calibration
                # dataset types, as they would need a timespan for findDataset.
                resolvedRef: DatasetRef | None
                checked_datastore = False
                if ref.id is not None and limited_butler.datastore.exists(ref):
                    resolvedRef = ref
                    checked_datastore = True
                elif self.butler is not None:
                    # In case of full butler try to (re-)resolve it.
                    resolvedRef = self._resolve_ref(ref)
                    if resolvedRef is None:
                        _LOG.info("No dataset found for %s", ref)
                        continue
                    else:
                        _LOG.debug("Updated dataset ID for %s", ref)
                else:
                    # QBB with missing intermediate
                    _LOG.info("No dataset found for %s", ref)
                    continue

                # In case of mock execution we check that mock dataset exists
                # instead. Mock execution is only possible with full butler.
                if self.mock and self.butler is not None:
                    try:
                        typeName, component = ref.datasetType.nameAndComponent()
                        if component is not None:
                            mockDatasetTypeName = MockButlerQuantumContext.mockDatasetTypeName(typeName)
                        else:
                            mockDatasetTypeName = MockButlerQuantumContext.mockDatasetTypeName(
                                ref.datasetType.name
                            )

                        mockDatasetType = self.butler.registry.getDatasetType(mockDatasetTypeName)
                    except KeyError:
                        # means that mock dataset type is not there and this
                        # should be a pre-existing dataset
                        _LOG.debug("No mock dataset type for %s", ref)
                        if self.butler.datastore.exists(resolvedRef):
                            newRefsForDatasetType.append(resolvedRef)
                    else:
                        resolvedMockRef = self.butler.registry.findDataset(
                            mockDatasetType, ref.dataId, collections=self.butler.collections
                        )
                        _LOG.debug(
                            "mockRef=(%s, %s) resolvedMockRef=%s",
                            mockDatasetType,
                            ref.dataId,
                            resolvedMockRef,
                        )
                        if resolvedMockRef is not None and self.butler.datastore.exists(resolvedMockRef):
                            _LOG.debug("resolvedMockRef dataset exists")
                            # Note: the original (non-mock) ref is recorded.
                            newRefsForDatasetType.append(resolvedRef)
                elif checked_datastore or limited_butler.datastore.exists(resolvedRef):
                    # We need to ask datastore if the dataset actually exists
                    # because the Registry of a local "execution butler"
                    # cannot know this (because we prepopulate it with all of
                    # the datasets that might be created).
                    newRefsForDatasetType.append(resolvedRef)

            if len(newRefsForDatasetType) != len(refsForDatasetType):
                anyChanges = True
        # If we removed any input datasets, let the task check if it has enough
        # to proceed and/or prune related datasets that it also doesn't
        # need/produce anymore. It will raise NoWorkFound if it can't run,
        # which we'll let propagate up. This is exactly what we run during QG
        # generation, because a task shouldn't care whether an input is missing
        # because some previous task didn't produce it, or because it just
        # wasn't there during QG generation.
        namedUpdatedInputs = NamedKeyDict[DatasetType, list[DatasetRef]](updatedInputs.items())
        helper = AdjustQuantumHelper(namedUpdatedInputs, quantum.outputs)
        if anyChanges:
            assert quantum.dataId is not None, "Quantum DataId cannot be None"
            helper.adjust_in_place(taskDef.connections, label=taskDef.label, data_id=quantum.dataId)
        return Quantum(
            taskName=quantum.taskName,
            taskClass=quantum.taskClass,
            dataId=quantum.dataId,
            initInputs=quantum.initInputs,
            inputs=helper.inputs,
            outputs=helper.outputs,
        )
563 def runQuantum(
564 self, task: PipelineTask, quantum: Quantum, taskDef: TaskDef, limited_butler: LimitedButler
565 ) -> None:
566 """Execute task on a single quantum.
568 Parameters
569 ----------
570 task : `~lsst.pipe.base.PipelineTask`
571 Task object.
572 quantum : `~lsst.daf.butler.Quantum`
573 Single Quantum instance.
574 taskDef : `~lsst.pipe.base.TaskDef`
575 Task definition structure.
576 """
577 # Create a butler that operates in the context of a quantum
578 if self.butler is None:
579 butlerQC = ButlerQuantumContext.from_limited(limited_butler, quantum)
580 else:
581 if self.mock:
582 butlerQC = MockButlerQuantumContext(self.butler, quantum)
583 else:
584 butlerQC = ButlerQuantumContext.from_full(self.butler, quantum)
586 # Get the input and output references for the task
587 inputRefs, outputRefs = taskDef.connections.buildDatasetRefs(quantum)
589 # Call task runQuantum() method. Catch a few known failure modes and
590 # translate them into specific
591 try:
592 task.runQuantum(butlerQC, inputRefs, outputRefs)
593 except NoWorkFound as err:
594 # Not an error, just an early exit.
595 _LOG.info("Task '%s' on quantum %s exited early: %s", taskDef.label, quantum.dataId, str(err))
596 pass
597 except RepeatableQuantumError as err:
598 if self.exitOnKnownError:
599 _LOG.warning("Caught repeatable quantum error for %s (%s):", taskDef, quantum.dataId)
600 _LOG.warning(err, exc_info=True)
601 sys.exit(err.EXIT_CODE)
602 else:
603 raise
604 except InvalidQuantumError as err:
605 _LOG.fatal("Invalid quantum error for %s (%s): %s", taskDef, quantum.dataId)
606 _LOG.fatal(err, exc_info=True)
607 sys.exit(err.EXIT_CODE)
609 def writeMetadata(
610 self, quantum: Quantum, metadata: Any, taskDef: TaskDef, limited_butler: LimitedButler
611 ) -> None:
612 if taskDef.metadataDatasetName is not None:
613 # DatasetRef has to be in the Quantum outputs, can lookup by name
614 try:
615 [ref] = quantum.outputs[taskDef.metadataDatasetName]
616 except LookupError as exc:
617 raise InvalidQuantumError(
618 f"Quantum outputs is missing metadata dataset type {taskDef.metadataDatasetName};"
619 " this could happen due to inconsistent options between QuantumGraph generation"
620 " and execution"
621 ) from exc
622 if self.butler is not None:
623 # Dataset ref will already be resolved. We are now required
624 # to respect the output run of the ref so can not unresolve.
625 if ref.id is not None:
626 if ref.run != self.butler.run: # This test allows for clearer error message.
627 raise RuntimeError(
628 f"Inconsistency in RUN when putting resolved ref. "
629 f"Ref has run {ref.run!r} but butler is putting it into {self.butler.run!r}"
630 )
631 self.butler.put(metadata, ref)
632 else:
633 limited_butler.put(metadata, ref)
635 def initGlobals(self, quantum: Quantum) -> None:
636 """Initialize global state needed for task execution.
638 Parameters
639 ----------
640 quantum : `~lsst.daf.butler.Quantum`
641 Single Quantum instance.
643 Notes
644 -----
645 There is an issue with initializing filters singleton which is done
646 by instrument, to avoid requiring tasks to do it in runQuantum()
647 we do it here when any dataId has an instrument dimension. Also for
648 now we only allow single instrument, verify that all instrument
649 names in all dataIds are identical.
651 This will need revision when filter singleton disappears.
652 """
653 # can only work for full butler
654 if self.butler is None:
655 return
656 oneInstrument = None
657 for datasetRefs in chain(quantum.inputs.values(), quantum.outputs.values()):
658 for datasetRef in datasetRefs:
659 dataId = datasetRef.dataId
660 instrument = dataId.get("instrument")
661 if instrument is not None:
662 if oneInstrument is not None:
663 assert ( # type: ignore
664 instrument == oneInstrument
665 ), "Currently require that only one instrument is used per graph"
666 else:
667 oneInstrument = instrument
668 Instrument.fromName(instrument, self.butler.registry)
670 def getReport(self) -> Optional[QuantumReport]:
671 # Docstring inherited from base class
672 if self.report is None:
673 raise RuntimeError("getReport() called before execute()")
674 return self.report