Coverage for python/lsst/ctrl/mpexec/singleQuantumExecutor.py: 9%

262 statements  

« prev     ^ index     » next       coverage.py v6.5.0, created at 2023-04-14 02:17 -0700

1# This file is part of ctrl_mpexec. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (http://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# This program is free software: you can redistribute it and/or modify 

10# it under the terms of the GNU General Public License as published by 

11# the Free Software Foundation, either version 3 of the License, or 

12# (at your option) any later version. 

13# 

14# This program is distributed in the hope that it will be useful, 

15# but WITHOUT ANY WARRANTY; without even the implied warranty of 

16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

17# GNU General Public License for more details. 

18# 

19# You should have received a copy of the GNU General Public License 

20# along with this program. If not, see <http://www.gnu.org/licenses/>. 

21 

22__all__ = ["SingleQuantumExecutor"] 

23 

24# ------------------------------- 

25# Imports of standard modules -- 

26# ------------------------------- 

27import logging 

28import os 

29import sys 

30import time 

31from collections import defaultdict 

32from collections.abc import Callable 

33from itertools import chain 

34from typing import Any, Optional, Union 

35 

36from lsst.daf.butler import Butler, DatasetRef, DatasetType, LimitedButler, NamedKeyDict, Quantum 

37from lsst.pipe.base import ( 

38 AdjustQuantumHelper, 

39 ButlerQuantumContext, 

40 Instrument, 

41 InvalidQuantumError, 

42 NoWorkFound, 

43 PipelineTask, 

44 RepeatableQuantumError, 

45 TaskDef, 

46 TaskFactory, 

47) 

48from lsst.pipe.base.configOverrides import ConfigOverrides 

49 

50# During metadata transition phase, determine metadata class by 

51# asking pipe_base 

52from lsst.pipe.base.task import _TASK_FULL_METADATA_TYPE, _TASK_METADATA_TYPE 

53from lsst.utils.timer import logInfo 

54 

55# ----------------------------- 

56# Imports for other modules -- 

57# ----------------------------- 

58from .cli.utils import _PipelineAction 

59from .log_capture import LogCapture 

60from .mock_task import MockButlerQuantumContext, MockPipelineTask 

61from .quantumGraphExecutor import QuantumExecutor 

62from .reports import QuantumReport 

63 

64# ---------------------------------- 

65# Local non-exported definitions -- 

66# ---------------------------------- 

67 

68_LOG = logging.getLogger(__name__) 

69 

70 

71class SingleQuantumExecutor(QuantumExecutor): 

72 """Executor class which runs one Quantum at a time. 

73 

74 Parameters 

75 ---------- 

76 butler : `~lsst.daf.butler.Butler` or `None` 

77 Data butler, `None` means that Quantum-backed butler should be used 

78 instead. 

79 taskFactory : `~lsst.pipe.base.TaskFactory` 

80 Instance of a task factory. 

81 skipExistingIn : `list` [ `str` ], optional 

82 Accepts list of collections, if all Quantum outputs already exist in 

83 the specified list of collections then that Quantum will not be rerun. 

84 clobberOutputs : `bool`, optional 

85 If `True`, then existing outputs in output run collection will be 

86 overwritten. If ``skipExistingIn`` is defined, only outputs from 

87 failed quanta will be overwritten. Only used when ``butler`` is not 

88 `None`. 

89 enableLsstDebug : `bool`, optional 

90 Enable debugging with ``lsstDebug`` facility for a task. 

91 exitOnKnownError : `bool`, optional 

92 If `True`, call `sys.exit` with the appropriate exit code for special 

93 known exceptions, after printing a traceback, instead of letting the 

94 exception propagate up to calling. This is always the behavior for 

95 InvalidQuantumError. 

96 mock : `bool`, optional 

97 If `True` then mock task execution. 

98 mock_configs : `list` [ `_PipelineAction` ], optional 

99 Optional config overrides for mock tasks. 

100 limited_butler_factory : `Callable`, optional 

101 A method that creates a `~lsst.daf.butler.LimitedButler` instance 

102 for a given Quantum. This parameter must be defined if ``butler`` is 

103 `None`. If ``butler`` is not `None` then this parameter is ignored. 

104 """ 

105 

106 def __init__( 

107 self, 

108 butler: Butler | None, 

109 taskFactory: TaskFactory, 

110 skipExistingIn: list[str] | None = None, 

111 clobberOutputs: bool = False, 

112 enableLsstDebug: bool = False, 

113 exitOnKnownError: bool = False, 

114 mock: bool = False, 

115 mock_configs: list[_PipelineAction] | None = None, 

116 limited_butler_factory: Callable[[Quantum], LimitedButler] | None = None, 

117 ): 

118 self.butler = butler 

119 self.taskFactory = taskFactory 

120 self.skipExistingIn = skipExistingIn 

121 self.enableLsstDebug = enableLsstDebug 

122 self.clobberOutputs = clobberOutputs 

123 self.exitOnKnownError = exitOnKnownError 

124 self.mock = mock 

125 self.mock_configs = mock_configs if mock_configs is not None else [] 

126 self.limited_butler_factory = limited_butler_factory 

127 self.report: Optional[QuantumReport] = None 

128 

129 if self.butler is None: 

130 assert not self.mock, "Mock execution only possible with full butler" 

131 assert limited_butler_factory is not None, "limited_butler_factory is needed when butler is None" 

132 

133 def execute(self, taskDef: TaskDef, quantum: Quantum) -> Quantum: 

134 # Docstring inherited from QuantumExecutor.execute 

135 assert quantum.dataId is not None, "Quantum DataId cannot be None" 

136 

137 if self.butler is not None: 

138 self.butler.registry.refresh() 

139 

140 # Catch any exception and make a report based on that. 

141 try: 

142 result = self._execute(taskDef, quantum) 

143 self.report = QuantumReport(dataId=quantum.dataId, taskLabel=taskDef.label) 

144 return result 

145 except Exception as exc: 

146 self.report = QuantumReport.from_exception( 

147 exception=exc, 

148 dataId=quantum.dataId, 

149 taskLabel=taskDef.label, 

150 ) 

151 raise 

152 

153 def _resolve_ref(self, ref: DatasetRef, collections: Any = None) -> DatasetRef | None: 

154 """Return resolved reference. 

155 

156 Parameters 

157 ---------- 

158 ref : `DatasetRef` 

159 Input reference, can be either resolved or unresolved. 

160 collections : 

161 Collections to search for the existing reference, only used when 

162 running with full butler. 

163 

164 Notes 

165 ----- 

166 When running with Quantum-backed butler it assumes that reference is 

167 already resolved and returns input references without any checks. When 

168 running with full butler, it always searches registry fof a reference 

169 in specified collections, even if reference is already resolved. 

170 """ 

171 if self.butler is not None: 

172 # If running with full butler, need to re-resolve it in case 

173 # collections are different. 

174 ref = ref.unresolved() 

175 return self.butler.registry.findDataset(ref.datasetType, ref.dataId, collections=collections) 

176 else: 

177 # In case of QBB all refs must be resolved already, do not check. 

178 return ref 

179 

180 def _execute(self, taskDef: TaskDef, quantum: Quantum) -> Quantum: 

181 """Internal implementation of execute()""" 

182 startTime = time.time() 

183 

184 # Make a limited butler instance if needed (which should be QBB if full 

185 # butler is not defined). 

186 limited_butler: LimitedButler 

187 if self.butler is not None: 

188 limited_butler = self.butler 

189 else: 

190 # We check this in constructor, but mypy needs this check here. 

191 assert self.limited_butler_factory is not None 

192 limited_butler = self.limited_butler_factory(quantum) 

193 

194 if self.butler is not None: 

195 log_capture = LogCapture.from_full(self.butler) 

196 else: 

197 log_capture = LogCapture.from_limited(limited_butler) 

198 with log_capture.capture_logging(taskDef, quantum) as captureLog: 

199 # Save detailed resource usage before task start to metadata. 

200 quantumMetadata = _TASK_METADATA_TYPE() 

201 logInfo(None, "prep", metadata=quantumMetadata) # type: ignore[arg-type] 

202 

203 # check whether to skip or delete old outputs, if it returns True 

204 # or raises an exception do not try to store logs, as they may be 

205 # already in butler. 

206 captureLog.store = False 

207 if self.checkExistingOutputs(quantum, taskDef, limited_butler): 

208 _LOG.info( 

209 "Skipping already-successful quantum for label=%s dataId=%s.", 

210 taskDef.label, 

211 quantum.dataId, 

212 ) 

213 return quantum 

214 captureLog.store = True 

215 

216 try: 

217 quantum = self.updatedQuantumInputs(quantum, taskDef, limited_butler) 

218 except NoWorkFound as exc: 

219 _LOG.info( 

220 "Nothing to do for task '%s' on quantum %s; saving metadata and skipping: %s", 

221 taskDef.label, 

222 quantum.dataId, 

223 str(exc), 

224 ) 

225 # Make empty metadata that looks something like what a 

226 # do-nothing task would write (but we don't bother with empty 

227 # nested PropertySets for subtasks). This is slightly 

228 # duplicative with logic in pipe_base that we can't easily call 

229 # from here; we'll fix this on DM-29761. 

230 logInfo(None, "end", metadata=quantumMetadata) # type: ignore[arg-type] 

231 fullMetadata = _TASK_FULL_METADATA_TYPE() 

232 fullMetadata[taskDef.label] = _TASK_METADATA_TYPE() 

233 fullMetadata["quantum"] = quantumMetadata 

234 self.writeMetadata(quantum, fullMetadata, taskDef, limited_butler) 

235 return quantum 

236 

237 # enable lsstDebug debugging 

238 if self.enableLsstDebug: 

239 try: 

240 _LOG.debug("Will try to import debug.py") 

241 import debug # type: ignore # noqa:F401 

242 except ImportError: 

243 _LOG.warn("No 'debug' module found.") 

244 

245 # initialize global state 

246 self.initGlobals(quantum) 

247 

248 # Ensure that we are executing a frozen config 

249 taskDef.config.freeze() 

250 logInfo(None, "init", metadata=quantumMetadata) # type: ignore[arg-type] 

251 init_input_refs = [] 

252 for ref in quantum.initInputs.values(): 

253 resolved = self._resolve_ref(ref) 

254 if resolved is None: 

255 raise ValueError(f"Failed to resolve init input reference {ref}") 

256 init_input_refs.append(resolved) 

257 task = self.taskFactory.makeTask(taskDef, limited_butler, init_input_refs) 

258 logInfo(None, "start", metadata=quantumMetadata) # type: ignore[arg-type] 

259 try: 

260 if self.mock: 

261 # Use mock task instance to execute method. 

262 runTask = self._makeMockTask(taskDef) 

263 else: 

264 runTask = task 

265 self.runQuantum(runTask, quantum, taskDef, limited_butler) 

266 except Exception as e: 

267 _LOG.error( 

268 "Execution of task '%s' on quantum %s failed. Exception %s: %s", 

269 taskDef.label, 

270 quantum.dataId, 

271 e.__class__.__name__, 

272 str(e), 

273 ) 

274 raise 

275 logInfo(None, "end", metadata=quantumMetadata) # type: ignore[arg-type] 

276 fullMetadata = task.getFullMetadata() 

277 fullMetadata["quantum"] = quantumMetadata 

278 self.writeMetadata(quantum, fullMetadata, taskDef, limited_butler) 

279 stopTime = time.time() 

280 _LOG.info( 

281 "Execution of task '%s' on quantum %s took %.3f seconds", 

282 taskDef.label, 

283 quantum.dataId, 

284 stopTime - startTime, 

285 ) 

286 return quantum 

287 

288 def _makeMockTask(self, taskDef: TaskDef) -> PipelineTask: 

289 """Make an instance of mock task for given TaskDef.""" 

290 # Make config instance and apply overrides 

291 overrides = ConfigOverrides() 

292 for action in self.mock_configs: 

293 if action.label == taskDef.label + "-mock": 

294 if action.action == "config": 

295 key, _, value = action.value.partition("=") 

296 overrides.addValueOverride(key, value) 

297 elif action.action == "configfile": 

298 overrides.addFileOverride(os.path.expandvars(action.value)) 

299 else: 

300 raise ValueError(f"Unexpected action for mock task config overrides: {action}") 

301 config = MockPipelineTask.ConfigClass() 

302 overrides.applyTo(config) 

303 

304 task = MockPipelineTask(config=config, name=taskDef.label) 

305 return task 

306 

307 def checkExistingOutputs(self, quantum: Quantum, taskDef: TaskDef, limited_butler: LimitedButler) -> bool: 

308 """Decide whether this quantum needs to be executed. 

309 

310 If only partial outputs exist then they are removed if 

311 ``clobberOutputs`` is True, otherwise an exception is raised. 

312 

313 Parameters 

314 ---------- 

315 quantum : `~lsst.daf.butler.Quantum` 

316 Quantum to check for existing outputs 

317 taskDef : `~lsst.pipe.base.TaskDef` 

318 Task definition structure. 

319 

320 Returns 

321 ------- 

322 exist : `bool` 

323 `True` if ``self.skipExistingIn`` is defined, and a previous 

324 execution of this quanta appears to have completed successfully 

325 (either because metadata was written or all datasets were written). 

326 `False` otherwise. 

327 

328 Raises 

329 ------ 

330 RuntimeError 

331 Raised if some outputs exist and some not. 

332 """ 

333 if self.skipExistingIn and taskDef.metadataDatasetName is not None: 

334 # Metadata output exists; this is sufficient to assume the previous 

335 # run was successful and should be skipped. 

336 [metadata_ref] = quantum.outputs[taskDef.metadataDatasetName] 

337 ref = self._resolve_ref(metadata_ref, self.skipExistingIn) 

338 if ref is not None: 

339 if limited_butler.datastore.exists(ref): 

340 return True 

341 

342 # Previously we always checked for existing outputs in `butler.run`, 

343 # now logic gets more complicated as we only want to skip quantum 

344 # whose outputs exist in `self.skipExistingIn` but pruning should only 

345 # be done for outputs existing in `butler.run`. 

346 

347 def findOutputs( 

348 collections: Optional[Union[str, list[str]]] 

349 ) -> tuple[list[DatasetRef], list[DatasetRef]]: 

350 """Find quantum outputs in specified collections.""" 

351 existingRefs = [] 

352 missingRefs = [] 

353 for datasetRefs in quantum.outputs.values(): 

354 checkRefs: list[DatasetRef] = [] 

355 registryRefToQuantumRef: dict[DatasetRef, DatasetRef] = {} 

356 for datasetRef in datasetRefs: 

357 ref = self._resolve_ref(datasetRef, collections) 

358 if ref is None: 

359 missingRefs.append(datasetRef) 

360 else: 

361 checkRefs.append(ref) 

362 registryRefToQuantumRef[ref] = datasetRef 

363 

364 # More efficient to ask the datastore in bulk for ref 

365 # existence rather than one at a time. 

366 existence = limited_butler.datastore.mexists(checkRefs) 

367 for ref, exists in existence.items(): 

368 if exists: 

369 existingRefs.append(ref) 

370 else: 

371 missingRefs.append(registryRefToQuantumRef[ref]) 

372 return existingRefs, missingRefs 

373 

374 # If skipExistingIn is None this will search in butler.run. 

375 existingRefs, missingRefs = findOutputs(self.skipExistingIn) 

376 if self.skipExistingIn: 

377 if existingRefs and not missingRefs: 

378 # Everything is already there, and we do not clobber complete 

379 # outputs if skipExistingIn is specified. 

380 return True 

381 

382 # If we are to re-run quantum then prune datasets that exists in 

383 # output run collection, only if `self.clobberOutputs` is set, 

384 # that only works when we have full butler. 

385 if existingRefs and self.butler is not None: 

386 # Look at butler run instead of skipExistingIn collections. 

387 existingRefs, missingRefs = findOutputs(self.butler.run) 

388 if existingRefs and missingRefs: 

389 _LOG.debug( 

390 "Partial outputs exist for task %s dataId=%s collection=%s " 

391 "existingRefs=%s missingRefs=%s", 

392 taskDef, 

393 quantum.dataId, 

394 self.butler.run, 

395 existingRefs, 

396 missingRefs, 

397 ) 

398 if self.clobberOutputs: 

399 # only prune 

400 _LOG.info("Removing partial outputs for task %s: %s", taskDef, existingRefs) 

401 self.butler.pruneDatasets(existingRefs, disassociate=True, unstore=True, purge=True) 

402 return False 

403 else: 

404 raise RuntimeError( 

405 "Registry inconsistency while checking for existing outputs:" 

406 f" collection={self.butler.run} existingRefs={existingRefs}" 

407 f" missingRefs={missingRefs}" 

408 ) 

409 elif existingRefs and self.clobberOutputs and not self.skipExistingIn: 

410 # Clobber complete outputs if skipExistingIn is not specified. 

411 _LOG.info("Removing complete outputs for task %s: %s", taskDef, existingRefs) 

412 self.butler.pruneDatasets(existingRefs, disassociate=True, unstore=True, purge=True) 

413 return False 

414 

415 # need to re-run 

416 return False 

417 

418 def updatedQuantumInputs( 

419 self, quantum: Quantum, taskDef: TaskDef, limited_butler: LimitedButler 

420 ) -> Quantum: 

421 """Update quantum with extra information, returns a new updated 

422 Quantum. 

423 

424 Some methods may require input DatasetRefs to have non-None 

425 ``dataset_id``, but in case of intermediate dataset it may not be 

426 filled during QuantumGraph construction. This method will retrieve 

427 missing info from registry. 

428 

429 Parameters 

430 ---------- 

431 quantum : `~lsst.daf.butler.Quantum` 

432 Single Quantum instance. 

433 taskDef : `~lsst.pipe.base.TaskDef` 

434 Task definition structure. 

435 

436 Returns 

437 ------- 

438 update : `~lsst.daf.butler.Quantum` 

439 Updated Quantum instance 

440 """ 

441 anyChanges = False 

442 updatedInputs: defaultdict[DatasetType, list] = defaultdict(list) 

443 for key, refsForDatasetType in quantum.inputs.items(): 

444 newRefsForDatasetType = updatedInputs[key] 

445 for ref in refsForDatasetType: 

446 # Inputs may already be resolved even if they do not exist, but 

447 # we have to re-resolve them because IDs are ignored on output. 

448 # Check datastore for existence first to cover calibration 

449 # dataset types, as they would need a timespan for findDataset. 

450 resolvedRef: DatasetRef | None 

451 checked_datastore = False 

452 if ref.id is not None and limited_butler.datastore.exists(ref): 

453 resolvedRef = ref 

454 checked_datastore = True 

455 elif self.butler is not None: 

456 # In case of full butler try to (re-)resolve it. 

457 resolvedRef = self._resolve_ref(ref) 

458 if resolvedRef is None: 

459 _LOG.info("No dataset found for %s", ref) 

460 continue 

461 else: 

462 _LOG.debug("Updated dataset ID for %s", ref) 

463 else: 

464 # QBB with missing intermediate 

465 _LOG.info("No dataset found for %s", ref) 

466 continue 

467 

468 # In case of mock execution we check that mock dataset exists 

469 # instead. Mock execution is only possible with full butler. 

470 if self.mock and self.butler is not None: 

471 try: 

472 typeName, component = ref.datasetType.nameAndComponent() 

473 if component is not None: 

474 mockDatasetTypeName = MockButlerQuantumContext.mockDatasetTypeName(typeName) 

475 else: 

476 mockDatasetTypeName = MockButlerQuantumContext.mockDatasetTypeName( 

477 ref.datasetType.name 

478 ) 

479 

480 mockDatasetType = self.butler.registry.getDatasetType(mockDatasetTypeName) 

481 except KeyError: 

482 # means that mock dataset type is not there and this 

483 # should be a pre-existing dataset 

484 _LOG.debug("No mock dataset type for %s", ref) 

485 if self.butler.datastore.exists(resolvedRef): 

486 newRefsForDatasetType.append(resolvedRef) 

487 else: 

488 mockRef = DatasetRef(mockDatasetType, ref.dataId) 

489 resolvedMockRef = self.butler.registry.findDataset( 

490 mockRef.datasetType, mockRef.dataId, collections=self.butler.collections 

491 ) 

492 _LOG.debug("mockRef=%s resolvedMockRef=%s", mockRef, resolvedMockRef) 

493 if resolvedMockRef is not None and self.butler.datastore.exists(resolvedMockRef): 

494 _LOG.debug("resolvedMockRef dataset exists") 

495 newRefsForDatasetType.append(resolvedRef) 

496 elif checked_datastore or limited_butler.datastore.exists(resolvedRef): 

497 # We need to ask datastore if the dataset actually exists 

498 # because the Registry of a local "execution butler" 

499 # cannot know this (because we prepopulate it with all of 

500 # the datasets that might be created). 

501 newRefsForDatasetType.append(resolvedRef) 

502 

503 if len(newRefsForDatasetType) != len(refsForDatasetType): 

504 anyChanges = True 

505 # If we removed any input datasets, let the task check if it has enough 

506 # to proceed and/or prune related datasets that it also doesn't 

507 # need/produce anymore. It will raise NoWorkFound if it can't run, 

508 # which we'll let propagate up. This is exactly what we run during QG 

509 # generation, because a task shouldn't care whether an input is missing 

510 # because some previous task didn't produce it, or because it just 

511 # wasn't there during QG generation. 

512 namedUpdatedInputs = NamedKeyDict[DatasetType, list[DatasetRef]](updatedInputs.items()) 

513 helper = AdjustQuantumHelper(namedUpdatedInputs, quantum.outputs) 

514 if anyChanges: 

515 assert quantum.dataId is not None, "Quantum DataId cannot be None" 

516 helper.adjust_in_place(taskDef.connections, label=taskDef.label, data_id=quantum.dataId) 

517 return Quantum( 

518 taskName=quantum.taskName, 

519 taskClass=quantum.taskClass, 

520 dataId=quantum.dataId, 

521 initInputs=quantum.initInputs, 

522 inputs=helper.inputs, 

523 outputs=helper.outputs, 

524 ) 

525 

526 def runQuantum( 

527 self, task: PipelineTask, quantum: Quantum, taskDef: TaskDef, limited_butler: LimitedButler 

528 ) -> None: 

529 """Execute task on a single quantum. 

530 

531 Parameters 

532 ---------- 

533 task : `~lsst.pipe.base.PipelineTask` 

534 Task object. 

535 quantum : `~lsst.daf.butler.Quantum` 

536 Single Quantum instance. 

537 taskDef : `~lsst.pipe.base.TaskDef` 

538 Task definition structure. 

539 """ 

540 # Create a butler that operates in the context of a quantum 

541 if self.butler is None: 

542 butlerQC = ButlerQuantumContext.from_limited(limited_butler, quantum) 

543 else: 

544 if self.mock: 

545 butlerQC = MockButlerQuantumContext(self.butler, quantum) 

546 else: 

547 butlerQC = ButlerQuantumContext.from_full(self.butler, quantum) 

548 

549 # Get the input and output references for the task 

550 inputRefs, outputRefs = taskDef.connections.buildDatasetRefs(quantum) 

551 

552 # Call task runQuantum() method. Catch a few known failure modes and 

553 # translate them into specific 

554 try: 

555 task.runQuantum(butlerQC, inputRefs, outputRefs) 

556 except NoWorkFound as err: 

557 # Not an error, just an early exit. 

558 _LOG.info("Task '%s' on quantum %s exited early: %s", taskDef.label, quantum.dataId, str(err)) 

559 pass 

560 except RepeatableQuantumError as err: 

561 if self.exitOnKnownError: 

562 _LOG.warning("Caught repeatable quantum error for %s (%s):", taskDef, quantum.dataId) 

563 _LOG.warning(err, exc_info=True) 

564 sys.exit(err.EXIT_CODE) 

565 else: 

566 raise 

567 except InvalidQuantumError as err: 

568 _LOG.fatal("Invalid quantum error for %s (%s): %s", taskDef, quantum.dataId) 

569 _LOG.fatal(err, exc_info=True) 

570 sys.exit(err.EXIT_CODE) 

571 

572 def writeMetadata( 

573 self, quantum: Quantum, metadata: Any, taskDef: TaskDef, limited_butler: LimitedButler 

574 ) -> None: 

575 if taskDef.metadataDatasetName is not None: 

576 # DatasetRef has to be in the Quantum outputs, can lookup by name 

577 try: 

578 [ref] = quantum.outputs[taskDef.metadataDatasetName] 

579 except LookupError as exc: 

580 raise InvalidQuantumError( 

581 f"Quantum outputs is missing metadata dataset type {taskDef.metadataDatasetName};" 

582 " this could happen due to inconsistent options between QuantumGraph generation" 

583 " and execution" 

584 ) from exc 

585 if self.butler is not None: 

586 # Dataset ref can already be resolved, for non-QBB executor we 

587 # have to ignore that because may be overriding run 

588 # collection. 

589 if ref.id is not None: 

590 ref = ref.unresolved() 

591 self.butler.put(metadata, ref) 

592 else: 

593 limited_butler.put(metadata, ref) 

594 

595 def initGlobals(self, quantum: Quantum) -> None: 

596 """Initialize global state needed for task execution. 

597 

598 Parameters 

599 ---------- 

600 quantum : `~lsst.daf.butler.Quantum` 

601 Single Quantum instance. 

602 

603 Notes 

604 ----- 

605 There is an issue with initializing filters singleton which is done 

606 by instrument, to avoid requiring tasks to do it in runQuantum() 

607 we do it here when any dataId has an instrument dimension. Also for 

608 now we only allow single instrument, verify that all instrument 

609 names in all dataIds are identical. 

610 

611 This will need revision when filter singleton disappears. 

612 """ 

613 # can only work for full butler 

614 if self.butler is None: 

615 return 

616 oneInstrument = None 

617 for datasetRefs in chain(quantum.inputs.values(), quantum.outputs.values()): 

618 for datasetRef in datasetRefs: 

619 dataId = datasetRef.dataId 

620 instrument = dataId.get("instrument") 

621 if instrument is not None: 

622 if oneInstrument is not None: 

623 assert ( # type: ignore 

624 instrument == oneInstrument 

625 ), "Currently require that only one instrument is used per graph" 

626 else: 

627 oneInstrument = instrument 

628 Instrument.fromName(instrument, self.butler.registry) 

629 

630 def getReport(self) -> Optional[QuantumReport]: 

631 # Docstring inherited from base class 

632 if self.report is None: 

633 raise RuntimeError("getReport() called before execute()") 

634 return self.report