Coverage for python/lsst/pipe/base/tests/mocks/_pipeline_task.py: 22%

227 statements  

coverage.py v7.3.2, created at 2023-11-17 10:52 +0000

# This file is part of pipe_base.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This software is dual licensed under the GNU General Public License and also
# under a 3-clause BSD license. Recipients may choose which of these licenses
# to use; please see the files gpl-3.0.txt and/or bsd_license.txt,
# respectively. If you choose the GPL option then the following text applies
# (but note that there is still no warranty even if you opt for BSD instead):
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

from lsst.pipe.base.connectionTypes import BaseInput, Output

__all__ = (
    "DynamicConnectionConfig",
    "DynamicTestPipelineTask",
    "DynamicTestPipelineTaskConfig",
    "MockPipelineTask",
    "MockPipelineTaskConfig",
    "mock_task_defs",
    "mock_pipeline_graph",
)

import dataclasses
import logging
from collections.abc import Collection, Iterable, Mapping
from typing import TYPE_CHECKING, Any, ClassVar, TypeVar

from lsst.daf.butler import (
    DataCoordinate,
    DatasetRef,
    DeferredDatasetHandle,
    SerializedDatasetType,
    SerializedDimensionGraph,
)
from lsst.pex.config import Config, ConfigDictField, ConfigurableField, Field, ListField
from lsst.utils.doImport import doImportType
from lsst.utils.introspection import get_full_type_name
from lsst.utils.iteration import ensure_iterable

from ... import automatic_connection_constants as acc
from ... import connectionTypes as cT
from ...config import PipelineTaskConfig
from ...connections import InputQuantizedConnection, OutputQuantizedConnection, PipelineTaskConnections
from ...pipeline import TaskDef
from ...pipeline_graph import PipelineGraph
from ...pipelineTask import PipelineTask
from ._data_id_match import DataIdMatch
from ._storage_class import MockDataset, MockDatasetQuantum, MockStorageClass, get_mock_name

_LOG = logging.getLogger(__name__)

if TYPE_CHECKING:
    from ..._quantumContext import QuantumContext


_T = TypeVar("_T", bound=cT.BaseConnection)

def mock_task_defs(
    originals: Iterable[TaskDef],
    unmocked_dataset_types: Iterable[str] = (),
    force_failures: Mapping[str, tuple[str, type[Exception] | None]] | None = None,
) -> list[TaskDef]:
    """Create mocks for an iterable of TaskDefs.

    Parameters
    ----------
    originals : `~collections.abc.Iterable` [ `TaskDef` ]
        Original tasks and configuration to mock.
    unmocked_dataset_types : `~collections.abc.Iterable` [ `str` ], optional
        Names of overall-input dataset types that should not be replaced with
        mocks.
    force_failures : `~collections.abc.Mapping` [ `str`, `tuple` [ `str`, \
            `type` [ `Exception` ] or `None` ] ], optional
        Mapping from original task label to a 2-tuple indicating that some
        quanta should raise an exception when executed. The first entry is a
        data ID match using the butler expression language (i.e. a string of
        the sort passed as the ``where`` argument to butler query methods),
        while the second is the type of exception to raise when the quantum
        data ID matches the expression. An exception type of `None` uses
        the default, `ValueError`.

    Returns
    -------
    mocked : `list` [ `TaskDef` ]
        List of `TaskDef` objects using `MockPipelineTask` configurations that
        target the original tasks, in the same order.
    """
    unmocked_dataset_types = tuple(unmocked_dataset_types)
    if force_failures is None:
        force_failures = {}
    results: list[TaskDef] = []
    for original_task_def in originals:
        config = MockPipelineTaskConfig()
        config.original.retarget(original_task_def.taskClass)
        config.original = original_task_def.config
        config.unmocked_dataset_types.extend(unmocked_dataset_types)
        if original_task_def.label in force_failures:
            condition, exception_type = force_failures[original_task_def.label]
            config.fail_condition = condition
            if exception_type is not None:
                config.fail_exception = get_full_type_name(exception_type)
        mock_task_def = TaskDef(
            config=config, taskClass=MockPipelineTask, label=get_mock_name(original_task_def.label)
        )
        results.append(mock_task_def)
    return results

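# A minimal usage sketch (hypothetical task label "isr" and dataset type
# "camera"; ``task_defs`` is assumed to be an iterable of TaskDef objects
# obtained elsewhere): wrap existing TaskDefs in mocks and force quanta of
# "isr" with visit=42 to raise the default ValueError.
#
#     mocked = mock_task_defs(
#         task_defs,
#         unmocked_dataset_types=["camera"],
#         force_failures={"isr": ("visit = 42", None)},
#     )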

def mock_pipeline_graph(
    original_graph: PipelineGraph,
    unmocked_dataset_types: Iterable[str] = (),
    force_failures: Mapping[str, tuple[str, type[Exception] | None]] | None = None,
) -> PipelineGraph:
    """Create mocks for a full pipeline graph.

    Parameters
    ----------
    original_graph : `~..pipeline_graph.PipelineGraph`
        Original tasks and configuration to mock.
    unmocked_dataset_types : `~collections.abc.Iterable` [ `str` ], optional
        Names of overall-input dataset types that should not be replaced with
        mocks.
    force_failures : `~collections.abc.Mapping` [ `str`, `tuple` [ `str`, \
            `type` [ `Exception` ] or `None` ] ], optional
        Mapping from original task label to a 2-tuple indicating that some
        quanta should raise an exception when executed. The first entry is a
        data ID match using the butler expression language (i.e. a string of
        the sort passed as the ``where`` argument to butler query methods),
        while the second is the type of exception to raise when the quantum
        data ID matches the expression. An exception type of `None` uses
        the default, `ValueError`.

    Returns
    -------
    mocked : `~..pipeline_graph.PipelineGraph`
        Pipeline graph using `MockPipelineTask` configurations that target the
        original tasks. Never resolved.
    """
    unmocked_dataset_types = tuple(unmocked_dataset_types)
    if force_failures is None:
        force_failures = {}
    result = PipelineGraph(description=original_graph.description)
    for original_task_node in original_graph.tasks.values():
        config = MockPipelineTaskConfig()
        config.original.retarget(original_task_node.task_class)
        config.original = original_task_node.config
        config.unmocked_dataset_types.extend(unmocked_dataset_types)
        if original_task_node.label in force_failures:
            condition, exception_type = force_failures[original_task_node.label]
            config.fail_condition = condition
            if exception_type is not None:
                config.fail_exception = get_full_type_name(exception_type)
        result.add_task(get_mock_name(original_task_node.label), MockPipelineTask, config=config)
    return result

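# A minimal usage sketch (hypothetical dataset type name "raw"): mock every
# task in an existing PipelineGraph while leaving the overall-input dataset
# type "raw" unmocked, so a test can feed it real datasets.
#
#     mock_graph = mock_pipeline_graph(
#         original_graph,
#         unmocked_dataset_types=["raw"],
#     )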

class BaseTestPipelineTaskConnections(PipelineTaskConnections, dimensions=()):
    pass


class BaseTestPipelineTaskConfig(PipelineTaskConfig, pipelineConnections=BaseTestPipelineTaskConnections):
    fail_condition = Field[str](
        dtype=str,
        default="",
        doc=(
            "Condition on the quantum data ID under which to raise an exception: a string expression "
            "in the daf_butler user-expression syntax that may reference attributes of the quantum "
            "data ID (e.g. 'visit = 123')."
        ),
    )

    fail_exception = Field[str](
        dtype=str,
        default="builtins.ValueError",
        doc=(
            "Class name of the exception to raise when the fail condition is triggered. Can be "
            "'lsst.pipe.base.NoWorkFound' to specify a non-failure exception."
        ),
    )

    def data_id_match(self) -> DataIdMatch | None:
        if not self.fail_condition:
            return None
        return DataIdMatch(self.fail_condition)

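# For illustration, a sketch of configuring a simulated failure: quanta with
# detector=7 raise lsst.pipe.base.NoWorkFound instead of a hard failure (any
# importable exception class name works here).
#
#     config = MockPipelineTaskConfig()
#     config.fail_condition = "detector = 7"
#     config.fail_exception = "lsst.pipe.base.NoWorkFound"
#     assert config.data_id_match() is not None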

class BaseTestPipelineTask(PipelineTask):
    """A base class for test-utility `PipelineTask` classes that read and
    write mock datasets in `runQuantum`.

    Notes
    -----
    This class overrides `runQuantum` to read inputs and write a bit of
    provenance into all of its outputs (always `MockDataset` instances). It
    can also be configured to raise exceptions on certain data IDs. It reads
    `MockDataset` inputs and simulates reading inputs of other types by
    creating `MockDataset` inputs from their DatasetRefs.

    Subclasses are responsible for defining connections, but init-input and
    init-output connections are not supported at runtime (they may be present
    as long as the task is never constructed). All output connections must
    use mock storage classes. `..Input` and `..PrerequisiteInput` connections
    that do not use mock storage classes will be handled by constructing a
    `MockDataset` from the `~lsst.daf.butler.DatasetRef` rather than actually
    reading them.
    """

    ConfigClass: ClassVar[type[PipelineTaskConfig]] = BaseTestPipelineTaskConfig

    def __init__(
        self,
        *,
        config: BaseTestPipelineTaskConfig,
        initInputs: Mapping[str, Any],
        **kwargs: Any,
    ):
        super().__init__(config=config, **kwargs)
        self.fail_exception: type | None = None
        self.data_id_match = self.config.data_id_match()
        if self.data_id_match:
            self.fail_exception = doImportType(self.config.fail_exception)
        # Look for, check, and record init-inputs.
        task_connections = self.ConfigClass.ConnectionsClass(config=config)
        mock_dataset_quantum = MockDatasetQuantum(task_label=self.getName(), data_id={}, inputs={})
        for connection_name in task_connections.initInputs:
            input_dataset = initInputs[connection_name]
            if not isinstance(input_dataset, MockDataset):
                raise TypeError(
                    f"Expected MockDataset instance for init-input {self.getName()}.{connection_name}: "
                    f"got {input_dataset!r} of type {type(input_dataset)!r}."
                )
            connection = task_connections.allConnections[connection_name]
            if input_dataset.dataset_type.name != connection.name:
                raise RuntimeError(
                    f"Incorrect dataset type name for init-input {self.getName()}.{connection_name}: "
                    f"got {input_dataset.dataset_type.name!r}, expected {connection.name!r}."
                )
            if input_dataset.storage_class != connection.storageClass:
                raise RuntimeError(
                    f"Incorrect storage class for init-input {self.getName()}.{connection_name}: "
                    f"got {input_dataset.storage_class!r}, expected {connection.storageClass!r}."
                )
            # To avoid very deep provenance we trim inputs to a single
            # level.
            input_dataset.quantum = None
            mock_dataset_quantum.inputs[connection_name] = [input_dataset]
        # Add init-outputs as task instance attributes.
        for connection_name in task_connections.initOutputs:
            connection = task_connections.allConnections[connection_name]
            output_dataset = MockDataset(
                dataset_id=None,  # the task has no way to get this
                dataset_type=SerializedDatasetType(
                    name=connection.name,
                    storageClass=connection.storageClass,
                    dimensions=SerializedDimensionGraph(names=[]),
                ),
                data_id={},
                run=None,  # task also has no way to get this
                quantum=mock_dataset_quantum,
                output_connection_name=connection_name,
            )
            setattr(self, connection_name, output_dataset)

    config: BaseTestPipelineTaskConfig

    def runQuantum(
        self,
        butlerQC: QuantumContext,
        inputRefs: InputQuantizedConnection,
        outputRefs: OutputQuantizedConnection,
    ) -> None:
        # Docstring is inherited from the base class.
        quantum = butlerQC.quantum

        _LOG.info("Mocking execution of task '%s' on quantum %s", self.getName(), quantum.dataId)

        assert quantum.dataId is not None, "Quantum DataId cannot be None"

        # Possibly raise an exception.
        if self.data_id_match is not None and self.data_id_match.match(quantum.dataId):
            _LOG.info("Simulating failure of task '%s' on quantum %s", self.getName(), quantum.dataId)
            message = f"Simulated failure: task={self.getName()} dataId={quantum.dataId}"
            assert self.fail_exception is not None, "Exception type must be defined"
            raise self.fail_exception(message)

        # Populate the bit of provenance we store in all outputs.
        _LOG.info("Reading input data for task '%s' on quantum %s", self.getName(), quantum.dataId)
        mock_dataset_quantum = MockDatasetQuantum(
            task_label=self.getName(), data_id=quantum.dataId.full.byName(), inputs={}
        )
        for name, refs in inputRefs:
            inputs_list = []
            ref: DatasetRef
            for ref in ensure_iterable(refs):
                if isinstance(ref.datasetType.storageClass, MockStorageClass):
                    input_dataset = butlerQC.get(ref)
                    if isinstance(input_dataset, DeferredDatasetHandle):
                        input_dataset = input_dataset.get()
                    if not isinstance(input_dataset, MockDataset):
                        raise TypeError(
                            f"Expected MockDataset instance for {ref}; "
                            f"got {input_dataset!r} of type {type(input_dataset)!r}."
                        )
                    # To avoid very deep provenance we trim inputs to a single
                    # level.
                    input_dataset.quantum = None
                else:
                    input_dataset = MockDataset(
                        dataset_id=ref.id,
                        dataset_type=ref.datasetType.to_simple(),
                        data_id=ref.dataId.full.byName(),
                        run=ref.run,
                    )
                inputs_list.append(input_dataset)
            mock_dataset_quantum.inputs[name] = inputs_list

        # Store mock outputs.
        for name, refs in outputRefs:
            for ref in ensure_iterable(refs):
                output = MockDataset(
                    dataset_id=ref.id,
                    dataset_type=ref.datasetType.to_simple(),
                    data_id=ref.dataId.full.byName(),
                    run=ref.run,
                    quantum=mock_dataset_quantum,
                    output_connection_name=name,
                )
                butlerQC.put(output, ref)

        _LOG.info("Finished mocking task '%s' on quantum %s", self.getName(), quantum.dataId)

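# For illustration (hypothetical label and data ID values): an output written
# by `runQuantum` above is a MockDataset whose ``quantum`` field records the
# task label, the quantum data ID, and one level of input provenance, e.g.:
#
#     MockDataset(
#         dataset_id=...,
#         dataset_type=<serialized dataset type>,
#         data_id={"visit": 42, "detector": 1},
#         run="test_run",
#         quantum=MockDatasetQuantum(
#             task_label="_mock_someTask",
#             data_id={"visit": 42, "detector": 1},
#             inputs={"input_connection": [<MockDataset with quantum=None>]},
#         ),
#         output_connection_name="output_connection",
#     )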

class MockPipelineDefaultTargetConnections(PipelineTaskConnections, dimensions=()):
    pass


class MockPipelineDefaultTargetConfig(
    PipelineTaskConfig, pipelineConnections=MockPipelineDefaultTargetConnections
):
    pass


class MockPipelineDefaultTargetTask(PipelineTask):
    """A `~lsst.pipe.base.PipelineTask` class used as the default target for
    ``MockPipelineTaskConfig.original``.

    This is effectively a workaround for `lsst.pex.config.ConfigurableField`
    not supporting ``optional=True``, but that is generally a reasonable
    limitation for production code, and it wouldn't make sense to lift it
    just to support test utilities.
    """

    ConfigClass = MockPipelineDefaultTargetConfig


class MockPipelineTaskConnections(BaseTestPipelineTaskConnections, dimensions=()):
    """A connections class that creates mock connections from the connections
    of a real PipelineTask.
    """

    def __init__(self, *, config: MockPipelineTaskConfig):
        self.original: PipelineTaskConnections = config.original.connections.ConnectionsClass(
            config=config.original.value
        )
        self.dimensions.update(self.original.dimensions)
        self.unmocked_dataset_types = frozenset(config.unmocked_dataset_types)
        for name, connection in self.original.allConnections.items():
            if connection.name not in self.unmocked_dataset_types:
                if connection.storageClass in (
                    acc.CONFIG_INIT_OUTPUT_STORAGE_CLASS,
                    acc.METADATA_OUTPUT_STORAGE_CLASS,
                    acc.LOG_OUTPUT_STORAGE_CLASS,
                ):
                    # We don't mock the automatic output connections, so if
                    # they're used as an input in any other connection, we
                    # can't mock them there either.
                    storage_class_name = connection.storageClass
                else:
                    # We register the mock storage class with the global
                    # singleton here, but can only put its name in the
                    # connection. That means the same global singleton (or one
                    # that also has these registrations) has to be available
                    # whenever this dataset type is used.
                    storage_class_name = MockStorageClass.get_or_register_mock(connection.storageClass).name
                kwargs: dict[str, Any] = {}
                if hasattr(connection, "dimensions"):
                    connection_dimensions = set(connection.dimensions)
                    # Replace the generic "skypix" placeholder with htm7, since
                    # that requires the dataset type to have already been
                    # registered.
                    if "skypix" in connection_dimensions:
                        connection_dimensions.remove("skypix")
                        connection_dimensions.add("htm7")
                    kwargs["dimensions"] = connection_dimensions
                connection = dataclasses.replace(
                    connection,
                    name=get_mock_name(connection.name),
                    storageClass=storage_class_name,
                    **kwargs,
                )
            elif name in self.original.outputs:
                raise ValueError(f"Unmocked dataset type {connection.name!r} cannot be used as an output.")
            elif name in self.original.initInputs:
                raise ValueError(
                    f"Unmocked dataset type {connection.name!r} cannot be used as an init-input."
                )
            elif name in self.original.initOutputs:
                raise ValueError(
                    f"Unmocked dataset type {connection.name!r} cannot be used as an init-output."
                )
            setattr(self, name, connection)

    def getSpatialBoundsConnections(self) -> Iterable[str]:
        return self.original.getSpatialBoundsConnections()

    def getTemporalBoundsConnections(self) -> Iterable[str]:
        return self.original.getTemporalBoundsConnections()

    def adjustQuantum(
        self,
        inputs: dict[str, tuple[BaseInput, Collection[DatasetRef]]],
        outputs: dict[str, tuple[Output, Collection[DatasetRef]]],
        label: str,
        data_id: DataCoordinate,
    ) -> tuple[
        Mapping[str, tuple[BaseInput, Collection[DatasetRef]]],
        Mapping[str, tuple[Output, Collection[DatasetRef]]],
    ]:
        # Convert the given mappings from the mock dataset types to the
        # original dataset types they were produced from.
        original_inputs = {}
        for connection_name, (_, mock_refs) in inputs.items():
            original_connection = getattr(self.original, connection_name)
            if original_connection.name in self.unmocked_dataset_types:
                refs = mock_refs
            else:
                refs = MockStorageClass.unmock_dataset_refs(mock_refs)
            original_inputs[connection_name] = (original_connection, refs)
        original_outputs = {}
        for connection_name, (_, mock_refs) in outputs.items():
            original_connection = getattr(self.original, connection_name)
            if original_connection.name in self.unmocked_dataset_types:
                refs = mock_refs
            else:
                refs = MockStorageClass.unmock_dataset_refs(mock_refs)
            original_outputs[connection_name] = (original_connection, refs)
        # Call adjustQuantum on the original connections class.
        adjusted_original_inputs, adjusted_original_outputs = self.original.adjustQuantum(
            original_inputs, original_outputs, label, data_id
        )
        # Convert the results back to the mock dataset types.
        adjusted_inputs = {}
        for connection_name, (original_connection, original_refs) in adjusted_original_inputs.items():
            if original_connection.name in self.unmocked_dataset_types:
                refs = original_refs
            else:
                refs = MockStorageClass.mock_dataset_refs(original_refs)
            adjusted_inputs[connection_name] = (getattr(self, connection_name), refs)
        adjusted_outputs = {}
        for connection_name, (original_connection, original_refs) in adjusted_original_outputs.items():
            if original_connection.name in self.unmocked_dataset_types:
                refs = original_refs
            else:
                refs = MockStorageClass.mock_dataset_refs(original_refs)
            adjusted_outputs[connection_name] = (getattr(self, connection_name), refs)
        return adjusted_inputs, adjusted_outputs

class MockPipelineTaskConfig(BaseTestPipelineTaskConfig, pipelineConnections=MockPipelineTaskConnections):
    """Configuration class for `MockPipelineTask`."""

    original: ConfigurableField = ConfigurableField(
        doc="The original task being mocked by this one.", target=MockPipelineDefaultTargetTask
    )

    unmocked_dataset_types = ListField[str](
        doc=(
            "Names of input dataset types that should be used as-is instead "
            "of being mocked. May include dataset types not relevant for "
            "this task, which will be ignored."
        ),
        default=(),
        optional=False,
    )

class MockPipelineTask(BaseTestPipelineTask):
    """A test-utility implementation of `PipelineTask` with connections
    generated by mocking those of a real task.

    Notes
    -----
    At present `MockPipelineTask` simply drops any ``initInput`` and
    ``initOutput`` connections present on the original, since `MockDataset`
    creation for those would have to happen in the code that executes the
    task, not in the task itself. Because `MockPipelineTask` never
    instantiates the mock task (just its connections class), this is a
    limitation on what the mocks can be used to test, not anything deeper.
    """

    ConfigClass: ClassVar[type[PipelineTaskConfig]] = MockPipelineTaskConfig

class DynamicConnectionConfig(Config):
    """A config class that defines a completely dynamic connection."""

    dataset_type_name = Field[str](doc="Name for the dataset type as seen by the butler.", dtype=str)
    dimensions = ListField[str](doc="Dimensions for the dataset type.", dtype=str, default=[])
    storage_class = Field[str](
        doc="Name of the butler storage class for the dataset type.", dtype=str, default="StructuredDataDict"
    )
    is_calibration = Field[bool](doc="Whether this dataset type is a calibration.", dtype=bool, default=False)
    multiple = Field[bool](
        doc="Whether this connection gets or puts multiple datasets for each quantum.",
        dtype=bool,
        default=False,
    )
    mock_storage_class = Field[bool](
        doc="Whether the storage class should actually be a mock of the storage class given.",
        dtype=bool,
        default=True,
    )

    def make_connection(self, cls: type[_T]) -> _T:
        storage_class = self.storage_class
        if self.mock_storage_class:
            storage_class = MockStorageClass.get_or_register_mock(storage_class).name
        if issubclass(cls, cT.DimensionedConnection):
            return cls(  # type: ignore
                name=self.dataset_type_name,
                storageClass=storage_class,
                isCalibration=self.is_calibration,
                multiple=self.multiple,
                dimensions=frozenset(self.dimensions),
            )
        else:
            return cls(
                name=self.dataset_type_name,
                storageClass=storage_class,
                multiple=self.multiple,
            )

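# A minimal sketch (hypothetical names): define a per-detector input
# connection from configuration and materialize it as a connection object.
#
#     cfg = DynamicConnectionConfig()
#     cfg.dataset_type_name = "mock_input"
#     cfg.dimensions = ["instrument", "detector"]
#     cfg.storage_class = "StructuredDataDict"
#     input_connection = cfg.make_connection(cT.Input)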

class DynamicTestPipelineTaskConnections(PipelineTaskConnections, dimensions=()):
    """A connections class whose dimensions and connections are wholly
    determined via configuration.
    """

    def __init__(self, *, config: DynamicTestPipelineTaskConfig):
        self.dimensions.update(config.dimensions)
        connection_config: DynamicConnectionConfig
        for connection_name, connection_config in config.init_inputs.items():
            setattr(self, connection_name, connection_config.make_connection(cT.InitInput))
        for connection_name, connection_config in config.init_outputs.items():
            setattr(self, connection_name, connection_config.make_connection(cT.InitOutput))
        for connection_name, connection_config in config.prerequisite_inputs.items():
            setattr(self, connection_name, connection_config.make_connection(cT.PrerequisiteInput))
        for connection_name, connection_config in config.inputs.items():
            setattr(self, connection_name, connection_config.make_connection(cT.Input))
        for connection_name, connection_config in config.outputs.items():
            setattr(self, connection_name, connection_config.make_connection(cT.Output))

class DynamicTestPipelineTaskConfig(
    PipelineTaskConfig, pipelineConnections=DynamicTestPipelineTaskConnections
):
    """Configuration for DynamicTestPipelineTask."""

    dimensions = ListField[str](doc="Dimensions for the task's quanta.", dtype=str, default=[])
    init_inputs = ConfigDictField(
        doc=(
            "Init-input connections, keyed by the connection name as seen by the task. "
            "Must be empty if the task will be constructed."
        ),
        keytype=str,
        itemtype=DynamicConnectionConfig,
        default={},
    )
    init_outputs = ConfigDictField(
        doc=(
            "Init-output connections, keyed by the connection name as seen by the task. "
            "Must be empty if the task will be constructed."
        ),
        keytype=str,
        itemtype=DynamicConnectionConfig,
        default={},
    )
    prerequisite_inputs = ConfigDictField(
        doc="Prerequisite input connections, keyed by the connection name as seen by the task.",
        keytype=str,
        itemtype=DynamicConnectionConfig,
        default={},
    )
    inputs = ConfigDictField(
        doc="Regular input connections, keyed by the connection name as seen by the task.",
        keytype=str,
        itemtype=DynamicConnectionConfig,
        default={},
    )
    outputs = ConfigDictField(
        doc="Regular output connections, keyed by the connection name as seen by the task.",
        keytype=str,
        itemtype=DynamicConnectionConfig,
        default={},
    )

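# A minimal sketch (hypothetical connection names): configure a task with one
# mocked input and one mocked output, each keyed by the connection name the
# task will see. Field assignment on pex_config ConfigDictField entries is
# assumed to copy the given config, as usual for lsst.pex.config.
#
#     config = DynamicTestPipelineTaskConfig()
#     config.dimensions = ["instrument", "detector"]
#     config.inputs["input_connection"] = DynamicConnectionConfig(
#         dataset_type_name="mock_input", dimensions=["instrument", "detector"]
#     )
#     config.outputs["output_connection"] = DynamicConnectionConfig(
#         dataset_type_name="mock_output", dimensions=["instrument", "detector"]
#     )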

class DynamicTestPipelineTask(BaseTestPipelineTask):
    """A test-utility implementation of `PipelineTask` with dimensions and
    connections determined wholly from configuration.
    """

    ConfigClass: ClassVar[type[PipelineTaskConfig]] = DynamicTestPipelineTaskConfig