Coverage for tests/test_separablePipelineExecutor.py: 11% (357 statements)

coverage.py v7.2.5, created at 2023-05-02 10:22 +0000

# This file is part of ctrl_mpexec.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

import os
import tempfile
import unittest

import lsst.daf.butler
import lsst.daf.butler.tests as butlerTests
import lsst.utils.tests
from lsst.ctrl.mpexec import SeparablePipelineExecutor
from lsst.pipe.base import Instrument, Pipeline, PipelineDatasetTypes, TaskMetadata
from lsst.resources import ResourcePath

TESTDIR = os.path.abspath(os.path.dirname(__file__))

class SeparablePipelineExecutorTests(lsst.utils.tests.TestCase):
    """Test the SeparablePipelineExecutor API with a trivial task."""

    pipeline_file = os.path.join(TESTDIR, "pipeline_separable.yaml")
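    # The contents of pipeline_separable.yaml are not reproduced here; from
    # the assertions below it is assumed to define two trivial tasks labelled
    # "a" and "b", where "a" reads the "input" dict and writes "intermediate",
    # and "b" reads "intermediate" and writes "output", each with the usual
    # per-task init-output config, log, and metadata datasets (a_config,
    # a_log, a_metadata, b_config, b_log, b_metadata).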

    def setUp(self):
        repodir = tempfile.TemporaryDirectory()
        # TemporaryDirectory warns on leaks; addCleanup also keeps it from
        # getting garbage-collected.
        self.addCleanup(tempfile.TemporaryDirectory.cleanup, repodir)

        # standalone parameter forces the returned config to also include
        # the information from the search paths.
        config = lsst.daf.butler.Butler.makeRepo(
            repodir.name, standalone=True, searchPaths=[os.path.join(TESTDIR, "config")]
        )
        butler = lsst.daf.butler.Butler(config, writeable=True)
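        # Set up the output collections the way pipetask normally would: a
        # timestamped RUN collection to write into, chained behind a CHAINED
        # collection ("fake") that the test butler reads from.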

        output = "fake"
        output_run = f"{output}/{Instrument.makeCollectionTimestamp()}"
        butler.registry.registerCollection(output_run, lsst.daf.butler.CollectionType.RUN)
        butler.registry.registerCollection(output, lsst.daf.butler.CollectionType.CHAINED)
        butler.registry.setCollectionChain(output, [output_run])
        self.butler = lsst.daf.butler.Butler(butler=butler, collections=[output], run=output_run)

        butlerTests.addDatasetType(self.butler, "input", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "intermediate", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "a_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "a_metadata", set(), "TaskMetadata")
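    # The tests below drive the executor's steps separately: building a
    # Pipeline, building a QuantumGraph, pre-executing the graph (dataset-type
    # registration, init-outputs, version recording), and finally running it.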

    def test_pre_execute_qgraph(self):
        # Too hard to make a quantum graph from scratch.
        executor = SeparablePipelineExecutor(self.butler)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)

        butlerTests.addDatasetType(self.butler, "output", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "a_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "b_metadata", set(), "TaskMetadata")
        butlerTests.addDatasetType(self.butler, PipelineDatasetTypes.packagesDatasetName, set(), "Packages")

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=False,
            save_init_outputs=False,
            save_versions=False,
        )
        with self.assertRaises(LookupError):
            self.butler.datasetExists("a_config", {}, collections=[self.butler.run])
        with self.assertRaises(LookupError):
            self.butler.datasetExists(PipelineDatasetTypes.packagesDatasetName, {})

    def test_pre_execute_qgraph_unconnected(self):
        # Unconnected graph; see
        # test_make_quantum_graph_nowhere_skippartial_clobber.
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        graph = executor.make_quantum_graph(pipeline)

        butlerTests.addDatasetType(self.butler, "output", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "a_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "b_metadata", set(), "TaskMetadata")
        butlerTests.addDatasetType(self.butler, PipelineDatasetTypes.packagesDatasetName, set(), "Packages")

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=False,
            save_init_outputs=False,
            save_versions=False,
        )
        with self.assertRaises(LookupError):
            self.butler.datasetExists("a_config", {}, collections=[self.butler.run])
        with self.assertRaises(LookupError):
            self.butler.datasetExists(PipelineDatasetTypes.packagesDatasetName, {})

    def test_pre_execute_qgraph_empty(self):
        executor = SeparablePipelineExecutor(self.butler)
        graph = lsst.pipe.base.QuantumGraph({}, universe=self.butler.dimensions)

        butlerTests.addDatasetType(self.butler, "output", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "a_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "b_metadata", set(), "TaskMetadata")
        butlerTests.addDatasetType(self.butler, PipelineDatasetTypes.packagesDatasetName, set(), "Packages")

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=False,
            save_init_outputs=False,
            save_versions=False,
        )
        with self.assertRaises(LookupError):
            self.butler.datasetExists("a_config", {}, collections=[self.butler.run])
        with self.assertRaises(LookupError):
            self.butler.datasetExists(PipelineDatasetTypes.packagesDatasetName, {})

    def test_pre_execute_qgraph_register(self):
        executor = SeparablePipelineExecutor(self.butler)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=False,
            save_versions=False,
        )
        self.assertEqual({d.name for d in self.butler.registry.queryDatasetTypes("output")}, {"output"})
        self.assertEqual(
            {d.name for d in self.butler.registry.queryDatasetTypes("b_*")},
            {"b_config", "b_log", "b_metadata"},
        )
        with self.assertRaises(LookupError):
            self.butler.datasetExists("a_config", {}, collections=[self.butler.run])
        with self.assertRaises(LookupError):
            self.butler.datasetExists(PipelineDatasetTypes.packagesDatasetName, {})

    def test_pre_execute_qgraph_init_outputs(self):
        # Too hard to make a quantum graph from scratch.
        executor = SeparablePipelineExecutor(self.butler)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)

        butlerTests.addDatasetType(self.butler, "output", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "a_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "b_metadata", set(), "TaskMetadata")
        butlerTests.addDatasetType(self.butler, PipelineDatasetTypes.packagesDatasetName, set(), "Packages")

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=False,
            save_init_outputs=True,
            save_versions=False,
        )
        self.assertTrue(self.butler.datasetExists("a_config", {}, collections=[self.butler.run]))
        with self.assertRaises(LookupError):
            self.butler.datasetExists(PipelineDatasetTypes.packagesDatasetName, {})

    def test_pre_execute_qgraph_versions(self):
        executor = SeparablePipelineExecutor(self.butler)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)

        butlerTests.addDatasetType(self.butler, "output", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "a_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "b_metadata", set(), "TaskMetadata")
        butlerTests.addDatasetType(self.butler, PipelineDatasetTypes.packagesDatasetName, set(), "Packages")

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=False,
            save_init_outputs=True,
            save_versions=True,
        )
        self.assertTrue(self.butler.datasetExists("a_config", {}, collections=[self.butler.run]))
        self.assertTrue(self.butler.datasetExists(PipelineDatasetTypes.packagesDatasetName, {}))

    def test_init_badinput(self):
        butler = lsst.daf.butler.Butler(butler=self.butler, collections=[], run="foo")

        with self.assertRaises(ValueError):
            SeparablePipelineExecutor(butler)

    def test_init_badoutput(self):
        butler = lsst.daf.butler.Butler(butler=self.butler, collections=["foo"])

        with self.assertRaises(ValueError):
            SeparablePipelineExecutor(butler)

    def test_make_pipeline_full(self):
        executor = SeparablePipelineExecutor(self.butler)
        for uri in [
            self.pipeline_file,
            ResourcePath(self.pipeline_file),
            ResourcePath(self.pipeline_file).geturl(),
        ]:
            pipeline = executor.make_pipeline(uri)
            self.assertEqual(len(pipeline), 2)
            self.assertEqual({t.label for t in pipeline}, {"a", "b"})

    def test_make_pipeline_subset(self):
        executor = SeparablePipelineExecutor(self.butler)
        path = self.pipeline_file + "#a"
        for uri in [
            path,
            ResourcePath(path),
            ResourcePath(path).geturl(),
        ]:
            pipeline = executor.make_pipeline(uri)
            self.assertEqual(len(pipeline), 1)
            self.assertEqual({t.label for t in pipeline}, {"a"})
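    # Naming convention used by the make_quantum_graph tests below: "nowhere"
    # means no user data-ID constraint is passed; "noskip"/"skipnone"/
    # "skiptotal"/"skippartial" describe whether skip_existing_in is set and
    # whether none, all, or only some of task a's outputs already exist; and
    # "noclobber"/"clobber" map to the clobber_output flag.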

    def test_make_quantum_graph_nowhere_noskip_noclobber(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=False)
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_noskip_noclobber_conflict(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=False)
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")

        with self.assertRaises(lsst.pipe.base.graphBuilder.OutputExistsError):
            executor.make_quantum_graph(pipeline)

    # TODO: need more complex task and Butler to test
    # make_quantum_graph(where=...)

    def test_make_quantum_graph_nowhere_skipnone_noclobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=False,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_skiptotal_noclobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=False,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 1)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"b"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_skippartial_noclobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=False,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")

        with self.assertRaises(lsst.pipe.base.graphBuilder.OutputExistsError):
            executor.make_quantum_graph(pipeline)

    def test_make_quantum_graph_nowhere_noskip_clobber(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=True)
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_noskip_clobber_conflict(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=True)
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_skipnone_clobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_skiptotal_clobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 1)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"b"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_skippartial_clobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_noinput(self):
        executor = SeparablePipelineExecutor(self.butler)
        pipeline = Pipeline.fromFile(self.pipeline_file)

        graph = executor.make_quantum_graph(pipeline)
        self.assertEqual(len(graph), 0)

    def test_make_quantum_graph_alloutput_skip(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=[self.butler.run])
        pipeline = Pipeline.fromFile(self.pipeline_file)

        butlerTests.addDatasetType(self.butler, "output", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "b_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "b_metadata", set(), "TaskMetadata")

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")
        self.butler.put({"zero": 0}, "output")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "b_log")
        self.butler.put(TaskMetadata(), "b_metadata")

        graph = executor.make_quantum_graph(pipeline)
        self.assertEqual(len(graph), 0)
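    # The run_pipeline tests execute the graph end to end and then check the
    # datasets written by the two tasks; per the assertions, task a is assumed
    # to add {"one": 1} to its input dict and task b to add {"two": 2}.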

    def test_run_pipeline_noskip_noclobber_fullgraph(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=False)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        self.assertEqual(self.butler.get("output"), {"zero": 0, "one": 1, "two": 2})

    def test_run_pipeline_noskip_noclobber_emptygraph(self):
        old_repo_size = self.butler.registry.queryDatasets(...).count()

        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=False)
        graph = lsst.pipe.base.QuantumGraph({}, universe=self.butler.dimensions)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        # Empty graph execution should do nothing.
        self.assertEqual(self.butler.registry.queryDatasets(...).count(), old_repo_size)

    def test_run_pipeline_skipnone_noclobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=False,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        self.assertEqual(self.butler.get("output"), {"zero": 0, "one": 1, "two": 2})

    def test_run_pipeline_skiptotal_noclobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=False,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("output"), {"zero": 0, "two": 2})

    def test_run_pipeline_noskip_clobber_connected(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=True)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        self.assertEqual(self.butler.get("output"), {"zero": 0, "one": 1, "two": 2})

    def test_run_pipeline_noskip_clobber_unconnected(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=True)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        # The value of output is undefined; it depends on which task ran first.
        self.assertTrue(self.butler.datasetExists("output", {}))

    def test_run_pipeline_skipnone_clobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        self.assertEqual(self.butler.get("output"), {"zero": 0, "one": 1, "two": 2})

    def test_run_pipeline_skiptotal_clobber_connected(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("output"), {"zero": 0, "two": 2})

    def test_run_pipeline_skippartial_clobber_unconnected(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        # The value of output is undefined; it depends on which task ran first.
        self.assertTrue(self.butler.datasetExists("output", {}))

class MemoryTester(lsst.utils.tests.MemoryTestCase):
    pass


def setup_module(module):
    lsst.utils.tests.init()


if __name__ == "__main__":
    lsst.utils.tests.init()
    unittest.main()