Coverage for tests/test_separablePipelineExecutor.py: 12% (348 statements)

# This file is part of ctrl_mpexec.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This software is dual licensed under the GNU General Public License and also
# under a 3-clause BSD license. Recipients may choose which of these licenses
# to use; please see the files gpl-3.0.txt and/or bsd_license.txt,
# respectively. If you choose the GPL option then the following text applies
# (but note that there is still no warranty even if you opt for BSD instead):
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

import os
import tempfile
import unittest

import lsst.daf.butler
import lsst.daf.butler.tests as butlerTests
import lsst.pex.config
import lsst.utils.tests
from lsst.ctrl.mpexec import SeparablePipelineExecutor
from lsst.pipe.base import Instrument, Pipeline, PipelineDatasetTypes, TaskMetadata
from lsst.resources import ResourcePath

TESTDIR = os.path.abspath(os.path.dirname(__file__))


class SeparablePipelineExecutorTests(lsst.utils.tests.TestCase):
    """Test the SeparablePipelineExecutor API with a trivial task."""

    pipeline_file = os.path.join(TESTDIR, "pipeline_separable.yaml")
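
    # The test pipeline in pipeline_separable.yaml is assumed to define two
    # trivial tasks labelled "a" and "b": "a" reads the "input" dict and
    # writes "intermediate", and "b" reads "intermediate" and writes "output"
    # (see the make_pipeline and run_pipeline tests below).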

    def setUp(self):
        repodir = tempfile.TemporaryDirectory()
        # TemporaryDirectory warns on leaks; addCleanup also keeps it from
        # getting garbage-collected.
        self.addCleanup(tempfile.TemporaryDirectory.cleanup, repodir)

        # The standalone parameter forces the returned config to also include
        # the information from the search paths.
        config = lsst.daf.butler.Butler.makeRepo(
            repodir.name, standalone=True, searchPaths=[os.path.join(TESTDIR, "config")]
        )
        butler = lsst.daf.butler.Butler.from_config(config, writeable=True)
        output = "fake"
        output_run = f"{output}/{Instrument.makeCollectionTimestamp()}"
        butler.registry.registerCollection(output_run, lsst.daf.butler.CollectionType.RUN)
        butler.registry.registerCollection(output, lsst.daf.butler.CollectionType.CHAINED)
        butler.registry.setCollectionChain(output, [output_run])
        self.butler = lsst.daf.butler.Butler.from_config(butler=butler, collections=[output], run=output_run)
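
        # Register only the dataset types used by task "a" here; task "b"'s
        # types ("output", "b_config", "b_log", "b_metadata") are registered
        # per test or via pre_execute_qgraph(register_dataset_types=True),
        # presumably so that registration behavior itself can be exercised.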

        butlerTests.addDatasetType(self.butler, "input", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "intermediate", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "a_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "a_metadata", set(), "TaskMetadata")
        butlerTests.addDatasetType(self.butler, "a_config", set(), "Config")
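
    # The pre_execute_qgraph tests below check the effect of each flag:
    # register_dataset_types should register the not-yet-registered output
    # dataset types, save_init_outputs should write init-outputs such as
    # "a_config", and save_versions should write the packages dataset.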

    def test_pre_execute_qgraph(self):
        # Too hard to make a quantum graph from scratch.
        executor = SeparablePipelineExecutor(self.butler)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)

        butlerTests.addDatasetType(self.butler, "output", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "b_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "b_metadata", set(), "TaskMetadata")
        butlerTests.addDatasetType(self.butler, PipelineDatasetTypes.packagesDatasetName, set(), "Packages")

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=False,
            save_init_outputs=False,
            save_versions=False,
        )
        self.assertFalse(self.butler.exists("a_config", {}, collections=[self.butler.run]))
        self.assertFalse(self.butler.exists(PipelineDatasetTypes.packagesDatasetName, {}))

    def test_pre_execute_qgraph_unconnected(self):
        # Unconnected graph; see
        # test_make_quantum_graph_nowhere_skippartial_clobber.
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        graph = executor.make_quantum_graph(pipeline)

        butlerTests.addDatasetType(self.butler, "output", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "b_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "b_metadata", set(), "TaskMetadata")
        butlerTests.addDatasetType(self.butler, PipelineDatasetTypes.packagesDatasetName, set(), "Packages")

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=False,
            save_init_outputs=False,
            save_versions=False,
        )
        self.assertFalse(self.butler.exists("a_config", {}, collections=[self.butler.run]))
        self.assertFalse(self.butler.exists(PipelineDatasetTypes.packagesDatasetName, {}))

    def test_pre_execute_qgraph_empty(self):
        executor = SeparablePipelineExecutor(self.butler)
        graph = lsst.pipe.base.QuantumGraph({}, universe=self.butler.dimensions)

        butlerTests.addDatasetType(self.butler, "output", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "b_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "b_metadata", set(), "TaskMetadata")
        butlerTests.addDatasetType(self.butler, PipelineDatasetTypes.packagesDatasetName, set(), "Packages")

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=False,
            save_init_outputs=False,
            save_versions=False,
        )
        self.assertFalse(self.butler.exists("a_config", {}, collections=[self.butler.run]))
        self.assertFalse(self.butler.exists(PipelineDatasetTypes.packagesDatasetName, {}))

    def test_pre_execute_qgraph_register(self):
        executor = SeparablePipelineExecutor(self.butler)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=False,
            save_versions=False,
        )
        self.assertEqual({d.name for d in self.butler.registry.queryDatasetTypes("output")}, {"output"})
        self.assertEqual(
            {d.name for d in self.butler.registry.queryDatasetTypes("b_*")},
            {"b_config", "b_log", "b_metadata"},
        )
        self.assertFalse(self.butler.exists("a_config", {}, collections=[self.butler.run]))
        self.assertFalse(self.butler.exists(PipelineDatasetTypes.packagesDatasetName, {}))

    def test_pre_execute_qgraph_init_outputs(self):
        # Too hard to make a quantum graph from scratch.
        executor = SeparablePipelineExecutor(self.butler)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)

        butlerTests.addDatasetType(self.butler, "output", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "b_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "b_metadata", set(), "TaskMetadata")
        butlerTests.addDatasetType(self.butler, PipelineDatasetTypes.packagesDatasetName, set(), "Packages")

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=False,
            save_init_outputs=True,
            save_versions=False,
        )
        self.assertTrue(self.butler.exists("a_config", {}, collections=[self.butler.run]))
        self.assertFalse(self.butler.exists(PipelineDatasetTypes.packagesDatasetName, {}))

    def test_pre_execute_qgraph_versions(self):
        executor = SeparablePipelineExecutor(self.butler)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)

        butlerTests.addDatasetType(self.butler, "output", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "b_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "b_metadata", set(), "TaskMetadata")
        butlerTests.addDatasetType(self.butler, PipelineDatasetTypes.packagesDatasetName, set(), "Packages")

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=False,
            save_init_outputs=True,
            save_versions=True,
        )
        self.assertTrue(self.butler.exists("a_config", {}, collections=[self.butler.run]))
        self.assertTrue(self.butler.exists(PipelineDatasetTypes.packagesDatasetName, {}))
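
    # The executor is expected to reject a butler that lacks defaulted input
    # collections or a default output run; the next two tests check both
    # failure modes.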

    def test_init_badinput(self):
        butler = lsst.daf.butler.Butler.from_config(butler=self.butler, collections=[], run="foo")

        with self.assertRaises(ValueError):
            SeparablePipelineExecutor(butler)

    def test_init_badoutput(self):
        butler = lsst.daf.butler.Butler.from_config(butler=self.butler, collections=["foo"])

        with self.assertRaises(ValueError):
            SeparablePipelineExecutor(butler)

    def test_make_pipeline_full(self):
        executor = SeparablePipelineExecutor(self.butler)
        for uri in [
            self.pipeline_file,
            ResourcePath(self.pipeline_file),
            ResourcePath(self.pipeline_file).geturl(),
        ]:
            pipeline = executor.make_pipeline(uri)
            self.assertEqual(len(pipeline), 2)
            self.assertEqual({t.label for t in pipeline}, {"a", "b"})

    def test_make_pipeline_subset(self):
        executor = SeparablePipelineExecutor(self.butler)
        path = self.pipeline_file + "#a"
        for uri in [
            path,
            ResourcePath(path),
            ResourcePath(path).geturl(),
        ]:
            pipeline = executor.make_pipeline(uri)
            self.assertEqual(len(pipeline), 1)
            self.assertEqual({t.label for t in pipeline}, {"a"})
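
    # Naming convention for the make_quantum_graph tests: "nowhere" means no
    # where= data-ID constraint is passed (see the TODO below); "noskip",
    # "skipnone", "skippartial", and "skiptotal" describe skip_existing_in
    # and how much of task "a"'s output already exists in the run; and
    # "noclobber"/"clobber" is the clobber_output setting.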

    def test_make_quantum_graph_nowhere_noskip_noclobber(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=False)
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_noskip_noclobber_conflict(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=False)
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")

        with self.assertRaises(lsst.pipe.base.graphBuilder.OutputExistsError):
            executor.make_quantum_graph(pipeline)

    # TODO: need more complex task and Butler to test
    # make_quantum_graph(where=...)

    def test_make_quantum_graph_nowhere_skipnone_noclobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=False,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_skiptotal_noclobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=False,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")
        self.butler.put(lsst.pex.config.Config(), "a_config")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 1)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"b"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_skippartial_noclobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=False,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")

        with self.assertRaises(lsst.pipe.base.graphBuilder.OutputExistsError):
            executor.make_quantum_graph(pipeline)

    def test_make_quantum_graph_nowhere_noskip_clobber(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=True)
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_noskip_clobber_conflict(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=True)
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_skipnone_clobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_skiptotal_clobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")
        self.butler.put(lsst.pex.config.Config(), "a_config")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 1)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"b"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_skippartial_clobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_noinput(self):
        executor = SeparablePipelineExecutor(self.butler)
        pipeline = Pipeline.fromFile(self.pipeline_file)

        graph = executor.make_quantum_graph(pipeline)
        self.assertEqual(len(graph), 0)

    def test_make_quantum_graph_alloutput_skip(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=[self.butler.run])
        pipeline = Pipeline.fromFile(self.pipeline_file)

        butlerTests.addDatasetType(self.butler, "output", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "b_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "b_metadata", set(), "TaskMetadata")
        butlerTests.addDatasetType(self.butler, "b_config", set(), "Config")

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")
        self.butler.put({"zero": 0}, "output")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "b_log")
        self.butler.put(TaskMetadata(), "b_metadata")
        self.butler.put(lsst.pex.config.Config(), "a_config")
        self.butler.put(lsst.pex.config.Config(), "b_config")

        graph = executor.make_quantum_graph(pipeline)
        self.assertEqual(len(graph), 0)
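
    # The run_pipeline tests execute the graph end to end. Task "a" is
    # expected to add {"one": 1} to its input dict and task "b" to add
    # {"two": 2}, so a full run turns {"zero": 0} into
    # {"zero": 0, "one": 1, "two": 2}.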

    def test_run_pipeline_noskip_noclobber_fullgraph(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=False)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        self.assertEqual(self.butler.get("output"), {"zero": 0, "one": 1, "two": 2})

    def test_run_pipeline_noskip_noclobber_emptygraph(self):
        old_repo_size = self.butler.registry.queryDatasets(...).count()

        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=False)
        graph = lsst.pipe.base.QuantumGraph({}, universe=self.butler.dimensions)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        # Empty graph execution should do nothing.
        self.assertEqual(self.butler.registry.queryDatasets(...).count(), old_repo_size)

    def test_run_pipeline_skipnone_noclobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=False,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        self.assertEqual(self.butler.get("output"), {"zero": 0, "one": 1, "two": 2})

    def test_run_pipeline_skiptotal_noclobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=False,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")
        self.butler.put(lsst.pex.config.Config(), "a_config")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("output"), {"zero": 0, "two": 2})

    def test_run_pipeline_noskip_clobber_connected(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=True)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        self.assertEqual(self.butler.get("output"), {"zero": 0, "one": 1, "two": 2})

    def test_run_pipeline_noskip_clobber_unconnected(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=True)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        # The value of output is undefined; it depends on which task ran first.
        self.assertTrue(self.butler.exists("output", {}))

    def test_run_pipeline_skipnone_clobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        self.assertEqual(self.butler.get("output"), {"zero": 0, "one": 1, "two": 2})

    def test_run_pipeline_skiptotal_clobber_connected(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")
        self.butler.put(lsst.pex.config.Config(), "a_config")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("output"), {"zero": 0, "two": 2})

    def test_run_pipeline_skippartial_clobber_unconnected(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        # The value of output is undefined; it depends on which task ran first.
        self.assertTrue(self.butler.exists("output", {}))


class MemoryTester(lsst.utils.tests.MemoryTestCase):
    """Generic test for file leaks."""


def setup_module(module):
    """Set up the module for pytest."""
    lsst.utils.tests.init()


if __name__ == "__main__":
    lsst.utils.tests.init()
    unittest.main()