Coverage for tests/test_separablePipelineExecutor.py: 12%
345 statements
« prev ^ index » next coverage.py v7.2.7, created at 2023-06-09 02:48 -0700
1# This file is part of ctrl_mpexec.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <https://www.gnu.org/licenses/>.
23import os
24import tempfile
25import unittest
27import lsst.daf.butler
28import lsst.daf.butler.tests as butlerTests
29import lsst.utils.tests
30from lsst.ctrl.mpexec import SeparablePipelineExecutor
31from lsst.pipe.base import Instrument, Pipeline, PipelineDatasetTypes, TaskMetadata
32from lsst.resources import ResourcePath
34TESTDIR = os.path.abspath(os.path.dirname(__file__))
class SeparablePipelineExecutorTests(lsst.utils.tests.TestCase):
    """Test the SeparablePipelineExecutor API with a trivial task."""

    pipeline_file = os.path.join(TESTDIR, "pipeline_separable.yaml")

    def setUp(self):
        repodir = tempfile.TemporaryDirectory()
        # TemporaryDirectory warns on leaks; addCleanup also keeps it from
        # getting garbage-collected.
        self.addCleanup(repodir.cleanup)

        # standalone parameter forces the returned config to also include
        # the information from the search paths.
        config = lsst.daf.butler.Butler.makeRepo(
            repodir.name, standalone=True, searchPaths=[os.path.join(TESTDIR, "config")]
        )
        butler = lsst.daf.butler.Butler(config, writeable=True)
        output = "fake"
        output_run = f"{output}/{Instrument.makeCollectionTimestamp()}"
        butler.registry.registerCollection(output_run, lsst.daf.butler.CollectionType.RUN)
        butler.registry.registerCollection(output, lsst.daf.butler.CollectionType.CHAINED)
        butler.registry.setCollectionChain(output, [output_run])
        self.butler = lsst.daf.butler.Butler(butler=butler, collections=[output], run=output_run)

        butlerTests.addDatasetType(self.butler, "input", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "intermediate", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "a_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "a_metadata", set(), "TaskMetadata")

    def _register_downstream_dataset_types(self):
        """Register the dataset types the pipeline would produce, so that
        ``pre_execute_qgraph`` can be exercised with
        ``register_dataset_types=False``.
        """
        butlerTests.addDatasetType(self.butler, "output", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "a_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "b_metadata", set(), "TaskMetadata")
        butlerTests.addDatasetType(self.butler, PipelineDatasetTypes.packagesDatasetName, set(), "Packages")

    def test_pre_execute_qgraph(self):
        """pre_execute_qgraph with all save options off writes nothing."""
        # Too hard to make a quantum graph from scratch.
        executor = SeparablePipelineExecutor(self.butler)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)

        self._register_downstream_dataset_types()

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=False,
            save_init_outputs=False,
            save_versions=False,
        )
        self.assertFalse(self.butler.exists("a_config", {}, collections=[self.butler.run]))
        self.assertFalse(self.butler.exists(PipelineDatasetTypes.packagesDatasetName, {}))

    def test_pre_execute_qgraph_unconnected(self):
        """pre_execute_qgraph on an unconnected graph writes nothing."""
        # Unconnected graph; see
        # test_make_quantum_graph_nowhere_skippartial_clobber.
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        graph = executor.make_quantum_graph(pipeline)

        self._register_downstream_dataset_types()

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=False,
            save_init_outputs=False,
            save_versions=False,
        )
        self.assertFalse(self.butler.exists("a_config", {}, collections=[self.butler.run]))
        self.assertFalse(self.butler.exists(PipelineDatasetTypes.packagesDatasetName, {}))

    def test_pre_execute_qgraph_empty(self):
        """pre_execute_qgraph on an empty graph writes nothing."""
        executor = SeparablePipelineExecutor(self.butler)
        graph = lsst.pipe.base.QuantumGraph({}, universe=self.butler.dimensions)

        self._register_downstream_dataset_types()

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=False,
            save_init_outputs=False,
            save_versions=False,
        )
        self.assertFalse(self.butler.exists("a_config", {}, collections=[self.butler.run]))
        self.assertFalse(self.butler.exists(PipelineDatasetTypes.packagesDatasetName, {}))

    def test_pre_execute_qgraph_register(self):
        """register_dataset_types=True registers the pipeline's output
        dataset types but still saves no init-outputs or versions.
        """
        executor = SeparablePipelineExecutor(self.butler)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=False,
            save_versions=False,
        )
        self.assertEqual({d.name for d in self.butler.registry.queryDatasetTypes("output")}, {"output"})
        self.assertEqual(
            {d.name for d in self.butler.registry.queryDatasetTypes("b_*")},
            {"b_config", "b_log", "b_metadata"},
        )
        self.assertFalse(self.butler.exists("a_config", {}, collections=[self.butler.run]))
        self.assertFalse(self.butler.exists(PipelineDatasetTypes.packagesDatasetName, {}))

    def test_pre_execute_qgraph_init_outputs(self):
        """save_init_outputs=True writes task configs but not packages."""
        # Too hard to make a quantum graph from scratch.
        executor = SeparablePipelineExecutor(self.butler)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)

        self._register_downstream_dataset_types()

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=False,
            save_init_outputs=True,
            save_versions=False,
        )
        self.assertTrue(self.butler.exists("a_config", {}, collections=[self.butler.run]))
        self.assertFalse(self.butler.exists(PipelineDatasetTypes.packagesDatasetName, {}))

    def test_pre_execute_qgraph_versions(self):
        """save_versions=True additionally writes the packages dataset."""
        executor = SeparablePipelineExecutor(self.butler)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)

        self._register_downstream_dataset_types()

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=False,
            save_init_outputs=True,
            save_versions=True,
        )
        self.assertTrue(self.butler.exists("a_config", {}, collections=[self.butler.run]))
        self.assertTrue(self.butler.exists(PipelineDatasetTypes.packagesDatasetName, {}))

    def test_init_badinput(self):
        """Constructing with a butler that has no input collections fails."""
        butler = lsst.daf.butler.Butler(butler=self.butler, collections=[], run="foo")

        with self.assertRaises(ValueError):
            SeparablePipelineExecutor(butler)

    def test_init_badoutput(self):
        """Constructing with a butler that has no output run fails."""
        butler = lsst.daf.butler.Butler(butler=self.butler, collections=["foo"])

        with self.assertRaises(ValueError):
            SeparablePipelineExecutor(butler)

    def test_make_pipeline_full(self):
        """make_pipeline accepts str, ResourcePath, and URL forms and loads
        the entire pipeline.
        """
        executor = SeparablePipelineExecutor(self.butler)
        for uri in [
            self.pipeline_file,
            ResourcePath(self.pipeline_file),
            ResourcePath(self.pipeline_file).geturl(),
        ]:
            pipeline = executor.make_pipeline(uri)
            self.assertEqual(len(pipeline), 2)
            self.assertEqual({t.label for t in pipeline}, {"a", "b"})

    def test_make_pipeline_subset(self):
        """A '#label' fragment selects a single-task subset."""
        executor = SeparablePipelineExecutor(self.butler)
        path = self.pipeline_file + "#a"
        for uri in [
            path,
            ResourcePath(path),
            ResourcePath(path).geturl(),
        ]:
            pipeline = executor.make_pipeline(uri)
            self.assertEqual(len(pipeline), 1)
            self.assertEqual({t.label for t in pipeline}, {"a"})

    def test_make_quantum_graph_nowhere_noskip_noclobber(self):
        """No skip, no clobber, fresh repo: full two-quantum graph."""
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=False)
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_noskip_noclobber_conflict(self):
        """Pre-existing outputs without skip or clobber raise."""
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=False)
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")

        with self.assertRaises(lsst.pipe.base.graphBuilder.OutputExistsError):
            executor.make_quantum_graph(pipeline)

    # TODO: need more complex task and Butler to test
    # make_quantum_graph(where=...)

    def test_make_quantum_graph_nowhere_skipnone_noclobber(self):
        """Skip enabled but nothing to skip: full graph."""
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=False,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_skiptotal_noclobber(self):
        """Task 'a' fully complete and skippable: only 'b' remains."""
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=False,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 1)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"b"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_skippartial_noclobber(self):
        """Partially complete 'a' with skip but no clobber raises."""
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=False,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")

        with self.assertRaises(lsst.pipe.base.graphBuilder.OutputExistsError):
            executor.make_quantum_graph(pipeline)

    def test_make_quantum_graph_nowhere_noskip_clobber(self):
        """Clobber enabled, fresh repo: full graph."""
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=True)
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_noskip_clobber_conflict(self):
        """Clobber without skip redoes everything despite existing outputs."""
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=True)
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_skipnone_clobber(self):
        """Skip and clobber enabled, nothing to skip: full graph."""
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_skiptotal_clobber(self):
        """Skip and clobber enabled, 'a' complete: only 'b' remains."""
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 1)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"b"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_skippartial_clobber(self):
        """Partially complete 'a' with skip and clobber: both rerun (graph
        is disconnected because 'b' reads the existing intermediate).
        """
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_noinput(self):
        """With no inputs in the repo, the graph is empty."""
        executor = SeparablePipelineExecutor(self.butler)
        pipeline = Pipeline.fromFile(self.pipeline_file)

        graph = executor.make_quantum_graph(pipeline)
        self.assertEqual(len(graph), 0)

    def test_make_quantum_graph_alloutput_skip(self):
        """If every output already exists and is skippable, the graph is
        empty.
        """
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=[self.butler.run])
        pipeline = Pipeline.fromFile(self.pipeline_file)

        butlerTests.addDatasetType(self.butler, "output", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "b_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "b_metadata", set(), "TaskMetadata")

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")
        self.butler.put({"zero": 0}, "output")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "b_log")
        self.butler.put(TaskMetadata(), "b_metadata")

        graph = executor.make_quantum_graph(pipeline)
        self.assertEqual(len(graph), 0)

    def test_run_pipeline_noskip_noclobber_fullgraph(self):
        """Running the full graph produces intermediate and output."""
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=False)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        self.assertEqual(self.butler.get("output"), {"zero": 0, "one": 1, "two": 2})

    def test_run_pipeline_noskip_noclobber_emptygraph(self):
        """Running an empty graph must not change the repository."""
        old_repo_size = self.butler.registry.queryDatasets(...).count()

        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=False)
        graph = lsst.pipe.base.QuantumGraph({}, universe=self.butler.dimensions)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        # Empty graph execution should do nothing.
        self.assertEqual(self.butler.registry.queryDatasets(...).count(), old_repo_size)

    def test_run_pipeline_skipnone_noclobber(self):
        """Skip enabled but nothing to skip: both tasks run."""
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=False,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        self.assertEqual(self.butler.get("output"), {"zero": 0, "one": 1, "two": 2})

    def test_run_pipeline_skiptotal_noclobber(self):
        """Task 'a' skipped: 'b' runs off the pre-existing intermediate."""
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=False,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("output"), {"zero": 0, "two": 2})

    def test_run_pipeline_noskip_clobber_connected(self):
        """Clobber enabled on a fresh repo: normal connected run."""
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=True)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        self.assertEqual(self.butler.get("output"), {"zero": 0, "one": 1, "two": 2})

    def test_run_pipeline_noskip_clobber_unconnected(self):
        """Clobber with fully pre-existing 'a' outputs: both rerun."""
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=True)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        # The value of output is undefined; it depends on which task ran first.
        self.assertTrue(self.butler.exists("output", {}))

    def test_run_pipeline_skipnone_clobber(self):
        """Skip and clobber enabled, nothing to skip: normal run."""
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        self.assertEqual(self.butler.get("output"), {"zero": 0, "one": 1, "two": 2})

    def test_run_pipeline_skiptotal_clobber_connected(self):
        """Skip and clobber, 'a' complete: only 'b' runs."""
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("output"), {"zero": 0, "two": 2})

    def test_run_pipeline_skippartial_clobber_unconnected(self):
        """Skip and clobber with partially complete 'a': both rerun."""
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        # The value of output is undefined; it depends on which task ran first.
        self.assertTrue(self.butler.exists("output", {}))
class MemoryTester(lsst.utils.tests.MemoryTestCase):
    """Check for file-descriptor/memory leaks (standard LSST test
    boilerplate; behavior comes entirely from the base class).
    """

    pass
def setup_module(module):
    """Initialize LSST test utilities when run under pytest (standard
    LSST test boilerplate; ``module`` is supplied by pytest and unused).
    """
    lsst.utils.tests.init()
# Allow running this test file directly (outside pytest).
if __name__ == "__main__":
    lsst.utils.tests.init()
    unittest.main()