Coverage for tests/test_separablePipelineExecutor.py: 12% (359 statements)
Report generated by coverage.py v7.2.3, created at 2023-04-20 10:51 +0000
# This file is part of ctrl_mpexec.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import os
import tempfile
import unittest

import lsst.daf.butler
import lsst.daf.butler.tests as butlerTests
import lsst.pipe.base
import lsst.utils.tests
from lsst.ctrl.mpexec import SeparablePipelineExecutor
from lsst.pipe.base import Instrument, Pipeline, PipelineDatasetTypes, TaskMetadata
from lsst.resources import ResourcePath
# Directory containing this test module; used to locate pipeline/config fixtures.
TESTDIR = os.path.abspath(os.path.dirname(__file__))
class SeparablePipelineExecutorTests(lsst.utils.tests.TestCase):
    """Test the SeparablePipelineExecutor API with a trivial task.

    The pipeline under test (``pipeline_separable.yaml``) has two tasks,
    ``a`` (input -> intermediate) and ``b`` (intermediate -> output), so the
    tests can exercise skip-existing and clobber-output combinations.
    """

    # Pipeline fixture shared by all tests.
    pipeline_file = os.path.join(TESTDIR, "pipeline_separable.yaml")

    def setUp(self):
        """Create a throwaway butler repo with a chained output collection."""
        repodir = tempfile.TemporaryDirectory()
        # TemporaryDirectory warns on leaks; addCleanup also keeps it from
        # getting garbage-collected.
        self.addCleanup(tempfile.TemporaryDirectory.cleanup, repodir)

        # standalone parameter forces the returned config to also include
        # the information from the search paths.
        config = lsst.daf.butler.Butler.makeRepo(
            repodir.name, standalone=True, searchPaths=[os.path.join(TESTDIR, "config")]
        )
        butler = lsst.daf.butler.Butler(config, writeable=True)
        output = "fake"
        output_run = f"{output}/{Instrument.makeCollectionTimestamp()}"
        butler.registry.registerCollection(output_run, lsst.daf.butler.CollectionType.RUN)
        butler.registry.registerCollection(output, lsst.daf.butler.CollectionType.CHAINED)
        butler.registry.setCollectionChain(output, [output_run])
        self.butler = lsst.daf.butler.Butler(butler=butler, collections=[output], run=output_run)

        # Only the dataset types needed by task "a"; tests register the rest
        # themselves when they need to bypass pre_execute_qgraph registration.
        butlerTests.addDatasetType(self.butler, "input", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "intermediate", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "a_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "a_metadata", set(), "TaskMetadata")

    def test_pre_execute_qgraph(self):
        # Too hard to make a quantum graph from scratch.
        executor = SeparablePipelineExecutor(self.butler)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)

        butlerTests.addDatasetType(self.butler, "output", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "a_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "b_metadata", set(), "TaskMetadata")
        butlerTests.addDatasetType(self.butler, PipelineDatasetTypes.packagesDatasetName, set(), "Packages")

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=False,
            save_init_outputs=False,
            save_versions=False,
        )
        # With everything disabled, no init-outputs or versions are written.
        with self.assertRaises(LookupError):
            self.butler.datasetExists("a_config", {}, collections=[self.butler.run])
        with self.assertRaises(LookupError):
            self.butler.datasetExists(PipelineDatasetTypes.packagesDatasetName, {})

    def test_pre_execute_qgraph_unconnected(self):
        # Unconnected graph; see
        # test_make_quantum_graph_nowhere_skippartial_clobber.
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        graph = executor.make_quantum_graph(pipeline)

        butlerTests.addDatasetType(self.butler, "output", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "a_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "b_metadata", set(), "TaskMetadata")
        butlerTests.addDatasetType(self.butler, PipelineDatasetTypes.packagesDatasetName, set(), "Packages")

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=False,
            save_init_outputs=False,
            save_versions=False,
        )
        with self.assertRaises(LookupError):
            self.butler.datasetExists("a_config", {}, collections=[self.butler.run])
        with self.assertRaises(LookupError):
            self.butler.datasetExists(PipelineDatasetTypes.packagesDatasetName, {})

    def test_pre_execute_qgraph_empty(self):
        executor = SeparablePipelineExecutor(self.butler)
        graph = lsst.pipe.base.QuantumGraph({}, universe=self.butler.dimensions)

        butlerTests.addDatasetType(self.butler, "output", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "a_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "b_metadata", set(), "TaskMetadata")
        butlerTests.addDatasetType(self.butler, PipelineDatasetTypes.packagesDatasetName, set(), "Packages")

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=False,
            save_init_outputs=False,
            save_versions=False,
        )
        with self.assertRaises(LookupError):
            self.butler.datasetExists("a_config", {}, collections=[self.butler.run])
        with self.assertRaises(LookupError):
            self.butler.datasetExists(PipelineDatasetTypes.packagesDatasetName, {})

    def test_pre_execute_qgraph_register(self):
        executor = SeparablePipelineExecutor(self.butler)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=False,
            save_versions=False,
        )
        # register_dataset_types=True must register the types the graph needs.
        self.assertEqual({d.name for d in self.butler.registry.queryDatasetTypes("output")}, {"output"})
        self.assertEqual(
            {d.name for d in self.butler.registry.queryDatasetTypes("b_*")},
            {"b_config", "b_log", "b_metadata"},
        )
        with self.assertRaises(LookupError):
            self.butler.datasetExists("a_config", {}, collections=[self.butler.run])
        with self.assertRaises(LookupError):
            self.butler.datasetExists(PipelineDatasetTypes.packagesDatasetName, {})

    def test_pre_execute_qgraph_init_outputs(self):
        # Too hard to make a quantum graph from scratch.
        executor = SeparablePipelineExecutor(self.butler)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)

        butlerTests.addDatasetType(self.butler, "output", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "a_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "b_metadata", set(), "TaskMetadata")
        butlerTests.addDatasetType(self.butler, PipelineDatasetTypes.packagesDatasetName, set(), "Packages")

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=False,
            save_init_outputs=True,
            save_versions=False,
        )
        # Init-outputs written, but not package versions.
        self.assertTrue(self.butler.datasetExists("a_config", {}, collections=[self.butler.run]))
        with self.assertRaises(LookupError):
            self.butler.datasetExists(PipelineDatasetTypes.packagesDatasetName, {})

    def test_pre_execute_qgraph_versions(self):
        executor = SeparablePipelineExecutor(self.butler)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)

        butlerTests.addDatasetType(self.butler, "output", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "a_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "b_metadata", set(), "TaskMetadata")
        butlerTests.addDatasetType(self.butler, PipelineDatasetTypes.packagesDatasetName, set(), "Packages")

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=False,
            save_init_outputs=True,
            save_versions=True,
        )
        self.assertTrue(self.butler.datasetExists("a_config", {}, collections=[self.butler.run]))
        self.assertTrue(self.butler.datasetExists(PipelineDatasetTypes.packagesDatasetName, {}))

    def test_init_badinput(self):
        # A butler with no input collections is unusable for execution.
        butler = lsst.daf.butler.Butler(butler=self.butler, collections=[], run="foo")

        with self.assertRaises(ValueError):
            SeparablePipelineExecutor(butler)

    def test_init_badoutput(self):
        # A butler with no output run is unusable for execution.
        butler = lsst.daf.butler.Butler(butler=self.butler, collections=["foo"])

        with self.assertRaises(ValueError):
            SeparablePipelineExecutor(butler)

    def test_make_pipeline_full(self):
        executor = SeparablePipelineExecutor(self.butler)
        # make_pipeline must accept a path string, a ResourcePath, and a URL.
        for uri in [
            self.pipeline_file,
            ResourcePath(self.pipeline_file),
            ResourcePath(self.pipeline_file).geturl(),
        ]:
            pipeline = executor.make_pipeline(uri)
            self.assertEqual(len(pipeline), 2)
            self.assertEqual({t.label for t in pipeline}, {"a", "b"})

    def test_make_pipeline_subset(self):
        executor = SeparablePipelineExecutor(self.butler)
        # "#a" selects the subset containing only task "a".
        path = self.pipeline_file + "#a"
        for uri in [
            path,
            ResourcePath(path),
            ResourcePath(path).geturl(),
        ]:
            pipeline = executor.make_pipeline(uri)
            self.assertEqual(len(pipeline), 1)
            self.assertEqual({t.label for t in pipeline}, {"a"})

    def test_make_quantum_graph_nowhere_noskip_noclobber(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=False)
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_noskip_noclobber_conflict(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=False)
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")

        # Pre-existing outputs with neither skip nor clobber is an error.
        with self.assertRaises(lsst.pipe.base.graphBuilder.OutputExistsError):
            executor.make_quantum_graph(pipeline)

    # TODO: need more complex task and Butler to test
    # make_quantum_graph(where=...)

    def test_make_quantum_graph_nowhere_skipnone_noclobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=False,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_skiptotal_noclobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=False,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        # Task "a" has completely finished (all outputs present), so it is
        # skipped and only "b" remains in the graph.
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 1)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"b"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_skippartial_noclobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=False,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        # Task "a" only partially finished (no log/metadata): cannot skip,
        # cannot clobber -> error.
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")

        with self.assertRaises(lsst.pipe.base.graphBuilder.OutputExistsError):
            executor.make_quantum_graph(pipeline)

    def test_make_quantum_graph_nowhere_noskip_clobber(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=True)
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_noskip_clobber_conflict(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=True)
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")

        graph = executor.make_quantum_graph(pipeline)
        self.assertFalse(graph.isConnected)  # Both tasks run, but can use old values for b
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a", "b"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"a", "b"})

    def test_make_quantum_graph_nowhere_skipnone_clobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_skiptotal_clobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 1)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"b"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_skippartial_clobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")

        graph = executor.make_quantum_graph(pipeline)
        self.assertFalse(graph.isConnected)  # Both tasks run, but can use old values for b
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a", "b"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"a", "b"})

    def test_make_quantum_graph_noinput(self):
        executor = SeparablePipelineExecutor(self.butler)
        pipeline = Pipeline.fromFile(self.pipeline_file)

        # Without the "input" dataset nothing can run.
        graph = executor.make_quantum_graph(pipeline)
        self.assertEqual(len(graph), 0)

    def test_make_quantum_graph_alloutput_skip(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=[self.butler.run])
        pipeline = Pipeline.fromFile(self.pipeline_file)

        butlerTests.addDatasetType(self.butler, "output", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "b_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "b_metadata", set(), "TaskMetadata")

        # Everything already exists, so both tasks are skipped.
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")
        self.butler.put({"zero": 0}, "output")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "b_log")
        self.butler.put(TaskMetadata(), "b_metadata")

        graph = executor.make_quantum_graph(pipeline)
        self.assertEqual(len(graph), 0)

    def test_run_pipeline_noskip_noclobber_fullgraph(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=False)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        self.assertEqual(self.butler.get("output"), {"zero": 0, "one": 1, "two": 2})

    def test_run_pipeline_noskip_noclobber_emptygraph(self):
        old_repo_size = self.butler.registry.queryDatasets(...).count()

        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=False)
        graph = lsst.pipe.base.QuantumGraph({}, universe=self.butler.dimensions)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        # Empty graph execution should do nothing.
        self.assertEqual(self.butler.registry.queryDatasets(...).count(), old_repo_size)

    def test_run_pipeline_skipnone_noclobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=False,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        self.assertEqual(self.butler.get("output"), {"zero": 0, "one": 1, "two": 2})

    def test_run_pipeline_skiptotal_noclobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=False,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        # Task "a" skipped, so "b" saw the pre-made intermediate ({"zero": 0}).
        self.assertEqual(self.butler.get("output"), {"zero": 0, "two": 2})

    def test_run_pipeline_noskip_clobber_connected(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=True)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        self.assertEqual(self.butler.get("output"), {"zero": 0, "one": 1, "two": 2})

    @unittest.skip("Will not work until DM-38601 is fixed.")
    def test_run_pipeline_noskip_clobber_unconnected(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=True)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        # The value of output is undefined; it depends on which task ran first.
        self.assertTrue(self.butler.datasetExists("output", {}))

    def test_run_pipeline_skipnone_clobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        self.assertEqual(self.butler.get("output"), {"zero": 0, "one": 1, "two": 2})

    def test_run_pipeline_skiptotal_clobber_connected(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("output"), {"zero": 0, "two": 2})

    @unittest.skip("Bad behavior with unconnected graph; Middleware will investigate after DM-33027")
    def test_run_pipeline_skippartial_clobber_unconnected(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        # The value of output is undefined; it depends on which task ran first.
        self.assertTrue(self.butler.datasetExists("output", {}))
class MemoryTester(lsst.utils.tests.MemoryTestCase):
    """Standard LSST boilerplate: checks for file-descriptor/memory leaks."""

    pass
def setup_module(module):
    """Pytest module-setup hook: initialize the lsst.utils test framework."""
    lsst.utils.tests.init()
# Allow running this test file directly (outside pytest).
if __name__ == "__main__":
    lsst.utils.tests.init()
    unittest.main()