Coverage for tests/test_separablePipelineExecutor.py: 12% of 348 statements
# This file is part of ctrl_mpexec.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

import os
import tempfile
import unittest

import lsst.daf.butler
import lsst.daf.butler.tests as butlerTests
import lsst.pex.config
import lsst.utils.tests
from lsst.ctrl.mpexec import SeparablePipelineExecutor
from lsst.pipe.base import Instrument, Pipeline, PipelineDatasetTypes, TaskMetadata
from lsst.resources import ResourcePath

TESTDIR = os.path.abspath(os.path.dirname(__file__))


class SeparablePipelineExecutorTests(lsst.utils.tests.TestCase):
    """Test the SeparablePipelineExecutor API with a trivial task."""

    pipeline_file = os.path.join(TESTDIR, "pipeline_separable.yaml")

    def setUp(self):
        repodir = tempfile.TemporaryDirectory()
        # TemporaryDirectory warns on leaks; addCleanup also keeps it from
        # getting garbage-collected.
        self.addCleanup(tempfile.TemporaryDirectory.cleanup, repodir)

        # standalone parameter forces the returned config to also include
        # the information from the search paths.
        config = lsst.daf.butler.Butler.makeRepo(
            repodir.name, standalone=True, searchPaths=[os.path.join(TESTDIR, "config")]
        )
        butler = lsst.daf.butler.Butler(config, writeable=True)
        output = "fake"
        output_run = f"{output}/{Instrument.makeCollectionTimestamp()}"
        butler.registry.registerCollection(output_run, lsst.daf.butler.CollectionType.RUN)
        butler.registry.registerCollection(output, lsst.daf.butler.CollectionType.CHAINED)
        butler.registry.setCollectionChain(output, [output_run])
        self.butler = lsst.daf.butler.Butler(butler=butler, collections=[output], run=output_run)

        butlerTests.addDatasetType(self.butler, "input", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "intermediate", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "a_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "a_metadata", set(), "TaskMetadata")
        butlerTests.addDatasetType(self.butler, "a_config", set(), "Config")

    def test_pre_execute_qgraph(self):
        # Too hard to make a quantum graph from scratch.
        executor = SeparablePipelineExecutor(self.butler)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)

        butlerTests.addDatasetType(self.butler, "output", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "b_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "b_metadata", set(), "TaskMetadata")
        butlerTests.addDatasetType(self.butler, PipelineDatasetTypes.packagesDatasetName, set(), "Packages")

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=False,
            save_init_outputs=False,
            save_versions=False,
        )
        self.assertFalse(self.butler.exists("a_config", {}, collections=[self.butler.run]))
        self.assertFalse(self.butler.exists(PipelineDatasetTypes.packagesDatasetName, {}))

    def test_pre_execute_qgraph_unconnected(self):
        # Unconnected graph; see
        # test_make_quantum_graph_nowhere_skippartial_clobber.
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        graph = executor.make_quantum_graph(pipeline)

        butlerTests.addDatasetType(self.butler, "output", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "b_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "b_metadata", set(), "TaskMetadata")
        butlerTests.addDatasetType(self.butler, PipelineDatasetTypes.packagesDatasetName, set(), "Packages")

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=False,
            save_init_outputs=False,
            save_versions=False,
        )
        self.assertFalse(self.butler.exists("a_config", {}, collections=[self.butler.run]))
        self.assertFalse(self.butler.exists(PipelineDatasetTypes.packagesDatasetName, {}))

    def test_pre_execute_qgraph_empty(self):
        executor = SeparablePipelineExecutor(self.butler)
        graph = lsst.pipe.base.QuantumGraph({}, universe=self.butler.dimensions)

        butlerTests.addDatasetType(self.butler, "output", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "b_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "b_metadata", set(), "TaskMetadata")
        butlerTests.addDatasetType(self.butler, PipelineDatasetTypes.packagesDatasetName, set(), "Packages")

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=False,
            save_init_outputs=False,
            save_versions=False,
        )
        self.assertFalse(self.butler.exists("a_config", {}, collections=[self.butler.run]))
        self.assertFalse(self.butler.exists(PipelineDatasetTypes.packagesDatasetName, {}))

    def test_pre_execute_qgraph_register(self):
        executor = SeparablePipelineExecutor(self.butler)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=False,
            save_versions=False,
        )
        self.assertEqual({d.name for d in self.butler.registry.queryDatasetTypes("output")}, {"output"})
        self.assertEqual(
            {d.name for d in self.butler.registry.queryDatasetTypes("b_*")},
            {"b_config", "b_log", "b_metadata"},
        )
        self.assertFalse(self.butler.exists("a_config", {}, collections=[self.butler.run]))
        self.assertFalse(self.butler.exists(PipelineDatasetTypes.packagesDatasetName, {}))

    def test_pre_execute_qgraph_init_outputs(self):
        # Too hard to make a quantum graph from scratch.
        executor = SeparablePipelineExecutor(self.butler)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)

        butlerTests.addDatasetType(self.butler, "output", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "b_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "b_metadata", set(), "TaskMetadata")
        butlerTests.addDatasetType(self.butler, PipelineDatasetTypes.packagesDatasetName, set(), "Packages")

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=False,
            save_init_outputs=True,
            save_versions=False,
        )
        self.assertTrue(self.butler.exists("a_config", {}, collections=[self.butler.run]))
        self.assertFalse(self.butler.exists(PipelineDatasetTypes.packagesDatasetName, {}))

    def test_pre_execute_qgraph_versions(self):
        executor = SeparablePipelineExecutor(self.butler)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)

        butlerTests.addDatasetType(self.butler, "output", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "b_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "b_metadata", set(), "TaskMetadata")
        butlerTests.addDatasetType(self.butler, PipelineDatasetTypes.packagesDatasetName, set(), "Packages")

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=False,
            save_init_outputs=True,
            save_versions=True,
        )
        self.assertTrue(self.butler.exists("a_config", {}, collections=[self.butler.run]))
        self.assertTrue(self.butler.exists(PipelineDatasetTypes.packagesDatasetName, {}))

    def test_init_badinput(self):
        butler = lsst.daf.butler.Butler(butler=self.butler, collections=[], run="foo")

        with self.assertRaises(ValueError):
            SeparablePipelineExecutor(butler)

    def test_init_badoutput(self):
        butler = lsst.daf.butler.Butler(butler=self.butler, collections=["foo"])

        with self.assertRaises(ValueError):
            SeparablePipelineExecutor(butler)

    def test_make_pipeline_full(self):
        executor = SeparablePipelineExecutor(self.butler)
        for uri in [
            self.pipeline_file,
            ResourcePath(self.pipeline_file),
            ResourcePath(self.pipeline_file).geturl(),
        ]:
            pipeline = executor.make_pipeline(uri)
            self.assertEqual(len(pipeline), 2)
            self.assertEqual({t.label for t in pipeline}, {"a", "b"})

    def test_make_pipeline_subset(self):
        executor = SeparablePipelineExecutor(self.butler)
        path = self.pipeline_file + "#a"
        for uri in [
            path,
            ResourcePath(path),
            ResourcePath(path).geturl(),
        ]:
            pipeline = executor.make_pipeline(uri)
            self.assertEqual(len(pipeline), 1)
            self.assertEqual({t.label for t in pipeline}, {"a"})

    def test_make_quantum_graph_nowhere_noskip_noclobber(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=False)
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_noskip_noclobber_conflict(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=False)
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")

        with self.assertRaises(lsst.pipe.base.graphBuilder.OutputExistsError):
            executor.make_quantum_graph(pipeline)

    # TODO: need more complex task and Butler to test
    # make_quantum_graph(where=...)

    def test_make_quantum_graph_nowhere_skipnone_noclobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=False,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_skiptotal_noclobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=False,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")
        self.butler.put(lsst.pex.config.Config(), "a_config")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 1)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"b"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_skippartial_noclobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=False,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")

        with self.assertRaises(lsst.pipe.base.graphBuilder.OutputExistsError):
            executor.make_quantum_graph(pipeline)

    def test_make_quantum_graph_nowhere_noskip_clobber(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=True)
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_noskip_clobber_conflict(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=True)
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_skipnone_clobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_skiptotal_clobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")
        self.butler.put(lsst.pex.config.Config(), "a_config")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 1)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"b"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_skippartial_clobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_noinput(self):
        executor = SeparablePipelineExecutor(self.butler)
        pipeline = Pipeline.fromFile(self.pipeline_file)

        graph = executor.make_quantum_graph(pipeline)
        self.assertEqual(len(graph), 0)

    def test_make_quantum_graph_alloutput_skip(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=[self.butler.run])
        pipeline = Pipeline.fromFile(self.pipeline_file)

        butlerTests.addDatasetType(self.butler, "output", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "b_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "b_metadata", set(), "TaskMetadata")
        butlerTests.addDatasetType(self.butler, "b_config", set(), "Config")

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")
        self.butler.put({"zero": 0}, "output")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "b_log")
        self.butler.put(TaskMetadata(), "b_metadata")
        self.butler.put(lsst.pex.config.Config(), "a_config")
        self.butler.put(lsst.pex.config.Config(), "b_config")

        graph = executor.make_quantum_graph(pipeline)
        self.assertEqual(len(graph), 0)

    def test_run_pipeline_noskip_noclobber_fullgraph(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=False)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        self.assertEqual(self.butler.get("output"), {"zero": 0, "one": 1, "two": 2})

    def test_run_pipeline_noskip_noclobber_emptygraph(self):
        old_repo_size = self.butler.registry.queryDatasets(...).count()

        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=False)
        graph = lsst.pipe.base.QuantumGraph({}, universe=self.butler.dimensions)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        # Empty graph execution should do nothing.
        self.assertEqual(self.butler.registry.queryDatasets(...).count(), old_repo_size)

    def test_run_pipeline_skipnone_noclobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=False,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        self.assertEqual(self.butler.get("output"), {"zero": 0, "one": 1, "two": 2})

    def test_run_pipeline_skiptotal_noclobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=False,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")
        self.butler.put(lsst.pex.config.Config(), "a_config")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("output"), {"zero": 0, "two": 2})

    def test_run_pipeline_noskip_clobber_connected(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=True)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        self.assertEqual(self.butler.get("output"), {"zero": 0, "one": 1, "two": 2})

    def test_run_pipeline_noskip_clobber_unconnected(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=True)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        # The value of output is undefined; it depends on which task ran first.
        self.assertTrue(self.butler.exists("output", {}))

    def test_run_pipeline_skipnone_clobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        self.assertEqual(self.butler.get("output"), {"zero": 0, "one": 1, "two": 2})

    def test_run_pipeline_skiptotal_clobber_connected(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")
        self.butler.put(lsst.pex.config.Config(), "a_config")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("output"), {"zero": 0, "two": 2})

    def test_run_pipeline_skippartial_clobber_unconnected(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        # The value of output is undefined; it depends on which task ran first.
        self.assertTrue(self.butler.exists("output", {}))


class MemoryTester(lsst.utils.tests.MemoryTestCase):
    """Generic test for file leaks."""


def setup_module(module):
    """Set up the module for pytest."""
    lsst.utils.tests.init()


if __name__ == "__main__":
    lsst.utils.tests.init()
    unittest.main()