# This file is part of ctrl_mpexec.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This software is dual licensed under the GNU General Public License and also
# under a 3-clause BSD license. Recipients may choose which of these licenses
# to use; please see the files gpl-3.0.txt and/or bsd_license.txt,
# respectively. If you choose the GPL option then the following text applies
# (but note that there is still no warranty even if you opt for BSD instead):
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

import os
import tempfile
import unittest

import lsst.daf.butler
import lsst.daf.butler.tests as butlerTests
import lsst.pex.config
import lsst.pipe.base
import lsst.utils.tests
from lsst.ctrl.mpexec import SeparablePipelineExecutor
from lsst.pipe.base import Instrument, Pipeline, TaskMetadata
from lsst.pipe.base.automatic_connection_constants import PACKAGES_INIT_OUTPUT_NAME
from lsst.pipe.base.quantum_graph_builder import OutputExistsError
from lsst.resources import ResourcePath

TESTDIR = os.path.abspath(os.path.dirname(__file__))


class SeparablePipelineExecutorTests(lsst.utils.tests.TestCase):
    """Test the SeparablePipelineExecutor API with a trivial task."""
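
    # pipeline_separable.yaml is not reproduced in this report; judging from
    # the dataset types and task labels asserted below, it appears to define
    # two tasks: "a" (reads "input", writes "intermediate") and "b" (reads
    # "intermediate", writes "output"), each with the usual
    # _config/_log/_metadata outputs.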
    pipeline_file = os.path.join(TESTDIR, "pipeline_separable.yaml")

    def setUp(self):
        repodir = tempfile.TemporaryDirectory()
        # TemporaryDirectory warns on leaks; addCleanup also keeps it from
        # getting garbage-collected.
        self.addCleanup(tempfile.TemporaryDirectory.cleanup, repodir)

        # standalone parameter forces the returned config to also include
        # the information from the search paths.
        config = lsst.daf.butler.Butler.makeRepo(
            repodir.name, standalone=True, searchPaths=[os.path.join(TESTDIR, "config")]
        )
        butler = lsst.daf.butler.Butler.from_config(config, writeable=True)
        output = "fake"
        output_run = f"{output}/{Instrument.makeCollectionTimestamp()}"
        butler.registry.registerCollection(output_run, lsst.daf.butler.CollectionType.RUN)
        butler.registry.registerCollection(output, lsst.daf.butler.CollectionType.CHAINED)
        butler.registry.setCollectionChain(output, [output_run])
        self.butler = lsst.daf.butler.Butler.from_config(butler=butler, collections=[output], run=output_run)
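
        # Only the dataset types needed by task "a" are registered up front;
        # task "b"'s output types are added per test (or left to
        # pre_execute_qgraph with register_dataset_types=True), presumably so
        # that dataset-type registration can be exercised explicitly.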
        butlerTests.addDatasetType(self.butler, "input", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "intermediate", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "a_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "a_metadata", set(), "TaskMetadata")
        butlerTests.addDatasetType(self.butler, "a_config", set(), "Config")

    def test_pre_execute_qgraph(self):
        # Too hard to make a quantum graph from scratch.
        executor = SeparablePipelineExecutor(self.butler)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)

        butlerTests.addDatasetType(self.butler, "output", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "b_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "b_metadata", set(), "TaskMetadata")
        butlerTests.addDatasetType(self.butler, PACKAGES_INIT_OUTPUT_NAME, set(), "Packages")

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=False,
            save_init_outputs=False,
            save_versions=False,
        )
        self.assertFalse(self.butler.exists("a_config", {}, collections=[self.butler.run]))
        self.assertFalse(self.butler.exists(PACKAGES_INIT_OUTPUT_NAME, {}))

    def test_pre_execute_qgraph_unconnected(self):
        # Unconnected graph; see
        # test_make_quantum_graph_nowhere_skippartial_clobber.
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        graph = executor.make_quantum_graph(pipeline)

        butlerTests.addDatasetType(self.butler, "output", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "b_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "b_metadata", set(), "TaskMetadata")
        butlerTests.addDatasetType(self.butler, PACKAGES_INIT_OUTPUT_NAME, set(), "Packages")

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=False,
            save_init_outputs=False,
            save_versions=False,
        )
        self.assertFalse(self.butler.exists("a_config", {}, collections=[self.butler.run]))
        self.assertFalse(self.butler.exists(PACKAGES_INIT_OUTPUT_NAME, {}))

    def test_pre_execute_qgraph_empty(self):
        executor = SeparablePipelineExecutor(self.butler)
        graph = lsst.pipe.base.QuantumGraph({}, universe=self.butler.dimensions)

        butlerTests.addDatasetType(self.butler, "output", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "b_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "b_metadata", set(), "TaskMetadata")
        butlerTests.addDatasetType(self.butler, PACKAGES_INIT_OUTPUT_NAME, set(), "Packages")

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=False,
            save_init_outputs=False,
            save_versions=False,
        )
        self.assertFalse(self.butler.exists("a_config", {}, collections=[self.butler.run]))
        self.assertFalse(self.butler.exists(PACKAGES_INIT_OUTPUT_NAME, {}))

    def test_pre_execute_qgraph_register(self):
        executor = SeparablePipelineExecutor(self.butler)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=False,
            save_versions=False,
        )
        self.assertEqual({d.name for d in self.butler.registry.queryDatasetTypes("output")}, {"output"})
        self.assertEqual(
            {d.name for d in self.butler.registry.queryDatasetTypes("b_*")},
            {"b_config", "b_log", "b_metadata"},
        )
        self.assertFalse(self.butler.exists("a_config", {}, collections=[self.butler.run]))
        self.assertFalse(self.butler.exists(PACKAGES_INIT_OUTPUT_NAME, {}))

    def test_pre_execute_qgraph_init_outputs(self):
        # Too hard to make a quantum graph from scratch.
        executor = SeparablePipelineExecutor(self.butler)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)

        butlerTests.addDatasetType(self.butler, "output", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "b_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "b_metadata", set(), "TaskMetadata")
        butlerTests.addDatasetType(self.butler, PACKAGES_INIT_OUTPUT_NAME, set(), "Packages")

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=False,
            save_init_outputs=True,
            save_versions=False,
        )
        self.assertTrue(self.butler.exists("a_config", {}, collections=[self.butler.run]))
        self.assertFalse(self.butler.exists(PACKAGES_INIT_OUTPUT_NAME, {}))

    def test_pre_execute_qgraph_versions(self):
        executor = SeparablePipelineExecutor(self.butler)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)

        butlerTests.addDatasetType(self.butler, "output", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "b_config", set(), "Config")
        butlerTests.addDatasetType(self.butler, "b_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "b_metadata", set(), "TaskMetadata")
        butlerTests.addDatasetType(self.butler, PACKAGES_INIT_OUTPUT_NAME, set(), "Packages")

        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=False,
            save_init_outputs=True,
            save_versions=True,
        )
        self.assertTrue(self.butler.exists("a_config", {}, collections=[self.butler.run]))
        self.assertTrue(self.butler.exists(PACKAGES_INIT_OUTPUT_NAME, {}))

    def test_init_badinput(self):
        butler = lsst.daf.butler.Butler.from_config(butler=self.butler, collections=[], run="foo")

        with self.assertRaises(ValueError):
            SeparablePipelineExecutor(butler)

    def test_init_badoutput(self):
        butler = lsst.daf.butler.Butler.from_config(butler=self.butler, collections=["foo"])

        with self.assertRaises(ValueError):
            SeparablePipelineExecutor(butler)

    def test_make_pipeline_full(self):
        executor = SeparablePipelineExecutor(self.butler)
        for uri in [
            self.pipeline_file,
            ResourcePath(self.pipeline_file),
            ResourcePath(self.pipeline_file).geturl(),
        ]:
            pipeline_graph = executor.make_pipeline(uri).to_graph()
            self.assertEqual(set(pipeline_graph.tasks), {"a", "b"})

    def test_make_pipeline_subset(self):
        executor = SeparablePipelineExecutor(self.butler)
        path = self.pipeline_file + "#a"
        for uri in [
            path,
            ResourcePath(path),
            ResourcePath(path).geturl(),
        ]:
            pipeline_graph = executor.make_pipeline(uri).to_graph()
            self.assertEqual(set(pipeline_graph.tasks), {"a"})
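
    # The test_make_quantum_graph_* names below appear to encode the
    # configuration being exercised: "nowhere" means no ``where`` clause is
    # passed, "noskip"/"skipnone"/"skiptotal"/"skippartial" describe the
    # skip_existing_in setting relative to which outputs already exist, and
    # "noclobber"/"clobber" is the clobber_output flag.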

    def test_make_quantum_graph_nowhere_noskip_noclobber(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=False)
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_noskip_noclobber_conflict(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=False)
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")

        with self.assertRaises(OutputExistsError):
            executor.make_quantum_graph(pipeline)

    # TODO: need more complex task and Butler to test
    # make_quantum_graph(where=...)

    def test_make_quantum_graph_nowhere_skipnone_noclobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=False,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_skiptotal_noclobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=False,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")
        self.butler.put(lsst.pex.config.Config(), "a_config")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 1)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"b"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_skippartial_noclobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=False,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")

        with self.assertRaises(OutputExistsError):
            executor.make_quantum_graph(pipeline)

    def test_make_quantum_graph_nowhere_noskip_clobber(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=True)
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_noskip_clobber_conflict(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=True)
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_skipnone_clobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_skiptotal_clobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")
        self.butler.put(lsst.pex.config.Config(), "a_config")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 1)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"b"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_nowhere_skippartial_clobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")

        graph = executor.make_quantum_graph(pipeline)
        self.assertTrue(graph.isConnected)
        self.assertEqual(len(graph), 2)
        self.assertEqual({q.taskDef.label for q in graph.inputQuanta}, {"a"})
        self.assertEqual({q.taskDef.label for q in graph.outputQuanta}, {"b"})

    def test_make_quantum_graph_noinput(self):
        executor = SeparablePipelineExecutor(self.butler)
        pipeline = Pipeline.fromFile(self.pipeline_file)

        graph = executor.make_quantum_graph(pipeline)
        self.assertEqual(len(graph), 0)

    def test_make_quantum_graph_alloutput_skip(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=[self.butler.run])
        pipeline = Pipeline.fromFile(self.pipeline_file)

        butlerTests.addDatasetType(self.butler, "output", set(), "StructuredDataDict")
        butlerTests.addDatasetType(self.butler, "b_log", set(), "ButlerLogRecords")
        butlerTests.addDatasetType(self.butler, "b_metadata", set(), "TaskMetadata")
        butlerTests.addDatasetType(self.butler, "b_config", set(), "Config")

        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")
        self.butler.put({"zero": 0}, "output")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "b_log")
        self.butler.put(TaskMetadata(), "b_metadata")
        self.butler.put(lsst.pex.config.Config(), "a_config")
        self.butler.put(lsst.pex.config.Config(), "b_config")

        graph = executor.make_quantum_graph(pipeline)
        self.assertEqual(len(graph), 0)
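
    # The test_run_pipeline_* cases below execute the graph end to end. The
    # expected values suggest that task "a" adds {"one": 1} when producing
    # "intermediate" and task "b" adds {"two": 2} when producing "output".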

    def test_run_pipeline_noskip_noclobber_fullgraph(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=False)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        self.assertEqual(self.butler.get("output"), {"zero": 0, "one": 1, "two": 2})

    def test_run_pipeline_noskip_noclobber_emptygraph(self):
        old_repo_size = self.butler.registry.queryDatasets(...).count()

        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=False)
        graph = lsst.pipe.base.QuantumGraph({}, universe=self.butler.dimensions)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        # Empty graph execution should do nothing.
        self.assertEqual(self.butler.registry.queryDatasets(...).count(), old_repo_size)

    def test_run_pipeline_skipnone_noclobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=False,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        self.assertEqual(self.butler.get("output"), {"zero": 0, "one": 1, "two": 2})

    def test_run_pipeline_skiptotal_noclobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=False,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")
        self.butler.put(lsst.pex.config.Config(), "a_config")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
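        # Task "a" was skipped, so "b" consumed the pre-existing
        # intermediate ({"zero": 0}) rather than a freshly computed one.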
        self.assertEqual(self.butler.get("output"), {"zero": 0, "two": 2})

    def test_run_pipeline_noskip_clobber_connected(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=True)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        self.assertEqual(self.butler.get("output"), {"zero": 0, "one": 1, "two": 2})

    def test_run_pipeline_noskip_clobber_unconnected(self):
        executor = SeparablePipelineExecutor(self.butler, skip_existing_in=None, clobber_output=True)
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        # The value of output is undefined; it depends on which task ran first.
        self.assertTrue(self.butler.exists("output", {}))

    def test_run_pipeline_skipnone_clobber(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        self.assertEqual(self.butler.get("output"), {"zero": 0, "one": 1, "two": 2})

    def test_run_pipeline_skiptotal_clobber_connected(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        self.butler.put(lsst.daf.butler.ButlerLogRecords.from_records([]), "a_log")
        self.butler.put(TaskMetadata(), "a_metadata")
        self.butler.put(lsst.pex.config.Config(), "a_config")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("output"), {"zero": 0, "two": 2})

    def test_run_pipeline_skippartial_clobber_unconnected(self):
        executor = SeparablePipelineExecutor(
            self.butler,
            skip_existing_in=[self.butler.run],
            clobber_output=True,
        )
        pipeline = Pipeline.fromFile(self.pipeline_file)
        self.butler.put({"zero": 0}, "input")
        self.butler.put({"zero": 0}, "intermediate")
        graph = executor.make_quantum_graph(pipeline)
        executor.pre_execute_qgraph(
            graph,
            register_dataset_types=True,
            save_init_outputs=True,
            save_versions=False,
        )

        executor.run_pipeline(graph)
        self.butler.registry.refresh()
        self.assertEqual(self.butler.get("intermediate"), {"zero": 0, "one": 1})
        # The value of output is undefined; it depends on which task ran first.
        self.assertTrue(self.butler.exists("output", {}))


class MemoryTester(lsst.utils.tests.MemoryTestCase):
    """Generic test for file leaks."""


def setup_module(module):
    """Set up the module for pytest.

    Parameters
    ----------
    module : `~types.ModuleType`
        Module to set up.
    """
    lsst.utils.tests.init()


if __name__ == "__main__":
    lsst.utils.tests.init()
    unittest.main()