Coverage for tests/test_cmdLineFwk.py: 17%
381 statements
coverage.py v7.1.0, created at 2023-02-05 18:04 -0800
# This file is part of ctrl_mpexec.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

"""Simple unit test for cmdLineFwk module.
"""
import contextlib
import copy
import logging
import os
import pickle
import re
import shutil
import tempfile
import unittest
import unittest.mock  # not pulled in by "import unittest"; needed for Mock below
from dataclasses import dataclass
from types import SimpleNamespace
from typing import NamedTuple

import click
import click.testing  # not pulled in by "import click"; needed for CliRunner below

import lsst.pex.config as pexConfig
import lsst.pipe.base.connectionTypes as cT
import lsst.utils.tests
from lsst.ctrl.mpexec import CmdLineFwk, MPGraphExecutorError
from lsst.ctrl.mpexec.cli.opt import run_options
from lsst.ctrl.mpexec.cli.utils import (
    _ACTION_ADD_INSTRUMENT,
    _ACTION_ADD_TASK,
    _ACTION_CONFIG,
    _ACTION_CONFIG_FILE,
    PipetaskCommand,
)
from lsst.daf.butler import Config, Quantum, Registry
from lsst.daf.butler.registry import RegistryConfig
from lsst.obs.base import Instrument
from lsst.pipe.base import Pipeline, PipelineTaskConfig, PipelineTaskConnections, QuantumGraph, TaskDef
from lsst.pipe.base.tests.simpleQGraph import (
    AddTaskFactoryMock,
    makeSimpleButler,
    makeSimplePipeline,
    makeSimpleQGraph,
    populateButler,
)
from lsst.utils.tests import temporaryDirectory
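# The test logging level can be tuned via the UNIT_TEST_LOGGING_LEVEL
# environment variable; names that the logging module does not define fall
# back to INFO.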
logging.basicConfig(level=getattr(logging, os.environ.get("UNIT_TEST_LOGGING_LEVEL", "INFO"), logging.INFO))
# Have to monkey-patch Instrument.fromName() to not retrieve a non-existing
# instrument from the registry; these tests run fine without an actual
# instrument, and implementing a full mock for Instrument is too complicated.
Instrument.fromName = lambda name, reg: None
@contextlib.contextmanager
def makeTmpFile(contents=None, suffix=None):
    """Context manager for generating temporary file name.

    Temporary file is deleted on exiting context.

    Parameters
    ----------
    contents : `bytes`, optional
        Data to write into the file.
    suffix : `str`, optional
        Suffix for the temporary file name.
    """
    fd, tmpname = tempfile.mkstemp(suffix=suffix)
    if contents:
        os.write(fd, contents)
    os.close(fd)
    yield tmpname
    with contextlib.suppress(OSError):
        os.remove(tmpname)
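# Typical usage, as in the tests below: write config overrides to a temporary
# file and point a config-file pipeline action at it, e.g.
#     with makeTmpFile(b"config.addend = 1000\n") as tmpname:
#         actions = [_ACTION_CONFIG_FILE("task:" + tmpname)]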
@contextlib.contextmanager
def makeSQLiteRegistry(create=True):
    """Context manager to create new empty registry database.

    Parameters
    ----------
    create : `bool`, optional
        If `True` (default), initialize the registry database.

    Yields
    ------
    config : `RegistryConfig`
        Registry configuration for initialized registry database.
    """
    with temporaryDirectory() as tmpdir:
        uri = f"sqlite:///{tmpdir}/gen3.sqlite"
        config = RegistryConfig()
        config["db"] = uri
        if create:
            Registry.createFromConfig(config)
        yield config
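# The tests below pair this with makeTmpFile() when reading a saved quantum
# graph against a fresh registry, e.g.
#     with makeTmpFile(suffix=".qgraph") as tmpname, \
#             makeSQLiteRegistry() as registryConfig:
#         args = _makeArgs(qgraph=tmpname, registryConfig=registryConfig)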
class SimpleConnections(PipelineTaskConnections, dimensions=(),
                        defaultTemplates={"template": "simple"}):
    """Trivial connections class with a single init-input dataset."""

    schema = cT.InitInput(doc="Schema",
                          name="{template}schema",
                          storageClass="SourceCatalog")


class SimpleConfig(PipelineTaskConfig, pipelineConnections=SimpleConnections):
    """Trivial task config; used when constructing the TaskDef in _makeQGraph()."""

    field = pexConfig.Field(dtype=str, doc="arbitrary string")

    def setDefaults(self):
        PipelineTaskConfig.setDefaults(self)
def _makeArgs(registryConfig=None, **kwargs):
    """Return parsed command line arguments.

    By default butler_config is set to `Config` populated with some defaults;
    it can be overridden completely by a keyword argument.

    Parameters
    ----------
    registryConfig : `RegistryConfig`, optional
        Override for registry configuration.
    **kwargs
        Overrides for other arguments.
    """
    # Use a mock to get the default value of arguments to 'run'.
    mock = unittest.mock.Mock()

    @click.command(cls=PipetaskCommand)
    @run_options()
    def fake_run(ctx, **kwargs):
        """Fake "pipetask run" command for gathering input arguments.

        The arguments & options should always match the arguments & options in
        the "real" command function `lsst.ctrl.mpexec.cli.cmd.run`.
        """
        mock(**kwargs)

    runner = click.testing.CliRunner()
    # --butler-config is the only required option
    result = runner.invoke(fake_run, "--butler-config /")
    if result.exit_code != 0:
        raise RuntimeError(f"Failure getting default args from 'fake_run': {result}")
    mock.assert_called_once()
    args = mock.call_args[1]
    # Rename a few click options to the attribute names expected downstream.
    args["enableLsstDebug"] = args.pop("debug")
    args["execution_butler_location"] = args.pop("save_execution_butler")
    if "pipeline_actions" not in args:
        args["pipeline_actions"] = []
    args = SimpleNamespace(**args)

    # override butler_config with our defaults
    if "butler_config" not in kwargs:
        args.butler_config = Config()
        if registryConfig:
            args.butler_config["registry"] = registryConfig
        # The default datastore has a relocatable root, so we need to specify
        # some root here for it to use
        args.butler_config.configFile = "."

    # override arguments from keyword parameters
    for key, value in kwargs.items():
        setattr(args, key, value)
    return args
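# Minimal stand-ins for dataset type and dataset ref; the trivial quantum
# graph built in _makeQGraph() only needs to pickle them, so a name and a
# data ID tuple are enough.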
class FakeDSType(NamedTuple):
    name: str


@dataclass(frozen=True)
class FakeDSRef:
    datasetType: str
    dataId: tuple


# Task class name used by tests, needs to be importable
_TASK_CLASS = "lsst.pipe.base.tests.simpleQGraph.AddTask"
def _makeQGraph():
    """Make a trivial QuantumGraph with one quantum.

    The only thing that we need to do with this quantum graph is to pickle
    it; the quanta in this graph are not usable for anything else.

    Returns
    -------
    qgraph : `~lsst.pipe.base.QuantumGraph`
    """
    taskDef = TaskDef(taskName=_TASK_CLASS, config=SimpleConfig())
    quanta = [Quantum(taskName=_TASK_CLASS,
                      inputs={FakeDSType("A"): [FakeDSRef("A", (1, 2))]})]  # type: ignore
    qgraph = QuantumGraph({taskDef: set(quanta)})
    return qgraph
class CmdLineFwkTestCase(unittest.TestCase):
    """A test case for CmdLineFwk."""

    def testMakePipeline(self):
        """Tests for CmdLineFwk.makePipeline method."""
        fwk = CmdLineFwk()

        # make empty pipeline
        args = _makeArgs()
        pipeline = fwk.makePipeline(args)
        self.assertIsInstance(pipeline, Pipeline)
        self.assertEqual(len(pipeline), 0)

        # a few tests with serialization
        with makeTmpFile() as tmpname:
            # make empty pipeline and store it in a file
            args = _makeArgs(save_pipeline=tmpname)
            pipeline = fwk.makePipeline(args)
            self.assertIsInstance(pipeline, Pipeline)

            # read pipeline from a file
            args = _makeArgs(pipeline=tmpname)
            pipeline = fwk.makePipeline(args)
            self.assertIsInstance(pipeline, Pipeline)
            self.assertEqual(len(pipeline), 0)

        # single task pipeline, task name can be anything here
        actions = [
            _ACTION_ADD_TASK("TaskOne:task1")
        ]
        args = _makeArgs(pipeline_actions=actions)
        pipeline = fwk.makePipeline(args)
        self.assertIsInstance(pipeline, Pipeline)
        self.assertEqual(len(pipeline), 1)

        # many-task pipeline
        actions = [
            _ACTION_ADD_TASK("TaskOne:task1a"),
            _ACTION_ADD_TASK("TaskTwo:task2"),
            _ACTION_ADD_TASK("TaskOne:task1b")
        ]
        args = _makeArgs(pipeline_actions=actions)
        pipeline = fwk.makePipeline(args)
        self.assertIsInstance(pipeline, Pipeline)
        self.assertEqual(len(pipeline), 3)

        # single task pipeline with config overrides, need real task class
        actions = [
            _ACTION_ADD_TASK(f"{_TASK_CLASS}:task"),
            _ACTION_CONFIG("task:addend=100")
        ]
        args = _makeArgs(pipeline_actions=actions)
        pipeline = fwk.makePipeline(args)
        taskDefs = list(pipeline.toExpandedPipeline())
        self.assertEqual(len(taskDefs), 1)
        self.assertEqual(taskDefs[0].config.addend, 100)

        overrides = b"config.addend = 1000\n"
        with makeTmpFile(overrides) as tmpname:
            actions = [
                _ACTION_ADD_TASK(f"{_TASK_CLASS}:task"),
                _ACTION_CONFIG_FILE("task:" + tmpname)
            ]
            args = _makeArgs(pipeline_actions=actions)
            pipeline = fwk.makePipeline(args)
            taskDefs = list(pipeline.toExpandedPipeline())
            self.assertEqual(len(taskDefs), 1)
            self.assertEqual(taskDefs[0].config.addend, 1000)

        # Check --instrument option, for now it only checks that it does not
        # crash.
        actions = [
            _ACTION_ADD_TASK(f"{_TASK_CLASS}:task"),
            _ACTION_ADD_INSTRUMENT("Instrument")
        ]
        args = _makeArgs(pipeline_actions=actions)
        pipeline = fwk.makePipeline(args)
    def testMakeGraphFromSave(self):
        """Tests for CmdLineFwk.makeGraph method.

        Only the most trivial case is tested, one that does not do actual
        graph building.
        """
        fwk = CmdLineFwk()

        with makeTmpFile(suffix=".qgraph") as tmpname, makeSQLiteRegistry() as registryConfig:

            # make non-empty graph and store it in a file
            qgraph = _makeQGraph()
            with open(tmpname, "wb") as saveFile:
                qgraph.save(saveFile)
            args = _makeArgs(qgraph=tmpname, registryConfig=registryConfig, execution_butler_location=None)
            qgraph = fwk.makeGraph(None, args)
            self.assertIsInstance(qgraph, QuantumGraph)
            self.assertEqual(len(qgraph), 1)

            # will fail if graph id does not match
            args = _makeArgs(
                qgraph=tmpname,
                qgraph_id="R2-D2 is that you?",
                registryConfig=registryConfig,
                execution_butler_location=None
            )
            with self.assertRaisesRegex(ValueError, "graphID does not match"):
                fwk.makeGraph(None, args)

            # save with wrong object type
            with open(tmpname, "wb") as saveFile:
                pickle.dump({}, saveFile)
            args = _makeArgs(qgraph=tmpname, registryConfig=registryConfig, execution_butler_location=None)
            with self.assertRaises(ValueError):
                fwk.makeGraph(None, args)

            # reading an empty graph from pickle should work, but makeGraph()
            # will return None and issue a warning
            qgraph = QuantumGraph(dict())
            with open(tmpname, "wb") as saveFile:
                qgraph.save(saveFile)
            args = _makeArgs(qgraph=tmpname, registryConfig=registryConfig, execution_butler_location=None)
            with self.assertWarnsRegex(UserWarning, "QuantumGraph is empty"):
                # this also tests that warning is generated for empty graph
                qgraph = fwk.makeGraph(None, args)
            self.assertIs(qgraph, None)
    def testShowPipeline(self):
        """Test for --show options for pipeline."""
        fwk = CmdLineFwk()

        actions = [
            _ACTION_ADD_TASK(f"{_TASK_CLASS}:task"),
            _ACTION_CONFIG("task:addend=100")
        ]
        args = _makeArgs(pipeline_actions=actions)
        pipeline = fwk.makePipeline(args)

        args.show = ["pipeline"]
        fwk.showInfo(args, pipeline)
        args.show = ["config"]
        fwk.showInfo(args, pipeline)
        args.show = ["history=task::addend"]
        fwk.showInfo(args, pipeline)
        args.show = ["tasks"]
        fwk.showInfo(args, pipeline)
class CmdLineFwkTestCaseWithButler(unittest.TestCase):
    """A test case for CmdLineFwk that uses an on-disk butler repository."""

    def setUp(self):
        super().setUp()
        self.root = tempfile.mkdtemp()
        self.nQuanta = 5
        self.pipeline = makeSimplePipeline(nQuanta=self.nQuanta)

    def tearDown(self):
        shutil.rmtree(self.root, ignore_errors=True)
        super().tearDown()
    def testSimpleQGraph(self):
        """Test successful execution of trivial quantum graph."""
        args = _makeArgs(butler_config=self.root, input="test", output="output")
        butler = makeSimpleButler(self.root, run=args.input, inMemory=False)
        populateButler(self.pipeline, butler)

        fwk = CmdLineFwk()
        taskFactory = AddTaskFactoryMock()

        qgraph = fwk.makeGraph(self.pipeline, args)
        self.assertEqual(len(qgraph.taskGraph), self.nQuanta)
        self.assertEqual(len(qgraph), self.nQuanta)

        # run whole thing
        fwk.runPipeline(qgraph, taskFactory, args)
        self.assertEqual(taskFactory.countExec, self.nQuanta)
    def testSimpleQGraphNoSkipExisting_inputs(self):
        """Test for the case when output data for one task already appears in
        the _input_ collection, but no ``--extend-run`` or ``--skip-existing``
        option is present.
        """
        args = _makeArgs(
            butler_config=self.root,
            input="test",
            output="output",
        )
        butler = makeSimpleButler(self.root, run=args.input, inMemory=False)
        populateButler(
            self.pipeline, butler,
            datasetTypes={args.input: [
                "add_dataset0",
                "add_dataset1", "add2_dataset1",
                "add_init_output1",
                "task0_config",
                "task0_metadata",
                "task0_log",
            ]}
        )

        fwk = CmdLineFwk()
        taskFactory = AddTaskFactoryMock()

        qgraph = fwk.makeGraph(self.pipeline, args)
        self.assertEqual(len(qgraph.taskGraph), self.nQuanta)
        # With the current implementation the graph has all nQuanta quanta,
        # but when executing one quantum is skipped.
        self.assertEqual(len(qgraph), self.nQuanta)

        # run whole thing
        fwk.runPipeline(qgraph, taskFactory, args)
        self.assertEqual(taskFactory.countExec, self.nQuanta)
    def testSimpleQGraphSkipExisting_inputs(self):
        """Test for ``--skip-existing`` when output data for one task already
        appears in the _input_ collection. No ``--extend-run`` option is
        needed for this case.
        """
        args = _makeArgs(
            butler_config=self.root,
            input="test",
            output="output",
            skip_existing_in=("test", ),
        )
        butler = makeSimpleButler(self.root, run=args.input, inMemory=False)
        populateButler(
            self.pipeline, butler,
            datasetTypes={args.input: [
                "add_dataset0",
                "add_dataset1", "add2_dataset1",
                "add_init_output1",
                "task0_config",
                "task0_metadata",
                "task0_log",
            ]}
        )

        fwk = CmdLineFwk()
        taskFactory = AddTaskFactoryMock()

        qgraph = fwk.makeGraph(self.pipeline, args)
        self.assertEqual(len(qgraph.taskGraph), self.nQuanta)
        self.assertEqual(len(qgraph), self.nQuanta - 1)

        # run whole thing
        fwk.runPipeline(qgraph, taskFactory, args)
        self.assertEqual(taskFactory.countExec, self.nQuanta - 1)
    def testSimpleQGraphSkipExisting_outputs(self):
        """Test for ``--skip-existing`` when output data for one task already
        appears in the _output_ collection. The ``--extend-run`` option is
        needed for this case.
        """
        args = _makeArgs(
            butler_config=self.root,
            input="test",
            output_run="output/run",
            skip_existing_in=("output/run", ),
        )
        butler = makeSimpleButler(self.root, run=args.input, inMemory=False)
        populateButler(
            self.pipeline, butler, datasetTypes={
                args.input: ["add_dataset0"],
                args.output_run: [
                    "add_dataset1", "add2_dataset1",
                    "add_init_output1",
                    "task0_metadata",
                    "task0_log",
                ]
            }
        )

        fwk = CmdLineFwk()
        taskFactory = AddTaskFactoryMock()

        # fails without --extend-run
        with self.assertRaisesRegex(ValueError, "--extend-run was not given"):
            qgraph = fwk.makeGraph(self.pipeline, args)

        # retry with --extend-run
        args.extend_run = True
        qgraph = fwk.makeGraph(self.pipeline, args)

        self.assertEqual(len(qgraph.taskGraph), self.nQuanta)
        # Graph does not include quantum for the first task
        self.assertEqual(len(qgraph), self.nQuanta - 1)

        # run whole thing
        fwk.runPipeline(qgraph, taskFactory, args)
        self.assertEqual(taskFactory.countExec, self.nQuanta - 1)
    def testSimpleQGraphOutputsFail(self):
        """Test continuing execution of trivial quantum graph with partial
        outputs.
        """
        args = _makeArgs(butler_config=self.root, input="test", output="output")
        butler = makeSimpleButler(self.root, run=args.input, inMemory=False)
        populateButler(self.pipeline, butler)

        fwk = CmdLineFwk()
        taskFactory = AddTaskFactoryMock(stopAt=3)

        qgraph = fwk.makeGraph(self.pipeline, args)
        self.assertEqual(len(qgraph), self.nQuanta)

        # run first three quanta
        with self.assertRaises(MPGraphExecutorError):
            fwk.runPipeline(qgraph, taskFactory, args)
        self.assertEqual(taskFactory.countExec, 3)

        butler.registry.refresh()

        # drop one of the two outputs from one task
        ref1 = butler.registry.findDataset("add2_dataset2", collections=args.output,
                                           instrument="INSTR", detector=0)
        self.assertIsNotNone(ref1)
        # also drop the metadata output
        ref2 = butler.registry.findDataset("task1_metadata", collections=args.output,
                                           instrument="INSTR", detector=0)
        self.assertIsNotNone(ref2)
        butler.pruneDatasets([ref1, ref2], disassociate=True, unstore=True, purge=True)

        taskFactory.stopAt = -1
        args.skip_existing_in = (args.output, )
        args.extend_run = True
        args.no_versions = True
        with self.assertRaises(MPGraphExecutorError):
            fwk.runPipeline(qgraph, taskFactory, args)
    def testSimpleQGraphClobberOutputs(self):
        """Test continuing execution of trivial quantum graph with
        --clobber-outputs.
        """
        args = _makeArgs(butler_config=self.root, input="test", output="output")
        butler = makeSimpleButler(self.root, run=args.input, inMemory=False)
        populateButler(self.pipeline, butler)

        fwk = CmdLineFwk()
        taskFactory = AddTaskFactoryMock(stopAt=3)

        qgraph = fwk.makeGraph(self.pipeline, args)

        # graph should have all nQuanta quanta
        self.assertEqual(len(qgraph), self.nQuanta)

        # run first three quanta
        with self.assertRaises(MPGraphExecutorError):
            fwk.runPipeline(qgraph, taskFactory, args)
        self.assertEqual(taskFactory.countExec, 3)

        butler.registry.refresh()

        # drop one of the two outputs from one task
        ref1 = butler.registry.findDataset("add2_dataset2", collections=args.output,
                                           dataId=dict(instrument="INSTR", detector=0))
        self.assertIsNotNone(ref1)
        # also drop the metadata output
        ref2 = butler.registry.findDataset("task1_metadata", collections=args.output,
                                           dataId=dict(instrument="INSTR", detector=0))
        self.assertIsNotNone(ref2)
        butler.pruneDatasets([ref1, ref2], disassociate=True, unstore=True, purge=True)

        taskFactory.stopAt = -1
        args.skip_existing = True
        args.extend_run = True
        args.clobber_outputs = True
        args.no_versions = True
        fwk.runPipeline(qgraph, taskFactory, args)
        # number of executed quanta is incremented
        self.assertEqual(taskFactory.countExec, self.nQuanta + 1)
    def testSimpleQGraphReplaceRun(self):
        """Test repeated execution of trivial quantum graph with
        --replace-run.
        """
        args = _makeArgs(
            butler_config=self.root,
            input="test",
            output="output",
            output_run="output/run1")
        butler = makeSimpleButler(self.root, run=args.input, inMemory=False)
        populateButler(self.pipeline, butler)

        fwk = CmdLineFwk()
        taskFactory = AddTaskFactoryMock()

        qgraph = fwk.makeGraph(self.pipeline, args)

        # graph should have all nQuanta quanta
        self.assertEqual(len(qgraph), self.nQuanta)

        # deep copy is needed because quanta are updated in place
        fwk.runPipeline(copy.deepcopy(qgraph), taskFactory, args)
        self.assertEqual(taskFactory.countExec, self.nQuanta)

        # need to refresh collections explicitly (or make new butler/registry)
        butler.registry.refresh()
        collections = set(butler.registry.queryCollections(...))
        self.assertEqual(collections, {"test", "output", "output/run1"})

        # number of datasets written by pipeline:
        #  - nQuanta of init_outputs
        #  - nQuanta of configs
        #  - packages (single dataset)
        #  - nQuanta * two output datasets
        #  - nQuanta of metadata
        #  - nQuanta of log output
        n_outputs = self.nQuanta * 6 + 1
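        # For this test pipeline (nQuanta = 5) that works out to
        # 5 * 6 + 1 = 31 datasets per output RUN collection.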
        refs = butler.registry.queryDatasets(..., collections="output/run1")
        self.assertEqual(len(list(refs)), n_outputs)

        # re-run with --replace-run (--input is ignored, as long as it hasn't
        # changed)
        args.replace_run = True
        args.output_run = "output/run2"
        fwk.runPipeline(copy.deepcopy(qgraph), taskFactory, args)

        butler.registry.refresh()
        collections = set(butler.registry.queryCollections(...))
        self.assertEqual(collections, {"test", "output", "output/run1", "output/run2"})

        # new output collection
        refs = butler.registry.queryDatasets(..., collections="output/run2")
        self.assertEqual(len(list(refs)), n_outputs)

        # old output collection is still there
        refs = butler.registry.queryDatasets(..., collections="output/run1")
        self.assertEqual(len(list(refs)), n_outputs)

        # re-run with --replace-run and --prune-replaced=unstore
        args.replace_run = True
        args.prune_replaced = "unstore"
        args.output_run = "output/run3"
        fwk.runPipeline(copy.deepcopy(qgraph), taskFactory, args)

        butler.registry.refresh()
        collections = set(butler.registry.queryCollections(...))
        self.assertEqual(collections, {"test", "output", "output/run1", "output/run2", "output/run3"})

        # new output collection
        refs = butler.registry.queryDatasets(..., collections="output/run3")
        self.assertEqual(len(list(refs)), n_outputs)

        # old output collection is still there, and it has all datasets, but
        # non-InitOutputs are not in the datastore
        refs = butler.registry.queryDatasets(..., collections="output/run2")
        refs = list(refs)
        self.assertEqual(len(refs), n_outputs)
        initOutNameRe = re.compile("packages|task.*_config|add_init_output.*")
        for ref in refs:
            if initOutNameRe.fullmatch(ref.datasetType.name):
                butler.get(ref, collections="output/run2")
            else:
                with self.assertRaises(FileNotFoundError):
                    butler.get(ref, collections="output/run2")

        # re-run with --replace-run and --prune-replaced=purge
        # This time also remove --input; passing the same inputs that we
        # started with and not passing inputs at all should be equivalent.
        args.input = None
        args.replace_run = True
        args.prune_replaced = "purge"
        args.output_run = "output/run4"
        fwk.runPipeline(copy.deepcopy(qgraph), taskFactory, args)

        butler.registry.refresh()
        collections = set(butler.registry.queryCollections(...))
        # output/run3 should disappear now
        self.assertEqual(collections, {"test", "output", "output/run1", "output/run2", "output/run4"})

        # new output collection
        refs = butler.registry.queryDatasets(..., collections="output/run4")
        self.assertEqual(len(list(refs)), n_outputs)

        # Trying to run again with inputs that aren't exactly what we started
        # with is an error, and the kind that should not modify the data repo.
        with self.assertRaises(ValueError):
            args.input = ["test", "output/run2"]
            args.prune_replaced = None
            args.replace_run = True
            args.output_run = "output/run5"
            fwk.runPipeline(copy.deepcopy(qgraph), taskFactory, args)
        butler.registry.refresh()
        collections = set(butler.registry.queryCollections(...))
        self.assertEqual(collections, {"test", "output", "output/run1", "output/run2", "output/run4"})

        with self.assertRaises(ValueError):
            args.input = ["output/run2", "test"]
            args.prune_replaced = None
            args.replace_run = True
            args.output_run = "output/run6"
            fwk.runPipeline(copy.deepcopy(qgraph), taskFactory, args)
        butler.registry.refresh()
        collections = set(butler.registry.queryCollections(...))
        self.assertEqual(collections, {"test", "output", "output/run1", "output/run2", "output/run4"})
    def testSubgraph(self):
        """Test that a saved quantum graph can be reloaded with only a subset
        of its nodes.
        """
        args = _makeArgs(butler_config=self.root, input="test", output="output")
        butler = makeSimpleButler(self.root, run=args.input, inMemory=False)
        populateButler(self.pipeline, butler)

        fwk = CmdLineFwk()
        qgraph = fwk.makeGraph(self.pipeline, args)

        # Select first two nodes for execution. This depends on node ordering
        # which I assume is the same as execution order.
        nNodes = 2
        nodeIds = [node.nodeId.number for node in qgraph]
        nodeIds = nodeIds[:nNodes]

        self.assertEqual(len(qgraph.taskGraph), self.nQuanta)
        self.assertEqual(len(qgraph), self.nQuanta)

        with makeTmpFile(suffix=".qgraph") as tmpname, makeSQLiteRegistry() as registryConfig:
            with open(tmpname, "wb") as saveFile:
                qgraph.save(saveFile)

            args = _makeArgs(qgraph=tmpname, qgraph_node_id=nodeIds, registryConfig=registryConfig,
                             execution_butler_location=None)
            fwk = CmdLineFwk()

            # load graph, should only read a subset
            qgraph = fwk.makeGraph(pipeline=None, args=args)
            self.assertEqual(len(qgraph), nNodes)
    def testShowGraph(self):
        """Test for --show options for quantum graph."""
        fwk = CmdLineFwk()

        nQuanta = 2
        butler, qgraph = makeSimpleQGraph(nQuanta, root=self.root)

        args = _makeArgs(show=["graph"])
        fwk.showInfo(args, pipeline=None, graph=qgraph)

    def testShowGraphWorkflow(self):
        """Test for the --show workflow option for quantum graph."""
        fwk = CmdLineFwk()

        nQuanta = 2
        butler, qgraph = makeSimpleQGraph(nQuanta, root=self.root)

        args = _makeArgs(show=["workflow"])
        fwk.showInfo(args, pipeline=None, graph=qgraph)

        # TODO: cannot test the "uri" option presently; it instantiates a
        # butler from command line options and there is no way to pass a
        # butler mock to that code.
class MyMemoryTestCase(lsst.utils.tests.MemoryTestCase):
    pass


def setup_module(module):
    lsst.utils.tests.init()


if __name__ == "__main__":
    lsst.utils.tests.init()
    unittest.main()