Coverage for tests/test_cmdLineFwk.py : 19%

# This file is part of ctrl_mpexec.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

"""Simple unit test for cmdLineFwk module.
"""

import click
import click.testing  # click.testing is not imported implicitly by "import click"
from types import SimpleNamespace
import contextlib
import copy
from dataclasses import dataclass
import logging
import os
import pickle
import shutil
import tempfile
from typing import NamedTuple
import unittest
import unittest.mock  # needed for unittest.mock.Mock() below

from lsst.ctrl.mpexec.cmdLineFwk import CmdLineFwk
from lsst.ctrl.mpexec.cli.opt import run_options
from lsst.ctrl.mpexec.cli.utils import (
    _ACTION_ADD_TASK,
    _ACTION_CONFIG,
    _ACTION_CONFIG_FILE,
    _ACTION_ADD_INSTRUMENT,
    PipetaskCommand,
)
from lsst.daf.butler import Config, Quantum, Registry
from lsst.daf.butler.registry import RegistryConfig
from lsst.obs.base import Instrument
import lsst.pex.config as pexConfig
from lsst.pipe.base import (Pipeline, PipelineTask, PipelineTaskConfig,
                            QuantumGraph, TaskDef, TaskFactory,
                            PipelineTaskConnections)
import lsst.pipe.base.connectionTypes as cT
import lsst.utils.tests
from lsst.pipe.base.tests.simpleQGraph import AddTaskFactoryMock, makeSimpleQGraph
from lsst.utils.tests import temporaryDirectory


logging.basicConfig(level=logging.INFO)

# Have to monkey-patch Instrument.fromName() so that it does not try to
# retrieve a non-existent instrument from the registry; these tests run fine
# without an actual instrument, and implementing a full mock for Instrument
# would be too complicated.
Instrument.fromName = lambda name, reg: None
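
# A more scoped alternative (illustrative sketch only; these tests keep the
# module-level monkey-patch above) would be to replace the method per test
# with `unittest.mock`, which restores the original automatically on exit:
#
#     with unittest.mock.patch.object(Instrument, "fromName", return_value=None):
#         ...  # code under test that may call Instrument.fromName(name, registry)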


@contextlib.contextmanager
def makeTmpFile(contents=None, suffix=None):
    """Context manager for generating temporary file name.

    Temporary file is deleted on exiting context.

    Parameters
    ----------
    contents : `bytes`, optional
        Data to write into the file.
    suffix : `str`, optional
        Suffix for the temporary file name.
    """
    fd, tmpname = tempfile.mkstemp(suffix=suffix)
    if contents:
        os.write(fd, contents)
    os.close(fd)
    try:
        yield tmpname
    finally:
        # Remove the file even if the caller's block raised.
        with contextlib.suppress(OSError):
            os.remove(tmpname)
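
# How the helper is used by the tests below (illustrative sketch):
#
#     overrides = b"config.addend = 1000\n"
#     with makeTmpFile(overrides) as tmpname:
#         ...  # pass `tmpname` wherever a config-override file path is expected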


@contextlib.contextmanager
def makeSQLiteRegistry(create=True):
    """Context manager to create new empty registry database.

    Yields
    ------
    config : `RegistryConfig`
        Registry configuration for initialized registry database.
    """
    with temporaryDirectory() as tmpdir:
        uri = f"sqlite:///{tmpdir}/gen3.sqlite"
        config = RegistryConfig()
        config["db"] = uri
        if create:
            Registry.createFromConfig(config)
        yield config


class SimpleConnections(PipelineTaskConnections, dimensions=(),
                        defaultTemplates={"template": "simple"}):
    schema = cT.InitInput(doc="Schema",
                          name="{template}schema",
                          storageClass="SourceCatalog")


class SimpleConfig(PipelineTaskConfig, pipelineConnections=SimpleConnections):
    field = pexConfig.Field(dtype=str, doc="arbitrary string")

    def setDefaults(self):
        PipelineTaskConfig.setDefaults(self)


class TaskOne(PipelineTask):
    ConfigClass = SimpleConfig
    _DefaultName = "taskOne"


class TaskTwo(PipelineTask):
    ConfigClass = SimpleConfig
    _DefaultName = "taskTwo"


class TaskFactoryMock(TaskFactory):
    def loadTaskClass(self, taskName):
        if taskName == "TaskOne":
            return TaskOne, "TaskOne"
        elif taskName == "TaskTwo":
            return TaskTwo, "TaskTwo"

    def makeTask(self, taskClass, name, config, overrides, butler):
        if config is None:
            config = taskClass.ConfigClass()
        if overrides:
            overrides.applyTo(config)
        return taskClass(config=config, butler=butler, name=name)


def _makeArgs(registryConfig=None, **kwargs):
    """Return parsed command line arguments.

    By default ``butler_config`` is set to a `Config` populated with some
    defaults; it can be overridden completely via a keyword argument.

    Parameters
    ----------
    registryConfig : `RegistryConfig`, optional
        Override for registry configuration.
    **kwargs
        Overrides for other arguments.
    """
    # Use a mock to get the default value of arguments to 'run'.
    mock = unittest.mock.Mock()

    @click.command(cls=PipetaskCommand)
    @run_options()
    def fake_run(ctx, **kwargs):
        """Fake "pipetask run" command for gathering input arguments.

        The arguments & options should always match the arguments & options
        in the "real" command function `lsst.ctrl.mpexec.cli.cmd.run`.
        """
        mock(**kwargs)

    runner = click.testing.CliRunner()
    result = runner.invoke(fake_run)
    if result.exit_code != 0:
        raise RuntimeError(f"Failure getting default args from 'fake_run': {result}")
    mock.assert_called_once()
    args = mock.call_args[1]
    args["enableLsstDebug"] = args.pop("debug")
    if "pipeline_actions" not in args:
        args["pipeline_actions"] = []
    args = SimpleNamespace(**args)

    # override butler_config with our defaults
    args.butler_config = Config()
    if registryConfig:
        args.butler_config["registry"] = registryConfig
    # The default datastore has a relocatable root, so we need to specify
    # some root here for it to use.
    args.butler_config.configFile = "."
    # override arguments from keyword parameters
    for key, value in kwargs.items():
        setattr(args, key, value)
    return args
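
# How the tests below typically build their arguments (illustrative sketch;
# "graph.qgraph" is a placeholder path):
#
#     with makeSQLiteRegistry() as registryConfig:
#         args = _makeArgs(qgraph="graph.qgraph", registryConfig=registryConfig,
#                          execution_butler_location=None)
#         ...  # pass `args` to CmdLineFwk.makeGraph()/runPipeline()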


class FakeTaskDef(NamedTuple):
    name: str


@dataclass(frozen=True)
class FakeDSRef:
    datasetType: str
    dataId: tuple


def _makeQGraph():
    """Make a trivial QuantumGraph with one quantum.

    The only thing that we need to do with this quantum graph is to pickle
    it; the quanta in this graph are not usable for anything else.

    Returns
    -------
    qgraph : `~lsst.pipe.base.QuantumGraph`
    """
    # The task name in TaskDef needs to be a real importable name; use one
    # that is sure to exist.
    taskDef = TaskDef(taskName="lsst.pipe.base.Struct", config=SimpleConfig())
    quanta = [Quantum(taskName="lsst.pipe.base.Struct",
                      inputs={FakeTaskDef("A"): FakeDSRef("A", (1, 2))})]  # type: ignore
    qgraph = QuantumGraph({taskDef: set(quanta)})
    return qgraph


class CmdLineFwkTestCase(unittest.TestCase):
    """A test case for CmdLineFwk
    """

    def testMakePipeline(self):
        """Tests for CmdLineFwk.makePipeline method
        """
        fwk = CmdLineFwk()

        # make empty pipeline
        args = _makeArgs()
        pipeline = fwk.makePipeline(args)
        self.assertIsInstance(pipeline, Pipeline)
        self.assertEqual(len(pipeline), 0)

        # few tests with serialization
        with makeTmpFile() as tmpname:
            # make empty pipeline and store it in a file
            args = _makeArgs(save_pipeline=tmpname)
            pipeline = fwk.makePipeline(args)
            self.assertIsInstance(pipeline, Pipeline)

            # read pipeline from a file
            args = _makeArgs(pipeline=tmpname)
            pipeline = fwk.makePipeline(args)
            self.assertIsInstance(pipeline, Pipeline)
            self.assertEqual(len(pipeline), 0)

        # single task pipeline
        actions = [
            _ACTION_ADD_TASK("TaskOne:task1")
        ]
        args = _makeArgs(pipeline_actions=actions)
        pipeline = fwk.makePipeline(args)
        self.assertIsInstance(pipeline, Pipeline)
        self.assertEqual(len(pipeline), 1)

        # many-task pipeline
        actions = [
            _ACTION_ADD_TASK("TaskOne:task1a"),
            _ACTION_ADD_TASK("TaskTwo:task2"),
            _ACTION_ADD_TASK("TaskOne:task1b")
        ]
        args = _makeArgs(pipeline_actions=actions)
        pipeline = fwk.makePipeline(args)
        self.assertIsInstance(pipeline, Pipeline)
        self.assertEqual(len(pipeline), 3)

        # single task pipeline with config overrides, cannot use TaskOne, need
        # something that can be imported with `doImport()`
        actions = [
            _ACTION_ADD_TASK("lsst.pipe.base.tests.simpleQGraph.AddTask:task"),
            _ACTION_CONFIG("task:addend=100")
        ]
        args = _makeArgs(pipeline_actions=actions)
        pipeline = fwk.makePipeline(args)
        taskDefs = list(pipeline.toExpandedPipeline())
        self.assertEqual(len(taskDefs), 1)
        self.assertEqual(taskDefs[0].config.addend, 100)

        overrides = b"config.addend = 1000\n"
        with makeTmpFile(overrides) as tmpname:
            actions = [
                _ACTION_ADD_TASK("lsst.pipe.base.tests.simpleQGraph.AddTask:task"),
                _ACTION_CONFIG_FILE("task:" + tmpname)
            ]
            args = _makeArgs(pipeline_actions=actions)
            pipeline = fwk.makePipeline(args)
            taskDefs = list(pipeline.toExpandedPipeline())
            self.assertEqual(len(taskDefs), 1)
            self.assertEqual(taskDefs[0].config.addend, 1000)

        # Check --instrument option, for now it only checks that it does not
        # crash.
        actions = [
            _ACTION_ADD_TASK("lsst.pipe.base.tests.simpleQGraph.AddTask:task"),
            _ACTION_ADD_INSTRUMENT("Instrument")
        ]
        args = _makeArgs(pipeline_actions=actions)
        pipeline = fwk.makePipeline(args)

    def testMakeGraphFromSave(self):
        """Tests for CmdLineFwk.makeGraph method.

        Only the most trivial case, which does not do actual graph building,
        is tested here.
        """
        fwk = CmdLineFwk()

        with makeTmpFile(suffix=".qgraph") as tmpname, makeSQLiteRegistry() as registryConfig:

            # make a non-empty graph and store it in a file
            qgraph = _makeQGraph()
            with open(tmpname, "wb") as saveFile:
                qgraph.save(saveFile)
            args = _makeArgs(qgraph=tmpname, registryConfig=registryConfig, execution_butler_location=None)
            qgraph = fwk.makeGraph(None, args)
            self.assertIsInstance(qgraph, QuantumGraph)
            self.assertEqual(len(qgraph), 1)

            # will fail if graph id does not match
            args = _makeArgs(
                qgraph=tmpname,
                qgraph_id="R2-D2 is that you?",
                registryConfig=registryConfig,
                execution_butler_location=None
            )
            with self.assertRaisesRegex(ValueError, "graphID does not match"):
                fwk.makeGraph(None, args)

            # save with wrong object type
            with open(tmpname, "wb") as saveFile:
                pickle.dump({}, saveFile)
            args = _makeArgs(qgraph=tmpname, registryConfig=registryConfig, execution_butler_location=None)
            with self.assertRaises(ValueError):
                fwk.makeGraph(None, args)

            # reading an empty graph from pickle should work, but makeGraph()
            # will return None and issue a warning
            qgraph = QuantumGraph(dict())
            with open(tmpname, "wb") as saveFile:
                qgraph.save(saveFile)
            args = _makeArgs(qgraph=tmpname, registryConfig=registryConfig, execution_butler_location=None)
            with self.assertWarnsRegex(UserWarning, "QuantumGraph is empty"):
                # this also tests that a warning is generated for an empty graph
                qgraph = fwk.makeGraph(None, args)
            self.assertIs(qgraph, None)

    def testShowPipeline(self):
        """Test for --show options for pipeline.
        """
        fwk = CmdLineFwk()

        actions = [
            _ACTION_ADD_TASK("lsst.pipe.base.tests.simpleQGraph.AddTask:task"),
            _ACTION_CONFIG("task:addend=100")
        ]
        args = _makeArgs(pipeline_actions=actions)
        pipeline = fwk.makePipeline(args)

        args.show = ["pipeline"]
        fwk.showInfo(args, pipeline)
        args.show = ["config"]
        fwk.showInfo(args, pipeline)
        args.show = ["history=task::addend"]
        fwk.showInfo(args, pipeline)
        args.show = ["tasks"]
        fwk.showInfo(args, pipeline)


class CmdLineFwkTestCaseWithButler(unittest.TestCase):
    """A test case for CmdLineFwk
    """

    def setUp(self):
        super().setUp()
        self.root = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.root, ignore_errors=True)
        super().tearDown()

    def testSimpleQGraph(self):
        """Test successful execution of trivial quantum graph.
        """
        nQuanta = 5
        butler, qgraph = makeSimpleQGraph(nQuanta, root=self.root)

        self.assertEqual(len(qgraph.taskGraph), 5)
        self.assertEqual(len(qgraph), nQuanta)

        args = _makeArgs()
        fwk = CmdLineFwk()
        taskFactory = AddTaskFactoryMock()

        # run whole thing
        fwk.runPipeline(qgraph, taskFactory, args, butler=butler)
        self.assertEqual(taskFactory.countExec, nQuanta)

    def testSimpleQGraphSkipExisting(self):
        """Test continuing execution of trivial quantum graph with
        ``--skip-existing``.
        """
        nQuanta = 5
        butler, qgraph = makeSimpleQGraph(nQuanta, root=self.root, inMemory=False)

        self.assertEqual(len(qgraph.taskGraph), 5)
        self.assertEqual(len(qgraph), nQuanta)

        args = _makeArgs()
        fwk = CmdLineFwk()
        taskFactory = AddTaskFactoryMock(stopAt=3)

        # run first three quanta
        with self.assertRaises(RuntimeError):
            fwk.runPipeline(qgraph, taskFactory, args, butler=butler)
        self.assertEqual(taskFactory.countExec, 3)

        # A failed task still makes a _log dataset; it has to be deleted
        # before retrying if clobber-outputs is not used.
        ref = butler.registry.findDataset("task3_log", instrument="INSTR", detector=0)
        self.assertIsNotNone(ref)
        butler.pruneDatasets([ref], disassociate=True, unstore=True, purge=True)

        # run remaining ones
        taskFactory.stopAt = -1
        args.skip_existing = True
        args.extend_run = True
        args.no_versions = True
        fwk.runPipeline(qgraph, taskFactory, args, butler=butler)
        self.assertEqual(taskFactory.countExec, nQuanta)

    def testSimpleQGraphOutputsFail(self):
        """Test continuing execution of trivial quantum graph with partial
        outputs.
        """
        nQuanta = 5
        butler, qgraph = makeSimpleQGraph(nQuanta, root=self.root, inMemory=False)

        # should have one task and number of quanta
        self.assertEqual(len(qgraph), nQuanta)

        args = _makeArgs()
        fwk = CmdLineFwk()
        taskFactory = AddTaskFactoryMock(stopAt=3)

        # run first three quanta
        with self.assertRaises(RuntimeError):
            fwk.runPipeline(qgraph, taskFactory, args, butler=butler)
        self.assertEqual(taskFactory.countExec, 3)

        # drop one of the two outputs from one task
        ref1 = butler.registry.findDataset("add2_dataset2", instrument="INSTR", detector=0)
        self.assertIsNotNone(ref1)
        # also drop the metadata output
        ref2 = butler.registry.findDataset("task1_metadata", instrument="INSTR", detector=0)
        self.assertIsNotNone(ref2)
        butler.pruneDatasets([ref1, ref2], disassociate=True, unstore=True, purge=True)

        taskFactory.stopAt = -1
        args.skip_existing = True
        args.extend_run = True
        args.no_versions = True
        excRe = "Registry inconsistency while checking for existing outputs.*"
        with self.assertRaisesRegex(RuntimeError, excRe):
            fwk.runPipeline(qgraph, taskFactory, args, butler=butler)

    def testSimpleQGraphClobberOutputs(self):
        """Test continuing execution of trivial quantum graph with
        ``--clobber-outputs``.
        """
        nQuanta = 5
        butler, qgraph = makeSimpleQGraph(nQuanta, root=self.root, inMemory=False)

        # should have one task and number of quanta
        self.assertEqual(len(qgraph), nQuanta)

        args = _makeArgs()
        fwk = CmdLineFwk()
        taskFactory = AddTaskFactoryMock(stopAt=3)

        # run first three quanta
        with self.assertRaises(RuntimeError):
            fwk.runPipeline(qgraph, taskFactory, args, butler=butler)
        self.assertEqual(taskFactory.countExec, 3)

        # drop one of the two outputs from one task
        ref1 = butler.registry.findDataset("add2_dataset2", instrument="INSTR", detector=0)
        self.assertIsNotNone(ref1)
        # also drop the metadata output
        ref2 = butler.registry.findDataset("task1_metadata", instrument="INSTR", detector=0)
        self.assertIsNotNone(ref2)
        butler.pruneDatasets([ref1, ref2], disassociate=True, unstore=True, purge=True)

        taskFactory.stopAt = -1
        args.skip_existing = True
        args.extend_run = True
        args.clobber_outputs = True
        args.no_versions = True
        fwk.runPipeline(qgraph, taskFactory, args, butler=butler)
        # number of executed quanta is incremented
        self.assertEqual(taskFactory.countExec, nQuanta + 1)

    def testSimpleQGraphReplaceRun(self):
        """Test repeated execution of trivial quantum graph with
        ``--replace-run``.
        """
        # need non-memory registry in this case
        nQuanta = 5
        butler, qgraph = makeSimpleQGraph(nQuanta, root=self.root, inMemory=False)

        # should have one task and number of quanta
        self.assertEqual(len(qgraph), nQuanta)

        fwk = CmdLineFwk()
        taskFactory = AddTaskFactoryMock()

        # run whole thing
        args = _makeArgs(
            butler_config=self.root,
            input="test",
            output="output",
            output_run="output/run1")
        # deep copy is needed because quanta are updated in place
        fwk.runPipeline(copy.deepcopy(qgraph), taskFactory, args)
        self.assertEqual(taskFactory.countExec, nQuanta)

        # need to refresh collections explicitly (or make new butler/registry)
        butler.registry.refresh()
        collections = set(butler.registry.queryCollections(...))
        self.assertEqual(collections, {"test", "output", "output/run1"})

        # number of datasets written by pipeline:
        #  - nQuanta of init_outputs
        #  - nQuanta of configs
        #  - packages (single dataset)
        #  - nQuanta * two output datasets
        #  - nQuanta of metadata
        #  - nQuanta of log output
        n_outputs = nQuanta * 6 + 1
        refs = butler.registry.queryDatasets(..., collections="output/run1")
        self.assertEqual(len(list(refs)), n_outputs)

        # re-run with --replace-run (--inputs is ignored, as long as it hasn't
        # changed)
        args.replace_run = True
        args.output_run = "output/run2"
        fwk.runPipeline(copy.deepcopy(qgraph), taskFactory, args)

        butler.registry.refresh()
        collections = set(butler.registry.queryCollections(...))
        self.assertEqual(collections, {"test", "output", "output/run1", "output/run2"})

        # new output collection
        refs = butler.registry.queryDatasets(..., collections="output/run2")
        self.assertEqual(len(list(refs)), n_outputs)

        # old output collection is still there
        refs = butler.registry.queryDatasets(..., collections="output/run1")
        self.assertEqual(len(list(refs)), n_outputs)

        # re-run with --replace-run and --prune-replaced=unstore
        args.replace_run = True
        args.prune_replaced = "unstore"
        args.output_run = "output/run3"
        fwk.runPipeline(copy.deepcopy(qgraph), taskFactory, args)

        butler.registry.refresh()
        collections = set(butler.registry.queryCollections(...))
        self.assertEqual(collections, {"test", "output", "output/run1", "output/run2", "output/run3"})

        # new output collection
        refs = butler.registry.queryDatasets(..., collections="output/run3")
        self.assertEqual(len(list(refs)), n_outputs)

        # old output collection is still there, and it has all datasets but
        # they are not in datastore
        refs = butler.registry.queryDatasets(..., collections="output/run2")
        refs = list(refs)
        self.assertEqual(len(refs), n_outputs)
        with self.assertRaises(FileNotFoundError):
            butler.get(refs[0], collections="output/run2")

        # re-run with --replace-run and --prune-replaced=purge
        # This time also remove --input; passing the same inputs that we
        # started with and not passing inputs at all should be equivalent.
        args.input = None
        args.replace_run = True
        args.prune_replaced = "purge"
        args.output_run = "output/run4"
        fwk.runPipeline(copy.deepcopy(qgraph), taskFactory, args)

        butler.registry.refresh()
        collections = set(butler.registry.queryCollections(...))
        # output/run3 should disappear now
        self.assertEqual(collections, {"test", "output", "output/run1", "output/run2", "output/run4"})

        # new output collection
        refs = butler.registry.queryDatasets(..., collections="output/run4")
        self.assertEqual(len(list(refs)), n_outputs)

        # Trying to run again with inputs that aren't exactly what we started
        # with is an error, and the kind that should not modify the data repo.
        with self.assertRaises(ValueError):
            args.input = ["test", "output/run2"]
            args.prune_replaced = None
            args.replace_run = True
            args.output_run = "output/run5"
            fwk.runPipeline(copy.deepcopy(qgraph), taskFactory, args)
        butler.registry.refresh()
        collections = set(butler.registry.queryCollections(...))
        self.assertEqual(collections, {"test", "output", "output/run1", "output/run2", "output/run4"})
        with self.assertRaises(ValueError):
            args.input = ["output/run2", "test"]
            args.prune_replaced = None
            args.replace_run = True
            args.output_run = "output/run6"
            fwk.runPipeline(copy.deepcopy(qgraph), taskFactory, args)
        butler.registry.refresh()
        collections = set(butler.registry.queryCollections(...))
        self.assertEqual(collections, {"test", "output", "output/run1", "output/run2", "output/run4"})

    def testSubgraph(self):
        """Test loading a subset of nodes from a saved quantum graph.
        """
        nQuanta = 5
        nNodes = 2
        butler, qgraph = makeSimpleQGraph(nQuanta, root=self.root)

        # Select first two nodes for execution. This depends on node ordering,
        # which I assume is the same as execution order.
        nodeIds = [node.nodeId.number for node in qgraph]
        nodeIds = nodeIds[:nNodes]

        self.assertEqual(len(qgraph.taskGraph), 5)
        self.assertEqual(len(qgraph), nQuanta)

        with makeTmpFile(suffix=".qgraph") as tmpname, makeSQLiteRegistry() as registryConfig:
            with open(tmpname, "wb") as saveFile:
                qgraph.save(saveFile)

            args = _makeArgs(qgraph=tmpname, qgraph_node_id=nodeIds, registryConfig=registryConfig,
                             execution_butler_location=None)
            fwk = CmdLineFwk()

            # load graph, should only read a subset
            qgraph = fwk.makeGraph(pipeline=None, args=args)
            self.assertEqual(len(qgraph), nNodes)

    def testShowGraph(self):
        """Test for --show options for quantum graph.
        """
        fwk = CmdLineFwk()

        nQuanta = 2
        butler, qgraph = makeSimpleQGraph(nQuanta, root=self.root)

        args = _makeArgs(show=["graph"])
        fwk.showInfo(args, pipeline=None, graph=qgraph)

    def testShowGraphWorkflow(self):
        fwk = CmdLineFwk()

        nQuanta = 2
        butler, qgraph = makeSimpleQGraph(nQuanta, root=self.root)

        args = _makeArgs(show=["workflow"])
        fwk.showInfo(args, pipeline=None, graph=qgraph)

        # TODO: cannot test the "uri" option presently; it instantiates a
        # butler from command line options and there is no way to pass a
        # butler mock to that code.


class MyMemoryTestCase(lsst.utils.tests.MemoryTestCase):
    pass


def setup_module(module):
    lsst.utils.tests.init()


if __name__ == "__main__":
    lsst.utils.tests.init()
    unittest.main()