Coverage for tests/test_cmdLineFwk.py: 20%

Shortcuts on this page:
  r m x p    toggle line displays
  j k        next/previous highlighted chunk
  0 (zero)   top of page
  1 (one)    first highlighted chunk
1# This file is part of ctrl_mpexec.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <https://www.gnu.org/licenses/>.
22"""Simple unit test for cmdLineFwk module.
23"""
25import click
26from types import SimpleNamespace
27import contextlib
28import copy
29from dataclasses import dataclass
30import logging
31import os
32import pickle
33import re
34import shutil
35import tempfile
36from typing import NamedTuple
37import unittest
39from lsst.ctrl.mpexec.cmdLineFwk import CmdLineFwk
40from lsst.ctrl.mpexec.cli.opt import run_options
41from lsst.ctrl.mpexec.cli.utils import (
42 _ACTION_ADD_TASK,
43 _ACTION_CONFIG,
44 _ACTION_CONFIG_FILE,
45 _ACTION_ADD_INSTRUMENT,
46 PipetaskCommand,
47)
48from lsst.daf.butler import (Config, Quantum, Registry, DimensionUniverse, DatasetRef, DataCoordinate)
49from lsst.daf.butler.core.datasets.type import DatasetType
50from lsst.daf.butler.registry import RegistryConfig
51from lsst.obs.base import Instrument
52import lsst.pex.config as pexConfig
53from lsst.pipe.base import (Pipeline, PipelineTaskConfig, QuantumGraph, TaskDef, PipelineTaskConnections)
54from lsst.pipe.base.graphBuilder import DatasetQueryConstraintVariant as DQCVariant
55import lsst.pipe.base.connectionTypes as cT
56import lsst.utils.tests
57from lsst.pipe.base.tests.simpleQGraph import (
58 AddTaskFactoryMock,
59 makeSimpleButler,
60 makeSimplePipeline,
61 makeSimpleQGraph,
62 populateButler,
63 AddTask)
64from lsst.utils.tests import temporaryDirectory
# Configure test logging; the level can be tuned through the
# UNIT_TEST_LOGGING_LEVEL environment variable (defaults to INFO, and
# falls back to INFO for unrecognized level names).
logging.basicConfig(level=getattr(logging, os.environ.get("UNIT_TEST_LOGGING_LEVEL", "INFO"), logging.INFO))

# Have to monkey-patch Instrument.fromName() to not retrieve non-existing
# instrument from registry, these tests can run fine without actual instrument
# and implementing full mock for Instrument is too complicated.
Instrument.fromName = lambda name, reg: None
@contextlib.contextmanager
def makeTmpFile(contents=None, suffix=None):
    """Yield the name of a freshly created temporary file.

    The file is removed (best-effort) when the context exits.

    Parameters
    ----------
    contents : `bytes`, optional
        Bytes to write into the file before yielding its name.
    suffix : `str`, optional
        Suffix for the generated file name.
    """
    handle, path = tempfile.mkstemp(suffix=suffix)
    if contents:
        os.write(handle, contents)
    os.close(handle)
    yield path
    # Ignore removal errors, e.g. if the test already deleted the file.
    with contextlib.suppress(OSError):
        os.remove(path)
@contextlib.contextmanager
def makeSQLiteRegistry(create=True):
    """Context manager providing a fresh SQLite registry database.

    Parameters
    ----------
    create : `bool`, optional
        If `True` (default), initialize the registry schema in the new
        database before yielding.

    Yields
    ------
    config : `RegistryConfig`
        Registry configuration for initialized registry database.
    """
    with temporaryDirectory() as tmpdir:
        registryConfig = RegistryConfig()
        registryConfig["db"] = f"sqlite:///{tmpdir}/gen3.sqlite"
        if create:
            Registry.createFromConfig(registryConfig)
        yield registryConfig
class SimpleConnections(PipelineTaskConnections, dimensions=(),
                        defaultTemplates={"template": "simple"}):
    """Trivial connections class with a single InitInput dataset whose
    name is derived from the ``template`` default.
    """
    schema = cT.InitInput(doc="Schema",
                          name="{template}schema",
                          storageClass="SourceCatalog")
class SimpleConfig(PipelineTaskConfig, pipelineConnections=SimpleConnections):
    """Trivial task config with one free-form string field."""

    field = pexConfig.Field(dtype=str, doc="arbitrary string")

    def setDefaults(self):
        # No extra defaults; delegate to the base class.
        PipelineTaskConfig.setDefaults(self)
def _makeArgs(registryConfig=None, **kwargs):
    """Return parsed command line arguments.

    Default option values are gathered by invoking a fake "pipetask run"
    click command. By default butler_config is set to `Config` populated
    with some defaults, it can be overridden completely by keyword argument.

    Parameters
    ----------
    registryConfig : `RegistryConfig`, optional
        Override for registry configuration.
    **kwargs
        Overrides for other arguments.

    Returns
    -------
    args : `types.SimpleNamespace`
        Namespace mimicking parsed pipetask command line arguments.
    """
    # Use a mock to get the default value of arguments to 'run'.
    # NOTE: unittest.mock is lazily importable via ``unittest`` since
    # Python 3.9 — presumably that is relied on here; confirm if targeting
    # older interpreters.
    mock = unittest.mock.Mock()

    @click.command(cls=PipetaskCommand)
    @run_options()
    def fake_run(ctx, **kwargs):
        """Fake "pipetask run" command for gathering input arguments.

        The arguments & options should always match the arguments & options in
        the "real" command function `lsst.ctrl.mpexec.cli.cmd.run`.
        """
        # This ``kwargs`` intentionally shadows the outer one; the mock
        # records the full set of defaults supplied by click.
        mock(**kwargs)

    runner = click.testing.CliRunner()
    # --butler-config is the only required option
    result = runner.invoke(fake_run, "--butler-config /")
    if result.exit_code != 0:
        raise RuntimeError(f"Failure getting default args from 'fake_run': {result}")
    mock.assert_called_once()
    args = mock.call_args[1]
    # Rename click option keys to the attribute names expected downstream.
    args["enableLsstDebug"] = args.pop("debug")
    args["execution_butler_location"] = args.pop("save_execution_butler")
    if "pipeline_actions" not in args:
        args["pipeline_actions"] = []
    args = SimpleNamespace(**args)

    # override butler_config with our defaults
    if "butler_config" not in kwargs:
        args.butler_config = Config()
        if registryConfig:
            args.butler_config["registry"] = registryConfig
        # The default datastore has a relocatable root, so we need to specify
        # some root here for it to use
        args.butler_config.configFile = "."

    # override arguments from keyword parameters
    for key, value in kwargs.items():
        setattr(args, key, value)
    # Normalize the dataset query constraint string to its variant object.
    args.dataset_query_constraint = DQCVariant.fromExpression(args.dataset_query_constraint)
    return args
class FakeDSType(NamedTuple):
    """Minimal stand-in for a dataset type, carrying only a name."""

    name: str
@dataclass(frozen=True)
class FakeDSRef:
    """Minimal stand-in for a dataset reference used by these tests."""

    datasetType: str
    dataId: tuple

    def isComponent(self):
        # Fake refs never represent composite components.
        return False
# Task class name used by tests, needs to be importable
_TASK_CLASS = "lsst.pipe.base.tests.simpleQGraph.AddTask"
def _makeQGraph():
    """Make a trivial QuantumGraph with one quantum.

    The only thing that we need to do with this quantum graph is to pickle
    it, the quanta in this graph are not usable for anything else.

    Returns
    -------
    qgraph : `~lsst.pipe.base.QuantumGraph`
    """
    # Minimal dimension universe with two simple elements, A and B.
    dimensionConfig = Config({
        "version": 1,
        "skypix": {
            "common": "htm7",
            "htm": {
                "class": "lsst.sphgeom.HtmPixelization",
                "max_level": 24,
            }
        },
        "elements": {
            "A": {
                "keys": [{
                    "name": "id",
                    "type": "int",
                }],
                "storage": {
                    "cls": "lsst.daf.butler.registry.dimensions.table.TableDimensionRecordStorage",
                },
            },
            "B": {
                "keys": [{
                    "name": "id",
                    "type": "int",
                }],
                "storage": {
                    "cls": "lsst.daf.butler.registry.dimensions.table.TableDimensionRecordStorage",
                },
            }
        },
        "packers": {}
    })
    universe = DimensionUniverse(config=dimensionConfig)
    dsType = DatasetType("A", tuple(), storageClass="ExposureF", universe=universe)
    taskDef = TaskDef(taskName=_TASK_CLASS, config=AddTask.ConfigClass(), taskClass=AddTask)
    dataId = DataCoordinate.standardize({"A": 1, "B": 2}, universe=universe)
    ref = DatasetRef(dsType, dataId)  # type: ignore
    quantum = Quantum(taskName=_TASK_CLASS, inputs={dsType: [ref]})
    return QuantumGraph({taskDef: {quantum}})
class CmdLineFwkTestCase(unittest.TestCase):
    """A test case for CmdLineFwk methods that do not need a butler.
    """

    def testMakePipeline(self):
        """Tests for CmdLineFwk.makePipeline method
        """
        fwk = CmdLineFwk()

        # make empty pipeline
        args = _makeArgs()
        pipeline = fwk.makePipeline(args)
        self.assertIsInstance(pipeline, Pipeline)
        self.assertEqual(len(pipeline), 0)

        # few tests with serialization
        with makeTmpFile() as tmpname:
            # make empty pipeline and store it in a file
            args = _makeArgs(save_pipeline=tmpname)
            pipeline = fwk.makePipeline(args)
            self.assertIsInstance(pipeline, Pipeline)

            # read pipeline from a file
            args = _makeArgs(pipeline=tmpname)
            pipeline = fwk.makePipeline(args)
            self.assertIsInstance(pipeline, Pipeline)
            self.assertEqual(len(pipeline), 0)

        # single task pipeline, task name can be anything here
        actions = [
            _ACTION_ADD_TASK("TaskOne:task1")
        ]
        args = _makeArgs(pipeline_actions=actions)
        pipeline = fwk.makePipeline(args)
        self.assertIsInstance(pipeline, Pipeline)
        self.assertEqual(len(pipeline), 1)

        # many task pipeline
        actions = [
            _ACTION_ADD_TASK("TaskOne:task1a"),
            _ACTION_ADD_TASK("TaskTwo:task2"),
            _ACTION_ADD_TASK("TaskOne:task1b")
        ]
        args = _makeArgs(pipeline_actions=actions)
        pipeline = fwk.makePipeline(args)
        self.assertIsInstance(pipeline, Pipeline)
        self.assertEqual(len(pipeline), 3)

        # single task pipeline with config overrides, need real task class
        actions = [
            _ACTION_ADD_TASK(f"{_TASK_CLASS}:task"),
            _ACTION_CONFIG("task:addend=100")
        ]
        args = _makeArgs(pipeline_actions=actions)
        pipeline = fwk.makePipeline(args)
        taskDefs = list(pipeline.toExpandedPipeline())
        self.assertEqual(len(taskDefs), 1)
        self.assertEqual(taskDefs[0].config.addend, 100)

        # same override via a config file instead of a command-line value
        overrides = b"config.addend = 1000\n"
        with makeTmpFile(overrides) as tmpname:
            actions = [
                _ACTION_ADD_TASK(f"{_TASK_CLASS}:task"),
                _ACTION_CONFIG_FILE("task:" + tmpname)
            ]
            args = _makeArgs(pipeline_actions=actions)
            pipeline = fwk.makePipeline(args)
            taskDefs = list(pipeline.toExpandedPipeline())
            self.assertEqual(len(taskDefs), 1)
            self.assertEqual(taskDefs[0].config.addend, 1000)

        # Check --instrument option, for now it only checks that it does not
        # crash.
        actions = [
            _ACTION_ADD_TASK(f"{_TASK_CLASS}:task"),
            _ACTION_ADD_INSTRUMENT("Instrument")
        ]
        args = _makeArgs(pipeline_actions=actions)
        pipeline = fwk.makePipeline(args)

    def testMakeGraphFromSave(self):
        """Tests for CmdLineFwk.makeGraph method.

        Only most trivial case is tested that does not do actual graph
        building.
        """
        fwk = CmdLineFwk()

        with makeTmpFile(suffix=".qgraph") as tmpname, makeSQLiteRegistry() as registryConfig:

            # make non-empty graph and store it in a file
            qgraph = _makeQGraph()
            with open(tmpname, "wb") as saveFile:
                qgraph.save(saveFile)
            args = _makeArgs(qgraph=tmpname, registryConfig=registryConfig, execution_butler_location=None)
            qgraph = fwk.makeGraph(None, args)
            self.assertIsInstance(qgraph, QuantumGraph)
            self.assertEqual(len(qgraph), 1)

            # will fail if graph id does not match
            args = _makeArgs(
                qgraph=tmpname,
                qgraph_id="R2-D2 is that you?",
                registryConfig=registryConfig,
                execution_butler_location=None
            )
            with self.assertRaisesRegex(ValueError, "graphID does not match"):
                fwk.makeGraph(None, args)

            # save with wrong object type
            with open(tmpname, "wb") as saveFile:
                pickle.dump({}, saveFile)
            args = _makeArgs(qgraph=tmpname, registryConfig=registryConfig, execution_butler_location=None)
            with self.assertRaises(ValueError):
                fwk.makeGraph(None, args)

            # reading empty graph from pickle should work but makeGraph()
            # will return None and make a warning
            qgraph = QuantumGraph(dict())
            with open(tmpname, "wb") as saveFile:
                qgraph.save(saveFile)
            args = _makeArgs(qgraph=tmpname, registryConfig=registryConfig, execution_butler_location=None)
            with self.assertWarnsRegex(UserWarning, "QuantumGraph is empty"):
                # this also tests that warning is generated for empty graph
                qgraph = fwk.makeGraph(None, args)
            self.assertIs(qgraph, None)

    def testShowPipeline(self):
        """Test for --show options for pipeline.

        These are smoke tests only: each --show variant must run without
        raising.
        """
        fwk = CmdLineFwk()

        actions = [
            _ACTION_ADD_TASK(f"{_TASK_CLASS}:task"),
            _ACTION_CONFIG("task:addend=100")
        ]
        args = _makeArgs(pipeline_actions=actions)
        pipeline = fwk.makePipeline(args)

        args.show = ["pipeline"]
        fwk.showInfo(args, pipeline)
        args.show = ["config"]
        fwk.showInfo(args, pipeline)
        args.show = ["history=task::addend"]
        fwk.showInfo(args, pipeline)
        args.show = ["tasks"]
        fwk.showInfo(args, pipeline)
class CmdLineFwkTestCaseWithButler(unittest.TestCase):
    """A test case for CmdLineFwk that executes quantum graphs against a
    temporary on-disk butler repository.
    """

    def setUp(self):
        # Fixed: previously called super().setUpClass() from the per-test
        # hook; the matching per-test hook is setUp().
        super().setUp()
        self.root = tempfile.mkdtemp()
        self.nQuanta = 5
        self.pipeline = makeSimplePipeline(nQuanta=self.nQuanta)

    def tearDown(self):
        shutil.rmtree(self.root, ignore_errors=True)
        # Fixed: previously called super().tearDownClass() (classmethod
        # fixture) from the per-test hook.
        super().tearDown()

    def testSimpleQGraph(self):
        """Test successful execution of trivial quantum graph.
        """
        args = _makeArgs(butler_config=self.root, input="test", output="output")
        butler = makeSimpleButler(self.root, run=args.input, inMemory=False)
        populateButler(self.pipeline, butler)

        fwk = CmdLineFwk()
        taskFactory = AddTaskFactoryMock()

        qgraph = fwk.makeGraph(self.pipeline, args)
        self.assertEqual(len(qgraph.taskGraph), self.nQuanta)
        self.assertEqual(len(qgraph), self.nQuanta)

        # run whole thing
        fwk.runPipeline(qgraph, taskFactory, args)
        self.assertEqual(taskFactory.countExec, self.nQuanta)

    def testSimpleQGraphNoSkipExisting_inputs(self):
        """Test for case when output data for one task already appears in
        _input_ collection, but no ``--extend-run`` or ``--skip-existing``
        option is present.
        """
        args = _makeArgs(
            butler_config=self.root,
            input="test",
            output="output",
        )
        butler = makeSimpleButler(self.root, run=args.input, inMemory=False)
        populateButler(
            self.pipeline, butler,
            datasetTypes={args.input: [
                "add_dataset0",
                "add_dataset1", "add2_dataset1",
                "add_init_output1",
                "task0_config",
                "task0_metadata",
                "task0_log",
            ]}
        )

        fwk = CmdLineFwk()
        taskFactory = AddTaskFactoryMock()

        qgraph = fwk.makeGraph(self.pipeline, args)
        self.assertEqual(len(qgraph.taskGraph), self.nQuanta)
        # With current implementation graph has all nQuanta quanta, but when
        # executing one quantum is skipped.
        self.assertEqual(len(qgraph), self.nQuanta)

        # run whole thing
        fwk.runPipeline(qgraph, taskFactory, args)
        self.assertEqual(taskFactory.countExec, self.nQuanta)

    def testSimpleQGraphSkipExisting_inputs(self):
        """Test for ``--skip-existing`` with output data for one task already
        appears in _input_ collection. No ``--extend-run`` option is needed
        for this case.
        """
        args = _makeArgs(
            butler_config=self.root,
            input="test",
            output="output",
            skip_existing_in=("test", ),
        )
        butler = makeSimpleButler(self.root, run=args.input, inMemory=False)
        populateButler(
            self.pipeline, butler,
            datasetTypes={args.input: [
                "add_dataset0",
                "add_dataset1", "add2_dataset1",
                "add_init_output1",
                "task0_config",
                "task0_metadata",
                "task0_log",
            ]}
        )

        fwk = CmdLineFwk()
        taskFactory = AddTaskFactoryMock()

        qgraph = fwk.makeGraph(self.pipeline, args)
        self.assertEqual(len(qgraph.taskGraph), self.nQuanta)
        # One quantum is dropped from the graph because its outputs exist.
        self.assertEqual(len(qgraph), self.nQuanta - 1)

        # run whole thing
        fwk.runPipeline(qgraph, taskFactory, args)
        self.assertEqual(taskFactory.countExec, self.nQuanta - 1)

    def testSimpleQGraphSkipExisting_outputs(self):
        """Test for ``--skip-existing`` with output data for one task already
        appears in _output_ collection. The ``--extend-run`` option is needed
        for this case.
        """
        args = _makeArgs(
            butler_config=self.root,
            input="test",
            output_run="output/run",
            skip_existing_in=("output/run", ),
        )
        butler = makeSimpleButler(self.root, run=args.input, inMemory=False)
        populateButler(
            self.pipeline, butler, datasetTypes={
                args.input: ["add_dataset0"],
                args.output_run: [
                    "add_dataset1", "add2_dataset1",
                    "add_init_output1",
                    "task0_metadata",
                    "task0_log",
                ]
            }
        )

        fwk = CmdLineFwk()
        taskFactory = AddTaskFactoryMock()

        # fails without --extend-run
        with self.assertRaisesRegex(ValueError, "--extend-run was not given"):
            qgraph = fwk.makeGraph(self.pipeline, args)

        # retry with --extend-run
        args.extend_run = True
        qgraph = fwk.makeGraph(self.pipeline, args)

        self.assertEqual(len(qgraph.taskGraph), self.nQuanta)
        # Graph does not include quantum for first task
        self.assertEqual(len(qgraph), self.nQuanta - 1)

        # run whole thing
        fwk.runPipeline(qgraph, taskFactory, args)
        self.assertEqual(taskFactory.countExec, self.nQuanta - 1)

    def testSimpleQGraphOutputsFail(self):
        """Test continuing execution of trivial quantum graph with partial
        outputs.
        """
        args = _makeArgs(butler_config=self.root, input="test", output="output")
        butler = makeSimpleButler(self.root, run=args.input, inMemory=False)
        populateButler(self.pipeline, butler)

        fwk = CmdLineFwk()
        taskFactory = AddTaskFactoryMock(stopAt=3)

        qgraph = fwk.makeGraph(self.pipeline, args)
        self.assertEqual(len(qgraph), self.nQuanta)

        # run first three quanta
        with self.assertRaises(RuntimeError):
            fwk.runPipeline(qgraph, taskFactory, args)
        self.assertEqual(taskFactory.countExec, 3)

        butler.registry.refresh()

        # drop one of the two outputs from one task
        ref1 = butler.registry.findDataset("add2_dataset2", collections=args.output,
                                           instrument="INSTR", detector=0)
        self.assertIsNotNone(ref1)
        # also drop the metadata output
        ref2 = butler.registry.findDataset("task1_metadata", collections=args.output,
                                           instrument="INSTR", detector=0)
        self.assertIsNotNone(ref2)
        butler.pruneDatasets([ref1, ref2], disassociate=True, unstore=True, purge=True)

        # Resuming with inconsistent outputs (one of two present) must fail.
        taskFactory.stopAt = -1
        args.skip_existing_in = (args.output, )
        args.extend_run = True
        args.no_versions = True
        excRe = "Registry inconsistency while checking for existing outputs.*"
        with self.assertRaisesRegex(RuntimeError, excRe):
            fwk.runPipeline(qgraph, taskFactory, args)

    def testSimpleQGraphClobberOutputs(self):
        """Test continuing execution of trivial quantum graph with
        --clobber-outputs.
        """
        args = _makeArgs(butler_config=self.root, input="test", output="output")
        butler = makeSimpleButler(self.root, run=args.input, inMemory=False)
        populateButler(self.pipeline, butler)

        fwk = CmdLineFwk()
        taskFactory = AddTaskFactoryMock(stopAt=3)

        qgraph = fwk.makeGraph(self.pipeline, args)

        # should have one task and number of quanta
        self.assertEqual(len(qgraph), self.nQuanta)

        # run first three quanta
        with self.assertRaises(RuntimeError):
            fwk.runPipeline(qgraph, taskFactory, args)
        self.assertEqual(taskFactory.countExec, 3)

        butler.registry.refresh()

        # drop one of the two outputs from one task
        ref1 = butler.registry.findDataset("add2_dataset2", collections=args.output,
                                           dataId=dict(instrument="INSTR", detector=0))
        self.assertIsNotNone(ref1)
        # also drop the metadata output
        ref2 = butler.registry.findDataset("task1_metadata", collections=args.output,
                                           dataId=dict(instrument="INSTR", detector=0))
        self.assertIsNotNone(ref2)
        butler.pruneDatasets([ref1, ref2], disassociate=True, unstore=True, purge=True)

        # With --clobber-outputs the partially-complete quantum is re-run.
        taskFactory.stopAt = -1
        args.skip_existing = True
        args.extend_run = True
        args.clobber_outputs = True
        args.no_versions = True
        fwk.runPipeline(qgraph, taskFactory, args)
        # number of executed quanta is incremented
        self.assertEqual(taskFactory.countExec, self.nQuanta + 1)

    def testSimpleQGraphReplaceRun(self):
        """Test repeated execution of trivial quantum graph with
        --replace-run.
        """
        args = _makeArgs(
            butler_config=self.root,
            input="test",
            output="output",
            output_run="output/run1")
        butler = makeSimpleButler(self.root, run=args.input, inMemory=False)
        populateButler(self.pipeline, butler)

        fwk = CmdLineFwk()
        taskFactory = AddTaskFactoryMock()

        qgraph = fwk.makeGraph(self.pipeline, args)

        # should have one task and number of quanta
        self.assertEqual(len(qgraph), self.nQuanta)

        # deep copy is needed because quanta are updated in place
        fwk.runPipeline(copy.deepcopy(qgraph), taskFactory, args)
        self.assertEqual(taskFactory.countExec, self.nQuanta)

        # need to refresh collections explicitly (or make new butler/registry)
        butler.registry.refresh()
        collections = set(butler.registry.queryCollections(...))
        self.assertEqual(collections, {"test", "output", "output/run1"})

        # number of datasets written by pipeline:
        #  - nQuanta of init_outputs
        #  - nQuanta of configs
        #  - packages (single dataset)
        #  - nQuanta * two output datasets
        #  - nQuanta of metadata
        #  - nQuanta of log output
        n_outputs = self.nQuanta * 6 + 1
        refs = butler.registry.queryDatasets(..., collections="output/run1")
        self.assertEqual(len(list(refs)), n_outputs)

        # re-run with --replace-run (--inputs is ignored, as long as it hasn't
        # changed)
        args.replace_run = True
        args.output_run = "output/run2"
        fwk.runPipeline(copy.deepcopy(qgraph), taskFactory, args)

        butler.registry.refresh()
        collections = set(butler.registry.queryCollections(...))
        self.assertEqual(collections, {"test", "output", "output/run1", "output/run2"})

        # new output collection
        refs = butler.registry.queryDatasets(..., collections="output/run2")
        self.assertEqual(len(list(refs)), n_outputs)

        # old output collection is still there
        refs = butler.registry.queryDatasets(..., collections="output/run1")
        self.assertEqual(len(list(refs)), n_outputs)

        # re-run with --replace-run and --prune-replaced=unstore
        args.replace_run = True
        args.prune_replaced = "unstore"
        args.output_run = "output/run3"
        fwk.runPipeline(copy.deepcopy(qgraph), taskFactory, args)

        butler.registry.refresh()
        collections = set(butler.registry.queryCollections(...))
        self.assertEqual(collections, {"test", "output", "output/run1", "output/run2", "output/run3"})

        # new output collection
        refs = butler.registry.queryDatasets(..., collections="output/run3")
        self.assertEqual(len(list(refs)), n_outputs)

        # old output collection is still there, and it has all datasets but
        # non-InitOutputs are not in datastore
        refs = butler.registry.queryDatasets(..., collections="output/run2")
        refs = list(refs)
        self.assertEqual(len(refs), n_outputs)
        initOutNameRe = re.compile("packages|task.*_config|add_init_output.*")
        for ref in refs:
            if initOutNameRe.fullmatch(ref.datasetType.name):
                butler.get(ref, collections="output/run2")
            else:
                with self.assertRaises(FileNotFoundError):
                    butler.get(ref, collections="output/run2")

        # re-run with --replace-run and --prune-replaced=purge
        # This time also remove --input; passing the same inputs that we
        # started with and not passing inputs at all should be equivalent.
        args.input = None
        args.replace_run = True
        args.prune_replaced = "purge"
        args.output_run = "output/run4"
        fwk.runPipeline(copy.deepcopy(qgraph), taskFactory, args)

        butler.registry.refresh()
        collections = set(butler.registry.queryCollections(...))
        # output/run3 should disappear now
        self.assertEqual(collections, {"test", "output", "output/run1", "output/run2", "output/run4"})

        # new output collection
        refs = butler.registry.queryDatasets(..., collections="output/run4")
        self.assertEqual(len(list(refs)), n_outputs)

        # Trying to run again with inputs that aren't exactly what we started
        # with is an error, and the kind that should not modify the data repo.
        with self.assertRaises(ValueError):
            args.input = ["test", "output/run2"]
            args.prune_replaced = None
            args.replace_run = True
            args.output_run = "output/run5"
            fwk.runPipeline(copy.deepcopy(qgraph), taskFactory, args)
        butler.registry.refresh()
        collections = set(butler.registry.queryCollections(...))
        self.assertEqual(collections, {"test", "output", "output/run1", "output/run2", "output/run4"})
        with self.assertRaises(ValueError):
            args.input = ["output/run2", "test"]
            args.prune_replaced = None
            args.replace_run = True
            args.output_run = "output/run6"
            fwk.runPipeline(copy.deepcopy(qgraph), taskFactory, args)
        butler.registry.refresh()
        collections = set(butler.registry.queryCollections(...))
        self.assertEqual(collections, {"test", "output", "output/run1", "output/run2", "output/run4"})

    def testSubgraph(self):
        """Test successful loading of a subset of a saved quantum graph.
        """
        args = _makeArgs(butler_config=self.root, input="test", output="output")
        butler = makeSimpleButler(self.root, run=args.input, inMemory=False)
        populateButler(self.pipeline, butler)

        fwk = CmdLineFwk()
        qgraph = fwk.makeGraph(self.pipeline, args)

        # Select first two nodes for execution. This depends on node ordering
        # which I assume is the same as execution order.
        nNodes = 2
        nodeIds = [node.nodeId for node in qgraph]
        nodeIds = nodeIds[:nNodes]

        self.assertEqual(len(qgraph.taskGraph), self.nQuanta)
        self.assertEqual(len(qgraph), self.nQuanta)

        with makeTmpFile(suffix=".qgraph") as tmpname, makeSQLiteRegistry() as registryConfig:
            with open(tmpname, "wb") as saveFile:
                qgraph.save(saveFile)

            args = _makeArgs(qgraph=tmpname, qgraph_node_id=nodeIds, registryConfig=registryConfig,
                             execution_butler_location=None)
            fwk = CmdLineFwk()

            # load graph, should only read a subset
            qgraph = fwk.makeGraph(pipeline=None, args=args)
            self.assertEqual(len(qgraph), nNodes)

    def testShowGraph(self):
        """Test for --show options for quantum graph.
        """
        fwk = CmdLineFwk()

        nQuanta = 2
        butler, qgraph = makeSimpleQGraph(nQuanta, root=self.root)

        args = _makeArgs(show=["graph"])
        fwk.showInfo(args, pipeline=None, graph=qgraph)

    def testShowGraphWorkflow(self):
        """Smoke test for --show workflow on a quantum graph."""
        fwk = CmdLineFwk()

        nQuanta = 2
        butler, qgraph = makeSimpleQGraph(nQuanta, root=self.root)

        args = _makeArgs(show=["workflow"])
        fwk.showInfo(args, pipeline=None, graph=qgraph)

        # TODO: cannot test "uri" option presently, it instantiates
        # butler from command line options and there is no way to pass butler
        # mock to that code.
class MyMemoryTestCase(lsst.utils.tests.MemoryTestCase):
    """Standard LSST boilerplate: checks for file-descriptor leaks."""
    pass
def setup_module(module):
    """Pytest module-level hook: initialize LSST test utilities.

    Parameters
    ----------
    module : `~types.ModuleType`
        Module being set up; unused here.
    """
    lsst.utils.tests.init()
# Standard script entry point for running this test file directly.
if __name__ == "__main__":
    lsst.utils.tests.init()
    unittest.main()