# This file is part of ctrl_mpexec.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

"""Simple unit test for cmdLineFwk module.
"""

import click
import click.testing
from types import SimpleNamespace
import contextlib
import copy
from dataclasses import dataclass
import logging
import os
import pickle
import shutil
import tempfile
from typing import NamedTuple
import unittest
import unittest.mock

from lsst.ctrl.mpexec.cmdLineFwk import CmdLineFwk
from lsst.ctrl.mpexec.cli.opt import run_options
from lsst.ctrl.mpexec.cli.utils import (
    _ACTION_ADD_TASK,
    _ACTION_CONFIG,
    _ACTION_CONFIG_FILE,
    _ACTION_ADD_INSTRUMENT,
    PipetaskCommand,
)
from lsst.daf.butler import Config, Quantum, Registry
from lsst.daf.butler.registry import RegistryConfig
from lsst.obs.base import Instrument
import lsst.pex.config as pexConfig
from lsst.pipe.base import (Pipeline, PipelineTask, PipelineTaskConfig,
                            QuantumGraph, TaskDef, TaskFactory,
                            PipelineTaskConnections)
import lsst.pipe.base.connectionTypes as cT
import lsst.utils.tests
from lsst.pipe.base.tests.simpleQGraph import AddTaskFactoryMock, makeSimpleQGraph
from lsst.utils.tests import temporaryDirectory


logging.basicConfig(level=logging.INFO)

# Have to monkey-patch Instrument.fromName() so that it does not try to
# retrieve a non-existing instrument from the registry.  These tests run fine
# without an actual instrument, and implementing a full mock for Instrument
# would be too complicated.
Instrument.fromName = lambda name, reg: None


@contextlib.contextmanager
def makeTmpFile(contents=None, suffix=None):
    """Context manager for generating a temporary file name.

    The temporary file is deleted on exiting the context.

    Parameters
    ----------
    contents : `bytes`, optional
        Data to write into the file.
    suffix : `str`, optional
        Suffix for the temporary file name.
    """
    fd, tmpname = tempfile.mkstemp(suffix=suffix)
    if contents:
        os.write(fd, contents)
    os.close(fd)
    yield tmpname
    with contextlib.suppress(OSError):
        os.remove(tmpname)


@contextlib.contextmanager
def makeSQLiteRegistry(create=True):
    """Context manager to create a new empty registry database.

    Parameters
    ----------
    create : `bool`, optional
        If `True` (default), initialize the registry database.

    Yields
    ------
    config : `RegistryConfig`
        Registry configuration for the initialized registry database.
    """
    with temporaryDirectory() as tmpdir:
        uri = f"sqlite:///{tmpdir}/gen3.sqlite"
        config = RegistryConfig()
        config["db"] = uri
        if create:
            Registry.createFromConfig(config)
        yield config


class SimpleConnections(PipelineTaskConnections, dimensions=(),
                        defaultTemplates={"template": "simple"}):
    schema = cT.InitInput(doc="Schema",
                          name="{template}schema",
                          storageClass="SourceCatalog")


class SimpleConfig(PipelineTaskConfig, pipelineConnections=SimpleConnections):
    field = pexConfig.Field(dtype=str, doc="arbitrary string")

    def setDefaults(self):
        PipelineTaskConfig.setDefaults(self)


class TaskOne(PipelineTask):
    ConfigClass = SimpleConfig
    _DefaultName = "taskOne"


class TaskTwo(PipelineTask):
    ConfigClass = SimpleConfig
    _DefaultName = "taskTwo"


class TaskFactoryMock(TaskFactory):
    def loadTaskClass(self, taskName):
        if taskName == "TaskOne":
            return TaskOne, "TaskOne"
        elif taskName == "TaskTwo":
            return TaskTwo, "TaskTwo"

    def makeTask(self, taskClass, name, config, overrides, butler):
        if config is None:
            config = taskClass.ConfigClass()
            if overrides:
                overrides.applyTo(config)
        return taskClass(config=config, butler=butler, name=name)


def _makeArgs(registryConfig=None, **kwargs):
    """Return parsed command line arguments.

    By default ``butler_config`` is set to a `Config` populated with some
    defaults; it can be overridden completely by a keyword argument.

    Parameters
    ----------
    registryConfig : `RegistryConfig`, optional
        Override for registry configuration.
    **kwargs
        Overrides for other arguments.
    """
    # Use a mock to get the default value of arguments to 'run'.
    mock = unittest.mock.Mock()

    @click.command(cls=PipetaskCommand)
    @run_options()
    def fake_run(ctx, **kwargs):
        """Fake "pipetask run" command for gathering input arguments.

        The arguments & options should always match the arguments & options
        in the "real" command function `lsst.ctrl.mpexec.cli.cmd.run`.
        """
        mock(**kwargs)

    runner = click.testing.CliRunner()
    result = runner.invoke(fake_run)
    if result.exit_code != 0:
        raise RuntimeError(f"Failure getting default args from 'fake_run': {result}")
    mock.assert_called_once()
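    # Click invokes fake_run with each option's default value, so the keyword
    # arguments captured by the mock (call_args[1]) form a complete set of
    # the "run" command defaults.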

    args = mock.call_args[1]
    args["enableLsstDebug"] = args.pop("debug")
    if "pipeline_actions" not in args:
        args["pipeline_actions"] = []
    args = SimpleNamespace(**args)

    # override butler_config with our defaults
    args.butler_config = Config()
    if registryConfig:
        args.butler_config["registry"] = registryConfig
    # The default datastore has a relocatable root, so we need to specify
    # some root here for it to use.
    args.butler_config.configFile = "."
    # override arguments from keyword parameters
    for key, value in kwargs.items():
        setattr(args, key, value)
    return args


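# Lightweight stand-ins for TaskDef and DatasetRef: _makeQGraph() below only
# needs objects that can be attached to a Quantum and pickled, not fully
# functional butler classes.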

class FakeTaskDef(NamedTuple):
    name: str


@dataclass(frozen=True)
class FakeDSRef:
    datasetType: str
    dataId: tuple



def _makeQGraph():
    """Make a trivial QuantumGraph with one quantum.

    The only thing that we need to do with this quantum graph is to pickle
    it; the quanta in this graph are not usable for anything else.

    Returns
    -------
    qgraph : `~lsst.pipe.base.QuantumGraph`
    """
    # The task name in TaskDef needs to be a real importable name; use one
    # that is sure to exist.
    taskDef = TaskDef(taskName="lsst.pipe.base.Struct", config=SimpleConfig())
    quanta = [Quantum(taskName="lsst.pipe.base.Struct",
                      inputs={FakeTaskDef("A"): FakeDSRef("A", (1, 2))})]  # type: ignore
    qgraph = QuantumGraph({taskDef: set(quanta)})
    return qgraph


class CmdLineFwkTestCase(unittest.TestCase):
    """A test case for CmdLineFwk.
    """

    def testMakePipeline(self):
        """Tests for CmdLineFwk.makePipeline method.
        """
        fwk = CmdLineFwk()

        # make empty pipeline
        args = _makeArgs()
        pipeline = fwk.makePipeline(args)
        self.assertIsInstance(pipeline, Pipeline)
        self.assertEqual(len(pipeline), 0)

        # a few tests with serialization
        with makeTmpFile() as tmpname:
            # make empty pipeline and store it in a file
            args = _makeArgs(save_pipeline=tmpname)
            pipeline = fwk.makePipeline(args)
            self.assertIsInstance(pipeline, Pipeline)

            # read pipeline from a file
            args = _makeArgs(pipeline=tmpname)
            pipeline = fwk.makePipeline(args)
            self.assertIsInstance(pipeline, Pipeline)
            self.assertEqual(len(pipeline), 0)

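        # The _ACTION_* callables from lsst.ctrl.mpexec.cli.utils describe
        # pipeline-building actions (add a task, apply a config override,
        # load a config file, add an instrument); they are passed to
        # _makeArgs() as pipeline_actions, mimicking what the pipetask
        # command line would collect.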

        # single-task pipeline
        actions = [
            _ACTION_ADD_TASK("TaskOne:task1")
        ]
        args = _makeArgs(pipeline_actions=actions)
        pipeline = fwk.makePipeline(args)
        self.assertIsInstance(pipeline, Pipeline)
        self.assertEqual(len(pipeline), 1)

        # multi-task pipeline
        actions = [
            _ACTION_ADD_TASK("TaskOne:task1a"),
            _ACTION_ADD_TASK("TaskTwo:task2"),
            _ACTION_ADD_TASK("TaskOne:task1b")
        ]
        args = _makeArgs(pipeline_actions=actions)
        pipeline = fwk.makePipeline(args)
        self.assertIsInstance(pipeline, Pipeline)
        self.assertEqual(len(pipeline), 3)

        # single-task pipeline with config overrides; cannot use TaskOne here,
        # we need something that can be imported with `doImport()`
        actions = [
            _ACTION_ADD_TASK("lsst.pipe.base.tests.simpleQGraph.AddTask:task"),
            _ACTION_CONFIG("task:addend=100")
        ]
        args = _makeArgs(pipeline_actions=actions)
        pipeline = fwk.makePipeline(args)
        taskDefs = list(pipeline.toExpandedPipeline())
        self.assertEqual(len(taskDefs), 1)
        self.assertEqual(taskDefs[0].config.addend, 100)

        overrides = b"config.addend = 1000\n"
        with makeTmpFile(overrides) as tmpname:
            actions = [
                _ACTION_ADD_TASK("lsst.pipe.base.tests.simpleQGraph.AddTask:task"),
                _ACTION_CONFIG_FILE("task:" + tmpname)
            ]
            args = _makeArgs(pipeline_actions=actions)
            pipeline = fwk.makePipeline(args)
            taskDefs = list(pipeline.toExpandedPipeline())
            self.assertEqual(len(taskDefs), 1)
            self.assertEqual(taskDefs[0].config.addend, 1000)

        # Check the --instrument option; for now this only checks that it
        # does not crash.
        actions = [
            _ACTION_ADD_TASK("lsst.pipe.base.tests.simpleQGraph.AddTask:task"),
            _ACTION_ADD_INSTRUMENT("Instrument")
        ]
        args = _makeArgs(pipeline_actions=actions)
        pipeline = fwk.makePipeline(args)

    def testMakeGraphFromSave(self):
        """Tests for CmdLineFwk.makeGraph method.

        Only the most trivial case is tested, one that does not do actual
        graph building.
        """
        fwk = CmdLineFwk()

        with makeTmpFile(suffix=".qgraph") as tmpname, makeSQLiteRegistry() as registryConfig:

            # make a non-empty graph and store it in a file
            qgraph = _makeQGraph()
            with open(tmpname, "wb") as saveFile:
                qgraph.save(saveFile)
            args = _makeArgs(qgraph=tmpname, registryConfig=registryConfig, execution_butler_location=None)
            qgraph = fwk.makeGraph(None, args)
            self.assertIsInstance(qgraph, QuantumGraph)
            self.assertEqual(len(qgraph), 1)

            # will fail if the graph id does not match
            args = _makeArgs(
                qgraph=tmpname,
                qgraph_id="R2-D2 is that you?",
                registryConfig=registryConfig,
                execution_butler_location=None
            )
            with self.assertRaisesRegex(ValueError, "graphID does not match"):
                fwk.makeGraph(None, args)

            # save an object of the wrong type
            with open(tmpname, "wb") as saveFile:
                pickle.dump({}, saveFile)
            args = _makeArgs(qgraph=tmpname, registryConfig=registryConfig, execution_butler_location=None)
            with self.assertRaises(ValueError):
                fwk.makeGraph(None, args)

            # reading an empty graph from pickle should work, but makeGraph()
            # will return None and issue a warning
            qgraph = QuantumGraph(dict())
            with open(tmpname, "wb") as saveFile:
                qgraph.save(saveFile)
            args = _makeArgs(qgraph=tmpname, registryConfig=registryConfig, execution_butler_location=None)
            with self.assertWarnsRegex(UserWarning, "QuantumGraph is empty"):
                # this also tests that a warning is generated for an empty graph
                qgraph = fwk.makeGraph(None, args)
            self.assertIs(qgraph, None)

    def testShowPipeline(self):
        """Test for --show options for pipeline.
        """
        fwk = CmdLineFwk()

        actions = [
            _ACTION_ADD_TASK("lsst.pipe.base.tests.simpleQGraph.AddTask:task"),
            _ACTION_CONFIG("task:addend=100")
        ]
        args = _makeArgs(pipeline_actions=actions)
        pipeline = fwk.makePipeline(args)

        args.show = ["pipeline"]
        fwk.showInfo(args, pipeline)
        args.show = ["config"]
        fwk.showInfo(args, pipeline)
        args.show = ["history=task::addend"]
        fwk.showInfo(args, pipeline)
        args.show = ["tasks"]
        fwk.showInfo(args, pipeline)


class CmdLineFwkTestCaseWithButler(unittest.TestCase):
    """A test case for CmdLineFwk.
    """

    def setUp(self):
        super().setUp()
        self.root = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.root, ignore_errors=True)
        super().tearDown()

    def testSimpleQGraph(self):
        """Test successful execution of a trivial quantum graph.
        """
        nQuanta = 5
        butler, qgraph = makeSimpleQGraph(nQuanta, root=self.root)

        self.assertEqual(len(qgraph.taskGraph), 5)
        self.assertEqual(len(qgraph), nQuanta)

        args = _makeArgs()
        fwk = CmdLineFwk()
        taskFactory = AddTaskFactoryMock()

        # run the whole thing
        fwk.runPipeline(qgraph, taskFactory, args, butler=butler)
        self.assertEqual(taskFactory.countExec, nQuanta)

    def testSimpleQGraphSkipExisting(self):
        """Test continuing execution of a trivial quantum graph with
        --skip-existing.
        """
        nQuanta = 5
        butler, qgraph = makeSimpleQGraph(nQuanta, root=self.root)

        self.assertEqual(len(qgraph.taskGraph), 5)
        self.assertEqual(len(qgraph), nQuanta)

        args = _makeArgs()
        fwk = CmdLineFwk()
        taskFactory = AddTaskFactoryMock(stopAt=3)
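        # stopAt=3 makes the mock task factory fail partway through, so only
        # the first three quanta execute; this simulates an interrupted run
        # that --skip-existing can then resume.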

        # run first three quanta
        with self.assertRaises(RuntimeError):
            fwk.runPipeline(qgraph, taskFactory, args, butler=butler)
        self.assertEqual(taskFactory.countExec, 3)

        # run remaining ones
        taskFactory.stopAt = -1
        args.skip_existing = True
        args.extend_run = True
        args.no_versions = True
        fwk.runPipeline(qgraph, taskFactory, args, butler=butler)
        self.assertEqual(taskFactory.countExec, nQuanta)

    def testSimpleQGraphOutputsFail(self):
        """Test continuing execution of a trivial quantum graph with partial
        outputs.
        """
        nQuanta = 5
        butler, qgraph = makeSimpleQGraph(nQuanta, root=self.root)

        # should have one task and the expected number of quanta
        self.assertEqual(len(qgraph), nQuanta)

        args = _makeArgs()
        fwk = CmdLineFwk()
        taskFactory = AddTaskFactoryMock(stopAt=3)

        # run first three quanta
        with self.assertRaises(RuntimeError):
            fwk.runPipeline(qgraph, taskFactory, args, butler=butler)
        self.assertEqual(taskFactory.countExec, 3)

        # drop one of the two outputs from one task
        ref1 = butler.registry.findDataset("add2_dataset2", instrument="INSTR", detector=0)
        self.assertIsNotNone(ref1)
        # also drop the metadata output
        ref2 = butler.registry.findDataset("task1_metadata", instrument="INSTR", detector=0)
        self.assertIsNotNone(ref2)
        butler.pruneDatasets([ref1, ref2], disassociate=True, unstore=True, purge=True)

        taskFactory.stopAt = -1
        args.skip_existing = True
        args.extend_run = True
        args.no_versions = True
        excRe = "Registry inconsistency while checking for existing outputs.*"
        with self.assertRaisesRegex(RuntimeError, excRe):
            fwk.runPipeline(qgraph, taskFactory, args, butler=butler)

    def testSimpleQGraphClobberOutputs(self):
        """Test continuing execution of a trivial quantum graph with
        --clobber-outputs.
        """
        nQuanta = 5
        butler, qgraph = makeSimpleQGraph(nQuanta, root=self.root)

        # should have one task and the expected number of quanta
        self.assertEqual(len(qgraph), nQuanta)

        args = _makeArgs()
        fwk = CmdLineFwk()
        taskFactory = AddTaskFactoryMock(stopAt=3)

        # run first three quanta
        with self.assertRaises(RuntimeError):
            fwk.runPipeline(qgraph, taskFactory, args, butler=butler)
        self.assertEqual(taskFactory.countExec, 3)

        # drop one of the two outputs from one task
        ref1 = butler.registry.findDataset("add2_dataset2", instrument="INSTR", detector=0)
        self.assertIsNotNone(ref1)
        # also drop the metadata output
        ref2 = butler.registry.findDataset("task1_metadata", instrument="INSTR", detector=0)
        self.assertIsNotNone(ref2)
        butler.pruneDatasets([ref1, ref2], disassociate=True, unstore=True, purge=True)

        taskFactory.stopAt = -1
        args.skip_existing = True
        args.extend_run = True
        args.clobber_outputs = True
        args.no_versions = True
        fwk.runPipeline(qgraph, taskFactory, args, butler=butler)
        # number of executed quanta is incremented
        self.assertEqual(taskFactory.countExec, nQuanta + 1)

    def testSimpleQGraphReplaceRun(self):
        """Test repeated execution of a trivial quantum graph with
        --replace-run.
        """
        # need a non-memory registry in this case
        nQuanta = 5
        butler, qgraph = makeSimpleQGraph(nQuanta, root=self.root, inMemory=False)

        # should have one task and the expected number of quanta
        self.assertEqual(len(qgraph), nQuanta)

        fwk = CmdLineFwk()
        taskFactory = AddTaskFactoryMock()

        # run the whole thing
        args = _makeArgs(
            butler_config=self.root,
            input="test",
            output="output",
            output_run="output/run1")
        # a deep copy is needed because quanta are updated in place
        fwk.runPipeline(copy.deepcopy(qgraph), taskFactory, args)
        self.assertEqual(taskFactory.countExec, nQuanta)

        # need to refresh collections explicitly (or make new butler/registry)
        butler.registry.refresh()
        collections = set(butler.registry.queryCollections(...))
        self.assertEqual(collections, {"test", "output", "output/run1"})

        # number of datasets written by the pipeline:
        # - nQuanta of init_outputs
        # - nQuanta of configs
        # - packages (single dataset)
        # - nQuanta * two output datasets
        # - nQuanta of metadata
        n_outputs = nQuanta * 5 + 1
        refs = butler.registry.queryDatasets(..., collections="output/run1")
        self.assertEqual(len(list(refs)), n_outputs)

        # re-run with --replace-run (--inputs is ignored, as long as it hasn't
        # changed)
        args.replace_run = True
        args.output_run = "output/run2"
        fwk.runPipeline(copy.deepcopy(qgraph), taskFactory, args)

        butler.registry.refresh()
        collections = set(butler.registry.queryCollections(...))
        self.assertEqual(collections, {"test", "output", "output/run1", "output/run2"})

        # new output collection
        refs = butler.registry.queryDatasets(..., collections="output/run2")
        self.assertEqual(len(list(refs)), n_outputs)

        # old output collection is still there
        refs = butler.registry.queryDatasets(..., collections="output/run1")
        self.assertEqual(len(list(refs)), n_outputs)

        # re-run with --replace-run and --prune-replaced=unstore
        args.replace_run = True
        args.prune_replaced = "unstore"
        args.output_run = "output/run3"
        fwk.runPipeline(copy.deepcopy(qgraph), taskFactory, args)

        butler.registry.refresh()
        collections = set(butler.registry.queryCollections(...))
        self.assertEqual(collections, {"test", "output", "output/run1", "output/run2", "output/run3"})

        # new output collection
        refs = butler.registry.queryDatasets(..., collections="output/run3")
        self.assertEqual(len(list(refs)), n_outputs)

        # the old output collection is still there and has all the datasets,
        # but they are no longer in the datastore
        refs = butler.registry.queryDatasets(..., collections="output/run2")
        refs = list(refs)
        self.assertEqual(len(refs), n_outputs)
        with self.assertRaises(FileNotFoundError):
            butler.get(refs[0], collections="output/run2")

        # re-run with --replace-run and --prune-replaced=purge
        # This time also remove --input; passing the same inputs that we
        # started with and not passing inputs at all should be equivalent.
        args.input = None
        args.replace_run = True
        args.prune_replaced = "purge"
        args.output_run = "output/run4"
        fwk.runPipeline(copy.deepcopy(qgraph), taskFactory, args)

        butler.registry.refresh()
        collections = set(butler.registry.queryCollections(...))
        # output/run3 should disappear now
        self.assertEqual(collections, {"test", "output", "output/run1", "output/run2", "output/run4"})

        # new output collection
        refs = butler.registry.queryDatasets(..., collections="output/run4")
        self.assertEqual(len(list(refs)), n_outputs)

        # Trying to run again with inputs that aren't exactly what we started
        # with is an error, and the kind that should not modify the data repo.
        with self.assertRaises(ValueError):
            args.input = ["test", "output/run2"]
            args.prune_replaced = None
            args.replace_run = True
            args.output_run = "output/run5"
            fwk.runPipeline(copy.deepcopy(qgraph), taskFactory, args)
        butler.registry.refresh()
        collections = set(butler.registry.queryCollections(...))
        self.assertEqual(collections, {"test", "output", "output/run1", "output/run2", "output/run4"})
        with self.assertRaises(ValueError):
            args.input = ["output/run2", "test"]
            args.prune_replaced = None
            args.replace_run = True
            args.output_run = "output/run6"
            fwk.runPipeline(copy.deepcopy(qgraph), taskFactory, args)
        butler.registry.refresh()
        collections = set(butler.registry.queryCollections(...))
        self.assertEqual(collections, {"test", "output", "output/run1", "output/run2", "output/run4"})

    def testSubgraph(self):
        """Test loading a subset of nodes from a saved quantum graph.
        """
        nQuanta = 5
        nNodes = 2
        butler, qgraph = makeSimpleQGraph(nQuanta, root=self.root)

        # Select the first two nodes for execution. This relies on the node
        # ordering, which is assumed to match the execution order.
        nodeIds = [node.nodeId.number for node in qgraph]
        nodeIds = nodeIds[:nNodes]

        self.assertEqual(len(qgraph.taskGraph), 5)
        self.assertEqual(len(qgraph), nQuanta)

        with makeTmpFile(suffix=".qgraph") as tmpname, makeSQLiteRegistry() as registryConfig:
            with open(tmpname, "wb") as saveFile:
                qgraph.save(saveFile)

            args = _makeArgs(qgraph=tmpname, qgraph_node_id=nodeIds, registryConfig=registryConfig,
                             execution_butler_location=None)
            fwk = CmdLineFwk()

            # load the graph; it should only read the selected subset
            qgraph = fwk.makeGraph(pipeline=None, args=args)
            self.assertEqual(len(qgraph), nNodes)

    def testShowGraph(self):
        """Test for --show options for quantum graph.
        """
        fwk = CmdLineFwk()

        nQuanta = 2
        butler, qgraph = makeSimpleQGraph(nQuanta, root=self.root)

        args = _makeArgs(show=["graph"])
        fwk.showInfo(args, pipeline=None, graph=qgraph)

    def testShowGraphWorkflow(self):
        fwk = CmdLineFwk()

        nQuanta = 2
        butler, qgraph = makeSimpleQGraph(nQuanta, root=self.root)

        args = _makeArgs(show=["workflow"])
        fwk.showInfo(args, pipeline=None, graph=qgraph)

        # TODO: cannot test the "uri" option at present; it instantiates a
        # butler from the command line options and there is no way to pass a
        # butler mock to that code.


class MyMemoryTestCase(lsst.utils.tests.MemoryTestCase):
    pass


def setup_module(module):
    lsst.utils.tests.init()


if __name__ == "__main__":
    lsst.utils.tests.init()
    unittest.main()