# This file is part of ctrl_mpexec.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

"""Module defining CmdLineFwk class and related methods.
"""

__all__ = ['CmdLineFwk']

# -------------------------------
# Imports of standard modules --
# -------------------------------
import fnmatch
import logging
import pickle
import re
import sys
import warnings

# -----------------------------
# Imports for other modules --
# -----------------------------
from lsst.daf.butler import Butler, DatasetOriginInfoDef
import lsst.log
import lsst.pex.config as pexConfig
from lsst.pipe.base import GraphBuilder, PipelineBuilder, Pipeline, QuantumGraph
from .cmdLineParser import makeParser
from .dotTools import graph2dot, pipeline2dot
from .mpGraphExecutor import MPGraphExecutor
from .preExecInit import PreExecInit
from .taskFactory import TaskFactory
from .taskLoader import (TaskLoader, KIND_PIPELINETASK)
from . import util

# ----------------------------------
# Local non-exported definitions --
# ----------------------------------

# logging properties
_LOG_PROP = """\
log4j.rootLogger=INFO, A1
log4j.appender.A1=ConsoleAppender
log4j.appender.A1.Target=System.err
log4j.appender.A1.layout=PatternLayout
log4j.appender.A1.layout.ConversionPattern={}
"""

 

_LOG = logging.getLogger(__name__.partition(".")[2])

# ------------------------
# Exported definitions --
# ------------------------


class CmdLineFwk:
    """PipelineTask framework which executes tasks from the command line.

    In addition to executing tasks, this activator provides methods for task
    management, such as dumping the configuration or the execution chain.
    """

    MP_TIMEOUT = 9999  # Default timeout (sec) for multiprocessing

    def __init__(self):
        pass
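
    # Illustrative use from a command-line activator script (a sketch only,
    # not code taken from this module):
    #
    #     sys.exit(CmdLineFwk().parseAndRun())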

 

    def parseAndRun(self, argv=None):
        """
        This method is the main entry point for this class; it parses the
        command line and executes all commands.

        Parameters
        ----------
        argv : `list` of `str`, optional
            List of command line arguments; if not specified then
            `sys.argv[1:]` is used.
        """

 

        if argv is None:
            argv = sys.argv[1:]

        # start with parsing command line, only do partial parsing now as
        # the tasks can add more arguments later
        parser = makeParser()
        args = parser.parse_args(argv)

        # First thing to do is to setup logging.
        self.configLog(args.longlog, args.loglevel)

        taskLoader = TaskLoader(args.packages)
        taskFactory = TaskFactory(taskLoader)

        if args.subcommand == "list":
            # just dump some info about where things may be found
            return self.doList(taskLoader, args.show, args.show_headers)

        # make pipeline out of command line arguments (can return empty pipeline)
        try:
            pipeline = self.makePipeline(taskFactory, args)
        except Exception as exc:
            print("Failed to build pipeline: {}".format(exc), file=sys.stderr)
            raise

        if args.subcommand == "build":
            # stop here but process --show option first
            self.showInfo(args.show, pipeline, None)
            return 0

        # make quantum graph
        try:
            qgraph = self.makeGraph(pipeline, taskFactory, args)
        except Exception as exc:
            print("Failed to build graph: {}".format(exc), file=sys.stderr)
            raise

        # optionally dump some info
        self.showInfo(args.show, pipeline, qgraph)

        if args.subcommand == "qgraph":
            # stop here
            return 0

        # execute
        if args.subcommand == "run":
            return self.runPipeline(qgraph, taskFactory, args)

 

    @staticmethod
    def configLog(longlog, logLevels):
        """Configure logging system.

        Parameters
        ----------
        longlog : `bool`
            If True then make log messages appear in "long format".
        logLevels : `list` of `tuple`
            Per-component logging levels; each item in the list is a tuple
            (component, level), where `component` is a logger name or `None`
            for the root logger, and `level` is a logging level name
            ('DEBUG', 'INFO', etc.)
        """

        if longlog:
            message_fmt = "%-5p %d{yyyy-MM-ddThh:mm:ss.sss} %c (%X{LABEL})(%F:%L)- %m%n"
        else:
            message_fmt = "%c %p: %m%n"

        # global logging config
        lsst.log.configure_prop(_LOG_PROP.format(message_fmt))

        # Forward all Python logging to lsst.log
        lgr = logging.getLogger()
        lgr.setLevel(logging.INFO)  # same as in log4cxx config above
        lgr.addHandler(lsst.log.LogHandler())

        # also capture warnings and send them to logging
        logging.captureWarnings(True)

        # configure individual loggers
        for component, level in logLevels:
            level = getattr(lsst.log.Log, level.upper(), None)
            if level is not None:
                # set logging level for lsst.log
                logger = lsst.log.Log.getLogger(component or "")
                logger.setLevel(level)
                # set logging level for Python logging
                pyLevel = lsst.log.LevelTranslator.lsstLog2logging(level)
                logging.getLogger(component).setLevel(pyLevel)

 

    def doList(self, taskLoader, show, show_headers):
        """Implementation of the "list" command.

        Parameters
        ----------
        taskLoader : `TaskLoader`
            Task class loader.
        show : `list` of `str`
            List of items to show.
        show_headers : `bool`
            True to display additional headers.
        """

        if not show:
            show = ["pipeline-tasks"]

        if "packages" in show:
            if show_headers:
                print()
                print("Modules search path")
                print("-------------------")
            for pkg in sorted(taskLoader.packages):
                print(pkg)

        if "modules" in show:
            try:
                modules = taskLoader.modules()
            except ImportError as exc:
                print("Failed to import package, check --packages option or $PYTHONPATH:", exc,
                      file=sys.stderr)
                return 2
            modules = [(name, "package" if flag else "module") for name, flag in sorted(modules)]
            headers = None
            if show_headers:
                print()
                headers = ("Module or package name", "Type ")
            util.printTable(modules, headers)

        if "tasks" in show or "pipeline-tasks" in show:
            try:
                tasks = taskLoader.tasks()
            except ImportError as exc:
                print("Failed to import package, check --packages option or $PYTHONPATH:", exc,
                      file=sys.stderr)
                return 2

            if "tasks" not in show:
                # only show pipeline-tasks
                tasks = [(name, kind) for name, kind in tasks if kind == KIND_PIPELINETASK]
            tasks.sort()

            headers = None
            if show_headers:
                print()
                headers = ("Task class name", "Kind ")
            util.printTable(tasks, headers)

 

    def makePipeline(self, taskFactory, args):
        """Build a pipeline from command line arguments.

        Parameters
        ----------
        taskFactory : `~lsst.pipe.base.TaskFactory`
            Task factory.
        args : `argparse.Namespace`
            Parsed command line.

        Returns
        -------
        pipeline : `~lsst.pipe.base.Pipeline`
            The constructed pipeline; it can be empty.
        """
        # read existing pipeline from pickle file
        pipeline = None
        if args.pipeline:
            with open(args.pipeline, 'rb') as pickleFile:
                pipeline = pickle.load(pickleFile)
                if not isinstance(pipeline, Pipeline):
                    raise TypeError("Pipeline pickle file has incorrect object type: {}".format(
                        type(pipeline)))

        pipeBuilder = PipelineBuilder(taskFactory, pipeline)

        # loop over all pipeline actions and apply them in order
        for action in args.pipeline_actions:

            if action.action == "new_task":
                pipeBuilder.addTask(action.value, action.label)
            elif action.action == "delete_task":
                pipeBuilder.deleteTask(action.label)
            elif action.action == "move_task":
                pipeBuilder.moveTask(action.label, action.value)
            elif action.action == "relabel":
                pipeBuilder.labelTask(action.label, action.value)
            elif action.action == "config":
                pipeBuilder.configOverride(action.label, action.value)
            elif action.action == "configfile":
                pipeBuilder.configOverrideFile(action.label, action.value)
            elif action.action == 'name_templates':
                pipeBuilder.substituteDatatypeNames(action.label, action.value)
            else:
                raise ValueError(f"Unexpected pipeline action: {action.action}")

        pipeline = pipeBuilder.pipeline(args.order_pipeline)

        if args.save_pipeline:
            with open(args.save_pipeline, "wb") as pickleFile:
                pickle.dump(pipeline, pickleFile)

        if args.pipeline_dot:
            pipeline2dot(pipeline, args.pipeline_dot, taskFactory)

        return pipeline

 

    def makeGraph(self, pipeline, taskFactory, args):
        """Build a graph from command line arguments.

        Parameters
        ----------
        pipeline : `~lsst.pipe.base.Pipeline`
            Pipeline, can be empty or ``None`` if graph is read from pickle
            file.
        taskFactory : `~lsst.pipe.base.TaskFactory`
            Task factory.
        args : `argparse.Namespace`
            Parsed command line.

        Returns
        -------
        graph : `~lsst.pipe.base.QuantumGraph`
            The constructed execution graph.
        """
        if args.qgraph:

            with open(args.qgraph, 'rb') as pickleFile:
                qgraph = pickle.load(pickleFile)
                if not isinstance(qgraph, QuantumGraph):
                    raise TypeError("QuantumGraph pickle file has incorrect object type: {}".format(
                        type(qgraph)))

            # pipeline cannot be provided in this case
            if pipeline:
                raise ValueError("Pipeline must not be given when quantum graph is read from file.")

        else:

            if not pipeline:
                raise ValueError("Pipeline must be given for quantum graph construction.")

            # build collection names
            inputs = args.input.copy()
            defaultInputs = inputs.pop("", None)
            outputs = args.output.copy()
            defaultOutputs = outputs.pop("", None)
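
            # args.input and args.output are expected to be dicts mapping a
            # dataset type name to a collection name, with the empty key ("")
            # carrying the default input/output collection(s).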

 

            # Make butler instance. From this Butler we only need Registry
            # instance. Input/output collections are handled by pre-flight
            # and we don't want to be constrained here by Butler's restrictions
            # on collection names.
            collection = defaultInputs[0] if defaultInputs else None
            butler = Butler(config=args.butler_config, collection=collection)

            # if default input collections are not given on command line then
            # use one from Butler (has to be configured in butler config)
            if not defaultInputs:
                defaultInputs = [butler.collection]
            coll = DatasetOriginInfoDef(defaultInputs=defaultInputs,
                                        defaultOutput=defaultOutputs,
                                        inputOverrides=inputs,
                                        outputOverrides=outputs)

            # make execution plan (a.k.a. DAG) for pipeline
            graphBuilder = GraphBuilder(taskFactory, butler.registry, args.skip_existing)
            qgraph = graphBuilder.makeGraph(pipeline, coll, args.data_query)

        # count quanta in graph and give a warning if it's empty
        nQuanta = sum(1 for q in qgraph.quanta())
        if nQuanta == 0:
            warnings.warn("QuantumGraph is empty", stacklevel=2)
        else:
            _LOG.info("QuantumGraph contains %d quanta for %d tasks",
                      nQuanta, len(qgraph))

        if args.save_qgraph:
            with open(args.save_qgraph, "wb") as pickleFile:
                pickle.dump(qgraph, pickleFile)

        if args.qgraph_dot:
            graph2dot(qgraph, args.qgraph_dot)

        return qgraph

 

    def runPipeline(self, graph, taskFactory, args):
        """Execute complete QuantumGraph.

        Parameters
        ----------
        graph : `QuantumGraph`
            Execution graph.
        taskFactory : `~lsst.pipe.base.TaskFactory`
            Task factory.
        args : `argparse.Namespace`
            Parsed command line.
        """

        # If default output collection is given then use it to override
        # butler-configured one.
        run = args.output.get("", None)

        # make butler instance
        butler = Butler(config=args.butler_config, run=run)

        # at this point we require that output collection was defined
        if not butler.run:
            raise ValueError("no output collection defined in data butler")

        preExecInit = PreExecInit(butler)
        preExecInit.initialize(graph, taskFactory,
                               registerDatasetTypes=args.register_dataset_types,
                               saveInitOutputs=not args.skip_init_writes,
                               updateOutputCollection=True)

        if not args.init_only:
            executor = MPGraphExecutor(numProc=args.processes, timeout=self.MP_TIMEOUT)
            executor.execute(graph, butler, taskFactory)

 

    def showInfo(self, showOpts, pipeline, graph):
        """Display useful info about pipeline and environment.

        Parameters
        ----------
        showOpts : `list` of `str`
            Defines what to show.
        pipeline : `Pipeline`
            Pipeline definition.
        graph : `QuantumGraph`
            Execution graph.
        """

 

        for what in showOpts:
            showCommand, _, showArgs = what.partition("=")

            if showCommand in ["pipeline", "config", "history", "tasks"]:
                if not pipeline:
                    _LOG.warning("Pipeline is required for --show=%s", showCommand)
                    continue

            if showCommand == "pipeline":
                for taskDef in pipeline:
                    print(taskDef)
            elif showCommand == "config":
                self._showConfig(pipeline, showArgs)
            elif showCommand == "history":
                self._showConfigHistory(pipeline, showArgs)
            elif showCommand == "tasks":
                self._showTaskHierarchy(pipeline)
            elif showCommand == "graph":
                if graph:
                    self._showGraph(graph)
            else:
                print("Unknown value for show: %s (choose from '%s')" %
                      (what, "', '".join("pipeline config[=XXX] history=XXX tasks graph".split())),
                      file=sys.stderr)
                sys.exit(1)

 

    def _showConfig(self, pipeline, showArgs):
        """Show task configuration.

        Parameters
        ----------
        pipeline : `Pipeline`
            Pipeline definition.
        showArgs : `str`
            Defines what to show.
        """

        matConfig = re.search(r"^(?:(\w+)::)?(?:config.)?(.+)?", showArgs)
        taskName = matConfig.group(1)
        pattern = matConfig.group(2)
        if pattern:
            class FilteredStream:
                """A file object that only prints lines that match the glob "pattern"

                N.b. Newlines are silently discarded and reinserted; crude but effective.
                """

                def __init__(self, pattern):
                    # obey case if pattern isn't lowercase or requests NOIGNORECASE
                    mat = re.search(r"(.*):NOIGNORECASE$", pattern)

                    if mat:
                        pattern = mat.group(1)
                        self._pattern = re.compile(fnmatch.translate(pattern))
                    else:
                        if pattern != pattern.lower():
                            print(u"Matching \"%s\" without regard to case "
                                  "(append :NOIGNORECASE to prevent this)" % (pattern,), file=sys.stdout)
                        self._pattern = re.compile(fnmatch.translate(pattern), re.IGNORECASE)

                def write(self, showStr):
                    showStr = showStr.rstrip()
                    # Strip off doc string line(s) and cut off at "=" for string matching
                    matchStr = showStr.split("\n")[-1].split("=")[0]
                    if self._pattern.search(matchStr):
                        print(u"\n" + showStr)

            fd = FilteredStream(pattern)
        else:
            fd = sys.stdout

        tasks = util.filterTasks(pipeline, taskName)
        if not tasks:
            print("Pipeline has no tasks named {}".format(taskName), file=sys.stderr)
            sys.exit(1)

        for taskDef in tasks:
            print("### Configuration for task `{}'".format(taskDef.taskName))
            taskDef.config.saveToStream(fd, "config")

 

    def _showConfigHistory(self, pipeline, showArgs):
        """Show history for task configuration.

        Parameters
        ----------
        pipeline : `Pipeline`
            Pipeline definition.
        showArgs : `str`
            Defines what to show.
        """

 

        taskName = None
        pattern = None
        matHistory = re.search(r"^(?:(\w+)::)(?:config[.])?(.+)", showArgs)
        if matHistory:
            taskName = matHistory.group(1)
            pattern = matHistory.group(2)
        print(showArgs, taskName, pattern)
        if not pattern:
            print("Please provide a value with --show history (e.g. history=Task::param)", file=sys.stderr)
            sys.exit(1)

        tasks = util.filterTasks(pipeline, taskName)
        if not tasks:
            print("Pipeline has no tasks named {}".format(taskName), file=sys.stderr)
            sys.exit(1)

        pattern = pattern.split(".")
        cpath, cname = pattern[:-1], pattern[-1]
        found = False
        for taskDef in tasks:
            hconfig = taskDef.config
            for i, cpt in enumerate(cpath):
                hconfig = getattr(hconfig, cpt, None)
                if hconfig is None:
                    break

            if hconfig is not None and hasattr(hconfig, cname):
                print("### Configuration field for task `{}'".format(taskDef.taskName))
                print(pexConfig.history.format(hconfig, cname))
                found = True

        if not found:
            print("None of the tasks has a field named {}".format(showArgs), file=sys.stderr)
            sys.exit(1)

 

    def _showTaskHierarchy(self, pipeline):
        """Print task hierarchy to stdout.

        Parameters
        ----------
        pipeline : `Pipeline`
            Pipeline definition.
        """
        for taskDef in pipeline:
            print("### Subtasks for task `{}'".format(taskDef.taskName))

            for configName, taskName in util.subTaskIter(taskDef.config):
                print("{}: {}".format(configName, taskName))

 

    def _showGraph(self, graph):
        """Print quantum graph to stdout.

        Parameters
        ----------
        graph : `QuantumGraph`
            Execution graph.
        """
        for taskNodes in graph:
            print(taskNodes.taskDef)

            for iq, quantum in enumerate(taskNodes.quanta):
                print("  Quantum {}:".format(iq))
                print("    inputs:")
                for key, refs in quantum.predictedInputs.items():
                    dataIds = ["DataId({})".format(ref.dataId) for ref in refs]
                    print("      {}: [{}]".format(key, ", ".join(dataIds)))
                print("    outputs:")
                for key, refs in quantum.outputs.items():
                    dataIds = ["DataId({})".format(ref.dataId) for ref in refs]
                    print("      {}: [{}]".format(key, ", ".join(dataIds)))